text stringlengths 4 1.02M | meta dict |
|---|---|
from django.contrib import messages
from django.contrib.contenttypes.models import ContentType
from django.http import HttpResponseRedirect, HttpResponse
from django.shortcuts import render, get_object_or_404, Http404
from django.contrib.auth.decorators import login_required
from .models import Comment
from .forms import CommentForm
@login_required(login_url='login')
def comment_delete(request, id):
    """
    Confirm (GET) and perform (POST) deletion of a single comment.

    Only the comment's author may delete it; any other user receives a 403
    response.  After deletion the user is redirected to the page of the
    object the comment was attached to.
    """
    try:
        comment = Comment.objects.get(id=id)
    except Comment.DoesNotExist:
        # Narrowed from a bare ``except:`` which also hid programming
        # errors (e.g. a bad ``id`` type) behind a 404.
        raise Http404
    if comment.user != request.user:
        # Only the comment's author may delete it.
        response = HttpResponse("You do not have permission to view this.")
        response.status_code = 403
        return response
    if request.method == "POST":
        # Capture the parent object's URL before the comment is deleted.
        parent_obj_url = comment.content_object.get_absolute_url()
        comment.delete()
        messages.success(request, "This has been deleted ")
        return HttpResponseRedirect(parent_obj_url)
    context = {
        "comment": comment,
    }
    return render(request, "comment_delete.html", context)
# Create your views here.
def comment_thread(request, id):
    """
    Display a comment thread and handle posting of replies.

    Requests for a child comment are redirected to its parent's thread.
    A valid POST from an authenticated user creates a (possibly nested)
    reply and redirects back to the commented object's page.
    """
    try:
        comment = Comment.objects.get(id=id)
    except Comment.DoesNotExist:
        # Narrowed from a bare ``except:`` so unrelated errors propagate.
        raise Http404
    if not comment.is_parent:
        # Threads are always rooted at a top-level (parent) comment.
        comment = comment.parent
    initial_data = {
        "content_type": comment.content_type,
        "object_id": comment.object_id
    }
    comment_form = CommentForm(request.POST or None, initial=initial_data)
    if comment_form.is_valid() and request.user.is_authenticated():
        c_type = comment_form.cleaned_data.get('content_type')
        content_type = ContentType.objects.get(model=c_type)
        obj_id = comment_form.cleaned_data.get('object_id')
        content_data = comment_form.cleaned_data.get('content')
        parent_obj = None
        try:
            parent_id = int(request.POST.get("parent_id"))
        except (TypeError, ValueError):
            # Missing (None) or non-numeric parent_id => top-level comment.
            parent_id = None
        if parent_id:
            parent_qs = Comment.objects.filter(id=parent_id)
            if parent_qs.exists() and parent_qs.count() == 1:
                parent_obj = parent_qs.first()
        new_comment, created = Comment.objects.get_or_create(
            user=request.user,
            content_type=content_type,
            object_id=obj_id,
            content=content_data,
            parent=parent_obj,
        )
        return HttpResponseRedirect(new_comment.content_object.get_absolute_url())
    context = {
        "comment": comment,
        "comment_form": comment_form,
    }
    return render(request, "comment_thread.html", context)
"content_hash": "287be702393ee8e6700ca91afd093c31",
"timestamp": "",
"source": "github",
"line_count": 79,
"max_line_length": 82,
"avg_line_length": 34.848101265822784,
"alnum_prop": 0.5909916454776607,
"repo_name": "alien3211/lom-web",
"id": "8f0394cf7bae407b2cd7d77348f6d407eb1a5b8c",
"size": "2753",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "comments/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "135552"
},
{
"name": "HTML",
"bytes": "41653"
},
{
"name": "JavaScript",
"bytes": "713328"
},
{
"name": "Python",
"bytes": "61884"
}
],
"symlink_target": ""
} |
"""Miscellaneous file manipulation functions
"""
from __future__ import print_function, division, unicode_literals, absolute_import
from builtins import str, bytes, open
from future import standard_library
standard_library.install_aliases()
import sys
import pickle
import gzip
import hashlib
from hashlib import md5
import os
import re
import shutil
import posixpath
import simplejson as json
import numpy as np
from .. import logging, config
from .misc import is_container
from ..interfaces.traits_extension import isdefined
fmlogger = logging.getLogger("filemanip")
# Groups of extensions that form one logical image on disk: operating on
# one member (copy/rename) should also affect its companions
# (Nifti-pair/Analyze .hdr/.img/.mat and AFNI .BRIK/.HEAD).
related_filetype_sets = [
    ('.hdr', '.img', '.mat'),
    ('.BRIK', '.HEAD'),
]
# Local exception kept for Python 2 compatibility.  NOTE: on Python 3 this
# shadows the builtin ``FileNotFoundError`` within this module.
class FileNotFoundError(Exception):
    pass
def split_filename(fname):
    """Split a filename into parts: path, base filename and extension.

    Compound extensions such as ``.nii.gz`` and ``.tar.gz`` are kept
    whole instead of being split at the last dot.

    Parameters
    ----------
    fname : str
        file or path name

    Returns
    -------
    pth : str
        base path from fname
    fname : str
        filename from fname, without extension
    ext : str
        file extension from fname

    Examples
    --------
    >>> from nipype.utils.filemanip import split_filename
    >>> pth, fname, ext = split_filename('/home/data/subject.nii.gz')
    >>> pth # doctest: +IGNORE_UNICODE
    '/home/data'
    >>> fname # doctest: +IGNORE_UNICODE
    'subject'
    >>> ext # doctest: +IGNORE_UNICODE
    '.nii.gz'
    """
    directory = os.path.dirname(fname)
    basename = os.path.basename(fname)
    lowered = basename.lower()
    # Check the compound extensions first; os.path.splitext would split
    # them at the wrong dot.
    for compound_ext in ('.nii.gz', '.tar.gz'):
        tail = len(compound_ext)
        if len(basename) > tail and lowered.endswith(compound_ext):
            return directory, basename[:-tail], basename[-tail:]
    stem, ext = os.path.splitext(basename)
    return directory, stem, ext
def to_str(value):
    """
    Manipulates ordered dicts before they are hashed (Py2/3 compat.)

    On Python 3 a plain str() is sufficient; on Python 2 the value is
    normalised by :func:`to_str_py27` so reprs match Python 3.
    """
    if sys.version_info[0] > 2:
        return str(value)
    return to_str_py27(value)
def to_str_py27(value):
    """
    Encode dictionary for python 2

    Recursively renders dicts/lists/tuples to a string, stripping the
    ``u`` prefix from unicode reprs so Python 2 output (and therefore
    hashes built from it) matches Python 3's ``str()`` output.
    """
    if isinstance(value, dict):
        entry = '{}: {}'.format
        retval = '{'
        for key, val in list(value.items()):
            if len(retval) > 1:
                retval += ', '
            kenc = repr(key)
            # Drop the unicode-literal prefix (u'...') for py2/py3 parity.
            if kenc.startswith(("u'", 'u"')):
                kenc = kenc[1:]
            venc = to_str_py27(val)
            if venc.startswith(("u'", 'u"')):
                venc = venc[1:]
            retval+= entry(kenc, venc)
        retval += '}'
        return retval
    istuple = isinstance(value, tuple)
    if isinstance(value, (tuple, list)):
        retval = '(' if istuple else '['
        nels = len(value)
        for i, v in enumerate(value):
            venc = to_str_py27(v)
            if venc.startswith(("u'", 'u"')):
                venc = venc[1:]
            retval += venc
            if i < nels - 1:
                retval += ', '
        # Python renders one-element tuples with a trailing comma: (x,)
        if istuple and nels == 1:
            retval += ','
        retval += ')' if istuple else ']'
        return retval
    # Scalar fallback: repr() then decode (py2 str -> unicode), strip prefix.
    retval = repr(value).decode()
    if retval.startswith(("u'", 'u"')):
        retval = retval[1:]
    return retval
def fname_presuffix(fname, prefix='', suffix='', newpath=None, use_ext=True):
    """Manipulates path and name of input filename

    Parameters
    ----------
    fname : string
        A filename (may or may not include path)
    prefix : string
        Characters to prepend to the filename
    suffix : string
        Characters to append to the filename
    newpath : string
        Path to replace the path of the input fname
    use_ext : boolean
        If True (default), appends the extension of the original file
        to the output name.

    Returns
    -------
    Absolute path of the modified filename

    >>> from nipype.utils.filemanip import fname_presuffix
    >>> fname = 'foo.nii.gz'
    >>> fname_presuffix(fname,'pre','post','/tmp') # doctest: +IGNORE_UNICODE
    '/tmp/prefoopost.nii.gz'
    """
    directory, stem, ext = split_filename(fname)
    if not use_ext:
        ext = ''
    # ``isdefined`` guards against traits' Undefined sentinel being passed.
    if newpath and isdefined(newpath):
        directory = os.path.abspath(newpath)
    return os.path.join(directory, ''.join((prefix, stem, suffix, ext)))
def fnames_presuffix(fnames, prefix='', suffix='', newpath=None, use_ext=True):
    """Calls fname_presuffix for a list of files.
    """
    return [fname_presuffix(fname, prefix, suffix, newpath, use_ext)
            for fname in fnames]
def hash_rename(filename, hashvalue):
    """renames a file given original filename and hash
    and sets path to output_directory

    The hash is embedded as a ``_0x<hash>`` marker before the extension.
    """
    path, name, ext = split_filename(filename)
    return os.path.join(path, '{}_0x{}{}'.format(name, hashvalue, ext))
def check_forhash(filename):
    """checks if file has a hash in its filename

    Returns ``(True, [matches])`` when a ``_0x<32 hex chars>`` marker is
    present in the basename, ``(False, None)`` otherwise.
    """
    if isinstance(filename, list):
        filename = filename[0]
    _, name = os.path.split(filename)
    found = re.findall('(_0x[a-z0-9]{32})', name)
    if found:
        return True, found
    return False, None
def hash_infile(afile, chunk_len=8192, crypto=hashlib.md5):
    """ Computes hash of a file using 'crypto' module

    The file is streamed in ``chunk_len`` byte blocks so arbitrarily large
    files can be hashed.  Returns ``None`` when *afile* is not a regular
    file.
    """
    if not os.path.isfile(afile):
        return None
    hasher = crypto()
    with open(afile, 'rb') as stream:
        # iter() with a sentinel yields chunks until read() returns b''.
        for block in iter(lambda: stream.read(chunk_len), b''):
            hasher.update(block)
    return hasher.hexdigest()
def hash_timestamp(afile):
    """ Computes md5 hash of the timestamp of a file

    The digest covers file size and mtime, not content.  Returns ``None``
    when *afile* is not a regular file.
    """
    if not os.path.isfile(afile):
        return None
    info = os.stat(afile)
    digest = md5()
    digest.update(str(info.st_size).encode())
    digest.update(str(info.st_mtime).encode())
    return digest.hexdigest()
def copyfile(originalfile, newfile, copy=False, create_new=False,
             hashmethod=None, use_hardlink=False,
             copy_related_files=True):
    """Copy or link ``originalfile`` to ``newfile``.

    If ``use_hardlink`` is True, and the file can be hard-linked, then a
    link is created, instead of copying the file.

    If a hard link is not created and ``copy`` is False, then a symbolic
    link is created.

    Parameters
    ----------
    originalfile : str
        full path to original file
    newfile : str
        full path to new file
    copy : Bool
        specifies whether to copy or symlink files
        (default=False) but only for POSIX systems
    create_new : Bool
        if True, avoid overwriting an existing destination by appending
        an incrementing ``_cNNNN`` counter to the name (default=False)
    hashmethod : str or None
        'timestamp' or 'content'; defaults to the configured
        ``execution.hash_method``
    use_hardlink : Bool
        specifies whether to hard-link files, when able
        (Default=False), taking precedence over copy
    copy_related_files : Bool
        specifies whether to also operate on related files, as defined in
        ``related_filetype_sets``

    Returns
    -------
    newfile : str
        the destination path actually used (may differ from the input
        ``newfile`` when ``create_new`` renamed it)
    """
    newhash = None
    orighash = None
    fmlogger.debug(newfile)

    if create_new:
        # Bump a _cNNNN counter until the destination does not exist.
        while os.path.exists(newfile):
            base, fname, ext = split_filename(newfile)
            s = re.search('_c[0-9]{4,4}$', fname)
            i = 0
            if s:
                i = int(s.group()[2:]) + 1
                fname = fname[:-6] + "_c%04d" % i
            else:
                fname += "_c%04d" % i
            newfile = base + os.sep + fname + ext

    if hashmethod is None:
        hashmethod = config.get('execution', 'hash_method').lower()

    # Existing file
    # -------------
    # Options:
    #   symlink
    #       to regular file originalfile            (keep if symlinking)
    #       to same dest as symlink originalfile    (keep if symlinking)
    #       to other file                           (unlink)
    #   regular file
    #       hard link to originalfile               (keep)
    #       copy of file (same hash)                (keep)
    #       different file (diff hash)              (unlink)
    keep = False
    if os.path.lexists(newfile):
        if os.path.islink(newfile):
            if all((os.readlink(newfile) == os.path.realpath(originalfile),
                    not use_hardlink, not copy)):
                keep = True
        elif posixpath.samefile(newfile, originalfile):
            keep = True
        else:
            if hashmethod == 'timestamp':
                hashfn = hash_timestamp
            elif hashmethod == 'content':
                hashfn = hash_infile
            newhash = hashfn(newfile)
            fmlogger.debug("File: %s already exists,%s, copy:%d" %
                           (newfile, newhash, copy))
            orighash = hashfn(originalfile)
            keep = newhash == orighash
        if keep:
            fmlogger.debug("File: %s already exists, not overwriting, copy:%d"
                           % (newfile, copy))
        else:
            os.unlink(newfile)

    # New file
    # --------
    # use_hardlink & can_hardlink => hardlink
    # ~hardlink & ~copy & can_symlink => symlink
    # ~hardlink & ~symlink => copy
    if not keep and use_hardlink:
        try:
            fmlogger.debug("Linking File: %s->%s" % (newfile, originalfile))
            # Use realpath to avoid hardlinking symlinks
            os.link(os.path.realpath(originalfile), newfile)
        except OSError:
            use_hardlink = False  # Disable hardlink for associated files
        else:
            keep = True

    if not keep and not copy and os.name == 'posix':
        try:
            fmlogger.debug("Symlinking File: %s->%s" % (newfile, originalfile))
            os.symlink(originalfile, newfile)
        except OSError:
            copy = True  # Disable symlink for associated files
        else:
            keep = True

    if not keep:
        try:
            fmlogger.debug("Copying File: %s->%s" % (newfile, originalfile))
            shutil.copyfile(originalfile, newfile)
        except shutil.Error as e:
            # BUG FIX: the original logged ``e.message``, which does not
            # exist on Python 3 and raised AttributeError inside this
            # handler; also use the non-deprecated ``warning`` method.
            fmlogger.warning(str(e))

    # Associated files
    if copy_related_files:
        related_file_pairs = (get_related_files(f, include_this_file=False)
                              for f in (originalfile, newfile))
        for alt_ofile, alt_nfile in zip(*related_file_pairs):
            if os.path.exists(alt_ofile):
                copyfile(alt_ofile, alt_nfile, copy, hashmethod=hashmethod,
                         use_hardlink=use_hardlink, copy_related_files=False)

    return newfile
def get_related_files(filename, include_this_file=True):
    """Returns a list of related files, as defined in
    ``related_filetype_sets``, for a filename. (e.g., Nifti-Pair, Analyze (SPM)
    and AFNI files).

    Parameters
    ----------
    filename : str
        File name to find related filetypes of.
    include_this_file : bool
        If true, output includes the input filename.
    """
    path, name, this_type = split_filename(filename)
    matches = [
        os.path.join(path, name + related_type)
        for type_set in related_filetype_sets if this_type in type_set
        for related_type in type_set
        if include_this_file or related_type != this_type
    ]
    # A file with no known companions is "related" only to itself.
    return matches if matches else [filename]
def copyfiles(filelist, dest, copy=False, create_new=False):
    """Copy or symlink files in ``filelist`` to ``dest`` directory.

    Parameters
    ----------
    filelist : list
        List of files to copy.
    dest : path/files
        full path to destination. If it is a list of length greater
        than 1, then it assumes that these are the names of the new
        files.
    copy : Bool
        specifies whether to copy or symlink files
        (default=False) but only for posix systems

    Returns
    -------
    None
    """
    destinations = filename_to_list(dest)
    newfiles = []
    for i, entry in enumerate(filename_to_list(filelist)):
        if isinstance(entry, list):
            # Nested lists are copied recursively, preserving structure.
            target = copyfiles(entry, dest, copy=copy, create_new=create_new)
        else:
            if len(destinations) > 1:
                # Explicit per-file destination names were given.
                target = destinations[i]
            else:
                target = fname_presuffix(entry, newpath=destinations[0])
            target = copyfile(entry, target, copy, create_new=create_new)
        newfiles.append(target)
    return newfiles
def filename_to_list(filename):
    """Returns a list given either a string or a list

    Strings/bytes are wrapped in a one-element list; lists pass through;
    other containers are materialised into a list; anything else -> None.
    """
    if isinstance(filename, (str, bytes)):
        return [filename]
    if isinstance(filename, list):
        return filename
    if is_container(filename):
        return list(filename)
    return None
def list_to_filename(filelist):
    """Returns a list if filelist is a list of length greater than 1,
    otherwise returns the first element
    """
    return filelist if len(filelist) > 1 else filelist[0]
def save_json(filename, data):
    """Save data to a json file

    Parameters
    ----------
    filename : str
        Filename to save data in.
    data : dict
        Dictionary to save in json file.
    """
    # Python 2 json writes bytes, hence the binary mode there.
    if sys.version_info[0] < 3:
        mode = 'wb'
    else:
        mode = 'w'
    with open(filename, mode) as fp:
        json.dump(data, fp, sort_keys=True, indent=4)
def load_json(filename):
    """Load data from a json file

    Parameters
    ----------
    filename : str
        Filename to load data from.

    Returns
    -------
    data : dict
    """
    with open(filename, 'r') as fp:
        return json.load(fp)
def loadcrash(infile, *args):
    """Load a crash file: pickled ('.pkl'/'.pklz') or legacy numpy '.npz'."""
    if '.pkl' in infile:
        return loadpkl(infile)
    if '.npz' in infile:
        # NOTE: instantiated but never raised/warned in the original either.
        DeprecationWarning(('npz files will be deprecated in the next '
                            'release. you can use numpy to open them.'))
        data = np.load(infile)
        out = {}
        for k in data.files:
            values = [f for f in data[k].flat]
            # Unwrap single-element arrays to a scalar, as before.
            out[k] = values.pop() if len(values) == 1 else values
        return out
    raise ValueError('Only pickled crashfiles are supported')
def loadpkl(infile):
    """Load a zipped or plain cPickled file
    """
    fmlogger.debug('Loading pkl: %s', infile)
    opener = gzip.open if infile.endswith('pklz') else open
    pkl_file = opener(infile, 'rb')
    try:
        return pickle.load(pkl_file)
    except UnicodeDecodeError:
        # Fall back for pickles written by Python 2.
        return pickle.load(pkl_file, fix_imports=True, encoding='utf-8')
def savepkl(filename, record):
    """Pickle *record* to *filename*, gzip-compressed when the name ends
    in 'pklz'."""
    opener = gzip.open if filename.endswith('pklz') else open
    with opener(filename, 'wb') as pkl_file:
        pickle.dump(record, pkl_file)
# Underline characters for successive ReST heading levels.
rst_levels = ['=', '-', '~', '+']


def write_rst_header(header, level=0):
    """Return *header* underlined with the ReST adornment for *level*."""
    underline = rst_levels[level] * len(header)
    return '{}\n{}\n\n'.format(header, underline)
def write_rst_list(items, prefix=''):
    """Render *items* as one ReST list line each, prefixed by *prefix*."""
    lines = ['{} {}'.format(prefix, str(item)) for item in items]
    return '\n'.join(lines) + '\n\n'
def write_rst_dict(info, prefix=''):
    """Render a mapping as a key-sorted ReST bullet list of pairs."""
    lines = ['{}* {} : {}'.format(prefix, key, str(value))
             for key, value in sorted(info.items())]
    return '\n'.join(lines) + '\n\n'
| {
"content_hash": "f62b1bff33f47904f86a4d0e6ccd2a9f",
"timestamp": "",
"source": "github",
"line_count": 544,
"max_line_length": 82,
"avg_line_length": 28.514705882352942,
"alnum_prop": 0.5660778751933987,
"repo_name": "carolFrohlich/nipype",
"id": "7cf81c06495b8ea1e002f692cca0c75d0e7cabad",
"size": "15650",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nipype/utils/filemanip.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "9823"
},
{
"name": "KiCad",
"bytes": "3797"
},
{
"name": "Makefile",
"bytes": "2320"
},
{
"name": "Matlab",
"bytes": "1717"
},
{
"name": "Python",
"bytes": "5451077"
},
{
"name": "Shell",
"bytes": "3302"
},
{
"name": "Tcl",
"bytes": "43408"
}
],
"symlink_target": ""
} |
import ccxt.pro
from asyncio import gather, run
async def symbol_loop(exchange, method, symbol):
    """Poll one websocket stream (*method*) for one *symbol* forever,
    printing a one-line summary per update."""
    print('Starting', exchange.id, method, symbol)
    while True:
        try:
            response = await getattr(exchange, method)(symbol)
            stamp = exchange.iso8601(exchange.milliseconds())
            if method == 'watchOrderBook':
                print(stamp, exchange.id, method, symbol, response['asks'][0], response['bids'][0])
            elif method == 'watchTicker':
                print(stamp, exchange.id, method, symbol, response['high'], response['low'], response['bid'], response['ask'])
            elif method == 'watchTrades':
                print(stamp, exchange.id, method, symbol, len(response), 'trades')
        except Exception as error:
            print(str(error))
            # raise error  # uncomment to break all loops in case of an error in any one of them
            break  # you can break just this one loop if it fails
async def method_loop(exchange, method, symbols):
    """Run one ``symbol_loop`` task per symbol, concurrently."""
    print('Starting', exchange.id, method, symbols)
    await gather(*(symbol_loop(exchange, method, symbol) for symbol in symbols))
async def exchange_loop(exchange_id, methods):
    """Instantiate one exchange and run every configured stream on it.

    The exchange is always closed -- even when a stream task raises -- so
    the process does not leak websocket connections.  (The original only
    reached ``close()`` when ``gather`` returned normally.)
    """
    print('Starting', exchange_id, methods)
    exchange = getattr(ccxt.pro, exchange_id)()
    try:
        loops = [method_loop(exchange, method, symbols)
                 for method, symbols in methods.items()]
        await gather(*loops)
    finally:
        await exchange.close()
async def main():
    """Fan out over several exchanges, each with its own streams/symbols."""
    streams_by_exchange = {
        'okex': {
            'watchOrderBook': ['BTC/USDT', 'ETH/BTC', 'ETH/USDT'],
            'watchTicker': ['BTC/USDT'],
        },
        'binance': {
            'watchOrderBook': ['BTC/USDT', 'ETH/BTC'],
            'watchTrades': ['ETH/BTC'],
        },
    }
    await gather(*(exchange_loop(exchange_id, methods)
                   for exchange_id, methods in streams_by_exchange.items()))
run(main())
| {
"content_hash": "708ca741ff727aa3298e6c4e6bb19c59",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 128,
"avg_line_length": 35.574074074074076,
"alnum_prop": 0.6017699115044248,
"repo_name": "ccxt/ccxt",
"id": "c9ff7d494d68d04871cf1e8cdaa4368a2a59df4d",
"size": "1946",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/ccxt.pro/py/many-exchanges-many-different-streams.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "1724"
},
{
"name": "HTML",
"bytes": "246"
},
{
"name": "JavaScript",
"bytes": "11619228"
},
{
"name": "PHP",
"bytes": "10272973"
},
{
"name": "Python",
"bytes": "9037496"
},
{
"name": "Shell",
"bytes": "6887"
}
],
"symlink_target": ""
} |
from google.cloud.bigquery import analyticshub_v1
async def sample_delete_data_exchange():
    """Delete a data exchange via the Analytics Hub async client."""
    hub_client = analyticshub_v1.AnalyticsHubServiceAsyncClient()

    # Build the request; "name_value" is a placeholder resource name.
    delete_request = analyticshub_v1.DeleteDataExchangeRequest(
        name="name_value",
    )

    # Issue the RPC (this method returns no payload).
    await hub_client.delete_data_exchange(request=delete_request)
# [END analyticshub_v1_generated_AnalyticsHubService_DeleteDataExchange_async]
| {
"content_hash": "0895de97a70760b60adee5fe206ee408",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 78,
"avg_line_length": 27.352941176470587,
"alnum_prop": 0.7483870967741936,
"repo_name": "googleapis/python-bigquery-analyticshub",
"id": "1ebb980d70eb5950ecd6573abd48a1e218400016",
"size": "1886",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "samples/generated_samples/analyticshub_v1_generated_analytics_hub_service_delete_data_exchange_async.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "530070"
},
{
"name": "Shell",
"bytes": "30705"
}
],
"symlink_target": ""
} |
import subprocess
import StringIO
import tempfile
import re
# Matches the first "<type> <value>" token pair on the line after the
# dbus-send reply header; group(1) captures the value.
FIRST_RESPONSE_VALUE_REGEX = re.compile('.*\n[ ]+[^ ]+ ([^ ]+).*', re.MULTILINE)


class Dbus(object):
    """Thin wrapper around the ``dbus-send`` command line tool."""

    def get_first_response_value(self, std_out):
        """Extract the first value token from a dbus-send reply string."""
        match = FIRST_RESPONSE_VALUE_REGEX.search(std_out)
        if match is not None:
            return match.group(1)
        raise ValueError('Could not find first response value in : ' + std_out)

    def send(self, args):
        """Run dbus-send with *args*, validating the exit code."""
        process_result = self.execute_process(args)
        self.validate_response_code(process_result, args)

    def send_and_get_response(self, args):
        """Run dbus-send with *args*; return captured stdout."""
        process_result = self.execute_process(args)
        self.validate_response_code(process_result, args)
        return process_result.std_out

    def execute_process(self, args):
        """Run *args* as a subprocess, capturing stdout/stderr."""
        stdout_capture = tempfile.TemporaryFile()
        stderr_capture = tempfile.TemporaryFile()
        response_code = subprocess.call(args, stdout=stdout_capture, stderr=stderr_capture)
        stdout_capture.seek(0)
        stderr_capture.seek(0)
        return ProcessResult(response_code, stdout_capture.read(), stderr_capture.read())

    def validate_response_code(self, process_result, args):
        """Raise ValueError when the process exited non-zero.

        Bug fix: the original compared with ``is not 0`` (object identity),
        which only works by accident of CPython's small-int caching; use
        ``!=`` (value equality) instead.
        """
        if int(process_result.response_code) != 0:
            raise ValueError('Process exited with code {0}, args: {1}'.format(process_result.response_code, str(args)))
class ProcessResult(object):
    """Value object holding a subprocess exit code and captured streams."""

    def __init__(self, response_code, std_out, std_err):
        self.response_code = response_code
        self.std_out = std_out
        self.std_err = std_err
if __name__ == '__main__':
    # Ad-hoc manual test: send an Open command to gnome-mplayer over D-Bus.
    dbus = Dbus()
    # NOTE(review): the first ``cmd`` (a GetFullScreen query string) is
    # immediately overwritten by the list form below; kept for reference.
    cmd = "dbus-send --type=method_call --print-reply --dest=com.gnome.mplayer / com.gnome.mplayer.GetFullScreen"
    cmd = ['dbus-send','/','com.gnome.mplayer.Open','string:/home/mark/dev/controller/foo/music 2/2013-09-26-180454.ogg']
    result = dbus.execute_process(cmd)
    # Python 2 print statements -- this module targets Python 2.
    print 'response code: {0}'.format(result.response_code)
    print 'stdout: {0}'.format(result.std_out)
    print 'stderr: {0}'.format(result.std_err)
| {
"content_hash": "8763e22b5db78dedd35910a12c617183",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 121,
"avg_line_length": 40.13725490196079,
"alnum_prop": 0.6570591108939912,
"repo_name": "epickrram/controller",
"id": "242f69518a6814ef54d7b7613eb92226922853e6",
"size": "2047",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dbus.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "99"
},
{
"name": "JavaScript",
"bytes": "2894"
},
{
"name": "Python",
"bytes": "10715"
}
],
"symlink_target": ""
} |
from gensim.models import Word2Vec
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
import numpy
from .corrector import correct, prepare_dictionary
class TextProcessor(object):
    """Tokenize, spell-correct and vectorize text using a word2vec model."""
    def __init__(self, language, stopwords_whitelist, word2vec):
        """
        Initialize text processor
        :param language: language name (for nltk)
        :type language: str
        :param stopwords_whitelist: not remove stopword if it's part of these combinations (e.g. ["turn", "on"], ["turn", "off"])
        :type stopwords_whitelist: list[list[str]]
        :param word2vec: word2vec model
        :type word2vec: Word2Vec
        """
        self.language = language
        self.word2vec = word2vec
        # NLTK's stopword list for the given language.
        self.stopwords = stopwords.words(language)
        self.stopwords_whitelist = stopwords_whitelist
        # Spelling correction is on by default; corrected() honours this flag.
        self.correction = True
        # Dictionary built from the word2vec vocabulary, used by correct().
        self.correction_dictionary = prepare_dictionary(self.word2vec.vocab)
    def corrected(self, word):
        """
        Get corrected word if can
        :param word: word
        :type word: str
        :return: word or None
        :rtype: str|NoneType
        """
        if not self.correction:
            # Correction disabled: accept only words the model knows.
            if word in self.word2vec.vocab:
                return word
            else:
                return None
        else:
            return correct(word, self.correction_dictionary)
    def split(self, text):
        """
        Split text

        Tokenizes, spell-corrects, and removes stopwords -- except
        stopwords that belong to a fully-present whitelist combination.
        :param text: text
        :type text: str
        :return: splitted text
        :rtype: list[str]
        """
        words = word_tokenize(text.lower(), self.language)
        # NOTE(review): the ``word is not None`` test below is dead code --
        # word_tokenize yields strings; None filtering happens in the next
        # comprehension over corrected() results.
        corrected_words = [self.corrected(word) for word in words
                           if word.isalnum() and word is not None]
        not_none_words = [word for word in corrected_words
                          if word is not None]
        result = []
        for word in not_none_words:
            if word not in self.stopwords:  # if word isn't stopword - store it
                result.append(word)
            else:  # for stopword - check "whitelisting"
                for stopwords_whitelist_combination in self.stopwords_whitelist:
                    if word not in stopwords_whitelist_combination:
                        continue
                    # Just check that text contains all words from combination.
                    # it must be better to replace with building some "tree" structure from sentence
                    # (e.g. turn -> off ...)
                    found_combination_words = [stopword
                                               for stopword in stopwords_whitelist_combination
                                               if stopword in not_none_words]
                    if len(found_combination_words) == len(stopwords_whitelist_combination):
                        result.append(word)
        return result
    def matrix(self, text):
        """
        Get text as matrix
        :param text: text
        :type text: str
        :return: matrix
        :rtype: numpy.ndarray
        """
        words = self.split(text)
        # One row per token: the word2vec embedding of that word.
        vectors = [self.word2vec[word] for word in words]
        return numpy.array(vectors)
    def similarity(self, word1, word2):
        """
        Get similarity of 2 words
        :param word1: word
        :type word1: str
        :param word2: word
        :type word2: str
        :return: words vectors cosine similarity
        :rtype: float
        """
        # Out-of-vocabulary words are treated as completely dissimilar.
        if word1 not in self.word2vec.vocab or word2 not in self.word2vec.vocab:
            return 0.0
        return self.word2vec.similarity(word1, word2)
| {
"content_hash": "ab203b1169bd06cc4c0bf53912d20854",
"timestamp": "",
"source": "github",
"line_count": 97,
"max_line_length": 129,
"avg_line_length": 37.21649484536083,
"alnum_prop": 0.5637119113573407,
"repo_name": "b09dan/universities_sentiment",
"id": "36511a9e3f927931d371efc76f87af58155b1ff9",
"size": "3610",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "text_class/pynlc/text_processor.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1364363377"
},
{
"name": "Jupyter Notebook",
"bytes": "1841442"
},
{
"name": "Python",
"bytes": "51191"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from itertools import groupby
from django import forms
from django.contrib.auth import get_user_model
from django.contrib.auth.forms import (
AuthenticationForm,
PasswordResetForm as djangoPasswordResetForm,
)
from django.contrib.auth.tokens import default_token_generator
from django.contrib.sites.shortcuts import get_current_site
from django.core.urlresolvers import reverse
from django.utils.encoding import force_bytes
from django.utils.http import urlsafe_base64_encode
from brasilcomvc.common.email import send_template_email
from cities_light.models import City, Region
from .models import UserAddress
User = get_user_model()
class DeleteUserForm(forms.Form):
    """Account-deletion confirmation form: requires the user's password."""

    password = forms.CharField(label='Senha', widget=forms.PasswordInput)

    def __init__(self, user, *args, **kwargs):
        # Bind the user whose password will be checked during cleaning.
        super(DeleteUserForm, self).__init__(*args, **kwargs)
        self.user = user

    def clean_password(self):
        """Reject the form when the typed password does not match."""
        password = self.cleaned_data['password']
        if not self.user.check_password(password):
            self.add_error('password', 'Senha inválida')
        return password
class LoginForm(AuthenticationForm):
    # this is named username so we can use Django's login view
    # (AuthenticationForm expects a field called "username"; here it is
    # an email field, matching the project's email-based login).
    username = forms.EmailField(required=True)
class PasswordResetForm(djangoPasswordResetForm):
    '''
    Override the built-in form to customize email sending
    '''
    def save(
            self, use_https=False, token_generator=default_token_generator,
            from_email=None, domain_override=None, request=None, **kwargs):
        # Return silently for unknown emails or users without a usable
        # password -- no feedback is given, so registered addresses are
        # not revealed to the requester.
        try:
            user = User.objects.get(email=self.cleaned_data['email'])
        except User.DoesNotExist:
            return
        if not user.has_usable_password():
            return

        # Resolve the site name/domain used to build the reset link.
        if domain_override:
            site_name = domain = domain_override
        else:
            current_site = get_current_site(request)
            site_name = current_site.name
            domain = current_site.domain

        # Template context, including the signed reset token and uid.
        context = {
            'email': user.email,
            'domain': domain,
            'site_name': site_name,
            'uid': urlsafe_base64_encode(force_bytes(user.pk)),
            'user': user,
            'token': token_generator.make_token(user),
            'protocol': 'https' if use_https else 'http',
        }
        # Absolute URL for the password_reset_confirm view.
        context['reset_link'] = '{protocol}://{domain}{url}'.format(
            url=reverse('accounts:password_reset_confirm', kwargs={
                'uidb64': context['uid'], 'token': context['token']}),
            **context)
        send_template_email(
            subject='Redefinição de senha',
            to=self.cleaned_data['email'],
            template_name='emails/password_reset.html',
            context=context)
class SignupForm(forms.ModelForm):
    """Registration form: full name, email, and a write-only password."""

    password = forms.CharField(label='Senha', widget=forms.PasswordInput)

    class Meta:
        model = User
        fields = ('full_name', 'email',)

    def save(self, **kwargs):
        """Hash the submitted password onto the instance before saving."""
        # set_password stores a salted hash, never the raw password.
        self.instance.set_password(self.cleaned_data['password'])
        return super(SignupForm, self).save(**kwargs)
class EditNotificationsForm(forms.ModelForm):
    """Profile form exposing only the newsletter opt-in flag."""
    class Meta:
        model = User
        fields = ('email_newsletter',)
        labels = {
            'email_newsletter': 'Receber novidades sobre o Brasil.com.vc',
        }
class UserAddressForm(forms.ModelForm):
    """Address form whose state/city choices are narrowed to locations
    actually present in the cities_light data."""
    class Meta:
        model = UserAddress
        fields = '__all__'

    def __init__(self, *args, **kwargs):
        super(UserAddressForm, self).__init__(*args, **kwargs)
        # Limit regions to available cities
        states = Region.objects.filter(
            id__in=set(City.objects.values_list('region_id', flat=True)))
        self.fields['state'].queryset = states
        self.fields['state'].choices = states.values_list('id', 'name')
        # Group cities by region (state)
        self.fields['city'].choices = self._group_cities()

    def _group_cities(self):
        '''
        Build a choices-like list with all cities grouped by state (region)
        '''
        # itertools.groupby only groups *consecutive* items, so the
        # queryset is ordered by the same key (region name) first.
        return [
            (state, [(city.pk, city.name) for city in cities],)
            for state, cities in groupby(
                self.fields['city'].queryset.order_by('region__name', 'name'),
                lambda city: city.region.name)]
| {
"content_hash": "a23d5b3e1dafb061081c8cf969577b34",
"timestamp": "",
"source": "github",
"line_count": 140,
"max_line_length": 78,
"avg_line_length": 31.207142857142856,
"alnum_prop": 0.6241702906843671,
"repo_name": "brasilcomvc/brasilcomvc",
"id": "b41b78238b2db32571e69741531cd521ccec89af",
"size": "4396",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "brasilcomvc/accounts/forms.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "20338"
},
{
"name": "CoffeeScript",
"bytes": "6868"
},
{
"name": "HTML",
"bytes": "35753"
},
{
"name": "Python",
"bytes": "114063"
},
{
"name": "Shell",
"bytes": "94"
}
],
"symlink_target": ""
} |
from beritest_tools import BaseBERITestCase
from nose.plugins.attrib import attr
#
# Test for a control flow problem with a particular version of Cheri2.
# A CP2 instruction followed by a jump caused the jump to be skipped.
#
class test_cp2_cmove_j(BaseBERITestCase):
    """Regression test for a Cheri2 control-flow bug where a CP2
    instruction followed by a jump caused the jump to be skipped.

    Each method checks one register set up by the companion assembly file.
    """

    @attr('capabilities')
    def test_cp2_cmove_uperms(self):
        '''Test that cmove retained u, perms fields correctly'''
        self.assertRegisterEqual(self.MIPS.a0, 0xff, "cmove failed to retain correct u, perms fields")

    @attr('capabilities')
    def test_cp2_cmove_offset(self):
        '''Test that cmove retained the offset field correctly'''
        self.assertRegisterEqual(self.MIPS.a1, 0x5, "cmove failed to retain correct offset")

    @attr('capabilities')
    def test_cp2_cmove_base(self):
        '''Test that cmove retained the base field correctly'''
        self.assertRegisterEqual(self.MIPS.a2, 0x100, "cmove failed to retain correct base address")

    @attr('capabilities')
    def test_cp2_cmove_length(self):
        '''Test that cmove retained the length field correctly'''
        self.assertRegisterEqual(self.MIPS.a3, 0x200, "cmove failed to retain correct length")

    @attr('capabilities')
    def test_branch_delay(self):
        '''Test that branch delay was executed.'''
        self.assertRegisterEqual(self.MIPS.a4, 0x1, "branch delay not executed")

    @attr('capabilities')
    def test_jump_taken(self):
        '''Test jump taken.'''
        self.assertRegisterEqual(self.MIPS.a5, 0x0, "jump did not skip over instruction.")

    @attr('capabilities')
    def test_jump_dest(self):
        '''Test jump destination reached.'''
        self.assertRegisterEqual(self.MIPS.a6, 0x1, "jump did not reach destination.")
| {
"content_hash": "e7508f32eed9e08507c43bee0b18688c",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 102,
"avg_line_length": 40.395348837209305,
"alnum_prop": 0.6902705814622913,
"repo_name": "8l/beri",
"id": "b02e9d6ea6f4672785cd93dc197d21904087410e",
"size": "2915",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cheritest/trunk/tests/cp2/test_cp2_cmove_j.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "1629022"
},
{
"name": "Bluespec",
"bytes": "2336405"
},
{
"name": "C",
"bytes": "1058899"
},
{
"name": "C++",
"bytes": "1864"
},
{
"name": "Groff",
"bytes": "14381"
},
{
"name": "Haskell",
"bytes": "11711"
},
{
"name": "Lex",
"bytes": "2894"
},
{
"name": "Makefile",
"bytes": "242450"
},
{
"name": "Mathematica",
"bytes": "291"
},
{
"name": "Objective-C",
"bytes": "2387"
},
{
"name": "OpenEdge ABL",
"bytes": "568"
},
{
"name": "Perl",
"bytes": "19159"
},
{
"name": "Python",
"bytes": "1491002"
},
{
"name": "Shell",
"bytes": "91130"
},
{
"name": "SystemVerilog",
"bytes": "12058"
},
{
"name": "Tcl",
"bytes": "132818"
},
{
"name": "TeX",
"bytes": "4996"
},
{
"name": "Verilog",
"bytes": "125674"
},
{
"name": "Yacc",
"bytes": "5871"
}
],
"symlink_target": ""
} |
import os
import dj_database_url
import django
from django.utils import six
def gettext(s):
    """Identity stand-in for Django's gettext used by these test settings.

    PEP 8 (E731) prefers a ``def`` over assigning a lambda to a name.
    """
    return s


# Minimal URLconf so this module can double as ROOT_URLCONF.
urlpatterns = []
def _get_migration_modules(apps):
modules = {}
for module in apps:
module_name = '%s.migrations_django' % module
try:
__import__(module_name)
except ImportError:
pass
else:
modules[module] = module_name
return modules
def configure(db_url, **extra):
    """
    Build the Django settings for the cms test suite and activate them.

    db_url -- a dj-database-url style DSN used for the default database,
              ignored when the caller supplies ``DATABASES`` in ``extra``.
    extra  -- setting overrides merged on top of the defaults below.

    Side effects: sets DJANGO_SETTINGS_MODULE, resets and re-configures
    ``django.conf.settings``, then calls ``django.setup()``.
    """
    from django.conf import settings
    # SITE_ID historically had to be a long on Python 2.
    if six.PY3:
        siteid = 1
    else:
        siteid = long(1)  # nopyflakes
    os.environ['DJANGO_SETTINGS_MODULE'] = 'cms.test_utils.cli'
    if 'DATABASES' not in extra:
        DB = dj_database_url.parse(db_url)
    else:
        # Placeholder only: the real DATABASES arrives via defaults.update(extra).
        DB = {}
    PROJECT_PATH = os.path.abspath(os.path.dirname(__file__))
    defaults = dict(
        PROJECT_PATH=PROJECT_PATH,
        CACHES={
            'default': {
                'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
            }
        },
        CACHE_MIDDLEWARE_ANONYMOUS_ONLY=True,
        DEBUG=True,
        TEMPLATE_DEBUG=True,
        DATABASE_SUPPORTS_TRANSACTIONS=True,
        DATABASES={
            'default': DB
        },
        SITE_ID=siteid,
        USE_I18N=True,
        MEDIA_ROOT='/media/',
        STATIC_ROOT='/static/',
        CMS_MEDIA_ROOT='/cms-media/',
        CMS_MEDIA_URL='/cms-media/',
        MEDIA_URL='/media/',
        STATIC_URL='/static/',
        ADMIN_MEDIA_PREFIX='/static/admin/',
        EMAIL_BACKEND='django.core.mail.backends.locmem.EmailBackend',
        SECRET_KEY='key',
        MIDDLEWARE_CLASSES=[
            'django.middleware.cache.UpdateCacheMiddleware',
            'django.middleware.http.ConditionalGetMiddleware',
            'django.contrib.sessions.middleware.SessionMiddleware',
            'django.contrib.auth.middleware.AuthenticationMiddleware',
            'django.contrib.messages.middleware.MessageMiddleware',
            'django.middleware.csrf.CsrfViewMiddleware',
            'django.middleware.locale.LocaleMiddleware',
            'django.middleware.common.BrokenLinkEmailsMiddleware',
            'django.middleware.common.CommonMiddleware',
            'cms.middleware.language.LanguageCookieMiddleware',
            'cms.middleware.user.CurrentUserMiddleware',
            'cms.middleware.page.CurrentPageMiddleware',
            'cms.middleware.toolbar.ToolbarMiddleware',
            'django.middleware.cache.FetchFromCacheMiddleware',
        ],
        INSTALLED_APPS=[
            'debug_toolbar',
            'django.contrib.auth',
            'django.contrib.contenttypes',
            'django.contrib.sessions',
            'djangocms_admin_style',
            'django.contrib.admin',
            'django.contrib.sites',
            'django.contrib.staticfiles',
            'django.contrib.messages',
            'treebeard',
            'cms',
            'menus',
            'djangocms_text_ckeditor',
            'djangocms_column',
            'djangocms_picture',
            'djangocms_file',
            'djangocms_googlemap',
            'djangocms_teaser',
            'djangocms_video',
            'djangocms_inherit',
            'djangocms_style',
            'djangocms_link',
            'cms.test_utils.project.sampleapp',
            'cms.test_utils.project.placeholderapp',
            'cms.test_utils.project.pluginapp.plugins.manytomany_rel',
            'cms.test_utils.project.pluginapp.plugins.extra_context',
            'cms.test_utils.project.pluginapp.plugins.meta',
            'cms.test_utils.project.pluginapp.plugins.one_thing',
            'cms.test_utils.project.fakemlng',
            'cms.test_utils.project.fileapp',
            'cms.test_utils.project.objectpermissionsapp',
            'cms.test_utils.project.bunch_of_plugins',
            'cms.test_utils.project.extensionapp',
            'cms.test_utils.project.mti_pluginapp',
            'reversion',
            'sekizai',
            'hvad',
        ],
        DEBUG_TOOLBAR_PATCH_SETTINGS=False,
        INTERNAL_IPS=['127.0.0.1'],
        AUTHENTICATION_BACKENDS=(
            'django.contrib.auth.backends.ModelBackend',
            'cms.test_utils.project.objectpermissionsapp.backends.ObjectPermissionBackend',
        ),
        LANGUAGE_CODE="en",
        LANGUAGES=(
            ('en', gettext('English')),
            ('fr', gettext('French')),
            ('de', gettext('German')),
            ('pt-br', gettext('Brazilian Portuguese')),
            ('nl', gettext("Dutch")),
            ('es-mx', u'Español'),
        ),
        CMS_LANGUAGES={
            1: [
                {
                    'code': 'en',
                    'name': gettext('English'),
                    'fallbacks': ['fr', 'de'],
                    'public': True,
                },
                {
                    'code': 'de',
                    'name': gettext('German'),
                    'fallbacks': ['fr', 'en'],
                    'public': True,
                },
                {
                    'code': 'fr',
                    'name': gettext('French'),
                    'public': True,
                },
                {
                    'code': 'pt-br',
                    'name': gettext('Brazilian Portuguese'),
                    'public': False,
                },
                {
                    'code': 'es-mx',
                    'name': u'Español',
                    'public': True,
                },
            ],
            2: [
                {
                    'code': 'de',
                    'name': gettext('German'),
                    'fallbacks': ['fr'],
                    'public': True,
                },
                {
                    'code': 'fr',
                    'name': gettext('French'),
                    'public': True,
                },
            ],
            3: [
                {
                    'code': 'nl',
                    'name': gettext('Dutch'),
                    'fallbacks': ['de'],
                    'public': True,
                },
                {
                    'code': 'de',
                    'name': gettext('German'),
                    'fallbacks': ['nl'],
                    'public': False,
                },
            ],
            'default': {
                'hide_untranslated': False,
            },
        },
        CMS_TEMPLATES=(
            ('col_two.html', gettext('two columns')),
            ('col_three.html', gettext('three columns')),
            ('nav_playground.html', gettext('navigation examples')),
            ('simple.html', 'simple'),
            ('static.html', 'static placeholders'),
        ),
        CMS_PLACEHOLDER_CONF={
            'col_sidebar': {
                'plugins': ('FilePlugin', 'LinkPlugin', 'PicturePlugin',
                            'TextPlugin', 'MultiColumnPlugin', 'SnippetPlugin'),
                'name': gettext("sidebar column")
            },
            'col_left': {
                'plugins': ('FilePlugin', 'LinkPlugin', 'PicturePlugin',
                            'TextPlugin', 'SnippetPlugin', 'GoogleMapPlugin',
                            'MultiColumnPlugin', 'StylePlugin', 'EmptyPlugin'),
                'name': gettext("left column"),
                'plugin_modules': {
                    'LinkPlugin': 'Different Grouper'
                },
                'plugin_labels': {
                    'LinkPlugin': gettext('Add a link')
                },
            },
            'col_right': {
                'plugins': ('FilePlugin', 'LinkPlugin', 'PicturePlugin',
                            'TextPlugin', 'SnippetPlugin', 'GoogleMapPlugin',
                            'MultiColumnPlugin', 'StylePlugin'),
                'name': gettext("right column")
            },
            'extra_context': {
                "plugins": ('TextPlugin',),
                "extra_context": {"extra_width": 250},
                "name": "extra context"
            },
        },
        CMS_PERMISSION=True,
        CMS_PUBLIC_FOR='all',
        CMS_CACHE_DURATIONS={
            'menus': 0,
            'content': 0,
            'permissions': 0,
        },
        CMS_APPHOOKS=[],
        CMS_PLUGIN_PROCESSORS=tuple(),
        CMS_PLUGIN_CONTEXT_PROCESSORS=tuple(),
        CMS_SITE_CHOICES_CACHE_KEY='CMS:site_choices',
        CMS_PAGE_CHOICES_CACHE_KEY='CMS:page_choices',
        SOUTH_TESTS_MIGRATE=False,
        CMS_NAVIGATION_EXTENDERS=(
            ('cms.test_utils.project.sampleapp.menu_extender.get_nodes', 'SampleApp Menu'),
        ),
        TEST_RUNNER='cms.test_utils.runners.NormalTestRunner',
        JUNIT_OUTPUT_DIR='.',
        TIME_TESTS=False,
        ROOT_URLCONF='cms.test_utils.cli',
        PASSWORD_HASHERS=(
            'django.contrib.auth.hashers.MD5PasswordHasher',
        ),
        ALLOWED_HOSTS=['localhost'],
    )
    from django.utils.functional import empty
    # Reset any previously-configured settings so configure() can run again
    # within the same process.
    settings._wrapped = empty
    defaults.update(extra)
    # TEMPLATES is set after the update so callers cannot break template
    # resolution for the test project.
    defaults['TEMPLATES'] = [
        {
            'NAME': 'django',
            'BACKEND': 'django.template.backends.django.DjangoTemplates',
            'APP_DIRS': True,
            'DIRS': [os.path.abspath(os.path.join(PROJECT_PATH, 'project', 'templates'))],
            'OPTIONS': {
                'context_processors': [
                    "django.contrib.auth.context_processors.auth",
                    'django.contrib.messages.context_processors.messages',
                    "django.template.context_processors.i18n",
                    "django.template.context_processors.debug",
                    "django.template.context_processors.request",
                    "django.template.context_processors.media",
                    'django.template.context_processors.csrf',
                    "cms.context_processors.cms_settings",
                    "sekizai.context_processors.sekizai",
                    "django.template.context_processors.static",
                ],
            }
        }
    ]
    plugins = ('djangocms_column', 'djangocms_file', 'djangocms_googlemap',
               'djangocms_inherit', 'djangocms_link', 'djangocms_picture',
               'djangocms_style', 'djangocms_teaser', 'djangocms_video')
    defaults['MIGRATION_MODULES'] = _get_migration_modules(plugins)
    if not defaults.get('TESTS_MIGRATE', False):
        # Disable migrations: pretend every app has a (nonexistent)
        # migration module so Django skips them entirely.
        class DisableMigrations(object):
            def __contains__(self, item):
                return True

            def __getitem__(self, item):
                return "notmigrations"
        defaults['MIGRATION_MODULES'] = DisableMigrations()
    if 'AUTH_USER_MODEL' in extra:
        # Install the custom user app just before 'cms' so model loading order
        # is preserved.
        custom_user_app = 'cms.test_utils.project.' + extra['AUTH_USER_MODEL'].split('.')[0]
        defaults['INSTALLED_APPS'].insert(defaults['INSTALLED_APPS'].index('cms'), custom_user_app)
    # add data from env: DJANGO_EXTRA_SETTINGS is either a JSON file path or
    # an inline JSON string of overrides.
    extra_settings = os.environ.get("DJANGO_EXTRA_SETTINGS", None)
    if extra_settings:
        from json import load, loads
        if os.path.exists(extra_settings):
            with open(extra_settings) as fobj:
                defaults.update(load(fobj))
        else:
            defaults.update(loads(extra_settings))
    settings.configure(**defaults)
    django.setup()
| {
"content_hash": "ec0da9ed4fc71247087d42eef29c722b",
"timestamp": "",
"source": "github",
"line_count": 316,
"max_line_length": 99,
"avg_line_length": 35.79746835443038,
"alnum_prop": 0.4957567185289958,
"repo_name": "iddqd1/django-cms",
"id": "9180484f089fb47fb6cd72e8318e3e1977bc2dbb",
"size": "11338",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "cms/test_utils/cli.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "128012"
},
{
"name": "HTML",
"bytes": "104983"
},
{
"name": "JavaScript",
"bytes": "665955"
},
{
"name": "Python",
"bytes": "1943557"
},
{
"name": "XSLT",
"bytes": "5917"
}
],
"symlink_target": ""
} |
"""
This is our testing framework.
Goals:
* it should be compatible with py.test and operate very similarly (or
identically)
* doesn't require any external dependencies
* preferably all the functionality should be in this file only
* no magic, just import the test file and execute the test functions, that's it
* portable
"""
import os
import sys
import inspect
import traceback
import pdb
from glob import glob
from timeit import default_timer as clock
def isgeneratorfunction(object):
"""
Return true if the object is a user-defined generator function.
Generator function objects provides same attributes as functions.
See isfunction.__doc__ for attributes listing.
Adapted from Python 2.6.
"""
CO_GENERATOR = 0x20
if (inspect.isfunction(object) or inspect.ismethod(object)) and \
object.func_code.co_flags & CO_GENERATOR:
return True
return False
def test(*paths, **kwargs):
    """
    Runs the tests specified by paths, or all tests if paths=[].

    Note: paths are specified relative to the sympy root directory in a unix
    format (on all platforms including windows).

    Examples:

    Run all tests:
    >> import sympy
    >> sympy.test()

    Run one file:
    >> import sympy
    >> sympy.test("sympy/core/tests/test_basic.py")

    Run all tests in sympy/functions/ and some particular file:
    >> import sympy
    >> sympy.test("sympy/core/tests/test_basic.py", "sympy/functions")
    """
    reporter = PyTestReporter(
        kwargs.get("verbose", False),
        kwargs.get("tb", "short"),
        kwargs.get("colors", True),
    )
    runner = SymPyTests(reporter, kwargs.get("kw", ""), kwargs.get("pdb", False))
    # Default to the whole sympy tree when no explicit paths were given.
    runner.add_paths(paths if paths else ["sympy"])
    return runner.test()
def doctest(*paths, **kwargs):
    """
    Runs the doctests specified by paths, or all tests if paths=[].

    Note: paths are specified relative to the sympy root directory in a unix
    format (on all platforms including windows).

    Examples:

    Run all tests:
    >> import sympy
    >> sympy.doctest()

    Run one file:
    >> import sympy
    >> sympy.doctest("sympy/core/tests/test_basic.py")

    Run all tests in sympy/functions/ and some particular file:
    >> import sympy
    >> sympy.doctest("sympy/core/tests/test_basic.py", "sympy/functions")
    """
    verbose = kwargs.get("verbose", False)
    # Copy before extending: the original mutated a caller-supplied
    # blacklist list in place.
    blacklist = list(kwargs.get("blacklist", []))
    blacklist.extend([
        "sympy/thirdparty/pyglet", # segfaults
        "sympy/mpmath", # needs to be fixed upstream
        "sympy/plotting", # generates live plots
        "sympy/utilities/compilef.py", # needs tcc
        "sympy/galgebra/GA.py", # needs numpy
        "sympy/galgebra/latex_ex.py", # needs numpy
        "sympy/conftest.py", # needs py.test
        "sympy/utilities/benchmarking.py", # needs py.test
        ])
    r = PyTestReporter(verbose)
    t = SymPyDocTests(r, blacklist=blacklist)
    if len(paths) > 0:
        t.add_paths(paths)
    else:
        t.add_paths(["sympy"])
    return t.test()
class SymPyTests(object):
def __init__(self, reporter, kw="", post_mortem=False):
self._post_mortem = post_mortem
self._kw = kw
self._count = 0
self._root_dir = self.get_sympy_dir()
self._reporter = reporter
self._reporter.root_dir(self._root_dir)
self._tests = []
def add_paths(self, paths):
for path in paths:
path2 = os.path.join(self._root_dir, *path.split("/"))
if path2.endswith(".py"):
self._tests.append(path2)
else:
self._tests.extend(self.get_tests(path2))
def test(self):
"""
Runs the tests.
Returns True if all tests pass, otherwise False.
"""
self._reporter.start()
for f in self._tests:
try:
self.test_file(f)
except KeyboardInterrupt:
print " interrupted by user"
break
return self._reporter.finish()
def test_file(self, filename):
name = "test%d" % self._count
name = os.path.splitext(os.path.basename(filename))[0]
self._count += 1
gl = {'__file__':filename}
try:
execfile(filename, gl)
except (ImportError, SyntaxError):
self._reporter.import_error(filename, sys.exc_info())
return
pytestfile = ""
if gl.has_key("XFAIL"):
pytestfile = inspect.getsourcefile(gl["XFAIL"])
disabled = gl.get("disabled", False)
if disabled:
funcs = []
else:
# we need to filter only those functions that begin with 'test_'
# that are defined in the testing file or in the file where
# is defined the XFAIL decorator
funcs = [gl[f] for f in gl.keys() if f.startswith("test_") and
(inspect.isfunction(gl[f])
or inspect.ismethod(gl[f])) and
(inspect.getsourcefile(gl[f]) == filename or
inspect.getsourcefile(gl[f]) == pytestfile)]
# Sorting of XFAILed functions isn't fixed yet :-(
funcs.sort(key=lambda x: inspect.getsourcelines(x)[1])
i = 0
while i is not len(funcs):
if isgeneratorfunction(funcs[i]):
# some tests can be generators, that return the actual
# test functions. We unpack it below:
f = funcs.pop(i)
for fg in f():
func = fg[0]
args = fg[1:]
fgw = lambda: func(*args)
funcs.insert(i, fgw)
i += 1
else:
i += 1
# drop functions that are not selected with the keyword expression:
funcs = [x for x in funcs if self.matches(x)]
self._reporter.entering_filename(filename, len(funcs))
for f in funcs:
self._reporter.entering_test(f)
try:
f()
except KeyboardInterrupt:
raise
except:
t, v, tr = sys.exc_info()
if t is AssertionError:
self._reporter.test_fail((t, v, tr))
if self._post_mortem:
pdb.post_mortem(tr)
elif t.__name__ == "Skipped":
self._reporter.test_skip()
elif t.__name__ == "XFail":
self._reporter.test_xfail()
elif t.__name__ == "XPass":
self._reporter.test_xpass(v)
else:
self._reporter.test_exception((t, v, tr))
if self._post_mortem:
pdb.post_mortem(tr)
else:
self._reporter.test_pass()
self._reporter.leaving_filename()
def get_sympy_dir(self):
"""
Returns the root sympy directory.
"""
this_file = os.path.abspath(__file__)
sympy_dir = os.path.join(os.path.dirname(this_file), "..", "..")
sympy_dir = os.path.normpath(sympy_dir)
return sympy_dir
def matches(self, x):
"""
Does the keyword expression self._kw match "x"? Returns True/False.
Always returns True if self._kw is "".
"""
if self._kw == "":
return True
return x.__name__.find(self._kw) != -1
def get_paths(self, dir="", level=15):
"""
Generates a set of paths for testfiles searching.
Example:
>> get_paths(2)
['sympy/test_*.py', 'sympy/*/test_*.py', 'sympy/*/*/test_*.py']
>> get_paths(6)
['sympy/test_*.py', 'sympy/*/test_*.py', 'sympy/*/*/test_*.py',
'sympy/*/*/*/test_*.py', 'sympy/*/*/*/*/test_*.py',
'sympy/*/*/*/*/*/test_*.py', 'sympy/*/*/*/*/*/*/test_*.py']
"""
wildcards = [dir]
for i in range(level):
wildcards.append(os.path.join(wildcards[-1], "*"))
p = [os.path.join(x, "test_*.py") for x in wildcards]
return p
def get_tests(self, dir):
"""
Returns the list of tests.
"""
g = []
for x in self.get_paths(dir):
g.extend(glob(x))
g = list(set(g))
g.sort()
return g
class SymPyDocTests(object):
def __init__(self, reporter, blacklist=[]):
self._count = 0
self._root_dir = self.get_sympy_dir()
self._reporter = reporter
self._reporter.root_dir(self._root_dir)
self._tests = []
self._blacklist = blacklist
def add_paths(self, paths):
for path in paths:
path2 = os.path.join(self._root_dir, *path.split("/"))
if path2.endswith(".py"):
self._tests.append(path2)
else:
self._tests.extend(self.get_tests(path2))
def test(self):
"""
Runs the tests.
Returns True if all tests pass, otherwise False.
"""
self._reporter.start()
for f in self._tests:
try:
self.test_file(f)
except KeyboardInterrupt:
print " interrupted by user"
break
return self._reporter.finish()
def test_file(self, filename):
def setup_pprint():
from sympy import pprint_use_unicode
# force pprint to be in ascii mode in doctests
pprint_use_unicode(False)
# hook our nice, hash-stable strprinter
from sympy.interactive import init_printing
from sympy.printing import sstrrepr
init_printing(sstrrepr)
import doctest
import unittest
from StringIO import StringIO
rel_name = filename[len(self._root_dir)+1:]
module = rel_name.replace('/', '.')[:-3]
setup_pprint()
try:
module = doctest._normalize_module(module)
tests = doctest.DocTestFinder().find(module)
except:
self._reporter.import_error(filename, sys.exc_info())
return
tests.sort()
tests = [test for test in tests if len(test.examples) > 0]
self._reporter.entering_filename(filename, len(tests))
for test in tests:
assert len(test.examples) != 0
runner = doctest.DocTestRunner()
old = sys.stdout
new = StringIO()
sys.stdout = new
try:
f, t = runner.run(test, out=new.write, clear_globs=False)
finally:
sys.stdout = old
if f > 0:
self._reporter.doctest_fail(test.name, new.getvalue())
else:
self._reporter.test_pass()
self._reporter.leaving_filename()
def get_sympy_dir(self):
"""
Returns the root sympy directory.
"""
this_file = os.path.abspath(__file__)
sympy_dir = os.path.join(os.path.dirname(this_file), "..", "..")
sympy_dir = os.path.normpath(sympy_dir)
return sympy_dir
def get_paths(self, dir="", level=15):
"""
Generates a set of paths for testfiles searching.
Example:
>> get_paths(2)
['sympy/test_*.py', 'sympy/*/test_*.py', 'sympy/*/*/test_*.py']
>> get_paths(6)
['sympy/test_*.py', 'sympy/*/test_*.py', 'sympy/*/*/test_*.py',
'sympy/*/*/*/test_*.py', 'sympy/*/*/*/*/test_*.py',
'sympy/*/*/*/*/*/test_*.py', 'sympy/*/*/*/*/*/*/test_*.py']
"""
wildcards = [dir]
for i in range(level):
wildcards.append(os.path.join(wildcards[-1], "*"))
p = [os.path.join(x, "*.py") for x in wildcards]
return p
def is_on_blacklist(self, x):
"""
Returns True if "x" is on the blacklist. Otherwise False.
"""
for p in self._blacklist:
if x.find(p) != -1:
return True
return False
def get_tests(self, dir):
"""
Returns the list of tests.
"""
def importable(x):
"""
Checks if given pathname x is an importable module by checking for
__init__.py file.
Returns True/False.
Currently we only test if the __init__.py file exists in the
directory with the file "x" (in theory we should also test all the
parent dirs) and if "x" is not on self._blacklist.
"""
if self.is_on_blacklist(x):
return False
init_py = os.path.dirname(x) + os.path.sep + "__init__.py"
return os.path.exists(init_py)
g = []
for x in self.get_paths(dir):
g.extend(glob(x))
g = list(set(g))
g.sort()
# skip files that are not importable (i.e. missing __init__.py)
g = [x for x in g if importable(x)]
return g
class Reporter(object):
    """
    Parent class for all reporters.
    """
    # Marker base class only: concrete reporters (e.g. PyTestReporter below)
    # implement the reporting callbacks; nothing is shared here yet.
    pass
class PyTestReporter(Reporter):
    """
    Py.test like reporter. Should produce output identical to py.test.
    """
    def __init__(self, verbose=False, tb="short", colors=True):
        # verbose: print one line per test instead of a dot per test
        # tb: traceback style; "no" suppresses tracebacks in finish()
        # colors: allow ANSI color output (disabled anyway for non-ttys)
        self._verbose = verbose
        self._tb_style = tb
        self._colors = colors
        self._xfailed = 0
        self._xpassed = []
        self._failed = []
        self._failed_doctest = []
        self._passed = 0
        self._skipped = 0
        self._exceptions = []
        # this tracks the x-position of the cursor (useful for positioning
        # things on the screen), without the need for any readline library:
        self._write_pos = 0
        self._line_wrap = False

    def root_dir(self, dir):
        # Remembered so file names can be printed relative to the sympy root.
        self._root_dir = dir

    def write(self, text, color="", align="left", width=80):
        """
        Prints a text on the screen.

        It uses sys.stdout.write(), so no readline library is necessary.

        color ... choose from the colors below, "" means default color
        align ... left/right, left is a normal print, right is aligned on the
                  right hand side of the screen, filled with " " if necessary
        width ... the screen width
        """
        color_templates = (
            ("Black"       , "0;30"),
            ("Red"         , "0;31"),
            ("Green"       , "0;32"),
            ("Brown"       , "0;33"),
            ("Blue"        , "0;34"),
            ("Purple"      , "0;35"),
            ("Cyan"        , "0;36"),
            ("LightGray"   , "0;37"),
            ("DarkGray"    , "1;30"),
            ("LightRed"    , "1;31"),
            ("LightGreen"  , "1;32"),
            ("Yellow"      , "1;33"),
            ("LightBlue"   , "1;34"),
            ("LightPurple" , "1;35"),
            ("LightCyan"   , "1;36"),
            ("White"       , "1;37"),  )

        colors = {}

        for name, value in color_templates:
            colors[name] = value
        c_normal = '\033[0m'
        c_color = '\033[%sm'

        if align == "right":
            if self._write_pos+len(text) > width:
                # we don't fit on the current line, create a new line
                self.write("\n")
            # pad with spaces so that `text` ends exactly at `width`
            self.write(" "*(width-self._write_pos-len(text)))

        if not sys.stdout.isatty():
            # the stdout is not a terminal, this for example happens if the
            # output is piped to less, e.g. "bin/test | less". In this case,
            # the terminal control sequences would be printed verbatim, so
            # don't use any colors.
            color = ""

        if self._line_wrap:
            # previous write ended exactly at the screen edge; start a new
            # line unless the text itself begins with one
            if text[0] != "\n":
                sys.stdout.write("\n")

        if color == "":
            sys.stdout.write(text)
        else:
            sys.stdout.write("%s%s%s" % (c_color % colors[color], text, c_normal))
        sys.stdout.flush()
        # recompute the cursor column from the last newline in `text`
        l = text.rfind("\n")
        if l == -1:
            self._write_pos += len(text)
        else:
            self._write_pos = len(text)-l-1
        self._line_wrap = self._write_pos >= width
        self._write_pos %= width

    def write_center(self, text, delim="="):
        # Center `text` on an 80-column line padded with `delim` characters.
        width = 80
        if text != "":
            text = " %s " % text
        idx = (width-len(text)) // 2
        t = delim*idx + text + delim*(width-idx-len(text))
        self.write(t+"\n")

    def write_exception(self, e, val, tb):
        # Print a traceback, hiding the runtests.py frame itself.
        t = traceback.extract_tb(tb)
        # remove the first item, as that is always runtests.py
        t = t[1:]
        t = traceback.format_list(t)
        self.write("".join(t))
        t = traceback.format_exception_only(e, val)
        self.write("".join(t))

    def start(self):
        # Print the header banner and start the wall-clock timer.
        self.write_center("test process starts")
        executable = sys.executable
        v = sys.version_info
        python_version = "%s.%s.%s-%s-%s" % v
        self.write("executable: %s (%s)\n\n" % (executable, python_version))
        self._t_start = clock()

    def finish(self):
        """Print the summary report; return True iff everything passed."""
        self._t_end = clock()
        self.write("\n")
        text = "tests finished: %d passed" % self._passed
        if len(self._failed) > 0:
            text += ", %d failed" % len(self._failed)
        if len(self._failed_doctest) > 0:
            text += ", %d failed" % len(self._failed_doctest)
        if self._skipped > 0:
            text += ", %d skipped" % self._skipped
        if self._xfailed > 0:
            text += ", %d xfailed" % self._xfailed
        if len(self._xpassed) > 0:
            text += ", %d xpassed" % len(self._xpassed)
        if len(self._exceptions) > 0:
            text += ", %d exceptions" % len(self._exceptions)
        text += " in %.2f seconds" % (self._t_end - self._t_start)

        if len(self._xpassed) > 0:
            self.write_center("xpassed tests", "_")
            for e in self._xpassed:
                self.write("%s:%s\n" % (e[0], e[1]))
            self.write("\n")

        if self._tb_style != "no" and len(self._exceptions) > 0:
            #self.write_center("These tests raised an exception", "_")
            for e in self._exceptions:
                filename, f, (t, val, tb) = e
                self.write_center("", "_")
                if f is None:
                    # import errors carry no test function
                    s = "%s" % filename
                else:
                    s = "%s:%s" % (filename, f.__name__)
                self.write_center(s, "_")
                self.write_exception(t, val, tb)
            self.write("\n")

        if self._tb_style != "no" and len(self._failed) > 0:
            #self.write_center("Failed", "_")
            for e in self._failed:
                filename, f, (t, val, tb) = e
                self.write_center("", "_")
                self.write_center("%s:%s" % (filename, f.__name__), "_")
                self.write_exception(t, val, tb)
            self.write("\n")

        if self._tb_style != "no" and len(self._failed_doctest) > 0:
            #self.write_center("Failed", "_")
            for e in self._failed_doctest:
                filename, msg = e
                self.write_center("", "_")
                self.write_center("%s" % filename, "_")
                self.write(msg)
            self.write("\n")

        self.write_center(text)
        ok = len(self._failed) == 0 and len(self._exceptions) == 0 and \
                len(self._failed_doctest) == 0
        if not ok:
            self.write("DO *NOT* COMMIT!\n")
        return ok

    def entering_filename(self, filename, n):
        # Announce a new file: relative name followed by its test count.
        rel_name = filename[len(self._root_dir)+1:]
        self._active_file = rel_name
        self._active_file_error = False
        self.write(rel_name)
        self.write("[%d] " % n)

    def leaving_filename(self):
        # Close the file's line with a right-aligned [OK]/[FAIL] tag.
        if self._colors:
            self.write(" ")
            if self._active_file_error:
                self.write("[FAIL]", "Red", align="right")
            else:
                self.write("[OK]", "Green", align="right")
        self.write("\n")
        if self._verbose:
            self.write("\n")

    def entering_test(self, f):
        self._active_f = f
        if self._verbose:
            self.write("\n"+f.__name__+" ")

    def test_xfail(self):
        # expected failure: counted, shown as "f"
        self._xfailed += 1
        self.write("f")

    def test_xpass(self, fname):
        # unexpected pass: recorded for the summary, shown as "X"
        self._xpassed.append((self._active_file, fname))
        self.write("X")

    def test_fail(self, exc_info):
        self._failed.append((self._active_file, self._active_f, exc_info))
        self.write("F")
        self._active_file_error = True

    def doctest_fail(self, name, error_msg):
        # the first line contains "******", remove it:
        error_msg = "\n".join(error_msg.split("\n")[1:])
        self._failed_doctest.append((name, error_msg))
        self.write("F")
        self._active_file_error = True

    def test_pass(self):
        self._passed += 1
        if self._verbose:
            self.write("ok")
        else:
            self.write(".")

    def test_skip(self):
        self._skipped += 1
        self.write("s")

    def test_exception(self, exc_info):
        # non-assertion error inside a test: recorded separately from failures
        self._exceptions.append((self._active_file, self._active_f, exc_info))
        self.write("E")
        self._active_file_error = True

    def import_error(self, filename, exc_info):
        # the file could not even be imported/executed
        self._exceptions.append((filename, None, exc_info))
        rel_name = filename[len(self._root_dir)+1:]
        self.write(rel_name)
        self.write("[?]   Failed to import")
        if self._colors:
            self.write(" ")
            self.write("[FAIL]", "Red", align="right")
        self.write("\n")
| {
"content_hash": "030585bf117f0e91d3b7ed92d8aa0046",
"timestamp": "",
"source": "github",
"line_count": 663,
"max_line_length": 95,
"avg_line_length": 33.009049773755656,
"alnum_prop": 0.5104866346812885,
"repo_name": "gnulinooks/sympy",
"id": "cd514e2aa170250691f95adf2f310748f0c161e7",
"size": "21885",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sympy/utilities/runtests.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
__author__ = 'orhan'
class UnionFind(object):
    """Abstract union-find (disjoint-set) base class.

    Holds n elements, each initially its own singleton set; subclasses
    implement connect() and parent().
    """

    def __init__(self, n):
        self.n = n
        self.__initArray()

    def __initArray(self):
        # arr[i] == i: every element starts as its own representative.
        # list(range(n)) behaves identically on Python 2 and 3 (the original
        # comprehension over xrange was Python-2 only).
        self.arr = list(range(self.n))

    def connect(self, item1, item2):
        """Merge the sets containing item1 and item2 (subclass hook)."""
        # NotImplementedError (a subclass of Exception, so existing handlers
        # still catch it) is the idiomatic signal for an abstract method.
        raise NotImplementedError("Not implemented")

    def parent(self, index):
        """Return the set representative of index (subclass hook)."""
        raise NotImplementedError("Not implemented")

    def isConnected(self, item1, item2):
        """True iff item1 and item2 share a set representative."""
        return self.parent(item1) == self.parent(item2)
"content_hash": "c3c887a279d99724c68604da70b3416f",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 55,
"avg_line_length": 22.4,
"alnum_prop": 0.5915178571428571,
"repo_name": "ocozalp/Algorithms",
"id": "3a51f2f4ee4c697b11320df5e99251dab72c20eb",
"size": "448",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "unionfind/base.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "45722"
}
],
"symlink_target": ""
} |
import sys
import os
import numpy as np
import pathlib
import itk
def custom_callback(name, progress):
    """ITK import-progress hook: announce on stderr when a lazy module
    starts loading (progress == 0) and when it finishes (progress == 1)."""
    if progress == 0:
        print(f"Loading {name}...", file=sys.stderr)
    elif progress == 1:
        print("done", file=sys.stderr)
# Register a progress callback so ITK module loads are reported on stderr.
import itkConfig

itkConfig.ImportCallback = custom_callback

# test setting the number of threads
itk.set_nthreads(4)
assert itk.get_nthreads() == 4

# test the force load function
itk.force_load()
# Command-line inputs: image, mesh and transform files exercised below.
filename = sys.argv[1]
mesh_filename = sys.argv[2]
transform_filename = sys.argv[3]

PixelType = itk.UC
dim = 2
ImageType = itk.Image[PixelType, dim]
ReaderType = itk.ImageFileReader[ImageType]
reader = ReaderType.New(FileName=filename)
# test snake_case keyword arguments
reader = ReaderType.New(file_name=filename)

# test echo
itk.echo(reader)
itk.echo(reader, sys.stdout)

# test class_
assert itk.class_(reader) == ReaderType
assert itk.class_("dummy") == str

# test template: returns (template, (template arguments)) for both
# the instantiated class and an instance of it
assert itk.template(ReaderType) == (itk.ImageFileReader, (ImageType,))
assert itk.template(reader) == (itk.ImageFileReader, (ImageType,))
try:
    itk.template(str)
    raise Exception("unknown class should send an exception")
except KeyError:
    pass

# test ctype: maps C type names (whitespace-insensitive) to itk ctypes
assert itk.ctype("unsigned short") == itk.US
assert itk.ctype(" unsigned \n short \t ") == itk.US
assert itk.ctype("signed short") == itk.SS
assert itk.ctype("short") == itk.SS
try:
    itk.ctype("dummy")
    raise Exception("unknown C type should send an exception")
except KeyError:
    pass
# Accessor helpers: each works on a filter (implicitly using its output)
# and on an image, and passes non-ITK objects through unchanged.
# test output
assert itk.output(reader) == reader.GetOutput()
assert itk.output(1) == 1
# test the deprecated image
assert itk.image(reader) == reader.GetOutput()
assert itk.image(1) == 1
# test size
s = itk.size(reader)
assert s[0] == s[1] == 256
s = itk.size(reader.GetOutput())
assert s[0] == s[1] == 256
# test physical size
s = itk.physical_size(reader)
assert s[0] == s[1] == 256.0
s = itk.physical_size(reader.GetOutput())
assert s[0] == s[1] == 256.0
# test spacing
s = itk.spacing(reader)
assert s[0] == s[1] == 1.0
s = itk.spacing(reader.GetOutput())
assert s[0] == s[1] == 1.0
# test origin
s = itk.origin(reader)
assert s[0] == s[1] == 0.0
s = itk.origin(reader.GetOutput())
assert s[0] == s[1] == 0.0
# test index
s = itk.index(reader)
assert s[0] == s[1] == 0
s = itk.index(reader.GetOutput())
assert s[0] == s[1] == 0
# test region
s = itk.region(reader)
assert s.GetIndex()[0] == s.GetIndex()[1] == 0
assert s.GetSize()[0] == s.GetSize()[1] == 256
s = itk.region(reader.GetOutput())
assert s.GetIndex()[0] == s.GetIndex()[1] == 0
assert s.GetSize()[0] == s.GetSize()[1] == 256
# test range
assert itk.range(reader) == (0, 255)
assert itk.range(reader.GetOutput()) == (0, 255)
# test write
itk.imwrite(reader, sys.argv[4])
itk.imwrite(reader, sys.argv[4], imageio=itk.PNGImageIO.New())
itk.imwrite(reader, sys.argv[4], True)
# test read
image = itk.imread(pathlib.Path(filename))
assert type(image) == itk.Image[itk.RGBPixel[itk.UC], 2]
image = itk.imread(filename, itk.F)
assert type(image) == itk.Image[itk.F, 2]
# fallback_only keeps the file's native pixel type when it is wrapped
image = itk.imread(filename, itk.F, fallback_only=True)
assert type(image) == itk.Image[itk.RGBPixel[itk.UC], 2]
try:
    image = itk.imread(filename, fallback_only=True)
    # Should never reach this point if test passes since an exception
    # is expected.
    raise Exception("`itk.imread()` fallback_only should have failed")
except Exception as e:
    if str(e) == "pixel_type must be set when using the fallback_only option":
        pass
    else:
        raise e
image = itk.imread(filename, imageio=itk.PNGImageIO.New())
assert type(image) == itk.Image[itk.RGBPixel[itk.UC], 2]
# Make sure we can read unsigned short, unsigned int, and cast
image = itk.imread(filename, itk.UI)
assert type(image) == itk.Image[itk.UI, 2]
image = itk.imread(filename, itk.SI)
assert type(image) == itk.Image[itk.SI, 2]
as_float = image.astype(np.float32)
assert type(as_float) == itk.Image[itk.F, 2]
# test mesh read / write
mesh = itk.meshread(mesh_filename)
assert type(mesh) == itk.Mesh[itk.F, 3]
mesh = itk.meshread(mesh_filename, itk.UC)
assert type(mesh) == itk.Mesh[itk.UC, 3]
mesh = itk.meshread(mesh_filename, itk.UC, fallback_only=True)
assert type(mesh) == itk.Mesh[itk.F, 3]
itk.meshwrite(mesh, sys.argv[5])
itk.meshwrite(mesh, sys.argv[5], compression=True)
# test search: case-insensitive name lookup in the wrapped API
res = itk.search("Index")
assert res[0] == "Index"
assert res[1] == "index"
assert "ContinuousIndex" in res
res = itk.search("index", True)
assert "Index" not in res
# test down_cast
obj = itk.Object.cast(reader)
# be sure that the reader is casted to itk::Object
assert obj.__class__ == itk.Object
down_casted = itk.down_cast(obj)
assert down_casted == reader
assert down_casted.__class__ == ReaderType
# test setting the IO manually
png_io = itk.PNGImageIO.New()
assert png_io.GetFileName() == ""
reader = itk.ImageFileReader.New(FileName=filename, ImageIO=png_io)
reader.Update()
# Update() must have configured the user-supplied IO with the file name
assert png_io.GetFileName() == filename
# test reading image series: two 2-D slices stack into a 3-D volume
series_reader = itk.ImageSeriesReader.New(FileNames=[filename, filename])
series_reader.Update()
assert series_reader.GetOutput().GetImageDimension() == 3
assert series_reader.GetOutput().GetLargestPossibleRegion().GetSize()[2] == 2
# test reading image series and check that dimension is not increased if
# last dimension is 1.
image_series = itk.Image[itk.UC, 3].New()
image_series.SetRegions([10, 7, 1])
image_series.Allocate()
image_series.FillBuffer(0)
image_series3d_filename = os.path.join(sys.argv[6], "image_series_extras_py.mha")
itk.imwrite(image_series, image_series3d_filename)
series_reader = itk.ImageSeriesReader.New(
    FileNames=[image_series3d_filename, image_series3d_filename]
)
series_reader.Update()
assert series_reader.GetOutput().GetImageDimension() == 3
# test reading image series with itk.imread()
image_series = itk.imread([pathlib.Path(filename), pathlib.Path(filename)])
assert image_series.GetImageDimension() == 3
# Numeric series filename generation without any integer index. It is
# only to produce an ITK object that users could set as an input to
# `itk.ImageSeriesReader.New()` or `itk.imread()` and test that it works.
numeric_series_filename = itk.NumericSeriesFileNames.New()
numeric_series_filename.SetStartIndex(0)
numeric_series_filename.SetEndIndex(3)
numeric_series_filename.SetIncrementIndex(1)
numeric_series_filename.SetSeriesFormat(filename)
image_series = itk.imread(numeric_series_filename.GetFileNames())
number_of_files = len(numeric_series_filename.GetFileNames())
assert image_series.GetImageDimension() == 3
assert image_series.GetLargestPossibleRegion().GetSize()[2] == number_of_files
# test reading image series with `itk.imread()` and check that dimension is
# not increased if last dimension is 1.
image_series = itk.imread([image_series3d_filename, image_series3d_filename])
assert image_series.GetImageDimension() == 3
# Expected parameters stored in the transform file given on the command line.
baseline_parameters = np.array(
    [
        0.6563149,
        0.58065837,
        -0.48175367,
        -0.74079868,
        0.37486398,
        -0.55739959,
        -0.14306664,
        0.72271215,
        0.67617978,
        -66.0,
        69.0,
        32.0,
    ]
)
baseline_fixed_parameters = np.array([0.0, 0.0, 0.0])
# test transform read / write
transforms = itk.transformread(transform_filename)
fixed_parameters = np.asarray(transforms[0].GetFixedParameters())
parameters = np.asarray(transforms[0].GetParameters())
assert np.allclose(fixed_parameters, baseline_fixed_parameters)
assert np.allclose(parameters, baseline_parameters)
# Prepend an extra translation, round-trip the list through disk, and
# verify both entries come back intact.
additional_transform = itk.TranslationTransform[itk.D, 3].New()
baseline_additional_transform_params = [3.0, 2.0, 8.0]
parameters = additional_transform.GetParameters()
parameters[0] = baseline_additional_transform_params[0]
parameters[1] = baseline_additional_transform_params[1]
parameters[2] = baseline_additional_transform_params[2]
additional_transform.SetParameters(parameters)
transforms.insert(0, additional_transform)
itk.transformwrite(transforms, sys.argv[7], compression=True)
transforms = itk.transformread(sys.argv[7])
fixed_parameters = np.asarray(transforms[1].GetFixedParameters())
parameters = np.asarray(transforms[1].GetParameters())
assert np.allclose(fixed_parameters, baseline_fixed_parameters)
assert np.allclose(parameters, baseline_parameters)
parameters = np.asarray(transforms[0].GetParameters())
assert np.allclose(parameters, np.array(baseline_additional_transform_params))
# pipeline, auto_pipeline and templated class are tested in other files
# BridgeNumPy
# Images
image = itk.imread(filename)
arr = itk.array_from_image(image)
arr.fill(1)
# array_from_image copies: mutating the array must not affect the image
assert np.any(arr != itk.array_from_image(image))
arr = itk.array_from_image(image)
arr.fill(1)
assert np.any(arr != itk.array_from_image(image))
view = itk.GetArrayViewFromImage(image)
view.fill(1)
# ...whereas a view shares the pixel buffer with the image
assert np.all(view == itk.array_from_image(image))
image = itk.image_from_array(arr)
image.FillBuffer(2)
assert np.any(arr != itk.array_from_image(image))
image = itk.GetImageViewFromArray(arr)
image.FillBuffer(2)
assert np.all(arr == itk.array_from_image(image))
image = itk.image_from_array(arr, is_vector=True)
assert image.GetImageDimension() == 2
image = itk.GetImageViewFromArray(arr, is_vector=True)
assert image.GetImageDimension() == 2
arr = np.array([[1, 2, 3], [4, 5, 6]]).astype(np.uint8)
assert arr.shape[0] == 2
assert arr.shape[1] == 3
assert arr[1, 1] == 5
image = itk.image_from_array(arr)
# keep_axes=True preserves numpy's axis order; False transposes to ITK order
arrKeepAxes = itk.array_from_image(image, keep_axes=True)
assert arrKeepAxes.shape[0] == 3
assert arrKeepAxes.shape[1] == 2
assert arrKeepAxes[1, 1] == 4
arr = itk.array_from_image(image, keep_axes=False)
assert arr.shape[0] == 2
assert arr.shape[1] == 3
assert arr[1, 1] == 5
arrKeepAxes = itk.GetArrayViewFromImage(image, keep_axes=True)
assert arrKeepAxes.shape[0] == 3
assert arrKeepAxes.shape[1] == 2
assert arrKeepAxes[1, 1] == 4
arr = itk.GetArrayViewFromImage(image, keep_axes=False)
assert arr.shape[0] == 2
assert arr.shape[1] == 3
assert arr[1, 1] == 5
arr = arr.copy()
image = itk.image_from_array(arr)
image2 = type(image).New()
image2.Graft(image)
del image  # Delete image but pixel data should be kept in img2
image = itk.image_from_array(arr + 1)  # Fill former memory if wrongly released
assert np.array_equal(arr, itk.GetArrayViewFromImage(image2))
image2.SetPixel(
    [0] * image2.GetImageDimension(), 3
)  # For mem check in dynamic analysis
# VNL Vectors: GetArrayFrom* copies, GetArrayViewFrom* shares the buffer
v1 = itk.vnl_vector.D(2)
v1.fill(1)
v_np = itk.GetArrayFromVnlVector(v1)
assert v1.get(0) == v_np[0]
v_np[0] = 0
assert v1.get(0) != v_np[0]
view = itk.GetArrayViewFromVnlVector(v1)
assert v1.get(0) == view[0]
view[0] = 0
assert v1.get(0) == view[0]
# VNL Matrices: same copy vs. view distinction
m1 = itk.vnl_matrix.D(2, 2)
m1.fill(1)
m_np = itk.GetArrayFromVnlMatrix(m1)
assert m1.get(0, 0) == m_np[0, 0]
m_np[0, 0] = 0
assert m1.get(0, 0) != m_np[0, 0]
view = itk.GetArrayViewFromVnlMatrix(m1)
assert m1.get(0, 0) == view[0, 0]
view[0, 0] = 0
assert m1.get(0, 0) == view[0, 0]
arr = np.zeros([3, 3])
m_vnl = itk.GetVnlMatrixFromArray(arr)
assert m_vnl(0, 0) == 0
m_vnl.put(0, 0, 3)
assert m_vnl(0, 0) == 3
# the vnl matrix is a copy: the source array is untouched
assert arr[0, 0] == 0
# ITK Matrix
arr = np.zeros([3, 3], float)
m_itk = itk.GetMatrixFromArray(arr)
# Test snake case function
m_itk = itk.matrix_from_array(arr)
m_itk.SetIdentity()
# Test that the numpy array has not changed,...
assert arr[0, 0] == 0
# but that the ITK matrix has the correct value.
assert m_itk(0, 0) == 1
arr2 = itk.GetArrayFromMatrix(m_itk)
# Check that snake case function also works
arr2 = itk.array_from_matrix(m_itk)
# Check that the new array has the new value.
assert arr2[0, 0] == 1
arr2[0, 0] = 2
# Change the array value,...
assert arr2[0, 0] == 2
# and make sure that the matrix hasn't changed.
assert m_itk(0, 0) == 1
# Test __repr__
assert repr(m_itk) == "itkMatrixD33 ([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]])"
# Test __array__
assert np.array_equal(np.asarray(m_itk), np.eye(3))
# test .astype for itk.Image
numpyImage = np.random.randint(0, 256, (8, 12, 5)).astype(np.uint8)
image = itk.image_from_array(numpyImage, is_vector=False)
# ttype may be supplied as a tuple, a list, or a bare type
assert type(image) == type(itk.image_from_array(numpyImage, ttype=(type(image),)))
assert type(image) == type(itk.image_from_array(numpyImage, ttype=[type(image)]))
assert type(image) == type(itk.image_from_array(numpyImage, ttype=type(image)))
# casting to the same pixel type returns the image unchanged
cast = image.astype(np.uint8)
assert cast == image
(input_image_template, (input_pixel_type, input_image_dimension)) = itk.template(image)
assert hasattr(itk.CastImageFilter, "IUC3IF3")
# Each entry: [target itk/numpy type, expected dtype, wrapped filter name];
# a cast is only attempted when the corresponding wrapping exists.
for t in [
    [itk.F, np.float32, "IUC3IF3"],
    [itk.SS, np.int16, "IUC3ISS3"],
    [itk.UI, np.uint32, "IUC3IUI3"],
    [np.float32, np.float32, "IUC3IF3"],
]:
    if hasattr(itk.CastImageFilter, t[2]):
        cast = image.astype(t[0])
        (cast_image_template, (cast_pixel_type, cast_image_dimension)) = itk.template(
            cast
        )
        assert (
            cast_image_template == input_image_template
            and cast_image_dimension == input_image_dimension
            and cast.dtype == t[1]
        )
# test .astype for itk.VectorImage
numpyImage = np.random.randint(0, 256, (8, 5, 3)).astype(np.float32)
image = itk.image_from_array(numpyImage, is_vector=True)
assert type(image) == type(itk.image_from_array(numpyImage, ttype=(type(image),)))
assert type(image) == type(itk.image_from_array(numpyImage, ttype=[type(image)]))
assert type(image) == type(itk.image_from_array(numpyImage, ttype=type(image)))
ImageVectorsType = itk.Image[itk.Vector[itk.F, 3], 2]
imagevectors = itk.cast_image_filter(Input=image, ttype=(type(image), ImageVectorsType))
assert type(imagevectors) == ImageVectorsType
cast = image.astype(np.float32)
assert cast == image
(vector_image_template, (vector_pixel_type, vector_image_dimension)) = itk.template(
    image
)
for t in [
    [itk.D, np.float64, "VIF2VID2"],
    [itk.SS, np.int16, "VIF2VISS2"],
    [itk.UI, np.uint32, "VIF2VIUI2"],
    [np.float64, np.float64, "VIF2VID2"],
]:
    if hasattr(itk.CastImageFilter, t[2]):
        cast = image.astype(t[0])
        (cast_image_template, (cast_pixel_type, cast_image_dimension)) = itk.template(
            cast
        )
        assert (
            cast_image_template == vector_image_template
            and cast_image_dimension == vector_image_dimension
            and cast.dtype == t[1]
        )
# Test .astype for conversion between vector-like pixel types.
components = 3
numpyImage = np.random.randint(0, 256, (12, 8, components)).astype(np.uint8)
input_image = itk.image_from_array(numpyImage, is_vector=True)
if type(input_image) == itk.Image[itk.RGBPixel[itk.UC], 2] and hasattr(
    itk.CastImageFilter, "IRGBUC2IVF32"
):
    output_pixel_type = itk.Vector[itk.F, components]
    output_image = input_image.astype(output_pixel_type)
    assert type(output_image) == itk.Image[output_pixel_type, 2]
# 4-D round trips only when the 4-D unsigned char image is wrapped
if "(<itkCType unsigned char>, 4)" in itk.Image.GetTypesAsList():
    arr = np.random.randint(0, 255, size=(3, 4, 5, 6), dtype=np.uint8)
    image = itk.image_view_from_array(arr)
    arr_back = itk.array_view_from_image(image)
    assert np.allclose(arr, arr_back)
    image = itk.image_from_array(arr)
    arr_back = itk.array_from_image(image)
    assert np.allclose(arr, arr_back)
# xarray conversion (optional dependency: tests are skipped when the
# import fails)
try:
    import xarray as xr

    print("Testing xarray conversion")

    image = itk.imread(filename)
    image.SetSpacing((0.1, 0.2))
    image.SetOrigin((30.0, 44.0))
    theta = np.radians(30)
    cosine = np.cos(theta)
    sine = np.sin(theta)
    rotation = np.array(((cosine, -sine), (sine, cosine)))
    image.SetDirection(rotation)
    image["MyMeta"] = 4.0

    data_array = itk.xarray_from_image(image)
    assert data_array.dims[0] == "y"
    assert data_array.dims[1] == "x"
    assert data_array.dims[2] == "c"
    assert np.array_equal(data_array.values, itk.array_from_image(image))
    assert len(data_array.coords["x"]) == 256
    assert len(data_array.coords["y"]) == 256
    assert len(data_array.coords["c"]) == 3
    # coords hold physical positions: origin + index * spacing
    assert data_array.coords["x"][0] == 30.0
    assert data_array.coords["x"][1] == 30.1
    assert data_array.coords["y"][0] == 44.0
    assert data_array.coords["y"][1] == 44.2
    assert data_array.coords["c"][0] == 0
    assert data_array.coords["c"][1] == 1
    assert data_array.attrs["direction"][0, 0] == cosine
    assert data_array.attrs["direction"][0, 1] == sine
    assert data_array.attrs["direction"][1, 0] == -sine
    assert data_array.attrs["direction"][1, 1] == cosine
    assert data_array.attrs["MyMeta"] == 4.0
    # round trip must preserve pixels, spacing, origin, direction and metadata
    round_trip = itk.image_from_xarray(data_array)
    assert np.array_equal(itk.array_from_image(round_trip), itk.array_from_image(image))
    spacing = round_trip.GetSpacing()
    assert np.isclose(spacing[0], 0.1)
    assert np.isclose(spacing[1], 0.2)
    origin = round_trip.GetOrigin()
    assert np.isclose(origin[0], 30.0)
    assert np.isclose(origin[1], 44.0)
    direction = round_trip.GetDirection()
    assert np.isclose(direction(0, 0), cosine)
    assert np.isclose(direction(0, 1), -sine)
    assert np.isclose(direction(1, 0), sine)
    assert np.isclose(direction(1, 1), cosine)
    assert round_trip["MyMeta"] == 4.0
    # unrecognized dimension names must be rejected
    wrong_order = data_array.swap_dims({"y": "z"})
    try:
        round_trip = itk.image_from_xarray(wrong_order)
        assert False
    except ValueError:
        pass
    # Check empty array
    empty_array = np.array([], dtype=np.uint8)
    empty_array.shape = (0, 0, 0)
    empty_image = itk.image_from_array(empty_array)
    empty_da = itk.xarray_from_image(empty_image)
    empty_image_round = itk.image_from_xarray(empty_da)
    # Check order
    arr = np.random.randint(0, 255, size=(4, 5, 6), dtype=np.uint8)
    data_array = xr.DataArray(arr, dims=["z", "y", "x"])
    image = itk.image_from_xarray(data_array)
    assert np.allclose(arr, itk.array_view_from_image(image))
    assert np.allclose(arr.shape, itk.array_view_from_image(image).shape)
    data_array = xr.DataArray(arr, dims=["x", "y", "z"])
    image = itk.image_from_xarray(data_array)
    assert np.allclose(arr.transpose(), itk.array_view_from_image(image))
    assert np.allclose(arr.shape[::-1], itk.array_view_from_image(image).shape)
    data_array = xr.DataArray(arr, dims=["y", "x", "c"])
    image = itk.image_from_xarray(data_array)
    assert np.allclose(arr, itk.array_view_from_image(image))
    assert np.allclose(arr.shape, itk.array_view_from_image(image).shape)
    data_array = xr.DataArray(arr, dims=["c", "x", "y"])
    image = itk.image_from_xarray(data_array)
    assert np.allclose(arr.transpose(), itk.array_view_from_image(image))
    assert np.allclose(arr.shape[::-1], itk.array_view_from_image(image).shape)
    data_array = xr.DataArray(arr, dims=["q", "x", "y"])
    try:
        image = itk.image_from_xarray(data_array)
        assert False
    except ValueError:
        pass
    if "(<itkCType unsigned char>, 4)" in itk.Image.GetTypesAsList():
        arr = np.random.randint(0, 255, size=(4, 5, 6, 3), dtype=np.uint8)
        data_array = xr.DataArray(arr, dims=["t", "z", "y", "x"])
        image = itk.image_from_xarray(data_array)
        assert np.allclose(arr, itk.array_view_from_image(image))
        assert np.allclose(arr.shape, itk.array_view_from_image(image).shape)
except ImportError:
    print("xarray not imported. Skipping xarray conversion tests")
    pass
# vtk conversion (optional dependency: tests are skipped when the import
# fails)
try:
    import vtk

    print("Testing vtk conversion")

    image = itk.image_from_array(np.random.rand(2, 3, 4))
    z_rot = np.asarray([[0, 1, 0], [-1, 0, 0], [0, 0, 1]], dtype=np.float64)
    z_rot_itk = itk.matrix_from_array(z_rot)
    image.SetDirection(z_rot_itk)
    vtk_image = itk.vtk_image_from_image(image)
    image_round = itk.image_from_vtk_image(vtk_image)
    # geometry and pixel data must survive the ITK -> VTK -> ITK round trip
    assert np.array_equal(itk.origin(image), itk.origin(image_round))
    assert np.array_equal(itk.spacing(image), itk.spacing(image_round))
    assert np.array_equal(itk.size(image), itk.size(image_round))
    assert np.array_equal(
        itk.array_view_from_image(image), itk.array_view_from_image(image_round)
    )
    if vtk.vtkVersion.GetVTKMajorVersion() >= 9:
        z_rot_round = itk.array_from_matrix(image_round.GetDirection())
        assert np.array_equal(z_rot, z_rot_round)
    else:
        print("VTK version <9. Direction unsupported.")
    # same round trip with a 2-D vector image
    image = itk.image_from_array(
        np.random.rand(5, 4, 2).astype(np.float32), is_vector=True
    )
    z_rot = np.asarray([[0, 1], [-1, 0]], dtype=np.float64)
    z_rot_itk = itk.matrix_from_array(z_rot)
    image.SetDirection(z_rot_itk)
    vtk_image = itk.vtk_image_from_image(image)
    image_round = itk.image_from_vtk_image(vtk_image)
    assert np.array_equal(itk.origin(image), itk.origin(image_round))
    assert np.array_equal(itk.spacing(image), itk.spacing(image_round))
    assert np.array_equal(itk.size(image), itk.size(image_round))
    assert np.array_equal(
        itk.array_view_from_image(image), itk.array_view_from_image(image_round)
    )
    if vtk.vtkVersion.GetVTKMajorVersion() >= 9:
        z_rot_round = itk.array_from_matrix(image_round.GetDirection())
        assert np.array_equal(z_rot, z_rot_round)
except ImportError:
    print("vtk not imported. Skipping vtk conversion tests")
    pass
| {
"content_hash": "36e3ded2a9aa52d8f2f5bfa8a29b76b8",
"timestamp": "",
"source": "github",
"line_count": 613,
"max_line_length": 90,
"avg_line_length": 34.58727569331158,
"alnum_prop": 0.6863031789453825,
"repo_name": "vfonov/ITK",
"id": "89839eb3ffec265f23e2d89bd27e2c4c260f6c76",
"size": "21998",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Wrapping/Generators/Python/Tests/extras.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "463579"
},
{
"name": "C++",
"bytes": "35045408"
},
{
"name": "CMake",
"bytes": "1623319"
},
{
"name": "CSS",
"bytes": "17428"
},
{
"name": "HTML",
"bytes": "8370"
},
{
"name": "Java",
"bytes": "28281"
},
{
"name": "JavaScript",
"bytes": "1522"
},
{
"name": "Objective-C++",
"bytes": "5640"
},
{
"name": "Perl",
"bytes": "6029"
},
{
"name": "Python",
"bytes": "569543"
},
{
"name": "Ruby",
"bytes": "296"
},
{
"name": "SWIG",
"bytes": "66033"
},
{
"name": "Shell",
"bytes": "165002"
},
{
"name": "Tcl",
"bytes": "77628"
},
{
"name": "XSLT",
"bytes": "8634"
}
],
"symlink_target": ""
} |
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Add a ``host`` foreign key to the PortLog model."""

    dependencies = [
        ('monitor', '0006_auto_20180809_1109'),
    ]

    operations = [
        migrations.AddField(
            model_name='portlog',
            name='host',
            # Nullable so existing PortLog rows migrate without a host;
            # deleting a Host cascades to its port logs.
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='monitor.Host'),
        ),
    ]
| {
"content_hash": "851bf5e187b6930d10136abaea7e7fd5",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 111,
"avg_line_length": 25.176470588235293,
"alnum_prop": 0.6191588785046729,
"repo_name": "chonpz28/django-monitor",
"id": "060ec6610c044a2119946a64143cfdafbf7d9144",
"size": "475",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "monitor/migrations/0007_portlog_host.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "2352"
},
{
"name": "JavaScript",
"bytes": "484"
},
{
"name": "Python",
"bytes": "7303"
}
],
"symlink_target": ""
} |
"""
Lexer generate tokens for parsing.
"""
from typing import List
from src.token import Token, TokenType
from src.source import BaseSource
from src.exceptions import InvalidTokenException
KEYWORDS = ("if", "else", "case", "while",
"do", "break", "continue", "return", "switch",
"default", "int", "float", "char", "bool")
VARTYPES = ("int", "float", "char", "bool")
DELIMITERS = (",", ";", ":", "(", ")", "[", "]", "{", "}")
OPERATORS = ("+", "-", "*", "/", "<", "=", ">", "&", "|", "!")
class Lexer(object):
    """Lexical analyzer to generate tokens from source code.

    Characters come from ``self.src`` (a ``BaseSource``):
    ``src.next_char(True)`` peeks without consuming, ``src.next_char()``
    consumes, and the source yields ``-1`` (an int) at end of input.
    Characters of the token being matched accumulate in ``read_buffer``.
    """

    # Second character that extends a one-char operator into a
    # two-char operator (e.g. ==, ++, --, !=, >=, <=, &&, ||).
    _OPERATOR_PAIRS = {'+': '+', '-': '-', '=': '=', '!': '=',
                       '>': '=', '<': '=', '&': '&', '|': '|'}

    def __init__(self, file_source: BaseSource):
        self.read_buffer: List[str] = []   # characters of the current token
        self.token_list: List[Token] = []  # all tokens produced so far
        self.src: BaseSource = file_source

    def create_token(self, t_type: str, token_literal: str) -> Token:
        """Create a token from the given literal, record it, and return it."""
        token: Token = Token(t_type, token_literal, self.src)
        self.token_list.append(token)
        return token

    def match_literal(self) -> Token:
        """Match literal elements (identifier, keyword, true and false values)."""
        while True:
            next_char: str = self.src.next_char(True)
            # EOF must be checked before calling str methods on the peeked
            # value: at EOF the source returns -1 (an int), so the original
            # check order crashed with AttributeError instead of raising
            # InvalidTokenException.
            if next_char == -1:
                raise InvalidTokenException("unexpected EOF")
            if next_char == ' ' or self.src.is_line_end(next_char) or next_char in DELIMITERS or next_char in OPERATORS:
                token_literal: str = ''.join(self.read_buffer)
                if token_literal == 'true':
                    return self.create_token(TokenType.BOOL_CONST, True)
                # BUG FIX: was misspelled 'flase', so the literal `false`
                # was silently tokenized as an ordinary identifier.
                elif token_literal == 'false':
                    return self.create_token(TokenType.BOOL_CONST, False)
                else:
                    return self.create_token(TokenType.KEYWORD if token_literal in KEYWORDS else TokenType.IDENTIFIER, token_literal)
            elif not next_char.isalpha() and not next_char.isdigit() and next_char != '_':
                raise InvalidTokenException(
                    "illegal character %s appeared" % next_char)
            self.read_buffer.append(self.src.next_char())

    def match_character(self) -> Token:
        """Match a character constant: an optionally escaped char in apostrophes."""
        self.src.next_char()  # consume the opening apostrophe
        next_char: str = self.src.next_char()
        self.read_buffer.append(next_char)
        if next_char == '\\':
            # Escape sequence: also consume the escaped character.
            next_char = self.src.next_char()
            self.read_buffer.append(next_char)
        # BUG FIX: the original never consumed the closing apostrophe; it
        # compared the content character itself against "'", so every
        # well-formed character constant (except an escaped apostrophe)
        # was rejected.
        next_char = self.src.next_char()
        if next_char == '\'':
            token_literal: str = ''.join(self.read_buffer)
            return self.create_token(TokenType.CHAR_CONST, token_literal)
        else:
            raise InvalidTokenException(
                "character must be quote in apostrophes.")

    def match_string(self) -> Token:
        """Match a string constant: characters quoted in double quotes."""
        self.src.next_char()  # consume the opening quote
        while True:
            next_char: str = self.src.next_char()
            if next_char == -1:
                raise InvalidTokenException("unexpected EOF")
            if next_char == '\"':
                token_literal: str = ''.join(self.read_buffer)
                return self.create_token(TokenType.STRING_CONST, token_literal)
            elif self.src.is_line_end(next_char):
                raise InvalidTokenException("unclosed quote")
            # BUG FIX: content characters were consumed but never buffered,
            # so every string constant ended up with an empty literal.
            self.read_buffer.append(next_char)

    def match_digit(self) -> Token:
        """Match an integer constant, switching to float on a decimal point."""
        while True:
            next_char: str = self.src.next_char(True)
            if next_char == -1:
                raise InvalidTokenException("unexpected EOF")
            if next_char == '.':
                # BUG FIX: the '.' was only peeked; it must be consumed
                # before delegating, otherwise match_float immediately
                # re-peeks it and rejects it as an illegal character.
                self.read_buffer.append(self.src.next_char())
                return self.match_float()
            # Operators also terminate a number (e.g. `1+2`); the original
            # accepted only whitespace, line ends and delimiters here.
            elif next_char == ' ' or self.src.is_line_end(next_char) or next_char in DELIMITERS or next_char in OPERATORS:
                token_literal: str = ''.join(self.read_buffer)
                return self.create_token(TokenType.INT_CONST, token_literal)
            elif not next_char.isdigit():
                raise InvalidTokenException(
                    "illegal character %s appeared" % next_char)
            self.read_buffer.append(self.src.next_char())

    def match_float(self) -> Token:
        """Match the fractional digits of a float constant."""
        while True:
            next_char = self.src.next_char(True)
            if next_char == -1:
                raise InvalidTokenException("unexpected EOF")
            if next_char == ' ' or self.src.is_line_end(next_char) or next_char in DELIMITERS or next_char in OPERATORS:
                token_literal: str = ''.join(self.read_buffer)
                return self.create_token(TokenType.FLOAT_CONST, token_literal)
            elif not next_char.isdigit():
                raise InvalidTokenException(
                    "illegal character %s appeared" % next_char)
            self.read_buffer.append(self.src.next_char())

    def match_line_comment(self) -> None:
        """Consume a line comment: everything up to the line end or EOF."""
        while True:
            next_char = self.src.next_char()
            if self.src.is_line_end(next_char) or next_char == -1:
                return
            self.read_buffer.append(next_char)

    def match_operator(self) -> Token:
        """Match a one- or two-character operator."""
        next_char = self.src.next_char()
        self.read_buffer.append(next_char)
        follow = self._OPERATOR_PAIRS.get(next_char)
        if follow is not None:
            self._peek_next_tk(follow)
        token_literal: str = ''.join(self.read_buffer)
        return self.create_token(TokenType.OPERATOR, token_literal)

    def _peek_next_tk(self, token, harsh=False):
        """Consume *token* if it follows (two-character operator); else
        raise when *harsh*, otherwise do nothing."""
        if self.src.next_char(True) == token:
            next_char = self.src.next_char()
            self.read_buffer.append(next_char)
        elif harsh:
            raise InvalidTokenException("token %s invalid" % (token))

    def match_delimiters(self) -> Token:
        """Match a single delimiter character."""
        next_char = self.src.next_char()
        self.read_buffer.append(next_char)
        token_literal: str = ''.join(self.read_buffer)
        return self.create_token(TokenType.DELIMITER, token_literal)

    def match(self):
        """Match engine: dispatch on the next character until EOF.

        Returns the full list of tokens produced.
        """
        while True:
            peek_next_char: str = self.src.next_char(True)
            if peek_next_char == -1:
                break
            if peek_next_char.isalpha():
                self.match_literal()
            elif peek_next_char.isdigit():
                self.match_digit()
            # BUG FIX: char and string constants were never dispatched, so
            # match_character/match_string were unreachable and quotes were
            # rejected as illegal characters.
            elif peek_next_char == '\'':
                self.match_character()
            elif peek_next_char == '\"':
                self.match_string()
            elif peek_next_char == '#':
                self.match_line_comment()
            elif peek_next_char in OPERATORS:
                self.match_operator()
            elif peek_next_char in DELIMITERS:
                self.match_delimiters()
            elif self.src.is_line_end(peek_next_char) or peek_next_char == ' ':
                self.src.next_char()
            else:
                raise InvalidTokenException
            self.read_buffer = []  # reset the per-token buffer
        return self.token_list
| {
"content_hash": "eaaea4f150f250cd984c647db07f8790",
"timestamp": "",
"source": "github",
"line_count": 178,
"max_line_length": 133,
"avg_line_length": 41.247191011235955,
"alnum_prop": 0.547534731680741,
"repo_name": "Thrimbda/Thrive-Compiler",
"id": "d067d14c27e694d7fed1f98bfbc800afadf58141",
"size": "7342",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/lexer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "96176"
}
],
"symlink_target": ""
} |
from designateclient import exceptions as designate_exception
from designateclient.v1 import records
import mock
from heat.engine.resources.openstack.designate import record
from heat.engine import stack
from heat.engine import template
from heat.tests import common
from heat.tests import utils
# Minimal HOT template with a single OS::Designate::Record resource; the
# property values below are what the tests assert against.
sample_template = {
    'heat_template_version': '2015-04-30',
    'resources': {
        'test_resource': {
            'type': 'OS::Designate::Record',
            'properties': {
                'name': 'test-record.com',
                'description': 'Test record',
                'ttl': 3600,
                'type': 'MX',
                'priority': 1,
                'data': '1.1.1.1',
                'domain': '1234567'
            }
        }
    }
}
class DesignateRecordTest(common.HeatTestCase):
def setUp(self):
    """Build a stack from the sample template and mock the resource's
    designate client plumbing so no real service is contacted."""
    super(DesignateRecordTest, self).setUp()
    self.ctx = utils.dummy_context()
    self.stack = stack.Stack(
        self.ctx, 'test_stack',
        template.Template(sample_template)
    )
    self.test_resource = self.stack['test_resource']
    # Mock client plugin
    self.test_client_plugin = mock.MagicMock()
    self.test_resource.client_plugin = mock.MagicMock(
        return_value=self.test_client_plugin)
    # Mock client
    self.test_client = mock.MagicMock()
    self.test_resource.client = mock.MagicMock(
        return_value=self.test_client)
def _get_mock_resource(self):
    """Return a mock designate record carrying a fixed resource id."""
    resource = mock.MagicMock()
    resource.id = '477e8273-60a7-4c41-b683-fdb0bc7cd152'
    return resource
def test_resource_validate_properties(self):
    """Every template property is exposed verbatim on the resource."""
    mock_record_create = self.test_client_plugin.record_create
    mock_resource = self._get_mock_resource()
    mock_record_create.return_value = mock_resource
    # validate the properties
    self.assertEqual(
        'test-record.com',
        self.test_resource.properties.get(record.DesignateRecord.NAME))
    self.assertEqual(
        'Test record',
        self.test_resource.properties.get(
            record.DesignateRecord.DESCRIPTION))
    self.assertEqual(
        3600,
        self.test_resource.properties.get(record.DesignateRecord.TTL))
    self.assertEqual(
        'MX',
        self.test_resource.properties.get(record.DesignateRecord.TYPE))
    self.assertEqual(
        1,
        self.test_resource.properties.get(record.DesignateRecord.PRIORITY))
    self.assertEqual(
        '1.1.1.1',
        self.test_resource.properties.get(record.DesignateRecord.DATA))
    self.assertEqual(
        '1234567',
        self.test_resource.properties.get(
            record.DesignateRecord.DOMAIN))
def test_resource_handle_create_non_mx_or_srv(self):
    """For record types other than MX/SRV, create sends priority=None."""
    mock_record_create = self.test_client_plugin.record_create
    mock_resource = self._get_mock_resource()
    mock_record_create.return_value = mock_resource
    for type in (set(self.test_resource._ALLOWED_TYPES) -
                 set([self.test_resource.MX,
                      self.test_resource.SRV])):
        # `args` aliases the dict assigned to properties, so mutating it
        # below also describes the expected record_create call.
        self.test_resource.properties = args = dict(
            name='test-record.com',
            description='Test record',
            ttl=3600,
            type=type,
            priority=1,
            data='1.1.1.1',
            domain='1234567'
        )
        self.test_resource.handle_create()
        # Make sure priority is set to None for non mx or srv records
        args['priority'] = None
        mock_record_create.assert_called_with(
            **args
        )
        # validate physical resource id
        self.assertEqual(mock_resource.id, self.test_resource.resource_id)
def test_resource_handle_create_mx_or_srv(self):
    """For MX/SRV record types, create passes the priority through."""
    mock_record_create = self.test_client_plugin.record_create
    mock_resource = self._get_mock_resource()
    mock_record_create.return_value = mock_resource
    for type in [self.test_resource.MX, self.test_resource.SRV]:
        # `args` aliases the dict assigned to properties: the exact same
        # values must reach record_create, priority included.
        self.test_resource.properties = args = dict(
            name='test-record.com',
            description='Test record',
            ttl=3600,
            type=type,
            priority=1,
            data='1.1.1.1',
            domain='1234567'
        )
        self.test_resource.handle_create()
        mock_record_create.assert_called_with(
            **args
        )
        # validate physical resource id
        self.assertEqual(mock_resource.id, self.test_resource.resource_id)
def test_resource_handle_update_non_mx_or_srv(self):
mock_record_update = self.test_client_plugin.record_update
self.test_resource.resource_id = '477e8273-60a7-4c41-b683-fdb0bc7cd151'
for type in (set(self.test_resource._ALLOWED_TYPES) -
set([self.test_resource.MX,
self.test_resource.SRV])):
prop_diff = args = {
record.DesignateRecord.DESCRIPTION: 'updated description',
record.DesignateRecord.TTL: 4200,
record.DesignateRecord.TYPE: type,
record.DesignateRecord.DATA: '2.2.2.2',
record.DesignateRecord.PRIORITY: 1}
self.test_resource.handle_update(json_snippet=None,
tmpl_diff=None,
prop_diff=prop_diff)
# priority is not considered for records other than mx or srv
args.update(dict(
id=self.test_resource.resource_id,
priority=None,
domain='1234567',
))
mock_record_update.assert_called_with(**args)
def test_resource_handle_update_mx_or_srv(self):
mock_record_update = self.test_client_plugin.record_update
self.test_resource.resource_id = '477e8273-60a7-4c41-b683-fdb0bc7cd151'
for type in [self.test_resource.MX, self.test_resource.SRV]:
prop_diff = args = {
record.DesignateRecord.DESCRIPTION: 'updated description',
record.DesignateRecord.TTL: 4200,
record.DesignateRecord.TYPE: type,
record.DesignateRecord.DATA: '2.2.2.2',
record.DesignateRecord.PRIORITY: 1}
self.test_resource.handle_update(json_snippet=None,
tmpl_diff=None,
prop_diff=prop_diff)
args.update(dict(
id=self.test_resource.resource_id,
domain='1234567',
))
mock_record_update.assert_called_with(**args)
def test_resource_handle_delete(self):
mock_record_delete = self.test_client_plugin.record_delete
self.test_resource.resource_id = '477e8273-60a7-4c41-b683-fdb0bc7cd151'
mock_record_delete.return_value = None
self.assertIsNone(self.test_resource.handle_delete())
mock_record_delete.assert_called_once_with(
domain='1234567',
id=self.test_resource.resource_id
)
def test_resource_handle_delete_resource_id_is_none(self):
self.test_resource.resource_id = None
self.assertIsNone(self.test_resource.handle_delete())
def test_resource_handle_delete_not_found(self):
mock_record_delete = self.test_client_plugin.record_delete
mock_record_delete.side_effect = designate_exception.NotFound
self.assertIsNone(self.test_resource.handle_delete())
def test_resource_show_resource(self):
args = dict(
name='test-record.com',
description='Test record',
ttl=3600,
type='A',
priority=1,
data='1.1.1.1'
)
rsc = records.Record(args)
mock_notification_get = self.test_client_plugin.record_show
mock_notification_get.return_value = rsc
self.assertEqual(args,
self.test_resource._show_resource(),
'Failed to show resource')
def test_resource_get_live_state(self):
tmpl = {
'heat_template_version': '2015-04-30',
'resources': {
'test_resource': {
'type': 'OS::Designate::Record',
'properties': {
'name': 'test-record.com',
'description': 'Test record',
'ttl': 3600,
'type': 'MX',
'priority': 1,
'data': '1.1.1.1',
'domain': 'example.com.'
}
}
}
}
s = stack.Stack(
self.ctx, 'test_stack',
template.Template(tmpl)
)
test_resource = s['test_resource']
test_resource.resource_id = '1234'
test_resource.client_plugin().get_domain_id = mock.MagicMock()
test_resource.client_plugin().get_domain_id.return_value = '1234567'
test_resource.client().records = mock.MagicMock()
test_resource.client().records.get.return_value = {
'type': 'MX',
'data': '1.1.1.1',
'ttl': 3600,
'description': 'test',
'domain_id': '1234567',
'name': 'www.example.com.',
'priority': 0
}
reality = test_resource.get_live_state(test_resource.properties)
expected = {
'type': 'MX',
'data': '1.1.1.1',
'ttl': 3600,
'description': 'test',
'priority': 0
}
self.assertEqual(expected, reality)
| {
"content_hash": "361d2ac2b5d68dc6c5bdb4d9963ff67c",
"timestamp": "",
"source": "github",
"line_count": 277,
"max_line_length": 79,
"avg_line_length": 35.667870036101085,
"alnum_prop": 0.5467611336032389,
"repo_name": "noironetworks/heat",
"id": "747ed51e8e21ac628239ba6f95e3c4b04f153039",
"size": "10455",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "heat/tests/openstack/designate/test_record.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "8804896"
},
{
"name": "Shell",
"bytes": "64533"
}
],
"symlink_target": ""
} |
"""Makes sure that the app/ code is cpplint clean."""
INCLUDE_CPP_FILES_ONLY = (
r'.*\.cc$', r'.*\.h$'
)
EXCLUDE = (
# Autogenerated window resources files are off limits
r'.*resource.h$',
)
def CheckChangeOnUpload(input_api, output_api):
    """Run cpplint over the C++ sources touched by this change."""
    skip_list = input_api.DEFAULT_BLACK_LIST + EXCLUDE
    source_filter = lambda f: input_api.FilterSourceFile(
        f, white_list=INCLUDE_CPP_FILES_ONLY, black_list=skip_list)
    return list(input_api.canned_checks.CheckChangeLintsClean(
        input_api, output_api, source_filter))
| {
"content_hash": "1fd9b58f32d0be1e48ef2411f54a65f4",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 64,
"avg_line_length": 29.05263157894737,
"alnum_prop": 0.697463768115942,
"repo_name": "rwatson/chromium-capsicum",
"id": "b15b852b6b30c1210f4a4743cf23023505212c64",
"size": "738",
"binary": false,
"copies": "1",
"ref": "refs/heads/chromium-capsicum",
"path": "app/PRESUBMIT.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
from __future__ import absolute_import, unicode_literals
from celery import Celery
# Celery application instance for the project.
app = Celery('celery')
# Pull every CELERY_*-prefixed setting from the Django settings module.
app.config_from_object('config.settings.celery', namespace='CELERY')
# Discover tasks.py modules in all installed apps.
app.autodiscover_tasks()
| {
"content_hash": "0eded2134f0a2a87c369cace27d4354c",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 68,
"avg_line_length": 25.375,
"alnum_prop": 0.7684729064039408,
"repo_name": "artinnok/django-default-skeleton",
"id": "3b312d4b47d13092829e6e75d08d889a55f697f0",
"size": "203",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "{{ cookiecutter.name }}_project/{{ cookiecutter.name }}/config/celery.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "4432"
},
{
"name": "JavaScript",
"bytes": "1414"
},
{
"name": "Python",
"bytes": "7572"
}
],
"symlink_target": ""
} |
import os
import sys
import arcpy
try:
if (sys.version_info[0] < 3): # _winreg has been renamed as (winreg) in python3+
from _winreg import *
else:
from winreg import *
except ImportError as e:
print ('winreg support is disabled!\n{}'.format(e))
from datetime import datetime
from xml.dom import minidom
try:
import MDCS_UC
except Exception as e:
print (f'User-Code functions disabled.\n{e}')
try:
from arcpy.ia import *
arcpy.CheckOutExtension("ImageAnalyst")
except:
print ('arcpy.ia is not available.')
class DynaInvoke:
    """Invokes a callable (e.g. a GP tool) by its dotted name with a prepared
    argument list. An optional callback may rewrite the arguments just before
    the call, and one chained method call on the returned object is supported
    via init(sArgs=...)."""

    # log status types enums
    const_general_text = 0
    const_warning_text = 1
    const_critical_text = 2
    const_status_text = 3
    # ends

    def __init__(self, name, args, evnt_fnc_update_args=None, log=None):
        # name: dotted path of the callable; resolved later via eval().
        self.m_name = name
        # args: positional argument list; padded/trimmed to arity by init().
        self.m_args = args
        # optional callback(args, name) that may rewrite the argument list.
        self.m_evnt_update_args = evnt_fnc_update_args
        # optional log function(msg, msg_type); falls back to print().
        self.m_log = log
        # arguments for the chained sub-method call, set via init(sArgs=...).
        self._sArgs = []

    def _message(self, msg, msg_type):
        """Routes a message to the log callback when set, else prints it."""
        if (self.m_log):
            return self.m_log(msg, msg_type)
        print (msg)

    def init(self, **kwargs):
        """Pads or trims self.m_args to the target callable's arity.
        Returns False when the callable's name cannot be resolved."""
        if ('sArgs' in kwargs):
            self._sArgs = kwargs['sArgs']
            if (isinstance(self._sArgs, list)):
                if (self._sArgs):
                    if (isinstance(self._sArgs[0], list)):
                        self._sArgs = self._sArgs[0]  # handles only 1 sub method on the parent object for now.
        # sub args to use in a method of the main function object. e.g. X = a->fn1(args) X->fn2(sargs)
        try:
            # NOTE: works only for pure-python callables that expose __code__.
            arg_count = eval('%s.__code__.co_argcount' % (self.m_name))
        except Exception as exp:
            self._message(str(exp), self.const_critical_text)
            return False
        len_args = len(self.m_args)
        if (len_args < arg_count):
            # Args fewer than required: fill with the '#' default marker.
            for i in range(len_args, arg_count):
                self.m_args.append('#')
        elif (len_args > arg_count):
            # More args supplied than required: extras are dropped.
            self.m_args = self.m_args[:arg_count]
        return True

    def invoke(self):  # chs
        """Calls the target; returns True on success, False on error.
        A None result from the update-args callback skips the invocation
        and is treated as a non-error."""
        result = 'OK'
        try:
            if (self.m_evnt_update_args is not None):
                usr_args = self.m_evnt_update_args(self.m_args, self.m_name)
                if (usr_args is None):  # set to (None) to skip fnc invocation, it's treated as a non-error.
                    return True
                if (usr_args is not None and
                        len(usr_args) == len(self.m_args)):  # user is only able to update the contents, not to trim or expand args.
                    self.m_args = usr_args
            self._message('Calling (%s)' % (self.m_name), self.const_general_text)
            ret = eval('%s(*self.m_args)' % (self.m_name))  # gp-tools return NULL?
            if (self._sArgs):
                # Chain a single method call on the returned object.
                fn = self._sArgs.pop(0)
                if (hasattr(ret, fn)):
                    ret = eval('ret.{}(*self._sArgs)'.format(fn))
            return True
        except Exception as exp:
            result = 'FAILED'
            self._message(str(exp), self.const_critical_text)
            return False
        finally:
            self._message('Status: %s' % (result), self.const_general_text)
class Base(object):
    """Shared state and helpers for MDCS commands: config-XML access,
    ArcGIS version/patch checks, user-code dispatch and misc utilities."""

    # begin - constants
    # log status types (message severity codes).
    const_general_text = 0
    const_warning_text = 1
    const_critical_text = 2
    const_status_text = 3
    const_cmd_default_text = "#defaults"
    const_geodatabase_ext = '.GDB'
    const_geodatabase_SDE_ext = '.SDE'
    # base init codes. (const_strings) — init() failure reasons.
    const_init_ret_version = 'version'
    const_init_ret_sde = 'sde'
    const_init_ret_patch = 'patch'
    # ends
    # version specific — indexes into [major, minor, sp, build] lists.
    const_ver_len = 4
    CMAJOR = 0
    CMINOR = 1
    CSP = 2
    CBUILD = 3
    CVERSION_ATTRIB = 'version'
    # ends
    # externally user defined functions specific
    CCLASS_NAME = 'UserCode'
    CMODULE_NAME = 'MDCS_UC'
    # ends
    # log status (codes)
    CCMD_STATUS_OK = 'OK'
    CCMD_STATUS_FAILED = 'Failed!'
    # ends
    # ends
    def __init__(self):
        self.m_log = None   # logger object exposing Message(msg, level); set via setLog()
        self.m_doc = None   # parsed MDCS config XML DOM
        # the following variables could be overridden by the command-line to replace respective values in XML config file.
        self.m_workspace = ''
        self.m_geodatabase = ''
        self.m_mdName = ''  # mosaic dataset name.
        # ends
        self.m_sources = ''
        self.m_gdbName = ''
        self.m_geoPath = ''
        self.m_config = ''
        self.m_commands = ''
        self.m_sources = ''  # source data paths for adding new rasters.
        self.m_dynamic_params = {}
        # art file update specific variables
        self.m_art_apply_changes = ''
        self.m_art_ws = ''
        self.m_art_ds = ''
        # ends
        # To keep track of the last objectID before any new data items could be added.
        self.m_last_AT_ObjectID = 0  # by default, take in all the previous records for any operation.
        # SDE specific variables
        self.m_IsSDE = False
        self.m_SDE_database_user = ''
        # ends
        # set MDCS code base path
        self.m_code_base = ''
        self.setCodeBase(os.path.dirname(__file__))
        # ends
        # client_callback_ptrs
        self.m_cli_callback_ptr = None
        self.m_cli_msg_callback_ptr = None
        # ends
        self.m_userClassInstance = None  # MDCS_UC.UserCode instance, set by init()
        self.m_data = None               # dict handed to the user-code class
def init(self): # return (status [true|false], reason)
try:
# Update in memory parameter DOM to reflect {-m} user values
if (self.m_workspace):
self.setXMLNodeValue('Application/Workspace/WorkspacePath', 'WorkspacePath', self.m_workspace, '', '')
if (self.m_geodatabase):
self.setXMLNodeValue('Application/Workspace/Geodatabase', 'Geodatabase', self.m_geodatabase, '', '')
if (self.m_mdName):
self.setXMLNodeValue('Application/Workspace/MosaicDataset/Name', 'Name', self.m_mdName, '', '')
# ends
min = self.getXMLXPathValue("Application/ArcGISVersion/Product/Min", "Min").split('.')
max = self.getXMLXPathValue("Application/ArcGISVersion/Product/Max", "Max").split('.')
if (len(min) == self.const_ver_len): # version check is disabled if no values have been defined in the MDCS for min and max.
CMAJOR = 0
CBUILD = self.const_ver_len
if (len(max) != self.const_ver_len):
max = [0, 0, 0, 0] # zero up max if max version isn't defined / has errors.
for n in range(CMAJOR, CBUILD):
if (min[n] == ''):
min[n] = 0
if (max[n] == ''):
max[n] = 0
min[n] = int(min[n])
max[n] = int(max[n])
if (self.CheckMDCSVersion(min, max) == False):
return (False, self.const_init_ret_version) # version check failed.
except Exception as inst:
self.log('Version check failure/' + str(inst), self.const_critical_text)
return False
# ends
# ArcGIS patch test.
if (self.isArcGISPatched() == False):
self.log('An ArcGIS patch required to run MDCS is not yet installed. Unable to proceed.', self.const_critical_text)
return (False, self.const_init_ret_patch)
# ends
self.setUserDefinedValues() # replace user defined dynamic variables in config file with values provided at the command-line.
if (self.m_workspace == ''):
self.m_workspace = self.prefixFolderPath(self.getAbsPath(self.getXMLNodeValue(self.m_doc, "WorkspacePath")), self.const_workspace_path_)
if (self.m_geodatabase == ''):
self.m_geodatabase = self.getXMLNodeValue(self.m_doc, "Geodatabase")
if (self.m_mdName == ''):
self.m_mdName = self.getXMLXPathValue("Application/Workspace/MosaicDataset/Name", "Name")
const_len_ext = len(self.const_geodatabase_ext)
ext = self.m_geodatabase[-const_len_ext:].upper()
if (ext != self.const_geodatabase_ext and
ext != self.const_geodatabase_SDE_ext):
self.m_geodatabase += self.const_geodatabase_ext.lower() # if no extension specified, defaults to '.gdb'
self.m_gdbName = self.m_geodatabase[:len(self.m_geodatabase) - const_len_ext] # .gdb
self.m_geoPath = os.path.join(self.m_workspace, self.m_geodatabase)
self.m_commands = self.getXMLNodeValue(self.m_doc, "Command")
if (ext == self.const_geodatabase_SDE_ext):
self.m_IsSDE = True
try:
self.log('Reading SDE connection properties from (%s)' % (self.m_geoPath))
conProperties = arcpy.Describe(self.m_geoPath).connectionProperties
self.m_SDE_database_user = ('%s.%s.') % (conProperties.database, conProperties.user)
except Exception as inst:
self.log(str(inst), self.const_critical_text)
return (False, self.const_init_ret_sde)
try:
self.m_data = {
'log': self.m_log,
'workspace': self.m_geoPath,
'mosaicdataset': self.m_mdName,
'mdcs': self.m_doc,
'sourcePath': self.m_sources,
'base': self # pass in the base object to allow access to common functions.
}
frame = sys._getframe(0).f_globals
module = frame[self.CMODULE_NAME]
self.m_userClassInstance = getattr(module, self.CCLASS_NAME)(self.m_data)
except:
self.log('{}/{} not found. Users commands disabled!'.format(self.CMODULE_NAME, self.CCLASS_NAME), self.const_warning_text)
if (self.m_doc is None):
return (False, 'UserCode')
return (True, 'OK')
def invokeDynamicFnCallback(self, args, fn_name=None):
if (fn_name is None):
return args
fn = fn_name.lower()
if (self.invoke_cli_callback(fn_name, args)):
return args
return None
# cli callback ptrs
def invoke_cli_callback(self, fname, args):
if (not self.m_cli_callback_ptr is None):
return self.m_cli_callback_ptr(fname, args)
return args
def invoke_cli_msg_callback(self, mtype, args):
if (not self.m_cli_msg_callback_ptr is None):
return self.m_cli_msg_callback_ptr(mtype, args)
return args
# ends
    def setCodeBase(self, path):
        """Records the MDCS code-base folder and derives the parameter-file
        paths relative to it. Returns the path, or None when it doesn't
        exist on disk."""
        if (os.path.exists(path) == False):
            return None
        self.m_code_base = path
        # Parameter folders live two levels above the scripts folder.
        self.const_statistics_path_ = os.path.join(self.m_code_base, '../../Parameter/Statistics')
        self.const_raster_function_templates_path_ = os.path.join(self.m_code_base, '../../Parameter/RasterFunctionTemplates')
        self.const_raster_type_path_ = os.path.join(self.m_code_base, '../../Parameter/RasterType')
        self.const_workspace_path_ = os.path.join(self.m_code_base, '../../')  # .gdb output
        self.const_import_geometry_features_path_ = os.path.join(self.m_code_base, '../../Parameter')
        return self.m_code_base
    def setXMLNodeValue(self, xPath, key, value, subKey, subValue):
        """Sets the text of the config node addressed by *xPath* to *value*.
        When *subKey* is non-empty, the node whose text equals *value* is
        located instead and its following sibling element named *subKey*
        receives *subValue*."""
        nodes = self.m_doc.getElementsByTagName(key)
        for node in nodes:
            # Rebuild the node's full path to disambiguate same-named tags.
            parents = []
            c = node
            while(c.parentNode is not None):
                parents.insert(0, c.nodeName)
                c = c.parentNode
            p = '/'.join(parents)
            if (p == xPath):
                if (subKey != ''):
                    try:
                        if (node.firstChild.nodeValue == value):  # taking a short-cut to edit/this could change in future to support any child-node lookup
                            # nextSibling.nextSibling skips the whitespace text node.
                            if (node.nextSibling.nextSibling.nodeName == subKey):
                                node.nextSibling.nextSibling.firstChild.data = subValue
                                break
                    except:
                        break
                    continue
                node.firstChild.data = value
                break
    def getXMLXPathValue(self, xPath, key):
        """Returns the trimmed text of the first *key* element whose full
        ancestor path equals *xPath*; '' when absent or empty."""
        nodes = self.m_doc.getElementsByTagName(key)
        for node in nodes:
            # Rebuild the node's full path to disambiguate same-named tags.
            parents = []
            c = node
            while(c.parentNode is not None):
                parents.insert(0, c.nodeName)
                c = c.parentNode
            p = '/'.join(parents)
            if (p == xPath):
                if (node.hasChildNodes() == False):
                    return ''
                return str(node.firstChild.data).strip()
        return ''
def setLog(self, log):
self.m_log = log
return True
def isLog(self):
return (not self.m_log is None)
def log(self, msg, level=const_general_text):
if (self.m_log is not None):
return self.m_log.Message(msg, level)
errorTypeText = 'msg'
if (level > self.const_general_text):
errorTypeText = 'warning'
elif(level == self.const_critical_text):
errorTypeText = 'critical'
print ('log-' + errorTypeText + ': ' + msg)
return True
    # user defined functions implementation code
    def isUser_Function(self, name):
        """Checks whether the user-code class exposes an attribute *name*.
        Returns True/False, or None when no user class is loaded.
        NOTE(review): the None-vs-False inconsistency means callers must
        only rely on truthiness of the result."""
        try:
            if (self.m_userClassInstance is None):
                return None
            fnc = getattr(self.m_userClassInstance, name)  # raises when absent
        except:
            return False
        return True
    def invoke_user_function(self, name, data):
        """Invokes the user-code method *name* with *data*. Returns the
        method's result, or False when no user class is loaded, the method
        is missing, or it raises (errors are logged)."""
        ret = False
        try:
            if (self.m_userClassInstance is None):
                return False
            fnc = getattr(self.m_userClassInstance, name)
            try:
                ret = fnc(data)
            except Exception as inf:
                self.log('Executing user defined function (%s)' % (name), self.const_critical_text)
                self.log(str(inf), self.const_critical_text)
                return False
        except Exception as inf:
            self.log('Please check if user function (%s) is found in class (%s) of MDCS_UC module.' % (name, self.CCLASS_NAME), self.const_critical_text)
            self.log(str(inf), self.const_critical_text)
            return False
        return ret
    # ends
    def processEnv(self, node, pos, json):  # support fnc for 'SE' command.
        """Flattens a sibling chain of XML elements into the *json* dict,
        recursing one level into children. Keys are stringified positions;
        each slot accumulates parallel 'key'/'val'/'type' lists where type
        'c' marks a plain child value and 'p' a parent holding nested
        children. Always returns True."""
        while(node.nextSibling is not None):
            if(node.nodeType != minidom.Node.TEXT_NODE):
                k = str(pos)
                if ((k in json.keys()) == False):
                    json[k] = {'key': [], 'val': [], 'type': []}
                json[k]['key'].append(node.nodeName)
                v = ''
                if (node.firstChild is not None):
                    v = node.firstChild.nodeValue.strip()
                json[k]['val'].append(v)
                json[k]['parent'] = node.parentNode.nodeName
                json[k]['type'].append('c')
                if (node.firstChild is not None):
                    if (node.firstChild.nextSibling is not None):
                        # Element has nested children: mark it 'p' and recurse
                        # into a freshly allocated slot.
                        pos = len(json)
                        json[k]['type'][len(json[k]['type']) - 1] = 'p'
                        self.processEnv(node.firstChild.nextSibling, pos, json)
                        pos = 0  # defaults to root always, assuming only 1 level deep xml.
            node = node.nextSibling
        return True
def getAbsPath(self, input):
absPath = input
if (os.path.exists(absPath) == True):
absPath = os.path.abspath(input)
return absPath
def prefixFolderPath(self, input, prefix):
_file = input.strip()
_p, _f = os.path.split(_file)
_indx = _p.lower().find('.gdb')
if (_p == '' or _indx >= 0):
if (_indx >= 0):
_f = _p + '\\' + _f
_file = os.path.join(prefix, _f)
return _file
    def isArcGISPatched(self):  # return values [true | false]
        """Checks whether the ArcGIS patch named in the config's <Patch> node
        is installed, first by version comparison and then by scanning the
        Windows registry Updates keys. Returns True when no patch is
        required or the patch is present."""
        # if we're running on python 3+, it's assumed we're on (ArcGIS Pro) and there's no need to check for patches.
        if (sys.version_info[0] >= 3):
            return True
        # if the patch XML node is not properly formatted in structure/with values, MDCS returns an error and will abort the operation.
        patch_node = self.getXMLNode(self.m_doc, "Patch")
        if (patch_node == ''):
            return True
        if (patch_node.attributes.length == 0):
            return False
        if ((self.CVERSION_ATTRIB in patch_node.attributes.keys()) == False):
            return False
        target_ver = patch_node.attributes.getNamedItem(self.CVERSION_ATTRIB).nodeValue.strip()
        if (len(target_ver) == 0):
            return False
        search_key = ''
        # nextSibling hops of two skip the whitespace text nodes between elements.
        patch_desc_node = patch_node.firstChild.nextSibling
        while (patch_desc_node is not None):
            node_name = patch_desc_node.nodeName
            if (node_name == 'Name'):
                if (patch_desc_node.hasChildNodes() == True):
                    search_key = patch_desc_node.firstChild.nodeValue
                break
            patch_desc_node = patch_desc_node.nextSibling.nextSibling
        if (len(search_key) == 0):  # if no patch description could be found, return False
            return False
        ver = (target_ver + '.0.0.0.0').split('.')
        for n in range(self.CMAJOR, self.CBUILD + 1):
            if (ver[n] == ''):
                ver[n] = 0
            ver[n] = int(ver[n])
        ver = ver[:4]  # accept only the first 4 digits.
        # Zero-pad each component so the concatenated strings compare numerically.
        target_v_str = installed_v_str = ''
        for i in range(self.CMAJOR, self.CBUILD + 1):
            target_v_str += "%04d" % ver[i]
        installed_ver = self.getDesktopVersion()
        for i in range(self.CMAJOR, self.CBUILD + 1):
            installed_v_str += "%04d" % installed_ver[i]
        tVersion = int(target_v_str)
        iVersion = int(installed_v_str)
        if (iVersion > tVersion):  # the first priority is to check for the patch version against the installed version
            return True  # if the installed ArcGIS version is greater than the patch's, it's OK to proceed.
        if (self.isLinux()):
            return True
        # if the installed ArcGIS version is lower than the intended target patch version, continue with the registry key check for the
        # possible patches installed.
        # HKEY_LOCAL_MACHINE\SOFTWARE\Wow6432Node\ESRI\Desktop10.2\Updates
        CPRODUCT_NAME = 'ProductName'
        CVERSION = 'Version'
        setupInfo = arcpy.GetInstallInfo()
        if ((CVERSION in setupInfo.keys()) == False or
                (CPRODUCT_NAME in setupInfo.keys()) == False):
            return False
        key = setupInfo[CPRODUCT_NAME] + setupInfo[CVERSION]
        try:
            reg_path = "Software\\Wow6432Node\\ESRI\\%s\\Updates" % (key)
            arcgis = OpenKey(
                HKEY_LOCAL_MACHINE, reg_path)
            i = 0
            # Enumerate installed updates until EnumKey raises (end of list).
            while True:
                name = EnumKey(arcgis, i)
                arcgis_sub = OpenKey(
                    HKEY_LOCAL_MACHINE, reg_path + '\\' + name)
                try:
                    value, type = QueryValueEx(arcgis_sub, "Name")
                    if (type == 1):  # reg_sz
                        if (value.lower().find(search_key.lower()) >= 0):
                            return True  # return true if the value is found!
                except:
                    pass
                i += 1
        except:
            pass
        return False
    def getDesktopVersion(self):  # returns major, minor, sp and the build number.
        """Reads arcpy.GetInstallInfo() and returns [major, minor, sp, build].
        Components that fail to parse set the ValError flag, which is
        recorded but intentionally not acted upon."""
        d = arcpy.GetInstallInfo()
        version = []
        buildNumber = 0
        spNumber = 0
        CVERSION = 'version'
        CBUILDNUMBER = 'buildnumber'
        CSPNUMBER = 'spnumber'
        ValError = False
        for k in d:
            key = k.lower()
            if (key == CVERSION or
                key == CBUILDNUMBER or
                    key == CSPNUMBER):
                try:
                    if (key == CVERSION):
                        [version.append(int(x)) for x in d[k].split(".")]
                    elif (key == CBUILDNUMBER):
                        buildNumber = int(d[k])
                    elif (key == CSPNUMBER):
                        spNumber = int(d[k])  # could be N/A
                except:
                    ValError = True
        CMAJOR_MINOR_REVISION = 3
        if (len(version) < CMAJOR_MINOR_REVISION):  # On a system with full-install, ArcGIS version piece of information could return 3 numbers (major, minor, revision/SP)
            version.append(spNumber)  # and thus the SP number shouldn't be added to the version separately.
        version.append(buildNumber)
        return version
    def CheckMDCSVersion(self, min, max, print_err_msg=True):
        """Validates the installed ArcGIS Desktop version against the
        [min, max] bounds from the config. Each bound is a
        [major, minor, sp, build] list; a 0 component in *max* (or min
        build) disables that particular bound. Returns True when the
        install is in range."""
        # if python version is >= 3, it's assumed we're being run from ArcGIS Pro
        if (sys.version_info[0] >= 3):
            min = [1, 0, 0, 0]
            max = [0, 0, 0, 0]
        if (len(min) != self.const_ver_len or
                len(max) != self.const_ver_len):
            return False
        CMAJOR = 0
        CMINOR = 1
        CSP = 2
        CBUILD = 3
        min_major = min[CMAJOR]
        min_minor = min[CMINOR]
        min_sp = min[CSP]
        min_build = min[CBUILD]
        max_major = max[CMAJOR]
        max_minor = max[CMINOR]
        max_cp = max[CSP]
        max_build = max[CBUILD]
        try:
            version = self.getDesktopVersion()
            if (len(version) >= self.const_ver_len):  # major, minor, sp, build
                inst_major = version[CMAJOR]
                inst_minor = version[CMINOR]
                inst_sp = version[CSP]
                inst_build = version[CBUILD]
                ver_failed = False
                # Upper bounds apply only when non-zero; lower bounds always.
                if (max_major > 0 and
                        inst_major > max_major):
                    ver_failed = True
                elif (max_minor > 0 and
                        inst_minor > max_minor):
                    ver_failed = True
                elif (max_cp > 0 and
                        inst_sp > max_cp):
                    ver_failed = True
                elif (max_build > 0 and
                        inst_build > max_build):
                    ver_failed = True
                elif (inst_major < min_major):
                    ver_failed = True
                elif (inst_minor < min_minor):
                    ver_failed = True
                elif (inst_sp < min_sp):
                    ver_failed = True
                elif (min_build > 0 and
                        inst_build < min_build):
                    ver_failed = True
                if (ver_failed):
                    if (print_err_msg == True):
                        self.log('MDCS can\'t proceed due to ArcGIS version incompatiblity.', self.const_critical_text)
                        self.log('ArcGIS Desktop version is (%s.%s.%s.%s). MDCS min and max versions are (%s.%s.%s.%s) and (%s.%s.%s.%s) respectively.' %
                                 (inst_major, inst_minor, inst_sp, inst_build, min_major, min_minor, min_sp, min_build, max_major, max_minor, max_cp, max_build), self.const_critical_text)
                    return False
        except Exception as inst:
            self.log('Version check failed: (%s)' % (str(inst)), self.const_critical_text)
            return False
        return True
def getXMLNodeValue(self, doc, nodeName):
if (doc is None):
return ''
node = doc.getElementsByTagName(nodeName)
if (node is None or
node.length == 0 or
node[0].hasChildNodes() == False or
node[0].firstChild.nodeType != minidom.Node.TEXT_NODE):
return ''
return node[0].firstChild.data
    def updateART(self, doc, workspace, dataset):
        """Rewrites workspace/dataset references inside a raster-type (.art)
        XML DOM in place. Returns True on success, False when there is
        nothing to do or the DOM walk fails."""
        if (doc is None):
            return False
        if (workspace.strip() == ''
                and dataset.strip() == ''):
            return False  # nothing to do.
        try:
            # Pass 1: 'Key' elements named dem/database — update the value
            # element that follows them.
            nodeName = 'Key'
            node_list = doc.getElementsByTagName(nodeName)
            for node in node_list:
                if (node.hasChildNodes()):
                    _nValue = node.firstChild.nodeValue
                    if (_nValue):
                        _nValue = _nValue.lower()
                        if (_nValue == 'dem' or
                                _nValue == 'database'):
                            _node = node.nextSibling
                            while(_node):
                                if (_node.hasChildNodes() and
                                        _node.firstChild.nodeValue):
                                    _node.firstChild.nodeValue = '{}'.format(
                                        os.path.join(workspace, dataset) if _nValue == 'dem' else workspace)
                                    break
                                _node = _node.nextSibling
            # Pass 2: 'NameString' elements pack 'k=v' pairs separated by ';'.
            nodeName = 'NameString'
            node_list = doc.getElementsByTagName(nodeName)
            for node in node_list:
                if (node.hasChildNodes() == True):
                    vals = node.firstChild.nodeValue.split(';')
                    upd_buff = []
                    for v in vals:
                        vs = v.split('=')
                        for vs_ in vs:
                            vs_ = vs_.lower()
                            if (vs_.find('workspace') > 0):
                                if (workspace != ''):
                                    vs[1] = ' ' + workspace
                                    # keep the sibling PathName element in sync.
                                    _node = node.nextSibling
                                    while(_node):
                                        if (_node.nodeName == 'PathName'):
                                            _node.firstChild.nodeValue = workspace
                                            break
                                        _node = _node.nextSibling
                            elif (vs_.find('rasterdataset') > 0):
                                if (dataset != ''):
                                    vs[1] = ' ' + dataset
                                    # keep the preceding Name element in sync.
                                    _node = node.previousSibling
                                    while(_node):
                                        if (_node.nodeName == 'Name'):
                                            _node.firstChild.nodeValue = dataset
                                            break
                                        _node = _node.previousSibling
                        upd_buff.append('='.join(vs))
                    if (len(upd_buff) > 0):
                        upd_nodeValue = ';'.join(upd_buff)
                        node.firstChild.nodeValue = upd_nodeValue
        except Exception as inst:
            self.log(str(inst), self.const_critical_text)
            return False
        return True
def getInternalPropValue(self, dic, key):
if (key in dic.keys()):
return dic[key]
else:
return ''
    def setUserDefinedValues(self):
        """Substitutes $KEY$ placeholders in every config-XML text node with
        values from m_dynamic_params (keys compared upper-case). Supports
        a 'default;$KEY$' form where the part before ';' is the fallback,
        and a literal '\$' escape."""
        nodes = self.m_doc.getElementsByTagName('*')
        for node in nodes:
            if (node.firstChild is not None):
                v = node.firstChild.data.strip()
                if (v.find('$') == -1):
                    continue
                usr_key = v
                default = ''
                d = v.split(';')
                if (len(d) > 1):
                    # 'default;$KEY$' form: part 0 is the fallback value.
                    default = d[0].strip()
                    usr_key = d[1].strip()
                revalue = []
                # first/second bracket the text between a pair of '$' markers.
                first = usr_key.find('$')
                first += 1
                second = first + usr_key[first + 1:].find('$') + 1
                if (first > 1):
                    revalue.append(usr_key[0:first - 1])
                while(second >= 0):
                    uValue = usr_key[first:second]
                    if (uValue.upper() in self.m_dynamic_params.keys()):
                        revalue.append(self.m_dynamic_params[uValue.upper()])
                    else:
                        if (uValue.find('\$') >= 0):
                            uValue = uValue.replace('\$', '$')  # literal dollar escape
                        else:
                            if (default == ''):
                                default = uValue
                            if (first == 1
                                    and second == (len(usr_key) - 1)):
                                uValue = default
                        revalue.append(uValue)
                    first = second + 1
                    indx = usr_key[first + 1:].find('$')
                    if (indx == -1):
                        # No more markers: keep any trailing literal text.
                        if (first != len(usr_key)):
                            revalue.append(usr_key[first:len(usr_key)])
                        break
                    second = first + indx + 1
                updateVal = ''.join(revalue)
                node.firstChild.data = updateVal
def getXMLNode(self, doc, nodeName):
if (doc is None):
return ''
node = doc.getElementsByTagName(nodeName)
if (node is None or
node.length == 0 or
node[0].hasChildNodes() == False or
node[0].firstChild.nodeType != minidom.Node.TEXT_NODE):
return ''
return node[0]
def foundLockFiles(self, folder_path):
file_list_ = os.listdir(folder_path)
found_lock_ = False
for i in file_list_:
if (i[-5:].lower() == '.lock'):
sp = i.split('.')
pid = os.getpid()
if (pid == int(sp[3])): # indx 3 == process id
found_lock_ = True
break
return found_lock_
    def waitForLockRelease(self, folder_path_):
        """Polls *folder_path_* until this process's .lock files disappear.
        Returns the approximate elapsed seconds on success, -1 on timeout,
        -2 when the path does not exist.
        NOTE(review): the loop busy-waits between checks (no sleep)."""
        if (os.path.exists(folder_path_) == False):
            self.log('lock file path does not exist!. Quitting...', self.const_critical_text)
            return -2  # path does not exist error code!
        t0 = datetime.now()
        duration_req_sec_ = 3       # seconds between periodic checks
        max_time_to_wait_sec_ = 10  # give up after this many seconds
        tot_count_sec_ = 0
        while True:
            if (tot_count_sec_ == 0):
                if (self.foundLockFiles(folder_path_) == False):  # try to see if we could get lucky on the first try, else enter periodic check.
                    break
            t1 = datetime.now() - t0
            if (t1.seconds > duration_req_sec_):
                if (self.foundLockFiles(folder_path_) == False):
                    break
                tot_count_sec_ += duration_req_sec_
                if (tot_count_sec_ > max_time_to_wait_sec_):
                    self.log('lock file release timed out!. Quitting...', self.const_critical_text)
                    tot_count_sec_ = -1
                    break
                t0 = datetime.now()
        return tot_count_sec_
@staticmethod
def isLinux(self):
return sys.platform.lower().startswith(('linux', 'darwin'))
def getBooleanValue(self, value):
if (value is None):
return False
if (isinstance(value, bool)):
return value
val = value
if (not isinstance(val, str)):
val = str(val)
val = val.lower()
if val in ['true', 'yes', 't', '1', 'y']:
return True
return False
def _updateResponse(self, resp, **kwargs):
if (resp is None):
return resp
for k in kwargs:
resp[k] = kwargs[k]
return resp
def _getResponseResult(self, resp):
if (resp is None):
return False
if (isinstance(resp, bool)):
status = resp
elif (isinstance(resp, dict)):
status = False
if ('status' in resp):
status = self.getBooleanValue(resp['status'])
return status
| {
"content_hash": "4900ce5bc64b674c3a000739366a685d",
"timestamp": "",
"source": "github",
"line_count": 871,
"max_line_length": 187,
"avg_line_length": 37.20206659012629,
"alnum_prop": 0.5021448631299571,
"repo_name": "Esri/mdcs-py",
"id": "f907b12b9258d2e421d17690f6a2ae3b7f95354c",
"size": "33408",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/Base/Base.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "2553"
},
{
"name": "Python",
"bytes": "337340"
}
],
"symlink_target": ""
} |
from __future__ import print_function
import numpy as np
from statsmodels.regression.linear_model import OLS, GLSAR
from statsmodels.tools.tools import add_constant
from statsmodels.datasets import macrodata
import statsmodels.regression.tests.results.results_macro_ols_robust as res
# Load the quarterly US macro data and build annualized growth rates.
d2 = macrodata.load(as_pandas=False).data
g_gdp = 400*np.diff(np.log(d2['realgdp']))
g_inv = 400*np.diff(np.log(d2['realinv']))
# Regress investment growth on GDP growth and the lagged real interest rate.
exogg = add_constant(np.c_[g_gdp, d2['realint'][:-1]], prepend=False)

res_olsg = OLS(g_inv, exogg).fit()
print(res_olsg.summary())

# Heteroscedasticity-robust (HC1) covariance.
res_hc0 = res_olsg.get_robustcov_results('HC1')
print('\n\n')
print(res_hc0.summary())
print('\n\n')
# HAC (Newey-West) covariance with 4 lags and small-sample correction.
res_hac4 = res_olsg.get_robustcov_results('HAC', maxlags=4, use_correction=True)
print(res_hac4.summary())
print('\n\n')

# t-tests on all coefficients, first under the t, then the normal distribution.
tt = res_hac4.t_test(np.eye(len(res_hac4.params)))
print(tt.summary())
print('\n\n')
print(tt.summary_frame())
res_hac4.use_t = False
print('\n\n')
tt = res_hac4.t_test(np.eye(len(res_hac4.params)))
print(tt.summary())
print('\n\n')
print(tt.summary_frame())

# Joint F/Wald tests on all slope coefficients (constant excluded).
print(vars(res_hac4.f_test(np.eye(len(res_hac4.params))[:-1])))
print(vars(res_hac4.wald_test(np.eye(len(res_hac4.params))[:-1], use_f=True)))
print(vars(res_hac4.wald_test(np.eye(len(res_hac4.params))[:-1], use_f=False)))

# new cov_type can be set in fit method of model
mod_olsg = OLS(g_inv, exogg)
res_hac4b = mod_olsg.fit(cov_type='HAC',
                         cov_kwds=dict(maxlags=4, use_correction=True))
print(res_hac4b.summary())
res_hc1b = mod_olsg.fit(cov_type='HC1')
print(res_hc1b.summary())
# force t-distribution
res_hc1c = mod_olsg.fit(cov_type='HC1', cov_kwds={'use_t':True})
print(res_hc1c.summary())
# cluster-robust covariance grouped by decade, with t-distribution forced
decade = (d2['year'][1:] // 10).astype(int)  # just make up a group variable
res_clu = mod_olsg.fit(cov_type='cluster',
                       cov_kwds={'groups':decade, 'use_t':True})
print(res_clu.summary())
| {
"content_hash": "54e148e9543e48879b562d4b0dd2555e",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 80,
"avg_line_length": 30.063492063492063,
"alnum_prop": 0.6837381203801478,
"repo_name": "ChadFulton/statsmodels",
"id": "5252ff228473a5b5f62bba7c1f67801f52cb9ed5",
"size": "1895",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "statsmodels/examples/ex_ols_robustcov.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "AGS Script",
"bytes": "457842"
},
{
"name": "Assembly",
"bytes": "10035"
},
{
"name": "Batchfile",
"bytes": "3469"
},
{
"name": "C",
"bytes": "381"
},
{
"name": "HTML",
"bytes": "148470"
},
{
"name": "MATLAB",
"bytes": "2609"
},
{
"name": "Python",
"bytes": "11749760"
},
{
"name": "R",
"bytes": "90986"
},
{
"name": "Rebol",
"bytes": "123"
},
{
"name": "Shell",
"bytes": "8181"
},
{
"name": "Smarty",
"bytes": "1014"
},
{
"name": "Stata",
"bytes": "65045"
}
],
"symlink_target": ""
} |
"""
Created on Mon Nov 7 15:03:00 2016
AdHocTD implementation
@author: leno
"""
from adhoc import AdHoc
import math
import random
import actions
class AdHocTD(AdHoc):
    """Ad hoc agent that gives and asks for advice based on TD (Q-value) data.

    The visit count of a state and the spread between its best and worst
    Q-values drive two probabilistic decisions: whether to advise another
    agent (confidence grows with visits and Q-value spread) and whether to
    ask for advice (probability decays with visits).  Learning machinery
    (Q-table, budgets, exploration flag) is inherited from AdHoc.
    """

    # When True, advice/ask decisions are buffered and flushed to
    # LogAdvTD.log / LogAskTD.log on reaching a terminal state.
    logAdv = False
    # Class-level defaults kept for backward compatibility; __init__ gives
    # each instance its own buffers.  (Previously these mutable lists were
    # shared across all instances -- a latent bug with multiple agents.)
    logAsk = []
    logAdvice = []

    def __init__(self, agentIndex, alpha=0.2, gamma=0.9, T=0.4, budgetAsk=350, budgetAdv=350):
        super(AdHocTD, self).__init__(agentIndex, alpha=alpha, gamma=gamma, T=T,
                                      budgetAsk=budgetAsk, budgetAdv=budgetAdv)
        # Per-instance log buffers (see note on the class attributes above).
        self.logAsk = []
        self.logAdvice = []

    def check_advise(self, state):
        """Return (should_advise, advised_action) for *state*.

        The advising probability is 1 - (1 + param) ** -(sqrt(visits) * |maxQ - minQ|),
        i.e. it increases both with how often the state was visited and with
        the Q-value spread there.  Never advises on unvisited states.
        """
        numberVisits = self.visitedNumber.get(state, 0)
        if numberVisits == 0:
            return False, None

        allActions = [actions.NORTH, actions.SOUTH, actions.WEST, actions.EAST]
        maxQ, minQ = self.get_max_min_q_value(state, allActions)
        difQ = math.fabs(maxQ - minQ)

        param = 1.5
        value = math.sqrt(numberVisits) * difQ
        # Probability of giving advice in this state.
        prob = 1 - math.pow(1 + param, -value)

        if random.random() < prob:
            # self.action(state, True) is assumed to return the greedy
            # (exploitation) action -- defined in AdHoc.
            advisedAction = self.action(state, True)
            if self.logAdv:
                self.logAdvice.append([prob, numberVisits, difQ])
            return True, advisedAction
        return False, None

    def check_ask(self, state):
        """Return True when the agent should ask for advice in *state*.

        Only asks while exploring, while the asking budget is not exhausted
        and the state is valid (state[0] != inf).  The asking probability is
        (1 + param) ** -sqrt(visits), so unfamiliar states trigger questions
        more often; a never-visited state always asks.
        """
        if self.exploring and self.spentBudgetAsk < self.budgetAsk and state[0] != float('inf'):
            numberVisits = self.visitedNumber.get(state, 0)
            if numberVisits == 0:
                return True
            param = 0.3
            prob = math.pow(1 + param, -math.sqrt(numberVisits))
            if random.random() < prob:
                if self.logAdv:
                    self.logAsk.append([prob, numberVisits])
                return True
        return False

    def observe_reward(self, state, action, statePrime, reward):
        """Do the necessary updates (Q-table, etc), then flush advice logs.

        reward == 1 is treated as the terminal (successful) state marker --
        TODO confirm against the environment's reward scheme.
        """
        super(AdHocTD, self).observe_reward(state, action, statePrime, reward)
        if reward == 1 and self.logAdv and self.exploring:
            with open("LogAdvTD.log", "w") as myFile:
                myFile.write('\n'.join(map(str, self.logAdvice)))
            with open("LogAskTD.log", "w") as myFile:
                myFile.write('\n'.join(map(str, self.logAsk)))
            self.logAdvice = []
            self.logAsk = []
"content_hash": "9334effb833a4d97d591a0086209d420",
"timestamp": "",
"source": "github",
"line_count": 99,
"max_line_length": 135,
"avg_line_length": 35.474747474747474,
"alnum_prop": 0.5506833712984055,
"repo_name": "cowhi/HFO",
"id": "c7b4622c6c4fb93328728dbb48f78505a32b624c",
"size": "3536",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "predator_prey/adhoctd.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "1230076"
},
{
"name": "CMake",
"bytes": "8931"
},
{
"name": "Jupyter Notebook",
"bytes": "1684588"
},
{
"name": "Makefile",
"bytes": "7718"
},
{
"name": "Python",
"bytes": "218145"
},
{
"name": "Shell",
"bytes": "41259"
}
],
"symlink_target": ""
} |
def main():
    # Sieve-of-Eratosthenes style prime search over the odd numbers only;
    # 2 is seeded into the result list directly.  (Python 2: raw_input,
    # print statement, list-returning range.)
    maxRange = int(raw_input("What is the max range of numbers you want tested for being prime? "))
    numRange= range(3, maxRange +1, 2) #odd candidates; composites are struck out as each prime is found
    primes = [2]
    while numRange:
        repeat = 1
        # The smallest remaining candidate is always prime.
        guess = numRange[0]
        primes.append(numRange[0])
        # Remove every multiple of the new prime, including the prime itself
        # (repeat starts at 1) -- that is what shrinks numRange and ends the loop.
        while repeat * guess <= maxRange:
            if repeat * guess in numRange:
                numRange.remove(repeat * guess)
            repeat += 1
    print 'Total primes: %d' % len(primes)
    raw_input()  # pause so the console window stays open


if __name__ == '__main__':
    main()
| {
"content_hash": "a216c4a6d2afe8da4a6aadc992350f14",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 103,
"avg_line_length": 28.434782608695652,
"alnum_prop": 0.5229357798165137,
"repo_name": "Ic3Venom/primefinder",
"id": "231fbdd517b2a90a2d31ffba809da42198ec884e",
"size": "718",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "prime1.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1952"
},
{
"name": "C++",
"bytes": "1200"
},
{
"name": "Python",
"bytes": "3723"
}
],
"symlink_target": ""
} |
import hashlib
import json
import re
from enum import Enum
from os import listdir, makedirs
from os.path import dirname, isfile, join, realpath
import jsonschema
import yaml
from jinja2 import Environment, PackageLoader
from yaml import MarkedYAMLError
from binary import FixedEntryListTypes, FixedLengthTypes, FixedListTypes, FixedMapTypes
from cpp import cpp_ignore_service_list, cpp_types_decode, cpp_types_encode, get_size, is_trivial
from cs import cs_escape_keyword, cs_ignore_service_list, cs_types_decode, cs_types_encode
from java import java_types_decode, java_types_encode
from md import internal_services
from py import (
py_escape_keyword,
py_get_import_path_holders,
py_ignore_service_list,
py_param_name,
py_types_encode_decode,
)
from ts import (
ts_escape_keyword,
ts_get_import_path_holders,
ts_ignore_service_list,
ts_types_decode,
ts_types_encode,
)
# Multipliers used to pack a "major.minor.patch" version into a single
# comparable integer (see version_to_number): e.g. 2.1.3 -> 20103.
MAJOR_VERSION_MULTIPLIER = 10000
MINOR_VERSION_MULTIPLIER = 100
PATCH_VERSION_MULTIPLIER = 1
def java_name(type_name):
    """Convert a snake_case protocol name into PascalCase for Java."""
    parts = type_name.split("_")
    return "".join(capital(part) for part in parts)
def cs_name(type_name):
    """Convert a (possibly parenthesised) snake_case name to PascalCase for C#."""
    cleaned = type_name.replace("(", "").replace(")", "")
    return "".join(capital(part) for part in cleaned.split("_"))
def cpp_name(type_name):
    """PascalCase name for C++; parentheses in the raw name are dropped."""
    cleaned = type_name.replace("(", "").replace(")", "")
    pascal_parts = [capital(part) for part in cleaned.split("_")]
    return "".join(pascal_parts)
def param_name(type_name):
    """Lower-case only the first character (camelCase parameter style)."""
    first, rest = type_name[0], type_name[1:]
    return first.lower() + rest
def is_fixed_type(param):
    """True when the parameter's declared type is a fixed-length protocol type."""
    declared_type = param["type"]
    return declared_type in FixedLengthTypes
def capital(txt):
    """Upper-case the first character of *txt*, leaving the rest untouched."""
    first_char = txt[0]
    return first_char.capitalize() + txt[1:]
def to_upper_snake_case(camel_case_str):
    """Translate a camelCase/PascalCase identifier into UPPER_SNAKE_CASE."""
    # Insert "_" before an upper-case letter that either follows a
    # lower-case/digit character or precedes a lower-case character.
    boundary = "((?<=[a-z0-9])[A-Z]|(?!^)[A-Z](?=[a-z]))"
    snake = re.sub(boundary, r"_\1", camel_case_str)
    return snake.upper()
def version_to_number(major, minor, patch=0):
    """Encode major/minor/patch as one comparable integer (2.1.3 -> 20103)."""
    encoded = MAJOR_VERSION_MULTIPLIER * major
    encoded += MINOR_VERSION_MULTIPLIER * minor
    encoded += PATCH_VERSION_MULTIPLIER * patch
    return encoded
def get_version_as_number(version):
    """Parse a "major.minor[.patch]" value (str or number) into an int code."""
    text = version if isinstance(version, str) else str(version)
    components = [int(part) for part in text.split(".")]
    return version_to_number(*components)
def fixed_params(params):
    """Select only the fixed-length parameters, preserving their order."""
    return list(filter(is_fixed_type, params))
def var_size_params(params):
    """Select only the variable-size parameters, preserving their order."""
    return [param for param in params if not is_fixed_type(param)]
def new_params(since, params):
    """Parameters that were added later than the method itself.

    Every parameter added together with the method carries the method's own
    "since" value, so a simple inequality check identifies the late ones.
    """
    return [param for param in params if param["since"] != since]
def filter_new_params(params, version):
    """Keep only the parameters added at or before the given *version*."""
    threshold = get_version_as_number(version)
    return [p for p in params if get_version_as_number(p["since"]) <= threshold]
def generate_codecs(services, template, output_dir, lang, env):
    """Render message codecs for every service method into *output_dir*.

    For C++ two aggregate files (codecs.h / codecs.cpp) are assembled
    incrementally from static header/footer fragments plus a per-method
    template; for every other language one codec file per method is written.
    Services and methods on the language's ignore list are skipped, as are
    methods whose templates raise NotImplementedError (missing type mapping).
    Message ids are computed in place as 0x<service><method><frame>.
    """
    makedirs(output_dir, exist_ok=True)
    id_fmt = "0x%02x%02x%02x"
    if lang is SupportedLanguages.CPP:
        curr_dir = dirname(realpath(__file__))
        cpp_dir = join(curr_dir, "cpp")
        # BUGFIX: open the static fragments with context managers so the
        # file handles are always closed (they previously leaked).
        with open(join(cpp_dir, "header_includes.txt"), "r") as fragment:
            save_file(join(output_dir, "codecs.h"), fragment.read(), "w")
        with open(join(cpp_dir, "source_header.txt"), "r") as fragment:
            save_file(join(output_dir, "codecs.cpp"), fragment.read(), "w")
    for service in services:
        if service["name"] in language_service_ignore_list[lang]:
            print("[%s] is in ignore list so ignoring it." % service["name"])
            continue
        if "methods" in service:
            methods = service["methods"]
            if methods is None:
                # BUGFIX: concatenating the service dict itself raised
                # TypeError; report the service name instead.
                raise NotImplementedError(
                    "Methods not found for service " + service["name"]
                )
            for method in service["methods"]:
                if (service["name"] + "." + method["name"]) in language_service_ignore_list[lang]:
                    print(
                        "[%s] is in ignore list so ignoring it."
                        % (service["name"] + "." + method["name"])
                    )
                    continue
                # Frame 0 is the request, frame 1 the response, 2+ the events.
                method["request"]["id"] = int(id_fmt % (service["id"], method["id"], 0), 16)
                method["response"]["id"] = int(id_fmt % (service["id"], method["id"], 1), 16)
                events = method.get("events", None)
                if events is not None:
                    for i, event in enumerate(events):
                        event["id"] = int(
                            id_fmt % (service["id"], method["id"], i + 2), 16
                        )
                codec_file_name = file_name_generators[lang](service["name"], method["name"])
                try:
                    if lang is SupportedLanguages.CPP:
                        codec_template = env.get_template("codec-template.h.j2")
                        content = codec_template.render(service_name=service["name"], method=method)
                        save_file(join(output_dir, "codecs.h"), content, "a+")
                        codec_template = env.get_template("codec-template.cpp.j2")
                        content = codec_template.render(service_name=service["name"], method=method)
                        save_file(join(output_dir, "codecs.cpp"), content, "a+")
                    else:
                        content = template.render(service_name=service["name"], method=method)
                        save_file(join(output_dir, codec_file_name), content)
                except NotImplementedError:
                    print("[%s] contains missing type mapping so ignoring it." % codec_file_name)
    if lang is SupportedLanguages.CPP:
        # Close both aggregate files with the shared footer fragment.
        with open(join(cpp_dir, "footer.txt"), "r") as fragment:
            content = fragment.read()
        save_file(join(output_dir, "codecs.h"), content, "a+")
        save_file(join(output_dir, "codecs.cpp"), content, "a+")
def generate_custom_codecs(services, template, output_dir, lang, env):
    """Render codecs for every custom type declared by the services.

    C++ gets a header/source pair per custom type; every other language
    gets a single file named by its file-name generator.  Types whose
    templates raise NotImplementedError are reported and skipped.
    """
    makedirs(output_dir, exist_ok=True)
    if lang == SupportedLanguages.CPP:
        cpp_header_template = env.get_template("custom-codec-template.h.j2")
        cpp_source_template = env.get_template("custom-codec-template.cpp.j2")
    for service in services:
        if "customTypes" not in service:
            continue
        for codec in service["customTypes"]:
            try:
                if lang == SupportedLanguages.CPP:
                    file_name_prefix = codec["name"].lower() + "_codec"
                    header_file_name = file_name_prefix + ".h"
                    source_file_name = file_name_prefix + ".cpp"
                    codec_file_name = header_file_name
                    content = cpp_header_template.render(codec=codec)
                    save_file(join(output_dir, header_file_name), content)
                    codec_file_name = source_file_name
                    content = cpp_source_template.render(codec=codec)
                    save_file(join(output_dir, source_file_name), content)
                else:
                    codec_file_name = file_name_generators[lang](codec["name"])
                    content = template.render(codec=codec)
                    save_file(join(output_dir, codec_file_name), content)
            except NotImplementedError:
                print("[%s] contains missing type mapping so ignoring it." % codec_file_name)
def generate_documentation(services, custom_definitions, template, output_dir):
    """Render the Markdown protocol documentation into *output_dir*.

    Services listed in ``internal_services`` are excluded from the page.
    """
    makedirs(output_dir, exist_ok=True)
    public_services = [s for s in services if s["name"] not in internal_services]
    content = template.render(
        services=public_services,
        custom_definitions=custom_definitions,
    )
    file_name = join(output_dir, "documentation.md")
    with open(file_name, "w", newline="\n") as out:
        # BUGFIX(idiom): write the string in one call; writelines() on a
        # str iterates it character by character.
        out.write(content)
def item_type(lang_name, param_type):
    """Element type of a ``List_``/``ListCN_`` type name; None otherwise."""
    for prefix in ("List_", "ListCN_"):
        if param_type.startswith(prefix):
            element = param_type.split("_", 1)[1]
            return lang_name(element)
    return None
def key_type(lang_name, param_type):
    """Key type name (second '_' segment) of a Map_/EntryList_ style type."""
    segments = param_type.split("_", 2)
    return lang_name(segments[1])
def value_type(lang_name, param_type):
    """Value type name (third '_' segment) of a Map_/EntryList_ style type."""
    segments = param_type.split("_", 2)
    return lang_name(segments[2])
def is_var_sized_list(param_type):
    """True for ``List_`` types that are not fixed-length specialisations."""
    if not param_type.startswith("List_"):
        return False
    return param_type not in FixedListTypes
def is_var_sized_list_contains_nullable(param_type):
    """True for ``ListCN_`` (nullable-element) types that are not fixed-length."""
    if not param_type.startswith("ListCN_"):
        return False
    return param_type not in FixedListTypes
def is_var_sized_map(param_type):
    """True for ``Map_`` types that are not fixed-length specialisations."""
    if not param_type.startswith("Map_"):
        return False
    return param_type not in FixedMapTypes
def is_var_sized_entry_list(param_type):
    """True for ``EntryList_`` types that are not fixed-length specialisations."""
    if not param_type.startswith("EntryList_"):
        return False
    return param_type not in FixedEntryListTypes
def load_services(protocol_def_dir):
    """Parse every YAML protocol definition file in *protocol_def_dir*.

    Exits the process on a YAML syntax error after printing it.
    """
    services = []
    for entry in listdir(protocol_def_dir):
        file_path = join(protocol_def_dir, entry)
        if not isfile(file_path):
            continue
        with open(file_path, "r") as definition_file:
            try:
                data = yaml.load(definition_file, Loader=yaml.Loader)
            except MarkedYAMLError as err:
                print(err)
                exit(-1)
            services.append(data)
    return services
def validate_services(services, schema_path, no_id_check, protocol_versions):
    """Validate service definitions against the JSON schema and, unless
    *no_id_check* is set, check id ordering and "since" version semantics.

    Returns False immediately on a schema violation; other problems are
    printed and accumulated into the returned boolean.
    NOTE(review): with no_id_check=True the parameter "since" validation
    below is skipped together with the id checks -- confirm that is intended.
    """
    valid = True
    with open(schema_path, "r") as schema_file:
        schema = json.load(schema_file)
    for i in range(len(services)):
        service = services[i]
        if not validate_against_schema(service, schema):
            return False
        if not no_id_check:
            # Validate id ordering of services.
            # Service ids must equal their position in the definition list.
            service_id = service["id"]
            if i != service_id:
                print(
                    "Check the service id of the %s. Expected: %s, found: %s."
                    % (service["name"], i, service_id)
                )
                valid = False
            # Validate id ordering of definition methods.
            # Method ids are 1-based and must be consecutive.
            methods = service["methods"]
            for j in range(len(methods)):
                method = methods[j]
                method_id = method["id"]
                if (j + 1) != method_id:
                    print(
                        "Check the method id of %s#%s. Expected: %s, found: %s"
                        % (service["name"], method["name"], (j + 1), method_id)
                    )
                    valid = False
                # Request/response/event parameters must be ordered by their
                # "since" versions, and every version must follow on from an
                # existing protocol version.
                request_params = method["request"].get("params", [])
                method_name = service["name"] + "#" + method["name"]
                if not is_parameters_ordered_and_semantically_correct(
                    method["since"], method_name + "#request", request_params, protocol_versions
                ):
                    valid = False
                response_params = method["response"].get("params", [])
                if not is_parameters_ordered_and_semantically_correct(
                    method["since"],
                    method_name + "#response",
                    response_params,
                    protocol_versions,
                ):
                    valid = False
                events = method.get("events", [])
                for event in events:
                    event_params = event.get("params", [])
                    if not is_parameters_ordered_and_semantically_correct(
                        event["since"],
                        method_name + "#" + event["name"] + "#event",
                        event_params,
                        protocol_versions,
                    ):
                        valid = False
    return valid
def is_semantically_correct_param(version, protocol_versions):
    """Check that an encoded *version* follows on from an existing one.

    The first protocol version is always accepted.  A minor release 2.x is
    accepted only if 2.(x-1) exists in *protocol_versions*; a patch release
    2.x.y only if 2.x.(y-1) exists.
    """
    if version == protocol_versions[0]:
        return True
    if version % MINOR_VERSION_MULTIPLIER == 0:
        # Minor version: 2.x requires 2.(x-1).
        return (version - MINOR_VERSION_MULTIPLIER) in protocol_versions
    # Patch version: 2.x.y requires 2.x.(y-1).
    # (The original guarded this branch with `version % PATCH_VERSION_MULTIPLIER
    # == 0`, which is always true since PATCH_VERSION_MULTIPLIER == 1, so a
    # plain else is equivalent.)
    return (version - PATCH_VERSION_MULTIPLIER) in protocol_versions
def is_parameters_ordered_and_semantically_correct(since, name, params, protocol_versions):
    """Validate a method/event "since" value and its parameter ordering.

    Each parameter's "since" must semantically follow an existing protocol
    version, and parameters must appear in non-decreasing "since" order.
    Problems are printed; the combined verdict is returned.
    """
    ordered = True
    semantics_ok = True
    prev_version = get_version_as_number(since)
    if not is_semantically_correct_param(prev_version, protocol_versions):
        method_or_event_name = name[: name.rindex("#")]
        print(
            'Check the since value of the "%s"\n'
            'It is set to version "%s" but this protocol version does '
            "not semantically follow other protocol versions!" % (method_or_event_name, since)
        )
        semantics_ok = False
    for param in params:
        current = get_version_as_number(param["since"])
        if not is_semantically_correct_param(current, protocol_versions):
            print(
                'Check the since value of "%s" field of the "%s".\n'
                'It is set version "%s" but this protocol version does '
                "not semantically follow other protocol versions!"
                % (param["name"], name, param["since"])
            )
            semantics_ok = False
        if prev_version > current:
            print(
                'Check the since value of "%s" field of the "%s".\n'
                "Parameters should be in the increasing order of since values!"
                % (param["name"], name)
            )
            ordered = False
        prev_version = current
    return ordered and semantics_ok
def validate_custom_protocol_definitions(definition, schema_path, protocol_versions):
    """Validate the custom-types definition file against its schema and
    check the "since" ordering/semantics of each custom type's parameters."""
    with open(schema_path, "r") as schema_file:
        schema = json.load(schema_file)
    custom_types = definition[0]
    if not validate_against_schema(custom_types, schema):
        return False
    all_valid = True
    for custom_type in custom_types["customTypes"]:
        params = custom_type.get("params", [])
        ok = is_parameters_ordered_and_semantically_correct(
            custom_type["since"], "CustomTypes#" + custom_type["name"], params, protocol_versions
        )
        if not ok:
            all_valid = False
    return all_valid
def validate_against_schema(service, schema):
    """Return True when *service* conforms to *schema*; print the error otherwise."""
    try:
        jsonschema.validate(service, schema)
    except jsonschema.ValidationError as e:
        print("Validation error on %s: %s" % (service.get("name", None), e))
        return False
    return True
def save_file(file, content, mode="w"):
    """Write *content* to *file*, replacing the ``!codec_hash!`` placeholder
    with the MD5 hex digest of the pre-substitution content.

    The digest lets generated codecs detect manual edits.  *mode* defaults
    to "w"; callers pass "a+" to append to aggregate files.
    """
    codec_hash = hashlib.md5(content.encode("utf-8")).hexdigest()
    # Renamed the handle so it no longer shadows the `file` parameter, and
    # write() the whole string (writelines() on a str iterates per-char).
    with open(file, mode, newline="\n") as out:
        out.write(content.replace("!codec_hash!", codec_hash))
def get_protocol_versions(protocol_defs, custom_codec_defs):
    """Collect every distinct "since" value across services and custom types.

    Returns an iterator (map object) of the versions as strings.
    """
    versions = set()
    custom_types = custom_codec_defs[0]["customTypes"] if custom_codec_defs else []
    for service in protocol_defs:
        for method in service["methods"]:
            versions.add(method["since"])
            for section in ("request", "response"):
                for param in method[section].get("params", []):
                    versions.add(param["since"])
            for event in method.get("events", []):
                versions.add(event["since"])
                for event_param in event.get("params", []):
                    versions.add(event_param["since"])
    for custom_codec in custom_types:
        versions.add(custom_codec["since"])
        for param in custom_codec.get("params", []):
            versions.add(param["since"])
    return map(str, versions)
class SupportedLanguages(Enum):
    """Target languages the generator can emit codecs for.

    The value of each member is the template-package name on disk.
    """
    JAVA = "java"
    CPP = "cpp"
    CS = "cs"
    PY = "py"
    TS = "ts"
    # GO = 'go'
    MD = "md"  # Markdown documentation output, not a client language
# Per-language output directory for generated method codecs; paths are
# presumably relative to each client repository's root -- confirm with the
# calling script.
codec_output_directories = {
    SupportedLanguages.JAVA: "hazelcast/src/main/java/com/hazelcast/client/impl/protocol/codec/",
    SupportedLanguages.CPP: "hazelcast/generated-sources/src/hazelcast/client/protocol/codec/",
    SupportedLanguages.CS: "src/Hazelcast.Net/Protocol/Codecs/",
    SupportedLanguages.PY: "hazelcast/protocol/codec/",
    SupportedLanguages.TS: "src/codec/",
    # SupportedLanguages.GO: 'internal/proto/'
    SupportedLanguages.MD: "documentation",
}
# Per-language output directory for generated custom-type codecs.
# No MD entry: custom types only appear inside the documentation page.
custom_codec_output_directories = {
    SupportedLanguages.JAVA: "hazelcast/src/main/java/com/hazelcast/client/impl/protocol/codec/custom/",
    SupportedLanguages.CPP: "hazelcast/generated-sources/src/hazelcast/client/protocol/codec/",
    SupportedLanguages.CS: "src/Hazelcast.Net/Protocol/CustomCodecs/",
    SupportedLanguages.PY: "hazelcast/protocol/codec/custom/",
    SupportedLanguages.TS: "src/codec/custom",
    # SupportedLanguages.GO: 'internal/proto/'
}
def _capitalized_name_generator(extension):
    """Return a factory producing PascalCase codec file names, e.g.
    ("map", "put") -> "MapPutCodec.java"."""
    def inner(*names):
        base = "".join(map(capital, names))
        return "%sCodec.%s" % (base, extension)
    return inner
def _snake_cased_name_generator(extension):
    """Return a factory producing snake_case codec file names, e.g.
    ("Map", "Put") -> "map_put_codec.py"."""
    def inner(*names):
        base = "_".join(map(py_param_name, names))
        return "%s_codec.%s" % (base, extension)
    return inner
# Maps each target language to a factory that builds a codec file name from
# service/method name parts.
file_name_generators = {
    SupportedLanguages.JAVA: _capitalized_name_generator("java"),
    SupportedLanguages.CPP: _snake_cased_name_generator("cpp"),
    SupportedLanguages.CS: _capitalized_name_generator("cs"),
    SupportedLanguages.PY: _snake_cased_name_generator("py"),
    SupportedLanguages.TS: _capitalized_name_generator("ts"),
    # SupportedLanguages.GO: 'go'
    # NOTE(review): unlike the other entries this is a plain string, not a
    # callable -- file_name_generators[MD](...) would fail.  MD output is
    # presumably written via generate_documentation instead; confirm.
    SupportedLanguages.MD: "md",
}
# Per-language helper functions exposed to the Jinja templates by
# create_environment.  MD uses identity functions throughout since the
# documentation shows protocol-level names verbatim.
language_specific_funcs = {
    # Render the encode side of a parameter type.
    "lang_types_encode": {
        SupportedLanguages.JAVA: java_types_encode,
        SupportedLanguages.CS: cs_types_encode,
        SupportedLanguages.CPP: cpp_types_encode,
        SupportedLanguages.TS: ts_types_encode,
        SupportedLanguages.PY: py_types_encode_decode,
        SupportedLanguages.MD: lambda x: x,
    },
    # Render the decode side of a parameter type.
    "lang_types_decode": {
        SupportedLanguages.JAVA: java_types_decode,
        SupportedLanguages.CS: cs_types_decode,
        SupportedLanguages.CPP: cpp_types_decode,
        SupportedLanguages.TS: ts_types_decode,
        SupportedLanguages.PY: py_types_encode_decode,
        SupportedLanguages.MD: lambda x: x,
    },
    # Convert a protocol type name into the language's type-name style.
    "lang_name": {
        SupportedLanguages.JAVA: java_name,
        SupportedLanguages.CS: cs_name,
        SupportedLanguages.CPP: cpp_name,
        SupportedLanguages.TS: java_name,
        SupportedLanguages.PY: java_name,
        SupportedLanguages.MD: lambda x: x,
    },
    # Convert a protocol parameter name into the language's style.
    "param_name": {
        SupportedLanguages.JAVA: param_name,
        SupportedLanguages.CS: param_name,
        SupportedLanguages.CPP: param_name,
        SupportedLanguages.TS: param_name,
        SupportedLanguages.PY: py_param_name,
        SupportedLanguages.MD: lambda x: x,
    },
    # Rename identifiers that collide with the language's reserved words.
    "escape_keyword": {
        SupportedLanguages.JAVA: lambda x: x,
        SupportedLanguages.CS: cs_escape_keyword,
        SupportedLanguages.CPP: lambda x: x,
        SupportedLanguages.TS: ts_escape_keyword,
        SupportedLanguages.PY: py_escape_keyword,
        SupportedLanguages.MD: lambda x: x,
    },
    # Compute import statements needed by a generated codec (TS/PY only).
    "get_import_path_holders": {
        SupportedLanguages.JAVA: lambda x: x,
        SupportedLanguages.CS: lambda x: x,
        SupportedLanguages.CPP: lambda x: x,
        SupportedLanguages.TS: ts_get_import_path_holders,
        SupportedLanguages.PY: py_get_import_path_holders,
        SupportedLanguages.MD: lambda x: x,
    },
}
# Services (or "Service.method" entries) that must not be generated for a
# given language; consulted by generate_codecs.  No MD entry: the
# documentation covers everything except internal_services.
language_service_ignore_list = {
    SupportedLanguages.JAVA: set(),
    SupportedLanguages.CPP: cpp_ignore_service_list,
    SupportedLanguages.CS: cs_ignore_service_list,
    SupportedLanguages.PY: py_ignore_service_list,
    SupportedLanguages.TS: ts_ignore_service_list,
    # SupportedLanguages.GO: set()
}
def create_environment(lang, namespace):
    """Build the Jinja2 environment used to render templates for *lang*.

    Registers the shared helpers plus the language-specific encode/decode
    and naming helpers as template globals.
    """
    env = Environment(
        loader=PackageLoader(lang.value, "."),
        extensions=["jinja2.ext.do", "jinja2.ext.loopcontrols"],
    )
    env.trim_blocks = True
    env.lstrip_blocks = True
    env.keep_trailing_newline = False

    env.filters["capital"] = capital

    # Language-agnostic helpers and values.
    env.globals.update(
        to_upper_snake_case=to_upper_snake_case,
        fixed_params=fixed_params,
        var_size_params=var_size_params,
        new_params=new_params,
        filter_new_params=filter_new_params,
        is_var_sized_list=is_var_sized_list,
        is_var_sized_list_contains_nullable=is_var_sized_list_contains_nullable,
        is_var_sized_entry_list=is_var_sized_entry_list,
        is_var_sized_map=is_var_sized_map,
        item_type=item_type,
        key_type=key_type,
        value_type=value_type,
        namespace=namespace,
        get_size=get_size,
        is_trivial=is_trivial,
    )

    # Helpers whose implementation differs per target language.
    for helper in (
        "lang_types_encode",
        "lang_types_decode",
        "lang_name",
        "param_name",
        "escape_keyword",
        "get_import_path_holders",
    ):
        env.globals[helper] = language_specific_funcs[helper][lang]

    return env
| {
"content_hash": "5fe61cae087f30e94fa2beccefc83395",
"timestamp": "",
"source": "github",
"line_count": 576,
"max_line_length": 104,
"avg_line_length": 38.470486111111114,
"alnum_prop": 0.6026445236698407,
"repo_name": "mmedenjak/hazelcast-client-protocol",
"id": "f07375836b749e12db7d746a2af9d94a9dbc0d04",
"size": "22159",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "util.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "60774"
}
],
"symlink_target": ""
} |
from collections import OrderedDict
import functools
import os
import re
from typing import (
Dict,
Mapping,
Optional,
Iterable,
Iterator,
Sequence,
Tuple,
Type,
Union,
)
import warnings
import pkg_resources
from google.api_core import client_options as client_options_lib
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport import mtls # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.auth.exceptions import MutualTLSChannelError # type: ignore
from google.oauth2 import service_account # type: ignore
try:
OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault]
except AttributeError: # pragma: NO COVER
OptionalRetry = Union[retries.Retry, object] # type: ignore
from google.iam.v1 import iam_policy_pb2 # type: ignore
from google.iam.v1 import policy_pb2 # type: ignore
from google.protobuf import duration_pb2 # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
from google.pubsub_v1.services.subscriber import pagers
from google.pubsub_v1.types import pubsub
import grpc
from .transports.base import SubscriberTransport, DEFAULT_CLIENT_INFO
from .transports.grpc import SubscriberGrpcTransport
from .transports.grpc_asyncio import SubscriberGrpcAsyncIOTransport
class SubscriberClientMeta(type):
    """Metaclass for the Subscriber client.

    Provides class-level helpers for resolving support objects such as the
    transport class without polluting client instances.
    """

    _transport_registry = OrderedDict()  # type: Dict[str, Type[SubscriberTransport]]
    _transport_registry["grpc"] = SubscriberGrpcTransport
    _transport_registry["grpc_asyncio"] = SubscriberGrpcAsyncIOTransport

    def get_transport_class(
        cls,
        label: str = None,
    ) -> Type[SubscriberTransport]:
        """Return the transport class registered under *label*.

        Args:
            label: The name of the desired transport. If none is
                provided, then the first transport in the registry is used.

        Returns:
            The transport class to use.
        """
        if label:
            # An explicitly requested transport wins.
            return cls._transport_registry[label]
        # Otherwise fall back to the first registered transport.
        registered = iter(cls._transport_registry.values())
        return next(registered)
class SubscriberClient(metaclass=SubscriberClientMeta):
"""The service that an application uses to manipulate subscriptions and
to consume messages from a subscription via the ``Pull`` method or
by establishing a bi-directional stream using the ``StreamingPull``
method.
"""
@staticmethod
def _get_default_mtls_endpoint(api_endpoint):
    """Convert a standard API endpoint into its mTLS equivalent.

    "*.sandbox.googleapis.com" and "*.googleapis.com" become
    "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com";
    anything else (including an already-mTLS endpoint) is returned as is.

    Args:
        api_endpoint (Optional[str]): the api endpoint to convert.
    Returns:
        str: converted mTLS api endpoint.
    """
    if not api_endpoint:
        return api_endpoint

    pattern = re.compile(
        r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
    )
    name, mtls, sandbox, googledomain = pattern.match(api_endpoint).groups()
    if mtls or not googledomain:
        # Already mTLS, or not a googleapis.com domain at all.
        return api_endpoint
    if sandbox:
        return api_endpoint.replace(
            "sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
        )
    return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
# The scopes needed to make gRPC calls to all of the methods defined in
# this service
_DEFAULT_SCOPES = (
    "https://www.googleapis.com/auth/cloud-platform",
    "https://www.googleapis.com/auth/pubsub",
)

SERVICE_ADDRESS = "pubsub.googleapis.com:443"
"""The default address of the service."""

# Hostname of the regular endpoint; the mTLS variant is derived from it at
# class-creation time via the _get_default_mtls_endpoint staticmethod
# (hence the .__func__ access, since the descriptor is not yet bound).
DEFAULT_ENDPOINT = "pubsub.googleapis.com"
DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__(  # type: ignore
    DEFAULT_ENDPOINT
)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
    """Create a client from parsed service account info.

    Args:
        info (dict): The service account private key info.
        args: Additional arguments to pass to the constructor.
        kwargs: Additional arguments to pass to the constructor.

    Returns:
        SubscriberClient: The constructed client.
    """
    creds = service_account.Credentials.from_service_account_info(info)
    kwargs["credentials"] = creds
    return cls(*args, **kwargs)
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
    """Create a client whose credentials come from a service account key file.

    Args:
        filename (str): The path to the service account private key json
            file.
        args: Additional arguments to pass to the constructor.
        kwargs: Additional arguments to pass to the constructor.

    Returns:
        SubscriberClient: The constructed client.
    """
    creds = service_account.Credentials.from_service_account_file(filename)
    kwargs["credentials"] = creds
    return cls(*args, **kwargs)

# Alias kept for backward compatibility.
from_service_account_json = from_service_account_file
@property
def transport(self) -> SubscriberTransport:
    """Returns the transport used by the client instance.

    Returns:
        SubscriberTransport: The transport used by the client
            instance.
    """
    # _transport is presumably assigned in __init__, which is outside this
    # excerpt -- confirm before relying on it being set.
    return self._transport
@staticmethod
def snapshot_path(project: str, snapshot: str) -> str:
    """Build the fully-qualified resource name of a snapshot."""
    return f"projects/{project}/snapshots/{snapshot}"
@staticmethod
def parse_snapshot_path(path: str) -> Dict[str, str]:
    """Split a snapshot resource name into its segments (empty if malformed)."""
    matched = re.match(r"^projects/(?P<project>.+?)/snapshots/(?P<snapshot>.+?)$", path)
    if not matched:
        return {}
    return matched.groupdict()
@staticmethod
def subscription_path(project: str, subscription: str) -> str:
    """Build the fully-qualified resource name of a subscription."""
    return f"projects/{project}/subscriptions/{subscription}"
@staticmethod
def parse_subscription_path(path: str) -> Dict[str, str]:
    """Split a subscription resource name into its segments (empty if malformed)."""
    matched = re.match(
        r"^projects/(?P<project>.+?)/subscriptions/(?P<subscription>.+?)$", path
    )
    if not matched:
        return {}
    return matched.groupdict()
@staticmethod
def topic_path(project: str, topic: str) -> str:
    """Build the fully-qualified resource name of a topic."""
    return f"projects/{project}/topics/{topic}"
@staticmethod
def parse_topic_path(path: str) -> Dict[str, str]:
    """Split a topic resource name into its segments (empty if malformed)."""
    matched = re.match(r"^projects/(?P<project>.+?)/topics/(?P<topic>.+?)$", path)
    if not matched:
        return {}
    return matched.groupdict()
@staticmethod
def common_billing_account_path(billing_account: str) -> str:
    """Build the fully-qualified billing-account resource name."""
    return f"billingAccounts/{billing_account}"
@staticmethod
def parse_common_billing_account_path(path: str) -> Dict[str, str]:
    """Split a billing-account resource name into its segments (empty if malformed)."""
    matched = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
    if not matched:
        return {}
    return matched.groupdict()
@staticmethod
def common_folder_path(folder: str) -> str:
    """Build the fully-qualified folder resource name."""
    return f"folders/{folder}"
@staticmethod
def parse_common_folder_path(path: str) -> Dict[str, str]:
    """Split a folder resource name into its segments (empty if malformed)."""
    matched = re.match(r"^folders/(?P<folder>.+?)$", path)
    if not matched:
        return {}
    return matched.groupdict()
@staticmethod
def common_organization_path(organization: str) -> str:
    """Build the fully-qualified organization resource name."""
    return f"organizations/{organization}"
@staticmethod
def parse_common_organization_path(path: str) -> Dict[str, str]:
    """Split an organization resource name into its segments (empty if malformed)."""
    matched = re.match(r"^organizations/(?P<organization>.+?)$", path)
    if not matched:
        return {}
    return matched.groupdict()
@staticmethod
def common_project_path(project: str) -> str:
    """Build the fully-qualified project resource name."""
    return f"projects/{project}"
@staticmethod
def parse_common_project_path(path: str) -> Dict[str, str]:
    """Split a project resource name into its segments (empty if malformed)."""
    matched = re.match(r"^projects/(?P<project>.+?)$", path)
    if not matched:
        return {}
    return matched.groupdict()
@staticmethod
def common_location_path(project: str, location: str) -> str:
    """Build the fully-qualified location resource name."""
    return f"projects/{project}/locations/{location}"
@staticmethod
def parse_common_location_path(path: str) -> Dict[str, str]:
    """Split a location resource name into its segments (empty if malformed)."""
    matched = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path)
    if not matched:
        return {}
    return matched.groupdict()
    @classmethod
    def get_mtls_endpoint_and_cert_source(
        cls, client_options: Optional[client_options_lib.ClientOptions] = None
    ):
        """Return the API endpoint and client cert source for mutual TLS.
        The client cert source is determined in the following order:
        (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the
        client cert source is None.
        (2) if `client_options.client_cert_source` is provided, use the provided one; if the
        default client cert source exists, use the default one; otherwise the client cert
        source is None.
        The API endpoint is determined in the following order:
        (1) if `client_options.api_endpoint` is provided, use the provided one.
        (2) if `GOOGLE_API_USE_MTLS_ENDPOINT` environment variable is "always", use the
        default mTLS endpoint; if the environment variable is "never", use the default API
        endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise
        use the default API endpoint.
        More details can be found at https://google.aip.dev/auth/4114.
        Args:
            client_options (google.api_core.client_options.ClientOptions): Custom options for the
                client. Only the `api_endpoint` and `client_cert_source` properties may be used
                in this method.
        Returns:
            Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the
                client cert source to use.
        Raises:
            google.auth.exceptions.MutualTLSChannelError: If any errors happen.
        """
        if client_options is None:
            client_options = client_options_lib.ClientOptions()
        use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false")
        use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
        # Reject unexpected environment variable values before acting on them.
        if use_client_cert not in ("true", "false"):
            raise ValueError(
                "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`"
            )
        if use_mtls_endpoint not in ("auto", "never", "always"):
            raise MutualTLSChannelError(
                "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`"
            )
        # Figure out the client cert source to use.
        client_cert_source = None
        if use_client_cert == "true":
            # Explicit client_options cert source wins over the platform default.
            if client_options.client_cert_source:
                client_cert_source = client_options.client_cert_source
            elif mtls.has_default_client_cert_source():
                client_cert_source = mtls.default_client_cert_source()
        # Figure out which api endpoint to use.
        if client_options.api_endpoint is not None:
            api_endpoint = client_options.api_endpoint
        elif use_mtls_endpoint == "always" or (
            use_mtls_endpoint == "auto" and client_cert_source
        ):
            api_endpoint = cls.DEFAULT_MTLS_ENDPOINT
        else:
            api_endpoint = cls.DEFAULT_ENDPOINT
        return api_endpoint, client_cert_source
    def __init__(
        self,
        *,
        credentials: Optional[ga_credentials.Credentials] = None,
        transport: Union[str, SubscriberTransport, None] = None,
        client_options: Optional[client_options_lib.ClientOptions] = None,
        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
    ) -> None:
        """Instantiates the subscriber client.
        Args:
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
            transport (Union[str, SubscriberTransport]): The
                transport to use. If set to None, a transport is chosen
                automatically.
            client_options (google.api_core.client_options.ClientOptions): Custom options for the
                client. It won't take effect if a ``transport`` instance is provided.
                (1) The ``api_endpoint`` property can be used to override the
                default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
                environment variable can also be used to override the endpoint:
                "always" (always use the default mTLS endpoint), "never" (always
                use the default regular endpoint) and "auto" (auto switch to the
                default mTLS endpoint if client certificate is present, this is
                the default value). However, the ``api_endpoint`` property takes
                precedence if provided.
                (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
                is "true", then the ``client_cert_source`` property can be used
                to provide client certificate for mutual TLS transport. If
                not provided, the default SSL client certificate will be used if
                present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
                set, no client certificate will be used.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.
        Raises:
            google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
                creation failed for any reason.
        """
        # Accept a plain dict for client_options and coerce it to ClientOptions.
        if isinstance(client_options, dict):
            client_options = client_options_lib.from_dict(client_options)
        if client_options is None:
            client_options = client_options_lib.ClientOptions()
        api_endpoint, client_cert_source_func = self.get_mtls_endpoint_and_cert_source(
            client_options
        )
        # An API key and explicit credentials are mutually exclusive ways to
        # authenticate; reject the ambiguous combination up front.
        api_key_value = getattr(client_options, "api_key", None)
        if api_key_value and credentials:
            raise ValueError(
                "client_options.api_key and credentials are mutually exclusive"
            )
        # Save or instantiate the transport.
        # Ordinarily, we provide the transport, but allowing a custom transport
        # instance provides an extensibility point for unusual situations.
        if isinstance(transport, SubscriberTransport):
            # transport is a SubscriberTransport instance.
            # A pre-built transport already carries its own credentials/scopes,
            # so passing them here would silently be ignored — fail loudly instead.
            if credentials or client_options.credentials_file or api_key_value:
                raise ValueError(
                    "When providing a transport instance, "
                    "provide its credentials directly."
                )
            if client_options.scopes:
                raise ValueError(
                    "When providing a transport instance, provide its scopes "
                    "directly."
                )
            self._transport = transport
        else:
            import google.auth._default  # type: ignore
            # If an API key was supplied, convert it into credentials (only
            # possible on google-auth versions that support API key credentials).
            if api_key_value and hasattr(
                google.auth._default, "get_api_key_credentials"
            ):
                credentials = google.auth._default.get_api_key_credentials(
                    api_key_value
                )
            Transport = type(self).get_transport_class(transport)
            # PUBSUB_EMULATOR_HOST redirects all traffic to the emulator host
            # over an insecure (non-TLS) gRPC channel, sync or async as needed.
            emulator_host = os.environ.get("PUBSUB_EMULATOR_HOST")
            if emulator_host:
                if issubclass(Transport, type(self)._transport_registry["grpc"]):
                    channel = grpc.insecure_channel(target=emulator_host)
                else:
                    channel = grpc.aio.insecure_channel(target=emulator_host)
                Transport = functools.partial(Transport, channel=channel)
            self._transport = Transport(
                credentials=credentials,
                credentials_file=client_options.credentials_file,
                host=api_endpoint,
                scopes=client_options.scopes,
                client_cert_source_for_mtls=client_cert_source_func,
                quota_project_id=client_options.quota_project_id,
                client_info=client_info,
                always_use_jwt_access=True,
                api_audience=client_options.api_audience,
            )
def create_subscription(
self,
request: Union[pubsub.Subscription, dict] = None,
*,
name: str = None,
topic: str = None,
push_config: pubsub.PushConfig = None,
ack_deadline_seconds: int = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pubsub.Subscription:
r"""Creates a subscription to a given topic. See the [resource name
rules]
(https://cloud.google.com/pubsub/docs/admin#resource_names). If
the subscription already exists, returns ``ALREADY_EXISTS``. If
the corresponding topic doesn't exist, returns ``NOT_FOUND``.
If the name is not provided in the request, the server will
assign a random name for this subscription on the same project
as the topic, conforming to the [resource name format]
(https://cloud.google.com/pubsub/docs/admin#resource_names). The
generated name is populated in the returned Subscription object.
Note that for REST API requests, you must specify a name in the
request.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google import pubsub_v1
def sample_create_subscription():
# Create a client
client = pubsub_v1.SubscriberClient()
# Initialize request argument(s)
request = pubsub_v1.Subscription(
name="name_value",
topic="topic_value",
)
# Make the request
response = client.create_subscription(request=request)
# Handle the response
print(response)
Args:
request (Union[google.pubsub_v1.types.Subscription, dict]):
The request object. A subscription resource.
name (str):
Required. The name of the subscription. It must have the
format
``"projects/{project}/subscriptions/{subscription}"``.
``{subscription}`` must start with a letter, and contain
only letters (``[A-Za-z]``), numbers (``[0-9]``), dashes
(``-``), underscores (``_``), periods (``.``), tildes
(``~``), plus (``+``) or percent signs (``%``). It must
be between 3 and 255 characters in length, and it must
not start with ``"goog"``.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
topic (str):
Required. The name of the topic from which this
subscription is receiving messages. Format is
``projects/{project}/topics/{topic}``. The value of this
field will be ``_deleted-topic_`` if the topic has been
deleted.
This corresponds to the ``topic`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
push_config (google.pubsub_v1.types.PushConfig):
If push delivery is used with this subscription, this
field is used to configure it. Either ``pushConfig`` or
``bigQueryConfig`` can be set, but not both. If both are
empty, then the subscriber will pull and ack messages
using API methods.
This corresponds to the ``push_config`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
ack_deadline_seconds (int):
The approximate amount of time (on a best-effort basis)
Pub/Sub waits for the subscriber to acknowledge receipt
before resending the message. In the interval after the
message is delivered and before it is acknowledged, it
is considered to be outstanding. During that time
period, the message will not be redelivered (on a
best-effort basis).
For pull subscriptions, this value is used as the
initial value for the ack deadline. To override this
value for a given message, call ``ModifyAckDeadline``
with the corresponding ``ack_id`` if using non-streaming
pull or send the ``ack_id`` in a
``StreamingModifyAckDeadlineRequest`` if using streaming
pull. The minimum custom deadline you can specify is 10
seconds. The maximum custom deadline you can specify is
600 seconds (10 minutes). If this parameter is 0, a
default value of 10 seconds is used.
For push delivery, this value is also used to set the
request timeout for the call to the push endpoint.
If the subscriber never acknowledges the message, the
Pub/Sub system will eventually redeliver the message.
This corresponds to the ``ack_deadline_seconds`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.pubsub_v1.types.Subscription:
A subscription resource.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name, topic, push_config, ack_deadline_seconds])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a pubsub.Subscription.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, pubsub.Subscription):
request = pubsub.Subscription(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
if topic is not None:
request.topic = topic
if push_config is not None:
request.push_config = push_config
if ack_deadline_seconds is not None:
request.ack_deadline_seconds = ack_deadline_seconds
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.create_subscription]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
def get_subscription(
self,
request: Union[pubsub.GetSubscriptionRequest, dict] = None,
*,
subscription: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pubsub.Subscription:
r"""Gets the configuration details of a subscription.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google import pubsub_v1
def sample_get_subscription():
# Create a client
client = pubsub_v1.SubscriberClient()
# Initialize request argument(s)
request = pubsub_v1.GetSubscriptionRequest(
subscription="subscription_value",
)
# Make the request
response = client.get_subscription(request=request)
# Handle the response
print(response)
Args:
request (Union[google.pubsub_v1.types.GetSubscriptionRequest, dict]):
The request object. Request for the GetSubscription
method.
subscription (str):
Required. The name of the subscription to get. Format is
``projects/{project}/subscriptions/{sub}``.
This corresponds to the ``subscription`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.pubsub_v1.types.Subscription:
A subscription resource.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([subscription])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a pubsub.GetSubscriptionRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, pubsub.GetSubscriptionRequest):
request = pubsub.GetSubscriptionRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if subscription is not None:
request.subscription = subscription
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.get_subscription]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(("subscription", request.subscription),)
),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
def update_subscription(
self,
request: Union[pubsub.UpdateSubscriptionRequest, dict] = None,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pubsub.Subscription:
r"""Updates an existing subscription. Note that certain
properties of a subscription, such as its topic, are not
modifiable.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google import pubsub_v1
def sample_update_subscription():
# Create a client
client = pubsub_v1.SubscriberClient()
# Initialize request argument(s)
subscription = pubsub_v1.Subscription()
subscription.name = "name_value"
subscription.topic = "topic_value"
request = pubsub_v1.UpdateSubscriptionRequest(
subscription=subscription,
)
# Make the request
response = client.update_subscription(request=request)
# Handle the response
print(response)
Args:
request (Union[google.pubsub_v1.types.UpdateSubscriptionRequest, dict]):
The request object. Request for the UpdateSubscription
method.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.pubsub_v1.types.Subscription:
A subscription resource.
"""
# Create or coerce a protobuf request object.
# Minor optimization to avoid making a copy if the user passes
# in a pubsub.UpdateSubscriptionRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, pubsub.UpdateSubscriptionRequest):
request = pubsub.UpdateSubscriptionRequest(request)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.update_subscription]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(("subscription.name", request.subscription.name),)
),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
def list_subscriptions(
self,
request: Union[pubsub.ListSubscriptionsRequest, dict] = None,
*,
project: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListSubscriptionsPager:
r"""Lists matching subscriptions.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google import pubsub_v1
def sample_list_subscriptions():
# Create a client
client = pubsub_v1.SubscriberClient()
# Initialize request argument(s)
request = pubsub_v1.ListSubscriptionsRequest(
project="project_value",
)
# Make the request
page_result = client.list_subscriptions(request=request)
# Handle the response
for response in page_result:
print(response)
Args:
request (Union[google.pubsub_v1.types.ListSubscriptionsRequest, dict]):
The request object. Request for the `ListSubscriptions`
method.
project (str):
Required. The name of the project in which to list
subscriptions. Format is ``projects/{project-id}``.
This corresponds to the ``project`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.pubsub_v1.services.subscriber.pagers.ListSubscriptionsPager:
Response for the ListSubscriptions method.
Iterating over this object will yield results and
resolve additional pages automatically.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([project])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a pubsub.ListSubscriptionsRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, pubsub.ListSubscriptionsRequest):
request = pubsub.ListSubscriptionsRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if project is not None:
request.project = project
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.list_subscriptions]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("project", request.project),)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# This method is paged; wrap the response in a pager, which provides
# an `__iter__` convenience method.
response = pagers.ListSubscriptionsPager(
method=rpc,
request=request,
response=response,
metadata=metadata,
)
# Done; return the response.
return response
def delete_subscription(
self,
request: Union[pubsub.DeleteSubscriptionRequest, dict] = None,
*,
subscription: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> None:
r"""Deletes an existing subscription. All messages retained in the
subscription are immediately dropped. Calls to ``Pull`` after
deletion will return ``NOT_FOUND``. After a subscription is
deleted, a new one may be created with the same name, but the
new one has no association with the old subscription or its
topic unless the same topic is specified.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google import pubsub_v1
def sample_delete_subscription():
# Create a client
client = pubsub_v1.SubscriberClient()
# Initialize request argument(s)
request = pubsub_v1.DeleteSubscriptionRequest(
subscription="subscription_value",
)
# Make the request
client.delete_subscription(request=request)
Args:
request (Union[google.pubsub_v1.types.DeleteSubscriptionRequest, dict]):
The request object. Request for the DeleteSubscription
method.
subscription (str):
Required. The subscription to delete. Format is
``projects/{project}/subscriptions/{sub}``.
This corresponds to the ``subscription`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([subscription])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a pubsub.DeleteSubscriptionRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, pubsub.DeleteSubscriptionRequest):
request = pubsub.DeleteSubscriptionRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if subscription is not None:
request.subscription = subscription
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.delete_subscription]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(("subscription", request.subscription),)
),
)
# Send the request.
rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
def modify_ack_deadline(
self,
request: Union[pubsub.ModifyAckDeadlineRequest, dict] = None,
*,
subscription: str = None,
ack_ids: Sequence[str] = None,
ack_deadline_seconds: int = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> None:
r"""Modifies the ack deadline for a specific message. This method is
useful to indicate that more time is needed to process a message
by the subscriber, or to make the message available for
redelivery if the processing was interrupted. Note that this
does not modify the subscription-level ``ackDeadlineSeconds``
used for subsequent messages.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google import pubsub_v1
def sample_modify_ack_deadline():
# Create a client
client = pubsub_v1.SubscriberClient()
# Initialize request argument(s)
request = pubsub_v1.ModifyAckDeadlineRequest(
subscription="subscription_value",
ack_ids=['ack_ids_value1', 'ack_ids_value2'],
ack_deadline_seconds=2066,
)
# Make the request
client.modify_ack_deadline(request=request)
Args:
request (Union[google.pubsub_v1.types.ModifyAckDeadlineRequest, dict]):
The request object. Request for the ModifyAckDeadline
method.
subscription (str):
Required. The name of the subscription. Format is
``projects/{project}/subscriptions/{sub}``.
This corresponds to the ``subscription`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
ack_ids (Sequence[str]):
Required. List of acknowledgment IDs.
This corresponds to the ``ack_ids`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
ack_deadline_seconds (int):
Required. The new ack deadline with respect to the time
this request was sent to the Pub/Sub system. For
example, if the value is 10, the new ack deadline will
expire 10 seconds after the ``ModifyAckDeadline`` call
was made. Specifying zero might immediately make the
message available for delivery to another subscriber
client. This typically results in an increase in the
rate of message redeliveries (that is, duplicates). The
minimum deadline you can specify is 0 seconds. The
maximum deadline you can specify is 600 seconds (10
minutes).
This corresponds to the ``ack_deadline_seconds`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([subscription, ack_ids, ack_deadline_seconds])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a pubsub.ModifyAckDeadlineRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, pubsub.ModifyAckDeadlineRequest):
request = pubsub.ModifyAckDeadlineRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if subscription is not None:
request.subscription = subscription
if ack_ids is not None:
request.ack_ids = ack_ids
if ack_deadline_seconds is not None:
request.ack_deadline_seconds = ack_deadline_seconds
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.modify_ack_deadline]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(("subscription", request.subscription),)
),
)
# Send the request.
rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
def acknowledge(
self,
request: Union[pubsub.AcknowledgeRequest, dict] = None,
*,
subscription: str = None,
ack_ids: Sequence[str] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> None:
r"""Acknowledges the messages associated with the ``ack_ids`` in the
``AcknowledgeRequest``. The Pub/Sub system can remove the
relevant messages from the subscription.
Acknowledging a message whose ack deadline has expired may
succeed, but such a message may be redelivered later.
Acknowledging a message more than once will not result in an
error.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google import pubsub_v1
def sample_acknowledge():
# Create a client
client = pubsub_v1.SubscriberClient()
# Initialize request argument(s)
request = pubsub_v1.AcknowledgeRequest(
subscription="subscription_value",
ack_ids=['ack_ids_value1', 'ack_ids_value2'],
)
# Make the request
client.acknowledge(request=request)
Args:
request (Union[google.pubsub_v1.types.AcknowledgeRequest, dict]):
The request object. Request for the Acknowledge method.
subscription (str):
Required. The subscription whose message is being
acknowledged. Format is
``projects/{project}/subscriptions/{sub}``.
This corresponds to the ``subscription`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
ack_ids (Sequence[str]):
Required. The acknowledgment ID for the messages being
acknowledged that was returned by the Pub/Sub system in
the ``Pull`` response. Must not be empty.
This corresponds to the ``ack_ids`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([subscription, ack_ids])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a pubsub.AcknowledgeRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, pubsub.AcknowledgeRequest):
request = pubsub.AcknowledgeRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if subscription is not None:
request.subscription = subscription
if ack_ids is not None:
request.ack_ids = ack_ids
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.acknowledge]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(("subscription", request.subscription),)
),
)
# Send the request.
rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
def pull(
self,
request: Union[pubsub.PullRequest, dict] = None,
*,
subscription: str = None,
return_immediately: bool = None,
max_messages: int = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pubsub.PullResponse:
r"""Pulls messages from the server. The server may return
``UNAVAILABLE`` if there are too many concurrent pull requests
pending for the given subscription.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google import pubsub_v1
def sample_pull():
# Create a client
client = pubsub_v1.SubscriberClient()
# Initialize request argument(s)
request = pubsub_v1.PullRequest(
subscription="subscription_value",
max_messages=1277,
)
# Make the request
response = client.pull(request=request)
# Handle the response
print(response)
Args:
request (Union[google.pubsub_v1.types.PullRequest, dict]):
The request object. Request for the `Pull` method.
subscription (str):
Required. The subscription from which messages should be
pulled. Format is
``projects/{project}/subscriptions/{sub}``.
This corresponds to the ``subscription`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
return_immediately (bool):
Optional. If this field set to true, the system will
respond immediately even if it there are no messages
available to return in the ``Pull`` response. Otherwise,
the system may wait (for a bounded amount of time) until
at least one message is available, rather than returning
no messages. Warning: setting this field to ``true`` is
discouraged because it adversely impacts the performance
of ``Pull`` operations. We recommend that users do not
set this field.
This corresponds to the ``return_immediately`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
max_messages (int):
Required. The maximum number of
messages to return for this request.
Must be a positive integer. The Pub/Sub
system may return fewer than the number
specified.
This corresponds to the ``max_messages`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.pubsub_v1.types.PullResponse:
Response for the Pull method.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([subscription, return_immediately, max_messages])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a pubsub.PullRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, pubsub.PullRequest):
request = pubsub.PullRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if subscription is not None:
request.subscription = subscription
if return_immediately is not None:
request.return_immediately = return_immediately
if max_messages is not None:
request.max_messages = max_messages
if request.return_immediately:
warnings.warn(
"The return_immediately flag is deprecated and should be set to False.",
category=DeprecationWarning,
)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.pull]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(("subscription", request.subscription),)
),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
def streaming_pull(
self,
requests: Iterator[pubsub.StreamingPullRequest] = None,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> Iterable[pubsub.StreamingPullResponse]:
r"""Establishes a stream with the server, which sends messages down
to the client. The client streams acknowledgements and ack
deadline modifications back to the server. The server will close
the stream and return the status on any error. The server may
close the stream with status ``UNAVAILABLE`` to reassign
server-side resources, in which case, the client should
re-establish the stream. Flow control can be achieved by
configuring the underlying RPC channel.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google import pubsub_v1
def sample_streaming_pull():
# Create a client
client = pubsub_v1.SubscriberClient()
# Initialize request argument(s)
request = pubsub_v1.StreamingPullRequest(
subscription="subscription_value",
stream_ack_deadline_seconds=2813,
)
# This method expects an iterator which contains
# 'pubsub_v1.StreamingPullRequest' objects
# Here we create a generator that yields a single `request` for
# demonstrative purposes.
requests = [request]
def request_generator():
for request in requests:
yield request
# Make the request
stream = client.streaming_pull(requests=request_generator())
# Handle the response
for response in stream:
print(response)
Args:
requests (Iterator[google.pubsub_v1.types.StreamingPullRequest]):
The request object iterator. Request for the `StreamingPull`
streaming RPC method. This request is used to establish
the initial stream as well as to stream acknowledgements
and ack deadline modifications from the client to the
server.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
Iterable[google.pubsub_v1.types.StreamingPullResponse]:
Response for the StreamingPull method. This response is used to stream
messages from the server to the client.
"""
# Wrappers in api-core should not automatically pre-fetch the first
# stream result, as this breaks the stream when re-opening it.
# https://github.com/googleapis/python-pubsub/issues/93#issuecomment-630762257
self._transport.streaming_pull._prefetch_first_result_ = False
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.streaming_pull]
# Send the request.
response = rpc(
requests,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
def modify_push_config(
self,
request: Union[pubsub.ModifyPushConfigRequest, dict] = None,
*,
subscription: str = None,
push_config: pubsub.PushConfig = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> None:
r"""Modifies the ``PushConfig`` for a specified subscription.
This may be used to change a push subscription to a pull one
(signified by an empty ``PushConfig``) or vice versa, or change
the endpoint URL and other attributes of a push subscription.
Messages will accumulate for delivery continuously through the
call regardless of changes to the ``PushConfig``.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google import pubsub_v1
def sample_modify_push_config():
# Create a client
client = pubsub_v1.SubscriberClient()
# Initialize request argument(s)
request = pubsub_v1.ModifyPushConfigRequest(
subscription="subscription_value",
)
# Make the request
client.modify_push_config(request=request)
Args:
request (Union[google.pubsub_v1.types.ModifyPushConfigRequest, dict]):
The request object. Request for the ModifyPushConfig
method.
subscription (str):
Required. The name of the subscription. Format is
``projects/{project}/subscriptions/{sub}``.
This corresponds to the ``subscription`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
push_config (google.pubsub_v1.types.PushConfig):
Required. The push configuration for future deliveries.
An empty ``pushConfig`` indicates that the Pub/Sub
system should stop pushing messages from the given
subscription and allow messages to be pulled and
acknowledged - effectively pausing the subscription if
``Pull`` or ``StreamingPull`` is not called.
This corresponds to the ``push_config`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([subscription, push_config])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a pubsub.ModifyPushConfigRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, pubsub.ModifyPushConfigRequest):
request = pubsub.ModifyPushConfigRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if subscription is not None:
request.subscription = subscription
if push_config is not None:
request.push_config = push_config
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.modify_push_config]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(("subscription", request.subscription),)
),
)
# Send the request.
rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
def get_snapshot(
self,
request: Union[pubsub.GetSnapshotRequest, dict] = None,
*,
snapshot: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pubsub.Snapshot:
r"""Gets the configuration details of a snapshot.
Snapshots are used in <a
href="https://cloud.google.com/pubsub/docs/replay-overview">Seek</a>
operations, which allow you to manage message
acknowledgments in bulk. That is, you can set the
acknowledgment state of messages in an existing
subscription to the state captured by a snapshot.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google import pubsub_v1
def sample_get_snapshot():
# Create a client
client = pubsub_v1.SubscriberClient()
# Initialize request argument(s)
request = pubsub_v1.GetSnapshotRequest(
snapshot="snapshot_value",
)
# Make the request
response = client.get_snapshot(request=request)
# Handle the response
print(response)
Args:
request (Union[google.pubsub_v1.types.GetSnapshotRequest, dict]):
The request object. Request for the GetSnapshot method.
snapshot (str):
Required. The name of the snapshot to get. Format is
``projects/{project}/snapshots/{snap}``.
This corresponds to the ``snapshot`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.pubsub_v1.types.Snapshot:
A snapshot resource. Snapshots are used in
[Seek](https://cloud.google.com/pubsub/docs/replay-overview)
operations, which allow you to manage message
acknowledgments in bulk. That is, you can set the
acknowledgment state of messages in an existing
subscription to the state captured by a snapshot.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([snapshot])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a pubsub.GetSnapshotRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, pubsub.GetSnapshotRequest):
request = pubsub.GetSnapshotRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if snapshot is not None:
request.snapshot = snapshot
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.get_snapshot]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("snapshot", request.snapshot),)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
def list_snapshots(
self,
request: Union[pubsub.ListSnapshotsRequest, dict] = None,
*,
project: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListSnapshotsPager:
r"""Lists the existing snapshots. Snapshots are used in
`Seek <https://cloud.google.com/pubsub/docs/replay-overview>`__
operations, which allow you to manage message acknowledgments in
bulk. That is, you can set the acknowledgment state of messages
in an existing subscription to the state captured by a snapshot.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google import pubsub_v1
def sample_list_snapshots():
# Create a client
client = pubsub_v1.SubscriberClient()
# Initialize request argument(s)
request = pubsub_v1.ListSnapshotsRequest(
project="project_value",
)
# Make the request
page_result = client.list_snapshots(request=request)
# Handle the response
for response in page_result:
print(response)
Args:
request (Union[google.pubsub_v1.types.ListSnapshotsRequest, dict]):
The request object. Request for the `ListSnapshots`
method.
project (str):
Required. The name of the project in which to list
snapshots. Format is ``projects/{project-id}``.
This corresponds to the ``project`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.pubsub_v1.services.subscriber.pagers.ListSnapshotsPager:
Response for the ListSnapshots method.
Iterating over this object will yield results and
resolve additional pages automatically.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([project])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a pubsub.ListSnapshotsRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, pubsub.ListSnapshotsRequest):
request = pubsub.ListSnapshotsRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if project is not None:
request.project = project
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.list_snapshots]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("project", request.project),)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# This method is paged; wrap the response in a pager, which provides
# an `__iter__` convenience method.
response = pagers.ListSnapshotsPager(
method=rpc,
request=request,
response=response,
metadata=metadata,
)
# Done; return the response.
return response
def create_snapshot(
self,
request: Union[pubsub.CreateSnapshotRequest, dict] = None,
*,
name: str = None,
subscription: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pubsub.Snapshot:
r"""Creates a snapshot from the requested subscription. Snapshots
are used in
`Seek <https://cloud.google.com/pubsub/docs/replay-overview>`__
operations, which allow you to manage message acknowledgments in
bulk. That is, you can set the acknowledgment state of messages
in an existing subscription to the state captured by a snapshot.
If the snapshot already exists, returns ``ALREADY_EXISTS``. If
the requested subscription doesn't exist, returns ``NOT_FOUND``.
If the backlog in the subscription is too old -- and the
resulting snapshot would expire in less than 1 hour -- then
``FAILED_PRECONDITION`` is returned. See also the
``Snapshot.expire_time`` field. If the name is not provided in
the request, the server will assign a random name for this
snapshot on the same project as the subscription, conforming to
the [resource name format]
(https://cloud.google.com/pubsub/docs/admin#resource_names). The
generated name is populated in the returned Snapshot object.
Note that for REST API requests, you must specify a name in the
request.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google import pubsub_v1
def sample_create_snapshot():
# Create a client
client = pubsub_v1.SubscriberClient()
# Initialize request argument(s)
request = pubsub_v1.CreateSnapshotRequest(
name="name_value",
subscription="subscription_value",
)
# Make the request
response = client.create_snapshot(request=request)
# Handle the response
print(response)
Args:
request (Union[google.pubsub_v1.types.CreateSnapshotRequest, dict]):
The request object. Request for the `CreateSnapshot`
method.
name (str):
Required. User-provided name for this snapshot. If the
name is not provided in the request, the server will
assign a random name for this snapshot on the same
project as the subscription. Note that for REST API
requests, you must specify a name. See the resource name
rules. Format is
``projects/{project}/snapshots/{snap}``.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
subscription (str):
Required. The subscription whose backlog the snapshot
retains. Specifically, the created snapshot is
guaranteed to retain: (a) The existing backlog on the
subscription. More precisely, this is defined as the
messages in the subscription's backlog that are
unacknowledged upon the successful completion of the
``CreateSnapshot`` request; as well as: (b) Any messages
published to the subscription's topic following the
successful completion of the CreateSnapshot request.
Format is ``projects/{project}/subscriptions/{sub}``.
This corresponds to the ``subscription`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.pubsub_v1.types.Snapshot:
A snapshot resource. Snapshots are used in
[Seek](https://cloud.google.com/pubsub/docs/replay-overview)
operations, which allow you to manage message
acknowledgments in bulk. That is, you can set the
acknowledgment state of messages in an existing
subscription to the state captured by a snapshot.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name, subscription])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a pubsub.CreateSnapshotRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, pubsub.CreateSnapshotRequest):
request = pubsub.CreateSnapshotRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
if subscription is not None:
request.subscription = subscription
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.create_snapshot]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
def update_snapshot(
self,
request: Union[pubsub.UpdateSnapshotRequest, dict] = None,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pubsub.Snapshot:
r"""Updates an existing snapshot. Snapshots are used in
<a
href="https://cloud.google.com/pubsub/docs/replay-overview">Seek</a>
operations, which allow
you to manage message acknowledgments in bulk. That is,
you can set the acknowledgment state of messages in an
existing subscription to the state captured by a
snapshot.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google import pubsub_v1
def sample_update_snapshot():
# Create a client
client = pubsub_v1.SubscriberClient()
# Initialize request argument(s)
request = pubsub_v1.UpdateSnapshotRequest(
)
# Make the request
response = client.update_snapshot(request=request)
# Handle the response
print(response)
Args:
request (Union[google.pubsub_v1.types.UpdateSnapshotRequest, dict]):
The request object. Request for the UpdateSnapshot
method.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.pubsub_v1.types.Snapshot:
A snapshot resource. Snapshots are used in
[Seek](https://cloud.google.com/pubsub/docs/replay-overview)
operations, which allow you to manage message
acknowledgments in bulk. That is, you can set the
acknowledgment state of messages in an existing
subscription to the state captured by a snapshot.
"""
# Create or coerce a protobuf request object.
# Minor optimization to avoid making a copy if the user passes
# in a pubsub.UpdateSnapshotRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, pubsub.UpdateSnapshotRequest):
request = pubsub.UpdateSnapshotRequest(request)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.update_snapshot]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(("snapshot.name", request.snapshot.name),)
),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
def delete_snapshot(
self,
request: Union[pubsub.DeleteSnapshotRequest, dict] = None,
*,
snapshot: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> None:
r"""Removes an existing snapshot. Snapshots are used in [Seek]
(https://cloud.google.com/pubsub/docs/replay-overview)
operations, which allow you to manage message acknowledgments in
bulk. That is, you can set the acknowledgment state of messages
in an existing subscription to the state captured by a snapshot.
When the snapshot is deleted, all messages retained in the
snapshot are immediately dropped. After a snapshot is deleted, a
new one may be created with the same name, but the new one has
no association with the old snapshot or its subscription, unless
the same subscription is specified.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google import pubsub_v1
def sample_delete_snapshot():
# Create a client
client = pubsub_v1.SubscriberClient()
# Initialize request argument(s)
request = pubsub_v1.DeleteSnapshotRequest(
snapshot="snapshot_value",
)
# Make the request
client.delete_snapshot(request=request)
Args:
request (Union[google.pubsub_v1.types.DeleteSnapshotRequest, dict]):
The request object. Request for the `DeleteSnapshot`
method.
snapshot (str):
Required. The name of the snapshot to delete. Format is
``projects/{project}/snapshots/{snap}``.
This corresponds to the ``snapshot`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([snapshot])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a pubsub.DeleteSnapshotRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, pubsub.DeleteSnapshotRequest):
request = pubsub.DeleteSnapshotRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if snapshot is not None:
request.snapshot = snapshot
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.delete_snapshot]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("snapshot", request.snapshot),)),
)
# Send the request.
rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
def seek(
    self,
    request: Union[pubsub.SeekRequest, dict] = None,
    *,
    retry: OptionalRetry = gapic_v1.method.DEFAULT,
    timeout: float = None,
    metadata: Sequence[Tuple[str, str]] = (),
) -> pubsub.SeekResponse:
    r"""Seek an existing subscription to a point in time or to a snapshot.

    Snapshots are used in `Seek
    <https://cloud.google.com/pubsub/docs/replay-overview>`__ operations,
    which let you manage message acknowledgments in bulk: the
    acknowledgment state of messages in an existing subscription is set to
    the state captured by a snapshot.  The subscription and the snapshot
    must be on the same topic.

    Args:
        request (Union[google.pubsub_v1.types.SeekRequest, dict]):
            The request object. Request for the ``Seek`` method.
        retry (google.api_core.retry.Retry): Designation of what errors,
            if any, should be retried.
        timeout (float): The timeout for this request.
        metadata (Sequence[Tuple[str, str]]): Strings which should be
            sent along with the request as metadata.

    Returns:
        google.pubsub_v1.types.SeekResponse:
            Response for the Seek method (this response is empty).
    """
    # Coerce a dict (or None) into the proto-plus request type; an already
    # constructed SeekRequest is used as-is, avoiding a needless copy.
    if not isinstance(request, pubsub.SeekRequest):
        request = pubsub.SeekRequest(request)

    # The pre-wrapped transport method already carries the configured
    # retry/timeout defaults and friendly error handling.
    wrapped_rpc = self._transport._wrapped_methods[self._transport.seek]

    # The server routes this call on the subscription name, which must be
    # supplied in the request metadata header.
    routing_header = gapic_v1.routing_header.to_grpc_metadata(
        (("subscription", request.subscription),)
    )
    metadata = tuple(metadata) + (routing_header,)

    # Issue the RPC and hand its (empty) response straight back.
    return wrapped_rpc(request, retry=retry, timeout=timeout, metadata=metadata)
def __enter__(self):
    # Context-manager entry: expose the client itself inside the
    # ``with`` block; pairs with ``__exit__`` closing the transport.
    return self
def __exit__(self, type, value, traceback):
    """Releases underlying transport's resources.

    .. warning::
        ONLY use as a context manager if the transport is NOT shared
        with other clients! Exiting the with block will CLOSE the transport
        and may cause errors in other clients!
    """
    # Closing the transport tears down the underlying channel; any
    # in-flight or later calls through this client will fail afterwards.
    self.transport.close()
def set_iam_policy(
    self,
    request: iam_policy_pb2.SetIamPolicyRequest = None,
    *,
    retry: OptionalRetry = gapic_v1.method.DEFAULT,
    timeout: float = None,
    metadata: Sequence[Tuple[str, str]] = (),
) -> policy_pb2.Policy:
    r"""Sets the IAM access control policy on the specified function,
    replacing any existing policy.

    Args:
        request (:class:`~.iam_policy_pb2.SetIamPolicyRequest`):
            The request object. Request message for ``SetIamPolicy``
            method.
        retry (google.api_core.retry.Retry): Designation of what errors,
            if any, should be retried.
        timeout (float): The timeout for this request.
        metadata (Sequence[Tuple[str, str]]): Strings which should be
            sent along with the request as metadata.

    Returns:
        ~.policy_pb2.Policy:
            The Identity and Access Management (IAM) policy now attached
            to the resource.  A ``Policy`` is a collection of
            ``bindings``; each ``binding`` maps one or more ``members``
            (user accounts, service accounts, Google groups, domains) to
            a single ``role``, and may carry a ``condition`` expression
            that further constrains when the binding applies.  See the
            `IAM developer's guide <https://cloud.google.com/iam/docs>`__
            for a full description.
    """
    # SetIamPolicyRequest is a plain protobuf message (not proto-plus),
    # so a dict must be expanded into keyword arguments.
    if isinstance(request, dict):
        request = iam_policy_pb2.SetIamPolicyRequest(**request)

    # Wrap the bare transport method with retry/timeout handling and
    # friendly error translation.
    wrapped_rpc = gapic_v1.method.wrap_method(
        self._transport.set_iam_policy,
        default_timeout=None,
        client_info=DEFAULT_CLIENT_INFO,
    )

    # The backend routes on the resource name, which must travel in the
    # request metadata header.
    routing_header = gapic_v1.routing_header.to_grpc_metadata(
        (("resource", request.resource),)
    )
    metadata = tuple(metadata) + (routing_header,)

    # Issue the RPC and return the resulting policy.
    return wrapped_rpc(request, retry=retry, timeout=timeout, metadata=metadata)
def get_iam_policy(
    self,
    request: iam_policy_pb2.GetIamPolicyRequest = None,
    *,
    retry: OptionalRetry = gapic_v1.method.DEFAULT,
    timeout: float = None,
    metadata: Sequence[Tuple[str, str]] = (),
) -> policy_pb2.Policy:
    r"""Gets the IAM access control policy for a function.

    Returns an empty policy if the function exists and does not have a
    policy set.

    Args:
        request (:class:`~.iam_policy_pb2.GetIamPolicyRequest`):
            The request object. Request message for ``GetIamPolicy``
            method.
        retry (google.api_core.retry.Retry): Designation of what errors,
            if any, should be retried.
        timeout (float): The timeout for this request.
        metadata (Sequence[Tuple[str, str]]): Strings which should be
            sent along with the request as metadata.

    Returns:
        ~.policy_pb2.Policy:
            The Identity and Access Management (IAM) policy attached to
            the resource.  A ``Policy`` is a collection of ``bindings``;
            each ``binding`` maps one or more ``members`` (user accounts,
            service accounts, Google groups, domains) to a single
            ``role``, and may carry a ``condition`` expression that
            further constrains when the binding applies.  See the
            `IAM developer's guide <https://cloud.google.com/iam/docs>`__
            for a full description.
    """
    # GetIamPolicyRequest is a plain protobuf message (not proto-plus),
    # so a dict must be expanded into keyword arguments.
    if isinstance(request, dict):
        request = iam_policy_pb2.GetIamPolicyRequest(**request)

    # Wrap the bare transport method with retry/timeout handling and
    # friendly error translation.
    wrapped_rpc = gapic_v1.method.wrap_method(
        self._transport.get_iam_policy,
        default_timeout=None,
        client_info=DEFAULT_CLIENT_INFO,
    )

    # The backend routes on the resource name, which must travel in the
    # request metadata header.
    routing_header = gapic_v1.routing_header.to_grpc_metadata(
        (("resource", request.resource),)
    )
    metadata = tuple(metadata) + (routing_header,)

    # Issue the RPC and return the fetched policy.
    return wrapped_rpc(request, retry=retry, timeout=timeout, metadata=metadata)
def test_iam_permissions(
    self,
    request: iam_policy_pb2.TestIamPermissionsRequest = None,
    *,
    retry: OptionalRetry = gapic_v1.method.DEFAULT,
    timeout: float = None,
    metadata: Sequence[Tuple[str, str]] = (),
) -> iam_policy_pb2.TestIamPermissionsResponse:
    r"""Tests the specified IAM permissions against the IAM access
    control policy for a function.

    If the function does not exist, this returns an empty set of
    permissions rather than a NOT_FOUND error.

    Args:
        request (:class:`~.iam_policy_pb2.TestIamPermissionsRequest`):
            The request object. Request message for
            ``TestIamPermissions`` method.
        retry (google.api_core.retry.Retry): Designation of what errors,
            if any, should be retried.
        timeout (float): The timeout for this request.
        metadata (Sequence[Tuple[str, str]]): Strings which should be
            sent along with the request as metadata.

    Returns:
        ~.iam_policy_pb2.TestIamPermissionsResponse:
            Response message for ``TestIamPermissions`` method.
    """
    # TestIamPermissionsRequest is a plain protobuf message (not
    # proto-plus), so a dict must be expanded into keyword arguments.
    if isinstance(request, dict):
        request = iam_policy_pb2.TestIamPermissionsRequest(**request)

    # Wrap the bare transport method with retry/timeout handling and
    # friendly error translation.
    wrapped_rpc = gapic_v1.method.wrap_method(
        self._transport.test_iam_permissions,
        default_timeout=None,
        client_info=DEFAULT_CLIENT_INFO,
    )

    # The backend routes on the resource name, which must travel in the
    # request metadata header.
    routing_header = gapic_v1.routing_header.to_grpc_metadata(
        (("resource", request.resource),)
    )
    metadata = tuple(metadata) + (routing_header,)

    # Issue the RPC and return the subset of permissions the caller holds.
    return wrapped_rpc(request, retry=retry, timeout=timeout, metadata=metadata)
# Attach the installed google-cloud-pubsub version to outgoing requests
# (used for client telemetry headers).  Fall back to an anonymous
# ClientInfo when the distribution metadata is unavailable, e.g. for
# vendored or non-packaged installs.
try:
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
        client_library_version=pkg_resources.get_distribution(
            "google-cloud-pubsub",
        ).version,
    )
except pkg_resources.DistributionNotFound:
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()

# Public API of this module.
__all__ = ("SubscriberClient",)
| {
"content_hash": "2ec352a429c52780bfd16f2bc904b146",
"timestamp": "",
"source": "github",
"line_count": 2629,
"max_line_length": 107,
"avg_line_length": 41.7527577025485,
"alnum_prop": 0.5875027330369507,
"repo_name": "googleapis/sphinx-docfx-yaml",
"id": "6f08e2792e1a65b5bed7ce0c07bcc0a34fc38912",
"size": "110368",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tests/testdata/gapic-combo/google/pubsub_v1/services/subscriber/client.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1964"
},
{
"name": "Python",
"bytes": "220912"
},
{
"name": "Shell",
"bytes": "29757"
}
],
"symlink_target": ""
} |
from emd_functions import * | {
"content_hash": "e43d1d8bbe635fb79f9f5d99fe64e398",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 27,
"avg_line_length": 27,
"alnum_prop": 0.8148148148148148,
"repo_name": "parkus/emd",
"id": "1645515561301330600546652feeefd6f05509c4",
"size": "27",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "14370"
}
],
"symlink_target": ""
} |
from django.db import models


class Paste(models.Model):
    """A single pasted snippet: a short display name plus the pasted code."""

    # Human-readable label for the paste.
    name = models.CharField(max_length=128)
    # The pasted source text; may be left empty.
    code = models.TextField(blank=True)

    def __unicode__(self):
        # Python 2 text representation (Django 1.x on py2 uses this).
        return "paste number " + str(self.pk)

    def __str__(self):
        # Python 3 ignores __unicode__; without __str__, the admin and
        # shell would show the unhelpful default object repr.
        return "paste number " + str(self.pk)
| {
"content_hash": "467524383ad5a8fb700b60a00baca073",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 40,
"avg_line_length": 22.11111111111111,
"alnum_prop": 0.7236180904522613,
"repo_name": "piratos/paste",
"id": "294cea7694a7f320ce2d30e4679d7b78b90203e5",
"size": "199",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "home/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "113881"
},
{
"name": "HTML",
"bytes": "4136"
},
{
"name": "Python",
"bytes": "4504"
}
],
"symlink_target": ""
} |
import unittest
import bmemcached
class DistributedClientHashingTest(unittest.TestCase):
    """Checks that key -> server hashing is stable across client instances."""

    def test_get_server_is_consistent(self):
        key = 'the_key'
        servers = ['localhost:11211', 'localhost:11212', 'localhost:11213']
        # A brand-new client must map the same key to the same server
        # every time; otherwise cached entries would be orphaned whenever
        # a client is recreated.
        for _attempt in range(10):
            fresh_client = bmemcached.DistributedClient(servers)
            self.assertEqual(fresh_client._get_server(key).port, 11211)
| {
"content_hash": "413fd16f7edaca3c02e843827b89d31b",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 75,
"avg_line_length": 32.5,
"alnum_prop": 0.6743589743589744,
"repo_name": "jaysonsantos/python-binary-memcached",
"id": "87bea13fb2577defc4736ee23574cd7d0bbd8f74",
"size": "390",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/test_distributed_client_hashing.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "108932"
},
{
"name": "Shell",
"bytes": "479"
}
],
"symlink_target": ""
} |
import pytest
from .as_status_codes import AerospikeStatus
from .index_helpers import ensure_dropped_index
from .test_base_class import TestBaseClass
from aerospike import exception as e
import aerospike
class TestIndex(object):
    """Tests for secondary-index creation and removal via the client API.

    Each test creates (and then drops) integer/string secondary indexes in
    the ``test`` namespace.  Servers <= 6.0 raise ``IndexFoundError`` /
    ``IndexNotFound`` for duplicate creates and missing drops while newer
    servers do not, so several tests accept either outcome based on
    ``self.server_version`` (assumed to be provided by the connection
    fixture / base class — not defined in this file).
    """

    @pytest.fixture(autouse=True)
    def setup(self, request, as_connection):
        # Seed five records so the created indexes have data to cover.
        for i in range(5):
            key = ("test", "demo", i)
            rec = {"name": "name%s" % (str(i)), "addr": "name%s" % (str(i)), "age": i, "no": i}
            as_connection.put(key, rec)

        def teardown():
            # Drop any index a test may have left behind, then delete the
            # seeded records.
            ensure_dropped_index(self.as_connection, "test", "age_index")
            ensure_dropped_index(self.as_connection, "test", "name_index")
            for i in range(5):
                key = ("test", "demo", i)
                # TODO: unneeded variable?
                rec = {"name": "name%s" % (str(i)), "addr": "name%s" % (str(i)), "age": i, "no": i}  # noqa: F841
                as_connection.remove(key)

        request.addfinalizer(teardown)

    def test_create_indexes_with_no_parameters(self):
        """
        Invoke index_string_create() / index_integer_create() without any
        mandatory parameters.
        """
        with pytest.raises(TypeError) as typeError:
            self.as_connection.index_string_create()
        assert "argument 'ns' (pos 1)" in str(typeError.value)
        with pytest.raises(TypeError) as typeError:
            self.as_connection.index_integer_create()
        assert "argument 'ns' (pos 1)" in str(typeError.value)

    def test_create_integer_index_with_correct_parameters(self):
        """
        Invoke createindex() with correct arguments
        """
        policy = {}
        retobj = self.as_connection.index_integer_create("test", "demo", "age", "age_index", policy)
        assert retobj == AerospikeStatus.AEROSPIKE_OK
        self.as_connection.index_remove("test", "age_index", policy)
        ensure_dropped_index(self.as_connection, "test", "age_index")

    def test_create_integer_index_with_set_name_too_long(self):
        # Invoke createindex with a set name beyond the maximum
        set_name = "a" * 128
        policy = {}
        with pytest.raises(e.InvalidRequest) as err_info:
            self.as_connection.index_integer_create("test", set_name, "age", "age_index", policy)
        err_code = err_info.value.code
        assert err_code is AerospikeStatus.AEROSPIKE_ERR_REQUEST_INVALID

    def test_create_integer_index_with_incorrect_namespace(self):
        """
        Invoke createindex() with non existent namespace
        """
        policy = {}
        with pytest.raises(e.InvalidRequest) as err_info:
            self.as_connection.index_integer_create("fake_namespace", "demo", "age", "age_index", policy)
        err_code = err_info.value.code
        assert err_code is AerospikeStatus.AEROSPIKE_ERR_REQUEST_INVALID

    def test_create_integer_index_with_incorrect_set(self):
        """
        Invoke createindex() with nonexistent set
        It should succeed
        """
        policy = {}
        retobj = self.as_connection.index_integer_create("test", "demo1", "age", "age_index", policy)
        assert retobj == AerospikeStatus.AEROSPIKE_OK
        self.as_connection.index_remove("test", "age_index", policy)
        ensure_dropped_index(self.as_connection, "test", "age_index")

    def test_create_integer_index_with_incorrect_bin(self):
        """
        Invoke createindex() with a nonexistent bin
        """
        policy = {}
        retobj = self.as_connection.index_integer_create("test", "demo", "fake_bin", "age_index", policy)
        assert retobj == AerospikeStatus.AEROSPIKE_OK
        self.as_connection.index_remove("test", "age_index", policy)
        ensure_dropped_index(self.as_connection, "test", "age_index")

    def test_create_integer_index_with_namespace_is_none(self):
        """
        Invoke createindex() with namespace is None
        """
        policy = {}
        with pytest.raises(e.ParamError) as err_info:
            self.as_connection.index_integer_create(None, "demo", "age", "age_index", policy)
        err_code = err_info.value.code
        assert err_code is AerospikeStatus.AEROSPIKE_ERR_PARAM

    def test_creat_integer_eindex_with_set_is_none(self):
        # Invoke createindex() with set is None
        policy = {}
        retobj = self.as_connection.index_integer_create("test", None, "age", "age_index", policy)
        assert retobj == AerospikeStatus.AEROSPIKE_OK
        self.as_connection.index_remove("test", "age_index", policy)
        ensure_dropped_index(self.as_connection, "test", "age_index")

    def test_create_integer_index_with_set_is_int(self):
        # Invoke createindex() with set is int
        policy = {}
        with pytest.raises(e.ParamError) as err_info:
            self.as_connection.index_integer_create("test", 1, "age", "age_index", policy)
        err_code = err_info.value.code
        assert err_code is AerospikeStatus.AEROSPIKE_ERR_PARAM

    def test_create_integer_index_with_bin_is_none(self):
        """
        Invoke createindex() with bin is None
        """
        policy = {}
        with pytest.raises(e.ParamError) as err_info:
            self.as_connection.index_integer_create("test", "demo", None, "age_index", policy)
        err_code = err_info.value.code
        assert err_code is AerospikeStatus.AEROSPIKE_ERR_PARAM

    def test_create_integer_index_with_index_is_none(self):
        """
        Invoke createindex() with index_name is None
        """
        policy = {}
        with pytest.raises(e.ParamError) as err_info:
            self.as_connection.index_integer_create("test", "demo", "age", None, policy)
        err_code = err_info.value.code
        assert err_code is AerospikeStatus.AEROSPIKE_ERR_PARAM

    def test_create_same_integer_index_multiple_times(self):
        """
        Invoke createindex() with the same arguments
        multiple times on the same bin
        """
        policy = {}
        retobj = self.as_connection.index_integer_create("test", "demo", "age", "age_index", policy)
        assert retobj == AerospikeStatus.AEROSPIKE_OK
        try:
            retobj = self.as_connection.index_integer_create("test", "demo", "age", "age_index", policy)
        except e.IndexFoundError:
            # Only servers <= 6.0 report a duplicate create as an error.
            assert self.server_version <= [6, 0]
        self.as_connection.index_remove("test", "age_index", policy)
        ensure_dropped_index(self.as_connection, "test", "age_index")

    def test_create_same_integer_index_multiple_times_different_bin(self):
        """
        Invoke createindex() with the same index name,
        multiple times on different bin names
        """
        policy = {}
        retobj = self.as_connection.index_integer_create("test", "demo", "age", "age_index", policy)
        assert retobj == AerospikeStatus.AEROSPIKE_OK
        with pytest.raises(e.IndexFoundError):
            retobj = self.as_connection.index_integer_create("test", "demo", "no", "age_index", policy)
        self.as_connection.index_remove("test", "age_index", policy)
        ensure_dropped_index(self.as_connection, "test", "age_index")

    def test_create_different_integer_index_multiple_times_same_bin(self):
        """
        Invoke createindex() with multiple times on same bin with different
        name
        """
        policy = {}
        retobj = self.as_connection.index_integer_create("test", "demo", "age", "age_index", policy)
        assert retobj == AerospikeStatus.AEROSPIKE_OK
        try:
            retobj = self.as_connection.index_integer_create("test", "demo", "age", "age_index1", policy)
            self.as_connection.index_remove("test", "age_index1", policy)
        except e.IndexFoundError:
            assert self.server_version <= [6, 0]
        ensure_dropped_index(self.as_connection, "test", "age_index")

    def test_create_integer_index_with_policy(self):
        """
        Invoke createindex() with policy
        """
        policy = {"timeout": 1000}
        retobj = self.as_connection.index_integer_create("test", "demo", "age", "age_index", policy)
        ensure_dropped_index(self.as_connection, "test", "age_index")
        assert retobj == AerospikeStatus.AEROSPIKE_OK

    def test_create_string_index_positive(self):
        """
        Invoke create string index() with correct arguments
        """
        policy = {}
        retobj = self.as_connection.index_string_create("test", "demo", "name", "name_index", policy)
        self.as_connection.index_remove("test", "name_index", policy)
        ensure_dropped_index(self.as_connection, "test", "name_index")
        assert retobj == AerospikeStatus.AEROSPIKE_OK

    def test_create_string_index_with_set_length_too_long(self):
        # Invoke createindex() with correct arguments set length extra
        set_name = "a" * 100
        policy = {}
        with pytest.raises(e.InvalidRequest) as err_info:
            self.as_connection.index_string_create("test", set_name, "name", "name_index", policy)
        err_code = err_info.value.code
        assert err_code is AerospikeStatus.AEROSPIKE_ERR_REQUEST_INVALID

    def test_create_string_index_with_correct_parameters_ns_length_extra(self):
        # Invoke createindex() with correct arguments ns length extra
        ns_name = "a" * 50
        policy = {}
        with pytest.raises(e.InvalidRequest) as err_info:
            self.as_connection.index_string_create(ns_name, "demo", "name", "name_index", policy)
        err_code = err_info.value.code
        assert err_code is AerospikeStatus.AEROSPIKE_ERR_REQUEST_INVALID

    def test_create_string_index_with_incorrect_namespace(self):
        """
        Invoke create string index() with incorrect namespace
        """
        policy = {}
        with pytest.raises(e.InvalidRequest) as err_info:
            self.as_connection.index_string_create("fake_namespace", "demo", "name", "name_index", policy)
        err_code = err_info.value.code
        assert err_code is AerospikeStatus.AEROSPIKE_ERR_REQUEST_INVALID

    def test_create_string_index_with_incorrect_set(self):
        """
        Invoke create string index() with incorrect set
        """
        policy = {}
        retobj = self.as_connection.index_string_create("test", "demo1", "name", "name_index", policy)
        self.as_connection.index_remove("test", "name_index", policy)
        ensure_dropped_index(self.as_connection, "test", "name_index")
        assert retobj == AerospikeStatus.AEROSPIKE_OK

    def test_create_string_index_with_incorrect_bin(self):
        """
        Invoke create string index() with incorrect bin
        """
        policy = {}
        retobj = self.as_connection.index_string_create("test", "demo", "name1", "name_index", policy)
        self.as_connection.index_remove("test", "name_index", policy)
        ensure_dropped_index(self.as_connection, "test", "name_index")
        assert retobj == AerospikeStatus.AEROSPIKE_OK

    def test_create_string_index_with_namespace_is_none(self):
        """
        Invoke create string index() with namespace is None
        """
        policy = {}
        with pytest.raises(e.ParamError) as err_info:
            self.as_connection.index_string_create(None, "demo", "name", "name_index", policy)
        err_code = err_info.value.code
        assert err_code is AerospikeStatus.AEROSPIKE_ERR_PARAM

    def test_create_string_index_with_set_is_none(self):
        # Invoke create string index() with set is None
        policy = {}
        retobj = self.as_connection.index_string_create("test", None, "name", "name_index", policy)
        self.as_connection.index_remove("test", "name_index", policy)
        ensure_dropped_index(self.as_connection, "test", "name_index")
        assert retobj == AerospikeStatus.AEROSPIKE_OK

    def test_create_string_index_with_bin_is_none(self):
        """
        Invoke create string index() with bin is None
        """
        policy = {}
        with pytest.raises(e.ParamError) as err_info:
            self.as_connection.index_string_create("test", "demo", None, "name_index", policy)
        err_code = err_info.value.code
        assert err_code is AerospikeStatus.AEROSPIKE_ERR_PARAM

    def test_create_string_index_with_index_is_none(self):
        """
        Invoke create_string_index() with index name is None
        """
        policy = {}
        with pytest.raises(e.ParamError) as err_info:
            self.as_connection.index_string_create("test", "demo", "name", None, policy)
        err_code = err_info.value.code
        assert err_code is AerospikeStatus.AEROSPIKE_ERR_PARAM

    def test_create_same_string_index_multiple_times(self):
        """
        Invoke create string index() with multiple times on same bin
        """
        policy = {}
        retobj = self.as_connection.index_string_create("test", "demo", "name", "name_index", policy)
        assert retobj == AerospikeStatus.AEROSPIKE_OK
        try:
            retobj = self.as_connection.index_string_create("test", "demo", "name", "name_index", policy)
        except e.IndexFoundError:
            # Only servers <= 6.0 report a duplicate create as an error.
            assert self.server_version <= [6, 0]
        self.as_connection.index_remove("test", "name_index", policy)
        ensure_dropped_index(self.as_connection, "test", "name_index")

    def test_create_same_string_index_multiple_times_different_bin(self):
        """
        Invoke create string index() with multiple times on different bin
        """
        policy = {}
        retobj = self.as_connection.index_string_create("test", "demo", "name", "name_index", policy)
        assert retobj == AerospikeStatus.AEROSPIKE_OK
        with pytest.raises(e.IndexFoundError):
            retobj = self.as_connection.index_string_create("test", "demo", "addr", "name_index", policy)
        # Drop the index exactly once; the previous version of this test
        # called index_remove a second time on the already-dropped index,
        # which raises IndexNotFound on servers <= 6.0.
        self.as_connection.index_remove("test", "name_index", policy)
        ensure_dropped_index(self.as_connection, "test", "name_index")
        assert retobj == AerospikeStatus.AEROSPIKE_OK

    def test_create_different_string_index_multiple_times_same_bin(self):
        """
        Invoke create string index() with multiple times on same
        bin with different name
        """
        policy = {}
        retobj = self.as_connection.index_string_create("test", "demo", "name", "name_index", policy)
        assert retobj == AerospikeStatus.AEROSPIKE_OK
        try:
            retobj = self.as_connection.index_string_create("test", "demo", "name", "name_index1", policy)
            self.as_connection.index_remove("test", "name_index1", policy)
        except e.IndexFoundError:
            assert self.server_version <= [6, 0]
        ensure_dropped_index(self.as_connection, "test", "name_index")

    def test_create_string_index_with_policy(self):
        """
        Invoke create string index() with policy
        """
        policy = {"timeout": 1000}
        retobj = self.as_connection.index_string_create("test", "demo", "name", "name_index", policy)
        assert retobj == AerospikeStatus.AEROSPIKE_OK
        self.as_connection.index_remove("test", "name_index", policy)
        ensure_dropped_index(self.as_connection, "test", "name_index")

    def test_drop_invalid_index(self):
        """
        Invoke drop invalid index()
        """
        policy = {}
        try:
            self.as_connection.index_remove("test", "notarealindex", policy)
        except e.IndexNotFound:
            # Only servers <= 6.0 report a missing index as an error.
            assert self.server_version <= [6, 0]

    def test_drop_valid_index(self):
        """
        Invoke drop valid index()
        """
        policy = {}
        self.as_connection.index_integer_create("test", "demo", "age", "age_index", policy)
        retobj = self.as_connection.index_remove("test", "age_index", policy)
        ensure_dropped_index(self.as_connection, "test", "age_index")
        assert retobj == AerospikeStatus.AEROSPIKE_OK

    def test_drop_valid_index_policy(self):
        """
        Invoke drop valid index() policy
        """
        policy = {"timeout": 1000}
        self.as_connection.index_integer_create("test", "demo", "age", "age_index", policy)
        retobj = self.as_connection.index_remove("test", "age_index", policy)
        ensure_dropped_index(self.as_connection, "test", "age_index")
        assert retobj == AerospikeStatus.AEROSPIKE_OK

    def test_createindex_with_long_index_name(self):
        # Invoke createindex() with long index name
        policy = {}
        with pytest.raises(e.InvalidRequest):
            self.as_connection.index_integer_create("test", "demo", "age", "index" * 100, policy)

    def test_create_string_index_unicode_positive(self):
        """
        Invoke create string index() with correct arguments
        """
        policy = {}
        retobj = self.as_connection.index_string_create("test", "demo", "name", "uni_name_index", policy)
        self.as_connection.index_remove("test", "uni_name_index", policy)
        ensure_dropped_index(self.as_connection, "test", "uni_name_index")
        assert retobj == AerospikeStatus.AEROSPIKE_OK

    def test_createindex_integer_unicode(self):
        """
        Invoke createindex() with correct arguments
        """
        policy = {}
        retobj = self.as_connection.index_integer_create("test", "demo", "age", "uni_age_index", policy)
        assert retobj == AerospikeStatus.AEROSPIKE_OK
        self.as_connection.index_remove("test", "uni_age_index", policy)
        ensure_dropped_index(self.as_connection, "test", "uni_age_index")

    def test_createindex_with_correct_parameters_without_connection(self):
        # Invoke createindex() with correct arguments without connection
        policy = {}
        config = TestBaseClass.get_connection_config()
        client1 = aerospike.client(config)
        client1.close()
        with pytest.raises(e.ClusterError) as err_info:
            client1.index_integer_create("test", "demo", "age", "age_index", policy)
        err_code = err_info.value.code
        assert err_code is AerospikeStatus.AEROSPIKE_CLUSTER_ERROR

    def test_index_remove_no_args(self):
        with pytest.raises(TypeError):
            self.as_connection.index_remove()

    def test_index_remove_no_index(self):
        with pytest.raises(TypeError):
            self.as_connection.index_remove("test")

    def test_index_remove_extra_args(self):
        # pass 'ns', 'idx_name', 'policy', and an extra argument
        with pytest.raises(TypeError):
            self.as_connection.index_remove("test", "demo", {}, "index_name")

    @pytest.mark.parametrize(
        "ns, idx_name, policy",
        (("test", "idx", "policy"), ("test", 5, {}), (5, "idx", {}), ("test", None, {}), (None, "idx", {})),
    )
    def test_index_remove_wrong_arg_types(self, ns, idx_name, policy):
        with pytest.raises(e.ParamError):
            self.as_connection.index_remove(ns, idx_name, policy)
| {
"content_hash": "f3c83494dabc69a9308880de9eef9f53",
"timestamp": "",
"source": "github",
"line_count": 472,
"max_line_length": 113,
"avg_line_length": 40.470338983050844,
"alnum_prop": 0.6196209820961156,
"repo_name": "aerospike/aerospike-client-python",
"id": "f298fd6c9f106b5cc1ea5ff0fec40d0bda2fe721",
"size": "19127",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/new_tests/test_index.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "1347774"
},
{
"name": "Lua",
"bytes": "6526"
},
{
"name": "Python",
"bytes": "2103805"
},
{
"name": "Shell",
"bytes": "6716"
}
],
"symlink_target": ""
} |
from sympy.core.symbol import symbols
from sympy.printing.ccode import ccode
from sympy.codegen.ast import Declaration, Variable, float64, int64
from sympy.codegen.cnodes import (
alignof, CommaOperator, goto, Label, PreDecrement, PostDecrement, PreIncrement, PostIncrement,
sizeof, union, struct
)
# Module-level symbolic variables shared by every test below.
x, y = symbols('x y')
def test_alignof():
    """alignof prints as a C alignof() call and rebuilds from its args."""
    expr = alignof(x)
    assert ccode(expr) == 'alignof(x)'
    # Rebuilding a node from its own args must round-trip to equality.
    assert expr.func(*expr.args) == expr
def test_CommaOperator():
    """Comma operator prints as a parenthesized, comma-separated sequence."""
    comma = CommaOperator(PreIncrement(x), 2*x)
    assert ccode(comma) == '(++(x), 2*x)'
    assert comma.func(*comma.args) == comma
def test_goto_Label():
    """goto/Label print as C statements and compare by their label name."""
    name = 'early_exit'
    jump = goto(name)
    assert jump.func(*jump.args) == jump
    # goto nodes with different targets are distinct.
    assert jump != goto('foobar')
    assert ccode(jump) == 'goto early_exit'
    label = Label(name)
    assert label.is_Atom
    assert ccode(label) == 'early_exit:'
    # The goto's .label attribute is the matching Label node.
    assert jump.label == label
    assert label == Label(name)
    assert label != Label('foobar')
def test_PreDecrement():
    """Prefix decrement prints as --(x)."""
    node = PreDecrement(x)
    assert node.func(*node.args) == node
    assert ccode(node) == '--(x)'
def test_PostDecrement():
    """Postfix decrement prints as (x)--."""
    node = PostDecrement(x)
    assert node.func(*node.args) == node
    assert ccode(node) == '(x)--'
def test_PreIncrement():
    """Prefix increment prints as ++(x)."""
    node = PreIncrement(x)
    assert node.func(*node.args) == node
    assert ccode(node) == '++(x)'
def test_PostIncrement():
    """x++ renders with a parenthesised operand."""
    expr = PostIncrement(x)
    assert expr.func(*expr.args) == expr
    assert ccode(expr) == '(x)++'
def test_sizeof():
    """sizeof over a raw type-name string: rendering, rebuild, atoms."""
    tname = 'unsigned int'
    expr = sizeof(tname)
    assert ccode(expr) == 'sizeof(%s)' % tname
    assert expr.func(*expr.args) == expr
    # sizeof is not atomic; its only atom is the type name itself.
    assert not expr.is_Atom
    assert all(atom == tname for atom in expr.atoms())
def test_struct():
    """struct: construction, member-order sensitivity and C rendering."""
    vx, vy = Variable(x, type=float64), Variable(y, type=float64)
    decl = struct('vec2', [vx, vy])
    assert decl.func(*decl.args) == decl
    # Same members in the same order compare equal; order matters.
    assert decl == struct('vec2', (vx, vy))
    assert decl != struct('vec2', (vy, vx))
    assert str(decl.name) == 'vec2'
    assert len(decl.declarations) == 2
    assert all(isinstance(arg, Declaration) for arg in decl.declarations)
    assert ccode(decl) == (
        "struct vec2 {\n"
        "   double x;\n"
        "   double y;\n"
        "}")
def test_union():
    """union: construction, equality and C rendering with mixed types."""
    vx, vy = Variable(x, type=float64), Variable(y, type=int64)
    decl = union('dualuse', [vx, vy])
    assert decl.func(*decl.args) == decl
    assert decl == union('dualuse', (vx, vy))
    assert str(decl.name) == 'dualuse'
    assert len(decl.declarations) == 2
    assert all(isinstance(arg, Declaration) for arg in decl.declarations)
    assert ccode(decl) == (
        "union dualuse {\n"
        "   double x;\n"
        "   int64_t y;\n"
        "}")
| {
"content_hash": "ee743594fe8919ad4a84b01d26bcab4c",
"timestamp": "",
"source": "github",
"line_count": 100,
"max_line_length": 98,
"avg_line_length": 25.68,
"alnum_prop": 0.589563862928349,
"repo_name": "kaushik94/sympy",
"id": "3050fffa1a2f9afdc36c0baa5116b244517d6136",
"size": "2568",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "sympy/codegen/tests/test_cnodes.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "5094"
},
{
"name": "Python",
"bytes": "13553568"
},
{
"name": "Ruby",
"bytes": "304"
},
{
"name": "Scheme",
"bytes": "125"
},
{
"name": "Shell",
"bytes": "4008"
},
{
"name": "TeX",
"bytes": "32356"
},
{
"name": "XSLT",
"bytes": "366202"
}
],
"symlink_target": ""
} |
from GUI.PopUps.FileSavingDialogPopUp import FileSavingDialogPopUp
from kivy.logger import Logger
from Utils.FileHandler import write_file, get_filename_only
class ProjectCopyDialogPopUp(FileSavingDialogPopUp):
    """Pop-up dialog for copying a source file into a project folder.

    Specializes FileSavingDialogPopUp with project-specific labels and
    delegates the actual write to ``save_source``.
    """

    FOLDER_NAME = "project folder"
    DISMISS_BTN_TEXT = "Cancel"

    def __init__(self, source, destination, listener, project_path):
        super(ProjectCopyDialogPopUp, self).__init__(
            source=source,
            destination=destination,
            filename_list=None,
            listener=listener,
            path=project_path,
            folder_name=self.FOLDER_NAME,
            dismiss_button_text=self.DISMISS_BTN_TEXT,
        )

    def replace_file(self):
        """Overwrite the existing destination file with the source content."""
        self.save_source(get_filename_only(self.destination))

    def create_new_file(self):
        """Save the source under the name typed into the 'save as' field."""
        self.save_source(self.ids.save_as.text)

    def save_source(self, filename):
        """Write the source content to *filename* inside the project path.

        Any failure is routed to the inherited ``error`` handler.
        """
        try:
            Logger.debug("PCDPopUp:Creating new file as " + filename)
            write_file(self.path, filename, self.source)
        except Exception as ex:
            self.error(ex)
"content_hash": "e1006d6a542f3aaca92fcce86fd29e2a",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 95,
"avg_line_length": 44.758620689655174,
"alnum_prop": 0.539291217257319,
"repo_name": "RemuTeam/Remu",
"id": "cbd14d1597b2a604920db0a3dd3617567ac2b109",
"size": "1298",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "project/GUI/PopUps/ProjectCopyDialogPopUp.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Gherkin",
"bytes": "221"
},
{
"name": "Python",
"bytes": "218196"
}
],
"symlink_target": ""
} |
"""
Plot function module for basic facts.
"""
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib
import matplotlib.pyplot as plt
# Apply the ggplot look to every figure produced by this module.
plt.style.use('ggplot')
import matplotlib as mpl
# Base blue used by all plots in this module.
bl = '#58ACFA'
def plot_attack_ferq_per_year(attacks):
    """Show a count bar chart of attacks per year.

    ``attacks`` is assumed to be a DataFrame with a 'Year' column -- TODO
    confirm against callers.  NOTE(review): 'ferq' is a typo for 'freq';
    kept because callers use this name.
    """
    plt.subplots(figsize=(15,6))
    sns.countplot(attacks['Year'], color=bl)
    plt.xticks(rotation=90)
    plt.title('Number of terrorist attacks per year')
    plt.ylabel('Frequency\n')
    plt.xlabel('\n Year')
    plt.show()
def plot_attack_types_freq(attacks):
    """Show a count bar chart of attack methods, most frequent first.

    Assumes an 'Attack_type' column -- TODO confirm against callers.
    """
    fig, axes = plt.subplots(figsize=(15,6))
    # Order bars by descending frequency.
    sns.countplot(attacks['Attack_type'],order=attacks['Attack_type'].value_counts().index, color=bl)
    plt.xticks(fontsize=12, rotation=90)
    plt.title('Attacking methods')
    plt.ylabel('Frequency\n')
    plt.xlabel('\n Attack type ')
    fig.autofmt_xdate()
    plt.show()
def plot_target_distribution(attacks):
    """Show a count bar chart of target types, most frequent first.

    Assumes a 'Target_type' column -- TODO confirm against callers.
    """
    fig, axes = plt.subplots(figsize=(15,6))
    # Order bars by descending frequency.
    sns.countplot(attacks['Target_type'],order=attacks['Target_type'].value_counts().index, color=bl)
    plt.xticks(fontsize=12, rotation=90)
    plt.title('Targets distribution')
    plt.ylabel('Frequency\n')
    plt.xlabel('\n Target type')
    fig.autofmt_xdate()
    plt.show()
def plot_attack_freq_by_year_and_region(attacks):
    """Line plot of yearly attack counts, one line per region.

    Saves the figure to 'terrorist_attacks_by_year_and_region.pdf' and
    shows it.  Assumes 'Year' and 'Region' columns -- TODO confirm.
    """
    terror_region = pd.crosstab(attacks['Year'], attacks['Region'])
    axes = plt.subplot(111)
    # BUGFIX: ``plt.cm.spectral`` was deprecated in matplotlib 2.0 and
    # removed in 2.2 (renamed ``nipy_spectral``).  Prefer the new name and
    # fall back to the old one on old installs, so behaviour is unchanged.
    cmap = getattr(plt.cm, 'nipy_spectral', None) or plt.cm.spectral
    axes.set_prop_cycle('color', cmap(np.linspace(0, 1, 12)))
    terror_region.plot(ax=axes)
    fig = plt.gcf()
    fig.set_size_inches(18, 15)
    plt.title('Terrorist activity by year and region')
    plt.ylabel('Frequency \n')
    plt.xlabel('\n Year')
    plt.legend(title='Legend')
    plt.savefig('terrorist_attacks_by_year_and_region.pdf')
    plt.show()
def plot_attack_distribution_by_region(attacks):
    """Stacked horizontal bars: attack-type mix per region.

    Assumes 'Region' and 'Attack_type' columns -- TODO confirm.
    """
    x = pd.crosstab(attacks['Region'], attacks['Attack_type'])
    axes = plt.subplot(111)
    # BUGFIX: ``plt.cm.spectral`` was removed in matplotlib 2.2 (renamed
    # ``nipy_spectral``); use the new name with a fallback for old installs.
    cmap = getattr(plt.cm, 'nipy_spectral', None) or plt.cm.spectral
    x.plot(kind='barh', color=cmap(np.linspace(0, 1, 9)), ax=axes, stacked=True, width=1)
    fig = plt.gcf()
    fig.set_size_inches(12, 8)
    plt.title('Distribution of attack types by region')
    plt.ylabel('Region \n')
    plt.xlabel('\n Frequency')
    plt.legend(title='Legend')
    plt.show()
def plot_most_affected_countries(attacks):
    """Bar chart of the 20 countries with the most attacks.

    Saves 'most_affected_countries.pdf' and shows the figure.  Assumes a
    'Country' column -- TODO confirm against callers.
    """
    fig, axes = plt.subplots(figsize=(18,6))
    # Top 20 countries by attack count.
    sns.barplot(attacks['Country'].value_counts()[:20].index,attacks['Country'].value_counts()[:20].values, color=bl)
    plt.title('Most affected countries')
    plt.xticks(rotation=90)
    plt.ylabel('Frequency \n')
    plt.xlabel('\n Country')
    fig.autofmt_xdate()
    plt.savefig('most_affected_countries.pdf')
    plt.show()
def plot_top15_most_active_terrorist_groups(attacks):
    """Horizontal bar chart of the most active groups.

    Saves 'top15_active_groups.pdf' and shows the figure.  Assumes a
    'Group' column -- TODO confirm.  NOTE(review): the slice [1:15] skips
    index 0 (presumably an 'Unknown' group) and yields 14 bars, not 15 --
    confirm this is intentional.
    """
    sns.barplot(attacks['Group'].value_counts()[1:15].values,attacks['Group'].value_counts()[1:15].index, color=bl)
    plt.xticks(rotation=90)
    fig=plt.gcf()
    fig.set_size_inches(10,8)
    plt.title('Top 15 of most active terrorist groups')
    plt.ylabel('Terrorist group \n')
    plt.xlabel('\n Frequency')
    plt.savefig('top15_active_groups.pdf')
    plt.show()
def joint_plot_coordinates(attacks):
    """KDE joint plot of attack coordinates; saves 'joint_plot_coordinates.pdf'.

    Assumes 'Longitude' and 'Latitude' columns -- TODO confirm.
    NOTE(review): gcf()/set_size_inches acts on the figure current *before*
    sns.jointplot creates its own (sized by its ``size`` argument), so the
    resize here is likely ineffective -- confirm.
    """
    #usefull variable to get the coordinates.
    df_coords = attacks.round({'Longitude':0, 'Latitude':0}).groupby(["Longitude", "Latitude"]).size().to_frame(name = 'count').reset_index()
    fig=plt.gcf()
    fig.set_size_inches(10,8)
    sns.jointplot(x='Longitude', y='Latitude', data=df_coords, kind="kde", color=bl, size=15, stat_func=None, edgecolor="#020000", linewidth=.4)
    plt.savefig('joint_plot_coordinates.pdf')
def killed_num_attacks_relation(attacks):
    """Grouped bars comparing kill counts vs. attack counts per country.

    Takes the 20 deadliest countries, saves the figure as
    'most_affected_countries_with_kills.pdf' and shows it.  Assumes
    'Country' and 'Killed' columns -- TODO confirm against callers.
    """
    # Number of attacks for each country.
    number_atk_country = attacks['Country'].value_counts()
    # Total kills per country; keep the 20 deadliest.
    killed_country = attacks[['Country', 'Killed']]
    killed_country = killed_country.groupby(by='Country').sum().reset_index().sort_values(by='Killed', ascending=False)[:20]
    countries = list(killed_country['Country'])
    # Attack count for each of those countries, in the same row order.
    countries_atk = [number_atk_country.loc[c] for c in countries]
    killed_country['Number attacks'] = countries_atk
    # (Removed a dead ``killed_country.head()`` call whose result was
    # silently discarded.)
    # Figure parameters.
    mpl.rcParams['figure.figsize'] = (15, 5)
    mpl.rcParams['figure.dpi'] = 100
    # Grouped bar chart: kills in red, attack counts in blue.
    histo = killed_country.plot.bar(color=['#FA5858', bl])
    histo.set_xticklabels(killed_country['Country'], rotation=45)
    histo.set_title('Attacks and killed people per country')
    plt.ylabel('Frequency \n')
    plt.xlabel('\n Country')
    plt.tight_layout()
    plt.savefig('most_affected_countries_with_kills.pdf')
    plt.show()
"content_hash": "eb8982535984f2179214e473b897e6b7",
"timestamp": "",
"source": "github",
"line_count": 128,
"max_line_length": 141,
"avg_line_length": 34.9296875,
"alnum_prop": 0.7219861328561843,
"repo_name": "mdeff/ntds_2017",
"id": "23f8163979d74b4043072d250ad5b6cf5b0d5124",
"size": "4471",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "projects/reports/terrorist_attacks/project/plots.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "6481"
},
{
"name": "HTML",
"bytes": "25493"
},
{
"name": "JavaScript",
"bytes": "30452"
},
{
"name": "Jupyter Notebook",
"bytes": "196798661"
},
{
"name": "Makefile",
"bytes": "947"
},
{
"name": "Python",
"bytes": "447355"
},
{
"name": "TeX",
"bytes": "22767"
}
],
"symlink_target": ""
} |
import re
import ipdb # noqa
# Matches compression markers like "(10x2)"; groups capture
# (segment length, repeat count).
marker_pattern = re.compile(r'[\(](\d+)x(\d+)[\)]')
compressed = ''
remaining = ''
decompressed = ''
with open('input.txt', 'r') as input_file:
    remaining = compressed = input_file.read().strip()
# iterative partial decompression
# Part 1: expand markers one level only -- markers inside a repeated
# segment are copied verbatim, not re-expanded.
while remaining:
    match = marker_pattern.search(remaining)
    if match is not None:
        # Text before the marker is literal output.
        decompressed += remaining[0:match.start()]
        length, repeat = match.groups()
        length = int(length)
        repeat = int(repeat)
        tag_end = match.end()
        segment_end = tag_end + length
        # Copy the marked segment ``repeat`` times.
        repeat_segment = remaining[tag_end:segment_end]
        repeat_segment *= repeat
        decompressed += repeat_segment
        remaining = remaining[segment_end:]
    else:
        # No more markers: the rest is literal.
        decompressed += remaining
        remaining = ''
print "ANSWER 1: %s" % len(decompressed.strip())
# full recursive decompression count only
def full_decompress(compressed):
    """Return the fully-decompressed length of *compressed*.

    Part 2: markers inside repeated segments are expanded recursively;
    only the length is computed, never the expanded text.
    """
    total = 0
    rest = compressed
    while rest:
        m = marker_pattern.search(rest)
        if m is None:
            # No markers left: everything remaining counts literally.
            total += len(rest)
            break
        # Literal text before the marker.
        total += m.start()
        seg_len, seg_repeat = (int(g) for g in m.groups())
        seg_start = m.end()
        seg_stop = seg_start + seg_len
        # Recurse into the marked segment and weight by its repeat count.
        total += full_decompress(rest[seg_start:seg_stop]) * seg_repeat
        rest = rest[seg_stop:]
    return total
# Part 2 answer: fully recursive decompressed length.
print "ANSWER 2: %s" % full_decompress(compressed)
| {
"content_hash": "8f9ceaad4a53e2245fe7c80d1fd51bd7",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 75,
"avg_line_length": 30.945454545454545,
"alnum_prop": 0.6139835487661575,
"repo_name": "Apreche/advent2016",
"id": "f900530f2ded906d53ce080e5380bcb314e42b11",
"size": "1724",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "2016/09/decompress.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "16719"
}
],
"symlink_target": ""
} |
import array
from pymaging.colors import Color
from pymaging.affine import AffineTransform
from pymaging.exceptions import FormatNotSupported, InvalidColor
from pymaging.formats import get_format, get_format_objects
from pymaging.helpers import get_transformed_dimensions
from pymaging.pixelarray import get_pixel_array
from pymaging.resample import nearest
import os
class Image(object):
    """Lazily-loaded image: mode, dimensions, pixel data and optional palette.

    ``loader`` is a callable returning ``(pixelarray, palette)``; it is only
    invoked on first access via ``load()``.

    NOTE: ``pixels`` and ``pixelsize`` are plain *methods* in this copy
    (their ``@property`` decorators are commented out), so they must be
    called -- ``self.pixels()`` -- everywhere.  Several call sites below
    originally read them as attributes, which could never work; those are
    fixed here (marked BUGFIX).
    """

    def __init__(self, mode, width, height, loader, meta=None):
        self.mode = mode
        self.width = width
        self.height = height
        self.loader = loader
        self.meta = meta
        self._pixelarray = None  # filled in by load()
        self._palette = None  # filled in by load()
        self.reverse_palette = None  # built lazily by get_reverse_palette()

    # @property
    def pixels(self):
        """Return the pixel array, loading it on first use."""
        self.load()
        return self._pixelarray

    # @property
    def pixelsize(self):
        """Number of array entries per pixel (1 for paletted images)."""
        return self.pixels().pixelsize

    @property
    def palette(self):
        self.load()
        return self._palette

    #==========================================================================
    # Constructors
    #==========================================================================

    @classmethod
    def open(cls, fileobj):
        """Open *fileobj* with the first registered format that accepts it."""
        for format in get_format_objects():
            image = format.open(fileobj)
            if image:
                return image
        raise FormatNotSupported()

    @classmethod
    def open_from_path(cls, filepath):
        """Open the image at *filepath* (binary mode)."""
        with open(filepath, 'rb') as fobj:
            return cls.open(fobj)

    @classmethod
    def new(cls, mode, width, height, background_color, palette=None, meta=None):
        """Create a width x height image filled with *background_color*."""
        color = background_color.to_pixel(mode.length)
        pixel_array = get_pixel_array(array.array('B', color) * width * height, width, height, mode.length)
        return LoadedImage(mode, width, height, pixel_array, palette=palette, meta=meta)

    def load(self):
        """Invoke the loader once; subsequent calls are no-ops."""
        if self._pixelarray is not None:
            return
        self._pixelarray, self._palette = self.loader()

    #==========================================================================
    # Saving
    #==========================================================================

    def save(self, fileobj, format):
        """Serialize to *fileobj* using the named format."""
        format_object = get_format(format)
        if not format_object:
            raise FormatNotSupported(format)
        format_object.save(self, fileobj)

    def save_to_path(self, filepath, format=None):
        """Save to *filepath*; format defaults to the file extension."""
        if not format:
            format = os.path.splitext(filepath)[1][1:]
        with open(filepath, 'wb') as fobj:
            self.save(fobj, format)

    #==========================================================================
    # Helpers
    #==========================================================================

    def get_reverse_palette(self):
        """Return the color -> palette-index mapping, building it lazily."""
        if self.reverse_palette is None:
            self._fill_reverse_palette()
        return self.reverse_palette

    def _fill_reverse_palette(self):
        self.reverse_palette = {}
        if not self.palette:
            return
        for index, color in enumerate(self.palette):
            color_obj = Color.from_pixel(color)
            # NOTE(review): result discarded -- presumably caches the
            # hexcode on the Color object; confirm before removing.
            color_obj.to_hexcode()
            self.reverse_palette[color_obj] = index

    def _copy(self, pixels, **kwargs):
        """Return a LoadedImage sharing this image's attributes, with overrides."""
        defaults = {
            'mode': self.mode,
            'width': self.width,
            'height': self.height,
            'palette': self.palette,
            'meta': self.meta,
        }
        defaults.update(kwargs)
        defaults['pixels'] = pixels
        return LoadedImage(**defaults)

    #==========================================================================
    # Geometry Operations
    #==========================================================================

    def resize(self, width, height, resample_algorithm=nearest, resize_canvas=True):
        """Return a resized copy produced by *resample_algorithm*."""
        pixels = resample_algorithm.resize(
            self, width, height, resize_canvas=resize_canvas
        )
        return self._copy(pixels)

    def affine(self, transform, resample_algorithm=nearest, resize_canvas=True):
        """
        Returns a copy of this image transformed by the given
        AffineTransform.
        """
        pixels = resample_algorithm.affine(
            self,
            transform,
            resize_canvas=resize_canvas,
        )
        return self._copy(pixels)

    def rotate(self, degrees, clockwise=False, resample_algorithm=nearest, resize_canvas=True):
        """
        Returns the image obtained by rotating this image by the
        given number of degrees.
        Anticlockwise unless clockwise=True is given.
        """
        # translate to the origin first, then rotate, then translate back
        transform = AffineTransform()
        transform = transform.translate(self.width * -0.5, self.height * -0.5)
        transform = transform.rotate(degrees, clockwise=clockwise)
        width, height = self.width, self.height
        if resize_canvas:
            # determine new width
            width, height = get_transformed_dimensions(transform, (0, 0, width, height))
        transform = transform.translate(width * 0.5, height * 0.5)
        pixels = resample_algorithm.affine(self, transform, resize_canvas=resize_canvas)
        return self._copy(pixels)

    def get_pixel(self, x, y):
        """Return the pixel at (x, y); paletted images return the palette color.

        Raises IndexError for out-of-bounds coordinates.
        """
        try:
            # BUGFIX: was ``self.pixels.get(x, y)`` -- an attribute read on
            # the bound method, which raises AttributeError.
            raw_pixel = self.pixels().get(x, y)
        except IndexError:
            raise IndexError("Pixel (%d, %d) not in image" % (x, y))
        # BUGFIX: was ``self.pixelsize == 1`` -- compared the bound method
        # to 1, which is always False.
        if self.pixelsize() == 1 and self.palette:
            return self.palette[raw_pixel[0]]
        else:
            return raw_pixel

    def get_color(self, x, y):
        """Return the Color object at (x, y)."""
        return Color.from_pixel(self.get_pixel(x, y))

    def set_color(self, x, y, color):
        """Set (x, y) to *color*, alpha-compositing over the existing pixel.

        For paletted images the color must be in the reverse palette,
        otherwise InvalidColor is raised.
        """
        if color.alpha != 255:
            base = self.get_color(x, y)
            color = base.cover_with(color)
        # BUGFIX: was ``self.pixelsize == 1`` (always False) -- the paletted
        # branch was silently never taken.
        if self.reverse_palette and self.pixelsize() == 1:
            if color not in self.reverse_palette:
                raise InvalidColor(str(color))
            index = self.reverse_palette[color]
            self.pixels().set(x, y, [index])
        else:
            self.pixels().set(x, y, color.to_pixel(self.pixelsize()))

    def flip_top_bottom(self):
        """
        Vertically flips the pixels of source into target
        """
        # BUGFIX: ``pixels`` is a method and must be called.
        pixels = self.pixels().copy_flipped_top_bottom()
        return self._copy(pixels)

    def flip_left_right(self):
        """
        Horizontally flips the pixels of source into target
        """
        # BUGFIX: ``pixels`` is a method and must be called.
        return self._copy(pixels=self.pixels().copy_flipped_left_right())

    def crop(self, width, height, padding_top, padding_left):
        """Return a width x height copy starting at (padding_left, padding_top)."""
        new_pixels = self.pixels().copy()
        new_pixels.remove_lines(0, padding_top)
        new_pixels.remove_lines(height, new_pixels.height - height)
        new_pixels.remove_columns(0, padding_left)
        new_pixels.remove_columns(width, new_pixels.width - width)
        return self._copy(new_pixels, width=width, height=height)

    #==========================================================================
    # Manipulation
    #==========================================================================

    def draw(self, shape, color):
        """Draw *shape* onto this image in *color* (mutates in place)."""
        for x, y, pixelcolor in shape.iter_pixels(color):
            self.set_color(x, y, pixelcolor)

    def blit(self, padding_top, padding_left, image):
        """
        Puts the image given on top of this image with the given padding
        """
        # there *must* be a better/faster way to do this:
        # TODO: check that palettes etc match.
        # TODO: fastpath this by copying the array if pixelsize is identical/palette is the same
        for x in range(min([image.width, self.width - padding_left])):
            for y in range(min([image.height, self.height - padding_top])):
                self.set_color(padding_left + x, padding_top + y, image.get_color(x, y))
class LoadedImage(Image):
    # Image whose pixel data is already in memory; the loader is a no-op
    # because _pixelarray is populated up front, so load() returns early.
    def __init__(self, mode, width, height, pixels, palette=None, meta=None):
        self.mode = mode
        self.width = width
        self.height = height
        # NOTE(review): this binds the *builtin* ``format`` function -- a
        # ``format`` parameter appears to be missing; confirm with callers
        # before relying on ``self.format``.
        self.format = format
        self.loader = lambda:None
        self.meta = meta
        self._pixelarray = pixels
        self._palette = palette
        self.reverse_palette = None
"content_hash": "ad5c80b001a7beabf0303cbf8a7ca26e",
"timestamp": "",
"source": "github",
"line_count": 233,
"max_line_length": 107,
"avg_line_length": 35.24463519313305,
"alnum_prop": 0.5482221139795421,
"repo_name": "wdv4758h/ZipPy",
"id": "69577bd7b05a4638a5a5c9ca6744ecb57a99cbed",
"size": "9765",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "edu.uci.python.benchmark/src/benchmarks/pymaging/pymaging/image.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "9447"
},
{
"name": "C",
"bytes": "106932"
},
{
"name": "CSS",
"bytes": "32004"
},
{
"name": "Groff",
"bytes": "27753"
},
{
"name": "HTML",
"bytes": "721863"
},
{
"name": "Java",
"bytes": "1550721"
},
{
"name": "JavaScript",
"bytes": "10581"
},
{
"name": "Makefile",
"bytes": "16156"
},
{
"name": "PLSQL",
"bytes": "22886"
},
{
"name": "Python",
"bytes": "33672733"
},
{
"name": "R",
"bytes": "1959"
},
{
"name": "Ruby",
"bytes": "304"
},
{
"name": "Scheme",
"bytes": "125"
},
{
"name": "Shell",
"bytes": "3119"
},
{
"name": "Tcl",
"bytes": "1048"
},
{
"name": "TeX",
"bytes": "8790"
},
{
"name": "Visual Basic",
"bytes": "481"
},
{
"name": "XSLT",
"bytes": "366202"
}
],
"symlink_target": ""
} |
import os
import time
# Prefer pysqlite2 when available, but only old versions: newer releases
# are known to break Anki, so fall back to the stdlib sqlite3 binding.
try:
    from pysqlite2 import dbapi2 as sqlite
    vi = sqlite.version_info
    if vi[0] > 2 or vi[1] > 6:
        # latest pysqlite breaks anki
        raise ImportError()
except ImportError:
    from sqlite3 import dbapi2 as sqlite
# Re-exported so callers can catch database errors without knowing which
# binding was chosen.
Error = sqlite.Error
class DB(object):
    """Thin wrapper around a sqlite connection (Python 2 code).

    Adds optional SQL echoing via the DBECHO environment variable
    ("2" also prints the bound arguments), tracks whether any modifying
    statement ran (``self.mod``) and provides small fetch helpers.
    """
    def __init__(self, path, text=None, timeout=0):
        # ``text`` optionally overrides the connection's text_factory.
        encpath = path
        if isinstance(encpath, unicode):
            encpath = path.encode("utf-8")
        self._db = sqlite.connect(encpath, timeout=timeout)
        if text:
            self._db.text_factory = text
        self._path = path
        self.echo = os.environ.get("DBECHO")
        # True once any INSERT/UPDATE/DELETE/script has been executed.
        self.mod = False
    def execute(self, sql, *a, **ka):
        # Execute one statement with positional (?) or named (:name) params.
        s = sql.strip().lower()
        # mark modified?
        for stmt in "insert", "update", "delete":
            if s.startswith(stmt):
                self.mod = True
        t = time.time()
        if ka:
            # execute("...where id = :id", id=5)
            res = self._db.execute(sql, ka)
        else:
            # execute("...where id = ?", 5)
            res = self._db.execute(sql, a)
        if self.echo:
            #print a, ka
            print sql, "%0.3fms" % ((time.time() - t)*1000)
            if self.echo == "2":
                print a, ka
        return res
    def executemany(self, sql, l):
        # Run *sql* once per parameter row in *l*; always marks mod.
        self.mod = True
        t = time.time()
        self._db.executemany(sql, l)
        if self.echo:
            print sql, "%0.3fms" % ((time.time() - t)*1000)
            if self.echo == "2":
                print l
    def commit(self):
        t = time.time()
        self._db.commit()
        if self.echo:
            print "commit %0.3fms" % ((time.time() - t)*1000)
    def executescript(self, sql):
        # Multi-statement script; always marks mod.
        self.mod = True
        if self.echo:
            print sql
        self._db.executescript(sql)
    def rollback(self):
        self._db.rollback()
    def scalar(self, *a, **kw):
        # First column of the first row, or None.
        res = self.execute(*a, **kw).fetchone()
        if res:
            return res[0]
        return None
    def all(self, *a, **kw):
        return self.execute(*a, **kw).fetchall()
    def first(self, *a, **kw):
        # First row with the cursor closed immediately, or None.
        c = self.execute(*a, **kw)
        res = c.fetchone()
        c.close()
        return res
    def list(self, *a, **kw):
        # First column of every row.
        return [x[0] for x in self.execute(*a, **kw)]
    def close(self):
        self._db.close()
    def set_progress_handler(self, *args):
        self._db.set_progress_handler(*args)
    def __enter__(self):
        # Open an explicit transaction.
        self._db.execute("begin")
        return self
    def __exit__(self, exc_type, *args):
        # NOTE(review): closes the connection on exit -- no commit or
        # rollback is issued here; confirm callers depend on exactly this.
        self._db.close()
    def totalChanges(self):
        return self._db.total_changes
    def interrupt(self):
        self._db.interrupt()
| {
"content_hash": "6d1ac753d34764804ad454d6c8aa2bb1",
"timestamp": "",
"source": "github",
"line_count": 106,
"max_line_length": 61,
"avg_line_length": 26.00943396226415,
"alnum_prop": 0.5070729053318824,
"repo_name": "jlitven/vexer",
"id": "6b596b6bef9c77e66f86a08bea93b7f993fce519",
"size": "2904",
"binary": false,
"copies": "11",
"ref": "refs/heads/master",
"path": "src/anki/db.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "761396"
}
],
"symlink_target": ""
} |
import urllib.request
import collections
import string
import itertools

# Get the input data from the problem webpage.
url = 'http://www.pythonchallenge.com/pc/def/ocr.html'
with urllib.request.urlopen(url) as response:
    html = response.read().decode('utf-8')
data = html.split('<!--')[2].strip('\n-->')
print(f'Input data:\n{data[:60]}...(truncated)\n')

# Source code hint suggests looking for rare characters in the input data
# (count below a tenth of the average).
counts = collections.Counter(data)
avgCount = sum(counts.values()) / len(counts.values())
rareChars = [key for key, value in counts.items() if value < avgCount / 10]
print(f'Rare characters in input data:\n{rareChars}\n')

# Inspection suggests an anagram. Check permutations against Unix dictionary.
try:
    with open('/usr/share/dict/words', 'r') as f:
        words = {word for word in f.read().splitlines()}
except OSError:
    # BUGFIX: was a bare ``except:`` that also swallowed KeyboardInterrupt.
    print('Unix dictionary not found - using a harcoded dictionary instead')
    words = {'a', 'cheat', 'dictionary', 'that', 'includes', 'equality'}

anagram = None
for perm in itertools.permutations(rareChars):
    candidate = ''.join(perm)
    if candidate in words:
        anagram = candidate
        break  # BUGFIX: stop at the first hit instead of scanning all n! orders
if anagram is None:
    # BUGFIX: was an unguarded NameError when no permutation matched.
    raise SystemExit('No anagram of the rare characters found in the dictionary')
print(f'Rare characters are an anagram of:\n{anagram}\n')

# Build the solution url by replacing the path end with the anagram.
newUrl = url.split('/')
newUrl[-1] = anagram + '.html'
newUrl = '/'.join(newUrl)
solution = newUrl
print(f'Solution url:\n{solution}\n')
| {
"content_hash": "0f58c1d0db5eb674500519eb3e049003",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 99,
"avg_line_length": 36.53846153846154,
"alnum_prop": 0.7010526315789474,
"repo_name": "medwig/pythonchallenge-solutions",
"id": "c7141d2e08cd819767b51675f38b6ee060df205d",
"size": "1425",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "p2.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "5466"
}
],
"symlink_target": ""
} |
from functools import partial
import unittest
##
# test helpers
#
from testutils import mock
##
# promise modules
#
from promise import Promise
class PromiseTestCase(unittest.TestCase):
    """Promise proxies its wrapped object but blocks deferred-only methods."""

    def setUp(self):
        self.d = mock.Mock()

    def test_init_expects_one_arg_1(self):
        # Constructing without a wrapped object is a TypeError.
        with self.assertRaises(TypeError):
            Promise()

    def test_init_expects_one_arg_2(self):
        # Any single argument -- even None -- is acceptable.
        try:
            Promise(None)
        except TypeError:
            self.fail('Promise(None) unexpectedly raised TypeError')

    def test_promise_works_as_proxy_for_given_object(self):
        Promise(self.d).foo()
        self.d.foo.assert_called_once_with()

    def test_promise_rejects_call_to_resolve_method(self):
        # Even *accessing* the attribute must fail; the deferred's own
        # method is never reached.
        with self.assertRaises(RuntimeError):
            getattr(Promise(self.d), 'resolve')
        self.assertEqual(self.d.resolve.call_count, 0)

    def test_promise_rejects_call_to_reject_method(self):
        with self.assertRaises(RuntimeError):
            getattr(Promise(self.d), 'reject')
        self.assertEqual(self.d.reject.call_count, 0)

    def test_promise_rejects_call_to_cancel_method(self):
        with self.assertRaises(RuntimeError):
            getattr(Promise(self.d), 'cancel')
        self.assertEqual(self.d.cancel.call_count, 0)
# Allow running this test module directly.
if "__main__" == __name__:
    unittest.main()
| {
"content_hash": "b3851ce44fae899dbae54375fd19dcc4",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 73,
"avg_line_length": 25.826923076923077,
"alnum_prop": 0.6358897989575577,
"repo_name": "michalbachowski/pypromise",
"id": "5395d63960e256eed0cad5447d57946585e2f2b7",
"size": "1421",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "test/promise_test.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "31925"
},
{
"name": "Ruby",
"bytes": "2856"
},
{
"name": "Shell",
"bytes": "996"
}
],
"symlink_target": ""
} |
"""
Created on Tue May 31 16:56:03 2016
@author: login
"""
from GEO_Database import GEO_Database
# Build a test database, fetch metadata for its URL list and persist the
# selected tags as a CSV table.
mystic_lake = GEO_Database(name="test_db", TestBool=True, BuildBool=True)
mystic_lake.read_combined_URL_list()
mystic_lake.download_data(type="metadata")
mystic_lake.write_metadata_table(tags = ['RangeDateTime', 'GranuleID'])
mystic_lake.read_metadata_table(mystic_lake.out_csv)
# Round-trip check: a fresh, non-building instance reads the same CSV back.
other_lake = GEO_Database(name="test_db", TestBool=False, BuildBool=False)
other_lake.read_metadata_table(mystic_lake.out_csv)
| {
"content_hash": "52e62117367c68f2da12e67ff8a1abba",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 74,
"avg_line_length": 31.6875,
"alnum_prop": 0.7593688362919132,
"repo_name": "karoraw1/GLM_Wrapper",
"id": "997aa495550dd07adaf5d836893162d72c3fb3fc",
"size": "531",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bin/database_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Awk",
"bytes": "31"
},
{
"name": "Batchfile",
"bytes": "174"
},
{
"name": "C",
"bytes": "9382199"
},
{
"name": "C++",
"bytes": "70748"
},
{
"name": "CMake",
"bytes": "146630"
},
{
"name": "CSS",
"bytes": "490"
},
{
"name": "Fortran",
"bytes": "566"
},
{
"name": "HTML",
"bytes": "391759"
},
{
"name": "Jupyter Notebook",
"bytes": "89477"
},
{
"name": "Lex",
"bytes": "34110"
},
{
"name": "M4",
"bytes": "424511"
},
{
"name": "Makefile",
"bytes": "1115645"
},
{
"name": "Objective-C",
"bytes": "5942"
},
{
"name": "OpenEdge ABL",
"bytes": "32797179"
},
{
"name": "Python",
"bytes": "299012"
},
{
"name": "R",
"bytes": "2582"
},
{
"name": "Roff",
"bytes": "151340"
},
{
"name": "Shell",
"bytes": "857515"
},
{
"name": "Yacc",
"bytes": "78367"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    # Initial schema: creates the AutomaticLink model.
    # NOTE: auto-generated by Django; the b'...' choice values are
    # Python 2 artifacts -- presumably harmless here, but confirm before
    # regenerating on Python 3.
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='AutomaticLink',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('keyword', models.CharField(unique=True, max_length=255, verbose_name='keyword')),
                ('link', models.CharField(max_length=255, verbose_name='link')),
                ('active', models.BooleanField(default=True, verbose_name='active')),
                ('limit', models.IntegerField(default=0, help_text='zero - disabled', verbose_name='limit')),
                ('every', models.IntegerField(default=1, help_text='Every "3" mean that this keyword will be replaced to link in every third content item', verbose_name='every N')),
                ('target', models.CharField(default=b'_blank', max_length=10, verbose_name='target', choices=[(b'_blank', b'_blank'), (b'_self', b'_self'), (b'_parent', b'_parent'), (b'_top', b'_top')])),
                ('nofollow', models.BooleanField(default=False, verbose_name='rel="nofollow"')),
                ('css_class', models.CharField(default=None, max_length=100, null=True, verbose_name='css class', blank=True)),
            ],
            options={
                'verbose_name': 'automatic link',
                'verbose_name_plural': 'automatic links',
            },
            bases=(models.Model,),
        ),
        # A keyword may map to a given link only once.
        migrations.AlterUniqueTogether(
            name='automaticlink',
            unique_together=set([('keyword', 'link')]),
        ),
    ]
| {
"content_hash": "2ca8e9d8d4545ab531815c43264360ae",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 204,
"avg_line_length": 49,
"alnum_prop": 0.5801749271137027,
"repo_name": "silentsokolov/django-automatic-links",
"id": "89bd2f593cba6cf4cb2f381f12787c5e9a027d24",
"size": "1739",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "automatic_links/migrations/0001_initial.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "433"
},
{
"name": "Python",
"bytes": "16940"
}
],
"symlink_target": ""
} |
from setuptools import setup, find_packages
from setuptools.command.test import test as test_command
from sped import __version__
class PyTest(test_command):
    """Custom ``python setup.py test`` command that delegates to py.test."""

    user_options = [('pytest-args=', 'a', "Arguments to pass to py.test")]

    def initialize_options(self):
        test_command.initialize_options(self)
        self.pytest_args = []

    def finalize_options(self):
        test_command.finalize_options(self)
        self.test_args = []
        self.test_suite = True

    def run_tests(self):
        # Imported lazily so plain installs do not require pytest.
        import sys
        import pytest
        sys.exit(pytest.main(self.pytest_args))
# Package metadata and build configuration for the `sped` distribution.
setup(
    name='sped',
    packages=find_packages(exclude=['contrib', 'docs', 'tests*']),
    include_package_data=True,
    # Ship the ECD lookup tables alongside the code.
    package_data={
        'sped': ['ecd/tabelas/*'],
    },
    version=__version__,
    description='Biblioteca para geração dos arquivos do Sistema Público de Escrituração Digital (SPED) para Python.',
    long_description='Biblioteca para geração dos arquivos do Sistema Público de Escrituração Digital (SPED) para '
                     'Python.',
    author='Sergio Garcia',
    author_email='sergio@ginx.com.br',
    url='https://github.com/sped-br/python-sped',
    download_url='https://github.com/sped-br/python-sped/releases',
    license='MIT',
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.4',
    ],
    keywords='sped fiscal contábil contabilidade receita federal',
    tests_require=['pytest'],
    # Wire the custom test runner defined above.
    cmdclass={'test': PyTest},
)
| {
"content_hash": "f1ae5cd321aa53b9ee981d72d01910a3",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 118,
"avg_line_length": 33.07843137254902,
"alnum_prop": 0.6378186129223473,
"repo_name": "mileo/python-sped",
"id": "969ac7b53e36d84b769ed3c19257b94b65e299a6",
"size": "1698",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "266994"
}
],
"symlink_target": ""
} |
"""Sub command to manage trained generators associated with your account.
Run `hazy generator --help` for usage.
"""
import click
# Root command group; subcommands attach via @generator.command() below.
# The docstring doubles as the CLI help text, so it must not be reworded
# casually.
@click.group()
def generator():
    '''Manage trained generators associated with your account.
    '''
# Stub: no implementation yet; the docstring is the user-facing help text.
@generator.command()
def train():
    '''Train a new generator with your data.
    Example:
    hazy generator train --db 2bcac5e8-fc4e-4675-ae58-c4b67b552888 --table table
    '''
# Stub. NOTE: the function name shadows the ``list`` builtin, but click
# uses it only as the CLI command name, so renaming would change the CLI.
@generator.command()
def list():
    '''List the trained generators in your account.
    Example:
    `hazy generator list`
    '''
# Stub: ``uuid`` is the generator identifier, passed as a CLI argument.
@generator.command()
@click.argument('uuid')
def show(uuid):
    '''View details of a single trained generator.
    Example:
    `hazy generator show 2bcac5e8-fc4e-4675-ae58-c4b67b552888`
    '''
# Stub: ``uuid`` is the generator identifier, passed as a CLI argument.
@generator.command()
@click.argument('uuid')
def jobs(uuid):
    '''View all synthetic data jobs of a generator.
    Example:
    `hazy generator jobs 2bcac5e8-fc4e-4675-ae58-c4b67b552888`
    '''
| {
"content_hash": "24dea87781377a3062d952ab1a48117e",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 80,
"avg_line_length": 21.155555555555555,
"alnum_prop": 0.680672268907563,
"repo_name": "anon-ai/toolbelt",
"id": "d26a9dc80220ab9689db1ab0b77ce02900613b16",
"size": "952",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/hazy/commands/generator.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "11260"
}
],
"symlink_target": ""
} |
from django.conf import settings
from django.contrib.auth.models import AnonymousUser
from threading import local
# Attribute name under which the current-user getter is stashed on the
# thread-local (overridable via settings.LOCAL_USER_ATTR_NAME).
USER_ATTR_NAME = getattr(settings, 'LOCAL_USER_ATTR_NAME', '_current_user')
# One storage slot per thread: each request thread sees only its own user.
_thread_locals = local()
def _do_set_current_user(user_fun):
    # Store *user_fun* on the thread-local, bound through the descriptor
    # protocol so later attribute access evaluates it lazily.
    # NOTE(review): ``__get__(user_fun, local)`` passes the function itself
    # as the bound instance; the getters used here all ignore ``self``, so
    # this appears intentional -- confirm before changing.
    setattr(_thread_locals, USER_ATTR_NAME, user_fun.__get__(user_fun, local))
def _set_current_user(user=None):
    '''
    Sets current user in local thread.
    Can be used as a hook e.g. for shell jobs (when request object is not
    available).
    '''
    def _getter(self):
        return user
    _do_set_current_user(_getter)
class SetCurrentUser:
    """Context manager that exposes ``request.user`` as the thread-local
    current user for the duration of the ``with`` block and clears it on
    exit.

    Fixes: renamed the unidiomatic ``this`` receiver to ``self`` and the
    ``__exit__`` parameter ``type`` (which shadowed the builtin) to
    ``exc_type``.
    """

    def __init__(self, request):
        self.request = request

    def __enter__(self):
        # NOTE: the callable's parameter must NOT be named ``self`` here or
        # it would shadow the enclosing instance inside the closure; the
        # bound receiver passed by _do_set_current_user is ignored anyway.
        _do_set_current_user(lambda _: getattr(self.request, 'user', None))

    def __exit__(self, exc_type, exc_value, traceback):
        # Always clear the thread-local user when the request scope ends.
        _do_set_current_user(lambda _: None)
class ThreadLocalUserMiddleware(object):
    """Django middleware that publishes the current request's user to
    thread-local storage while the request is being handled.
    """

    def __init__(self, get_response):
        self.get_response = get_response

    def __call__(self, request):
        # request.user stays lazy; memoization happens inside Django's
        # request.user non-data descriptor, not here.
        with SetCurrentUser(request):
            return self.get_response(request)
def get_current_user():
    """Return the user recorded for this thread, or None if unset.

    The stored attribute may be a callable installed by
    _do_set_current_user(); invoke it to resolve the actual user.
    """
    stored = getattr(_thread_locals, USER_ATTR_NAME, None)
    if not callable(stored):
        return stored
    return stored()
def get_current_authenticated_user():
    """Like get_current_user(), but report anonymous users as None."""
    user = get_current_user()
    return None if isinstance(user, AnonymousUser) else user
| {
"content_hash": "1bfb61659bb225d3bdcf46042bbe45dc",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 78,
"avg_line_length": 26.683333333333334,
"alnum_prop": 0.6658338538413492,
"repo_name": "PaesslerAG/django-currentuser",
"id": "dec921f316336144d089387662a52aa5f30884fa",
"size": "1601",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django_currentuser/middleware.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "1778"
},
{
"name": "Python",
"bytes": "22983"
}
],
"symlink_target": ""
} |
import getopt
import os
import re
import shlex
import sys
from lxml import etree as ET
from lxml.cssselect import CSSSelector
from sh import parseopt
def pxml(root, method="xml"):
    """Pretty-print an element (or a list of elements) to stdout.

    A list of nodes is wrapped in a temporary <results> root so the whole
    set can be serialised in one call.

    FIX: use isinstance() instead of ``type(root) is list`` so list
    subclasses are handled too.
    NOTE(review): lxml's append() re-parents nodes, so wrapping a list
    mutates the source tree — acceptable for this one-shot CLI script.
    NOTE(review): on Python 3, ET.tostring returns bytes, so print() shows
    the b'...' repr; left unchanged to preserve current output.
    """
    if isinstance(root, list):
        dummy = ET.Element("results")
        for n in root:
            dummy.append(n)
        root = dummy
    print(ET.tostring(root, pretty_print=True, method=method))
def make_selector(o):
    """Build a selector callable from the parsed options object.

    -t TAG   -> XPath matching every element named TAG
    -x XPATH -> raw XPath expression
    -c CSS   -> CSS selector
    default  -> XPath string() of the whole document
    """
    if "t" in o:
        return ET.XPath("//%s" % o.t)
    if "x" in o:
        return ET.XPath(o.x)
    if "c" in o:
        return CSSSelector(o.c)
    return ET.XPath("string()")
if __name__ == "__main__":
    # Parse -x/-c/-t selector options; the first positional argument is the
    # input file path.
    o, a = parseopt("x:c:t:", sys.argv[1:])
    fn = os.path.abspath(a[0])
    print(fn)
    if not os.path.exists(fn):
        print("Could not find file '{0}'".format(fn))
        sys.exit(1)
    # Pick parser and serialisation mode from the file extension
    # (.htm/.html -> HTML, anything else -> XML).
    if re.match(r".*\.html?", fn):
        parser = ET.HTMLParser()
        method = "html"
    else:
        parser = ET.XMLParser()
        method = "xml"
    tree = ET.parse(fn, parser)
    # Apply the selector to the document root and pretty-print the matches.
    selector = make_selector(o)
    res = selector(tree.getroot())
    pxml(res, method)
| {
"content_hash": "b80b9c9f4f96779eabeb07b1b13e00ef",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 60,
"avg_line_length": 18.436363636363637,
"alnum_prop": 0.6173570019723866,
"repo_name": "spiralx/mypy",
"id": "86e30af0f967bfcfd221e4b494e749ef378a3029",
"size": "1015",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mypy/xq2.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "109682"
},
{
"name": "JavaScript",
"bytes": "170251"
},
{
"name": "Python",
"bytes": "298163"
},
{
"name": "Shell",
"bytes": "81"
}
],
"symlink_target": ""
} |
import sys

# This sequence will get a lot cleaner for Python 3, for which the necessary
# flags should all be in the os module.
import ctypes

# Open extension modules with RTLD_GLOBAL | RTLD_NOW — presumably so the
# separately-built extension modules can share symbols (TODO confirm against
# the package's build docs).
flags = ctypes.RTLD_GLOBAL
try:
    import DLFCN
    flags |= DLFCN.RTLD_NOW
except ImportError:
    flags |= 0x2  # works for Linux and Mac, only platforms I care about now.
sys.setdlopenflags(flags)

# Ensure basics is loaded first, since we need its
# symbols for anything else.
from . import basics
| {
"content_hash": "5397255895fe868f756f77ea8ff6e5cb",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 77,
"avg_line_length": 27.875,
"alnum_prop": 0.742152466367713,
"repo_name": "TallJimbo/python-cpp-challenge",
"id": "976f54f4b2b64e7957b1758e172d8eed566b2861",
"size": "446",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python2-cython/challenge/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "54717"
},
{
"name": "Python",
"bytes": "17135"
}
],
"symlink_target": ""
} |
import atexit
import functools
import socket
import warnings
import weakref
import time
# So that 'setup.py doc' can import this module without Tornado or greenlet
requirements_satisfied = True
try:
from tornado import iostream, ioloop
except ImportError:
requirements_satisfied = False
warnings.warn("Tornado not installed", ImportWarning)
try:
import greenlet
except ImportError:
requirements_satisfied = False
warnings.warn("greenlet module not installed", ImportWarning)
import pymongo
import pymongo.common
import pymongo.errors
import pymongo.mongo_client
import pymongo.mongo_replica_set_client
import pymongo.pool
import pymongo.son_manipulator
import logging
class MongoIOStream(iostream.IOStream):
    """Tornado IOStream with a helper for checking buffered-read availability."""

    def can_read_sync(self, num_bytes):
        # True when the internal read buffer already holds num_bytes, i.e. a
        # recv() can be satisfied without yielding to the IOLoop.
        return num_bytes <= self._read_buffer_size
def _check_deadline(cleanup_cb=None):
    """If the current greenlet is past its deadline, run the optional cleanup
    callback and dispatch the greenlet's deadline handler.

    cleanup_cb: zero-argument callable that releases held resources (socket,
    semaphore slot) before the deadline handler fires.
    """
    gr = greenlet.getcurrent()
    if hasattr(gr, 'is_deadlined') and \
            gr.is_deadlined():
        if cleanup_cb:
            cleanup_cb()
        try:
            gr.do_deadline()
        except AttributeError:
            # BUG FIX: the original format string contained a %s placeholder
            # but never passed the greenlet, so the log line showed a
            # literal '%s'. Pass `gr` as a lazy logging argument.
            logging.exception(
                'Greenlet %s has \'is_deadlined\' but not \'do_deadline\'',
                gr)
def green_sock_method(method):
    """Wrap a GreenletSocket method to pause the current greenlet and arrange
    for the greenlet to be resumed when non-blocking I/O has completed.

    NOTE: this file is Python 2 (comma-style except clauses below).
    The wrapped method must start a non-blocking IOStream operation whose
    completion callback switches back into this greenlet.
    """
    @functools.wraps(method)
    def _green_sock_method(self, *args, **kwargs):
        # Remember which greenlet issued the operation; close/timeout paths
        # use it to raise into the right place.
        self.child_gr = greenlet.getcurrent()
        main = self.child_gr.parent
        assert main, "Should be on child greenlet"

        # Run on main greenlet
        def closed(gr):
            # The child greenlet might have died, e.g.:
            # - An operation raised an error within PyMongo
            # - PyMongo closed the MotorSocket in response
            # - GreenletSocket.close() closed the IOStream
            # - IOStream scheduled this closed() function on the loop
            # - PyMongo operation completed (with or without error) and
            #   its greenlet terminated
            # - IOLoop runs this function
            if not gr.dead:
                gr.throw(socket.error("Close called, killing mongo operation"))

        # send the error to this greenlet if something goes wrong during the
        # query
        self.stream.set_close_callback(functools.partial(closed, self.child_gr))
        try:
            # Add timeout for closing non-blocking method call
            if self.timeout and not self.timeout_handle:
                self.timeout_handle = self.io_loop.add_timeout(
                    time.time() + self.timeout, self._switch_and_close)
            # method is GreenletSocket.send(), recv(), etc. method() begins a
            # non-blocking operation on an IOStream and arranges for
            # callback() to be executed on the main greenlet once the
            # operation has completed.
            method(self, *args, **kwargs)
            # Pause child greenlet until resumed by main greenlet, which
            # will pass the result of the socket operation (data for recv,
            # number of bytes written for sendall) to us.
            socket_result = main.switch()
            return socket_result
        except socket.error:
            raise
        except IOError, e:
            # If IOStream raises generic IOError (e.g., if operation
            # attempted on closed IOStream), then substitute socket.error,
            # since socket.error is what PyMongo's built to handle. For
            # example, PyMongo will catch socket.error, close the socket,
            # and raise AutoReconnect.
            raise socket.error(str(e))
        finally:
            # do this here in case main.switch throws
            # Remove timeout handle if set, since we've completed call
            if self.timeout_handle:
                self.io_loop.remove_timeout(self.timeout_handle)
                self.timeout_handle = None
            # disable the callback to raise exception in this greenlet on socket
            # close, since the greenlet won't be around to raise the exception
            # in (and it'll be caught on the next query and raise an
            # AutoReconnect, which gets handled properly)
            self.stream.set_close_callback(None)

            # Deadline cleanup: drop the stream and give the pool's
            # semaphore slot back (pool may already have been gc'ed).
            def cleanup_cb():
                self.stream.close()
                try:
                    self.pool_ref._socket_semaphore.release()
                except weakref.ReferenceError:
                    # pool was gc'ed
                    pass
            _check_deadline(cleanup_cb)
    return _green_sock_method
class GreenletSocket(object):
    """Replace socket with a class that yields from the current greenlet, if
    we're on a child greenlet, when making blocking calls, and uses Tornado
    IOLoop to schedule child greenlet for resumption when I/O is ready.

    We only implement those socket methods actually used by pymongo.
    """

    def __init__(self, sock, io_loop, use_ssl=False, pool_ref=None):
        # pool_ref: weak proxy back to the owning pool (see GreenletPool);
        # used to release the pool's socket semaphore on deadline cleanup.
        self.use_ssl = use_ssl
        self.io_loop = io_loop
        self.timeout = None
        self.timeout_handle = None
        self.pool_ref = pool_ref
        if self.use_ssl:
            raise Exception("SSL isn't supported")
        else:
            self.stream = MongoIOStream(sock, io_loop=io_loop)

    def setsockopt(self, *args, **kwargs):
        self.stream.socket.setsockopt(*args, **kwargs)

    def settimeout(self, timeout):
        # Stored only; applied by green_sock_method as an IOLoop timeout.
        self.timeout = timeout

    def _switch_and_close(self):
        # called on timeout to switch back to child greenlet
        # (self.child_gr is set by green_sock_method before the operation)
        self.close()
        if self.child_gr is not None:
            self.child_gr.throw(IOError("Socket timed out"))

    @green_sock_method
    def connect(self, pair):
        # do the connect on the underlying socket asynchronously...
        self.stream.connect(pair, greenlet.getcurrent().switch)

    def sendall(self, data):
        # do the send on the underlying socket synchronously...
        try:
            self.stream.write(data)
        except IOError as e:
            # pymongo is built to handle socket.error, not IOError
            raise socket.error(str(e))
        if self.stream.closed():
            raise socket.error("connection closed")

    def recv(self, num_bytes):
        # if we have enough bytes in our local buffer, don't yield
        if self.stream.can_read_sync(num_bytes):
            return self.stream._consume(num_bytes)
        # else yield while we wait on Mongo to send us more
        else:
            return self.recv_async(num_bytes)

    @green_sock_method
    def recv_async(self, num_bytes):
        # do the recv on the underlying socket... come back to the current
        # greenlet when it's done
        return self.stream.read_bytes(num_bytes, greenlet.getcurrent().switch)

    def close(self):
        # since we're explicitly handling closing here, don't raise an exception
        # via the callback
        self.stream.set_close_callback(None)

        sock = self.stream.socket
        try:
            try:
                self.stream.close()
            except KeyError:
                # Tornado's _impl (epoll, kqueue, ...) has already removed this
                # file descriptor from its dict.
                pass
        finally:
            # Sometimes necessary to avoid ResourceWarnings in Python 3:
            # specifically, if the fd is closed from the OS's view, then
            # stream.close() throws an exception, but the socket still has an
            # fd and so will print a ResourceWarning. In that case, calling
            # sock.close() directly clears the fd and does not raise an error.
            if sock:
                sock.close()

    def fileno(self):
        return self.stream.socket.fileno()
class GreenletPool(pymongo.pool.Pool):
    """A simple connection pool of GreenletSockets.
    """

    def __init__(self, *args, **kwargs):
        io_loop = kwargs.pop('io_loop', None)
        self.io_loop = io_loop if io_loop else ioloop.IOLoop.instance()
        pymongo.pool.Pool.__init__(self, *args, **kwargs)
        if self.max_size is not None and self.wait_queue_multiple:
            raise ValueError("GreenletPool doesn't support wait_queue_multiple")
        # HACK [adam Dec/6/14]: need to use our IOLoop/greenlet semaphore
        # implementation, so override what Pool.__init__ sets
        # self._socket_semaphore to here
        self._socket_semaphore = GreenletBoundedSemaphore(self.max_size)

    def create_connection(self):
        """Copy of BasePool.connect(), returning a GreenletSocket connected
        to self.pair. Tries each getaddrinfo result until one connects.
        """
        assert greenlet.getcurrent().parent, "Should be on child greenlet"

        host, port = self.pair

        # Don't try IPv6 if we don't support it. Also skip it if host
        # is 'localhost' (::1 is fine). Avoids slow connect issues
        # like PYTHON-356.
        family = socket.AF_INET
        if socket.has_ipv6 and host != 'localhost':
            family = socket.AF_UNSPEC

        err = None
        for res in socket.getaddrinfo(host, port, family, socket.SOCK_STREAM):
            af, socktype, proto, dummy, sa = res
            green_sock = None
            try:
                sock = socket.socket(af, socktype, proto)
                sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
                green_sock = GreenletSocket(
                    sock, self.io_loop, use_ssl=self.use_ssl,
                    pool_ref=weakref.proxy(self))
                # GreenletSocket will pause the current greenlet and resume it
                # when connection has completed
                green_sock.settimeout(self.conn_timeout)
                green_sock.connect(sa)
                green_sock.settimeout(self.net_timeout)
                return green_sock
            except socket.error, e:
                # remember the last failure; try the next address
                err = e
                if green_sock is not None:
                    green_sock.close()

        if err is not None:
            # pylint: disable=E0702
            raise err
        else:
            # This likely means we tried to connect to an IPv6 only
            # host with an OS/kernel or Python interpreter that doesn't
            # support IPv6.
            raise socket.error('getaddrinfo failed')
class GreenletEvent(object):
    """threading.Event lookalike for greenlets: waiters yield to the IOLoop
    and are rescheduled via add_callback when the event is set."""

    def __init__(self, io_loop):
        self.io_loop = io_loop
        self._state = False
        self._pending = []

    def is_set(self):
        return self._state

    # old camelCase spelling kept for API compatibility
    isSet = is_set

    def set(self):
        self._state = True
        # detach the waiter list before waking anyone, so greenlets that
        # start waiting afterwards queue on a fresh list
        woken, self._pending = self._pending, []
        for gr in woken:
            self.io_loop.add_callback(gr.switch)

    def clear(self):
        self._state = False

    def wait(self):
        current = greenlet.getcurrent()
        parent = current.parent
        assert parent, "Must be called on child greenlet"
        if not self._state:
            # not set yet: park this greenlet until set() reschedules it
            self._pending.append(current)
            try:
                parent.switch()
            finally:
                # don't need a cleanup callback -- no resources held while
                # waiting
                _check_deadline()
        return self._state
class GreenletSemaphore(object):
    """
    Tornado IOLoop+Greenlet-based Semaphore class

    Waiters are parked greenlets; release() reschedules the oldest waiter
    via the IOLoop. Not thread-safe -- coroutine use only.
    """

    def __init__(self, value=1, io_loop=None):
        if value < 0:
            raise ValueError("semaphore initial value must be >= 0")
        self._value = value
        self._waiters = []
        # maps waiting greenlet -> pending IOLoop timeout handle
        self._waiter_timeouts = {}
        self._ioloop = io_loop if io_loop else ioloop.IOLoop.instance()

    def _handle_timeout(self, timeout_gr):
        # Fired by the IOLoop when an acquire(timeout=...) expires.
        if len(self._waiters) > 1000:
            import os
            logging.error('waiters size: %s on pid: %s', len(self._waiters),
                          os.getpid())
        # should always be there, but add some safety just in case
        if timeout_gr in self._waiters:
            self._waiters.remove(timeout_gr)
        if timeout_gr in self._waiter_timeouts:
            self._waiter_timeouts.pop(timeout_gr)
        timeout_gr.switch()

    def acquire(self, blocking=True, timeout=None):
        """Acquire a slot; returns True on success, False on non-blocking
        failure or timeout. Must run on a child greenlet."""
        if not blocking and timeout is not None:
            raise ValueError("can't specify timeout for non-blocking acquire")
        current = greenlet.getcurrent()
        parent = current.parent
        assert parent, "Must be called on child greenlet"
        start_time = time.time()

        # if the semaphore has a positive value, subtract 1 and return True
        if self._value > 0:
            self._value -= 1
            return True
        elif not blocking:
            # non-blocking mode, just return False
            return False

        # otherwise, we don't get the semaphore...
        while True:
            self._waiters.append(current)
            if timeout:
                callback = functools.partial(self._handle_timeout, current)
                self._waiter_timeouts[current] = \
                    self._ioloop.add_timeout(time.time() + timeout,
                                             callback)

            # yield back to the parent, returning when someone releases the
            # semaphore
            #
            # because of the async nature of the way we yield back, we're
            # not guaranteed to actually *get* the semaphore after returning
            # here (someone else could acquire() between the release() and
            # this greenlet getting rescheduled). so we go back to the loop
            # and try again.
            #
            # this design is not strictly fair and it's possible for
            # greenlets to starve, but it strikes me as unlikely in
            # practice.
            try:
                parent.switch()
            finally:
                # need to wake someone else up if we were the one
                # given the semaphore
                def _cleanup_cb():
                    if self._value > 0:
                        self._value -= 1
                        self.release()
                _check_deadline(_cleanup_cb)

            if self._value > 0:
                self._value -= 1
                # profiling hook set by callers (see release() counterpart)
                if hasattr(current, '__mongoengine_comment__'):
                    current.add_mongo_start(
                        current.__mongoengine_comment__, time.time())
                return True

            # if we timed out, just return False instead of retrying
            if timeout and (time.time() - start_time) >= timeout:
                return False

    __enter__ = acquire

    def release(self):
        current = greenlet.getcurrent()
        # profiling hook: record when the mongo op finished
        if hasattr(current, '__mongoengine_comment__'):
            is_scatter_gather = False
            if hasattr(current, '__scatter_gather__'):
                is_scatter_gather = current.__scatter_gather__
            current.add_mongo_end(
                current.__mongoengine_comment__, time.time(),
                is_scatter_gather)
        self._value += 1
        if self._waiters:
            waiting_gr = self._waiters.pop(0)

            # remove the timeout
            if waiting_gr in self._waiter_timeouts:
                timeout = self._waiter_timeouts.pop(waiting_gr)
                self._ioloop.remove_timeout(timeout)

            # schedule the waiting greenlet to try to acquire
            self._ioloop.add_callback(waiting_gr.switch)

    def __exit__(self, t, v, tb):
        self.release()

    @property
    def counter(self):
        # current number of free slots
        return self._value
class GreenletBoundedSemaphore(GreenletSemaphore):
    """Semaphore that checks that # releases is <= # acquires"""

    def __init__(self, value=1):
        self._initial_value = value
        GreenletSemaphore.__init__(self, value)

    def release(self):
        # refuse to push the counter above its starting value
        if not self._value < self._initial_value:
            raise ValueError("Semaphore released too many times")
        return GreenletSemaphore.release(self)
class GreenletPeriodicExecutor(object):
    """IOLoop-based replacement for pymongo's thread-based PeriodicExecutor:
    runs `target` every `interval` seconds until target returns falsy or
    close() is called."""

    # weak refs to every live executor, for shutdown-time cleanup
    _executors = set()

    def __init__(self, interval, dummy, target, io_loop):
        # dummy is in the place of min_interval which has no semantic
        # equivalent in this implementation
        self._interval = interval
        self._target = target
        self._io_loop = io_loop
        self._stopped = True
        self._next_timeout = None
        # make sure multiple calls to wake() only schedules once
        self._scheduled = False

    # i'm about 90% sure these three methods are pymongo's safeguard against
    # forgetting to close these things themselves
    @classmethod
    def _register_executor(cls, executor):
        ref = weakref.ref(executor, cls._on_executor_deleted)
        cls._executors.add(ref)

    @classmethod
    def _on_executor_deleted(cls, ref):
        cls._executors.remove(ref)

    @classmethod
    def _shutdown_executors(cls):
        # snapshot first: close() may mutate the set via gc callbacks
        executors = list(cls._executors)
        for ref in executors:
            executor = ref()
            if executor:
                executor.close()

    def open(self):
        # (re)start; only schedule if nothing is already pending
        if self._stopped:
            if not self._next_timeout and not self._scheduled:
                self._io_loop.add_callback(self._execute)
                self._scheduled = True
        self._stopped = False

    def wake(self):
        # run the target as soon as possible instead of at the next interval
        if not self._stopped:
            # schedule immediately
            self._cancel_next_run()
            if not self._scheduled:
                self._io_loop.add_callback(self._execute)
                self._scheduled = True

    def close(self, dummy=None):
        self._stopped = True
        self._cancel_next_run()

    def join(self, timeout=None):
        # thread-API compatibility; nothing to join in the IOLoop model
        pass

    def _cancel_next_run(self):
        if self._next_timeout:
            self._io_loop.remove_timeout(self._next_timeout)

    def _execute(self):
        self._next_timeout = None
        self._scheduled = False
        # cover the case where close is called after wake
        if self._stopped:
            return
        try:
            if not self._target():
                self._stopped = True
                return
        except Exception:
            self._stopped = True
            # NOTE: this is an implementation difference from the real
            # PeriodicExecutor. the real one ends up killing the thread, while
            # this one propagates to the IOLoop handler.
            raise
        # reschedule ourselves one interval out
        iotimeout = time.time() + self._interval
        self._next_timeout = self._io_loop.add_timeout(iotimeout,
                                                       self._execute)
# Close any executors still alive at interpreter shutdown.
atexit.register(GreenletPeriodicExecutor._shutdown_executors)
class GreenletLock(object):
    # we need to replace the internal lock to avoid the following scenario:
    # greenlet 1:
    #    with lock:
    #        # do some io-blocking action, context switch to greenlet 2
    #
    # greenlet 2:
    #    with lock: # deadlock
    #
    # we can't just replace it with an RLock:
    # greenlet 1:
    #    with lock:
    #        # do some action only one thread of control is expected
    #        # do some io-blocking action, context switch to greenlet 2
    # greenlet 2:
    #    with lock:
    #        # lock is granted, potentially corrupting state for greenlet 1

    # don't need to be too fancy or thread-safe because it's only coroutines
    def __init__(self, io_loop):
        # not an rlock, so we don't need to keep track of the holder,
        # but might as well for sanity-checking
        self.holder = None
        self.waiters = []
        self.io_loop = io_loop

    def acquire(self, blocking=True):
        """Acquire the lock, yielding to the IOLoop while it is held by
        another greenlet.

        Returns True on success, False on a failed non-blocking attempt
        (mirrors threading.Lock.acquire). BUG FIX: previously the success
        path fell through and returned None, so the return value was
        unusable for truthiness checks.
        """
        current = greenlet.getcurrent()
        parent = current.parent
        assert parent, "Must be called on child greenlet"
        while self.holder:
            if not blocking:
                return False
            self.waiters.append(current)
            # release() reschedules us via io_loop.add_callback
            parent.switch()
        self.holder = current
        return True

    def release(self):
        current = greenlet.getcurrent()
        assert self.holder is current, 'must be held'
        self.holder = None
        if self.waiters:
            # wake the oldest waiter; it re-checks holder in its acquire loop
            waiter = self.waiters.pop(0)
            self.io_loop.add_callback(waiter.switch)

    def __enter__(self):
        self.acquire()

    def __exit__(self, *args):
        self.release()
class GreenletCondition(object):
    # replacement class for threading.Condition
    # only implements the methods used by pymongo.

    def __init__(self, io_loop, lock):
        # lock: a GreenletLock the caller must hold around wait()/notify_all()
        self.lock = lock
        self.waiters = []
        # maps waiting greenlet -> pending IOLoop timeout handle
        self.waiter_timeouts = {}
        self.io_loop = io_loop

    def _handle_timeout(self, timeout_gr):
        # IOLoop timeout fired: un-queue the waiter and resume it
        self.waiters.remove(timeout_gr)
        self.waiter_timeouts.pop(timeout_gr)
        timeout_gr.switch()

    def wait(self, timeout=None):
        """Release the lock, park until notify_all() or the timeout fires,
        then re-acquire the lock before returning."""
        current = greenlet.getcurrent()
        parent = current.parent
        assert parent, "Must be called on child greenlet"
        assert self.lock.holder is current, 'must hold lock'

        # yield back to the IOLoop
        self.waiters.append(current)
        if timeout:
            callback = functools.partial(self._handle_timeout, current)
            iotimeout = timeout + time.time()
            self.waiter_timeouts[current] = self.io_loop.add_timeout(iotimeout,
                                                                     callback)
        self.lock.release()

        # we'll be returned to by the timeout or by notify_all
        parent.switch()

        self.lock.acquire()

    def notify_all(self):
        """Wake every waiter and cancel their pending timeouts."""
        current = greenlet.getcurrent()
        assert self.lock.holder is current, 'must hold lock'
        # detach the list first so newly-arriving waiters queue separately
        waiters, self.waiters = self.waiters, []
        for waiter in waiters:
            self.io_loop.add_callback(waiter.switch)
            if waiter in self.waiter_timeouts:
                timeout = self.waiter_timeouts.pop(waiter)
                self.io_loop.remove_timeout(timeout)
class GreenletClient(object):
    """Builds a MongoClient wired to the greenlet-based pool/event classes by
    spinning the IOLoop just long enough to complete the connection."""

    client = None

    @classmethod
    def sync_connect(cls, *args, **kwargs):
        """
        Makes a synchronous connection to pymongo using Greenlets
        Fire up the IOLoop to do the connect, then stop it.
        """
        assert not greenlet.getcurrent().parent, "must be run on root greenlet"

        def _inner_connect(io_loop, *args, **kwargs):
            # asynchronously create a MongoClient using our IOLoop
            try:
                kwargs['use_greenlets'] = False
                kwargs['_pool_class'] = GreenletPool
                kwargs['_event_class'] = functools.partial(GreenletEvent,
                                                           io_loop)
                cls.client = pymongo.mongo_client.MongoClient(*args, **kwargs)
            except Exception:
                # FIX: narrowed from a bare `except:` so KeyboardInterrupt
                # and SystemExit are no longer swallowed.
                logging.exception("Failed to connect to MongoDB")
            finally:
                # stop the loop whether or not the connect succeeded, so
                # sync_connect() below can return
                io_loop.stop()

        # clear cls.client so we can't return an old one
        if cls.client is not None:
            try:
                # manually close old unused connection
                cls.client.close()
            except Exception:
                # FIX: narrowed from a bare `except:` (best-effort close)
                logging.exception("Clearing old pymongo connection")
            cls.client = None

        # do the connection
        io_loop = ioloop.IOLoop.instance()
        conn_gr = greenlet.greenlet(_inner_connect)

        # run the connect when the ioloop starts
        io_loop.add_callback(functools.partial(conn_gr.switch,
                                               io_loop, *args, **kwargs))

        # start the ioloop; _inner_connect stops it when done
        io_loop.start()
        return cls.client
| {
"content_hash": "b32befda3d29b9a95a870e18ba00cd29",
"timestamp": "",
"source": "github",
"line_count": 681,
"max_line_length": 80,
"avg_line_length": 34.57856093979442,
"alnum_prop": 0.5814081875318499,
"repo_name": "ContextLogic/mongoengine",
"id": "fd72c4e519ffa0c60272c24f1202ebdfe5edc0cc",
"size": "23548",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mongoengine/pymongo_greenlet.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "PHP",
"bytes": "2857"
},
{
"name": "Python",
"bytes": "375160"
}
],
"symlink_target": ""
} |
""" Tests for VPN in VPC
"""
# Import Local Modules
from marvin.codes import PASS, FAILED
from marvin.cloudstackTestCase import cloudstackTestCase
from marvin.lib.utils import (validateList,
wait_until)
from marvin.lib.base import (Account,
VPC,
VpcOffering,
ServiceOffering,
NetworkOffering,
Network,
PublicIPAddress,
NATRule,
NetworkACLList,
VirtualMachine,
Vpn,
VpnCustomerGateway,
VpnUser
)
from marvin.sshClient import SshClient
from marvin.lib.common import (get_zone,
get_domain,
get_test_template)
from nose.plugins.attrib import attr
import logging
import time
class Services:
    """Test VPC VPN Services.

    Plain data holder: self.services is the dictionary of account, offering,
    network, VPN and VM parameters consumed by the test classes below.
    """

    def __init__(self):
        self.services = {
            "account": {
                "email": "test@test.com",
                "firstname": "Test",
                "lastname": "User",
                "username": "test",
                "password": "password",
            },
            "host1": None,
            "host2": None,
            "compute_offering": {
                "name": "Tiny Instance",
                "displaytext": "Tiny Instance",
                "cpunumber": 1,
                "cpuspeed": 100,
                "memory": 128,
            },
            # VPC tier network offering with VPN support
            "network_offering": {
                "name": 'VPC Network offering',
                "displaytext": 'VPC Network',
                "guestiptype": 'Isolated',
                "supportedservices": 'Vpn,Dhcp,Dns,SourceNat,Lb,PortForwarding,UserData,StaticNat,NetworkACL',
                "traffictype": 'GUEST',
                "availability": 'Optional',
                "useVpc": 'on',
                "serviceProviderList": {
                    "Vpn": 'VpcVirtualRouter',
                    "Dhcp": 'VpcVirtualRouter',
                    "Dns": 'VpcVirtualRouter',
                    "SourceNat": 'VpcVirtualRouter',
                    "Lb": 'VpcVirtualRouter',
                    "PortForwarding": 'VpcVirtualRouter',
                    "UserData": 'VpcVirtualRouter',
                    "StaticNat": 'VpcVirtualRouter',
                    "NetworkACL": 'VpcVirtualRouter'
                },
            },
            # Variant with internal (InternalLbVm) load balancing
            "network_offering_internal_lb": {
                "name": 'VPC Network Internal Lb offering',
                "displaytext": 'VPC Network internal lb',
                "guestiptype": 'Isolated',
                "supportedservices": 'Dhcp,Dns,SourceNat,PortForwarding,UserData,StaticNat,NetworkACL,Lb',
                "traffictype": 'GUEST',
                "availability": 'Optional',
                "useVpc": 'on',
                "serviceCapabilityList": {
                    "Lb": {
                        "SupportedLbIsolation": 'dedicated',
                        "lbSchemes": 'internal'
                    }
                },
                "serviceProviderList": {
                    "Dhcp": 'VpcVirtualRouter',
                    "Dns": 'VpcVirtualRouter',
                    "SourceNat": 'VpcVirtualRouter',
                    "PortForwarding": 'VpcVirtualRouter',
                    "UserData": 'VpcVirtualRouter',
                    "StaticNat": 'VpcVirtualRouter',
                    "NetworkACL": 'VpcVirtualRouter',
                    "Lb": 'InternalLbVm'
                },
                "egress_policy": "true",
            },
            "vpc_offering": {
                "name": 'VPC off',
                "displaytext": 'VPC off',
                "supportedservices": 'Dhcp,Dns,SourceNat,PortForwarding,Vpn,Lb,UserData,StaticNat',
            },
            "redundant_vpc_offering": {
                "name": 'Redundant VPC off',
                "displaytext": 'Redundant VPC off',
                "supportedservices": 'Dhcp,Dns,SourceNat,PortForwarding,Vpn,Lb,UserData,StaticNat',
                "serviceProviderList": {
                    "Vpn": 'VpcVirtualRouter',
                    "Dhcp": 'VpcVirtualRouter',
                    "Dns": 'VpcVirtualRouter',
                    "SourceNat": 'VpcVirtualRouter',
                    "PortForwarding": 'VpcVirtualRouter',
                    "Lb": 'VpcVirtualRouter',
                    "UserData": 'VpcVirtualRouter',
                    "StaticNat": 'VpcVirtualRouter',
                    "NetworkACL": 'VpcVirtualRouter'
                },
                "serviceCapabilityList": {
                    "SourceNat": {
                        "RedundantRouter": 'true'
                    }
                },
            },
            # CIDRs for vpc1 and vpc2 are disjoint so the two VPCs can be
            # connected over a site-to-site VPN
            "vpc": {
                "name": "TestVPC",
                "displaytext": "TestVPC",
                "cidr": '10.1.0.0/16'
            },
            "vpc1": {
                "name": "TestVPC",
                "displaytext": "VPC1",
                "cidr": '10.1.0.0/16'
            },
            "vpc2": {
                "name": "TestVPC",
                "displaytext": "VPC2",
                "cidr": '10.3.0.0/16'
            },
            "network_1": {
                "name": "Test Network",
                "displaytext": "Test Network",
                "netmask": '255.255.255.0',
                "gateway": "10.1.1.1"
            },
            "network_2": {
                "name": "Test Network",
                "displaytext": "Test Network",
                "netmask": '255.255.255.0',
                "gateway": "10.3.1.1"
            },
            "vpn": {
                "vpn_user": "root",
                "vpn_pass": "Md1sdc",
                "vpn_pass_fail": "abc!123",  # too short
                "iprange": "10.3.2.1-10.3.2.10",
                "fordisplay": "true"
            },
            "vpncustomergateway": {
                "esppolicy": "3des-md5;modp1536",
                "ikepolicy": "3des-md5;modp1536",
                "ipsecpsk": "ipsecpsk"
            },
            "natrule": {
                "protocol": "TCP",
                "cidrlist": '0.0.0.0/0',
            },
            "http_rule": {
                "privateport": 80,
                "publicport": 80,
                "startport": 80,
                "endport": 80,
                "cidrlist": '0.0.0.0/0',
                "protocol": "TCP"
            },
            "virtual_machine": {
                "displayname": "Test VM",
                "username": "root",
                "password": "password",
                "ssh_port": 22,
                "privateport": 22,
                "publicport": 22,
                "protocol": 'TCP',
            }
        }
class TestVpcRemoteAccessVpn(cloudstackTestCase):
    """Verifies that Remote Access VPN can be enabled on a VPC and that a
    VPN user can be added.

    Fixes applied: replaced the deprecated ``assert_`` alias with
    ``assertTrue``/``assertIsNotNone``/``assertEqual``; made logging
    consistently use ``self.logger``; switched the account-created debug
    line to lazy %-args (the original embedded a string line-continuation
    that injected spaces into the message).
    """

    @classmethod
    def setUpClass(cls):
        cls.logger = logging.getLogger('TestVPCRemoteAccessVPN')
        cls.stream_handler = logging.StreamHandler()
        cls.logger.setLevel(logging.DEBUG)
        cls.logger.addHandler(cls.stream_handler)

        testClient = super(TestVpcRemoteAccessVpn, cls).getClsTestClient()
        cls.apiclient = testClient.getApiClient()
        cls.services = Services().services

        cls.zone = get_zone(cls.apiclient, testClient.getZoneForTests())
        cls.domain = get_domain(cls.apiclient)
        cls._cleanup = []

        cls.compute_offering = ServiceOffering.create(
            cls.apiclient,
            cls.services["compute_offering"]
        )
        cls._cleanup.append(cls.compute_offering)

        cls.account = Account.create(
            cls.apiclient, services=cls.services["account"])
        cls._cleanup.append(cls.account)

        cls.hypervisor = testClient.getHypervisorInfo()
        cls.template = get_test_template(cls.apiclient, cls.zone.id, cls.hypervisor)
        if cls.template == FAILED:
            assert False, "get_test_template() failed to return template"

        cls.logger.debug("Successfully created account: %s, id: %s",
                         cls.account.name, cls.account.id)
        return

    @attr(tags=["advanced"], required_hardware="true")
    def test_01_vpc_remote_access_vpn(self):
        """Test Remote Access VPN in VPC"""
        self.logger.debug("Starting test: test_01_vpc_remote_access_vpn")

        # 0) Get the default network offering for VPC
        self.logger.debug("Retrieving default VPC offering")
        networkOffering = NetworkOffering.list(
            self.apiclient, name="DefaultIsolatedNetworkOfferingForVpcNetworks")
        self.assertTrue(networkOffering is not None and len(
            networkOffering) > 0, "No VPC based network offering")

        # 1) Create VPC
        vpcOffering = VpcOffering.list(self.apiclient, name="Default VPC offering")
        self.assertTrue(vpcOffering is not None and len(
            vpcOffering) > 0, "No VPC offerings found")
        vpc = None
        try:
            vpc = VPC.create(
                apiclient=self.apiclient,
                services=self.services["vpc"],
                networkDomain="vpc.vpn",
                vpcofferingid=vpcOffering[0].id,
                zoneid=self.zone.id,
                account=self.account.name,
                domainid=self.domain.id
            )
            self.cleanup.append(vpc)
        except Exception as e:
            self.fail(e)
        finally:
            self.assertIsNotNone(vpc, "VPC creation failed")
            self.logger.debug("VPC %s created" % (vpc.id))

        try:
            # 2) Create network in VPC
            ntwk = Network.create(
                apiclient=self.apiclient,
                services=self.services["network_1"],
                accountid=self.account.name,
                domainid=self.domain.id,
                networkofferingid=networkOffering[0].id,
                zoneid=self.zone.id,
                vpcid=vpc.id
            )
        except Exception as e:
            self.fail(e)
        finally:
            self.assertIsNotNone(ntwk, "Network failed to create")
            self.cleanup.append(ntwk)
            self.logger.debug(
                "Network %s created in VPC %s" % (ntwk.id, vpc.id))

        try:
            # 3) Deploy a vm
            vm = VirtualMachine.create(self.apiclient, services=self.services["virtual_machine"],
                                       templateid=self.template.id,
                                       zoneid=self.zone.id,
                                       accountid=self.account.name,
                                       domainid=self.domain.id,
                                       serviceofferingid=self.compute_offering.id,
                                       networkids=ntwk.id,
                                       hypervisor=self.hypervisor
                                       )
            self.assertIsNotNone(vm, "VM failed to deploy")
            self.cleanup.append(vm)
            self.assertEqual(vm.state, 'Running', "VM is not running")
            # self.logger rather than self.debug, for consistency with the
            # rest of this class
            self.logger.debug("VM %s deployed in VPC %s" % (vm.id, vpc.id))
        except Exception as e:
            self.fail(e)
        finally:
            self.logger.debug("Deployed virtual machine: OK")

        try:
            # 4) Enable VPN for VPC by looking up the source-NAT IP
            src_nat_list = PublicIPAddress.list(
                self.apiclient,
                account=self.account.name,
                domainid=self.account.domainid,
                listall=True,
                issourcenat=True,
                vpcid=vpc.id
            )
            ip = src_nat_list[0]
        except Exception as e:
            self.fail(e)
        finally:
            self.logger.debug("Acquired public ip address: OK")

        vpn = None
        try:
            vpn = Vpn.create(self.apiclient,
                             publicipid=ip.id,
                             account=self.account.name,
                             domainid=self.account.domainid,
                             iprange=self.services["vpn"]["iprange"],
                             fordisplay=self.services["vpn"]["fordisplay"]
                             )
        except Exception as e:
            self.fail(e)
        finally:
            self.assertIsNotNone(vpn, "Failed to create Remote Access VPN")
            self.logger.debug("Created Remote Access VPN: OK")

        vpnUser = None
        # 5) Add VPN user for VPC
        try:
            vpnUser = VpnUser.create(self.apiclient,
                                     account=self.account.name,
                                     domainid=self.account.domainid,
                                     username=self.services["vpn"]["vpn_user"],
                                     password=self.services["vpn"]["vpn_pass"]
                                     )
        except Exception as e:
            self.fail(e)
        finally:
            self.assertIsNotNone(
                vpnUser, "Failed to create Remote Access VPN User")
            self.logger.debug("Created VPN User: OK")

        # TODO: Add an actual remote vpn connection test from a remote vpc

        try:
            # 9) Disable VPN for VPC
            vpn.delete(self.apiclient)
        except Exception as e:
            self.fail(e)
        finally:
            self.logger.debug("Deleted the Remote Access VPN: OK")

    @classmethod
    def tearDownClass(cls):
        super(TestVpcRemoteAccessVpn, cls).tearDownClass()

    def setUp(self):
        # Per-test state: fresh API client, hypervisor info and cleanup list.
        self.apiclient = self.testClient.getApiClient()
        self.hypervisor = self.testClient.getHypervisorInfo()
        self.cleanup = []

    def tearDown(self):
        super(TestVpcRemoteAccessVpn, self).tearDown()
class TestVpcSite2SiteVpn(cloudstackTestCase):
    """Verify a site-to-site IPsec VPN between two VPCs.

    Creates two VPCs (one tier network and one VM each), enables a VPN
    gateway on both sides, wires them together through customer gateways and
    VPN connections, then sshes into the VM in VPC2 and pings the VM in VPC1
    across the tunnel.
    """

    @classmethod
    def setUpClass(cls):
        """Class fixtures: logger, API client, zone/domain lookup, a compute
        offering, a test account and a hypervisor-matching template."""
        cls.logger = logging.getLogger('TestVPCSite2SiteVPN')
        cls.stream_handler = logging.StreamHandler()
        cls.logger.setLevel(logging.DEBUG)
        cls.logger.addHandler(cls.stream_handler)
        testClient = super(TestVpcSite2SiteVpn, cls).getClsTestClient()
        cls.apiclient = testClient.getApiClient()
        cls.services = Services().services
        cls.zone = get_zone(cls.apiclient, testClient.getZoneForTests())
        cls.domain = get_domain(cls.apiclient)
        cls._cleanup = []
        cls.compute_offering = ServiceOffering.create(
            cls.apiclient,
            cls.services["compute_offering"]
        )
        cls._cleanup.append(cls.compute_offering)
        cls.account = Account.create(
            cls.apiclient, services=cls.services["account"])
        cls._cleanup.append(cls.account)
        cls.hypervisor = testClient.getHypervisorInfo()
        cls.template = get_test_template(cls.apiclient, cls.zone.id, cls.hypervisor)
        if cls.template == FAILED:
            assert False, "get_test_template() failed to return template"
        # BUGFIX: this message was built with a backslash continuation inside
        # the string literal, embedding a long run of spaces in the logged
        # text; normalized to a single-line message.
        cls.logger.debug("Successfully created account: %s, id: %s" %
                         (cls.account.name, cls.account.id))
        return

    def setUp(self):
        """Per-test fixtures: fresh API client, hypervisor info, cleanup list."""
        self.apiclient = self.testClient.getApiClient()
        self.hypervisor = self.testClient.getHypervisorInfo()
        self.cleanup = []

    def _get_ssh_client(self, virtual_machine, services, retries):
        """ Setup ssh client connection and return connection
        vm requires attributes public_ip, public_port, username, password """
        try:
            ssh_client = SshClient(
                virtual_machine.public_ip,
                services["virtual_machine"]["ssh_port"],
                services["virtual_machine"]["username"],
                services["virtual_machine"]["password"],
                retries)
        except Exception as e:
            # BUGFIX: the original used '"literal" % e' with no %s
            # placeholder, which raises TypeError instead of reporting the
            # actual ssh error through self.fail().
            self.fail("Unable to create ssh connection: %s" % e)
        self.assertIsNotNone(
            ssh_client, "Failed to setup ssh connection to vm=%s on public_ip=%s" % (virtual_machine.name, virtual_machine.public_ip))
        return ssh_client

    def _create_natrule(self, vpc, vm, public_port, private_port, public_ip, network, services=None):
        """Create a port-forward (NAT) rule for vm on public_ip and record
        the resulting ssh endpoint (ssh_ip/public_ip/public_port) on vm."""
        self.logger.debug("Creating NAT rule in network for vm with public IP")
        if not services:
            # Default to the shared natrule template, patched for these ports.
            self.services["natrule"]["privateport"] = private_port
            self.services["natrule"]["publicport"] = public_port
            self.services["natrule"]["startport"] = public_port
            self.services["natrule"]["endport"] = public_port
            services = self.services["natrule"]
        nat_rule = NATRule.create(
            apiclient=self.apiclient,
            services=services,
            ipaddressid=public_ip.ipaddress.id,
            virtual_machine=vm,
            networkid=network.id
        )
        self.assertIsNotNone(
            nat_rule, "Failed to create NAT Rule for %s" % public_ip.ipaddress.ipaddress)
        self.logger.debug(
            "Adding NetworkACL rules to make NAT rule accessible")
        vm.ssh_ip = nat_rule.ipaddress
        vm.public_ip = nat_rule.ipaddress
        vm.public_port = int(public_port)
        return nat_rule

    def _validate_vpc_offering(self, vpc_offering):
        """Assert that vpc_offering is listable and its name round-trips."""
        self.logger.debug("Check if the VPC offering is created successfully?")
        vpc_offs = VpcOffering.list(
            self.apiclient,
            id=vpc_offering.id
        )
        offering_list = validateList(vpc_offs)
        self.assertEqual(offering_list[0],
                         PASS,
                         "List VPC offerings should return a valid list"
                         )
        self.assertEqual(
            vpc_offering.name,
            vpc_offs[0].name,
            "Name of the VPC offering should match with listVPCOff data"
        )
        self.logger.debug(
            "VPC offering is created successfully - %s" %
            vpc_offering.name)
        return

    def _create_vpc_offering(self, offering_name):
        """Create, validate and register for cleanup a VPC offering built
        from self.services[offering_name]; returns None for a None name."""
        vpc_off = None
        if offering_name is not None:
            self.logger.debug("Creating VPC offering: %s", offering_name)
            vpc_off = VpcOffering.create(
                self.apiclient,
                self.services[offering_name]
            )
            self._validate_vpc_offering(vpc_off)
            self.cleanup.append(vpc_off)
        return vpc_off

    @attr(tags=["advanced"], required_hardware="true")
    def test_01_vpc_site2site_vpn(self):
        """Test Site 2 Site VPN Across VPCs"""
        self.logger.debug("Starting test: test_01_vpc_site2site_vpn")
        # 0) Get the default network offering for VPC
        networkOffering = NetworkOffering.list(
            self.apiclient, name="DefaultIsolatedNetworkOfferingForVpcNetworks")
        # NOTE: self.assert_ is a deprecated alias removed in Python 3.12;
        # replaced by assertTrue throughout this class.
        self.assertTrue(networkOffering is not None and len(
            networkOffering) > 0, "No VPC based network offering")
        # Create and Enable VPC offering
        vpc_offering = self._create_vpc_offering('vpc_offering')
        self.assertTrue(vpc_offering is not None, "Failed to create VPC Offering")
        vpc_offering.update(self.apiclient, state='Enabled')
        vpc1 = None
        # Create VPC 1
        try:
            vpc1 = VPC.create(
                apiclient=self.apiclient,
                services=self.services["vpc"],
                networkDomain="vpc1.vpn",
                vpcofferingid=vpc_offering.id,
                zoneid=self.zone.id,
                account=self.account.name,
                domainid=self.domain.id
            )
        except Exception as e:
            self.fail(e)
        finally:
            self.assertTrue(vpc1 is not None, "VPC1 creation failed")
        self.cleanup.append(vpc1)
        self.logger.debug("VPC1 %s created" % vpc1.id)
        vpc2 = None
        # Create VPC 2
        try:
            vpc2 = VPC.create(
                apiclient=self.apiclient,
                services=self.services["vpc2"],
                networkDomain="vpc2.vpn",
                vpcofferingid=vpc_offering.id,
                zoneid=self.zone.id,
                account=self.account.name,
                domainid=self.account.domainid
            )
        except Exception as e:
            self.fail(e)
        finally:
            self.assertTrue(vpc2 is not None, "VPC2 creation failed")
        self.cleanup.append(vpc2)
        self.logger.debug("VPC2 %s created" % vpc2.id)
        default_acl = NetworkACLList.list(
            self.apiclient, name="default_allow")[0]
        ntwk1 = None
        # Create network in VPC 1
        try:
            ntwk1 = Network.create(
                apiclient=self.apiclient,
                services=self.services["network_1"],
                accountid=self.account.name,
                domainid=self.account.domainid,
                networkofferingid=networkOffering[0].id,
                zoneid=self.zone.id,
                vpcid=vpc1.id,
                aclid=default_acl.id
            )
        except Exception as e:
            self.fail(e)
        finally:
            self.assertIsNotNone(ntwk1, "Network failed to create")
        self.cleanup.append(ntwk1)
        self.logger.debug("Network %s created in VPC %s" % (ntwk1.id, vpc1.id))
        ntwk2 = None
        # Create network in VPC 2
        try:
            ntwk2 = Network.create(
                apiclient=self.apiclient,
                services=self.services["network_2"],
                accountid=self.account.name,
                domainid=self.account.domainid,
                networkofferingid=networkOffering[0].id,
                zoneid=self.zone.id,
                vpcid=vpc2.id,
                aclid=default_acl.id
            )
        except Exception as e:
            self.fail(e)
        finally:
            self.assertIsNotNone(ntwk2, "Network failed to create")
        self.cleanup.append(ntwk2)
        self.logger.debug("Network %s created in VPC %s" % (ntwk2.id, vpc2.id))
        vm1 = None
        # Deploy a vm in network 1 (comment previously mislabeled "network 2")
        try:
            vm1 = VirtualMachine.create(self.apiclient, services=self.services["virtual_machine"],
                                        templateid=self.template.id,
                                        zoneid=self.zone.id,
                                        accountid=self.account.name,
                                        domainid=self.account.domainid,
                                        serviceofferingid=self.compute_offering.id,
                                        networkids=ntwk1.id,
                                        hypervisor=self.hypervisor
                                        )
        except Exception as e:
            self.fail(e)
        finally:
            self.assertTrue(vm1 is not None, "VM failed to deploy")
            self.assertTrue(vm1.state == 'Running', "VM is not running")
            self.cleanup.append(vm1)
        self.logger.debug("VM %s deployed in VPC %s" % (vm1.id, vpc1.id))
        vm2 = None
        # Deploy a vm in network 2
        try:
            vm2 = VirtualMachine.create(self.apiclient, services=self.services["virtual_machine"],
                                        templateid=self.template.id,
                                        zoneid=self.zone.id,
                                        accountid=self.account.name,
                                        domainid=self.account.domainid,
                                        serviceofferingid=self.compute_offering.id,
                                        networkids=ntwk2.id,
                                        hypervisor=self.hypervisor
                                        )
        except Exception as e:
            self.fail(e)
        finally:
            self.assertTrue(vm2 is not None, "VM failed to deploy")
            self.assertTrue(vm2.state == 'Running', "VM is not running")
            self.cleanup.append(vm2)
        # CONSISTENCY: was self.debug(); use the class logger as elsewhere.
        self.logger.debug("VM %s deployed in VPC %s" % (vm2.id, vpc2.id))
        # 4) Enable Site-to-Site VPN for VPC
        vpn1_response = Vpn.createVpnGateway(self.apiclient, vpc1.id)
        self.assertTrue(
            vpn1_response is not None, "Failed to enable VPN Gateway 1")
        self.logger.debug("VPN gateway for VPC %s enabled" % vpc1.id)
        vpn2_response = Vpn.createVpnGateway(self.apiclient, vpc2.id)
        self.assertTrue(
            vpn2_response is not None, "Failed to enable VPN Gateway 2")
        self.logger.debug("VPN gateway for VPC %s enabled" % vpc2.id)
        # 5) Add VPN Customer gateway info
        src_nat_list = PublicIPAddress.list(
            self.apiclient,
            account=self.account.name,
            domainid=self.account.domainid,
            listall=True,
            issourcenat=True,
            vpcid=vpc1.id
        )
        ip1 = src_nat_list[0]
        src_nat_list = PublicIPAddress.list(
            self.apiclient,
            account=self.account.name,
            domainid=self.account.domainid,
            listall=True,
            issourcenat=True,
            vpcid=vpc2.id
        )
        ip2 = src_nat_list[0]
        services = self.services["vpncustomergateway"]
        customer1_response = VpnCustomerGateway.create(
            self.apiclient, services, "Peer VPC1", ip1.ipaddress, vpc1.cidr, self.account.name, self.domain.id)
        self.logger.debug("VPN customer gateway added for VPC %s enabled" % vpc1.id)
        self.logger.debug(vars(customer1_response))
        customer2_response = VpnCustomerGateway.create(
            self.apiclient, services, "Peer VPC2", ip2.ipaddress, vpc2.cidr, self.account.name, self.domain.id)
        self.logger.debug("VPN customer gateway added for VPC %s enabled" % vpc2.id)
        self.logger.debug(vars(customer2_response))
        # 6) Connect two VPCs (first side is passive)
        vpnconn1_response = Vpn.createVpnConnection(
            self.apiclient, customer1_response.id, vpn2_response['id'], True)
        self.logger.debug("VPN passive connection created for VPC %s" % vpc2.id)
        vpnconn2_response = Vpn.createVpnConnection(
            self.apiclient, customer2_response.id, vpn1_response['id'])
        self.logger.debug("VPN connection created for VPC %s" % vpc1.id)

        def checkVpnConnected():
            # wait_until-compatible probe: returns (done, result).
            connections = Vpn.listVpnConnection(
                self.apiclient,
                listall='true',
                vpcid=vpc2.id)
            if isinstance(connections, list):
                return connections[0].state == 'Connected', None
            return False, None
        # Wait up to 60 seconds for passive connection to show up as Connected
        res, _ = wait_until(2, 30, checkVpnConnected)
        if not res:
            self.fail("Failed to connect between VPCs, see VPN state as Connected")
        # acquire an extra ip address to use to ssh into vm2
        try:
            vm2.public_ip = PublicIPAddress.create(
                apiclient=self.apiclient,
                accountid=self.account.name,
                zoneid=self.zone.id,
                domainid=self.account.domainid,
                services=self.services,
                networkid=ntwk2.id,
                vpcid=vpc2.id)
        except Exception as e:
            self.fail(e)
        finally:
            self.assertTrue(
                vm2.public_ip is not None, "Failed to aqcuire public ip for vm2")
        natrule = None
        # Create port forward to be able to ssh into vm2
        try:
            natrule = self._create_natrule(
                vpc2, vm2, 22, 22, vm2.public_ip, ntwk2)
        except Exception as e:
            self.fail(e)
        finally:
            self.assertTrue(
                natrule is not None, "Failed to create portforward for vm2")
        time.sleep(20)
        # setup ssh connection to vm2
        ssh_client = self._get_ssh_client(vm2, self.services, 10)
        if ssh_client:
            # run ping test
            packet_loss = ssh_client.execute("/bin/ping -c 3 -t 10 " + vm1.nic[0].ipaddress + " | grep packet | sed 's/.*received, //g' | sed 's/[% ]*packet.*//g'")[0]
            # during startup, some packets may not reply due to link/ipsec-route setup
            self.assertTrue(int(packet_loss) < 50, "Ping did not succeed")
        else:
            self.fail("Failed to setup ssh connection to %s" % vm2.public_ip)

    @classmethod
    def tearDownClass(cls):
        super(TestVpcSite2SiteVpn, cls).tearDownClass()

    def tearDown(self):
        super(TestVpcSite2SiteVpn, self).tearDown()
class TestRVPCSite2SiteVpn(cloudstackTestCase):
    """Verify a site-to-site IPsec VPN between two redundant-router VPCs.

    Same scenario as TestVpcSite2SiteVpn, but both VPCs are built from a
    redundant VPC offering.
    """

    @classmethod
    def setUpClass(cls):
        """Class fixtures: logger, API client, zone/domain lookup, a compute
        offering, a test account and a hypervisor-matching template."""
        cls.logger = logging.getLogger('TestRVPCSite2SiteVPN')
        cls.stream_handler = logging.StreamHandler()
        cls.logger.setLevel(logging.DEBUG)
        cls.logger.addHandler(cls.stream_handler)
        testClient = super(TestRVPCSite2SiteVpn, cls).getClsTestClient()
        cls.apiclient = testClient.getApiClient()
        cls.services = Services().services
        cls.zone = get_zone(cls.apiclient, testClient.getZoneForTests())
        cls.domain = get_domain(cls.apiclient)
        cls._cleanup = []
        cls.compute_offering = ServiceOffering.create(
            cls.apiclient,
            cls.services["compute_offering"]
        )
        cls._cleanup.append(cls.compute_offering)
        cls.account = Account.create(
            cls.apiclient, services=cls.services["account"])
        cls._cleanup.append(cls.account)
        cls.hypervisor = testClient.getHypervisorInfo()
        cls.template = get_test_template(cls.apiclient, cls.zone.id, cls.hypervisor)
        if cls.template == FAILED:
            assert False, "get_test_template() failed to return template"
        # BUGFIX: this message was built with a backslash continuation inside
        # the string literal, embedding a long run of spaces; normalized.
        cls.logger.debug("Successfully created account: %s, id: %s" %
                         (cls.account.name, cls.account.id))
        return

    def _validate_vpc_offering(self, vpc_offering):
        """Assert that vpc_offering is listable and its name round-trips."""
        self.logger.debug("Check if the VPC offering is created successfully?")
        vpc_offs = VpcOffering.list(
            self.apiclient,
            id=vpc_offering.id
        )
        offering_list = validateList(vpc_offs)
        self.assertEqual(offering_list[0],
                         PASS,
                         "List VPC offerings should return a valid list"
                         )
        self.assertEqual(
            vpc_offering.name,
            vpc_offs[0].name,
            "Name of the VPC offering should match with listVPCOff data"
        )
        self.logger.debug(
            "VPC offering is created successfully - %s" %
            vpc_offering.name)
        return

    def _create_vpc_offering(self, offering_name):
        """Create, validate and register for cleanup a VPC offering built
        from self.services[offering_name]; returns None for a None name."""
        vpc_off = None
        if offering_name is not None:
            self.logger.debug("Creating VPC offering: %s", offering_name)
            vpc_off = VpcOffering.create(
                self.apiclient,
                self.services[offering_name]
            )
            self._validate_vpc_offering(vpc_off)
            self.cleanup.append(vpc_off)
        return vpc_off

    def _get_ssh_client(self, virtual_machine, services, retries):
        """ Setup ssh client connection and return connection
        vm requires attributes public_ip, public_port, username, password """
        try:
            ssh_client = SshClient(
                virtual_machine.public_ip,
                services["virtual_machine"]["ssh_port"],
                services["virtual_machine"]["username"],
                services["virtual_machine"]["password"],
                retries)
        except Exception as e:
            self.fail("Unable to create ssh connection: %s" % e)
        self.assertIsNotNone(
            ssh_client, "Failed to setup ssh connection to vm=%s on public_ip=%s" % (virtual_machine.name, virtual_machine.public_ip))
        return ssh_client

    def _create_natrule(self, vpc, vm, public_port, private_port, public_ip, network, services=None):
        """Create a port-forward (NAT) rule for vm on public_ip and record
        the resulting ssh endpoint (ssh_ip/public_ip/public_port) on vm."""
        self.logger.debug("Creating NAT rule in network for vm with public IP")
        if not services:
            # Default to the shared natrule template, patched for these ports.
            self.services["natrule"]["privateport"] = private_port
            self.services["natrule"]["publicport"] = public_port
            self.services["natrule"]["startport"] = public_port
            self.services["natrule"]["endport"] = public_port
            services = self.services["natrule"]
        nat_rule = NATRule.create(
            apiclient=self.apiclient,
            services=services,
            ipaddressid=public_ip.ipaddress.id,
            virtual_machine=vm,
            networkid=network.id
        )
        self.assertIsNotNone(
            nat_rule, "Failed to create NAT Rule for %s" % public_ip.ipaddress.ipaddress)
        self.logger.debug(
            "Adding NetworkACL rules to make NAT rule accessible")
        vm.ssh_ip = nat_rule.ipaddress
        vm.public_ip = nat_rule.ipaddress
        vm.public_port = int(public_port)
        return nat_rule

    @attr(tags=["advanced"], required_hardware="true")
    def test_01_redundant_vpc_site2site_vpn(self):
        """Test Site 2 Site VPN Across redundant VPCs"""
        # BUGFIX: this log line previously said "test_02_..." although the
        # method is test_01_redundant_vpc_site2site_vpn.
        self.logger.debug("Starting test: test_01_redundant_vpc_site2site_vpn")
        # 0) Get the default network offering for VPC
        networkOffering = NetworkOffering.list(
            self.apiclient, name="DefaultIsolatedNetworkOfferingForVpcNetworks")
        # NOTE: self.assert_ is a deprecated alias removed in Python 3.12;
        # replaced by assertTrue throughout this class.
        self.assertTrue(networkOffering is not None and len(
            networkOffering) > 0, "No VPC based network offering")
        # Create and enable redundant VPC offering
        redundant_vpc_offering = self._create_vpc_offering(
            'redundant_vpc_offering')
        self.assertTrue(redundant_vpc_offering is not None,
                        "Failed to create redundant VPC Offering")
        redundant_vpc_offering.update(self.apiclient, state='Enabled')
        # Create VPC 1
        vpc1 = None
        try:
            vpc1 = VPC.create(
                apiclient=self.apiclient,
                services=self.services["vpc"],
                networkDomain="vpc1.vpn",
                vpcofferingid=redundant_vpc_offering.id,
                zoneid=self.zone.id,
                account=self.account.name,
                domainid=self.domain.id
            )
        except Exception as e:
            self.fail(e)
        finally:
            self.assertTrue(vpc1 is not None, "VPC1 creation failed")
        self.cleanup.append(vpc1)
        self.logger.debug("VPC1 %s created" % vpc1.id)
        # Create VPC 2
        vpc2 = None
        try:
            vpc2 = VPC.create(
                apiclient=self.apiclient,
                services=self.services["vpc2"],
                networkDomain="vpc2.vpn",
                vpcofferingid=redundant_vpc_offering.id,
                zoneid=self.zone.id,
                account=self.account.name,
                domainid=self.account.domainid
            )
        except Exception as e:
            self.fail(e)
        finally:
            self.assertTrue(vpc2 is not None, "VPC2 creation failed")
        self.cleanup.append(vpc2)
        self.logger.debug("VPC2 %s created" % vpc2.id)
        default_acl = NetworkACLList.list(
            self.apiclient, name="default_allow")[0]
        # Create network in VPC 1
        ntwk1 = None
        try:
            ntwk1 = Network.create(
                apiclient=self.apiclient,
                services=self.services["network_1"],
                accountid=self.account.name,
                domainid=self.account.domainid,
                networkofferingid=networkOffering[0].id,
                zoneid=self.zone.id,
                vpcid=vpc1.id,
                aclid=default_acl.id
            )
        except Exception as e:
            self.fail(e)
        finally:
            self.assertIsNotNone(ntwk1, "Network failed to create")
        self.cleanup.append(ntwk1)
        self.logger.debug("Network %s created in VPC %s" % (ntwk1.id, vpc1.id))
        # Create network in VPC 2
        ntwk2 = None
        try:
            ntwk2 = Network.create(
                apiclient=self.apiclient,
                services=self.services["network_2"],
                accountid=self.account.name,
                domainid=self.account.domainid,
                networkofferingid=networkOffering[0].id,
                zoneid=self.zone.id,
                vpcid=vpc2.id,
                aclid=default_acl.id
            )
        except Exception as e:
            self.fail(e)
        finally:
            self.assertIsNotNone(ntwk2, "Network failed to create")
        self.cleanup.append(ntwk2)
        self.logger.debug("Network %s created in VPC %s" % (ntwk2.id, vpc2.id))
        # Deploy a vm in network 1 (comment previously mislabeled "network 2")
        vm1 = None
        try:
            vm1 = VirtualMachine.create(self.apiclient, services=self.services["virtual_machine"],
                                        templateid=self.template.id,
                                        zoneid=self.zone.id,
                                        accountid=self.account.name,
                                        domainid=self.account.domainid,
                                        serviceofferingid=self.compute_offering.id,
                                        networkids=ntwk1.id,
                                        hypervisor=self.hypervisor
                                        )
        except Exception as e:
            self.fail(e)
        finally:
            self.assertTrue(vm1 is not None, "VM failed to deploy")
            self.assertTrue(vm1.state == 'Running', "VM is not running")
            self.cleanup.append(vm1)
        self.logger.debug("VM %s deployed in VPC %s" % (vm1.id, vpc1.id))
        # Deploy a vm in network 2
        vm2 = None
        try:
            vm2 = VirtualMachine.create(self.apiclient, services=self.services["virtual_machine"],
                                        templateid=self.template.id,
                                        zoneid=self.zone.id,
                                        accountid=self.account.name,
                                        domainid=self.account.domainid,
                                        serviceofferingid=self.compute_offering.id,
                                        networkids=ntwk2.id,
                                        hypervisor=self.hypervisor
                                        )
        except Exception as e:
            self.fail(e)
        finally:
            self.assertTrue(vm2 is not None, "VM failed to deploy")
            self.assertTrue(vm2.state == 'Running', "VM is not running")
            self.cleanup.append(vm2)
        # CONSISTENCY: was self.debug(); use the class logger as elsewhere.
        self.logger.debug("VM %s deployed in VPC %s" % (vm2.id, vpc2.id))
        # 4) Enable Site-to-Site VPN for VPC
        vpn1_response = Vpn.createVpnGateway(self.apiclient, vpc1.id)
        self.assertTrue(
            vpn1_response is not None, "Failed to enable VPN Gateway 1")
        self.logger.debug("VPN gateway for VPC %s enabled" % vpc1.id)
        vpn2_response = Vpn.createVpnGateway(self.apiclient, vpc2.id)
        self.assertTrue(
            vpn2_response is not None, "Failed to enable VPN Gateway 2")
        self.logger.debug("VPN gateway for VPC %s enabled" % vpc2.id)
        # 5) Add VPN Customer gateway info
        src_nat_list = PublicIPAddress.list(
            self.apiclient,
            account=self.account.name,
            domainid=self.account.domainid,
            listall=True,
            issourcenat=True,
            vpcid=vpc1.id
        )
        ip1 = src_nat_list[0]
        src_nat_list = PublicIPAddress.list(
            self.apiclient,
            account=self.account.name,
            domainid=self.account.domainid,
            listall=True,
            issourcenat=True,
            vpcid=vpc2.id
        )
        ip2 = src_nat_list[0]
        services = self.services["vpncustomergateway"]
        customer1_response = VpnCustomerGateway.create(
            self.apiclient, services, "Peer VPC1", ip1.ipaddress, vpc1.cidr, self.account.name, self.domain.id)
        self.logger.debug("VPN customer gateway added for VPC %s enabled" % vpc1.id)
        self.logger.debug(vars(customer1_response))
        customer2_response = VpnCustomerGateway.create(
            self.apiclient, services, "Peer VPC2", ip2.ipaddress, vpc2.cidr, self.account.name, self.domain.id)
        self.logger.debug("VPN customer gateway added for VPC %s enabled" % vpc2.id)
        self.logger.debug(vars(customer2_response))
        # 6) Connect two VPCs (first side is passive)
        vpnconn1_response = Vpn.createVpnConnection(
            self.apiclient, customer1_response.id, vpn2_response['id'], True)
        self.logger.debug("VPN passive connection created for VPC %s" % vpc2.id)
        vpnconn2_response = Vpn.createVpnConnection(
            self.apiclient, customer2_response.id, vpn1_response['id'])
        self.logger.debug("VPN connection created for VPC %s" % vpc1.id)

        def checkVpnConnected():
            # wait_until-compatible probe: returns (done, result).
            connections = Vpn.listVpnConnection(
                self.apiclient,
                listall='true',
                vpcid=vpc2.id)
            if isinstance(connections, list):
                return connections[0].state == 'Connected', None
            return False, None
        # Wait up to 60 seconds for passive connection to show up as Connected
        res, _ = wait_until(2, 30, checkVpnConnected)
        if not res:
            self.fail("Failed to connect between VPCs, see VPN state as Connected")
        # acquire an extra ip address to use to ssh into vm2
        try:
            vm2.public_ip = PublicIPAddress.create(
                apiclient=self.apiclient,
                accountid=self.account.name,
                zoneid=self.zone.id,
                domainid=self.account.domainid,
                services=self.services,
                networkid=ntwk2.id,
                vpcid=vpc2.id)
        except Exception as e:
            self.fail(e)
        finally:
            self.assertTrue(
                vm2.public_ip is not None, "Failed to aqcuire public ip for vm2")
        # Create port forward to be able to ssh into vm2
        natrule = None
        try:
            natrule = self._create_natrule(
                vpc2, vm2, 22, 22, vm2.public_ip, ntwk2)
        except Exception as e:
            self.fail(e)
        finally:
            self.assertTrue(
                natrule is not None, "Failed to create portforward for vm2")
        time.sleep(20)
        # setup ssh connection to vm2
        ssh_client = self._get_ssh_client(vm2, self.services, 10)
        if ssh_client:
            # run ping test
            packet_loss = ssh_client.execute("/bin/ping -c 3 -t 10 " + vm1.nic[0].ipaddress + " | grep packet | sed 's/.*received, //g' | sed 's/[% ]*packet.*//g'")[0]
            self.assertTrue(int(packet_loss) < 50, "Ping did not succeed")
        else:
            self.fail("Failed to setup ssh connection to %s" % vm2.public_ip)

    @classmethod
    def tearDownClass(cls):
        super(TestRVPCSite2SiteVpn, cls).tearDownClass()

    def setUp(self):
        """Per-test fixtures: fresh API client, hypervisor info, cleanup list."""
        self.apiclient = self.testClient.getApiClient()
        self.hypervisor = self.testClient.getHypervisorInfo()
        self.cleanup = []

    def tearDown(self):
        super(TestRVPCSite2SiteVpn, self).tearDown()
class TestVPCSite2SiteVPNMultipleOptions(cloudstackTestCase):
@classmethod
def setUpClass(cls):
cls.logger = logging.getLogger('TestVPCSite2SiteVPNMultipleOptions')
cls.stream_handler = logging.StreamHandler()
cls.logger.setLevel(logging.DEBUG)
cls.logger.addHandler(cls.stream_handler)
testClient = super(TestVPCSite2SiteVPNMultipleOptions, cls).getClsTestClient()
cls.apiclient = testClient.getApiClient()
cls.services = Services().services
cls.zone = get_zone(cls.apiclient, testClient.getZoneForTests())
cls.domain = get_domain(cls.apiclient)
cls._cleanup = []
cls.compute_offering = ServiceOffering.create(
cls.apiclient,
cls.services["compute_offering"]
)
cls._cleanup.append(cls.compute_offering)
cls.account = Account.create(
cls.apiclient, services=cls.services["account"])
cls._cleanup.append(cls.account)
cls.hypervisor = testClient.getHypervisorInfo()
cls.template = get_test_template(cls.apiclient, cls.zone.id, cls.hypervisor)
if cls.template == FAILED:
assert False, "get_test_template() failed to return template"
cls.logger.debug("Successfully created account: %s, id: \
%s" % (cls.account.name,
cls.account.id))
return
def setUp(self):
self.apiclient = self.testClient.getApiClient()
self.hypervisor = self.testClient.getHypervisorInfo()
self.cleanup = []
def _get_ssh_client(self, virtual_machine, services, retries):
""" Setup ssh client connection and return connection
vm requires attributes public_ip, public_port, username, password """
try:
ssh_client = SshClient(
virtual_machine.public_ip,
services["virtual_machine"]["ssh_port"],
services["virtual_machine"]["username"],
services["virtual_machine"]["password"],
retries)
except Exception as e:
self.fail("Unable to create ssh connection: " % e)
self.assertIsNotNone(
ssh_client, "Failed to setup ssh connection to vm=%s on public_ip=%s" % (virtual_machine.name, virtual_machine.public_ip))
return ssh_client
def _create_natrule(self, vpc, vm, public_port, private_port, public_ip, network, services=None):
self.logger.debug("Creating NAT rule in network for vm with public IP")
if not services:
self.services["natrule"]["privateport"] = private_port
self.services["natrule"]["publicport"] = public_port
self.services["natrule"]["startport"] = public_port
self.services["natrule"]["endport"] = public_port
services = self.services["natrule"]
nat_rule = NATRule.create(
apiclient=self.apiclient,
services=services,
ipaddressid=public_ip.ipaddress.id,
virtual_machine=vm,
networkid=network.id
)
self.assertIsNotNone(
nat_rule, "Failed to create NAT Rule for %s" % public_ip.ipaddress.ipaddress)
self.logger.debug(
"Adding NetworkACL rules to make NAT rule accessible")
vm.ssh_ip = nat_rule.ipaddress
vm.public_ip = nat_rule.ipaddress
vm.public_port = int(public_port)
return nat_rule
def _validate_vpc_offering(self, vpc_offering):
self.logger.debug("Check if the VPC offering is created successfully?")
vpc_offs = VpcOffering.list(
self.apiclient,
id=vpc_offering.id
)
offering_list = validateList(vpc_offs)
self.assertEqual(offering_list[0],
PASS,
"List VPC offerings should return a valid list"
)
self.assertEqual(
vpc_offering.name,
vpc_offs[0].name,
"Name of the VPC offering should match with listVPCOff data"
)
self.logger.debug(
"VPC offering is created successfully - %s" %
vpc_offering.name)
return
def _create_vpc_offering(self, offering_name):
vpc_off = None
if offering_name is not None:
self.logger.debug("Creating VPC offering: %s", offering_name)
vpc_off = VpcOffering.create(
self.apiclient,
self.services[offering_name]
)
self._validate_vpc_offering(vpc_off)
self.cleanup.append(vpc_off)
return vpc_off
@attr(tags=["advanced"], required_hardware="true")
def test_01_vpc_site2site_vpn_multiple_options(self):
"""Test Site 2 Site VPN Across VPCs"""
self.logger.debug("Starting test: test_01_vpc_site2site_vpn_multiple_options")
# 0) Get the default network offering for VPC
networkOffering = NetworkOffering.list(
self.apiclient, name="DefaultIsolatedNetworkOfferingForVpcNetworks")
self.assert_(networkOffering is not None and len(
networkOffering) > 0, "No VPC based network offering")
# Create and Enable VPC offering
vpc_offering = self._create_vpc_offering('vpc_offering')
self.assert_(vpc_offering is not None, "Failed to create VPC Offering")
vpc_offering.update(self.apiclient, state='Enabled')
vpc1 = None
# Create VPC 1
try:
vpc1 = VPC.create(
apiclient=self.apiclient,
services=self.services["vpc"],
networkDomain="vpc1.vpn",
vpcofferingid=vpc_offering.id,
zoneid=self.zone.id,
account=self.account.name,
domainid=self.domain.id
)
except Exception as e:
self.fail(e)
finally:
self.assert_(vpc1 is not None, "VPC1 creation failed")
self.cleanup.append(vpc1)
self.logger.debug("VPC1 %s created" % vpc1.id)
vpc2 = None
# Create VPC 2
try:
vpc2 = VPC.create(
apiclient=self.apiclient,
services=self.services["vpc2"],
networkDomain="vpc2.vpn",
vpcofferingid=vpc_offering.id,
zoneid=self.zone.id,
account=self.account.name,
domainid=self.domain.id
)
except Exception as e:
self.fail(e)
finally:
self.assert_(vpc2 is not None, "VPC2 creation failed")
self.cleanup.append(vpc2)
self.logger.debug("VPC2 %s created" % vpc2.id)
default_acl = NetworkACLList.list(
self.apiclient, name="default_allow")[0]
ntwk1 = None
# Create network in VPC 1
try:
ntwk1 = Network.create(
apiclient=self.apiclient,
services=self.services["network_1"],
accountid=self.account.name,
domainid=self.account.domainid,
networkofferingid=networkOffering[0].id,
zoneid=self.zone.id,
vpcid=vpc1.id,
aclid=default_acl.id
)
except Exception as e:
self.fail(e)
finally:
self.assertIsNotNone(ntwk1, "Network failed to create")
self.cleanup.append(ntwk1)
self.logger.debug("Network %s created in VPC %s" % (ntwk1.id, vpc1.id))
ntwk2 = None
# Create network in VPC 2
try:
ntwk2 = Network.create(
apiclient=self.apiclient,
services=self.services["network_2"],
accountid=self.account.name,
domainid=self.account.domainid,
networkofferingid=networkOffering[0].id,
zoneid=self.zone.id,
vpcid=vpc2.id,
aclid=default_acl.id
)
except Exception as e:
self.fail(e)
finally:
self.assertIsNotNone(ntwk2, "Network failed to create")
self.cleanup.append(ntwk2)
self.logger.debug("Network %s created in VPC %s" % (ntwk2.id, vpc2.id))
vm1 = None
# Deploy a vm in network 2
try:
vm1 = VirtualMachine.create(self.apiclient, services=self.services["virtual_machine"],
templateid=self.template.id,
zoneid=self.zone.id,
accountid=self.account.name,
domainid=self.account.domainid,
serviceofferingid=self.compute_offering.id,
networkids=ntwk1.id,
hypervisor=self.hypervisor
)
except Exception as e:
self.fail(e)
finally:
self.assert_(vm1 is not None, "VM failed to deploy")
self.assert_(vm1.state == 'Running', "VM is not running")
self.cleanup.append(vm1)
self.logger.debug("VM %s deployed in VPC %s" % (vm1.id, vpc1.id))
vm2 = None
# Deploy a vm in network 2
try:
vm2 = VirtualMachine.create(self.apiclient, services=self.services["virtual_machine"],
templateid=self.template.id,
zoneid=self.zone.id,
accountid=self.account.name,
domainid=self.account.domainid,
serviceofferingid=self.compute_offering.id,
networkids=ntwk2.id,
hypervisor=self.hypervisor
)
except Exception as e:
self.fail(e)
finally:
self.assert_(vm2 is not None, "VM failed to deploy")
self.assert_(vm2.state == 'Running', "VM is not running")
self.cleanup.append(vm2)
self.debug("VM %s deployed in VPC %s" % (vm2.id, vpc2.id))
# default config
config = {
'ike_enc' :'aes128',
'ike_hash' :'sha1',
'ike_dh' :'modp1536',
'esp_enc' :'aes128',
'esp_hash' :'sha1',
'esp_pfs' :'modp1536',
'psk' :'secreatKey',
'ike_life' :86400,
'esp_life' :3600,
'dpd' :True,
'force_encap' :False,
'passive_1' :False,
'passive_2' :False
}
test_confs = [
{}, # default
{'force_encap': True},
{'ike_life': ''},
{'esp_life': ''},
{'ike_life': '', 'esp_life': ''},
{'passive_1': True, 'passive_2': True},
{'passive_1': False, 'passive_2': True},
{'passive_1': True, 'passive_2': False},
{'passive_1': False, 'passive_2': False, 'dpd': False},
{'passive_1': True, 'passive_2': True, 'dpd': False},
{'passive_1': True, 'passive_2': False, 'dpd': False},
{'passive_1': False, 'passive_2': True, 'dpd': False},
{'passive_1': True, 'passive_2': False, 'esp_pfs': ''},
{'ike_dh': 'modp3072', 'ike_hash': 'sha256', 'esp_pfs': 'modp2048', 'esp_hash':'sha384'},
{'ike_dh': 'modp4096', 'ike_hash': 'sha384', 'esp_pfs': 'modp6144', 'esp_hash':'sha512'},
{'ike_dh': 'modp8192', 'ike_hash': 'sha512', 'esp_pfs': 'modp8192', 'esp_hash':'sha384'}
]
# 4) Enable Site-to-Site VPN for VPC
vpn1_response = Vpn.createVpnGateway(self.apiclient, vpc1.id)
self.assert_(
vpn1_response is not None, "Failed to enable VPN Gateway 1")
self.logger.debug("VPN gateway for VPC %s enabled" % vpc1.id)
vpn2_response = Vpn.createVpnGateway(self.apiclient, vpc2.id)
self.assert_(
vpn2_response is not None, "Failed to enable VPN Gateway 2")
self.logger.debug("VPN gateway for VPC %s enabled" % vpc2.id)
# 5) Add VPN Customer gateway info
src_nat_list = PublicIPAddress.list(
self.apiclient,
account=self.account.name,
domainid=self.account.domainid,
listall=True,
issourcenat=True,
vpcid=vpc1.id
)
ip1 = src_nat_list[0]
src_nat_list = PublicIPAddress.list(
self.apiclient,
account=self.account.name,
domainid=self.account.domainid,
listall=True,
issourcenat=True,
vpcid=vpc2.id
)
ip2 = src_nat_list[0]
# acquire an extra ip address to use to ssh into vm2
try:
vm2.public_ip = PublicIPAddress.create(
apiclient=self.apiclient,
accountid=self.account.name,
zoneid=self.zone.id,
domainid=self.account.domainid,
services=self.services,
networkid=ntwk2.id,
vpcid=vpc2.id)
except Exception as e:
self.fail(e)
finally:
self.assert_(
vm2.public_ip is not None, "Failed to aqcuire public ip for vm2")
natrule = None
# Create port forward to be able to ssh into vm2
try:
natrule = self._create_natrule(
vpc2, vm2, 22, 22, vm2.public_ip, ntwk2)
except Exception as e:
self.fail(e)
finally:
self.assert_(
natrule is not None, "Failed to create portforward for vm2")
time.sleep(20)
# setup ssh connection to vm2
ssh_client = self._get_ssh_client(vm2, self.services, 10)
if not ssh_client:
self.fail("Failed to setup ssh connection to %s" % vm2.public_ip)
for test_c in test_confs:
c = config.copy()
c.update(test_c)
services = self._get_vpn_config(c)
self.logger.debug(services)
customer1_response = VpnCustomerGateway.create(
self.apiclient,
services,
"Peer VPC1",
ip1.ipaddress,
vpc1.cidr,
account=self.account.name,
domainid=self.account.domainid)
self.logger.debug("VPN customer gateway added for VPC %s enabled" % vpc1.id)
customer2_response = VpnCustomerGateway.create(
self.apiclient,
services,
"Peer VPC2",
ip2.ipaddress,
vpc2.cidr,
account=self.account.name,
domainid=self.account.domainid)
self.logger.debug("VPN customer gateway added for VPC %s enabled" % vpc2.id)
# 6) Connect two VPCs
vpnconn1_response = Vpn.createVpnConnection(
self.apiclient, customer1_response.id, vpn2_response['id'], c['passive_1'])
self.logger.debug("VPN connection created for VPC %s" % vpc2.id)
time.sleep(5)
vpnconn2_response = Vpn.createVpnConnection(
self.apiclient, customer2_response.id, vpn1_response['id'], c['passive_2'])
self.logger.debug("VPN connection created for VPC %s" % vpc1.id)
def checkVpnConnected():
connections = Vpn.listVpnConnection(
self.apiclient,
listall='true',
vpcid=vpc2.id)
if isinstance(connections, list):
return connections[0].state == 'Connected', None
return False, None
# Wait up to 60 seconds for passive connection to show up as Connected
res, _ = wait_until(2, 30, checkVpnConnected)
if not res:
self.logger.debug("Failed to see VPN state as Connected, we'll attempt ssh+pinging")
# run ping test
packet_loss = ssh_client.execute("/bin/ping -c 3 -t 10 " + vm1.nic[0].ipaddress + " | grep packet | sed 's/.*received, //g' | sed 's/[% ]*packet.*//g'")[0]
self.logger.debug("Packet loss %s" % packet_loss)
self.assert_(int(packet_loss) < 50, "Ping did not succeed")
# Cleanup
Vpn.deleteVpnConnection(self.apiclient, vpnconn1_response['id'])
Vpn.deleteVpnConnection(self.apiclient, vpnconn2_response['id'])
customer1_response.delete(self.apiclient)
customer2_response.delete(self.apiclient)
def _get_vpn_config(self, c):
ike_policy = '%s-%s;%s' % (c['ike_enc'], c['ike_hash'], c['ike_dh']) if c['ike_dh'] else '%s-%s' % (c['ike_enc'], c['ike_hash'])
esp_policy = '%s-%s;%s' % (c['esp_enc'], c['esp_hash'], c['esp_pfs']) if c['esp_pfs'] else '%s-%s' % (c['esp_enc'], c['esp_hash'])
out = {
'ipsecpsk': c['psk'],
'ikepolicy':ike_policy,
'esppolicy':esp_policy,
'dpd':c['dpd'],
'forceencap':c['force_encap']
}
if c['ike_life']:
out['ikelifetime'] = c['ike_life']
if c['esp_life']:
out['esplifetime'] = c['esp_life']
return out
    @classmethod
    def tearDownClass(cls):
        # All resource cleanup is delegated to the base class teardown.
        super(TestVPCSite2SiteVPNMultipleOptions, cls).tearDownClass()
| {
"content_hash": "7c413a8fdf4affeecb13fd8c65ab914e",
"timestamp": "",
"source": "github",
"line_count": 1572,
"max_line_length": 167,
"avg_line_length": 38.80279898218829,
"alnum_prop": 0.531427259910161,
"repo_name": "GabrielBrascher/cloudstack",
"id": "26b1daf946f05a188fc0be0dfd4ed1117e7740ea",
"size": "61783",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/integration/smoke/test_vpc_vpn.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "9979"
},
{
"name": "C#",
"bytes": "2356211"
},
{
"name": "CSS",
"bytes": "42504"
},
{
"name": "Dockerfile",
"bytes": "4189"
},
{
"name": "FreeMarker",
"bytes": "4887"
},
{
"name": "Groovy",
"bytes": "146420"
},
{
"name": "HTML",
"bytes": "53626"
},
{
"name": "Java",
"bytes": "38859783"
},
{
"name": "JavaScript",
"bytes": "995137"
},
{
"name": "Less",
"bytes": "28250"
},
{
"name": "Makefile",
"bytes": "871"
},
{
"name": "Python",
"bytes": "12977377"
},
{
"name": "Ruby",
"bytes": "22732"
},
{
"name": "Shell",
"bytes": "744445"
},
{
"name": "Vue",
"bytes": "2012353"
},
{
"name": "XSLT",
"bytes": "57835"
}
],
"symlink_target": ""
} |
import responses
import json
from .helpers import mock_file, ClientTestCase
class TestClientPayment(ClientTestCase):
    """Unit tests for the payment client, with all HTTP traffic stubbed."""

    def setUp(self):
        super(TestClientPayment, self).setUp()
        self.base_url = self.base_url + '/payments'

    def _stub(self, method, url, payload):
        # Register a canned 200 response whose body is *payload* serialized
        # as JSON; the URL must match exactly, query string included.
        responses.add(method, url, status=200, body=json.dumps(payload),
                      match_querystring=True)

    @responses.activate
    def test_payment_all(self):
        expected = mock_file('payment_collection')
        self._stub(responses.GET, self.base_url, expected)
        self.assertEqual(self.client.payment.all(), expected)

    @responses.activate
    def test_payment_all_with_options(self):
        count = 1
        expected = mock_file('payment_collection_with_one_payment')
        self._stub(responses.GET,
                   '{}?count={}'.format(self.base_url, count), expected)
        self.assertEqual(self.client.payment.all({'count': count}), expected)

    @responses.activate
    def test_payment_fetch(self):
        expected = mock_file('fake_payment')
        self._stub(responses.GET,
                   '{}/{}'.format(self.base_url, self.payment_id), expected)
        self.assertEqual(self.client.payment.fetch('fake_payment_id'),
                         expected)

    @responses.activate
    def test_payment_capture(self):
        expected = mock_file('fake_captured_payment')
        self._stub(responses.POST,
                   '{}/{}/capture'.format(self.base_url, self.payment_id),
                   expected)
        self.assertEqual(
            self.client.payment.capture(self.payment_id, amount=5100),
            expected)

    @responses.activate
    def test_refund_create(self):
        expected = mock_file('fake_refund')
        self._stub(responses.POST,
                   '{}/{}/refund'.format(self.base_url, self.payment_id),
                   expected)
        self.assertEqual(self.client.payment.refund(self.payment_id, 2000),
                         expected)

    @responses.activate
    def test_transfer(self):
        param = {
            'transfers': {
                'currency': {
                    'amount': 100,
                    'currency': 'INR',
                    'account': 'dummy_acc'
                }
            }
        }
        expected = mock_file('transfers_collection_with_payment_id')
        self._stub(responses.POST,
                   '{}/{}/transfers'.format(self.base_url, self.payment_id),
                   expected)
        self.assertEqual(self.client.payment.transfer(self.payment_id, param),
                         expected)

    @responses.activate
    def test_transfer_fetch(self):
        expected = mock_file('transfers_collection_with_payment_id')
        self._stub(responses.GET,
                   '{}/{}/transfers'.format(self.base_url, self.payment_id),
                   expected)
        self.assertEqual(self.client.payment.transfers(self.payment_id),
                         expected)

    @responses.activate
    def test_bank_transfer_fetch(self):
        # This fixture is already a JSON string, so it is used as the raw
        # response body rather than being re-serialized.
        raw = mock_file('fake_bank_transfer')
        url = '{}/{}/bank_transfer'.format(self.base_url, self.payment_id)
        responses.add(responses.GET, url, status=200, body=raw,
                      match_querystring=True)
        response = self.client.payment.bank_transfer(self.payment_id)
        self.assertEqual(response['virtual_account_id'], 'va_8J2ny4Naokqbpe')
        self.assertEqual(response['payment_id'], self.payment_id)

    @responses.activate
    def test_upi_transfer_fetch(self):
        # Raw-body fixture, same as the bank transfer test above.
        raw = mock_file('fake_upi_transfer')
        url = '{}/{}/upi_transfer'.format(self.base_url, self.payment_id)
        responses.add(responses.GET, url, status=200, body=raw,
                      match_querystring=True)
        response = self.client.payment.upi_transfer(self.payment_id)
        self.assertEqual(response['virtual_account_id'], 'va_8J2ny4Naokqbpf')
        self.assertEqual(response['payment_id'], self.payment_id)

    @responses.activate
    def test_payment_refund(self):
        init = {
            "amount": "100"
        }
        expected = mock_file('fake_refund')
        self._stub(responses.POST,
                   '{}/{}/refund'.format(self.base_url, 'fake_refund_id'),
                   expected)
        self.assertEqual(self.client.payment.refund('fake_refund_id', init),
                         expected)

    @responses.activate
    def test_payment_fetch_multiple_refund(self):
        expected = mock_file('refund_collection')
        self._stub(responses.GET,
                   '{}/{}/refunds'.format(self.base_url, 'fake_payment_id'),
                   expected)
        self.assertEqual(
            self.client.payment.fetch_multiple_refund(self.payment_id),
            expected)

    @responses.activate
    def test_payment_fetch_refund_id(self):
        expected = mock_file('refund_collection')
        self._stub(responses.GET,
                   '{}/{}/refunds/{}'.format(self.base_url,
                                             'fake_payment_id',
                                             'fake_refund_id'),
                   expected)
        self.assertEqual(
            self.client.payment.fetch_refund_id('fake_payment_id',
                                                'fake_refund_id'),
            expected)

    @responses.activate
    def test_payment_edit(self):
        param = {
            "notes": {
                "key1": "value3",
                "key2": "value2"
            }
        }
        expected = mock_file('edit_payment')
        self._stub(responses.PATCH,
                   '{}/{}'.format(self.base_url, 'dummy_id'), expected)
        self.assertEqual(self.client.payment.edit('dummy_id', param),
                         expected)

    @responses.activate
    def test_fetch_card_detail(self):
        expected = mock_file('fake_card_detail_payment')
        self._stub(responses.GET,
                   '{}/{}/card'.format(self.base_url, 'dummy_id'), expected)
        self.assertEqual(self.client.payment.fetchCardDetails('dummy_id'),
                         expected)

    @responses.activate
    def test_fetch_downtimes(self):
        expected = mock_file('fake_card_detail_payment')
        self._stub(responses.GET,
                   '{}/{}'.format(self.base_url, 'downtimes'), expected)
        self.assertEqual(self.client.payment.fetchDownTime(), expected)

    @responses.activate
    def test_fetch_downtime_by_id(self):
        expected = mock_file('fake_card_detail_payment')
        self._stub(responses.GET,
                   '{}/downtimes/{}'.format(self.base_url, 'dummy_id'),
                   expected)
        self.assertEqual(self.client.payment.fetchDownTimeById('dummy_id'),
                         expected)

    @responses.activate
    def test_payment_json(self):
        param = {
            "amount": "500",
            "currency": "INR",
            "email": "gaurav.kumar@example.com",
            "contact": "9123456789",
            "order_id": "order_IfCjbAb066hM9i",
            "method": "upi",
            "card": {
                "number": "4854980604708430",
                "cvv": "123",
                "expiry_month": "12",
                "expiry_year": "21",
                "name": "Gaurav Kumar"
            }
        }
        expected = mock_file('fake_payment_json')
        self._stub(responses.POST,
                   '{}/create/{}'.format(self.base_url, 'json'), expected)
        self.assertEqual(self.client.payment.createPaymentJson(param),
                         expected)

    @responses.activate
    def test_createRecurring(self):
        init = mock_file('init_create_recurring')
        expected = mock_file('fake_create_recurring')
        self._stub(responses.POST,
                   '{}/{}/recurring'.format(self.base_url, 'create'),
                   expected)
        self.assertEqual(self.client.payment.createRecurring(init), expected)

    @responses.activate
    def test_otpGenerate(self):
        expected = mock_file('fake_otp_generate')
        self._stub(responses.POST,
                   '{}/{}/otp_generate'.format(self.base_url, 'dummy_id'),
                   expected)
        self.assertEqual(self.client.payment.otpGenerate('dummy_id'),
                         expected)

    @responses.activate
    def test_otpSubmit(self):
        param = {
            "otp": "123456"
        }
        expected = mock_file('fake_otp_submit')
        self._stub(responses.POST,
                   '{}/{}/otp/submit'.format(self.base_url, 'dummy_id'),
                   expected)
        self.assertEqual(self.client.payment.otpSubmit('dummy_id', param),
                         expected)

    @responses.activate
    def test_otpResend(self):
        expected = mock_file('fake_otp_resend')
        self._stub(responses.POST,
                   '{}/{}/otp/resend'.format(self.base_url, 'dummy_id'),
                   expected)
        self.assertEqual(self.client.payment.otpResend('dummy_id'), expected)
"content_hash": "2f81eb3e552645c475517818c50c50fe",
"timestamp": "",
"source": "github",
"line_count": 251,
"max_line_length": 110,
"avg_line_length": 40.56573705179283,
"alnum_prop": 0.5696326851306227,
"repo_name": "razorpay/razorpay-python",
"id": "0440f94ffd2e8435421ee37f54e2e759f495bc09",
"size": "10182",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_client_payment.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "122124"
}
],
"symlink_target": ""
} |
from django.contrib import admin
from django.contrib.admin.sites import AlreadyRegistered
from .models import HOTPDevice
class HOTPDeviceAdmin(admin.ModelAdmin):
    """
    :class:`~django.contrib.admin.ModelAdmin` for
    :class:`~django_otp.plugins.otp_hotp.models.HOTPDevice`.
    """
    # Group the device fields into three admin sections: who owns the
    # device, how it is configured, and its mutable HOTP state.
    fieldsets = [
        ('Identity', {
            'fields': ['user', 'name', 'confirmed'],
        }),
        ('Configuration', {
            'fields': ['key', 'digits', 'tolerance'],
        }),
        ('State', {
            'fields': ['counter'],
        }),
    ]
    # Use a raw-id widget for the user FK to avoid rendering a select box
    # over the entire user table.
    raw_id_fields = ['user']
    radio_fields = {'digits': admin.HORIZONTAL}


# Register at import time; tolerate re-registration so a double import of
# this module is harmless.
try:
    admin.site.register(HOTPDevice, HOTPDeviceAdmin)
except AlreadyRegistered:
    # A useless exception from a double import
    pass
| {
"content_hash": "c9fc0f3a8f6ed030659274deabfdbbd7",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 60,
"avg_line_length": 25.29032258064516,
"alnum_prop": 0.5956632653061225,
"repo_name": "robintema/django-otp",
"id": "aec1538ed0d98b95f980fd3d8b7044a62678ae73",
"size": "784",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django_otp/plugins/otp_hotp/admin.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "HTML",
"bytes": "16149"
},
{
"name": "Makefile",
"bytes": "319"
},
{
"name": "Python",
"bytes": "95513"
}
],
"symlink_target": ""
} |
import operator
def sze(cores, input_tasks):
    """Decide whether every task can finish by its deadline on `cores` cores.

    Each element of `input_tasks` is a tuple (release, deadline, work): the
    task becomes available at time `release`, needs `work` unit-length time
    slices, and must be done by `deadline`. Scheduling is simulated one time
    unit at a time, always serving the tasks with the least remaining slack
    first. Returns 1 if a feasible schedule exists under this policy, 0 as
    soon as some task runs out of slack.
    """
    pending = []
    for idx, (release, deadline, work) in enumerate(input_tasks):
        # slack = spare time units the task can afford to wait.
        pending.append((idx, release, deadline - release - work, work))
    pending.sort(key=operator.itemgetter(2))

    clock = 0
    while pending:
        free = cores
        carried = []
        for idx, ready, slack, left in pending:
            if ready != clock:
                # Not yet released (or already pushed to the next tick).
                carried.append((idx, ready, slack, left))
            elif free:
                # Run the task for one time unit on a free core.
                free -= 1
                if left > 1:
                    carried.append((idx, ready + 1, slack, left - 1))
            elif slack:
                # No core available: burn one unit of slack.
                carried.append((idx, ready + 1, slack - 1, left))
            else:
                # Out of slack and out of cores -- deadline missed.
                return 0
        # Re-sort so the least-slack tasks are served first next tick.
        pending = sorted(carried, key=operator.itemgetter(2))
        clock += 1
    return 1
| {
"content_hash": "2012719997cfb4207d3db771c7e9126e",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 68,
"avg_line_length": 28.62162162162162,
"alnum_prop": 0.46175637393767704,
"repo_name": "grucin/Potyczki-Algorytmiczne-2016",
"id": "4d369a3b25e3ed8b1da769271574734434b5aa50",
"size": "1059",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sze/sze.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "14802"
}
],
"symlink_target": ""
} |
"""
Given low and high limits on the Wilson coefficients c_j, produce
gridpacks for a grid of points with dimensionality equal to the
number of coeffients, and each axis spanning low < c_j < high.
"""
from __future__ import print_function

import argparse
import os
import re
import shutil
import subprocess
import sys

import numpy as np

from NPFitProduction.NPFitProduction.cross_sections import CrossSectionScan, get_points, setup_model
from NPFitProduction.NPFitProduction.utils import cartesian_product, TempDir
# Command-line interface. 'index' is deliberately declared last, so the
# positional order is: numvalues cores coefficients events sm_gridpack
# madgraph np_model np_param_path cards process_card index.
parser = argparse.ArgumentParser(description='produce gridpacks')
parser.add_argument('numvalues', type=int, help='number of values to scan per coefficient')
parser.add_argument('cores', type=int, help='number of cores to use')
parser.add_argument('coefficients', type=str, help='comma-delimited list of wilson coefficients to scan')
parser.add_argument('events', type=int, help='number of events to use for cross section calculation')
parser.add_argument('sm_gridpack', type=str, help='tarball containing an SM gridpack')
parser.add_argument('madgraph', type=str, help='tarball containing madgraph')
parser.add_argument('np_model', type=str, help='tarball containing NP model')
parser.add_argument('np_param_path', type=str,
                    help='path (relative to the unpacked madgraph tarball) to the NP parameter card')
parser.add_argument('cards', type=str,
                    help='path to the cards directory (must contain run_card.dat, grid_card.dat, '
                    'me5_configuration.txt and the parameter card pointed to by np_param_path)')
parser.add_argument('process_card', type=str, help='which process card to run')
parser.add_argument('--scale', type=float, help='maximum scaling to constrain coefficient values')
parser.add_argument('--scan', type=str,
                    help='coarse-grained scan point file-- note: either (scale, scan and constraints) or '
                    '(low and high) are required options')
parser.add_argument('--constraints', help='comma delimited list of processes to include for range finding')
parser.add_argument('--low', type=float, help='lower bound of coefficient range')
parser.add_argument('--high', type=float, help='upper bound of coefficient range')
parser.add_argument('index', type=int, help='the index of the point to calculate')
args = parser.parse_args()

args.coefficients = args.coefficients.split(',')
# Process name is derived from the process card filename (minus '.dat').
process = args.process_card.split('/')[-1].replace('.dat', '')
# One extra point is reserved for the SM point (all coefficients zero).
totalpoints = args.numvalues ** len(args.coefficients) + 1
if args.scan and args.scale and args.constraints:
    # Range-find from a coarse-grained scan constrained by the given processes.
    coarse_scan = CrossSectionScan([args.scan.replace('file:', '')])
    coarse_scan.prune(args.constraints)
    points = get_points(args.coefficients, coarse_scan, args.scale, args.numvalues)
    # NOTE(review): `get_bounds` is not imported or defined in this module and
    # `args.interpolate_numvalues` is not a declared argument, so the code
    # below would raise NameError/AttributeError if reached. It looks like
    # leftover experimental code that overwrites `points`; left in place
    # pending confirmation of the intended range-finding logic.
    try:
        mins, maxes = get_bounds(args.coefficients, coarse_scan, args.scale, args.interpolate_numvalues)
    except RuntimeError:
        raise
    try:
        points = None
        for column, coefficient in enumerate(args.coefficients):
            # Row 0 of every axis is pinned to the SM value (0).
            column = np.vstack([np.zeros(1), np.random.uniform(mins[column], maxes[column], (totalpoints, 1))])
            if points is None:
                points = column
            else:
                points = np.hstack([points, column])
    except RuntimeError as e:
        # Bug fix: was `print e`, a SyntaxError in a module that imports
        # print_function (and under Python 3).
        print(e)
        sys.exit(42)
elif args.low and args.high:
    # Regular grid spanning [low, high] on each axis, plus the SM point.
    values = [np.hstack([np.zeros(1), np.linspace(args.low, args.high, args.numvalues)]) for c in args.coefficients]
    points = cartesian_product(*values)
else:
    raise NotImplementedError('either scale and scan or interval are required')
# This job computes only the single point selected by the batch index.
point = points[args.index]
# Build the gridpack inside a scratch directory, then ship the result back
# to the submission directory.
start = os.getcwd()
with TempDir() as sandbox:
    os.chdir(sandbox)
    # Unpack madgraph + NP model and write the cards for this point.
    outdir = setup_model(
        start,
        args.madgraph,
        args.np_model,
        args.np_param_path,
        args.coefficients,
        args.process_card,
        args.cores,
        args.events,
        args.cards,
        point
    )
    carddir = os.path.join(outdir, 'Cards')
    # Appending this flag makes generate_events produce a gridpack tarball.
    with open(os.path.join(carddir, 'run_card.dat'), 'a') as f:
        print('.true. = gridpack', file=f)
    output = subprocess.check_output(['./{}/bin/generate_events'.format(outdir), '-f'])
    # Scrape the cross section out of the madgraph log output.
    # NOTE(review): on Python 3 check_output returns bytes, which would make
    # this str-pattern search raise TypeError -- this script appears to
    # target Python 2; confirm before porting.
    m = re.search("Cross-section :\s*(.*) \+", output)
    cross_section = float(m.group(1)) if m else np.nan
    # Assemble the CMS-style gridpack layout: mgbasedir and runcmsgrid.sh
    # come from the SM gridpack, madevent/run.sh from this run.
    subprocess.call(['tar', 'xzf', '{}/run_01_gridpack.tar.gz'.format(outdir)])
    subprocess.call(['tar', 'xaf', os.path.join(start, args.sm_gridpack), 'mgbasedir'])
    subprocess.call(['tar', 'xaf', os.path.join(start, args.sm_gridpack), 'runcmsgrid.sh'])
    os.mkdir('process')
    shutil.move('madevent', 'process')
    shutil.move('run.sh', 'process')
    # Record (point, cross section) so downstream fits can read it back.
    annotator = CrossSectionScan()
    annotator.add(point, cross_section, process, args.coefficients)
    annotator.dump('point.npz')
    subprocess.call(['tar', 'cJpsf', 'gridpack.tar.xz', 'mgbasedir', 'process', 'runcmsgrid.sh', 'point.npz'])
    shutil.move('gridpack.tar.xz', start)
| {
"content_hash": "b3ed3336e2f463b816267338307a0131",
"timestamp": "",
"source": "github",
"line_count": 114,
"max_line_length": 116,
"avg_line_length": 43.36842105263158,
"alnum_prop": 0.6834546925566343,
"repo_name": "annawoodard/EffectiveTTVProduction",
"id": "34acf1498595a4636a2dfd33ef501edde551c644",
"size": "4944",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/gridpack.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "3656"
},
{
"name": "Python",
"bytes": "59953"
},
{
"name": "Shell",
"bytes": "780"
}
],
"symlink_target": ""
} |
import sys
import os
# Sphinx configuration for the pokercore documentation (standard
# sphinx-quickstart template; commented-out settings keep their defaults).
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'pokercore'
copyright = u'2015, Author'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = ''
# The full version, including alpha/beta/rc tags.
release = ''
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'pokercoredoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
  ('index', 'pokercore.tex', u'pokercore Documentation',
   u'Author', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'pokercore', u'pokercore Documentation',
     [u'Author'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
  ('index', 'pokercore', u'pokercore Documentation',
   u'Author', 'pokercore', 'One line description of project.',
   'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# -- Options for Epub output ----------------------------------------------
# Bibliographic Dublin Core info.
epub_title = u'pokercore'
epub_author = u'Author'
epub_publisher = u'Author'
epub_copyright = u'2015, Author'
# The basename for the epub file. It defaults to the project name.
#epub_basename = u'pokercore'
# The HTML theme for the epub output. Since the default themes are not optimized
# for small screen space, using the same theme for HTML and epub output is
# usually not wise. This defaults to 'epub', a theme designed to save visual
# space.
#epub_theme = 'epub'
# The language of the text. It defaults to the language option
# or en if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()
# A sequence of (type, uri, title) tuples for the guide element of content.opf.
#epub_guide = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files shat should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
# Choose between 'default' and 'includehidden'.
#epub_tocscope = 'default'
# Fix unsupported image types using the PIL.
#epub_fix_images = False
# Scale large images.
#epub_max_image_width = 0
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#epub_show_urls = 'inline'
# If false, no index is generated.
#epub_use_index = True
| {
"content_hash": "d2bd4c3258355ca9a3d04b62fbbfa849",
"timestamp": "",
"source": "github",
"line_count": 317,
"max_line_length": 80,
"avg_line_length": 30.91167192429022,
"alnum_prop": 0.7055822022655373,
"repo_name": "amikrop/pokercore",
"id": "af8391d85a1e5e16fd18dc4f624efc3b6cf558ac",
"size": "10221",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docs/conf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "20942"
}
],
"symlink_target": ""
} |
from django.apps import AppConfig
from .utils import load_all_tasks
class DjangoLightweightQueueConfig(AppConfig):
    """Django app configuration for django-lightweight-queue."""
    name = 'django_lightweight_queue'

    def ready(self) -> None:
        # Import every app's task modules once Django's app registry is
        # ready, so @task-decorated functions get registered.
        load_all_tasks()
| {
"content_hash": "564a8db02436c2160f515d51c0944f6e",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 46,
"avg_line_length": 21.1,
"alnum_prop": 0.7203791469194313,
"repo_name": "thread/django-lightweight-queue",
"id": "07a4c999bcadb575a6663e1fda4c306cd096d206",
"size": "211",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django_lightweight_queue/apps.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "108198"
},
{
"name": "Shell",
"bytes": "131"
}
],
"symlink_target": ""
} |
'''
Unit tests for MergePairSelector.py
Verifies that we can successfully select components to merge
'''
import numpy as np
import unittest
from bnpy.learnalg import MergeTracker
from bnpy.learnalg import MergePairSelector
class TestMergePairSelector(unittest.TestCase):
    """Exercises MergePairSelector bookkeeping and random pair selection."""

    def shortDescription(self):
        # Keep verbose test output to the test names only.
        return None

    def setUp(self):
        pass

    def test_reindexAfterMerge(self):
        selector = MergePairSelector()
        for comp in (0, 3, 4):
            selector.MScores[comp] = 5
        for pair in ((0, 1), (3, 4), (5, 6)):
            selector.PairMScores[pair] = 5
        selector.reindexAfterMerge(2, 3)
        # Scores for merged/stale comps are dropped; higher ids shift down.
        assert selector.MScores[0] == 5
        assert selector.MScores[3] == 5
        assert 2 not in selector.MScores
        assert len(selector.PairMScores.keys()) == 2
        assert (0, 1) in selector.PairMScores
        assert (4, 5) in selector.PairMScores
        assert (2, 3) not in selector.PairMScores

    def test_select_merge_components_random(self):
        ''' Verify that under random choices, we select among 3 components
              equally often
        '''
        tracker = MergeTracker(3)
        selector = MergePairSelector()
        tally = np.zeros(3)
        for _ in range(1000):
            kA, kB = selector.select_merge_components(
                None, None, tracker, mergename='random')
            tally[kA] += 1
            tally[kB] += 1
        tally /= np.sum(tally)
        # Uniform at random means fraction of choice should be ~1/3 for each
        assert np.all(tally > 0.25)
        assert np.all(tally < 0.4)

    def test_select_merge_components_random_raisesError(self):
        ''' Verify that when comp 0 is excluded with K=3
              we cannot provide comp 0 as kA, [error is raised]
              AND
              in free choice, we only choose kA=1, kB=2
        '''
        tracker = MergeTracker(3)
        selector = MergePairSelector()
        tracker.excludeList = set([0])
        tracker._synchronize_and_verify()
        # Whichever of the two remaining comps is requested, the returned
        # pair is always (1, 2).
        for requested in (1, 2):
            for _ in range(10):
                kA, kB = selector.select_merge_components(
                    None, None, tracker, kA=requested, mergename='random')
                assert kA == 1
                assert kB == 2
        with self.assertRaises(AssertionError):
            selector.select_merge_components(
                None, None, tracker, mergename='random', kA=0)

    def test_select_merge_components_random_raisesErrorAllButOneExcluded(self):
        ''' Verify that when comps 0,1 are excluded with K=3
              we cannot provide comp 2 as kA, [error is raised]
        '''
        tracker = MergeTracker(3)
        selector = MergePairSelector()
        tracker.excludeList = set([1, 0])
        tracker._synchronize_and_verify()
        with self.assertRaises(AssertionError):
            selector.select_merge_components(
                None, None, tracker, mergename='random', kA=2)
| {
"content_hash": "15399ba1e0d46efb0c7b620cdcfcc44b",
"timestamp": "",
"source": "github",
"line_count": 90,
"max_line_length": 90,
"avg_line_length": 31.855555555555554,
"alnum_prop": 0.66829438437391,
"repo_name": "daeilkim/refinery",
"id": "cc67e18d687ce5703e25ad63c282e9d753e43a80",
"size": "2867",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "refinery/bnpy/bnpy-dev/tests/merge/TestMergePairSelector.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "246351"
},
{
"name": "C++",
"bytes": "3051"
},
{
"name": "CSS",
"bytes": "34687"
},
{
"name": "HTML",
"bytes": "38220"
},
{
"name": "JavaScript",
"bytes": "111429"
},
{
"name": "Makefile",
"bytes": "3707"
},
{
"name": "Puppet",
"bytes": "5562"
},
{
"name": "Python",
"bytes": "855634"
},
{
"name": "Shell",
"bytes": "463"
}
],
"symlink_target": ""
} |
# Packaging script for the "stone" interface description language.
from __future__ import absolute_import, division, print_function
import sys
try:
    from ez_setup import use_setuptools
    use_setuptools()
except ImportError:
    # Try to use ez_setup, but if not, continue anyway. The import is known
    # to fail when installing from a tar.gz.
    print('Could not import ez_setup', file=sys.stderr)
from setuptools import setup
# Runtime dependencies of the stone package itself.
install_reqs = [
    'ply >= 3.4',
    'six >= 1.3.0',
]
# Needed at setup time so `python setup.py test` can delegate to pytest.
setup_requires = [
    'pytest-runner',
]
test_reqs = [
    'pytest',
]
# WARNING: This imposes limitations on test/requirements.txt such that the
# full Pip syntax is not supported. See also
# <http://stackoverflow.com/questions/14399534/>.
with open('test/requirements.txt') as f:
    test_reqs += f.read().splitlines()
# The README doubles as the long description shown on PyPI.
with open('README.rst') as f:
    README = f.read()
dist = setup(
    name='stone',
    version='0.1',
    install_requires=install_reqs,
    setup_requires=setup_requires,
    tests_require=test_reqs,
    entry_points={
        'console_scripts': ['stone=stone.cli:main'],
    },
    packages=[
        'stone',
        'stone.backends',
        'stone.backends.python_rsrc',
        'stone.frontend',
        'stone.ir',
    ],
    zip_safe=False,
    author_email='kelkabany@dropbox.com',
    author='Ken Elkabany',
    description='Stone is an interface description language (IDL) for APIs.',
    license='MIT License',
    long_description=README,
    maintainer_email='api-platform@dropbox.com',
    maintainer='Dropbox',
    url='https://github.com/dropbox/stone',
    classifiers=[
        'Development Status :: 4 - Beta',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Topic :: Software Development :: Code Generators',
        'Topic :: Software Development :: Libraries :: Python Modules',
    ],
)
| {
"content_hash": "f8b73b9d37eab9fadd6ec790c698bf17",
"timestamp": "",
"source": "github",
"line_count": 75,
"max_line_length": 77,
"avg_line_length": 28.173333333333332,
"alnum_prop": 0.628490298154283,
"repo_name": "posita/stone",
"id": "894e52ea28c59dd20f550c81bd67944a24da216f",
"size": "2240",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Objective-C",
"bytes": "23077"
},
{
"name": "Python",
"bytes": "922270"
},
{
"name": "Shell",
"bytes": "300"
},
{
"name": "Swift",
"bytes": "17212"
}
],
"symlink_target": ""
} |
import sqlite3
import bcrypt
import os
import htpasswd
import re
from email.utils import parseaddr
from . import config
# ------------------------------------------------------------------------------
BCRYPT_ROUNDS = 5
DATABASE = config.DB_DATABASE
PASSWDFILE = config.DB_PASSFILE
EMAIL_REGEX = re.compile(r"[^@ ]+@[^@ ]+\.[^@ ]+")
# ------------------------------------------------------------------------------
def mkEmptyDatabase(dbname):
    """Create a fresh user database at `dbname`, replacing any existing file."""
    # Drop any previous database file so the schema always starts clean.
    if os.path.isfile(dbname):
        os.remove(dbname)
    connection = sqlite3.connect(dbname)
    connection.cursor().execute(
        "CREATE TABLE user ( "
        "uid INTEGER PRIMARY KEY AUTOINCREMENT, name text, "
        "passwd text, email text, enabled INTEGER NOT NULL DEFAULT 1, "
        "UNIQUE(name), UNIQUE(email))")
    connection.commit()
    connection.close()
# ------------------------------------------------------------------------------
def init():
    """Reset both credential stores and seed them with the test account."""
    mkEmptyDatabase(DATABASE)
    # Recreate the htpasswd file from scratch as an empty file.
    if os.path.isfile(PASSWDFILE):
        os.remove(PASSWDFILE)
    with open(PASSWDFILE, 'w'):
        pass
    insertUser(config.DB_TEST_USER, config.DB_TEST_PASS, config.DB_TEST_EMAIL)
# ------------------------------------------------------------------------------
def insertUser(name, passwd, email):
    """Add a user to both the SQLite database and the htpasswd file.

    Returns a (success, message) tuple; message is empty on success.
    """
    # parseaddr extracts the bare address; the regex then rejects clearly
    # malformed addresses that parseaddr still accepts.
    checkedEmail = parseaddr(email)[1]
    if len(checkedEmail) == 0 or not EMAIL_REGEX.match(checkedEmail):
        return (False, "Invalid email %s" % email)
    hpass = bcrypt.hashpw(passwd, bcrypt.gensalt(BCRYPT_ROUNDS))
    conn = sqlite3.connect(DATABASE)
    try:
        with conn:
            conn.execute('INSERT INTO user(uid,name,passwd,email) '
                         'VALUES (null,?,?,?)',
                         (name, hpass, checkedEmail))
    except sqlite3.IntegrityError:
        # UNIQUE(name) or UNIQUE(email) constraint violation.
        return (False, "User '%s' Already Exists" % name)
    try:
        with htpasswd.Basic(PASSWDFILE) as userdb:
            userdb.add(name, passwd)
    # Fixed: was the Python-2-only `except E, err` form; `as` is valid on
    # Python 2.6+ and required on Python 3.
    except htpasswd.basic.UserExists as err:
        return (False, "User '%s' Already Exists [%s]" % (name, str(err)))
    return (True, "")
# ------------------------------------------------------------------------------
def getUserEmail(name):
    """Return the email address for `name`, or None if absent or on DB error."""
    conn = sqlite3.connect(DATABASE)
    try:
        with conn:
            cur = conn.cursor()
            cur.execute('SELECT email FROM user WHERE name=?', (name,))
            val = cur.fetchone()
            # Unknown user -> fetchone() is None -> val[0] raises TypeError,
            # which falls through to the None return below.
            return val[0]
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit are
        # no longer swallowed; the best-effort contract is preserved.
        pass
    return None
# ------------------------------------------------------------------------------
def getUserName(uid):
    """Return the user name for `uid`, or None if absent or on DB error."""
    conn = sqlite3.connect(DATABASE)
    try:
        with conn:
            cur = conn.cursor()
            cur.execute('SELECT name FROM user WHERE uid=?', (uid,))
            val = cur.fetchone()
            # Unknown uid -> fetchone() is None -> val[0] raises TypeError,
            # which falls through to the None return below.
            return val[0]
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit are
        # no longer swallowed; the best-effort contract is preserved.
        pass
    return None
# ------------------------------------------------------------------------------
def checkUser(name, passwd):
    """Return True iff `name` is enabled and `passwd` matches its bcrypt hash."""
    conn = sqlite3.connect(DATABASE)
    try:
        with conn:
            cur = conn.cursor()
            cur.execute('SELECT passwd FROM user '
                        'WHERE name=? AND enabled=1', (name,))
            val = cur.fetchone()
            if val is not None:
                # Classic bcrypt check: re-hash using the stored hash as salt.
                # NOTE(review): `==` is not constant-time; bcrypt.checkpw would
                # be preferable if the installed library version provides it.
                return bcrypt.hashpw(passwd, val[0]) == val[0]
    except Exception:
        # Narrowed from a bare `except:`; still fails closed on any error.
        return False
    return False
# ------------------------------------------------------------------------------
def changeUserPassword(name, newpass):
    """Update `name`'s password in both stores; return True on success."""
    # Update the htpasswd file first; a missing user aborts early.
    try:
        with htpasswd.Basic(PASSWDFILE) as userdb:
            userdb.change_password(name, newpass)
    except htpasswd.basic.UserNotExists:
        return False
    hpass = bcrypt.hashpw(newpass, bcrypt.gensalt(BCRYPT_ROUNDS))
    conn = sqlite3.connect(DATABASE)
    try:
        with conn:
            conn.execute('UPDATE user SET passwd=? WHERE name=?',
                         (hpass, name))
    except Exception:
        # Narrowed from a bare `except:`; still reports failure to the caller.
        return False
    return True
# ------------------------------------------------------------------------------
def checkIfUserAvailable(name):
    """Return True if no user named `name` exists; False otherwise or on error."""
    conn = sqlite3.connect(DATABASE)
    try:
        with conn:
            cur = conn.cursor()
            cur.execute('SELECT * FROM user WHERE name=?', (name,))
            return cur.fetchone() is None
    except Exception:
        # Narrowed from a bare `except:`; any DB error reports "not available".
        # The original trailing `return False` after this try/except was
        # unreachable (both paths return) and has been removed.
        return False
# ------------------------------------------------------------------------------
def enableUser(name):
    """Mark the given user account as enabled."""
    # The connection context manager commits the transaction on exit.
    with sqlite3.connect(DATABASE) as conn:
        conn.execute('UPDATE user SET enabled=1 WHERE name=?', (name,))
# ------------------------------------------------------------------------------
def disableUser(name):
    """Mark the given user account as disabled."""
    # The connection context manager commits the transaction on exit.
    with sqlite3.connect(DATABASE) as conn:
        conn.execute('UPDATE user SET enabled=0 WHERE name=?', (name,))
# ------------------------------------------------------------------------------
def deleteUser(name):
    """Remove the user from both the database and the htpasswd file."""
    db = sqlite3.connect(DATABASE)
    db.execute('DELETE FROM user WHERE name=?', (name,))
    db.commit()
    db.close()
    # A user missing from the htpasswd file is fine; deletion is idempotent.
    try:
        with htpasswd.Basic(PASSWDFILE) as userdb:
            userdb.pop(name)
    except htpasswd.basic.UserNotExists:
        pass
# ------------------------------------------------------------------------------
| {
"content_hash": "c1fff92cbe772e333d304e7176e53385",
"timestamp": "",
"source": "github",
"line_count": 184,
"max_line_length": 80,
"avg_line_length": 28.304347826086957,
"alnum_prop": 0.47043010752688175,
"repo_name": "TRUFA-rnaseq/trufa-users-sqlite",
"id": "c65b7ebbfda607cef84c65f3f5d5ea5cf2b46703",
"size": "5289",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "users/database.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "7465"
}
],
"symlink_target": ""
} |
"""System utils in python"""
| {
"content_hash": "55df2a01dd97963bbafc740f9c28ea89",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 28,
"avg_line_length": 29,
"alnum_prop": 0.6551724137931034,
"repo_name": "thorwhalen/ut",
"id": "6412074bf1b989355dccdaf199767667b1e09d33",
"size": "29",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ut/sh/__init__.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1174"
},
{
"name": "Python",
"bytes": "2258941"
}
],
"symlink_target": ""
} |
"""
homeassistant.components.group
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Provides functionality to group devices that can be turned on or off.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/group/
"""
import homeassistant.core as ha
from homeassistant.const import (
ATTR_ENTITY_ID, CONF_ICON, CONF_NAME, STATE_CLOSED, STATE_HOME,
STATE_NOT_HOME, STATE_OFF, STATE_ON, STATE_OPEN, STATE_UNKNOWN,
ATTR_ASSUMED_STATE, )
from homeassistant.helpers.entity import (
Entity, generate_entity_id, split_entity_id)
from homeassistant.helpers.event import track_state_change
DOMAIN = 'group'
ENTITY_ID_FORMAT = DOMAIN + '.{}'
CONF_ENTITIES = 'entities'
CONF_VIEW = 'view'
ATTR_AUTO = 'auto'
ATTR_ORDER = 'order'
ATTR_VIEW = 'view'
# List of ON/OFF state tuples for groupable states
_GROUP_TYPES = [(STATE_ON, STATE_OFF), (STATE_HOME, STATE_NOT_HOME),
(STATE_OPEN, STATE_CLOSED)]
def _get_group_on_off(state):
    """ Determine the group on/off states based on a state. """
    # Return the first known (on, off) pair containing this state,
    # or (None, None) when the state belongs to no groupable type.
    return next(
        (states for states in _GROUP_TYPES if state in states),
        (None, None))
def is_on(hass, entity_id):
    """ Returns if the group state is in its ON-state. """
    state = hass.states.get(entity_id)
    if not state:
        return False
    group_on, _ = _get_group_on_off(state.state)
    # Only a recognized group type can ever be "on".
    return group_on is not None and state.state == group_on
def expand_entity_ids(hass, entity_ids):
    """ Returns the given list of entity ids and expands group ids into
    the entity ids it represents if found. """
    found_ids = []
    for entity_id in entity_ids:
        if not isinstance(entity_id, str):
            continue
        entity_id = entity_id.lower()
        try:
            domain, _ = split_entity_id(entity_id)
            if domain == DOMAIN:
                # Recursively expand nested groups, keeping first-seen order
                # and dropping duplicates.
                child_ids = expand_entity_ids(
                    hass, get_entity_ids(hass, entity_id))
                found_ids.extend(
                    ent_id for ent_id in child_ids
                    if ent_id not in found_ids)
            elif entity_id not in found_ids:
                found_ids.append(entity_id)
        except AttributeError:
            # Raised by split_entity_id if entity_id is not a string
            pass
    return found_ids
def get_entity_ids(hass, entity_id, domain_filter=None):
    """ Get the entity ids that make up this group. """
    entity_id = entity_id.lower()
    try:
        entity_ids = hass.states.get(entity_id).attributes[ATTR_ENTITY_ID]
        if not domain_filter:
            return entity_ids
        prefix = domain_filter.lower()
        return [ent_id for ent_id in entity_ids
                if ent_id.startswith(prefix)]
    except (AttributeError, KeyError):
        # AttributeError if state did not exist
        # KeyError if key did not exist in attributes
        return []
def setup(hass, config):
    """ Sets up all groups defined in the configuration. """
    for object_id, conf in config.get(DOMAIN, {}).items():
        # A bare list/string is shorthand for {entities: ...}.
        if not isinstance(conf, dict):
            conf = {CONF_ENTITIES: conf}
        entity_ids = conf.get(CONF_ENTITIES)
        # Comma-separated strings become a list of individual entity ids.
        if isinstance(entity_ids, str):
            entity_ids = [ent.strip() for ent in entity_ids.split(",")]
        Group(hass,
              conf.get(CONF_NAME, object_id),
              entity_ids,
              icon=conf.get(CONF_ICON),
              view=conf.get(CONF_VIEW),
              object_id=object_id)
    return True
class Group(Entity):
    """ Tracks a group of entity ids. """
    # pylint: disable=too-many-instance-attributes, too-many-arguments
    def __init__(self, hass, name, entity_ids=None, user_defined=True,
                 icon=None, view=False, object_id=None):
        self.hass = hass
        self._name = name
        self._state = STATE_UNKNOWN
        # Display order: position equals the number of groups created so far.
        self._order = len(hass.states.entity_ids(DOMAIN))
        self._user_defined = user_defined
        self._icon = icon
        self._view = view
        self.entity_id = generate_entity_id(
            ENTITY_ID_FORMAT, object_id or name, hass=hass)
        self.tracking = []
        # (on, off) state pair for this group's type; determined lazily from
        # the first tracked entity whose state matches a known group type.
        self.group_on = None
        self.group_off = None
        self._assumed_state = False
        if entity_ids is not None:
            self.update_tracked_entity_ids(entity_ids)
        else:
            self.update_ha_state(True)
    @property
    def should_poll(self):
        # Group state is pushed via state-change events, never polled.
        return False
    @property
    def name(self):
        """Name of the group."""
        return self._name
    @property
    def state(self):
        """Current combined state of the tracked entities."""
        return self._state
    @property
    def icon(self):
        """Icon configured for this group, if any."""
        return self._icon
    @property
    def hidden(self):
        # Auto-created groups and view groups are hidden from the UI.
        return not self._user_defined or self._view
    @property
    def state_attributes(self):
        """Attributes exposed on the group's state object."""
        data = {
            ATTR_ENTITY_ID: self.tracking,
            ATTR_ORDER: self._order,
        }
        if not self._user_defined:
            data[ATTR_AUTO] = True
        if self._view:
            data[ATTR_VIEW] = True
        return data
    @property
    def assumed_state(self):
        """Return True if unable to access real state of entity."""
        return self._assumed_state
    def update_tracked_entity_ids(self, entity_ids):
        """ Update the tracked entity IDs. """
        self.stop()
        self.tracking = tuple(ent_id.lower() for ent_id in entity_ids)
        # Reset the group type; it is re-derived from the new members.
        self.group_on, self.group_off = None, None
        self.update_ha_state(True)
        self.start()
    def start(self):
        """ Starts the tracking. """
        track_state_change(
            self.hass, self.tracking, self._state_changed_listener)
    def stop(self):
        """ Unregisters the group from Home Assistant. """
        self.hass.states.remove(self.entity_id)
        self.hass.bus.remove_listener(
            ha.EVENT_STATE_CHANGED, self._state_changed_listener)
    def update(self):
        """ Query all the tracked states and determine current group state. """
        self._state = STATE_UNKNOWN
        self._update_group_state()
    def _state_changed_listener(self, entity_id, old_state, new_state):
        """ Listener to receive state changes of tracked entities. """
        self._update_group_state(new_state)
        self.update_ha_state()
    @property
    def _tracking_states(self):
        """States that the group is tracking."""
        states = []
        for entity_id in self.tracking:
            state = self.hass.states.get(entity_id)
            if state is not None:
                states.append(state)
        return states
    def _update_group_state(self, tr_state=None):
        """Update group state.
        Optionally you can provide the only state changed since last update
        allowing this method to take shortcuts.
        """
        # pylint: disable=too-many-branches
        # To store current states of group entities. Might not be needed.
        states = None
        gr_state, gr_on, gr_off = self._state, self.group_on, self.group_off
        # We have not determined type of group yet
        if gr_on is None:
            if tr_state is None:
                # No single changed state given: scan all tracked states for
                # the first one matching a known group type.
                states = self._tracking_states
                for state in states:
                    gr_on, gr_off = \
                        _get_group_on_off(state.state)
                    if gr_on is not None:
                        break
            else:
                gr_on, gr_off = _get_group_on_off(tr_state.state)
            if gr_on is not None:
                self.group_on, self.group_off = gr_on, gr_off
        # We cannot determine state of the group
        if gr_on is None:
            return
        # Full rescan when no shortcut state was given, or when the group was
        # "on" and the changed entity just turned "off" (another member may
        # still be on).
        if tr_state is None or (gr_state == gr_on and
                                tr_state.state == gr_off):
            if states is None:
                states = self._tracking_states
            if any(state.state == gr_on for state in states):
                self._state = gr_on
            else:
                self._state = gr_off
        elif tr_state.state in (gr_on, gr_off):
            # Shortcut: a single member turning on (or off while group was
            # already off) sets the group state directly.
            self._state = tr_state.state
        # Recompute assumed_state from all members when doing a full update,
        # or when a previously-assumed member reports a non-assumed state.
        if tr_state is None or self._assumed_state and \
                not tr_state.attributes.get(ATTR_ASSUMED_STATE):
            if states is None:
                states = self._tracking_states
            self._assumed_state = any(state.attributes.get(ATTR_ASSUMED_STATE)
                                      for state in states)
        elif tr_state.attributes.get(ATTR_ASSUMED_STATE):
            self._assumed_state = True
| {
"content_hash": "b440b41cae0567bfb679d1c6cae23e2f",
"timestamp": "",
"source": "github",
"line_count": 290,
"max_line_length": 79,
"avg_line_length": 29.910344827586208,
"alnum_prop": 0.5786257781876873,
"repo_name": "coteyr/home-assistant",
"id": "3d157f32eea9cf58dae83d68dad26daad905b5fb",
"size": "8674",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "homeassistant/components/group.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1481041"
},
{
"name": "Python",
"bytes": "1876270"
},
{
"name": "Shell",
"bytes": "3570"
}
],
"symlink_target": ""
} |
import subprocess
from selenium.webdriver import Firefox
from selenium.webdriver.firefox.firefox_profile import FirefoxProfile
from splinter.driver.webdriver import BaseWebDriver, WebDriverElement as BaseWebDriverElement
from splinter.driver.webdriver.cookie_manager import CookieManager
class WebDriver(BaseWebDriver):
    """Splinter driver backed by Selenium's Firefox WebDriver."""
    def __init__(self, profile=None, extensions=None, user_agent=None):
        # Keep a handle on the unpatched Popen for later restoration.
        self.old_popen = subprocess.Popen
        ff_profile = FirefoxProfile(profile)
        ff_profile.set_preference('extensions.logging.enabled', False)
        ff_profile.set_preference('network.dns.disableIPv6', False)
        if user_agent is not None:
            ff_profile.set_preference('general.useragent.override', user_agent)
        for extension in (extensions or []):
            ff_profile.add_extension(extension)
        # The browser launch happens between the subprocess patch/unpatch pair.
        self._patch_subprocess()
        self.driver = Firefox(ff_profile)
        self._unpatch_subprocess()
        self.element_class = WebDriverElement
        self._cookie_manager = CookieManager(self.driver)
        super(WebDriver, self).__init__()
class WebDriverElement(BaseWebDriverElement):
    """Firefox-backed element; advanced mouse interactions are unsupported."""
    def mouse_over(self):
        """
        Firefox doesn't support mouseover.
        """
        raise NotImplementedError("Firefox doesn't support mouse over")
    def mouse_out(self):
        """
        Firefox doesn't support mouseout.
        """
        raise NotImplementedError("Firefox doesn't support mouseout")
    def double_click(self):
        """
        Firefox doesn't support doubleclick.
        """
        raise NotImplementedError("Firefox doesn't support doubleclick")
    def right_click(self):
        """
        Firefox doesn't support right click.
        """
        raise NotImplementedError("Firefox doesn't support right click")
    def drag_and_drop(self, droppable):
        """
        Firefox doesn't support drag and drop.
        """
        # Message fixed: previously read "drag an drop".
        raise NotImplementedError("Firefox doesn't support drag and drop")
    # Legacy aliases kept for backward compatibility.
    mouseover = mouse_over
    mouseout = mouse_out
| {
"content_hash": "102ae3e27511a6da05d2b858718452a5",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 93,
"avg_line_length": 30.88235294117647,
"alnum_prop": 0.6680952380952381,
"repo_name": "softak/webfaction_demo",
"id": "4be1c45bfaa8aa53d0e537d792a21335c0bd8dcf",
"size": "2146",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "vendor-local/lib/python/splinter/driver/webdriver/firefox.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CoffeeScript",
"bytes": "33283"
},
{
"name": "JavaScript",
"bytes": "984889"
},
{
"name": "Python",
"bytes": "8055804"
},
{
"name": "Shell",
"bytes": "3065"
}
],
"symlink_target": ""
} |
"""Encoders that use time series inputs to predict SEIR terms."""
import abc
import logging
import sys
from typing import List
import numpy as np
import tensorflow as tf
from covid_epidemiology.src import constants
from covid_epidemiology.src.models.shared import feature_utils
class VariableEncoder(abc.ABC):
  """Encodes time series variables."""

  @abc.abstractmethod
  def encode(self, time_series_input, timestep, is_training=False):
    """Encodes the time series variables.

    Args:
      time_series_input: List of tf.tensors of size [current timestep]. Each
        element of time_series_input is a tf.tensor of shape [num_locations].
      timestep: Point in time relative to the beginning of time_series_input to
        predict from. Has to be <= len(time_series_input).
      is_training: Whether the model is currently training or performing
        inference.

    Returns:
      tf.tensor of shape [num_locations]
    """
    raise NotImplementedError

  # `abc.abstractproperty` is deprecated (since Python 3.3); stacking
  # `@property` over `@abc.abstractmethod` is the equivalent modern form.
  @property
  @abc.abstractmethod
  def trainable_variables(self):
    """Trainable variables owned by this encoder."""
    raise NotImplementedError

  @property
  def lasso_loss(self):
    # No regularization by default; subclasses may override.
    return 0.
class StaticEncoder(VariableEncoder):
  """Static encoder with fixed parameters.

  Simply passes through the most recent value of the time series; there is
  nothing to learn.
  """

  def encode(self, time_series_input, timestep, is_training=False):
    """Returns the last observed value, ignoring `timestep`."""
    del timestep, is_training  # Unused.
    return time_series_input[-1]

  @property
  def trainable_variables(self):
    # A static encoder has no parameters to train.
    return []
class PassThroughEncoder(VariableEncoder):
  """Encoder that outputs the input without changing.
  PassThroughEncoder is required to have a single covariate input, and that
  input is required to be categorical (i.e., it must have absolute overrides,
  not relative).
  """
  def __init__(self,
               chosen_locations,
               num_known_timesteps,
               forecast_window_size,
               covariates,
               forecasted_covariates,
               covariate_overrides,
               covariate_feature_specs,
               ts_categorical_features = None,
               name = ""):
    self.num_known_timesteps = num_known_timesteps
    # Build one (num_locations, 1) float32 array per observed timestep from
    # the single configured covariate.
    covariates_over_time = []
    for timestep in range(num_known_timesteps):
      covariates_this_timestep = np.zeros((len(chosen_locations), 1),
                                          dtype="float32")
      if len(covariate_feature_specs) > 1:
        raise ValueError(
            "Only one covariate is supported for PassThroughEncoder.")
      feature_spec = covariate_feature_specs[0]
      if covariate_overrides is not None:
        # Overrides are only meaningful for categorical (absolute) features.
        if (ts_categorical_features is None or
            feature_spec.name not in ts_categorical_features):
          raise ValueError(
              ("Only categorical features are supported by PassThroughEncoder. "
               f"{feature_spec.name} not in {ts_categorical_features}"))
      if feature_spec.name in covariates:
        # Locations with no data for this covariate keep the zero default.
        for location_index, location in enumerate(chosen_locations):
          if location in covariates[feature_spec.name]:
            covariates_this_timestep[location_index] = (
                covariates[feature_spec.name][location][timestep])
      else:
        raise ValueError(
            "Wrong feature name specified in covariate_feature_specs.")
      covariates_over_time.append(covariates_this_timestep)
    self.covariates_over_time = covariates_over_time
    self.name = name
    # Extract covariates name for each encoder
    self.forecasted_feature_values = (
        feature_utils.extract_forecasted_features(forecasted_covariates,
                                                  covariate_feature_specs))
    self.forecasted_feature_values = tf.constant(
        np.array(self.forecasted_feature_values), dtype=tf.float32)
    # Dense override table indexed by timestep; -1 entries mean "no override"
    # (see _get_overriden_covariate_values below).
    if covariate_overrides is not None:
      self.covariate_feature_overrides = feature_utils.covariate_overrides_to_dense(
          covariate_overrides, covariate_feature_specs or {}, chosen_locations,
          num_known_timesteps + forecast_window_size)
    else:
      self.covariate_feature_overrides = None
  def encode(self,
             time_series_input,
             timestep,
             is_training = False):
    # The covariate value returned is the one from the previous timestep;
    # timestep 0 yields zeros since there is no prior observation.
    potential_timestep = timestep - 1
    if potential_timestep >= 0:
      output_values = self.get_overriden_covariate_values_passthrough(
          potential_timestep)
    else:
      output_values = tf.zeros(tf.shape(self.covariates_over_time[0]))
    # output_values is of shape (num locations, 1). we must convert it into
    # the shape (num_locations) before returning. E.g.,
    # output_values = [[0.3]
    #                  [0.3]] becomes [0.3 0.3].
    assert output_values.shape[1] == 1
    return output_values[:, -1]
  def _get_overriden_covariate_values(self, potential_timestep):
    """Overrides the covariates depending on the type of covariate and timestep.
    Calculate the overridden covariate values.
    Args:
      potential_timestep: integer. Timestep to compute overriden covariates.
    Returns:
      Tensor with the computed overriden covariate at that timestep.
    """
    # Note that if we move to potential_timestamp being a tensor we will have
    # to move from the chained comparison to an and statement.
    if 0 <= potential_timestep < self.num_known_timesteps:
      current_covariate_values = self.covariates_over_time[potential_timestep]
    else:
      # Beyond the observed window, fall back to the forecasted values.
      value_timestep = potential_timestep - self.num_known_timesteps
      current_covariate_values = (
          self.forecasted_feature_values[value_timestep])
    if self.covariate_feature_overrides is not None:
      # -1 is the sentinel for "no override"; keep the current value there.
      covariate_values_merged = np.where(
          self.covariate_feature_overrides[potential_timestep] == -1,
          current_covariate_values,
          self.covariate_feature_overrides[potential_timestep])
      # Log the overrides for debugging purposes.
      change_mask = np.where(
          self.covariate_feature_overrides[potential_timestep] != -1, 1, 0)
      change_ixs = np.nonzero(change_mask)
      indices = np.dstack(change_ixs)
      if change_ixs[0].size != 0:
        np.set_printoptions(threshold=sys.maxsize)
        current_covariate_values_np = np.asarray(current_covariate_values)
        covariate_values_merged_np = np.asarray(covariate_values_merged)
        logging.debug(
            "Pass-through covariate changes at t=%d for %s\n"
            "for [location index, covariate index] =\n %s:\n"
            "%s overridden by %s results in\n%s", potential_timestep, self.name,
            indices, current_covariate_values_np[change_ixs],
            self.covariate_feature_overrides[potential_timestep][change_ixs],
            covariate_values_merged_np[change_ixs])
    else:
      covariate_values_merged = current_covariate_values
    return covariate_values_merged
  def get_overriden_covariate_values_passthrough(self, potential_timestep):
    """This wrapper makes mocking this function possible."""
    return self._get_overriden_covariate_values(potential_timestep)
  @property
  def trainable_variables(self):
    # Pass-through encoding has no learnable parameters.
    return []
class VaccineEncoder(VariableEncoder):
"""Encoder for Vaccinations.
VaccineEncoder is required to have a single covariate input.
"""
def __init__(self,
chosen_locations,
num_known_timesteps,
forecast_window_size,
covariates,
forecasted_covariates,
covariate_overrides,
covariate_feature_specs,
ts_categorical_features = None,
name = "",
vaccine_type = "first_dose",
trend_following = True):
self.num_known_timesteps = num_known_timesteps
covariates_over_time_all = dict()
for feature_spec in covariate_feature_specs:
covariates_over_time = []
for timestep in range(num_known_timesteps):
covariates_this_timestep = np.zeros((len(chosen_locations), 1),
dtype="float32")
if feature_spec.name in covariates:
for location_index, location in enumerate(chosen_locations):
if location in covariates[feature_spec.name]:
covariates_this_timestep[location_index] = (
covariates[feature_spec.name][location][timestep])
else:
raise ValueError(
"Wrong feature name specified in covariate_feature_specs.")
covariates_over_time.append(covariates_this_timestep)
covariates_over_time_all[feature_spec.name] = covariates_over_time.copy()
self.covariates_over_time_all = covariates_over_time_all
self.name = name
self.vaccine_type = vaccine_type
self.covariate_feature_specs = covariate_feature_specs
self.forecasted_feature_values_all = dict()
for feature_spec in covariate_feature_specs:
# Extract covariates name for each encoder
if trend_following:
# Same daily vaccinated ratio for future.
# Will disable if XGBoost properly forecast.
# Currently, it forecasted 0 for all.
# Note that averaging is another option.
self.forecasted_feature_values_all[
feature_spec.name] = covariates_over_time_all[
feature_spec.name].copy()[-forecast_window_size:]
else:
self.forecasted_feature_values_all[feature_spec.name] = (
feature_utils.extract_forecasted_features(
forecasted_covariates[feature_spec.name], [feature_spec]))
self.forecasted_feature_values_all[feature_spec.name] = tf.constant(
np.array(self.forecasted_feature_values_all[feature_spec.name]),
dtype=tf.float32)
self.covariate_feature_overrides_all = dict()
for feature_spec in covariate_feature_specs:
if covariate_overrides is not None:
self.covariate_feature_overrides_all[
feature_spec.name] = feature_utils.covariate_overrides_to_dense(
covariate_overrides, [feature_spec] or {}, chosen_locations,
num_known_timesteps + forecast_window_size)
else:
self.covariate_feature_overrides_all[feature_spec.name] = None
def encode(self,
time_series_input,
timestep,
is_training = False):
potential_timestep = timestep - 1
if potential_timestep >= 0:
output_values = self.compute_immuned_patients(potential_timestep)
else:
output_values = tf.zeros(
tf.shape(self.covariates_over_time_all[
self.covariate_feature_specs[0].name][0]))
# output_values is of shape (num locations, 1). we must convert it into
# the shape (num_locations) before returning. E.g.,
# output_values = [[0.3]
# [0.3]] becomes [0.3 0.3].
assert output_values.shape[1] == 1
return output_values[:, -1]
def compute_immuned_patients(self, potential_timestep):
"""Compute immuned patients for first and second dosage vaccination.
Args:
potential_timestep: Timestep to compute immuned patients.
Returns:
immuned_patient_count: Number of immuned patient no via certain dosage.
"""
if self.vaccine_type == "first_dose":
vaccine_effect_diff = self.get_overriden_covariate_values_passthrough(
0, constants.VACCINATED_EFFECTIVENESS_FIRST_DOSE)
if self.vaccine_type == "second_dose":
vaccine_effect_diff = (
self.get_overriden_covariate_values_passthrough(
0, constants.VACCINATED_EFFECTIVENESS_SECOND_DOSE) -
self.get_overriden_covariate_values_passthrough(
0, constants.VACCINATED_EFFECTIVENESS_FIRST_DOSE))
immuned_patient_count = (
vaccine_effect_diff * self.get_overriden_covariate_values_passthrough(
0, self.covariate_feature_specs[0].name))
for time_index in range(potential_timestep):
if self.vaccine_type == "first_dose":
vaccine_effect_diff = self.get_overriden_covariate_values_passthrough(
potential_timestep - time_index,
constants.VACCINATED_EFFECTIVENESS_FIRST_DOSE)
if self.vaccine_type == "second_dose":
vaccine_effect_diff = (
self.get_overriden_covariate_values_passthrough(
potential_timestep - time_index,
constants.VACCINATED_EFFECTIVENESS_SECOND_DOSE) -
self.get_overriden_covariate_values_passthrough(
potential_timestep - time_index,
constants.VACCINATED_EFFECTIVENESS_FIRST_DOSE))
current_vaccine_effect = np.minimum(
(vaccine_effect_diff / constants.VACCINE_EFFECTIVENESS_CHANGE_PERIOD)
* time_index, vaccine_effect_diff)
current_vaccinated_count = (
self.get_overriden_covariate_values_passthrough(
potential_timestep - time_index,
self.covariate_feature_specs[0].name))
immuned_patient_count += current_vaccinated_count * current_vaccine_effect
return immuned_patient_count
def _get_overriden_covariate_values(self, potential_timestep,
feature_spec_name):
"""Overrides the covariates depending on the type of covariate and timestep.
Calculate the overridden covariate values.
Args:
potential_timestep: integer. Timestep to compute overriden covariates.
feature_spec_name: Name of the covariate
Returns:
Tensor with the computed overriden covariate at that timestep.
"""
if 0 <= potential_timestep < self.num_known_timesteps:
current_covariate_values = self.covariates_over_time_all[
feature_spec_name][potential_timestep]
else:
value_timestep = potential_timestep - self.num_known_timesteps
current_covariate_values = (
self.forecasted_feature_values_all[feature_spec_name][value_timestep])
if self.covariate_feature_overrides_all[feature_spec_name] is not None:
covariate_values_merged = np.where(
self.covariate_feature_overrides_all[feature_spec_name]
[potential_timestep] == -1, current_covariate_values,
self.covariate_feature_overrides_all[feature_spec_name]
[potential_timestep])
# Log the overrides for debugging purposes.
change_mask = np.where(
self.covariate_feature_overrides_all[feature_spec_name]
[potential_timestep] != -1, 1, 0)
change_ixs = np.nonzero(change_mask)
indices = np.dstack(change_ixs)
if change_ixs[0].size != 0:
np.set_printoptions(threshold=sys.maxsize)
current_covariate_values_np = np.asarray(current_covariate_values)
covariate_values_merged_np = np.asarray(covariate_values_merged)
logging.debug(
"Pass-through covariate changes at t=%d for %s\n"
"for [location index, covariate index] =\n %s:\n"
"%s overridden by %s results in\n%s", potential_timestep, self.name,
indices, current_covariate_values_np[change_ixs],
self.covariate_feature_overrides_all[feature_spec_name]
[potential_timestep][change_ixs],
covariate_values_merged_np[change_ixs])
else:
covariate_values_merged = current_covariate_values
return covariate_values_merged
def get_overriden_covariate_values_passthrough(self, potential_timestep,
feature_spec_name):
"""This wrapper makes mocking this function possible."""
# The function name must be unique among {GamEncoder, PassThroughEncoder}
# functions so that both GamEncoder and PassThroughEncider functions can be
# mocked in the same test function.
return self._get_overriden_covariate_values(potential_timestep,
feature_spec_name)
  @property
  def trainable_variables(self):
    """Returns an empty list: this encoder exposes no trainable parameters."""
    return []
| {
"content_hash": "122b048a787a59ed54fa04101ed07c96",
"timestamp": "",
"source": "github",
"line_count": 403,
"max_line_length": 84,
"avg_line_length": 39.37220843672456,
"alnum_prop": 0.6512258145837272,
"repo_name": "google-research/google-research",
"id": "78bc4a43c6f7c6abb8a9ab205547408fe7c1cd6a",
"size": "16475",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "covid_epidemiology/src/models/encoders/variable_encoders.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "9817"
},
{
"name": "C++",
"bytes": "4166670"
},
{
"name": "CMake",
"bytes": "6412"
},
{
"name": "CSS",
"bytes": "27092"
},
{
"name": "Cuda",
"bytes": "1431"
},
{
"name": "Dockerfile",
"bytes": "7145"
},
{
"name": "Gnuplot",
"bytes": "11125"
},
{
"name": "HTML",
"bytes": "77599"
},
{
"name": "ImageJ Macro",
"bytes": "50488"
},
{
"name": "Java",
"bytes": "487585"
},
{
"name": "JavaScript",
"bytes": "896512"
},
{
"name": "Julia",
"bytes": "67986"
},
{
"name": "Jupyter Notebook",
"bytes": "71290299"
},
{
"name": "Lua",
"bytes": "29905"
},
{
"name": "MATLAB",
"bytes": "103813"
},
{
"name": "Makefile",
"bytes": "5636"
},
{
"name": "NASL",
"bytes": "63883"
},
{
"name": "Perl",
"bytes": "8590"
},
{
"name": "Python",
"bytes": "53790200"
},
{
"name": "R",
"bytes": "101058"
},
{
"name": "Roff",
"bytes": "1208"
},
{
"name": "Rust",
"bytes": "2389"
},
{
"name": "Shell",
"bytes": "730444"
},
{
"name": "Smarty",
"bytes": "5966"
},
{
"name": "Starlark",
"bytes": "245038"
}
],
"symlink_target": ""
} |
import sys
from datetime import datetime

from gurobipy import *
def read_input_csv(filename):
    """Parse a CSV matrix file.

    The first row is a header whose first cell is ignored; every other row
    starts with a row name followed by float values.

    Returns:
        (matrix, column_names, row_names) where ``matrix`` maps
        ``(row_name, column_name)`` to a float, ``column_names`` is the
        header minus its first cell, and ``row_names`` is sorted.
    """
    header = []
    cells = {}
    row_names = set()
    with open(filename) as handle:
        for line in handle:
            line = line.strip()
            if not header:
                header = line.split(',')
                continue
            fields = line.split(',')
            row_names.add(fields[0])
            for index, raw_value in enumerate(fields[1:], start=1):
                cells[fields[0], header[index]] = float(raw_value)
    header.pop(0)
    return cells, header, sorted(row_names)
def read_slots_interviews(filename):
    """Read a CSV whose header names map to integer counts in the data row.

    If several data rows are present, the last one wins (matching the
    original reassignment behaviour).
    """
    names = []
    counts = []
    with open(filename) as handle:
        for line in handle:
            line = line.strip()
            if not names:
                names = line.split(',')
                continue
            counts = [int(token) for token in line.split(',')]
    return dict(zip(names, counts))
if __name__ == "__main__":
    # Usage: InterviewScheduler Shortlists.csv SlotsPanels.csv Prefs.csv
    #        [SlotsPerInterview.csv]
    # NOTE: ``sys`` must be imported at module level; the original relied on
    # it without importing it, which raises NameError at runtime.
    if len(sys.argv) < 3:
        print("Usage: InterviewScheduler Shortlists.csv SlotsPanels.csv Prefs.csv")
        exit(-1)
    print(datetime.now().time())
    shortlists, clubs, names = read_input_csv(sys.argv[1])
    print('Number of Clubs')
    print(len(clubs))
    print('Number of Candidates')
    print(len(names))
    panels, clubs2, slots = read_input_csv(sys.argv[2])
    print('Number of Slots')
    print(len(slots))
    prefs, clubs3, names2 = read_input_csv(sys.argv[3])
    # All three input files must agree on the set of clubs (their columns).
    assert (sorted(clubs) == sorted(clubs2))
    assert (sorted(clubs) == sorted(clubs3))
    # Validate inputs before building the model.
    for val in shortlists.values():
        if val not in [0, 1]:
            raise ValueError('The shortlists data can have only 0s or 1s indicating whether the student has a shortlist or not')
    for val in panels.values():
        if not val.is_integer():
            raise ValueError('The number of panels should be a whole number')
        if val < 0:
            raise ValueError('The number of panels cannot be negative')
    slots_int = dict()
    if len(sys.argv) > 4:
        slots_int = read_slots_interviews(sys.argv[4])
    # Find out max number of panels per club across all slots.
    maxpanels = dict((c, max(panels[s, c] for s in slots)) for c in clubs)
    # Generate cost of slots: earlier slots are cheaper.
    costs = dict((slots[s], s + 1) for s in range(len(slots)))
    # Calculate number of shortlists for each student.
    crit = dict((n, sum(shortlists[n, c] for c in clubs)) for n in names)
    # Remove names who dont have any shortlists.
    names = [key for key, value in crit.items() if value > 0]
    # Calculate number of shortlists per company.
    compshortlists = dict((c, sum(shortlists[n, c] for n in names)) for c in clubs)
    # Calculate total number of interviews possible per company (slots per
    # interview collapses multiple panel-slots into one interview).
    comppanels = dict((c, int(sum(panels[s, c] for s in slots) / slots_int.get(c, 1))) for c in clubs)
    for c in clubs:
        if compshortlists[c] > comppanels[c]:
            print(c + " has shortlists greater than no of panels " + str(compshortlists[c]) + " > " + str(comppanels[c]))
    # Create objective coefficients: each student's preferences are re-ranked
    # to a dense 1..k scale, then weighted by slot cost so that oversubscribed
    # clubs are packed early and others spread late.
    prefsnew = dict()
    objcoeff = dict()
    for n in names:
        actpref = dict((c, prefs[n, c] * shortlists[n, c]) for c in clubs if shortlists[n, c] > 0)
        scaledpref = {key: rank for rank, key in enumerate(sorted(actpref, key=actpref.get), 1)}
        for c, rank in scaledpref.items():
            prefsnew[n, c] = rank
            for s in slots:
                if compshortlists[c] > comppanels[c]:
                    objcoeff[s, c, n] = (rank / (crit[n])) * (len(slots) + 1 - costs[s])
                else:
                    objcoeff[s, c, n] = (1 - rank / (crit[n] + 1)) * costs[s]
    print('Creating IPLP')
    model = Model('interviews')
    # Binary decision variable: student n interviews with club c in slot s.
    choices = model.addVars(slots, clubs, names, vtype=GRB.BINARY, name='G')
    # Objective - allocate max students to the initial few slots.
    model.setObjective(quicksum(choices[s, c, n] * objcoeff.get((s, c, n), 1) for s in slots for n in names for c in clubs), GRB.MINIMIZE)
    # Constraint - maximum number in a slot for a club is limited by panels.
    model.addConstrs((choices.sum(s, c, '*') <= panels[s, c] for s in slots for c in clubs))
    # Constraint - allocate student only if he has a shortlist.
    model.addConstrs((choices.sum('*', c, n) <= shortlists[n, c] * slots_int.get(c, 1) for n in names for c in clubs))
    # Constraint - slots should not conflict for a student.
    model.addConstrs((choices.sum(s, '*', n) <= 1 for s in slots for n in names))
    # Constraint - allocate all students or number of interviews possible.
    model.addConstrs((choices.sum('*', c, '*') == min(compshortlists[c], comppanels[c]) * slots_int.get(c, 1) for c in clubs))
    # Constraint - for multiple slots per interview, same candidate should be
    # allocated to every slot of the interview block.
    for c, si in slots_int.items():
        if si > 1:
            for i in range(si - 1, len(slots), si):
                for n in names:
                    for j in range(i - si + 1, i):
                        model.addConstr((choices[slots[i], c, n] - choices[slots[j], c, n]), GRB.EQUAL, 0)
    print('Optimising')
    model.optimize()
    solution = model.getAttr('X', choices)
    # Write the slot-by-panel schedule.  Context managers guarantee the files
    # are flushed and closed even if a write fails (the original leaked the
    # handles on error).
    with open('schedule.csv', 'w') as schedout:
        line = 'Slot'
        for c in clubs:
            for j in range(int(maxpanels[c])):
                line = line + ',' + c + str(j + 1)
        schedout.write(line + '\n')
        for s in slots:
            line = s
            for c in clubs:
                row = [''] * int(maxpanels[c])
                i = 0
                for n in names:
                    if solution[s, c, n] == 1:
                        row[i] = n + ' ' + str(int(prefsnew[n, c])) + '_' + str(int(crit[n]))
                        i = i + 1
                line = line + ',' + ','.join(row)
            schedout.write(line + '\n')
    # Write the per-candidate view of the same solution.
    with open('names.csv', 'w') as namesout:
        line = 'Slot'
        for n in names:
            line = line + ',' + n
        namesout.write(line + '\n')
        for s in slots:
            line = s
            for n in names:
                row = ''
                for c in clubs:
                    if solution[s, c, n] == 1:
                        row = c + '_' + str(int(prefsnew[n, c]))
                line = line + ',' + row
            namesout.write(line + '\n')
    print(model.status)
    print(datetime.now().time())
| {
"content_hash": "183b04a66d96533381272eed590db7a1",
"timestamp": "",
"source": "github",
"line_count": 190,
"max_line_length": 138,
"avg_line_length": 33.52105263157895,
"alnum_prop": 0.5638247762600094,
"repo_name": "bbalegere/MILP-Interview-Scheduler",
"id": "9290b87ebb22ca6666e9c3aec0635666cfdfa4c6",
"size": "6369",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "InterviewSchedulerGurobi.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "38826"
}
],
"symlink_target": ""
} |
"""Tests for the repository class."""
import os
import tempfile
from pybel import to_bel_script, to_nodelink_file, to_pickle
from pybel.examples import egf_graph
from pybel.repository import BELRepository
from pybel.testing.cases import TemporaryCacheMixin
class TestRepository(TemporaryCacheMixin):
    """Tests for the repository class."""

    def test_repository(self):
        """Round-trip the example graph through a BELRepository directory."""
        stem = "egf.bel"
        with tempfile.TemporaryDirectory() as directory:
            bel_path = os.path.join(directory, stem)
            json_path = os.path.join(directory, f"{stem}.json")
            pickle_path = os.path.join(directory, f"{stem}.pickle")

            # Export the example graph in all three formats the repository
            # knows about.
            for exporter, path in (
                (to_bel_script, bel_path),
                (to_nodelink_file, json_path),
                (to_pickle, pickle_path),
            ):
                exporter(egf_graph, path)

            repository = BELRepository(directory)
            graphs = repository.get_graphs(
                manager=self.manager,
                use_cached=True,
                use_tqdm=False,
            )
            self.assertNotEqual(0, len(graphs), msg="No graphs returned")
            self.assertEqual(1, len(graphs))
            self.assertIn(bel_path, graphs)
            graph = graphs[bel_path]
            self.assertEqual(graph.document, egf_graph.document)
            self.assertEqual(
                set(graph.nodes()),
                set(egf_graph.nodes()),
                msg=f"""
Original nodes: {set(egf_graph.nodes())}
New nodes: {set(graph.nodes())}
""",
            )
            self.assertEqual(
                set(graph.edges()),
                set(egf_graph.edges()),
                msg=f"""
Original edges: {set(egf_graph.edges())}
New edges: {set(graph.edges())}
""",
            )
            self.assertTrue(os.path.exists(json_path))
            self.assertTrue(os.path.exists(pickle_path))
| {
"content_hash": "223f7ecf135040d7c38b74121ec7827c",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 77,
"avg_line_length": 35.69642857142857,
"alnum_prop": 0.5637818909454727,
"repo_name": "pybel/pybel",
"id": "a0fd2a40f304a5d1ef1b3690fec197eb17815efb",
"size": "2024",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_bel_repository.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "880"
},
{
"name": "JavaScript",
"bytes": "9473"
},
{
"name": "Jupyter Notebook",
"bytes": "52170"
},
{
"name": "Python",
"bytes": "1475429"
}
],
"symlink_target": ""
} |
import random
class Menu(object):
    """Circular menu of named items with a movable cursor.

    Items keep insertion order; ``move`` wraps the cursor around both ends.
    """

    def __init__(self):
        self._current_index = 0  # Cursor into self._keys.
        self._keys = []          # Item names, in insertion order.
        self._items = {}         # Name -> item.

    def __len__(self):
        return len(self._keys)

    def append(self, name, item):
        """Add *item* under *name* at the end of the menu."""
        self._keys.append(name)
        self._items[name] = item

    @property
    def current_item_name(self):
        """Name of the item under the cursor.

        Raises:
            ValueError: if the menu is empty.
        """
        if not self._keys:
            raise ValueError("You must add menu items to the menu")
        return self._keys[self._current_index]

    @property
    def current_item(self):
        """The item currently under the cursor."""
        return self._items[self.current_item_name]

    def move(self, step):
        """Advance the cursor by *step* positions, wrapping around."""
        self._current_index = (self._current_index + step) % len(self._keys)

    def move_random(self):
        """Jump the cursor to a uniformly random playlist item.

        Fix: the original retried ``random.randint`` until it hit an item
        whose ``is_playlist`` was true, which spins forever when the menu
        contains no playlist items.  Selecting directly from the playlist
        indices keeps the same uniform distribution and fails fast instead.

        Raises:
            ValueError: if the menu contains no playlist items.
        """
        playlist_indexes = [
            index for index, name in enumerate(self._keys)
            if self._items[name].is_playlist
        ]
        if not playlist_indexes:
            raise ValueError("Menu has no playlist items")
        self._current_index = random.choice(playlist_indexes)

    def next(self):
        """Move the cursor one item forward."""
        self.move(1)

    def prev(self):
        """Move the cursor one item backward."""
        self.move(-1)
| {
"content_hash": "49a1ab27c733e99d61acc67152bcbb70",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 72,
"avg_line_length": 18.381818181818183,
"alnum_prop": 0.6211671612265084,
"repo_name": "mattgrogan/ledmatrix",
"id": "253f1ae0f8b191ef519603ad88d4c5e06bd2a744",
"size": "1011",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ledmatrix/menu.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "88132"
}
],
"symlink_target": ""
} |
"""
Copyright [2009-2017] EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from io import StringIO
import attr
import pytest
import rnacentral_pipeline.databases.rfam.cross_references as cr
from rnacentral_pipeline.databases.data.ontology_term import OntologyTerm as Term
@pytest.fixture
def data():
    """Parse the sample Rfam database_link dump into a list of records."""
    with open("data/rfam/database_link.tsv", "r") as handle:
        return list(cr.parse(handle))
def test_can_fetch_and_parse_data(data):
    """The sample dump should parse into the full set of cross references."""
    expected_rows = 7909
    assert len(data) == expected_rows
def test_correctly_parses_so_data(data):
    """Row 1 is the RF00001 -> SO:0000652 (5S rRNA) cross reference."""
    # RF00001 SO 0000652 rRNA_5S Gene; rRNA;
    expected = cr.RfamDatabaseLink(
        rfam_family="RF00001",
        database="SO",
        comment=None,
        external_id="SO:0000652",
        other="rRNA_5S",
        family_type="Gene; rRNA;",
    )
    assert attr.asdict(data[1]) == attr.asdict(expected)
def test_correctly_parses_other(data):
    """Row 49 carries free text in the ``other`` column."""
    expected = cr.RfamDatabaseLink(
        rfam_family="RF00015",
        database="GO",
        comment=None,
        external_id="GO:0017070",
        other="U6 snRNA binding",
        family_type="Gene; snRNA; splicing;",
    )
    assert attr.asdict(data[49]) == attr.asdict(expected)
def test_can_extract_all_ontology_terms():
    """Ontology references from the first ten lines come back in file order."""
    with open("data/rfam/database_link.tsv", "r") as raw:
        head = raw.readlines()[:10]
    sample = StringIO("".join(head))
    found = [reference.external_id for reference in cr.ontology_references(sample)]
    assert found == [
        "SO:0000652",
        "GO:0003735",
        "GO:0005840",
        "SO:0000375",
        "GO:0003735",
        "GO:0005840",
        "SO:0000391",
    ]
@pytest.mark.parametrize(
    "excluded",
    [
        "GO:0008049",
        "GO:0042981",
        "GO:0042749",
        "GO:0044005",
    ],
)
def test_does_not_include_bad_go_terms_in_ontologies(excluded):
    """Known-bad GO terms must be filtered out of the ontology references."""
    with open("data/rfam/database_link.tsv", "r") as raw:
        seen = {reference.external_id for reference in cr.ontology_references(raw)}
    assert excluded not in seen
@pytest.mark.parametrize(
    "model,expected,unexpected", [("RF02712", "GO:0051819", "GO:0044005")]
)
def test_replaces_bad_ontology_references(model, expected, unexpected):
    """Bad terms are replaced with curated alternatives for the family."""
    with open("data/rfam/database_link.tsv", "r") as raw:
        family_terms = {
            reference.external_id
            for reference in cr.ontology_references(raw)
            if reference.rfam_family == model
        }
    assert expected in family_terms
    assert unexpected not in family_terms
@pytest.mark.parametrize(
    "model,excluded",
    [
        ("RF01942", "GO:0035068"),
        ("RF02338", "GO:0006396"),
    ],
)
def test_does_not_incorrectly_assign_mirna_go_mapping(model, excluded):
    """miRNA-specific GO exclusions apply to the listed families."""
    with open("data/rfam/database_link.tsv", "r") as raw:
        family_terms = {
            reference.external_id
            for reference in cr.ontology_references(raw)
            if reference.rfam_family == model
        }
    assert excluded not in family_terms
@pytest.mark.parametrize(
    "model,expected",
    [
        ("RF00012", "GO:0006396"),
    ],
)
def test_does_not_exclude_bad_mirna_terms_from_other_families(model, expected):
    """Terms excluded for miRNA families must survive for other families."""
    with open("data/rfam/database_link.tsv", "r") as raw:
        family_terms = {
            reference.external_id
            for reference in cr.ontology_references(raw)
            if reference.rfam_family == model
        }
    assert expected in family_terms
| {
"content_hash": "65472590121e4b7e7b39f8fae84a95e2",
"timestamp": "",
"source": "github",
"line_count": 139,
"max_line_length": 81,
"avg_line_length": 28.640287769784173,
"alnum_prop": 0.6114041698065813,
"repo_name": "RNAcentral/rnacentral-import-pipeline",
"id": "2b2d81d68efec277ef84c107aab2037acd6b7488",
"size": "4006",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/databases/rfam/cross_references_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "18451"
},
{
"name": "Dockerfile",
"bytes": "3405"
},
{
"name": "Groovy",
"bytes": "6339"
},
{
"name": "HTML",
"bytes": "10430"
},
{
"name": "Makefile",
"bytes": "1197"
},
{
"name": "Nextflow",
"bytes": "104756"
},
{
"name": "PLpgSQL",
"bytes": "15906"
},
{
"name": "PostScript",
"bytes": "965516"
},
{
"name": "Python",
"bytes": "1623134"
},
{
"name": "Rust",
"bytes": "181197"
},
{
"name": "Shell",
"bytes": "23155"
}
],
"symlink_target": ""
} |
"""dli_powerswitch.py unit tests."""
from unittest import mock
from gazoo_device.auxiliary_devices import dli_powerswitch
from gazoo_device.tests.unit_tests.utils import dli_powerswitch_logs
from gazoo_device.tests.unit_tests.utils import fake_device_test_case
from gazoo_device.utility import host_utils
from gazoo_device.utility import http_utils
import immutabledict
_PERSISTENT_PROPERTIES = immutabledict.immutabledict({
"console_port_name": "123.45.67.89",
"device_type": "powerswitch",
"model": "LPC9",
"serial_number": "ABCD1234",
})
def _mock_command(*args, **kwargs):
  """Returns a canned HTTP response looked up by the requested URL."""
  del kwargs  # Unused by the mock.
  url = args[0]
  entry = dli_powerswitch_logs.DEFAULT_BEHAVIOR[url]
  return _ResponseObject(entry["text"], entry["status_code"])
class _ResponseObject:
  """HTTP response container.

  Minimal stand-in for the response object returned by http_utils; only the
  two attributes the tests read are provided.
  """

  def __init__(self, text, status_code):
    # Response body as a string.
    self.text = text
    # Numeric HTTP status code (e.g. 200).
    self.status_code = status_code
class PowerswitchTest(fake_device_test_case.FakeDeviceTestCase):
  """Powerswitch unit test class."""

  def setUp(self):
    super().setUp()
    self.setup_fake_device_requirements("powerswitch-1234")
    self.device_config["persistent"]["console_port_name"] = "123.45.67.89"
    # Unit under test: a DLI powerswitch backed by mocked HTTP transport.
    self.uut = dli_powerswitch.DliPowerSwitch(
        self.mock_manager,
        self.device_config,
        log_directory=self.artifacts_directory,
        log_file_name=None)

  def test_001_powerswitch_init(self):
    """Testing the creation of the powerswitch_device object."""
    self.assertTrue(
        self.uut,
        "The {} object failed to be created.".format("powerswitch_device"))

  @mock.patch.object(host_utils, "is_pingable", return_value=True)
  def test_002_is_connected_true(self, mock_ping):
    """Verify is_connected works as expected."""
    self.assertTrue(
        dli_powerswitch.DliPowerSwitch.is_connected(self.device_config))

  @mock.patch.object(http_utils, "send_http_get", side_effect=_mock_command)
  def test_003_get_detection_info(self, mock_http_get):
    """Verify get detection info works correctly."""
    self._test_get_detection_info(
        self.device_config["persistent"]["console_port_name"],
        dli_powerswitch.DliPowerSwitch, _PERSISTENT_PROPERTIES)

  @mock.patch.object(
      http_utils,
      "send_http_post",
      side_effect=RuntimeError("Failure calling HTTP post"))
  def test_006_write_command_failed_response(self, mock_http_post):
    """Verify exception raised when HTTP POST command returns an error."""
    # Fix: assertRaisesRegexp is a deprecated alias that was removed in
    # Python 3.12; assertRaisesRegex is the supported spelling.
    with self.assertRaisesRegex(RuntimeError, "Failure calling HTTP post"):
      self.uut._write_command("POST", "RAISE_EXCEPTION", headers={})

  @mock.patch.object(http_utils, "send_http_get", side_effect=_mock_command)
  def test_007_get_firmware(self, mock_http_get):
    """Verify the successful retrieval of device firmware version."""
    self.assertTrue(self.uut.firmware_version)

  @mock.patch.object(http_utils, "send_http_get", side_effect=_mock_command)
  def test_009_powerswitch_power_get_mode(self, mock_http_get):
    """Verify the powerswitch_power capability is initialized properly."""
    self.assertEqual(self.uut.powerswitch_power.get_mode(1), "on")

  @mock.patch.object(http_utils, "send_http_get", side_effect=_mock_command)
  def test_010_switch_power_get_mode(self, mock_http_get):
    """Verify the switch_power capability is initialized properly."""
    self.assertEqual(self.uut.switch_power.get_mode(1), "on")

  @mock.patch.object(host_utils, "is_pingable", return_value=True)
  def test_011_check_device_ready_success(self, mock_is_pingable):
    """Test check_device_ready() when all health checks pass."""
    self.uut.check_device_ready()
if __name__ == "__main__":
  # Delegate to the shared fake-device test runner.
  fake_device_test_case.main()
| {
"content_hash": "20bc08dd0308036bff92cee22a30a7be",
"timestamp": "",
"source": "github",
"line_count": 97,
"max_line_length": 77,
"avg_line_length": 38.94845360824742,
"alnum_prop": 0.7088406564319746,
"repo_name": "google/gazoo-device",
"id": "e5d53dbbb442d2b512e4d5b6502db5c2e0bbb85b",
"size": "4353",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gazoo_device/tests/unit_tests/dli_powerswitch_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "3014778"
},
{
"name": "Shell",
"bytes": "19088"
}
],
"symlink_target": ""
} |
"""Support testing with Pytest."""
import logging
import os
import pytest
from asgi_tools.tests import ASGITestClient, manage_lifespan
def pytest_addoption(parser):
    """Append pytest options for testing Muffin apps."""
    # Each setting is exposed both as an ini value and a CLI flag.
    parser.addini("muffin_app", "Set path to muffin application")
    parser.addoption(
        "--muffin-app", dest="muffin_app", help="Set to muffin application"
    )
    parser.addini("muffin_config", "Set module path to muffin configuration")
    parser.addoption(
        "--muffin-config",
        dest="muffin_config",
        help="Set module path to muffin configuration",
    )
def pytest_load_initial_conftests(early_config, parser, args):
    """Prepare to loading Muffin application."""
    from muffin import CONFIG_ENV_VARIABLE

    options = parser.parse_known_args(args)

    # CLI flags win over ini settings for both config and app.
    config_path = options.muffin_config or early_config.getini("muffin_config")
    if config_path:
        os.environ[CONFIG_ENV_VARIABLE] = config_path

    early_config.app = options.muffin_app or early_config.getini("muffin_app")
@pytest.fixture(scope="session")
async def app(pytestconfig, request, aiolib):
    """Load an application, run lifespan events, prepare plugins.

    Yields the imported Muffin application, or returns early (yielding
    nothing) when no app path was configured.
    """
    if not pytestconfig.app:
        logging.warning(
            "Improperly configured. Please set ``muffin_app`` in your pytest config. "
            "Or use ``--muffin-app`` command option."
        )
        return

    from muffin.utils import import_app

    app_ = import_app(pytestconfig.app)
    msg = f"Setup application '{app_.cfg.name}'"
    if app_.cfg.config:
        # Fix: the original appended "with config ..." without a leading
        # space, logging "...'<name>'with config '<cfg>'".
        msg += f" with config '{app_.cfg.config}'"
    app_.logger.info(msg)

    async with manage_lifespan(app_):
        # Setup plugins that expose a conftest() hook.
        for plugin in app_.plugins.values():
            if hasattr(plugin, "conftest") and plugin.conftest is not None:
                app_.logger.info(f"Setup plugin '{plugin.name}'")
                await plugin.conftest()

        yield app_
@pytest.fixture
def client(app):
    """Generate a test client for the app."""
    # Depends on the session-scoped ``app`` fixture above.
    return ASGITestClient(app)
| {
"content_hash": "25d16305fe5eb896ed8f4f53e42b88f7",
"timestamp": "",
"source": "github",
"line_count": 73,
"max_line_length": 86,
"avg_line_length": 29.28767123287671,
"alnum_prop": 0.6543498596819457,
"repo_name": "klen/muffin",
"id": "483fb2f456359bfbb8a20b2a878acabe308902a3",
"size": "2138",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "muffin/pytest.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "1652"
},
{
"name": "Python",
"bytes": "40911"
}
],
"symlink_target": ""
} |
from optparse import make_option
from django.core.management.base import BaseCommand, CommandError
from lamson import commands, utils as lamson_utils
class Command(BaseCommand):
    """Django management command that stops the lamson daemon."""

    help = 'Stops the lamson daemon'

    # Legacy optparse-style options (pre-Django-1.8 command interface).
    option_list = BaseCommand.option_list + (
        make_option('--kill', action='store_true', dest='kill',
                    default=False, help='kill'),
        make_option('--all', action='store', dest='all', default=False,
                    help='Give --all the name of a run directory and it will stop all pid files it finds there'),
        make_option('--pid', action='store', dest='pid',
                    default='./run/smtp.pid',
                    help='The file where the pid for the process is stored'),
    )

    def handle(self, *args, **options):
        """Delegate to lamson's stop command with the parsed options."""
        pid_file = options['pid']
        commands.stop_command(pid_file, options['kill'], options['all'])
"content_hash": "35e4fce5961df0dd97ab64f1a951328b",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 104,
"avg_line_length": 28.083333333333332,
"alnum_prop": 0.533135509396637,
"repo_name": "vvangelovski/django-lamson",
"id": "5229ad75159d95da1c6f8c7b12384444ff71fe8d",
"size": "1011",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django_lamson/management/commands/lamson_stop.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "PHP",
"bytes": "491"
},
{
"name": "Python",
"bytes": "16613"
}
],
"symlink_target": ""
} |
from __future__ import print_function, division, absolute_import
from struct import pack
from ..message import BulkFrontendMessage
class SslRequest(BulkFrontendMessage):
    """Frontend SSLRequest message sent to ask the server for a TLS session."""

    # SSLRequest carries no message-type byte, only the magic code below.
    message_id = None

    SSL_REQUEST = 80877103  # Protocol-defined magic number for SSLRequest.

    def read_bytes(self):
        """Serialize the request as the 4-byte big-endian magic number."""
        return pack('!I', self.SSL_REQUEST)
| {
"content_hash": "56b2dc2f48391708196dd692316a48e2",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 64,
"avg_line_length": 22.714285714285715,
"alnum_prop": 0.7012578616352201,
"repo_name": "uber/vertica-python",
"id": "8604cf7d5b3abfb293a110bffc06e44f84a87fcf",
"size": "2081",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "vertica_python/vertica/messages/frontend_messages/ssl_request.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "209204"
}
],
"symlink_target": ""
} |
import functools
import unittest
from taskflow import states
from taskflow import task
from taskflow import wrappers
from taskflow.patterns import linear_workflow as lw
def null_functor(*args, **kwargs):
    """Accept any arguments, do nothing, and return ``None``."""
    del args, kwargs  # Explicitly unused.
    return None
class LinearWorkflowTest(unittest.TestCase):
    """Exercises running, reverting, interrupting, and chaining workflows.

    Fix: all ``assertEquals`` calls are replaced with ``assertEqual`` —
    ``assertEquals`` is a deprecated alias that was removed in Python 3.12.
    """

    def makeRevertingTask(self, token, blowup=False):
        """Build a task that records apply/revert into the context at *token*.

        If *blowup* is set, the task raises on apply to trigger reverts.
        """

        def do_apply(token, context, *args, **kwargs):
            context[token] = 'passed'

        def do_revert(token, context, *args, **kwargs):
            context[token] = 'reverted'

        def blow_up(context, *args, **kwargs):
            raise Exception("I blew up")

        if blowup:
            # NOTE(review): the partial binds *token* into blow_up's
            # ``context`` parameter; harmless today because blow_up only
            # raises, but confirm before adding logic to blow_up.
            return wrappers.FunctorTask('task-%s' % (token),
                                        functools.partial(blow_up, token),
                                        null_functor)
        return wrappers.FunctorTask('task-%s' % (token),
                                    functools.partial(do_apply, token),
                                    functools.partial(do_revert, token))

    def makeInterruptTask(self, token, wf):
        """Build a task that interrupts *wf* when applied."""

        def do_interrupt(token, context, *args, **kwargs):
            wf.interrupt()

        return wrappers.FunctorTask('task-%s' % (token),
                                    functools.partial(do_interrupt, token),
                                    null_functor)

    def testHappyPath(self):
        """All tasks apply; every context entry reads 'passed'."""
        wf = lw.Workflow("the-test-action")
        for i in range(0, 10):
            wf.add(self.makeRevertingTask(i))
        run_context = {}
        wf.run(run_context)
        self.assertEqual(10, len(run_context))
        for _k, v in run_context.items():
            self.assertEqual('passed', v)

    def testRevertingPath(self):
        """A failing task reverts the tasks that ran before it."""
        wf = lw.Workflow("the-test-action")
        wf.add(self.makeRevertingTask(1))
        wf.add(self.makeRevertingTask(2, True))
        run_context = {}
        self.assertRaises(Exception, wf.run, run_context)
        self.assertEqual('reverted', run_context[1])
        self.assertEqual(1, len(run_context))

    def testInterruptPath(self):
        """An interrupted workflow can be reset and resumed where it left off."""
        wf = lw.Workflow("the-int-action")
        result_storage = {}

        # If we interrupt we need to know how to resume so attach the needed
        # parts to do that...
        def result_fetcher(ctx, wf, task):
            if task.name in result_storage:
                return (True, result_storage.get(task.name))
            return (False, None)

        def task_listener(ctx, state, wf, task, result=None):
            if state not in (states.SUCCESS, states.FAILURE,):
                return
            if task.name not in result_storage:
                result_storage[task.name] = result

        wf.result_fetcher = result_fetcher
        wf.task_listeners.append(task_listener)
        wf.add(self.makeRevertingTask(1))
        wf.add(self.makeInterruptTask(2, wf))
        wf.add(self.makeRevertingTask(3))
        self.assertEqual(states.PENDING, wf.state)
        context = {}
        wf.run(context)
        # Interrupt should have been triggered after task 1.
        self.assertEqual(1, len(context))
        self.assertEqual(states.INTERRUPTED, wf.state)
        # And now reset and resume.
        wf.reset()
        self.assertEqual(states.PENDING, wf.state)
        wf.run(context)
        self.assertEqual(2, len(context))

    def testParentRevertingPath(self):
        """A failing child workflow reverts its parent's completed tasks."""
        happy_wf = lw.Workflow("the-happy-action")
        for i in range(0, 10):
            happy_wf.add(self.makeRevertingTask(i))
        context = {}
        happy_wf.run(context)
        for (_k, v) in context.items():
            self.assertEqual('passed', v)
        baddy_wf = lw.Workflow("the-bad-action", parents=[happy_wf])
        # ``i`` is 9 here, left over from the loop above.
        baddy_wf.add(self.makeRevertingTask(i + 1))
        baddy_wf.add(self.makeRevertingTask(i + 2, True))
        self.assertRaises(Exception, baddy_wf.run, context)
        for (_k, v) in context.items():
            self.assertEqual('reverted', v)
| {
"content_hash": "78c04593fefae771fa73393d5bafbac1",
"timestamp": "",
"source": "github",
"line_count": 124,
"max_line_length": 76,
"avg_line_length": 32.153225806451616,
"alnum_prop": 0.5758715826435917,
"repo_name": "JohnGarbutt/TaskFlow",
"id": "4ad37d7212dd6e0d328ab579aded2470e56b27f3",
"size": "4689",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "taskflow/tests/unit/test_linear_workflow.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
} |
import functools
import inspect
import math
import time
from oslo_log import log as logging
from oslo_serialization import jsonutils
from oslo_utils import encodeutils
from oslo_utils import excutils
from oslo_utils import strutils
import six
import webob
import webob.exc
from jacket.api.openstack import api_version_request as api_version
from jacket.api.openstack import versioned_method
from jacket import exception
from jacket import i18n
from jacket.i18n import _, _LE, _LI
from jacket import policy
from jacket import utils
from jacket.wsgi import common as wsgi
LOG = logging.getLogger(__name__)
SUPPORTED_CONTENT_TYPES = (
'application/json',
'application/vnd.openstack.volume+json',
)
_MEDIA_TYPE_MAP = {
'application/vnd.openstack.volume+json': 'json',
'application/json': 'json',
}
# name of attribute to keep version method information
VER_METHOD_ATTR = 'versioned_methods'
# Name of header used by clients to request a specific version
# of the REST API
API_VERSION_REQUEST_HEADER = 'OpenStack-API-Version'
VOLUME_SERVICE = 'jacket'
class Request(webob.Request):
"""Add some OpenStack API-specific logic to the base webob.Request."""
    def __init__(self, *args, **kwargs):
        """Initialize the request and its per-request resource cache."""
        super(Request, self).__init__(*args, **kwargs)
        self._resource_cache = {}
        # A version request may already have been attached by other machinery;
        # default to an unversioned APIVersionRequest otherwise.
        if not hasattr(self, 'api_version_request'):
            self.api_version_request = api_version.APIVersionRequest()
def cache_resource(self, resource_to_cache, id_attribute='id', name=None):
"""Cache the given resource.
Allow API methods to cache objects, such as results from a DB query,
to be used by API extensions within the same API request.
The resource_to_cache can be a list or an individual resource,
but ultimately resources are cached individually using the given
id_attribute.
Different resources types might need to be cached during the same
request, they can be cached using the name parameter. For example:
Controller 1:
request.cache_resource(db_volumes, 'volumes')
request.cache_resource(db_volume_types, 'types')
Controller 2:
db_volumes = request.cached_resource('volumes')
db_type_1 = request.cached_resource_by_id('1', 'types')
If no name is given, a default name will be used for the resource.
An instance of this class only lives for the lifetime of a
single API request, so there's no need to implement full
cache management.
"""
if not isinstance(resource_to_cache, list):
resource_to_cache = [resource_to_cache]
if not name:
name = self.path
cached_resources = self._resource_cache.setdefault(name, {})
for resource in resource_to_cache:
cached_resources[resource[id_attribute]] = resource
def cached_resource(self, name=None):
"""Get the cached resources cached under the given resource name.
Allow an API extension to get previously stored objects within
the same API request.
Note that the object data will be slightly stale.
:returns: a dict of id_attribute to the resource from the cached
resources, an empty map if an empty collection was cached,
or None if nothing has been cached yet under this name
"""
if not name:
name = self.path
if name not in self._resource_cache:
# Nothing has been cached for this key yet
return None
return self._resource_cache[name]
def cached_resource_by_id(self, resource_id, name=None):
"""Get a resource by ID cached under the given resource name.
Allow an API extension to get a previously stored object
within the same API request. This is basically a convenience method
to lookup by ID on the dictionary of all cached resources.
Note that the object data will be slightly stale.
:returns: the cached resource or None if the item is not in the cache
"""
resources = self.cached_resource(name)
if not resources:
# Nothing has been cached yet for this key yet
return None
return resources.get(resource_id)
def cache_db_items(self, key, items, item_key='id'):
"""Get cached database items.
Allow API methods to store objects from a DB query to be
used by API extensions within the same API request.
An instance of this class only lives for the lifetime of a
single API request, so there's no need to implement full
cache management.
"""
self.cache_resource(items, item_key, key)
def get_db_items(self, key):
"""Get database items.
Allow an API extension to get previously stored objects within
the same API request.
Note that the object data will be slightly stale.
"""
return self.cached_resource(key)
def get_db_item(self, key, item_key):
"""Get database item.
Allow an API extension to get a previously stored object
within the same API request.
Note that the object data will be slightly stale.
"""
return self.get_db_items(key).get(item_key)
def cache_db_volumes(self, volumes):
    """Cache volume dicts under both the legacy key and the request path.

    NOTE(mgagne): cached twice for backward compatibility reasons.
    """
    for cache_key in ('volumes', self.path):
        self.cache_db_items(cache_key, volumes, 'id')
def cache_db_volume(self, volume):
    """Cache a single volume dict under both legacy and path keys.

    NOTE(mgagne): cached twice for backward compatibility reasons.
    """
    for cache_key in ('volumes', self.path):
        self.cache_db_items(cache_key, [volume], 'id')
def get_db_volumes(self):
    """Return cached volumes, checking the legacy key then the path key."""
    legacy = self.get_db_items('volumes')
    if legacy:
        return legacy
    return self.get_db_items(self.path)
def get_db_volume(self, volume_id):
    """Return one cached volume, trying the legacy key then the path key."""
    found = self.get_db_item('volumes', volume_id)
    if found:
        return found
    return self.get_db_item(self.path, volume_id)
def cache_db_volume_types(self, volume_types):
    # Cache volume-type dicts (keyed by 'id') for API extensions to
    # reuse within this request.
    self.cache_db_items('volume_types', volume_types, 'id')
def cache_db_volume_type(self, volume_type):
    # Cache a single volume-type dict (keyed by 'id') for this request.
    self.cache_db_items('volume_types', [volume_type], 'id')
def get_db_volume_types(self):
    # Return all volume types cached in this request, or None.
    return self.get_db_items('volume_types')
def get_db_volume_type(self, volume_type_id):
    # Return one cached volume type by ID.
    return self.get_db_item('volume_types', volume_type_id)
def cache_db_snapshots(self, snapshots):
    # Cache snapshot dicts (keyed by 'id') for this request.
    self.cache_db_items('snapshots', snapshots, 'id')
def cache_db_snapshot(self, snapshot):
    # Cache a single snapshot dict (keyed by 'id') for this request.
    self.cache_db_items('snapshots', [snapshot], 'id')
def get_db_snapshots(self):
    # Return all snapshots cached in this request, or None.
    return self.get_db_items('snapshots')
def get_db_snapshot(self, snapshot_id):
    # Return one cached snapshot by ID.
    return self.get_db_item('snapshots', snapshot_id)
def cache_db_backups(self, backups):
    # Cache backup dicts (keyed by 'id') for this request.
    self.cache_db_items('backups', backups, 'id')
def cache_db_backup(self, backup):
    # Cache a single backup dict (keyed by 'id') for this request.
    self.cache_db_items('backups', [backup], 'id')
def get_db_backups(self):
    # Return all backups cached in this request, or None.
    return self.get_db_items('backups')
def get_db_backup(self, backup_id):
    # Return one cached backup by ID.
    return self.get_db_item('backups', backup_id)
def best_match_content_type(self):
    """Determine the requested response content-type.

    The result is memoized in the WSGI environ so repeated calls on
    the same request are cheap.  A URL path suffix such as ``.json``
    takes precedence over the Accept header; the fallback is
    ``application/json``.
    """
    cache_key = 'jacket.best_content_type'
    if cache_key not in self.environ:
        content_type = None
        # Check URL path suffix first (e.g. /volumes.json).
        parts = self.path.rsplit('.', 1)
        if len(parts) == 2:
            candidate = 'application/' + parts[1]
            if candidate in SUPPORTED_CONTENT_TYPES:
                content_type = candidate
        if content_type is None:
            # Fall back to content negotiation on the Accept header.
            content_type = self.accept.best_match(SUPPORTED_CONTENT_TYPES)
        self.environ[cache_key] = content_type or 'application/json'
    return self.environ[cache_key]
def get_content_type(self):
    """Determine the content type of the request body.

    Only the Content-Type header is consulted; the body itself is
    never inspected.

    :returns: the declared content type, or None when no Content-Type
              header is present
    :raises InvalidContentType: when the declared type is unsupported
    """
    if "Content-Type" not in self.headers:
        return None
    declared = self.content_type
    if declared in SUPPORTED_CONTENT_TYPES:
        return declared
    raise exception.InvalidContentType(content_type=declared)
def best_match_language(self):
    """Determine the best available locale from the Accept-Language header.

    :returns: the best language match, or None when the request did not
              carry an 'Accept-Language' header.
    """
    if not self.accept_language:
        return None
    return self.accept_language.best_match(i18n.get_available_languages())
def set_api_version_request(self, url):
    """Set API version request based on the request header information.

    Microversions starts with /v3, so if a client sends a request for
    version 1.0 or 2.0 with the /v3 endpoint, throw an exception.
    Sending a header with any microversion to a /v1 or /v2 endpoint
    will be ignored.

    Note that a microversion must be set for the legacy endpoints.
    This will appear as 1.0 and 2.0 for /v1 and /v2.

    :raises VersionNotFoundForAPIMethod: header present but no volume
        service entry found in it
    :raises InvalidGlobalAPIVersion: requested version outside the
        supported min/max range
    """
    if API_VERSION_REQUEST_HEADER in self.headers and 'v3' in url:
        hdr_string = self.headers[API_VERSION_REQUEST_HEADER]
        # 'latest' is a special keyword which is equivalent to requesting
        # the maximum version of the API supported
        hdr_string_list = hdr_string.split(",")
        volume_version = None
        for hdr in hdr_string_list:
            if VOLUME_SERVICE in hdr:
                # NOTE(review): assumes each entry has the shape
                # "<service> <version>" -- hdr.split() raises ValueError
                # for any other shape; confirm the header format.
                service, volume_version = hdr.split()
                break
        if not volume_version:
            # NOTE(review): volume_version is None at this point, so the
            # exception is raised with version=None and its message will
            # not name a version -- confirm this is intended.
            raise exception.VersionNotFoundForAPIMethod(
                version=volume_version)
        if volume_version == 'latest':
            self.api_version_request = api_version.max_api_version()
        else:
            self.api_version_request = api_version.APIVersionRequest(
                volume_version)
        # Check that the version requested is within the global
        # minimum/maximum of supported API versions
        if not self.api_version_request.matches(
                api_version.min_api_version(),
                api_version.max_api_version()):
            raise exception.InvalidGlobalAPIVersion(
                req_ver=self.api_version_request.get_string(),
                min_ver=api_version.min_api_version().get_string(),
                max_ver=api_version.max_api_version().get_string())
    else:
        # No microversion header (or not a /v3 endpoint): fall back to
        # the fixed legacy version implied by the URL.
        if 'v1' in url:
            self.api_version_request = api_version.legacy_api_version1()
        elif 'v2' in url:
            self.api_version_request = api_version.legacy_api_version2()
        else:
            self.api_version_request = api_version.APIVersionRequest(
                api_version._MIN_API_VERSION)
class ActionDispatcher(object):
    """Maps method name to local methods through action name."""

    def dispatch(self, *args, **kwargs):
        """Invoke the handler named by the 'action' keyword argument.

        Falls back to :meth:`default` when no matching method exists.
        """
        action = kwargs.pop('action', 'default')
        handler = getattr(self, six.text_type(action), self.default)
        return handler(*args, **kwargs)

    def default(self, data):
        """Fallback handler; subclasses must override."""
        raise NotImplementedError()
class JSONDeserializer(ActionDispatcher):
    """Deserializes a JSON request body into a dict."""

    def _from_json(self, datastring):
        """Parse *datastring* as JSON, mapping parse errors to API faults."""
        try:
            return jsonutils.loads(datastring)
        except ValueError:
            # Surface MalformedRequestBody so the WSGI layer can turn it
            # into an HTTP 400 instead of a 500.
            raise exception.MalformedRequestBody(
                reason=_("cannot understand JSON"))

    def deserialize(self, datastring, action='default'):
        """Route deserialization to the action-specific handler."""
        return self.dispatch(datastring, action=action)

    def default(self, datastring):
        """Default handler: wrap the parsed body under a 'body' key."""
        return {'body': self._from_json(datastring)}
class DictSerializer(ActionDispatcher):
    """Default request body serialization."""

    def serialize(self, data, action='default'):
        """Route *data* to the serializer registered for *action*."""
        return self.dispatch(data, action=action)

    def default(self, data):
        """Fallback serializer: produce an empty body."""
        return ""
class JSONDictSerializer(ActionDispatcher):
    """Default JSON request body serialization."""

    def serialize(self, data, action='default'):
        """Route *data* to the serializer registered for *action*."""
        return self.dispatch(data, action=action)

    def default(self, data):
        """Serialize *data* to a JSON unicode string."""
        return six.text_type(jsonutils.dumps(data))
def serializers(**serializers):
    """Attach a dict of response serializers to a method.

    The mapping is merged into the function's ``wsgi_serializers``
    attribute.  Note that the function attributes are directly
    manipulated; the method is not wrapped.
    """
    def _attach(func):
        if not hasattr(func, 'wsgi_serializers'):
            func.wsgi_serializers = {}
        func.wsgi_serializers.update(serializers)
        return func
    return _attach
def deserializers(**deserializers):
    """Attach a dict of request deserializers to a method.

    The mapping is merged into the function's ``wsgi_deserializers``
    attribute.  Note that the function attributes are directly
    manipulated; the method is not wrapped.
    """
    def _attach(func):
        if not hasattr(func, 'wsgi_deserializers'):
            func.wsgi_deserializers = {}
        func.wsgi_deserializers.update(deserializers)
        return func
    return _attach
def response(code):
    """Attach an HTTP response code to a method.

    The code is stored directly on the function's ``wsgi_code``
    attribute; the method is not wrapped.
    """
    def _attach(func):
        func.wsgi_code = code
        return func
    return _attach
class ResponseObject(object):
    """Bundles a response object with appropriate serializers.

    Object that app methods may return in order to bind alternate
    serializers with a response object to be serialized.  Its use is
    optional.
    """

    def __init__(self, obj, code=None, headers=None, **serializers):
        """Binds serializers with an object.

        Takes keyword arguments akin to the @serializer() decorator
        for specifying serializers.  Serializers specified will be
        given preference over default serializers or method-specific
        serializers on return.

        :param obj: the object to serialize (may be None for empty body)
        :param code: optional HTTP status override; falls back to 200
        :param headers: optional dict of response headers
        """
        self.obj = obj
        self.serializers = serializers
        # 200 unless a method-level @wsgi.response() overrides it later.
        self._default_code = 200
        self._code = code
        self._headers = headers or {}
        self.serializer = None
        self.media_type = None

    def __getitem__(self, key):
        """Retrieves a header with the given name."""
        # Header names are stored lower-cased for case-insensitive access.
        return self._headers[key.lower()]

    def __setitem__(self, key, value):
        """Sets a header with the given name to the given value."""
        self._headers[key.lower()] = value

    def __delitem__(self, key):
        """Deletes the header with the given name."""
        del self._headers[key.lower()]

    def _bind_method_serializers(self, meth_serializers):
        """Binds method serializers with the response object.

        Serializers specified to the constructor will take precedence
        over serializers specified to this method.

        :param meth_serializers: A dictionary with keys mapping to
                                 response types and values containing
                                 serializer objects.
        """
        # We can't use update because that would be the wrong
        # precedence
        for mtype, serializer in meth_serializers.items():
            self.serializers.setdefault(mtype, serializer)

    def get_serializer(self, content_type, default_serializers=None):
        """Returns the serializer for the wrapped object.

        Returns the serializer for the wrapped object subject to the
        indicated content type.  If no serializer matching the content
        type is attached, an appropriate serializer drawn from the
        default serializers will be used.  If no appropriate
        serializer is available, raises InvalidContentType.
        """
        default_serializers = default_serializers or {}
        try:
            # Map a full MIME type (e.g. application/json) to its short
            # form before lookup.
            mtype = _MEDIA_TYPE_MAP.get(content_type, content_type)
            if mtype in self.serializers:
                return mtype, self.serializers[mtype]
            else:
                return mtype, default_serializers[mtype]
        except (KeyError, TypeError):
            raise exception.InvalidContentType(content_type=content_type)

    def preserialize(self, content_type, default_serializers=None):
        """Prepares the serializer that will be used to serialize.

        Determines the serializer that will be used and prepares an
        instance of it for later call.  This allows the serializer to
        be accessed by extensions for, e.g., template extension.
        """
        mtype, serializer = self.get_serializer(content_type,
                                                default_serializers)
        self.media_type = mtype
        self.serializer = serializer()

    def attach(self, **kwargs):
        """Attach slave templates to serializers."""
        if self.media_type in kwargs:
            self.serializer.attach(kwargs[self.media_type])

    def serialize(self, request, content_type, default_serializers=None):
        """Serializes the wrapped object.

        Utility method for serializing the wrapped object.  Returns a
        webob.Response object.
        """
        # Reuse the serializer prepared by preserialize() when available.
        if self.serializer:
            serializer = self.serializer
        else:
            _mtype, _serializer = self.get_serializer(content_type,
                                                      default_serializers)
            serializer = _serializer()
        response = webob.Response()
        response.status_int = self.code
        for hdr, value in self._headers.items():
            response.headers[hdr] = six.text_type(value)
        response.headers['Content-Type'] = six.text_type(content_type)
        if self.obj is not None:
            body = serializer.serialize(self.obj)
            # webob expects a byte-string body.
            if isinstance(body, six.text_type):
                body = body.encode('utf-8')
            response.body = body
        return response

    @property
    def code(self):
        """Retrieve the response status."""
        return self._code or self._default_code

    @property
    def headers(self):
        """Retrieve the headers."""
        # Return a copy so callers cannot mutate internal state.
        return self._headers.copy()
def action_peek_json(body):
    """Determine the action to invoke from a JSON request body.

    :returns: the single top-level key of the decoded body
    :raises MalformedRequestBody: on invalid JSON or when the body does
        not contain exactly one key
    """
    try:
        decoded = jsonutils.loads(body)
    except ValueError:
        raise exception.MalformedRequestBody(
            reason=_("cannot understand JSON"))
    # The body must contain exactly one top-level key: the action name.
    if len(decoded) != 1:
        raise exception.MalformedRequestBody(reason=_("too many body keys"))
    # Return the action name (the lone key).
    return list(decoded.keys())[0]
class ResourceExceptionHandler(object):
    """Context manager to handle Resource exceptions.

    Used when processing exceptions generated by API implementation
    methods (or their extensions).  Converts most exceptions to Fault
    exceptions, with the appropriate logging.
    """

    def __enter__(self):
        return None

    def __exit__(self, ex_type, ex_value, ex_traceback):
        # No exception raised: suppress-and-continue.
        if not ex_value:
            return True
        # NOTE: the order of the isinstance checks below matters --
        # specific exception families are translated before generic ones.
        if isinstance(ex_value, exception.NotAuthorized):
            msg = six.text_type(ex_value)
            raise Fault(webob.exc.HTTPForbidden(explanation=msg))
        elif isinstance(ex_value, exception.VersionNotFoundForAPIMethod):
            # Propagate unchanged so dispatch() can map it to a 404.
            raise
        elif isinstance(ex_value, (exception.Invalid, exception.NotFound)):
            raise Fault(exception.ConvertedException(
                code=ex_value.code, explanation=six.text_type(ex_value)))
        elif isinstance(ex_value, TypeError):
            # TypeError usually means a bad request shape reached the
            # handler; log with the full traceback, answer with a 400.
            exc_info = (ex_type, ex_value, ex_traceback)
            LOG.error(_LE(
                'Exception handling resource: %s'),
                ex_value, exc_info=exc_info)
            raise Fault(webob.exc.HTTPBadRequest())
        elif isinstance(ex_value, Fault):
            LOG.info(_LI("Fault thrown: %s"), six.text_type(ex_value))
            raise ex_value
        elif isinstance(ex_value, webob.exc.HTTPException):
            LOG.info(_LI("HTTP exception thrown: %s"), six.text_type(ex_value))
            raise Fault(ex_value)
        # We didn't handle the exception
        return False
class Resource(wsgi.Application):
    """WSGI app that handles (de)serialization and controller dispatch.

    WSGI app that reads routing information supplied by RoutesMiddleware
    and calls the requested action method upon its controller.  All
    controller action methods must accept a 'req' argument, which is the
    incoming wsgi.Request.  If the operation is a PUT or POST, the
    controller method must also accept a 'body' argument (the
    deserialized request body).  They may raise a webob.exc exception or
    return a dict, which will be serialized by requested content type.

    Exceptions derived from webob.exc.HTTPException will be
    automatically wrapped in Fault() to provide API friendly error
    responses.
    """

    # When True, __call__ parses/validates the microversion header
    # before dispatching the request.
    support_api_request_version = True

    def __init__(self, controller, action_peek=None, **deserializers):
        """Initialize Resource.

        :param controller: object that implement methods created by
                           routes lib
        :param action_peek: dictionary of routines for peeking into an
                            action request body to determine the
                            desired action
        """
        self.controller = controller
        default_deserializers = dict(json=JSONDeserializer)
        default_deserializers.update(deserializers)
        self.default_deserializers = default_deserializers
        self.default_serializers = dict(json=JSONDictSerializer)
        self.action_peek = dict(json=action_peek_json)
        self.action_peek.update(action_peek or {})
        # Copy over the actions dictionary
        self.wsgi_actions = {}
        if controller:
            self.register_actions(controller)
        # Save a mapping of extensions
        self.wsgi_extensions = {}
        self.wsgi_action_extensions = {}

    def register_actions(self, controller):
        """Registers controller actions with this resource."""
        actions = getattr(controller, 'wsgi_actions', {})
        # wsgi_actions maps action key -> method name (assembled by the
        # ControllerMetaclass); resolve to bound methods here.
        for key, method_name in actions.items():
            self.wsgi_actions[key] = getattr(controller, method_name)

    def register_extensions(self, controller):
        """Registers controller extensions with this resource."""
        extensions = getattr(controller, 'wsgi_extensions', [])
        for method_name, action_name in extensions:
            # Look up the extending method
            extension = getattr(controller, method_name)
            if action_name:
                # Extending an action...
                if action_name not in self.wsgi_action_extensions:
                    self.wsgi_action_extensions[action_name] = []
                self.wsgi_action_extensions[action_name].append(extension)
            else:
                # Extending a regular method
                if method_name not in self.wsgi_extensions:
                    self.wsgi_extensions[method_name] = []
                self.wsgi_extensions[method_name].append(extension)

    def get_action_args(self, request_environment):
        """Parse dictionary created by routes library."""
        # NOTE(Vek): Check for get_action_args() override in the
        #            controller
        if hasattr(self.controller, 'get_action_args'):
            return self.controller.get_action_args(request_environment)
        try:
            args = request_environment['wsgiorg.routing_args'][1].copy()
        except (KeyError, IndexError, AttributeError):
            return {}
        # 'controller' and 'format' are routing artifacts, not
        # arguments for the action method.
        try:
            del args['controller']
        except KeyError:
            pass
        try:
            del args['format']
        except KeyError:
            pass
        return args

    def get_body(self, request):
        """Extract the request body and its declared content type.

        :returns: (content_type, body); (None, '') when the body is
                  empty or the Content-Type is missing/unsupported.
        """
        if len(request.body) == 0:
            LOG.debug("Empty body provided in request")
            return None, ''
        try:
            content_type = request.get_content_type()
        except exception.InvalidContentType:
            LOG.debug("Unrecognized Content-Type provided in request")
            return None, ''
        if not content_type:
            LOG.debug("No Content-Type provided in request")
            return None, ''
        return content_type, request.body

    def deserialize(self, meth, content_type, body):
        """Deserialize *body* with the deserializer bound to *meth*.

        Method-level deserializers (from @wsgi.deserializers) take
        precedence over the resource defaults.

        :raises InvalidContentType: when no deserializer matches
        """
        meth_deserializers = getattr(meth, 'wsgi_deserializers', {})
        try:
            mtype = _MEDIA_TYPE_MAP.get(content_type, content_type)
            if mtype in meth_deserializers:
                deserializer = meth_deserializers[mtype]
            else:
                deserializer = self.default_deserializers[mtype]
        except (KeyError, TypeError):
            raise exception.InvalidContentType(content_type=content_type)
        return deserializer().deserialize(body)

    def pre_process_extensions(self, extensions, request, action_args):
        """Run extension pre-processing stages.

        Generator-function extensions run up to their first yield here;
        plain functions are deferred entirely to post-processing.

        :returns: (response, post) -- a short-circuit response (or
                  None) and the callables queued for post-processing.
        """
        # List of callables for post-processing extensions
        post = []
        for ext in extensions:
            if inspect.isgeneratorfunction(ext):
                response = None
                # If it's a generator function, the part before the
                # yield is the preprocessing stage
                try:
                    with ResourceExceptionHandler():
                        gen = ext(req=request, **action_args)
                        response = next(gen)
                except Fault as ex:
                    response = ex
                # We had a response...
                if response:
                    return response, []
                # No response, queue up generator for post-processing
                post.append(gen)
            else:
                # Regular functions only perform post-processing
                post.append(ext)
        # Run post-processing in the reverse order
        return None, reversed(post)

    def post_process_extensions(self, extensions, resp_obj, request,
                                action_args):
        """Run extension post-processing stages.

        :returns: a replacement response produced by an extension, or
                  None when all extensions passed the response through.
        """
        for ext in extensions:
            response = None
            if inspect.isgenerator(ext):
                # If it's a generator, run the second half of
                # processing
                try:
                    with ResourceExceptionHandler():
                        response = ext.send(resp_obj)
                except StopIteration:
                    # Normal exit of generator
                    continue
                except Fault as ex:
                    response = ex
            else:
                # Regular functions get post-processing...
                try:
                    with ResourceExceptionHandler():
                        response = ext(req=request, resp_obj=resp_obj,
                                       **action_args)
                except exception.VersionNotFoundForAPIMethod:
                    # If an attached extension (@wsgi.extends) for the
                    # method has no version match its not an error. We
                    # just don't run the extends code
                    continue
                except Fault as ex:
                    response = ex
            # We had a response...
            if response:
                return response
        return None

    @webob.dec.wsgify(RequestClass=Request)
    def __call__(self, request):
        """WSGI method that controls (de)serialization and method dispatch."""
        LOG.info(_LI("%(method)s %(url)s"),
                 {"method": request.method,
                  "url": request.url})
        if self.support_api_request_version:
            # Set the version of the API requested based on the header
            try:
                request.set_api_version_request(request.url)
            except exception.InvalidAPIVersionString as e:
                return Fault(webob.exc.HTTPBadRequest(
                    explanation=six.text_type(e)))
            except exception.InvalidGlobalAPIVersion as e:
                return Fault(webob.exc.HTTPNotAcceptable(
                    explanation=six.text_type(e)))
        # Identify the action, its arguments, and the requested
        # content type
        action_args = self.get_action_args(request.environ)
        action = action_args.pop('action', None)
        content_type, body = self.get_body(request)
        accept = request.best_match_content_type()
        # NOTE(Vek): Splitting the function up this way allows for
        #            auditing by external tools that wrap the existing
        #            function. If we try to audit __call__(), we can
        #            run into troubles due to the @webob.dec.wsgify()
        #            decorator.
        return self._process_stack(request, action, action_args,
                                   content_type, body, accept)

    def _process_stack(self, request, action, action_args,
                       content_type, body, accept):
        """Implement the processing stack."""
        # Get the implementing method
        try:
            meth, extensions = self.get_method(request, action,
                                               content_type, body)
        except (AttributeError, TypeError):
            return Fault(webob.exc.HTTPNotFound())
        except KeyError as ex:
            msg = _("There is no such action: %s") % ex.args[0]
            return Fault(webob.exc.HTTPBadRequest(explanation=msg))
        except exception.MalformedRequestBody:
            msg = _("Malformed request body")
            return Fault(webob.exc.HTTPBadRequest(explanation=msg))
        if body:
            decoded_body = encodeutils.safe_decode(body, errors='ignore')
            msg = ("Action: '%(action)s', calling method: %(meth)s, body: "
                   "%(body)s") % {'action': action,
                                  'body': six.text_type(decoded_body),
                                  'meth': six.text_type(meth)}
            # Passwords in the body must never reach the logs.
            LOG.debug(strutils.mask_password(msg))
        else:
            LOG.debug("Calling method '%(meth)s'",
                      {'meth': six.text_type(meth)})
        # Now, deserialize the request body...
        try:
            if content_type:
                contents = self.deserialize(meth, content_type, body)
            else:
                contents = {}
        except exception.InvalidContentType:
            msg = _("Unsupported Content-Type")
            return Fault(webob.exc.HTTPBadRequest(explanation=msg))
        except exception.MalformedRequestBody:
            msg = _("Malformed request body")
            return Fault(webob.exc.HTTPBadRequest(explanation=msg))
        # Update the action args
        action_args.update(contents)
        # Reject requests whose URL project_id disagrees with the
        # authenticated context.
        project_id = action_args.pop("project_id", None)
        context = request.environ.get('jacket.context')
        if (context and project_id and (project_id != context.project_id)):
            msg = _("Malformed request url")
            return Fault(webob.exc.HTTPBadRequest(explanation=msg))
        # Run pre-processing extensions
        response, post = self.pre_process_extensions(extensions,
                                                     request, action_args)
        if not response:
            try:
                with ResourceExceptionHandler():
                    action_result = self.dispatch(meth, request, action_args)
            except Fault as ex:
                response = ex
        if not response:
            # No exceptions; convert action_result into a
            # ResponseObject
            resp_obj = None
            if isinstance(action_result, dict) or action_result is None:
                resp_obj = ResponseObject(action_result)
            elif isinstance(action_result, ResponseObject):
                resp_obj = action_result
            else:
                response = action_result
            # Run post-processing extensions
            if resp_obj:
                _set_request_id_header(request, resp_obj)
                # Do a preserialize to set up the response object
                serializers = getattr(meth, 'wsgi_serializers', {})
                resp_obj._bind_method_serializers(serializers)
                if hasattr(meth, 'wsgi_code'):
                    resp_obj._default_code = meth.wsgi_code
                resp_obj.preserialize(accept, self.default_serializers)
                # Process post-processing extensions
                response = self.post_process_extensions(post, resp_obj,
                                                        request, action_args)
            if resp_obj and not response:
                response = resp_obj.serialize(request, accept,
                                              self.default_serializers)
        try:
            msg_dict = dict(url=request.url, status=response.status_int)
            msg = _LI("%(url)s returned with HTTP %(status)d")
        except AttributeError as e:
            msg_dict = dict(url=request.url, e=e)
            msg = _LI("%(url)s returned a fault: %(e)s")
        LOG.info(msg, msg_dict)
        if hasattr(response, 'headers'):
            for hdr, val in response.headers.items():
                # Headers must be utf-8 strings
                val = utils.convert_str(val)
                response.headers[hdr] = val
            # Echo the negotiated microversion back to the client on
            # microversioned (non-legacy) endpoints.
            if (not request.api_version_request.is_null() and
                    not _is_legacy_endpoint(request)):
                response.headers[API_VERSION_REQUEST_HEADER] = (
                    VOLUME_SERVICE + ' ' +
                    request.api_version_request.get_string())
                response.headers['Vary'] = API_VERSION_REQUEST_HEADER
        return response

    def get_method(self, request, action, content_type, body):
        """Look up the action-specific method and its extensions."""
        # Look up the method
        try:
            if not self.controller:
                meth = getattr(self, action)
            else:
                meth = getattr(self.controller, action)
        except AttributeError as e:
            with excutils.save_and_reraise_exception(e) as ctxt:
                if (not self.wsgi_actions or action not in ['action',
                                                            'create',
                                                            'delete',
                                                            'update']):
                    LOG.exception(_LE('Get method error.'))
                else:
                    # A wsgi_action may still provide this method below.
                    ctxt.reraise = False
        else:
            return meth, self.wsgi_extensions.get(action, [])
        if action == 'action':
            # OK, it's an action; figure out which action...
            mtype = _MEDIA_TYPE_MAP.get(content_type)
            action_name = self.action_peek[mtype](body)
            LOG.debug("Action body: %s", body)
        else:
            action_name = action
        # Look up the action method
        return (self.wsgi_actions[action_name],
                self.wsgi_action_extensions.get(action_name, []))

    def dispatch(self, method, request, action_args):
        """Dispatch a call to the action-specific method."""
        try:
            return method(req=request, **action_args)
        except exception.VersionNotFoundForAPIMethod:
            # We deliberately don't return any message information
            # about the exception to the user so it looks as if
            # the method is simply not implemented.
            return Fault(webob.exc.HTTPNotFound())
def action(name):
    """Mark a function as an action.

    The given name will be taken as the action key in the body.  This
    is also overloaded to allow extensions to provide non-extending
    definitions of create and delete operations.
    """
    def _mark(func):
        func.wsgi_action = name
        return func
    return _mark
def extends(*args, **kwargs):
    """Indicate a function extends an operation.

    Can be used as either::

        @extends
        def index(...):
            pass

    or as::

        @extends(action='resize')
        def _action_resize(...):
            pass
    """
    def _mark(func):
        # Store enough information to find what we're extending.
        func.wsgi_extends = (func.__name__, kwargs.get('action'))
        return func
    # Bare usage: the decorated function arrives positionally.
    if args:
        return _mark(*args)
    # Parameterized usage: return the decorator itself.
    return _mark
class ControllerMetaclass(type):
    """Controller metaclass.

    This metaclass automates the task of assembling a dictionary
    mapping action keys to method names.
    """

    def __new__(mcs, name, bases, cls_dict):
        """Adds the wsgi_actions dictionary to the class."""
        # Find all actions
        actions = {}
        extensions = []
        # NOTE(geguileo): We'll keep a list of versioned methods that have been
        # added by the new metaclass (dictionary in attribute VER_METHOD_ATTR
        # on Controller class) and all the versioned methods from the different
        # base classes so we can consolidate them.
        versioned_methods = []
        # NOTE(cyeoh): This resets the VER_METHOD_ATTR attribute
        # between API controller class creations. This allows us
        # to use a class decorator on the API methods that doesn't
        # require naming explicitly what method is being versioned as
        # it can be implicit based on the method decorated. It is a bit
        # ugly.
        if bases != (object,) and VER_METHOD_ATTR in vars(Controller):
            # Get the versioned methods that this metaclass creation has added
            # to the Controller class
            versioned_methods.append(getattr(Controller, VER_METHOD_ATTR))
            # Remove them so next metaclass has a clean start
            delattr(Controller, VER_METHOD_ATTR)
        # start with wsgi actions from base classes
        for base in bases:
            actions.update(getattr(base, 'wsgi_actions', {}))
            # Get the versioned methods that this base has
            if VER_METHOD_ATTR in vars(base):
                versioned_methods.append(getattr(base, VER_METHOD_ATTR))
        for key, value in cls_dict.items():
            if not callable(value):
                continue
            # Methods marked with @wsgi.action / @wsgi.extends advertise
            # themselves via attributes set by those decorators.
            if getattr(value, 'wsgi_action', None):
                actions[value.wsgi_action] = key
            elif getattr(value, 'wsgi_extends', None):
                extensions.append(value.wsgi_extends)
        # Add the actions and extensions to the class dict
        cls_dict['wsgi_actions'] = actions
        cls_dict['wsgi_extensions'] = extensions
        if versioned_methods:
            cls_dict[VER_METHOD_ATTR] = mcs.consolidate_vers(versioned_methods)
        return super(ControllerMetaclass, mcs).__new__(mcs, name, bases,
                                                       cls_dict)

    @staticmethod
    def consolidate_vers(versioned_methods):
        """Consolidates a list of versioned methods dictionaries."""
        if not versioned_methods:
            return {}
        result = versioned_methods.pop(0)
        for base_methods in versioned_methods:
            for name, methods in base_methods.items():
                method_list = result.setdefault(name, [])
                method_list.extend(methods)
                # Keep newest version first so lookup picks the highest
                # matching microversion.
                method_list.sort(reverse=True)
        return result
@six.add_metaclass(ControllerMetaclass)
class Controller(object):
"""Default controller."""
_view_builder_class = None
def __init__(self, view_builder=None):
"""Initialize controller with a view builder instance."""
if view_builder:
self._view_builder = view_builder
elif self._view_builder_class:
self._view_builder = self._view_builder_class()
else:
self._view_builder = None
def __getattribute__(self, key):
def version_select(*args, **kwargs):
"""Select and call the matching version of the specified method.
Look for the method which matches the name supplied and version
constraints and calls it with the supplied arguments.
:returns: Returns the result of the method called
:raises: VersionNotFoundForAPIMethod if there is no method which
matches the name and version constraints
"""
# The first arg to all versioned methods is always the request
# object. The version for the request is attached to the
# request object
if len(args) == 0:
version_request = kwargs['req'].api_version_request
else:
version_request = args[0].api_version_request
func_list = self.versioned_methods[key]
for func in func_list:
if version_request.matches_versioned_method(func):
# Update the version_select wrapper function so
# other decorator attributes like wsgi.response
# are still respected.
functools.update_wrapper(version_select, func.func)
return func.func(self, *args, **kwargs)
# No version match
raise exception.VersionNotFoundForAPIMethod(
version=version_request)
try:
version_meth_dict = object.__getattribute__(self, VER_METHOD_ATTR)
except AttributeError:
# No versioning on this class
return object.__getattribute__(self, key)
if (version_meth_dict and key in
object.__getattribute__(self, VER_METHOD_ATTR)):
return version_select
return object.__getattribute__(self, key)
# NOTE(cyeoh): This decorator MUST appear first (the outermost
# decorator) on an API method for it to work correctly
@classmethod
def api_version(cls, min_ver, max_ver=None, experimental=False):
"""Decorator for versioning API methods.
Add the decorator to any method which takes a request object
as the first parameter and belongs to a class which inherits from
wsgi.Controller.
:param min_ver: string representing minimum version
:param max_ver: optional string representing maximum version
"""
def decorator(f):
obj_min_ver = api_version.APIVersionRequest(min_ver)
if max_ver:
obj_max_ver = api_version.APIVersionRequest(max_ver)
else:
obj_max_ver = api_version.APIVersionRequest()
# Add to list of versioned methods registered
func_name = f.__name__
new_func = versioned_method.VersionedMethod(
func_name, obj_min_ver, obj_max_ver, experimental, f)
func_dict = getattr(cls, VER_METHOD_ATTR, {})
if not func_dict:
setattr(cls, VER_METHOD_ATTR, func_dict)
func_list = func_dict.get(func_name, [])
if not func_list:
func_dict[func_name] = func_list
func_list.append(new_func)
# Ensure the list is sorted by minimum version (reversed)
# so later when we work through the list in order we find
# the method which has the latest version which supports
# the version requested.
# TODO(cyeoh): Add check to ensure that there are no overlapping
# ranges of valid versions as that is ambiguous
func_list.sort(reverse=True)
# NOTE(geguileo): To avoid PEP8 errors when defining multiple
# microversions of the same method in the same class we add the
# api_version decorator to the function so it can be used instead,
# thus preventing method redefinition errors.
f.api_version = cls.api_version
return f
return decorator
@staticmethod
def is_valid_body(body, entity_name):
if not (body and entity_name in body):
return False
def is_dict(d):
try:
d.get(None)
return True
except AttributeError:
return False
if not is_dict(body[entity_name]):
return False
return True
@staticmethod
def assert_valid_body(body, entity_name):
# NOTE: After v1 api is deprecated need to merge 'is_valid_body' and
# 'assert_valid_body' in to one method. Right now it is not
# possible to modify 'is_valid_body' to raise exception because
# in case of V1 api when 'is_valid_body' return False,
# 'HTTPUnprocessableEntity' exception is getting raised and in
# V2 api 'HTTPBadRequest' exception is getting raised.
if not Controller.is_valid_body(body, entity_name):
raise webob.exc.HTTPBadRequest(
explanation=_("Missing required element '%s' in "
"request body.") % entity_name)
@staticmethod
def validate_name_and_description(body):
name = body.get('name')
if name is not None:
if isinstance(name, six.string_types):
body['name'] = name.strip()
try:
utils.check_string_length(body['name'], 'Name',
min_length=0, max_length=255)
except exception.InvalidInput as error:
raise webob.exc.HTTPBadRequest(explanation=error.msg)
description = body.get('description')
if description is not None:
try:
utils.check_string_length(description, 'Description',
min_length=0, max_length=255)
except exception.InvalidInput as error:
raise webob.exc.HTTPBadRequest(explanation=error.msg)
@staticmethod
def validate_string_length(value, entity_name, min_length=0,
                           max_length=None, remove_whitespaces=False):
    """Check the length of specified string.

    :param value: the value of the string
    :param entity_name: the name of the string
    :param min_length: the min_length of the string
    :param max_length: the max_length of the string
    :param remove_whitespaces: True if trimming whitespaces is needed
                               else False
    :raises webob.exc.HTTPBadRequest: when the length check fails
    """
    if remove_whitespaces and isinstance(value, six.string_types):
        value = value.strip()
    try:
        utils.check_string_length(value, entity_name,
                                  min_length=min_length,
                                  max_length=max_length)
    except exception.InvalidInput as error:
        # Surface the validation failure as a client error.
        raise webob.exc.HTTPBadRequest(explanation=error.msg)
@staticmethod
def get_policy_checker(prefix):
    """Build a policy-enforcement helper bound to *prefix*.

    The returned callable is wrapped in ``staticmethod`` so it can be
    assigned directly as a class attribute of a controller without
    becoming a bound method on access.
    """
    @staticmethod
    def policy_checker(req, action, resource=None):
        # Enforce "<prefix>:<action>" against the request's context and
        # return that context so callers can keep using it.
        ctxt = req.environ['jacket.context']
        target = {
            'project_id': ctxt.project_id,
            'user_id': ctxt.user_id,
        }
        if resource:
            target.update(resource)
        _action = '%s:%s' % (prefix, action)
        policy.enforce(ctxt, _action, target)
        return ctxt
    return policy_checker
class Fault(webob.exc.HTTPException):
    """Wrap webob.exc.HTTPException to provide API friendly response."""

    # Maps HTTP status codes to the JSON key used for the fault payload;
    # any code not listed falls back to "jacketFault".
    _fault_names = {
        400: "badRequest",
        401: "unauthorized",
        403: "forbidden",
        404: "itemNotFound",
        405: "badMethod",
        409: "conflictingRequest",
        413: "overLimit",
        415: "badMediaType",
        429: "overLimit",
        501: "notImplemented",
        503: "serviceUnavailable"}

    def __init__(self, exception):
        """Create a Fault for the given webob.exc.exception."""
        self.wrapped_exc = exception
        # Coerce header values to plain strings for the WSGI layer.
        for key, value in list(self.wrapped_exc.headers.items()):
            self.wrapped_exc.headers[key] = str(value)
        self.status_int = exception.status_int

    @webob.dec.wsgify(RequestClass=Request)
    def __call__(self, req):
        """Generate a WSGI response based on the exception passed to ctor."""
        user_locale = req.best_match_language()
        # Replace the body with fault details.
        code = self.wrapped_exc.status_int
        fault_name = self._fault_names.get(code, "jacketFault")
        explanation = self.wrapped_exc.explanation
        LOG.debug("Returning %(code)s to user: %(explanation)s",
                  {'code': code, 'explanation': explanation})

        # Localize the explanation for the client's preferred language.
        explanation = i18n.translate(explanation, user_locale)
        fault_data = {
            fault_name: {
                'code': code,
                'message': explanation}}
        # Rate-limit responses (413/429) advertise when to retry.
        if code == 413 or code == 429:
            retry = self.wrapped_exc.headers.get('Retry-After', None)
            if retry:
                fault_data[fault_name]['retryAfter'] = retry

        # Echo the negotiated microversion back on versioned requests.
        if not req.api_version_request.is_null():
            self.wrapped_exc.headers[API_VERSION_REQUEST_HEADER] = \
                req.api_version_request.get_string()
            self.wrapped_exc.headers['Vary'] = \
                API_VERSION_REQUEST_HEADER

        self.wrapped_exc.content_type = 'application/json'
        self.wrapped_exc.charset = 'UTF-8'
        self.wrapped_exc.text = JSONDictSerializer().serialize(fault_data)

        return self.wrapped_exc

    def __str__(self):
        return self.wrapped_exc.__str__()
def _set_request_id_header(req, headers):
    """Copy the request context's request id into the response headers."""
    ctx = req.environ.get('jacket.context')
    if ctx:
        headers['x-compute-request-id'] = ctx.request_id
def _is_legacy_endpoint(request):
    """Return True when the request targets a legacy (v1.0/v2.0) endpoint."""
    version = request.api_version_request.get_string()
    return any(marker in version for marker in ('1.0', '2.0'))
class OverLimitFault(webob.exc.HTTPException):
    """Rate-limited request response."""

    def __init__(self, message, details, retry_time):
        """Initialize new `OverLimitFault` with relevant information."""
        hdrs = OverLimitFault._retry_after(retry_time)
        self.wrapped_exc = webob.exc.HTTPRequestEntityTooLarge(headers=hdrs)
        self.content = {
            "overLimitFault": {
                "code": self.wrapped_exc.status_int,
                "message": message,
                "details": details,
            },
        }

    @staticmethod
    def _retry_after(retry_time):
        # Seconds until the client may retry, rounded up, never negative.
        delay = int(math.ceil(retry_time - time.time()))
        retry_after = delay if delay > 0 else 0
        headers = {'Retry-After': '%d' % retry_after}
        return headers

    @webob.dec.wsgify(RequestClass=Request)
    def __call__(self, request):
        """Serializes the wrapped exception conforming to our error format."""
        content_type = request.best_match_content_type()

        def translate(msg):
            # Localize to the request's preferred language.
            locale = request.best_match_language()
            return i18n.translate(msg, locale)

        self.content['overLimitFault']['message'] = \
            translate(self.content['overLimitFault']['message'])
        self.content['overLimitFault']['details'] = \
            translate(self.content['overLimitFault']['details'])

        # NOTE(review): only JSON is served here; other negotiated
        # content types would raise KeyError — presumably unreachable
        # given upstream content negotiation; confirm.
        serializer = {
            'application/json': JSONDictSerializer(),
        }[content_type]

        content = serializer.serialize(self.content)
        self.wrapped_exc.body = content

        return self.wrapped_exc
| {
"content_hash": "839f49771910b50b599593298a50f5db",
"timestamp": "",
"source": "github",
"line_count": 1418,
"max_line_length": 79,
"avg_line_length": 36.903385049365305,
"alnum_prop": 0.595004681916337,
"repo_name": "HybridF5/jacket",
"id": "6ec60b66a6016f2dd3cb9876de69d2f46e65ce9f",
"size": "52992",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "jacket/api/openstack/wsgi.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "26995056"
},
{
"name": "Shell",
"bytes": "28464"
},
{
"name": "Smarty",
"bytes": "291947"
}
],
"symlink_target": ""
} |
from .base import Plugin
from ..canvastools import RectangleTool
from ...viewer.widgets import SaveButtons
__all__ = ['Crop']
class Crop(Plugin):
    """Viewer plugin that crops the image to a user-selected rectangle."""
    name = 'Crop'

    def __init__(self, maxdist=10, **kwargs):
        super(Crop, self).__init__(**kwargs)
        self.maxdist = maxdist
        self.add_widget(SaveButtons())
        print(self.help())

    def attach(self, image_viewer):
        """Attach a rectangle-selection tool to the viewer."""
        super(Crop, self).attach(image_viewer)
        self.rect_tool = RectangleTool(image_viewer,
                                       maxdist=self.maxdist,
                                       on_enter=self.crop)
        self.artists.append(self.rect_tool)

    def help(self):
        lines = ("Crop tool",
                 "Select rectangular region and press enter to crop.")
        return '\n'.join(lines)

    def crop(self, extents):
        """Replace the viewer image with the selected sub-region."""
        # extents arrive from the rectangle tool as (xmin, xmax, ymin, ymax).
        xmin, xmax, ymin, ymax = extents
        cropped = self.image_viewer.image[ymin:ymax + 1, xmin:xmax + 1]
        self.image_viewer.image = cropped
        self.image_viewer.ax.relim()
| {
"content_hash": "6e74d1634ebaa7daed4581cf495a2579",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 72,
"avg_line_length": 29.514285714285716,
"alnum_prop": 0.5721200387221684,
"repo_name": "newville/scikit-image",
"id": "04a853213859d0e90be342a9430a21ff11a11cbf",
"size": "1033",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "skimage/viewer/plugins/crop.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "76670"
},
{
"name": "Makefile",
"bytes": "449"
},
{
"name": "Python",
"bytes": "2158081"
}
],
"symlink_target": ""
} |
import importlib
import logging
import random
import string
from time import strftime
import sys
import os
from os.path import dirname, abspath
# Shared log-record layout, e.g. "31.12.2020 23:59:59.123 INFO message".
formatter = logging.Formatter(
    fmt='%(asctime)s.%(msecs)03d %(levelname)s %(message)s',
    datefmt='%d.%m.%Y %H:%M:%S'
)
def setup_logger(name, log_file, level):
    """Create a named logger that writes to *log_file* at *level*.

    Based on stackoverflow.com/a/11233293/2474159.
    """
    # delay=True creates the logfile with the first record, so empty
    # logfiles won't be created (stackoverflow.com/a/19656056/2474159).
    file_handler = logging.FileHandler(log_file, delay=True)
    file_handler.setFormatter(formatter)
    new_logger = logging.getLogger(name)
    new_logger.setLevel(level)
    new_logger.addHandler(file_handler)
    return new_logger
def get_unique_file_logger(cls, level=logging.INFO):
    """Create a logger writing to a uniquely named, timestamped logfile.

    *cls* is the instance whose class name prefixes the logfile name.
    """
    suffix = ''.join(random.choice(string.ascii_lowercase) for _ in range(5))
    log_file = '{}_{}_{}.log'.format(
        cls.__class__.__name__, strftime('%d-%m-%Y_%H-%M-%S'), suffix)
    if not in_pyinstaller_mode():  # else is GTPengine or executable, in that case we can't expect a folder
        project_root_dir = dirname(dirname(abspath(__file__)))
        log_file = os.path.join(project_root_dir, 'logs', log_file)
    unique_logger = setup_logger(suffix, log_file, level)
    unique_logger.propagate = False  # via stackoverflow.com/a/2267567/2474159
    return unique_logger
def in_pyinstaller_mode():
    """Return True when running from a PyInstaller bundle.

    PyInstaller (re)creates a temporary folder and records its path in
    ``sys._MEIPASS``; the attribute's presence identifies bundle mode.
    """
    try:
        sys._MEIPASS
    except AttributeError:
        return False
    return True
# ported to Python 3 from stackoverflow.com/a/44446822
def set_keras_backend(backend):
    """Force Keras to use *backend*, reloading keras.backend if needed.

    NOTE(review): relies on ``keras.backend`` re-running its backend
    selection on reload after KERAS_BACKEND changes — verify against
    the installed Keras version.
    """
    from keras import backend as K
    if K.backend() != backend:
        os.environ['KERAS_BACKEND'] = backend
        importlib.reload(K)
        # Fail loudly if the reload did not actually switch backends.
        assert K.backend() == backend
| {
"content_hash": "2a3563a57f390051fda4706fc8c640a7",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 107,
"avg_line_length": 34.81132075471698,
"alnum_prop": 0.6905149051490515,
"repo_name": "nathbo/GO_DILab",
"id": "390632d8e65a89d3c8402afb134f0a24c4a706c1",
"size": "1845",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/Utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "496"
},
{
"name": "Python",
"bytes": "320762"
},
{
"name": "Shell",
"bytes": "2336"
}
],
"symlink_target": ""
} |
from __future__ import annotations
import ctypes
from dataclasses import dataclass, field
from typing import *
from ctypes import *
from rich.pretty import pprint
import json
import logging
import builtins
import base64
import chip.exceptions
from chip import ChipDeviceCtrl
import copy
from .storage import PersistentStorage
from chip.CertificateAuthority import CertificateAuthority
class FabricAdmin:
    ''' Administers a fabric associated with a unique FabricID under a given CertificateAuthority
        instance.
    '''

    @classmethod
    def _Handle(cls):
        return chip.native.GetLibraryHandle()

    @classmethod
    def logger(cls):
        return logging.getLogger('FabricAdmin')

    def __init__(self, certificateAuthority: CertificateAuthority, vendorId: int, fabricId: int = 1):
        ''' Initializes the object.

            certificateAuthority: CertificateAuthority instance that will be used to vend NOCs for both
                                  DeviceControllers and commissionable nodes on this fabric.
            vendorId: Valid operational Vendor ID associated with this fabric.
            fabricId: Fabric ID to be associated with this fabric.
        '''
        self._handle = chip.native.GetLibraryHandle()

        if (vendorId is None or vendorId == 0):
            raise ValueError(
                f"Invalid VendorID ({vendorId}) provided!")

        if (fabricId is None or fabricId == 0):
            raise ValueError(
                f"Invalid FabricId ({fabricId}) provided!")

        self._vendorId = vendorId
        self._fabricId = fabricId
        self._certificateAuthority = certificateAuthority
        self.logger().warning(f"New FabricAdmin: FabricId: 0x{self._fabricId:016X}, VendorId = 0x{self.vendorId:04X}")
        self._isActive = True
        self._activeControllers = []

    def NewController(self, nodeId: int = None, paaTrustStorePath: str = "", useTestCommissioner: bool = False, catTags: List[int] = None):
        ''' Create a new chip.ChipDeviceCtrl.ChipDeviceController instance on this fabric.

            When vending ChipDeviceController instances on a given fabric, each controller instance
            is associated with a unique fabric index local to the running process. In the underlying FabricTable, each FabricInfo
            instance can be treated as unique identities that can collide on the same logical fabric.

            nodeId: NodeID to be assigned to the controller. Automatically allocates one starting from 112233 if one
                    is not provided.
            paaTrustStorePath: Path to the PAA trust store. If one isn't provided, a suitable default is selected.
            useTestCommissioner: If a test commissioner is to be created.
            catTags: A list of 32-bit CAT tags that will added to the NOC generated for this controller.
                     Defaults to no tags.
        '''
        # NOTE: the default is None (not []) to avoid the shared mutable
        # default-argument pitfall; normalize to a fresh list per call.
        if catTags is None:
            catTags = []

        if not self._isActive:
            raise RuntimeError(
                "FabricAdmin object was previously shutdown and is no longer valid!")

        nodeIdList = [controller.nodeId for controller in self._activeControllers if controller.isActive]
        if (nodeId is None):
            # Allocate the next free NodeId, starting at 112233.
            if (len(nodeIdList) != 0):
                nodeId = max(nodeIdList) + 1
            else:
                nodeId = 112233
        else:
            if (nodeId in nodeIdList):
                raise RuntimeError(f"Provided NodeId {nodeId} collides with an existing controller instance!")

        self.logger().warning(
            f"Allocating new controller with CaIndex: {self._certificateAuthority.caIndex}, FabricId: 0x{self._fabricId:016X}, NodeId: 0x{nodeId:016X}, CatTags: {catTags}")

        controller = ChipDeviceCtrl.ChipDeviceController(opCredsContext=self._certificateAuthority.GetOpCredsContext(), fabricId=self._fabricId, nodeId=nodeId,
                                                         adminVendorId=self._vendorId, paaTrustStorePath=paaTrustStorePath, useTestCommissioner=useTestCommissioner, fabricAdmin=self, catTags=catTags)
        self._activeControllers.append(controller)
        return controller

    def Shutdown(self):
        ''' Shutdown all active controllers on the fabric before shutting down the fabric itself.

            You cannot interact with this object there-after.
        '''
        if (self._isActive):
            for controller in self._activeControllers:
                controller.Shutdown()
            self._isActive = False

    def __del__(self):
        self.Shutdown()

    @property
    def vendorId(self) -> int:
        return self._vendorId

    @property
    def fabricId(self) -> int:
        return self._fabricId

    @property
    def caIndex(self) -> int:
        return self._certificateAuthority.caIndex

    @property
    def certificateAuthority(self) -> CertificateAuthority:
        return self._certificateAuthority
| {
"content_hash": "fd051ae31d228b58daa25024bbd4869a",
"timestamp": "",
"source": "github",
"line_count": 123,
"max_line_length": 199,
"avg_line_length": 40.34959349593496,
"alnum_prop": 0.6510175297199274,
"repo_name": "project-chip/connectedhomeip",
"id": "97a729035f811e58a1f4b643c598d4a6d6450337",
"size": "5679",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/controller/python/chip/FabricAdmin.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "1759301"
},
{
"name": "C++",
"bytes": "19104548"
},
{
"name": "CMake",
"bytes": "140510"
},
{
"name": "Dockerfile",
"bytes": "50353"
},
{
"name": "Emacs Lisp",
"bytes": "1042"
},
{
"name": "Java",
"bytes": "167719"
},
{
"name": "JavaScript",
"bytes": "2106"
},
{
"name": "Jinja",
"bytes": "22322"
},
{
"name": "Objective-C",
"bytes": "930838"
},
{
"name": "Objective-C++",
"bytes": "435348"
},
{
"name": "Python",
"bytes": "1931007"
},
{
"name": "Shell",
"bytes": "195843"
},
{
"name": "Tcl",
"bytes": "311"
},
{
"name": "ZAP",
"bytes": "584219"
}
],
"symlink_target": ""
} |
"""ZenIRCBot API"""
# These are at the top to ensure gevent can monkey patch before
# threading gets imported.
from gevent import monkey
monkey.patch_all()
import atexit
import json
import gevent
from redis import StrictRedis
__version__ = '2.2.7'
def load_config(name):
    """ Loads a JSON file and returns an object.

    :param string name: The JSON file to load.

    :returns: A native object with the contents of the JSON file.

    This is a helper so you don't have to do the file IO and JSON
    parsing yourself.
    """
    with open(name) as f:
        # json.load reads straight from the file object — no need to
        # slurp the whole file into a string first.
        return json.load(f)
class ZenIRCBot(object):
    """Instantiates a new ZenIRCBot API object.

    :param string host: Redis hostname (default: 'localhost')
    :param integer port: Redis port (default: 6379)
    :param integer db: Redis DB number (default: 0)
    :param string name: Name for the service using this instance
    :returns: ZenIRCBot instance

    Takes Redis server parameters to use for instantiating Redis
    clients.
    """

    def __init__(self, host='localhost', port=6379, db=0, name="bot"):
        self.host = host
        self.port = port
        self.db = db
        self.service_name = name
        self.redis = StrictRedis(host=self.host,
                                 port=self.port,
                                 db=self.db)
        self.commands = []  # list of command callbacks

    def send_privmsg(self, to, message):
        """Sends a message to the specified channel(s)

        :param to: A list or a string, if it is a list it will send to
                   all the people or channels listed.
        :param string message: The message to send.

        This is a helper so you don't have to handle the JSON or the
        envelope yourself.
        """
        # NOTE: ``basestring`` ties this module to Python 2.
        if isinstance(to, basestring):
            to = (to,)
        for channel in to:
            self.get_redis_client().publish('out',
                                            json.dumps({
                                                'version': 1,
                                                'type': 'privmsg',
                                                'data': {
                                                    'to': channel,
                                                    'message': message,
                                                }}))

    def send_action(self, to, message):
        """Sends an "ACTION" message to the specified channel(s)

        :param to: A list or a string, if it is a list it will send to
                   all the people or channels listed.
        :param string message: The message to send.

        This is a helper so you don't have to handle the JSON or the
        envelope yourself.
        """
        if isinstance(to, basestring):
            to = (to,)
        for channel in to:
            self.get_redis_client().publish('out',
                                            json.dumps({
                                                'version': 1,
                                                'type': 'privmsg_action',
                                                'data': {
                                                    'to': channel,
                                                    'message': message,
                                                }}))

    def send_admin_message(self, message):
        """
        :param string message: The message to send.

        This is a helper function that sends the message to all of the
        channels defined in ``admin_spew_channels``.
        """
        admin_channels = self.redis.get('zenircbot:admin_spew_channels')
        if admin_channels:
            self.send_privmsg(admin_channels, message)

    def non_blocking_redis_subscribe(self, func, args=None, kwargs=None):
        """Subscribe to the 'in' channel and invoke *func* per message.

        Blocks on the pubsub listen loop; run it inside a greenlet for
        non-blocking behavior.
        """
        # Mutable defaults ([], {}) are shared across calls; use None
        # sentinels and build fresh containers per invocation instead.
        args = args if args is not None else []
        kwargs = kwargs if kwargs is not None else {}
        pubsub = self.get_redis_client().pubsub()
        pubsub.subscribe('in')
        for msg in pubsub.listen():
            if msg['type'] == 'message':
                message = json.loads(msg['data'])
                func(message=message, *args, **kwargs)

    def register_commands(self, service, commands):
        """
        :param string service: The script with extension that you are
                               registering.
        :param list commands: A list of objects with name and description
                              attributes used to reply to
                              a commands query.

        This will notify all ``admin_spew_channels`` of the script
        coming online when the script registers itself. It will also
        setup a subscription to the 'out' channel that listens for
        'commands' to be sent to the bot and responds with the list of
        script, command name, and command description for all
        registered scripts.
        """
        self.send_admin_message(service + ' online!')
        if commands:
            def registration_reply(message, service, commands):
                # Reply to 'commands'/'services' queries directed at us.
                if message['version'] == 1:
                    if message['type'] == 'directed_privmsg':
                        if message['data']['message'] == 'commands':
                            for command in commands:
                                self.send_privmsg(message['data']['sender'],
                                                  '%s: %s - %s' % (
                                                      service,
                                                      command['name'],
                                                      command['description']
                                                  ))
                        elif message['data']['message'] == 'services':
                            self.send_privmsg(message['data']['sender'],
                                              service)
            greenlet = gevent.spawn(self.non_blocking_redis_subscribe,
                                    func=registration_reply,
                                    kwargs={
                                        'service': service,
                                        'commands': commands
                                    })
            # Ensures that the greenlet is cleaned up.
            atexit.register(lambda gl: gl.kill(), greenlet)

    def get_redis_client(self):
        """ Get redis client using values from instantiation time."""
        return StrictRedis(host=self.host,
                           port=self.port,
                           db=self.db)

    def simple_command(self, commandstr, desc="a command", **options):
        """ A decorator to register a command as a callback when the command
        is triggered. The function must take one argument, which is the message
        text.

            @zen.simple_command("ping")
            def ping(msg):
                return "pong"

        :param string commandstr: string to use as a command
        :param string desc: optional description text
        :returns: decorated function

        This should be used in conjunction with ZenIRCBot.listen() which will
        finish the registration process and starts listening to redis for
        messages
        """
        def decorator(f):
            # Record the command; actual registration happens in listen().
            self.commands.append({'str': commandstr,
                                  'desc': desc,
                                  'name': f.__name__,
                                  'callback': f,
                                  })
            return f
        return decorator

    def listen(self):
        """ Start listening. This is blocking and should be the last line in a
        service. Once this is called the service is running.
        """
        # actually register commands
        self.register_commands(
            self.service_name,
            [{'name': '!' + c['str'], 'description': c['desc']}
             for c in self.commands])
        # boilerplate pubsub listening
        subscription = self.get_redis_client().pubsub()
        subscription.subscribe('in')
        for msg in subscription.listen():
            if msg.get('type') == 'message':
                message = json.loads(msg['data'])
                if message['version'] == 1:
                    if message['type'] == 'directed_privmsg':
                        text = message['data']['message']
                        # Look for any command matches in registered commands
                        for command in self.commands:
                            # match command
                            if text.startswith(command['str']):
                                # do callback and send returned value to channel
                                self.send_privmsg(message['data']['channel'],
                                                  command['callback'](text))
| {
"content_hash": "4defa21784db4a53b01c4d87331c0ea9",
"timestamp": "",
"source": "github",
"line_count": 224,
"max_line_length": 85,
"avg_line_length": 39.236607142857146,
"alnum_prop": 0.48833769484583,
"repo_name": "zenirc/zenircbot-api-python",
"id": "87fb55abd0e2b925377852a0f99f14c8539933c6",
"size": "8789",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "zenircbot_api.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "10017"
}
],
"symlink_target": ""
} |
import logging
import os
from pathlib import Path
import click
from platformio import fs
from platformio.package.exception import UnknownPackageError
from platformio.package.manager.library import LibraryPackageManager
from platformio.package.manager.platform import PlatformPackageManager
from platformio.package.manager.tool import ToolPackageManager
from platformio.package.meta import PackageSpec
from platformio.project.config import ProjectConfig
from platformio.project.savedeps import pkg_to_save_spec, save_project_dependencies
from platformio.test.result import TestSuite
from platformio.test.runners.factory import TestRunnerFactory
# CLI entry point for `pio pkg install`. No docstring on purpose: click
# would surface it as long-form help text and change the CLI output.
@click.command(
    "install", short_help="Install the project dependencies or custom packages"
)
@click.option(
    "-d",
    "--project-dir",
    default=os.getcwd,
    type=click.Path(exists=True, file_okay=False, dir_okay=True, resolve_path=True),
)
@click.option("-e", "--environment", "environments", multiple=True)
@click.option("-p", "--platform", "platforms", metavar="SPECIFICATION", multiple=True)
@click.option("-t", "--tool", "tools", metavar="SPECIFICATION", multiple=True)
@click.option("-l", "--library", "libraries", metavar="SPECIFICATION", multiple=True)
@click.option(
    "--no-save",
    is_flag=True,
    help="Prevent saving specified packages to `platformio.ini`",
)
@click.option("--skip-dependencies", is_flag=True, help="Skip package dependencies")
@click.option("-g", "--global", is_flag=True, help="Install package globally")
@click.option(
    "--storage-dir",
    default=None,
    type=click.Path(exists=True, file_okay=False, dir_okay=True, resolve_path=True),
    help="Custom Package Manager storage for global packages",
)
@click.option("-f", "--force", is_flag=True, help="Reinstall package if it exists")
@click.option("-s", "--silent", is_flag=True, help="Suppress progress reporting")
def package_install_cmd(**options):
    # Dispatch to global or per-project installation per -g/--global.
    if options.get("global"):
        install_global_dependencies(options)
    else:
        install_project_dependencies(options)
def install_global_dependencies(options):
    """Install platform/tool/library specs into the global storage.

    :param options: dict of CLI options; reads "storage_dir", "silent",
        "platforms", "tools", "libraries", "skip_dependencies", "force".
    """
    pm = PlatformPackageManager(options.get("storage_dir"))
    tm = ToolPackageManager(options.get("storage_dir"))
    lm = LibraryPackageManager(options.get("storage_dir"))
    for obj in (pm, tm, lm):
        obj.set_log_level(logging.WARN if options.get("silent") else logging.DEBUG)
    # Route each spec kind to its dedicated manager. Use `or []` fallbacks
    # consistently so a missing/None key never raises TypeError (the
    # original only defaulted "libraries").
    for manager, specs in (
        (pm, options.get("platforms") or []),
        (tm, options.get("tools") or []),
        (lm, options.get("libraries") or []),
    ):
        for spec in specs:
            manager.install(
                spec,
                skip_dependencies=options.get("skip_dependencies"),
                force=options.get("force"),
            )
def install_project_dependencies(options):
    """Resolve and install dependencies for each selected project env."""
    selected = options["environments"]
    with fs.cd(options["project_dir"]):
        config = ProjectConfig.get_instance()
        config.validate(selected)
        for env in config.envs():
            # Skip envs not explicitly requested (when any were given).
            if selected and env not in selected:
                continue
            if not options["silent"]:
                click.echo(
                    "Resolving %s environment packages..."
                    % click.style(env, fg="cyan")
                )
            changed = install_project_env_dependencies(env, options)
            if not options["silent"] and not changed:
                click.secho("Already up-to-date.", fg="green")
def install_project_env_dependencies(project_env, options=None):
    """Used in `pio run` -> Processor.

    Returns True when at least one package was (re)installed.
    """
    options = options or {}
    results = []
    # Custom specs passed on the command line take precedence.
    if options.get("platforms"):
        results.append(
            _install_project_env_custom_platforms(project_env, options))
    if options.get("tools"):
        results.append(_install_project_env_custom_tools(project_env, options))
    if options.get("libraries"):
        results.append(
            _install_project_env_custom_libraries(project_env, options))
    # Otherwise fall back to the dependencies declared in platformio.ini.
    if not results:
        results = [
            _install_project_env_platform(project_env, options),
            _install_project_env_libraries(project_env, options),
        ]
    return any(results)
def _install_project_env_platform(project_env, options):
    """Install the env's declared platform.

    Returns True when something was actually installed.
    """
    config = ProjectConfig.get_instance()
    pm = PlatformPackageManager()
    if options.get("silent"):
        pm.set_log_level(logging.WARN)
    spec = config.get(f"env:{project_env}", "platform")
    if not spec:
        return False
    already_up_to_date = not options.get("force")
    if not pm.get_package(spec):
        already_up_to_date = False
    # Reuse the configured manager: the original instantiated a fresh
    # PlatformPackageManager() here, which silently dropped the WARN log
    # level set for --silent above.
    pm.install(
        spec,
        project_env=project_env,
        project_targets=options.get("project_targets"),
        skip_dependencies=options.get("skip_dependencies"),
        force=options.get("force"),
    )
    return not already_up_to_date
def _install_project_env_custom_platforms(project_env, options):
    """Install user-specified platform specs into the project env.

    Returns True when something was actually installed.
    """
    pm = PlatformPackageManager()
    if not options.get("silent"):
        pm.set_log_level(logging.DEBUG)
    installed_any = bool(options.get("force"))
    for spec in options.get("platforms"):
        if not pm.get_package(spec):
            installed_any = True
        pm.install(
            spec,
            project_env=project_env,
            project_targets=options.get("project_targets"),
            skip_dependencies=options.get("skip_dependencies"),
            force=options.get("force"),
        )
    return installed_any
def _install_project_env_custom_tools(project_env, options):
    """Install user-specified tool packages; save them to platformio.ini
    under ``platform_packages`` unless --no-save was given.

    Returns True when something was actually installed.
    """
    tm = ToolPackageManager()
    if not options.get("silent"):
        tm.set_log_level(logging.DEBUG)
    installed_any = bool(options.get("force"))
    saved_specs = []
    for tool in options.get("tools"):
        spec = PackageSpec(tool)
        if not tm.get_package(spec):
            installed_any = True
        pkg = tm.install(
            spec,
            skip_dependencies=options.get("skip_dependencies"),
            force=options.get("force"),
        )
        saved_specs.append(pkg_to_save_spec(pkg, spec))
    if saved_specs and not options.get("no_save"):
        save_project_dependencies(
            os.getcwd(),
            saved_specs,
            scope="platform_packages",
            action="add",
            environments=[project_env],
        )
    return installed_any
def _install_project_env_libraries(project_env, options):
    """Install the env's declared ``lib_deps`` (plus test-runner extras)
    and the dependencies of private libraries.

    Returns True when something was actually installed.
    """
    # Drop previously installed deps that were removed from lib_deps.
    _uninstall_project_unused_libdeps(project_env, options)
    already_up_to_date = not options.get("force")
    config = ProjectConfig.get_instance()
    # Manager for per-env downloaded dependencies (libdeps_dir/<env>).
    env_lm = LibraryPackageManager(
        os.path.join(config.get("platformio", "libdeps_dir"), project_env)
    )
    # Manager for the user's own (private) libraries in lib_dir.
    private_lm = LibraryPackageManager(
        os.path.join(config.get("platformio", "lib_dir"))
    )
    if options.get("silent"):
        env_lm.set_log_level(logging.WARN)
        private_lm.set_log_level(logging.WARN)
    lib_deps = config.get(f"env:{project_env}", "lib_deps")
    if "__test" in options.get("project_targets", []):
        # Test builds may require extra libraries declared by the runner.
        test_runner = TestRunnerFactory.new(
            TestSuite(project_env, options.get("piotest_running_name", "*")), config
        )
        lib_deps.extend(test_runner.EXTRA_LIB_DEPS or [])
    for library in lib_deps:
        spec = PackageSpec(library)
        # skip built-in dependencies
        if not spec.external and not spec.owner:
            continue
        if not env_lm.get_package(spec):
            already_up_to_date = False
        env_lm.install(
            spec,
            skip_dependencies=options.get("skip_dependencies"),
            force=options.get("force"),
        )
    # install dependencies from the private libraries
    for pkg in private_lm.get_installed():
        _install_project_private_library_deps(pkg, private_lm, env_lm, options)
    return not already_up_to_date
def _uninstall_project_unused_libdeps(project_env, options):
    """Remove libdeps previously installed for *project_env* that are no
    longer listed in its ``lib_deps``, tracked via ``integrity.dat``.
    """
    config = ProjectConfig.get_instance()
    lib_deps = set(config.get(f"env:{project_env}", "lib_deps"))
    if not lib_deps:
        return
    storage_dir = Path(config.get("platformio", "libdeps_dir"), project_env)
    integrity_dat = storage_dir / "integrity.dat"
    # Guard against the first run: without an integrity file there are no
    # previous deps, so `prev_lib_deps` must default to an empty set
    # instead of being left undefined (NameError below otherwise).
    prev_lib_deps = set()
    if integrity_dat.is_file():
        prev_lib_deps = set(
            integrity_dat.read_text(encoding="utf-8").strip().split("\n")
        )
        if lib_deps == prev_lib_deps:
            return
    obsolete = prev_lib_deps - lib_deps
    if obsolete:
        lm = LibraryPackageManager(str(storage_dir))
        if options.get("silent"):
            lm.set_log_level(logging.WARN)
        else:
            click.secho("Removing unused dependencies...")
        for spec in obsolete:
            try:
                lm.uninstall(spec)
            except UnknownPackageError:
                # Already gone from storage; nothing to remove.
                pass
    storage_dir.mkdir(parents=True, exist_ok=True)
    integrity_dat.write_text("\n".join(lib_deps), encoding="utf-8")
def _install_project_private_library_deps(private_pkg, private_lm, env_lm, options):
    """Recursively install dependencies declared by a private library
    into the env storage, unless already satisfied privately or per-env.
    """
    for dependency in private_lm.get_pkg_dependencies(private_pkg) or []:
        spec = private_lm.dependency_to_spec(dependency)
        # skip built-in dependencies
        if not spec.external and not spec.owner:
            continue
        pkg = private_lm.get_package(spec)
        if not pkg and not env_lm.get_package(spec):
            pkg = env_lm.install(
                spec,
                skip_dependencies=True,
                force=options.get("force"),
            )
        if not pkg:
            # Nothing resolved to recurse into.
            continue
        _install_project_private_library_deps(pkg, private_lm, env_lm, options)
def _install_project_env_custom_libraries(project_env, options):
    """Install user-specified libraries into the env libdeps storage;
    save them to platformio.ini under ``lib_deps`` unless --no-save.

    Returns True when something was actually installed.
    """
    config = ProjectConfig.get_instance()
    lm = LibraryPackageManager(
        os.path.join(config.get("platformio", "libdeps_dir"), project_env)
    )
    if not options.get("silent"):
        lm.set_log_level(logging.DEBUG)
    installed_any = bool(options.get("force"))
    saved_specs = []
    for library in options.get("libraries") or []:
        spec = PackageSpec(library)
        if not lm.get_package(spec):
            installed_any = True
        pkg = lm.install(
            spec,
            skip_dependencies=options.get("skip_dependencies"),
            force=options.get("force"),
        )
        saved_specs.append(pkg_to_save_spec(pkg, spec))
    if saved_specs and not options.get("no_save"):
        save_project_dependencies(
            os.getcwd(),
            saved_specs,
            scope="lib_deps",
            action="add",
            environments=[project_env],
        )
    return installed_any
| {
"content_hash": "8d42dfaed4e9e21f57ed5b3475d81d24",
"timestamp": "",
"source": "github",
"line_count": 302,
"max_line_length": 88,
"avg_line_length": 36.44701986754967,
"alnum_prop": 0.6298718997001908,
"repo_name": "platformio/platformio",
"id": "9bbe9505ff92c3febba365c6f2a1c9adf5003c29",
"size": "11618",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "platformio/package/commands/install.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Arduino",
"bytes": "1826"
},
{
"name": "Makefile",
"bytes": "356"
},
{
"name": "Processing",
"bytes": "101"
},
{
"name": "Python",
"bytes": "333618"
},
{
"name": "Smarty",
"bytes": "45408"
}
],
"symlink_target": ""
} |
import unittest
class TestPolyStruct (unittest.TestCase):
    """Checks vertex/edge/face counts of CrankShape.PolyStruct solids."""

    def _build(self, nvertices, faces):
        # Construct a PolyStruct from a vertex count and face index lists.
        pstruct = CrankShape.PolyStruct ()
        pstruct.set_nvertices (nvertices)
        for face in faces:
            pstruct.add_face_vertex_array (list(face))
        return pstruct

    def test_tetrahedron (self):
        pstruct = self._build(4, [(0, 2, 1), (3, 0, 1), (3, 1, 2), (3, 2, 0)])
        self.assertEqual (pstruct.get_nvertices (), 4)
        self.assertEqual (pstruct.get_nedges (), 6)
        self.assertEqual (pstruct.get_nfaces (), 4)
        assert (pstruct.check_valid ())

    def test_cube (self):
        faces = [(0, 1, 2, 3), (0, 4, 5, 1), (1, 5, 6, 2),
                 (2, 6, 7, 3), (3, 7, 4, 0), (7, 6, 5, 4)]
        pstruct = self._build(8, faces)
        self.assertEqual (pstruct.get_nvertices (), 8)
        self.assertEqual (pstruct.get_nedges (), 12)
        self.assertEqual (pstruct.get_nfaces (), 6)
        assert (pstruct.check_valid ())
| {
"content_hash": "b764ff9998a1b4a85ec12b1ed1b16205",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 54,
"avg_line_length": 33.27777777777778,
"alnum_prop": 0.5934891485809682,
"repo_name": "WSID/crank-system",
"id": "7416eac3de603130748a26ac1748437a259fe117",
"size": "2282",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/python/shape/test_poly_struct.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "3099576"
},
{
"name": "Makefile",
"bytes": "15971"
},
{
"name": "Python",
"bytes": "170235"
},
{
"name": "Shell",
"bytes": "25419"
},
{
"name": "Vala",
"bytes": "245313"
}
],
"symlink_target": ""
} |
import binascii
import pytest
@pytest.fixture
def icmp_packet(simple_capture):
    # Eighth packet of the shared capture fixture — an ICMP packet
    # (matches frame number '8' asserted elsewhere in this module).
    return simple_capture[7]
# Each access_func exercises a different layer-access style.
@pytest.mark.parametrize('access_func', [
    lambda pkt: pkt[-1],
    lambda pkt: pkt['icmp'],
    lambda pkt: pkt['ICMP'],
    lambda pkt: pkt.icmp,
])
def test_can_access_layer(icmp_packet, access_func):
    """Tests that layer access in various ways works the same way."""
    # Index, item (case-insensitive) and attribute access must all yield
    # the same ICMP layer with identical payload data.
    assert access_func(icmp_packet).layer_name.upper() == 'ICMP'
    assert binascii.unhexlify(access_func(icmp_packet).data) == b'abcdefghijklmnopqrstuvwabcdefghi'
def test_packet_contains_layer(icmp_packet):
    """`in` on a packet checks for the presence of a layer by name."""
    assert 'ICMP' in icmp_packet
def test_raw_mode(icmp_packet):
    """Enabling raw_mode on a layer switches field access to raw values."""
    pretty_src = icmp_packet.ip.src
    raw_src = icmp_packet.ip.src.raw_value
    icmp_packet.ip.raw_mode = True
    assert icmp_packet.ip.src != pretty_src
    assert icmp_packet.ip.src == raw_src
def test_frame_info_access(icmp_packet):
    """frame_info exposes the dissected protocol chain and the frame number."""
    protocols = icmp_packet.frame_info.protocols
    # tshark versions differ in whether 'ethertype' appears in the chain.
    assert protocols in {'eth:ip:icmp:data', 'eth:ethertype:ip:icmp:data'}
    assert icmp_packet.frame_info.number == '8'
| {
"content_hash": "72be57b909eda1f5385224f4baf5a7c8",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 99,
"avg_line_length": 28.763157894736842,
"alnum_prop": 0.6980786825251601,
"repo_name": "KimiNewt/pyshark",
"id": "72fa68aefd8b3a9af85d2eca99a7dff6d7c46d75",
"size": "1093",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_packet_operations.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "127807"
}
],
"symlink_target": ""
} |
from venster.windows import *
from venster.wtl import *
from venster import comctl
from venster import gdi
from venster import shell
from venster.lib import form
from venster.lib import list
from venster.lib import tray
from venster.lib import menu
FILE_MENU = (MF_POPUP, "&File",
[(MF_STRING, "&Close", form.ID_CLOSE),
(MF_SEPARATOR, ),
(MF_STRING, "&Exit", form.ID_EXIT)
])
class Form(form.Form):
    """Main application window; closing it hides the window instead of exiting."""
    _window_title_ = "Tray test"
    _window_icon_sm_ = _window_icon_ = Icon("cow.ico")
    _window_width_ = 700
    _window_height_ = 480
    _form_menu_ = [FILE_MENU]

    def OnDestroy(self, event):
        # Delegate to the base class destroy handling.
        form.Form.OnDestroy(self, event)

    def OnClose(self, event):
        event.handled = True  # prevent default handler from closing window
        self.ShowWindow(SW_HIDE)  # instead hide it

    def OnExitCmd(self, event):
        # File -> Exit tears down the whole application via the module-level exit().
        exit()
class TrayIcon(tray.TrayIcon):
    """Notification-area icon that toggles the main form's visibility."""
    _window_icon_ = Form._window_icon_
    _window_title_ = Form._window_title_
    _window_class_ = "ApplicationTrayWindow"

    # Two context menus: one offered while the form is hidden, one while visible.
    _tray_icon_menu_open_ = [(MF_STRING, "*Open Application", form.ID_OPEN),
                             (MF_SEPARATOR,),
                             (MF_STRING, "Exit", form.ID_EXIT)]
    _tray_icon_menu_close_ = [(MF_STRING, "*Close Application", form.ID_CLOSE),
                              (MF_SEPARATOR,),
                              (MF_STRING, "Exit", form.ID_EXIT)]
    _tray_icon_menu_ = _tray_icon_menu_close_

    def __init__(self, mainForm):
        tray.TrayIcon.__init__(self)
        self.mainForm = mainForm

    def IsOpen(self):
        """The form counts as open when it is visible and not minimized."""
        window = self.mainForm
        return window.IsWindowVisible() and not window.IsIconic()

    def Hide(self):
        self.mainForm.ShowWindow(SW_HIDE)

    def ShowNormal(self):
        self.mainForm.ShowWindow(SW_SHOWNORMAL)
        self.mainForm.SetForegroundWindow()

    def OnLeftButtonDoubleClick(self, event):
        # Double-click toggles between hidden and shown.
        if self.IsOpen():
            self.Hide()
            return
        self.ShowNormal()

    def OnExitCmd(self, event):
        exit()

    def OnOpenCmd(self, event):
        self.ShowNormal()

    def OnCloseCmd(self, event):
        self.Hide()

    def TrackPopupMenu(self):
        # Pick the menu matching the current visibility before delegating.
        self._tray_icon_menu_ = (self._tray_icon_menu_close_ if self.IsOpen()
                                 else self._tray_icon_menu_open_)
        tray.TrayIcon.TrackPopupMenu(self)
mainForm = None
trayIcon = None
application = None
def exit():
    """Tear down the main window, the tray icon and the message loop.

    Destroys both top-level windows, quits the application's message loop,
    and drops all three module-level references so nothing stale survives.
    """
    global mainForm
    global trayIcon
    global application
    mainForm.DestroyWindow()
    trayIcon.DestroyWindow()
    application.Quit()
    # Clear the globals consistently: the original cleared mainForm and
    # trayIcon but left a stale `application` reference behind.
    mainForm = None
    trayIcon = None
    application = None
def run():
    """Create the main form and its tray icon, then enter the message loop."""
    global mainForm
    global trayIcon
    global application
    mainForm = Form()
    trayIcon = TrayIcon(mainForm)
    application = Application()
    application.Run()

if __name__ == '__main__':
    run()
| {
"content_hash": "de28c431d812b21b8bc7872b3c1f9ffa",
"timestamp": "",
"source": "github",
"line_count": 113,
"max_line_length": 79,
"avg_line_length": 26.327433628318584,
"alnum_prop": 0.5784873949579832,
"repo_name": "toymachine/venster",
"id": "88dd5d614c0932d440057cd41f4108c54a92ad54",
"size": "4403",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/test_tray.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "308839"
},
{
"name": "Shell",
"bytes": "174"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals, print_function, division
__author__ = "mozman <mozman@gmx.at>"
# Standard Library
import unittest
# trusted or separately tested modules
from ezodf2.xmlns import etree, CN
# objects to test
from ezodf2.base import GenericWrapper
TEXT_NS = "urn:oasis:names:tc:opendocument:xmlns:text:1.0"
DATA1 = '<GenericWrapper name="root"><GenericWrapper pos="0"/><GenericWrapper pos="1"/>'\
'<GenericWrapper pos="2"/><GenericWrapper pos="3"/></GenericWrapper>'
DATA2 = """
<text:p xmlns:text="urn:oasis:names:tc:opendocument:xmlns:text:1.0">
<text:span>
SPAN1
<text:span>
SPAN2
</text:span>
</text:span>
<text:span>
SPAN3
<text:span>
SPAN4
</text:span>
</text:span>
</text:p>
"""
class TestBaseClass(unittest.TestCase):
    """Behavioural tests for the GenericWrapper XML element facade."""

    @staticmethod
    def _wrap(data=DATA1):
        # Parse `data` and wrap the resulting XML root node.
        return GenericWrapper(xmlnode=etree.fromstring(data))

    def test_bare_init(self):
        wrapper = GenericWrapper()
        self.assertEqual('GenericWrapper', wrapper.xmlnode.tag, "expected tag is 'GenericWrapper'")

    def test_init_xmlroot(self):
        node = etree.Element('GenericWrapper', test="GenericWrapper")
        wrapper = GenericWrapper(xmlnode=node)
        self.assertEqual('GenericWrapper', wrapper.xmlnode.tag, "expected tag is 'GenericWrapper'")
        self.assertEqual('GenericWrapper', wrapper.xmlnode.get('test'), "expected attribute test is 'GenericWrapper'")

    def test_len(self):
        self.assertEqual(4, len(self._wrap()), "expected len is 4")

    def test_getattr(self):
        self.assertEqual('root', self._wrap().get_attr('name'))

    def test_setattr(self):
        wrapper = self._wrap()
        wrapper.set_attr('name', 'xxx')
        self.assertEqual('xxx', wrapper.xmlnode.get('name'))

    def test_setattr_None_error(self):
        wrapper = self._wrap()
        with self.assertRaises(ValueError):
            wrapper.set_attr('name', None)

    def test_setattr_empty_string_error(self):
        wrapper = self._wrap()
        with self.assertRaises(ValueError):
            wrapper.set_attr('name', "")

    def test_iter(self):
        # Children iterate in document order and come back wrapped.
        for pos, child in enumerate(self._wrap()):
            self.assertTrue(isinstance(child, GenericWrapper))
            self.assertEqual(pos, int(child.get_attr('pos')))

    def test_get(self):
        wrapper = self._wrap()
        for index in range(4):
            self.assertEqual(index, int(wrapper.get_child(index).get_attr('pos')))

    def test_getitem(self):
        wrapper = self._wrap()
        for index in range(4):
            self.assertEqual(index, int(wrapper[index].get_attr('pos')))

    def test_getitem_index_error(self):
        wrapper = self._wrap()
        with self.assertRaises(IndexError):
            wrapper[99]

    def test_get_index_error(self):
        wrapper = self._wrap()
        with self.assertRaises(IndexError):
            wrapper.get_child(99)

    def test_setitem(self):
        wrapper = self._wrap()
        replacement = GenericWrapper()
        replacement.set_attr('name', 'newitem')
        wrapper[1] = replacement
        self.assertEqual('newitem', wrapper[1].get_attr('name'))
        self.assertEqual(4, len(wrapper))

    def test_setitem_index_error(self):
        wrapper = self._wrap()
        with self.assertRaises(IndexError):
            wrapper[99] = GenericWrapper()

    def test_delitem(self):
        wrapper = self._wrap()
        del wrapper[0]
        self.assertEqual(len(wrapper), 3)
        self.assertEqual(1, int(wrapper[0].get_attr('pos')))

    def test_delitem_index_error(self):
        wrapper = self._wrap()
        with self.assertRaises(IndexError):
            del wrapper[99]

    def test_append(self):
        wrapper = self._wrap()
        end = len(wrapper)
        newcomer = GenericWrapper()
        newcomer.set_attr('name', 'newitem')
        wrapper.append(newcomer)
        self.assertEqual('newitem', wrapper[end].get_attr('name'))

    def test_iadd(self):
        # `+=` behaves like append().
        wrapper = self._wrap()
        end = len(wrapper)
        newcomer = GenericWrapper()
        newcomer.set_attr('name', 'newitem')
        wrapper += newcomer
        self.assertEqual('newitem', wrapper[end].get_attr('name'))

    def test_insert_before(self):
        wrapper = self._wrap()
        newcomer = GenericWrapper()
        newcomer.set_attr('name', 'newitem')
        wrapper.insert_before(wrapper[2], newcomer)
        self.assertEqual('newitem', wrapper[2].get_attr('name'))

    def test_remove(self):
        wrapper = self._wrap()
        wrapper.remove(wrapper[2])
        self.assertEqual(3, len(wrapper))
        self.assertEqual('3', wrapper[2].get_attr('pos'))

    def test_findall_All(self):
        hits = list(self._wrap().findall(GenericWrapper.TAG))
        self.assertEqual(4, len(hits))

    def test_findall_None(self):
        hits = list(self._wrap().findall(CN('text:p')))
        self.assertEqual(0, len(hits))

    def test_findall_subelements(self):
        # findall only matches direct children, not nested spans.
        hits = list(self._wrap(DATA2).findall(CN('text:span')))
        self.assertEqual(2, len(hits))

    def test_find(self):
        first = self._wrap().find(GenericWrapper.TAG)
        self.assertEqual('0', first.get_attr('pos'))

    def test_find_None(self):
        self.assertIsNone(self._wrap().find('test'))

    def test_replace_node(self):
        wrapper = self._wrap()
        victim = wrapper.find(GenericWrapper.TAG)
        stand_in = GenericWrapper()
        stand_in.set_attr('pos', 'replaced')
        wrapper.replace(victim, stand_in)
        self.assertEqual(wrapper[0].get_attr('pos'), 'replaced')

    def test_replace_error(self):
        wrapper = self._wrap()
        stand_in = GenericWrapper()
        with self.assertRaises(ValueError):
            wrapper.replace(stand_in, stand_in)

    def test_get_root_None(self):
        wrapper = GenericWrapper()
        wrapper.xmlnode = None
        self.assertIsNone(wrapper.get_xmlroot(), "expected xmlroot is None")

    def test_get_root_no_children(self):
        wrapper = GenericWrapper()
        self.assertEqual(wrapper.get_xmlroot(), wrapper.xmlnode)

    def test_get_root_with_children(self):
        # get_xmlroot() climbs from a child back to the tree root.
        first_child = self._wrap()[0]
        self.assertEqual(first_child.get_xmlroot().get('name'), 'root')

    def test_textlen_for_no_text(self):
        self.assertEqual(0, GenericWrapper().textlen)

    def test_textlen(self):
        wrapper = GenericWrapper()
        wrapper.text = "text"
        self.assertEqual(4, wrapper.textlen)

    def test_plaintext(self):
        wrapper = GenericWrapper()
        wrapper.text = "text"
        self.assertEqual('text', wrapper.plaintext())

    def test_plaintext_for_no_text(self):
        self.assertEqual('', GenericWrapper().plaintext())
# Allow running this test module directly.
if __name__=='__main__':
    unittest.main()
| {
"content_hash": "82d0d7e4e66981e0ef48963947d97fef",
"timestamp": "",
"source": "github",
"line_count": 223,
"max_line_length": 112,
"avg_line_length": 34.426008968609864,
"alnum_prop": 0.6079197603230428,
"repo_name": "iwschris/ezodf2",
"id": "3667fe002422c88508c0e773a0addc5cd1b35a9b",
"size": "7819",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_base.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "351944"
},
{
"name": "Shell",
"bytes": "4505"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Alter EmdisField.field_description to CharField(max_length=500).

    Filename pattern suggests this was auto-generated by makemigrations;
    treat as append-only history and avoid editing by hand.
    """

    # Must be applied after migration 0013 of the wmdadict app.
    dependencies = [
        ('wmdadict', '0013_auto_20170824_2204'),
    ]

    operations = [
        migrations.AlterField(
            model_name='emdisfield',
            name='field_description',
            field=models.CharField(max_length=500),
        ),
    ]
| {
"content_hash": "1ce1eefd5a514cd3f6c8cedf2016147e",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 51,
"avg_line_length": 21.944444444444443,
"alnum_prop": 0.6050632911392405,
"repo_name": "antmont/wmda-stuff",
"id": "906fa7e625a6fb6b323f61bd23c1b0ef899afa3c",
"size": "468",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "wmdadict/migrations/0014_auto_20170827_2230.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "254"
},
{
"name": "HTML",
"bytes": "44178"
},
{
"name": "JavaScript",
"bytes": "659"
},
{
"name": "Python",
"bytes": "67721"
}
],
"symlink_target": ""
} |
import argparse
import os
import pwd
import signal
import subprocess
import sys
import traceback
from urllib.parse import urlunparse
# check for the venv
from lib import sanity_check
sanity_check.check_venv(__file__)
from tornado import httpclient
from tornado import httputil
from tornado import gen
from tornado import web
from tornado.ioloop import IOLoop
from tornado.websocket import WebSocketHandler, websocket_connect
from typing import Any, Callable, Generator, List, Optional
if 'posix' in os.name and os.geteuid() == 0:
raise RuntimeError("run-dev.py should not be run as root.")
parser = argparse.ArgumentParser(description=r"""
Starts the app listening on localhost, for local development.
This script launches the Django and Tornado servers, then runs a reverse proxy
which serves to both of them. After it's all up and running, browse to
http://localhost:9991/
Note that, while runserver and runtornado have the usual auto-restarting
behavior, the reverse proxy itself does *not* automatically restart on changes
to this file.
""",
formatter_class=argparse.RawTextHelpFormatter)
TOOLS_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, os.path.dirname(TOOLS_DIR))
from tools.lib.test_script import (
assert_provisioning_status_ok,
)
parser.add_argument('--test',
action='store_true',
help='Use the testing database and ports')
parser.add_argument('--minify',
action='store_true',
help='Minifies assets for testing in dev')
parser.add_argument('--interface',
action='store',
default=None, help='Set the IP or hostname for the proxy to listen on')
parser.add_argument('--no-clear-memcached',
action='store_false', dest='clear_memcached',
default=True, help='Do not clear memcached')
parser.add_argument('--force',
action="store_true",
default=False, help='Run command despite possible problems.')
parser.add_argument('--enable-tornado-logging',
action="store_true",
default=False, help='Enable access logs from tornado proxy server.')
options = parser.parse_args()
assert_provisioning_status_ok(options.force)
if options.interface is None:
user_id = os.getuid()
user_name = pwd.getpwuid(user_id).pw_name
if user_name in ["vagrant", "zulipdev"]:
# In the Vagrant development environment, we need to listen on
# all ports, and it's safe to do so, because Vagrant is only
# exposing certain guest ports (by default just 9991) to the
# host. The same argument applies to the remote development
# servers using username "zulipdev".
options.interface = None
else:
# Otherwise, only listen to requests on localhost for security.
options.interface = "127.0.0.1"
elif options.interface == "":
options.interface = None
runserver_args = [] # type: List[str]
base_port = 9991
if options.test:
base_port = 9981
settings_module = "zproject.test_settings"
# Don't auto-reload when running casper tests
runserver_args = ['--noreload']
else:
settings_module = "zproject.settings"
manage_args = ['--settings=%s' % (settings_module,)]
os.environ['DJANGO_SETTINGS_MODULE'] = settings_module
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
from scripts.lib.zulip_tools import WARNING, ENDC
proxy_port = base_port
django_port = base_port + 1
tornado_port = base_port + 2
webpack_port = base_port + 3
thumbor_port = base_port + 4
os.chdir(os.path.join(os.path.dirname(__file__), '..'))
# Clean up stale .pyc files etc.
subprocess.check_call('./tools/clean-repo')
if options.clear_memcached:
print("Clearing memcached ...")
subprocess.check_call('./scripts/setup/flush-memcached')
# Set up a new process group, so that we can later kill run{server,tornado}
# and all of the processes they spawn.
os.setpgrp()
# Save pid of parent process to the pid file. It can be used later by
# tools/stop-run-dev to kill the server without having to find the
# terminal in question.
if options.test:
pid_file_path = os.path.join(os.path.join(os.getcwd(), 'var/casper/run_dev.pid'))
else:
pid_file_path = os.path.join(os.path.join(os.getcwd(), 'var/run/run_dev.pid'))
# Required for compatibility python versions.
if not os.path.exists(os.path.dirname(pid_file_path)):
os.makedirs(os.path.dirname(pid_file_path))
with open(pid_file_path, 'w+') as f:
f.write(str(os.getpgrp()) + "\n")
# Pass --nostatic because we configure static serving ourselves in
# zulip/urls.py.
cmds = [['./manage.py', 'runserver'] +
manage_args + runserver_args + ['127.0.0.1:%d' % (django_port,)],
['env', 'PYTHONUNBUFFERED=1', './manage.py', 'runtornado'] +
manage_args + ['127.0.0.1:%d' % (tornado_port,)],
['./manage.py', 'process_queue', '--all'] + manage_args,
['env', 'PGHOST=127.0.0.1', # Force password authentication using .pgpass
'./puppet/zulip/files/postgresql/process_fts_updates'],
['./manage.py', 'deliver_scheduled_messages'],
['/srv/zulip-thumbor-venv/bin/thumbor', '-c', './zthumbor/thumbor.conf',
'-p', '%s' % (thumbor_port,)]]
if options.test:
# We just need to compile webpack assets once at startup, not run a daemon,
# in test mode. Additionally, webpack-dev-server doesn't support running 2
# copies on the same system, so this model lets us run the casper tests
# with a running development server.
subprocess.check_call(['./tools/webpack', '--quiet', '--test'])
else:
webpack_cmd = ['./tools/webpack', '--watch', '--port', str(webpack_port)]
if options.minify:
webpack_cmd.append('--minify')
if options.interface is None:
# If interface is None and we're listening on all ports, we also need
# to disable the webpack host check so that webpack will serve assets.
webpack_cmd.append('--disable-host-check')
if options.interface:
webpack_cmd += ["--host", options.interface]
else:
webpack_cmd += ["--host", "0.0.0.0"]
cmds.append(webpack_cmd)
for cmd in cmds:
subprocess.Popen(cmd)
def transform_url(protocol, path, query, target_port, target_host):
    # type: (str, str, str, int, str) -> str
    """Rebuild the incoming request URL so it points at the chosen backend."""
    netloc = "%s:%d" % (target_host, target_port)
    # Strip the /thumbor prefix so the dev proxy matches the production
    # rewrite before the request reaches thumbor.
    if path.startswith('/thumbor'):
        path = path[len('/thumbor'):]
    return urlunparse((protocol, netloc, path, '', query, ''))
@gen.engine
def fetch_request(url, callback, **kwargs):
    # type: (str, Any, **Any) -> Generator[Callable[..., Any], Any, None]
    """Fetch `url` asynchronously and hand the HTTPResponse to `callback`.

    Uses the legacy gen.engine/gen.Task callback style of tornado.
    """
    # use large timeouts to handle polling requests
    req = httpclient.HTTPRequest(url, connect_timeout=240.0, request_timeout=240.0, **kwargs)
    client = httpclient.AsyncHTTPClient()
    # wait for response
    response = yield gen.Task(client.fetch, req)
    callback(response)
class BaseWebsocketHandler(WebSocketHandler):
    """Proxies a browser websocket connection to a backend websocket server.

    Subclasses select the backend by setting `target_port`.
    """
    # target server ip
    target_host = '127.0.0.1'  # type: str
    # target server port (set by subclasses)
    target_port = None  # type: int

    def __init__(self, *args, **kwargs):
        # type: (*Any, **Any) -> None
        super().__init__(*args, **kwargs)
        # Client connection to the target websocket server; set in open_callback.
        self.client = None  # type: Any

    def get(self, *args, **kwargs):
        # type: (*Any, **Any) -> Optional[Callable[..., Any]]
        # use get method from WebsocketHandler
        return super().get(*args, **kwargs)

    def open(self):
        # type: () -> None
        """Browser opened a websocket: dial the matching backend URL."""
        websocket_url = "ws://{host}:{port}{uri}".format(
            host=self.target_host,
            port=self.target_port,
            uri=self.request.uri
        )
        request = httpclient.HTTPRequest(websocket_url)
        # NOTE(review): sec-websocket-extensions is excluded when copying
        # headers — presumably to let the backend negotiate its own
        # extensions; confirm before changing.
        request.headers = self._add_request_headers(['sec-websocket-extensions'])
        websocket_connect(request, callback=self.open_callback,
                          on_message_callback=self.on_client_message)

    def open_callback(self, future):
        # type: (Any) -> None
        # Connection to the target websocket server is established.
        self.client = future.result()

    def on_client_message(self, message):
        # type: (str) -> None
        """Forward a message from the backend to the browser."""
        if not message:
            # Empty message means the target websocket server closed the connection.
            return self.close()
        if self.ws_connection:
            # send message to client if connection exists
            self.write_message(message, False)

    def on_message(self, message, binary=False):
        # type: (str, bool) -> Optional[Callable[..., Any]]
        """Forward a message from the browser to the backend."""
        if not self.client:
            # No backend connection yet (or it failed): drop the proxy side too.
            return self.close()
        self.client.write_message(message, binary)
        return None

    def check_origin(self, origin):
        # type: (str) -> bool
        # Development proxy: accept any origin.
        return True

    def _add_request_headers(self, exclude_lower_headers_list=None):
        # type: (Optional[List[str]]) -> httputil.HTTPHeaders
        """Copy the incoming request headers, skipping the given lowercase names."""
        exclude_lower_headers_list = exclude_lower_headers_list or []
        headers = httputil.HTTPHeaders()
        for header, v in self.request.headers.get_all():
            if header.lower() not in exclude_lower_headers_list:
                headers.add(header, v)
        return headers
class CombineHandler(BaseWebsocketHandler):
    """Reverse-proxy handler: plain HTTP is forwarded from prepare(),
    websocket Upgrade requests fall through to BaseWebsocketHandler."""

    def get(self, *args, **kwargs):
        # type: (*Any, **Any) -> Optional[Callable[..., Any]]
        # Only engage the websocket machinery on an Upgrade request; normal
        # GETs were already answered asynchronously from prepare().
        if self.request.headers.get("Upgrade", "").lower() == 'websocket':
            return super().get(*args, **kwargs)
        return None

    # The HTTP verb methods are intentionally empty: all verbs are proxied
    # from prepare(), tornado just needs them defined to accept the request.
    def head(self):
        # type: () -> None
        pass

    def post(self):
        # type: () -> None
        pass

    def put(self):
        # type: () -> None
        pass

    def patch(self):
        # type: () -> None
        pass

    def options(self):
        # type: () -> None
        pass

    def delete(self):
        # type: () -> None
        pass

    def handle_response(self, response):
        # type: (Any) -> None
        """Relay the backend response (status, headers, body) to the client."""
        if response.error and not isinstance(response.error, httpclient.HTTPError):
            self.set_status(500)
            self.write('Internal server error:\n' + str(response.error))
        else:
            self.set_status(response.code, response.reason)
            self._headers = httputil.HTTPHeaders()  # clear tornado default header
            for header, v in response.headers.get_all():
                if header != 'Content-Length':
                    # some header appear multiple times, eg 'Set-Cookie'
                    self.add_header(header, v)
            if response.body:
                # rewrite Content-Length Header by the response
                self.set_header('Content-Length', len(response.body))
                self.write(response.body)
        self.finish()

    @web.asynchronous
    def prepare(self):
        # type: () -> None
        """Forward every non-websocket request to the backend for target_port."""
        if 'X-REAL-IP' not in self.request.headers:
            self.request.headers['X-REAL-IP'] = self.request.remote_ip
        if self.request.headers.get("Upgrade", "").lower() == 'websocket':
            return super().prepare()
        url = transform_url(
            self.request.protocol,
            self.request.path,
            self.request.query,
            self.target_port,
            self.target_host,
        )
        try:
            fetch_request(
                url=url,
                callback=self.handle_response,
                method=self.request.method,
                headers=self._add_request_headers(["upgrade-insecure-requests"]),
                follow_redirects=False,
                body=getattr(self.request, 'body'),
                allow_nonstandard_methods=True
            )
        except httpclient.HTTPError as e:
            if hasattr(e, 'response') and e.response:
                self.handle_response(e.response)
            else:
                self.set_status(500)
                self.write('Internal server error:\n' + str(e))
                self.finish()
class WebPackHandler(CombineHandler):
    # Proxies /webpack.* asset requests to the webpack dev server.
    target_port = webpack_port
class DjangoHandler(CombineHandler):
    # Catch-all: proxies remaining requests to the Django runserver.
    target_port = django_port
class TornadoHandler(CombineHandler):
    # Proxies events/sockjs long-polling traffic to the Tornado server.
    target_port = tornado_port
class ThumborHandler(CombineHandler):
    # Proxies /thumbor.* image requests to the thumbor server.
    target_port = thumbor_port
class Application(web.Application):
    """Routing table for the development proxy; first matching rule wins,
    so the Django catch-all must stay last."""

    def __init__(self, enable_logging=False):
        # type: (bool) -> None
        handlers = [
            (r"/json/events.*", TornadoHandler),
            (r"/api/v1/events.*", TornadoHandler),
            (r"/webpack.*", WebPackHandler),
            (r"/sockjs.*", TornadoHandler),
            (r"/thumbor.*", ThumborHandler),
            (r"/.*", DjangoHandler)
        ]
        super().__init__(handlers, enable_logging=enable_logging)

    def log_request(self, handler):
        # type: (BaseWebsocketHandler) -> None
        # Access logging is opt-in via --enable-tornado-logging.
        if self.settings['enable_logging']:
            super().log_request(handler)
def on_shutdown():
    # type: () -> None
    """Stop the shared tornado IOLoop."""
    IOLoop.instance().stop()
def shutdown_handler(*args, **kwargs):
    # type: (*Any, **Any) -> None
    """Signal handler: stop the IOLoop once pending callbacks have drained."""
    io_loop = IOLoop.instance()
    # NOTE(review): inspects the private `_callbacks` attribute; if callbacks
    # remain, retry in 1 second instead of stopping — confirm this still
    # works with the tornado version in use.
    if io_loop._callbacks:
        io_loop.call_later(1, shutdown_handler)
    else:
        io_loop.stop()
# log which services/ports will be started
print("Starting Zulip services on ports: web proxy: {},".format(proxy_port),
      "Django: {}, Tornado: {}, Thumbor: {}".format(django_port, tornado_port, thumbor_port),
      end='')
if options.test:
    print("")  # no webpack for --test
else:
    print(", webpack: {}".format(webpack_port))
print("".join((WARNING,
               "Note: only port {} is exposed to the host in a Vagrant environment.".format(
                   proxy_port), ENDC)))
try:
    app = Application(enable_logging=options.enable_tornado_logging)
    try:
        app.listen(proxy_port, address=options.interface)
    except OSError as e:
        # errno 98 is EADDRINUSE: the proxy port is already taken.
        if e.errno == 98:
            print('\n\nERROR: You probably have another server running!!!\n\n')
        raise
    ioloop = IOLoop.instance()
    # Install graceful-shutdown handlers before entering the loop.
    for s in (signal.SIGINT, signal.SIGTERM):
        signal.signal(s, shutdown_handler)
    ioloop.start()
except Exception:
    # Print the traceback before we get SIGTERM and die.
    traceback.print_exc()
    raise
finally:
    # Kill everything in our process group.
    os.killpg(0, signal.SIGTERM)
    # Remove pid file when development server closed correctly.
    os.remove(pid_file_path)
| {
"content_hash": "81177191be1e30ccfb5f1a4d6e8e2f5f",
"timestamp": "",
"source": "github",
"line_count": 426,
"max_line_length": 93,
"avg_line_length": 34.81455399061033,
"alnum_prop": 0.6202548715528285,
"repo_name": "tommyip/zulip",
"id": "4b3e978bd347bfe818f1d92f24a1fdf37fe0bbaa",
"size": "14855",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tools/run-dev.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "400301"
},
{
"name": "Dockerfile",
"bytes": "2939"
},
{
"name": "Emacs Lisp",
"bytes": "157"
},
{
"name": "HTML",
"bytes": "718599"
},
{
"name": "JavaScript",
"bytes": "3092201"
},
{
"name": "Perl",
"bytes": "398763"
},
{
"name": "Puppet",
"bytes": "71123"
},
{
"name": "Python",
"bytes": "6889539"
},
{
"name": "Ruby",
"bytes": "6110"
},
{
"name": "Shell",
"bytes": "119898"
},
{
"name": "TypeScript",
"bytes": "14645"
}
],
"symlink_target": ""
} |
from rapp_robot_api_motion import Motion
class DeviceMotion(Motion):
    """Stub motion implementation: every operation reports 'Not implemented yet'."""

    def __init__(self, parameters):
        # `parameters` is accepted for interface compatibility but unused here.
        pass

    def enableMotors(self):
        return dict(error='Not implemented yet')

    def disableMotors(self):
        return dict(error='Not implemented yet')

    def moveByVelocity(self, x_vel, y_vel, theta_vel):
        return dict(error='Not implemented yet')

    def moveTo(self, x, y, theta):
        return dict(error='Not implemented yet')

    def stop(self):
        return dict(error='Not implemented yet')

    def getVelocities(self):
        return dict(velocities='', error='Not implemented yet')
| {
"content_hash": "52edcbbf05cffec541501e904e86f23e",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 65,
"avg_line_length": 26.166666666666668,
"alnum_prop": 0.6162420382165605,
"repo_name": "rapp-project/rapp-robots-api",
"id": "354977a8c6c63201cbb95f343aefa75042053260",
"size": "651",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/implementations/implementation_template/rapp_DEVICE_api_motion.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "49817"
},
{
"name": "CMake",
"bytes": "8039"
},
{
"name": "Python",
"bytes": "55634"
},
{
"name": "Shell",
"bytes": "1148"
}
],
"symlink_target": ""
} |
import sys
from PyQt5.QtWidgets import QApplication, QWidget
import pandas as pd
import tables
import cv2
import numpy as np
import matplotlib
matplotlib.use("Qt5Agg")
import matplotlib.pylab as plt
import h5py
# Report whether we are running from a bundled executable ("frozen").
if getattr(sys, 'frozen', False):
    print('Frozen.')
    # sys._MEIPASS is the bundle's temporary extraction directory
    # (PyInstaller-specific, per the file's purpose).
    print(sys._MEIPASS)
else:
    print('Not frozen.')
if __name__ == '__main__':
    # Smoke-test each bundled dependency in turn.
    print('HOLA')
    a = np.arange(10)
    print(a)
    print(cv2.__version__)
    # OpenCV: read a single frame from a sample video.
    vid = cv2.VideoCapture('test.avi')
    ret, image = vid.read()
    print('Img size', image.shape)
    vid.release()
    # HDF5: both h5py and pytables must be able to create a file.
    inputFiles = "test.h5"
    with h5py.File(inputFiles, 'w') as inputFileOpen:
        print('good h5py')
    with tables.File(inputFiles, 'w') as inputFileOpen:
        print('good tables')
    print(pd.__version__)
    # Qt: show a trivial window to verify PyQt5 works.
    app = QApplication(sys.argv)
    w = QWidget()
    w.resize(250, 150)
    w.move(300, 300)
    w.setWindowTitle('Simple')
    w.show()
    sys.exit(app.exec_())
| {
"content_hash": "3855eb2bd44d020e3c6e14a0902a46f1",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 55,
"avg_line_length": 19.645833333333332,
"alnum_prop": 0.630965005302227,
"repo_name": "ver228/tierpsy-tracker",
"id": "71523b00d159daae5d08cecf545a60fb1fe553fe",
"size": "943",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "_old/create_binaries/_test/test_pyinstaller.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "3498"
},
{
"name": "C",
"bytes": "11990"
},
{
"name": "Makefile",
"bytes": "807"
},
{
"name": "Python",
"bytes": "1655591"
},
{
"name": "Shell",
"bytes": "3570"
}
],
"symlink_target": ""
} |
'''
Created on Jul 11, 2012
@author: joseph
'''
from http_client import HttpClient
import urllib,urllib2,hashlib
class User():
    """Manual smoke-test helper driving the local signup/login HTTP endpoints.

    Python 2 code (urllib2, print statements); talks to http://localhost.
    """

    def __init__(self):
        self.http = HttpClient()

    def login(self, loginData):
        """POST `loginData` to /login/ after adding a fresh _xsrf token."""
        # NOTE(review): the xsrf token is scraped from the *signup* page and
        # reused for the login form — confirm the server accepts that.
        signup_url = "http://localhost/signup/"
        xsrf = self.http.getXsrf(signup_url)
        print xsrf
        loginData['_xsrf'] = xsrf
        regQuery = urllib.urlencode(loginData)
        #print regQuery
        url = "http://localhost/login/"
        print url
        req = urllib2.Request(url, regQuery)
        res = urllib2.urlopen(req)
        print res.read(20)
        # Persist the session cookie for later requests.
        self.http.saveCookie()

    def reg(self, regData):
        """POST the signup form in `regData` to /signup/ with a fresh _xsrf."""
        signup_url = "http://localhost/signup/"
        xsrf = self.http.getXsrf(signup_url)
        print xsrf
        regData['_xsrf'] = xsrf
        regQuery = urllib.urlencode(regData)
        url = "http://localhost/signup/"
        req = urllib2.Request(url, regQuery)
        res = urllib2.urlopen(req)
        print res.read(100)

    def delete(self, user_key):
        """Delete the user identified by `user_key` via the manager endpoint."""
        url = "http://localhost/manager/user/delete/%s" % user_key
        res = urllib2.urlopen(url)
        print res.code
if __name__ == '__main__':
    # Manual smoke test: register a user, then log in with the same credentials.
    user = User()
    regData = {}
    email = "fangtee@qq.com"
    pwd = "111111"
    regData['email'] = email
    regData['password'] = pwd
    regData['repassword'] = pwd
    regData['nickname'] = "joseph"
    user.reg(regData)
    loginData = {}
    loginData['email'] = email
    loginData['password'] = pwd
    loginData['next'] = ""
    user.login(loginData)
    #user_key = hashlib.md5(email).hexdigest()
    #print email
    #print user_key
    #user.delete(user_key)
| {
"content_hash": "09b08679eb56f255b57cc5a394589292",
"timestamp": "",
"source": "github",
"line_count": 70,
"max_line_length": 66,
"avg_line_length": 25.257142857142856,
"alnum_prop": 0.5520361990950227,
"repo_name": "ptphp/PyLib",
"id": "a7b233f15f665f69809e3e5692abaeb34d8343c3",
"size": "1814",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/tornado/demos/lihuashu/test/user.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "1523"
},
{
"name": "C++",
"bytes": "7541"
},
{
"name": "CSS",
"bytes": "625731"
},
{
"name": "JavaScript",
"bytes": "4811257"
},
{
"name": "PHP",
"bytes": "34868"
},
{
"name": "Python",
"bytes": "3824172"
},
{
"name": "Ruby",
"bytes": "322"
},
{
"name": "SQL",
"bytes": "685656"
},
{
"name": "Shell",
"bytes": "4143"
}
],
"symlink_target": ""
} |
import helper
import common
from actions_menu import actions_menu
from data_provider import data_provider
from control_recorder import ControlRecorder
from localization import *
from recordings import *
class RecordingsMenu(object):
    """Plex channel menu listing scheduled DVBLink recordings (Python 2)."""

    # private fields
    # NOTE(review): these are class-level (shared) attributes that instances
    # rebind in create()/create_recordings_list_(). This is safe only because
    # a single module-level instance exists — confirm before creating more.
    channel_list_ = []
    recording_list_ = []

    def __init__(self):
        self.control_recorder_ = ControlRecorder()

    def create(self):
        """Build the recordings container; errors become MessageContainers."""
        try:
            # Channel list is fetched once and cached for name lookups.
            if not self.channel_list_:
                self.channel_list_ = data_provider.get_channels()
            return self.create_recordings_list_()
        except common.DVBLinkError, error:
            return MessageContainer(
                header=IDS_CAPTION_ERROR,
                message=helper.get_status_string_id(error))
        except Exception, error:
            return MessageContainer(
                header=IDS_CAPTION_ERROR,
                message=str(error))

    def create_recording_actions(self, title, program_id, channel_id, channel_name, is_series, replace_parent=False):
        """Open the actions sub-menu for one scheduled recording."""
        return actions_menu.create(
            title,
            program_id,
            channel_id,
            channel_name,
            is_series,
            R(helper.ART_SCHEDULED_RECORDINGS),
            replace_parent,
            True)

    def create_recordings_list_(self):
        """Container with all scheduled recordings, ordered by start time."""
        oc = ObjectContainer(
            no_history = True,
            no_cache = True,
            title2 = IDS_SCHEDULED_RECORDINGS_MENU_ITEM,
            art = R(helper.ART_SCHEDULED_RECORDINGS))
        self.recording_list_ = self.get_recordings_()
        if self.recording_list_:
            self.recording_list_.sort(key=lambda recording: recording.program.start_time)
            for recording in self.recording_list_:
                oc.add(self.create_recording_item_(recording))
        return oc

    def create_recording_item_(self, recording):
        """Map one recording onto a TVShowObject menu entry."""
        program = recording.program
        channel = common.search_channel(self.channel_list_, recording.channel_id)
        title = helper.create_recording_title(program, program.is_record, program.is_repeat_record, recording.is_conflicting)
        return TVShowObject(
            key = Callback(
                self.create_recording_actions,
                title = program.name,
                program_id = program.program_id,
                channel_id = recording.channel_id,
                channel_name = channel.channel_name if channel else IDS_UNKNOWN,
                is_series = program.is_series),
            rating_key = program.program_id,
            title = title,
            summary = helper.create_program_summary(program),
            source_title = channel.channel_name if channel else IDS_UNKNOWN,
            rating = float(helper.calculate_rating(program.stars_number, program.stars_max_number)),
            originally_available_at = Datetime.FromTimestamp(program.start_time).date(),
            duration = program.duration * 1000,  # presumably seconds -> ms; confirm
            genres = program.keywords.split('/') if program.keywords else [],
            thumb = Resource.ContentsOfURLWithFallback(program.image))

    def get_recordings_(self):
        # Keep only items that have a program and are actually flagged to record.
        return [recording for recording in data_provider.get_recordings() \
                if recording.program and recording.program.is_record]

    def search_recording_(self, recording_id):
        """Return the cached recording with `recording_id`, or None."""
        recording_list = [recording for recording in self.recording_list_ if recording.recording_id == recording_id]
        return recording_list[0] if len(recording_list) else None
recordings_menu = RecordingsMenu() | {
"content_hash": "3e3e0412d151f3e52de50fbc27a39ff5",
"timestamp": "",
"source": "github",
"line_count": 87,
"max_line_length": 119,
"avg_line_length": 34.58620689655172,
"alnum_prop": 0.7324692588899967,
"repo_name": "cpaton/dvblink-plex-client",
"id": "73ebc5077bd30c62235186837f5b13c41ccdb4fe",
"size": "3009",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "plex/Contents/Code/recordings_menu.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "143261"
}
],
"symlink_target": ""
} |
# Smoke-test for selection.selDoubleTournament (Python 2 print statements):
# print the input individuals, run one double tournament with k=2 selecting
# on fitness first, then print the chosen individuals.
from surrogate.selection.tests.test_individuals import Individuals
individuals = Individuals()

from surrogate.selection import selDoubleTournament

print '\nTest selection.selDoubleTournament: selDoubleTournament'
print '\tInput: ind=\t' + '\t'.join(map(str, individuals)) + ''
out = selDoubleTournament(individuals=list(individuals), k=2, fitness_first=True)
# Alternative: size-first selection.
# out = selDoubleTournament(individuals=list(individuals), k=2, fitness_first=False)
print '\tOutput: out=\t' + '\t'.join(map(str, out)) + ''
| {
"content_hash": "9de58160f76309626cd7933d4e44a105",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 84,
"avg_line_length": 45.90909090909091,
"alnum_prop": 0.7623762376237624,
"repo_name": "DailyActie/Surrogate-Model",
"id": "ff4ccadafbc7cf87001b9074d5675149df07ff3a",
"size": "1702",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "surrogate/selection/tests/test_selDoubleTournament.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Awk",
"bytes": "345"
},
{
"name": "Batchfile",
"bytes": "18746"
},
{
"name": "C",
"bytes": "13004913"
},
{
"name": "C++",
"bytes": "14692003"
},
{
"name": "CMake",
"bytes": "72831"
},
{
"name": "CSS",
"bytes": "303488"
},
{
"name": "Fortran",
"bytes": "7339415"
},
{
"name": "HTML",
"bytes": "854774"
},
{
"name": "Java",
"bytes": "38854"
},
{
"name": "JavaScript",
"bytes": "2432846"
},
{
"name": "Jupyter Notebook",
"bytes": "829689"
},
{
"name": "M4",
"bytes": "1379"
},
{
"name": "Makefile",
"bytes": "48708"
},
{
"name": "Matlab",
"bytes": "4346"
},
{
"name": "Objective-C",
"bytes": "567"
},
{
"name": "PHP",
"bytes": "93585"
},
{
"name": "Pascal",
"bytes": "1449"
},
{
"name": "Perl",
"bytes": "1152272"
},
{
"name": "PowerShell",
"bytes": "17042"
},
{
"name": "Python",
"bytes": "34668203"
},
{
"name": "Roff",
"bytes": "5925"
},
{
"name": "Ruby",
"bytes": "92498"
},
{
"name": "Shell",
"bytes": "94698"
},
{
"name": "TeX",
"bytes": "156540"
},
{
"name": "TypeScript",
"bytes": "41691"
}
],
"symlink_target": ""
} |
import pygame
from pygame.locals import *
from vector2 import Vector2
from random import *
import math
class Vex():
    """
    Vector sprite class (consider renaming) - consists of a list of points which
    are rendered relative to an x and y at draw time
    @author: James Heslin (PROGRAM_IX)
    """
    # Class-level default; only referenced by the commented-out
    # normalisation code inside dir_vec().
    radius = 20
def __str__(self):
"""
Returns a string containing the x and y of the vector sprite
@rtype: string
@return: A string containing the x and y of the vector sprite
@author: James Heslin (PROGRAM_IX)
"""
#string = "Colour: %d, %d, %d" % (self.colour.r, self.colour.g, self.colour.b)
#string = "Colour:", self.colour.r, self.colour.g, self.colour.b
string = "Position: %d, %d" % (self.x, self.y)
#string = string + "Points:"
#for p in self.points:
#string = string + p
return string
def __init__(self, x, y, colour, points, width, scale_x=1, scale_y=1):
"""
Constructs a new Vex
@type x: int
@param x: The X (horizontal) co-ordinate of the vector sprite
@type y: int
@param y: The Y (vertical) co-ordinate of the vector sprite
@type colour: pygame.Color
@param colour: The colour of the vector sprite
@type points: list/tuple of tuples (int, int)
@param points: The points that make up the vector sprite
@type width: int
@param width: The width of the vector sprite's lines
@type scale_x: double
@param scale_x: The horizontal multiplier of the vector sprite's size
@type scale_y: double
@param scale_y: The vertical multiplier of the vector sprite's size
@author: James Heslin (PROGRAM_IX)
"""
self.colour = colour
self.points = points
self.width = width
self.x = x
self.y = y
self.scale_x = scale_x
self.scale_y = scale_y
self.move_up = False
self.move_down = False
self.move_left = False
self.move_right = False
self.rel_dir_vec = Vector2(0, -1) # Points directly up by default
self.lifetime = 1
#self.dir_vec = points[0]
#print "Direction:", self.dir_vec()
#print self.__str__()
def dir_vec(self):
"""
Return a copy of the vector sprite's direction vector (the first vector
in its list of points), adjusted to have absolute co-ordinates
@rtype: Vector2
@return: A copy of the vector sprites's direction vector, with absolute
co-ordinates
@author: James Heslin (PROGRAM_IX)
"""
#print self.points[0] + vector2(self.x, self.y)
# Consider: store direction separately, but rotate it when everything
# else rotates.
v = Vector2(self.points[0].x*self.scale_x,
self.points[0].y*self.scale_y)
"""
# Trying to avoid weird edge cases where the mouse is close to the
# rotating body
if v.x < self.radius or v.y < self.radius:
v = v * self.radius
# Trying to normalise v without losing directionality
if v.x > v.y and abs(v.x) > 0 and abs(v.y) > 0:
v = v/(v.x)
elif v.y > v.x and abs(v.x) > 0 and abs(v.y) > 0:
v = v/(v.y)
"""
return v + Vector2(self.x, self.y)
def rel_dir(self):
"""
Returns a copy of the relative direction vector
@rtype: Vector2
@return: A copy of the relative direction vector
"""
return Vector2(self.rel_dir_vec.x, self.rel_dir_vec.y)
def draw(self, surface):
"""
Renders the vector sprite to the surface specified
@type surface: pygame.Surface
@param surface: The surface onto which the vector sprite is to be
rendered
@author: James Heslin (PROGRAM_IX)
"""
pygame.draw.polygon(surface, self.colour,
self.get_absolute_points_tuple(), self.width)
#dir_v = self.dir_vec()
#pygame.draw.aaline(surface, pygame.Color(255, 0, 0),
#(self.x, self.y), (dir_v.x, dir_v.y), 4)
def update(self, surface): # surface => check collision with outer bounds
"""
Updates the vector sprite with respect to the specified surface
@type surface: pygame.Surface
@param surface: The surface to update the vector sprite against
@author: James Heslin (PROGRAM_IX)
"""
"""
if ((self.x < surface.get_width() and self.x > 0)
and (self.y < surface.get_height() and self.y > 0)):
if self.xMod % 5 == 0:
self.xMod = -self.xMod - 1
else:
self.xMod -= 2
if self.yMod % 5 == 0:
self.yMod = -self.yMod - 1
else:
self.yMod -= 2
for p in self.points:
p.x += self.xMod
p.y += self.yMod
self.x += self.xMod
self.y += self.yMod
"""
#for p in self.points:
#p.x += self.xMod
#p.y += self.yMod
#self.x += self.xMod
#self.y += self.yMod
#print"DERP"
if self.move_up:
self.move(0, -10, surface)
elif self.move_down:
self.move(0, 10, surface)
elif self.move_left:
self.move(-10, 0, surface)
elif self.move_right:
self.move(10, 0, surface)
def distance_to(self, p):
"""
Returns the distance between the centre of the vector sprite and the
specified point
@type p: Vector2
@param p: The point to compare to the vector sprite
@rtype: double
@return: The distance between the centre of the vector sprite and the
specified point
@author: James Heslin (PROGRAM_IX)
"""
return (Vector2(self.x, self.y) - p).get_magnitude()
def vector_between(self, p):
"""
Returns the vector between the vector sprite and the specified point
@type p: Vector2
@param p: The point to compare to the vector sprite
@rtype: Vector2
@return: The vector between the vector sprite and the specified point
@author: James Heslin (PROGRAM_IX)
"""
# Remember: Vectors are NOT pass-by-value!
# Be more careful in future.
# What if this took the direction ONLY into account?
# Normalising won't work - only covers one sector then
# Trying relative vector mathematics
rel_p = p - Vector2(self.x, self.y)
# This line is wrong because it means I'm getting the vector between
# the directional point of this vex, and the point p. Actually I want
# to get the vector between the centre of the vex and p, so that I can
# adjust to that vector later.
#rel_dir = Vector2(self.points[0].x, self.points[0].y)
#direction = self.dir_vec()
#v = rel_dir - rel_p
return rel_p
def angle_to_face_point(self, p):
"""
Return the rotation angle (in radians) required for the vector sprite to
face a specified point (face: the vector sprite's direction vector is
pointing towards the point)
@type p: Vector2
@param p: The point to face
@rtype: double
@return: The rotation angle (in radians) required for the vector sprite
to face p
@author: James Heslin (PROGRAM_IX)
"""
p = p - Vector2(self.x, self.y)
angle_p = p.get_angle()
angle_self = Vector2(self.points[0].x, self.points[0].y).get_angle()
angle = angle_self - angle_p
# I think this should work, theoretically - why doesn't it?
#angle = self.vector_between(p).get_angle()
return angle
def rotate_to_face_point(self, p):
"""
Rotate the vex to face a specified point
@type p: Vector2
@param p: The point to face
@author: James Heslin (PROGRAM_IX)
"""
angle = self.angle_to_face_point(p)
self.rotate_by_radians(-angle)
def rotate_by_radians(self, a):
"""
Rotate the shape by a given number of radians
@type a: double
@param a: The number of radians to rotate the vector sprite by
@author: James Heslin (PROGRAM_IX)
"""
cos_a = math.cos(a) # save these so we only need to do the
sin_a = math.sin(a) # call once for each
for i in self.points:
old_x = i.x
old_y = i.y # preserve old values
i.x = (old_x*cos_a - old_y*sin_a) # use old values to calculate
i.y = (old_x*sin_a + old_y*cos_a) # new values
old_x = self.rel_dir_vec.x
old_y = self.rel_dir_vec.y
self.rel_dir_vec.x = (old_x*cos_a - old_y*sin_a)
self.rel_dir_vec.y = (old_x*sin_a + old_y*cos_a)
#print "Finished rotating"
def move_abs(self, x, y, surface):
"""
Move the vector sprite in the X/Y plane without leaving the bounds of
the specified surface - performs vector calculation to make sure
diagonal movement is not faster than cardinal
@type x: double
@param x: The X (horizontal) movement amount
@type y: double
@param y: The Y (vertical) movement amount
@type surface: pygame.Surface
@param surface: The surface to use to restrict the movement of the
vector sprite
@author: James Heslin (PROGRAM_IX)
"""
#TODO: make a new Vector2 using x and y, then move_rel?
if abs(x) > 0 or abs(y) > 0:
if abs(x) > 0 and abs(y) > 0:
x = x * .707
y = y * .707
if ((self.x + x < surface.get_width() and self.x + x > 0)
and (self.y + y < surface.get_height() and self.y + y > 0)):
#for p in self.points:
#p.x += x
#p.y += y
self.x += int(x)
self.y += int(y)
def move_rel(self, x, y, surface):
"""
Move the vector sprite in the X/Y plane without leaving the bounds of
the specified surface - assumes all inputs have already been calculated
to restrict movement speed
@type x: double
@param x: The X (horizontal) movement amount
@type y: double
@param y: The Y (vertical) movement amount
@type surface: pygame.Surface
@param surface: The surface to use to restrict the movement of the
vector sprite
@author: James Heslin (PROGRAM_IX)
"""
if abs(x) > 0 or abs(y) > 0:
if ((self.x + x < surface.get_width() and self.x + x > 0)
and (self.y + y < surface.get_height() and self.y + y > 0)):
#for p in self.points:
#p.x += x
#p.y += y
self.x += int(x)
self.y += int(y)
def get_relative_points_tuple(self):
"""
Returns a list of 2D points as tuples, relative to vector sprite
position, respective of scale
@rtype: list of tuples (int, int)
@return: A list of tuples representing the points in the vector sprite,
with co-ordinates relative to the vector sprite's position, respective
of scale
@author: James Heslin (PROGRAM_IX)
"""
pts = []
for p in self.points:
pts.append((p.x*self.scale_x, p.y*self.scale_y))
return pts
def get_absolute_points_tuple(self):
"""
Returns a list of 2D points as tuples, relative to origin, respective
of scale
@rtype: list of tuples (int, int)
@return: A list of tuples representing the points in the vector sprite,
with co-ordinates relative to the origin, respective of scale
@author: James Heslin (PROGRAM_IX)
"""
pts = []
for p in self.points:
pts.append(((p.x*self.scale_x)+self.x, (p.y*self.scale_y)+self.y))
return pts
def get_relative_points_vector2(self):
"""
Returns a list of Vector2 objects representing 2D points, relative
to vector sprite position, respective of scale
@rtype: list of Vector2 objects
@return: A list of Vector2 objects representing the points in the vector
sprite, with co-ordinates relative to the vector sprite's position,
respective of scale
@author: James Heslin (PROGRAM_IX)
"""
pts = []
for p in self.points:
pts.append(Vector2(p.x*self.scale_x, p.y*self.scale_y))
return pts
def get_absolute_points_vector2(self):
"""
Returns a list of Vector2 objects representing 2D points, relative
to origin, respective of scale
@rtype: list of Vector2 objects
@return: A list of Vector2 objects representing the points in the vector
sprite, with co-ordinates relative to the origin, respective of scale
@author: James Heslin (PROGRAM_IX)
"""
pts = []
for p in self.points:
pts.append(Vector2((p.x*self.scale_x)+self.x,
(p.y*self.scale_y)+self.y))
return pts
def point_inside(self, v):
"""
Determines roughly if a given point is inside the vector sprite, can be
used for crude collision detection
@type v: Vector2
@param v: The point to check
@rtype: boolean
@return: True if the point is inside the vector sprite, False otherwise
@author: James Heslin (PROGRAM_IX)
"""
max_x = self.points[0].x
max_y = self.points[0].x
min_x = max_x
min_y = max_y
for i in self.points:
if i.x*self.scale_x > max_x:
max_x = i.x*self.scale_x
elif i.x*self.scale_x < min_x:
min_x = i.x*self.scale_x
if i.y*self.scale_y > max_y:
max_y = i.y*self.scale_y
elif i.y*self.scale_y < min_y:
min_y = i.y*self.scale_y
max_x = max_x + self.x
max_y = max_y + self.y
min_x = min_x + self.x
min_y = min_y + self.y
if v.x < max_x and v.y < max_y and v.x > min_x and v.y > min_y:
return True
else:
return False
| {
"content_hash": "9da1e062743ea849944335b17f427063",
"timestamp": "",
"source": "github",
"line_count": 456,
"max_line_length": 86,
"avg_line_length": 33.296052631578945,
"alnum_prop": 0.5322400052690509,
"repo_name": "PROGRAM-IX/pystroke",
"id": "d89dabd841c2bfd8c9e55e2f27f246a75dc594b1",
"size": "15183",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "vex.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "10581"
},
{
"name": "Python",
"bytes": "44806"
}
],
"symlink_target": ""
} |
import _plotly_utils.basevalidators
class TickvalsValidator(_plotly_utils.basevalidators.DataArrayValidator):
    """Validator for the ``choropleth.colorbar.tickvals`` data array."""

    def __init__(
        self, plotly_name="tickvals", parent_name="choropleth.colorbar", **kwargs
    ):
        # Default the edit type to "colorbars" unless the caller overrides it.
        kwargs.setdefault("edit_type", "colorbars")
        super().__init__(
            plotly_name=plotly_name, parent_name=parent_name, **kwargs
        )
| {
"content_hash": "e71ff29ff8d66d1a9b75fd4a9fb45d22",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 81,
"avg_line_length": 33.38461538461539,
"alnum_prop": 0.619815668202765,
"repo_name": "plotly/plotly.py",
"id": "4be520f8198523ff483d8f403370c5117fea8c48",
"size": "434",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/choropleth/colorbar/_tickvals.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "545"
},
{
"name": "JavaScript",
"bytes": "2074"
},
{
"name": "PostScript",
"bytes": "565328"
},
{
"name": "Python",
"bytes": "31506317"
},
{
"name": "TypeScript",
"bytes": "71337"
}
],
"symlink_target": ""
} |
class Environment():
    """Holds the simulated world: a ground plane plus any other objects."""

    def __init__(self):
        # Initialize environmental state: start with just the ground plane.
        self.objects = []
        self.ground = Plane(0, 0, 0, 100, 100)
        self.objects.append(self.ground)

    def update(self):
        """Advance the environment by one step."""
        self.animate()

    def animate(self):
        """Print a debug line plus the draw data for every tracked object."""
        for obj in self.objects:
            print("Animate: " + str(obj))
            print(obj.draw())
class Plane():
    """Axis-aligned rectangular plane defined by an origin and dimensions."""

    def __init__(self, x, y, z, length, width):
        self.x = x
        self.y = y
        self.z = z
        self.length = length
        self.width = width

    def draw(self):
        """Return the draw data: ([x, y, z], length, width)."""
        origin = [self.x, self.y, self.z]
        return origin, self.length, self.width
| {
"content_hash": "218ff4f8dfefc7f96804ec384371b98a",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 60,
"avg_line_length": 30.227272727272727,
"alnum_prop": 0.5473684210526316,
"repo_name": "ultimatedeath11/VandyHacks2017",
"id": "787068850bbad95f7437ac7bf51c8dc6a8c08512",
"size": "812",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "environment.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "958"
},
{
"name": "HTML",
"bytes": "10382"
},
{
"name": "JavaScript",
"bytes": "808765"
},
{
"name": "PHP",
"bytes": "4590"
},
{
"name": "Python",
"bytes": "2214"
}
],
"symlink_target": ""
} |
"""
This is the code to accompany the Lesson 2 (SVM) mini-project.
Use a SVM to identify emails from the Enron corpus by their authors:
Sara has label 0
Chris has label 1
"""
# enable python 3 style printing
from __future__ import print_function
import sys
from time import time
sys.path.append("../tools/")
from email_preprocess import preprocess
### features_train and features_test are the features for the training
### and testing datasets, respectively
### labels_train and labels_test are the corresponding item labels
features_train, features_test, labels_train, labels_test = preprocess()
#########################################################
# import SVM algorithm
from sklearn.svm import SVC
from sklearn.metrics import accuracy_score
from sklearn.metrics import confusion_matrix
# create classifier
clf = SVC(kernel="rbf", C=10000)
# reduce training set
#features_train = features_train[:len(features_train)/100]
#labels_train = labels_train[:len(labels_train)/100]
# fit classifier
t0 = time()
clf.fit(features_train, labels_train)
print("training time: ", round(time()-t0, 3), "s")
# predict authors
t0 = time()
pred = clf.predict(features_test)
print("prediction time: ", round(time()-t0, 3), "s")
# print accuracy
print("accuracy: ", accuracy_score(labels_test, pred))
# print confusion matrix
print(confusion_matrix(labels_test, pred))
# print accuracy for items 10, 26, 50
#print(labels_test[10], pred[10], labels_test[26], pred[26], labels_test[50], pred[50])
######################################################### | {
"content_hash": "09dbfa13bd147dc526cf4928f2024b94",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 87,
"avg_line_length": 28.618181818181817,
"alnum_prop": 0.6766200762388819,
"repo_name": "stefanbuenten/nanodegree",
"id": "1187fce6c7725de5f70599a3eaf67386c8db5d5e",
"size": "1593",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "p5/svm/svm_author_id.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "4914292"
},
{
"name": "Jupyter Notebook",
"bytes": "1487580"
},
{
"name": "Python",
"bytes": "64383"
},
{
"name": "R",
"bytes": "13760"
}
],
"symlink_target": ""
} |
import os
##############################################################################
# setup environment setup
def setup(i):
"""
Input: {
cfg - meta of this soft entry
self_cfg - meta of module soft
ck_kernel - import CK kernel module (to reuse functions)
host_os_uoa - host OS UOA
host_os_uid - host OS UID
host_os_dict - host OS meta
target_os_uoa - target OS UOA
target_os_uid - target OS UID
target_os_dict - target OS meta
target_device_id - target device ID (if via ADB)
tags - list of tags used to search this entry
env - updated environment vars from meta
customize - updated customize vars from meta
deps - resolved dependencies for this soft
interactive - if 'yes', can ask questions, otherwise quiet
}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
bat - prepared string for bat file
}
"""
import os
# Get variables
ck=i['ck_kernel']
s=''
iv=i.get('interactive','')
cus=i.get('customize',{})
fp=cus.get('full_path','')
hosd=i['host_os_dict']
tosd=i['target_os_dict']
sdirs=hosd.get('dir_sep','')
# Check platform
hplat=hosd.get('ck_name','')
hproc=hosd.get('processor','')
tproc=tosd.get('processor','')
remote=tosd.get('remote','')
tbits=tosd.get('bits','')
env=i['env']
p1=os.path.dirname(fp)
pi=os.path.dirname(p1)
ep=cus.get('env_prefix','')
env['CK_ENV_DATASET_TYPE']='librispeech'
env[ep] = pi
return {'return':0, 'bat':s}
| {
"content_hash": "cd4e00ddbbc3bae1a5373a1c3207a350",
"timestamp": "",
"source": "github",
"line_count": 79,
"max_line_length": 78,
"avg_line_length": 24.936708860759495,
"alnum_prop": 0.46751269035532994,
"repo_name": "ctuning/ck-env",
"id": "b43aabac17cd7e44932eb490fcf8055e5d5a2a03",
"size": "2134",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "soft/dataset.librispeech/customize.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "1C Enterprise",
"bytes": "17"
},
{
"name": "Ada",
"bytes": "17"
},
{
"name": "Batchfile",
"bytes": "34416"
},
{
"name": "C",
"bytes": "179488"
},
{
"name": "C++",
"bytes": "299120"
},
{
"name": "CMake",
"bytes": "161769"
},
{
"name": "ChucK",
"bytes": "17"
},
{
"name": "Faust",
"bytes": "17"
},
{
"name": "HCL",
"bytes": "17"
},
{
"name": "HTML",
"bytes": "207490"
},
{
"name": "Hack",
"bytes": "17"
},
{
"name": "Inno Setup",
"bytes": "17"
},
{
"name": "Java",
"bytes": "35"
},
{
"name": "Makefile",
"bytes": "245"
},
{
"name": "Perl",
"bytes": "17"
},
{
"name": "Python",
"bytes": "1279851"
},
{
"name": "R",
"bytes": "17"
},
{
"name": "Roff",
"bytes": "204"
},
{
"name": "Shell",
"bytes": "301298"
},
{
"name": "TeX",
"bytes": "35"
},
{
"name": "Thrift",
"bytes": "17"
}
],
"symlink_target": ""
} |
"""
Test installation of the Quotient offering, as well as testing
signup with different combinations of selected benefactor factories
"""
from time import time
from twisted.trial.unittest import TestCase
from twisted.python.reflect import qual
from axiom.scripts import axiomatic
from axiom.store import Store
from axiom import userbase
from axiom.test.util import getPristineStore
from xmantissa import offering, signup
from xmantissa.plugins.free_signup import freeTicket
from xmantissa.product import Product
from xquotient import exmess
from xquotient.compose import Composer
from xquotient.inbox import Inbox
def createStore(testCase):
    """
    Build a Mantissa store (backed by a temp directory from the given test
    case) with an admin account and the Quotient offering installed.
    """
    dbpath = testCase.mktemp()
    axiomatic.main(['-d', dbpath, 'mantissa', '--admin-password', 'password'])
    store = Store(dbpath)
    loginSystem = store.findUnique(userbase.LoginSystem)
    adminAccount = loginSystem.accountByAddress(u'admin', u'localhost')
    adminStore = adminAccount.avatars.open()
    config = adminStore.findUnique(offering.OfferingConfiguration)
    config.installOffering(getQuotientOffering(), None)
    return store
def getQuotientOffering():
    """Return the offering named 'Quotient', or None when not found."""
    for candidate in offering.getOfferings():
        if candidate.name == 'Quotient':
            return candidate
def getFactories(*names):
    """
    Return the Quotient benefactor factories whose class names (lowercased,
    minus any trailing 'benefactor') appear in ``names``.
    """
    matches = []
    suffix = 'benefactor'
    for factory in getQuotientOffering().benefactorFactories:
        key = factory.benefactorClass.__name__.lower()
        if key.endswith(suffix):
            key = key[:-len(suffix)]
        if key in names:
            matches.append(factory)
    return matches
class InstallationTestCase(TestCase):
    """
    Tests to ensure we can at least get as far as installing the
    application and signing up.  We don't really care whether the
    right stuff was installed.
    """
    def setUp(self):
        # Reuse a pristine store across tests and grab the admin avatar's
        # signup configuration for createSignupAndSignup().
        self.store = getPristineStore(self, createStore)
        self.loginSystem = self.store.findUnique(userbase.LoginSystem)
        adminAvatar = self.loginSystem.accountByAddress(u'admin', u'localhost')
        adminStore = adminAvatar.avatars.open()
        self.signupConfig = adminStore.findUnique(signup.SignupConfiguration)

    def createSignupAndSignup(self, powerups):
        """
        Signup via a newly-created signup, using a unique email address.

        @param powerups: sequence of (name, description, powerupClass)
            tuples; only the class part is used to build the Product.
        @return: substore, which will be endowed with C{product}
        """
        product = Product(store=self.store, types=[qual(p) for (name, desc, p) in powerups])
        qsignup = self.signupConfig.createSignup(
            u'admin@localhost',
            freeTicket.itemClass,
            {'prefixURL': u'signup'},
            product,
            u'', u'')
        booth = qsignup.booth
        # time() provides a unique localpart so repeated signups don't
        # collide within one store.
        localpart = unicode(str(time()), 'ascii')
        ticket = booth.createTicket(
            booth, localpart + '@localhost', product)
        ticket.claim()
        return self.loginSystem.accountByAddress(
            localpart, u'localhost').avatars.open()

    def testBasic(self):
        """
        Test signup with the top-most Quotient powerup
        """
        self.createSignupAndSignup([(None, None, Inbox)])

    def testCompose(self):
        """
        Test signup with the compose benefactor (which
        depends on the top-most Quotient benefactor)
        """
        self.createSignupAndSignup([(None, None, Composer)])

    def testAll(self):
        """
        Test signup with all benefactors
        """
        self.createSignupAndSignup(
            getQuotientOffering().installablePowerups)

    def testDefaultMessageDisplayPrefs(self):
        """
        On signup, users' preferred message format should be HTML.
        """
        ss = self.createSignupAndSignup(
            getQuotientOffering().installablePowerups)
        self.assertEqual(ss.findUnique(
            exmess.MessageDisplayPreferenceCollection).preferredFormat, u"text/html")
| {
"content_hash": "f40779c6e800a0e899385dd5af8085bc",
"timestamp": "",
"source": "github",
"line_count": 119,
"max_line_length": 92,
"avg_line_length": 32.89915966386555,
"alnum_prop": 0.6610472541507024,
"repo_name": "twisted/quotient",
"id": "c762f4be6db752ec413c9782ce94a363b079178f",
"size": "3915",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "xquotient/test/test_signup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "13968"
},
{
"name": "JavaScript",
"bytes": "354447"
},
{
"name": "Python",
"bytes": "890995"
}
],
"symlink_target": ""
} |
import pytest
from distutils.version import LooseVersion
from f5.bigip.resource import MissingRequiredCreationParameter
from f5.bigip.tm.security.shared_objects import Address_List
from f5.bigip.tm.security.shared_objects import Port_List
from requests.exceptions import HTTPError
DESC = 'TESTADDED'
@pytest.fixture(scope='function')
def addrlist(mgmt_root):
    """Yield a fresh shared-objects address list; delete it on teardown."""
    resource = mgmt_root.tm.security.shared_objects.address_lists.address_list.create(
        name='fake_addr', partition='Common', addresses=[{'name': '10.10.10.10'}])
    yield resource
    resource.delete()
@pytest.fixture(scope='function')
def portlist(mgmt_root):
    """Yield a fresh shared-objects port list; delete it on teardown."""
    resource = mgmt_root.tm.security.shared_objects.port_lists.port_list.create(
        name='fake_port', partition='Common', ports=[{'name': '80'}])
    yield resource
    resource.delete()
@pytest.mark.skipif(
    LooseVersion(pytest.config.getoption('--release')) < LooseVersion('14.0.0'),
    reason='This collection is fully implemented on 14.0.0 or greater.'
)
class TestAddressList(object):
    """Functional tests for the security shared-objects address-list API."""
    def test_create_missing_mandatory_attr_raises(self, mgmt_root):
        # Creating with no address payload must be rejected client-side
        # before any request is made.
        ac = mgmt_root.tm.security.shared_objects.address_lists
        with pytest.raises(MissingRequiredCreationParameter) as err:
            ac.address_list.create(name='fail', partition='Common')
        # The mandatory-parameter list gained 'fqdns' in 12.0.0.
        if LooseVersion(pytest.config.getoption('--release')) < LooseVersion('12.0.0'):
            error = "This resource requires at least one of the mandatory additional parameters to be provided: addressLists, addresses, geo"
            assert str(err.value) == error
        else:
            error = "This resource requires at least one of the mandatory additional parameters to be provided: addressLists, addresses, fqdns, geo"
            assert str(err.value) == error

    def test_create_req_args(self, mgmt_root):
        # The fixture creates with required args only, so 'description'
        # must be absent.
        r1 = addrlist
        URI = 'https://localhost/mgmt/tm/security/' \
              'shared-objects/address-list/~Common~fake_addr'
        assert r1.name == 'fake_addr'
        assert r1.partition == 'Common'
        assert r1.selfLink.startswith(URI)
        assert not hasattr(r1, 'description')

    def test_create_opt_args(self, mgmt_root):
        # Optional 'description' should round-trip on create.
        r1 = mgmt_root.tm.security.shared_objects.address_lists.address_list.create(
            name='fake_addr', partition='Common', addresses=[{'name': '10.10.10.10'}], description=DESC)
        URI = 'https://localhost/mgmt/tm/security/' \
              'shared-objects/address-list/~Common~fake_addr'
        assert r1.name == 'fake_addr'
        assert r1.partition == 'Common'
        assert r1.selfLink.startswith(URI)
        assert hasattr(r1, 'description')
        assert r1.description == DESC
        r1.delete()

    def test_refresh(self, mgmt_root, addrlist):
        # refresh() must pick up attribute changes made via another handle.
        rc = mgmt_root.tm.security.shared_objects.address_lists
        r1 = addrlist
        r2 = rc.address_list.load(name='fake_addr', partition='Common')
        assert r1.name == r2.name
        assert r1.kind == r2.kind
        assert r1.selfLink == r2.selfLink
        assert not hasattr(r1, 'description')
        assert not hasattr(r2, 'description')
        r2.modify(description=DESC)
        assert hasattr(r2, 'description')
        r1.refresh()
        assert r1.selfLink == r2.selfLink
        assert hasattr(r1, 'description')
        assert r1.description == r2.description

    def test_delete(self, mgmt_root):
        # NOTE(review): this targets the *firewall* address-list collection
        # while the rest of the class targets shared_objects — confirm
        # whether that is intentional.
        rc = mgmt_root.tm.security.firewall.address_lists
        r1 = rc.address_list.create(name='delete_me', partition='Common',
                                    addresses=[{'name': '10.10.10.10'}])
        r1.delete()
        with pytest.raises(HTTPError) as err:
            rc.address_list.load(name='delete_me', partition='Common')
        assert err.value.response.status_code == 404

    def test_load_no_object(self, mgmt_root):
        # NOTE(review): firewall collection again (see test_delete).
        rc = mgmt_root.tm.security.firewall.address_lists
        with pytest.raises(HTTPError) as err:
            rc.address_list.load(name='not_exists', partition='Common')
        assert err.value.response.status_code == 404

    def test_load_and_update(self, mgmt_root, addrlist):
        # update() must persist changes; a fresh load() must observe them.
        r1 = addrlist
        URI = 'https://localhost/mgmt/tm/security/' \
              'shared-objects/address-list/~Common~fake_addr'
        assert r1.name == 'fake_addr'
        assert r1.partition == 'Common'
        assert r1.selfLink.startswith(URI)
        assert not hasattr(r1, 'description')
        r1.description = DESC
        r1.update()
        assert hasattr(r1, 'description')
        assert r1.description == DESC
        rc = mgmt_root.tm.security.shared_objects.address_lists
        r2 = rc.address_list.load(name='fake_addr', partition='Common')
        assert r1.name == r2.name
        assert r1.partition == r2.partition
        assert r1.selfLink == r2.selfLink
        assert hasattr(r2, 'description')
        assert r1.description == r2.description

    def test_addrlst_collection(self, mgmt_root, addrlist):
        # get_collection() must return a non-empty list of Address_List.
        r1 = addrlist
        URI = 'https://localhost/mgmt/tm/security/' \
              'shared-objects/address-list/~Common~fake_addr'
        assert r1.name == 'fake_addr'
        assert r1.partition == 'Common'
        assert r1.selfLink.startswith(URI)
        rc = mgmt_root.tm.security.shared_objects.address_lists.get_collection()
        assert isinstance(rc, list)
        assert len(rc)
        assert isinstance(rc[0], Address_List)
@pytest.mark.skipif(
    LooseVersion(pytest.config.getoption('--release')) < LooseVersion('14.0.0'),
    reason='This collection is fully implemented on 14.0.0 or greater.'
)
class TestPortList(object):
    """Functional tests for tm/security/shared-objects port lists."""

    def test_create_missing_mandatory_attr_raises(self, mgmt_root):
        """Creating without ports/portLists raises with a precise message."""
        ac = mgmt_root.tm.security.shared_objects.port_lists
        error_message = "This resource requires at least one of the mandatory additional parameters to be provided: portLists, ports"
        with pytest.raises(MissingRequiredCreationParameter) as err:
            ac.port_list.create(name='fail', partition='Common')
        assert str(err.value) == error_message

    def test_create_req_args(self, portlist):
        """A fixture-created port list has the expected identity fields."""
        r1 = portlist
        URI = 'https://localhost/mgmt/tm/security/' \
              'shared-objects/port-list/~Common~fake_port'
        assert r1.name == 'fake_port'
        assert r1.partition == 'Common'
        assert r1.selfLink.startswith(URI)
        assert not hasattr(r1, 'description')

    def test_create_opt_args(self, mgmt_root):
        """Optional attributes (description) survive creation."""
        r1 = mgmt_root.tm.security.shared_objects.port_lists.port_list.create(
            name='fake_port', partition='Common', ports=[{
                'name': '80'}], description=DESC)
        URI = 'https://localhost/mgmt/tm/security/' \
              'shared-objects/port-list/~Common~fake_port'
        assert r1.name == 'fake_port'
        assert r1.partition == 'Common'
        assert r1.selfLink.startswith(URI)
        assert hasattr(r1, 'description')
        assert r1.description == DESC
        r1.delete()

    def test_refresh(self, mgmt_root, portlist):
        """refresh() picks up changes made through another handle."""
        rc = mgmt_root.tm.security.shared_objects.port_lists
        r1 = portlist
        r2 = rc.port_list.load(name='fake_port', partition='Common')
        assert r1.name == r2.name
        assert r1.kind == r2.kind
        assert r1.selfLink == r2.selfLink
        assert not hasattr(r1, 'description')
        assert not hasattr(r2, 'description')
        r2.modify(description=DESC)
        assert hasattr(r2, 'description')
        assert r2.description == DESC
        r1.refresh()
        assert r1.selfLink == r2.selfLink
        assert hasattr(r1, 'description')
        assert r1.description == r2.description

    def test_delete(self, mgmt_root):
        """Deleting a port list makes a subsequent load 404.

        FIX: use the shared-objects endpoint like the rest of this class;
        the original used ``security.firewall.port_lists`` (copy/paste
        from the firewall tests) and exercised the wrong collection.
        """
        rc = mgmt_root.tm.security.shared_objects.port_lists
        r1 = rc.port_list.create(name='delete_me', partition='Common',
                                 ports=[{'name': '80'}])
        r1.delete()
        with pytest.raises(HTTPError) as err:
            rc.port_list.load(name='delete_me', partition='Common')
        assert err.value.response.status_code == 404

    def test_load_no_object(self, mgmt_root):
        """Loading a nonexistent port list surfaces the server's 404."""
        rc = mgmt_root.tm.security.shared_objects.port_lists
        with pytest.raises(HTTPError) as err:
            rc.port_list.load(name='not_exists', partition='Common')
        assert err.value.response.status_code == 404

    def test_load_and_update(self, mgmt_root, portlist):
        """update() persists changes; a fresh load shows them."""
        r1 = portlist
        URI = 'https://localhost/mgmt/tm/security/' \
              'shared-objects/port-list/~Common~fake_port'
        assert r1.name == 'fake_port'
        assert r1.partition == 'Common'
        assert r1.selfLink.startswith(URI)
        assert not hasattr(r1, 'description')
        r1.description = DESC
        r1.update()
        assert hasattr(r1, 'description')
        assert r1.description == DESC
        rc = mgmt_root.tm.security.shared_objects.port_lists
        r2 = rc.port_list.load(name='fake_port', partition='Common')
        assert r1.name == r2.name
        assert r1.partition == r2.partition
        assert r1.selfLink == r2.selfLink
        assert hasattr(r2, 'description')
        assert r1.description == r2.description

    def test_portlist_collection(self, mgmt_root):
        """get_collection() returns a non-empty list of Port_List objects."""
        rc = mgmt_root.tm.security.shared_objects.port_lists.get_collection()
        assert isinstance(rc, list)
        assert len(rc)
        assert isinstance(rc[0], Port_List)
| {
"content_hash": "44eb1862ab7e619c15ca900cc4258b0e",
"timestamp": "",
"source": "github",
"line_count": 222,
"max_line_length": 148,
"avg_line_length": 42.18468468468468,
"alnum_prop": 0.6367325146823278,
"repo_name": "F5Networks/f5-common-python",
"id": "b79ecbb3ddbe5da90ffbc681b63ef55bdfd6263f",
"size": "9947",
"binary": false,
"copies": "1",
"ref": "refs/heads/development",
"path": "f5/bigip/tm/security/test/functional/test_shared_objects.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "713"
},
{
"name": "Groovy",
"bytes": "4321"
},
{
"name": "Python",
"bytes": "2705690"
},
{
"name": "Shell",
"bytes": "6398"
}
],
"symlink_target": ""
} |
from __future__ import print_function
import os
import sys
sys.setrecursionlimit(10000)
import time
import json
import argparse
import densenet
import tensorflow as tf
import math
import numpy as np
import pandas as pd
import cv2
from tqdm import tqdm
import scipy
from sklearn.metrics import fbeta_score
from sklearn.cross_validation import train_test_split
import keras.backend as K
from keras.optimizers import Adam
from keras.optimizers import SGD
from keras.utils import np_utils
from keras.models import Model, Sequential
from keras.layers import Input, merge, Convolution2D, MaxPooling2D, UpSampling2D
from keras.layers.normalization import BatchNormalization
from keras.layers import Reshape, core, Dense, Dropout, Flatten
from keras.layers import Conv2D
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint, LearningRateScheduler,TensorBoard,CSVLogger
# Dataset layout on disk (paths relative to this script's working directory).
PLANET_KAGGLE_ROOT = os.path.abspath("../../input/")
# PLANET_KAGGLE_TEST_JPEG_DIR = os.path.join(PLANET_KAGGLE_ROOT, 'testing-sets-for-coding/test-jpg-small')
# PLANET_KAGGLE_TRAIN_JPEG_DIR = os.path.join(PLANET_KAGGLE_ROOT, 'testing-sets-for-coding/train-jpg-small')
PLANET_KAGGLE_LABEL_CSV = os.path.join(PLANET_KAGGLE_ROOT, 'train_v2.csv')
PLANET_KAGGLE_TRAIN_JPEG_DIR = os.path.join(PLANET_KAGGLE_ROOT, 'train-jpg/')
PLANET_KAGGLE_TEST_JPEG_DIR = os.path.join(PLANET_KAGGLE_ROOT, 'test-jpg/')
test_submission_format_file = os.path.join(PLANET_KAGGLE_ROOT,'sample_submission_v2.csv')
# Fail fast if the expected dataset files/directories are missing.
assert os.path.exists(PLANET_KAGGLE_ROOT)
assert os.path.exists(PLANET_KAGGLE_LABEL_CSV)
assert os.path.isfile(test_submission_format_file)
assert os.path.exists(PLANET_KAGGLE_TRAIN_JPEG_DIR)
assert os.path.exists(PLANET_KAGGLE_TEST_JPEG_DIR)
# assert os.path.exists(PLANET_KAGGLE_TESTING_JPEG_TRAIN_DIR)
# assert os.path.exists(PLANET_KAGGLE_TESTING_JPEG_TEST_DIR)
df_train = pd.read_csv(PLANET_KAGGLE_LABEL_CSV)
df_test = pd.read_csv(test_submission_format_file)
# Collect the unique tag vocabulary from the space-separated 'tags' column.
flatten = lambda l: [item for sublist in l for item in sublist]
labels = np.array(list(set(flatten([l.split(' ') for l in df_train['tags'].values]))))
NUM_CLASSES = len(labels)
# Initial per-class decision thresholds (tuned later by get_optimal_threshhold).
# NOTE(review): hard-coded to 17 entries, which assumes NUM_CLASSES == 17.
THRESHHOLD = [0.2]*17
THRESHHOLD = np.array(THRESHHOLD)
# tag name -> column index, and the inverse mapping.
label_map = {l: i for i, l in enumerate(labels)}
inv_label_map = {i: l for l, i in label_map.items()}
X_train = []
X_test = []
y_train = []
print("Loading training set:\n")
# Read every training image and build its multi-hot label vector.
for f, tags in tqdm(df_train.values, miniters=100):
    img_path = PLANET_KAGGLE_TRAIN_JPEG_DIR + '/{}.jpg'.format(f)
    img = cv2.imread(img_path)
    targets = np.zeros(NUM_CLASSES)
    for t in tags.split(' '):
        targets[label_map[t]] = 1
    X_train.append(img)
    y_train.append(targets)
X_train = np.array(X_train, np.float32)
y_train = np.array(y_train, int)
print('Training data shape: {}' .format(X_train.shape))
print('Traing label shape: {}' .format(y_train.shape))
###################
# Data processing #
###################
img_dim = X_train.shape[1:]
# The channel axis depends on the Keras backend image ordering
# ("th" = channels-first, "tf" = channels-last).
if K.image_dim_ordering() == "th":
    n_channels = X_train.shape[1]
elif K.image_dim_ordering() == "tf":
    n_channels = X_train.shape[-1]
# Per-channel standardisation (zero mean, unit variance) of the training set.
if K.image_dim_ordering() == "th":
    for i in range(n_channels):
        mean_train = np.mean(X_train[:, i, :, :])
        std_train = np.std(X_train[:, i, :, :])
        X_train[:, i, :, :] = (X_train[:, i, :, :] - mean_train) / std_train
elif K.image_dim_ordering() == "tf":
    for i in range(n_channels):
        mean_train = np.mean(X_train[:, :, :, i])
        std_train = np.std(X_train[:, :, :, i])
        X_train[:, :, :, i] = (X_train[:, :, :, i] - mean_train) / std_train
print('Splitting to training data set and validation set:')
# Hold out 10% of the training data for validation / threshold tuning.
X_train, X_val, y_train, y_val = train_test_split(X_train, y_train, test_size=0.1)
print('Splitted training data set shape: {}' .format(X_train.shape))
print('Validation data set shape: {}' .format(X_val.shape))
#############
# Metrics #
############
def f2_beta(y_true, y_pred):
    """Sample-averaged F-beta score with beta=2 (recall-weighted), via sklearn."""
    return fbeta_score(y_true, y_pred, average='samples', beta=2)
def get_optimal_threshhold(y_true, y_pred, iterations=100):
    """Greedy per-class threshold search maximising the sample-wise F2 score.

    For each class, sweeps ``iterations`` candidate thresholds in [0, 1)
    while holding every other class at the 0.2 default, and keeps the
    candidate that yields the best F2 on (y_true, y_pred).

    FIX: the result list was hard-coded to 17 entries; it is now sized
    from NUM_CLASSES, consistent with the loop below and generalising to
    any label set (behavior unchanged when NUM_CLASSES == 17).

    :param y_true: binary ground-truth label matrix
    :param y_pred: per-class predicted probabilities
    :param iterations: number of threshold candidates per class
    :returns: list of NUM_CLASSES per-class thresholds
    """
    best_threshhold = [0.2] * NUM_CLASSES
    for t in range(NUM_CLASSES):
        best_fbeta = 0
        # Fresh baseline each round: only class t deviates from 0.2.
        temp_threshhold = [0.2] * NUM_CLASSES
        for i in range(iterations):
            temp_value = i / float(iterations)
            temp_threshhold[t] = temp_value
            temp_fbeta = f2_beta(y_true, y_pred > temp_threshhold)
            if temp_fbeta > best_fbeta:
                best_fbeta = temp_fbeta
                best_threshhold[t] = temp_value
    return best_threshhold
def f2_beta_keras(y_true, y_pred):
    """F2 metric for Keras, thresholding predictions with THRESHHOLD.

    FIX: false negatives are now computed from the binarised prediction
    ``y_pred_bin`` — the same tensor used for tp and fp.  The original
    compared against the raw ``y_pred``, which made recall inconsistent
    with precision.
    """
    beta = 2
    # just in case of hipster activation at the final layer
    y_pred = K.clip(y_pred, 0, 1)
    # Binarise predictions against the (possibly tuned) per-class thresholds.
    TR_tf = tf.cast(tf.constant(THRESHHOLD), tf.float32)
    # y_pred_bin = K.round( tf.add( y_pred ,TR_tf) )
    y_pred_bin = tf.cast(tf.greater(y_pred, TR_tf), tf.float32)
    # Epsilon keeps the precision/recall denominators away from zero.
    tp = K.sum(K.round(y_true * y_pred_bin)) + K.epsilon()
    fp = K.sum(K.round(K.clip(y_pred_bin - y_true, 0, 1)))
    fn = K.sum(K.round(K.clip(y_true - y_pred_bin, 0, 1)))
    precision = tp / (tp + fp)
    recall = tp / (tp + fn)
    beta_squared = beta ** 2
    return (beta_squared + 1) * (precision * recall) / (beta_squared * precision + recall + K.epsilon())
###################
# Construct model #
###################
def cnn_model():
    """Build the baseline CNN: two conv layers -> dense -> sigmoid outputs.

    Output layer is sigmoid per class (multi-label); compilation
    (loss/optimizer) is intentionally left to the caller.
    """
    net = Sequential()
    net.add(Conv2D(32, kernel_size=(3, 3), activation='relu',
                   input_shape=img_dim))
    net.add(Conv2D(64, (3, 3), activation='relu'))
    net.add(MaxPooling2D(pool_size=(2, 2)))
    net.add(Dropout(0.25))
    net.add(Flatten())
    net.add(Dense(128, activation='relu'))
    net.add(Dropout(0.5))
    net.add(Dense(NUM_CLASSES, activation='sigmoid'))
    return net
# learning rate schedule
def step_decay(epoch):
    """Step learning-rate schedule: start at 0.1, halve every 10 epochs."""
    base_lr = 0.1
    factor = 0.5
    period = 10.0
    steps = math.floor((1 + epoch) / period)
    return base_lr * math.pow(factor, steps)
# learning schedule callback
lrate = LearningRateScheduler(step_decay)
# Training hyper-parameters.
batch_size = 128
epochs = 50
learningrate = 0.1
decay = learningrate / epochs
# DenseNet hyper-parameters (used only by the commented-out model below).
depth = 40
nb_dense_block = 4
growth_rate = 48
nb_filter = 16
dropout_rate = 0.2 # 0.0 for data augmentation
weight_decay=1E-4
model = cnn_model()
# model = densenet.DenseNet(input_shape=img_dim, depth=depth, nb_dense_block=nb_dense_block,
#                           growth_rate=growth_rate, nb_filter=nb_filter, nb_layers_per_block=-1,
#                           bottleneck=True, reduction=0.0, dropout_rate=dropout_rate, weight_decay=weight_decay,
#                           include_top=True, weights=None, input_tensor=None,
#                           classes=NUM_CLASSES, activation='softmax')
print("Model created")
model.summary()
# optimizer = Adam(lr=1e-4) # Using Adam instead of SGD to speed up training
optimizer = SGD(lr=learningrate, decay=0.0, momentum=0.9, nesterov=True)
model.compile(loss='binary_crossentropy', optimizer=optimizer, metrics=['accuracy',f2_beta_keras])
print("Finished compiling")
print("Building model...")
model_file_path = './model/weights.{epoch:02d}-{val_loss:.2f}.hdf5'
check = ModelCheckpoint(model_file_path, monitor='val_loss', verbose=0, save_best_only=True, save_weights_only=False, mode='auto', period=1)
tensorboard = TensorBoard(log_dir='./logs',write_graph=True, write_images=False)
log_filename = './logs/training.csv'
csv_logger = CSVLogger(log_filename,separator=',',append=False)
# NOTE(review): `check` (ModelCheckpoint) is created but never passed to the
# callbacks list below, so no weights are saved during training — confirm
# whether that is intentional.
model.fit(X_train, y_train,
          batch_size=batch_size, epochs=epochs, shuffle=False,
          validation_data=(X_val, y_val),
          callbacks=[lrate,csv_logger,tensorboard])
# Free the training arrays before loading the (large) test set.
del X_train
del y_train
print("Loading test set:\n")
for f, tags in tqdm(df_test.values, miniters=100):
    img_path = PLANET_KAGGLE_TEST_JPEG_DIR + '/{}.jpg'.format(f)
    img = cv2.imread(img_path)
    X_test.append(img)
X_test = np.array(X_test, np.float32)
print('Test data shape: {}' .format(X_test.shape))
# Per-channel standardisation of the test set.
# NOTE(review): the test set is normalised with its own mean/std rather than
# the statistics computed on the training set — confirm this is intended.
if K.image_dim_ordering() == "th":
    for i in range(n_channels):
        mean_test = np.mean(X_test[:, i, :, :])
        std_test = np.std(X_test[:, i, :, :])
        X_test[:, i, :, :] = (X_test[:, i, :, :] - mean_test) / std_test
elif K.image_dim_ordering() == "tf":
    for i in range(n_channels):
        mean_test = np.mean(X_test[:, :, :, i])
        std_test = np.std(X_test[:, :, :, i])
        X_test[:, :, :, i] = (X_test[:, :, :, i] - mean_test) / std_test
y_pred = model.predict(X_test, batch_size=batch_size)
# Emit one submission per fixed threshold (0.01, 0.05, 0.10, 0.20)...
predictions = [' '.join(labels[y_pred_row > 0.01]) for y_pred_row in y_pred]
submission = pd.DataFrame()
submission['image_name'] = df_test.image_name.values
submission['tags'] = predictions
submission.to_csv('../../results/submission_CNN_1_THRESHHOLD_001.csv', index=False)
predictions = [' '.join(labels[y_pred_row > 0.05]) for y_pred_row in y_pred]
submission = pd.DataFrame()
submission['image_name'] = df_test.image_name.values
submission['tags'] = predictions
submission.to_csv('../../results/submission_CNN_1_THRESHHOLD_005.csv', index=False)
predictions = [' '.join(labels[y_pred_row > 0.10]) for y_pred_row in y_pred]
submission = pd.DataFrame()
submission['image_name'] = df_test.image_name.values
submission['tags'] = predictions
submission.to_csv('../../results/submission_CNN_1_THRESHHOLD_01.csv', index=False)
predictions = [' '.join(labels[y_pred_row > 0.20]) for y_pred_row in y_pred]
submission = pd.DataFrame()
submission['image_name'] = df_test.image_name.values
submission['tags'] = predictions
submission.to_csv('../../results/submission_CNN_1_THRESHHOLD_02.csv', index=False)
# ...then one more with per-class thresholds tuned on the validation split.
y_pred_val = model.predict(X_val, batch_size=batch_size)
THRESHHOLD = get_optimal_threshhold(y_val, y_pred_val, iterations = 100)
THRESHHOLD = np.array(THRESHHOLD)
predictions = [' '.join(labels[y_pred_row > THRESHHOLD]) for y_pred_row in y_pred]
submission = pd.DataFrame()
submission['image_name'] = df_test.image_name.values
submission['tags'] = predictions
submission.to_csv('../../results/submission_CNN_THRESHOLD_OPTIMAL.csv', index=False)
| {
"content_hash": "8710dc421768dc4d953c08e8b6c9ff12",
"timestamp": "",
"source": "github",
"line_count": 319,
"max_line_length": 146,
"avg_line_length": 32.5141065830721,
"alnum_prop": 0.6566718087157732,
"repo_name": "root-master/DenseNet-Kaggle-Planet",
"id": "5eb1e2b3408a95ccfc052f1a48d4125901882472",
"size": "10372",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "DenseNet_1.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "110982"
}
],
"symlink_target": ""
} |
import signify.pure as signify
import unittest
class SignifyTest(unittest.TestCase):
    """Known-answer tests (KAT) for the pure-Python signify implementation.

    FIX: the deprecated ``assertEquals`` alias (removed in Python 3.12)
    is replaced with ``assertEqual`` throughout; assertions unchanged.
    """

    # Known-answer vectors: a keypair (secret key protected with password
    # 'test'), a message, a valid signature, a corrupted signature, and an
    # embedded (message-carrying) signature.
    KAT = [
        {
            'pub': b"""untrusted comment: bjorntest public key
RWQ100QRGZoxU+Oy1g7Ko+8LjK1AQLIEavp/NuL54An1DC0U2cfCLKEl
""",
            'priv': b"""untrusted comment: bjorntest secret key
RWRCSwAAACqHVbmAUokJcTpgKhRbw+/W+Q7nrVPi3eU100QRGZoxU86ZWb3NjEp9ScrFddFy0o2D1KtZ0440imfaWmUebGfs0Hm+Fm9SCtaJgtjFtrUlPlmnjksY8zdcXr2NvjLsr0A=
""",
            'message': b"""my message
""",
            'sig': b"""untrusted comment: signature from bjorntest secret key
RWQ100QRGZoxU/gjzE8m6GYtfICqE0Ap8SdXRSHrpjnSBKMc2RMalgi5RKrEHmKfTmcsuB9ZzDCo6K6sYEqaEcEnnAFa0zCewAg=
""",
            'brokensig': b"""untrusted comment: signature from bjorntest secret key
RWQ100QRGZoxU/gjzE8m6GYtfICqE0Ap8SdXRSHrpjnSBKMc2RMXlgi5RKrEHmKfTmcsuB9ZzDCo6K6sYEqaEcEnnAFa0zCewAg=
""",
            'embedded': b"""untrusted comment: signature from bjorntest secret key
RWQ100QRGZoxU/gjzE8m6GYtfICqE0Ap8SdXRSHrpjnSBKMc2RMalgi5RKrEHmKfTmcsuB9ZzDCo6K6sYEqaEcEnnAFa0zCewAg=
my message
"""
        }
    ]

    def test_extraction(self):
        """Raw key material parsed from the KAT key files matches."""
        self.assertEqual(
            b'\xe3\xb2\xd6\x0e\xca\xa3\xef\x0b\x8c\xad@@\xb2\x04j\xfa\x7f6\xe2\xf9\xe0\t\xf5\x0c-\x14\xd9\xc7\xc2,\xa1%',
            signify.PublicKey.from_bytes(self.KAT[0]['pub']).raw())
        sk = signify.SecretKey.from_bytes(self.KAT[0]['priv'])
        sku = sk.unprotect('test')
        self.assertEqual(
            b'D@\xd9\xca\xb2\x96;\xa0^\xbb\x16\xc8\x0f\xf7Y=(hu\x85\xbd\xe4i\xf6\xcf\x0f\xfb#\xc1\xfa\xe0\xa1\xe3\xb2\xd6\x0e\xca\xa3\xef\x0b\x8c\xad@@\xb2\x04j\xfa\x7f6\xe2\xf9\xe0\t\xf5\x0c-\x14\xd9\xc7\xc2,\xa1%',
            sku.raw_secret_key())

    def test_verify_success(self):
        """A valid KAT signature verifies against its message."""
        self.assertTrue(
            signify.verify(signify.PublicKey.from_bytes(self.KAT[0]['pub']),
                           signify.Signature.from_bytes(self.KAT[0]['sig']),
                           self.KAT[0]['message']))

    def test_sign(self):
        """Signing the KAT message reproduces the KAT signature exactly."""
        sk = signify.SecretKey.from_bytes(self.KAT[0]['priv'])
        sku = sk.unprotect('test')
        sig = signify.sign(sku,
                           self.KAT[0]['message'])
        self.assertEqual(self.KAT[0]['sig'], sig.to_bytes())

    def test_sign_embedded(self):
        """Embedded signing reproduces the KAT embedded signature."""
        sk = signify.SecretKey.from_bytes(self.KAT[0]['priv'])
        sku = sk.unprotect('test')
        sig = signify.sign(sku,
                           self.KAT[0]['message'],
                           True)
        self.assertEqual(self.KAT[0]['embedded'], sig.to_bytes())

    def test_verify_embedded(self):
        """Embedded verification accepts both raw bytes and Signature input."""
        self.assertTrue(
            signify.verify_embedded(signify.PublicKey.from_bytes(self.KAT[0]['pub']),
                                    self.KAT[0]['embedded']))
        self.assertEqual(b'my message\n',
                         signify.verify_embedded(signify.PublicKey.from_bytes(self.KAT[0]['pub']),
                                                 signify.Signature.from_bytes(self.KAT[0]['embedded'])))

    def test_decrypt_secret_wrong_password(self):
        """Unprotecting with the wrong password raises KeyError."""
        self.assertRaises(KeyError,
                          signify.SecretKey.from_bytes(self.KAT[0]['priv']).unprotect,
                          'wrongpassword')

    def test_verify_failure(self):
        """A corrupted signature raises InvalidSignature."""
        self.assertRaises(
            signify.InvalidSignature,
            signify.verify, signify.PublicKey.from_bytes(self.KAT[0]['pub']),
            signify.Signature.from_bytes(self.KAT[0]['brokensig']),
            self.KAT[0]['message'])

    def test_generate_sign_no_password(self):
        """A fresh unprotected keypair can sign and verify a message."""
        pub, priv = signify.generate('test', None)
        self.assertTrue(pub.to_bytes().startswith(b'untrusted comment: test public key'))
        self.assertTrue(priv.to_bytes().startswith(b'untrusted comment: test secret key'))
        sku = priv.unprotect(None)
        sig = signify.sign(sku,
                           b'My Message')
        self.assertTrue(
            signify.verify(pub,
                           sig,
                           b'My Message'))

    def test_generate_no_comment(self):
        """Omitting the comment falls back to the 'signify' default."""
        pub, priv = signify.generate(None, None)
        self.assertTrue(pub.to_bytes().startswith(b'untrusted comment: signify public key'))
        self.assertTrue(priv.to_bytes().startswith(b'untrusted comment: signify secret key'))

    def test_generate_sign_with_password(self):
        """A password-protected keypair can sign and verify a message."""
        pub, priv = signify.generate(None, 'testpassword')
        self.assertTrue(pub.to_bytes().startswith(b'untrusted comment: signify public key'))
        self.assertTrue(priv.to_bytes().startswith(b'untrusted comment: signify secret key'))
        sku = priv.unprotect('testpassword')
        sig = signify.sign(sku,
                           b'My Message')
        self.assertTrue(
            signify.verify(pub,
                           sig,
                           b'My Message'))
# Allow running this test module directly: ``python pure_test.py``.
if __name__ == '__main__':
    unittest.main()
| {
"content_hash": "c28131e80d495e880b5bf74e3543898a",
"timestamp": "",
"source": "github",
"line_count": 123,
"max_line_length": 216,
"avg_line_length": 40.0650406504065,
"alnum_prop": 0.6034902597402597,
"repo_name": "bjornedstrom/python-signify",
"id": "f8fb85e1e7c271b013d859a7e25bcb5d8c7b1310",
"size": "5003",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/pure_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "52471"
},
{
"name": "Shell",
"bytes": "2521"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import os.path
import subprocess
from pre_commit.util import cmd_output
class PrefixedCommandRunner(object):
    """Runs subprocess commands with command substitution.

    Every ``{prefix}`` occurrence in a command part is replaced by this
    runner's prefix directory.  For instance:

        PrefixedCommandRunner('/tmp/foo').run(['{prefix}foo.sh', 'bar', 'baz'])

    will run ['/tmp/foo/foo.sh', 'bar', 'baz']
    """

    def __init__(
            self,
            prefix_dir,
            popen=subprocess.Popen,
            makedirs=os.makedirs
    ):
        # Normalise to exactly one trailing separator.
        self.prefix_dir = prefix_dir.rstrip(os.sep) + os.sep
        self.__popen = popen
        self.__makedirs = makedirs

    def _create_path_if_not_exists(self):
        """Create the prefix directory on first use."""
        if not os.path.exists(self.prefix_dir):
            self.__makedirs(self.prefix_dir)

    def run(self, cmd, **kwargs):
        """Substitute ``{prefix}`` in *cmd* and execute it via cmd_output."""
        self._create_path_if_not_exists()
        substituted = [part.replace('{prefix}', self.prefix_dir) for part in cmd]
        return cmd_output(*substituted, __popen=self.__popen, **kwargs)

    def path(self, *parts):
        """Return a normalised path rooted at the prefix directory."""
        return os.path.normpath(os.path.join(self.prefix_dir, *parts))

    def exists(self, *parts):
        """True if the prefixed path exists on disk."""
        return os.path.exists(self.path(*parts))

    def star(self, end):
        """All entries in the prefix directory whose names end with *end*."""
        return tuple(
            entry for entry in os.listdir(self.prefix_dir)
            if entry.endswith(end)
        )
| {
"content_hash": "9387d6e365d450804a6561a5c3837c54",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 79,
"avg_line_length": 27.94,
"alnum_prop": 0.5977093772369363,
"repo_name": "philipgian/pre-commit",
"id": "6ae850997c1a2f5281debce5ff484dde747cfb33",
"size": "1397",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pre_commit/prefixed_command_runner.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Go",
"bytes": "240"
},
{
"name": "JavaScript",
"bytes": "128"
},
{
"name": "Makefile",
"bytes": "377"
},
{
"name": "Python",
"bytes": "259482"
},
{
"name": "Ruby",
"bytes": "817"
},
{
"name": "Shell",
"bytes": "2932"
},
{
"name": "Swift",
"bytes": "104"
}
],
"symlink_target": ""
} |
import web
from nailgun.api.v1.handlers.base import BaseHandler
from nailgun.api.v1.handlers.base import CollectionHandler
from nailgun.api.v1.handlers.base import SingleHandler
from nailgun.api.v1.handlers.base import content_json
from nailgun.api.v1.validators.node_group import NodeGroupValidator
from nailgun.db import db
from nailgun import objects
"""
Handlers dealing with node groups
"""
class NodeGroupHandler(SingleHandler):
    """Single node-group handler.

    Reads/updates come from SingleHandler; DELETE is customised to answer
    204 No Content.
    """

    single = objects.NodeGroup
    validator = NodeGroupValidator

    def DELETE(self, group_id):
        """Delete the node group (404 if absent) and return 204."""
        group = self.get_object_or_404(objects.NodeGroup, group_id)
        db().delete(group)
        db().commit()
        # web.py signals the empty-success response via an HTTPError raise.
        raise web.webapi.HTTPError(status="204 No Content", data="")
class NodeGroupCollectionHandler(CollectionHandler):
    """Node-group collection handler."""

    collection = objects.NodeGroupCollection
    validator = NodeGroupValidator

    @content_json
    def GET(self):
        """May receive cluster_id parameter to filter list of groups.

        :returns: Collection of JSONized Task objects.
        :http: * 200 (OK)
               * 404 (task not found in db)
        """
        user_data = web.input(cluster_id=None)
        if user_data.cluster_id is None:
            return self.collection.to_json()
        filtered = self.collection.get_by_cluster_id(user_data.cluster_id)
        return self.collection.to_json(query=filtered)
class NodeGroupAssignmentHandler(BaseHandler):
    """Node group assignment handler."""

    @content_json
    def POST(self, group_id):
        """Assign the posted nodes to the given node group.

        :returns: Http response.
        :http: * 201 (nodes are successfully assigned)
               * 400 (invalid nodes data specified)
        """
        # 404 if the target group does not exist.
        self.get_object_or_404(
            objects.NodeGroup,
            group_id
        )
        node_ids = self.checked_data()
        assignees = self.get_objects_list_or_404(
            objects.NodeCollection,
            node_ids
        )
        for assignee in assignees:
            objects.Node.update(assignee, {"group_id": group_id})
| {
"content_hash": "18800729eb7a3fe2a79e9104fdb8b496",
"timestamp": "",
"source": "github",
"line_count": 85,
"max_line_length": 72,
"avg_line_length": 25.847058823529412,
"alnum_prop": 0.6149294492489759,
"repo_name": "andrei4ka/fuel-web-redhat",
"id": "31e92728903af5da5a4a27bc737268a8896c30bd",
"size": "2832",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nailgun/nailgun/api/v1/handlers/node_group.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "100524"
},
{
"name": "JavaScript",
"bytes": "639783"
},
{
"name": "Makefile",
"bytes": "5891"
},
{
"name": "Puppet",
"bytes": "282"
},
{
"name": "Python",
"bytes": "3206343"
},
{
"name": "Ruby",
"bytes": "33423"
},
{
"name": "Shell",
"bytes": "31460"
}
],
"symlink_target": ""
} |
"""Package contenant la commande 'autoquête' et ses sous-commandes.
Dans ce fichier se trouve la commande même.
"""
from primaires.interpreteur.commande.commande import Commande
from .edit import PrmEdit
from .liste import PrmListe
class CmdAutoquete(Commande):

    """The 'autoquête' (autoquest) command.

    Administrator-level command grouping the autoquest sub-commands
    (edit, list).
    """

    def __init__(self):
        """Build the command and set its group and help texts."""
        Commande.__init__(self, "autoquête", "autoquest")
        self.groupe = "administrateur"
        self.aide_courte = "manipulation des autoquêtes"
        self.aide_longue = \
            "Cette commande permet de créer, éditer et lister les " \
            "autoquêtes."

    def ajouter_parametres(self):
        """Register this command's sub-commands (parameters)."""
        self.ajouter_parametre(PrmEdit())
        self.ajouter_parametre(PrmListe())
| {
"content_hash": "ba426c75094abdf68089d9241ad1ef46",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 69,
"avg_line_length": 28.344827586206897,
"alnum_prop": 0.6386861313868614,
"repo_name": "vlegoff/tsunami",
"id": "3eda6e18865857e246879bf4470780e9726f6b62",
"size": "2401",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/primaires/autoquetes/commandes/autoquete/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "7930908"
},
{
"name": "Ruby",
"bytes": "373"
}
],
"symlink_target": ""
} |
import re, getpass
from kokoropy import request
from kokoropy.model import DB_Model, or_, and_, Column, ForeignKey, func,\
Integer, String, Date, DateTime, Boolean, Text, relationship, backref, association_proxy
from _config import session, encrypt_password
from _all import engine, Cms, Group, Third_Party_Authenticator, Page, Page_Groups,\
Theme, Layout, Widget, Widget_Groups, User, User_Third_Party_Identities, User_Groups,\
Language, Language_Detail, Configuration
def do_login(identity, password):
    """Authenticate by username-or-email plus password.

    On success the user's id is stored in the session and True is
    returned; otherwise False.
    """
    matches = User.get(and_(
        or_(User.username == identity, User.email == identity),
        User.encrypted_password == encrypt_password(password)))
    if not matches:
        return False
    request.SESSION['__user_id'] = matches[0].id
    return True
def do_logout():
    """Forget the authenticated user, if any (no-op when not logged in)."""
    if '__user_id' in request.SESSION:
        del request.SESSION['__user_id']
def get_current_user():
    """Return the logged-in User object, or None when unauthenticated."""
    if '__user_id' not in request.SESSION:
        return None
    return User.find(request.SESSION['__user_id'])
def get_pages(*criterion):
    """Return the active pages visible to the current user.

    Visibility rules (Page.authorization):
      * 'everyone'          -- always visible
      * 'unauthenticated'   -- only when nobody is logged in
      * 'authenticated'     -- only when someone is logged in
      * 'authorized'        -- super admins, or users sharing a group with the page
      * 'strict_authorized' -- only users sharing a group with the page
    Extra SQLAlchemy filter clauses may be passed via ``criterion``.

    FIX: ``except Exception, e`` bound an unused name with Python-2-only
    syntax; replaced with the equivalent ``except Exception:`` (valid on
    both Python 2 and 3).
    """
    current_user = get_current_user()
    is_super_admin = current_user.super_admin if current_user is not None else False
    current_user_id = current_user.id if current_user is not None else None
    try:
        # Per page: how many groups the current user shares with it.
        subquery = session.query(func.count(Group.id).label('group_count'), Page_Groups.fk_page).\
            join(User_Groups).\
            join(User).\
            join(Page_Groups).\
            filter(User.id == current_user_id).\
            subquery("subquery")
        return session.query(Page).\
            filter(Page.active == True).\
            filter(*criterion).\
            filter(
                or_(
                    Page.authorization == 'everyone', # EVERYONE
                    and_(Page.authorization == 'unauthenticated', current_user is None), # UNAUTHENTICATED
                    and_(Page.authorization == 'authenticated', current_user is not None), # AUTHENTICATED
                    and_(Page.authorization == 'authorized', is_super_admin), # AUTHORIZED & Super Admin
                    and_( # AUTHORIZED or STRICT_AUTHORIZED
                        or_(
                            Page.authorization == 'authorized',
                            Page.authorization == 'strict_authorized'
                        ),
                        and_(
                            subquery.c.group_count > 0,
                            subquery.c.fk_page == Page._real_id
                        )
                    )
                )
            ).\
            all()
    except Exception:
        # Roll the failed transaction back, then re-raise unchanged so the
        # caller still sees the original error.
        session.rollback()
        raise
def get_widgets(*criterion):
    """Return the active widgets visible to the current user.

    Same authorization rules as ``get_pages``, applied to Widget rows
    via Widget_Groups.  Extra SQLAlchemy filter clauses may be passed
    via ``criterion``.

    FIX: ``except Exception, e`` bound an unused name with Python-2-only
    syntax; replaced with the equivalent ``except Exception:`` (valid on
    both Python 2 and 3).
    """
    current_user = get_current_user()
    is_super_admin = current_user.super_admin if current_user is not None else False
    current_user_id = current_user.id if current_user is not None else None
    try:
        # Per widget: how many groups the current user shares with it.
        subquery = session.query(func.count(Group.id).label('group_count'), Widget_Groups.fk_widget).\
            join(User_Groups).\
            join(User).\
            join(Widget_Groups).\
            filter(User.id == current_user_id).\
            subquery("subquery")
        return session.query(Widget).\
            filter(Widget.active == True).\
            filter(*criterion).\
            filter(
                or_(
                    Widget.authorization == 'everyone', # EVERYONE
                    and_(Widget.authorization == 'unauthenticated', current_user is None), # UNAUTHENTICATED
                    and_(Widget.authorization == 'authenticated', current_user is not None), # AUTHENTICATED
                    and_(Widget.authorization == 'authorized', is_super_admin), # AUTHORIZED & Super Admin
                    and_( # AUTHORIZED or STRICT_AUTHORIZED
                        or_(
                            Widget.authorization == 'authorized',
                            Widget.authorization == 'strict_authorized'
                        ),
                        and_(
                            subquery.c.group_count > 0,
                            subquery.c.fk_widget == Widget._real_id
                        )
                    )
                )
            ).\
            all()
    except Exception:
        # Roll the failed transaction back, then re-raise unchanged.
        session.rollback()
        raise
def insert_default():
    """Seed the database: ensure a 'Super Admin' group and a first user.

    Interactive: when no user exists, prompts on stdin for the account
    details (passwords read without echo via getpass).  Prompt order is
    part of the observable behavior — do not reorder.
    """
    # default action
    # Ensure the 'Super Admin' group exists (create on first run).
    if Group.count() == 0:
        super_admin = Group()
        super_admin.name = 'Super Admin'
        super_admin.save()
    else:
        super_admin = Group.get()[0]
    if User.count() == 0:
        print('No user registered to this system. Please add a new one !!!')
        # NOTE: raw_input — this module targets Python 2.
        username = raw_input('New user name : ')
        realname = raw_input('Real name : ')
        email = ''
        password = ''
        confirm_password = ''
        while True:
            email = raw_input('Email : ')
            # Loose sanity check only: something@something.something
            if re.match(r'[^@]+@[^@]+\.[^@]+', email):
                break
            else:
                print('Invalid email address, please insert again')
        while True:
            password = getpass.getpass('Password : ')
            confirm_password = getpass.getpass('Password (again) :')
            if password == confirm_password:
                break
            else:
                print('Password doesn\'t match, please insert again')
        super_user = User()
        super_user.username = username
        super_user.realname = realname
        super_user.email = email
        super_user.password = password
        # The first user becomes a member of the Super Admin group.
        super_user.groups.append(super_admin)
        super_user.save()
"content_hash": "f62599b98776503ba3cf9ca34d916b1a",
"timestamp": "",
"source": "github",
"line_count": 144,
"max_line_length": 141,
"avg_line_length": 43.951388888888886,
"alnum_prop": 0.46974245536419656,
"repo_name": "goFrendiAsgard/kokoropy",
"id": "19b29c8dacef3f55ea7260d49069d7d2390576b4",
"size": "6329",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "kokoropy/scaffolding/scaffold_cms/models/_util.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "46062"
},
{
"name": "CSS",
"bytes": "241059"
},
{
"name": "HTML",
"bytes": "856393"
},
{
"name": "JavaScript",
"bytes": "122897"
},
{
"name": "Makefile",
"bytes": "2339"
},
{
"name": "Mako",
"bytes": "4613"
},
{
"name": "PHP",
"bytes": "2274"
},
{
"name": "Python",
"bytes": "4235520"
}
],
"symlink_target": ""
} |
from collections import OrderedDict
import json
import logging
import sys
import django
from django.conf import settings
from django.contrib.auth import REDIRECT_FIELD_NAME # noqa
from django.core.urlresolvers import reverse
from django.forms import widgets
from django import http
import django.test
from django.utils import encoding
from django.utils.http import urlencode
from mox3.mox import IgnoreArg # noqa
from mox3.mox import IsA # noqa
import six
from horizon import exceptions
from horizon import forms
from horizon.workflows import views
from openstack_dashboard import api
from openstack_dashboard.api import cinder
from openstack_dashboard.dashboards.project.instances import console
from openstack_dashboard.dashboards.project.instances import tables
from openstack_dashboard.dashboards.project.instances import tabs
from openstack_dashboard.dashboards.project.instances import workflows
from openstack_dashboard.test import helpers
from openstack_dashboard.usage import quotas
# URL of the project instances index page.
INDEX_URL = reverse('horizon:project:instances:index')
# Field-name prefix used by the instance security-group membership step.
SEC_GROUP_ROLE_PREFIX = \
    workflows.update_instance.INSTANCE_SEC_GROUP_SLUG + "_role_"
# Cinder search filters: bootable available volumes / available snapshots.
AVAILABLE = api.cinder.VOLUME_STATE_AVAILABLE
VOLUME_SEARCH_OPTS = dict(status=AVAILABLE, bootable=1)
SNAPSHOT_SEARCH_OPTS = dict(status=AVAILABLE)
class InstanceTests(helpers.TestCase):
@helpers.create_stubs({
    api.nova: (
        'flavor_list',
        'server_list',
        'tenant_absolute_limits',
        'extension_supported',
    ),
    api.glance: ('image_list_detailed',),
    api.network: (
        'floating_ip_simple_associate_supported',
        'floating_ip_supported',
        'servers_update_addresses',
    ),
})
def _get_index(self):
    """Record the standard mox expectations for the index view, then GET it.

    NOTE: mox verifies recorded expectations against the calls the view
    actually makes — the recording sequence below is order-sensitive, so
    do not reorder these lines.
    """
    servers = self.servers.list()
    api.nova.extension_supported('AdminActions',
                                 IsA(http.HttpRequest)) \
        .MultipleTimes().AndReturn(True)
    api.nova.extension_supported('Shelve', IsA(http.HttpRequest)) \
        .MultipleTimes().AndReturn(True)
    api.nova.flavor_list(IsA(http.HttpRequest)) \
        .AndReturn(self.flavors.list())
    api.glance.image_list_detailed(IgnoreArg()) \
        .AndReturn((self.images.list(), False, False))
    search_opts = {'marker': None, 'paginate': True}
    api.nova.server_list(IsA(http.HttpRequest), search_opts=search_opts) \
        .AndReturn([servers, False])
    api.network.servers_update_addresses(IsA(http.HttpRequest), servers)
    api.nova.tenant_absolute_limits(IsA(http.HttpRequest), reserved=True) \
        .MultipleTimes().AndReturn(self.limits['absolute'])
    api.network.floating_ip_supported(IsA(http.HttpRequest)) \
        .MultipleTimes().AndReturn(True)
    api.network.floating_ip_simple_associate_supported(
        IsA(http.HttpRequest)).MultipleTimes().AndReturn(True)
    self.mox.ReplayAll()
    # All stubs recorded — issue the real request against the index URL.
    return self.client.get(INDEX_URL)
def test_index(self):
    """The index page renders every server and shows no quota warning."""
    response = self._get_index()
    self.assertTemplateUsed(response,
                            'project/instances/index.html')
    rows = response.context['instances_table'].data
    self.assertItemsEqual(rows, self.servers.list())
    self.assertNotContains(response, "Launch Instance (Quota exceeded)")
@helpers.create_stubs({api.nova: ('server_list',
                                  'tenant_absolute_limits',)})
def test_index_server_list_exception(self):
    """A nova failure in server_list yields an empty table plus one
    error message, while the page itself still renders.

    NOTE: mox expectations below are order-sensitive; do not reorder.
    """
    search_opts = {'marker': None, 'paginate': True}
    api.nova.server_list(IsA(http.HttpRequest), search_opts=search_opts) \
        .AndRaise(self.exceptions.nova)
    api.nova.tenant_absolute_limits(IsA(http.HttpRequest), reserved=True) \
        .MultipleTimes().AndReturn(self.limits['absolute'])
    self.mox.ReplayAll()
    res = self.client.get(INDEX_URL)
    self.assertTemplateUsed(res, 'project/instances/index.html')
    self.assertEqual(len(res.context['instances_table'].data), 0)
    self.assertMessageCount(res, error=1)
@helpers.create_stubs({
    api.nova: ('flavor_list', 'server_list', 'flavor_get',
               'tenant_absolute_limits', 'extension_supported',),
    api.glance: ('image_list_detailed',),
    api.network: ('floating_ip_simple_associate_supported',
                  'floating_ip_supported',
                  'servers_update_addresses',),
})
def test_index_flavor_list_exception(self):
    """When flavor_list fails, the view falls back to per-server flavor_get
    and the index still renders every instance.
    """
    servers = self.servers.list()
    flavors = self.flavors.list()
    full_flavors = OrderedDict([(f.id, f) for f in flavors])
    search_opts = {'marker': None, 'paginate': True}
    api.nova.extension_supported('AdminActions',
                                 IsA(http.HttpRequest)) \
        .MultipleTimes().AndReturn(True)
    api.nova.extension_supported('Shelve', IsA(http.HttpRequest)) \
        .MultipleTimes().AndReturn(True)
    api.nova.server_list(IsA(http.HttpRequest), search_opts=search_opts) \
        .AndReturn([servers, False])
    api.network.servers_update_addresses(IsA(http.HttpRequest), servers)
    # The bulk flavor listing raises ...
    api.nova.flavor_list(IsA(http.HttpRequest)) \
        .AndRaise(self.exceptions.nova)
    api.glance.image_list_detailed(IgnoreArg()) \
        .AndReturn((self.images.list(), False, False))
    # ... so one flavor_get per server is expected instead.
    for server in servers:
        api.nova.flavor_get(IsA(http.HttpRequest), server.flavor["id"]). \
            AndReturn(full_flavors[server.flavor["id"]])
    api.nova.tenant_absolute_limits(IsA(http.HttpRequest), reserved=True) \
        .MultipleTimes().AndReturn(self.limits['absolute'])
    api.network.floating_ip_supported(IsA(http.HttpRequest)) \
        .MultipleTimes().AndReturn(True)
    api.network.floating_ip_simple_associate_supported(
        IsA(http.HttpRequest)).MultipleTimes().AndReturn(True)
    self.mox.ReplayAll()

    res = self.client.get(INDEX_URL)

    self.assertTemplateUsed(res, 'project/instances/index.html')
    instances = res.context['instances_table'].data
    self.assertItemsEqual(instances, self.servers.list())
@helpers.create_stubs({
    api.nova: ('flavor_list', 'server_list', 'tenant_absolute_limits',
               'extension_supported',),
    api.glance: ('image_list_detailed',),
    api.network: ('floating_ip_simple_associate_supported',
                  'floating_ip_supported',
                  'servers_update_addresses',),
})
def test_index_with_instance_booted_from_volume(self):
    """A boot-from-volume server (empty image) renders "(not found)"."""
    volume_server = self.servers.first()
    # Simulate a server booted from a volume: no image is attached.
    volume_server.image = ""
    volume_server.image_name = "(not found)"
    servers = self.servers.list()
    servers[0] = volume_server

    api.nova.extension_supported('AdminActions',
                                 IsA(http.HttpRequest)) \
        .MultipleTimes().AndReturn(True)
    api.nova.extension_supported('Shelve', IsA(http.HttpRequest)) \
        .MultipleTimes().AndReturn(True)
    api.nova.flavor_list(IsA(http.HttpRequest)) \
        .AndReturn(self.flavors.list())
    api.glance.image_list_detailed(IgnoreArg()) \
        .AndReturn((self.images.list(), False, False))
    search_opts = {'marker': None, 'paginate': True}
    api.nova.server_list(IsA(http.HttpRequest), search_opts=search_opts) \
        .AndReturn([servers, False])
    api.network.servers_update_addresses(IsA(http.HttpRequest), servers)
    api.nova.tenant_absolute_limits(IsA(http.HttpRequest), reserved=True) \
        .MultipleTimes().AndReturn(self.limits['absolute'])
    api.network.floating_ip_supported(IsA(http.HttpRequest)) \
        .MultipleTimes().AndReturn(True)
    api.network.floating_ip_simple_associate_supported(
        IsA(http.HttpRequest)).MultipleTimes().AndReturn(True)
    self.mox.ReplayAll()

    res = self.client.get(INDEX_URL)

    self.assertTemplateUsed(res, 'project/instances/index.html')
    instances = res.context['instances_table'].data
    self.assertEqual(len(instances), len(servers))
    self.assertContains(res, "(not found)")
def test_index_with_console_link(self):
    """At least one row action on the index table is a console link."""
    response = self._get_index()
    table = response.context['instances_table']

    console_link_rendered = any(
        isinstance(row_action, tables.ConsoleLink)
        for server_row in table.data
        for row_action in table.get_row_actions(server_row))
    self.assertTrue(console_link_rendered)
@django.test.utils.override_settings(CONSOLE_TYPE=None)
def test_index_without_console_link(self):
    """With CONSOLE_TYPE disabled, no row action is a console link."""
    response = self._get_index()
    table = response.context['instances_table']

    for server_row in table.data:
        for row_action in table.get_row_actions(server_row):
            self.assertNotIsInstance(row_action, tables.ConsoleLink)
@helpers.create_stubs({api.nova: ('server_list',
                                  'flavor_list',
                                  'server_delete',),
                       api.glance: ('image_list_detailed',),
                       api.network: ('servers_update_addresses',)})
def test_terminate_instance(self):
    """The terminate table action calls server_delete and redirects."""
    servers = self.servers.list()
    server = servers[0]

    search_opts = {'marker': None, 'paginate': True}
    api.nova.server_list(IsA(http.HttpRequest), search_opts=search_opts) \
        .AndReturn([servers, False])
    api.network.servers_update_addresses(IsA(http.HttpRequest), servers)
    api.nova.flavor_list(IgnoreArg()).AndReturn(self.flavors.list())
    api.glance.image_list_detailed(IgnoreArg()) \
        .AndReturn((self.images.list(), False, False))
    api.nova.server_delete(IsA(http.HttpRequest), server.id)
    self.mox.ReplayAll()

    formData = {'action': 'instances__terminate__%s' % server.id}
    res = self.client.post(INDEX_URL, formData)

    self.assertRedirectsNoFollow(res, INDEX_URL)
@helpers.create_stubs({api.nova: ('server_list',
                                  'flavor_list',
                                  'server_delete',),
                       api.glance: ('image_list_detailed',),
                       api.network: ('servers_update_addresses',)})
def test_terminate_instance_exception(self):
    """A failing server_delete still redirects back to the index page."""
    servers = self.servers.list()
    server = servers[0]

    search_opts = {'marker': None, 'paginate': True}
    api.nova.server_list(IsA(http.HttpRequest), search_opts=search_opts) \
        .AndReturn([servers, False])
    api.network.servers_update_addresses(IsA(http.HttpRequest), servers)
    api.nova.flavor_list(IgnoreArg()).AndReturn(self.flavors.list())
    api.glance.image_list_detailed(IgnoreArg()) \
        .AndReturn((self.images.list(), False, False))
    api.nova.server_delete(IsA(http.HttpRequest), server.id) \
        .AndRaise(self.exceptions.nova)
    self.mox.ReplayAll()

    formData = {'action': 'instances__terminate__%s' % server.id}
    res = self.client.post(INDEX_URL, formData)

    self.assertRedirectsNoFollow(res, INDEX_URL)
@helpers.create_stubs({api.nova: ('server_pause',
                                  'server_list',
                                  'flavor_list',
                                  'extension_supported',),
                       api.glance: ('image_list_detailed',),
                       api.network: ('servers_update_addresses',)})
def test_pause_instance(self):
    """The pause action calls server_pause on an active instance."""
    servers = self.servers.list()
    server = servers[0]

    api.nova.extension_supported('AdminActions',
                                 IsA(http.HttpRequest)) \
        .MultipleTimes().AndReturn(True)
    api.nova.flavor_list(IsA(http.HttpRequest)) \
        .AndReturn(self.flavors.list())
    api.glance.image_list_detailed(IgnoreArg()) \
        .AndReturn((self.images.list(), False, False))
    search_opts = {'marker': None, 'paginate': True}
    api.nova.server_list(IsA(http.HttpRequest), search_opts=search_opts) \
        .AndReturn([servers, False])
    api.network.servers_update_addresses(IsA(http.HttpRequest), servers)
    api.nova.server_pause(IsA(http.HttpRequest), server.id)
    self.mox.ReplayAll()

    formData = {'action': 'instances__pause__%s' % server.id}
    res = self.client.post(INDEX_URL, formData)

    self.assertRedirectsNoFollow(res, INDEX_URL)
@helpers.create_stubs({api.nova: ('server_pause',
                                  'server_list',
                                  'flavor_list',
                                  'extension_supported',),
                       api.glance: ('image_list_detailed',),
                       api.network: ('servers_update_addresses',)})
def test_pause_instance_exception(self):
    """A failing server_pause still redirects back to the index page."""
    servers = self.servers.list()
    server = servers[0]

    api.nova.extension_supported('AdminActions',
                                 IsA(http.HttpRequest)) \
        .MultipleTimes().AndReturn(True)
    api.nova.flavor_list(IsA(http.HttpRequest)) \
        .AndReturn(self.flavors.list())
    api.glance.image_list_detailed(IgnoreArg()) \
        .AndReturn((self.images.list(), False, False))
    search_opts = {'marker': None, 'paginate': True}
    api.nova.server_list(IsA(http.HttpRequest), search_opts=search_opts) \
        .AndReturn([servers, False])
    api.network.servers_update_addresses(IsA(http.HttpRequest), servers)
    api.nova.server_pause(IsA(http.HttpRequest), server.id) \
        .AndRaise(self.exceptions.nova)
    self.mox.ReplayAll()

    formData = {'action': 'instances__pause__%s' % server.id}
    res = self.client.post(INDEX_URL, formData)

    self.assertRedirectsNoFollow(res, INDEX_URL)
@helpers.create_stubs({api.nova: ('server_unpause',
                                  'server_list',
                                  'flavor_list',
                                  'extension_supported',),
                       api.glance: ('image_list_detailed',),
                       api.network: ('servers_update_addresses',)})
def test_unpause_instance(self):
    """The pause toggle on a PAUSED instance calls server_unpause."""
    servers = self.servers.list()
    server = servers[0]
    server.status = "PAUSED"
    api.nova.extension_supported('AdminActions',
                                 IsA(http.HttpRequest)) \
        .MultipleTimes().AndReturn(True)
    api.nova.flavor_list(IsA(http.HttpRequest)) \
        .AndReturn(self.flavors.list())
    api.glance.image_list_detailed(IgnoreArg()) \
        .AndReturn((self.images.list(), False, False))
    search_opts = {'marker': None, 'paginate': True}
    api.nova.server_list(IsA(http.HttpRequest), search_opts=search_opts) \
        .AndReturn([servers, False])
    api.network.servers_update_addresses(IsA(http.HttpRequest), servers)
    api.nova.server_unpause(IsA(http.HttpRequest), server.id)
    self.mox.ReplayAll()

    # "pause" is a toggle action: on a PAUSED server it unpauses.
    formData = {'action': 'instances__pause__%s' % server.id}
    res = self.client.post(INDEX_URL, formData)

    self.assertRedirectsNoFollow(res, INDEX_URL)
@helpers.create_stubs({api.nova: ('server_unpause',
                                  'server_list',
                                  'flavor_list',
                                  'extension_supported',),
                       api.glance: ('image_list_detailed',),
                       api.network: ('servers_update_addresses',)})
def test_unpause_instance_exception(self):
    """A failing server_unpause still redirects back to the index page."""
    servers = self.servers.list()
    server = servers[0]
    server.status = "PAUSED"
    api.nova.extension_supported('AdminActions',
                                 IsA(http.HttpRequest)) \
        .MultipleTimes().AndReturn(True)
    api.nova.flavor_list(IsA(http.HttpRequest)) \
        .AndReturn(self.flavors.list())
    api.glance.image_list_detailed(IgnoreArg()) \
        .AndReturn((self.images.list(), False, False))
    search_opts = {'marker': None, 'paginate': True}
    api.nova.server_list(IsA(http.HttpRequest), search_opts=search_opts) \
        .AndReturn([servers, False])
    api.network.servers_update_addresses(IsA(http.HttpRequest), servers)
    api.nova.server_unpause(IsA(http.HttpRequest), server.id) \
        .AndRaise(self.exceptions.nova)
    self.mox.ReplayAll()

    # "pause" is a toggle action: on a PAUSED server it unpauses.
    formData = {'action': 'instances__pause__%s' % server.id}
    res = self.client.post(INDEX_URL, formData)

    self.assertRedirectsNoFollow(res, INDEX_URL)
@helpers.create_stubs({api.nova: ('server_reboot',
                                  'server_list',
                                  'flavor_list',),
                       api.glance: ('image_list_detailed',),
                       api.network: ('servers_update_addresses',)})
def test_reboot_instance(self):
    """The reboot action issues a hard reboot (soft_reboot=False)."""
    servers = self.servers.list()
    server = servers[0]
    api.nova.flavor_list(IsA(http.HttpRequest)) \
        .AndReturn(self.flavors.list())
    api.glance.image_list_detailed(IgnoreArg()) \
        .AndReturn((self.images.list(), False, False))
    search_opts = {'marker': None, 'paginate': True}
    api.nova.server_list(IsA(http.HttpRequest), search_opts=search_opts) \
        .AndReturn([servers, False])
    api.network.servers_update_addresses(IsA(http.HttpRequest), servers)
    api.nova.server_reboot(IsA(http.HttpRequest), server.id,
                           soft_reboot=False)
    self.mox.ReplayAll()

    formData = {'action': 'instances__reboot__%s' % server.id}
    res = self.client.post(INDEX_URL, formData)

    self.assertRedirectsNoFollow(res, INDEX_URL)
@helpers.create_stubs({api.nova: ('server_reboot',
                                  'server_list',
                                  'flavor_list',),
                       api.glance: ('image_list_detailed',),
                       api.network: ('servers_update_addresses',)})
def test_reboot_instance_exception(self):
    """A failing server_reboot still redirects back to the index page."""
    servers = self.servers.list()
    server = servers[0]
    api.nova.flavor_list(IsA(http.HttpRequest)) \
        .AndReturn(self.flavors.list())
    api.glance.image_list_detailed(IgnoreArg()) \
        .AndReturn((self.images.list(), False, False))
    search_opts = {'marker': None, 'paginate': True}
    api.nova.server_list(IsA(http.HttpRequest), search_opts=search_opts) \
        .AndReturn([servers, False])
    api.network.servers_update_addresses(IsA(http.HttpRequest), servers)
    api.nova.server_reboot(IsA(http.HttpRequest), server.id,
                           soft_reboot=False) \
        .AndRaise(self.exceptions.nova)
    self.mox.ReplayAll()

    formData = {'action': 'instances__reboot__%s' % server.id}
    res = self.client.post(INDEX_URL, formData)

    self.assertRedirectsNoFollow(res, INDEX_URL)
@helpers.create_stubs({api.nova: ('server_reboot',
                                  'server_list',
                                  'flavor_list',),
                       api.glance: ('image_list_detailed',),
                       api.network: ('servers_update_addresses',)})
def test_soft_reboot_instance(self):
    """The soft_reboot action issues a soft reboot (soft_reboot=True)."""
    servers = self.servers.list()
    server = servers[0]
    api.nova.flavor_list(IsA(http.HttpRequest)) \
        .AndReturn(self.flavors.list())
    api.glance.image_list_detailed(IgnoreArg()) \
        .AndReturn((self.images.list(), False, False))
    search_opts = {'marker': None, 'paginate': True}
    api.nova.server_list(IsA(http.HttpRequest), search_opts=search_opts) \
        .AndReturn([servers, False])
    api.network.servers_update_addresses(IsA(http.HttpRequest), servers)
    api.nova.server_reboot(IsA(http.HttpRequest), server.id,
                           soft_reboot=True)
    self.mox.ReplayAll()

    formData = {'action': 'instances__soft_reboot__%s' % server.id}
    res = self.client.post(INDEX_URL, formData)

    self.assertRedirectsNoFollow(res, INDEX_URL)
@helpers.create_stubs({api.nova: ('server_suspend',
                                  'server_list',
                                  'flavor_list',
                                  'extension_supported',),
                       api.glance: ('image_list_detailed',),
                       api.network: ('servers_update_addresses',)})
def test_suspend_instance(self):
    """The suspend action calls server_suspend with the id as text."""
    servers = self.servers.list()
    server = servers[0]
    api.nova.extension_supported('AdminActions',
                                 IsA(http.HttpRequest)) \
        .MultipleTimes().AndReturn(True)
    api.nova.flavor_list(IsA(http.HttpRequest)) \
        .AndReturn(self.flavors.list())
    api.glance.image_list_detailed(IgnoreArg()) \
        .AndReturn((self.images.list(), False, False))
    search_opts = {'marker': None, 'paginate': True}
    api.nova.server_list(IsA(http.HttpRequest), search_opts=search_opts) \
        .AndReturn([servers, False])
    api.network.servers_update_addresses(IsA(http.HttpRequest), servers)
    api.nova.server_suspend(IsA(http.HttpRequest),
                            six.text_type(server.id))
    self.mox.ReplayAll()

    formData = {'action': 'instances__suspend__%s' % server.id}
    res = self.client.post(INDEX_URL, formData)

    self.assertRedirectsNoFollow(res, INDEX_URL)
@helpers.create_stubs({api.nova: ('server_suspend',
                                  'server_list',
                                  'flavor_list',
                                  'extension_supported',),
                       api.glance: ('image_list_detailed',),
                       api.network: ('servers_update_addresses',)})
def test_suspend_instance_exception(self):
    """A failing server_suspend still redirects back to the index page."""
    servers = self.servers.list()
    server = servers[0]
    api.nova.extension_supported('AdminActions',
                                 IsA(http.HttpRequest)) \
        .MultipleTimes().AndReturn(True)
    api.nova.flavor_list(IsA(http.HttpRequest)) \
        .AndReturn(self.flavors.list())
    api.glance.image_list_detailed(IgnoreArg()) \
        .AndReturn((self.images.list(), False, False))
    search_opts = {'marker': None, 'paginate': True}
    api.nova.server_list(IsA(http.HttpRequest), search_opts=search_opts) \
        .AndReturn([servers, False])
    api.network.servers_update_addresses(IsA(http.HttpRequest), servers)
    api.nova.server_suspend(IsA(http.HttpRequest), six.text_type(server.id)) \
        .AndRaise(self.exceptions.nova)
    self.mox.ReplayAll()

    formData = {'action': 'instances__suspend__%s' % server.id}
    res = self.client.post(INDEX_URL, formData)

    self.assertRedirectsNoFollow(res, INDEX_URL)
@helpers.create_stubs({api.nova: ('server_resume',
                                  'server_list',
                                  'flavor_list',
                                  'extension_supported',),
                       api.glance: ('image_list_detailed',),
                       api.network: ('servers_update_addresses',)})
def test_resume_instance(self):
    """The suspend toggle on a SUSPENDED instance calls server_resume."""
    servers = self.servers.list()
    server = servers[0]
    server.status = "SUSPENDED"
    api.nova.extension_supported('AdminActions',
                                 IsA(http.HttpRequest)) \
        .MultipleTimes().AndReturn(True)
    api.nova.flavor_list(IsA(http.HttpRequest)) \
        .AndReturn(self.flavors.list())
    api.glance.image_list_detailed(IgnoreArg()) \
        .AndReturn((self.images.list(), False, False))
    search_opts = {'marker': None, 'paginate': True}
    api.nova.server_list(IsA(http.HttpRequest), search_opts=search_opts) \
        .AndReturn([servers, False])
    api.network.servers_update_addresses(IsA(http.HttpRequest), servers)
    api.nova.server_resume(IsA(http.HttpRequest), six.text_type(server.id))
    self.mox.ReplayAll()

    # "suspend" is a toggle action: on a SUSPENDED server it resumes.
    formData = {'action': 'instances__suspend__%s' % server.id}
    res = self.client.post(INDEX_URL, formData)

    self.assertRedirectsNoFollow(res, INDEX_URL)
@helpers.create_stubs({api.nova: ('server_resume',
                                  'server_list',
                                  'flavor_list',
                                  'extension_supported',),
                       api.glance: ('image_list_detailed',),
                       api.network: ('servers_update_addresses',)})
def test_resume_instance_exception(self):
    """A failing server_resume still redirects back to the index page."""
    servers = self.servers.list()
    server = servers[0]
    server.status = "SUSPENDED"
    api.nova.extension_supported('AdminActions',
                                 IsA(http.HttpRequest)) \
        .MultipleTimes().AndReturn(True)
    api.nova.flavor_list(IsA(http.HttpRequest)) \
        .AndReturn(self.flavors.list())
    api.glance.image_list_detailed(IgnoreArg()) \
        .AndReturn((self.images.list(), False, False))
    search_opts = {'marker': None, 'paginate': True}
    api.nova.server_list(IsA(http.HttpRequest), search_opts=search_opts) \
        .AndReturn([servers, False])
    api.network.servers_update_addresses(IsA(http.HttpRequest), servers)
    api.nova.server_resume(IsA(http.HttpRequest),
                           six.text_type(server.id)) \
        .AndRaise(self.exceptions.nova)
    self.mox.ReplayAll()

    # "suspend" is a toggle action: on a SUSPENDED server it resumes.
    formData = {'action': 'instances__suspend__%s' % server.id}
    res = self.client.post(INDEX_URL, formData)

    self.assertRedirectsNoFollow(res, INDEX_URL)
@helpers.create_stubs({api.nova: ('server_shelve',
                                  'server_list',
                                  'flavor_list',
                                  'extension_supported',),
                       api.glance: ('image_list_detailed',),
                       api.network: ('servers_update_addresses',)})
def test_shelve_instance(self):
    """The shelve action calls server_shelve with the id as text."""
    servers = self.servers.list()
    server = servers[0]
    api.nova.extension_supported('Shelve', IsA(http.HttpRequest)) \
        .MultipleTimes().AndReturn(True)
    api.nova.flavor_list(IsA(http.HttpRequest)) \
        .AndReturn(self.flavors.list())
    api.glance.image_list_detailed(IgnoreArg()) \
        .AndReturn((self.images.list(), False, False))
    search_opts = {'marker': None, 'paginate': True}
    api.nova.server_list(IsA(http.HttpRequest), search_opts=search_opts) \
        .AndReturn([servers, False])
    api.network.servers_update_addresses(IsA(http.HttpRequest), servers)
    api.nova.server_shelve(IsA(http.HttpRequest), six.text_type(server.id))
    self.mox.ReplayAll()

    formData = {'action': 'instances__shelve__%s' % server.id}
    res = self.client.post(INDEX_URL, formData)

    self.assertRedirectsNoFollow(res, INDEX_URL)
@helpers.create_stubs({api.nova: ('server_shelve',
                                  'server_list',
                                  'flavor_list',
                                  'extension_supported',),
                       api.glance: ('image_list_detailed',),
                       api.network: ('servers_update_addresses',)})
def test_shelve_instance_exception(self):
    """A failing server_shelve still redirects back to the index page."""
    servers = self.servers.list()
    server = servers[0]
    api.nova.extension_supported('Shelve', IsA(http.HttpRequest)) \
        .MultipleTimes().AndReturn(True)
    api.nova.flavor_list(IsA(http.HttpRequest)) \
        .AndReturn(self.flavors.list())
    api.glance.image_list_detailed(IgnoreArg()) \
        .AndReturn((self.images.list(), False, False))
    search_opts = {'marker': None, 'paginate': True}
    api.nova.server_list(IsA(http.HttpRequest), search_opts=search_opts) \
        .AndReturn([servers, False])
    api.network.servers_update_addresses(IsA(http.HttpRequest), servers)
    api.nova.server_shelve(IsA(http.HttpRequest),
                           six.text_type(server.id)) \
        .AndRaise(self.exceptions.nova)
    self.mox.ReplayAll()

    formData = {'action': 'instances__shelve__%s' % server.id}
    res = self.client.post(INDEX_URL, formData)

    self.assertRedirectsNoFollow(res, INDEX_URL)
@helpers.create_stubs({api.nova: ('server_unshelve',
                                  'server_list',
                                  'flavor_list',
                                  'extension_supported',),
                       api.glance: ('image_list_detailed',),
                       api.network: ('servers_update_addresses',)})
def test_unshelve_instance(self):
    """The shelve toggle on a shelved instance calls server_unshelve."""
    servers = self.servers.list()
    server = servers[0]
    server.status = "SHELVED_OFFLOADED"
    api.nova.extension_supported('Shelve', IsA(http.HttpRequest)) \
        .MultipleTimes().AndReturn(True)
    api.nova.flavor_list(IsA(http.HttpRequest)) \
        .AndReturn(self.flavors.list())
    api.glance.image_list_detailed(IgnoreArg()) \
        .AndReturn((self.images.list(), False, False))
    search_opts = {'marker': None, 'paginate': True}
    api.nova.server_list(IsA(http.HttpRequest), search_opts=search_opts) \
        .AndReturn([servers, False])
    api.network.servers_update_addresses(IsA(http.HttpRequest), servers)
    api.nova.server_unshelve(IsA(http.HttpRequest),
                             six.text_type(server.id))
    self.mox.ReplayAll()

    # "shelve" is a toggle action: on a shelved server it unshelves.
    formData = {'action': 'instances__shelve__%s' % server.id}
    res = self.client.post(INDEX_URL, formData)

    self.assertRedirectsNoFollow(res, INDEX_URL)
@helpers.create_stubs({api.nova: ('server_unshelve',
                                  'server_list',
                                  'flavor_list',
                                  'extension_supported',),
                       api.glance: ('image_list_detailed',),
                       api.network: ('servers_update_addresses',)})
def test_unshelve_instance_exception(self):
    """A failing server_unshelve still redirects back to the index page."""
    servers = self.servers.list()
    server = servers[0]
    server.status = "SHELVED_OFFLOADED"
    api.nova.extension_supported('Shelve', IsA(http.HttpRequest)) \
        .MultipleTimes().AndReturn(True)
    api.nova.flavor_list(IsA(http.HttpRequest)) \
        .AndReturn(self.flavors.list())
    api.glance.image_list_detailed(IgnoreArg()) \
        .AndReturn((self.images.list(), False, False))
    search_opts = {'marker': None, 'paginate': True}
    api.nova.server_list(IsA(http.HttpRequest), search_opts=search_opts) \
        .AndReturn([servers, False])
    api.network.servers_update_addresses(IsA(http.HttpRequest), servers)
    api.nova.server_unshelve(IsA(http.HttpRequest),
                             six.text_type(server.id)) \
        .AndRaise(self.exceptions.nova)
    self.mox.ReplayAll()

    # "shelve" is a toggle action: on a shelved server it unshelves.
    formData = {'action': 'instances__shelve__%s' % server.id}
    res = self.client.post(INDEX_URL, formData)

    self.assertRedirectsNoFollow(res, INDEX_URL)
@helpers.create_stubs({api.nova: ('server_lock',
                                  'server_list',
                                  'flavor_list',
                                  'extension_supported',),
                       api.glance: ('image_list_detailed',),
                       api.network: ('servers_update_addresses',)})
def test_lock_instance(self):
    """The lock action calls server_lock and redirects to the index."""
    servers = self.servers.list()
    server = servers[0]

    api.nova.extension_supported('AdminActions', IsA(
        http.HttpRequest)).MultipleTimes().AndReturn(True)
    api.glance.image_list_detailed(IgnoreArg()).AndReturn((
        self.images.list(), False, False))
    api.nova.flavor_list(IsA(http.HttpRequest)) \
        .AndReturn(self.flavors.list())
    search_opts = {'marker': None, 'paginate': True}
    api.nova.server_list(
        IsA(http.HttpRequest),
        search_opts=search_opts).AndReturn([servers, False])
    api.network.servers_update_addresses(IsA(http.HttpRequest), servers)
    api.nova.server_lock(IsA(http.HttpRequest), server.id)
    self.mox.ReplayAll()

    formData = {'action': 'instances__lock__%s' % server.id}
    res = self.client.post(INDEX_URL, formData)

    self.assertRedirectsNoFollow(res, INDEX_URL)
@helpers.create_stubs({api.nova: ('server_lock',
                                  'server_list',
                                  'flavor_list',
                                  'extension_supported',),
                       api.glance: ('image_list_detailed',),
                       api.network: ('servers_update_addresses',)})
def test_lock_instance_exception(self):
    """A failing server_lock still redirects back to the index page."""
    servers = self.servers.list()
    server = servers[0]

    api.nova.extension_supported('AdminActions', IsA(
        http.HttpRequest)).MultipleTimes().AndReturn(True)
    api.glance.image_list_detailed(IgnoreArg()).AndReturn((
        self.images.list(), False, False))
    api.nova.flavor_list(IsA(http.HttpRequest)) \
        .AndReturn(self.flavors.list())
    search_opts = {'marker': None, 'paginate': True}
    api.nova.server_list(
        IsA(http.HttpRequest),
        search_opts=search_opts).AndReturn([servers, False])
    api.network.servers_update_addresses(IsA(http.HttpRequest), servers)
    api.nova.server_lock(IsA(http.HttpRequest), server.id).AndRaise(
        self.exceptions.nova)
    self.mox.ReplayAll()

    formData = {'action': 'instances__lock__%s' % server.id}
    res = self.client.post(INDEX_URL, formData)

    self.assertRedirectsNoFollow(res, INDEX_URL)
@helpers.create_stubs({api.nova: ('server_unlock',
                                  'server_list',
                                  'flavor_list',
                                  'extension_supported',),
                       api.glance: ('image_list_detailed',),
                       api.network: ('servers_update_addresses',)})
def test_unlock_instance(self):
    """The unlock action calls server_unlock and redirects to the index."""
    servers = self.servers.list()
    server = servers[0]
    api.nova.extension_supported('AdminActions', IsA(
        http.HttpRequest)).MultipleTimes().AndReturn(True)
    api.nova.flavor_list(IsA(http.HttpRequest)) \
        .AndReturn(self.flavors.list())
    api.glance.image_list_detailed(IgnoreArg()).AndReturn((
        self.images.list(), False, False))
    search_opts = {'marker': None, 'paginate': True}
    api.nova.server_list(
        IsA(http.HttpRequest),
        search_opts=search_opts).AndReturn([servers, False])
    api.network.servers_update_addresses(IsA(http.HttpRequest), servers)
    api.nova.server_unlock(IsA(http.HttpRequest), server.id)
    self.mox.ReplayAll()

    formData = {'action': 'instances__unlock__%s' % server.id}
    res = self.client.post(INDEX_URL, formData)

    self.assertRedirectsNoFollow(res, INDEX_URL)
@helpers.create_stubs({api.nova: ('server_unlock',
                                  'server_list',
                                  'flavor_list',
                                  'extension_supported',),
                       api.glance: ('image_list_detailed',),
                       api.network: ('servers_update_addresses',)})
def test_unlock_instance_exception(self):
    """A failing server_unlock still redirects back to the index page."""
    servers = self.servers.list()
    server = servers[0]
    api.nova.extension_supported('AdminActions', IsA(
        http.HttpRequest)).MultipleTimes().AndReturn(True)
    api.glance.image_list_detailed(IgnoreArg()).AndReturn((
        self.images.list(), False, False))
    search_opts = {'marker': None, 'paginate': True}
    api.nova.flavor_list(IsA(http.HttpRequest)) \
        .AndReturn(self.flavors.list())
    api.nova.server_list(
        IsA(http.HttpRequest),
        search_opts=search_opts).AndReturn([servers, False])
    api.network.servers_update_addresses(IsA(http.HttpRequest), servers)
    api.nova.server_unlock(IsA(http.HttpRequest), server.id).AndRaise(
        self.exceptions.nova)
    self.mox.ReplayAll()

    formData = {'action': 'instances__unlock__%s' % server.id}
    res = self.client.post(INDEX_URL, formData)

    self.assertRedirectsNoFollow(res, INDEX_URL)
@helpers.create_stubs({
    api.nova: (
        "server_get",
        "instance_volumes_list",
        "flavor_get",
        "extension_supported"
    ),
    api.network: (
        "server_security_groups",
        "servers_update_addresses",
        "floating_ip_simple_associate_supported",
        "floating_ip_supported"
    )
})
def _get_instance_details(self, server, qs=None,
                          flavor_return=None, volumes_return=None,
                          security_groups_return=None,
                          flavor_exception=False):
    """Record the mox expectations for the instance detail view and GET it.

    :param server: server fixture whose detail page is requested
    :param qs: optional query string (e.g. to select a detail tab)
    :param flavor_return: flavor returned by flavor_get
        (defaults to the first flavor fixture)
    :param volumes_return: attached volumes (defaults to none)
    :param security_groups_return: security groups
        (defaults to all fixtures)
    :param flavor_exception: when True, flavor_get raises instead
    :returns: the test client response for the detail URL
    """
    url = reverse('horizon:project:instances:detail', args=[server.id])
    if qs:
        url += qs

    if flavor_return is None:
        flavor_return = self.flavors.first()

    if volumes_return is None:
        volumes_return = []

    if security_groups_return is None:
        security_groups_return = self.security_groups.list()

    api.nova.server_get(IsA(http.HttpRequest), server.id).AndReturn(server)
    api.network.servers_update_addresses(IsA(http.HttpRequest),
                                         IgnoreArg())
    api.nova.instance_volumes_list(IsA(http.HttpRequest),
                                   server.id).AndReturn(volumes_return)
    if flavor_exception:
        api.nova.flavor_get(IsA(http.HttpRequest), server.flavor['id']) \
            .AndRaise(self.exceptions.nova)
    else:
        api.nova.flavor_get(IsA(http.HttpRequest), server.flavor['id']) \
            .AndReturn(flavor_return)
    api.network.server_security_groups(IsA(http.HttpRequest), server.id) \
        .AndReturn(security_groups_return)
    api.network.floating_ip_simple_associate_supported(
        IsA(http.HttpRequest)).MultipleTimes().AndReturn(True)
    api.network.floating_ip_supported(IsA(http.HttpRequest)) \
        .MultipleTimes().AndReturn(True)
    api.nova.extension_supported('AdminActions', IsA(http.HttpRequest)) \
        .MultipleTimes().AndReturn(True)
    api.nova.extension_supported('Shelve', IsA(http.HttpRequest)) \
        .MultipleTimes().AndReturn(True)

    self.mox.ReplayAll()

    return self.client.get(url)
def test_instance_details_volumes(self):
    """The detail view exposes the instance's attached volumes.

    The original test repeated the same ``assertItemsEqual`` twice in a
    row; the redundant duplicate assertion has been removed.
    """
    server = self.servers.first()
    volumes = [self.volumes.list()[1]]
    security_groups = self.security_groups.list()

    res = self._get_instance_details(
        server, volumes_return=volumes,
        security_groups_return=security_groups)

    self.assertItemsEqual(res.context['instance'].volumes, volumes)
def test_instance_details_volume_sorting(self):
    """Attached volumes are listed sorted by their device name."""
    server = self.servers.first()
    volumes = self.volumes.list()[1:3]
    security_groups = self.security_groups.list()

    res = self._get_instance_details(
        server, volumes_return=volumes,
        security_groups_return=security_groups)

    self.assertItemsEqual(res.context['instance'].volumes, volumes)
    # Device names come back in ascending order.
    self.assertEqual(res.context['instance'].volumes[0].device,
                     "/dev/hda")
    self.assertEqual(res.context['instance'].volumes[1].device,
                     "/dev/hdk")
def test_instance_details_metadata(self):
    """The overview tab renders instance metadata with HTML escaped."""
    server = self.servers.first()

    tg = tabs.InstanceDetailTabs(self.request, instance=server)
    qs = "?%s=%s" % (tg.param_name, tg.get_tab("overview").get_id())
    res = self._get_instance_details(server, qs)

    self.assertContains(res, "<dd>keyName</dd>", 1)
    self.assertContains(res, "<dt>someMetaLabel</dt>", 1)
    self.assertContains(res, "<dd>someMetaData</dd>", 1)
    # Metadata containing markup must be escaped in the output.
    self.assertContains(res, "<dt>some&lt;b&gt;html&lt;/b&gt;label</dt>",
                        1)
    self.assertContains(res, "<dd>&lt;!--</dd>", 1)
    self.assertContains(res, "<dt>empty</dt>", 1)
    # TODO(david-lyle): uncomment when fixed with Django 1.6
    # self.assertContains(res, "<dd><em>N/A</em></dd>", 1)
def test_instance_details_fault(self):
    """An ERROR instance exposes its fault dict in the detail context."""
    server = self.servers.first()

    server.status = 'ERROR'
    # Fault payload as nova would report it for a scheduling failure.
    server.fault = {"message": "NoValidHost",
                    "code": 500,
                    "details": "No valid host was found. \n  "
                               "File \"/mnt/stack/nova/nova/"
                               "scheduler/filter_scheduler.py\", "
                               "line 105, in schedule_run_instance\n  "
                               "raise exception.NoValidHost"
                               "(reason=\"\")\n",
                    "created": "2013-10-07T00:08:32Z"}

    res = self._get_instance_details(server)
    self.assertItemsEqual(res.context['instance'].fault, server.fault)
def test_instance_details_console_tab(self):
    """The console tab is present and actually loaded for the detail view."""
    instance = self.servers.first()
    tab_group = tabs.InstanceDetailTabs(self.request, instance=instance)
    query = "?%s=%s" % (tab_group.param_name,
                        tab_group.get_tab("console").get_id())

    response = self._get_instance_details(instance, query)

    self.assertIn(tabs.ConsoleTab, response.context_data['tab_group'].tabs)
    self.assertTemplateUsed(response,
                            'project/instances/_detail_console.html')
    loaded = response.context_data['tab_group'].get_loaded_tabs()
    self.assertTrue(any(isinstance(t, tabs.ConsoleTab) for t in loaded))
@django.test.utils.override_settings(CONSOLE_TYPE=None)
def test_instance_details_console_tab_deactivated(self):
    """With CONSOLE_TYPE disabled, the console tab is absent everywhere."""
    instance = self.servers.first()
    tab_group = tabs.InstanceDetailTabs(self.request, instance=instance)
    self.assertIsNone(tab_group.get_tab("console"))

    response = self._get_instance_details(instance)
    self.assertTemplateNotUsed(response,
                               'project/instances/_detail_console.html')
    for loaded_tab in response.context_data['tab_group'].get_loaded_tabs():
        self.assertNotIsInstance(loaded_tab, tabs.ConsoleTab)
@helpers.create_stubs({api.nova: ('server_get',)})
def test_instance_details_exception(self):
    """A failing server_get redirects the detail view to the index."""
    server = self.servers.first()

    api.nova.server_get(IsA(http.HttpRequest), server.id) \
        .AndRaise(self.exceptions.nova)

    self.mox.ReplayAll()

    url = reverse('horizon:project:instances:detail',
                  args=[server.id])
    res = self.client.get(url)

    self.assertRedirectsNoFollow(res, INDEX_URL)
@helpers.create_stubs({api.nova: ("server_get",)})
def test_instance_details_unauthorized(self):
    """An unauthorized server_get redirects to login with a next param."""
    server = self.servers.first()

    api.nova.server_get(IsA(http.HttpRequest), server.id)\
        .AndRaise(self.exceptions.nova_unauthorized)
    self.mox.ReplayAll()

    url = reverse('horizon:project:instances:detail',
                  args=[server.id])

    # Avoid the log message in the test
    # when unauthorized exception will be logged
    logging.disable(logging.ERROR)
    res = self.client.get(url)
    logging.disable(logging.NOTSET)

    self.assertEqual(302, res.status_code)
    self.assertEqual(('Location', settings.TESTSERVER +
                      settings.LOGIN_URL + '?' +
                      REDIRECT_FIELD_NAME + '=' + url),
                     res._headers.get('location', None),)
def test_instance_details_flavor_not_found(self):
    """A flavor_get failure renders "Not available" instead of crashing."""
    instance = self.servers.first()

    response = self._get_instance_details(instance, flavor_exception=True)

    self.assertTemplateUsed(response,
                            'project/instances/_detail_overview.html')
    self.assertContains(response, "Not available")
@helpers.create_stubs({api.nova: ('server_console_output',)})
def test_instance_log(self):
    """The log tab returns the raw console output from nova."""
    server = self.servers.first()
    CONSOLE_OUTPUT = 'output'

    api.nova.server_console_output(IsA(http.HttpRequest),
                                   server.id, tail_length=None) \
        .AndReturn(CONSOLE_OUTPUT)

    self.mox.ReplayAll()

    url = reverse('horizon:project:instances:console',
                  args=[server.id])
    tg = tabs.InstanceDetailTabs(self.request, instance=server)
    qs = "?%s=%s" % (tg.param_name, tg.get_tab("log").get_id())
    res = self.client.get(url + qs)

    self.assertNoMessages()
    self.assertIsInstance(res, http.HttpResponse)
    self.assertContains(res, CONSOLE_OUTPUT)
@helpers.create_stubs({api.nova: ('server_console_output',)})
def test_instance_log_exception(self):
    """A failing console-output call renders an inline error message."""
    server = self.servers.first()

    api.nova.server_console_output(IsA(http.HttpRequest),
                                   server.id, tail_length=None) \
        .AndRaise(self.exceptions.nova)

    self.mox.ReplayAll()

    url = reverse('horizon:project:instances:console',
                  args=[server.id])
    tg = tabs.InstanceDetailTabs(self.request, instance=server)
    qs = "?%s=%s" % (tg.param_name, tg.get_tab("log").get_id())
    res = self.client.get(url + qs)

    self.assertContains(res, "Unable to get log for")
def test_instance_log_invalid_input(self):
    """Invalid tail-length values render the 'unable to get log' error."""
    server = self.servers.first()
    url = reverse('horizon:project:instances:console', args=[server.id])
    tab_group = tabs.InstanceDetailTabs(self.request, instance=server)
    for bad_length in ("-5", "x"):
        query = "?%s=%s&length=%s" % (tab_group.param_name,
                                      tab_group.get_tab("log").get_id(),
                                      bad_length)
        response = self.client.get(url + query)
        self.assertContains(response, "Unable to get log for")
def test_instance_vnc(self):
    """The VNC view redirects to the console URL from the console API."""
    server = self.servers.first()
    CONSOLE_OUTPUT = '/vncserver'
    CONSOLE_TITLE = '&title=%s(%s)' % (server.name, server.id)
    CONSOLE_URL = CONSOLE_OUTPUT + CONSOLE_TITLE
    # NOTE(review): console_mock is built but never handed to a stub below;
    # presumably leftover setup — confirm before removing.
    console_mock = self.mox.CreateMock(api.nova.VNCConsole)
    console_mock.url = CONSOLE_OUTPUT
    self.mox.StubOutWithMock(api.nova, 'server_get')
    self.mox.StubOutWithMock(console, 'get_console')
    api.nova.server_get(IsA(http.HttpRequest), server.id) \
        .AndReturn(server)
    console.get_console(IgnoreArg(), 'VNC', server) \
        .AndReturn(('VNC', CONSOLE_URL))
    self.mox.ReplayAll()
    url = reverse('horizon:project:instances:vnc',
                  args=[server.id])
    res = self.client.get(url)
    redirect = CONSOLE_URL
    self.assertRedirectsNoFollow(res, redirect)
def test_instance_vnc_error(self):
    """A NotAvailable console error redirects back to the index page."""
    server = self.servers.first()
    self.mox.StubOutWithMock(api.nova, 'server_get')
    self.mox.StubOutWithMock(console, 'get_console')
    api.nova.server_get(IsA(http.HttpRequest), server.id) \
        .AndReturn(server)
    console.get_console(IgnoreArg(), 'VNC', server) \
        .AndRaise(exceptions.NotAvailable('console'))
    self.mox.ReplayAll()
    url = reverse('horizon:project:instances:vnc',
                  args=[server.id])
    res = self.client.get(url)
    self.assertRedirectsNoFollow(res, INDEX_URL)
def test_instance_spice(self):
    """The SPICE view redirects to the console URL from the console API."""
    server = self.servers.first()
    CONSOLE_OUTPUT = '/spiceserver'
    CONSOLE_TITLE = '&title=%s(%s)' % (server.name, server.id)
    CONSOLE_URL = CONSOLE_OUTPUT + CONSOLE_TITLE
    # NOTE(review): console_mock is built but never used by a stub below;
    # presumably leftover setup — confirm before removing.
    console_mock = self.mox.CreateMock(api.nova.SPICEConsole)
    console_mock.url = CONSOLE_OUTPUT
    self.mox.StubOutWithMock(console, 'get_console')
    self.mox.StubOutWithMock(api.nova, 'server_get')
    api.nova.server_get(IsA(http.HttpRequest), server.id) \
        .AndReturn(server)
    console.get_console(IgnoreArg(), 'SPICE', server) \
        .AndReturn(('SPICE', CONSOLE_URL))
    self.mox.ReplayAll()
    url = reverse('horizon:project:instances:spice',
                  args=[server.id])
    res = self.client.get(url)
    redirect = CONSOLE_URL
    self.assertRedirectsNoFollow(res, redirect)
def test_instance_spice_exception(self):
    """A NotAvailable SPICE console error redirects back to the index."""
    server = self.servers.first()
    self.mox.StubOutWithMock(console, 'get_console')
    self.mox.StubOutWithMock(api.nova, 'server_get')
    api.nova.server_get(IsA(http.HttpRequest), server.id) \
        .AndReturn(server)
    console.get_console(IgnoreArg(), 'SPICE', server) \
        .AndRaise(exceptions.NotAvailable('console'))
    self.mox.ReplayAll()
    url = reverse('horizon:project:instances:spice',
                  args=[server.id])
    res = self.client.get(url)
    self.assertRedirectsNoFollow(res, INDEX_URL)
def test_instance_rdp(self):
    """The RDP view redirects to the console URL from the console API."""
    server = self.servers.first()
    CONSOLE_OUTPUT = '/rdpserver'
    CONSOLE_TITLE = '&title=%s(%s)' % (server.name, server.id)
    CONSOLE_URL = CONSOLE_OUTPUT + CONSOLE_TITLE
    # NOTE(review): console_mock is built but never used by a stub below;
    # presumably leftover setup — confirm before removing.
    console_mock = self.mox.CreateMock(api.nova.RDPConsole)
    console_mock.url = CONSOLE_OUTPUT
    self.mox.StubOutWithMock(console, 'get_console')
    self.mox.StubOutWithMock(api.nova, 'server_get')
    api.nova.server_get(IsA(http.HttpRequest), server.id) \
        .AndReturn(server)
    console.get_console(IgnoreArg(), 'RDP', server) \
        .AndReturn(('RDP', CONSOLE_URL))
    self.mox.ReplayAll()
    url = reverse('horizon:project:instances:rdp',
                  args=[server.id])
    res = self.client.get(url)
    redirect = CONSOLE_URL
    self.assertRedirectsNoFollow(res, redirect)
def test_instance_rdp_exception(self):
    """A NotAvailable RDP console error redirects back to the index."""
    server = self.servers.first()
    self.mox.StubOutWithMock(console, 'get_console')
    self.mox.StubOutWithMock(api.nova, 'server_get')
    api.nova.server_get(IsA(http.HttpRequest), server.id) \
        .AndReturn(server)
    console.get_console(IgnoreArg(), 'RDP', server) \
        .AndRaise(exceptions.NotAvailable('console'))
    self.mox.ReplayAll()
    url = reverse('horizon:project:instances:rdp',
                  args=[server.id])
    res = self.client.get(url)
    self.assertRedirectsNoFollow(res, INDEX_URL)
# NOTE(review): 'server_list', 'flavor_list' and 'server_delete' are stubbed
# but have no expectations recorded below — presumably stubbed only to keep
# the view from touching the real API; confirm before trimming the stub map.
@helpers.create_stubs({api.nova: ('server_get',
                                  'snapshot_create',
                                  'server_list',
                                  'flavor_list',
                                  'server_delete'),
                       api.glance: ('image_list_detailed',)})
def test_create_instance_snapshot(self):
    """POSTing the snapshot form creates a snapshot and redirects to images."""
    server = self.servers.first()
    api.nova.server_get(IsA(http.HttpRequest), server.id).AndReturn(server)
    api.nova.snapshot_create(IsA(http.HttpRequest),
                             server.id,
                             "snapshot1").AndReturn(self.snapshots.first())
    api.glance.image_list_detailed(IsA(http.HttpRequest),
                                   marker=None).AndReturn([[], False,
                                                           False])
    self.mox.ReplayAll()
    formData = {'instance_id': server.id,
                'method': 'CreateSnapshot',
                'name': 'snapshot1'}
    url = reverse('horizon:project:images:snapshots:create',
                  args=[server.id])
    redir_url = reverse('horizon:project:images:index')
    res = self.client.post(url, formData)
    self.assertRedirects(res, redir_url)
@django.test.utils.override_settings(
    OPENSTACK_ENABLE_PASSWORD_RETRIEVE=False)
def test_instances_index_retrieve_password_action_disabled(self):
    """With password retrieval disabled, the row action must be hidden."""
    # Fixed stray whitespace in the original `self. _test...` attribute
    # access (legal Python, but a typo that trips linters).
    self._test_instances_index_retrieve_password_action()
@django.test.utils.override_settings(
    OPENSTACK_ENABLE_PASSWORD_RETRIEVE=True)
def test_instances_index_retrieve_password_action_enabled(self):
    """With password retrieval enabled, the row action must be shown."""
    self._test_instances_index_retrieve_password_action()
@helpers.create_stubs({
    api.nova: ('flavor_list', 'server_list', 'tenant_absolute_limits',
               'extension_supported',),
    api.glance: ('image_list_detailed',),
    api.network: ('floating_ip_simple_associate_supported',
                  'floating_ip_supported',
                  'servers_update_addresses',),
})
def _test_instances_index_retrieve_password_action(self):
    """Shared driver: index page shows/hides the decrypt-password action.

    The expected visibility depends on OPENSTACK_ENABLE_PASSWORD_RETRIEVE
    plus each server being ACTIVE with a key pair; the two tests above run
    this with the setting overridden either way.
    """
    servers = self.servers.list()
    # Record the index view's API calls in order; mox verifies the sequence.
    api.nova.extension_supported('AdminActions',
                                 IsA(http.HttpRequest)) \
        .MultipleTimes().AndReturn(True)
    api.nova.extension_supported('Shelve', IsA(http.HttpRequest)) \
        .MultipleTimes().AndReturn(True)
    api.nova.flavor_list(IsA(http.HttpRequest)) \
        .AndReturn(self.flavors.list())
    api.glance.image_list_detailed(IgnoreArg()) \
        .AndReturn((self.images.list(), False, False))
    search_opts = {'marker': None, 'paginate': True}
    api.nova.server_list(IsA(http.HttpRequest), search_opts=search_opts) \
        .AndReturn([servers, False])
    api.network.servers_update_addresses(IsA(http.HttpRequest), servers)
    api.nova.tenant_absolute_limits(IsA(http.HttpRequest), reserved=True) \
        .MultipleTimes().AndReturn(self.limits['absolute'])
    api.network.floating_ip_supported(IsA(http.HttpRequest)) \
        .MultipleTimes().AndReturn(True)
    api.network.floating_ip_simple_associate_supported(
        IsA(http.HttpRequest)).MultipleTimes().AndReturn(True)
    self.mox.ReplayAll()
    url = reverse('horizon:project:instances:index')
    res = self.client.get(url)
    for server in servers:
        _action_id = ''.join(["instances__row_",
                              server.id,
                              "__action_decryptpassword"])
        if settings.OPENSTACK_ENABLE_PASSWORD_RETRIEVE and \
                server.status == "ACTIVE" and \
                server.key_name is not None:
            self.assertContains(res, _action_id)
        else:
            self.assertNotContains(res, _action_id)
@helpers.create_stubs({api.nova: ('get_password',)})
def test_decrypt_instance_password(self):
    """The decrypt-password view renders its template on success."""
    server = self.servers.first()
    enc_password = "azerty"
    api.nova.get_password(IsA(http.HttpRequest), server.id)\
        .AndReturn(enc_password)
    self.mox.ReplayAll()
    url = reverse('horizon:project:instances:decryptpassword',
                  args=[server.id,
                        server.key_name])
    res = self.client.get(url)
    self.assertTemplateUsed(res, 'project/instances/decryptpassword.html')
@helpers.create_stubs({api.nova: ('get_password',)})
def test_decrypt_instance_get_exception(self):
    """A nova failure on get_password redirects back to the index."""
    server = self.servers.first()
    keypair = self.keypairs.first()
    api.nova.get_password(IsA(http.HttpRequest), server.id)\
        .AndRaise(self.exceptions.nova)
    self.mox.ReplayAll()
    url = reverse('horizon:project:instances:decryptpassword',
                  args=[server.id,
                        keypair])
    res = self.client.get(url)
    self.assertRedirectsNoFollow(res, INDEX_URL)
# Stub map shared by the instance-update GET tests below.
instance_update_get_stubs = {
    api.nova: ('server_get',),
    api.network: ('security_group_list',
                  'server_security_groups',)}
@helpers.create_stubs(instance_update_get_stubs)
def test_instance_update_get(self):
    """GET of the update view renders the workflow template."""
    server = self.servers.first()
    api.nova.server_get(IsA(http.HttpRequest), server.id).AndReturn(server)
    api.network.security_group_list(IsA(http.HttpRequest)) \
        .AndReturn([])
    api.network.server_security_groups(IsA(http.HttpRequest),
                                       server.id).AndReturn([])
    self.mox.ReplayAll()
    url = reverse('horizon:project:instances:update', args=[server.id])
    res = self.client.get(url)
    self.assertTemplateUsed(res, views.WorkflowView.template_name)
@helpers.create_stubs(instance_update_get_stubs)
def test_instance_update_get_server_get_exception(self):
    """A nova failure fetching the server redirects back to the index."""
    server = self.servers.first()
    api.nova.server_get(IsA(http.HttpRequest), server.id) \
        .AndRaise(self.exceptions.nova)
    self.mox.ReplayAll()
    url = reverse('horizon:project:instances:update',
                  args=[server.id])
    res = self.client.get(url)
    self.assertRedirectsNoFollow(res, INDEX_URL)
def _instance_update_post(self, server_id, server_name, secgroups):
    """POST the instance-update form for *server_id* and return the response."""
    role_field = ('default_'
                  + workflows.update_instance.INSTANCE_SEC_GROUP_SLUG
                  + '_role')
    form_data = {
        'name': server_name,
        role_field: 'member',
        SEC_GROUP_ROLE_PREFIX + 'member': secgroups,
    }
    update_url = reverse('horizon:project:instances:update',
                         args=[server_id])
    return self.client.post(update_url, form_data)
# Stub map shared by the instance-update POST tests below.
instance_update_post_stubs = {
    api.nova: ('server_get', 'server_update'),
    api.network: ('security_group_list',
                  'server_security_groups',
                  'server_update_security_groups')}
@helpers.create_stubs(instance_update_post_stubs)
def test_instance_update_post(self):
    """A successful update posts the new name and security groups."""
    server = self.servers.first()
    secgroups = self.security_groups.list()[:3]
    # Currently attached groups vs. the set the form submits.
    server_groups = [secgroups[0], secgroups[1]]
    wanted_groups = [secgroups[1].id, secgroups[2].id]
    api.nova.server_get(IsA(http.HttpRequest), server.id).AndReturn(server)
    api.network.security_group_list(IsA(http.HttpRequest)) \
        .AndReturn(secgroups)
    api.network.server_security_groups(IsA(http.HttpRequest),
                                       server.id).AndReturn(server_groups)
    api.nova.server_update(IsA(http.HttpRequest),
                           server.id,
                           server.name).AndReturn(server)
    api.network.server_update_security_groups(IsA(http.HttpRequest),
                                              server.id,
                                              wanted_groups)
    self.mox.ReplayAll()
    res = self._instance_update_post(server.id, server.name, wanted_groups)
    self.assertNoFormErrors(res)
    self.assertRedirectsNoFollow(res, INDEX_URL)
@helpers.create_stubs(instance_update_post_stubs)
def test_instance_update_post_api_exception(self):
    """A server_update failure still redirects back to the index."""
    server = self.servers.first()
    api.nova.server_get(IsA(http.HttpRequest), server.id).AndReturn(server)
    api.network.security_group_list(IsA(http.HttpRequest)) \
        .AndReturn([])
    api.network.server_security_groups(IsA(http.HttpRequest),
                                       server.id).AndReturn([])
    api.nova.server_update(IsA(http.HttpRequest), server.id, server.name) \
        .AndRaise(self.exceptions.nova)
    # The security-group update is still attempted after the failure.
    api.network.server_update_security_groups(
        IsA(http.HttpRequest), server.id, [])
    self.mox.ReplayAll()
    res = self._instance_update_post(server.id, server.name, [])
    self.assertRedirectsNoFollow(res, INDEX_URL)
@helpers.create_stubs(instance_update_post_stubs)
def test_instance_update_post_secgroup_api_exception(self):
    """A security-group update failure still redirects back to the index."""
    server = self.servers.first()
    api.nova.server_get(IsA(http.HttpRequest), server.id).AndReturn(server)
    api.network.security_group_list(IsA(http.HttpRequest)) \
        .AndReturn([])
    api.network.server_security_groups(IsA(http.HttpRequest),
                                       server.id).AndReturn([])
    api.nova.server_update(IsA(http.HttpRequest),
                           server.id,
                           server.name).AndReturn(server)
    api.network.server_update_security_groups(
        IsA(http.HttpRequest),
        server.id, []).AndRaise(self.exceptions.nova)
    self.mox.ReplayAll()
    res = self._instance_update_post(server.id, server.name, [])
    self.assertRedirectsNoFollow(res, INDEX_URL)
@helpers.create_stubs({api.nova: ('extension_supported',
                                  'flavor_list',
                                  'keypair_list',
                                  'tenant_absolute_limits',
                                  'availability_zone_list',),
                       api.network: ('security_group_list',),
                       cinder: ('volume_snapshot_list',
                                'volume_list',),
                       api.neutron: ('network_list',
                                     'profile_list',),
                       api.glance: ('image_list_detailed',)})
def test_launch_instance_get(self,
                             expect_password_fields=True,
                             block_device_mapping_v2=True,
                             custom_flavor_sort=None,
                             only_one_network=False,
                             disk_config=True,
                             config_drive=True,
                             test_with_profile=False):
    """GET of the launch workflow renders the expected steps and fields.

    Parameterized so the override-settings variants below can reuse it;
    each keyword toggles one feature the workflow view probes for.
    """
    image = self.images.first()
    # Record the workflow's API calls in order; mox verifies the sequence.
    api.nova.extension_supported('BlockDeviceMappingV2Boot',
                                 IsA(http.HttpRequest)) \
        .AndReturn(block_device_mapping_v2)
    cinder.volume_list(IsA(http.HttpRequest),
                       search_opts=VOLUME_SEARCH_OPTS) \
        .AndReturn([])
    cinder.volume_snapshot_list(IsA(http.HttpRequest),
                                search_opts=SNAPSHOT_SEARCH_OPTS) \
        .AndReturn([])
    api.glance.image_list_detailed(
        IsA(http.HttpRequest),
        filters={'is_public': True, 'status': 'active'}) \
        .AndReturn([self.images.list(), False, False])
    api.glance.image_list_detailed(
        IsA(http.HttpRequest),
        filters={'property-owner_id': self.tenant.id,
                 'status': 'active'}) \
        .AndReturn([[], False, False])
    api.neutron.network_list(IsA(http.HttpRequest),
                             tenant_id=self.tenant.id,
                             shared=False) \
        .AndReturn(self.networks.list()[:1])
    if only_one_network:
        api.neutron.network_list(IsA(http.HttpRequest),
                                 shared=True).AndReturn([])
    else:
        api.neutron.network_list(IsA(http.HttpRequest),
                                 shared=True) \
            .AndReturn(self.networks.list()[1:])
    if test_with_profile:
        policy_profiles = self.policy_profiles.list()
        api.neutron.profile_list(IsA(http.HttpRequest),
                                 'policy').AndReturn(policy_profiles)
    api.nova.extension_supported('DiskConfig',
                                 IsA(http.HttpRequest)) \
        .AndReturn(disk_config)
    api.nova.extension_supported(
        'ConfigDrive', IsA(http.HttpRequest)).AndReturn(config_drive)
    api.nova.tenant_absolute_limits(IsA(http.HttpRequest))\
        .AndReturn(self.limits['absolute'])
    api.nova.flavor_list(IsA(http.HttpRequest)) \
        .AndReturn(self.flavors.list())
    api.nova.flavor_list(IsA(http.HttpRequest)) \
        .AndReturn(self.flavors.list())
    api.nova.keypair_list(IsA(http.HttpRequest)) \
        .AndReturn(self.keypairs.list())
    api.network.security_group_list(IsA(http.HttpRequest)) \
        .AndReturn(self.security_groups.list())
    api.nova.availability_zone_list(IsA(http.HttpRequest)) \
        .AndReturn(self.availability_zones.list())
    self.mox.ReplayAll()
    url = reverse('horizon:project:instances:launch')
    params = urlencode({"source_type": "image_id",
                        "source_id": image.id})
    res = self.client.get("%s?%s" % (url, params))
    workflow = res.context['workflow']
    self.assertTemplateUsed(res, views.WorkflowView.template_name)
    self.assertEqual(res.context['workflow'].name,
                     workflows.LaunchInstance.name)
    step = workflow.get_step("setinstancedetailsaction")
    self.assertEqual(step.action.initial['image_id'], image.id)
    self.assertQuerysetEqual(
        workflow.steps,
        ['<SetInstanceDetails: setinstancedetailsaction>',
         '<SetAccessControls: setaccesscontrolsaction>',
         '<SetNetwork: setnetworkaction>',
         '<PostCreationStep: customizeaction>',
         '<SetAdvanced: setadvancedaction>'])
    # Expected flavor <option> ordering for each CREATE_INSTANCE_FLAVOR_SORT
    # override exercised by the variants below.
    if custom_flavor_sort == 'id':
        # Reverse sorted by id
        sorted_flavors = (
            ('eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee', 'm1.metadata'),
            ('dddddddd-dddd-dddd-dddd-dddddddddddd', 'm1.secret'),
            ('bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb', 'm1.massive'),
            ('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa', 'm1.tiny'),
        )
    elif custom_flavor_sort == 'name':
        sorted_flavors = (
            ('bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb', 'm1.massive'),
            ('eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee', 'm1.metadata'),
            ('dddddddd-dddd-dddd-dddd-dddddddddddd', 'm1.secret'),
            ('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa', 'm1.tiny'),
        )
    elif custom_flavor_sort == helpers.my_custom_sort:
        sorted_flavors = (
            ('dddddddd-dddd-dddd-dddd-dddddddddddd', 'm1.secret'),
            ('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa', 'm1.tiny'),
            ('bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb', 'm1.massive'),
            ('eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee', 'm1.metadata'),
        )
    else:
        # Default - sorted by RAM
        sorted_flavors = (
            ('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa', 'm1.tiny'),
            ('bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb', 'm1.massive'),
            ('dddddddd-dddd-dddd-dddd-dddddddddddd', 'm1.secret'),
            ('eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee', 'm1.metadata'),
        )
    select_options = '\n'.join([
        '<option value="%s">%s</option>' % (f[0], f[1])
        for f in sorted_flavors
    ])
    self.assertContains(res, select_options)
    # Each toggled feature should add or remove its form field/label.
    password_field_label = 'Admin Pass'
    if expect_password_fields:
        self.assertContains(res, password_field_label)
    else:
        self.assertNotContains(res, password_field_label)
    boot_from_image_field_label = 'Boot from image (creates a new volume)'
    if block_device_mapping_v2:
        self.assertContains(res, boot_from_image_field_label)
    else:
        self.assertNotContains(res, boot_from_image_field_label)
    checked_label = '<label for="id_network_0"><input checked="checked"'
    if only_one_network:
        self.assertContains(res, checked_label)
    else:
        self.assertNotContains(res, checked_label)
    disk_config_field_label = 'Disk Partition'
    if disk_config:
        self.assertContains(res, disk_config_field_label)
    else:
        self.assertNotContains(res, disk_config_field_label)
    config_drive_field_label = 'Configuration Drive'
    if config_drive:
        self.assertContains(res, config_drive_field_label)
    else:
        self.assertNotContains(res, config_drive_field_label)
@django.test.utils.override_settings(
    OPENSTACK_HYPERVISOR_FEATURES={'can_set_password': False})
def test_launch_instance_get_without_password(self):
    """Password fields are hidden when the hypervisor can't set passwords."""
    self.test_launch_instance_get(expect_password_fields=False)
@django.test.utils.override_settings(
    OPENSTACK_HYPERVISOR_FEATURES={'requires_keypair': True})
def test_launch_instance_required_key(self):
    """Launch validation fails when a key pair is required but omitted."""
    flavor = self.flavors.first()
    image = self.images.first()
    # Make the image exactly fit the flavor so only the keypair check fails.
    image.min_ram = flavor.ram
    image.min_disk = flavor.disk
    self._test_launch_form_instance_requirement_error(image, flavor,
                                                      keypair_require=True)
def test_launch_instance_get_no_block_device_mapping_v2_supported(self):
    """Variant: BlockDeviceMappingV2Boot extension unsupported."""
    self.test_launch_instance_get(block_device_mapping_v2=False)
def test_launch_instance_get_no_disk_config_supported(self):
    """Variant: DiskConfig extension unsupported."""
    self.test_launch_instance_get(disk_config=False)
def test_launch_instance_get_no_config_drive_supported(self):
    """Variant: ConfigDrive extension unsupported."""
    self.test_launch_instance_get(config_drive=False)
@django.test.utils.override_settings(
    CREATE_INSTANCE_FLAVOR_SORT={
        'key': 'id',
        'reverse': True,
    })
def test_launch_instance_get_custom_flavor_sort_by_id(self):
    """Variant: flavors sorted by id, descending."""
    self.test_launch_instance_get(custom_flavor_sort='id')
@django.test.utils.override_settings(
    CREATE_INSTANCE_FLAVOR_SORT={
        'key': 'name',
        'reverse': False,
    })
def test_launch_instance_get_custom_flavor_sort_by_name(self):
    """Variant: flavors sorted by name, ascending."""
    self.test_launch_instance_get(custom_flavor_sort='name')
@django.test.utils.override_settings(
    CREATE_INSTANCE_FLAVOR_SORT={
        'key': helpers.my_custom_sort,
        'reverse': False,
    })
def test_launch_instance_get_custom_flavor_sort_by_callable(self):
    """Variant: flavors sorted by a custom key callable."""
    self.test_launch_instance_get(
        custom_flavor_sort=helpers.my_custom_sort)
@django.test.utils.override_settings(
    CREATE_INSTANCE_FLAVOR_SORT={
        'key': 'no_such_column',
        'reverse': False,
    })
def test_launch_instance_get_custom_flavor_sort_by_missing_column(self):
    """Variant: an unknown sort key falls back to the default RAM sort."""
    self.test_launch_instance_get(custom_flavor_sort='no_such_column')
def test_launch_instance_get_with_only_one_network(self):
    """Variant: a single network is pre-checked in the form."""
    self.test_launch_instance_get(only_one_network=True)
@helpers.update_settings(
    OPENSTACK_NEUTRON_NETWORK={'profile_support': 'cisco'})
def test_launch_instance_get_with_profile(self):
    """Variant: neutron policy-profile support enabled."""
    self.test_launch_instance_get(test_with_profile=True)
@helpers.create_stubs({api.nova: ('extension_supported',
                                  'flavor_list',
                                  'keypair_list',
                                  'tenant_absolute_limits',
                                  'availability_zone_list',),
                       api.network: ('security_group_list',),
                       cinder: ('volume_snapshot_list',
                                'volume_list',),
                       api.neutron: ('network_list',
                                     'profile_list',),
                       api.glance: ('image_list_detailed',)})
def test_launch_instance_get_bootable_volumes(self,
                                              block_device_mapping_v2=True,
                                              only_one_network=False,
                                              disk_config=True,
                                              config_drive=True,
                                              test_with_profile=False):
    """The launch workflow offers exactly the bootable, available volumes."""
    # Record the workflow's API calls in order; mox verifies the sequence.
    api.nova.extension_supported('BlockDeviceMappingV2Boot',
                                 IsA(http.HttpRequest)) \
        .AndReturn(block_device_mapping_v2)
    # Only volumes that are available AND flagged bootable should appear.
    volumes = [v for v in self.volumes.list()
               if (v.status == AVAILABLE and v.bootable == 'true')]
    cinder.volume_list(IsA(http.HttpRequest),
                       search_opts=VOLUME_SEARCH_OPTS) \
        .AndReturn(volumes)
    cinder.volume_snapshot_list(IsA(http.HttpRequest),
                                search_opts=SNAPSHOT_SEARCH_OPTS) \
        .AndReturn([])
    api.glance.image_list_detailed(
        IsA(http.HttpRequest),
        filters={'is_public': True, 'status': 'active'}) \
        .AndReturn([self.images.list(), False, False])
    api.glance.image_list_detailed(
        IsA(http.HttpRequest),
        filters={'property-owner_id': self.tenant.id,
                 'status': 'active'}) \
        .AndReturn([[], False, False])
    api.neutron.network_list(IsA(http.HttpRequest),
                             tenant_id=self.tenant.id,
                             shared=False) \
        .AndReturn(self.networks.list()[:1])
    if only_one_network:
        api.neutron.network_list(IsA(http.HttpRequest),
                                 shared=True).AndReturn([])
    else:
        api.neutron.network_list(IsA(http.HttpRequest),
                                 shared=True) \
            .AndReturn(self.networks.list()[1:])
    if test_with_profile:
        policy_profiles = self.policy_profiles.list()
        api.neutron.profile_list(IsA(http.HttpRequest),
                                 'policy').AndReturn(policy_profiles)
    api.nova.extension_supported('DiskConfig',
                                 IsA(http.HttpRequest)) \
        .AndReturn(disk_config)
    api.nova.extension_supported(
        'ConfigDrive', IsA(http.HttpRequest)).AndReturn(config_drive)
    api.nova.tenant_absolute_limits(IsA(http.HttpRequest))\
        .AndReturn(self.limits['absolute'])
    api.nova.flavor_list(IsA(http.HttpRequest)) \
        .AndReturn(self.flavors.list())
    api.nova.flavor_list(IsA(http.HttpRequest)) \
        .AndReturn(self.flavors.list())
    api.nova.keypair_list(IsA(http.HttpRequest)) \
        .AndReturn(self.keypairs.list())
    api.network.security_group_list(IsA(http.HttpRequest)) \
        .AndReturn(self.security_groups.list())
    api.nova.availability_zone_list(IsA(http.HttpRequest)) \
        .AndReturn(self.availability_zones.list())
    self.mox.ReplayAll()
    url = reverse('horizon:project:instances:launch')
    res = self.client.get(url)
    # Reuse the filter computed above rather than duplicating it with a
    # hard-coded 'available' literal (previously risked drifting out of
    # sync with the AVAILABLE constant).
    bootable_volumes = [v.id for v in volumes]
    volume_sources = (res.context_data['workflow'].steps[0].
                      action.fields['volume_id'].choices)
    volume_sources_ids = []
    for volume in volume_sources:
        # Each non-empty choice value looks like "<id>:vol...".
        vol_id = volume[0].split(":vol")[0]
        self.assertTrue(vol_id in bootable_volumes or
                        volume[0] == '')
        if volume[0] != '':
            volume_sources_ids.append(vol_id)
    for volume_id in bootable_volumes:
        # assertIn gives a far more useful failure message than
        # assertTrue(x in y).
        self.assertIn(volume_id, volume_sources_ids)
@helpers.update_settings(
    OPENSTACK_NEUTRON_NETWORK={'profile_support': 'cisco'})
def test_launch_instance_get_bootable_volumes_with_profile(self):
    """Variant: bootable-volume listing with policy-profile support on."""
    self.test_launch_instance_get_bootable_volumes(test_with_profile=True)
@helpers.create_stubs({api.glance: ('image_list_detailed',),
                       api.neutron: ('network_list',
                                     'profile_list',
                                     'port_create',),
                       api.nova: ('extension_supported',
                                  'flavor_list',
                                  'keypair_list',
                                  'availability_zone_list',
                                  'server_create',),
                       api.network: ('security_group_list',),
                       cinder: ('volume_list',
                                'volume_snapshot_list',),
                       quotas: ('tenant_quota_usages',)})
def test_launch_instance_post(self,
                              disk_config=True,
                              config_drive=True,
                              test_with_profile=False,
                              test_with_multi_nics=False):
    """POSTing the launch form calls server_create with the form values.

    Parameterized for the disk-config/config-drive/profile/multi-nic
    variants below.
    """
    flavor = self.flavors.first()
    image = self.images.first()
    keypair = self.keypairs.first()
    server = self.servers.first()
    sec_group = self.security_groups.first()
    avail_zone = self.availability_zones.first()
    customization_script = 'user data'
    nics = [{"net-id": self.networks.first().id, "v4-fixed-ip": ''}]
    quota_usages = self.quota_usages.first()
    # Record the workflow's API calls in order; mox verifies the sequence.
    api.nova.extension_supported('BlockDeviceMappingV2Boot',
                                 IsA(http.HttpRequest)) \
        .AndReturn(True)
    api.nova.flavor_list(IsA(http.HttpRequest)) \
        .AndReturn(self.flavors.list())
    api.nova.keypair_list(IsA(http.HttpRequest)) \
        .AndReturn(self.keypairs.list())
    api.network.security_group_list(IsA(http.HttpRequest)) \
        .AndReturn(self.security_groups.list())
    api.nova.availability_zone_list(IsA(http.HttpRequest)) \
        .AndReturn(self.availability_zones.list())
    api.glance.image_list_detailed(
        IsA(http.HttpRequest),
        filters={'is_public': True, 'status': 'active'}) \
        .AndReturn([self.images.list(), False, False])
    api.glance.image_list_detailed(
        IsA(http.HttpRequest),
        filters={'property-owner_id': self.tenant.id,
                 'status': 'active'}) \
        .AndReturn([[], False, False])
    api.neutron.network_list(IsA(http.HttpRequest),
                             tenant_id=self.tenant.id,
                             shared=False) \
        .AndReturn(self.networks.list()[:1])
    api.neutron.network_list(IsA(http.HttpRequest),
                             shared=True) \
        .AndReturn(self.networks.list()[1:])
    if test_with_profile:
        # With profile support the workflow pre-creates ports and passes
        # port-ids instead of net-ids.
        policy_profiles = self.policy_profiles.list()
        policy_profile_id = self.policy_profiles.first().id
        port_one = self.ports.first()
        nics = [{"port-id": port_one.id}]
        api.neutron.profile_list(
            IsA(http.HttpRequest),
            'policy').AndReturn(policy_profiles)
        api.neutron.port_create(IsA(http.HttpRequest),
                                self.networks.first().id,
                                policy_profile_id=policy_profile_id) \
            .AndReturn(port_one)
        if test_with_multi_nics:
            port_two = self.ports.get(name="port5")
            nics = [{"port-id": port_one.id},
                    {"port-id": port_two.id}]
            # Add a second port to test multiple nics
            api.neutron.port_create(IsA(http.HttpRequest),
                                    self.networks.get(name="net4")['id'],
                                    policy_profile_id=policy_profile_id) \
                .AndReturn(port_two)
    api.nova.extension_supported('DiskConfig',
                                 IsA(http.HttpRequest)) \
        .AndReturn(disk_config)
    api.nova.extension_supported(
        'ConfigDrive', IsA(http.HttpRequest)).AndReturn(config_drive)
    cinder.volume_list(IsA(http.HttpRequest),
                       search_opts=VOLUME_SEARCH_OPTS) \
        .AndReturn([])
    cinder.volume_snapshot_list(IsA(http.HttpRequest),
                                search_opts=SNAPSHOT_SEARCH_OPTS) \
        .AndReturn([])
    if disk_config:
        disk_config_value = u'AUTO'
    else:
        disk_config_value = None
    if config_drive:
        config_drive_value = True
    else:
        config_drive_value = None
    api.nova.server_create(IsA(http.HttpRequest),
                           server.name,
                           image.id,
                           flavor.id,
                           keypair.name,
                           customization_script,
                           [str(sec_group.id)],
                           block_device_mapping=None,
                           block_device_mapping_v2=None,
                           nics=nics,
                           availability_zone=avail_zone.zoneName,
                           instance_count=IsA(int),
                           admin_pass=u'',
                           disk_config=disk_config_value,
                           config_drive=config_drive_value)
    quotas.tenant_quota_usages(IsA(http.HttpRequest)) \
        .AndReturn(quota_usages)
    api.nova.flavor_list(IsA(http.HttpRequest)) \
        .AndReturn(self.flavors.list())
    self.mox.ReplayAll()
    form_data = {'flavor': flavor.id,
                 'source_type': 'image_id',
                 'image_id': image.id,
                 'keypair': keypair.name,
                 'name': server.name,
                 'script_source': 'raw',
                 'script_data': customization_script,
                 'project_id': self.tenants.first().id,
                 'user_id': self.user.id,
                 'groups': str(sec_group.id),
                 'availability_zone': avail_zone.zoneName,
                 'volume_type': '',
                 'network': self.networks.first().id,
                 'count': 1}
    if disk_config:
        form_data['disk_config'] = 'AUTO'
    if config_drive:
        form_data['config_drive'] = True
    if test_with_profile:
        form_data['profile'] = self.policy_profiles.first().id
    if test_with_multi_nics:
        form_data['network'] = [self.networks.first().id,
                                self.networks.get(name="net4")['id']]
    url = reverse('horizon:project:instances:launch')
    res = self.client.post(url, form_data)
    self.assertNoFormErrors(res)
    self.assertRedirectsNoFollow(res, INDEX_URL)
def test_launch_instance_post_no_disk_config_supported(self):
    """Variant: POST with the DiskConfig extension unsupported."""
    self.test_launch_instance_post(disk_config=False)
def test_launch_instance_post_no_config_drive_supported(self):
    """Variant: POST with the ConfigDrive extension unsupported."""
    self.test_launch_instance_post(config_drive=False)
@helpers.update_settings(
    OPENSTACK_NEUTRON_NETWORK={'profile_support': 'cisco'})
def test_launch_instance_post_with_profile(self):
    """Variant: POST with neutron policy-profile support enabled."""
    self.test_launch_instance_post(test_with_profile=True)
@helpers.update_settings(
    OPENSTACK_NEUTRON_NETWORK={'profile_support': 'cisco'})
def test_launch_instance_post_with_profile_and_multi_nics(self):
    """Variant: POST with profile support and multiple NICs."""
    self.test_launch_instance_post(test_with_profile=True,
                                   test_with_multi_nics=True)
def _test_launch_instance_post_with_profile_and_port_error(
    self,
    test_with_multi_nics=False,
):
    """Shared driver: a port_create failure during launch with profiles.

    With multiple NICs, the already-created first port must be deleted
    when creating the second port fails.
    """
    flavor = self.flavors.first()
    image = self.images.first()
    keypair = self.keypairs.first()
    server = self.servers.first()
    sec_group = self.security_groups.first()
    avail_zone = self.availability_zones.first()
    customization_script = 'user data'
    quota_usages = self.quota_usages.first()
    # Record the workflow's API calls in order; mox verifies the sequence.
    api.nova.extension_supported('BlockDeviceMappingV2Boot',
                                 IsA(http.HttpRequest)) \
        .AndReturn(True)
    api.nova.flavor_list(IsA(http.HttpRequest)) \
        .AndReturn(self.flavors.list())
    api.nova.keypair_list(IsA(http.HttpRequest)) \
        .AndReturn(self.keypairs.list())
    api.network.security_group_list(IsA(http.HttpRequest)) \
        .AndReturn(self.security_groups.list())
    api.nova.availability_zone_list(IsA(http.HttpRequest)) \
        .AndReturn(self.availability_zones.list())
    api.glance.image_list_detailed(IsA(http.HttpRequest),
                                   filters={'is_public': True,
                                            'status': 'active'}) \
        .AndReturn([self.images.list(), False, False])
    api.glance.image_list_detailed(
        IsA(http.HttpRequest),
        filters={'property-owner_id': self.tenant.id,
                 'status': 'active'}) \
        .AndReturn([[], False, False])
    api.neutron.network_list(IsA(http.HttpRequest),
                             tenant_id=self.tenant.id,
                             shared=False) \
        .AndReturn(self.networks.list()[:1])
    api.neutron.network_list(IsA(http.HttpRequest),
                             shared=True) \
        .AndReturn(self.networks.list()[1:])
    policy_profiles = self.policy_profiles.list()
    policy_profile_id = self.policy_profiles.first().id
    port_one = self.ports.first()
    api.neutron.profile_list(
        IsA(http.HttpRequest),
        'policy').AndReturn(policy_profiles)
    if test_with_multi_nics:
        api.neutron.port_create(IsA(http.HttpRequest),
                                self.networks.first().id,
                                policy_profile_id=policy_profile_id) \
            .AndReturn(port_one)
        # Add a second port which has the exception to test multiple nics
        api.neutron.port_create(IsA(http.HttpRequest),
                                self.networks.get(name="net4")['id'],
                                policy_profile_id=policy_profile_id) \
            .AndRaise(self.exceptions.neutron)
        # Delete the first port
        api.neutron.port_delete(IsA(http.HttpRequest),
                                port_one.id)
    else:
        api.neutron.port_create(IsA(http.HttpRequest),
                                self.networks.first().id,
                                policy_profile_id=policy_profile_id) \
            .AndRaise(self.exceptions.neutron)
    api.nova.extension_supported('DiskConfig',
                                 IsA(http.HttpRequest)) \
        .AndReturn(True)
    api.nova.extension_supported('ConfigDrive',
                                 IsA(http.HttpRequest)).AndReturn(True)
    cinder.volume_list(IsA(http.HttpRequest),
                       search_opts=VOLUME_SEARCH_OPTS) \
        .AndReturn([])
    cinder.volume_snapshot_list(IsA(http.HttpRequest),
                                search_opts=SNAPSHOT_SEARCH_OPTS) \
        .AndReturn([])
    quotas.tenant_quota_usages(IsA(http.HttpRequest)) \
        .AndReturn(quota_usages)
    api.nova.flavor_list(IsA(http.HttpRequest)) \
        .AndReturn(self.flavors.list())
    self.mox.ReplayAll()
    form_data = {'flavor': flavor.id,
                 'source_type': 'image_id',
                 'image_id': image.id,
                 'keypair': keypair.name,
                 'name': server.name,
                 'script_source': 'raw',
                 'script_data': customization_script,
                 'project_id': self.tenants.first().id,
                 'user_id': self.user.id,
                 'groups': str(sec_group.id),
                 'availability_zone': avail_zone.zoneName,
                 'volume_type': '',
                 'network': self.networks.first().id,
                 'count': 1,
                 'disk_config': 'AUTO',
                 'config_drive': True,
                 'profile': self.policy_profiles.first().id}
    if test_with_multi_nics:
        form_data['network'] = [self.networks.first().id,
                                self.networks.get(name="net4")['id']]
    url = reverse('horizon:project:instances:launch')
    res = self.client.post(url, form_data)
    self.assertNoFormErrors(res)
    self.assertRedirectsNoFollow(res, INDEX_URL)
@helpers.update_settings(
    OPENSTACK_NEUTRON_NETWORK={'profile_support': 'cisco'})
@helpers.create_stubs({api.glance: ('image_list_detailed',),
                       api.neutron: ('network_list',
                                     'profile_list',
                                     'port_create',
                                     'port_delete',),
                       api.nova: ('extension_supported',
                                  'flavor_list',
                                  'keypair_list',
                                  'availability_zone_list',),
                       api.network: ('security_group_list',),
                       cinder: ('volume_list',
                                'volume_snapshot_list',),
                       quotas: ('tenant_quota_usages',)})
def test_launch_instance_post_with_profile_and_port_error(self):
    """Single-NIC variant of the port_create-failure scenario."""
    self._test_launch_instance_post_with_profile_and_port_error()
@helpers.update_settings(
    OPENSTACK_NEUTRON_NETWORK={'profile_support': 'cisco'})
@helpers.create_stubs({api.glance: ('image_list_detailed',),
                       api.neutron: ('network_list',
                                     'profile_list',
                                     'port_create',
                                     'port_delete',),
                       api.nova: ('extension_supported',
                                  'flavor_list',
                                  'keypair_list',
                                  'availability_zone_list',),
                       api.network: ('security_group_list',),
                       cinder: ('volume_list',
                                'volume_snapshot_list',),
                       quotas: ('tenant_quota_usages',)})
def test_lnch_inst_post_w_profile_and_multi_nics_w_port_error(self):
    """Multi-NIC variant of the port_create-failure scenario."""
    self._test_launch_instance_post_with_profile_and_port_error(
        test_with_multi_nics=True)
    @helpers.create_stubs({api.glance: ('image_list_detailed',),
                           api.neutron: ('network_list',
                                         'profile_list',
                                         'port_create',),
                           api.nova: ('extension_supported',
                                      'flavor_list',
                                      'keypair_list',
                                      'availability_zone_list',
                                      'server_create',),
                           api.network: ('security_group_list',),
                           cinder: ('volume_list',
                                    'volume_snapshot_list',),
                           quotas: ('tenant_quota_usages',)})
    def test_launch_instance_post_boot_from_volume(
        self,
        test_with_profile=False,
        test_with_bdmv2=False
    ):
        """POST the launch form booting from a Cinder volume.

        When ``test_with_bdmv2`` is set, the BlockDeviceMappingV2Boot
        extension is reported as supported and the server_create stub expects
        a ``block_device_mapping_v2`` list; otherwise the legacy
        ``block_device_mapping`` dict is expected.  When ``test_with_profile``
        is set, a port is pre-created against a policy profile and its id is
        passed as the nic instead of a net-id.
        """
        flavor = self.flavors.first()
        keypair = self.keypairs.first()
        server = self.servers.first()
        volume = self.volumes.first()
        sec_group = self.security_groups.first()
        avail_zone = self.availability_zones.first()
        customization_script = 'user data'
        device_name = u'vda'
        volume_choice = "%s:vol" % volume.id
        # Build the block-device mapping the server_create stub must receive,
        # in either v2 (list) or legacy (dict) form.
        if test_with_bdmv2:
            volume_source_id = volume.id.split(':')[0]
            block_device_mapping = None
            block_device_mapping_2 = [
                {'device_name': u'vda',
                 'source_type': 'volume',
                 'destination_type': 'volume',
                 'delete_on_termination': False,
                 'uuid': volume_source_id,
                 'boot_index': '0',
                 'volume_size': 1
                 }
            ]
        else:
            block_device_mapping = {device_name: u"%s::False" % volume_choice}
            block_device_mapping_2 = None
        nics = [{"net-id": self.networks.first().id, "v4-fixed-ip": ''}]
        quota_usages = self.quota_usages.first()
        # --- mox record phase: the exact sequence of API calls the workflow
        # is expected to make while processing the POST. ---
        api.nova.extension_supported('BlockDeviceMappingV2Boot',
                                     IsA(http.HttpRequest)) \
            .AndReturn(test_with_bdmv2)
        api.nova.flavor_list(IsA(http.HttpRequest)) \
            .AndReturn(self.flavors.list())
        api.nova.keypair_list(IsA(http.HttpRequest)) \
            .AndReturn(self.keypairs.list())
        api.network.security_group_list(IsA(http.HttpRequest)) \
            .AndReturn(self.security_groups.list())
        api.nova.availability_zone_list(IsA(http.HttpRequest)) \
            .AndReturn(self.availability_zones.list())
        api.glance.image_list_detailed(
            IsA(http.HttpRequest),
            filters={'is_public': True, 'status': 'active'}) \
            .AndReturn([self.images.list(), False, False])
        api.glance.image_list_detailed(
            IsA(http.HttpRequest),
            filters={'property-owner_id': self.tenant.id,
                     'status': 'active'}) \
            .AndReturn([[], False, False])
        api.neutron.network_list(IsA(http.HttpRequest),
                                 tenant_id=self.tenant.id,
                                 shared=False) \
            .AndReturn(self.networks.list()[:1])
        api.neutron.network_list(IsA(http.HttpRequest),
                                 shared=True) \
            .AndReturn(self.networks.list()[1:])
        api.nova.flavor_list(IsA(http.HttpRequest)) \
            .AndReturn(self.flavors.list())
        if test_with_profile:
            # With profile support a port is created up front and the
            # instance is wired to it via port-id.
            policy_profiles = self.policy_profiles.list()
            policy_profile_id = self.policy_profiles.first().id
            port = self.ports.first()
            api.neutron.profile_list(
                IsA(http.HttpRequest),
                'policy').AndReturn(policy_profiles)
            api.neutron.port_create(
                IsA(http.HttpRequest),
                self.networks.first().id,
                policy_profile_id=policy_profile_id).AndReturn(port)
            nics = [{"port-id": port.id}]
        api.nova.extension_supported('DiskConfig',
                                     IsA(http.HttpRequest)) \
            .AndReturn(True)
        api.nova.extension_supported('ConfigDrive',
                                     IsA(http.HttpRequest)).AndReturn(True)
        # Only available, bootable volumes should be offered as boot sources.
        volumes = [v for v in self.volumes.list()
                   if (v.status == AVAILABLE and v.bootable == 'true')]
        cinder.volume_list(IsA(http.HttpRequest),
                           search_opts=VOLUME_SEARCH_OPTS) \
            .AndReturn(volumes)
        cinder.volume_snapshot_list(IsA(http.HttpRequest),
                                    search_opts=SNAPSHOT_SEARCH_OPTS) \
            .AndReturn([])
        api.nova.extension_supported('BlockDeviceMappingV2Boot',
                                     IsA(http.HttpRequest)) \
            .AndReturn(test_with_bdmv2)
        api.nova.server_create(IsA(http.HttpRequest),
                               server.name,
                               '',
                               flavor.id,
                               keypair.name,
                               customization_script,
                               [str(sec_group.id)],
                               block_device_mapping=block_device_mapping,
                               block_device_mapping_v2=block_device_mapping_2,
                               nics=nics,
                               availability_zone=avail_zone.zoneName,
                               instance_count=IsA(int),
                               admin_pass=u'',
                               disk_config=u'AUTO',
                               config_drive=True)
        quotas.tenant_quota_usages(IsA(http.HttpRequest)) \
            .AndReturn(quota_usages)
        self.mox.ReplayAll()
        # --- replay phase: submit the launch form and verify the redirect. ---
        form_data = {'flavor': flavor.id,
                     'source_type': 'volume_id',
                     'source_id': volume_choice,
                     'keypair': keypair.name,
                     'name': server.name,
                     'script_source': 'raw',
                     'script_data': customization_script,
                     'project_id': self.tenants.first().id,
                     'user_id': self.user.id,
                     'groups': str(sec_group.id),
                     'availability_zone': avail_zone.zoneName,
                     'volume_size': '1',
                     'volume_id': volume_choice,
                     'device_name': device_name,
                     'network': self.networks.first().id,
                     'count': 1,
                     'disk_config': 'AUTO',
                     'config_drive': True}
        if test_with_profile:
            form_data['profile'] = self.policy_profiles.first().id
        url = reverse('horizon:project:instances:launch')
        res = self.client.post(url, form_data)
        self.assertNoFormErrors(res)
        self.assertRedirectsNoFollow(res, INDEX_URL)
    def test_launch_instance_post_boot_from_volume_with_bdmv2(self):
        """Boot-from-volume launch using the v2 block-device-mapping path."""
        self.test_launch_instance_post_boot_from_volume(test_with_bdmv2=True)
    @helpers.update_settings(
        OPENSTACK_NEUTRON_NETWORK={'profile_support': 'cisco'})
    def test_launch_instance_post_boot_from_volume_with_profile(self):
        """Boot-from-volume launch with Neutron profile_support enabled."""
        self.test_launch_instance_post_boot_from_volume(test_with_profile=True)
    @helpers.create_stubs({api.glance: ('image_list_detailed',),
                           api.neutron: ('network_list',
                                         'profile_list',
                                         'port_create'),
                           api.nova: ('server_create',
                                      'extension_supported',
                                      'flavor_list',
                                      'keypair_list',
                                      'availability_zone_list',
                                      'tenant_absolute_limits',),
                           api.network: ('security_group_list',),
                           cinder: ('volume_list',
                                    'volume_snapshot_list',),
                           quotas: ('tenant_quota_usages',)})
    def test_launch_instance_post_no_images_available_boot_from_volume(
        self,
        test_with_profile=False,
    ):
        """Launch succeeds from a volume even when no image is selected.

        The form posts ``source_type: 'volume_id'`` with an empty image, and
        server_create is expected to be called with the legacy
        block-device-mapping dict (the second BlockDeviceMappingV2Boot check
        returns False here).
        """
        flavor = self.flavors.first()
        keypair = self.keypairs.first()
        server = self.servers.first()
        volume = self.volumes.first()
        sec_group = self.security_groups.first()
        avail_zone = self.availability_zones.first()
        customization_script = 'user data'
        device_name = u'vda'
        volume_choice = "%s:vol" % volume.id
        block_device_mapping = {device_name: u"%s::False" % volume_choice}
        nics = [{"net-id": self.networks.first().id, "v4-fixed-ip": ''}]
        quota_usages = self.quota_usages.first()
        # --- mox record phase ---
        api.nova.extension_supported('BlockDeviceMappingV2Boot',
                                     IsA(http.HttpRequest)) \
            .AndReturn(True)
        api.nova.flavor_list(IsA(http.HttpRequest)) \
            .AndReturn(self.flavors.list())
        api.nova.keypair_list(IsA(http.HttpRequest)) \
            .AndReturn(self.keypairs.list())
        api.network.security_group_list(IsA(http.HttpRequest)) \
            .AndReturn(self.security_groups.list())
        api.nova.availability_zone_list(IsA(http.HttpRequest)) \
            .AndReturn(self.availability_zones.list())
        api.glance.image_list_detailed(
            IsA(http.HttpRequest),
            filters={'is_public': True, 'status': 'active'}) \
            .AndReturn([self.images.list(), False, False])
        api.glance.image_list_detailed(
            IsA(http.HttpRequest),
            filters={'property-owner_id': self.tenant.id,
                     'status': 'active'}) \
            .AndReturn([[], False, False])
        api.neutron.network_list(IsA(http.HttpRequest),
                                 tenant_id=self.tenant.id,
                                 shared=False) \
            .AndReturn(self.networks.list()[:1])
        api.neutron.network_list(IsA(http.HttpRequest),
                                 shared=True) \
            .AndReturn(self.networks.list()[1:])
        api.nova.flavor_list(IsA(http.HttpRequest)) \
            .AndReturn(self.flavors.list())
        if test_with_profile:
            # Profile support: pre-create a port and attach via port-id.
            policy_profiles = self.policy_profiles.list()
            policy_profile_id = self.policy_profiles.first().id
            port = self.ports.first()
            api.neutron.profile_list(
                IsA(http.HttpRequest),
                'policy').AndReturn(policy_profiles)
            api.neutron.port_create(
                IsA(http.HttpRequest),
                self.networks.first().id,
                policy_profile_id=policy_profile_id).AndReturn(port)
            nics = [{"port-id": port.id}]
        api.nova.extension_supported('DiskConfig',
                                     IsA(http.HttpRequest)) \
            .AndReturn(True)
        api.nova.extension_supported('ConfigDrive',
                                     IsA(http.HttpRequest)).AndReturn(True)
        # Only available, bootable volumes are boot-source candidates.
        volumes = [v for v in self.volumes.list()
                   if (v.status == AVAILABLE and v.bootable == 'true')]
        cinder.volume_list(IsA(http.HttpRequest),
                           search_opts=VOLUME_SEARCH_OPTS) \
            .AndReturn(volumes)
        cinder.volume_snapshot_list(IsA(http.HttpRequest),
                                    search_opts=SNAPSHOT_SEARCH_OPTS) \
            .AndReturn([])
        quotas.tenant_quota_usages(IsA(http.HttpRequest)) \
            .AndReturn(quota_usages)
        # BDMv2 reported unsupported at create time -> legacy mapping is used.
        api.nova.extension_supported('BlockDeviceMappingV2Boot',
                                     IsA(http.HttpRequest)) \
            .AndReturn(False)
        api.nova.server_create(IsA(http.HttpRequest),
                               server.name,
                               '',
                               flavor.id,
                               keypair.name,
                               customization_script,
                               [str(sec_group.id)],
                               block_device_mapping=block_device_mapping,
                               block_device_mapping_v2=None,
                               nics=nics,
                               availability_zone=avail_zone.zoneName,
                               instance_count=IsA(int),
                               admin_pass=u'',
                               disk_config='MANUAL',
                               config_drive=True)
        self.mox.ReplayAll()
        # --- replay phase: POST without an image id. ---
        form_data = {'flavor': flavor.id,
                     'source_type': 'volume_id',
                     # 'image_id': '',
                     'keypair': keypair.name,
                     'name': server.name,
                     'script_source': 'raw',
                     'script_data': customization_script,
                     'project_id': self.tenants.first().id,
                     'user_id': self.user.id,
                     'groups': str(sec_group.id),
                     'availability_zone': avail_zone.zoneName,
                     'network': self.networks.first().id,
                     'volume_type': 'volume_id',
                     'volume_id': volume_choice,
                     'device_name': device_name,
                     'count': 1,
                     'disk_config': 'MANUAL',
                     'config_drive': True}
        if test_with_profile:
            form_data['profile'] = self.policy_profiles.first().id
        url = reverse('horizon:project:instances:launch')
        res = self.client.post(url, form_data)
        self.assertNoFormErrors(res)
        self.assertRedirectsNoFollow(res, INDEX_URL)
    @helpers.update_settings(
        OPENSTACK_NEUTRON_NETWORK={'profile_support': 'cisco'})
    def test_lnch_inst_post_no_images_avail_boot_from_vol_with_profile(self):
        """No-image boot-from-volume launch with profile_support enabled."""
        self.test_launch_instance_post_no_images_available_boot_from_volume(
            test_with_profile=True)
    @helpers.create_stubs({api.glance: ('image_list_detailed',),
                           api.neutron: ('network_list',
                                         'profile_list',),
                           api.nova: ('extension_supported',
                                      'flavor_list',
                                      'keypair_list',
                                      'availability_zone_list',
                                      'tenant_absolute_limits',),
                           api.network: ('security_group_list',),
                           cinder: ('volume_list',
                                    'volume_snapshot_list',),
                           quotas: ('tenant_quota_usages',)})
    def test_launch_instance_post_no_images_available(self,
                                                      test_with_profile=False):
        """Image-sourced launch with an empty image id must fail validation.

        Both glance listings return no images and the form posts
        ``image_id: ''``, so the workflow should render a single form error
        ("You must select an image.") instead of creating a server.
        """
        flavor = self.flavors.first()
        keypair = self.keypairs.first()
        server = self.servers.first()
        sec_group = self.security_groups.first()
        avail_zone = self.availability_zones.first()
        customization_script = 'user data'
        quota_usages = self.quota_usages.first()
        # --- mox record phase ---
        api.nova.extension_supported('BlockDeviceMappingV2Boot',
                                     IsA(http.HttpRequest)) \
            .AndReturn(True)
        api.nova.flavor_list(IsA(http.HttpRequest)) \
            .AndReturn(self.flavors.list())
        api.nova.tenant_absolute_limits(IsA(http.HttpRequest)) \
            .AndReturn(self.limits['absolute'])
        # Both image listings come back empty -> nothing to boot from.
        api.glance.image_list_detailed(IsA(http.HttpRequest),
                                       filters={'is_public': True,
                                                'status': 'active'}) \
            .AndReturn([[], False, False])
        api.glance.image_list_detailed(
            IsA(http.HttpRequest),
            filters={'property-owner_id': self.tenant.id,
                     'status': 'active'}) \
            .AndReturn([[], False, False])
        api.neutron.network_list(IsA(http.HttpRequest),
                                 tenant_id=self.tenant.id,
                                 shared=False) \
            .AndReturn(self.networks.list()[:1])
        api.neutron.network_list(IsA(http.HttpRequest),
                                 shared=True) \
            .AndReturn(self.networks.list()[1:])
        api.nova.flavor_list(IsA(http.HttpRequest)) \
            .AndReturn(self.flavors.list())
        if test_with_profile:
            policy_profiles = self.policy_profiles.list()
            api.neutron.profile_list(IsA(http.HttpRequest),
                                     'policy').AndReturn(policy_profiles)
        api.nova.extension_supported('DiskConfig',
                                     IsA(http.HttpRequest)) \
            .AndReturn(True)
        api.nova.extension_supported('ConfigDrive',
                                     IsA(http.HttpRequest)).AndReturn(True)
        api.nova.flavor_list(IsA(http.HttpRequest)) \
            .AndReturn(self.flavors.list())
        api.nova.keypair_list(IsA(http.HttpRequest)) \
            .AndReturn(self.keypairs.list())
        api.network.security_group_list(IsA(http.HttpRequest)) \
            .AndReturn(self.security_groups.list())
        api.nova.availability_zone_list(IsA(http.HttpRequest)) \
            .AndReturn(self.availability_zones.list())
        cinder.volume_list(IsA(http.HttpRequest),
                           search_opts=VOLUME_SEARCH_OPTS) \
            .AndReturn([])
        cinder.volume_snapshot_list(IsA(http.HttpRequest),
                                    search_opts=SNAPSHOT_SEARCH_OPTS) \
            .AndReturn([])
        quotas.tenant_quota_usages(IsA(http.HttpRequest)) \
            .AndReturn(quota_usages)
        self.mox.ReplayAll()
        # --- replay phase: expect a validation error, not a redirect. ---
        form_data = {'flavor': flavor.id,
                     'source_type': 'image_id',
                     'image_id': '',
                     'keypair': keypair.name,
                     'name': server.name,
                     'script_source': 'raw',
                     'script_data': customization_script,
                     'project_id': self.tenants.first().id,
                     'user_id': self.user.id,
                     'groups': str(sec_group.id),
                     'availability_zone': avail_zone.zoneName,
                     'volume_type': '',
                     'count': 1}
        url = reverse('horizon:project:instances:launch')
        res = self.client.post(url, form_data)
        self.assertFormErrors(res, 1, "You must select an image.")
        self.assertTemplateUsed(res, views.WorkflowView.template_name)
    @helpers.update_settings(
        OPENSTACK_NEUTRON_NETWORK={'profile_support': 'cisco'})
    def test_launch_instance_post_no_images_available_with_profile(self):
        """No-images validation error with profile_support enabled."""
        self.test_launch_instance_post_no_images_available(
            test_with_profile=True)
    @helpers.create_stubs({
        api.glance: ('image_list_detailed',),
        api.neutron: ('network_list',
                      'profile_list',
                      'port_create',),
        api.nova: ('extension_supported',
                   'flavor_list',
                   'keypair_list',
                   'availability_zone_list',
                   'server_create',),
        api.network: ('security_group_list',),
        cinder: ('volume_list',
                 'volume_snapshot_list',),
        quotas: ('tenant_quota_usages',)})
    def test_launch_instance_post_boot_from_snapshot(
        self,
        test_with_profile=False,
        test_with_bdmv2=False
    ):
        """POST the launch form booting from a Cinder volume snapshot.

        Mirrors the boot-from-volume test: ``test_with_bdmv2`` switches
        between the v2 mapping list (source_type 'snapshot') and the legacy
        mapping dict; ``test_with_profile`` swaps the net-id nic for a
        pre-created port.
        """
        flavor = self.flavors.first()
        keypair = self.keypairs.first()
        server = self.servers.first()
        snapshot = self.cinder_volume_snapshots.first()
        sec_group = self.security_groups.first()
        avail_zone = self.availability_zones.first()
        customization_script = 'user data'
        device_name = u'vda'
        snapshot_choice = "%s:snap" % snapshot.id
        # Expected block-device mapping, v2 list or legacy dict.
        if test_with_bdmv2:
            snapshot_source_id = snapshot.id.split(':')[0]
            block_device_mapping = None
            block_device_mapping_2 = [
                {'device_name': u'vda',
                 'source_type': 'snapshot',
                 'destination_type': 'volume',
                 'delete_on_termination': 0,
                 'uuid': snapshot_source_id,
                 'boot_index': '0',
                 'volume_size': 1
                 }
            ]
        else:
            block_device_mapping = {device_name:
                                    u"%s::False" % snapshot_choice}
            block_device_mapping_2 = None
        nics = [{"net-id": self.networks.first().id, "v4-fixed-ip": ''}]
        quota_usages = self.quota_usages.first()
        # --- mox record phase ---
        api.nova.extension_supported('BlockDeviceMappingV2Boot',
                                     IsA(http.HttpRequest)) \
            .AndReturn(test_with_bdmv2)
        api.nova.flavor_list(IsA(http.HttpRequest)) \
            .AndReturn(self.flavors.list())
        api.nova.keypair_list(IsA(http.HttpRequest)) \
            .AndReturn(self.keypairs.list())
        api.network.security_group_list(IsA(http.HttpRequest)) \
            .AndReturn(self.security_groups.list())
        api.nova.availability_zone_list(IsA(http.HttpRequest)) \
            .AndReturn(self.availability_zones.list())
        api.glance.image_list_detailed(
            IsA(http.HttpRequest),
            filters={'is_public': True, 'status': 'active'}) \
            .AndReturn([self.images.list(), False, False])
        api.glance.image_list_detailed(
            IsA(http.HttpRequest),
            filters={'property-owner_id': self.tenant.id,
                     'status': 'active'}) \
            .AndReturn([[], False, False])
        api.neutron.network_list(IsA(http.HttpRequest),
                                 tenant_id=self.tenant.id,
                                 shared=False) \
            .AndReturn(self.networks.list()[:1])
        api.neutron.network_list(IsA(http.HttpRequest),
                                 shared=True) \
            .AndReturn(self.networks.list()[1:])
        api.nova.flavor_list(IsA(http.HttpRequest)) \
            .AndReturn(self.flavors.list())
        if test_with_profile:
            # Profile support: pre-create a port and attach via port-id.
            policy_profiles = self.policy_profiles.list()
            policy_profile_id = self.policy_profiles.first().id
            port = self.ports.first()
            api.neutron.profile_list(
                IsA(http.HttpRequest),
                'policy').AndReturn(policy_profiles)
            api.neutron.port_create(
                IsA(http.HttpRequest),
                self.networks.first().id,
                policy_profile_id=policy_profile_id).AndReturn(port)
            nics = [{"port-id": port.id}]
        api.nova.extension_supported('DiskConfig',
                                     IsA(http.HttpRequest)) \
            .AndReturn(True)
        api.nova.extension_supported('ConfigDrive',
                                     IsA(http.HttpRequest)).AndReturn(True)
        # Only snapshots in AVAILABLE status are boot-source candidates.
        snapshots = [v for v in self.cinder_volume_snapshots.list()
                     if (v.status == AVAILABLE)]
        cinder.volume_list(IsA(http.HttpRequest),
                           search_opts=VOLUME_SEARCH_OPTS) \
            .AndReturn([])
        cinder.volume_snapshot_list(IsA(http.HttpRequest),
                                    search_opts=SNAPSHOT_SEARCH_OPTS) \
            .AndReturn(snapshots)
        api.nova.extension_supported('BlockDeviceMappingV2Boot',
                                     IsA(http.HttpRequest)) \
            .AndReturn(test_with_bdmv2)
        api.nova.server_create(IsA(http.HttpRequest),
                               server.name,
                               '',
                               flavor.id,
                               keypair.name,
                               customization_script,
                               [str(sec_group.id)],
                               block_device_mapping=block_device_mapping,
                               block_device_mapping_v2=block_device_mapping_2,
                               nics=nics,
                               availability_zone=avail_zone.zoneName,
                               instance_count=IsA(int),
                               admin_pass=u'',
                               disk_config=u'AUTO',
                               config_drive=True)
        quotas.tenant_quota_usages(IsA(http.HttpRequest)) \
            .AndReturn(quota_usages)
        self.mox.ReplayAll()
        # --- replay phase ---
        form_data = {'flavor': flavor.id,
                     'source_type': 'volume_snapshot_id',
                     'source_id': snapshot_choice,
                     'keypair': keypair.name,
                     'name': server.name,
                     'script_source': 'raw',
                     'script_data': customization_script,
                     'project_id': self.tenants.first().id,
                     'user_id': self.user.id,
                     'groups': str(sec_group.id),
                     'availability_zone': avail_zone.zoneName,
                     'volume_size': '1',
                     'volume_snapshot_id': snapshot_choice,
                     'device_name': device_name,
                     'network': self.networks.first().id,
                     'count': 1,
                     'disk_config': 'AUTO',
                     'config_drive': True}
        if test_with_profile:
            form_data['profile'] = self.policy_profiles.first().id
        url = reverse('horizon:project:instances:launch')
        res = self.client.post(url, form_data)
        self.assertNoFormErrors(res)
        self.assertRedirectsNoFollow(res, INDEX_URL)
    def test_launch_instance_post_boot_from_snapshot_with_bdmv2(self):
        """Boot-from-snapshot launch using the v2 block-device-mapping path."""
        self.test_launch_instance_post_boot_from_snapshot(test_with_bdmv2=True)
    @helpers.update_settings(
        OPENSTACK_NEUTRON_NETWORK={'profile_support': 'cisco'})
    def test_launch_instance_post_boot_from_snapshot_with_profile(self):
        """Boot-from-snapshot launch with Neutron profile_support enabled."""
        self.test_launch_instance_post_boot_from_snapshot(
            test_with_profile=True)
    @helpers.create_stubs({
        api.glance: ('image_list_detailed',),
        api.neutron: ('network_list',
                      'profile_list',
                      'port_create',),
        api.nova: ('extension_supported',
                   'flavor_list',
                   'keypair_list',
                   'availability_zone_list',
                   'server_create',),
        api.network: ('security_group_list',),
        cinder: ('volume_list',
                 'volume_snapshot_list',),
        quotas: ('tenant_quota_usages',)})
    def test_launch_instance_post_boot_from_snapshot_error(
        self,
        test_with_profile=False,
    ):
        """Posting a bogus instance-snapshot id must raise a form error.

        server_create is stubbed but never expected to run; the workflow
        should stop at validation with "You must select a snapshot."
        """
        flavor = self.flavors.first()
        keypair = self.keypairs.first()
        server = self.servers.first()
        avail_zone = self.availability_zones.first()
        quota_usages = self.quota_usages.first()
        # --- mox record phase (no server_create expected) ---
        api.nova.extension_supported('BlockDeviceMappingV2Boot',
                                     IsA(http.HttpRequest)) \
            .AndReturn(True)
        api.nova.flavor_list(IsA(http.HttpRequest)) \
            .AndReturn(self.flavors.list())
        api.glance.image_list_detailed(IsA(http.HttpRequest),
                                       filters={'is_public': True,
                                                'status': 'active'}) \
            .AndReturn([[], False, False])
        api.glance.image_list_detailed(
            IsA(http.HttpRequest),
            filters={'property-owner_id': self.tenant.id,
                     'status': 'active'}) \
            .AndReturn([[], False, False])
        api.neutron.network_list(IsA(http.HttpRequest),
                                 tenant_id=self.tenant.id,
                                 shared=False) \
            .AndReturn(self.networks.list()[:1])
        api.neutron.network_list(IsA(http.HttpRequest),
                                 shared=True) \
            .AndReturn(self.networks.list()[1:])
        api.nova.flavor_list(IsA(http.HttpRequest)) \
            .AndReturn(self.flavors.list())
        api.nova.keypair_list(IsA(http.HttpRequest)) \
            .AndReturn(self.keypairs.list())
        api.network.security_group_list(IsA(http.HttpRequest)) \
            .AndReturn(self.security_groups.list())
        api.nova.availability_zone_list(IsA(http.HttpRequest)) \
            .AndReturn(self.availability_zones.list())
        api.nova.extension_supported('DiskConfig',
                                     IsA(http.HttpRequest)) \
            .AndReturn(True)
        api.nova.extension_supported('ConfigDrive',
                                     IsA(http.HttpRequest)).AndReturn(True)
        cinder.volume_list(IsA(http.HttpRequest),
                           search_opts=VOLUME_SEARCH_OPTS) \
            .AndReturn([])
        cinder.volume_snapshot_list(IsA(http.HttpRequest),
                                    search_opts=SNAPSHOT_SEARCH_OPTS) \
            .AndReturn([])
        quotas.tenant_quota_usages(IsA(http.HttpRequest)) \
            .AndReturn(quota_usages)
        self.mox.ReplayAll()
        # --- replay phase: POST an id that matches no known snapshot. ---
        bad_snapshot_id = 'a-bogus-id'
        form_data = {'flavor': flavor.id,
                     'source_type': 'instance_snapshot_id',
                     'instance_snapshot_id': bad_snapshot_id,
                     'keypair': keypair.name,
                     'name': server.name,
                     'script_source': 'raw',
                     'availability_zone': avail_zone.zoneName,
                     'network': self.networks.first().id,
                     'volume_id': '',
                     'volume_snapshot_id': '',
                     'image_id': '',
                     'device_name': 'vda',
                     'count': 1,
                     'profile': '',
                     'customization_script': ''}
        url = reverse('horizon:project:instances:launch')
        res = self.client.post(url, form_data)
        self.assertFormErrors(res, 1, "You must select a snapshot.")
    @helpers.create_stubs({api.glance: ('image_list_detailed',),
                           api.neutron: ('network_list',
                                         'profile_list',),
                           cinder: ('volume_list',
                                    'volume_snapshot_list',),
                           api.network: ('security_group_list',),
                           api.nova: ('extension_supported',
                                      'flavor_list',
                                      'keypair_list',
                                      'tenant_absolute_limits',
                                      'availability_zone_list',)})
    def test_launch_flavorlist_error(self,
                                     test_with_profile=False):
        """The launch workflow still renders when flavor_list raises.

        Both recorded flavor_list calls raise the stock nova exception; a GET
        of the launch URL should nevertheless use the workflow template.
        """
        # --- mox record phase ---
        api.nova.extension_supported('BlockDeviceMappingV2Boot',
                                     IsA(http.HttpRequest)) \
            .AndReturn(True)
        cinder.volume_list(IsA(http.HttpRequest),
                           search_opts=VOLUME_SEARCH_OPTS) \
            .AndReturn([])
        cinder.volume_snapshot_list(IsA(http.HttpRequest),
                                    search_opts=SNAPSHOT_SEARCH_OPTS) \
            .AndReturn([])
        api.glance.image_list_detailed(
            IsA(http.HttpRequest),
            filters={'is_public': True, 'status': 'active'}) \
            .AndReturn([self.images.list(), False, False])
        api.glance.image_list_detailed(
            IsA(http.HttpRequest),
            filters={'property-owner_id': self.tenant.id,
                     'status': 'active'}) \
            .AndReturn([[], False, False])
        api.neutron.network_list(IsA(http.HttpRequest),
                                 tenant_id=self.tenant.id,
                                 shared=False) \
            .AndReturn(self.networks.list()[:1])
        api.neutron.network_list(IsA(http.HttpRequest),
                                 shared=True) \
            .AndReturn(self.networks.list()[1:])
        if test_with_profile:
            policy_profiles = self.policy_profiles.list()
            api.neutron.profile_list(IsA(http.HttpRequest),
                                     'policy').AndReturn(policy_profiles)
        api.nova.extension_supported('DiskConfig',
                                     IsA(http.HttpRequest)) \
            .AndReturn(True)
        api.nova.extension_supported('ConfigDrive',
                                     IsA(http.HttpRequest)).AndReturn(True)
        api.nova.tenant_absolute_limits(IsA(http.HttpRequest)) \
            .AndReturn(self.limits['absolute'])
        # Both flavor listings fail with the canned nova exception.
        api.nova.flavor_list(IsA(http.HttpRequest)) \
            .AndRaise(self.exceptions.nova)
        api.nova.flavor_list(IsA(http.HttpRequest)) \
            .AndRaise(self.exceptions.nova)
        api.nova.keypair_list(IsA(http.HttpRequest)) \
            .AndReturn(self.keypairs.list())
        api.network.security_group_list(IsA(http.HttpRequest)) \
            .AndReturn(self.security_groups.list())
        api.nova.availability_zone_list(IsA(http.HttpRequest)) \
            .AndReturn(self.availability_zones.list())
        self.mox.ReplayAll()
        # --- replay phase: GET the form and check it still renders. ---
        url = reverse('horizon:project:instances:launch')
        res = self.client.get(url)
        self.assertTemplateUsed(res, views.WorkflowView.template_name)
    @helpers.update_settings(
        OPENSTACK_NEUTRON_NETWORK={'profile_support': 'cisco'})
    def test_launch_flavorlist_error_with_profile(self):
        """Flavor-list failure scenario with profile_support enabled."""
        self.test_launch_flavorlist_error(test_with_profile=True)
    @helpers.create_stubs({api.glance: ('image_list_detailed',),
                           api.neutron: ('network_list',
                                         'profile_list',
                                         'port_create',
                                         'port_delete'),
                           api.nova: ('extension_supported',
                                      'flavor_list',
                                      'keypair_list',
                                      'availability_zone_list',
                                      'server_create',),
                           api.network: ('security_group_list',),
                           cinder: ('volume_list',
                                    'volume_snapshot_list',),
                           quotas: ('tenant_quota_usages',)})
    def test_launch_form_keystone_exception(self,
                                            test_with_profile=False):
        """server_create raising a keystone error still redirects to index.

        With profile support the pre-created port must be cleaned up
        (port_delete) after the failed create.
        """
        flavor = self.flavors.first()
        image = self.images.first()
        keypair = self.keypairs.first()
        server = self.servers.first()
        sec_group = self.security_groups.first()
        avail_zone = self.availability_zones.first()
        customization_script = 'user data'
        nics = [{"net-id": self.networks.first().id, "v4-fixed-ip": ''}]
        quota_usages = self.quota_usages.first()
        # --- mox record phase ---
        api.nova.extension_supported('BlockDeviceMappingV2Boot',
                                     IsA(http.HttpRequest)) \
            .AndReturn(True)
        volumes = [v for v in self.volumes.list()
                   if (v.status == AVAILABLE and v.bootable == 'true')]
        cinder.volume_list(IsA(http.HttpRequest),
                           search_opts=VOLUME_SEARCH_OPTS) \
            .AndReturn(volumes)
        # NOTE(review): the name 'volumes' is reused here for the list fed to
        # volume_snapshot_list; a distinct name (e.g. 'snapshots') would be
        # clearer, though the filter is over self.volumes in both cases.
        volumes = [v for v in self.volumes.list()
                   if (v.status == AVAILABLE)]
        cinder.volume_snapshot_list(IsA(http.HttpRequest),
                                    search_opts=SNAPSHOT_SEARCH_OPTS) \
            .AndReturn(volumes)
        api.nova.flavor_list(IgnoreArg()).AndReturn(self.flavors.list())
        api.nova.keypair_list(IgnoreArg()).AndReturn(self.keypairs.list())
        api.network.security_group_list(IsA(http.HttpRequest)) \
            .AndReturn(self.security_groups.list())
        api.nova.availability_zone_list(IsA(http.HttpRequest)) \
            .AndReturn(self.availability_zones.list())
        api.glance.image_list_detailed(
            IsA(http.HttpRequest),
            filters={'is_public': True, 'status': 'active'}) \
            .AndReturn([self.images.list(), False, False])
        api.glance.image_list_detailed(
            IsA(http.HttpRequest),
            filters={'property-owner_id': self.tenant.id,
                     'status': 'active'}) \
            .AndReturn([[], False, False])
        api.neutron.network_list(IsA(http.HttpRequest),
                                 tenant_id=self.tenant.id,
                                 shared=False) \
            .AndReturn(self.networks.list()[:1])
        api.neutron.network_list(IsA(http.HttpRequest),
                                 shared=True) \
            .AndReturn(self.networks.list()[1:])
        if test_with_profile:
            # Profile support: pre-create a port and attach via port-id.
            policy_profiles = self.policy_profiles.list()
            policy_profile_id = self.policy_profiles.first().id
            port = self.ports.first()
            api.neutron.profile_list(
                IsA(http.HttpRequest),
                'policy').AndReturn(policy_profiles)
            api.neutron.port_create(
                IsA(http.HttpRequest),
                self.networks.first().id,
                policy_profile_id=policy_profile_id).AndReturn(port)
            nics = [{"port-id": port.id}]
        api.nova.extension_supported('DiskConfig',
                                     IsA(http.HttpRequest)) \
            .AndReturn(True)
        api.nova.extension_supported('ConfigDrive',
                                     IsA(http.HttpRequest)).AndReturn(True)
        # The create itself fails with the canned keystone exception.
        api.nova.server_create(IsA(http.HttpRequest),
                               server.name,
                               image.id,
                               flavor.id,
                               keypair.name,
                               customization_script,
                               [str(sec_group.id)],
                               block_device_mapping=None,
                               block_device_mapping_v2=None,
                               nics=nics,
                               availability_zone=avail_zone.zoneName,
                               instance_count=IsA(int),
                               admin_pass='password',
                               disk_config='AUTO',
                               config_drive=False) \
            .AndRaise(self.exceptions.keystone)
        if test_with_profile:
            # Failed create -> the pre-created port must be deleted.
            api.neutron.port_delete(IsA(http.HttpRequest), port.id)
        quotas.tenant_quota_usages(IsA(http.HttpRequest)) \
            .AndReturn(quota_usages)
        api.nova.flavor_list(IsA(http.HttpRequest)) \
            .AndReturn(self.flavors.list())
        self.mox.ReplayAll()
        # --- replay phase ---
        form_data = {'flavor': flavor.id,
                     'source_type': 'image_id',
                     'source_id': image.id,
                     'volume_size': '1',
                     'image_id': image.id,
                     'availability_zone': avail_zone.zoneName,
                     'keypair': keypair.name,
                     'name': server.name,
                     'script_source': 'raw',
                     'script_data': customization_script,
                     'project_id': self.tenants.first().id,
                     'user_id': self.user.id,
                     'groups': str(sec_group.id),
                     'volume_type': '',
                     'network': self.networks.first().id,
                     'count': 1,
                     'admin_pass': 'password',
                     'confirm_admin_pass': 'password',
                     'disk_config': 'AUTO',
                     'config_drive': False}
        if test_with_profile:
            form_data['profile'] = self.policy_profiles.first().id
        url = reverse('horizon:project:instances:launch')
        res = self.client.post(url, form_data)
        self.assertRedirectsNoFollow(res, INDEX_URL)
    @helpers.update_settings(
        OPENSTACK_NEUTRON_NETWORK={'profile_support': 'cisco'})
    def test_launch_form_keystone_exception_with_profile(self):
        """Keystone-failure scenario with profile_support enabled."""
        self.test_launch_form_keystone_exception(test_with_profile=True)
    @helpers.create_stubs({api.glance: ('image_list_detailed',),
                           api.neutron: ('network_list',
                                         'profile_list',),
                           api.nova: ('extension_supported',
                                      'flavor_list',
                                      'keypair_list',
                                      'tenant_absolute_limits',
                                      'availability_zone_list',),
                           api.network: ('security_group_list',),
                           cinder: ('volume_list',
                                    'volume_snapshot_list',),
                           quotas: ('tenant_quota_usages',)})
    def test_launch_form_instance_count_error(self,
                                              test_with_profile=False):
        """An instance count of 0 must fail form validation.

        The POST carries ``count: 0`` and the response is expected to contain
        the "greater than or equal to 1" validation message.
        """
        flavor = self.flavors.first()
        image = self.images.first()
        keypair = self.keypairs.first()
        server = self.servers.first()
        volume = self.volumes.first()
        sec_group = self.security_groups.first()
        avail_zone = self.availability_zones.first()
        customization_script = 'user data'
        device_name = u'vda'
        volume_choice = "%s:vol" % volume.id
        quota_usages = self.quota_usages.first()
        # --- mox record phase ---
        api.nova.extension_supported('BlockDeviceMappingV2Boot',
                                     IsA(http.HttpRequest)) \
            .AndReturn(True)
        api.nova.flavor_list(IsA(http.HttpRequest)) \
            .AndReturn(self.flavors.list())
        api.nova.keypair_list(IsA(http.HttpRequest)) \
            .AndReturn(self.keypairs.list())
        api.network.security_group_list(IsA(http.HttpRequest)) \
            .AndReturn(self.security_groups.list())
        api.nova.availability_zone_list(IsA(http.HttpRequest)) \
            .AndReturn(self.availability_zones.list())
        api.glance.image_list_detailed(
            IsA(http.HttpRequest),
            filters={'is_public': True, 'status': 'active'}) \
            .AndReturn([self.images.list(), False, False])
        api.glance.image_list_detailed(
            IsA(http.HttpRequest),
            filters={'property-owner_id': self.tenant.id,
                     'status': 'active'}) \
            .AndReturn([[], False, False])
        api.neutron.network_list(IsA(http.HttpRequest),
                                 tenant_id=self.tenant.id,
                                 shared=False) \
            .AndReturn(self.networks.list()[:1])
        api.neutron.network_list(IsA(http.HttpRequest),
                                 shared=True) \
            .AndReturn(self.networks.list()[1:])
        if test_with_profile:
            policy_profiles = self.policy_profiles.list()
            api.neutron.profile_list(IsA(http.HttpRequest),
                                     'policy').AndReturn(policy_profiles)
        api.nova.extension_supported('DiskConfig',
                                     IsA(http.HttpRequest)) \
            .AndReturn(True)
        api.nova.extension_supported('ConfigDrive',
                                     IsA(http.HttpRequest)).AndReturn(True)
        # Only available, bootable volumes are boot-source candidates.
        volumes = [v for v in self.volumes.list()
                   if (v.status == AVAILABLE and v.bootable == 'true')]
        cinder.volume_list(IsA(http.HttpRequest),
                           search_opts=VOLUME_SEARCH_OPTS) \
            .AndReturn(volumes)
        cinder.volume_snapshot_list(IsA(http.HttpRequest),
                                    search_opts=SNAPSHOT_SEARCH_OPTS) \
            .AndReturn([])
        api.nova.flavor_list(IsA(http.HttpRequest)) \
            .AndReturn(self.flavors.list())
        api.nova.tenant_absolute_limits(IsA(http.HttpRequest)) \
            .AndReturn(self.limits['absolute'])
        quotas.tenant_quota_usages(IsA(http.HttpRequest)) \
            .AndReturn(quota_usages)
        api.nova.flavor_list(IsA(http.HttpRequest)) \
            .AndReturn(self.flavors.list())
        self.mox.ReplayAll()
        # --- replay phase: POST with an invalid count of 0. ---
        form_data = {'flavor': flavor.id,
                     'source_type': 'image_id',
                     'image_id': image.id,
                     'availability_zone': avail_zone.zoneName,
                     'keypair': keypair.name,
                     'name': server.name,
                     'script_source': 'raw',
                     'script_data': customization_script,
                     'project_id': self.tenants.first().id,
                     'user_id': self.user.id,
                     'groups': str(sec_group.id),
                     'volume_type': 'volume_id',
                     'volume_id': volume_choice,
                     'device_name': device_name,
                     'count': 0}
        url = reverse('horizon:project:instances:launch')
        res = self.client.post(url, form_data)
        self.assertContains(res, "greater than or equal to 1")
@helpers.create_stubs({api.glance: ('image_list_detailed',),
                       api.neutron: ('network_list',
                                     'profile_list',),
                       api.nova: ('extension_supported',
                                  'flavor_list',
                                  'keypair_list',
                                  'tenant_absolute_limits',
                                  'availability_zone_list',),
                       api.network: ('security_group_list',),
                       cinder: ('volume_list',
                                'volume_snapshot_list',),
                       quotas: ('tenant_quota_usages',)})
def _test_launch_form_count_error(self, resource,
                                  avail, test_with_profile=False):
    """Helper: POSTing the launch form with count=2 must exceed quota.

    ``resource`` selects which quota to constrain ('cores', 'ram' or
    'both'); ``avail`` is the remaining amount injected into the quota
    usage fixture.  The response must contain the matching
    "requested resource(s) exceed quota(s)" message.
    """
    flavor = self.flavors.first()
    image = self.images.first()
    keypair = self.keypairs.first()
    server = self.servers.first()
    volume = self.volumes.first()
    sec_group = self.security_groups.first()
    avail_zone = self.availability_zones.first()
    customization_script = 'user data'
    device_name = u'vda'
    volume_choice = "%s:vol" % volume.id
    quota_usages = self.quota_usages.first()
    # Constrain the selected quota(s); for 'both', RAM is pinned to 512
    # so two instances of the chosen flavor cannot fit.
    if resource == 'both':
        quota_usages['cores']['available'] = avail
        quota_usages['ram']['available'] = 512
    else:
        quota_usages[resource]['available'] = avail
    # Record, in order, every API call the launch workflow is expected
    # to make while rendering/validating the form (mox verifies both
    # order and call count).
    api.nova.extension_supported('BlockDeviceMappingV2Boot',
                                 IsA(http.HttpRequest)) \
        .AndReturn(True)
    api.nova.flavor_list(IsA(http.HttpRequest)) \
        .AndReturn(self.flavors.list())
    api.nova.keypair_list(IsA(http.HttpRequest)) \
        .AndReturn(self.keypairs.list())
    api.network.security_group_list(IsA(http.HttpRequest)) \
        .AndReturn(self.security_groups.list())
    api.nova.availability_zone_list(IsA(http.HttpRequest)) \
        .AndReturn(self.availability_zones.list())
    api.glance.image_list_detailed(
        IsA(http.HttpRequest),
        filters={'is_public': True, 'status': 'active'}) \
        .AndReturn([self.images.list(), False, False])
    api.glance.image_list_detailed(
        IsA(http.HttpRequest),
        filters={'property-owner_id': self.tenant.id,
                 'status': 'active'}) \
        .AndReturn([[], False, False])
    api.neutron.network_list(IsA(http.HttpRequest),
                             tenant_id=self.tenant.id,
                             shared=False) \
        .AndReturn(self.networks.list()[:1])
    api.neutron.network_list(IsA(http.HttpRequest),
                             shared=True) \
        .AndReturn(self.networks.list()[1:])
    if test_with_profile:
        policy_profiles = self.policy_profiles.list()
        api.neutron.profile_list(IsA(http.HttpRequest),
                                 'policy').AndReturn(policy_profiles)
    api.nova.extension_supported('DiskConfig',
                                 IsA(http.HttpRequest)) \
        .AndReturn(True)
    api.nova.extension_supported('ConfigDrive',
                                 IsA(http.HttpRequest)).AndReturn(True)
    volumes = [v for v in self.volumes.list()
               if (v.status == AVAILABLE and v.bootable == 'true')]
    cinder.volume_list(IsA(http.HttpRequest),
                       search_opts=VOLUME_SEARCH_OPTS) \
        .AndReturn(volumes)
    cinder.volume_snapshot_list(IsA(http.HttpRequest),
                                search_opts=SNAPSHOT_SEARCH_OPTS) \
        .AndReturn([])
    api.nova.flavor_list(IsA(http.HttpRequest)) \
        .AndReturn(self.flavors.list())
    api.nova.tenant_absolute_limits(IsA(http.HttpRequest)) \
        .AndReturn(self.limits['absolute'])
    quotas.tenant_quota_usages(IsA(http.HttpRequest)) \
        .AndReturn(quota_usages)
    api.nova.flavor_list(IsA(http.HttpRequest)) \
        .AndReturn(self.flavors.list())
    self.mox.ReplayAll()
    # count=2 is what pushes the request past the constrained quota.
    form_data = {'flavor': flavor.id,
                 'source_type': 'image_id',
                 'image_id': image.id,
                 'availability_zone': avail_zone.zoneName,
                 'keypair': keypair.name,
                 'name': server.name,
                 'script_source': 'raw',
                 'script_data': customization_script,
                 'project_id': self.tenants.first().id,
                 'user_id': self.user.id,
                 'groups': str(sec_group.id),
                 'volume_type': 'volume_id',
                 'volume_id': volume_choice,
                 'device_name': device_name,
                 'count': 2}
    url = reverse('horizon:project:instances:launch')
    res = self.client.post(url, form_data)
    # NOTE(review): this if-chain leaves ``msg`` undefined for any
    # other ``resource`` value — callers only pass the three handled
    # values; confirm before widening the helper's use.
    if resource == 'ram':
        msg = ("The following requested resource(s) exceed quota(s): "
               "RAM(Available: %s" % avail)
    if resource == 'cores':
        msg = ("The following requested resource(s) exceed quota(s): "
               "Cores(Available: %s" % avail)
    if resource == 'both':
        msg = ("The following requested resource(s) exceed quota(s): "
               "Cores(Available: %(avail)s, Requested: 2), RAM(Available: "
               "512, Requested: 1024)" % {'avail': avail})
    self.assertContains(res, msg)
def test_launch_form_cores_count_error(self):
    """Launching past the available cores quota must raise a form error."""
    self._test_launch_form_count_error(resource='cores', avail=1,
                                       test_with_profile=False)
def test_launch_form_ram_count_error(self):
    """Launching past the available RAM quota must raise a form error."""
    self._test_launch_form_count_error(resource='ram', avail=512,
                                       test_with_profile=False)
def test_launch_form_ram_cores_count_error(self):
    """Exceeding both the cores and RAM quotas must raise a form error."""
    self._test_launch_form_count_error(resource='both', avail=1,
                                       test_with_profile=False)
@helpers.update_settings(
    OPENSTACK_NEUTRON_NETWORK={'profile_support': 'cisco'})
def test_launch_form_instance_count_error_with_profile(self):
    """Re-run the instance-count error test with Cisco N1K profiles on."""
    self.test_launch_form_instance_count_error(test_with_profile=True)
@helpers.create_stubs({api.glance: ('image_list_detailed',),
                       api.neutron: ('network_list',
                                     'profile_list',),
                       api.nova: ('extension_supported',
                                  'flavor_list',
                                  'keypair_list',
                                  'tenant_absolute_limits',
                                  'availability_zone_list',),
                       api.network: ('security_group_list',),
                       cinder: ('volume_list',
                                'volume_snapshot_list',),
                       quotas: ('tenant_quota_usages',)})
def _test_launch_form_instance_requirement_error(self, image, flavor,
                                                 test_with_profile=False,
                                                 keypair_require=False):
    """Helper: the launch form must reject an unmet image requirement.

    Callers pass an ``image`` whose min_ram/min_disk exceed ``flavor``;
    the POST must fail with "flavor ... is too small".  With
    ``keypair_require`` the keypair field is omitted instead, and a
    "This field is required" error is expected.
    """
    keypair = self.keypairs.first()
    server = self.servers.first()
    volume = self.volumes.first()
    sec_group = self.security_groups.first()
    avail_zone = self.availability_zones.first()
    customization_script = 'user data'
    device_name = u'vda'
    volume_choice = "%s:vol" % volume.id
    quota_usages = self.quota_usages.first()
    # Record the launch workflow's API calls in order (mox verifies
    # both order and call count).
    api.nova.extension_supported('BlockDeviceMappingV2Boot',
                                 IsA(http.HttpRequest)) \
        .AndReturn(True)
    api.nova.flavor_list(IsA(http.HttpRequest)) \
        .AndReturn(self.flavors.list())
    api.nova.keypair_list(IsA(http.HttpRequest)) \
        .AndReturn(self.keypairs.list())
    api.network.security_group_list(IsA(http.HttpRequest)) \
        .AndReturn(self.security_groups.list())
    api.nova.availability_zone_list(IsA(http.HttpRequest)) \
        .AndReturn(self.availability_zones.list())
    api.glance.image_list_detailed(
        IsA(http.HttpRequest),
        filters={'is_public': True, 'status': 'active'}) \
        .AndReturn([self.images.list(), False, False])
    api.glance.image_list_detailed(
        IsA(http.HttpRequest),
        filters={'property-owner_id': self.tenant.id,
                 'status': 'active'}) \
        .AndReturn([[], False, False])
    api.neutron.network_list(IsA(http.HttpRequest),
                             tenant_id=self.tenant.id,
                             shared=False) \
        .AndReturn(self.networks.list()[:1])
    api.neutron.network_list(IsA(http.HttpRequest),
                             shared=True) \
        .AndReturn(self.networks.list()[1:])
    if test_with_profile:
        policy_profiles = self.policy_profiles.list()
        api.neutron.profile_list(IsA(http.HttpRequest),
                                 'policy').AndReturn(policy_profiles)
    api.nova.extension_supported('DiskConfig',
                                 IsA(http.HttpRequest)) \
        .AndReturn(True)
    api.nova.extension_supported('ConfigDrive',
                                 IsA(http.HttpRequest)).AndReturn(True)
    volumes = [v for v in self.volumes.list()
               if (v.status == AVAILABLE and v.bootable == 'true')]
    cinder.volume_list(IsA(http.HttpRequest),
                       search_opts=VOLUME_SEARCH_OPTS) \
        .AndReturn(volumes)
    cinder.volume_snapshot_list(IsA(http.HttpRequest),
                                search_opts=SNAPSHOT_SEARCH_OPTS) \
        .AndReturn([])
    api.nova.flavor_list(IsA(http.HttpRequest)) \
        .AndReturn(self.flavors.list())
    api.nova.tenant_absolute_limits(IsA(http.HttpRequest)) \
        .AndReturn(self.limits['absolute'])
    quotas.tenant_quota_usages(IsA(http.HttpRequest)) \
        .AndReturn(quota_usages)
    api.nova.flavor_list(IsA(http.HttpRequest)) \
        .AndReturn(self.flavors.list())
    self.mox.ReplayAll()
    form_data = {'flavor': flavor.id,
                 'source_type': 'image_id',
                 'image_id': image.id,
                 'availability_zone': avail_zone.zoneName,
                 'name': server.name,
                 'script_source': 'raw',
                 'script_data': customization_script,
                 'project_id': self.tenants.first().id,
                 'user_id': self.user.id,
                 'groups': str(sec_group.id),
                 'volume_type': 'volume_id',
                 'volume_id': volume_choice,
                 'device_name': device_name,
                 'count': 1}
    # Omit the keypair field when testing the required-field error.
    if not keypair_require:
        form_data['keypair'] = keypair.name
    url = reverse('horizon:project:instances:launch')
    res = self.client.post(url, form_data)
    if keypair_require:
        msg = "This field is required"
        self.assertContains(res, msg)
    else:
        msg = "The flavor '%s' is too small" % flavor.name
        self.assertContains(res, msg)
def test_launch_form_instance_requirement_error_disk(
    self,
    test_with_profile=False,
):
    """An image whose min_disk exceeds the flavor disk must be rejected."""
    small_flavor = self.flavors.first()
    big_image = self.images.first()
    # RAM fits exactly; only the disk requirement is violated.
    big_image.min_ram = small_flavor.ram
    big_image.min_disk = small_flavor.disk + 1
    self._test_launch_form_instance_requirement_error(big_image,
                                                      small_flavor,
                                                      test_with_profile)
def test_launch_form_instance_requirement_error_ram(
    self,
    test_with_profile=False,
):
    """An image whose min_ram exceeds the flavor RAM must be rejected."""
    small_flavor = self.flavors.first()
    big_image = self.images.first()
    # Disk fits exactly; only the RAM requirement is violated.
    big_image.min_ram = small_flavor.ram + 1
    big_image.min_disk = small_flavor.disk
    self._test_launch_form_instance_requirement_error(big_image,
                                                      small_flavor,
                                                      test_with_profile)
@helpers.update_settings(
    OPENSTACK_NEUTRON_NETWORK={'profile_support': 'cisco'})
def test_launch_form_instance_requirement_error_disk_with_profile(self):
    """Same disk-requirement check with Cisco N1K profile support on."""
    self.test_launch_form_instance_requirement_error_disk(True)
@helpers.update_settings(
    OPENSTACK_NEUTRON_NETWORK={'profile_support': 'cisco'})
def test_launch_form_instance_requirement_error_ram_with_profile(self):
    """Same RAM-requirement check with Cisco N1K profile support on."""
    self.test_launch_form_instance_requirement_error_ram(True)
@helpers.create_stubs({api.glance: ('image_list_detailed',),
                       api.neutron: ('network_list',
                                     'profile_list',),
                       api.nova: ('extension_supported',
                                  'flavor_list',
                                  'keypair_list',
                                  'tenant_absolute_limits',
                                  'availability_zone_list',),
                       api.network: ('security_group_list',),
                       cinder: ('volume_list',
                                'volume_snapshot_list',),
                       quotas: ('tenant_quota_usages',)})
def _test_launch_form_instance_show_device_name(self, device_name,
                                                widget_class,
                                                widget_attrs):
    """Helper: the device_name field must render with the given widget.

    Posts a valid boot-from-image-to-volume launch and checks that the
    response contains the HTML produced by
    ``widget_class().render(**widget_attrs)`` (compared piecewise, see
    the note below).
    """
    flavor = self.flavors.first()
    image = self.images.first()
    keypair = self.keypairs.first()
    server = self.servers.first()
    volume = self.volumes.first()
    sec_group = self.security_groups.first()
    avail_zone = self.availability_zones.first()
    customization_script = 'user data'
    volume_choice = "%s:vol" % volume.id
    quota_usages = self.quota_usages.first()
    # Record the launch workflow's API calls in order (mox verifies
    # both order and call count).
    api.nova.extension_supported('BlockDeviceMappingV2Boot',
                                 IsA(http.HttpRequest)).AndReturn(True)
    api.nova.flavor_list(
        IsA(http.HttpRequest)).AndReturn(self.flavors.list())
    api.nova.keypair_list(
        IsA(http.HttpRequest)).AndReturn(self.keypairs.list())
    api.network.security_group_list(
        IsA(http.HttpRequest)).AndReturn(self.security_groups.list())
    api.nova.availability_zone_list(
        IsA(http.HttpRequest)).AndReturn(self.availability_zones.list())
    api.glance.image_list_detailed(
        IsA(http.HttpRequest),
        filters={'is_public': True,
                 'status': 'active'}).AndReturn(
        [self.images.list(), False, False])
    api.glance.image_list_detailed(
        IsA(http.HttpRequest),
        filters={'property-owner_id': self.tenant.id,
                 'status': 'active'}).AndReturn([[], False, False])
    api.neutron.network_list(
        IsA(http.HttpRequest),
        tenant_id=self.tenant.id,
        shared=False).AndReturn(self.networks.list()[:1])
    api.neutron.network_list(
        IsA(http.HttpRequest),
        shared=True).AndReturn(self.networks.list()[1:])
    api.nova.extension_supported(
        'DiskConfig', IsA(http.HttpRequest)).AndReturn(True)
    api.nova.extension_supported(
        'ConfigDrive', IsA(http.HttpRequest)).AndReturn(True)
    volumes = [v for v in self.volumes.list()
               if (v.status == AVAILABLE and v.bootable == 'true')]
    cinder.volume_list(IsA(http.HttpRequest),
                       search_opts=VOLUME_SEARCH_OPTS) \
        .AndReturn(volumes)
    cinder.volume_snapshot_list(IsA(http.HttpRequest),
                                search_opts=SNAPSHOT_SEARCH_OPTS) \
        .AndReturn([])
    api.nova.flavor_list(
        IsA(http.HttpRequest)).AndReturn(self.flavors.list())
    api.nova.tenant_absolute_limits(
        IsA(http.HttpRequest)).AndReturn(self.limits['absolute'])
    quotas.tenant_quota_usages(
        IsA(http.HttpRequest)).AndReturn(quota_usages)
    api.nova.flavor_list(
        IsA(http.HttpRequest)).AndReturn(self.flavors.list())
    self.mox.ReplayAll()
    form_data = {'flavor': flavor.id,
                 'source_type': 'volume_image_id',
                 'image_id': image.id,
                 'availability_zone': avail_zone.zoneName,
                 'keypair': keypair.name,
                 'name': server.name,
                 'customization_script': customization_script,
                 'project_id': self.tenants.first().id,
                 'user_id': self.user.id,
                 'groups': str(sec_group.id),
                 'volume_type': 'volume_id',
                 'volume_id': volume_choice,
                 'volume_size': max(
                     image.min_disk, image.size / 1024 ** 3),
                 'device_name': device_name,
                 'count': 1}
    url = reverse('horizon:project:instances:launch')
    res = self.client.post(url, form_data)
    self.assertNoFormErrors(res)
    widget_content = widget_class().render(**widget_attrs)
    # In django 1.4, the widget's html attributes are not always rendered
    # in the same order and checking the fully rendered widget fails.
    for widget_part in widget_content.split():
        self.assertContains(res, widget_part)
@django.test.utils.override_settings(
    OPENSTACK_HYPERVISOR_FEATURES={'can_set_mount_point': True})
def test_launch_form_instance_device_name_showed(self):
    """With mount points enabled, device_name renders as a text input."""
    attrs = {'name': 'device_name', 'value': 'vda',
             'attrs': {'id': 'id_device_name'}}
    self._test_launch_form_instance_show_device_name(
        u'vda', widgets.TextInput, attrs)
@django.test.utils.override_settings(
    OPENSTACK_HYPERVISOR_FEATURES={'can_set_mount_point': False})
def test_launch_form_instance_device_name_hidden(self):
    """With mount points disabled, device_name renders as a hidden input."""
    attrs = {'name': 'device_name', 'value': '',
             'attrs': {'id': 'id_device_name'}}
    self._test_launch_form_instance_show_device_name(
        u'', widgets.HiddenInput, attrs)
@helpers.create_stubs({api.glance: ('image_list_detailed',),
                       api.neutron: ('network_list',
                                     'profile_list',),
                       api.nova: ('extension_supported',
                                  'flavor_list',
                                  'keypair_list',
                                  'tenant_absolute_limits',
                                  'availability_zone_list',),
                       api.network: ('security_group_list',),
                       cinder: ('volume_list',
                                'volume_snapshot_list',),
                       quotas: ('tenant_quota_usages',)})
def _test_launch_form_instance_volume_size(self, image, volume_size, msg,
                                           test_with_profile=False,
                                           volumes=None):
    """Helper: boot-from-image-to-volume with a bad size must fail.

    Posts the launch form with the given ``volume_size`` and asserts the
    response contains ``msg``.  When ``volumes`` is not None it is used
    as the remaining volume quota instead.
    """
    flavor = self.flavors.get(name='m1.massive')
    keypair = self.keypairs.first()
    server = self.servers.first()
    sec_group = self.security_groups.first()
    avail_zone = self.availability_zones.first()
    customization_script = 'user data'
    device_name = u'vda'
    quota_usages = self.quota_usages.first()
    # Plenty of cores so only the volume-related checks can fail.
    quota_usages['cores']['available'] = 2000
    # NOTE(review): the extra flavor_list expectation exists only on the
    # else branch — presumably the quota-exceeded path short-circuits
    # before that call; confirm against the launch workflow.
    if volumes is not None:
        quota_usages['volumes']['available'] = volumes
    else:
        api.nova.flavor_list(IsA(http.HttpRequest)) \
            .AndReturn(self.flavors.list())
    # Record the launch workflow's API calls in order (mox verifies
    # both order and call count).
    api.nova.extension_supported('BlockDeviceMappingV2Boot',
                                 IsA(http.HttpRequest)) \
        .AndReturn(True)
    api.nova.keypair_list(IsA(http.HttpRequest)) \
        .AndReturn(self.keypairs.list())
    api.network.security_group_list(IsA(http.HttpRequest)) \
        .AndReturn(self.security_groups.list())
    api.nova.availability_zone_list(IsA(http.HttpRequest)) \
        .AndReturn(self.availability_zones.list())
    api.glance.image_list_detailed(
        IsA(http.HttpRequest),
        filters={'is_public': True, 'status': 'active'}) \
        .AndReturn([self.images.list(), False, False])
    api.glance.image_list_detailed(
        IsA(http.HttpRequest),
        filters={'property-owner_id': self.tenant.id,
                 'status': 'active'}) \
        .AndReturn([[], False, False])
    api.neutron.network_list(IsA(http.HttpRequest),
                             tenant_id=self.tenant.id,
                             shared=False) \
        .AndReturn(self.networks.list()[:1])
    api.neutron.network_list(IsA(http.HttpRequest),
                             shared=True) \
        .AndReturn(self.networks.list()[1:])
    if test_with_profile:
        policy_profiles = self.policy_profiles.list()
        api.neutron.profile_list(IsA(http.HttpRequest),
                                 'policy').AndReturn(policy_profiles)
    api.nova.extension_supported('DiskConfig',
                                 IsA(http.HttpRequest)) \
        .AndReturn(True)
    api.nova.extension_supported('ConfigDrive',
                                 IsA(http.HttpRequest)).AndReturn(True)
    # NOTE(review): this rebinding shadows the ``volumes`` parameter;
    # the quota value was already consumed above, so it is harmless but
    # worth renaming if this helper is touched again.
    volumes = [v for v in self.volumes.list()
               if (v.status == AVAILABLE and v.bootable == 'true')]
    cinder.volume_list(IsA(http.HttpRequest),
                       search_opts=VOLUME_SEARCH_OPTS) \
        .AndReturn(volumes)
    cinder.volume_snapshot_list(IsA(http.HttpRequest),
                                search_opts=SNAPSHOT_SEARCH_OPTS) \
        .AndReturn([])
    api.nova.flavor_list(IsA(http.HttpRequest)) \
        .AndReturn(self.flavors.list())
    api.nova.tenant_absolute_limits(IsA(http.HttpRequest)) \
        .AndReturn(self.limits['absolute'])
    quotas.tenant_quota_usages(IsA(http.HttpRequest)) \
        .AndReturn(quota_usages)
    api.nova.flavor_list(IsA(http.HttpRequest)) \
        .AndReturn(self.flavors.list())
    self.mox.ReplayAll()
    form_data = {
        'flavor': flavor.id,
        'source_type': 'volume_image_id',
        'image_id': image.id,
        'availability_zone': avail_zone.zoneName,
        'keypair': keypair.name,
        'name': server.name,
        'script_source': 'raw',
        'script_data': customization_script,
        'project_id': self.tenants.first().id,
        'user_id': self.user.id,
        'groups': str(sec_group.id),
        'volume_size': volume_size,
        'device_name': device_name,
        'count': 1
    }
    url = reverse('horizon:project:instances:launch')
    res = self.client.post(url, form_data)
    self.assertContains(res, msg)
def test_launch_form_instance_volume_size_error(self,
                                                test_with_profile=False):
    """A volume smaller than the image's min_disk must be rejected."""
    image = self.images.get(name='protected_images')
    too_small = image.min_disk / 2
    expected = ("The Volume size is too small for the '%s' image"
                % image.name)
    self._test_launch_form_instance_volume_size(image, too_small, expected,
                                                test_with_profile)
def test_launch_form_instance_non_int_volume_size(self,
                                                  test_with_profile=False):
    """A fractional volume size must fail integer field validation."""
    image = self.images.get(name='protected_images')
    expected = "Enter a whole number."
    self._test_launch_form_instance_volume_size(image, 1.5, expected,
                                                test_with_profile)
def test_launch_form_instance_volume_exceed_quota(self):
    """With zero volume quota left, a volume-backed launch must fail."""
    image = self.images.get(name='protected_images')
    expected = "Requested volume exceeds quota: Available: 0, Requested: 1"
    self._test_launch_form_instance_volume_size(image, image.min_disk,
                                                expected, False, 0)
@helpers.update_settings(
    OPENSTACK_NEUTRON_NETWORK={'profile_support': 'cisco'})
def test_launch_form_instance_volume_size_error_with_profile(self):
    """Same volume-size error check with Cisco N1K profile support on."""
    self.test_launch_form_instance_volume_size_error(True)
@helpers.update_settings(
    OPENSTACK_NEUTRON_NETWORK={'profile_support': 'cisco'})
def test_launch_form_instance_non_int_volume_size_with_profile(self):
    """Same non-integer size check with Cisco N1K profile support on."""
    self.test_launch_form_instance_non_int_volume_size(True)
@helpers.create_stubs({
    api.nova: ('flavor_list', 'server_list', 'tenant_absolute_limits',
               'extension_supported',),
    api.glance: ('image_list_detailed',),
    api.network: ('floating_ip_simple_associate_supported',
                  'floating_ip_supported',
                  'servers_update_addresses',),
})
def test_launch_button_disabled_when_quota_exceeded(self):
    """The index page must render a disabled Launch button at quota."""
    servers = self.servers.list()
    limits = self.limits['absolute']
    # Make used == max so the instance quota is exhausted.
    limits['totalInstancesUsed'] = limits['maxTotalInstances']
    api.nova.extension_supported('AdminActions',
                                 IsA(http.HttpRequest)) \
        .MultipleTimes().AndReturn(True)
    api.nova.extension_supported('Shelve', IsA(http.HttpRequest)) \
        .MultipleTimes().AndReturn(True)
    api.nova.flavor_list(IsA(http.HttpRequest)) \
        .AndReturn(self.flavors.list())
    api.glance.image_list_detailed(IgnoreArg()) \
        .AndReturn((self.images.list(), False, False))
    search_opts = {'marker': None, 'paginate': True}
    api.nova.server_list(IsA(http.HttpRequest), search_opts=search_opts) \
        .AndReturn([servers, False])
    api.network.servers_update_addresses(IsA(http.HttpRequest), servers)
    api.nova.tenant_absolute_limits(IsA(http.HttpRequest), reserved=True) \
        .MultipleTimes().AndReturn(limits)
    api.network.floating_ip_supported(IsA(http.HttpRequest)) \
        .MultipleTimes().AndReturn(True)
    api.network.floating_ip_simple_associate_supported(
        IsA(http.HttpRequest)).MultipleTimes().AndReturn(True)
    self.mox.ReplayAll()
    # Build the exact anchor markup the table renders for a disabled
    # launch action and assert it appears in the page.
    launch = tables.LaunchLink()
    url = launch.get_link_url()
    classes = list(launch.get_default_classes()) + list(launch.classes)
    link_name = "%s (%s)" % (six.text_type(launch.verbose_name),
                             "Quota exceeded")
    res = self.client.get(INDEX_URL)
    # Django renamed the private response charset attribute in 1.8.
    if django.VERSION < (1, 8, 0):
        resp_charset = res._charset
    else:
        resp_charset = res.charset
    expected_string = encoding.smart_str(u'''
        <a href="%s" title="%s" class="%s disabled"
        data-update-url=
        "/project/instances/?action=launch&table=instances"
        id="instances__action_launch">
        <span class="fa fa-cloud-upload"></span>%s</a>
        ''' % (url, link_name, " ".join(classes), link_name), resp_charset)
    self.assertContains(res, expected_string, html=True,
                        msg_prefix="The launch button is not disabled")
@helpers.create_stubs({api.glance: ('image_list_detailed',),
                       api.neutron: ('network_list',),
                       api.nova: ('extension_supported',
                                  'flavor_list',
                                  'keypair_list',
                                  'availability_zone_list',
                                  'tenant_absolute_limits',
                                  'server_create',),
                       api.network: ('security_group_list',),
                       cinder: ('volume_list',
                                'volume_snapshot_list',),
                       quotas: ('tenant_quota_usages',)})
def test_launch_with_empty_device_name_allowed(self):
    """An empty device_name must be accepted and passed to nova as None."""
    flavor = self.flavors.get(name='m1.massive')
    image = self.images.first()
    keypair = self.keypairs.first()
    server = self.servers.first()
    sec_group = self.security_groups.first()
    avail_zone = self.availability_zones.first()
    customization_script = 'user data'
    nics = [{'net-id': self.networks.first().id, 'v4-fixed-ip': ''}]
    device_name = u''
    quota_usages = self.quota_usages.first()
    quota_usages['cores']['available'] = 2000
    device_mapping_v2 = [{'device_name': None,  # device_name must be None
                          'source_type': 'image',
                          'destination_type': 'volume',
                          'delete_on_termination': False,
                          'uuid': image.id,
                          'boot_index': '0',
                          'volume_size': image.size}]
    # Record the launch workflow's API calls in order (mox verifies
    # both order and call count).
    api.nova.extension_supported('BlockDeviceMappingV2Boot',
                                 IsA(http.HttpRequest)) \
        .AndReturn(True)
    api.nova.flavor_list(IsA(http.HttpRequest)) \
        .AndReturn(self.flavors.list())
    api.nova.keypair_list(IsA(http.HttpRequest)) \
        .AndReturn(self.keypairs.list())
    api.network.security_group_list(IsA(http.HttpRequest)) \
        .AndReturn(self.security_groups.list())
    api.nova.availability_zone_list(IsA(http.HttpRequest)) \
        .AndReturn(self.availability_zones.list())
    api.glance.image_list_detailed(
        IsA(http.HttpRequest),
        filters={'is_public': True, 'status': 'active'}) \
        .AndReturn([self.images.list(), False, False])
    api.glance.image_list_detailed(
        IsA(http.HttpRequest),
        filters={'property-owner_id': self.tenant.id,
                 'status': 'active'}) \
        .AndReturn([[], False, False])
    api.neutron.network_list(IsA(http.HttpRequest),
                             tenant_id=self.tenant.id,
                             shared=False) \
        .AndReturn(self.networks.list()[:1])
    api.neutron.network_list(IsA(http.HttpRequest),
                             shared=True) \
        .AndReturn(self.networks.list()[1:])
    api.nova.extension_supported('DiskConfig',
                                 IsA(http.HttpRequest)) \
        .AndReturn(True)
    api.nova.extension_supported('ConfigDrive',
                                 IsA(http.HttpRequest)).AndReturn(True)
    volumes = [v for v in self.volumes.list()
               if (v.status == AVAILABLE and v.bootable == 'true')]
    cinder.volume_list(IsA(http.HttpRequest),
                       search_opts=VOLUME_SEARCH_OPTS) \
        .AndReturn(volumes)
    cinder.volume_snapshot_list(IsA(http.HttpRequest),
                                search_opts=SNAPSHOT_SEARCH_OPTS) \
        .AndReturn([])
    api.nova.flavor_list(IsA(http.HttpRequest)) \
        .AndReturn(self.flavors.list())
    quotas.tenant_quota_usages(IsA(http.HttpRequest)) \
        .AndReturn(quota_usages)
    # The key expectation: server_create receives the block device
    # mapping built above, i.e. with device_name normalized to None.
    api.nova.server_create(IsA(http.HttpRequest),
                           server.name,
                           '',
                           flavor.id,
                           keypair.name,
                           customization_script,
                           [str(sec_group.id)],
                           block_device_mapping=None,
                           block_device_mapping_v2=device_mapping_v2,
                           nics=nics,
                           availability_zone=avail_zone.zoneName,
                           instance_count=IsA(int),
                           admin_pass=u'',
                           config_drive=False,
                           disk_config=u'')
    self.mox.ReplayAll()
    form_data = {
        'flavor': flavor.id,
        'source_type': 'volume_image_id',
        'image_id': image.id,
        'availability_zone': avail_zone.zoneName,
        'keypair': keypair.name,
        'name': server.name,
        'script_source': 'raw',
        'script_data': customization_script,
        'project_id': self.tenants.first().id,
        'user_id': self.user.id,
        'groups': str(sec_group.id),
        'volume_size': image.size,
        'device_name': device_name,
        'network': self.networks.first().id,
        'count': 1
    }
    url = reverse('horizon:project:instances:launch')
    res = self.client.post(url, form_data)
    self.assertNoFormErrors(res)
@helpers.create_stubs({
    api.nova: ('flavor_list', 'server_list', 'tenant_absolute_limits',
               'extension_supported',),
    api.glance: ('image_list_detailed',),
    api.network: ('floating_ip_simple_associate_supported',
                  'floating_ip_supported',
                  'servers_update_addresses',),
})
def test_index_options_after_migrate(self):
    """A VERIFY_RESIZE server must expose confirm/revert row actions."""
    servers = self.servers.list()
    server = self.servers.first()
    # Put the first server into the post-migration confirmation state.
    server.status = "VERIFY_RESIZE"
    api.nova.extension_supported('AdminActions',
                                 IsA(http.HttpRequest)) \
        .MultipleTimes().AndReturn(True)
    api.nova.extension_supported('Shelve', IsA(http.HttpRequest)) \
        .MultipleTimes().AndReturn(True)
    api.nova.flavor_list(IsA(http.HttpRequest)) \
        .AndReturn(self.flavors.list())
    api.glance.image_list_detailed(IgnoreArg()) \
        .AndReturn((self.images.list(), False, False))
    search_opts = {'marker': None, 'paginate': True}
    api.nova.server_list(IsA(http.HttpRequest), search_opts=search_opts) \
        .AndReturn([servers, False])
    api.network.servers_update_addresses(IsA(http.HttpRequest), servers)
    api.nova.tenant_absolute_limits(IsA(http.HttpRequest), reserved=True) \
        .MultipleTimes().AndReturn(self.limits['absolute'])
    api.network.floating_ip_supported(IsA(http.HttpRequest)) \
        .MultipleTimes().AndReturn(True)
    api.network.floating_ip_simple_associate_supported(
        IsA(http.HttpRequest)).MultipleTimes().AndReturn(True)
    self.mox.ReplayAll()
    res = self.client.get(INDEX_URL)
    self.assertContains(res, "instances__confirm")
    self.assertContains(res, "instances__revert")
@helpers.create_stubs({api.nova: ('extension_supported',
                                  'flavor_list',
                                  'keypair_list',
                                  'availability_zone_list',
                                  'tenant_absolute_limits',),
                       api.network: ('security_group_list',),
                       cinder: ('volume_snapshot_list',
                                'volume_list',),
                       api.neutron: ('network_list',
                                     'profile_list'),
                       api.glance: ('image_list_detailed',)})
def test_select_default_keypair_if_only_one(self,
                                            test_with_profile=False):
    """With a single keypair, the launch form must pre-select it."""
    keypair = self.keypairs.first()
    cinder.volume_list(IsA(http.HttpRequest),
                       search_opts=VOLUME_SEARCH_OPTS) \
        .AndReturn([])
    cinder.volume_snapshot_list(IsA(http.HttpRequest),
                                search_opts=SNAPSHOT_SEARCH_OPTS) \
        .AndReturn([])
    api.glance.image_list_detailed(
        IsA(http.HttpRequest),
        filters={'is_public': True, 'status': 'active'}) \
        .AndReturn([self.images.list(), False, False])
    api.glance.image_list_detailed(
        IsA(http.HttpRequest),
        filters={'property-owner_id': self.tenant.id,
                 'status': 'active'}) \
        .AndReturn([[], False, False])
    api.neutron.network_list(IsA(http.HttpRequest),
                             tenant_id=self.tenant.id,
                             shared=False) \
        .AndReturn(self.networks.list()[:1])
    api.neutron.network_list(IsA(http.HttpRequest),
                             shared=True) \
        .AndReturn(self.networks.list()[1:])
    if test_with_profile:
        policy_profiles = self.policy_profiles.list()
        api.neutron.profile_list(IsA(http.HttpRequest),
                                 'policy').AndReturn(policy_profiles)
    api.nova.tenant_absolute_limits(IsA(http.HttpRequest)) \
        .AndReturn(self.limits['absolute'])
    api.nova.extension_supported('BlockDeviceMappingV2Boot',
                                 IsA(http.HttpRequest)) \
        .AndReturn(True)
    api.nova.extension_supported('DiskConfig',
                                 IsA(http.HttpRequest)) \
        .AndReturn(True)
    api.nova.extension_supported('ConfigDrive',
                                 IsA(http.HttpRequest)).AndReturn(True)
    api.nova.flavor_list(IsA(http.HttpRequest)) \
        .AndReturn(self.flavors.list())
    api.nova.flavor_list(IsA(http.HttpRequest)) \
        .AndReturn(self.flavors.list())
    # Return exactly one keypair so the form has a unique default.
    api.nova.keypair_list(IsA(http.HttpRequest)) \
        .AndReturn([keypair])
    api.network.security_group_list(IsA(http.HttpRequest)) \
        .AndReturn(self.security_groups.list())
    api.nova.availability_zone_list(IsA(http.HttpRequest)) \
        .AndReturn(self.availability_zones.list())
    self.mox.ReplayAll()
    url = reverse('horizon:project:instances:launch')
    res = self.client.get(url)
    self.assertContains(
        res, "<option selected='selected' value='%(key)s'>"
             "%(key)s</option>" % {'key': keypair.name},
        html=True,
        msg_prefix="The default key pair was not selected.")
@helpers.update_settings(
    OPENSTACK_NEUTRON_NETWORK={'profile_support': 'cisco'})
def test_select_default_keypair_if_only_one_with_profile(self):
    """Same default-keypair check with Cisco N1K profile support on."""
    self.test_select_default_keypair_if_only_one(True)
@helpers.create_stubs({api.network: ('floating_ip_target_get_by_instance',
                                     'tenant_floating_ip_allocate',
                                     'floating_ip_associate',
                                     'servers_update_addresses',),
                       api.glance: ('image_list_detailed',),
                       api.nova: ('server_list',
                                  'flavor_list')})
def test_associate_floating_ip(self):
    """The simple-associate row action must allocate and attach a FIP."""
    servers = self.servers.list()
    server = servers[0]
    fip = self.q_floating_ips.first()
    search_opts = {'marker': None, 'paginate': True}
    api.nova.server_list(IsA(http.HttpRequest), search_opts=search_opts) \
        .AndReturn([servers, False])
    api.network.servers_update_addresses(IsA(http.HttpRequest), servers)
    api.nova.flavor_list(IgnoreArg()).AndReturn(self.flavors.list())
    api.glance.image_list_detailed(IgnoreArg()) \
        .AndReturn((self.images.list(), False, False))
    # The action looks up the association target, allocates a floating
    # IP for the tenant, then associates it with the server.
    api.network.floating_ip_target_get_by_instance(
        IsA(http.HttpRequest),
        server.id).AndReturn(server.id)
    api.network.tenant_floating_ip_allocate(
        IsA(http.HttpRequest)).AndReturn(fip)
    api.network.floating_ip_associate(
        IsA(http.HttpRequest), fip.id, server.id)
    self.mox.ReplayAll()
    formData = {'action': 'instances__associate-simple__%s' % server.id}
    res = self.client.post(INDEX_URL, formData)
    self.assertRedirectsNoFollow(res, INDEX_URL)
@helpers.create_stubs({api.network: ('floating_ip_target_list_by_instance',
                                     'tenant_floating_ip_list',
                                     'floating_ip_disassociate',
                                     'servers_update_addresses',),
                       api.glance: ('image_list_detailed',),
                       api.nova: ('server_list',
                                  'flavor_list')})
def test_disassociate_floating_ip(self):
    """The disassociate row action must detach the server's FIP."""
    servers = self.servers.list()
    server = servers[0]
    fip = self.q_floating_ips.first()
    # Mark the fixture FIP as attached to this server's port.
    fip.port_id = server.id
    search_opts = {'marker': None, 'paginate': True}
    api.nova.server_list(IsA(http.HttpRequest), search_opts=search_opts) \
        .AndReturn([servers, False])
    api.network.servers_update_addresses(IsA(http.HttpRequest), servers)
    api.nova.flavor_list(IgnoreArg()).AndReturn(self.flavors.list())
    api.glance.image_list_detailed(IgnoreArg()) \
        .AndReturn((self.images.list(), False, False))
    api.network.floating_ip_target_list_by_instance(
        IsA(http.HttpRequest),
        server.id).AndReturn([server.id, ])
    api.network.tenant_floating_ip_list(
        IsA(http.HttpRequest)).AndReturn([fip])
    api.network.floating_ip_disassociate(
        IsA(http.HttpRequest), fip.id)
    self.mox.ReplayAll()
    formData = {'action': 'instances__disassociate__%s' % server.id}
    res = self.client.post(INDEX_URL, formData)
    self.assertRedirectsNoFollow(res, INDEX_URL)
@helpers.create_stubs({api.nova: ('server_get',
                                  'flavor_list',
                                  'tenant_absolute_limits',
                                  'extension_supported')})
def test_instance_resize_get(self):
    """The resize workflow must list all flavors except the current one."""
    server = self.servers.first()
    api.nova.server_get(IsA(http.HttpRequest), server.id) \
        .AndReturn(server)
    api.nova.flavor_list(IsA(http.HttpRequest)) \
        .AndReturn(self.flavors.list())
    api.nova.flavor_list(IsA(http.HttpRequest)) \
        .AndReturn(self.flavors.list())
    api.nova.tenant_absolute_limits(IsA(http.HttpRequest)) \
        .AndReturn(self.limits['absolute'])
    api.nova.extension_supported('DiskConfig',
                                 IsA(http.HttpRequest)) \
        .AndReturn(True)
    self.mox.ReplayAll()
    url = reverse('horizon:project:instances:resize', args=[server.id])
    res = self.client.get(url)
    self.assertTemplateUsed(res, views.WorkflowView.template_name)
    # Resize must not offer the config-drive option.
    config_drive_field_label = 'Configuration Drive'
    self.assertNotContains(res, config_drive_field_label)
    # The server's current flavor must be excluded from the choices.
    option = '<option value="%s">%s</option>'
    for flavor in self.flavors.list():
        if flavor.id == server.flavor['id']:
            self.assertNotContains(res, option % (flavor.id, flavor.name))
        else:
            self.assertContains(res, option % (flavor.id, flavor.name))
@helpers.create_stubs({api.nova: ('server_get',
                                  'flavor_list',)})
def test_instance_resize_get_server_get_exception(self):
    """A nova error on server_get must redirect back to the index."""
    server = self.servers.first()
    api.nova.server_get(IsA(http.HttpRequest), server.id) \
        .AndRaise(self.exceptions.nova)
    self.mox.ReplayAll()
    url = reverse('horizon:project:instances:resize',
                  args=[server.id])
    res = self.client.get(url)
    self.assertRedirectsNoFollow(res, INDEX_URL)
@helpers.create_stubs({api.nova: ('server_get',
                                  'flavor_list',)})
def test_instance_resize_get_flavor_list_exception(self):
    """A nova error on flavor_list must redirect back to the index."""
    server = self.servers.first()
    api.nova.server_get(IsA(http.HttpRequest), server.id) \
        .AndReturn(server)
    api.nova.flavor_list(IsA(http.HttpRequest)) \
        .AndRaise(self.exceptions.nova)
    self.mox.ReplayAll()
    url = reverse('horizon:project:instances:resize',
                  args=[server.id])
    res = self.client.get(url)
    self.assertRedirectsNoFollow(res, INDEX_URL)
@helpers.create_stubs({api.nova: ('server_get',
                                  'flavor_list',
                                  'flavor_get',
                                  'tenant_absolute_limits',
                                  'extension_supported')})
def test_instance_resize_get_current_flavor_not_found(self):
    """The resize page must still render if the current flavor is gone.

    flavor_list returns nothing, so the workflow falls back to
    flavor_get for the server's flavor, which also fails; the page must
    render regardless.
    """
    server = self.servers.first()
    api.nova.server_get(IsA(http.HttpRequest), server.id) \
        .AndReturn(server)
    api.nova.flavor_list(IsA(http.HttpRequest)) \
        .AndReturn([])
    api.nova.flavor_list(IsA(http.HttpRequest)) \
        .AndReturn([])
    api.nova.flavor_get(IsA(http.HttpRequest), server.flavor['id']) \
        .AndRaise(self.exceptions.nova)
    api.nova.tenant_absolute_limits(IsA(http.HttpRequest)) \
        .AndReturn(self.limits['absolute'])
    api.nova.extension_supported('DiskConfig',
                                 IsA(http.HttpRequest)) \
        .AndReturn(True)
    self.mox.ReplayAll()
    url = reverse('horizon:project:instances:resize', args=[server.id])
    res = self.client.get(url)
    self.assertTemplateUsed(res, views.WorkflowView.template_name)
def _instance_resize_post(self, server_id, flavor_id, disk_config):
    """Submit the resize workflow form for the given server/flavor.

    Returns the response from POSTing the resize form.
    """
    resize_url = reverse('horizon:project:instances:resize',
                         args=[server_id])
    payload = {
        'flavor': flavor_id,
        'default_role': 'member',
        'disk_config': disk_config,
    }
    return self.client.post(resize_url, payload)
# Shared stub map for the resize POST tests below.
instance_resize_post_stubs = {
    api.nova: ('server_get', 'server_resize',
               'flavor_list', 'flavor_get',
               'extension_supported')}
@helpers.create_stubs(instance_resize_post_stubs)
def test_instance_resize_post(self):
    # Successful resize: server_resize is invoked with the chosen
    # flavor and the user is redirected back to the index page.
    server = self.servers.first()
    # Pick a flavor different from the server's current one.
    flavors = [flavor for flavor in self.flavors.list()
               if flavor.id != server.flavor['id']]
    flavor = flavors[0]

    api.nova.server_get(IsA(http.HttpRequest), server.id) \
        .AndReturn(server)
    api.nova.flavor_list(IsA(http.HttpRequest)) \
        .AndReturn(self.flavors.list())
    api.nova.extension_supported('DiskConfig',
                                 IsA(http.HttpRequest)) \
        .AndReturn(True)
    api.nova.server_resize(IsA(http.HttpRequest), server.id, flavor.id,
                           'AUTO').AndReturn([])
    self.mox.ReplayAll()

    res = self._instance_resize_post(server.id, flavor.id, u'AUTO')
    self.assertNoFormErrors(res)
    self.assertRedirectsNoFollow(res, INDEX_URL)
@helpers.create_stubs(instance_resize_post_stubs)
def test_instance_resize_post_api_exception(self):
    # A nova failure during server_resize still redirects to index
    # (the error is reported via messages, not a form error).
    server = self.servers.first()
    flavors = [flavor for flavor in self.flavors.list()
               if flavor.id != server.flavor['id']]
    flavor = flavors[0]

    api.nova.server_get(IsA(http.HttpRequest), server.id) \
        .AndReturn(server)
    api.nova.flavor_list(IsA(http.HttpRequest)) \
        .AndReturn(self.flavors.list())
    api.nova.extension_supported('DiskConfig',
                                 IsA(http.HttpRequest)) \
        .AndReturn(True)
    api.nova.server_resize(IsA(http.HttpRequest), server.id, flavor.id,
                           'AUTO') \
        .AndRaise(self.exceptions.nova)
    self.mox.ReplayAll()

    res = self._instance_resize_post(server.id, flavor.id, 'AUTO')
    self.assertRedirectsNoFollow(res, INDEX_URL)
@helpers.create_stubs({api.glance: ('image_list_detailed',),
                       api.nova: ('extension_supported',)})
def test_rebuild_instance_get(self, expect_password_fields=True):
    # Rebuild form GET renders the rebuild template; password fields
    # are shown unless the hypervisor cannot set passwords (see the
    # override-settings variant below, which reuses this test body).
    server = self.servers.first()
    api.glance.image_list_detailed(
        IsA(http.HttpRequest),
        filters={'is_public': True, 'status': 'active'}) \
        .AndReturn([self.images.list(), False, False])
    api.glance.image_list_detailed(
        IsA(http.HttpRequest),
        filters={'property-owner_id': self.tenant.id,
                 'status': 'active'}) \
        .AndReturn([[], False, False])
    api.nova.extension_supported('DiskConfig',
                                 IsA(http.HttpRequest)) \
        .AndReturn(True)
    self.mox.ReplayAll()

    url = reverse('horizon:project:instances:rebuild', args=[server.id])
    res = self.client.get(url)

    self.assertTemplateUsed(res, 'project/instances/rebuild.html')

    password_field_label = 'Rebuild Password'
    if expect_password_fields:
        self.assertContains(res, password_field_label)
    else:
        self.assertNotContains(res, password_field_label)
@django.test.utils.override_settings(
    OPENSTACK_HYPERVISOR_FEATURES={'can_set_password': False})
def test_rebuild_instance_get_without_set_password(self):
    # With can_set_password disabled the password fields must be hidden.
    self.test_rebuild_instance_get(expect_password_fields=False)
def _instance_rebuild_post(self, server_id, image_id,
                           password=None, confirm_password=None,
                           disk_config=None):
    """POST the rebuild form, omitting password fields when not given."""
    payload = {
        'instance_id': server_id,
        'image': image_id,
        'disk_config': disk_config,
    }
    if password is not None:
        payload['password'] = password
    if confirm_password is not None:
        payload['confirm_password'] = confirm_password
    rebuild_url = reverse('horizon:project:instances:rebuild',
                          args=[server_id])
    return self.client.post(rebuild_url, payload)
# Shared stub map for the rebuild POST tests below.
instance_rebuild_post_stubs = {
    api.nova: ('server_rebuild',
               'extension_supported'),
    api.glance: ('image_list_detailed',)}
@helpers.create_stubs(instance_rebuild_post_stubs)
def test_rebuild_instance_post_with_password(self):
    # Rebuild with matching password/confirmation: nova is called with
    # the password and the user is redirected to the index page.
    server = self.servers.first()
    image = self.images.first()
    password = u'testpass'

    api.glance.image_list_detailed(
        IsA(http.HttpRequest),
        filters={'is_public': True, 'status': 'active'}) \
        .AndReturn([self.images.list(), False, False])
    api.glance.image_list_detailed(
        IsA(http.HttpRequest),
        filters={'property-owner_id': self.tenant.id,
                 'status': 'active'}) \
        .AndReturn([[], False, False])
    api.nova.extension_supported('DiskConfig',
                                 IsA(http.HttpRequest)) \
        .AndReturn(True)
    api.nova.server_rebuild(IsA(http.HttpRequest),
                            server.id,
                            image.id,
                            password,
                            'AUTO').AndReturn([])
    self.mox.ReplayAll()

    res = self._instance_rebuild_post(server.id, image.id,
                                      password=password,
                                      confirm_password=password,
                                      disk_config='AUTO')

    self.assertNoFormErrors(res)
    self.assertRedirectsNoFollow(res, INDEX_URL)
@helpers.create_stubs(instance_rebuild_post_stubs)
def test_rebuild_instance_post_with_password_equals_none(self):
    # No password supplied: nova is called with password=None; a nova
    # failure during rebuild redirects back to index.
    server = self.servers.first()
    image = self.images.first()

    api.glance.image_list_detailed(
        IsA(http.HttpRequest),
        filters={'is_public': True, 'status': 'active'}) \
        .AndReturn([self.images.list(), False, False])
    api.glance.image_list_detailed(
        IsA(http.HttpRequest),
        filters={'property-owner_id': self.tenant.id,
                 'status': 'active'}) \
        .AndReturn([[], False, False])
    api.nova.extension_supported('DiskConfig',
                                 IsA(http.HttpRequest)) \
        .AndReturn(True)
    api.nova.server_rebuild(IsA(http.HttpRequest),
                            server.id,
                            image.id,
                            None,
                            'AUTO') \
        .AndRaise(self.exceptions.nova)
    self.mox.ReplayAll()

    res = self._instance_rebuild_post(server.id, image.id,
                                      password=None,
                                      confirm_password=None,
                                      disk_config='AUTO')
    self.assertRedirectsNoFollow(res, INDEX_URL)
@helpers.create_stubs(instance_rebuild_post_stubs)
def test_rebuild_instance_post_password_do_not_match(self):
    # Mismatched password/confirmation: the form re-renders with a
    # validation error and server_rebuild is never called.
    server = self.servers.first()
    image = self.images.first()
    pass1 = u'somepass'
    pass2 = u'notsomepass'

    api.glance.image_list_detailed(
        IsA(http.HttpRequest),
        filters={'is_public': True, 'status': 'active'}) \
        .AndReturn([self.images.list(), False, False])
    api.glance.image_list_detailed(
        IsA(http.HttpRequest),
        filters={'property-owner_id': self.tenant.id,
                 'status': 'active'}) \
        .AndReturn([[], False, False])
    api.nova.extension_supported('DiskConfig',
                                 IsA(http.HttpRequest)) \
        .AndReturn(True)
    self.mox.ReplayAll()

    res = self._instance_rebuild_post(server.id, image.id,
                                      password=pass1,
                                      confirm_password=pass2,
                                      disk_config='MANUAL')

    self.assertContains(res, "Passwords do not match.")
@helpers.create_stubs(instance_rebuild_post_stubs)
def test_rebuild_instance_post_with_empty_string(self):
    # Empty password strings are normalized to None before the nova
    # call, and the rebuild succeeds.
    server = self.servers.first()
    image = self.images.first()

    api.glance.image_list_detailed(
        IsA(http.HttpRequest),
        filters={'is_public': True, 'status': 'active'}) \
        .AndReturn([self.images.list(), False, False])
    api.glance.image_list_detailed(
        IsA(http.HttpRequest),
        filters={'property-owner_id': self.tenant.id,
                 'status': 'active'}) \
        .AndReturn([[], False, False])
    api.nova.extension_supported('DiskConfig',
                                 IsA(http.HttpRequest)) \
        .AndReturn(True)
    api.nova.server_rebuild(IsA(http.HttpRequest),
                            server.id,
                            image.id,
                            None,
                            'AUTO').AndReturn([])
    self.mox.ReplayAll()

    res = self._instance_rebuild_post(server.id, image.id,
                                      password=u'',
                                      confirm_password=u'',
                                      disk_config=u'AUTO')

    self.assertNoFormErrors(res)
    self.assertRedirectsNoFollow(res, INDEX_URL)
@helpers.create_stubs(instance_rebuild_post_stubs)
def test_rebuild_instance_post_api_exception(self):
    # A nova failure during server_rebuild redirects back to index.
    server = self.servers.first()
    image = self.images.first()
    password = u'testpass'

    api.glance.image_list_detailed(
        IsA(http.HttpRequest),
        filters={'is_public': True, 'status': 'active'}) \
        .AndReturn([self.images.list(), False, False])
    api.glance.image_list_detailed(
        IsA(http.HttpRequest),
        filters={'property-owner_id': self.tenant.id,
                 'status': 'active'}) \
        .AndReturn([[], False, False])
    api.nova.extension_supported('DiskConfig',
                                 IsA(http.HttpRequest)) \
        .AndReturn(True)
    api.nova.server_rebuild(IsA(http.HttpRequest),
                            server.id,
                            image.id,
                            password,
                            'AUTO') \
        .AndRaise(self.exceptions.nova)
    self.mox.ReplayAll()

    res = self._instance_rebuild_post(server.id, image.id,
                                      password=password,
                                      confirm_password=password,
                                      disk_config='AUTO')

    self.assertRedirectsNoFollow(res, INDEX_URL)
@django.test.utils.override_settings(API_RESULT_PAGE_SIZE=2)
@helpers.create_stubs({
    api.nova: ('flavor_list', 'server_list', 'tenant_absolute_limits',
               'extension_supported',),
    api.glance: ('image_list_detailed',),
    api.network: ('floating_ip_simple_associate_supported',
                  'floating_ip_supported',
                  'servers_update_addresses',),
})
def test_index_form_action_with_pagination(self):
    """The form action on the next page should have marker
    object from the previous page last element.
    """
    page_size = getattr(settings, 'API_RESULT_PAGE_SIZE', 2)
    servers = self.servers.list()[:3]

    api.nova.extension_supported('AdminActions',
                                 IsA(http.HttpRequest)) \
        .MultipleTimes().AndReturn(True)
    api.nova.extension_supported('Shelve', IsA(http.HttpRequest)) \
        .MultipleTimes().AndReturn(True)
    api.nova.flavor_list(IsA(http.HttpRequest)) \
        .MultipleTimes().AndReturn(self.flavors.list())
    api.glance.image_list_detailed(IgnoreArg()) \
        .MultipleTimes().AndReturn((self.images.list(), False, False))
    # First page: page_size items plus a has-more flag.
    search_opts = {'marker': None, 'paginate': True}
    api.nova.server_list(IsA(http.HttpRequest), search_opts=search_opts) \
        .AndReturn([servers[:page_size], True])
    api.network.servers_update_addresses(
        IsA(http.HttpRequest), servers[:page_size])
    # Second page: remaining items, keyed by the marker object id.
    api.nova.server_list(IsA(http.HttpRequest), search_opts={
        'marker': servers[page_size - 1].id, 'paginate': True}) \
        .AndReturn([servers[page_size:], False])
    api.network.servers_update_addresses(
        IsA(http.HttpRequest), servers[page_size:])
    api.nova.tenant_absolute_limits(IsA(http.HttpRequest), reserved=True) \
        .MultipleTimes().AndReturn(self.limits['absolute'])
    api.network.floating_ip_supported(IsA(http.HttpRequest)) \
        .MultipleTimes().AndReturn(True)
    api.network.floating_ip_simple_associate_supported(
        IsA(http.HttpRequest)).MultipleTimes().AndReturn(True)
    self.mox.ReplayAll()

    res = self.client.get(INDEX_URL)
    self.assertTemplateUsed(res, 'project/instances/index.html')
    # get first page with 2 items
    self.assertEqual(len(res.context['instances_table'].data), page_size)

    # update INDEX_URL with marker object
    params = "=".join([tables.InstancesTable._meta.pagination_param,
                       servers[page_size - 1].id])
    next_page_url = "?".join([reverse('horizon:project:instances:index'),
                              params])
    form_action = 'action="%s"' % next_page_url

    res = self.client.get(next_page_url)
    # get next page with remaining items (item 3)
    self.assertEqual(len(res.context['instances_table'].data), 1)
    # ensure that marker object exists in form action
    self.assertContains(res, form_action, count=1)
@django.test.utils.override_settings(API_RESULT_PAGE_SIZE=2)
@helpers.create_stubs({api.nova: ('server_list',
                                  'flavor_list',
                                  'server_delete',),
                       api.glance: ('image_list_detailed',),
                       api.network: ('servers_update_addresses',)})
def test_terminate_instance_with_pagination(self):
    """Instance should be deleted from
    the next page.
    """
    page_size = getattr(settings, 'API_RESULT_PAGE_SIZE', 2)
    servers = self.servers.list()[:3]
    # Terminate the last server, which lives on the second page.
    server = servers[-1]

    search_opts = {'marker': servers[page_size - 1].id, 'paginate': True}
    api.nova.server_list(IsA(http.HttpRequest), search_opts=search_opts) \
        .AndReturn([servers[page_size:], False])
    api.network.servers_update_addresses(IsA(http.HttpRequest),
                                         servers[page_size:])
    api.nova.flavor_list(IgnoreArg()).AndReturn(self.flavors.list())
    api.glance.image_list_detailed(IgnoreArg()) \
        .AndReturn((self.images.list(), False, False))
    api.nova.server_delete(IsA(http.HttpRequest), server.id)
    self.mox.ReplayAll()

    # update INDEX_URL with marker object
    params = "=".join([tables.InstancesTable._meta.pagination_param,
                       servers[page_size - 1].id])
    next_page_url = "?".join([reverse('horizon:project:instances:index'),
                              params])
    formData = {'action': 'instances__terminate__%s' % server.id}
    res = self.client.post(next_page_url, formData)

    self.assertRedirectsNoFollow(res, next_page_url)
    self.assertMessageCount(success=1)
class SimpleFile(object):
    """Minimal stand-in for an uploaded file object.

    Exposes only what the customization-script upload checks touch:
    a name, a payload returned by read(), and a private size.
    """

    def __init__(self, name, data, size):
        self.name = name
        self.data = data
        self._size = size

    def read(self):
        """Return the full payload, mimicking file.read()."""
        return self.data
def test_clean_file_upload_form_oversize_data(self):
    # Uploads larger than 16 kB must raise a ValidationError.
    t = workflows.create_instance.CustomizeAction(self.request, {})
    upload_str = 'user data'
    files = {'script_upload':
             self.SimpleFile('script_name',
                             upload_str,
                             (16 * 1024) + 1)}

    self.assertRaises(
        forms.ValidationError,
        t.clean_uploaded_files,
        'script',
        files)
def test_clean_file_upload_form_invalid_data(self):
    # Non-decodable bytes (0x81) must raise a ValidationError.
    t = workflows.create_instance.CustomizeAction(self.request, {})
    upload_str = '\x81'
    files = {'script_upload':
             self.SimpleFile('script_name',
                             upload_str,
                             sys.getsizeof(upload_str))}

    self.assertRaises(
        forms.ValidationError,
        t.clean_uploaded_files,
        'script',
        files)
def test_clean_file_upload_form_valid_data(self):
    # A well-formed upload passes through cleaning unchanged.
    t = workflows.create_instance.CustomizeAction(self.request, {})
    precleaned = 'user data'
    upload_str = 'user data'
    files = {'script_upload':
             self.SimpleFile('script_name',
                             upload_str,
                             sys.getsizeof(upload_str))}

    cleaned = t.clean_uploaded_files('script', files)

    self.assertEqual(
        cleaned,
        precleaned)
class InstanceAjaxTests(helpers.TestCase):
    """AJAX row_update behaviour of the instances table."""

    @helpers.create_stubs({api.nova: ("server_get",
                                      "flavor_get",
                                      "extension_supported"),
                           api.neutron: ("is_extension_supported",)})
    def test_row_update(self):
        # A row_update AJAX GET re-renders the single instance row.
        server = self.servers.first()
        instance_id = server.id
        flavor_id = server.flavor["id"]
        flavors = self.flavors.list()
        full_flavors = OrderedDict([(f.id, f) for f in flavors])

        api.nova.extension_supported('AdminActions', IsA(http.HttpRequest))\
            .MultipleTimes().AndReturn(True)
        api.nova.extension_supported('Shelve', IsA(http.HttpRequest)) \
            .MultipleTimes().AndReturn(True)
        api.neutron.is_extension_supported(IsA(http.HttpRequest),
                                           'security-group')\
            .MultipleTimes().AndReturn(True)
        api.nova.server_get(IsA(http.HttpRequest), instance_id)\
            .AndReturn(server)
        api.nova.flavor_get(IsA(http.HttpRequest), flavor_id)\
            .AndReturn(full_flavors[flavor_id])
        self.mox.ReplayAll()

        params = {'action': 'row_update',
                  'table': 'instances',
                  'obj_id': instance_id,
                  }
        res = self.client.get('?'.join((INDEX_URL, urlencode(params))),
                              HTTP_X_REQUESTED_WITH='XMLHttpRequest')
        self.assertContains(res, server.name)

    @helpers.create_stubs({api.nova: ("server_get",
                                      "flavor_get",
                                      "extension_supported"),
                           api.neutron: ("is_extension_supported",)})
    def test_row_update_instance_error(self):
        # A server in ERROR state with a fault still renders its row
        # and surfaces exactly one error message via the
        # X-Horizon-Messages header.
        server = self.servers.first()
        instance_id = server.id
        flavor_id = server.flavor["id"]
        flavors = self.flavors.list()
        full_flavors = OrderedDict([(f.id, f) for f in flavors])

        server.status = 'ERROR'
        server.fault = {"message": "NoValidHost",
                        "code": 500,
                        "details": "No valid host was found. \n "
                                   "File \"/mnt/stack/nova/nova/"
                                   "scheduler/filter_scheduler.py\", "
                                   "line 105, in schedule_run_instance\n "
                                   "raise exception.NoValidHost"
                                   "(reason=\"\")\n",
                        "created": "2013-10-07T00:08:32Z"}

        api.nova.extension_supported('AdminActions', IsA(http.HttpRequest))\
            .MultipleTimes().AndReturn(True)
        api.nova.extension_supported('Shelve', IsA(http.HttpRequest)) \
            .MultipleTimes().AndReturn(True)
        api.neutron.is_extension_supported(IsA(http.HttpRequest),
                                           'security-group')\
            .MultipleTimes().AndReturn(True)
        api.nova.server_get(IsA(http.HttpRequest), instance_id)\
            .AndReturn(server)
        api.nova.flavor_get(IsA(http.HttpRequest), flavor_id)\
            .AndReturn(full_flavors[flavor_id])
        self.mox.ReplayAll()

        params = {'action': 'row_update',
                  'table': 'instances',
                  'obj_id': instance_id,
                  }
        res = self.client.get('?'.join((INDEX_URL, urlencode(params))),
                              HTTP_X_REQUESTED_WITH='XMLHttpRequest')
        self.assertContains(res, server.name)
        self.assertTrue(res.has_header('X-Horizon-Messages'))
        messages = json.loads(res['X-Horizon-Messages'])
        self.assertEqual(len(messages), 1)
        # (Pdb) messages
        # [[u'error', u'Failed to launch instance "server_1": \
        # There is not enough capacity for this flavor in the \
        # selected availability zone. Try again later or select \
        # a different availability zone.', u'']]
        self.assertEqual(messages[0][0], 'error')
        self.assertTrue(messages[0][1].startswith('Failed'))

    @helpers.create_stubs({api.nova: ("server_get",
                                      "flavor_get",
                                      "extension_supported"),
                           api.neutron: ("is_extension_supported",)})
    def test_row_update_flavor_not_found(self):
        # If the flavor lookup fails, the row renders with a
        # "Not available" placeholder for the flavor column.
        server = self.servers.first()
        instance_id = server.id

        api.nova.extension_supported('AdminActions', IsA(http.HttpRequest))\
            .MultipleTimes().AndReturn(True)
        api.nova.extension_supported('Shelve', IsA(http.HttpRequest)) \
            .MultipleTimes().AndReturn(True)
        api.neutron.is_extension_supported(IsA(http.HttpRequest),
                                           'security-group')\
            .MultipleTimes().AndReturn(True)
        api.nova.server_get(IsA(http.HttpRequest), instance_id)\
            .AndReturn(server)
        api.nova.flavor_get(IsA(http.HttpRequest), server.flavor["id"])\
            .AndRaise(self.exceptions.nova)
        self.mox.ReplayAll()

        params = {'action': 'row_update',
                  'table': 'instances',
                  'obj_id': instance_id,
                  }
        res = self.client.get('?'.join((INDEX_URL, urlencode(params))),
                              HTTP_X_REQUESTED_WITH='XMLHttpRequest')
        self.assertContains(res, server.name)
        self.assertContains(res, "Not available")
class ConsoleManagerTests(helpers.TestCase):
    """Tests for console.get_console type selection and URL building."""

    def setup_consoles(self):
        # Need to refresh with mocks or will fail since mox do not detect
        # the api_call() as mocked.
        console.CONSOLES = OrderedDict([
            ('VNC', api.nova.server_vnc_console),
            ('SPICE', api.nova.server_spice_console),
            ('RDP', api.nova.server_rdp_console),
            ('SERIAL', api.nova.server_serial_console)])

    def _get_console_vnc(self, server):
        # Stub server_vnc_console to return a mock console at /VNC.
        console_mock = self.mox.CreateMock(api.nova.VNCConsole)
        console_mock.url = '/VNC'

        self.mox.StubOutWithMock(api.nova, 'server_vnc_console')
        api.nova.server_vnc_console(IgnoreArg(), server.id) \
            .AndReturn(console_mock)
        self.mox.ReplayAll()
        self.setup_consoles()

    def test_get_console_vnc(self):
        server = self.servers.first()
        self._get_console_vnc(server)
        # VNC/SPICE/RDP consoles get a "&title=name(id)" suffix.
        url = '/VNC&title=%s(%s)' % (server.name, server.id)
        data = console.get_console(self.request, 'VNC', server)[1]
        self.assertEqual(data, url)

    def _get_console_spice(self, server):
        # Stub server_spice_console to return a mock console at /SPICE.
        console_mock = self.mox.CreateMock(api.nova.SPICEConsole)
        console_mock.url = '/SPICE'

        self.mox.StubOutWithMock(api.nova, 'server_spice_console')
        api.nova.server_spice_console(IgnoreArg(), server.id) \
            .AndReturn(console_mock)
        self.mox.ReplayAll()
        self.setup_consoles()

    def test_get_console_spice(self):
        server = self.servers.first()
        self._get_console_spice(server)
        url = '/SPICE&title=%s(%s)' % (server.name, server.id)
        data = console.get_console(self.request, 'SPICE', server)[1]
        self.assertEqual(data, url)

    def _get_console_rdp(self, server):
        # Stub server_rdp_console to return a mock console at /RDP.
        console_mock = self.mox.CreateMock(api.nova.RDPConsole)
        console_mock.url = '/RDP'

        self.mox.StubOutWithMock(api.nova, 'server_rdp_console')
        api.nova.server_rdp_console(IgnoreArg(), server.id) \
            .AndReturn(console_mock)
        self.mox.ReplayAll()
        self.setup_consoles()

    def test_get_console_rdp(self):
        server = self.servers.first()
        self._get_console_rdp(server)
        url = '/RDP&title=%s(%s)' % (server.name, server.id)
        data = console.get_console(self.request, 'RDP', server)[1]
        self.assertEqual(data, url)

    def _get_console_serial(self, server):
        # Stub server_serial_console to return a mock console at /SERIAL.
        console_mock = self.mox.CreateMock(api.nova.SerialConsole)
        console_mock.url = '/SERIAL'

        self.mox.StubOutWithMock(api.nova, 'server_serial_console')
        api.nova.server_serial_console(IgnoreArg(), server.id) \
            .AndReturn(console_mock)
        self.mox.ReplayAll()
        self.setup_consoles()

    def test_get_console_serial(self):
        server = self.servers.first()
        self._get_console_serial(server)
        # Serial console URLs get no title suffix.
        url = '/SERIAL'
        data = console.get_console(self.request, 'SERIAL', server)[1]
        self.assertEqual(data, url)

    def test_get_console_auto_iterate_available(self):
        # AUTO mode walks VNC -> SPICE -> RDP and uses the first
        # console type whose API call succeeds.
        server = self.servers.first()

        console_mock = self.mox.CreateMock(api.nova.RDPConsole)
        console_mock.url = '/RDP'

        self.mox.StubOutWithMock(api.nova, 'server_vnc_console')
        api.nova.server_vnc_console(IgnoreArg(), server.id) \
            .AndRaise(self.exceptions.nova)

        self.mox.StubOutWithMock(api.nova, 'server_spice_console')
        api.nova.server_spice_console(IgnoreArg(), server.id) \
            .AndRaise(self.exceptions.nova)

        self.mox.StubOutWithMock(api.nova, 'server_rdp_console')
        api.nova.server_rdp_console(IgnoreArg(), server.id) \
            .AndReturn(console_mock)
        self.mox.ReplayAll()
        self.setup_consoles()

        url = '/RDP&title=%s(%s)' % (server.name, server.id)
        data = console.get_console(self.request, 'AUTO', server)[1]
        self.assertEqual(data, url)

    def test_get_console_auto_iterate_serial_available(self):
        # AUTO mode falls all the way through to SERIAL when every
        # graphical console type fails.
        server = self.servers.first()

        console_mock = self.mox.CreateMock(api.nova.SerialConsole)
        console_mock.url = '/SERIAL'

        self.mox.StubOutWithMock(api.nova, 'server_vnc_console')
        api.nova.server_vnc_console(IgnoreArg(), server.id) \
            .AndRaise(self.exceptions.nova)

        self.mox.StubOutWithMock(api.nova, 'server_spice_console')
        api.nova.server_spice_console(IgnoreArg(), server.id) \
            .AndRaise(self.exceptions.nova)

        self.mox.StubOutWithMock(api.nova, 'server_rdp_console')
        api.nova.server_rdp_console(IgnoreArg(), server.id) \
            .AndRaise(self.exceptions.nova)

        self.mox.StubOutWithMock(api.nova, 'server_serial_console')
        api.nova.server_serial_console(IgnoreArg(), server.id) \
            .AndReturn(console_mock)
        self.mox.ReplayAll()
        self.setup_consoles()

        url = '/SERIAL'
        data = console.get_console(self.request, 'AUTO', server)[1]
        self.assertEqual(data, url)

    def test_invalid_console_type_raise_value_error(self):
        # Unknown console types raise NotAvailable.
        self.assertRaises(exceptions.NotAvailable,
                          console.get_console, None, 'FAKE', None)
@helpers.create_stubs({api.neutron: ('network_list_for_tenant',)})
def test_interface_attach_get(self):
    # GET renders the attach-interface form with the tenant networks.
    server = self.servers.first()
    api.neutron.network_list_for_tenant(IsA(http.HttpRequest),
                                        self.tenant.id) \
        .AndReturn(self.networks.list()[:1])

    self.mox.ReplayAll()

    url = reverse('horizon:project:instances:attach_interface',
                  args=[server.id])
    res = self.client.get(url)

    self.assertTemplateUsed(res,
                            'project/instances/attach_interface.html')
@helpers.create_stubs({api.neutron: ('network_list_for_tenant',),
                       api.nova: ('interface_attach',)})
def test_interface_attach_post(self):
    server = self.servers.first()
    # mox's AndReturn() returns the value it records, so `network`
    # holds the stubbed network list here.
    network = api.neutron.network_list_for_tenant(IsA(http.HttpRequest),
                                                  self.tenant.id) \
        .AndReturn(self.networks.list()[:1])
    api.nova.interface_attach(IsA(http.HttpRequest), server.id,
                              net_id=network[0].id)

    self.mox.ReplayAll()

    form_data = {'instance_id': server.id,
                 'network': network[0].id}

    url = reverse('horizon:project:instances:attach_interface',
                  args=[server.id])
    res = self.client.post(url, form_data)

    self.assertNoFormErrors(res)
    self.assertRedirectsNoFollow(res, INDEX_URL)
@helpers.create_stubs({api.neutron: ('port_list',)})
def test_interface_detach_get(self):
    # GET renders the detach-interface form with the server's ports.
    server = self.servers.first()
    api.neutron.port_list(IsA(http.HttpRequest),
                          device_id=server.id)\
        .AndReturn([self.ports.first()])

    self.mox.ReplayAll()

    url = reverse('horizon:project:instances:detach_interface',
                  args=[server.id])
    res = self.client.get(url)

    self.assertTemplateUsed(res,
                            'project/instances/detach_interface.html')
@helpers.create_stubs({api.neutron: ('port_list',),
                       api.nova: ('interface_detach',)})
def test_interface_detach_post(self):
    # POST detaches the selected port and redirects to index.
    server = self.servers.first()
    port = self.ports.first()
    api.neutron.port_list(IsA(http.HttpRequest),
                          device_id=server.id)\
        .AndReturn([port])
    api.nova.interface_detach(IsA(http.HttpRequest), server.id, port.id)

    self.mox.ReplayAll()

    form_data = {'instance_id': server.id,
                 'port': port.id}

    url = reverse('horizon:project:instances:detach_interface',
                  args=[server.id])
    res = self.client.post(url, form_data)

    self.assertNoFormErrors(res)
    self.assertRedirectsNoFollow(res, INDEX_URL)
@helpers.update_settings(
    OPENSTACK_NEUTRON_NETWORK={'profile_support': 'cisco'})
@helpers.create_stubs({api.glance: ('image_list_detailed',),
                       api.neutron: ('network_list',
                                     'profile_list',
                                     'port_create',
                                     'port_delete'),
                       api.nova: ('extension_supported',
                                  'flavor_list',
                                  'keypair_list',
                                  'availability_zone_list',
                                  'server_create',),
                       api.network: ('security_group_list',),
                       cinder: ('volume_list',
                                'volume_snapshot_list',),
                       quotas: ('tenant_quota_usages',)})
def test_port_cleanup_called_on_failed_vm_launch(self):
    # When server_create fails after a Cisco policy-profile port was
    # pre-created, the port must be deleted again (port_delete below).
    flavor = self.flavors.first()
    image = self.images.first()
    keypair = self.keypairs.first()
    server = self.servers.first()
    sec_group = self.security_groups.first()
    avail_zone = self.availability_zones.first()
    customization_script = 'user data'
    quota_usages = self.quota_usages.first()

    api.nova.extension_supported('BlockDeviceMappingV2Boot',
                                 IsA(http.HttpRequest)) \
        .AndReturn(True)
    volumes = [v for v in self.volumes.list() if (v.status == AVAILABLE
                                                  and v.bootable ==
                                                  'true')]
    cinder.volume_list(IsA(http.HttpRequest),
                       search_opts=VOLUME_SEARCH_OPTS) \
        .AndReturn(volumes)
    volumes = [v for v in self.volumes.list() if (v.status == AVAILABLE)]
    cinder.volume_snapshot_list(IsA(http.HttpRequest),
                                search_opts=SNAPSHOT_SEARCH_OPTS) \
        .AndReturn(volumes)
    api.nova.flavor_list(IgnoreArg()).AndReturn(self.flavors.list())
    api.nova.keypair_list(IgnoreArg()).AndReturn(self.keypairs.list())
    api.network.security_group_list(IsA(http.HttpRequest)) \
        .AndReturn(self.security_groups.list())
    api.nova.availability_zone_list(IsA(http.HttpRequest)) \
        .AndReturn(self.availability_zones.list())
    api.glance.image_list_detailed(
        IsA(http.HttpRequest),
        filters={'is_public': True, 'status': 'active'}) \
        .AndReturn([self.images.list(), False, False])
    api.glance.image_list_detailed(
        IsA(http.HttpRequest),
        filters={'property-owner_id': self.tenant.id,
                 'status': 'active'}) \
        .AndReturn([[], False, False])
    api.neutron.network_list(IsA(http.HttpRequest),
                             tenant_id=self.tenant.id,
                             shared=False) \
        .AndReturn(self.networks.list()[:1])
    api.neutron.network_list(IsA(http.HttpRequest),
                             shared=True) \
        .AndReturn(self.networks.list()[1:])
    policy_profiles = self.policy_profiles.list()
    policy_profile_id = self.policy_profiles.first().id
    port = self.ports.first()
    api.neutron.profile_list(
        IsA(http.HttpRequest), 'policy').AndReturn(policy_profiles)
    api.neutron.port_create(
        IsA(http.HttpRequest),
        self.networks.first().id,
        policy_profile_id=policy_profile_id).AndReturn(port)
    nics = [{"port-id": port.id}]
    api.nova.extension_supported('DiskConfig',
                                 IsA(http.HttpRequest)) \
        .AndReturn(True)
    api.nova.extension_supported('ConfigDrive',
                                 IsA(http.HttpRequest)).AndReturn(True)
    api.nova.server_create(IsA(http.HttpRequest),
                           server.name,
                           image.id,
                           flavor.id,
                           keypair.name,
                           customization_script,
                           [str(sec_group.id)],
                           block_device_mapping=None,
                           block_device_mapping_v2=None,
                           nics=nics,
                           availability_zone=avail_zone.zoneName,
                           instance_count=IsA(int),
                           admin_pass='password',
                           disk_config='AUTO',
                           config_drive=False) \
        .AndRaise(self.exceptions.neutron)
    # The cleanup call under test.
    api.neutron.port_delete(IsA(http.HttpRequest), port.id)
    quotas.tenant_quota_usages(IsA(http.HttpRequest)) \
        .AndReturn(quota_usages)
    api.nova.flavor_list(IsA(http.HttpRequest)) \
        .AndReturn(self.flavors.list())
    self.mox.ReplayAll()

    form_data = {'flavor': flavor.id,
                 'source_type': 'image_id',
                 'source_id': image.id,
                 'volume_size': '1',
                 'image_id': image.id,
                 'availability_zone': avail_zone.zoneName,
                 'keypair': keypair.name,
                 'name': server.name,
                 'script_source': 'raw',
                 'script_data': customization_script,
                 'project_id': self.tenants.first().id,
                 'user_id': self.user.id,
                 'groups': [str(sec_group.id)],
                 'volume_type': '',
                 'network': self.networks.first().id,
                 'count': 1,
                 'admin_pass': 'password',
                 'confirm_admin_pass': 'password',
                 'disk_config': 'AUTO',
                 'config_drive': False,
                 'profile': self.policy_profiles.first().id}
    url = reverse('horizon:project:instances:launch')
    res = self.client.post(url, form_data)

    self.assertRedirectsNoFollow(res, INDEX_URL)
| {
"content_hash": "c9f18af3ff8acac25ac5c7db2238ce69",
"timestamp": "",
"source": "github",
"line_count": 4915,
"max_line_length": 82,
"avg_line_length": 45.099287894201424,
"alnum_prop": 0.5334404027735797,
"repo_name": "Tesora-Release/tesora-horizon",
"id": "cf828e51811a368d1e93a236b9f83ff1ddcb41f1",
"size": "222427",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "openstack_dashboard/dashboards/project/instances/tests.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "109391"
},
{
"name": "HTML",
"bytes": "538713"
},
{
"name": "JavaScript",
"bytes": "973924"
},
{
"name": "Makefile",
"bytes": "588"
},
{
"name": "Python",
"bytes": "5145012"
},
{
"name": "Shell",
"bytes": "18658"
}
],
"symlink_target": ""
} |
"""This module contains the ListConverter class."""
from dc.converters.base import Converter
class ListConverter(Converter):
    """Converter that stores list-like values as builtin lists.

    Values arriving here are model.represent.dc_list.DCList objects
    rather than plain 'list' objects; a builtin list is more
    convenient for the data connector, so the elements are copied
    into one.
    """

    @staticmethod
    def to_storage(value):
        """Copy *value* into a builtin list and return it."""
        return [*value]
| {
"content_hash": "3edb1beb57a237f3cf89421a0afd4989",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 73,
"avg_line_length": 28.88888888888889,
"alnum_prop": 0.6730769230769231,
"repo_name": "v-legoff/pa-poc3",
"id": "6c5b8272985ed4bddb9e69158bc06cf9ad05a511",
"size": "2063",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/dc/converters/list_cvt.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "12354"
},
{
"name": "Python",
"bytes": "643635"
},
{
"name": "Shell",
"bytes": "6471"
}
],
"symlink_target": ""
} |
"""Global registry for OpDefs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from mxconsole.protobuf import op_def_pb2
# Module-level registry mapping op name -> op_def_pb2.OpDef.
_registered_ops = {}
def register_op_list(op_list):
    """Register all the ops in an op_def_pb2.OpList.

    Raises:
        TypeError: if op_list is not an op_def_pb2.OpList.
    """
    if not isinstance(op_list, op_def_pb2.OpList):
        raise TypeError("%s is %s, not an op_def_pb2.OpList" %
                        (op_list, type(op_list)))
    for op_def in op_list.op:
        previous = _registered_ops.setdefault(op_def.name, op_def)
        # A second registration under the same name must be identical.
        assert previous == op_def
def get_registered_ops():
    """Returns a dictionary mapping names to OpDefs.

    Note: this is the live registry dict, not a copy, so mutations
    by the caller affect subsequent lookups.
    """
    return _registered_ops
| {
"content_hash": "46387ddc64d0788fc1d65ceb4e928264",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 58,
"avg_line_length": 27.074074074074073,
"alnum_prop": 0.6621067031463749,
"repo_name": "bravomikekilo/mxconsole",
"id": "c39f5094b9bf134bc8e718a1a4f167f84087923f",
"size": "1421",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mxconsole/framework/op_def_registry.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "27900"
},
{
"name": "CSS",
"bytes": "5107"
},
{
"name": "HTML",
"bytes": "584168"
},
{
"name": "JavaScript",
"bytes": "1734943"
},
{
"name": "Protocol Buffer",
"bytes": "71639"
},
{
"name": "Python",
"bytes": "981371"
},
{
"name": "Shell",
"bytes": "1566"
},
{
"name": "TypeScript",
"bytes": "786869"
}
],
"symlink_target": ""
} |
from common.api.permissions import IsStaffOrTeamCaptain
# from common.api.serializers import PlayerMembershipSerializer
from common.models import Position, SkillBracket, TeamMember
# from teamfinder.api.serializers import PlayerMembershipSerializer, TeamSerializer
from teams.api.serializers import EditableFlatTeamSerializer, TeamSerializer, PlayerMembershipSerializer
from teams.models import Team
from rest_framework import permissions, status, viewsets
from rest_framework.decorators import detail_route
from rest_framework.response import Response
from .serializers import FlatTeamSerializer
class TeamViewSet(viewsets.ModelViewSet):
    """CRUD API endpoint for teams."""

    queryset = Team.objects.all()
    serializer_class = TeamSerializer
    model = Team
    permission_classes = (IsStaffOrTeamCaptain, )  # TODO: Create IsStaffOrTeamCaptain permission for put/patch/delete
    # TODO: Create IsStaffOrPlayer permission for post
@staticmethod
def setup_eager_loading(queryset):
    """Attach select/prefetch hints so serialization avoids N+1 queries."""
    queryset = queryset.select_related(
        'skill_bracket',
        'captain',
        'creator',
    ).prefetch_related(
        'regions',
        'available_positions',
        'captain__regions',
        'captain__positions',
        'captain__teams',
        'creator__regions',
        'creator__positions',
        'creator__teams',
        'players__regions',
        'teammember_set__player',
        'teammember_set__player__regions',
        'teammember_set__player__positions',
        'teammember_set__player__teams',
    )
    return queryset
def get_serializer_class(self):
    """
    If GET, HEAD, or OPTIONS return the nested serializer
    If POST, PUT, PATCH, or DELETE return a flat serializer
    Change the serializer based on permissions
    * If method is safe, return TeamSerializer
    * If user is the team captain, return EditableFlatTeamSerializer
    * Else, return FlatTeamSerializer
    """
    def _get_serializer_class():
        if self.request.method in permissions.SAFE_METHODS:
            return TeamSerializer
        try:
            instance = self.get_object()
        except AssertionError:
            # NOTE(review): the AssertionError presumably comes from
            # get_object() on list-route requests (no lookup kwarg) —
            # confirm; we fall through to the plain flat serializer.
            pass
        else:
            if self.request.user == instance.captain.user:
                return EditableFlatTeamSerializer
        return FlatTeamSerializer
    serializer_class = _get_serializer_class()
    # print(self.request.method, 'get_serializer_class returning', serializer_class)
    return serializer_class
def get_queryset(self):
queryset = super().get_queryset()
queryset = self.setup_eager_loading(queryset)
return queryset
@detail_route(permission_classes=(permissions.IsAuthenticated, ), methods=('GET', ))
def memberships(self, request, pk=None):
team = self.get_object()
serializer = PlayerMembershipSerializer(
team.teammember_set.all(), many=True, context={'request': request}
)
return Response(serializer.data, status=status.HTTP_200_OK)
def create(self, request, *args, **kwargs):
data = request.data
# Validate with the flat serializer
serializer = FlatTeamSerializer(data=data, context={'request': request})
serializer.is_valid(raise_exception=True)
new_team = self.perform_create(serializer)
try:
player_position = Position.objects.get(pk=request.data.get('player_position'))
except Position.DoesNotExist:
player_position = None
TeamMember.objects.create(team=new_team, player=request.user.player, position=player_position)
headers = self.get_success_headers(serializer.data)
# Return a nested serializer
full_team = TeamSerializer(instance=new_team, context={'request': request})
return Response(full_team.data, status=status.HTTP_201_CREATED, headers=headers)
def perform_create(self, serializer):
return serializer.save(creator=self.request.user.player, captain=self.request.user.player)
def update(self, request, *args, **kwargs):
partial = kwargs.pop('partial', False)
instance = self.get_object()
serializer_class = self.get_serializer_class()
serializer = serializer_class(instance, data=request.data, partial=partial, context={'request': request})
serializer.is_valid(raise_exception=True)
updated_team = self.perform_update(serializer)
try:
# Try to update the requesting user's position within the team
player_position = Position.objects.get(pk=request.data.get('player_position'))
team_member = TeamMember.objects.get(team=updated_team, player=request.user.player)
if player_position != team_member.position:
team_member.position = player_position
team_member.save()
except (Position.DoesNotExist, TeamMember.DoesNotExist):
pass
full_team = TeamSerializer(instance=updated_team, context={'request': request})
return Response(full_team.data)
def perform_update(self, serializer):
return serializer.save()
| {
"content_hash": "999ff93e2aa536baa7503875b5e6ddbe",
"timestamp": "",
"source": "github",
"line_count": 120,
"max_line_length": 119,
"avg_line_length": 44.15,
"alnum_prop": 0.6521328803322008,
"repo_name": "prattl/teamfinder-api",
"id": "ac0b5b93513a19f0d82eede20030a78d1d9645fe",
"size": "5298",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "teams/api/views.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "686"
},
{
"name": "Python",
"bytes": "120842"
},
{
"name": "Shell",
"bytes": "540"
}
],
"symlink_target": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.