text stringlengths 4 1.02M | meta dict |
|---|---|
import contextlib
import json
import os
import textwrap
from cumulusci.tasks.salesforce import Deploy
from cumulusci.utils import temporary_dir
SETTINGS_XML = """<?xml version="1.0" encoding="UTF-8"?>
<{settingsName} xmlns="http://soap.sforce.com/2006/04/metadata">
{values}
</{settingsName}>"""
ORGPREF = """<preferences>
<settingName>{name}</settingName>
<settingValue>{value}</settingValue>
</preferences>"""
PACKAGE_XML = """<?xml version="1.0" encoding="UTF-8"?>
<Package xmlns="http://soap.sforce.com/2006/04/metadata">
<types>
<members>*</members>
<name>Settings</name>
</types>
<version>{api_version}</version>
</Package>"""
class DeployOrgSettings(Deploy):
    """Deploy task that builds a settings metadata package from an sfdx
    scratch org definition file and deploys it via the parent ``Deploy``.
    """

    task_doc = """Deploys org settings from an sfdx scratch org definition file."""
    task_options = {
        "definition_file": {"description": "sfdx scratch org definition file"},
        "api_version": {"description": "API version used to deploy the settings"},
    }

    def _init_options(self, kwargs):
        """Initialize options, forcing namespace injection off."""
        super()._init_options(kwargs)
        # We have no need for namespace injection when deploying settings,
        # so let's explicitly disable it to prevent the Deploy task
        # from making API calls to check if it's needed.
        self.options["managed"] = False
        self.options["namespaced_org"] = False

    def _run_task(self):
        """Read the definition file and deploy any "settings" it contains.

        Returns early (deploying nothing) when the definition file has no
        non-empty "settings" section.
        """
        with open(self.options["definition_file"], "r") as f:
            scratch_org_definition = json.load(f)

        settings = scratch_org_definition.get("settings", {})
        if not settings:
            return

        # Fall back to the org's latest API version when none was given.
        api_version = (
            self.options.get("api_version") or self.org_config.latest_api_version
        )
        with build_settings_package(settings, api_version) as path:
            self.options["path"] = path
            return super()._run_task()
def capitalize(s):
    """Upper-case only the first character of *s*.

    Unlike ``str.title`` or ``str.capitalize``, the remainder of the
    string keeps its original case, e.g. ``accountSettings`` ->
    ``AccountSettings``.
    """
    first_char = s[0]
    return first_char.upper() + s[1:]
@contextlib.contextmanager
def build_settings_package(settings: dict, api_version: str):
    """Build a temporary metadata package containing org settings.

    Creates a ``settings/`` folder with one ``<Name>.settings`` file per
    settings section, plus a ``package.xml`` manifest, inside a temporary
    directory (which is also the process cwd while the context is active).

    :param settings: the "settings" mapping from an sfdx scratch org
        definition file
    :param api_version: API version written to ``package.xml``
    :yields: path of the temporary package directory

    Fix: a leftover debug ``print()`` that echoed every settings file to
    stdout has been removed.
    """
    with temporary_dir() as path:
        os.mkdir("settings")
        for section, section_settings in settings.items():
            settings_name = capitalize(section)
            if section == "orgPreferenceSettings":
                # Org preferences are name/value pairs wrapped in
                # <preferences> elements rather than plain XML tags.
                values = textwrap.indent(
                    "\n".join(
                        ORGPREF.format(name=capitalize(k), value=v)
                        for k, v in section_settings.items()
                    ),
                    " ",
                )
            else:
                values = textwrap.indent(_dict_to_xml(section_settings), " ")
            # e.g. AccountSettings -> settings/Account.settings
            settings_file = os.path.join(
                "settings", settings_name[: -len("Settings")] + ".settings"
            )
            with open(settings_file, "w") as f:
                f.write(SETTINGS_XML.format(settingsName=settings_name, values=values))
        with open("package.xml", "w") as f:
            f.write(PACKAGE_XML.format(api_version=api_version))

        yield path
def _dict_to_xml(d: dict) -> str:
items = []
for k, v in d.items():
if isinstance(v, dict):
v = "\n" + textwrap.indent(_dict_to_xml(v), " ") + "\n"
elif isinstance(v, str):
pass
elif isinstance(v, bool):
v = str(v).lower()
else:
raise TypeError(f"Unexpected type {type(v)} for {k}")
items.append(f"<{k}>{v}</{k}>")
return "\n".join(items)
| {
"content_hash": "b60e19da0cebe2a94e511780614e8700",
"timestamp": "",
"source": "github",
"line_count": 110,
"max_line_length": 87,
"avg_line_length": 34.21818181818182,
"alnum_prop": 0.5831562167906482,
"repo_name": "SalesforceFoundation/CumulusCI",
"id": "1f4176bb57bce06c28e68a77eb0e349b6e3db284",
"size": "3764",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cumulusci/tasks/salesforce/org_settings.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "2303"
},
{
"name": "Python",
"bytes": "754354"
},
{
"name": "RobotFramework",
"bytes": "9330"
},
{
"name": "Shell",
"bytes": "5555"
}
],
"symlink_target": ""
} |
"""A collection of useful Python recipes."""
| {
"content_hash": "15dddce9d577d7c8df00926bcecacce6",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 44,
"avg_line_length": 46,
"alnum_prop": 0.6956521739130435,
"repo_name": "BastiTee/bastis-python-toolbox",
"id": "ef9e2bf0982245014096079577379d43f9b77f16",
"size": "46",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "recipes/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "73084"
}
],
"symlink_target": ""
} |
"""
Copyright 2015 by Tobias Houska
This file is part of Statistical Parameter Estimation Tool (SPOTPY).
:author: Tobias Houska
This example implements the external hydrological model HYMOD into SPOTPY.
"""
import numpy as np
try:
import spotpy
except ImportError:
import sys
sys.path.append(".")
import spotpy
import multiprocessing as mp
import os
# from shutil import rmtree
import sys
from distutils.dir_util import copy_tree, remove_tree
class spot_setup(object):
    """SPOTPY setup wrapping the external HYMOD model executable.

    Fixes applied:
    * ``raise "..."`` raised a plain string (a ``TypeError`` in Python 3);
      a proper ``ValueError`` is raised instead.
    * ``if i == len(x)`` was never true inside ``range(len(x))``, so every
      parameter got a trailing space; parameters are now space-joined.
    * The bare ``except:`` contained a no-op string expression; it now
      catches ``Exception`` and actually reports the failure.
    * Files are opened with ``with`` so handles are closed on error.
    """

    def __init__(self, parallel="seq"):
        """Set up parameter ranges, observations and paths.

        :param parallel: one of "seq", "mpi" or "mpc"
        """
        self.params = [
            spotpy.parameter.Uniform("cmax", low=1.0, high=500, optguess=412.33),
            spotpy.parameter.Uniform("bexp", low=0.1, high=2.0, optguess=0.1725),
            spotpy.parameter.Uniform("alpha", low=0.1, high=0.99, optguess=0.8127),
            spotpy.parameter.Uniform("Ks", low=0.0, high=0.10, optguess=0.0404),
            spotpy.parameter.Uniform("Kq", low=0.1, high=0.99, optguess=0.5592),
        ]
        self.curdir = os.getcwd()
        self.owd = os.path.dirname(os.path.realpath(__file__))
        self.hymod_path = self.owd + os.sep + "hymod_unix"
        # Observations: column 3 of bound.txt, first 730 rows.
        self.evals = list(
            np.genfromtxt(self.hymod_path + os.sep + "bound.txt", skip_header=65)[:, 3]
        )[:730]
        # Unit-conversion factor applied to the simulated series
        # (presumably catchment area over mm/day -> m3/s — TODO confirm).
        self.Factor = 1944 * (1000 * 1000) / (1000 * 60 * 60 * 24)
        self.parallel = parallel

    def parameters(self):
        """Draw a random parameter set from the defined distributions."""
        return spotpy.parameter.generate(self.params)

    def simulation(self, x):
        """Run HYMOD with parameter vector *x*.

        :returns: list of 730 simulated values (NaN-filled on failure)
        :raises ValueError: if ``self.parallel`` is not a known mode
        """
        if self.parallel == "seq":
            call = ""
        elif self.parallel == "mpi":
            # Running n parallel, care has to be taken when files are read
            # or written; isolate each MPI worker (rank offset by 2) in its
            # own copy of the model folder.
            call = str(int(os.environ["OMPI_COMM_WORLD_RANK"]) + 2)
            copy_tree(self.hymod_path, self.hymod_path + call)
        elif self.parallel == "mpc":
            # Same isolation for multiprocessing, keyed by process id.
            call = str(os.getpid())
            copy_tree(self.hymod_path, self.hymod_path + call)
        else:
            # Previously raised a plain string, which is itself a TypeError
            # in Python 3 — raise a real exception instead.
            raise ValueError("No call variable was assigned")

        os.chdir(self.hymod_path + call)
        try:
            with open("Param.in", "w") as params:
                # Space-separated parameters with no trailing separator
                # (the old `i == len(x)` branch was unreachable).
                params.write(" ".join(str(round(value, 5)) for value in x))
            os.system(
                "./hymod_%s.%s" % (sys.version_info.major, sys.version_info.minor)
            )
            simulations = []
            with open("Q.out", "r") as sim_file:
                # Skip the 64 header/warm-up lines, then read 730 values.
                for _ in range(64):
                    sim_file.readline()
                for _ in range(730):
                    val = sim_file.readline()
                    simulations.append(float(val) * self.Factor)
        except Exception:
            # Model might have crashed: report it and assign bad values.
            print("Model has failed")
            simulations = [np.nan] * 795  # Assign bad values - model might have crashed
        os.chdir(self.curdir)
        if self.parallel == "mpi" or self.parallel == "mpc":
            remove_tree(self.hymod_path + call)
        return simulations

    def evaluation(self):
        """Return the observed discharge series."""
        return self.evals

    def objectivefunction(self, simulation, evaluation, params=None):
        # Just an example, please choose an appropriate objective function
        # depending on the used algorithm.
        like = spotpy.objectivefunctions.nashsutcliffe(evaluation, simulation)
        return like
| {
"content_hash": "6ce51c4004a8c33363aba2eda83a22ee",
"timestamp": "",
"source": "github",
"line_count": 109,
"max_line_length": 109,
"avg_line_length": 34.27522935779817,
"alnum_prop": 0.5789614561027837,
"repo_name": "thouska/spotpy",
"id": "e0a8ed0163e9c0822239822ce2952b6797d8fd8d",
"size": "3736",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/spotpy/examples/spot_setup_hymod_unix.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "1032"
},
{
"name": "Cython",
"bytes": "4110"
},
{
"name": "Makefile",
"bytes": "433"
},
{
"name": "Python",
"bytes": "690905"
},
{
"name": "Shell",
"bytes": "304"
}
],
"symlink_target": ""
} |
"""
fs.opener
=========
Open filesystems via a URI.
There are occasions when you want to specify a filesystem from the command line
or in a config file. This module enables that functionality, and can return an
FS object given a filesystem specification in a URI-like syntax (inspired by
the syntax of http://commons.apache.org/vfs/filesystems.html).
The `OpenerRegistry` class maps the protocol (file, ftp etc.) on to an Opener
object, which returns an appropriate filesystem object and path. You can
create a custom opener registry that opens just the filesystems you require, or
use the opener registry defined here (also called `opener`) that can open any
supported filesystem.
The `parse` method of an `OpenerRegistry` object returns a tuple of an FS
object and a path. Here's an example of how to use the default opener registry::
>>> from fs.opener import opener
>>> opener.parse('ftp://ftp.mozilla.org/pub')
(<fs.ftpfs.FTPFS object at 0x96e66ec>, u'pub')
You can use the `opendir` method, which just returns an FS object. In the
example above, `opendir` will return a FS object for the directory `pub`::
>>> opener.opendir('ftp://ftp.mozilla.org/pub')
<SubFS: <FTPFS ftp.mozilla.org>/pub>
If you are just interested in a single file, use the `open` method of a registry
which returns a file-like object, and has the same signature as FS objects and
the `open` builtin::
>>> opener.open('ftp://ftp.mozilla.org/pub/README')
<fs.ftpfs._FTPFile object at 0x973764c>
The `opendir` and `open` methods can also be imported from the top-level of
this module for sake of convenience. To avoid shadowing the builtin `open`
method, they are named `fsopendir` and `fsopen`. Here's how you might import
them::
from fs.opener import fsopendir, fsopen
"""
# Public names exported by this module.
# Fix: 'OpenerRegistry' was previously listed twice.
__all__ = ['OpenerError',
           'NoOpenerError',
           'OpenerRegistry',
           'opener',
           'fsopen',
           'fsopendir',
           'Opener',
           'OSFSOpener',
           'ZipOpener',
           'RPCOpener',
           'FTPOpener',
           'SFTPOpener',
           'MemOpener',
           'DebugOpener',
           'TempOpener',
           'S3Opener',
           'TahoeOpener',
           'DavOpener',
           'HTTPOpener',
           'HDFSOpener']
from fs.path import pathsplit, join, iswildcard, normpath
from fs.osfs import OSFS
from fs.filelike import FileWrapper
from os import getcwd
import os.path
import re
from urlparse import urlparse
class OpenerError(Exception):
    """Root of the opener exception hierarchy; thrown by openers."""
class NoOpenerError(OpenerError):
    """Raised when no opener has been registered for a given protocol."""
def _expand_syspath(path):
if path is None:
return path
if path.startswith('\\\\?\\'):
path = path[4:]
path = os.path.expanduser(os.path.expandvars(path))
path = os.path.normpath(os.path.abspath(path))
return path
def _parse_credentials(url):
scheme = None
if '://' in url:
scheme, url = url.split('://', 1)
username = None
password = None
if '@' in url:
credentials, url = url.split('@', 1)
if ':' in credentials:
username, password = credentials.split(':', 1)
else:
username = credentials
if scheme is not None:
url = '%s://%s' % (scheme, url)
return username, password, url
def _parse_name(fs_name):
if '#' in fs_name:
fs_name, fs_name_params = fs_name.split('#', 1)
return fs_name, fs_name_params
else:
return fs_name, None
def _split_url_path(url):
if '://' not in url:
url = 'http://' + url
scheme, netloc, path, _params, _query, _fragment = urlparse(url)
url = '%s://%s' % (scheme, netloc)
return url, path
class _FSClosingFile(FileWrapper):
    """A file-like object that closes its parent FS when closed itself."""

    def close(self):
        """Close the wrapped file, then close the owning filesystem.

        Fix: ``fs.close`` was previously referenced without calling it, so
        the parent filesystem was never actually closed.
        """
        # NOTE(review): OpenerRegistry.open() assigns the FS to `.fs`, not
        # `._closefs`; confirm which attribute callers actually set.
        fs = getattr(self, '_closefs', None)
        ret = super(_FSClosingFile, self).close()
        if fs is not None:
            fs.close()
        return ret
class OpenerRegistry(object):
    """An opener registry that stores a number of opener objects used to
    parse FS URIs.

    Protocol names (e.g. ``ftp``, ``zip``) map to opener classes;
    :meth:`parse` turns an FS URL into an ``(fs, path)`` pair.
    """

    # Grammar: <protocol>://[<credentials>@]<url>[!<path>]
    re_fs_url = re.compile(r'''
        ^
        (.*?)
        :\/\/
        (?:
            (?:(.*?)@(.*?))
            |(.*?)
        )
        (?:
            !(.*?)$
        )*$
        ''', re.VERBOSE)

    def __init__(self, openers=None):
        """Create a registry, optionally pre-populated with openers.

        :param openers: iterable of opener classes to register.
            (Fix: the previous mutable default argument ``[]`` was
            replaced with ``None`` to avoid shared state across calls.)
        """
        self.registry = {}
        self.openers = {}
        self.default_opener = 'osfs'
        for opener in openers or ():
            self.add(opener)

    @classmethod
    def split_segments(cls, fs_url):
        """Match *fs_url* against the FS URL grammar.

        :returns: an ``re`` match object, or ``None`` for a plain path
        """
        match = cls.re_fs_url.match(fs_url)
        return match

    def get_opener(self, name):
        """Retrieve an opener for the given protocol.

        :param name: name of the opener to open
        :raises NoOpenerError: if no opener has been registered of that name
        """
        if name not in self.registry:
            raise NoOpenerError("No opener for %s" % name)
        index = self.registry[name]
        return self.openers[index]

    def add(self, opener):
        """Adds an opener to the registry.

        :param opener: a class derived from fs.opener.Opener
        """
        index = len(self.openers)
        self.openers[index] = opener
        # Every protocol name the opener declares maps to the same entry.
        for name in opener.names:
            self.registry[name] = index

    def parse(self, fs_url, default_fs_name=None, writeable=False, create_dir=False, cache_hint=True):
        """Parses a FS url and returns an fs object and a path within that FS
        object (if indicated in the path). A tuple of (<FS instance>, <path>)
        is returned.

        :param fs_url: an FS url
        :param default_fs_name: the default FS to use if none is indicated (default is OSFS)
        :param writeable: if True, a writeable FS will be returned
        :param create_dir: if True, then the directory in the FS will be created
        :param cache_hint: cache hint passed on to the opened FS
        """
        orig_url = fs_url
        match = self.split_segments(fs_url)

        if match:
            fs_name, credentials, url1, url2, path = match.groups()
            if credentials:
                fs_url = '%s@%s' % (credentials, url1)
            else:
                fs_url = url2
            path = path or ''
            fs_url = fs_url or ''
            if ':' in fs_name:
                # e.g. "zip:ftp://..." -- the part after the colon is a
                # nested FS URL handled by the outer opener.
                fs_name, sub_protocol = fs_name.split(':', 1)
                fs_url = '%s://%s' % (sub_protocol, fs_url)
            if '!' in path:
                # All but the last '!' segment belong to the nested URL.
                paths = path.split('!')
                path = paths.pop()
                fs_url = '%s!%s' % (fs_url, '!'.join(paths))
            fs_name = fs_name or self.default_opener
        else:
            # Not an FS URL: treat it as a plain system path.
            fs_name = default_fs_name or self.default_opener
            fs_url = _expand_syspath(fs_url)
            path = ''

        fs_name, fs_name_params = _parse_name(fs_name)
        opener = self.get_opener(fs_name)
        if fs_url is None:
            raise OpenerError("Unable to parse '%s'" % orig_url)

        fs, fs_path = opener.get_fs(self, fs_name, fs_name_params, fs_url, writeable, create_dir)
        fs.cache_hint(cache_hint)

        if fs_path and iswildcard(fs_path):
            # Wildcard resources are returned unresolved.
            pathname, resourcename = pathsplit(fs_path or '')
            if pathname:
                fs = fs.opendir(pathname)
            return fs, resourcename

        fs_path = join(fs_path, path)

        if create_dir and fs_path:
            if not fs.getmeta('read_only', False):
                fs.makedir(fs_path, allow_recreate=True)

        pathname, resourcename = pathsplit(fs_path or '')
        if pathname and resourcename:
            fs = fs.opendir(pathname)
            fs_path = resourcename

        return fs, fs_path or ''

    def open(self, fs_url, mode='r', **kwargs):
        """Opens a file from a given FS url.

        If you intend to do a lot of file manipulation, it would likely be
        more efficient to do it directly through the an FS instance (from
        `parse` or `opendir`). This method is fine for one-offs though.

        :param fs_url: a FS URL, e.g. ftp://ftp.mozilla.org/README
        :param mode: mode to open file file
        :rtype: a file
        """
        writeable = 'w' in mode or 'a' in mode or '+' in mode
        fs, path = self.parse(fs_url, writeable=writeable)
        file_object = fs.open(path, mode)
        # Wrap so closing the file can also close the opened FS.
        file_object = _FSClosingFile(file_object, mode)
        file_object.fs = fs
        return file_object

    def getcontents(self, fs_url, mode='rb', encoding=None, errors=None, newline=None):
        """Gets the contents from a given FS url (if it references a file).

        :param fs_url: a FS URL e.g. ftp://ftp.mozilla.org/README
        """
        fs, path = self.parse(fs_url)
        return fs.getcontents(path, mode, encoding=encoding, errors=errors, newline=newline)

    def opendir(self, fs_url, writeable=True, create_dir=False):
        """Opens an FS object from an FS URL.

        :param fs_url: an FS URL e.g. ftp://ftp.mozilla.org
        :param writeable: set to True (the default) if the FS must be writeable
        :param create_dir: create the directory references by the FS URL, if
            it doesn't already exist
        """
        fs, path = self.parse(fs_url, writeable=writeable, create_dir=create_dir)
        if path and '://' not in fs_url:
            # A shortcut to return an OSFS rather than a SubFS for os paths
            return OSFS(fs_url)
        if path:
            fs = fs.opendir(path)
        return fs
class Opener(object):
    """Base class for openers.

    Openers follow a very simple protocol: derive a class from
    :class:`Opener` and define a classmethod called ``get_fs`` with the
    following signature::

        @classmethod
        def get_fs(cls, registry, fs_name, fs_name_params, fs_path,
                   writeable, create_dir):

    The parameters of ``get_fs`` are as follows:

    * ``fs_name`` -- the name of the opener, as extracted from the
      protocol part of the url,
    * ``fs_name_params`` -- reserved for future use,
    * ``fs_path`` -- the path part of the url,
    * ``writeable`` -- if True, ``get_fs`` must return an FS that can be
      written to,
    * ``create_dir`` -- if True, ``get_fs`` should attempt to silently
      create the directory referenced in the path.

    In addition to ``get_fs`` an opener class should contain two class
    attributes: ``names``, a list of protocols the opener handles, and
    ``desc``, an English description of the individual opener syntax.
    """
class OSFSOpener(Opener):
    names = ['osfs', 'file']
    desc = """OS filesystem opener, works with any valid system path. This is the default opener and will be used if you don't indicate which opener to use.
    examples:
    * file://relative/foo/bar/baz.txt (opens a relative file)
    * file:///home/user (opens a directory from a absolute path)
    * osfs://~/ (open the user's home directory)
    * foo/bar.baz (file:// is the default opener)"""

    @classmethod
    def get_fs(cls, registry, fs_name, fs_name_params, fs_path, writeable, create_dir):
        """Open an OSFS rooted at the directory of *fs_path*.

        Returns ``(osfs, resourcename)`` where *resourcename* is the final
        path component ('' when the path is a directory).
        """
        from fs.osfs import OSFS

        path = os.path.normpath(fs_path)
        if create_dir and not os.path.exists(path):
            from fs.osfs import _os_makedirs
            _os_makedirs(path)
        # The FS is rooted at the parent directory; the last component is
        # returned separately as the resource name.
        dirname, resourcename = os.path.split(fs_path)
        osfs = OSFS(dirname)
        return osfs, resourcename
class ZipOpener(Opener):
    names = ['zip', 'zip64']
    desc = """Opens zip files. Use zip64 for > 2 gigabyte zip files, if you have a 64 bit processor.
    examples:
    * zip://myzip.zip (open a local zip file)
    * zip://myzip.zip!foo/bar/insidezip.txt (reference a file insize myzip.zip)
    * zip:ftp://ftp.example.org/myzip.zip (open a zip file stored on a ftp server)"""

    @classmethod
    def get_fs(cls, registry, fs_name, fs_name_params, fs_path, writeable, create_dir):
        """Open a ZipFS; *fs_path* is itself parsed as a nested FS URL.

        :raises OpenerError: if the URL does not reference a file
        """
        # The archive may live on any filesystem, so parse the path as a
        # full FS URL first.
        zip_fs, zip_path = registry.parse(fs_path)
        if zip_path is None:
            raise OpenerError('File required for zip opener')
        # Existing archives are opened for reading (or read/write);
        # missing archives are created.
        if zip_fs.exists(zip_path):
            if writeable:
                open_mode = 'r+b'
            else:
                open_mode = 'rb'
        else:
            open_mode = 'w+'
        # Prefer a real system path when available so ZipFS can open the
        # file directly; otherwise hand it an open file object.
        if zip_fs.hassyspath(zip_path):
            zip_file = zip_fs.getsyspath(zip_path)
        else:
            zip_file = zip_fs.open(zip_path, mode=open_mode)
        _username, _password, fs_path = _parse_credentials(fs_path)
        from fs.zipfs import ZipFS
        if zip_file is None:
            zip_file = fs_path
        mode = 'r'
        if writeable:
            mode = 'a'
        # The 'zip64' protocol name enables zip64 extensions for >2GB archives.
        allow_zip_64 = fs_name.endswith('64')
        zipfs = ZipFS(zip_file, mode=mode, allow_zip_64=allow_zip_64)
        return zipfs, None
class RPCOpener(Opener):
    names = ['rpc']
    desc = """An opener for filesystems server over RPC (see the fsserve command).
    examples:
    rpc://127.0.0.1:8000 (opens a RPC server running on local host, port 80)
    rpc://www.example.org (opens an RPC server on www.example.org, default port 80)"""

    @classmethod
    def get_fs(cls, registry, fs_name, fs_name_params, fs_path, writeable, create_dir):
        """Open an RPCFS for the server at *fs_path*.

        Returns ``(rpcfs, path)``; *path* is ``None`` for the root.
        """
        from fs.rpcfs import RPCFS
        _username, _password, fs_path = _parse_credentials(fs_path)
        if '://' not in fs_path:
            # Default to http when no scheme was given.
            fs_path = 'http://' + fs_path

        scheme, netloc, path, _params, _query, _fragment = urlparse(fs_path)

        rpcfs = RPCFS('%s://%s' % (scheme, netloc))

        if create_dir and path:
            rpcfs.makedir(path, recursive=True, allow_recreate=True)

        return rpcfs, path or None
class FTPOpener(Opener):
    names = ['ftp']
    desc = """An opener for FTP (File Transfer Protocl) server
    examples:
    * ftp://ftp.mozilla.org (opens the root of ftp.mozilla.org)
    * ftp://ftp.example.org/foo/bar (opens /foo/bar on ftp.mozilla.org)"""

    @classmethod
    def get_fs(cls, registry, fs_name, fs_name_params, fs_path, writeable, create_dir):
        """Open an FTPFS, logging in with credentials from the URL if given.

        Returns ``(ftpfs, resourcepath)``; *resourcepath* is ``None`` when
        the URL references a directory.
        """
        from fs.ftpfs import FTPFS
        username, password, fs_path = _parse_credentials(fs_path)

        # Ensure the URL carries a scheme so urlparse splits it correctly.
        scheme, _netloc, _path, _params, _query, _fragment = urlparse(fs_path)
        if not scheme:
            fs_path = 'ftp://' + fs_path
        scheme, netloc, path, _params, _query, _fragment = urlparse(fs_path)

        dirpath, resourcepath = pathsplit(path)
        url = netloc

        # fs_name_params == "symlinks" turns on symlink following.
        ftpfs = FTPFS(url, user=username or '', passwd=password or '', follow_symlinks=(fs_name_params == "symlinks"))
        ftpfs.cache_hint(True)

        if create_dir and path:
            ftpfs.makedir(path, recursive=True, allow_recreate=True)

        if dirpath:
            ftpfs = ftpfs.opendir(dirpath)

        if not resourcepath:
            return ftpfs, None
        else:
            return ftpfs, resourcepath
class SFTPOpener(Opener):
    names = ['sftp']
    desc = """An opener for SFTP (Secure File Transfer Protocol) servers
    examples:
    * sftp://username:password@example.org (opens sftp server example.org with username and password
    * sftp://example.org (opens example.org with public key authentication)"""

    @classmethod
    def get_fs(cls, registry, fs_name, fs_name_params, fs_path, writeable, create_dir):
        """Open an SFTPFS, requiring a successfully authenticated transport.

        :raises OpenerError: if the SFTP transport is not authenticated
        """
        username, password, fs_path = _parse_credentials(fs_path)

        from fs.sftpfs import SFTPFS

        credentials = {}
        if username is not None:
            credentials['username'] = username
        if password is not None:
            credentials['password'] = password

        # Split off the "host[:port]" part from the path part.
        if '/' in fs_path:
            addr, fs_path = fs_path.split('/', 1)
        else:
            addr = fs_path
            fs_path = '/'

        fs_path, resourcename = pathsplit(fs_path)

        host = addr
        port = None
        if ':' in host:
            addr, port = host.rsplit(':', 1)
            try:
                port = int(port)
            except ValueError:
                # Non-numeric port: keep host as the original string.
                pass
            else:
                # Numeric port: pass host as an (addr, port) tuple.
                host = (addr, port)

        if create_dir:
            # Open at the root first so the target dir can be created.
            sftpfs = SFTPFS(host, root_path='/', **credentials)
            if not sftpfs._transport.is_authenticated():
                sftpfs.close()
                raise OpenerError('SFTP requires authentication')
            sftpfs = sftpfs.makeopendir(fs_path)
            return sftpfs, None

        sftpfs = SFTPFS(host, root_path=fs_path, **credentials)
        if not sftpfs._transport.is_authenticated():
            sftpfs.close()
            raise OpenerError('SFTP requires authentication')

        return sftpfs, resourcename
class MemOpener(Opener):
    names = ['mem', 'ram']
    desc = """Creates an in-memory filesystem (very fast but contents will disappear on exit).
    Useful for creating a fast temporary filesystem for serving or mounting with fsserve or fsmount.
    NB: If you user fscp or fsmv to copy/move files here, you are effectively deleting them!
    examples:
    * mem:// (opens a new memory filesystem)
    * mem://foo/bar (opens a new memory filesystem with subdirectory /foo/bar) """

    @classmethod
    def get_fs(cls, registry, fs_name, fs_name_params, fs_path, writeable, create_dir):
        """Create a fresh MemoryFS, optionally rooted at *fs_path*."""
        from fs.memoryfs import MemoryFS
        memfs = MemoryFS()
        if create_dir:
            memfs = memfs.makeopendir(fs_path)
        return memfs, None
class DebugOpener(Opener):
    names = ['debug']
    desc = """For developers -- adds debugging information to output.
    example:
    * debug:ftp://ftp.mozilla.org (displays details of calls made to a ftp filesystem)"""

    @classmethod
    def get_fs(cls, registry, fs_name, fs_name_params, fs_path, writeable, create_dir):
        """Wrap another FS (parsed from *fs_path*) in a DebugFS."""
        from fs.wrapfs.debugfs import DebugFS
        if fs_path:
            fs, _path = registry.parse(fs_path, writeable=writeable, create_dir=create_dir)
            return DebugFS(fs, verbose=False), None
        # No nested URL given: wrap a throwaway memory or temp filesystem.
        if fs_name_params == 'ram':
            from fs.memoryfs import MemoryFS
            return DebugFS(MemoryFS(), identifier=fs_name_params, verbose=False), None
        else:
            from fs.tempfs import TempFS
            return DebugFS(TempFS(), identifier=fs_name_params, verbose=False), None
class TempOpener(Opener):
    names = ['temp']
    desc = """Creates a temporary filesystem, that is erased on exit.
    Probably only useful for mounting or serving.
    NB: If you user fscp or fsmv to copy/move files here, you are effectively deleting them!
    example:
    * temp://"""

    @classmethod
    def get_fs(cls, registry, fs_name, fs_name_params, fs_path, writeable, create_dir):
        """Create a TempFS, lazily instantiated on first use via LazyFS."""
        from fs.tempfs import TempFS
        from fs.wrapfs.lazyfs import LazyFS
        # LazyFS defers TempFS construction until the FS is actually used.
        fs = LazyFS((TempFS, (), {"identifier": fs_name_params}))
        return fs, fs_path
class S3Opener(Opener):
    names = ['s3']
    desc = """Opens a filesystem stored on Amazon S3 storage
    The environment variables AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY should be set"""

    @classmethod
    def get_fs(cls, registry, fs_name, fs_name_params, fs_path, writeable, create_dir):
        """Open an S3FS; the first path component is the bucket name."""
        from fs.s3fs import S3FS

        bucket = fs_path
        path = ''
        if '/' in fs_path:
            bucket, path = fs_path.split('/', 1)

        fs = S3FS(bucket)

        if path:
            # Descend into the directory part; return the final component.
            dirpath, resourcepath = pathsplit(path)
            if dirpath:
                fs = fs.opendir(dirpath)
            path = resourcepath

        return fs, path
class TahoeOpener(Opener):
    names = ['tahoe']
    desc = """Opens a Tahoe-LAFS filesystem
    example:
    * tahoe://http://pubgrid.tahoe-lafs.org/uri/URI:DIR2:h5bkxelehowscijdb [...]"""

    @classmethod
    def get_fs(cls, registry, fs_name, fs_name_params, fs_path, writeable, create_dir):
        """Open a TahoeLAFS filesystem from a ``<url>/uri/<dircap>`` path.

        :raises OpenerError: if the URL does not contain '/uri/'
        """
        from fs.contrib.tahoelafs import TahoeLAFS

        if '/uri/' not in fs_path:
            raise OpenerError("""Tahoe-LAFS url should be in the form <url>/uri/<dicap>""")

        url, dircap = fs_path.split('/uri/')
        path = ''
        if '/' in dircap:
            dircap, path = dircap.split('/', 1)

        fs = TahoeLAFS(dircap, webapi=url)

        if '/' in path:
            # Descend into (or create) the directory part of the path.
            dirname, _resourcename = pathsplit(path)
            if create_dir:
                fs = fs.makeopendir(dirname)
            else:
                fs = fs.opendir(dirname)
            path = ''

        return fs, path
class DavOpener(Opener):
    names = ['dav']
    desc = """Opens a WebDAV server
    example:
    * dav://example.org/dav"""

    @classmethod
    def get_fs(cls, registry, fs_name, fs_name_params, fs_path, writeable, create_dir):
        """Open a DAVFS, passing URL credentials through when present."""
        from fs.contrib.davfs import DAVFS

        url = fs_path
        if '://' not in url:
            # Default to http when no scheme was given.
            url = 'http://' + url

        scheme, url = url.split('://', 1)
        username, password, url = _parse_credentials(url)

        # Only build a credentials dict when something was supplied.
        credentials = None
        if username or password:
            credentials = {}
            if username:
                credentials['username'] = username
            if password:
                credentials['password'] = password

        url = '%s://%s' % (scheme, url)

        fs = DAVFS(url, credentials=credentials)

        return fs, ''
class HTTPOpener(Opener):
    names = ['http']
    desc = """HTTP file opener. HTTP only supports reading files, and not much else.
    example:
    * http://www.example.org/index.html"""

    @classmethod
    def get_fs(cls, registry, fs_name, fs_name_params, fs_path, writeable, create_dir):
        """Open an HTTPFS for the directory part of the URL.

        Returns ``(fs, resourcename)`` where *resourcename* is the final
        path component ('' when none).
        """
        from fs.httpfs import HTTPFS
        if '/' in fs_path:
            dirname, resourcename = fs_path.rsplit('/', 1)
        else:
            dirname = fs_path
            resourcename = ''
        fs = HTTPFS('http://' + dirname)
        return fs, resourcename
class HDFSOpener(Opener):
    names = ['hdfs']
    desc = """HDFS file opener. Supports a variety of filesystem operations
    that interact with HDFS via the WebHDFS API.
    The URI must include a HDFS namenode host or IP address. The port is optional
    and will default to 50070.
    examples:
    * hdfs://1.2.3.4:1234/foo/bar
    * hdfs://1.2.3.4/foo/bar
    """

    @classmethod
    def get_fs(cls, registry, fs_name, fs_name_params, fs_path, writeable, create_dir):
        """Open a HadoopFS from ``<host>[:<port>]/<path>``.

        Host and port fall back to the PYFS_HADOOP_NAMENODE_ADDR /
        PYFS_HADOOP_NAMENODE_PORT environment variables; the port defaults
        to 50070.

        :raises OpenerError: if no namenode host can be determined
        """
        # Split "<namenode>/<path>"; the path defaults to '/'.
        if '/' not in fs_path:
            namenode, path = fs_path, '/'
        else:
            namenode, path = fs_path.split('/', 1)
            if len(path) == 0:
                path = '/'

        # Parse the port from the hostname.
        # Fix: this previously tested `':' in fs_path`, so a colon anywhere
        # in the *path* portion made `namenode.split(':')` fail to unpack.
        namenode_host, namenode_port = namenode, ''
        if ':' in namenode:
            namenode_host, namenode_port = namenode.split(':', 1)

        if not namenode_host:
            env_host = os.environ.get("PYFS_HADOOP_NAMENODE_ADDR")
            if env_host:
                namenode_host = env_host
            else:
                raise OpenerError("No HDFS namenode host specified")

        if not namenode_port:
            namenode_port = os.environ.get("PYFS_HADOOP_NAMENODE_PORT") or "50070"

        from fs.hadoop import HadoopFS
        return HadoopFS(
            namenode=namenode_host,
            port=namenode_port
        ), path
class UserDataOpener(Opener):
    names = ['appuserdata', 'appuser']
    desc = """Opens a filesystem for a per-user application directory.
    The 'domain' should be in the form <author name>:<application name>.<version> (the author name and version are optional).
    example:
    * appuserdata://myapplication
    * appuserdata://examplesoft:myapplication
    * appuserdata://anotherapp.1.1
    * appuserdata://examplesoft:anotherapp.1.3"""

    # Name of the fs.appdirfs class to instantiate; subclasses override.
    FSClass = 'UserDataFS'

    @classmethod
    def get_fs(cls, registry, fs_name, fs_name_params, fs_path, writeable, create_dir):
        """Open an app-dir FS parsed from ``[<author>:]<app>[.<version>][/<path>]``."""
        import fs.appdirfs
        fs_class = getattr(fs.appdirfs, cls.FSClass)
        # Optional "<author>:" prefix.
        if ':' in fs_path:
            appauthor, appname = fs_path.split(':', 1)
        else:
            appauthor = None
            appname = fs_path

        # Optional "/<subpath>" suffix.
        if '/' in appname:
            appname, path = appname.split('/', 1)
        else:
            path = ''

        # Optional ".<version>" suffix.
        if '.' in appname:
            appname, appversion = appname.split('.', 1)
        else:
            appversion = None

        fs = fs_class(appname, appauthor=appauthor, version=appversion, create=create_dir)

        if '/' in path:
            # Descend into (or create) the directory part of the subpath.
            subdir, path = path.rsplit('/', 1)
            if create_dir:
                fs = fs.makeopendir(subdir, recursive=True)
            else:
                fs = fs.opendir(subdir)

        return fs, path
class SiteDataOpener(UserDataOpener):
    names = ['appsitedata', 'appsite']
    desc = """Opens a filesystem for an application site data directory.
    The 'domain' should be in the form <author name>:<application name>.<version> (the author name and version are optional).
    example:
    * appsitedata://myapplication
    * appsitedata://examplesoft:myapplication
    * appsitedata://anotherapp.1.1
    * appsitedata://examplesoft:anotherapp.1.3"""

    # Same URL parsing as UserDataOpener, but backed by SiteDataFS.
    FSClass = 'SiteDataFS'
class UserCacheOpener(UserDataOpener):
    names = ['appusercache', 'appcache']
    desc = """Opens a filesystem for an per-user application cache directory.
    The 'domain' should be in the form <author name>:<application name>.<version> (the author name and version are optional).
    example:
    * appusercache://myapplication
    * appusercache://examplesoft:myapplication
    * appusercache://anotherapp.1.1
    * appusercache://examplesoft:anotherapp.1.3"""

    # Same URL parsing as UserDataOpener, but backed by UserCacheFS.
    FSClass = 'UserCacheFS'
class UserLogOpener(UserDataOpener):
    names = ['appuserlog', 'applog']
    # Fix: the description was copy-pasted from SiteDataOpener and wrongly
    # said "application site data directory" for a user *log* directory.
    desc = """Opens a filesystem for a per-user application log directory.
    The 'domain' should be in the form <author name>:<application name>.<version> (the author name and version are optional).
    example:
    * appuserlog://myapplication
    * appuserlog://examplesoft:myapplication
    * appuserlog://anotherapp.1.1
    * appuserlog://examplesoft:anotherapp.1.3"""

    # Same URL parsing as UserDataOpener, but backed by UserLogFS.
    FSClass = 'UserLogFS'
class MountOpener(Opener):
    names = ['mount']
    desc = """Mounts other filesystems on a 'virtual' filesystem
    The path portion of the FS URL should be a path to an ini file, where the keys are the mount point, and the values are FS URLs to mount.
    The following is an example of such an ini file:
    [fs]
    resources=appuser://myapp/resources
    foo=~/foo
    foo/bar=mem://
    [fs2]
    bar=~/bar
    example:
    * mount://fs.ini
    * mount://fs.ini!resources
    * mount://fs.ini:fs2"""

    @classmethod
    def get_fs(cls, registry, fs_name, fs_name_params, fs_path, writeable, create_dir):
        """Build a MountFS from an ini file of mount-point -> FS URL pairs."""
        from fs.mountfs import MountFS
        from ConfigParser import ConfigParser
        cfg = ConfigParser()

        # An optional '#section' suffix selects the ini section
        # (default section name: 'fs').
        if '#' in fs_path:
            path, section = fs_path.split('#', 1)
        else:
            path = fs_path
            section = 'fs'

        # The ini file itself is opened through the registry, so it may
        # live on any supported filesystem.
        cfg.readfp(registry.open(path))

        mount_fs = MountFS()
        for mount_point, mount_path in cfg.items(section):
            mount_fs.mount(mount_point, registry.opendir(mount_path, create_dir=create_dir))
        return mount_fs, ''
class MultiOpener(Opener):
    names = ['multi']
    desc = """Combines other filesystems in to a single filesystem.
    The path portion of the FS URL should be a path to an ini file, where the keys are the mount point, and the values are FS URLs to mount.
    The following is an example of such an ini file:
    [templates]
    dir1=templates/foo
    dir2=templates/bar
    example:
    * multi://fs.ini"""

    @classmethod
    def get_fs(cls, registry, fs_name, fs_name_params, fs_path, writeable, create_dir):
        """Build a MultiFS from an ini file of name -> FS URL pairs."""
        from fs.multifs import MultiFS
        from ConfigParser import ConfigParser
        cfg = ConfigParser()

        # An optional '#section' suffix selects the ini section
        # (default section name: 'fs').
        if '#' in fs_path:
            path, section = fs_path.split('#', 1)
        else:
            path = fs_path
            section = 'fs'

        # The ini file itself is opened through the registry.
        cfg.readfp(registry.open(path))

        multi_fs = MultiFS()
        for name, fs_url in cfg.items(section):
            multi_fs.addfs(name, registry.opendir(fs_url, create_dir=create_dir))
        return multi_fs, ''
# The default module-level registry, able to open any filesystem
# supported by this module.
opener = OpenerRegistry([OSFSOpener,
                         ZipOpener,
                         RPCOpener,
                         FTPOpener,
                         SFTPOpener,
                         MemOpener,
                         DebugOpener,
                         TempOpener,
                         S3Opener,
                         TahoeOpener,
                         DavOpener,
                         HTTPOpener,
                         UserDataOpener,
                         SiteDataOpener,
                         UserCacheOpener,
                         UserLogOpener,
                         MountOpener,
                         MultiOpener,
                         HDFSOpener
                         ])

# Convenience aliases, named to avoid shadowing the builtin `open`.
fsopen = opener.open
fsopendir = opener.opendir
| {
"content_hash": "776ee3c30de19d8a2dd69329f8bdf2f1",
"timestamp": "",
"source": "github",
"line_count": 907,
"max_line_length": 156,
"avg_line_length": 31.54685777287762,
"alnum_prop": 0.600076888127774,
"repo_name": "duedil-ltd/pyfilesystem",
"id": "fdaa424978d8e147772f2b9678e836e7dd535510",
"size": "28613",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fs/opener.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "1077573"
}
],
"symlink_target": ""
} |
import datetime
from itertools import permutations
from os.path import abspath, exists, isfile, join, getmtime
import shutil
import unittest
from dependency_management.requirements.PipRequirement import PipRequirement
from freezegun import freeze_time
import requests
import requests_mock
from coalib.core.Bear import Bear
from coalib.results.Result import Result
from coalib.settings.Section import Section
from coalib.settings.Setting import Setting
class Bear1(Bear):
    # Minimal Bear subclass used as a test fixture (picked up by the
    # tearDown sweep over module globals below).
    pass
class Bear2(Bear):
    # Second minimal Bear subclass fixture, distinct from Bear1.
    pass
class BearWithAnalysis(Bear):
    # Concrete bear used to exercise execute_task() and metadata extraction.
    # NOTE: the analyze() docstring below is parsed for parameter
    # descriptions and asserted verbatim in the tests -- do not edit it.
    def analyze(self, x: int, y: int, z: int=33):
        """
        Analyzes stuff.
        :param x: First value.
        :param y: Second value.
        :param z: Third value.
        """
        # Yield the inputs back so tests can assert exact results.
        yield x
        yield y
        yield z
class BearWithMoreAnalysis(Bear):
    # Bear with a dependency, used to test that non-optional settings from
    # BEAR_DEPS are merged (and overridden for equal names, e.g. 'y').
    BEAR_DEPS = {BearWithAnalysis}
    def analyze(self, y: int, w: float):
        """
        Analyzes even more stuff.
        :param y: Second value, but better.
        :param w: Fourth value.
        """
        yield y
        yield w
class BearWithPrerequisites(Bear):
    # Declares deliberately bogus pip requirements so the default
    # check_prerequisites() reports them as not installed.
    REQUIREMENTS = {PipRequirement('super-super-bad-package'),
                    PipRequirement('even-even-worse-package')}
class BearWithPrerequisitesOverride(Bear):
    # Class-level switch letting tests control the prerequisite-check result.
    # The tests set this to True, False, or an explanatory string.
    prerequisites_fulfilled = True
    @classmethod
    def check_prerequisites(cls):
        return cls.prerequisites_fulfilled
class BearTest(unittest.TestCase):
    """Unit tests for the core ``Bear`` base class."""
    def tearDown(self):
        # Sweep module globals for Bear subclasses and delete any data
        # directories they created, so tests don't leak state.
        defined_bears = [
            value
            for value in globals().values()
            if isinstance(value, type) and issubclass(value, Bear)]
        for bear in defined_bears:
            if exists(bear.data_dir):
                shutil.rmtree(bear.data_dir)
    def test_invalid_types_at_instantiation(self):
        # Both constructor arguments (section, file dict) are type-checked.
        with self.assertRaises(TypeError):
            Bear(Section('test-section'), 2)
        with self.assertRaises(TypeError):
            Bear(None, {})
    def test_analyze(self):
        # The base class leaves analyze() abstract.
        with self.assertRaises(NotImplementedError):
            Bear(Section('test-section'), {}).analyze()
    def test_generate_tasks(self):
        # The base class leaves generate_tasks() abstract.
        with self.assertRaises(NotImplementedError):
            Bear(Section('test-section'), {}).generate_tasks()
    def test_execute_task(self):
        # Test the default implementation of execute_task().
        section = Section('test-section')
        filedict = {}
        uut = BearWithAnalysis(section, filedict)
        results = uut.execute_task((10, 20), {'z': 30})
        self.assertEqual(results, [10, 20, 30])
    def test_check_prerequisites(self):
        section = Section('test-section')
        filedict = {}
        # Test default implementation.
        # REQUIREMENTS is a set, so the message's requirement order is
        # unspecified -- accept any permutation.
        expected_possibilities = [
            'Following requirements are not installed: '
            '{} (installable via `{}`), {} (installable via `{}`)'.format(
                req1.package, ' '.join(req1.install_command()),
                req2.package, ' '.join(req2.install_command()))
            for req1, req2 in permutations(BearWithPrerequisites.REQUIREMENTS)
        ]
        self.assertIn(BearWithPrerequisites.check_prerequisites(),
                      expected_possibilities)
        # Test correct exception throwing in constructor.
        BearWithPrerequisitesOverride.prerequisites_fulfilled = True
        BearWithPrerequisitesOverride(section, filedict)
        BearWithPrerequisitesOverride.prerequisites_fulfilled = False
        with self.assertRaises(RuntimeError) as cm:
            BearWithPrerequisitesOverride(section, filedict)
        self.assertEqual(
            str(cm.exception),
            'The bear BearWithPrerequisitesOverride does not fulfill all '
            'requirements.')
        # A string return value is appended to the error message.
        BearWithPrerequisitesOverride.prerequisites_fulfilled = (
            'This is on purpose due to running inside a test.')
        with self.assertRaises(RuntimeError) as cm:
            BearWithPrerequisitesOverride(section, filedict)
        self.assertEqual(
            str(cm.exception),
            'The bear BearWithPrerequisitesOverride does not fulfill all '
            'requirements. This is on purpose due to running inside a test.')
    def test_get_metadata(self):
        # Metadata is derived from the analyze() signature and docstring.
        uut = BearWithAnalysis.get_metadata()
        self.assertEqual(uut.non_optional_params,
                         {'x': ('First value.', int),
                          'y': ('Second value.', int)})
        self.assertEqual(
            uut.optional_params,
            {'z': ("Third value. (Optional, defaults to '33'.)", int, 33)})
    def test_get_non_optional_settings(self):
        self.assertEqual(BearWithAnalysis.get_non_optional_settings(),
                         {'x': ('First value.', int),
                          'y': ('Second value.', int)})
        # Test settings of dependency bears. Also test settings-override-
        # behaviour for dependency bears with equal setting names.
        self.assertEqual(BearWithMoreAnalysis.get_non_optional_settings(),
                         {'x': ('First value.', int),
                          'y': ('Second value, but better.', int),
                          'w': ('Fourth value.', float)})
    def test_new_result(self):
        # new_result() is a shorthand for Result.from_values(bear, ...).
        bear = Bear(Section('test-section'), {})
        result = bear.new_result('test message', '/tmp/testy')
        expected = Result.from_values(bear, 'test message', '/tmp/testy')
        self.assertEqual(result, expected)
    def test_get_config_dir(self):
        # The config dir is the directory containing the 'files' setting.
        section = Section('default')
        section.append(Setting('files', '**', '/path/to/dir/config'))
        uut = Bear(section, {})
        self.assertEqual(uut.get_config_dir(), abspath('/path/to/dir'))
    def test_download_cached_file(self):
        section = Section('default')
        uut = Bear(section, {})
        mock_url = 'https://test.com'
        mock_text = """<html>
            <p> lorem ipsum dolor</p>
        </html>"""
        filename = 'test.html'
        file_location = join(uut.data_dir, filename)
        with freeze_time('2017-01-01') as frozen_datetime, \
                requests_mock.Mocker() as reqmock:
            reqmock.get(mock_url, text=mock_text)
            self.assertFalse(isfile(file_location))
            expected_filename = file_location
            result_filename = uut.download_cached_file(mock_url, filename)
            self.assertTrue(isfile(join(file_location)))
            self.assertEqual(result_filename, expected_filename)
            expected_time = getmtime(file_location)
            # A second download within the cache window must not re-fetch:
            # mtime stays unchanged even though (frozen) time advanced.
            frozen_datetime.tick(delta=datetime.timedelta(seconds=0.5))
            result_filename = uut.download_cached_file(mock_url, filename)
            self.assertEqual(result_filename, expected_filename)
            result_time = getmtime(file_location)
            self.assertEqual(result_time, expected_time)
    def test_download_cached_file_connection_timeout_mocked(self):
        # Connection errors from requests propagate unchanged.
        mock_url = 'https://test.com'
        exc = requests.exceptions.ConnectTimeout
        with requests_mock.Mocker() as reqmock:
            reqmock.get(mock_url, exc=exc)
            with self.assertRaisesRegex(exc, '^$'):
                Bear.download_cached_file(
                    mock_url, 'test.html')
    def test_download_cached_file_status_code_error(self):
        # HTTP error status codes raise HTTPError (hits the real network).
        exc = requests.exceptions.HTTPError
        with self.assertRaisesRegex(exc, '418 Client Error'):
            Bear.download_cached_file(
                'http://httpbin.org/status/418', 'test.html')
    def test_json(self):
        result = BearWithAnalysis.__json__()
        # data_dir and source_location are system dependent, so remove them for
        # further tests.
        self.assertIn('data_dir', result)
        del result['data_dir']
        del result['source_location']
        expected = {
            'ASCIINEMA_URL': '',
            'AUTHORS': set(),
            'AUTHORS_EMAILS': set(),
            'BEAR_DEPS': set(),
            'CAN_DETECT': set(),
            'CAN_FIX': set(),
            'INCLUDE_LOCAL_FILES': set(),
            'LANGUAGES': set(),
            'LICENSE': '',
            'MAINTAINERS': set(),
            'MAINTAINERS_EMAILS': set(),
            'metadata': {
                'desc': 'Analyzes stuff.',
                'non_optional_params': {
                    'x': 'First value.',
                    'y': 'Second value.'},
                'optional_params': {
                    'z': "Third value. (Optional, defaults to '33'.)"}},
            'name': 'BearWithAnalysis',
            'PLATFORMS': {'any'},
            'REQUIREMENTS': set()}
        self.assertEqual(result, expected)
| {
"content_hash": "7b52e6c0862fec977523d8b09b3361bf",
"timestamp": "",
"source": "github",
"line_count": 256,
"max_line_length": 79,
"avg_line_length": 33.75,
"alnum_prop": 0.5900462962962963,
"repo_name": "IPMITMO/statan",
"id": "843c8f9d9209d349dc1877e94e3d2bfeb4a3e96f",
"size": "8640",
"binary": false,
"copies": "11",
"ref": "refs/heads/master",
"path": "coala/tests/core/BearTest.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ASP",
"bytes": "101"
},
{
"name": "Batchfile",
"bytes": "10931"
},
{
"name": "C",
"bytes": "28190"
},
{
"name": "C#",
"bytes": "45474"
},
{
"name": "C++",
"bytes": "335"
},
{
"name": "CSS",
"bytes": "6631"
},
{
"name": "Go",
"bytes": "96"
},
{
"name": "HTML",
"bytes": "1564"
},
{
"name": "Java",
"bytes": "592"
},
{
"name": "JavaScript",
"bytes": "472227"
},
{
"name": "Makefile",
"bytes": "15304"
},
{
"name": "PHP",
"bytes": "1804"
},
{
"name": "Python",
"bytes": "2312447"
},
{
"name": "Ruby",
"bytes": "447"
},
{
"name": "Shell",
"bytes": "12706"
}
],
"symlink_target": ""
} |
"""Command to delete a project."""
import textwrap
from googlecloudsdk.api_lib.projects import util
from googlecloudsdk.calliope import base
from googlecloudsdk.core import log
from googlecloudsdk.core.console import console_io
@base.ReleaseTracks(base.ReleaseTrack.BETA, base.ReleaseTrack.GA)
class Delete(base.Command):
  """Delete a Project."""
  # User-facing help text; rendered by the calliope framework.
  detailed_help = {
      'brief': 'Delete a project.',
      'DESCRIPTION': textwrap.dedent("""\
          Deletes the project with the given project ID.
          This command can fail for the following reasons:
              * The project specified does not exist.
              * The active account does not have Owner permissions for
                the given project.
      """),
      'EXAMPLES': textwrap.dedent("""\
          The following command deletes the project with the ID
          `example-foo-bar-1`:
            $ {command} example-foo-bar-1
      """),
  }
  @staticmethod
  def Args(parser):
    # Register the single positional argument: the project ID to delete.
    parser.add_argument('id', metavar='PROJECT_ID',
                        completion_resource='cloudresourcemanager.projects',
                        list_command_path='projects',
                        help='ID for the project you want to delete.')
  @util.HandleHttpError
  def Run(self, args):
    """Delete the project after an interactive confirmation prompt.

    Returns the API response, or None if the user declined the prompt.
    HTTP errors are translated by the @util.HandleHttpError decorator.
    """
    projects = self.context['projects_client']
    messages = self.context['projects_messages']
    resources = self.context['projects_resources']
    project_ref = resources.Parse(args.id,
                                  collection='cloudresourcemanager.projects')
    if not console_io.PromptContinue('Your project will be deleted.'):
      return None
    result = projects.projects.Delete(
        messages.CloudresourcemanagerProjectsDeleteRequest(
            projectId=project_ref.Name()))
    log.DeletedResource(project_ref)
    return result
| {
"content_hash": "68a39f8bc0cd4905d3d81d34802ccdde",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 77,
"avg_line_length": 35.01923076923077,
"alnum_prop": 0.6479956068094453,
"repo_name": "flgiordano/netcash",
"id": "ac5508faafe33f43945c27572637571b58e50609",
"size": "2417",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "+/google-cloud-sdk/lib/surface/projects/delete.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "622"
},
{
"name": "HTML",
"bytes": "33831"
},
{
"name": "JavaScript",
"bytes": "13859"
},
{
"name": "Shell",
"bytes": "2716"
}
],
"symlink_target": ""
} |
import sys
import argparse
import baseline
from baseline.vectorizers import BPEVectorizer1D, WordpieceVectorizer1D
from mead.api_examples.preproc_utils import *
from eight_mile.utils import (
write_yaml, Timer
)
from typing import Optional
import numpy as np
import os
def get_subword_vec1d(type):
    """Map a subword tokenizer type name to its vectorizer class.

    'bpe' and 'wordpiece' resolve to classes imported at module level;
    any other value falls back to SentencePiece, imported lazily so the
    dependency is only required when actually requested.
    """
    if type == 'bpe':
        return BPEVectorizer1D
    if type == 'wordpiece':
        return WordpieceVectorizer1D
    from baseline.vectorizers import SentencePieceVectorizer1D
    return SentencePieceVectorizer1D
def create_record(chunk: list, str_lookup: dict, prefix: Optional[str], suffix: Optional[str], masking: Optional[Masking]=None):
    """Emit a record
    :param chunk: A chunk of integer inputs
    :param str_lookup: A lookup table from integers to strings
    :param prefix: A prefix integer token
    :param suffix: A suffix integer token
    :param masking: Optional masking rule; when omitted, only the unmasked
        `x`/`x_str` entries are produced
    :return: An object with `[xy]_str` and `[xy]` entries
    """
    # Track whether a synthetic prefix/suffix was added so the masking rule
    # can leave those positions alone.
    ignore_prefix = False
    ignore_suffix = False
    if prefix:
        chunk = [prefix] + chunk
        ignore_prefix = True
    if suffix:
        chunk = chunk + [suffix]
        ignore_suffix = True
    if not masking:
        inputs = np.array(chunk)
        return {'x': inputs, 'x_str': [str_lookup[s] for s in inputs]}
    inputs, labels = masking(np.array(chunk), ignore_prefix, ignore_suffix)
    return {'x': inputs, 'y': labels, 'x_str': [str_lookup[s] for s in inputs], 'y_str': [str_lookup[s] for s in labels]}
def run(input_files=[], input_pattern='*.txt', codes=None, vocab=None, nctx=256, fmt='json', fields=['x_str', 'y_str'],
        output=None, x_prefix=None, x_suffix=None, y_prefix=None, y_suffix=None, max_file_size=100, cased=True,
        mask_type="mlm", module=None, pad_y=True, extra_tokens=['[CLS]', '[MASK]'],
        tgt_nctx=None, world_size=1, world_offset=0, subword_type='bpe', **kwargs):
    """Convert text into fixed-width masked-LM records.

    :param input_files: A single text file, or a directory of text files
    :param input_pattern: Glob pattern applied when `input_files` is a directory
    :param output: Output base name; defaults to a path derived from the input
    :param nctx: Maximum input (x) context length
    :param tgt_nctx: Maximum target (y) context length (defaults to 64)
    :param world_size: Decimation factor / number of parallel workers
    :param world_offset: This worker's offset for decimation
    :param subword_type: 'bpe', 'wordpiece' or 'sentencepiece'
    Remaining parameters configure the vectorizer, masking rule and writer.
    """
    timer = Timer()
    if module:
        logger.warning("Loading custom user module %s for masking rules", module)
        baseline.import_user_module(module)
    if os.path.isdir(input_files):
        import glob
        # BUG FIX: remember the directory before replacing `input_files`
        # with the expanded file list; the old code joined against the list.
        input_dir = input_files
        input_files = list(glob.glob(os.path.join(input_dir, input_pattern)))
        if not output:
            output = os.path.join(input_dir, 'records')
    else:
        # BUG FIX: derive the default output name from the path string
        # before wrapping it in a list (the old code formatted the list,
        # producing names like "['file.txt'].records").
        if not output:
            output = f'{input_files}.records'
        input_files = [input_files]
    logger.info('Output [%s]', output)
    if not tgt_nctx:
        tgt_nctx = 64
    transform = baseline.lowercase if not cased else lambda x: x
    Vec1D = get_subword_vec1d(subword_type)
    vectorizer = Vec1D(transform_fn=transform, model_file=codes, vocab_file=vocab, mxlen=1024, extra_tokens=extra_tokens)
    # Resolve optional affix tokens to their vocab ids.
    if x_prefix:
        x_prefix = vectorizer.vocab[x_prefix]
    if x_suffix:
        x_suffix = vectorizer.vocab[x_suffix]
    if y_prefix:
        y_prefix = vectorizer.vocab[y_prefix]
    if y_suffix:
        y_suffix = vectorizer.vocab[y_suffix]
    indices2word = baseline.revlut(vectorizer.vocab)
    root_dir = os.path.dirname(output)
    masking = create_masking(mask_type, vectorizer.vocab, pad_y)
    if not os.path.exists(root_dir):
        os.makedirs(root_dir)
    # Create a file writer for this shard
    fw = create_file_writer(fmt, output, fields, max_file_size, 1000 * world_offset)
    num_read = -1
    num_samples_this_worker = 0
    for text in input_files:
        with open(text, encoding='utf-8') as rf:
            print(f"Reading from {text}...")
            for line in rf:
                num_read += 1
                # Decimate: each worker handles every world_size-th line.
                if num_read % world_size != world_offset:
                    continue
                to_bpe = line.strip().split()
                if not to_bpe:
                    continue
                # FIX: use a dedicated name instead of clobbering `output`
                # (the output path) with the vectorizer result.
                vec, available = vectorizer.run(to_bpe, vectorizer.vocab)
                x, y = masking(vec[:available], False, False)
                if x_prefix:
                    x = [x_prefix] + x
                if y_prefix:
                    y = [y_prefix] + y
                if x_suffix:
                    x += [x_suffix]
                if y_suffix:
                    y += [y_suffix]
                # Truncate then zero-pad to the fixed context widths.
                x = x[:nctx]
                y = y[:tgt_nctx]
                x_t = np.zeros(nctx, dtype=vec.dtype)
                y_t = np.zeros(tgt_nctx, dtype=vec.dtype)
                x_t[:len(x)] = x
                y_t[:len(y)] = y
                record = {'x': x_t, 'y': y_t, 'x_str': [indices2word[s] for s in x_t], 'y_str': [indices2word[s] for s in y_t]}
                if masking.is_valid(record):
                    fw.write(record)
                    num_samples_this_worker += 1
    fw.close()
    duration = timer.elapsed()
    print("Processed {:,} samples in {:.2f}s".format(num_samples_this_worker, duration))
    # Per-worker metadata so a downstream reader knows the sample count.
    f_name = f'md-{world_offset}.yml' if world_size > 1 else 'md.yml'
    write_yaml({'num_samples': num_samples_this_worker}, os.path.join(root_dir, f_name))
def main():
    """CLI entry point: parse the process arguments and run the conversion."""
    run(**vars(parse_args(sys.argv[1:])))
def parse_args(argv):
    """Build the CLI and parse *argv*.

    :param argv: argument list (typically ``sys.argv[1:]``)
    :return: an ``argparse.Namespace`` whose attributes mirror the keyword
        parameters of :func:`run`
    """
    parser = argparse.ArgumentParser(description='Convert paired text into fixed width contexts')
    parser.add_argument('--input_files',
                        help='The text to convert to LM or a path to a file with each line as an example', type=str)
    parser.add_argument('--input_pattern', type=str, default='*.txt')
    parser.add_argument('--world_size', type=int, default=1, help="Can be used as decimation factor, or to support multiproc")
    parser.add_argument('--world_offset', type=int, default=0, help="Offset for decimation or processor")
    parser.add_argument('--codes', help='BPE codes')
    parser.add_argument('--vocab', help='BPE vocab')
    parser.add_argument("--subword_type", type=str, choices=["bpe", "wordpiece", "sentencepiece"], default="bpe")
    parser.add_argument("--nctx", type=int, default=256, help="Max input length")
    parser.add_argument("--fmt", type=str, default='json', choices=['json', 'tsv', 'tfrecord'])
    parser.add_argument("--fields", type=str, nargs="+", default=["x_str", "y_str"])
    parser.add_argument("--output", type=str, help="Output base name, e.g. /path/to/output/record")
    parser.add_argument("--x_prefix", type=str, help="Prefix every x with this token")
    parser.add_argument("--x_suffix", type=str, help="Suffix every x with this token")
    parser.add_argument("--y_prefix", type=str, help="Prefix every y with this token")
    parser.add_argument("--y_suffix", type=str, help="Suffix every y with this token")
    parser.add_argument("--max_file_size", type=int, default=100, help="Shard size, defaults to 100MB")
    parser.add_argument("--cased", type=baseline.str2bool, default=True)
    parser.add_argument("--mask_type", type=str, default="mlm", help="Masking rules, including 'mlm' and 'causal'")
    parser.add_argument("--module", default=None, help="Module containing custom masking rules")
    parser.add_argument("--pad_y", type=baseline.str2bool, default=True,
                        help="Replace all non-masked Y values with <PAD>")
    parser.add_argument("--extra_tokens", type=str, nargs="+", default=['[CLS]', '[MASK]'])
    args = parser.parse_args(argv)
    return args
# Script entry point.
if __name__ == '__main__':
    main()
| {
"content_hash": "9a15a493b3b0434b85646df7eee921e9",
"timestamp": "",
"source": "github",
"line_count": 175,
"max_line_length": 128,
"avg_line_length": 42.52,
"alnum_prop": 0.6140303722617928,
"repo_name": "dpressel/baseline",
"id": "c9f8289c1ca2b357c4735089dcfe4cee641f9012",
"size": "7441",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mead/api_examples/preproc_paired.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "9649"
},
{
"name": "CMake",
"bytes": "430"
},
{
"name": "HCL",
"bytes": "923"
},
{
"name": "Perl",
"bytes": "17554"
},
{
"name": "Python",
"bytes": "1281602"
},
{
"name": "Roff",
"bytes": "24"
},
{
"name": "Shell",
"bytes": "10168"
}
],
"symlink_target": ""
} |
import random
import socket
import threading
class DNSSeeder(object):
    """Resolve DNS seed hostnames into candidate peer (ip, port) addresses.

    One daemon thread is started per seed; as lookups complete, resolved
    addresses are appended to a lock-protected pool that callers drain
    via pop().
    """

    def __init__(self, dns_seeds):
        """:param dns_seeds: iterable of (hostname, port) pairs to resolve."""
        self._dns_seeds = dns_seeds
        self._lock = threading.Lock()
        self._found = []
        self._start()

    def __len__(self):
        # Number of resolved addresses currently available.
        with self._lock:
            return len(self._found)

    def pop(self):
        """Remove and return a random resolved address.

        Raises IndexError if no addresses have been found yet (check
        ``len(seeder)`` first).
        """
        with self._lock:
            address = random.choice(self._found)
            self._found.remove(address)
            return address

    def _start(self):
        # Kick off one daemon thread per seed; each resolves its hostname
        # and appends every returned (ip, port) to the shared pool.
        def try_address(address):
            try:
                (ip_address, port) = address
                index = 0
                for info in socket.getaddrinfo(ip_address, port, socket.AF_INET, socket.SOCK_STREAM, socket.IPPROTO_TCP):
                    # FIX: `except Exception, e` is Python-2-only syntax;
                    # this form works on Python 2.6+ and 3 (the bound name
                    # was unused anyway).
                    try:
                        with self._lock:
                            self._found.append((info[4][0], info[4][1]))
                    except Exception:
                        pass
                    # snooze for some time, so each dns_seed has a chance
                    # to add nodes, and get addresses from those nodes
                    #snooze = -1 + 1.3 ** index
                    #if snooze > 600: snooze = 600 + random.randint(0, 120)
                    #index += 1
                    #time.sleep(snooze)
            except Exception:
                # Resolution failures are best-effort: other seeds may
                # still succeed.
                pass

        for address in self._dns_seeds:
            thread = threading.Thread(target=try_address, args=(address,))
            thread.daemon = True
            thread.start()
| {
"content_hash": "609cbb016634d0e847fa575a8b626119",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 121,
"avg_line_length": 30.6,
"alnum_prop": 0.4823529411764706,
"repo_name": "ricmoo/pycoind",
"id": "a50fa16268798cf8ee84684cd0930668a526ff0b",
"size": "2650",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pycoind/util/bootstrap.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "631386"
}
],
"symlink_target": ""
} |
import pytest
from datadog_checks.base import AgentCheck
from datadog_checks.mesos_slave import MesosSlave
from .common import not_windows_ci
# Module-level marker: skip every test in this module on Windows CI.
pytestmark = not_windows_ci
@pytest.mark.integration
@pytest.mark.usefixtures("dd_environment")
def test_service_check(bad_instance, aggregator):
    # Running the check against a bad instance must raise, and emit exactly
    # one CRITICAL `mesos_slave.can_connect` service check.
    check = MesosSlave('mesos_slave', {}, [bad_instance])
    with pytest.raises(Exception):
        check.check(bad_instance)
    aggregator.assert_service_check('mesos_slave.can_connect', count=1, status=AgentCheck.CRITICAL)
| {
"content_hash": "74cd4e6218eb2589a592a6824027c8de",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 99,
"avg_line_length": 27.473684210526315,
"alnum_prop": 0.7605363984674329,
"repo_name": "DataDog/integrations-core",
"id": "dc7b186058b88ed8bc8e4a1ccab7b34d8d1708c0",
"size": "637",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mesos_slave/tests/test_integration.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "578"
},
{
"name": "COBOL",
"bytes": "12312"
},
{
"name": "Dockerfile",
"bytes": "22998"
},
{
"name": "Erlang",
"bytes": "15518"
},
{
"name": "Go",
"bytes": "6988"
},
{
"name": "HCL",
"bytes": "4080"
},
{
"name": "HTML",
"bytes": "1318"
},
{
"name": "JavaScript",
"bytes": "1817"
},
{
"name": "Kotlin",
"bytes": "430"
},
{
"name": "Lua",
"bytes": "3489"
},
{
"name": "PHP",
"bytes": "20"
},
{
"name": "PowerShell",
"bytes": "2398"
},
{
"name": "Python",
"bytes": "13020828"
},
{
"name": "Roff",
"bytes": "359"
},
{
"name": "Ruby",
"bytes": "241"
},
{
"name": "Scala",
"bytes": "7000"
},
{
"name": "Shell",
"bytes": "83227"
},
{
"name": "Swift",
"bytes": "203"
},
{
"name": "TSQL",
"bytes": "29972"
},
{
"name": "TypeScript",
"bytes": "1019"
}
],
"symlink_target": ""
} |
import flask
import os
import configparser
import mwoauth
from requests_oauthlib import OAuth1
import requests
# Name of the ini file holding OAuth credentials, next to this module.
CONFIG_FILENAME = "config.ini"
app = flask.Flask(__name__)
# Load configuration
curr_dir = os.path.dirname(__file__)
config = configparser.ConfigParser()
# Preserve key case so CONSUMER_KEY/CONSUMER_SECRET keep their casing.
config.optionxform = str
config.read(os.path.join(curr_dir, CONFIG_FILENAME))
app.config.update(dict(config.items("CREDS")))
# Generate consumer token
consumer_token = mwoauth.ConsumerToken(
    app.config["CONSUMER_KEY"], app.config["CONSUMER_SECRET"])
@app.route('/')
def index():
    """Landing page: show the tool for logged-in users, else the login page."""
    username = flask.session.get("username")
    if not username:
        return flask.render_template("index.html")
    return flask.render_template("yabbr.html", username=username)
@app.route("/edit", methods=["GET", "POST"])
def edit():
username = flask.session.get("username", None)
if not username or flask.request.method == "GET":
return flask.render_template("index.html")
# We'll need this auth1 param for all of our requests
access_token_dict = flask.session.get("access_token")
auth1 = OAuth1(consumer_token.key, consumer_token.secret, access_token_dict["key"], access_token_dict["secret"])
edit_token = flask.session.get("edit_token", None)
if not edit_token:
query_params = {
'action': "query",
'meta': 'tokens',
'format': "json"
}
response = requests.get("https://en.wikipedia.org/w/api.php", params=query_params, auth=auth1)
print(response.json())
edit_token = response.json()["query"]["tokens"]["csrftoken"]
flask.session["edit_token"] = edit_token
text = flask.request.form.get("text", None)
title = flask.request.form.get("title", None)
summary = flask.request.form.get("summary", None)
if not text or not title or not summary:
response = {"error": "The following required parameters weren't supplied: " + ", ".join([x[0] for x in [("text", text), ("title", title), ("summary", summary)] if not x[1]])}
return flask.jsonify(response)
query_params = {
"action": "edit",
"title": title,
"summary": summary,
"format": "json"
}
query_data = {
"token": edit_token,
"text": text
}
response = requests.post("https://en.wikipedia.org/w/api.php", params=query_params, data=query_data, auth=auth1)
return flask.jsonify(response.json())
@app.route("/login")
def login():
try:
redirect, request_token = mwoauth.initiate(app.config["OAUTH_MWURI"], consumer_token)
except Exception:
app.logger.exception("mwoauth.initiate failed")
return flask.redirect(flask.url_for('index'))
else:
# Convert request_token into a dictionary
request_token_dict = dict(zip(request_token._fields, request_token))
flask.session["request_token"] = request_token_dict
return flask.redirect(redirect)
@app.route("/oauth-callback")
def oauth_callback():
if "request_token" not in flask.session:
app.logger.exception("OAuth callback failed. Are cookies disabled?")
return flask.redirect(flask.url_for("index"))
try:
access_token = mwoauth.complete(app.config["OAUTH_MWURI"], consumer_token, mwoauth.RequestToken(**flask.session["request_token"]), flask.request.query_string)
identity = mwoauth.identify(app.config["OAUTH_MWURI"], consumer_token, access_token)
except Exception:
app.logger.exception("OAuth authentication failed.")
else:
flask.session["access_token"] = dict(zip(access_token._fields, access_token))
flask.session["username"] = identity["username"]
# Check for at least 2K edits
query_params = {
"action": "query",
"list": "users",
"ususers": identity["username"],
"usprop": "editcount",
"format": "json"
}
response = requests.get("https://en.wikipedia.org/w/api.php", params=query_params)
print(response.content)
if response.json()["query"]["users"][0]["editcount"] < 2000:
flask.flash("Your edit count needs to be at least 2000 to use this tool.")
flask.session.clear()
return flask.render_template("error.html", text="Your edit count needs to be at least 2000 to use this tool!")
return flask.redirect(flask.url_for("index"))
@app.route('/logout')
def logout():
    """Clear the user's session, logging them out, and return to the index."""
    home = flask.url_for('index')
    flask.session.clear()
    return flask.redirect(home)
| {
"content_hash": "eecb10a91a4adfaa22da0d8aea472914",
"timestamp": "",
"source": "github",
"line_count": 120,
"max_line_length": 178,
"avg_line_length": 35.61666666666667,
"alnum_prop": 0.6881141787552644,
"repo_name": "APerson241/yabbr",
"id": "14ba589151045ac3bf678af2b984a1a9f466adb5",
"size": "4274",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "www/python/src/app.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "12069"
},
{
"name": "HTML",
"bytes": "2873"
},
{
"name": "JavaScript",
"bytes": "25187"
},
{
"name": "Python",
"bytes": "4274"
}
],
"symlink_target": ""
} |
import docker
from logging import debug
import sys
import subprocess
import os
# Lazily-initialized module-level docker client; see ensure_docker_client().
docker_client = None
def exists(exe):
    """Return True if an executable named *exe* is found on $PATH."""
    for directory in os.environ["PATH"].split(os.pathsep):
        if os.access(os.path.join(directory, exe), os.X_OK):
            return True
    return False
def is_linux():
    """True when running on Linux ('linux2' covers Python 2 interpreters)."""
    return sys.platform in ('linux', 'linux2')
def run(cmd, cont=False):
    """Run a command line, tokenized with shlex.

    :param cmd: the command line to run (no shell involved)
    :param cont: when True, discard stdout and return the exit status
        ("continue" even on failure); otherwise return captured stdout
        (raises CalledProcessError on a non-zero exit).
    """
    debug('Running command: ' + cmd)
    import shlex
    args = shlex.split(cmd)
    if cont:
        # FIX: close the devnull handle instead of leaking an open file.
        with open(os.devnull, 'w') as devnull:
            return subprocess.call(args, stdout=devnull)
    return subprocess.check_output(args)
def docker_machine_run(cmd):
    """Run *cmd* on the docker host: directly on Linux, via docker-machine
    ssh on other platforms."""
    if not is_linux():
        cmd = 'docker-machine ssh YANS-machine ' + cmd
    return run(cmd)
def create_links(links):
    """Create one Linux bridge per link and bring it up."""
    for link in links:
        docker_machine_run('sudo brctl addbr ' + link.bridge_name)
        docker_machine_run('sudo ip link set ' + link.bridge_name + ' up')
def destroy_links(links):
    """Bring down and delete the bridge backing each link."""
    for link in links:
        docker_machine_run('sudo ip link set ' + link.bridge_name + ' down')
        docker_machine_run('sudo brctl delbr ' + link.bridge_name)
def create_nodes(nodes):
    """Pull the node image, then start one long-lived privileged container
    per node (kept alive by a very long sleep)."""
    client().images.pull('kennethjiang/yans-node')
    for node in nodes:
        client().containers.run('kennethjiang/yans-node',
                                name=node.container_name,
                                command='sleep 3153600000',
                                detach=True,
                                privileged=True)
def destroy_nodes(nodes):
    # Force-remove each node's container; containers that no longer exist
    # are silently skipped.
    for node in nodes:
        try:
            client().containers.get(node.container_name).remove(force=True)
        except docker.errors.NotFound:
            pass
def attach_node(node):
    """Open an interactive bash shell inside the node's container,
    wired to this process's stdin/stdout."""
    set_docker_machine_env()
    import shlex
    command = 'docker exec -it --privileged ' + node.container_name + ' bash'
    subprocess.call(shlex.split(command), stdin=sys.stdin, stdout=sys.stdout)
def bind_interface(interface):
    # Create a veth pair, attach the peer end to the link's bridge, and move
    # the other end into the node container's network namespace.
    docker_machine_run('sudo ip link add ' + interface.name + ' type veth peer name ' + interface.peer_name)
    docker_machine_run('sudo ip link set ' + interface.peer_name + ' up')
    docker_machine_run('sudo brctl addif ' + interface.link.bridge_name + ' ' + interface.peer_name)
    # The container's PID identifies its network namespace for `ip link set netns`.
    container_pid = str(client().api.inspect_container( interface.node.container_name )['State']['Pid'])
    docker_machine_run('sudo ip link set netns ' + container_pid + ' dev ' + interface.name)
def ensure_docker_machine():
    """On non-Linux hosts, make sure the YANS docker machine exists and is
    running; exits the process if docker-machine is not installed."""
    if is_linux():
        # docker machine is not required on Linux.
        return
    if not exists('docker-machine'):
        sys.exit("docker-machine is required to run yans on Mac OS X. Please make sure it is installed and in $PATH")
    missing = run('docker-machine inspect YANS-machine', cont=True) != 0
    if missing:
        # No YANS machine yet -- create one backed by VirtualBox.
        print('Creating docker machine that will host all YANS containers')
        run('docker-machine create -d virtualbox --virtualbox-boot2docker-url https://github.com/kennethjiang/YANS/raw/master/boot2docker/boot2docker.iso YANS-machine')
    run('docker-machine start YANS-machine', cont=True)  # make sure YANS-machine is started
def client():
    """Return the shared docker client, creating it on first use."""
    ensure_docker_client()
    return docker_client
def ensure_docker_client():
    """Initialize the module-level docker client the first time it is needed."""
    global docker_client
    if docker_client:
        return
    set_docker_machine_env()
    docker_client = docker.from_env()
def set_docker_machine_env():
    """On non-Linux hosts, export docker-machine's environment variables
    into this process so the docker client can reach the machine."""
    if is_linux():
        return
    out = run('docker-machine env YANS-machine')
    import re
    for (name, value) in re.findall('export ([^=]+)="(.+)"', out):
        os.environ[name] = value
| {
"content_hash": "ea960671a5654fe44e1deb525abdef3d",
"timestamp": "",
"source": "github",
"line_count": 95,
"max_line_length": 168,
"avg_line_length": 35.93684210526316,
"alnum_prop": 0.6687170474516696,
"repo_name": "kennethjiang/YANS",
"id": "46eb7672a9375cee40ba9dd134ed2a2a138acfbf",
"size": "3414",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docker_command.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "11630"
},
{
"name": "Shell",
"bytes": "103"
}
],
"symlink_target": ""
} |
from GeoHealthCheck.probe import Probe
class HttpGet(Probe):
    """
    Do HTTP GET Request, to poll/ping any Resource bare url.
    """
    NAME = 'HTTP GET Resource URL'
    DESCRIPTION = 'Simple HTTP GET on Resource URL'
    # Applies to any resource type.
    RESOURCE_TYPE = '*:*'
    REQUEST_METHOD = 'GET'
    # Checks that can be attached to this probe; only HttpStatusNoError is
    # enabled by default, the rest are opt-in.
    CHECKS_AVAIL = {
        'GeoHealthCheck.plugins.check.checks.HttpStatusNoError': {
            'default': True
        },
        'GeoHealthCheck.plugins.check.checks.ContainsStrings': {},
        'GeoHealthCheck.plugins.check.checks.NotContainsStrings': {},
        'GeoHealthCheck.plugins.check.checks.HttpHasContentType': {}
    }
    """Checks avail"""
class HttpGetQuery(HttpGet):
    """
    Do HTTP GET Request, to poll/ping any Resource bare url with query string.
    """
    NAME = 'HTTP GET Resource URL with query'
    DESCRIPTION = """
    HTTP GET Resource URL with
    ?query string to be user-supplied (without ?)
    """
    # The '?' is supplied by the template; the user only provides the query.
    REQUEST_TEMPLATE = '?{query}'
    PARAM_DEFS = {
        'query': {
            'type': 'string',
            'description': 'The query string to add to request (without ?)',
            'default': None,
            'required': True
        }
    }
    """Param defs"""
class HttpPost(HttpGet):
    """
    Do HTTP POST Request, to send POST request to
    Resource bare url with POST body.
    """
    NAME = 'HTTP POST Resource URL with body'
    DESCRIPTION = """
    HTTP POST to Resource URL with body
    content(-type) to be user-supplied
    """
    REQUEST_METHOD = 'POST'
    # The content-type header placeholder is resolved at runtime by
    # get_request_headers() below, not by template substitution.
    REQUEST_HEADERS = {'content-type': '{post_content_type}'}
    REQUEST_TEMPLATE = '{body}'
    PARAM_DEFS = {
        'body': {
            'type': 'string',
            'description': 'The post body to send',
            'default': None,
            'required': True
        },
        'content_type': {
            'type': 'string',
            'description': 'The post content type to send',
            'default': 'text/xml;charset=UTF-8',
            'required': True
        }
    }
    """Param defs"""
    def get_request_headers(self):
        """
        Overridden from Probe: construct request_headers
        via parameter substitution from content_type Parameter.
        """
        # content_type =
        #     {'post_content_type': self._parameters['content_type']}
        # request_headers =
        #     self.REQUEST_HEADERS['content-type'].format(**content_type)
        # Hmm seems simpler
        return {'content-type': self._parameters['content_type']}
| {
"content_hash": "633791056f81172f26bb222cd66e93dc",
"timestamp": "",
"source": "github",
"line_count": 91,
"max_line_length": 78,
"avg_line_length": 27.78021978021978,
"alnum_prop": 0.5652689873417721,
"repo_name": "tomkralidis/GeoHealthCheck",
"id": "774e10990382d940702b41ca10fa622ae71b3afc",
"size": "2528",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "GeoHealthCheck/plugins/probe/http.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2374"
},
{
"name": "Dockerfile",
"bytes": "2768"
},
{
"name": "HTML",
"bytes": "78685"
},
{
"name": "JavaScript",
"bytes": "6442"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Python",
"bytes": "278769"
},
{
"name": "Shell",
"bytes": "6841"
}
],
"symlink_target": ""
} |
from __future__ import with_statement
import base64
import copy
import os
import re
import shutil
import tempfile
import zipfile
from cStringIO import StringIO
from xml.dom import minidom
from distutils import dir_util
from selenium.webdriver.common.proxy import ProxyType
from selenium.common.exceptions import WebDriverException
# Filename of the bundled WebDriver Firefox extension (.xpi).
WEBDRIVER_EXT = "webdriver.xpi"
# Extension id the driver extension is registered under inside a profile.
EXTENSION_NAME = "fxdriver@googlecode.com"
class FirefoxProfile(object):
    """Models a Firefox profile directory on disk.

    Manages user.js preferences, proxy configuration and extension
    installation for profiles driven by Firefox WebDriver.

    NOTE(review): this module is Python 2 only (``unicode``, ``cStringIO``,
    ``base64.encodestring``).
    """
    ANONYMOUS_PROFILE_NAME = "WEBDRIVER_ANONYMOUS_PROFILE"
    # Preferences written to every profile's user.js; they disable update
    # checks, warning dialogs and other UI that interferes with automation.
    # NOTE(review): "browser.startup.page" and "security.warn_submit_insecure"
    # occur twice in this literal; the last occurrence silently wins.
    DEFAULT_PREFERENCES = {
        "app.update.auto": "false",
        "app.update.enabled": "false",
        "browser.startup.page" : "0",
        "browser.download.manager.showWhenStarting": "false",
        "browser.EULA.override": "true",
        "browser.EULA.3.accepted": "true",
        "browser.link.open_external": "2",
        "browser.link.open_newwindow": "2",
        "browser.offline": "false",
        "browser.safebrowsing.enabled": "false",
        "browser.search.update": "false",
        "browser.sessionstore.resume_from_crash": "false",
        "browser.shell.checkDefaultBrowser": "false",
        "browser.tabs.warnOnClose": "false",
        "browser.tabs.warnOnOpen": "false",
        "browser.startup.page": "0",
        "browser.safebrowsing.malware.enabled": "false",
        "startup.homepage_welcome_url": "\"about:blank\"",
        "devtools.errorconsole.enabled": "true",
        "dom.disable_open_during_load": "false",
        "extensions.autoDisableScopes" : 10,
        "extensions.logging.enabled": "true",
        "extensions.update.enabled": "false",
        "extensions.update.notifyUser": "false",
        "network.manage-offline-status": "false",
        "network.http.max-connections-per-server": "10",
        "network.http.phishy-userpass-length": "255",
        "offline-apps.allow_by_default": "true",
        "prompts.tab_modal.enabled": "false",
        "security.fileuri.origin_policy": "3",
        "security.fileuri.strict_origin_policy": "false",
        "security.warn_entering_secure": "false",
        "security.warn_submit_insecure": "false",
        "security.warn_entering_secure.show_once": "false",
        "security.warn_entering_weak": "false",
        "security.warn_entering_weak.show_once": "false",
        "security.warn_leaving_secure": "false",
        "security.warn_leaving_secure.show_once": "false",
        "security.warn_submit_insecure": "false",
        "security.warn_viewing_mixed": "false",
        "security.warn_viewing_mixed.show_once": "false",
        "signon.rememberSignons": "false",
        "toolkit.networkmanager.disable": "true",
        "toolkit.telemetry.enabled": "false",
        "toolkit.telemetry.prompted": "2",
        "toolkit.telemetry.rejected": "true",
        "javascript.options.showInConsole": "true",
        "browser.dom.window.dump.enabled": "true",
        "webdriver_accept_untrusted_certs": "true",
        "webdriver_enable_native_events": "true",
        "webdriver_assume_untrusted_issuer": "true",
        "dom.max_script_run_time": "30",
        }
    def __init__(self,profile_directory=None):
        """
        Initialises a new instance of a Firefox Profile
        :args:
         - profile_directory: Directory of profile that you want to use.
           This defaults to None and will create a new
           directory when object is created.
        """
        self.default_preferences = copy.deepcopy(
            FirefoxProfile.DEFAULT_PREFERENCES)
        self.profile_dir = profile_directory
        if self.profile_dir is None:
            self.profile_dir = self._create_tempfolder()
        else:
            # Work on a copy so the caller's profile directory is never
            # mutated; lock files are excluded from the copy.
            newprof = os.path.join(tempfile.mkdtemp(),
                "webdriver-py-profilecopy")
            shutil.copytree(self.profile_dir, newprof,
                ignore=shutil.ignore_patterns("parent.lock", "lock", ".parentlock"))
            self.profile_dir = newprof
            self._read_existing_userjs()
        self.extensionsDir = os.path.join(self.profile_dir, "extensions")
        self.userPrefs = os.path.join(self.profile_dir, "user.js")
    #Public Methods
    def set_preference(self, key, value):
        """
        sets the preference that we want in the profile.

        Booleans become the strings 'true'/'false', strings are quoted,
        and anything else is coerced through int() — the formats user.js
        expects.
        """
        clean_value = ''
        if value is True:
            clean_value = 'true'
        elif value is False:
            clean_value = 'false'
        elif isinstance(value, str):
            clean_value = '"%s"' % value
        elif isinstance(value, unicode):
            # Python 2 only: unicode strings are quoted like str.
            clean_value = '"%s"' % value
        else:
            clean_value = str(int(value))
        self.default_preferences[key] = clean_value
    def add_extension(self, extension=WEBDRIVER_EXT):
        # Install an addon into this profile; defaults to the bundled
        # WebDriver extension.
        self._install_extension(extension)
    def update_preferences(self):
        # Flush the accumulated preferences to the profile's user.js.
        self._write_user_prefs(self.default_preferences)
    #Properties
    @property
    def path(self):
        """
        Gets the profile directory that is currently being used
        """
        return self.profile_dir
    @property
    def port(self):
        """
        Gets the port that WebDriver is working on
        """
        return self._port
    @port.setter
    def port(self, port):
        """
        Sets the port that WebDriver will be running on
        """
        if not isinstance(port, int):
            raise WebDriverException("Port needs to be an integer")
        self._port = port
        self.set_preference("webdriver_firefox_port", self._port)
    @property
    def accept_untrusted_certs(self):
        # Stored as the string 'true'/'false'; convert back to a bool.
        return self._santise_pref(
            self.default_preferences["webdriver_accept_untrusted_certs"])
    @accept_untrusted_certs.setter
    def accept_untrusted_certs(self, value):
        if value not in [True, False]:
            raise WebDriverException("Please pass in a Boolean to this call")
        self.set_preference("webdriver_accept_untrusted_certs", value)
    @property
    def assume_untrusted_cert_issuer(self):
        return self._santise_pref(self.default_preferences["webdriver_assume_untrusted_issuer"])
    @assume_untrusted_cert_issuer.setter
    def assume_untrusted_cert_issuer(self, value):
        if value not in [True, False]:
            raise WebDriverException("Please pass in a Boolean to this call")
        self.set_preference("webdriver_assume_untrusted_issuer", value)
    @property
    def native_events_enabled(self):
        return self._santise_pref(self.default_preferences['webdriver_enable_native_events'])
    @native_events_enabled.setter
    def native_events_enabled(self, value):
        if value not in [True, False]:
            raise WebDriverException("Please pass in a Boolean to this call")
        self.set_preference("webdriver_enable_native_events", value)
    @property
    def encoded(self):
        """
        A zipped, base64 encoded string of profile directory
        for use with remote WebDriver JSON wire protocol
        """
        # NOTE(review): base64.encodestring is the Python 2 API
        # (removed in Python 3.9; use encodebytes there).
        fp = StringIO()
        zipped = zipfile.ZipFile(fp, 'w', zipfile.ZIP_DEFLATED)
        path_root = len(self.path) + 1 # account for trailing slash
        for base, dirs, files in os.walk(self.path):
            for fyle in files:
                filename = os.path.join(base, fyle)
                # Store paths relative to the profile root inside the zip.
                zipped.write(filename, filename[path_root:])
        zipped.close()
        return base64.encodestring(fp.getvalue())
    def set_proxy(self, proxy):
        """Translate a selenium Proxy object into network.proxy.* prefs."""
        if proxy is None:
            raise ValueError("proxy can not be None")
        if proxy.proxy_type is ProxyType.UNSPECIFIED:
            return
        self.set_preference("network.proxy.type", proxy.proxy_type['ff_value'])
        if proxy.proxy_type is ProxyType.MANUAL:
            self.set_preference("network.proxy.no_proxies_on", proxy.no_proxy)
            self._set_manual_proxy_preference("ftp", proxy.ftp_proxy)
            self._set_manual_proxy_preference("http", proxy.http_proxy)
            self._set_manual_proxy_preference("ssl", proxy.ssl_proxy)
        elif proxy.proxy_type is ProxyType.AUTODETECT:
            self.set_preference("network.proxy.autoconfig_url", proxy.proxy_autoconfig_url)
    #Private Methods
    def _santise_pref(self, item):
        # Map the stored 'true'/'false' strings back to booleans; anything
        # else is returned unchanged.
        if item == 'true':
            return True
        elif item == 'false':
            return False
        else:
            return item
    def _set_manual_proxy_preference(self, key, setting):
        # NOTE(review): `setting is ''` relies on CPython string interning;
        # the intended check is equality (`== ''`).
        if setting is None or setting is '':
            return
        # NOTE(review): splitting on ":" means host_details[1] only exists
        # for scheme-prefixed values like "http://host:port"; a plain
        # "host:port" input would be parsed incorrectly — confirm the
        # expected input format.
        host_details = setting.split(":")
        self.set_preference("network.proxy.%s" % key, host_details[1][2:])
        if len(host_details) > 1:
            self.set_preference("network.proxy.%s_port" % key, int(host_details[2]))
    def _create_tempfolder(self):
        """
        Creates a temp folder to store User.js and the extension
        """
        return tempfile.mkdtemp()
    def _write_user_prefs(self, user_prefs):
        """
        writes the current user prefs dictionary to disk
        """
        with open(self.userPrefs, "w") as f:
            for key, value in user_prefs.items():
                f.write('user_pref("%s", %s);\n' % (key, value))
    def _read_existing_userjs(self):
        # Merge preferences from an existing user.js into our defaults so
        # they survive the rewrite done by _write_user_prefs.
        userjs_path = os.path.join(self.profile_dir, 'user.js')
        PREF_RE = re.compile(r'user_pref\("(.*)",\s(.*)\)')
        try:
            with open(userjs_path) as f:
                for usr in f:
                    matches = re.search(PREF_RE, usr)
                    self.default_preferences[matches.group(1)] = matches.group(2)
        except:
            # The profile given hasn't had any changes made, i.e no users.js
            # NOTE(review): this bare except also swallows AttributeError
            # raised when a line does not match PREF_RE (matches is None).
            pass
    def _install_extension(self, addon, unpack=True):
        """
        Installs addon from a filepath, url
        or directory of addons in the profile.
        - path: url, path to .xpi, or directory of addons
        - unpack: whether to unpack unless specified otherwise in the install.rdf
        """
        if addon == WEBDRIVER_EXT:
            addon = os.path.join(os.path.dirname(__file__), WEBDRIVER_EXT)
        tmpdir = None
        xpifile = None
        if addon.endswith('.xpi'):
            # Unzip the .xpi into a temp dir so install.rdf can be inspected.
            tmpdir = tempfile.mkdtemp(suffix = '.' + os.path.split(addon)[-1])
            compressed_file = zipfile.ZipFile(addon, 'r')
            for name in compressed_file.namelist():
                if name.endswith('/'):
                    os.makedirs(os.path.join(tmpdir, name))
                else:
                    if not os.path.isdir(os.path.dirname(os.path.join(tmpdir, name))):
                        os.makedirs(os.path.dirname(os.path.join(tmpdir, name)))
                    data = compressed_file.read(name)
                    with open(os.path.join(tmpdir, name), 'wb') as f:
                        f.write(data)
            xpifile = addon
            addon = tmpdir
        # determine the addon id
        addon_details = self._addon_details(addon)
        addon_id = addon_details.get('id')
        assert addon_id, 'The addon id could not be found: %s' % addon
        # copy the addon to the profile
        extensions_path = os.path.join(self.profile_dir, 'extensions')
        addon_path = os.path.join(extensions_path, addon_id)
        if not unpack and not addon_details['unpack'] and xpifile:
            # Keep the addon packed: copy the original .xpi into place.
            if not os.path.exists(extensions_path):
                os.makedirs(extensions_path)
            shutil.copy(xpifile, addon_path + '.xpi')
        else:
            dir_util.copy_tree(addon, addon_path, preserve_symlinks=1)
        # remove the temporary directory, if any
        if tmpdir:
            dir_util.remove_tree(tmpdir)
    def _addon_details(self, addon_path):
        """
        returns a dictionary of details about the addon
        - addon_path : path to the addon directory
        Returns:
        {'id': u'rainbow@colors.org', # id of the addon
        'version': u'1.4', # version of the addon
        'name': u'Rainbow', # name of the addon
        'unpack': False } # whether to unpack the addon
        """
        # TODO: We don't use the unpack variable yet, but we should: bug 662683
        details = {
            'id': None,
            'name': None,
            'unpack': True,
            'version': None
        }
        def get_namespace_id(doc, url):
            # Find the prefix bound to namespace *url* on the root element.
            attributes = doc.documentElement.attributes
            namespace = ""
            for i in range(attributes.length):
                if attributes.item(i).value == url:
                    if ":" in attributes.item(i).name:
                        # If the namespace is not the default one remove 'xlmns:'
                        namespace = attributes.item(i).name.split(':')[1] + ":"
                    break
            return namespace
        def get_text(element):
            """Retrieve the text value of a given node"""
            rc = []
            for node in element.childNodes:
                if node.nodeType == node.TEXT_NODE:
                    rc.append(node.data)
            return ''.join(rc).strip()
        doc = minidom.parse(os.path.join(addon_path, 'install.rdf'))
        # Get the namespaces abbreviations
        em = get_namespace_id(doc, "http://www.mozilla.org/2004/em-rdf#")
        rdf = get_namespace_id(doc, "http://www.w3.org/1999/02/22-rdf-syntax-ns#")
        description = doc.getElementsByTagName(rdf + "Description").item(0)
        for node in description.childNodes:
            # Remove the namespace prefix from the tag for comparison
            entry = node.nodeName.replace(em, "")
            if entry in details.keys():
                details.update({ entry: get_text(node) })
        return details
| {
"content_hash": "3c717a42673804abf58f49d9f9f2ff58",
"timestamp": "",
"source": "github",
"line_count": 358,
"max_line_length": 96,
"avg_line_length": 38.212290502793294,
"alnum_prop": 0.5902777777777778,
"repo_name": "leighpauls/k2cro4",
"id": "d36494e6a5f48cdb9af92c451cab4c5692628323",
"size": "14303",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "third_party/webdriver/pylib/selenium/webdriver/firefox/firefox_profile.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ASP",
"bytes": "3062"
},
{
"name": "AppleScript",
"bytes": "25392"
},
{
"name": "Arduino",
"bytes": "464"
},
{
"name": "Assembly",
"bytes": "68131038"
},
{
"name": "C",
"bytes": "242794338"
},
{
"name": "C#",
"bytes": "11024"
},
{
"name": "C++",
"bytes": "353525184"
},
{
"name": "Common Lisp",
"bytes": "3721"
},
{
"name": "D",
"bytes": "1931"
},
{
"name": "Emacs Lisp",
"bytes": "1639"
},
{
"name": "F#",
"bytes": "4992"
},
{
"name": "FORTRAN",
"bytes": "10404"
},
{
"name": "Java",
"bytes": "3845159"
},
{
"name": "JavaScript",
"bytes": "39146656"
},
{
"name": "Lua",
"bytes": "13768"
},
{
"name": "Matlab",
"bytes": "22373"
},
{
"name": "Objective-C",
"bytes": "21887598"
},
{
"name": "PHP",
"bytes": "2344144"
},
{
"name": "Perl",
"bytes": "49033099"
},
{
"name": "Prolog",
"bytes": "2926122"
},
{
"name": "Python",
"bytes": "39863959"
},
{
"name": "R",
"bytes": "262"
},
{
"name": "Racket",
"bytes": "359"
},
{
"name": "Ruby",
"bytes": "304063"
},
{
"name": "Scheme",
"bytes": "14853"
},
{
"name": "Shell",
"bytes": "9195117"
},
{
"name": "Tcl",
"bytes": "1919771"
},
{
"name": "Verilog",
"bytes": "3092"
},
{
"name": "Visual Basic",
"bytes": "1430"
},
{
"name": "eC",
"bytes": "5079"
}
],
"symlink_target": ""
} |
'''
Created on Oct 18, 2016
@author: jaime
'''
from django.core import serializers
from django.http import HttpResponse
from decimal import Decimal
import json
class JSONBaseMixin(object):
    """Shared helper for views that emit JSON-typed HTTP responses."""
    def get_json_response(self, content, **httpresponse_kwargs):
        """
        Build an `HttpResponse` carrying *content* with a JSON content type.
        """
        return HttpResponse(
            content,
            content_type='application/json',
            **httpresponse_kwargs
        )
class JSONQuerysetResponseMixin(JSONBaseMixin):
    """
    Helper for class based views whose response should be the JSON
    serialization of a Django queryset.
    """
    def render_queryset_to_response(self, context, **httpresponse_kwargs):
        """
        Serialize the queryset in *context* and wrap it in a JSON response.
        """
        payload = serializers.serialize('json', context)
        return self.get_json_response(payload, **httpresponse_kwargs)
class JSONResponseMixin(JSONBaseMixin):
    """
    Helper for class based views
    Useful when the response should be a json representation of a python object
    """
    def render_data_to_response(self, context, **httpresponse_kwargs):
        """
        Returns a JSON response containing 'context' as payload
        """
        return self.get_json_response(self.convert_context_to_json(context), **httpresponse_kwargs)
    def convert_context_to_json(self, context):
        """
        Convert the context dictionary into a JSON string.

        Note: json does not support Decimal serialization by default, so we
        pass decimal_default to convert Decimals to serializable floats.
        """
        return json.dumps(context, default=self.decimal_default)
    def decimal_default(self, obj):
        """
        Fallback serializer for json.dumps: convert Decimal to float.

        Any other unsupported type raises TypeError, as the json `default`
        contract requires. (The previous version returned the object
        unchanged, which made json.dumps recurse indefinitely on
        unserializable values instead of failing with a clear error.)
        """
        if isinstance(obj, Decimal):
            return float(obj)
        raise TypeError(
            "Object of type %s is not JSON serializable" % type(obj).__name__)
"content_hash": "4dd23a5921d3d80a07ba05078ed77951",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 142,
"avg_line_length": 36.421052631578945,
"alnum_prop": 0.6392100192678227,
"repo_name": "jroeland/teapot",
"id": "189c9677dd793ba6fb5ff3b0b64d4f50b86b941e",
"size": "2076",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "project/web/app/utils/mixins.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "59770"
},
{
"name": "Shell",
"bytes": "2058"
}
],
"symlink_target": ""
} |
from flask import Flask
from flask import redirect
from flask import url_for
# WSGI application object; Flask uses __name__ to locate resources.
app = Flask(__name__)
@app.route("/post", methods=['POST'])
def post():
    # Redirect POST requests back to the 'index' endpoint.
    # NOTE(review): `url_for` must be imported from flask, and an 'index'
    # view must be registered elsewhere; neither is visible in this file —
    # confirm.
    return redirect(url_for('index'))
| {
"content_hash": "0650310ac5bb70dc97c29ce681ce989d",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 37,
"avg_line_length": 18.22222222222222,
"alnum_prop": 0.6707317073170732,
"repo_name": "alfg/postpress",
"id": "2d837a9d7b3bad819b33ef781fa07dc9cffdb08e",
"size": "187",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "postpress/post.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "47875"
},
{
"name": "Python",
"bytes": "5483"
}
],
"symlink_target": ""
} |
"""Callback functions that can be used to track various status during epoch."""
from __future__ import absolute_import
import logging
import math
import time
from .model import save_checkpoint
def module_checkpoint(mod, prefix, period=1, save_optimizer_states=False):
    """Callback to checkpoint Module to prefix every epoch.

    Parameters
    ----------
    mod : subclass of BaseModule
        The module to checkpoint.
    prefix : str
        The file prefix for this checkpoint.
    period : int
        How many epochs to wait before checkpointing. Defaults to 1.
    save_optimizer_states : bool
        Indicates whether or not to save optimizer states for continued training.

    Returns
    -------
    callback : function
        The callback function that can be passed as iter_end_callback to fit.
    """
    interval = int(max(1, period))
    # pylint: disable=unused-argument
    def _callback(iter_no, sym=None, arg=None, aux=None):
        """Save a checkpoint when the epoch number is a multiple of the interval."""
        epoch = iter_no + 1
        if epoch % interval == 0:
            mod.save_checkpoint(prefix, epoch, save_optimizer_states)
    return _callback
def do_checkpoint(prefix, period=1):
    """A callback that saves a model checkpoint every few epochs.

    Each checkpoint consists of a model description file named
    `prefix`--symbol.json and a parameters (weights and biases) file named
    `prefix`-`epoch_number`.params.

    Parameters
    ----------
    prefix : str
        Prefix for the checkpoint filenames.
    period : int, optional
        Interval (number of epochs) between checkpoints. Default `period` is 1.

    Returns
    -------
    callback : function
        A callback function that can be passed as `epoch_end_callback` to fit.

    Example
    -------
    >>> module.fit(iterator, num_epoch=n_epoch,
    ... epoch_end_callback = mx.callback.do_checkpoint("mymodel", 1))
    Start training with [cpu(0)]
    Epoch[0] Resetting Data Iterator
    Epoch[0] Time cost=0.100
    Saved checkpoint to "mymodel-0001.params"
    Epoch[1] Resetting Data Iterator
    Epoch[1] Time cost=0.060
    Saved checkpoint to "mymodel-0002.params"
    """
    interval = int(max(1, period))
    def _callback(iter_no, sym, arg, aux):
        """Persist the model when the epoch number reaches the interval."""
        epoch = iter_no + 1
        if epoch % interval == 0:
            save_checkpoint(prefix, epoch, sym, arg, aux)
    return _callback
def log_train_metric(period, auto_reset=False):
    """Callback to log the training evaluation result every period.

    Parameters
    ----------
    period : int
        The number of batch to log the training evaluation metric.
    auto_reset : bool
        Reset the metric after each log.

    Returns
    -------
    callback : function
        The callback function that can be passed as iter_epoch_callback to fit.
    """
    def _callback(param):
        """Log the current metric values when the batch index lines up."""
        metric = param.eval_metric
        if metric is None or param.nbatch % period != 0:
            return
        for name, value in metric.get_name_value():
            logging.info('Iter[%d] Batch[%d] Train-%s=%f',
                         param.epoch, param.nbatch, name, value)
        if auto_reset:
            metric.reset_local()
    return _callback
class Speedometer(object):
    """Logs training speed and evaluation metrics periodically.

    Parameters
    ----------
    batch_size: int
        Batch size of data.
    frequent: int
        Specifies how frequently training speed and evaluation metrics
        must be logged. Default behavior is to log once every 50 batches.
    auto_reset : bool
        Reset the evaluation metrics after each log.

    Example
    -------
    >>> # Print training speed and evaluation metrics every ten batches. Batch size is one.
    >>> module.fit(iterator, num_epoch=n_epoch,
    ... batch_end_callback=mx.callback.Speedometer(1, 10))
    Epoch[0] Batch [10] Speed: 1910.41 samples/sec Train-accuracy=0.200000
    Epoch[0] Batch [20] Speed: 1764.83 samples/sec Train-accuracy=0.400000
    Epoch[0] Batch [30] Speed: 1740.59 samples/sec Train-accuracy=0.500000
    """
    def __init__(self, batch_size, frequent=50, auto_reset=True):
        self.batch_size = batch_size
        self.frequent = frequent
        self.init = False       # True once the first batch of an epoch was seen
        self.tic = 0            # timestamp of the last log / epoch start
        self.last_count = 0     # batch count seen on the previous call
        self.auto_reset = auto_reset
    def __call__(self, param):
        """Callback to Show speed."""
        count = param.nbatch
        # A decreasing batch count means a new epoch started: re-arm timing.
        if self.last_count > count:
            self.init = False
        self.last_count = count
        if self.init:
            if count % self.frequent == 0:
                # #11504 — guard against a zero time delta.
                try:
                    speed = self.frequent * self.batch_size / (time.time() - self.tic)
                except ZeroDivisionError:
                    speed = float('inf')
                if param.eval_metric is not None:
                    name_value = param.eval_metric.get_name_value()
                    if self.auto_reset:
                        # Metrics were reset: the window covers the last
                        # `frequent` batches only.
                        param.eval_metric.reset_local()
                        msg = 'Epoch[%d] Batch [%d-%d]\tSpeed: %.2f samples/sec'
                        if 'lr' in param.locals.keys():
                            msg += ' lr:%f'
                        msg += '\t%s=%f'*len(name_value)
                        if 'lr' in param.locals.keys():
                            logging.info(msg, param.epoch, count-self.frequent,
                                         count, speed, param.locals['lr'],
                                         *sum(name_value, ()))
                        else:
                            logging.info(msg, param.epoch, count-self.frequent,
                                         count, speed, *sum(name_value, ()))
                    else:
                        # Without reset the metrics cover the whole epoch so far.
                        msg = 'Epoch[%d] Batch [0-%d]\tSpeed: %.2f samples/sec'
                        if 'lr' in param.locals.keys():
                            msg += ' lr:%f'
                        msg += '\t%s=%f'*len(name_value)
                        if 'lr' in param.locals.keys():
                            logging.info(msg, param.epoch, count, speed, param.locals['lr'],
                                         *sum(name_value, ()))
                        else:
                            logging.info(msg, param.epoch, count, speed, *sum(name_value, ()))
                else:
                    logging.info("Iter[%d] Batch [%d]\tSpeed: %.2f samples/sec",
                                 param.epoch, count, speed)
                self.tic = time.time()
        else:
            self.init = True
            self.tic = time.time()
class ProgressBar(object):
    """Displays a progress bar, indicating the percentage of batches
    processed within each epoch.

    Parameters
    ----------
    total: int
        total number of batches per epoch
    length: int
        number of chars to define maximum length of progress bar

    Examples
    --------
    >>> progress_bar = mx.callback.ProgressBar(total=2)
    >>> mod.fit(data, num_epoch=5, batch_end_callback=progress_bar)
    [========--------] 50.0%
    [================] 100.0%
    """
    def __init__(self, total, length=80):
        self.bar_len = length
        self.total = total
    def __call__(self, param):
        """Log a textual progress bar for the current batch count."""
        done = param.nbatch
        filled = int(round(self.bar_len * done / float(self.total)))
        percent = math.ceil(100.0 * done / float(self.total))
        bar = '=' * filled + '-' * (self.bar_len - filled)
        logging.info('[%s] %s%s\r', bar, percent, '%')
class LogValidationMetricsCallback(object):
    """Just logs the eval metrics at the end of an epoch."""
    def __call__(self, param):
        metric = param.eval_metric
        if not metric:
            return
        for name, value in metric.get_name_value():
            logging.info('Epoch[%d] Validation-%s=%f', param.epoch, name, value)
| {
"content_hash": "aa0722914170fabe58473322064f87ee",
"timestamp": "",
"source": "github",
"line_count": 218,
"max_line_length": 97,
"avg_line_length": 37.25688073394495,
"alnum_prop": 0.5602068456045309,
"repo_name": "mlperf/training_results_v0.6",
"id": "3e15ebfb8ec61b262499b8e50ca97c6e24c39b7c",
"size": "8924",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Fujitsu/benchmarks/resnet/implementations/mxnet/python/mxnet/callback.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ANTLR",
"bytes": "1731"
},
{
"name": "Batchfile",
"bytes": "13941"
},
{
"name": "C",
"bytes": "208630"
},
{
"name": "C++",
"bytes": "10999411"
},
{
"name": "CMake",
"bytes": "129712"
},
{
"name": "CSS",
"bytes": "64767"
},
{
"name": "Clojure",
"bytes": "396764"
},
{
"name": "Cuda",
"bytes": "2272433"
},
{
"name": "Dockerfile",
"bytes": "67820"
},
{
"name": "Groovy",
"bytes": "62557"
},
{
"name": "HTML",
"bytes": "19753082"
},
{
"name": "Java",
"bytes": "166294"
},
{
"name": "JavaScript",
"bytes": "71846"
},
{
"name": "Julia",
"bytes": "408765"
},
{
"name": "Jupyter Notebook",
"bytes": "2713169"
},
{
"name": "Lua",
"bytes": "4430"
},
{
"name": "MATLAB",
"bytes": "34903"
},
{
"name": "Makefile",
"bytes": "115694"
},
{
"name": "Perl",
"bytes": "1535873"
},
{
"name": "Perl 6",
"bytes": "7280"
},
{
"name": "PowerShell",
"bytes": "6150"
},
{
"name": "Python",
"bytes": "24905683"
},
{
"name": "R",
"bytes": "351865"
},
{
"name": "Roff",
"bytes": "293052"
},
{
"name": "Scala",
"bytes": "1189019"
},
{
"name": "Shell",
"bytes": "794096"
},
{
"name": "Smalltalk",
"bytes": "3497"
},
{
"name": "TypeScript",
"bytes": "361164"
}
],
"symlink_target": ""
} |
import logging
from collections import defaultdict
from typing import Union, Optional, Sequence, Tuple, Any
import pyvex
from angr.knowledge_plugins import Function
from . import Analysis
from ..utils.library import get_cpp_function_name
from ..utils.formatting import ansi_color_enabled, ansi_color, add_edge_to_buffer
from ..block import DisassemblerInsn, CapstoneInsn, SootBlockNode
from ..codenode import BlockNode
from .disassembly_utils import decode_instruction
try:
    # Optional pcode backend: when angr's pcode engine and pypcode are
    # available, IR blocks/ops may come from either pyvex or pcode.
    from ..engines import pcode
    import pypcode
    IRSBType = Union[pyvex.IRSB, pcode.lifter.IRSB]
    IROpObjType = Union[pyvex.stmt.IRStmt, pypcode.PcodeOp]
except ImportError:
    # pcode engine unavailable: fall back to pyvex-only types.
    # Use the IRStmt class (not the pyvex.stmt module) so the alias is a
    # proper type in both branches, consistent with the Union above.
    pcode = None
    IRSBType = pyvex.IRSB
    IROpObjType = pyvex.stmt.IRStmt
# Module-level logger shared by the disassembly classes below.
l = logging.getLogger(name=__name__)
# pylint: disable=unidiomatic-typecheck
class DisassemblyPiece:
    """Base class for a renderable fragment of a disassembly listing."""
    addr = None
    ident = float('nan')
    def render(self, formatting=None):
        """Render this piece, applying highlighting to single-line output."""
        lines = self._render(formatting)
        if len(lines) != 1:
            return lines
        return [self.highlight(lines[0], formatting)]
    def _render(self, formatting):
        # Subclasses must produce a list of display strings.
        raise NotImplementedError
    def getpiece(self, formatting, column):  # pylint:disable=unused-argument
        """Return the piece occupying the given column (this piece itself)."""
        return self
    def width(self, formatting):
        """Width in characters of the widest rendered line (0 if empty)."""
        rendered = self._render(formatting)
        return max((len(line) for line in rendered), default=0)
    def height(self, formatting):
        """Number of rendered lines."""
        return len(self._render(formatting))
    @staticmethod
    def color(string, coloring, formatting):
        """Wrap *string* in the configured color codes for *coloring*."""
        try:
            start = formatting['colors'][coloring][0]
            end = formatting['colors'][coloring][1]
        except KeyError:
            return string
        return '%s%s%s' % (start, string, end)
    def highlight(self, string, formatting=None):
        """Apply a custom format callback or the highlight color, if requested."""
        if formatting is not None:
            try:
                if 'format_callback' in formatting:
                    return formatting['format_callback'](self, string)
                if self in formatting['highlight']:
                    return self.color(string, 'highlight', formatting)
            except KeyError:
                pass
        return string
    def __eq__(self, other):
        # Pieces are never equal by default; subclasses override as needed.
        return False
class FunctionStart(DisassemblyPiece):
    """Disassembly piece marking the entry point of a function."""
    def __init__(self, func):
        """
        Constructor.

        :param angr.knowledge.Function func: The function instance.
        """
        self.addr = func.addr
        self.vars = []
        self.name = func.name
        self.is_simprocedure = func.is_simprocedure
        # Resolve the SimProcedure backing this function, if any.
        if func.is_syscall:
            self.sim_procedure = func._project.simos.syscall_from_addr(self.addr)
        elif func.is_simprocedure:
            self.sim_procedure = func._project.hooked_by(self.addr)
        else:
            self.sim_procedure = None
    def _render(self, formatting):
        # TODO: Make the individual elements be individual Pieces
        return ['%s = %#x' % (var_name, var_off) for var_off, var_name in self.vars]
    def height(self, formatting):
        return len(self.vars)
class Label(DisassemblyPiece):
    """A named label, rendered as 'name:'."""
    def __init__(self, addr, name):
        self.addr = addr
        self.name = name
    def _render(self, formatting):  # pylint:disable=unused-argument
        return ['%s:' % self.name]
class IROp(DisassemblyPiece):
    """A single IR statement/op within a lifted block."""
    __slots__ = ('addr', 'seq', 'obj', 'irsb')
    addr: int
    seq: int
    obj: IROpObjType
    irsb: IRSBType
    def __init__(self, addr: int, seq: int, obj: IROpObjType, irsb: IRSBType):
        self.addr = addr
        self.seq = seq
        self.obj = obj
        self.irsb = irsb
    def __str__(self):
        return str(self.obj)
    def _render(self, formatting):  # pylint:disable=unused-argument
        return [str(self)]
class BlockStart(DisassemblyPiece):
    """Invisible marker for the start of a basic block."""
    def __init__(self, block, parentfunc, project):
        self.addr = block.addr
        self.size = block.size
        self.parentfunc = parentfunc
        self.project = project
    def _render(self, formatting):
        # Block starts contribute no text of their own.
        return []
class Hook(DisassemblyPiece):
    """A hooked address, rendered with the name of its SimProcedure."""
    def __init__(self, block):
        self.addr = block.addr
        raw = str(block.sim_procedure)
        # Full dotted name and trailing component, stripped of repr clutter.
        self.name = raw.split()[-1].strip("'<>")
        self.short_name = raw.strip("'<>").split('.')[-1]
    def _render(self, formatting):
        return ['SimProcedure ' + self.short_name]
    def __eq__(self, other):
        return type(other) is Hook and self.name == other.name
class Instruction(DisassemblyPiece):
    """A single disassembled instruction, parsed into opcode and operand pieces."""
    def __init__(self, insn, parentblock, project=None):
        """
        :param insn: The disassembler instruction object (e.g. a CapstoneInsn).
        :param parentblock: The block this instruction belongs to, or None.
        :param project: Used to obtain the arch when parentblock is None.
        """
        self.addr = insn.address
        self.size = insn.size
        self.insn = insn
        self.parentblock = parentblock
        self.project = parentblock.project if parentblock is not None else project
        self.arch = self.project.arch
        self.format = ''
        self.components = ()
        self.operands = [ ]
        # the following members will be filled in after dissecting the instruction
        self.type = None
        self.branch_type = None
        self.branch_target_operand = None
        self.disect_instruction()
        if isinstance(insn, CapstoneInsn):
            decode_instruction(self.arch, self)
    @property
    def mnemonic(self):
        # Alias: the opcode piece plays the role of the mnemonic.
        return self.opcode
    def reload_format(self):
        # Re-disassemble the raw bytes and re-parse the operand string.
        self.insn = CapstoneInsn(next(self.arch.capstone.disasm(self.insn.bytes, self.addr)))
        self.disect_instruction()
    def disect_instruction(self):
        """Parse self.insn.op_str into Opcode and Operand pieces, filling self.operands."""
        # perform a "smart split" of an operands string into smaller pieces
        insn_pieces = self.split_op_string(self.insn.op_str)
        self.operands = []
        cur_operand = None
        i = len(insn_pieces) - 1
        cs_op_num = -1
        nested_mem = False
        # iterate over operands in reverse order
        while i >= 0:
            c = insn_pieces[i]
            if c == '':
                i -= 1
                continue
            if cur_operand is None:
                cur_operand = []
                self.operands.append(cur_operand)
            # Check if this is a number or an identifier.
            ordc = ord(c[0])
            # pylint:disable=too-many-boolean-expressions
            if (ordc >= 0x30 and ordc <= 0x39) or \
               (ordc >= 0x41 and ordc <= 0x5a) or \
               (ordc >= 0x61 and ordc <= 0x7a):
                # perform some basic classification
                intc = None
                reg = False
                try:
                    intc = int(c, 0)
                except ValueError:
                    reg = c in self.arch.registers
                # if this is a "live" piece, liven it up!
                # special considerations:
                # - registers should consolidate with a $ or % prefix
                # - integers should consolidate with a sign prefix
                if reg:
                    prefix = ''
                    if i > 0 and insn_pieces[i-1] in ('$', '%'):
                        prefix = insn_pieces[i-1]
                        insn_pieces[i-1] = ''
                    cur_operand.append(Register(c, prefix))
                elif intc is not None:
                    with_sign = False
                    if i > 0 and insn_pieces[i-1] in ('+', '-'):
                        with_sign = True
                        if insn_pieces[i-1] == '-':
                            intc = -intc # pylint: disable=invalid-unary-operand-type
                        insn_pieces[i-1] = ''
                    cur_operand.append(Value(intc, with_sign))
                else:
                    # neither number nor register: keep the raw token
                    cur_operand.append(c)
            elif c == ',' and not nested_mem:
                # a top-level comma separates operands
                cs_op_num -= 1
                cur_operand = None
            elif c == ':': # XXX this is a hack! fix this later
                insn_pieces[i-1] += ':'
            else:
                # Check if we are inside braces or parentheses. Do not forget
                # that we are iterating in reverse order!
                if c == ']' or c == ')':
                    nested_mem = True
                elif (c == '[' or c == '('):
                    nested_mem = False
                if cur_operand is None:
                    cur_operand = [c]
                    self.operands.append(cur_operand)
                else:
                    cur_operand.append(c if c[0] != ',' else c + ' ')
            i -= 1
        self.opcode = Opcode(self)
        self.operands.reverse()
        if not hasattr(self.insn, 'operands'):
            # Not all disassemblers provide operands. Just use our smart split
            for i, o in enumerate(self.operands):
                o.reverse()
                self.operands[i] = Operand.build(1, i, o, self)
            return
        if len(self.operands) != len(self.insn.operands):
            # Parsing disagreement with the disassembler: drop operands
            # rather than render something wrong.
            l.error("Operand parsing failed for instruction %s. %d operands are parsed, while %d are expected.",
                    str(self.insn),
                    len(self.operands),
                    len(self.insn.operands)
                    )
            self.operands = [ ]
            return
        for i, o in enumerate(self.operands):
            o.reverse()
            self.operands[i] = Operand.build(
                self.insn.operands[i].type,
                i,
                o,
                self
            )
    @staticmethod
    def split_op_string(insn_str):
        """Split an operand string into alphanumeric runs and single punctuation chars."""
        pieces = []
        in_word = False
        for c in insn_str:
            ordc = ord(c)
            if ordc == 0x20:
                # spaces terminate a word and are discarded
                in_word = False
                continue
            # pylint:disable=too-many-boolean-expressions
            if (ordc >= 0x30 and ordc <= 0x39) or \
               (ordc >= 0x41 and ordc <= 0x5a) or \
               (ordc >= 0x61 and ordc <= 0x7a):
                if in_word:
                    pieces[-1] += c
                else:
                    in_word = True
                    pieces.append(c)
            else:
                in_word = False
                pieces.append(c)
        return pieces
    def _render(self, formatting=None):
        return ['%s %s' % (self.opcode.render(formatting)[0], ', '.join(o.render(formatting)[0] for o in self.operands))]
class SootExpression(DisassemblyPiece):
    """A Soot IR expression, rendered verbatim."""
    def __init__(self, expr):
        self.expr = expr
    def _render(self, formatting=None):  # pylint:disable=unused-argument
        return [self.expr]
class SootExpressionTarget(SootExpression):
    """Jump target of a Soot goto/if statement."""
    def __init__(self, target_stmt_idx):
        super(SootExpressionTarget, self).__init__(target_stmt_idx)
        self.target_stmt_idx = target_stmt_idx
    def _render(self, formatting=None):
        return ["Goto %d" % self.target_stmt_idx]
class SootExpressionStaticFieldRef(SootExpression):
    """Reference to a static field, shown as a dotted name."""
    def __init__(self, field):
        dotted = ".".join(field)
        super(SootExpressionStaticFieldRef, self).__init__(dotted)
        self.field = field
        self.field_str = dotted
    def _render(self, formatting=None):
        return [self.field_str]
class SootExpressionInvoke(SootExpression):
    """An invoke expression (virtual, static or special)."""
    Virtual = "virtual"
    Static = "static"
    Special = "special"
    def __init__(self, invoke_type, expr):
        super(SootExpressionInvoke, self).__init__(str(expr))
        self.invoke_type = invoke_type
        # Only virtual/special invocations carry a base object.
        if self.invoke_type in (self.Virtual, self.Special):
            self.base = str(expr.base)
        else:
            self.base = ""
        self.method_name = expr.method_name
        self.arg_str = expr.list_to_arg_str(expr.args)
    def _render(self, formatting=None):
        prefix = self.base + "." if self.base else ""
        return ["%s%s(%s) [%s]" % (prefix,
                                   self.method_name,
                                   self.arg_str,
                                   self.invoke_type)]
class SootStatement(DisassemblyPiece):
    """A single Soot IR statement, parsed into renderable components."""
    def __init__(self, block_addr, raw_stmt):
        # The statement address is the block address plus this statement's label.
        self.addr = block_addr.copy()
        self.addr.stmt_idx = raw_stmt.label
        self.raw_stmt = raw_stmt
        self.components = [ ]
        self._parse()
    @property
    def stmt_idx(self):
        # Index of this statement within its block.
        return self.addr.stmt_idx
    def _parse(self):
        # Dispatch to a _parse_<ClassName> handler via reflection on the raw
        # statement's class name; unknown kinds render as a placeholder.
        # NOTE: the handler method names below are load-bearing.
        func = "_parse_%s" % self.raw_stmt.__class__.__name__
        if hasattr(self, func):
            getattr(self, func)()
        else:
            # print func
            self.components += ["NotImplemented: %s" % func]
    def _expr(self, expr):
        # Same reflection scheme for expressions; unknown expression kinds
        # fall back to their plain string representation.
        func = "_handle_%s" % expr.__class__.__name__
        if hasattr(self, func):
            return getattr(self, func)(expr)
        else:
            # print func
            return SootExpression(str(expr))
    def _render(self, formatting=None):
        # Join components with spaces, rendering nested pieces recursively.
        return [ " ".join([ component if type(component) is str
                            else component.render(formatting=formatting)[0]
                            for component in self.components
                            ]
                          )
                 ]
    #
    # Statement parsers
    #
    def _parse_AssignStmt(self):
        self.components += [
            SootExpression(str(self.raw_stmt.left_op)),
            "=",
            self._expr(self.raw_stmt.right_op),
        ]
    def _parse_InvokeStmt(self):
        self.components += [
            self._expr(self.raw_stmt.invoke_expr),
        ]
    def _parse_GotoStmt(self):
        self.components += [
            SootExpressionTarget(self.raw_stmt.target),
        ]
    def _parse_IfStmt(self):
        self.components += [
            "if (",
            SootExpression(str(self.raw_stmt.condition)),
            ")",
            SootExpressionTarget(self.raw_stmt.target),
        ]
    def _parse_ReturnVoidStmt(self):
        self.components += [
            "return",
        ]
    def _parse_IdentityStmt(self):
        self.components += [
            SootExpression(str(self.raw_stmt.left_op)),
            "<-",
            SootExpression(str(self.raw_stmt.right_op)),
        ]
    #
    # Expression handlers
    #
    def _handle_SootStaticFieldRef(self, expr):
        # NOTE(review): the field tuple is reversed before joining —
        # presumably stored innermost-first; confirm against pysoot.
        return SootExpressionStaticFieldRef(expr.field[::-1])
    def _handle_SootVirtualInvokeExpr(self, expr):
        return SootExpressionInvoke(SootExpressionInvoke.Virtual, expr)
    def _handle_SootStaticInvokeExpr(self, expr):
        return SootExpressionInvoke(SootExpressionInvoke.Static, expr)
    def _handle_SootSpecialInvokeExpr(self, expr):
        return SootExpressionInvoke(SootExpressionInvoke.Special, expr)
class Opcode(DisassemblyPiece):
    """The mnemonic portion of a disassembled instruction."""

    def __init__(self, parentinsn):
        insn = parentinsn.insn
        self.addr = parentinsn.addr
        self.insn = insn
        self.parentinsn = parentinsn
        self.opcode_string = insn.mnemonic
        self.ident = (self.addr, 'opcode')

    def _render(self, formatting=None):
        # Pad the mnemonic so operands line up across instructions.
        return [self.opcode_string.ljust(7)]

    def __eq__(self, other):
        if type(other) is not Opcode:
            return False
        return self.opcode_string == other.opcode_string
class Operand(DisassemblyPiece):
    """
    One operand of a disassembled instruction, composed of child pieces
    (plain strings plus Register/Value objects).
    """

    def __init__(self, op_num, children, parentinsn):
        self.addr = parentinsn.addr
        self.children = children
        self.parentinsn = parentinsn
        # Position of this operand within the instruction.
        self.op_num = op_num
        self.ident = (self.addr, 'operand', self.op_num)

        # Give each non-string child a stable identity (used by formatting
        # overrides) and a back-reference to this operand.
        for i, c in enumerate(self.children):
            if type(c) not in (bytes, str):
                c.ident = (self.addr, 'operand piece', self.op_num, i)
                c.parentop = self

    @property
    def cs_operand(self):
        # The corresponding capstone operand object.
        return self.parentinsn.insn.operands[self.op_num]

    def _render(self, formatting):
        # Concatenate children: strings verbatim, bytes decoded, pieces rendered.
        return [''.join(x if type(x) is str else x.decode() if type(x) is bytes else x.render(formatting)[0] for x in self.children)]

    @staticmethod
    def build(operand_type, op_num, children, parentinsn):
        """
        Factory: choose the Operand subclass for a capstone operand type,
        then rewrite RIP-relative AMD64 memory operands to absolute form.

        :raises ValueError: for an unrecognized capstone operand type.
        """

        # Maps capstone operand types to operand classes
        MAPPING = {
            1: RegisterOperand,
            2: ConstantOperand,
            3: MemoryOperand,
            4: Operand,  # ARM FP
            64: Operand,  # ARM CIMM
            65: Operand,  # ARM PIMM
            66: Operand,  # ARM SETEND
            67: Operand,  # ARM SYSREG
        }

        cls = MAPPING.get(operand_type, None)
        if cls is None:
            raise ValueError('Unknown capstone operand type %s.' % operand_type)

        operand = cls(op_num, children, parentinsn)

        # Post-processing
        if cls is MemoryOperand and \
                parentinsn.arch.name in { 'AMD64' } and \
                len(operand.values) == 2:
            op0, op1 = operand.values
            if type(op0) is Register and op0.is_ip and type(op1) is Value:
                # Indirect addressing in x86_64
                # 400520 push [rip+0x200782] ==> 400520 push [0x600ca8]
                # (the displacement is relative to the *next* instruction,
                # hence addr + size + displacement)
                absolute_addr = parentinsn.addr + parentinsn.size + op1.val
                return MemoryOperand(1, operand.prefix + ['[', Value(absolute_addr, False), ']'], parentinsn)

        return operand
class ConstantOperand(Operand):
    """An immediate (constant) operand; rendering is inherited from Operand."""
    pass
class RegisterOperand(Operand):
    """An operand holding a register, with optional per-ident render override."""

    @property
    def register(self):
        """The first Register child, or None when there is none."""
        for child in self.children:
            if isinstance(child, Register):
                return child
        return None

    def _render(self, formatting):
        override = None
        if formatting is not None:
            try:
                override = formatting['custom_values_str'][self.ident]
            except KeyError:
                override = None
        if override:
            return [override]
        return super(RegisterOperand, self)._render(formatting)
class MemoryOperand(Operand):
    """
    A memory-reference operand. The flat ``children`` list is parsed into
    structured fields (prefix, segment selector, values, offset) so that
    rendering can be customized; if parsing fails, ``prefix``/``values``
    are set to None and rendering falls back to Operand's default.
    """

    def __init__(self, op_num, children, parentinsn):
        super(MemoryOperand, self).__init__(op_num, children, parentinsn)

        # a typical "children" looks like the following:
        #   [ 'dword', 'ptr', '[', Register, Value, ']' ]
        # or
        #   [ '[', Register, ']' ]
        # or
        #   [ Value, '(', Register, ')' ]
        # it will be converted into more meaningful and Pythonic properties

        self.segment_selector = None
        self.prefix = [ ]
        self.values = [ ]
        self.offset = [ ]
        # offset_location
        # - prefix: -0xff00($gp)
        # - before_value: 0xff00+rax
        # - after_value: rax+0xff00
        self.offset_location = "after_value"
        # values_style
        # - square: [rax+0x10]
        # - curly: {rax+0x10}
        # - paren: (rax+0x10)
        self.values_style = "square"

        try:
            if '[' in self.children:
                self._parse_memop_squarebracket()
            elif '(' in self.children:
                self._parse_memop_paren()
            else:
                raise ValueError()

        except ValueError:
            l.error("Failed to parse operand children %s. Please report to Fish.", self.children)

            # setup all dummy properties
            # (None signals _render to fall back to the default rendering)
            self.prefix = None
            self.values = None

    def _parse_memop_squarebracket(self):
        """Parse '[...]'-style memory operands (x86 and similar)."""
        if self.children[0] != '[':
            try:
                square_bracket_pos = self.children.index('[')
            except ValueError:  #pylint: disable=try-except-raise
                raise

            self.prefix = self.children[ : square_bracket_pos]

            # take out segment selector
            # (a 3-element prefix ends with the segment selector, e.g.
            # ['dword', 'ptr', 'fs'])
            if len(self.prefix) == 3:
                self.segment_selector = self.prefix[-1]
                self.prefix = self.prefix[ : -1]
            else:
                self.segment_selector = None

        else:
            # empty
            square_bracket_pos = 0
            self.prefix = [ ]
            self.segment_selector = None

        if self.children[-1] != ']':
            raise ValueError()

        # everything between the brackets
        self.values = self.children[square_bracket_pos + 1: len(self.children) - 1]

    def _parse_memop_paren(self):
        """Parse '(...)'-style memory operands (e.g. MIPS offset($reg))."""
        offset = [ ]
        self.values_style = "paren"

        if self.children[0] != '(':
            try:
                paren_pos = self.children.index('(')
            except ValueError:  #pylint: disable=try-except-raise
                raise

            if all(isinstance(item, str) for item in self.children[:paren_pos]):
                # parse prefix
                self.prefix = self.children[ : paren_pos]
            elif all(isinstance(item, Value) for item in self.children[:paren_pos]):
                # parse offset
                # force each piece to be rendered with its sign (+/-)
                offset += self.children[:paren_pos]
                # offset appears before the left parenthesis
                self.offset_location = "prefix"
        else:
            paren_pos = 0
            self.prefix = [ ]
            self.segment_selector = None

        self.values = self.children[paren_pos + 1 : len(self.children) - 1]
        self.offset = offset

    def _render(self, formatting):
        """Render prefix, optional segment selector, and bracketed values."""
        if self.prefix is None:
            # we failed in parsing. use the default rendering
            return super(MemoryOperand, self)._render(formatting)
        else:
            values_style = self.values_style
            show_prefix = True
            custom_values_str = None

            # Per-ident formatting overrides: bracket style, prefix
            # visibility, and a fully custom value string.
            if formatting is not None:
                try: values_style = formatting['values_style'][self.ident]
                except KeyError: pass

                try:
                    show_prefix_str = formatting['show_prefix'][self.ident]
                    if show_prefix_str in ('false', 'False'):
                        show_prefix = False
                except KeyError:
                    pass

                try: custom_values_str = formatting['custom_values_str'][self.ident]
                except KeyError: pass

            prefix_str = " ".join(self.prefix) + " " if show_prefix and self.prefix else ""
            if custom_values_str is not None:
                value_str = custom_values_str
            else:
                value_str = ''.join(
                    x.render(formatting)[0] if not isinstance(x, (bytes, str)) else x for x in self.values
                )

            if values_style == "curly":
                left_paren, right_paren = "{", "}"
            elif values_style == "paren":
                left_paren, right_paren = "(", ")"
            else:  # square
                left_paren, right_paren = "[", "]"

            if self.offset:
                offset_str = "".join(
                    x.render(formatting)[0] if not isinstance(x, (bytes, str)) else x for x in self.offset
                )

                # combine values and offsets according to self.offset_location
                if self.offset_location == "prefix":
                    value_str = ''.join([offset_str, left_paren, value_str, right_paren])
                elif self.offset_location == "before_value":
                    value_str = ''.join([left_paren, offset_str, value_str, right_paren])
                else:  # after_value
                    value_str = ''.join([left_paren, value_str, offset_str, right_paren])
            else:
                value_str = left_paren + value_str + right_paren

            segment_selector_str = "" if self.segment_selector is None else self.segment_selector

            if segment_selector_str and prefix_str:
                prefix_str += ' '

            return [ '%s%s%s' % (prefix_str, segment_selector_str, value_str) ]
class OperandPiece(DisassemblyPiece):  # pylint: disable=abstract-method
    """Base class for the atomic pieces (registers, values) inside an operand."""
    # These get filled in later...
    # (the enclosing Operand assigns addr/parentop/ident when it adopts
    # the piece — see Operand.__init__)
    addr = None
    parentop = None
    ident = None
class Register(OperandPiece):
    """A register reference inside an operand."""

    def __init__(self, reg, prefix):
        self.reg = reg
        self.prefix = prefix
        # TODO: Support more architectures
        self.is_ip = reg in {"eip", "rip"}

    def _render(self, formatting):
        # TODO: register renaming
        return [self.prefix + self.reg]

    def __eq__(self, other):
        if type(other) is not Register:
            return False
        return self.reg == other.reg
class Value(OperandPiece):
    """A constant value inside an operand (immediate or displacement)."""

    def __init__(self, val, render_with_sign):
        self.val = val
        # When True, rendering includes an explicit '+'/'-' sign.
        self.render_with_sign = render_with_sign

    @property
    def project(self):
        # Reach the project through the owning operand's instruction.
        return self.parentop.parentinsn.project

    def __eq__(self, other):
        return type(other) is Value and self.val == other.val

    def _render(self, formatting):
        """
        Render the value. Honors a per-ident 'int_styles' override
        ('hex', 'dec', or 'label'); otherwise prefers a label or function
        name at this address, falling back to hex.
        """
        if formatting is not None:
            try:
                style = formatting['int_styles'][self.ident]
                if style[0] == 'hex':
                    if self.render_with_sign:
                        return ['%#+x' % self.val]
                    else:
                        return ['%#x' % self.val]
                elif style[0] == 'dec':
                    if self.render_with_sign:
                        return ['%+d' % self.val]
                    else:
                        return [str(self.val)]
                elif style[0] == 'label':
                    labeloffset = style[1]
                    if labeloffset == 0:
                        lbl = self.project.kb.labels[self.val]
                        return [lbl]
                    # Render as label+offset relative to a nearby label.
                    return ['%s%s%#+x' % ('+' if self.render_with_sign else '', self.project.kb.labels[self.val + labeloffset], labeloffset)]
            except KeyError:
                pass

        # default case
        try:
            func = self.project.kb.functions.get_by_addr(self.val)
        except KeyError:
            func = None
        if self.val in self.project.kb.labels:
            lbl = self.project.kb.labels[self.val]
            if func is not None:
                # see if lbl == func.name and func.demangled_name != func.name. if so, we prioritize the
                # demangled name
                if lbl == func.name and func.name != func.demangled_name:
                    normalized_name = get_cpp_function_name(func.demangled_name, specialized=False, qualified=True)
                    return [normalized_name]
            return [('+' if self.render_with_sign else '') + lbl]
        elif func is not None:
            return [func.demangled_name]
        else:
            if self.render_with_sign:
                return ['%#+x' % self.val]
            else:
                return ['%#x' % self.val]
class Comment(DisassemblyPiece):
    """A user comment attached to an address; may span multiple lines."""

    def __init__(self, addr, text):
        self.addr = addr
        self.text = text.split('\n')

    def _render(self, formatting=None):
        # NOTE(review): this returns the line list wrapped in another list,
        # unlike other pieces which return a list of strings — confirm
        # downstream consumers expect this shape.
        return [self.text]

    def height(self, formatting):
        """Extra vertical space consumed: 0 for one line, else the line count."""
        line_count = len(self.text)
        if line_count == 1:
            return 0
        return line_count
class FuncComment(DisassemblyPiece):
    """A three-line banner comment marking the start of a function."""

    def __init__(self, func):
        self.func = func

    def _render(self, formatting=None):
        banner = '## Function ' + self.func.name
        return ['##', banner, '##']
class Disassembly(Analysis):
    """
    Produce formatted machine code disassembly.
    """

    def __init__(self, function: Optional[Function] = None, ranges: Optional[Sequence[Tuple[int,int]]] = None,
                 include_ir: bool = False):
        """
        Disassemble either one function or a set of [start, end) ranges.

        :param function: function to disassemble (takes precedence over ranges).
        :param ranges: address ranges to disassemble when no function is given.
        :param include_ir: also collect lifter IR for each block.
        """
        # Flat list of DisassemblyPiece results, in display order.
        self.raw_result = []
        # Per-category lookup tables, keyed by address.
        self.raw_result_map = {
            'block_starts': {},
            'comments': {},
            'labels': {},
            'instructions': {},
            'hooks': {},
            'ir': defaultdict(list)
        }
        self.block_to_insn_addrs = defaultdict(list)
        self._func_cache = {}
        self._include_ir = include_ir
        self._graph = None

        if function is not None:
            # sort them by address, put hooks before nonhooks
            self._graph = function.graph
            blocks = sorted(function.graph.nodes(), key=lambda node: (node.addr, not node.is_hook))
            for block in blocks:
                self.parse_block(block)
        elif ranges is not None:
            cfg = self.project.kb.cfgs.get_most_accurate()
            if cfg is None:
                # CFG not available yet. Simply disassemble the code in the given regions. In the future we may want
                # to handle this case by automatically running CFG analysis on given ranges.
                for start, end in ranges:
                    self.parse_block(BlockNode(start, end - start))
            else:
                self._graph = cfg.graph
                for start, end in ranges:
                    if start == end:
                        continue
                    assert start < end
                    # Grab all blocks that intersect target range
                    blocks = sorted([n.to_codenode()
                                     for n in self._graph.nodes() if not (n.addr + (n.size or 1) <= start or
                                                                          n.addr >= end)],
                                    key=lambda node: (node.addr, not node.is_hook))
                    # Trim blocks that are not within range
                    # First pass: clip blocks that begin before `start`.
                    for i, block in enumerate(blocks):
                        if block.size and block.addr < start:
                            delta = start - block.addr
                            block_bytes = block.bytestr[delta:] if block.bytestr else None
                            blocks[i] = BlockNode(block.addr + delta, block.size - delta, block_bytes)
                    # Second pass: clip blocks that extend past `end`.
                    for i, block in enumerate(blocks):
                        if block.size and block.addr + block.size > end:
                            delta = block.addr + block.size - end
                            block_bytes = block.bytestr[0:-delta] if block.bytestr else None
                            blocks[i] = BlockNode(block.addr, block.size - delta, block_bytes)
                    for block in blocks:
                        self.parse_block(block)

    def func_lookup(self, block):
        """Return a cached FunctionStart for the block's function, or None."""
        try:
            return self._func_cache[block.function.addr]
        except AttributeError:
            # Block has no .function attribute.
            return None
        except KeyError:
            # First time we see this function: build and cache its FunctionStart.
            f = FunctionStart(block.function)
            self._func_cache[f.addr] = f
            return f

    def _add_instruction_to_results(self, block: BlockNode, insn: DisassemblerInsn, bs: BlockStart) -> None:
        """
        Add instruction to analysis results with associated labels and comments
        """
        # Labels and comments at this address precede the instruction itself.
        if insn.address in self.kb.labels:
            label = Label(insn.address, self.kb.labels[insn.address])
            self.raw_result.append(label)
            self.raw_result_map['labels'][label.addr] = label
        if insn.address in self.kb.comments:
            comment = Comment(insn.address, self.kb.comments[insn.address])
            self.raw_result.append(comment)
            self.raw_result_map['comments'][comment.addr] = comment
        instruction = Instruction(insn, bs)
        self.raw_result.append(instruction)
        self.raw_result_map['instructions'][instruction.addr] = instruction
        self.block_to_insn_addrs[block.addr].append(insn.address)

    def _add_block_ir_to_results(self, block: BlockNode, irsb: IRSBType) -> None:
        """
        Add lifter IR for this block
        """
        addr_to_ops_map = self.raw_result_map['ir']
        addr = block.addr
        ops = addr_to_ops_map[addr]
        if irsb.statements is not None:
            if (pcode is not None and
                    isinstance(self.project.factory.default_engine, pcode.HeavyPcodeMixin)):
                # P-code engine: ops are already grouped per machine instruction.
                for ins in irsb._instructions:
                    addr = ins.address.offset
                    addr_to_ops_map[addr].extend([
                        IROp(addr, op.seq.uniq, op, irsb) for op in ins.ops])
            else:
                # VEX: IMark statements delimit machine instructions; all
                # following statements belong to the most recent IMark address.
                for seq, stmt in enumerate(irsb.statements):
                    if isinstance(stmt, pyvex.stmt.IMark):
                        addr = stmt.addr
                        ops = addr_to_ops_map[addr]
                    else:
                        ops.append(IROp(addr, seq, stmt, irsb))

    def parse_block(self, block: BlockNode) -> None:
        """
        Parse instructions for a given block node
        """
        func = self.func_lookup(block)
        if func and func.addr == block.addr:
            # Block begins a function: emit its banner and FunctionStart first.
            self.raw_result.append(FuncComment(block.function))
            self.raw_result.append(func)
        bs = BlockStart(block, func, self.project)
        self.raw_result.append(bs)

        if block.is_hook:
            hook = Hook(block)
            self.raw_result.append(hook)
            self.raw_result_map['hooks'][block.addr] = hook
        elif self.project.arch.capstone_support:
            # Prefer Capstone first, where we are able to extract a bit more
            # about the operands
            if block.thumb:
                # Clear the thumb bit to get the real byte address.
                aligned_block_addr = (block.addr >> 1) << 1
                cs = self.project.arch.capstone_thumb
            else:
                aligned_block_addr = block.addr
                cs = self.project.arch.capstone
            if block.bytestr is None:
                bytestr = self.project.loader.memory.load(aligned_block_addr, block.size)
            else:
                bytestr = block.bytestr
            self.block_to_insn_addrs[block.addr] = []
            for cs_insn in cs.disasm(bytestr, block.addr):
                self._add_instruction_to_results(block, CapstoneInsn(cs_insn), bs)
        elif pcode is not None and isinstance(self.project.factory.default_engine, pcode.HeavyPcodeMixin):
            # When using the P-code engine, we can fall back on its disassembly
            # in the event that Capstone does not support it
            self.block_to_insn_addrs[block.addr] = []
            b = self.project.factory.block(block.addr, size=block.size)
            for insn in b.disassembly.insns:
                self._add_instruction_to_results(block, insn, bs)
        elif type(block) is SootBlockNode:
            # Soot (Java IR) blocks carry statements instead of instructions.
            for raw_stmt in block.stmts:
                stmt = SootStatement(block.addr, raw_stmt)
                self.raw_result.append(stmt)
                self.raw_result_map['instructions'][stmt.addr] = stmt
                self.block_to_insn_addrs[block.addr].append(stmt.addr)
        else:
            # No disassembler available for this block type/architecture.
            raise TypeError("")

        if self._include_ir:
            b = self.project.factory.block(block.addr, size=block.size)
            self._add_block_ir_to_results(block, b.vex)

    def render(self, formatting=None, show_edges: bool = True, show_addresses: bool = True,
               show_bytes: bool = False, ascii_only: Optional[bool] = None, color: bool = True) -> str:
        """
        Render the disassembly to a string, with optional edges and addresses.
        Color will be added by default, if enabled. To disable color pass an empty formatting dict.
        """
        max_bytes_per_line = 5
        bytes_width = max_bytes_per_line*3+1
        # Maps instruction/hook address -> output line numbers (for edge drawing).
        a2ln = defaultdict(list)
        buf = []

        if formatting is None:
            formatting = {
                'colors': {
                    'address': 'gray',
                    'bytes': 'cyan',
                    'edge': 'yellow',
                    Label: 'bright_yellow',
                    ConstantOperand: 'cyan',
                    MemoryOperand: 'yellow',
                    Comment: 'gray',
                    Hook: 'green',
                } if ansi_color_enabled and color else {},
                # Closure over `formatting` itself: colorize by item type.
                'format_callback': lambda item, s: ansi_color(s, formatting['colors'].get(type(item), None))
            }

        def col(item: Any) -> Optional[str]:
            # Color lookup that tolerates missing entries.
            try:
                return formatting['colors'][item]
            except KeyError:
                return None

        def format_address(addr: int, color: bool = True) -> str:
            if not show_addresses:
                return ''
            a, pad = f'{addr:x}', ' '
            return (ansi_color(a, col('address')) if color else a) + pad

        def format_bytes(data: bytes, color: bool = True) -> str:
            s = ' '.join(f'{x:02x}' for x in data).ljust(bytes_width)
            return ansi_color(s, col('bytes')) if color else s

        def format_comment(text: str, color: bool = True) -> str:
            s = ' ; ' + text
            return ansi_color(s, col(Comment)) if color else s

        # A Comment piece is buffered and attached to the next Instruction line.
        comment = None
        for item in self.raw_result:
            if isinstance(item, BlockStart):
                # Blank line between blocks (except before the first one).
                if len(buf) > 0:
                    buf.append('')
            elif isinstance(item, Label):
                # Indent the label to the instruction column.
                pad = len(format_address(item.addr, False)) * ' '
                if show_bytes:
                    pad += bytes_width * ' '
                buf.append(pad + item.render(formatting)[0])
            elif isinstance(item, Comment):
                comment = item
            elif isinstance(item, Hook):
                a2ln[item.addr].append(len(buf))
                buf.append(format_address(item.addr) + item.render(formatting)[0])
            elif isinstance(item, Instruction):
                a2ln[item.addr].append(len(buf))
                lines = []

                # Chop instruction bytes into line segments
                p, insn_bytes = 0, []
                while show_bytes and p < len(item.insn.bytes):
                    s = item.insn.bytes[p:p+min(len(item.insn.bytes)-p, max_bytes_per_line)]
                    p += len(s)
                    insn_bytes.append(s)

                # Format the instruction's address, bytes, disassembly, and comment
                # (s_plain tracks the uncolored width so columns line up).
                s_plain = format_address(item.addr, False)
                s = format_address(item.addr)
                if show_bytes:
                    bytes_column = len(s_plain)
                    s_plain += format_bytes(insn_bytes[0], False)
                    s += format_bytes(insn_bytes[0])
                s_plain += item.render()[0]
                s += item.render(formatting)[0]
                if comment is not None:
                    comment_column = len(s_plain)
                    s += format_comment(comment.text[0])
                lines.append(s)

                # Add additional lines of instruction bytes
                for i in range(1, len(insn_bytes)):
                    lines.append(' ' * bytes_column + format_bytes(insn_bytes[i]))

                # Add additional lines of comments
                if comment is not None:
                    for i in range(1, len(comment.text)):
                        if len(lines) <= i:
                            lines.append(' ' * comment_column)
                        lines[i] += format_comment(comment.text[i])
                    comment = None

                buf.extend(lines)
            else:
                buf.append(''.join(item.render(formatting)))

        if self._graph is not None and show_edges and buf:
            # Collect (from_line, to_line) pairs for non-fallthrough edges.
            edges_by_line = set()
            for edge in self._graph.edges.items():
                from_block, to_block = edge[0]
                if from_block.size is None:
                    continue
                if to_block.addr != from_block.addr + from_block.size:
                    from_addr = edge[1]['ins_addr']
                    to_addr = to_block.addr
                    if not (from_addr in a2ln and to_addr in a2ln):
                        continue
                    for f in a2ln[from_addr]:
                        for t in a2ln[to_addr]:
                            edges_by_line.add((f, t))

            # Render block edges, to a reference buffer for tracking and output buffer for display
            edge_buf = ['' for _ in buf]
            ref_buf = ['' for _ in buf]
            edge_col = col('edge')
            # Draw shorter edges first so they nest inside longer ones.
            for f, t in sorted(edges_by_line, key=lambda e: abs(e[0]-e[1])):
                add_edge_to_buffer(edge_buf, ref_buf, f, t, lambda s: ansi_color(s, edge_col), ascii_only=ascii_only)
                add_edge_to_buffer(ref_buf, ref_buf, f, t, ascii_only=ascii_only)
            max_edge_depth = max(map(len, ref_buf))

            # Justify edge and combine with disassembly
            for i, line in enumerate(buf):
                buf[i] = ' ' * (max_edge_depth - len(ref_buf[i])) + edge_buf[i] + line

        return '\n'.join(buf)
from angr.analyses import AnalysesHub
# Expose this analysis as project.analyses.Disassembly.
AnalysesHub.register_default('Disassembly', Disassembly)
| {
"content_hash": "71534efd08a345df6e4ace5e3b819ccb",
"timestamp": "",
"source": "github",
"line_count": 1145,
"max_line_length": 141,
"avg_line_length": 34.8943231441048,
"alnum_prop": 0.5286329278670471,
"repo_name": "angr/angr",
"id": "f7ef46f3d75904e12d9b3b733cbd64dfebb7ff6c",
"size": "39954",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "angr/analyses/disassembly.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C",
"bytes": "6694"
},
{
"name": "C++",
"bytes": "146292"
},
{
"name": "Makefile",
"bytes": "946"
},
{
"name": "Python",
"bytes": "27717304"
}
],
"symlink_target": ""
} |
import logging
from twisted.internet.defer import Deferred, fail
from twisted.internet.protocol import Factory, connectionDone
from twisted.logger import Logger
from twisted.protocols.basic import Int32StringReceiver
"""
Twisted protocols that understand Kafka message framing
"""
log = logging.getLogger('afkak.protocol')
log.addHandler(logging.NullHandler())
class _BaseKafkaProtocol(Int32StringReceiver):
    """Shared base for Kafka protocols: Int32 length-prefixed string framing."""

    MAX_LENGTH = 2 ** 31 - 1  # Max a signed Int32 can represent
class KafkaProtocol(_BaseKafkaProtocol):
    """
    A very thin wrapper around Int32StringReceiver.

    Forwards every received string to ``factory.handleResponse()`` and
    clears the factory reference once the connection goes away.
    """
    factory = None

    def stringReceived(self, string):
        self.factory.handleResponse(string)

    def connectionLost(self, reason=connectionDone):
        # Notify the factory first, then drop the reference to break the cycle.
        self.factory._connectionLost(reason)
        self.factory = None

    def lengthLimitExceeded(self, length):
        peer = self.transport.getPeer()
        log.error("Broker at %s sent a %d byte message, exceeding the size limit of %d. "
                  "Terminating connection.", peer, length, self.MAX_LENGTH)
        self.transport.loseConnection()
class KafkaBootstrapProtocol(_BaseKafkaProtocol):
    """
    `KafkaBootstrapProtocol` sends and receives Kafka messages.

    It knows just enough about Kafka message framing to correlate responses
    with requests. A deferred is issued for every request and fires when
    a response is received or the connection is lost.

    :ivar dict _pending:
        Map of correlation ID to Deferred.
    """
    _log = Logger()

    def connectionMade(self):
        # Per-connection state: outstanding requests, and the failure reason
        # (None while the connection is healthy).
        self._pending = {}
        self._failed = None

    def stringReceived(self, response):
        """
        Handle a response from the broker.
        """
        # The correlation ID is the first four bytes of the response.
        correlation_id = response[0:4]
        try:
            d = self._pending.pop(correlation_id)
        except KeyError:
            # A response we never asked for: treat the connection as broken.
            self._log.warn((
                "Response has unknown correlation ID {correlation_id!r}."
                " Dropping connection to {peer}."
            ), correlation_id=correlation_id, peer=self.transport.getPeer())
            self.transport.loseConnection()
        else:
            d.callback(response)

    def connectionLost(self, reason=connectionDone):
        """
        Mark the protocol as failed and fail all pending operations.
        """
        self._failed = reason
        # Swap out _pending before erring back so callbacks can't re-enter it.
        pending, self._pending = self._pending, None
        for d in pending.values():
            d.errback(reason)

    def lengthLimitExceeded(self, length):
        # Oversized frame from the broker: log and drop the connection.
        self._log.error(
            "Broker at {peer} sent a {length:,d} byte message, exceeding the size limit of {max_length:,d}.",
            peer=self.transport.getPeer(), length=length, max_length=self.MAX_LENGTH,
        )
        self.transport.loseConnection()

    def request(self, request):
        """
        Send a request to the Kafka broker.

        :param bytes request:
            The bytes of a Kafka `RequestMessage`_ structure. It must have
            a unique (to this connection) correlation ID.

        :returns:
            `Deferred` which will:

              - Succeed with the bytes of a Kafka `ResponseMessage`_
              - Fail when the connection terminates

        .. _RequestMessage: https://kafka.apache.org/protocol.html#protocol_messages
        """
        if self._failed is not None:
            return fail(self._failed)
        # Bytes 4:8 of the request hold the correlation ID.
        correlation_id = request[4:8]
        assert correlation_id not in self._pending
        d = Deferred()
        self.sendString(request)
        self._pending[correlation_id] = d
        return d
# Factory producing KafkaBootstrapProtocol instances for bootstrap connections.
bootstrapFactory = Factory.forProtocol(KafkaBootstrapProtocol)
| {
"content_hash": "6e28a4f7ed7736b1eda64237ba840684",
"timestamp": "",
"source": "github",
"line_count": 119,
"max_line_length": 109,
"avg_line_length": 32.23529411764706,
"alnum_prop": 0.6459854014598541,
"repo_name": "ciena/afkak",
"id": "5ce11ddde02bc4a3cf59a650a5e7c0137474f8d5",
"size": "4470",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "afkak/_protocol.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "3897"
},
{
"name": "Python",
"bytes": "716594"
},
{
"name": "Shell",
"bytes": "1458"
}
],
"symlink_target": ""
} |
import copy
import os
import time
from boto import dynamodb2
from boto.dynamodb2 import fields # AllIndex, GlobalAllIndex, HashKey, RangeKey
from boto.dynamodb2 import table
from boto.dynamodb2 import types
from boto.exception import JSONResponseError
from bunch import Bunch
import yaml
from .log import create_logger
logger = create_logger()
# Number of polling attempts when waiting on index updates.
UPDATE_INDEX_RETRIES = 60

# Cache to avoid parsing YAML file repeatedly.
_cached_config = None
def set_config(table_config, namespace=None, aws_access_key_id=False, aws_secret_access_key=False,
               host=None, port=None, is_secure=None):
    """Load DynamoDB configuration from a YAML file and cache it globally.

    Keyword arguments override the corresponding CC_DYNAMODB_* environment
    variables. ``False`` is used as the "not provided" sentinel for the AWS
    credentials so that explicit None/empty values can pass through.

    :param table_config: path to the YAML file describing table schemas.
    :param namespace: table-name prefix (or CC_DYNAMODB_NAMESPACE).
    :param aws_access_key_id: AWS key id (or CC_DYNAMODB_ACCESS_KEY_ID).
    :param aws_secret_access_key: AWS secret (or CC_DYNAMODB_SECRET_ACCESS_KEY).
    :param host: DynamoDB Local host (or CC_DYNAMODB_HOST).
    :param port: DynamoDB Local port (or CC_DYNAMODB_PORT).
    :param is_secure: use TLS (or CC_DYNAMODB_IS_SECURE).
    :raises ConfigurationError: when a required setting is missing or invalid.
    """
    global _cached_config

    with open(table_config) as config_file:
        # safe_load: a table-config file should never require arbitrary
        # Python object construction, and yaml.load without an explicit
        # Loader is unsafe and deprecated.
        yaml_config = yaml.safe_load(config_file)

    if aws_access_key_id is False:
        aws_access_key_id = os.environ.get('CC_DYNAMODB_ACCESS_KEY_ID', False)
    if aws_secret_access_key is False:
        aws_secret_access_key = os.environ.get('CC_DYNAMODB_SECRET_ACCESS_KEY', False)

    _cached_config = Bunch({
        'yaml': yaml_config,
        'namespace': namespace or os.environ.get('CC_DYNAMODB_NAMESPACE'),
        'aws_access_key_id': aws_access_key_id,
        'aws_secret_access_key': aws_secret_access_key,
        'host': host or os.environ.get('CC_DYNAMODB_HOST'),
        'port': port or os.environ.get('CC_DYNAMODB_PORT'),
        'is_secure': is_secure or os.environ.get('CC_DYNAMODB_IS_SECURE'),
    })

    if not _cached_config.namespace:
        _raise_configuration_error(
            'Missing namespace kwarg OR environment variable CC_DYNAMODB_NAMESPACE')

    if _cached_config.aws_access_key_id is False:
        _raise_configuration_error(
            'Missing aws_access_key_id kwarg OR environment variable CC_DYNAMODB_ACCESS_KEY_ID')

    if _cached_config.aws_secret_access_key is False:
        _raise_configuration_error(
            'Missing aws_secret_access_key kwarg OR environment variable CC_DYNAMODB_SECRET_ACCESS_KEY')

    if _cached_config.port:
        try:
            _cached_config.port = int(_cached_config.port)
        except ValueError:
            _raise_configuration_error(
                'Integer value expected for port '
                'OR environment variable CC_DYNAMODB_PORT. Got %s' % _cached_config.port)

    logger.info('cc_dynamodb.set_config', extra=dict(status='config loaded'))


def _raise_configuration_error(msg):
    """Log the message at error level and raise ConfigurationError with it."""
    logger.error('ConfigurationError: ' + msg)
    raise ConfigurationError(msg)
def get_config(**kwargs):
    """Return a deep copy of the cached config, loading it first if absent."""
    global _cached_config
    if not _cached_config:
        set_config(**kwargs)
    # Deep-copy so callers cannot mutate the shared cached configuration.
    snapshot = copy.deepcopy(_cached_config.toDict())
    return Bunch(snapshot)
class ConfigurationError(Exception):
    """Raised when required cc_dynamodb configuration is missing or invalid."""
    pass
def _build_key(key_details):
    """Build a boto key field (e.g. HashKey/RangeKey) from a config dict."""
    details = key_details.copy()  # don't mutate the caller's config
    key_cls = getattr(fields, details.pop('type'))
    details['data_type'] = getattr(types, details['data_type'])
    return key_cls(**details)
def _build_keys(keys_config):
return [_build_key(key_details)
for key_details in keys_config]
def _build_secondary_index(index_details, is_global):
    """Build one boto secondary index (local or global) from its config dict."""
    details = index_details.copy()  # don't mutate the caller's config
    index_cls = getattr(fields, details.pop('type'))
    parts = [_build_key(key_details) for key_details in details.get('parts', [])]
    kwargs = dict(parts=parts)
    if is_global:
        # Only global secondary indexes carry their own throughput.
        kwargs['throughput'] = details.pop('throughput', None)
    return index_cls(details['name'], **kwargs)
def _build_secondary_indexes(indexes_config, is_global):
return [_build_secondary_index(index_details, is_global=is_global)
for index_details in indexes_config]
def _get_table_metadata(table_name):
    """Return schema plus secondary-index definitions for a configured table.

    :raises UnknownTableException: when the table is not in the config.
    """
    config = get_config().yaml
    try:
        keys_config = config['schemas'][table_name]
    except KeyError:
        logger.exception('cc_dynamodb.UnknownTable', extra=dict(table_name=table_name,
                                                                config=config,
                                                                DTM_EVENT='cc_dynamodb.UnknownTable'))
        raise UnknownTableException('Unknown table: %s' % table_name)

    global_indexes_config = config.get('global_indexes', {}).get(table_name, [])
    local_indexes_config = config.get('indexes', {}).get(table_name, [])
    return dict(
        schema=_build_keys(keys_config),
        global_indexes=_build_secondary_indexes(global_indexes_config, is_global=True),
        indexes=_build_secondary_indexes(local_indexes_config, is_global=False),
    )
def get_table_name(table_name):
    """Return the namespaced (environment-prefixed) table name."""
    namespace = get_config().namespace
    return namespace + table_name
def get_reverse_table_name(table_name):
    '''Strips the environment/namespace prefix from a full table name.'''
    prefix_length = len(get_config().namespace)
    return table_name[prefix_length:]
def get_table_index(table_name, index_name):
    """Given a table name and an index name, return the index config (or None)."""
    config = get_config()
    # Local and global index configs are searched together.
    all_indexes = (config.yaml.get('indexes', {}).items() +
                   config.yaml.get('global_indexes', {}).items())
    for config_table_name, table_indexes in all_indexes:
        if config_table_name != table_name:
            continue
        for index in table_indexes:
            if index['name'] == index_name:
                return index
    return None
def get_connection():
    """Returns a DynamoDBConnection even if credentials are invalid."""
    config = get_config()

    if config.host:
        # A host is configured: talk to DynamoDB Local directly.
        from boto.dynamodb2.layer1 import DynamoDBConnection
        return DynamoDBConnection(
            aws_access_key_id=config.aws_access_key_id,
            aws_secret_access_key=config.aws_secret_access_key,
            host=config.host,                       # Host where DynamoDB Local resides
            port=config.port,                       # DynamoDB Local port (8000 is the default)
            is_secure=config.is_secure or False)    # For DynamoDB Local, disable secure connections

    region = os.environ.get('CC_AWS_REGION', 'us-west-2')
    return dynamodb2.connect_to_region(
        region,
        aws_access_key_id=config.aws_access_key_id,
        aws_secret_access_key=config.aws_secret_access_key,
    )
def get_table_columns(table_name):
    """Return known columns for a table and their data type."""
    # TODO: see if table.describe() can return what dynamodb knows instead.
    config = get_config().yaml
    try:
        column_config = config['columns'][table_name]
    except KeyError:
        logger.exception('UnknownTable: %s' % table_name, extra=dict(config=config,
                                                                     DTM_EVENT='cc_dynamodb.UnknownTable'))
        raise UnknownTableException('Unknown table: %s' % table_name)
    return dict(
        (column_name, getattr(types, column_type))
        for column_name, column_type in column_config.items())
def get_table(table_name, connection=None):
    '''Return a boto Table with its schema and indexes preloaded.

    WARNING: Does not check the table actually exists. Querying against
    a non-existent table raises boto.exception.JSONResponseError

    This function avoids additional lookups when using a table.
    The columns included are only the optional columns you may find in some of the items.
    '''
    metadata = _get_table_metadata(table_name)
    return table.Table(
        get_table_name(table_name),
        connection=connection or get_connection(),
        **metadata
    )
def list_table_names():
    """List known table names from configuration, without namespace."""
    schemas = get_config().yaml['schemas']
    return schemas.keys()
def _get_or_default_throughput(throughput):
if throughput is False:
config = get_config()
throughput = config.yaml['default_throughput']
return throughput
def _get_table_init_data(table_name, connection, throughput):
    """Assemble the keyword arguments for constructing/creating a boto Table."""
    init_data = {
        'table_name': get_table_name(table_name),
        'connection': connection or get_connection(),
        'throughput': _get_or_default_throughput(throughput),
    }
    # Merge in schema and index definitions from the config.
    init_data.update(_get_table_metadata(table_name))
    return init_data
def create_table(table_name, connection=None, throughput=False):
    """Create table. Throws an error if table already exists.

    :raises TableAlreadyExistsException: if DynamoDB reports the resource
        is already in use.
    """
    init_data = _get_table_init_data(table_name, connection=connection,
                                     throughput=throughput)
    try:
        db_table = table.Table.create(**init_data)
    except JSONResponseError as e:
        if e.status == 400 and e.error_code == 'ResourceInUseException':
            logger.warn('Called create_table("%s"), but already exists: %s' %
                        (table_name, e.body))
            raise TableAlreadyExistsException(body=e.body)
        raise e
    logger.info('cc_dynamodb.create_table: %s' % table_name,
                extra=dict(status='created table'))
    return db_table
def _validate_schema(schema, table_metadata):
"""Raise error if primary index (schema) is not the same as upstream"""
upstream_schema = table_metadata['Table']['KeySchema']
upstream_schema_attributes = [i['AttributeName'] for i in upstream_schema]
upstream_attributes = [item for item in table_metadata['Table']['AttributeDefinitions']
if item['AttributeName'] in upstream_schema_attributes]
local_schema = [item.schema() for item in schema]
local_schema_attributes = [i['AttributeName'] for i in local_schema]
local_attributes = [item.definition() for item in schema
if item.definition()['AttributeName'] in local_schema_attributes]
if sorted(upstream_schema, key=lambda i: i['AttributeName']) != sorted(local_schema, key=lambda i: i['AttributeName']):
msg = 'Mismatched schema: %s VS %s' % (upstream_schema, local_schema)
logger.warn(msg)
raise UpdateTableException(msg)
if sorted(upstream_attributes, key=lambda i: i['AttributeName']) != sorted(local_attributes, key=lambda i: i['AttributeName']):
msg = 'Mismatched attributes: %s VS %s' % (upstream_attributes, local_attributes)
logger.warn(msg)
raise UpdateTableException(msg)
def update_table(table_name, connection=None, throughput=False):
    """
    Update existing table.

    Handles updating primary index and global secondary indexes.
    Updates throughput and creates/deletes indexes.

    :param table_name: unprefixed table name
    :param connection: optional dynamodb connection, to avoid creating one
    :param throughput: a dict, e.g. {'read': 10, 'write': 10}
    :return: the updated boto Table
    :raises UnknownTableException: if the table does not exist upstream
    :raises UpdateTableException: if the local primary index differs upstream
    """
    db_table = table.Table(**_get_table_init_data(table_name, connection=connection, throughput=throughput))
    local_global_indexes_by_name = dict((index.name, index) for index in db_table.global_indexes)

    try:
        table_metadata = db_table.describe()
    except JSONResponseError as e:
        if e.status == 400 and e.error_code == 'ResourceNotFoundException':
            raise UnknownTableException('Unknown table: %s' % table_name)
        # BUGFIX: previously any other JSONResponseError was silently
        # swallowed, and the code below then failed with a NameError
        # on table_metadata. Re-raise unexpected errors instead.
        raise

    _validate_schema(schema=db_table.schema, table_metadata=table_metadata)

    # Update existing primary index throughput
    db_table.update(throughput=throughput)

    upstream_global_indexes_by_name = dict(
        (index['IndexName'], index)
        for index in table_metadata['Table'].get('GlobalSecondaryIndexes', []))

    for index_name, index in local_global_indexes_by_name.items():
        if index_name not in upstream_global_indexes_by_name:
            logger.info('Creating GSI %s for %s' % (index_name, table_name))
            for i in range(UPDATE_INDEX_RETRIES + 1):
                try:
                    db_table.create_global_secondary_index(index)
                except JSONResponseError as e:
                    # Another actor may have created it meanwhile; that's fine.
                    if 'already exists' in str(e.body):
                        break
                    if i < UPDATE_INDEX_RETRIES:
                        time.sleep(1)
                    else:
                        raise
                else:
                    # BUGFIX: stop retrying once the create call succeeds.
                    # Previously the loop kept calling create until DynamoDB
                    # returned 'already exists'.
                    break
        else:
            # Index exists upstream: sync its throughput if it changed.
            # (Renamed from `throughput` to avoid clobbering the parameter.)
            upstream_index = upstream_global_indexes_by_name[index_name]
            upstream_throughput = {
                'write': upstream_index['ProvisionedThroughput']['WriteCapacityUnits'],
                'read': upstream_index['ProvisionedThroughput']['ReadCapacityUnits'],
            }
            if upstream_throughput == index.throughput:
                continue
            # Update throughput
            # TODO: this could be done in a single call with multiple indexes
            db_table.update_global_secondary_index(global_indexes={
                index_name: index.throughput
            })
            logger.info('Updating GSI %s throughput for %s to %s' % (index_name, table_name, index.throughput))

    for index_name in upstream_global_indexes_by_name.keys():
        if index_name not in local_global_indexes_by_name:
            logger.info('Deleting GSI %s for %s' % (index_name, table_name))
            for i in range(UPDATE_INDEX_RETRIES + 1):
                try:
                    db_table.delete_global_secondary_index(index_name)
                except JSONResponseError as e:
                    # Already gone (deleted by someone else); that's fine.
                    if 'ResourceNotFoundException' in str(e.body):
                        break
                    if i < UPDATE_INDEX_RETRIES:
                        time.sleep(1)
                    else:
                        raise
                else:
                    # BUGFIX: stop retrying once the delete call succeeds.
                    break

    logger.info('cc_dynamodb.update_table: %s' % table_name,
                extra=dict(status='updated table'))
    return db_table
class UnknownTableException(Exception):
    """Raised when a table name is missing from the configuration
    (see get_table_columns) or does not exist upstream (see update_table)."""
    pass
class TableAlreadyExistsException(Exception):
    """Raised by create_table() when the DynamoDB table already exists.

    The raw DynamoDB error body is kept on ``self.body``.
    """

    def __init__(self, body):
        # BUGFIX: forward to Exception.__init__ so str(exc) and exc.args
        # carry the error body instead of being empty.
        super(TableAlreadyExistsException, self).__init__(body)
        self.body = body
class UpdateTableException(Exception):
    """Raised by _validate_schema() when the local primary index or attribute
    definitions do not match the upstream table."""
    pass
| {
"content_hash": "5302fd30c24b78f28f630965a2de0861",
"timestamp": "",
"source": "github",
"line_count": 351,
"max_line_length": 131,
"avg_line_length": 39.17948717948718,
"alnum_prop": 0.6353984874927283,
"repo_name": "clearcare/cc_dynamodb",
"id": "25d6f67a8dc84477d3ea00f122435d398b92d8fa",
"size": "13752",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cc_dynamodb/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "31857"
},
{
"name": "Shell",
"bytes": "627"
}
],
"symlink_target": ""
} |
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: adds the nullable ``backend_class`` CharField
    to the ``post_office.BackendAccess`` model."""

    def forwards(self, orm):
        # Adding field 'BackendAccess.backend_class'
        db.add_column(u'post_office_backendaccess', 'backend_class',
                      self.gf('django.db.models.fields.CharField')(max_length=500, null=True, blank=True),
                      keep_default=False)

    def backwards(self, orm):
        # Deleting field 'BackendAccess.backend_class'
        db.delete_column(u'post_office_backendaccess', 'backend_class')

    # Frozen ORM snapshot used by South to build the fake ORM passed to
    # forwards()/backwards(). Auto-generated; do not edit by hand.
    models = {
        u'post_office.attachment': {
            'Meta': {'object_name': 'Attachment'},
            'emails': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'attachments'", 'symmetrical': 'False', 'to': u"orm['post_office.Email']"}),
            'file': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
        },
        u'post_office.backendaccess': {
            'Meta': {'object_name': 'BackendAccess'},
            'backend_class': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
            'host': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'last_time_sent': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'limit_min': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '250'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '250'}),
            'port': ('django.db.models.fields.IntegerField', [], {}),
            'total_sent_last_min': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'use_tsl': ('django.db.models.fields.BooleanField', [], {}),
            'username': ('django.db.models.fields.CharField', [], {'max_length': '250'})
        },
        u'post_office.email': {
            'Meta': {'object_name': 'Email'},
            'backend_access': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['post_office.BackendAccess']", 'null': 'True', 'blank': 'True'}),
            'bcc': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'cc': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'context': ('jsonfield.fields.JSONField', [], {'null': 'True', 'blank': 'True'}),
            'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_index': 'True', 'blank': 'True'}),
            'from_email': ('django.db.models.fields.CharField', [], {'max_length': '254'}),
            'headers': ('jsonfield.fields.JSONField', [], {'null': 'True', 'blank': 'True'}),
            'html_message': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'last_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_index': 'True', 'blank': 'True'}),
            'message': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'priority': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
            'scheduled_time': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
            'status': ('django.db.models.fields.PositiveSmallIntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
            'subject': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'template': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['post_office.EmailTemplate']", 'null': 'True', 'blank': 'True'}),
            'to': ('django.db.models.fields.TextField', [], {'blank': 'True'})
        },
        u'post_office.emailtemplate': {
            'Meta': {'object_name': 'EmailTemplate'},
            'content': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'html_content': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'last_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'subject': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'})
        },
        u'post_office.log': {
            'Meta': {'object_name': 'Log'},
            'date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'email': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'logs'", 'to': u"orm['post_office.Email']"}),
            'exception_type': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'message': ('django.db.models.fields.TextField', [], {}),
            'status': ('django.db.models.fields.PositiveSmallIntegerField', [], {})
        }
    }

    complete_apps = ['post_office']
"content_hash": "62ad1cb3c2cd14a74e5eadcec3da64ec",
"timestamp": "",
"source": "github",
"line_count": 85,
"max_line_length": 172,
"avg_line_length": 68.8,
"alnum_prop": 0.5545485636114911,
"repo_name": "carrerasrodrigo/django-post_office",
"id": "85cbb1cfca0c56e44ca33b65287235a4cb9a78c9",
"size": "5872",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "post_office/south_migrations/0014_auto__add_field_backendaccess_backend_class.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "168860"
}
],
"symlink_target": ""
} |
import TASSELpy.javaObj
from TASSELpy.utils.OrderedSet import lruOrderedSet
from TASSELpy.utils.caching import LRUcache
from functools import wraps,update_wrapper,partial
import numpy as np
import javabridge
import re
# Start the JVM bridge if no JVM environment is attached yet
if not javabridge.get_env():
    from TASSELpy import TASSELbridge
    TASSELbridge.start()

# Mapping of short java class names to their JNI class paths
java_imports = {'String':'java/lang/String'}
# Cached JNI class reference for java.lang.String (used to build String[])
str_class = javabridge.get_env().find_class(java_imports['String'])
# Wrap a python string in a java.lang.String instance
to_java_string = lambda x: javabridge.make_instance(java_imports['String'],
                                                    "(Ljava/lang/String;)V", x)
## Define numpy types that need to be converted to java arrays
def make_string_array(arr):
    """Convert a sequence of python strings into a java.lang.String[] array."""
    env = javabridge.get_env()
    java_arr = env.make_object_array(len(arr), str_class)
    for idx, text in enumerate(arr):
        env.set_object_array_element(java_arr, idx,
                                     env.new_string_utf(text))
    return java_arr
# Map numpy scalar types to the javabridge factory that builds the
# corresponding java array (primitive arrays, or String[] via helper above)
array_conversion_func_dict = {np.int8: javabridge.get_env().make_byte_array,
                              np.int32: javabridge.get_env().make_int_array,
                              np.int64: javabridge.get_env().make_long_array,
                              np.string_: make_string_array}
## Dictionary that automatically inserts both k->v and v->k under set
class bidirectional_dict(dict):
    """Dictionary that stores the inverse mapping alongside every entry:
    setting ``d[k] = v`` also records ``d[v] = k``."""

    def __init__(self, *args):
        """Instantiate from ``(key, value)`` pairs, inserting each pair
        in both directions."""
        super(bidirectional_dict, self).__init__()
        for key, value in args:
            self[key] = value

    def __setitem__(self, key, value):
        # Record both directions of the mapping
        super(bidirectional_dict, self).__setitem__(key, value)
        super(bidirectional_dict, self).__setitem__(value, key)
## Define a set of equivalent python types in signatures
# (a signature declared with one member of a pair also accepts the other,
#  e.g. a plain int satisfies an np.int32 slot)
eq_types = bidirectional_dict((int,np.int32),(long,np.int64),
                              (str,unicode))
## Define a set of types that, while actually equivalent in python,
# should not be considered equivalent for signatures
# (keeps int-typed and np.int64-typed overloads distinct)
segregate_types = bidirectional_dict((int,np.int64))
## Default-dict type dictionary to work with signatures
class signature_dict(dict):
    """Dictionary keyed by tuples of python types.

    A lookup that misses falls back to the first registered signature of the
    same arity whose slots are compatible with the requested types
    (subclass relationship, or equivalence via ``eq_types``, unless the pair
    is explicitly kept apart by ``segregate_types``). Successful fallbacks
    are cached under the requested key.
    """

    def __missing__(self, key):
        for candidate in self:
            if len(candidate) != len(key):
                continue
            if self._signature_matches(key, candidate):
                # Cache the resolution so the next lookup is a direct hit
                self[key] = self[candidate]
                return self[key]
        raise KeyError("Arguments do not fit any signature")

    @staticmethod
    def _signature_matches(key, candidate):
        """Return True if every type in ``key`` is acceptable for the
        corresponding slot of ``candidate``."""
        for expected, arg in zip(candidate, key):
            # NoneType arguments are wildcards
            if arg == type(None):
                continue
            # Explicitly segregated pairs never match
            if expected in segregate_types and arg == segregate_types[expected]:
                return False
            if not issubclass(arg, expected):
                # Not a subclass: only acceptable via declared equivalence
                if expected in eq_types:
                    if arg != eq_types[expected]:
                        return False
                else:
                    return False
        return True
## Decorator class used to overload java wrapper functions
# that are non-constructor members of an instantiated class
class javaOverload(object):
    """
    Creates a function decorator for a javabridge function that
    is the member of a class

    Arguments:

    func_name -- The name of the java function
    args -- tuples of form (java_signature, (python arg types),post_process_func)
            e.g. ("(I)I",(int,),None)
            or ("(I)[L",(int,),lambda x: javabridge.get_env().get_long_array_elements(x))
    """
    def __init__(self, func_name, *args):
        """
        Creates a function decorator for a javabridge function that
        is the member of a class

        Arguments:

        func_name -- The name of the java function
        args -- tuples of form (java_signature, (python arg types),post_process_func)
                e.g. ("(I)I",(int,),None)
                or ("(I)[L",(int,),lambda x: javabridge.get_env().get_long_array_elements(x))
        """
        self.func_name = func_name
        ## Create signature dictionary of {(python args) -> function}
        self.func_dict = signature_dict()
        # Parallel dictionary of {(python args) -> post-processing function}
        self.return_func_dict = signature_dict()
        for sig, pyargs, py_return in args:
            # Resolve lazy (module, name) tuples into actual python types
            pyargs = tuple(map(self._process_pyarg, pyargs))
            self.func_dict[pyargs] = javabridge.make_method(func_name,
                                                            sig)
            if py_return is None:
                # No post-processing: pass the raw javabridge value through
                self.return_func_dict[pyargs] = lambda x: x
            else:
                self.return_func_dict[pyargs] = self._process_py_return(py_return)

    def _process_pyarg(self, x):
        # A tuple is a lazy (module, attribute) reference; import and resolve it
        if isinstance(x, tuple):
            #return getattr(__import__(x[0],globals(),locals(),[x[1]]),x[1])
            return getattr(__import__(x[0],globals(),locals()),x[1])
        else:
            return x

    def _process_py_return(self, x):
        # A tuple is a lazy (module, class) reference; the returned java object
        # is wrapped by calling the resolved class's __init__ with obj=<jobject>
        if isinstance(x, tuple):
            #func = getattr(__import__(x[0],globals(),locals(),[x[1]]),x[1]).__init__
            func = getattr(__import__(x[0],globals(),locals()),x[1]).__init__
            return lambda y: func(obj = y)
        else:
            return x

    def __call__(self, f):
        @wraps(f)
        def wrapped_f(*args):
            if len(args) > 1:
                # Get the right function based on argument types
                key = tuple(map(type, args[1:]))
                # Convert any wrapped java items to their java objects
                args = list(args)
                args[1:] = map(lambda x: (x.o if isinstance(x,TASSELpy.javaObj.javaObj) else x),
                               args[1:])
                return_val = self.func_dict[key](*args)
                if self.func_name != 'getClass':
                    return self.return_func_dict[key](return_val)
                else:
                    # Special method for getClass to put in generic type
                    return_obj = self.return_func_dict[key](return_val)
                    if not hasattr(return_obj, 'generic_dict'):
                        return return_obj
                    else:
                        # Record the instance's own type as the first generic
                        return_obj.generic_dict['/@1/'] = type(args[0])
                        return return_obj
            else:
                # Only self was passed: dispatch on the empty signature
                return_val = self.func_dict[()](*args)
                return self.return_func_dict[()](return_val)
        return wrapped_f
## Decorator class used to overload constructors
class javaConstructorOverload(object):
    """
    Creates a function decorator for a javabridge function that is
    a constructor for an instantiated class

    Arguments:

    class_name -- The name of the java class, as in path/to/class
    args -- tuples of form (java signature,(python arg types))
            e.g. ("(I)V",(int,))
    """
    def __init__(self, class_name, *args):
        """
        Creates a function decorator for a javabridge function that is
        a constructor for an instantiated class

        Arguments:

        class_name -- The name of the java class, as in path/to/class
        args -- tuples of form (java signature,(python arg types))
                e.g. ("(I)V",(int,))
        """
        self.class_name = class_name
        ## Create signature dictionary of {(python args) -> function}
        self.func_dict = signature_dict()
        for sig, pyargs in args:
            # Resolve lazy (module, name) tuples into actual python types
            pyargs = tuple(map(self._process_pyarg, pyargs))
            self.func_dict[pyargs] = javabridge.make_new(self.class_name,sig)

    def _process_pyarg(self, x):
        # A tuple is a lazy (module, attribute) reference; import and resolve it
        if isinstance(x, tuple):
            #return getattr(__import__(x[0],globals(),locals(),[x[1]]),x[1])
            return getattr(__import__(x[0],globals(),locals()),x[1])
        else:
            return x

    def __call__(self, f):
        @wraps(f)
        def wrapped_f(*args, **kwargs):
            if 'obj' in kwargs:
                ## If wrapping existing java object, put in
                # raw Java object as attribute so that methods can
                # find it
                if isinstance(kwargs['obj'], unicode):
                    args[0].o = to_java_string(kwargs['obj'])
                else:
                    args[0].o = kwargs['obj']
            elif len(self.func_dict) == 0:
                ## Skip if there are no actual java functions being wrapped
                pass
            else:
                if len(args) > 1:
                    # Get the right function based on argument types
                    key = tuple(map(type, args[1:]))
                    # Convert any wrapped java items to their java objects
                    args = list(args)
                    #args[1:] = map(lambda x: (x.o if not hasattr(x,'toPrimative') else x.toPrimative()) \
                    #               if isinstance(x,TASSELpy.javaObj.javaObj) else x,
                    #               args[1:])
                    args[1:] = map(lambda x: (x.o if isinstance(x,TASSELpy.javaObj.javaObj) else x),
                                   args[1:])
                    self.func_dict[key](*args)
                else:
                    # No constructor arguments: dispatch on the empty signature
                    self.func_dict[()](*args)
            if 'generic' in kwargs:
                ## Put in the generic type dictionary if necessary
                # (maps placeholder '/@1/', '/@2/', ... to the supplied types)
                args[0].generic_dict = dict(zip(map(lambda x: '/@%d/' % x,
                                                    xrange(1,len(kwargs['generic'])+1)),
                                                kwargs['generic']))
            # Make the call from the function body
            f(*args, **kwargs)
        return wrapped_f
class javaStaticOverload(object):
    """
    Creates a function decorator for a javabridge static function

    Arguments:

    class_name -- The name of the java class containing the method, as in path/to/class
    func_name -- The name of the java function
    args -- tuples of form (java signature, (python arg types), post process function)
            e.g. ("(I)I",(int,),None)
            or ("(I)[L",(int,),lambda x: javabridge.get_env().get_long_array_elements(x))
    """
    def __init__(self, class_name, func_name, *args):
        """
        Creates a function decorator for a javabridge static function

        Arguments:

        class_name -- The name of the java class containing the method, as in path/to/class
        func_name -- The name of the java function
        args -- tuples of form (java signature, (python arg types), post process function)
                e.g. ("(I)I",(int,),None)
                or ("(I)[L",(int,),lambda x: javabridge.get_env().get_long_array_elements(x))
        """
        self.func_name = func_name
        self.class_name = class_name
        ## Create signature dictionary of {(python args) -> function}
        # NOTE(review): unlike javaOverload, pyargs are not run through a
        # _process_pyarg resolution step here — confirm that is intentional.
        self.func_dict = signature_dict()
        self.return_func_dict = signature_dict()
        for sig, pyargs, py_return in args:
            self.func_dict[pyargs] = javabridge.make_static_call(self.class_name,
                                                                 self.func_name,sig)
            if py_return is None:
                # No post-processing: pass the raw javabridge value through
                self.return_func_dict[pyargs] = lambda x: x
            else:
                self.return_func_dict[pyargs] = py_return

    def _arg_convert(self, x):
        # Unwrap python wrappers to their raw java objects, and promote
        # python strings to java.lang.String; other values pass through
        if isinstance(x, TASSELpy.javaObj.javaObj):
            return x.o
        elif isinstance(x, str):
            return to_java_string(x)
        else:
            return x

    def __call__(self, f):
        def wrapped_f(*args):
            # Get the right function based on argument types
            key = tuple(map(type,args))
            # Convert any wrapped java items to their java objects
            args = list(args)
            args = map(self._arg_convert, args)
            # Convert any numpy arrays to their java arrays
            if np.ndarray in key:
                args = map(lambda x: array_conversion_func_dict[x.dtype.type](x) \
                           if type(x) == np.ndarray else x, args)
            return_val = self.func_dict[key](*args)
            return self.return_func_dict[key](return_val)
        # Copy function metadata, then expose the wrapper as a staticmethod
        wrapped_f = update_wrapper(wrapped_f,f,
                                   assigned=('__doc__','__name__','__module__'))
        wrapped_f = staticmethod(wrapped_f)
        return wrapped_f
## Decorator class used to overload java wrapper functions
# that are non-constructor members of an instantiated class
# and return and/or accept a generic type
class javaGenericOverload(object):
    """
    Creates a function decorator for a javabridge function that
    is the member of a class and returns and/or accepts a generic type

    Arguments:

    func_name -- The name of the java function
    args -- tuples of form (java_signature, (python arg types),post_process_func)
            Java signatures should have java/lang/Object where generic types go.
            In the Python arg types, a generic types specified as, say (type1,type2)
            in the generic argument of the constructor should be specified as a string
            "/@1/" or "/@2/", corresponding to type1 and type2 respectively.
            In the post_process_func, you can specify None or a function as usual in
            order to deal with pre-specified types. Alternatively, you can put in
            the string "/@1/" or whatever the corresponding string is for a given type
            in order to send the return object through the constructor of that type
            (e.g. type1(obj=x)). In the case of types that should receive generic arguments,
            you can specify a dictionary with 'type' and 'generic' keys. For instance,
            to send the return object to a wrapper class named MyClass that should receive
            type1 as its one and only generic type argument, you can put in the following
            dictionary:

            dict(type=MyClass, generic=("/@1/",))
    """
    def __init__(self, func_name, *args):
        """
        Creates a function decorator for a javabridge function that
        is the member of a class

        Arguments:

        func_name -- The name of the java function
        args -- tuples of form (java_signature, (python arg types or None),post_process_func)
                For java signature, put /@/ sign where the actual type of the generic should be placed.
                For python args/return type put None where it should be
                Note that if the returned type is the generic, the post-process function will
                cast to the specified type unless otherwise specified
                e.g. ("(/@/)I",(None,),None)
                or ("(/@/)[L",(None,),lambda x: javabridge.get_env().get_long_array_elements(x))
        """
        self.func_name = func_name
        # Store function parameters
        # (signatures are resolved lazily per instance, since the concrete
        #  generic types are only known from each instance's generic_dict)
        self.func_params = args
        # Set the sig_set dictionary
        # (LRU caches keyed by the raw java object of the bound instance)
        self.func_dict = LRUcache(lambda x: x)
        self.return_func_dict = LRUcache(lambda x: x)

    def __call__(self, f):
        @wraps(f)
        def wrapped_f(*args):
            ## If the signature is not set, do it
            if args[0].o not in self.func_dict:
                self.func_dict[args[0].o] = signature_dict()
                self.return_func_dict[args[0].o] = signature_dict()
                ## Set up signature dictionary ##
                for sig, pyargs, py_return in self.func_params:
                    ## Replace any instances of strings in the pyargs with the python type
                    pyargs = tuple(map(lambda x: (args[0].generic_dict[x] if \
                                                  x in args[0].generic_dict else TASSELpy.javaObj.javaObj) if \
                                       type(x) == str else x,pyargs))
                    # Set func_dict[args[0].o] entries
                    self.func_dict[args[0].o][pyargs] = javabridge.make_method(self.func_name,sig)
                    if type(py_return) == str:
                        ## If return function is replaced with string, meaning
                        # to instantiate generic type
                        if hasattr(args[0].generic_dict[py_return],'generic_dict'):
                            # Generic type itself carries generics: forward them
                            generic_tuple = tuple(map(lambda x: x[1],
                                                      sorted(args[0].generic_dict[py_return].generic_dict.items())))
                            self.return_func_dict[args[0].o][pyargs] = lambda x: \
                                args[0].generic_dict[py_return](obj=x, generic=generic_tuple) if \
                                isinstance(x,javabridge.JB_Object) else \
                                (args[0].generic_dict[py_return](x) if x is not None else None)
                        else:
                            def da_return_func(x):
                                # Wrap raw java objects in the generic type;
                                # pass None through; otherwise try a direct
                                # cast, falling back to boxing common python
                                # primitives into java objects first
                                if isinstance(x, javabridge.JB_Object):
                                    return args[0].generic_dict[py_return](obj=x)
                                elif x is None:
                                    return None
                                else:
                                    try:
                                        return args[0].generic_dict[py_return](x)
                                    except KeyError:
                                        obj = None
                                        if isinstance(x, str) or isinstance(x,unicode):
                                            obj = javabridge.make_instance('java/lang/String',
                                                                           '(Ljava/lang/String;)V',x)
                                        elif isinstance(x, int):
                                            obj = javabridge.make_instance('java/lang/Integer',
                                                                           '(Ljava/lang/Integer;)V',x)
                                        elif isinstance(x, float):
                                            obj = javabridge.make_instance('java/lang/Double',
                                                                           '(Ljava/lang/Double;)V',x)
                                        if obj is None: return obj
                                        else:
                                            return args[0].generic_dict[py_return](obj=obj)
                            self.return_func_dict[args[0].o][pyargs] = da_return_func
                    elif isinstance(py_return, dict):
                        ## If this is a dictionary, specify the generic type
                        # and make constructor call method
                        if 'type' not in py_return:
                            raise ValueError("Return type of object not given")
                        elif 'generic' not in py_return:
                            raise ValueError("Generic type(s) for return object not given")
                        self.return_func_dict[args[0].o][pyargs] = \
                            lambda x: py_return['type'](obj=x,
                                                       generic=tuple(map(lambda y: args[0].generic_dict[y] if \
                                                                         isinstance(y,str) else y,
                                                                         py_return['generic']))) if \
                            isinstance(x,javabridge.JB_Object) else x
                    elif py_return is None:
                        ## If no return function specified, return raw output
                        self.return_func_dict[args[0].o][pyargs] = lambda x: x
                    else:
                        ## If function specified, use that
                        self.return_func_dict[args[0].o][pyargs] = py_return
            ## Run the function ##
            if len(args) > 1:
                # Get the right function based on argument types
                key = tuple(map(type, args[1:]))
                # Convert any wrapped java items to their java objects
                args = list(args)
                args[1:] = map(lambda x: (x.o if isinstance(x,TASSELpy.javaObj.javaObj) else x),
                               args[1:])
                return_val = self.func_dict[args[0].o][key](*args)
                return self.return_func_dict[args[0].o][key](return_val)
            else:
                # Only self was passed: dispatch on the empty signature
                return_val = self.func_dict[args[0].o][()](*args)
                return self.return_func_dict[args[0].o][()](return_val)
        return wrapped_f
| {
"content_hash": "4662d8ac2a7b6f222b3bdf74d1a49f70",
"timestamp": "",
"source": "github",
"line_count": 431,
"max_line_length": 106,
"avg_line_length": 48.09976798143852,
"alnum_prop": 0.5201389223867637,
"repo_name": "er432/TASSELpy",
"id": "8e32557d7aee34933b17408099526154b0142ec4",
"size": "20784",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "TASSELpy/utils/Overloading.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "947691"
},
{
"name": "Shell",
"bytes": "6705"
}
],
"symlink_target": ""
} |
'''Module that runs extra process and thread workers
to offload cpu usage from main thread.
Also has a interface class and queues, methods, attributes
for handling the data, functions, callbacks
'''
from utils.compat_queue import Queue, Empty
from kivy.utils import platform
from kivy.logger import Logger
from time import time, sleep
from .worker import Worker
from .async_funcs import *
import multiprocessing
import threading
# Global registry of WorkerInterface instances, keyed by worker id
workers = {}
class WorkerInterface(object):
    '''Interface for Worker processes '''

    # Dict with task callback id numbers and callback functions
    task_callbacks = None
    # The running Process/Thread object (set by start_process)
    process = None
    _next_worker_id = 0
    # Task counter, used as task_callbacks dict key for every new task
    _next_task_id = 0
    # Multiprocessing is not used on Windows; threads are used instead
    use_multiprocess = platform != 'win'

    def __init__(self):
        cls = WorkerInterface
        self.id = cls._next_worker_id
        cls._next_worker_id += 1
        if self.use_multiprocess:
            self.recv = multiprocessing.Queue()
            self.send = multiprocessing.Queue()
        else:
            self.recv = Queue()
            self.send = Queue()
        self.task_callbacks = {}

    def add_task(self, task, callback):
        '''Puts task in worker queue and stores callback for later use'''
        task_id = self._next_task_id
        self._next_task_id = task_id + 1
        task['task_id'] = task_id
        self.task_callbacks[task_id] = callback
        self.send.put(task)

    def update(self):
        '''Gets results from self.recv queue and calls task callbacks'''
        try:
            # Drain at most five results per call
            for _ in range(5):
                result = self.recv.get_nowait()
                method = result['method']
                if method == 'task_done':
                    self.task_callbacks[result['task_id']](result)
                elif method == 'Logger_info':
                    Logger.info(result['text'])
        except Empty:
            pass

    def start_process(self):
        '''Starts daemon process with
        self.send and self.recv queues as arguments.
        Stores it in self.process'''
        if self.process:
            return
        worker = Worker()
        runner = (multiprocessing.Process if self.use_multiprocess
                  else threading.Thread)
        self.process = runner(target=worker.start, args=(self.send, self.recv,))
        self.process.daemon = True
        self.process.start()

    def stop(self):
        '''Tells self.process to stop'''
        self.send.put({'method': 'stop'})
        self.process.join()
def start_workers(count):
    '''Creates WorkerInterface instances and starts Worker processes,
    then schedules _update with Clock interval'''
    from kivy.clock import Clock
    global workers
    for _ in range(count):
        interface = WorkerInterface()
        interface.start_process()
        workers[interface.id] = interface
    Clock.schedule_interval(_update, 0.1)
def _update(dt):
    '''Calls WorkerInterface update method
    to check task results and call callbacks '''
    global workers
    for interface in workers.values():
        interface.update()
def add_task(task, callback):
    '''Calls WorkerInterface instance add_task method.
    It puts tasks into worker queue'''
    global workers
    if not workers:
        return
    workers[0].add_task(task, callback)
def stop():
    '''Tells workers to stop'''
    global workers
    for interface in workers.values():
        interface.stop()
| {
"content_hash": "10418d6eb1bf9c5e89ea1dbb18eb2c17",
"timestamp": "",
"source": "github",
"line_count": 116,
"max_line_length": 74,
"avg_line_length": 30.75862068965517,
"alnum_prop": 0.6109865470852018,
"repo_name": "Bakterija/mmplayer",
"id": "41d1352780789c0b086eb0f20296366ead4c8450",
"size": "3568",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mmplayer/appworker/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "450641"
}
],
"symlink_target": ""
} |
from master.chromium_git_poller_bb8 import ChromiumGitPoller
def Update(config, active_master, c):
    """Register the Syzygy GitHub repository poller as a change source
    on the buildbot configuration dict ``c``."""
    poller = ChromiumGitPoller(
        repourl='https://github.com/google/syzygy.git',
        branch='master',
        pollInterval=60)
    c['change_source'].append(poller)
| {
"content_hash": "a5481d33a7c39bdde1e704231df151f1",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 60,
"avg_line_length": 31.22222222222222,
"alnum_prop": 0.7224199288256228,
"repo_name": "eunchong/build",
"id": "b37f2c8f6aee924b36740ecfe3b5cc25e0a3af9c",
"size": "448",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "masters/master.client.syzygy/master_source_cfg.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "3128"
},
{
"name": "CSS",
"bytes": "211818"
},
{
"name": "HTML",
"bytes": "429981"
},
{
"name": "JavaScript",
"bytes": "75624"
},
{
"name": "Makefile",
"bytes": "21204"
},
{
"name": "Python",
"bytes": "6143109"
},
{
"name": "Shell",
"bytes": "23512"
}
],
"symlink_target": ""
} |
"""
This simple animation example shows how to move an item with the mouse, and
handle mouse clicks.
"""
import arcade
import random
# Set up the constants

# Window dimensions, in pixels
SCREEN_WIDTH = 800
SCREEN_HEIGHT = 600

# Size of the rectangle that follows the mouse, in pixels
RECT_WIDTH = 50
RECT_HEIGHT = 50
class Rectangle():
    """ Class to represent a rectangle on the screen """

    def __init__(self, x, y, width, height, angle, color):
        """ Store the rectangle's position, size, rotation and color. """
        # Position
        self.x, self.y = x, y
        # Size and rotation
        self.width, self.height = width, height
        self.angle = angle
        # Fill color
        self.color = color

    def draw(self):
        """ Draw our rectangle """
        arcade.draw_rectangle_filled(self.x, self.y,
                                     self.width, self.height,
                                     self.color, self.angle)
class MyApplication(arcade.Window):
    """ Main application class. """

    def setup(self):
        """ Set up the game and initialize the variables. """
        # Start the rectangle in the lower-left area of the window
        self.player = Rectangle(0, RECT_HEIGHT,
                                RECT_WIDTH, RECT_HEIGHT,
                                0, arcade.color.WHITE)
        self.left_down = False

    def animate(self, dt):
        """ Move everything """
        # Spin the rectangle while the left mouse button is held down
        if self.left_down:
            self.player.angle += 2

    def on_draw(self):
        """
        Render the screen.
        """
        arcade.start_render()
        self.player.draw()

    def on_mouse_motion(self, x, y, dx, dy):
        """
        Called whenever the mouse moves.
        """
        # Keep the rectangle attached to the cursor
        self.player.x, self.player.y = x, y

    def on_mouse_press(self, x, y, button, modifiers):
        """
        Called when the user presses a mouse button.
        """
        print(button)
        if button == arcade.MOUSE_BUTTON_LEFT:
            self.left_down = True

    def on_mouse_release(self, x, y, button, modifiers):
        """
        Called when a user releases a mouse button.
        """
        if button == arcade.MOUSE_BUTTON_LEFT:
            self.left_down = False
def main():
    """Create the window, initialize it and hand control to arcade."""
    app = MyApplication(SCREEN_WIDTH, SCREEN_HEIGHT, title="Mouse control")
    app.setup()
    arcade.run()
main()
| {
"content_hash": "9edec5927878086f2a0472a188aaa7d2",
"timestamp": "",
"source": "github",
"line_count": 97,
"max_line_length": 77,
"avg_line_length": 23.309278350515463,
"alnum_prop": 0.550641309155241,
"repo_name": "mwreuter/arcade",
"id": "07be2ba7b6e052925565d088397f687d67876af6",
"size": "2261",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "doc/source/examples/move_mouse.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "878"
},
{
"name": "Python",
"bytes": "202899"
}
],
"symlink_target": ""
} |
import importlib
import pytest
from django.template import TemplateDoesNotExist
from django.template.loader import render_to_string
from django.test import SimpleTestCase
from django.test.utils import override_settings
from unittest import mock
from hamlpy.compiler import Compiler
from hamlpy.template import loaders
class LoaderTest(SimpleTestCase):
    """Tests for the hamlpy Django template loaders."""

    def setUp(self):
        super().setUp()
        # Reload so the loaders pick up the current (default) Django settings.
        importlib.reload(loaders)

    @mock.patch('hamlpy.template.loaders.Compiler', wraps=Compiler)
    def test_compiler_default_settings(self, mock_compiler_class):
        render_to_string('simple.hamlpy')

        # With no HAMLPY_* settings defined, the compiler gets no options.
        mock_compiler_class.assert_called_once_with(options={})
        mock_compiler_class.reset_mock()

    @override_settings(HAMLPY_ATTR_WRAPPER='"', HAMLPY_DJANGO_INLINE_STYLE=False)
    def test_compiler_settings(self):
        # Reload again so the overridden settings are read by the loaders.
        importlib.reload(loaders)

        with mock.patch('hamlpy.template.loaders.Compiler', wraps=Compiler) as mock_compiler_class:
            rendered = render_to_string('simple.hamlpy')

        mock_compiler_class.assert_called_once_with(options={
            'attr_wrapper': '"',
            'django_inline_style': False
        })

        assert '"someClass"' in rendered

    def test_template_rendering(self):
        assert render_to_string('simple.hamlpy') == self._load_test_template('simple.html')

        context = {
            'section': {'title': "News", 'subtitle': "Technology"},
            'story_list': [{
                'headline': "Haml Helps",
                'tease': "Many HAML users...",
                'get_absolute_url': lambda: "http://example.com/stories/1/"
            }]
        }
        rendered = render_to_string('djangoCombo.hamlpy', context)

        assert "<h2>Technology</h2>" in rendered
        assert "HAML HELPS" in rendered
        assert "<a href='http://example.com/stories/1/'>" in rendered
        # BUG FIX: this assertion was a bare string literal (always truthy)
        # and therefore never actually checked the rendered output.
        assert "<p>Many HAML users...</p>" in rendered

    def test_should_ignore_non_haml_templates(self):
        assert render_to_string('simple.html') == self._load_test_template('simple.html')

    def test_should_raise_exception_when_template_doesnt_exist(self):
        with pytest.raises(TemplateDoesNotExist):
            render_to_string('simple.xyz')

    def _load_test_template(self, name):
        # Use a context manager so the file handle is closed deterministically
        # (the original leaked the handle).
        with open('hamlpy/test/templates/' + name, 'r') as f:
            return f.read()
| {
"content_hash": "4a84c8e58911a70783e192effc2e22e1",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 99,
"avg_line_length": 35.0735294117647,
"alnum_prop": 0.6452830188679245,
"repo_name": "Psycojoker/HamlPy",
"id": "da57d8647f488dd85b61c7c67fc3539d9b09a551",
"size": "2385",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hamlpy/test/test_loader.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "8021"
},
{
"name": "Python",
"bytes": "80063"
},
{
"name": "Ruby",
"bytes": "88"
}
],
"symlink_target": ""
} |
"""
Utility functions for training CLSTM neural networks.
"""
from __future__ import absolute_import, division, print_function
from __future__ import unicode_literals
from future import standard_library
standard_library.install_aliases()
from builtins import str
import os
import unicodedata

import click
import numpy as np
import bidi.algorithm as bd
from PIL import Image

from kraken import rpred
from kraken.lib import lstm
from kraken.lib import models
from kraken.lib.util import pil2array, array2pil
from kraken.lib.lineest import CenterNormalizer
def _fast_levenshtein(seq1, seq2):
oneago = None
thisrow = range(1, len(seq2) + 1) + [0]
for x in xrange(len(seq1)):
oneago, thisrow = thisrow, [0] * len(seq2) + [x + 1]
for y in xrange(len(seq2)):
delcost = oneago[y] + 1
addcost = thisrow[y - 1] + 1
subcost = oneago[y - 1] + (seq1[x] != seq2[y])
thisrow[y] = min(delcost, addcost, subcost)
return thisrow[len(seq2) - 1]
def compute_error(model, test_set):
    """
    Computes detailed error report from a model and a list of line image-text
    pairs.

    Args:
        model (kraken.lib.models.ClstmSeqRecognizer): Model used for
                                                      recognition
        test_set (list): List of tuples (image, text) for testing

    Returns:
        A tuple with total number of characters and edit distance across the
        whole test set.
    """
    char_total = 0
    distance_total = 0
    for line_image, transcript in test_set:
        prediction = model.predictString(line_image)
        char_total += len(transcript)
        distance_total += _fast_levenshtein(prediction, transcript)
    return char_total, distance_total
class GroundTruthContainer(object):
    """
    Container for ground truth used during training.

    Attributes:
        training_set (list): List of tuples (image, text) for training
        test_set (list): List of tuples (image, text) for testing
        alphabet (str): Sorted string of all codepoints found in the ground
                        truth
    """

    def __init__(self, images=None, split=lambda x: os.path.splitext(x)[0],
                 suffix='.gt.txt', normalization=None, reorder=True,
                 partition=0.9, pad=16):
        """
        Reads a list of image-text pairs and creates a ground truth set.

        Args:
            images (list): List of file paths of line images
            split (func): Function for generating the base name without
                          extensions from paths
            suffix (str): Suffix to attach to image base name for text
                          retrieval
            normalization (str): Unicode normalization for gt
            reorder (bool): Whether to rearrange code points in "display"/LTR
                            order
            partition (float): Ground truth data partition ratio between
                               train/test set.
            pad (int): Padding to add to images left and right
        """
        self.lnorm = CenterNormalizer()
        self.training_set = []
        self.test_set = []
        self.alphabet = set()
        if not images:
            return
        for line in images:
            self.add(line, split, suffix, normalization, reorder, pad)
        self.repartition(partition)

        self.alphabet = sorted(set(''.join(t for _, t in self.training_set)))

    def add(self, image, split=lambda x: os.path.splitext(x)[0],
            suffix='.gt.txt', normalization=None, reorder=True,
            pad=16):
        """
        Adds a single image to the training set.

        The transcript is read from ``split(image) + suffix``, optionally
        normalized and reordered, and the image is dewarped and prepared
        for the LSTM before being appended to ``training_set``.
        """
        with click.open_file(split(image) + suffix, 'r', encoding='utf-8') as fp:
            gt = fp.read()
            if normalization:
                # BUG FIX: ``unicodedata`` was used here without ever being
                # imported, raising NameError whenever a normalization form
                # was requested (import added at module top).
                gt = unicodedata.normalize(normalization, gt)
            if reorder:
                gt = bd.get_display(gt)

            im = Image.open(image)
            im = rpred.dewarp(self.lnorm, im)
            im = pil2array(im)
            im = lstm.prepare_line(im, pad)
            self.training_set.append((im, gt))

    def repartition(self, partition=0.9):
        """
        Repartitions the training/test sets.

        Args:
            partition (float): Ground truth data partition ratio between
                               training/test sets.
        """
        self.training_set = self.training_set + self.test_set
        idx = np.random.choice(len(self.training_set), int(len(self.training_set) * partition), replace=False)
        tmp_set = [self.training_set[x] for x in idx]
        # Remove the sampled lines (highest index first so earlier indices
        # stay valid); what remains becomes the new test set.
        for x in sorted(idx, reverse=True):
            self.training_set.pop(x)
        self.test_set = self.training_set
        self.training_set = tmp_set

        self.alphabet = sorted(set(''.join(t for _, t in self.training_set)))

    def sample(self):
        """
        Samples a line image-text pair from the training set.

        Returns:
            A tuple (line, text) with line being a numpy.array run through
            kraken.lib.lstm.prepare_line.
        """
        return self.training_set[np.random.choice(len(self.training_set))]
| {
"content_hash": "ac1744ece6d2a7b5d9718bdc2989c36f",
"timestamp": "",
"source": "github",
"line_count": 152,
"max_line_length": 110,
"avg_line_length": 33.578947368421055,
"alnum_prop": 0.585423197492163,
"repo_name": "QuLogic/ocropy",
"id": "540b827ab7df2f7f980b5c604d1fa74ddb27cf0f",
"size": "5712",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "kraken/train.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "1359"
},
{
"name": "HTML",
"bytes": "4048"
},
{
"name": "Protocol Buffer",
"bytes": "635"
},
{
"name": "Python",
"bytes": "127097"
}
],
"symlink_target": ""
} |
from supriya.tools import osctools
from supriya.tools.requesttools.Request import Request
class BufferFreeRequest(Request):
    r'''A /b_free request.

    ::

        >>> from supriya.tools import requesttools
        >>> request = requesttools.BufferFreeRequest(
        ...     buffer_id=23,
        ...     )
        >>> request
        BufferFreeRequest(
            buffer_id=23
            )

    ::

        >>> message = request.to_osc_message()
        >>> message
        OscMessage(32, 23)

    ::

        >>> message.address == requesttools.RequestId.BUFFER_FREE
        True

    '''

    ### CLASS VARIABLES ###

    __slots__ = (
        '_buffer_id',
        '_completion_message',
        )

    ### INITIALIZER ###

    def __init__(
        self,
        buffer_id=None,
        completion_message=None,
        ):
        Request.__init__(self)
        self._completion_message = self._coerce_completion_message_input(
            completion_message)
        self._buffer_id = int(buffer_id)

    ### PUBLIC METHODS ###

    def to_osc_message(self):
        # Message payload is the request id followed by the buffer id,
        # with an optional completion message appended by the coercer.
        contents = [int(self.request_id), int(self.buffer_id)]
        self._coerce_completion_message_output(contents)
        return osctools.OscMessage(*contents)

    ### PUBLIC PROPERTIES ###

    @property
    def buffer_id(self):
        return self._buffer_id

    @property
    def completion_message(self):
        return self._completion_message

    @property
    def response_specification(self):
        # No response is expected for /b_free.
        return None

    @property
    def request_id(self):
        from supriya.tools import requesttools
        return requesttools.RequestId.BUFFER_FREE
"content_hash": "949072b7ab26a3a0dcdcc1df07817e92",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 73,
"avg_line_length": 21.839506172839506,
"alnum_prop": 0.5579423403052572,
"repo_name": "andrewyoung1991/supriya",
"id": "56af992e51056db019036f2ec1618d69e1d26f89",
"size": "1795",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "supriya/tools/requesttools/BufferFreeRequest.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "6712"
},
{
"name": "CSS",
"bytes": "446"
},
{
"name": "HTML",
"bytes": "1083"
},
{
"name": "JavaScript",
"bytes": "6163"
},
{
"name": "Makefile",
"bytes": "6775"
},
{
"name": "Python",
"bytes": "2693776"
}
],
"symlink_target": ""
} |
from django.conf.urls import url
from django.contrib.auth.decorators import login_required
from .views import ToolkitActivityLogListView
# Route log browsing behind authentication: only logged-in users may list
# the toolkit activity logs.
urlpatterns = [
    url(r'^browse/$', login_required(ToolkitActivityLogListView.as_view()), name='browse_activity_logs', ),
]
| {
"content_hash": "769316db1d5db251fcd27c0afe53df74",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 107,
"avg_line_length": 33.125,
"alnum_prop": 0.7773584905660378,
"repo_name": "cceit/cce-toolkit",
"id": "e59e6da614ab4521d8046b55031595e8a82eba6f",
"size": "265",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "toolkit/apps/toolkit_activity_log/urls.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "325419"
},
{
"name": "Gherkin",
"bytes": "2506"
},
{
"name": "HTML",
"bytes": "66275"
},
{
"name": "JavaScript",
"bytes": "16338"
},
{
"name": "Python",
"bytes": "214599"
}
],
"symlink_target": ""
} |
"""Module that pre-processes the notebook for export via Reveal."""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
from .base import Preprocessor
from traitlets import Unicode
class RevealHelpPreprocessor(Preprocessor):
    """Annotate notebook cells with the slide/fragment helper metadata that
    the Reveal exporter's templates rely on."""

    url_prefix = Unicode('reveal.js', config=True,
                         help="""The URL prefix for reveal.js.
        This can be a a relative URL for a local copy of reveal.js,
        or point to a CDN.

        For speaker notes to work, a local reveal.js prefix must be used.
        """
    )

    def preprocess(self, nb, resources):
        """
        Called once to 'preprocess' contents of the notebook.

        Parameters
        ----------
        nb : NotebookNode
            Notebook being converted
        resources : dictionary
            Additional resources used in the conversion process. Allows
            preprocessors to pass variables into the Jinja engine.
        """
        for index, cell in enumerate(nb.cells):
            # Make sure the cell has slideshow metadata ('-' = no slide type).
            cell.metadata.slide_type = cell.get('metadata', {}).get('slideshow', {}).get('slide_type', '-')

        for index, cell in enumerate(nb.cells):
            # Get the slide type. If type is start, subslide, or slide,
            # end the last subslide/slide.
            # NOTE(review): when index == 0 these writes land on nb.cells[-1],
            # i.e. they wrap around to the LAST cell — confirm that is intended.
            if cell.metadata.slide_type in ['slide']:
                nb.cells[index - 1].metadata.slide_helper = 'slide_end'
            if cell.metadata.slide_type in ['subslide']:
                nb.cells[index - 1].metadata.slide_helper = 'subslide_end'
            # Prevent the rendering of "do nothing" cells before fragments
            # Group fragments passing frag_number to the data-fragment-index
            if cell.metadata.slide_type in ['fragment']:
                nb.cells[index].metadata.frag_number = index
                i = 1
                while i < len(nb.cells) - index:
                    # We need to break the loop when a new slide or subslide is
                    # found to avoid the propagation of the data-fragment-index
                    # across multiple slides/subslides
                    if nb.cells[index + i].metadata.slide_type in ['slide', 'subslide']:
                        break
                    else:
                        nb.cells[index + i].metadata.frag_helper = 'fragment_end'
                        nb.cells[index + i].metadata.frag_number = index
                    i += 1
            # Restart the slide_helper when the cell status is changed
            # to other types.
            if cell.metadata.slide_type in ['-', 'skip', 'notes', 'fragment']:
                nb.cells[index - 1].metadata.slide_helper = '-'

        # NOTE(review): this indexes resources['reveal'] without a presence
        # check — presumably the exporter supplies a defaultdict-like mapping;
        # a plain dict would raise KeyError here. Confirm against the caller.
        if not isinstance(resources['reveal'], dict):
            resources['reveal'] = {}
        resources['reveal']['url_prefix'] = self.url_prefix

        return nb, resources
| {
"content_hash": "2bbd52d0cd6a068a52be8f9682bccb03",
"timestamp": "",
"source": "github",
"line_count": 70,
"max_line_length": 107,
"avg_line_length": 43.2,
"alnum_prop": 0.5552248677248677,
"repo_name": "bdh1011/wau",
"id": "c21fb07ced6bf3295f8259caa5c20bc7fcd02d46",
"size": "3024",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "venv/lib/python2.7/site-packages/nbconvert/preprocessors/revealhelp.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1176"
},
{
"name": "C",
"bytes": "5022853"
},
{
"name": "C++",
"bytes": "43676"
},
{
"name": "CSS",
"bytes": "10359"
},
{
"name": "D",
"bytes": "1841"
},
{
"name": "FORTRAN",
"bytes": "3707"
},
{
"name": "GAP",
"bytes": "14120"
},
{
"name": "Groff",
"bytes": "7236"
},
{
"name": "HTML",
"bytes": "1709320"
},
{
"name": "JavaScript",
"bytes": "1200059"
},
{
"name": "Jupyter Notebook",
"bytes": "310219"
},
{
"name": "Lua",
"bytes": "11887"
},
{
"name": "Makefile",
"bytes": "112163"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Objective-C",
"bytes": "1291"
},
{
"name": "Perl",
"bytes": "171375"
},
{
"name": "Python",
"bytes": "49407229"
},
{
"name": "Ruby",
"bytes": "58403"
},
{
"name": "Shell",
"bytes": "47672"
},
{
"name": "Smarty",
"bytes": "22599"
},
{
"name": "Tcl",
"bytes": "426334"
},
{
"name": "XSLT",
"bytes": "153073"
}
],
"symlink_target": ""
} |
from __future__ import print_function
import atexit
import errno
import os
import sys
import time
import signal
class Daemon(object):
    """
    A generic daemon class.

    Usage: subclass the Daemon class and override the run() method
    """

    def __init__(self, pidfile, stdin=os.devnull,
                 stdout=os.devnull, stderr=os.devnull,
                 home_dir='.', umask=0o22, verbose=1,
                 use_gevent=False, use_eventlet=False):
        # Paths the daemon's standard streams are redirected to.
        self.stdin = stdin
        self.stdout = stdout
        self.stderr = stderr
        # File holding the daemon's PID while it is running.
        self.pidfile = pidfile
        # Working directory after daemonization.
        self.home_dir = home_dir
        self.verbose = verbose
        self.umask = umask
        # Cleared by the SIGTERM/SIGINT handler installed in daemonize();
        # run() implementations may poll it for graceful shutdown.
        self.daemon_alive = True
        self.use_gevent = use_gevent
        self.use_eventlet = use_eventlet

    def log(self, *args):
        # Print only when verbosity is enabled.
        if self.verbose >= 1:
            print(*args)

    def daemonize(self):
        """
        Do the UNIX double-fork magic, see Stevens' "Advanced
        Programming in the UNIX Environment" for details (ISBN 0201563177)
        http://www.erlenstar.demon.co.uk/unix/faq_2.html#SEC16
        """
        if self.use_eventlet:
            # Stop eventlet's thread pool before forking.
            import eventlet.tpool
            eventlet.tpool.killall()
        try:
            pid = os.fork()
            if pid > 0:
                # Exit first parent
                sys.exit(0)
        except OSError as e:
            sys.stderr.write(
                "fork #1 failed: %d (%s)\n" % (e.errno, e.strerror))
            sys.exit(1)

        # Decouple from parent environment
        os.chdir(self.home_dir)
        os.setsid()
        os.umask(self.umask)

        # Do second fork
        try:
            pid = os.fork()
            if pid > 0:
                # Exit from second parent
                sys.exit(0)
        except OSError as e:
            sys.stderr.write(
                "fork #2 failed: %d (%s)\n" % (e.errno, e.strerror))
            sys.exit(1)

        if sys.platform != 'darwin':  # This block breaks on OS X
            # Redirect standard file descriptors
            sys.stdout.flush()
            sys.stderr.flush()
            si = open(self.stdin, 'r')
            so = open(self.stdout, 'a+')
            if self.stderr:
                try:
                    # Request unbuffered stderr where possible.
                    se = open(self.stderr, 'a+', 0)
                except ValueError:
                    # Python 3 can't have unbuffered text I/O
                    se = open(self.stderr, 'a+', 1)
            else:
                se = so
            os.dup2(se.fileno(), sys.stderr.fileno())
            os.dup2(si.fileno(), sys.stdin.fileno())
            os.dup2(so.fileno(), sys.stdout.fileno())

        def sigtermhandler(signum, frame):
            # Flag a shutdown for run() implementations, then exit.
            self.daemon_alive = False
            sys.exit()

        if self.use_gevent:
            import gevent
            gevent.reinit()
            gevent.signal(signal.SIGTERM, sigtermhandler, signal.SIGTERM, None)
            gevent.signal(signal.SIGINT, sigtermhandler, signal.SIGINT, None)
        else:
            signal.signal(signal.SIGTERM, sigtermhandler)
            signal.signal(signal.SIGINT, sigtermhandler)

        self.log("Started")

        # Write pidfile
        atexit.register(
            self.delpid)  # Make sure pid file is removed if we quit
        pid = str(os.getpid())
        open(self.pidfile, 'w+').write("%s\n" % pid)

    def delpid(self):
        """Remove the pidfile if it still belongs to this process."""
        try:
            # the process may fork itself again
            pid = int(open(self.pidfile, 'r').read().strip())
            if pid == os.getpid():
                os.remove(self.pidfile)
        except OSError as e:
            if e.errno == errno.ENOENT:
                # Already gone — nothing to do.
                pass
            else:
                raise

    def start(self, *args, **kwargs):
        """
        Start the daemon
        """
        self.log("Starting...")

        # Check for a pidfile to see if the daemon already runs
        try:
            pf = open(self.pidfile, 'r')
            pid = int(pf.read().strip())
            pf.close()
        except IOError:
            pid = None
        except SystemExit:
            pid = None

        if pid:
            message = "pidfile %s already exists. Is it already running?\n"
            sys.stderr.write(message % self.pidfile)
            sys.exit(1)

        # Start the daemon
        self.daemonize()
        self.run(*args, **kwargs)

    def stop(self, *args, **kwargs):
        """
        Stop the daemon
        """
        if self.verbose >= 1:
            self.log("Stopping...")

        # Get the pid from the pidfile
        pid = self.get_pid()

        if not pid:
            message = "pidfile %s does not exist. Not running?\n"
            sys.stderr.write(message % self.pidfile)

            # Just to be sure. A ValueError might occur if the PID file is
            # empty but does actually exist
            if os.path.exists(self.pidfile):
                os.remove(self.pidfile)

            return  # Not an error in a restart

        # Try killing the daemon process
        # NOTE(review): this loop only exits when os.kill raises ESRCH (the
        # process is gone); a process that never dies keeps it looping —
        # confirm whether a timeout is desired here.
        try:
            i = 0
            while 1:
                os.kill(pid, signal.SIGTERM)
                time.sleep(0.1)
                i = i + 1
                if i % 10 == 0:
                    # Every second, escalate with SIGHUP as well.
                    os.kill(pid, signal.SIGHUP)
        except OSError as err:
            if err.errno == errno.ESRCH:
                if os.path.exists(self.pidfile):
                    os.remove(self.pidfile)
            else:
                print(str(err))
                sys.exit(1)

        self.log("Stopped")

    def restart(self, *args, **kwargs):
        """
        Restart the daemon
        """
        self.stop()
        self.start(*args, **kwargs)

    def get_pid(self):
        """Return the PID recorded in the pidfile, or None when unreadable."""
        try:
            pf = open(self.pidfile, 'r')
            pid = int(pf.read().strip())
            pf.close()
        except IOError:
            pid = None
        except SystemExit:
            pid = None
        return pid

    def is_running(self):
        """Report whether the daemon process is alive (checks /proc, so
        this is Linux-specific)."""
        pid = self.get_pid()

        if pid is None:
            self.log('Process is stopped')
            return False
        elif os.path.exists('/proc/%d' % pid):
            self.log('Process (pid %d) is running...' % pid)
            return True
        else:
            self.log('Process (pid %d) is killed' % pid)
            return False

    def run(self, *args, **kwargs):
        """
        You should override this method when you subclass Daemon.
        It will be called after the process has been
        daemonized by start() or restart().
        """
        raise NotImplementedError
| {
"content_hash": "a69ec81c34115581cf9dccef29a727d3",
"timestamp": "",
"source": "github",
"line_count": 229,
"max_line_length": 79,
"avg_line_length": 28.912663755458514,
"alnum_prop": 0.4979610330765745,
"repo_name": "360skyeye/kael",
"id": "16d160eeaacdaf856645e4de0ec53634355a9558",
"size": "6660",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "kael/daemon.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "87917"
}
],
"symlink_target": ""
} |
from nose.tools import assert_raises
from scalymongo import Document, OR, IS
from scalymongo.errors import ValidationError
from tests.acceptance.base_acceptance_test import BaseAcceptanceTest
from tests.helpers import assert_raises_with_message
class DocumentWithListOfStrings(Document):
    # Schema under test: a single optional field holding a list of strings.
    structure = {
        'field': [basestring],
    }
    __database__ = 'test'
    __collection__ = __name__ + '_0'
def when_validating_empty_document_should_pass():
    """Fields declared in ``structure`` are optional by default."""
    DocumentWithListOfStrings({}).validate()


def when_validating_document_with_empty_list_should_pass():
    """An empty list trivially satisfies a list-of-strings declaration."""
    DocumentWithListOfStrings({'field': []}).validate()


def when_validating_document_with_list_of_strings():
    """Both ``str`` and ``unicode`` values satisfy ``basestring``."""
    DocumentWithListOfStrings({'field': ['str', u'unicode']}).validate()


def when_validating_document_with_list_of_numbers():
    """A non-string list element is reported with its position in the list."""
    assert_raises_with_message(
        ValidationError,
        "Position 'field.0' was declared to be <type 'basestring'>, but encountered value 1",
        DocumentWithListOfStrings({'field': [1]}).validate)


def when_validating_document_with_extra_fields_should_raise_error():
    """Fields absent from ``structure`` are rejected outright."""
    doc = DocumentWithListOfStrings({'unknown': 1})
    assert_raises_with_message(
        ValidationError,
        "Encountered field(s) not present in structure: 'unknown'",
        doc.validate)
class DocumentWithEmbeddedDictOfStringToInt(Document):
    # Schema under test: an embedded dict mapping string keys to int values.
    structure = {
        'field': {basestring: int},
    }
    __database__ = 'test'
    __collection__ = __name__ + '_1'
def when_validating_proper_embedded_dict_should_pass():
    """String-to-int mappings satisfy the declared embedded dict schema."""
    doc = DocumentWithEmbeddedDictOfStringToInt({
        'field': {'foo': 1, u'bar': 2}})
    doc.validate()


def when_validating_document_with_string_mapped_to_float_should_fail_validation():
    """A float value under an int-valued key is reported by dotted position.

    Note: the expected message contains the Python 2 ``repr`` of 2.3
    (``2.2999999999999998``), matching how the validator formats the value.
    """
    doc = DocumentWithEmbeddedDictOfStringToInt({
        'field': {'foo': 1, u'bar': 2.3}})
    assert_raises_with_message(
        ValidationError,
        "Position 'field.bar' was declared to be <type 'int'>, but encountered value 2.2999999999999998",
        doc.validate)
class DocumentWithMultiplePotentialTypes(Document):
    # Schema under test: a field accepting either a string or an int (OR).
    structure = {
        'field': OR(basestring, int),
    }
    __database__ = 'test'
    __collection__ = __name__ + '_2'
def when_validating_with_int_field_should_pass():
    """An int satisfies an OR(basestring, int) declaration."""
    DocumentWithMultiplePotentialTypes({'field': 5}).validate()


def when_validating_with_string_should_pass():
    """A string satisfies an OR(basestring, int) declaration."""
    DocumentWithMultiplePotentialTypes({'field': 'foo'}).validate()


def when_validating_with_list_of_int_should_fail():
    """A value matching neither OR alternative is rejected with both types listed."""
    assert_raises_with_message(
        ValidationError,
        "Position 'field' was declared to be <OR <type 'int'>, <type 'basestring'>>, but encountered value [1]",
        DocumentWithMultiplePotentialTypes({'field': [1]}).validate)
class DocumentWithMultiplePotentialValues(Document):
    # Schema under test: a field restricted to the exact values 1 or 'foo' (IS).
    structure = {
        'field': IS(1, 'foo'),
    }
    __database__ = 'test'
    __collection__ = __name__ + '_3'
def when_validating_with_1_field_should_pass():
    """The literal value 1 is one of the allowed IS values."""
    DocumentWithMultiplePotentialValues({'field': 1}).validate()


def when_validating_with_foo_should_pass():
    """The literal value 'foo' is one of the allowed IS values."""
    DocumentWithMultiplePotentialValues({'field': 'foo'}).validate()


def when_validating_with_2_should_fail():
    """A value outside the IS set is rejected with the allowed values listed."""
    assert_raises_with_message(
        ValidationError,
        "Position 'field' was declared to be <IS 1, 'foo'>, but encountered value 2",
        DocumentWithMultiplePotentialValues({'field': 2}).validate)
| {
"content_hash": "9fca13cedc0b23718ff64d03e6a55c09",
"timestamp": "",
"source": "github",
"line_count": 105,
"max_line_length": 112,
"avg_line_length": 31.97142857142857,
"alnum_prop": 0.6821566875186178,
"repo_name": "allancaffee/scaly-mongo",
"id": "495aea13d5a535e877e1bf4a3deeab9e4e1d89ae",
"size": "3357",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/acceptance/test_validation.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "144127"
}
],
"symlink_target": ""
} |
#!/usr/bin/env python
#
# Copyright (c) 2001 - 2016 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "test/Java/JARCOMSTR.py rel_2.5.1:3735:9dc6cee5c168 2016/11/03 14:02:02 bdbaddog"
"""
Test that the $JARCOMSTR construction variable allows you to configure
the jar output.
"""
import TestSCons
_python_ = TestSCons._python_
test = TestSCons.TestSCons()
test.write('myjar.py', r"""
import sys
outfile = open(sys.argv[1], 'wb')
for f in sys.argv[2:]:
infile = open(f, 'rb')
for l in [l for l in infile.readlines() if l != '/*jar*/\n']:
outfile.write(l)
sys.exit(0)
""")
test.write('SConstruct', """
env = Environment(TOOLS = ['default', 'jar'],
JARCOM = r'%(_python_)s myjar.py $TARGET $SOURCES',
JARCOMSTR = "Jar'ing up $TARGET from $SOURCES")
env.Jar(target = 'test1', source = ['file1.in', 'file2.in', 'file3.in'])
""" % locals())
test.write('file1.in', "file1.in\n/*jar*/\n")
test.write('file2.in', "file2.in\n/*jar*/\n")
test.write('file3.in', "file3.in\n/*jar*/\n")
test.run(stdout = test.wrap_stdout("""\
Jar'ing up test1.jar from file1.in file2.in file3.in
"""))
test.must_match('test1.jar', "file1.in\nfile2.in\nfile3.in\n")
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| {
"content_hash": "82b5274072d74db883d9f136602aa910",
"timestamp": "",
"source": "github",
"line_count": 75,
"max_line_length": 96,
"avg_line_length": 31.653333333333332,
"alnum_prop": 0.6992417860151643,
"repo_name": "EmanueleCannizzaro/scons",
"id": "c8954c7e74d1e97887dd457343311ff211b7dbe5",
"size": "2374",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/Java/JARCOMSTR.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "2491"
},
{
"name": "C",
"bytes": "659"
},
{
"name": "C++",
"bytes": "598"
},
{
"name": "CSS",
"bytes": "18502"
},
{
"name": "D",
"bytes": "1997"
},
{
"name": "HTML",
"bytes": "817651"
},
{
"name": "Java",
"bytes": "6860"
},
{
"name": "JavaScript",
"bytes": "215495"
},
{
"name": "Makefile",
"bytes": "3795"
},
{
"name": "Perl",
"bytes": "29978"
},
{
"name": "Python",
"bytes": "7510453"
},
{
"name": "Roff",
"bytes": "556545"
},
{
"name": "Ruby",
"bytes": "11074"
},
{
"name": "Shell",
"bytes": "52682"
},
{
"name": "XSLT",
"bytes": "7567242"
}
],
"symlink_target": ""
} |
import logging
import re
import textwrap
import unittest
import mock
from flexmock import flexmock, flexmock_teardown
from hamcrest import assert_that, has_length, equal_to, contains_string, has_key, \
is_, instance_of
from ncclient.devices.junos import JunosDeviceHandler
from ncclient.operations import RPCError, TimeoutExpiredError
from ncclient.xml_ import NCElement, to_ele, to_xml
from netman.adapters.switches import juniper
from netman.adapters.switches.juniper import Juniper
from netman.adapters.switches.juniper.standard import JuniperCustomStrategies
from netman.core.objects.access_groups import OUT, IN
from netman.core.objects.exceptions import LockedSwitch, VlanAlreadyExist, BadVlanNumber, BadVlanName, UnknownVlan, \
InterfaceInWrongPortMode, UnknownInterface, AccessVlanNotSet, NativeVlanNotSet, TrunkVlanNotSet, VlanAlreadyInTrunk, \
BadBondNumber, UnknownBond, InterfaceNotInBond, BondAlreadyExist, OperationNotCompleted, InvalidMtuSize
from netman.core.objects.interface_states import OFF, ON
from netman.core.objects.port_modes import ACCESS, TRUNK, BOND_MEMBER
from netman.core.objects.switch_descriptor import SwitchDescriptor
from netman.core.objects.switch_transactional import FlowControlSwitch
from tests import ignore_deprecation_warnings
@ignore_deprecation_warnings
def test_factory():
    """The standard factory wraps a Juniper switch in a FlowControlSwitch,
    passing the lock and the switch descriptor through untouched."""
    lock = mock.Mock()

    switch = juniper.standard_factory(SwitchDescriptor(hostname='hostname', model='juniper', username='username', password='password', port=22), lock)

    assert_that(switch, instance_of(FlowControlSwitch))
    assert_that(switch.wrapped_switch, instance_of(Juniper))
    assert_that(switch.lock, is_(lock))

    # The descriptor fields must survive the factory unchanged.
    assert_that(switch.switch_descriptor.hostname, equal_to("hostname"))
    assert_that(switch.switch_descriptor.model, equal_to("juniper"))
    assert_that(switch.switch_descriptor.username, equal_to("username"))
    assert_that(switch.switch_descriptor.password, equal_to("password"))
    assert_that(switch.switch_descriptor.port, equal_to(22))
class JuniperTest(unittest.TestCase):
def setUp(self):
self.switch = juniper.standard.netconf(SwitchDescriptor(model='juniper', hostname="toto"))
self.netconf_mock = flexmock()
self.switch.netconf = self.netconf_mock
self.switch.in_transaction = True
def tearDown(self):
flexmock_teardown()
def test_switch_has_a_logger_configured_with_the_switch_name(self):
assert_that(self.switch.logger.name, is_(Juniper.__module__ + ".toto"))
def test_get_vlans(self):
self.switch.in_transaction = False
self.netconf_mock.should_receive("get_config").with_args(source="running", filter=is_xml("""
<filter>
<configuration>
<vlans />
<interfaces />
</configuration>
</filter>
""")).and_return(a_configuration("""
<vlans>
<vlan>
<name>STANDARD</name>
<vlan-id>10</vlan-id>
<description>my-description</description>
</vlan>
<vlan>
<name>NO-VLAN-ID</name>
<description>shizzle</description>
</vlan>
<vlan>
<name>WITH-IF</name>
<vlan-id>20</vlan-id>
<l3-interface>vlan.20</l3-interface>
</vlan>
<vlan>
<name>WITH-IF-MULTI-IP</name>
<vlan-id>40</vlan-id>
<l3-interface>vlan.70</l3-interface>
</vlan>
</vlans>
<interfaces>
<interface>
<name>ge-0/0/1</name>
<unit>
<name>0</name>
<family>
<ethernet-switching>
</ethernet-switching>
</family>
</unit>
</interface>
<interface>
<name>vlan</name>
<unit>
<name>20</name>
<family>
<inet>
<address>
<name>1.1.1.1/24</name>
</address>
<filter>
<input>
<filter-name>AC-IN</filter-name>
</input>
<output>
<filter-name>AC-OUT</filter-name>
</output>
</filter>
</inet>
</family>
</unit>
<unit>
<name>40</name>
</unit>
<unit>
<name>70</name>
<family>
<inet>
<address>
<name>2.1.1.1/24</name>
</address>
<address>
<name>4.1.1.1/24</name>
</address>
<address>
<name>3.1.1.1/24</name>
</address>
</inet>
</family>
</unit>
</interface>
</interfaces>
"""))
vlan10, vlan20, vlan40 = self.switch.get_vlans()
assert_that(vlan10.number, equal_to(10))
assert_that(vlan10.name, equal_to("my-description"))
assert_that(vlan10.access_groups[IN], equal_to(None))
assert_that(vlan10.access_groups[OUT], equal_to(None))
assert_that(vlan10.ips, has_length(0))
assert_that(vlan20.number, equal_to(20))
assert_that(vlan20.name, equal_to(None))
assert_that(vlan20.access_groups[IN], equal_to("AC-IN"))
assert_that(vlan20.access_groups[OUT], equal_to("AC-OUT"))
assert_that(vlan20.ips, has_length(1))
vlan20ip1 = vlan20.ips[0]
assert_that(str(vlan20ip1.ip), equal_to("1.1.1.1"))
assert_that(vlan20ip1.prefixlen, equal_to(24))
assert_that(vlan40.number, equal_to(40))
assert_that(vlan40.name, equal_to(None))
assert_that(vlan40.access_groups[IN], equal_to(None))
assert_that(vlan40.access_groups[OUT], equal_to(None))
vlan40ip1, vlan40ip2, vlan40ip3 = vlan40.ips
assert_that(str(vlan40ip1.ip), equal_to("2.1.1.1"))
assert_that(vlan40ip1.prefixlen, equal_to(24))
assert_that(str(vlan40ip2.ip), equal_to("3.1.1.1"))
assert_that(vlan40ip2.prefixlen, equal_to(24))
assert_that(str(vlan40ip3.ip), equal_to("4.1.1.1"))
assert_that(vlan40ip3.prefixlen, equal_to(24))
def test_get_vlans_where_vlan_interfaces_can_also_be_called_irb(self):
    """get_vlans() resolves each vlan's <l3-interface> (vlan.N, irb.N or any other
    prefix) to the matching interface unit to collect its IPs; an l3-interface that
    names no configured interface (notfound.20) yields a vlan with no IPs.

    in_transaction=True, so the config is read from the "candidate" datastore.
    """
    self.switch.in_transaction = True
    self.netconf_mock.should_receive("get_config").with_args(source="candidate", filter=is_xml("""
        <filter>
          <configuration>
            <vlans />
            <interfaces />
          </configuration>
        </filter>
    """)).and_return(a_configuration("""
        <vlans>
          <vlan>
            <name>ON_VLAN</name>
            <vlan-id>10</vlan-id>
            <l3-interface>vlan.10</l3-interface>
          </vlan>
          <vlan>
            <name>ON_IRB</name>
            <vlan-id>20</vlan-id>
            <l3-interface>irb.20</l3-interface>
          </vlan>
          <vlan>
            <name>ON_WHATEVER</name>
            <vlan-id>30</vlan-id>
            <l3-interface>whatever.30</l3-interface>
          </vlan>
          <vlan>
            <name>ON_NOTFOUND</name>
            <vlan-id>40</vlan-id>
            <l3-interface>notfound.20</l3-interface>
          </vlan>
        </vlans>
        <interfaces>
          <interface>
            <name>ge-0/0/1</name>
          </interface>
          <interface>
            <name>vlan</name>
            <unit>
              <name>10</name>
              <family>
                <inet>
                  <address>
                    <name>1.1.1.1/24</name>
                  </address>
                </inet>
              </family>
            </unit>
          </interface>
          <interface>
            <name>irb</name>
            <unit>
              <name>20</name>
              <family>
                <inet>
                  <address>
                    <name>2.1.1.1/24</name>
                  </address>
                </inet>
              </family>
            </unit>
          </interface>
          <interface>
            <name>whatever</name>
            <unit>
              <name>30</name>
              <family>
                <inet>
                  <address>
                    <name>3.1.1.1/24</name>
                  </address>
                </inet>
              </family>
            </unit>
          </interface>
        </interfaces>
    """))

    vlan10, vlan20, vlan30, vlan40 = self.switch.get_vlans()

    # Each vlan picked up the IP of its own l3-interface unit, regardless of prefix.
    assert_that(str(vlan10.ips[0].ip), equal_to("1.1.1.1"))
    assert_that(str(vlan20.ips[0].ip), equal_to("2.1.1.1"))
    assert_that(str(vlan30.ips[0].ip), equal_to("3.1.1.1"))
    # The dangling l3-interface reference resolves to no IPs rather than failing.
    assert_that(vlan40.ips, has_length(0))
def test_get_vlan_interfaces(self):
    """get_vlan_interfaces(705) returns the names of interfaces whose
    ethernet-switching members include vlan 705 — either literally (705) or
    through a range that contains it (700-800) — and skips the others.
    """
    self.switch.in_transaction = False
    self.netconf_mock.should_receive("get_config").with_args(source="running", filter=is_xml("""
        <filter>
          <configuration>
            <vlans>
              <vlan>
                <vlan-id>705</vlan-id>
              </vlan>
            </vlans>
            <interfaces />
          </configuration>
        </filter>
    """)).and_return(a_configuration("""
        <vlans>
          <vlan>
            <name>VLAN705</name>
            <vlan-id>705</vlan-id>
          </vlan>
        </vlans>
        <interfaces>
          <interface>
            <name>xe-0/0/6</name>
            <unit>
              <family>
                <ethernet-switching>
                  <vlan>
                    <members>687</members>
                    <members>705</members>
                    <members>708</members>
                  </vlan>
                </ethernet-switching>
              </family>
            </unit>
          </interface>
          <interface>
            <name>xe-0/0/7</name>
            <unit>
              <family>
                <ethernet-switching>
                  <vlan>
                    <members>705</members>
                  </vlan>
                </ethernet-switching>
              </family>
            </unit>
          </interface>
          <interface>
            <name>xe-0/0/8</name>
            <unit>
              <family>
                <ethernet-switching>
                  <vlan>
                    <members>456</members>
                  </vlan>
                </ethernet-switching>
              </family>
            </unit>
          </interface>
          <interface>
            <name>xe-0/0/9</name>
            <unit>
              <family>
                <ethernet-switching>
                  <vlan>
                    <members>700-800</members>
                  </vlan>
                </ethernet-switching>
              </family>
            </unit>
          </interface>
        </interfaces>
    """))

    vlan_interfaces = self.switch.get_vlan_interfaces(705)

    # xe-0/0/8 (vlan 456) is excluded; the 700-800 range on xe-0/0/9 matches.
    assert_that(vlan_interfaces, equal_to(["xe-0/0/6", "xe-0/0/7", "xe-0/0/9"]))
def test_get_vlan_interfaces_with_name_as_member(self):
    """An interface may reference the vlan by its configured name ("bleu")
    instead of its number; get_vlan_interfaces(705) still matches it.
    """
    self.switch.in_transaction = False
    self.netconf_mock.should_receive("get_config").with_args(source="running", filter=is_xml("""
        <filter>
          <configuration>
            <vlans>
              <vlan>
                <vlan-id>705</vlan-id>
              </vlan>
            </vlans>
            <interfaces />
          </configuration>
        </filter>
    """)).and_return(a_configuration("""
        <vlans>
          <vlan>
            <name>bleu</name>
            <vlan-id>705</vlan-id>
          </vlan>
        </vlans>
        <interfaces>
          <interface>
            <name>xe-0/0/9</name>
            <unit>
              <family>
                <ethernet-switching>
                  <vlan>
                    <members>bleu</members>
                  </vlan>
                </ethernet-switching>
              </family>
            </unit>
          </interface>
        </interfaces>
    """))

    vlan_interfaces = self.switch.get_vlan_interfaces(705)

    assert_that(vlan_interfaces, equal_to(["xe-0/0/9"]))
def test_get_vlan_interfaces_nonexisting_vlan(self):
    """Asking for the interfaces of a vlan that is not configured raises
    UnknownVlan, even if some interface happens to reference another vlan.
    """
    self.switch.in_transaction = False
    self.netconf_mock.should_receive("get_config").with_args(source="running", filter=is_xml("""
        <filter>
          <configuration>
            <vlans>
              <vlan>
                <vlan-id>9999999</vlan-id>
              </vlan>
            </vlans>
            <interfaces />
          </configuration>
        </filter>
    """)).and_return(a_configuration("""
        <vlans />
        <interfaces>
          <interface>
            <name>xe-0/0/9</name>
            <unit>
              <family>
                <ethernet-switching>
                  <vlan>
                    <members>705</members>
                  </vlan>
                </ethernet-switching>
              </family>
            </unit>
          </interface>
        </interfaces>
    """))

    with self.assertRaises(UnknownVlan):
        self.switch.get_vlan_interfaces("9999999")
def test_get_vlan_with_no_interface(self):
    """get_vlan(10) for a vlan without an l3-interface returns a vlan with the
    description as its name, no access-groups and no IPs.
    """
    self.switch.in_transaction = False
    self.netconf_mock.should_receive("get_config").with_args(source="running", filter=is_xml("""
        <filter>
          <configuration>
            <vlans>
              <vlan>
                <vlan-id>10</vlan-id>
              </vlan>
            </vlans>
            <interfaces />
          </configuration>
        </filter>
    """)).and_return(a_configuration("""
        <vlans>
          <vlan>
            <name>STANDARD</name>
            <vlan-id>10</vlan-id>
            <description>my-description</description>
          </vlan>
        </vlans>
    """))

    vlan = self.switch.get_vlan(10)

    assert_that(vlan.number, equal_to(10))
    # The Junos <description> is what this adapter exposes as the vlan name.
    assert_that(vlan.name, equal_to("my-description"))
    assert_that(vlan.access_groups[IN], equal_to(None))
    assert_that(vlan.access_groups[OUT], equal_to(None))
    assert_that(vlan.ips, has_length(0))
def test_get_vlan_with_unknown_vlan(self):
    """get_vlan() on an empty vlan configuration raises UnknownVlan with a
    message naming the missing vlan.
    """
    self.switch.in_transaction = False
    self.netconf_mock.should_receive("get_config").with_args(source="running", filter=is_xml("""
        <filter>
          <configuration>
            <vlans>
              <vlan>
                <vlan-id>10</vlan-id>
              </vlan>
            </vlans>
            <interfaces />
          </configuration>
        </filter>
    """)).and_return(a_configuration("""
    """))

    with self.assertRaises(UnknownVlan) as expect:
        self.switch.get_vlan(10)

    assert_that(str(expect.exception), equal_to("Vlan 10 not found"))
def test_get_vlan_with_interface(self):
    """get_vlan(20) follows the vlan's l3-interface (vlan.20) to unit 20 and
    picks up its address and in/out filters; the other units (40, 70) and the
    unrelated physical interfaces are ignored.
    """
    self.switch.in_transaction = False
    self.netconf_mock.should_receive("get_config").with_args(source="running", filter=is_xml("""
        <filter>
          <configuration>
            <vlans>
              <vlan>
                <vlan-id>20</vlan-id>
              </vlan>
            </vlans>
            <interfaces />
          </configuration>
        </filter>
    """)).and_return(a_configuration("""
        <vlans>
          <vlan>
            <name>WITH-IF</name>
            <vlan-id>20</vlan-id>
            <l3-interface>vlan.20</l3-interface>
          </vlan>
        </vlans>
        <interfaces>
          <interface>
            <name>ge-0/0/1</name>
            <unit>
              <name>0</name>
              <family>
                <ethernet-switching>
                </ethernet-switching>
              </family>
            </unit>
          </interface>
          <interface>
            <name>ge-0/0/1</name>
          </interface>
          <interface>
            <name>vlan</name>
            <unit>
              <name>20</name>
              <family>
                <inet>
                  <address>
                    <name>1.1.1.1/24</name>
                  </address>
                  <filter>
                    <input>
                      <filter-name>AC-IN</filter-name>
                    </input>
                    <output>
                      <filter-name>AC-OUT</filter-name>
                    </output>
                  </filter>
                </inet>
              </family>
            </unit>
            <unit>
              <name>40</name>
            </unit>
            <unit>
              <name>70</name>
              <family>
                <inet>
                  <address>
                    <name>2.1.1.1/24</name>
                  </address>
                  <address>
                    <name>4.1.1.1/24</name>
                  </address>
                  <address>
                    <name>3.1.1.1/24</name>
                  </address>
                </inet>
              </family>
            </unit>
          </interface>
        </interfaces>
    """))

    vlan = self.switch.get_vlan(20)

    assert_that(vlan.number, equal_to(20))
    # No <description> on the vlan, so the exposed name is None.
    assert_that(vlan.name, equal_to(None))
    assert_that(vlan.access_groups[IN], equal_to("AC-IN"))
    assert_that(vlan.access_groups[OUT], equal_to("AC-OUT"))
    assert_that(vlan.ips, has_length(1))
    vlan20ip1 = vlan.ips[0]
    assert_that(str(vlan20ip1.ip), equal_to("1.1.1.1"))
    assert_that(vlan20ip1.prefixlen, equal_to(24))
def test_get_vlan_with_interface_multi_ip(self):
    """Vlan 40's l3-interface is vlan.70 (the unit number need not match the
    vlan id); its three addresses are returned sorted by IP, not in the
    configuration order (2, 4, 3 -> 2, 3, 4).
    """
    self.switch.in_transaction = False
    self.netconf_mock.should_receive("get_config").with_args(source="running", filter=is_xml("""
        <filter>
          <configuration>
            <vlans>
              <vlan>
                <vlan-id>40</vlan-id>
              </vlan>
            </vlans>
            <interfaces />
          </configuration>
        </filter>
    """)).and_return(a_configuration("""
        <vlans>
          <vlan>
            <name>WITH-IF-MULTI-IP</name>
            <vlan-id>40</vlan-id>
            <l3-interface>vlan.70</l3-interface>
          </vlan>
        </vlans>
        <interfaces>
          <interface>
            <name>ge-0/0/1</name>
          </interface>
          <interface>
            <name>vlan</name>
            <unit>
              <name>20</name>
              <family>
                <inet>
                  <address>
                    <name>1.1.1.1/24</name>
                  </address>
                  <filter>
                    <input>
                      <filter-name>AC-IN</filter-name>
                    </input>
                    <output>
                      <filter-name>AC-OUT</filter-name>
                    </output>
                  </filter>
                </inet>
              </family>
            </unit>
            <unit>
              <name>40</name>
            </unit>
            <unit>
              <name>70</name>
              <family>
                <inet>
                  <address>
                    <name>2.1.1.1/24</name>
                  </address>
                  <address>
                    <name>4.1.1.1/24</name>
                  </address>
                  <address>
                    <name>3.1.1.1/24</name>
                  </address>
                </inet>
              </family>
            </unit>
          </interface>
        </interfaces>
    """))

    vlan = self.switch.get_vlan(40)

    assert_that(vlan.number, equal_to(40))
    assert_that(vlan.name, equal_to(None))
    # Filters on unit 20 belong to another vlan and must not leak onto this one.
    assert_that(vlan.access_groups[IN], equal_to(None))
    assert_that(vlan.access_groups[OUT], equal_to(None))
    vlanip1, vlanip2, vlanip3 = vlan.ips
    assert_that(str(vlanip1.ip), equal_to("2.1.1.1"))
    assert_that(vlanip1.prefixlen, equal_to(24))
    assert_that(str(vlanip2.ip), equal_to("3.1.1.1"))
    assert_that(vlanip2.prefixlen, equal_to(24))
    assert_that(str(vlanip3.ip), equal_to("4.1.1.1"))
    assert_that(vlanip3.prefixlen, equal_to(24))
def test_get_vlan_where_vlan_interfaces_can_also_be_called_irb(self):
    """Single-vlan variant of the irb test: get_vlan(20) resolves an
    l3-interface named irb.20 to the "irb" interface's unit 20 and returns
    its address. Reads from "candidate" since in_transaction is True.
    """
    self.switch.in_transaction = True
    self.netconf_mock.should_receive("get_config").with_args(source="candidate", filter=is_xml("""
        <filter>
          <configuration>
            <vlans>
              <vlan>
                <vlan-id>20</vlan-id>
              </vlan>
            </vlans>
            <interfaces />
          </configuration>
        </filter>
    """)).and_return(a_configuration("""
        <vlans>
          <vlan>
            <name>ON_IRB</name>
            <vlan-id>20</vlan-id>
            <l3-interface>irb.20</l3-interface>
          </vlan>
        </vlans>
        <interfaces>
          <interface>
            <name>ge-0/0/1</name>
          </interface>
          <interface>
            <name>vlan</name>
            <unit>
              <name>10</name>
              <family>
                <inet>
                  <address>
                    <name>1.1.1.1/24</name>
                  </address>
                </inet>
              </family>
            </unit>
          </interface>
          <interface>
            <name>irb</name>
            <unit>
              <name>20</name>
              <family>
                <inet>
                  <address>
                    <name>2.1.1.1/24</name>
                  </address>
                </inet>
              </family>
            </unit>
          </interface>
          <interface>
            <name>whatever</name>
            <unit>
              <name>30</name>
              <family>
                <inet>
                  <address>
                    <name>3.1.1.1/24</name>
                  </address>
                </inet>
              </family>
            </unit>
          </interface>
        </interfaces>
    """))

    vlan = self.switch.get_vlan(20)

    assert_that(str(vlan.ips[0].ip), equal_to("2.1.1.1"))
def test_get_vlan_where_vlan_interfaces_not_found(self):
    """An l3-interface referencing a non-configured interface (notfound.20)
    makes get_vlan() return the vlan with no IPs rather than raising.
    """
    self.switch.in_transaction = False
    self.netconf_mock.should_receive("get_config").with_args(source="running", filter=is_xml("""
        <filter>
          <configuration>
            <vlans>
              <vlan>
                <vlan-id>40</vlan-id>
              </vlan>
            </vlans>
            <interfaces />
          </configuration>
        </filter>
    """)).and_return(a_configuration("""
        <vlans>
          <vlan>
            <name>NOT_FOUND</name>
            <vlan-id>40</vlan-id>
            <l3-interface>notfound.20</l3-interface>
          </vlan>
        </vlans>
        <interfaces>
          <interface>
            <name>ge-0/0/1</name>
          </interface>
          <interface>
            <name>vlan</name>
            <unit>
              <name>10</name>
              <family>
                <inet>
                  <address>
                    <name>1.1.1.1/24</name>
                  </address>
                </inet>
              </family>
            </unit>
          </interface>
          <interface>
            <name>irb</name>
            <unit>
              <name>20</name>
              <family>
                <inet>
                  <address>
                    <name>2.1.1.1/24</name>
                  </address>
                </inet>
              </family>
            </unit>
          </interface>
          <interface>
            <name>whatever</name>
            <unit>
              <name>30</name>
              <family>
                <inet>
                  <address>
                    <name>3.1.1.1/24</name>
                  </address>
                </inet>
              </family>
            </unit>
          </interface>
        </interfaces>
    """))

    vlan = self.switch.get_vlan(40)

    assert_that(vlan.ips, has_length(0))
def test_get_interface(self):
    """get_interface('ge-0/0/1') on a bare ethernet-switching interface
    returns defaults: not shutdown, ACCESS mode, and no vlan/negotiation/mtu.
    """
    self.switch.in_transaction = False
    self.netconf_mock.should_receive("get_config").with_args(source="running", filter=is_xml("""
        <filter>
          <configuration>
            <interfaces>
              <interface>
                <name>ge-0/0/1</name>
              </interface>
            </interfaces>
            <vlans />
          </configuration>
        </filter>
    """)).and_return(a_configuration("""
        <interfaces>
          <interface>
            <name>ge-0/0/1</name>
            <unit>
              <name>0</name>
              <family>
                <ethernet-switching>
                </ethernet-switching>
              </family>
            </unit>
          </interface>
        </interfaces>
        <vlans/>
    """))

    interface = self.switch.get_interface('ge-0/0/1')

    assert_that(interface.name, equal_to("ge-0/0/1"))
    assert_that(interface.shutdown, equal_to(False))
    # Absence of <port-mode> defaults to ACCESS.
    assert_that(interface.port_mode, equal_to(ACCESS))
    assert_that(interface.access_vlan, equal_to(None))
    assert_that(interface.trunk_native_vlan, equal_to(None))
    assert_that(interface.trunk_vlans, equal_to([]))
    assert_that(interface.auto_negotiation, equal_to(None))
    assert_that(interface.mtu, equal_to(None))
def test_get_unconfigured_but_existing_interface_returns_an_empty_interface(self):
    """When the interface has no configuration, the adapter falls back to the
    operational <get-interface-information> RPC; a physically present port
    (admin up) is returned as an empty, defaulted interface.
    """
    self.switch.in_transaction = False
    self.netconf_mock.should_receive("get_config").with_args(source="running", filter=is_xml("""
        <filter>
          <configuration>
            <interfaces>
              <interface>
                <name>ge-0/0/27</name>
              </interface>
            </interfaces>
            <vlans />
          </configuration>
        </filter>
    """)).and_return(a_configuration("""
        <interfaces/>
        <vlans/>
    """))

    self.netconf_mock.should_receive("rpc").with_args(is_xml("""
        <get-interface-information>
          <terse/>
        </get-interface-information>
    """)).and_return(an_rpc_response(textwrap.dedent("""
        <interface-information style="terse">
          <physical-interface>
            <name>
              ge-0/0/27
            </name>
            <admin-status>
              up
            </admin-status>
            <oper-status>
              down
            </oper-status>
          </physical-interface>
        </interface-information>
    """)))

    interface = self.switch.get_interface('ge-0/0/27')

    assert_that(interface.name, equal_to("ge-0/0/27"))
    assert_that(interface.shutdown, equal_to(False))
    assert_that(interface.port_mode, equal_to(ACCESS))
    assert_that(interface.access_vlan, equal_to(None))
    assert_that(interface.trunk_native_vlan, equal_to(None))
    assert_that(interface.trunk_vlans, equal_to([]))
def test_get_unconfigured_interface_could_be_disabled(self):
    """An unconfigured interface reported with admin-status "down" by the
    operational RPC is exposed as shutdown=True.
    """
    self.switch.in_transaction = False
    self.netconf_mock.should_receive("get_config").with_args(source="running", filter=is_xml("""
        <filter>
          <configuration>
            <interfaces>
              <interface>
                <name>ge-0/0/27</name>
              </interface>
            </interfaces>
            <vlans />
          </configuration>
        </filter>
    """)).and_return(a_configuration("""
        <interfaces/>
        <vlans/>
    """))

    self.netconf_mock.should_receive("rpc").with_args(is_xml("""
        <get-interface-information>
          <terse/>
        </get-interface-information>
    """)).and_return(an_rpc_response(textwrap.dedent("""
        <interface-information style="terse">
          <physical-interface>
            <name>
              ge-0/0/27
            </name>
            <admin-status>
              down
            </admin-status>
            <oper-status>
              down
            </oper-status>
          </physical-interface>
        </interface-information>
    """)))

    assert_that(self.switch.get_interface('ge-0/0/27').shutdown, equal_to(True))
def test_get_nonexistent_interface_raises(self):
    """If the interface is neither configured nor present in the terse
    operational listing, get_interface() raises UnknownInterface.
    """
    self.switch.in_transaction = False
    self.netconf_mock.should_receive("get_config").with_args(source="running", filter=is_xml("""
        <filter>
          <configuration>
            <interfaces>
              <interface>
                <name>ge-0/0/INEXISTENT</name>
              </interface>
            </interfaces>
            <vlans />
          </configuration>
        </filter>
    """)).and_return(a_configuration("""
        <interfaces/>
        <vlans/>
    """))

    self.netconf_mock.should_receive("rpc").with_args(is_xml("""
        <get-interface-information>
          <terse/>
        </get-interface-information>
    """)).and_return(an_rpc_response(textwrap.dedent("""
        <interface-information style="terse">
          <physical-interface>
            <name>
              ge-0/0/1
            </name>
            <admin-status>
              down
            </admin-status>
            <oper-status>
              down
            </oper-status>
          </physical-interface>
        </interface-information>
    """)))

    with self.assertRaises(UnknownInterface) as expect:
        self.switch.get_interface('ge-0/0/INEXISTENT')

    assert_that(str(expect.exception), equal_to("Unknown interface ge-0/0/INEXISTENT"))
def test_get_interfaces(self):
    """get_interfaces() merges the terse operational listing with the running
    configuration and maps each physical port:

    - ge-0/0/1: bare ethernet-switching -> all defaults.
    - ge-0/0/2: <disable/>, mtu and an access vlan member.
    - ge-0/0/3: trunk with overlapping members (999-1001 and 1000) flattened
      to a deduplicated sorted list, native vlan, no-auto-negotiation.
    - ge-0/0/4: trunk with auto-negotiation.
    - ge-0/0/5: 802.3ad member of ae10 -> BOND_MEMBER with bond_master 10.

    The "vlan" and "ae10" entries are not physical ports and are not listed.
    """
    self.switch.in_transaction = False
    self.netconf_mock.should_receive("rpc").with_args(is_xml("""
        <get-interface-information>
          <terse/>
        </get-interface-information>
    """)).and_return(an_rpc_response(textwrap.dedent("""
        <interface-information style="terse">
          <physical-interface>
            <name>
              ge-0/0/1
            </name>
            <admin-status>
              up
            </admin-status>
            <oper-status>
              down
            </oper-status>
            <logical-interface>
              <name>
                ge-0/0/1.0
              </name>
              <admin-status>
                up
              </admin-status>
              <oper-status>
                down
              </oper-status>
              <filter-information>
              </filter-information>
              <address-family>
                <address-family-name>
                  eth-switch
                </address-family-name>
              </address-family>
            </logical-interface>
          </physical-interface>
          <physical-interface>
            <name>
              ge-0/0/2
            </name>
            <admin-status>
              down
            </admin-status>
            <oper-status>
              down
            </oper-status>
            <logical-interface>
              <name>
                ge-0/0/2.0
              </name>
              <admin-status>
                up
              </admin-status>
              <oper-status>
                down
              </oper-status>
              <filter-information>
              </filter-information>
              <address-family>
                <address-family-name>
                  eth-switch
                </address-family-name>
              </address-family>
            </logical-interface>
          </physical-interface>
          <physical-interface>
            <name>
              ge-0/0/3
            </name>
            <admin-status>
              up
            </admin-status>
            <oper-status>
              down
            </oper-status>
            <logical-interface>
              <name>
                ge-0/0/3.0
              </name>
              <admin-status>
                up
              </admin-status>
              <oper-status>
                down
              </oper-status>
              <filter-information>
              </filter-information>
              <address-family>
                <address-family-name>
                  eth-switch
                </address-family-name>
              </address-family>
            </logical-interface>
          </physical-interface>
          <physical-interface>
            <name>
              ge-0/0/4
            </name>
            <admin-status>up</admin-status>
            <oper-status>down</oper-status>
            <logical-interface>
              <name>
                ge-0/0/4.0
              </name>
              <admin-status>
                up
              </admin-status>
              <oper-status>
                down
              </oper-status>
              <filter-information>
              </filter-information>
              <address-family>
                <address-family-name>
                  eth-switch
                </address-family-name>
              </address-family>
            </logical-interface>
          </physical-interface>
          <physical-interface>
            <name>
              ge-0/0/5
            </name>
            <admin-status>
              up
            </admin-status>
            <oper-status>
              down
            </oper-status>
          </physical-interface>
        </interface-information>
    """)))

    self.netconf_mock.should_receive("get_config").with_args(source="running", filter=is_xml("""
        <filter>
          <configuration>
            <interfaces />
            <vlans />
          </configuration>
        </filter>
    """)).and_return(a_configuration("""
        <interfaces>
          <interface>
            <name>ge-0/0/1</name>
            <unit>
              <name>0</name>
              <family>
                <ethernet-switching>
                </ethernet-switching>
              </family>
            </unit>
          </interface>
          <interface>
            <name>ge-0/0/2</name>
            <disable />
            <description>Howdy</description>
            <mtu>5000</mtu>
            <unit>
              <name>0</name>
              <family>
                <ethernet-switching>
                  <vlan>
                    <members>1000</members>
                  </vlan>
                </ethernet-switching>
              </family>
            </unit>
          </interface>
          <interface>
            <name>ge-0/0/3</name>
            <ether-options>
              <no-auto-negotiation/>
            </ether-options>
            <unit>
              <name>0</name>
              <family>
                <ethernet-switching>
                  <port-mode>trunk</port-mode>
                  <vlan>
                    <members>999-1001</members>
                    <members>1000</members>
                  </vlan>
                  <native-vlan-id>2000</native-vlan-id>
                </ethernet-switching>
              </family>
            </unit>
          </interface>
          <interface>
            <name>ge-0/0/4</name>
            <ether-options>
              <auto-negotiation/>
            </ether-options>
            <unit>
              <name>0</name>
              <family>
                <ethernet-switching>
                  <port-mode>trunk</port-mode>
                </ethernet-switching>
              </family>
            </unit>
          </interface>
          <interface>
            <name>ge-0/0/5</name>
            <ether-options>
              <speed>
                <ethernet-100m/>
              </speed>
              <ieee-802.3ad>
                <bundle>ae10</bundle>
              </ieee-802.3ad>
            </ether-options>
          </interface>
          <interface>
            <name>vlan</name>
            <unit>
              <name>40</name>
            </unit>
          </interface>
          <interface>
            <name>ae10</name>
            <aggregated-ether-options>
              <lacp>
                <active/>
                <periodic>slow</periodic>
              </lacp>
            </aggregated-ether-options>
            <unit>
              <name>0</name>
              <family>
                <ethernet-switching />
              </family>
            </unit>
          </interface>
        </interfaces>
        <vlans/>
    """))

    if1, if2, if3, if4, if5 = self.switch.get_interfaces()

    assert_that(if1.name, equal_to("ge-0/0/1"))
    assert_that(if1.shutdown, equal_to(False))
    assert_that(if1.port_mode, equal_to(ACCESS))
    assert_that(if1.access_vlan, equal_to(None))
    assert_that(if1.trunk_native_vlan, equal_to(None))
    assert_that(if1.trunk_vlans, equal_to([]))
    assert_that(if1.auto_negotiation, equal_to(None))
    assert_that(if1.mtu, equal_to(None))

    assert_that(if2.name, equal_to("ge-0/0/2"))
    # <disable /> in the configuration wins over the operational admin-status.
    assert_that(if2.shutdown, equal_to(True))
    assert_that(if2.port_mode, equal_to(ACCESS))
    assert_that(if2.access_vlan, equal_to(1000))
    assert_that(if2.trunk_native_vlan, equal_to(None))
    assert_that(if2.trunk_vlans, equal_to([]))
    assert_that(if2.mtu, equal_to(5000))

    assert_that(if3.name, equal_to("ge-0/0/3"))
    assert_that(if3.port_mode, equal_to(TRUNK))
    assert_that(if3.access_vlan, equal_to(None))
    assert_that(if3.trunk_native_vlan, equal_to(2000))
    # Range 999-1001 and duplicate member 1000 collapse to a sorted unique list.
    assert_that(if3.trunk_vlans, equal_to([999, 1000, 1001]))
    assert_that(if3.auto_negotiation, equal_to(False))

    assert_that(if4.name, equal_to("ge-0/0/4"))
    assert_that(if4.trunk_native_vlan, equal_to(None))
    assert_that(if4.trunk_vlans, equal_to([]))
    assert_that(if4.auto_negotiation, equal_to(True))

    assert_that(if5.name, equal_to("ge-0/0/5"))
    assert_that(if5.port_mode, equal_to(BOND_MEMBER))
    # Bundle "ae10" is exposed as the numeric bond id 10.
    assert_that(if5.bond_master, equal_to(10))
def test_get_interfaces_lists_configuration_less_interfaces(self):
    """Ports present in the terse operational listing but absent from the
    configuration still appear in get_interfaces(), with shutdown reflecting
    their operational admin-status.
    """
    self.switch.in_transaction = False
    self.netconf_mock.should_receive("rpc").with_args(is_xml("""
        <get-interface-information>
          <terse/>
        </get-interface-information>
    """)).and_return(an_rpc_response(textwrap.dedent("""
        <interface-information style="terse">
          <physical-interface>
            <name>
              ge-0/0/1
            </name>
            <admin-status>
              up
            </admin-status>
            <oper-status>
              down
            </oper-status>
          </physical-interface>
          <physical-interface>
            <name>
              ge-0/0/2
            </name>
            <admin-status>
              down
            </admin-status>
            <oper-status>
              down
            </oper-status>
          </physical-interface>
        </interface-information>
    """)))

    self.netconf_mock.should_receive("get_config").with_args(source="running", filter=is_xml("""
        <filter>
          <configuration>
            <interfaces />
            <vlans />
          </configuration>
        </filter>
    """)).and_return(a_configuration("""
        <interfaces />
        <vlans/>
    """))

    if1, if2 = self.switch.get_interfaces()

    assert_that(if1.name, equal_to("ge-0/0/1"))
    assert_that(if1.shutdown, equal_to(False))
    assert_that(if1.port_mode, equal_to(ACCESS))
    assert_that(if1.access_vlan, equal_to(None))
    assert_that(if1.trunk_native_vlan, equal_to(None))
    assert_that(if1.trunk_vlans, equal_to([]))

    assert_that(if2.name, equal_to("ge-0/0/2"))
    assert_that(if2.shutdown, equal_to(True))
def test_get_interfaces_supports_named_vlans(self):
    """A member given by vlan name (MON_VLAN_PREFERE) is translated to the
    matching vlan-id (1234) for access_vlan. Candidate datastore is used
    because in_transaction is True.
    """
    self.switch.in_transaction = True
    self.netconf_mock.should_receive("rpc").with_args(is_xml("""
        <get-interface-information>
          <terse/>
        </get-interface-information>
    """)).and_return(an_rpc_response(textwrap.dedent("""
        <interface-information style="terse">
          <physical-interface>
            <name>
              ge-0/0/1
            </name>
            <admin-status>
              up
            </admin-status>
            <oper-status>
              down
            </oper-status>
            <logical-interface>
              <name>
                ge-0/0/1.0
              </name>
              <admin-status>
                up
              </admin-status>
              <oper-status>
                down
              </oper-status>
              <filter-information>
              </filter-information>
              <address-family>
                <address-family-name>
                  eth-switch
                </address-family-name>
              </address-family>
            </logical-interface>
          </physical-interface>
        </interface-information>
    """)))

    self.netconf_mock.should_receive("get_config").with_args(source="candidate", filter=is_xml("""
        <filter>
          <configuration>
            <interfaces />
            <vlans />
          </configuration>
        </filter>
    """)).and_return(a_configuration("""
        <vlans>
          <vlan>
            <name>MON_VLAN_PREFERE</name>
            <vlan-id>1234</vlan-id>
            <description>Oh yeah</description>
          </vlan>
        </vlans>
        <interfaces>
          <interface>
            <name>ge-0/0/1</name>
            <unit>
              <name>0</name>
              <family>
                <ethernet-switching>
                  <vlan>
                    <members>MON_VLAN_PREFERE</members>
                  </vlan>
                </ethernet-switching>
              </family>
            </unit>
          </interface>
        </interfaces>
    """))

    if1, = self.switch.get_interfaces()

    assert_that(if1.name, equal_to("ge-0/0/1"))
    assert_that(if1.access_vlan, equal_to(1234))
def test_add_vlan(self):
    """add_vlan(1000, name="Shizzle") edits the candidate config with a
    generated VLAN1000 element whose description carries the requested name.
    """
    self.netconf_mock.should_receive("get_config").with_args(source="candidate", filter=is_xml("""
        <filter>
          <configuration>
            <vlans />
          </configuration>
        </filter>
    """)).and_return(a_configuration("""
        <vlans>
          <vlan>
            <name>PATATE</name>
            <vlan-id>900</vlan-id>
          </vlan>
        </vlans>
    """))

    self.netconf_mock.should_receive("edit_config").once().with_args(target="candidate", config=is_xml("""
        <config>
          <configuration>
            <vlans>
              <vlan>
                <name>VLAN1000</name>
                <vlan-id>1000</vlan-id>
                <description>Shizzle</description>
              </vlan>
            </vlans>
          </configuration>
        </config>
    """)).and_return(an_ok_response())

    self.switch.add_vlan(1000, name="Shizzle")
def test_add_vlan_already_in_use_raises(self):
    """Adding a vlan whose id is already configured (under any name) raises
    VlanAlreadyExist and performs no edit_config.
    """
    self.netconf_mock.should_receive("get_config").with_args(source="candidate", filter=is_xml("""
        <filter>
          <configuration>
            <vlans />
          </configuration>
        </filter>
    """)).and_return(a_configuration("""
        <vlans>
          <vlan>
            <name>PATATE</name>
            <vlan-id>1000</vlan-id>
          </vlan>
        </vlans>
    """))

    self.netconf_mock.should_receive("edit_config").never()

    with self.assertRaises(VlanAlreadyExist) as expect:
        self.switch.add_vlan(1000)

    assert_that(str(expect.exception), contains_string("Vlan 1000 already exist"))
def test_add_existing_vlan_raises(self):
    """Same as the previous test but the existing vlan carries the generated
    name (VLAN1000): still VlanAlreadyExist and no edit_config.
    """
    self.netconf_mock.should_receive("get_config").with_args(source="candidate", filter=is_xml("""
        <filter>
          <configuration>
            <vlans />
          </configuration>
        </filter>
    """)).and_return(a_configuration("""
        <vlans>
          <vlan>
            <name>VLAN1000</name>
            <vlan-id>1000</vlan-id>
          </vlan>
        </vlans>
    """))

    self.netconf_mock.should_receive("edit_config").never()

    with self.assertRaises(VlanAlreadyExist) as expect:
        self.switch.add_vlan(1000)

    assert_that(str(expect.exception), contains_string("Vlan 1000 already exist"))
def test_add_vlan_bad_vlan_id(self):
    """A Junos RPCError complaining that the vlan-id is out of range
    (1..4094) is translated to BadVlanNumber.
    """
    self.netconf_mock.should_receive("get_config").with_args(source="candidate", filter=is_xml("""
        <filter>
          <configuration>
            <vlans />
          </configuration>
        </filter>
    """)).and_return(a_configuration(""))

    self.netconf_mock.should_receive("edit_config").once().with_args(target="candidate", config=is_xml("""
        <config>
          <configuration>
            <vlans>
              <vlan>
                <name>VLAN9000</name>
                <vlan-id>9000</vlan-id>
              </vlan>
            </vlans>
          </configuration>
        </config>
    """)).and_raise(RPCError(to_ele(textwrap.dedent("""
        <rpc-error xmlns="urn:ietf:params:xml:ns:netconf:base:1.0" xmlns:junos="http://xml.juniper.net/junos/11.4R1/junos" xmlns:nc="urn:ietf:params:xml:ns:netconf:base:1.0">
        <error-severity>error</error-severity>
        <error-info>
        <bad-element>9000</bad-element>
        </error-info>
        <error-message>Value 9000 is not within range (1..4094)</error-message>
        </rpc-error>
    """))))

    with self.assertRaises(BadVlanNumber) as expect:
        self.switch.add_vlan(9000)

    assert_that(str(expect.exception), equal_to("Vlan number is invalid"))
def test_add_vlan_bad_vlan_name(self):
    """A Junos RPCError about the description length (2..255) is translated
    to BadVlanName.
    """
    self.netconf_mock.should_receive("get_config").with_args(source="candidate", filter=is_xml("""
        <filter>
          <configuration>
            <vlans />
          </configuration>
        </filter>
    """)).and_return(a_configuration(""))

    self.netconf_mock.should_receive("edit_config").once().with_args(target="candidate", config=is_xml("""
        <config>
          <configuration>
            <vlans>
              <vlan>
                <name>VLAN1000</name>
                <vlan-id>1000</vlan-id>
                <description>a</description>
              </vlan>
            </vlans>
          </configuration>
        </config>
    """)).and_raise(RPCError(to_ele(textwrap.dedent("""
        <rpc-error xmlns="urn:ietf:params:xml:ns:netconf:base:1.0" xmlns:junos="http://xml.juniper.net/junos/11.4R1/junos" xmlns:nc="urn:ietf:params:xml:ns:netconf:base:1.0">
        <error-severity>error</error-severity>
        <error-info>
        <bad-element>a</bad-element>
        </error-info>
        <error-message>Length 1 is not within range (2..255)</error-message>
        </rpc-error>
    """))))

    with self.assertRaises(BadVlanName) as expect:
        self.switch.add_vlan(1000, "a")

    assert_that(str(expect.exception), equal_to("Vlan name is invalid"))
def test_remove_vlan_also_removes_associated_vlan_interface(self):
    """remove_vlan(10) deletes the vlan by its configured name AND deletes
    the "vlan" interface unit referenced by its l3-interface (vlan.25).
    """
    self.netconf_mock.should_receive("get_config").with_args(source="candidate", filter=is_xml("""
        <filter>
          <configuration>
            <vlans />
            <interfaces />
          </configuration>
        </filter>
    """)).and_return(a_configuration("""
        <vlans>
          <vlan>
            <name>MEH</name>
            <vlan-id>5</vlan-id>
          </vlan>
          <vlan>
            <name>STANDARD</name>
            <vlan-id>10</vlan-id>
            <l3-interface>vlan.25</l3-interface>
          </vlan>
          <vlan>
            <name>MEH2</name>
            <vlan-id>15</vlan-id>
          </vlan>
        </vlans>
    """))

    self.netconf_mock.should_receive("edit_config").once().with_args(target="candidate", config=is_xml("""
        <config>
          <configuration>
            <vlans>
              <vlan operation="delete">
                <name>STANDARD</name>
              </vlan>
            </vlans>
            <interfaces>
              <interface>
                <name>vlan</name>
                <unit operation="delete">
                  <name>25</name>
                </unit>
              </interface>
            </interfaces>
          </configuration>
        </config>
    """)).and_return(an_ok_response())

    self.switch.remove_vlan(10)
def test_remove_vlan_also_removes_associated_vlan_interface_even_if_non_standard_name(self):
    """Same as the previous test, but the l3-interface is irb.25: the delete
    targets the "irb" interface's unit instead of "vlan".
    """
    self.netconf_mock.should_receive("get_config").with_args(source="candidate", filter=is_xml("""
        <filter>
          <configuration>
            <vlans />
            <interfaces />
          </configuration>
        </filter>
    """)).and_return(a_configuration("""
        <vlans>
          <vlan>
            <name>MEH</name>
            <vlan-id>5</vlan-id>
          </vlan>
          <vlan>
            <name>STANDARD</name>
            <vlan-id>10</vlan-id>
            <l3-interface>irb.25</l3-interface>
          </vlan>
          <vlan>
            <name>MEH2</name>
            <vlan-id>15</vlan-id>
          </vlan>
        </vlans>
    """))

    self.netconf_mock.should_receive("edit_config").once().with_args(target="candidate", config=is_xml("""
        <config>
          <configuration>
            <vlans>
              <vlan operation="delete">
                <name>STANDARD</name>
              </vlan>
            </vlans>
            <interfaces>
              <interface>
                <name>irb</name>
                <unit operation="delete">
                  <name>25</name>
                </unit>
              </interface>
            </interfaces>
          </configuration>
        </config>
    """)).and_return(an_ok_response())

    self.switch.remove_vlan(10)
def test_remove_vlan_ignores_removing_interface_not_created(self):
    """A vlan without an l3-interface is removed with a vlan-only delete —
    no <interfaces> section appears in the edit.
    """
    self.netconf_mock.should_receive("get_config").with_args(source="candidate", filter=is_xml("""
        <filter>
          <configuration>
            <vlans />
            <interfaces />
          </configuration>
        </filter>
    """)).and_return(a_configuration("""
        <vlans>
          <vlan>
            <name>STANDARD</name>
            <vlan-id>10</vlan-id>
          </vlan>
        </vlans>
    """))

    self.netconf_mock.should_receive("edit_config").once().with_args(target="candidate", config=is_xml("""
        <config>
          <configuration>
            <vlans>
              <vlan operation="delete">
                <name>STANDARD</name>
              </vlan>
            </vlans>
          </configuration>
        </config>
    """)).and_return(an_ok_response())

    self.switch.remove_vlan(10)
def test_remove_vlan_invalid_vlan_raises(self):
    """remove_vlan() of a vlan id not present in the configuration raises
    UnknownVlan with a descriptive message (no edit is attempted).
    """
    self.netconf_mock.should_receive("get_config").with_args(source="candidate", filter=is_xml("""
        <filter>
          <configuration>
            <vlans />
            <interfaces />
          </configuration>
        </filter>
    """)).and_return(a_configuration("""
        <vlans>
          <vlan>
            <name>ANOTHER</name>
            <vlan-id>10</vlan-id>
          </vlan>
        </vlans>
    """))

    with self.assertRaises(UnknownVlan) as expect:
        self.switch.remove_vlan(20)

    assert_that(str(expect.exception), equal_to("Vlan 20 not found"))
def test_remove_vlan_in_use_deletes_all_usages(self):
    """remove_vlan(10) scrubs every interface membership of the vlan:

    - ge-0/0/1: literal member 10 deleted.
    - ge-0/0/2: range 9-15 is deleted and re-added split around 10 (9, 11-15).
    - ge-0/0/3: member 12 is unrelated — untouched (absent from the edit).
    - ge-0/0/4: member given by vlan name STANDARD deleted.
    - ge-0/0/5: a different vlan name — untouched.
    """
    self.netconf_mock.should_receive("get_config").with_args(source="candidate", filter=is_xml("""
        <filter>
          <configuration>
            <vlans />
            <interfaces />
          </configuration>
        </filter>
    """)).and_return(a_configuration("""
        <vlans>
          <vlan>
            <name>STANDARD</name>
            <vlan-id>10</vlan-id>
          </vlan>
        </vlans>
        <interfaces>
          <interface>
            <name>ge-0/0/1</name>
            <unit>
              <name>0</name>
              <family>
                <ethernet-switching>
                  <port-mode>trunk</port-mode>
                  <vlan>
                    <members>9</members>
                    <members>10</members>
                    <members>11</members>
                  </vlan>
                </ethernet-switching>
              </family>
            </unit>
          </interface>
          <interface>
            <name>ge-0/0/2</name>
            <unit>
              <name>0</name>
              <family>
                <ethernet-switching>
                  <port-mode>trunk</port-mode>
                  <vlan>
                    <members>9-15</members>
                  </vlan>
                </ethernet-switching>
              </family>
            </unit>
          </interface>
          <interface>
            <name>ge-0/0/3</name>
            <unit>
              <name>0</name>
              <family>
                <ethernet-switching>
                  <port-mode>access</port-mode>
                  <vlan>
                    <members>12</members>
                  </vlan>
                </ethernet-switching>
              </family>
            </unit>
          </interface>
          <interface>
            <name>ge-0/0/4</name>
            <unit>
              <name>0</name>
              <family>
                <ethernet-switching>
                  <port-mode>access</port-mode>
                  <vlan>
                    <members>STANDARD</members>
                  </vlan>
                </ethernet-switching>
              </family>
            </unit>
          </interface>
          <interface>
            <name>ge-0/0/5</name>
            <unit>
              <name>0</name>
              <family>
                <ethernet-switching>
                  <port-mode>access</port-mode>
                  <vlan>
                    <members>ANOTHER_NAME</members>
                  </vlan>
                </ethernet-switching>
              </family>
            </unit>
          </interface>
        </interfaces>
    """))

    self.netconf_mock.should_receive("edit_config").once().with_args(target="candidate", config=is_xml("""
        <config>
          <configuration>
            <vlans>
              <vlan operation="delete">
                <name>STANDARD</name>
              </vlan>
            </vlans>
            <interfaces>
              <interface>
                <name>ge-0/0/1</name>
                <unit>
                  <name>0</name>
                  <family>
                    <ethernet-switching>
                      <vlan>
                        <members operation="delete">10</members>
                      </vlan>
                    </ethernet-switching>
                  </family>
                </unit>
              </interface>
              <interface>
                <name>ge-0/0/2</name>
                <unit>
                  <name>0</name>
                  <family>
                    <ethernet-switching>
                      <vlan>
                        <members operation="delete">9-15</members>
                        <members>9</members>
                        <members>11-15</members>
                      </vlan>
                    </ethernet-switching>
                  </family>
                </unit>
              </interface>
              <interface>
                <name>ge-0/0/4</name>
                <unit>
                  <name>0</name>
                  <family>
                    <ethernet-switching>
                      <vlan>
                        <members operation="delete">STANDARD</members>
                      </vlan>
                    </ethernet-switching>
                  </family>
                </unit>
              </interface>
            </interfaces>
          </configuration>
        </config>""")).and_return(an_ok_response())

    self.switch.remove_vlan(10)
def test_remove_vlan_delete_usage_and_interface_at_same_time(self):
# Fixture: vlan "STANDARD" (id 10) has an l3-interface "vlan.10", and ge-0/0/1
# trunks vlan 10.  remove_vlan(10) must delete the vlan definition, the routed
# unit 10 of the "vlan" interface, and the trunk membership in one edit_config.
self.netconf_mock.should_receive("get_config").with_args(source="candidate", filter=is_xml("""
<filter>
<configuration>
<vlans />
<interfaces />
</configuration>
</filter>
""")).and_return(a_configuration("""
<vlans>
<vlan>
<name>STANDARD</name>
<vlan-id>10</vlan-id>
<l3-interface>vlan.10</l3-interface>
</vlan>
</vlans>
<interfaces>
<interface>
<name>name</name>
<unit>
<name>10</name>
<family>
<inet>
<address>
<name>1.1.1.1/24</name>
</address>
</inet>
</family>
</unit>
</interface>
<interface>
<name>ge-0/0/1</name>
<unit>
<name>0</name>
<family>
<ethernet-switching>
<port-mode>trunk</port-mode>
<vlan>
<members>10</members>
</vlan>
</ethernet-switching>
</family>
</unit>
</interface>
</interfaces>
"""))
# Expected single edit: delete the vlan, delete unit 10 of interface "vlan"
# (derived from the l3-interface value, not from the fixture's interface name),
# and delete the <members>10</members> entry on ge-0/0/1.
self.netconf_mock.should_receive("edit_config").once().with_args(target="candidate", config=is_xml("""
<config>
<configuration>
<vlans>
<vlan operation="delete">
<name>STANDARD</name>
</vlan>
</vlans>
<interfaces>
<interface>
<name>vlan</name>
<unit operation="delete">
<name>10</name>
</unit>
</interface>
<interface>
<name>ge-0/0/1</name>
<unit>
<name>0</name>
<family>
<ethernet-switching>
<vlan>
<members operation="delete">10</members>
</vlan>
</ethernet-switching>
</family>
</unit>
</interface>
</interfaces>
</configuration>
</config>
""")).and_return(an_ok_response())
self.switch.remove_vlan(10)
def test_port_mode_access_with_no_port_mode_or_vlan_set_just_sets_the_port_mode(self):
# Fixture: ge-0/0/6 has an empty ethernet-switching family (no port-mode, no
# vlan members).  set_access_mode should only add <port-mode>access</port-mode>.
self.netconf_mock.should_receive("get_config").with_args(source="candidate", filter=is_xml("""
<filter>
<configuration>
<interfaces/>
<vlans/>
</configuration>
</filter>
""")).and_return(a_configuration("""
<interfaces>
<interface>
<name>ge-0/0/6</name>
<unit>
<name>0</name>
<family>
<ethernet-switching>
</ethernet-switching>
</family>
</unit>
</interface>
</interfaces>
<vlans/>
"""))
self.netconf_mock.should_receive("edit_config").once().with_args(target="candidate", config=is_xml("""
<config>
<configuration>
<interfaces>
<interface>
<name>ge-0/0/6</name>
<unit>
<name>0</name>
<family>
<ethernet-switching>
<port-mode>access</port-mode>
</ethernet-switching>
</family>
</unit>
</interface>
</interfaces>
</configuration>
</config>
""")).and_return(an_ok_response())
self.switch.set_access_mode("ge-0/0/6")
def test_port_mode_access_with_no_mode_and_1_vlan_does_not_remove_it(self):
# Fixture: ge-0/0/6 has no port-mode but has vlan members configured.
# When the port was not in trunk mode, set_access_mode keeps the vlan
# membership and only writes the access port-mode.
# NOTE(review): fixture lists <members>2998</members> twice — presumably
# intentional (duplicate entries must still count as "1 vlan"); confirm.
self.netconf_mock.should_receive("get_config").with_args(source="candidate", filter=is_xml("""
<filter>
<configuration>
<interfaces/>
<vlans/>
</configuration>
</filter>
""")).and_return(a_configuration("""
<interfaces>
<interface>
<name>ge-0/0/6</name>
<unit>
<name>0</name>
<family>
<ethernet-switching>
<vlan>
<members>2998</members>
<members>2998</members>
</vlan>
</ethernet-switching>
</family>
</unit>
</interface>
</interfaces>
<vlans/>
"""))
self.netconf_mock.should_receive("edit_config").once().with_args(target="candidate", config=is_xml("""
<config>
<configuration>
<interfaces>
<interface>
<name>ge-0/0/6</name>
<unit>
<name>0</name>
<family>
<ethernet-switching>
<port-mode>access</port-mode>
</ethernet-switching>
</family>
</unit>
</interface>
</interfaces>
</configuration>
</config>
""")).and_return(an_ok_response())
self.switch.set_access_mode("ge-0/0/6")
def test_port_mode_access_with_trunk_mode_and_1_vlan_does_remove_it(self):
# Fixture: ge-0/0/6 is in trunk mode with one vlan member.  Switching to
# access mode must also delete the whole <vlan> node (trunk memberships are
# not valid on an access port).
self.netconf_mock.should_receive("get_config").with_args(source="candidate", filter=is_xml("""
<filter>
<configuration>
<interfaces/>
<vlans/>
</configuration>
</filter>
""")).and_return(a_configuration("""
<interfaces>
<interface>
<name>ge-0/0/6</name>
<unit>
<name>0</name>
<family>
<ethernet-switching>
<port-mode>trunk</port-mode>
<vlan>
<members>2998</members>
</vlan>
</ethernet-switching>
</family>
</unit>
</interface>
</interfaces>
<vlans/>
"""))
self.netconf_mock.should_receive("edit_config").once().with_args(target="candidate", config=is_xml("""
<config>
<configuration>
<interfaces>
<interface>
<name>ge-0/0/6</name>
<unit>
<name>0</name>
<family>
<ethernet-switching>
<port-mode>access</port-mode>
<vlan operation="delete" />
</ethernet-switching>
</family>
</unit>
</interface>
</interfaces>
</configuration>
</config>
""")).and_return(an_ok_response())
self.switch.set_access_mode("ge-0/0/6")
def test_port_mode_access_with_trunk_mode_and_no_attributes_just_sets_mode(self):
# Fixture: ge-0/0/6 is in trunk mode but has no vlan members or native vlan,
# so switching to access mode only rewrites the port-mode value.
self.netconf_mock.should_receive("get_config").with_args(source="candidate", filter=is_xml("""
<filter>
<configuration>
<interfaces/>
<vlans/>
</configuration>
</filter>
""")).and_return(a_configuration("""
<interfaces>
<interface>
<name>ge-0/0/6</name>
<unit>
<name>0</name>
<family>
<ethernet-switching>
<port-mode>trunk</port-mode>
</ethernet-switching>
</family>
</unit>
</interface>
</interfaces>
<vlans/>
"""))
self.netconf_mock.should_receive("edit_config").once().with_args(target="candidate", config=is_xml("""
<config>
<configuration>
<interfaces>
<interface>
<name>ge-0/0/6</name>
<unit>
<name>0</name>
<family>
<ethernet-switching>
<port-mode>access</port-mode>
</ethernet-switching>
</family>
</unit>
</interface>
</interfaces>
</configuration>
</config>
""")).and_return(an_ok_response())
self.switch.set_access_mode("ge-0/0/6")
def test_port_mode_access_already_in_access_mode_does_nothing(self):
# Fixture: ge-0/0/6 is already in access mode; set_access_mode must be a
# no-op (edit_config is asserted to never be called).
self.netconf_mock.should_receive("get_config").with_args(source="candidate", filter=is_xml("""
<filter>
<configuration>
<interfaces/>
<vlans/>
</configuration>
</filter>
""")).and_return(a_configuration("""
<interfaces>
<interface>
<name>ge-0/0/6</name>
<unit>
<name>0</name>
<family>
<ethernet-switching>
<port-mode>access</port-mode>
</ethernet-switching>
</family>
</unit>
</interface>
</interfaces>
<vlans/>
"""))
self.netconf_mock.should_receive("edit_config").never()
self.switch.set_access_mode("ge-0/0/6")
def test_port_mode_access_on_unknown_interface_raises(self):
# The device rejects ge-0/0/99 ("port value outside range") at edit_config
# time; the driver must translate that RPCError into UnknownInterface.
self.netconf_mock.should_receive("get_config").with_args(source="candidate", filter=is_xml("""
<filter>
<configuration>
<interfaces/>
<vlans/>
</configuration>
</filter>
""")).and_return(a_configuration("""
<interfaces/>
<vlans/>
"""))
self.netconf_mock.should_receive("edit_config").once().with_args(target="candidate", config=is_xml("""
<config>
<configuration>
<interfaces>
<interface>
<name>ge-0/0/99</name>
<unit>
<name>0</name>
<family>
<ethernet-switching>
<port-mode>access</port-mode>
</ethernet-switching>
</family>
</unit>
</interface>
</interfaces>
</configuration>
</config>
""")).and_raise(RPCError(to_ele(textwrap.dedent("""
<rpc-error xmlns="urn:ietf:params:xml:ns:netconf:base:1.0" xmlns:junos="http://xml.juniper.net/junos/11.4R1/junos" xmlns:nc="urn:ietf:params:xml:ns:netconf:base:1.0">
<error-severity>error</error-severity>
<error-message>
port value outside range 0..63 for '99' in 'ge-0/0/99'
</error-message>
</rpc-error>"""))))
with self.assertRaises(UnknownInterface) as expect:
self.switch.set_access_mode("ge-0/0/99")
assert_that(str(expect.exception), contains_string("Unknown interface ge-0/0/99"))
def test_port_mode_access_on_default_interface_works(self):
# Fixture: the candidate config has no explicit entry for ge-0/0/6 (interface
# left at factory default).  set_access_mode still pushes the access mode.
self.netconf_mock.should_receive("get_config").with_args(source="candidate", filter=is_xml("""
<filter>
<configuration>
<interfaces/>
<vlans/>
</configuration>
</filter>
""")).and_return(a_configuration("""
<interfaces/>
<vlans/>
"""))
self.netconf_mock.should_receive("edit_config").once().with_args(target="candidate", config=is_xml("""
<config>
<configuration>
<interfaces>
<interface>
<name>ge-0/0/6</name>
<unit>
<name>0</name>
<family>
<ethernet-switching>
<port-mode>access</port-mode>
</ethernet-switching>
</family>
</unit>
</interface>
</interfaces>
</configuration>
</config>
""")).and_return(an_ok_response())
self.switch.set_access_mode("ge-0/0/6")
def test_port_mode_access_with_trunk_mode_wipes_all_trunk_stuff(self):
# Fixture: ge-0/0/6 is a trunk with vlan members AND a native-vlan-id.
# Switching to access mode must delete both trunk-only attributes.
self.netconf_mock.should_receive("get_config").with_args(source="candidate", filter=is_xml("""
<filter>
<configuration>
<interfaces/>
<vlans/>
</configuration>
</filter>
""")).and_return(a_configuration("""
<interfaces>
<interface>
<name>ge-0/0/6</name>
<unit>
<name>0</name>
<family>
<ethernet-switching>
<port-mode>trunk</port-mode>
<vlan>
<members>123</members>
<members>456</members>
</vlan>
<native-vlan-id>999</native-vlan-id>
</ethernet-switching>
</family>
</unit>
</interface>
</interfaces>
<vlans/>
"""))
self.netconf_mock.should_receive("edit_config").once().with_args(target="candidate", config=is_xml("""
<config>
<configuration>
<interfaces>
<interface>
<name>ge-0/0/6</name>
<unit>
<name>0</name>
<family>
<ethernet-switching>
<port-mode>access</port-mode>
<vlan operation="delete" />
<native-vlan-id operation="delete" />
</ethernet-switching>
</family>
</unit>
</interface>
</interfaces>
</configuration>
</config>
""")).and_return(an_ok_response())
self.switch.set_access_mode("ge-0/0/6")
def test_port_mode_trunk_with_no_port_mode_or_vlan_set_just_sets_the_port_mode(self):
# Fixture: ge-0/0/6 has an empty ethernet-switching family.  set_trunk_mode
# only needs to write <port-mode>trunk</port-mode>.  Note the get_config
# filter here names the interface explicitly (unlike the access-mode tests).
self.netconf_mock.should_receive("get_config").with_args(source="candidate", filter=is_xml("""
<filter>
<configuration>
<interfaces>
<interface>
<name>ge-0/0/6</name>
</interface>
</interfaces>
<vlans/>
</configuration>
</filter>
""")).and_return(a_configuration("""
<interfaces>
<interface>
<name>ge-0/0/6</name>
<unit>
<name>0</name>
<family>
<ethernet-switching>
</ethernet-switching>
</family>
</unit>
</interface>
</interfaces>
<vlans/>
"""))
self.netconf_mock.should_receive("edit_config").once().with_args(target="candidate", config=is_xml("""
<config>
<configuration>
<interfaces>
<interface>
<name>ge-0/0/6</name>
<unit>
<name>0</name>
<family>
<ethernet-switching>
<port-mode>trunk</port-mode>
</ethernet-switching>
</family>
</unit>
</interface>
</interfaces>
</configuration>
</config>
""")).and_return(an_ok_response())
self.switch.set_trunk_mode("ge-0/0/6")
def test_port_mode_trunk_with_no_port_mode_and_1_vlan_removes_it(self):
self.netconf_mock.should_receive("get_config").with_args(source="candidate", filter=is_xml("""
<filter>
<configuration>
<interfaces>
<interface>
<name>ge-0/0/6</name>
</interface>
</interfaces>
<vlans/>
</configuration>
</filter>
""")).and_return(a_configuration("""
<interfaces>
<interface>
<name>ge-0/0/6</name>
<unit>
<name>0</name>
<family>
<ethernet-switching>
<vlan>
<members>1000</members>
</vlan>
</ethernet-switching>
</family>
</unit>
</interface>
</interfaces>
<vlans/>
"""))
self.netconf_mock.should_receive("edit_config").once().with_args(target="candidate", config=is_xml("""
<config>
<configuration>
<interfaces>
<interface>
<name>ge-0/0/6</name>
<unit>
<name>0</name>
<family>
<ethernet-switching>
<port-mode>trunk</port-mode>
<vlan operation="delete">
</ethernet-switching>
</family>
</unit>
</interface>
</interfaces>
</configuration>
</config>
""")).and_return(an_ok_response())
self.switch.set_trunk_mode("ge-0/0/6")
def test_port_mode_trunk_with_access_port_mode_and_1_vlan_removes_it(self):
self.netconf_mock.should_receive("get_config").with_args(source="candidate", filter=is_xml("""
<filter>
<configuration>
<interfaces>
<interface>
<name>ge-0/0/6</name>
</interface>
</interfaces>
<vlans/>
</configuration>
</filter>
""")).and_return(a_configuration("""
<interfaces>
<interface>
<name>ge-0/0/6</name>
<unit>
<name>0</name>
<family>
<ethernet-switching>
<port-mode>access</port-mode>
<vlan>
<members>1000</members>
</vlan>
</ethernet-switching>
</family>
</unit>
</interface>
</interfaces>
<vlans/>
"""))
self.netconf_mock.should_receive("edit_config").once().with_args(target="candidate", config=is_xml("""
<config>
<configuration>
<interfaces>
<interface>
<name>ge-0/0/6</name>
<unit>
<name>0</name>
<family>
<ethernet-switching>
<port-mode>trunk</port-mode>
<vlan operation="delete">
</ethernet-switching>
</family>
</unit>
</interface>
</interfaces>
</configuration>
</config>
""")).and_return(an_ok_response())
self.switch.set_trunk_mode("ge-0/0/6")
def test_port_mode_trunk_already_in_trunk_mode_does_nothing(self):
# Fixture: ge-0/0/6 is already a trunk (with members); set_trunk_mode must
# be a no-op — edit_config is asserted to never be called.
self.netconf_mock.should_receive("get_config").with_args(source="candidate", filter=is_xml("""
<filter>
<configuration>
<interfaces>
<interface>
<name>ge-0/0/6</name>
</interface>
</interfaces>
<vlans/>
</configuration>
</filter>
""")).and_return(a_configuration("""
<interfaces>
<interface>
<name>ge-0/0/6</name>
<unit>
<name>0</name>
<family>
<ethernet-switching>
<port-mode>trunk</port-mode>
<vlan>
<members>1000</members>
<members>1001</members>
</vlan>
</ethernet-switching>
</family>
</unit>
</interface>
</interfaces>
<vlans/>
"""))
self.netconf_mock.should_receive("edit_config").never()
self.switch.set_trunk_mode("ge-0/0/6")
def test_port_mode_trunk_on_unknown_interface_raises(self):
# The device rejects ge-0/0/99 at edit_config time; the driver must map the
# "port value outside range" RPCError to UnknownInterface.
self.netconf_mock.should_receive("get_config").with_args(source="candidate", filter=is_xml("""
<filter>
<configuration>
<interfaces>
<interface>
<name>ge-0/0/99</name>
</interface>
</interfaces>
<vlans/>
</configuration>
</filter>
""")).and_return(a_configuration())
self.netconf_mock.should_receive("edit_config").once().with_args(target="candidate", config=is_xml("""
<config>
<configuration>
<interfaces>
<interface>
<name>ge-0/0/99</name>
<unit>
<name>0</name>
<family>
<ethernet-switching>
<port-mode>trunk</port-mode>
</ethernet-switching>
</family>
</unit>
</interface>
</interfaces>
</configuration>
</config>
""")).and_raise(a_port_value_outside_range_rpc_error())
with self.assertRaises(UnknownInterface) as expect:
self.switch.set_trunk_mode("ge-0/0/99")
assert_that(str(expect.exception), contains_string("Unknown interface ge-0/0/99"))
def test_port_mode_trunk_on_default_interface_works(self):
# Fixture: empty candidate config (factory-default interface).
# set_trunk_mode still pushes the trunk port-mode.
self.netconf_mock.should_receive("get_config").with_args(source="candidate", filter=is_xml("""
<filter>
<configuration>
<interfaces>
<interface>
<name>ge-0/0/6</name>
</interface>
</interfaces>
<vlans/>
</configuration>
</filter>
""")).and_return(a_configuration())
self.netconf_mock.should_receive("edit_config").once().with_args(target="candidate", config=is_xml("""
<config>
<configuration>
<interfaces>
<interface>
<name>ge-0/0/6</name>
<unit>
<name>0</name>
<family>
<ethernet-switching>
<port-mode>trunk</port-mode>
</ethernet-switching>
</family>
</unit>
</interface>
</interfaces>
</configuration>
</config>
""")).and_return(an_ok_response())
self.switch.set_trunk_mode("ge-0/0/6")
def test_set_access_vlan_on_interface_with_access_mode_and_no_vlan_succeeds_easily(self):
# Fixture: vlan 1000 exists and ge-0/0/6 is already in access mode with no
# member — set_access_vlan only needs to add <members>1000</members>.
self.netconf_mock.should_receive("get_config").with_args(source="candidate", filter=is_xml("""
<filter>
<configuration>
<interfaces/>
<vlans/>
</configuration>
</filter>
""")).and_return(a_configuration("""
<vlans>
<vlan>
<name>PATATE</name>
<vlan-id>1000</vlan-id>
</vlan>
</vlans>
<interfaces>
<interface>
<name>ge-0/0/6</name>
<unit>
<name>0</name>
<family>
<ethernet-switching>
<port-mode>access</port-mode>
</ethernet-switching>
</family>
</unit>
</interface>
</interfaces>
"""))
self.netconf_mock.should_receive("edit_config").once().with_args(target="candidate", config=is_xml("""
<config>
<configuration>
<interfaces>
<interface>
<name>ge-0/0/6</name>
<unit>
<name>0</name>
<family>
<ethernet-switching>
<vlan>
<members>1000</members>
</vlan>
</ethernet-switching>
</family>
</unit>
</interface>
</interfaces>
</configuration>
</config>
""")).and_return(an_ok_response())
self.switch.set_access_vlan("ge-0/0/6", 1000)
def test_set_access_vlan_on_interface_that_already_has_it_does_nothing(self):
# Fixture: ge-0/0/6 already has vlan 1000 as its access vlan — the call
# must be a no-op (edit_config asserted never called).
self.netconf_mock.should_receive("get_config").with_args(source="candidate", filter=is_xml("""
<filter>
<configuration>
<interfaces/>
<vlans/>
</configuration>
</filter>
""")).and_return(a_configuration("""
<vlans>
<vlan>
<name>PATATE</name>
<vlan-id>1000</vlan-id>
</vlan>
</vlans>
<interfaces>
<interface>
<name>ge-0/0/6</name>
<unit>
<name>0</name>
<family>
<ethernet-switching>
<port-mode>access</port-mode>
<vlan>
<members>1000</members>
</vlan>
</ethernet-switching>
</family>
</unit>
</interface>
</interfaces>
"""))
self.netconf_mock.should_receive("edit_config").never()
self.switch.set_access_vlan("ge-0/0/6", 1000)
def test_set_access_vlan_on_interface_that_has_no_port_mode_sets_it(self):
# Fixture: ge-0/0/6 has no port-mode configured.  set_access_vlan must set
# both <port-mode>access</port-mode> and the vlan membership in one edit.
self.netconf_mock.should_receive("get_config").with_args(source="candidate", filter=is_xml("""
<filter>
<configuration>
<interfaces/>
<vlans/>
</configuration>
</filter>
""")).and_return(a_configuration("""
<vlans>
<vlan>
<name>PATATE</name>
<vlan-id>1000</vlan-id>
</vlan>
</vlans>
<interfaces>
<interface>
<name>ge-0/0/6</name>
<unit>
<name>0</name>
<family>
<ethernet-switching>
</ethernet-switching>
</family>
</unit>
</interface>
</interfaces>
"""))
self.netconf_mock.should_receive("edit_config").once().with_args(target="candidate", config=is_xml("""
<config>
<configuration>
<interfaces>
<interface>
<name>ge-0/0/6</name>
<unit>
<name>0</name>
<family>
<ethernet-switching>
<port-mode>access</port-mode>
<vlan>
<members>1000</members>
</vlan>
</ethernet-switching>
</family>
</unit>
</interface>
</interfaces>
</configuration>
</config>
""")).and_return(an_ok_response())
self.switch.set_access_vlan("ge-0/0/6", 1000)
def test_set_access_vlan_on_interface_replaces_the_actual_ones(self):
# Fixture: ge-0/0/6 (no port-mode) has existing members "2000" and the
# range form "2000-2000".  set_access_vlan must delete every existing
# member entry (both spellings) and add the new vlan 1000.
self.netconf_mock.should_receive("get_config").with_args(source="candidate", filter=is_xml("""
<filter>
<configuration>
<interfaces/>
<vlans/>
</configuration>
</filter>
""")).and_return(a_configuration("""
<vlans>
<vlan>
<name>PATATE</name>
<vlan-id>1000</vlan-id>
</vlan>
<vlan>
<name>PATATE2</name>
<vlan-id>2000</vlan-id>
</vlan>
</vlans>
<interfaces>
<interface>
<name>ge-0/0/6</name>
<unit>
<name>0</name>
<family>
<ethernet-switching>
<vlan>
<members>2000</members>
<members>2000-2000</members>
</vlan>
</ethernet-switching>
</family>
</unit>
</interface>
</interfaces>
"""))
self.netconf_mock.should_receive("edit_config").once().with_args(target="candidate", config=is_xml("""
<config>
<configuration>
<interfaces>
<interface>
<name>ge-0/0/6</name>
<unit>
<name>0</name>
<family>
<ethernet-switching>
<port-mode>access</port-mode>
<vlan>
<members operation="delete">2000</members>
<members operation="delete">2000-2000</members>
<members>1000</members>
</vlan>
</ethernet-switching>
</family>
</unit>
</interface>
</interfaces>
</configuration>
</config>
""")).and_return(an_ok_response())
self.switch.set_access_vlan("ge-0/0/6", 1000)
def test_set_access_vlan_on_interface_in_trunk_mode_should_raise(self):
# Setting an access vlan on a trunk-mode interface is invalid: expect
# InterfaceInWrongPortMode and no edit_config call.
self.netconf_mock.should_receive("get_config").with_args(source="candidate", filter=is_xml("""
<filter>
<configuration>
<interfaces/>
<vlans/>
</configuration>
</filter>
""")).and_return(a_configuration("""
<vlans>
<vlan>
<name>PATATE</name>
<vlan-id>1000</vlan-id>
</vlan>
</vlans>
<interfaces>
<interface>
<name>ge-0/0/6</name>
<unit>
<name>0</name>
<family>
<ethernet-switching>
<port-mode>trunk</port-mode>
</ethernet-switching>
</family>
</unit>
</interface>
</interfaces>
"""))
self.netconf_mock.should_receive("edit_config").never()
with self.assertRaises(InterfaceInWrongPortMode) as expect:
self.switch.set_access_vlan("ge-0/0/6", 1000)
assert_that(str(expect.exception), contains_string("Operation cannot be performed on a trunk mode interface"))
def test_set_access_vlan_on_unknown_vlan_raises(self):
# Fixture: only vlan 3333 exists.  Asking for vlan 1000 must raise
# UnknownVlan before any edit_config is attempted.
self.netconf_mock.should_receive("get_config").with_args(source="candidate", filter=is_xml("""
<filter>
<configuration>
<interfaces/>
<vlans/>
</configuration>
</filter>
""")).and_return(a_configuration("""
<vlans>
<vlan>
<name>PATATE</name>
<vlan-id>3333</vlan-id>
</vlan>
</vlans>
<interfaces>
<interface>
<name>ge-0/0/6</name>
<unit>
<name>0</name>
<family>
<ethernet-switching>
<port-mode>access</port-mode>
</ethernet-switching>
</family>
</unit>
</interface>
</interfaces>
"""))
self.netconf_mock.should_receive("edit_config").never()
with self.assertRaises(UnknownVlan) as expect:
self.switch.set_access_vlan("ge-0/0/6", 1000)
assert_that(str(expect.exception), contains_string("Vlan 1000 not found"))
def test_set_access_vlan_on_default_interface_works(self):
# Fixture: vlan 1000 exists but ge-0/0/6 has no explicit config (factory
# default).  set_access_vlan must push both the access mode and the member.
self.netconf_mock.should_receive("get_config").with_args(source="candidate", filter=is_xml("""
<filter>
<configuration>
<interfaces/>
<vlans/>
</configuration>
</filter>
""")).and_return(a_configuration("""
<vlans>
<vlan>
<name>PATATE</name>
<vlan-id>1000</vlan-id>
</vlan>
</vlans>
"""))
self.netconf_mock.should_receive("edit_config").once().with_args(target="candidate", config=is_xml("""
<config>
<configuration>
<interfaces>
<interface>
<name>ge-0/0/6</name>
<unit>
<name>0</name>
<family>
<ethernet-switching>
<port-mode>access</port-mode>
<vlan>
<members>1000</members>
</vlan>
</ethernet-switching>
</family>
</unit>
</interface>
</interfaces>
</configuration>
</config>
""")).and_return(an_ok_response())
self.switch.set_access_vlan("ge-0/0/6", 1000)
def test_set_access_vlan_on_unknown_interface_raises(self):
# Vlan 1000 exists, but the device rejects ge-0/0/99 at edit_config time;
# the driver must translate the RPCError into UnknownInterface.
self.netconf_mock.should_receive("get_config").with_args(source="candidate", filter=is_xml("""
<filter>
<configuration>
<interfaces/>
<vlans/>
</configuration>
</filter>
""")).and_return(a_configuration("""
<vlans>
<vlan>
<name>PATATE</name>
<vlan-id>1000</vlan-id>
</vlan>
</vlans>
"""))
self.netconf_mock.should_receive("edit_config").once().with_args(target="candidate", config=is_xml("""
<config>
<configuration>
<interfaces>
<interface>
<name>ge-0/0/99</name>
<unit>
<name>0</name>
<family>
<ethernet-switching>
<port-mode>access</port-mode>
<vlan>
<members>1000</members>
</vlan>
</ethernet-switching>
</family>
</unit>
</interface>
</interfaces>
</configuration>
</config>
""")).and_raise(a_port_value_outside_range_rpc_error())
with self.assertRaises(UnknownInterface) as expect:
self.switch.set_access_vlan("ge-0/0/99", 1000)
assert_that(str(expect.exception), contains_string("Unknown interface ge-0/0/99"))
def test_reset_interface_works(self):
# reset_interface issues a single delete of the whole interface node —
# no prior get_config is needed.
self.netconf_mock.should_receive("edit_config").once().with_args(target="candidate", config=is_xml("""
<config>
<configuration>
<interfaces>
<interface operation="delete">
<name>ge-0/0/6</name>
</interface>
</interfaces>
</configuration>
</config>
""")).and_return(an_ok_response())
self.switch.reset_interface('ge-0/0/6')
def test_reset_port_value_outside_range_interface_raises_unknown_interface(self):
# A "port value outside range" RPCError from the device must be mapped to
# UnknownInterface by reset_interface.
self.netconf_mock.should_receive("edit_config").once().with_args(target="candidate", config=is_xml("""
<config>
<configuration>
<interfaces>
<interface operation="delete">
<name>ge-0/0/99</name>
</interface>
</interfaces>
</configuration>
</config>
""")).and_raise(RPCError(to_ele(textwrap.dedent("""
<rpc-error xmlns="urn:ietf:params:xml:ns:netconf:base:1.0" xmlns:junos="http://xml.juniper.net/junos/11.4R1/junos" xmlns:nc="urn:ietf:params:xml:ns:netconf:base:1.0">
<error-severity>error</error-severity>
<error-message>
port value outside range 0..63 for '99' in 'ge-0/0/99'
</error-message>
</rpc-error>"""))))
with self.assertRaises(UnknownInterface) as expect:
self.switch.reset_interface("ge-0/0/99")
assert_that(str(expect.exception), contains_string("Unknown interface ge-0/0/99"))
def test_reset_interface_with_invalid_interface_raises(self):
# An "invalid interface type" RPCError must also be mapped to
# UnknownInterface (different device message, same driver exception).
self.netconf_mock.should_receive("edit_config").once().with_args(target="candidate", config=is_xml("""
<config>
<configuration>
<interfaces>
<interface operation="delete">
<name>ne-0/0/9</name>
</interface>
</interfaces>
</configuration>
</config>
""")).and_raise(RPCError(to_ele(textwrap.dedent("""
<rpc-error xmlns="urn:ietf:params:xml:ns:netconf:base:1.0" xmlns:junos="http://xml.juniper.net/junos/11.4R1/junos" xmlns:nc="urn:ietf:params:xml:ns:netconf:base:1.0">
<error-severity>error</error-severity>
<error-message>
invalid interface type in 'ne-0/0/9'
</error-message>
</rpc-error>"""))))
with self.assertRaises(UnknownInterface):
self.switch.reset_interface("ne-0/0/9")
def test_reset_interface_with_unknown_rpcerror_raises(self):
# An RPCError the driver does not recognize must propagate unchanged
# (not be swallowed or converted to UnknownInterface).
self.netconf_mock.should_receive("edit_config").once().with_args(target="candidate", config=is_xml("""
<config>
<configuration>
<interfaces>
<interface operation="delete">
<name>ne-0/0/9</name>
</interface>
</interfaces>
</configuration>
</config>
""")).and_raise(RPCError(to_ele(textwrap.dedent("""
<rpc-error xmlns="urn:ietf:params:xml:ns:netconf:base:1.0" xmlns:junos="http://xml.juniper.net/junos/11.4R1/junos" xmlns:nc="urn:ietf:params:xml:ns:netconf:base:1.0">
<error-severity>error</error-severity>
<error-message>
Unknown error
</error-message>
</rpc-error>"""))))
with self.assertRaises(RPCError) as expect:
self.switch.reset_interface("ne-0/0/9")
assert_that(str(expect.exception), contains_string("Unknown error"))
def test_unset_interface_access_vlan_removes_the_vlan_members(self):
# Fixture: ge-0/0/6 has members "1000" and "1000-1000".  Unsetting the
# access vlan deletes the whole <vlan> node in one operation rather than
# deleting each member entry individually.
self.netconf_mock.should_receive("get_config").with_args(source="candidate", filter=is_xml("""
<filter>
<configuration>
<interfaces>
<interface>
<name>ge-0/0/6</name>
</interface>
</interfaces>
<vlans/>
</configuration>
</filter>
""")).and_return(a_configuration("""
<interfaces>
<interface>
<name>ge-0/0/6</name>
<unit>
<name>0</name>
<family>
<ethernet-switching>
<vlan>
<members>1000</members>
<members>1000-1000</members>
</vlan>
</ethernet-switching>
</family>
</unit>
</interface>
</interfaces>
<vlans/>
"""))
self.netconf_mock.should_receive("edit_config").once().with_args(target="candidate", config=is_xml("""
<config>
<configuration>
<interfaces>
<interface>
<name>ge-0/0/6</name>
<unit>
<name>0</name>
<family>
<ethernet-switching>
<vlan operation="delete" />
</ethernet-switching>
</family>
</unit>
</interface>
</interfaces>
</configuration>
</config>
""")).and_return(an_ok_response())
self.switch.unset_interface_access_vlan("ge-0/0/6")
def test_unset_interface_access_vlan_with_no_vlan_raises(self):
# Fixture: ge-0/0/6 has no vlan members.  Unsetting must raise
# AccessVlanNotSet and never call edit_config.
self.netconf_mock.should_receive("get_config").with_args(source="candidate", filter=is_xml("""
<filter>
<configuration>
<interfaces>
<interface>
<name>ge-0/0/6</name>
</interface>
</interfaces>
<vlans/>
</configuration>
</filter>
""")).and_return(a_configuration("""
<interfaces>
<interface>
<name>ge-0/0/6</name>
<unit>
<name>0</name>
<family>
<ethernet-switching>
</ethernet-switching>
</family>
</unit>
</interface>
</interfaces>
<vlans/>
"""))
self.netconf_mock.should_receive("edit_config").never()
with self.assertRaises(AccessVlanNotSet) as expect:
self.switch.unset_interface_access_vlan("ge-0/0/6")
assert_that(str(expect.exception), contains_string("Access Vlan is not set on interface ge-0/0/6"))
def test_unset_interface_access_vlan_on_trunk_mode_raises(self):
# Unsetting the access vlan on a trunk-mode interface (even one with
# members) is invalid: expect InterfaceInWrongPortMode, no edit_config.
self.netconf_mock.should_receive("get_config").with_args(source="candidate", filter=is_xml("""
<filter>
<configuration>
<interfaces>
<interface>
<name>ge-0/0/6</name>
</interface>
</interfaces>
<vlans/>
</configuration>
</filter>
""")).and_return(a_configuration("""
<interfaces>
<interface>
<name>ge-0/0/6</name>
<unit>
<name>0</name>
<family>
<ethernet-switching>
<port-mode>trunk</port-mode>
<vlan>
<members>123</members>
</vlan>
</ethernet-switching>
</family>
</unit>
</interface>
</interfaces>
<vlans/>
"""))
self.netconf_mock.should_receive("edit_config").never()
with self.assertRaises(InterfaceInWrongPortMode) as expect:
self.switch.unset_interface_access_vlan("ge-0/0/6")
assert_that(str(expect.exception), contains_string("Operation cannot be performed on a trunk mode interface"))
def test_unset_interface_access_vlan_on_default_interface_works(self):
# Fixture: no explicit config for ge-0/0/6 at all — there is nothing to
# unset, so AccessVlanNotSet is raised.  (Despite the test's name, the
# asserted behavior is the exception, not a successful edit.)
self.netconf_mock.should_receive("get_config").with_args(source="candidate", filter=is_xml("""
<filter>
<configuration>
<interfaces>
<interface>
<name>ge-0/0/6</name>
</interface>
</interfaces>
<vlans/>
</configuration>
</filter>
""")).and_return(a_configuration(""))
self.netconf_mock.should_receive("edit_config").never()
with self.assertRaises(AccessVlanNotSet):
self.switch.unset_interface_access_vlan("ge-0/0/6")
def test_set_interface_native_vlan_on_interface_with_trunk_mode_and_no_native_vlan_succeeds_easily(self):
# Fixture: vlan 1000 exists and ge-0/0/6 is a trunk with no native vlan —
# the call only has to write <native-vlan-id>1000</native-vlan-id>.
self.netconf_mock.should_receive("get_config").with_args(source="candidate", filter=is_xml("""
<filter>
<configuration>
<interfaces/>
<vlans/>
</configuration>
</filter>
""")).and_return(a_configuration("""
<vlans>
<vlan>
<name>PATATE</name>
<vlan-id>1000</vlan-id>
</vlan>
</vlans>
<interfaces>
<interface>
<name>ge-0/0/6</name>
<unit>
<name>0</name>
<family>
<ethernet-switching>
<port-mode>trunk</port-mode>
</ethernet-switching>
</family>
</unit>
</interface>
</interfaces>
"""))
self.netconf_mock.should_receive("edit_config").once().with_args(target="candidate", config=is_xml("""
<config>
<configuration>
<interfaces>
<interface>
<name>ge-0/0/6</name>
<unit>
<name>0</name>
<family>
<ethernet-switching>
<native-vlan-id>1000</native-vlan-id>
</ethernet-switching>
</family>
</unit>
</interface>
</interfaces>
</configuration>
</config>
""")).and_return(an_ok_response())
self.switch.set_interface_native_vlan("ge-0/0/6", 1000)
def test_set_interface_native_vlan_on_interface_that_already_has_it_does_nothing(self):
# Fixture: native vlan already set to 1000 — the call must be a no-op
# (edit_config asserted never called).
self.netconf_mock.should_receive("get_config").with_args(source="candidate", filter=is_xml("""
<filter>
<configuration>
<interfaces/>
<vlans/>
</configuration>
</filter>
""")).and_return(a_configuration("""
<vlans>
<vlan>
<name>PATATE</name>
<vlan-id>1000</vlan-id>
</vlan>
</vlans>
<interfaces>
<interface>
<name>ge-0/0/6</name>
<unit>
<name>0</name>
<family>
<ethernet-switching>
<port-mode>trunk</port-mode>
<native-vlan-id>1000</native-vlan-id>
</ethernet-switching>
</family>
</unit>
</interface>
</interfaces>
"""))
self.netconf_mock.should_receive("edit_config").never()
self.switch.set_interface_native_vlan("ge-0/0/6", 1000)
def test_set_interface_native_vlan_on_interface_that_has_no_port_mode_sets_it(self):
# Fixture: ge-0/0/6 has no port-mode.  Setting a native vlan implies trunk
# mode, so both <port-mode>trunk</port-mode> and the native-vlan-id are
# written in one edit.
self.netconf_mock.should_receive("get_config").with_args(source="candidate", filter=is_xml("""
<filter>
<configuration>
<interfaces/>
<vlans/>
</configuration>
</filter>
""")).and_return(a_configuration("""
<vlans>
<vlan>
<name>PATATE</name>
<vlan-id>1000</vlan-id>
</vlan>
</vlans>
<interfaces>
<interface>
<name>ge-0/0/6</name>
<unit>
<name>0</name>
<family>
<ethernet-switching>
</ethernet-switching>
</family>
</unit>
</interface>
</interfaces>
"""))
self.netconf_mock.should_receive("edit_config").once().with_args(target="candidate", config=is_xml("""
<config>
<configuration>
<interfaces>
<interface>
<name>ge-0/0/6</name>
<unit>
<name>0</name>
<family>
<ethernet-switching>
<port-mode>trunk</port-mode>
<native-vlan-id>1000</native-vlan-id>
</ethernet-switching>
</family>
</unit>
</interface>
</interfaces>
</configuration>
</config>
""")).and_return(an_ok_response())
self.switch.set_interface_native_vlan("ge-0/0/6", 1000)
def test_set_interface_native_vlan_on_interface_replaces_the_actual_ones(self):
# Fixture: trunk ge-0/0/6 already has native vlan 2000.  Setting 1000 just
# rewrites <native-vlan-id> (a leaf node overwrite — no explicit delete of
# the previous value is needed).
self.netconf_mock.should_receive("get_config").with_args(source="candidate", filter=is_xml("""
<filter>
<configuration>
<interfaces/>
<vlans/>
</configuration>
</filter>
""")).and_return(a_configuration("""
<vlans>
<vlan>
<name>PATATE</name>
<vlan-id>1000</vlan-id>
</vlan>
<vlan>
<name>PATATE2</name>
<vlan-id>2000</vlan-id>
</vlan>
</vlans>
<interfaces>
<interface>
<name>ge-0/0/6</name>
<unit>
<name>0</name>
<family>
<ethernet-switching>
<port-mode>trunk</port-mode>
<native-vlan-id>2000</native-vlan-id>
</ethernet-switching>
</family>
</unit>
</interface>
</interfaces>
"""))
self.netconf_mock.should_receive("edit_config").once().with_args(target="candidate", config=is_xml("""
<config>
<configuration>
<interfaces>
<interface>
<name>ge-0/0/6</name>
<unit>
<name>0</name>
<family>
<ethernet-switching>
<native-vlan-id>1000</native-vlan-id>
</ethernet-switching>
</family>
</unit>
</interface>
</interfaces>
</configuration>
</config>
""")).and_return(an_ok_response())
self.switch.set_interface_native_vlan("ge-0/0/6", 1000)
def test_set_interface_native_vlan_on_interface_in_access_mode_should_raise(self):
# A native vlan only makes sense on a trunk; on an access-mode interface
# the driver must raise InterfaceInWrongPortMode and never call edit_config.
self.netconf_mock.should_receive("get_config").with_args(source="candidate", filter=is_xml("""
<filter>
<configuration>
<interfaces/>
<vlans/>
</configuration>
</filter>
""")).and_return(a_configuration("""
<vlans>
<vlan>
<name>PATATE</name>
<vlan-id>1000</vlan-id>
</vlan>
</vlans>
<interfaces>
<interface>
<name>ge-0/0/6</name>
<unit>
<name>0</name>
<family>
<ethernet-switching>
<port-mode>access</port-mode>
</ethernet-switching>
</family>
</unit>
</interface>
</interfaces>
"""))
self.netconf_mock.should_receive("edit_config").never()
with self.assertRaises(InterfaceInWrongPortMode) as expect:
self.switch.set_interface_native_vlan("ge-0/0/6", 1000)
assert_that(str(expect.exception), contains_string("Operation cannot be performed on a access mode interface"))
def test_set_interface_native_vlan_on_interface_that_is_already_a_member_of_the_trunk_raises(self):
    """Vlan 1000 falls inside the trunk's member range 999-1001, so it cannot also be the native vlan."""
    self.netconf_mock.should_receive("get_config").with_args(source="candidate", filter=is_xml("""
        <filter>
          <configuration>
            <interfaces/>
            <vlans/>
          </configuration>
        </filter>
    """)).and_return(a_configuration("""
        <vlans>
          <vlan>
            <name>PATATE0</name>
            <vlan-id>999</vlan-id>
          </vlan>
          <vlan>
            <name>PATATE</name>
            <vlan-id>1000</vlan-id>
          </vlan>
          <vlan>
            <name>PATATE2</name>
            <vlan-id>1001</vlan-id>
          </vlan>
        </vlans>
        <interfaces>
          <interface>
            <name>ge-0/0/6</name>
            <unit>
              <name>0</name>
              <family>
                <ethernet-switching>
                  <port-mode>trunk</port-mode>
                  <vlan>
                    <members>999-1001</members>
                  </vlan>
                </ethernet-switching>
              </family>
            </unit>
          </interface>
        </interfaces>
    """))
    self.netconf_mock.should_receive("edit_config").never()
    with self.assertRaises(VlanAlreadyInTrunk) as expect:
        self.switch.set_interface_native_vlan("ge-0/0/6", 1000)
    assert_that(str(expect.exception), contains_string("Vlan 1000 cannot be set as native vlan because it is already a member of the trunk"))
def test_set_interface_native_vlan_on_unknown_vlan_raises(self):
    """Vlan 1000 is not declared on the switch (only 3333 is), so UnknownVlan is raised before any write."""
    self.netconf_mock.should_receive("get_config").with_args(source="candidate", filter=is_xml("""
        <filter>
          <configuration>
            <interfaces/>
            <vlans/>
          </configuration>
        </filter>
    """)).and_return(a_configuration("""
        <vlans>
          <vlan>
            <name>PATATE</name>
            <vlan-id>3333</vlan-id>
          </vlan>
        </vlans>
        <interfaces>
          <interface>
            <name>ge-0/0/6</name>
            <unit>
              <name>0</name>
              <family>
                <ethernet-switching>
                  <port-mode>trunk</port-mode>
                </ethernet-switching>
              </family>
            </unit>
          </interface>
        </interfaces>
    """))
    self.netconf_mock.should_receive("edit_config").never()
    with self.assertRaises(UnknownVlan) as expect:
        self.switch.set_interface_native_vlan("ge-0/0/6", 1000)
    assert_that(str(expect.exception), contains_string("Vlan 1000 not found"))
def test_set_interface_native_vlan_on_unknown_interface_raises(self):
    """An absent interface is only detected when the device rejects the edit: the port-out-of-range RPC error is translated to UnknownInterface."""
    self.netconf_mock.should_receive("get_config").with_args(source="candidate", filter=is_xml("""
        <filter>
          <configuration>
            <interfaces/>
            <vlans/>
          </configuration>
        </filter>
    """)).and_return(a_configuration("""
        <vlans>
          <vlan>
            <name>PATATE</name>
            <vlan-id>1000</vlan-id>
          </vlan>
        </vlans>
    """))
    # No config exists for the port, so a full trunk + native-vlan config is attempted.
    self.netconf_mock.should_receive("edit_config").once().with_args(target="candidate", config=is_xml("""
        <config>
          <configuration>
            <interfaces>
              <interface>
                <name>ge-0/0/99</name>
                <unit>
                  <name>0</name>
                  <family>
                    <ethernet-switching>
                      <port-mode>trunk</port-mode>
                      <native-vlan-id>1000</native-vlan-id>
                    </ethernet-switching>
                  </family>
                </unit>
              </interface>
            </interfaces>
          </configuration>
        </config>
    """)).and_raise(a_port_value_outside_range_rpc_error())
    with self.assertRaises(UnknownInterface) as expect:
        self.switch.set_interface_native_vlan("ge-0/0/99", 1000)
    assert_that(str(expect.exception), contains_string("Unknown interface ge-0/0/99"))
def test_set_interface_native_vlan_on_default_interface_works(self):
    """An interface with no existing config gets port-mode trunk pushed along with the native vlan."""
    self.netconf_mock.should_receive("get_config").with_args(source="candidate", filter=is_xml("""
        <filter>
          <configuration>
            <interfaces/>
            <vlans/>
          </configuration>
        </filter>
    """)).and_return(a_configuration("""
        <vlans>
          <vlan>
            <name>PATATE</name>
            <vlan-id>1000</vlan-id>
          </vlan>
        </vlans>
    """))
    self.netconf_mock.should_receive("edit_config").once().with_args(target="candidate", config=is_xml("""
        <config>
          <configuration>
            <interfaces>
              <interface>
                <name>ge-0/0/6</name>
                <unit>
                  <name>0</name>
                  <family>
                    <ethernet-switching>
                      <port-mode>trunk</port-mode>
                      <native-vlan-id>1000</native-vlan-id>
                    </ethernet-switching>
                  </family>
                </unit>
              </interface>
            </interfaces>
          </configuration>
        </config>
    """)).and_return(an_ok_response())
    self.switch.set_interface_native_vlan("ge-0/0/6", 1000)
def test_unset_interface_native_vlan_succeeds(self):
    """Removing a configured native vlan issues a delete operation on the native-vlan-id node."""
    self.netconf_mock.should_receive("get_config").with_args(source="candidate", filter=is_xml("""
        <filter>
          <configuration>
            <interfaces>
              <interface>
                <name>ge-0/0/6</name>
              </interface>
            </interfaces>
            <vlans/>
          </configuration>
        </filter>
    """)).and_return(a_configuration("""
        <interfaces>
          <interface>
            <name>ge-0/0/6</name>
            <unit>
              <name>0</name>
              <family>
                <ethernet-switching>
                  <port-mode>trunk</port-mode>
                  <native-vlan-id>1000</native-vlan-id>
                </ethernet-switching>
              </family>
            </unit>
          </interface>
        </interfaces>
    """))
    self.netconf_mock.should_receive("edit_config").once().with_args(target="candidate", config=is_xml("""
        <config>
          <configuration>
            <interfaces>
              <interface>
                <name>ge-0/0/6</name>
                <unit>
                  <name>0</name>
                  <family>
                    <ethernet-switching>
                      <native-vlan-id operation="delete" />
                    </ethernet-switching>
                  </family>
                </unit>
              </interface>
            </interfaces>
          </configuration>
        </config>
    """)).and_return(an_ok_response())
    self.switch.unset_interface_native_vlan("ge-0/0/6")
def test_unset_interface_native_vlan_when_none_is_set_raises(self):
    """A trunk without a native-vlan-id raises NativeVlanNotSet and no edit is attempted."""
    self.netconf_mock.should_receive("get_config").with_args(source="candidate", filter=is_xml("""
        <filter>
          <configuration>
            <interfaces>
              <interface>
                <name>ge-0/0/6</name>
              </interface>
            </interfaces>
            <vlans/>
          </configuration>
        </filter>
    """)).and_return(a_configuration("""
        <interfaces>
          <interface>
            <name>ge-0/0/6</name>
            <unit>
              <name>0</name>
              <family>
                <ethernet-switching>
                  <port-mode>trunk</port-mode>
                </ethernet-switching>
              </family>
            </unit>
          </interface>
        </interfaces>
    """))
    self.netconf_mock.should_receive("edit_config").never()
    with self.assertRaises(NativeVlanNotSet) as expect:
        self.switch.unset_interface_native_vlan("ge-0/0/6")
    assert_that(str(expect.exception), contains_string("Trunk native Vlan is not set on interface ge-0/0/6"))
def test_unset_interface_native_vlan_on_default_interface_raises(self):
    """An interface with no configuration at all has no native vlan, so unsetting raises NativeVlanNotSet."""
    interface_filter = """
        <filter>
          <configuration>
            <interfaces>
              <interface>
                <name>ge-0/0/6</name>
              </interface>
            </interfaces>
            <vlans/>
          </configuration>
        </filter>
    """
    self.netconf_mock.should_receive("get_config").with_args(
        source="candidate", filter=is_xml(interface_filter)
    ).and_return(a_configuration(""))
    self.netconf_mock.should_receive("edit_config").never()

    with self.assertRaises(NativeVlanNotSet):
        self.switch.unset_interface_native_vlan("ge-0/0/6")
def test_set_interface_auto_negotiation_state_ON_works(self):
    """Enabling auto-negotiation writes an empty auto-negotiation node in ether-options."""
    expected_config = """
        <config>
          <configuration>
            <interfaces>
              <interface>
                <name>ge-0/0/6</name>
                <ether-options>
                  <auto-negotiation/>
                </ether-options>
              </interface>
            </interfaces>
          </configuration>
        </config>
    """
    self.netconf_mock.should_receive("edit_config").once().with_args(
        target="candidate", config=is_xml(expected_config)
    ).and_return(an_ok_response())

    self.switch.set_interface_auto_negotiation_state("ge-0/0/6", ON)
def test_set_interface_auto_negotiation_state_OFF_works(self):
    """Disabling auto-negotiation writes an empty no-auto-negotiation node in ether-options."""
    expected_config = """
        <config>
          <configuration>
            <interfaces>
              <interface>
                <name>ge-0/0/6</name>
                <ether-options>
                  <no-auto-negotiation/>
                </ether-options>
              </interface>
            </interfaces>
          </configuration>
        </config>
    """
    self.netconf_mock.should_receive("edit_config").once().with_args(
        target="candidate", config=is_xml(expected_config)
    ).and_return(an_ok_response())

    self.switch.set_interface_auto_negotiation_state("ge-0/0/6", OFF)
def test_set_interface_auto_negotiation_raises_on_unknown_interface(self):
    """The device rejecting the edit with a port-out-of-range error is surfaced as UnknownInterface."""
    attempted_config = """
        <config>
          <configuration>
            <interfaces>
              <interface>
                <name>ge-0/0/128</name>
                <ether-options>
                  <no-auto-negotiation/>
                </ether-options>
              </interface>
            </interfaces>
          </configuration>
        </config>
    """
    self.netconf_mock.should_receive("edit_config").once().with_args(
        target="candidate", config=is_xml(attempted_config)
    ).and_raise(a_port_value_outside_range_rpc_error())

    with self.assertRaises(UnknownInterface):
        self.switch.set_interface_auto_negotiation_state("ge-0/0/128", OFF)
def test_unset_interface_auto_negotiation_state_works_when_enabled(self):
    """When auto-negotiation is present, unsetting deletes the auto-negotiation node."""
    interface_filter = """
        <filter>
          <configuration>
            <interfaces>
              <interface>
                <name>ge-0/0/6</name>
              </interface>
            </interfaces>
          </configuration>
        </filter>
    """
    current_config = """
        <interfaces>
          <interface>
            <name>ge-0/0/6</name>
            <ether-options>
              <auto-negotiation/>
            </ether-options>
          </interface>
        </interfaces>
    """
    expected_update = """
        <config>
          <configuration>
            <interfaces>
              <interface>
                <name>ge-0/0/6</name>
                <ether-options>
                  <auto-negotiation operation="delete"/>
                </ether-options>
              </interface>
            </interfaces>
          </configuration>
        </config>
    """
    self.netconf_mock.should_receive("get_config").with_args(
        source="candidate", filter=is_xml(interface_filter)
    ).and_return(a_configuration(current_config))
    self.netconf_mock.should_receive("edit_config").once().with_args(
        target="candidate", config=is_xml(expected_update)
    ).and_return(an_ok_response())

    self.switch.unset_interface_auto_negotiation_state("ge-0/0/6")
def test_unset_interface_auto_negotiation_state_does_nothing_on_default_interface(self):
    """If the port has no config, existence is confirmed via the terse interface listing and no edit is made."""
    self.netconf_mock.should_receive("get_config").with_args(source="candidate", filter=is_xml("""
        <filter>
          <configuration>
            <interfaces>
              <interface>
                <name>ge-0/0/6</name>
              </interface>
            </interfaces>
          </configuration>
        </filter>
    """)).and_return(a_configuration(""))
    # The operational RPC lists ge-0/0/6, proving the port exists even though it is unconfigured.
    self.netconf_mock.should_receive("rpc").with_args(is_xml("""
        <get-interface-information>
          <terse/>
        </get-interface-information>
    """)).and_return(an_rpc_response(textwrap.dedent("""
        <interface-information style="terse">
          <physical-interface>
            <name>
              ge-0/0/6
            </name>
            <admin-status>
              up
            </admin-status>
            <oper-status>
              down
            </oper-status>
          </physical-interface>
        </interface-information>
    """)))
    self.switch.unset_interface_auto_negotiation_state("ge-0/0/6")
def test_unset_interface_auto_negotiation_state_works_when_disabled(self):
    """When no-auto-negotiation is present, unsetting deletes the no-auto-negotiation node."""
    interface_filter = """
        <filter>
          <configuration>
            <interfaces>
              <interface>
                <name>ge-0/0/6</name>
              </interface>
            </interfaces>
          </configuration>
        </filter>
    """
    current_config = """
        <interfaces>
          <interface>
            <name>ge-0/0/6</name>
            <ether-options>
              <no-auto-negotiation/>
            </ether-options>
          </interface>
        </interfaces>
    """
    expected_update = """
        <config>
          <configuration>
            <interfaces>
              <interface>
                <name>ge-0/0/6</name>
                <ether-options>
                  <no-auto-negotiation operation="delete"/>
                </ether-options>
              </interface>
            </interfaces>
          </configuration>
        </config>
    """
    self.netconf_mock.should_receive("get_config").with_args(
        source="candidate", filter=is_xml(interface_filter)
    ).and_return(a_configuration(current_config))
    self.netconf_mock.should_receive("edit_config").once().with_args(
        target="candidate", config=is_xml(expected_update)
    ).and_return(an_ok_response())

    self.switch.unset_interface_auto_negotiation_state("ge-0/0/6")
def test_unset_interface_auto_negotiation_state_raises_on_unknown_interface(self):
    """ge-0/0/99 is absent from both the config and the terse interface listing, so UnknownInterface is raised."""
    self.netconf_mock.should_receive("get_config").with_args(source="candidate", filter=is_xml("""
        <filter>
          <configuration>
            <interfaces>
              <interface>
                <name>ge-0/0/99</name>
              </interface>
            </interfaces>
          </configuration>
        </filter>
    """)).and_return(a_configuration(""))
    # The listing only contains ge-0/0/6, so the requested port does not exist.
    self.netconf_mock.should_receive("rpc").with_args(is_xml("""
        <get-interface-information>
          <terse/>
        </get-interface-information>
    """)).and_return(an_rpc_response(textwrap.dedent("""
        <interface-information style="terse">
          <physical-interface>
            <name>
              ge-0/0/6
            </name>
            <admin-status>
              up
            </admin-status>
            <oper-status>
              down
            </oper-status>
          </physical-interface>
        </interface-information>
    """)))
    with self.assertRaises(UnknownInterface):
        self.switch.unset_interface_auto_negotiation_state("ge-0/0/99")
def test_add_trunk_vlan_on_interface_with_trunk_mode_and_no_vlan_succeeds_easily(self):
    """A trunk with no members just gets the vlan appended; port-mode is already correct and not re-sent."""
    self.netconf_mock.should_receive("get_config").with_args(source="candidate", filter=is_xml("""
        <filter>
          <configuration>
            <interfaces/>
            <vlans/>
          </configuration>
        </filter>
    """)).and_return(a_configuration("""
        <vlans>
          <vlan>
            <name>PATATE</name>
            <vlan-id>1000</vlan-id>
          </vlan>
        </vlans>
        <interfaces>
          <interface>
            <name>ge-0/0/6</name>
            <unit>
              <name>0</name>
              <family>
                <ethernet-switching>
                  <port-mode>trunk</port-mode>
                </ethernet-switching>
              </family>
            </unit>
          </interface>
        </interfaces>
    """))
    self.netconf_mock.should_receive("edit_config").once().with_args(target="candidate", config=is_xml("""
        <config>
          <configuration>
            <interfaces>
              <interface>
                <name>ge-0/0/6</name>
                <unit>
                  <name>0</name>
                  <family>
                    <ethernet-switching>
                      <vlan>
                        <members>1000</members>
                      </vlan>
                    </ethernet-switching>
                  </family>
                </unit>
              </interface>
            </interfaces>
          </configuration>
        </config>
    """)).and_return(an_ok_response())
    self.switch.add_trunk_vlan("ge-0/0/6", 1000)
def test_add_trunk_vlan_on_interface_that_already_has_it_does_nothing(self):
    """Vlan 1000 already falls inside the member range 900-1100, so the operation is a no-op."""
    self.netconf_mock.should_receive("get_config").with_args(source="candidate", filter=is_xml("""
        <filter>
          <configuration>
            <interfaces/>
            <vlans/>
          </configuration>
        </filter>
    """)).and_return(a_configuration("""
        <vlans>
          <vlan>
            <name>PATATE</name>
            <vlan-id>1000</vlan-id>
          </vlan>
        </vlans>
        <interfaces>
          <interface>
            <name>ge-0/0/6</name>
            <unit>
              <name>0</name>
              <family>
                <ethernet-switching>
                  <port-mode>trunk</port-mode>
                  <vlan>
                    <members>900-1100</members>
                  </vlan>
                </ethernet-switching>
              </family>
            </unit>
          </interface>
        </interfaces>
    """))
    self.netconf_mock.should_receive("edit_config").never()
    self.switch.add_trunk_vlan("ge-0/0/6", 1000)
def test_add_trunk_vlan_on_interface_that_has_no_port_mode_and_no_vlan_sets_it(self):
    """With neither a port-mode nor members configured, trunk mode is pushed along with the vlan."""
    self.netconf_mock.should_receive("get_config").with_args(source="candidate", filter=is_xml("""
        <filter>
          <configuration>
            <interfaces/>
            <vlans/>
          </configuration>
        </filter>
    """)).and_return(a_configuration("""
        <vlans>
          <vlan>
            <name>PATATE</name>
            <vlan-id>1000</vlan-id>
          </vlan>
        </vlans>
        <interfaces>
          <interface>
            <name>ge-0/0/6</name>
            <unit>
              <name>0</name>
              <family>
                <ethernet-switching>
                </ethernet-switching>
              </family>
            </unit>
          </interface>
        </interfaces>
    """))
    self.netconf_mock.should_receive("edit_config").once().with_args(target="candidate", config=is_xml("""
        <config>
          <configuration>
            <interfaces>
              <interface>
                <name>ge-0/0/6</name>
                <unit>
                  <name>0</name>
                  <family>
                    <ethernet-switching>
                      <port-mode>trunk</port-mode>
                      <vlan>
                        <members>1000</members>
                      </vlan>
                    </ethernet-switching>
                  </family>
                </unit>
              </interface>
            </interfaces>
          </configuration>
        </config>
    """)).and_return(an_ok_response())
    self.switch.add_trunk_vlan("ge-0/0/6", 1000)
def test_add_trunk_vlan_on_interface_adds_to_the_list(self):
    """Existing members (2000, 2100-2200) are kept; only the new member 1000 is sent in the edit."""
    self.netconf_mock.should_receive("get_config").with_args(source="candidate", filter=is_xml("""
        <filter>
          <configuration>
            <interfaces/>
            <vlans/>
          </configuration>
        </filter>
    """)).and_return(a_configuration("""
        <vlans>
          <vlan>
            <name>PATATE</name>
            <vlan-id>1000</vlan-id>
          </vlan>
        </vlans>
        <interfaces>
          <interface>
            <name>ge-0/0/6</name>
            <unit>
              <name>0</name>
              <family>
                <ethernet-switching>
                  <port-mode>trunk</port-mode>
                  <vlan>
                    <members>2000</members>
                    <members>2100-2200</members>
                  </vlan>
                </ethernet-switching>
              </family>
            </unit>
          </interface>
        </interfaces>
    """))
    self.netconf_mock.should_receive("edit_config").once().with_args(target="candidate", config=is_xml("""
        <config>
          <configuration>
            <interfaces>
              <interface>
                <name>ge-0/0/6</name>
                <unit>
                  <name>0</name>
                  <family>
                    <ethernet-switching>
                      <vlan>
                        <members>1000</members>
                      </vlan>
                    </ethernet-switching>
                  </family>
                </unit>
              </interface>
            </interfaces>
          </configuration>
        </config>
    """)).and_return(an_ok_response())
    self.switch.add_trunk_vlan("ge-0/0/6", 1000)
def test_add_trunk_vlan_on_interface_that_has_no_port_mode_with_a_vlan_assumes_access_mode_and_raises(self):
    """No port-mode but a member already assigned is treated as access mode, so InterfaceInWrongPortMode is raised."""
    self.netconf_mock.should_receive("get_config").with_args(source="candidate", filter=is_xml("""
        <filter>
          <configuration>
            <interfaces/>
            <vlans/>
          </configuration>
        </filter>
    """)).and_return(a_configuration("""
        <vlans>
          <vlan>
            <name>PATATE</name>
            <vlan-id>1000</vlan-id>
          </vlan>
        </vlans>
        <interfaces>
          <interface>
            <name>ge-0/0/6</name>
            <unit>
              <name>0</name>
              <family>
                <ethernet-switching>
                  <vlan>
                    <members>500</members>
                  </vlan>
                </ethernet-switching>
              </family>
            </unit>
          </interface>
        </interfaces>
    """))
    self.netconf_mock.should_receive("edit_config").never()
    with self.assertRaises(InterfaceInWrongPortMode) as expect:
        self.switch.add_trunk_vlan("ge-0/0/6", 1000)
    assert_that(str(expect.exception), contains_string("Operation cannot be performed on a access mode interface"))
def test_add_trunk_vlan_on_interface_in_access_mode_raises(self):
    """Adding a trunk vlan to an explicitly access-mode port raises without writing anything."""
    self.netconf_mock.should_receive("get_config").with_args(source="candidate", filter=is_xml("""
        <filter>
          <configuration>
            <interfaces/>
            <vlans/>
          </configuration>
        </filter>
    """)).and_return(a_configuration("""
        <vlans>
          <vlan>
            <name>PATATE</name>
            <vlan-id>1000</vlan-id>
          </vlan>
        </vlans>
        <interfaces>
          <interface>
            <name>ge-0/0/6</name>
            <unit>
              <name>0</name>
              <family>
                <ethernet-switching>
                  <port-mode>access</port-mode>
                  <vlan>
                    <members>500</members>
                  </vlan>
                </ethernet-switching>
              </family>
            </unit>
          </interface>
        </interfaces>
    """))
    self.netconf_mock.should_receive("edit_config").never()
    with self.assertRaises(InterfaceInWrongPortMode) as expect:
        self.switch.add_trunk_vlan("ge-0/0/6", 1000)
    assert_that(str(expect.exception), contains_string("Operation cannot be performed on a access mode interface"))
def test_add_trunk_vlan_on_unknown_vlan_raises(self):
    """The configuration declares no vlans at all, so vlan 1000 is unknown and nothing is written."""
    self.netconf_mock.should_receive("get_config").with_args(source="candidate", filter=is_xml("""
        <filter>
          <configuration>
            <interfaces/>
            <vlans/>
          </configuration>
        </filter>
    """)).and_return(a_configuration("""
        <interfaces>
          <interface>
            <name>ge-0/0/6</name>
            <unit>
              <name>0</name>
              <family>
                <ethernet-switching>
                  <port-mode>trunk</port-mode>
                </ethernet-switching>
              </family>
            </unit>
          </interface>
        </interfaces>
    """))
    self.netconf_mock.should_receive("edit_config").never()
    with self.assertRaises(UnknownVlan) as expect:
        self.switch.add_trunk_vlan("ge-0/0/6", 1000)
    assert_that(str(expect.exception), contains_string("Vlan 1000 not found"))
def test_add_trunk_vlan_on_unknown_interface_raises(self):
    """The edit is attempted, but the port-out-of-range RPC error is translated into UnknownInterface."""
    self.netconf_mock.should_receive("get_config").with_args(source="candidate", filter=is_xml("""
        <filter>
          <configuration>
            <interfaces/>
            <vlans/>
          </configuration>
        </filter>
    """)).and_return(a_configuration("""
        <vlans>
          <vlan>
            <name>PATATE</name>
            <vlan-id>1000</vlan-id>
          </vlan>
        </vlans>
    """))
    self.netconf_mock.should_receive("edit_config").once().with_args(target="candidate", config=is_xml("""
        <config>
          <configuration>
            <interfaces>
              <interface>
                <name>ge-0/0/99</name>
                <unit>
                  <name>0</name>
                  <family>
                    <ethernet-switching>
                      <port-mode>trunk</port-mode>
                      <vlan>
                        <members>1000</members>
                      </vlan>
                    </ethernet-switching>
                  </family>
                </unit>
              </interface>
            </interfaces>
          </configuration>
        </config>
    """)).and_raise(a_port_value_outside_range_rpc_error())
    with self.assertRaises(UnknownInterface) as expect:
        self.switch.add_trunk_vlan("ge-0/0/99", 1000)
    assert_that(str(expect.exception), contains_string("Unknown interface ge-0/0/99"))
def test_remove_trunk_vlan_removes_the_vlan_members_in_every_possible_way(self):
    """Each member entry covering vlan 1000 is deleted and re-added split around 1000 when it was a range."""
    self.netconf_mock.should_receive("get_config").with_args(source="candidate", filter=is_xml("""
        <filter>
          <configuration>
            <interfaces/>
            <vlans/>
          </configuration>
        </filter>
    """)).and_return(a_configuration("""
        <vlans>
          <vlan>
            <name>VLAN_NAME</name>
            <vlan-id>1000</vlan-id>
          </vlan>
        </vlans>
        <interfaces>
          <interface>
            <name>ge-0/0/6</name>
            <unit>
              <name>0</name>
              <family>
                <ethernet-switching>
                  <port-mode>trunk</port-mode>
                  <vlan>
                    <members>1000</members>
                    <members>1000-1001</members>
                    <members>999-1000</members>
                    <members>999-1001</members>
                    <members>998-1002</members>
                  </vlan>
                </ethernet-switching>
              </family>
            </unit>
          </interface>
        </interfaces>
    """))
    # Expected rewrite, case by case: exact match -> plain delete; range touching
    # 1000 at an edge -> delete + re-add the surviving side; range straddling
    # 1000 -> delete + re-add both sides (single values or sub-ranges).
    self.netconf_mock.should_receive("edit_config").once().with_args(target="candidate", config=is_xml("""
        <config>
          <configuration>
            <interfaces>
              <interface>
                <name>ge-0/0/6</name>
                <unit>
                  <name>0</name>
                  <family>
                    <ethernet-switching>
                      <vlan>
                        <members operation="delete">1000</members>
                        <members operation="delete">1000-1001</members>
                        <members>1001</members>
                        <members operation="delete">999-1000</members>
                        <members>999</members>
                        <members operation="delete">999-1001</members>
                        <members>999</members>
                        <members>1001</members>
                        <members operation="delete">998-1002</members>
                        <members>998-999</members>
                        <members>1001-1002</members>
                      </vlan>
                    </ethernet-switching>
                  </family>
                </unit>
              </interface>
            </interfaces>
          </configuration>
        </config>
    """)).and_return(an_ok_response())
    self.switch.remove_trunk_vlan("ge-0/0/6", 1000)
def test_remove_trunk_vlan_removes_the_vlan_even_if_referenced_by_name(self):
    """Members matching the vlan by id (1000) or by its name (VLAN_NAME) are both deleted; unrelated names stay."""
    self.netconf_mock.should_receive("get_config").with_args(source="candidate", filter=is_xml("""
        <filter>
          <configuration>
            <interfaces/>
            <vlans/>
          </configuration>
        </filter>
    """)).and_return(a_configuration("""
        <vlans>
          <vlan>
            <name>VLAN_NAME</name>
            <vlan-id>1000</vlan-id>
          </vlan>
        </vlans>
        <interfaces>
          <interface>
            <name>ge-0/0/6</name>
            <unit>
              <name>0</name>
              <family>
                <ethernet-switching>
                  <port-mode>trunk</port-mode>
                  <vlan>
                    <members>1000</members>
                    <members>VLAN_NAME</members>
                    <members>SOEMTHING</members>
                  </vlan>
                </ethernet-switching>
              </family>
            </unit>
          </interface>
        </interfaces>
    """))
    self.netconf_mock.should_receive("edit_config").once().with_args(target="candidate", config=is_xml("""
        <config>
          <configuration>
            <interfaces>
              <interface>
                <name>ge-0/0/6</name>
                <unit>
                  <name>0</name>
                  <family>
                    <ethernet-switching>
                      <vlan>
                        <members operation="delete">1000</members>
                        <members operation="delete">VLAN_NAME</members>
                      </vlan>
                    </ethernet-switching>
                  </family>
                </unit>
              </interface>
            </interfaces>
          </configuration>
        </config>
    """)).and_return(an_ok_response())
    self.switch.remove_trunk_vlan("ge-0/0/6", 1000)
def test_remove_trunk_vlan_not_in_members_raises(self):
    """Vlan 1000 sits in the gap between the member ranges 500-999 and 1001-4000, so TrunkVlanNotSet is raised."""
    self.netconf_mock.should_receive("get_config").with_args(source="candidate", filter=is_xml("""
        <filter>
          <configuration>
            <interfaces/>
            <vlans/>
          </configuration>
        </filter>
    """)).and_return(a_configuration("""
        <vlans>
          <vlan>
            <name>VLAN_NAME</name>
            <vlan-id>1000</vlan-id>
          </vlan>
        </vlans>
        <interfaces>
          <interface>
            <name>ge-0/0/6</name>
            <unit>
              <name>0</name>
              <family>
                <ethernet-switching>
                  <port-mode>trunk</port-mode>
                  <vlan>
                    <members>500-999</members>
                    <members>1001-4000</members>
                  </vlan>
                </ethernet-switching>
              </family>
            </unit>
          </interface>
        </interfaces>
    """))
    self.netconf_mock.should_receive("edit_config").never()
    with self.assertRaises(TrunkVlanNotSet) as expect:
        self.switch.remove_trunk_vlan("ge-0/0/6", 1000)
    assert_that(str(expect.exception), contains_string("Trunk Vlan is not set on interface ge-0/0/6"))
def test_remove_trunk_vlan_on_access_with_the_correct_vlan_interface_raises(self):
    """Even when the access port carries the target vlan, removing a trunk vlan from it is a port-mode error."""
    self.netconf_mock.should_receive("get_config").with_args(source="candidate", filter=is_xml("""
        <filter>
          <configuration>
            <interfaces/>
            <vlans/>
          </configuration>
        </filter>
    """)).and_return(a_configuration("""
        <vlans>
          <vlan>
            <name>VLAN_NAME</name>
            <vlan-id>1000</vlan-id>
          </vlan>
        </vlans>
        <interfaces>
          <interface>
            <name>ge-0/0/6</name>
            <unit>
              <name>0</name>
              <family>
                <ethernet-switching>
                  <port-mode>access</port-mode>
                  <vlan>
                    <members>1000</members>
                  </vlan>
                </ethernet-switching>
              </family>
            </unit>
          </interface>
        </interfaces>
    """))
    self.netconf_mock.should_receive("edit_config").never()
    with self.assertRaises(InterfaceInWrongPortMode) as expect:
        self.switch.remove_trunk_vlan("ge-0/0/6", 1000)
    assert_that(str(expect.exception), contains_string("Operation cannot be performed on a access mode interface"))
def test_remove_trunk_vlan_on_no_port_mode_interface_with_the_correct_vlan_raises(self):
    """No explicit port-mode plus an assigned member is treated as access mode, so the removal raises."""
    self.netconf_mock.should_receive("get_config").with_args(source="candidate", filter=is_xml("""
        <filter>
          <configuration>
            <interfaces/>
            <vlans/>
          </configuration>
        </filter>
    """)).and_return(a_configuration("""
        <vlans>
          <vlan>
            <name>VLAN_NAME</name>
            <vlan-id>1000</vlan-id>
          </vlan>
        </vlans>
        <interfaces>
          <interface>
            <name>ge-0/0/6</name>
            <unit>
              <name>0</name>
              <family>
                <ethernet-switching>
                  <vlan>
                    <members>1000</members>
                  </vlan>
                </ethernet-switching>
              </family>
            </unit>
          </interface>
        </interfaces>
    """))
    self.netconf_mock.should_receive("edit_config").never()
    with self.assertRaises(InterfaceInWrongPortMode) as expect:
        self.switch.remove_trunk_vlan("ge-0/0/6", 1000)
    assert_that(str(expect.exception), contains_string("Operation cannot be performed on a access mode interface"))
def test_remove_trunk_vlan_on_unknown_interface_raises(self):
    """The interface is absent from the retrieved config, so UnknownInterface is raised without any edit."""
    self.netconf_mock.should_receive("get_config").with_args(source="candidate", filter=is_xml("""
        <filter>
          <configuration>
            <interfaces/>
            <vlans/>
          </configuration>
        </filter>
    """)).and_return(a_configuration("""
        <vlans>
          <vlan>
            <name>VLAN_NAME</name>
            <vlan-id>1000</vlan-id>
          </vlan>
        </vlans>
    """))
    self.netconf_mock.should_receive("edit_config").never()
    with self.assertRaises(UnknownInterface) as expect:
        self.switch.remove_trunk_vlan("ge-0/0/6", 1000)
    assert_that(str(expect.exception), contains_string("Unknown interface ge-0/0/6"))
def test_set_interface_description_succeeds(self):
    """Setting a description issues a single edit-config on the interface.

    Fix: the expected-config snippet closed the description element with
    </name>, which is malformed XML; it now closes with </description>.
    """
    self.netconf_mock.should_receive("edit_config").once().with_args(target="candidate", config=is_xml("""
        <config>
          <configuration>
            <interfaces>
              <interface>
                <name>ge-0/0/6</name>
                <description>Resistance is futile</description>
              </interface>
            </interfaces>
          </configuration>
        </config>
    """)).and_return(an_ok_response())
    self.switch.set_interface_description("ge-0/0/6", "Resistance is futile")
def test_set_interface_description_on_unkown_interface_raises(self):
    """The device's port-out-of-range RPC error is mapped to UnknownInterface.

    Fix: the expected-config snippet closed the description element with
    </name>, which is malformed XML; it now closes with </description>.
    """
    self.netconf_mock.should_receive("edit_config").once().with_args(target="candidate", config=is_xml("""
        <config>
          <configuration>
            <interfaces>
              <interface>
                <name>ge-0/0/99</name>
                <description>Resistance is futile</description>
              </interface>
            </interfaces>
          </configuration>
        </config>
    """)).and_raise(RPCError(to_ele(textwrap.dedent("""
        <rpc-error xmlns="urn:ietf:params:xml:ns:netconf:base:1.0" xmlns:junos="http://xml.juniper.net/junos/11.4R1/junos" xmlns:nc="urn:ietf:params:xml:ns:netconf:base:1.0">
          <error-severity>error</error-severity>
          <error-message>
            port value outside range 0..47 for '99' in 'ge-0/0/99'
          </error-message>
        </rpc-error>"""))))
    with self.assertRaises(UnknownInterface) as expect:
        self.switch.set_interface_description("ge-0/0/99", "Resistance is futile")
    assert_that(str(expect.exception), contains_string("Unknown interface ge-0/0/99"))
def test_unset_interface_description_succeeds(self):
    """Unsetting a description sends a delete operation on the description node."""
    expected_config = """
        <config>
          <configuration>
            <interfaces>
              <interface>
                <name>ge-0/0/6</name>
                <description operation="delete" />
              </interface>
            </interfaces>
          </configuration>
        </config>
    """
    self.netconf_mock.should_receive("edit_config").once().with_args(
        target="candidate", config=is_xml(expected_config)
    ).and_return(an_ok_response())

    self.switch.unset_interface_description("ge-0/0/6")
def test_unset_interface_description_on_unkown_interface_raises(self):
    """A port-out-of-range error while deleting the description is mapped to UnknownInterface."""
    attempted_config = """
        <config>
          <configuration>
            <interfaces>
              <interface>
                <name>ge-0/0/99</name>
                <description operation="delete" />
              </interface>
            </interfaces>
          </configuration>
        </config>
    """
    device_error = RPCError(to_ele(textwrap.dedent("""
        <rpc-error xmlns="urn:ietf:params:xml:ns:netconf:base:1.0" xmlns:junos="http://xml.juniper.net/junos/11.4R1/junos" xmlns:nc="urn:ietf:params:xml:ns:netconf:base:1.0">
          <error-severity>error</error-severity>
          <error-message>
            port value outside range 0..47 for '99' in 'ge-0/0/99'
          </error-message>
        </rpc-error>""")))
    self.netconf_mock.should_receive("edit_config").once().with_args(
        target="candidate", config=is_xml(attempted_config)
    ).and_raise(device_error)

    with self.assertRaises(UnknownInterface) as expect:
        self.switch.unset_interface_description("ge-0/0/99")
    assert_that(str(expect.exception), contains_string("Unknown interface ge-0/0/99"))
def test_unset_interface_description_on_interface_with_no_description_just_ignores_it(self):
    """A 'statement not found' warning from the device is swallowed: unsetting a missing description is a no-op."""
    attempted_config = """
        <config>
          <configuration>
            <interfaces>
              <interface>
                <name>ge-0/0/99</name>
                <description operation="delete" />
              </interface>
            </interfaces>
          </configuration>
        </config>
    """
    not_found_warning = RPCError(to_ele(textwrap.dedent("""
        <rpc-error xmlns="urn:ietf:params:xml:ns:netconf:base:1.0" xmlns:junos="http://xml.juniper.net/junos/11.4R1/junos" xmlns:nc="urn:ietf:params:xml:ns:netconf:base:1.0">
          <error-severity>warning</error-severity>
          <error-path>[edit interfaces ge-0/0/6]</error-path>
          <error-message>statement not found: description</error-message>
        </rpc-error>""")))
    self.netconf_mock.should_receive("edit_config").once().with_args(
        target="candidate", config=is_xml(attempted_config)
    ).and_raise(not_found_warning)

    self.switch.unset_interface_description("ge-0/0/99")
def test_edit_interface_spanning_tree_enable_edge_from_nothing(self):
    """Enabling edge on an interface with no rstp flags pushes both edge and no-root-port."""
    self.netconf_mock.should_receive("get_config").with_args(source="candidate", filter=is_xml("""
        <filter>
          <configuration>
            <interfaces>
              <interface>
                <name>ge-0/0/6</name>
              </interface>
            </interfaces>
            <protocols>
              <rstp>
                <interface>
                  <name>ge-0/0/6</name>
                </interface>
              </rstp>
            </protocols>
          </configuration>
        </filter>
    """)).and_return(a_configuration("""
        <interfaces>
          <interface>
            <name>ge-0/0/6</name>
          </interface>
        </interfaces>
        <protocols>
          <rstp>
            <interface>
              <name>ge-0/0/6</name>
            </interface>
          </rstp>
        </protocols>
    """))
    self.netconf_mock.should_receive("edit_config").once().with_args(target="candidate", config=is_xml("""
        <config>
          <configuration>
            <protocols>
              <rstp>
                <interface>
                  <name>ge-0/0/6</name>
                  <edge />
                  <no-root-port />
                </interface>
              </rstp>
            </protocols>
          </configuration>
        </config>
    """)).and_return(an_ok_response())
    self.switch.edit_interface_spanning_tree('ge-0/0/6', edge=True)
def test_edit_interface_spanning_tree_enable_edge_when_all_is_already_set(self):
    """Both edge and no-root-port already present: enabling edge is a no-op."""
    self.netconf_mock.should_receive("get_config").with_args(source="candidate", filter=is_xml("""
        <filter>
          <configuration>
            <interfaces>
              <interface>
                <name>ge-0/0/6</name>
              </interface>
            </interfaces>
            <protocols>
              <rstp>
                <interface>
                  <name>ge-0/0/6</name>
                </interface>
              </rstp>
            </protocols>
          </configuration>
        </filter>
    """)).and_return(a_configuration("""
        <interfaces>
          <interface>
            <name>ge-0/0/6</name>
          </interface>
        </interfaces>
        <protocols>
          <rstp>
            <interface>
              <name>ge-0/0/6</name>
              <edge/>
              <no-root-port/>
            </interface>
          </rstp>
        </protocols>
    """))
    self.netconf_mock.should_receive("edit_config").never()
    self.switch.edit_interface_spanning_tree('ge-0/0/6', edge=True)
def test_edit_interface_spanning_tree_enable_edge_when_only_edge_is_already_set(self):
    """Edge already present: only the missing no-root-port flag is pushed."""
    self.netconf_mock.should_receive("get_config").with_args(source="candidate", filter=is_xml("""
        <filter>
          <configuration>
            <interfaces>
              <interface>
                <name>ge-0/0/6</name>
              </interface>
            </interfaces>
            <protocols>
              <rstp>
                <interface>
                  <name>ge-0/0/6</name>
                </interface>
              </rstp>
            </protocols>
          </configuration>
        </filter>
    """)).and_return(a_configuration("""
        <interfaces>
          <interface>
            <name>ge-0/0/6</name>
          </interface>
        </interfaces>
        <protocols>
          <rstp>
            <interface>
              <name>ge-0/0/6</name>
              <edge/>
            </interface>
          </rstp>
        </protocols>
    """))
    self.netconf_mock.should_receive("edit_config").once().with_args(target="candidate", config=is_xml("""
        <config>
          <configuration>
            <protocols>
              <rstp>
                <interface>
                  <name>ge-0/0/6</name>
                  <no-root-port />
                </interface>
              </rstp>
            </protocols>
          </configuration>
        </config>
    """)).and_return(an_ok_response())
    self.switch.edit_interface_spanning_tree('ge-0/0/6', edge=True)
def test_edit_interface_spanning_tree_enable_edge_when_only_no_root_port_is_already_set(self):
    """no-root-port already present: only the missing edge flag is pushed."""
    self.netconf_mock.should_receive("get_config").with_args(source="candidate", filter=is_xml("""
        <filter>
          <configuration>
            <interfaces>
              <interface>
                <name>ge-0/0/6</name>
              </interface>
            </interfaces>
            <protocols>
              <rstp>
                <interface>
                  <name>ge-0/0/6</name>
                </interface>
              </rstp>
            </protocols>
          </configuration>
        </filter>
    """)).and_return(a_configuration("""
        <interfaces>
          <interface>
            <name>ge-0/0/6</name>
          </interface>
        </interfaces>
        <protocols>
          <rstp>
            <interface>
              <name>ge-0/0/6</name>
              <no-root-port />
            </interface>
          </rstp>
        </protocols>
    """))
    self.netconf_mock.should_receive("edit_config").once().with_args(target="candidate", config=is_xml("""
        <config>
          <configuration>
            <protocols>
              <rstp>
                <interface>
                  <name>ge-0/0/6</name>
                  <edge />
                </interface>
              </rstp>
            </protocols>
          </configuration>
        </config>
    """)).and_return(an_ok_response())
    self.switch.edit_interface_spanning_tree('ge-0/0/6', edge=True)
def test_edit_interface_spanning_tree_disable_edge_when_all_is_set(self):
    """Disabling edge deletes both the edge and no-root-port flags when both are present."""
    self.netconf_mock.should_receive("get_config").with_args(source="candidate", filter=is_xml("""
        <filter>
          <configuration>
            <interfaces>
              <interface>
                <name>ge-0/0/6</name>
              </interface>
            </interfaces>
            <protocols>
              <rstp>
                <interface>
                  <name>ge-0/0/6</name>
                </interface>
              </rstp>
            </protocols>
          </configuration>
        </filter>
    """)).and_return(a_configuration("""
        <interfaces>
          <interface>
            <name>ge-0/0/6</name>
          </interface>
        </interfaces>
        <protocols>
          <rstp>
            <interface>
              <name>ge-0/0/6</name>
              <edge/>
              <no-root-port/>
            </interface>
          </rstp>
        </protocols>
    """))
    self.netconf_mock.should_receive("edit_config").once().with_args(target="candidate", config=is_xml("""
        <config>
          <configuration>
            <protocols>
              <rstp>
                <interface>
                  <name>ge-0/0/6</name>
                  <edge operation="delete" />
                  <no-root-port operation="delete" />
                </interface>
              </rstp>
            </protocols>
          </configuration>
        </config>
    """)).and_return(an_ok_response())
    self.switch.edit_interface_spanning_tree('ge-0/0/6', edge=False)
    def test_edit_interface_spanning_tree_disable_edge_when_all_is_only_edge_is_set(self):
        """Disabling edge when only <edge/> exists deletes just that flag."""
        self.netconf_mock.should_receive("get_config").with_args(source="candidate", filter=is_xml("""
            <filter>
                <configuration>
                    <interfaces>
                        <interface>
                            <name>ge-0/0/6</name>
                        </interface>
                    </interfaces>
                    <protocols>
                        <rstp>
                            <interface>
                                <name>ge-0/0/6</name>
                            </interface>
                        </rstp>
                    </protocols>
                </configuration>
            </filter>
        """)).and_return(a_configuration("""
            <interfaces>
                <interface>
                    <name>ge-0/0/6</name>
                </interface>
            </interfaces>
            <protocols>
                <rstp>
                    <interface>
                        <name>ge-0/0/6</name>
                        <edge/>
                    </interface>
                </rstp>
            </protocols>
        """))
        self.netconf_mock.should_receive("edit_config").once().with_args(target="candidate", config=is_xml("""
            <config>
                <configuration>
                    <protocols>
                        <rstp>
                            <interface>
                                <name>ge-0/0/6</name>
                                <edge operation="delete" />
                            </interface>
                        </rstp>
                    </protocols>
                </configuration>
            </config>
        """)).and_return(an_ok_response())
        self.switch.edit_interface_spanning_tree('ge-0/0/6', edge=False)
    def test_edit_interface_spanning_tree_disable_edge_when_all_is_only_no_root_port_is_set(self):
        """Disabling edge when only <no-root-port/> exists deletes just that flag."""
        self.netconf_mock.should_receive("get_config").with_args(source="candidate", filter=is_xml("""
            <filter>
                <configuration>
                    <interfaces>
                        <interface>
                            <name>ge-0/0/6</name>
                        </interface>
                    </interfaces>
                    <protocols>
                        <rstp>
                            <interface>
                                <name>ge-0/0/6</name>
                            </interface>
                        </rstp>
                    </protocols>
                </configuration>
            </filter>
        """)).and_return(a_configuration("""
            <interfaces>
                <interface>
                    <name>ge-0/0/6</name>
                </interface>
            </interfaces>
            <protocols>
                <rstp>
                    <interface>
                        <name>ge-0/0/6</name>
                        <no-root-port />
                    </interface>
                </rstp>
            </protocols>
        """))
        self.netconf_mock.should_receive("edit_config").once().with_args(target="candidate", config=is_xml("""
            <config>
                <configuration>
                    <protocols>
                        <rstp>
                            <interface>
                                <name>ge-0/0/6</name>
                                <no-root-port operation="delete" />
                            </interface>
                        </rstp>
                    </protocols>
                </configuration>
            </config>
        """)).and_return(an_ok_response())
        self.switch.edit_interface_spanning_tree('ge-0/0/6', edge=False)
    def test_edit_interface_spanning_tree_disable_edge_when_nothing_is_set(self):
        """Disabling edge when no rstp flags exist must not send any edit_config at all."""
        self.netconf_mock.should_receive("get_config").with_args(source="candidate", filter=is_xml("""
            <filter>
                <configuration>
                    <interfaces>
                        <interface>
                            <name>ge-0/0/6</name>
                        </interface>
                    </interfaces>
                    <protocols>
                        <rstp>
                            <interface>
                                <name>ge-0/0/6</name>
                            </interface>
                        </rstp>
                    </protocols>
                </configuration>
            </filter>
        """)).and_return(a_configuration("""
            <interfaces>
                <interface>
                    <name>ge-0/0/6</name>
                </interface>
            </interfaces>
            <protocols>
                <rstp>
                    <interface>
                        <name>ge-0/0/6</name>
                    </interface>
                </rstp>
            </protocols>
        """))
        self.netconf_mock.should_receive("edit_config").never()
        self.switch.edit_interface_spanning_tree('ge-0/0/6', edge=False)
    def test_edit_interface_spanning_tree_unknown_interface(self):
        """A 'port value outside range' RPCError is translated into UnknownInterface."""
        self.netconf_mock.should_receive("get_config").with_args(source="candidate", filter=is_xml("""
            <filter>
                <configuration>
                    <interfaces>
                        <interface>
                            <name>ge-0/0/99</name>
                        </interface>
                    </interfaces>
                    <protocols>
                        <rstp>
                            <interface>
                                <name>ge-0/0/99</name>
                            </interface>
                        </rstp>
                    </protocols>
                </configuration>
            </filter>
        """)).and_return(a_configuration(""))
        self.netconf_mock.should_receive("edit_config").once().with_args(target="candidate", config=is_xml("""
            <config>
                <configuration>
                    <protocols>
                        <rstp>
                            <interface>
                                <name>ge-0/0/99</name>
                                <edge />
                                <no-root-port />
                            </interface>
                        </rstp>
                    </protocols>
                </configuration>
            </config>
        """)).and_raise(RPCError(to_ele(textwrap.dedent("""
            <rpc-error xmlns="urn:ietf:params:xml:ns:netconf:base:1.0" xmlns:junos="http://xml.juniper.net/junos/11.4R1/junos" xmlns:nc="urn:ietf:params:xml:ns:netconf:base:1.0">
            <error-severity>error</error-severity>
            <error-message>
                port value outside range 0..47 for '99' in 'ge-0/0/99'
            </error-message>
            </rpc-error>"""))))
        with self.assertRaises(UnknownInterface) as expect:
            self.switch.edit_interface_spanning_tree('ge-0/0/99', edge=True)
        assert_that(str(expect.exception), contains_string("Unknown interface ge-0/0/99"))
    def test_set_interface_state_to_on_succeeds(self):
        """Setting state to ON deletes the <disable/> statement from the interface."""
        self.netconf_mock.should_receive("edit_config").once().with_args(target="candidate", config=is_xml("""
            <config>
                <configuration>
                    <interfaces>
                        <interface>
                            <name>ge-0/0/6</name>
                            <disable operation="delete" />
                        </interface>
                    </interfaces>
                </configuration>
            </config>
        """)).and_return(an_ok_response())
        self.switch.set_interface_state("ge-0/0/6", ON)
    def test_set_interface_state_to_off_succeeds(self):
        """Setting state to OFF adds a <disable/> statement on the interface."""
        self.netconf_mock.should_receive("edit_config").once().with_args(target="candidate", config=is_xml("""
            <config>
                <configuration>
                    <interfaces>
                        <interface>
                            <name>ge-0/0/6</name>
                            <disable />
                        </interface>
                    </interfaces>
                </configuration>
            </config>
        """)).and_return(an_ok_response())
        self.switch.set_interface_state("ge-0/0/6", OFF)
    def test_unset_interface_state_succeeds(self):
        """unset_interface_state deletes <disable/> (same payload as setting ON)."""
        self.netconf_mock.should_receive("edit_config").once().with_args(target="candidate", config=is_xml("""
            <config>
                <configuration>
                    <interfaces>
                        <interface>
                            <name>ge-0/0/6</name>
                            <disable operation="delete" />
                        </interface>
                    </interfaces>
                </configuration>
            </config>
        """)).and_return(an_ok_response())
        self.switch.unset_interface_state("ge-0/0/6")
    def test_unset_interface_state_raises_on_unknown_interface(self):
        """unset_interface_state maps a 'port value outside range' RPCError to UnknownInterface."""
        self.netconf_mock.should_receive("edit_config").once().with_args(target="candidate", config=is_xml("""
            <config>
                <configuration>
                    <interfaces>
                        <interface>
                            <name>ge-0/0/99</name>
                            <disable operation="delete" />
                        </interface>
                    </interfaces>
                </configuration>
            </config>
        """)).and_raise(RPCError(to_ele(textwrap.dedent("""
            <rpc-error xmlns="urn:ietf:params:xml:ns:netconf:base:1.0" xmlns:junos="http://xml.juniper.net/junos/11.4R1/junos" xmlns:nc="urn:ietf:params:xml:ns:netconf:base:1.0">
            <error-severity>error</error-severity>
            <error-message>
                port value outside range 0..47 for '99' in 'ge-0/0/99'
            </error-message>
            </rpc-error>"""))))
        with self.assertRaises(UnknownInterface) as expect:
            self.switch.unset_interface_state("ge-0/0/99")
        assert_that(str(expect.exception), contains_string("Unknown interface ge-0/0/99"))
    def test_unset_interface_state_without_disabled(self):
        """A 'statement not found' warning (no <disable/> to delete) is tolerated silently."""
        self.netconf_mock.should_receive("edit_config").once().with_args(target="candidate", config=is_xml("""
            <config>
                <configuration>
                    <interfaces>
                        <interface>
                            <name>ge-0/0/6</name>
                            <disable operation="delete" />
                        </interface>
                    </interfaces>
                </configuration>
            </config>
        """)).and_raise(RPCError(to_ele(textwrap.dedent("""
            <rpc-error xmlns="urn:ietf:params:xml:ns:netconf:base:1.0" xmlns:junos="http://xml.juniper.net/junos/11.4R1/junos" xmlns:nc="urn:ietf:params:xml:ns:netconf:base:1.0">
            <error-severity>warning</error-severity>
            <error-path>[edit interfaces ge-0/0/6]</error-path>
            <error-message>statement not found: </error-message>
            </rpc-error>"""))))
        self.switch.unset_interface_state("ge-0/0/6")
    def test_set_interface_state_to_on_unknown_interface_raises(self):
        """Setting ON on a nonexistent port raises UnknownInterface from the RPCError."""
        self.netconf_mock.should_receive("edit_config").once().with_args(target="candidate", config=is_xml("""
            <config>
                <configuration>
                    <interfaces>
                        <interface>
                            <name>ge-0/0/99</name>
                            <disable operation="delete"/>
                        </interface>
                    </interfaces>
                </configuration>
            </config>
        """)).and_raise(RPCError(to_ele(textwrap.dedent("""
            <rpc-error xmlns="urn:ietf:params:xml:ns:netconf:base:1.0" xmlns:junos="http://xml.juniper.net/junos/11.4R1/junos" xmlns:nc="urn:ietf:params:xml:ns:netconf:base:1.0">
            <error-severity>error</error-severity>
            <error-message>
                port value outside range 0..47 for '99' in 'ge-0/0/99'
            </error-message>
            </rpc-error>"""))))
        with self.assertRaises(UnknownInterface) as expect:
            self.switch.set_interface_state("ge-0/0/99", ON)
        assert_that(str(expect.exception), contains_string("Unknown interface ge-0/0/99"))
    def test_set_interface_state_to_off_unknown_interface_raises(self):
        """Setting OFF on a nonexistent port raises UnknownInterface from the RPCError."""
        self.netconf_mock.should_receive("edit_config").once().with_args(target="candidate", config=is_xml("""
            <config>
                <configuration>
                    <interfaces>
                        <interface>
                            <name>ge-0/0/99</name>
                            <disable />
                        </interface>
                    </interfaces>
                </configuration>
            </config>
        """)).and_raise(RPCError(to_ele(textwrap.dedent("""
            <rpc-error xmlns="urn:ietf:params:xml:ns:netconf:base:1.0" xmlns:junos="http://xml.juniper.net/junos/11.4R1/junos" xmlns:nc="urn:ietf:params:xml:ns:netconf:base:1.0">
            <error-severity>error</error-severity>
            <error-message>
                port value outside range 0..47 for '99' in 'ge-0/0/99'
            </error-message>
            </rpc-error>"""))))
        with self.assertRaises(UnknownInterface) as expect:
            self.switch.set_interface_state("ge-0/0/99", OFF)
        assert_that(str(expect.exception), contains_string("Unknown interface ge-0/0/99"))
    def test_add_bond(self):
        """add_bond creates aeN with default LACP options (active, slow periodic)."""
        self.netconf_mock.should_receive("get_config").with_args(source="candidate", filter=is_xml("""
            <filter>
                <configuration>
                    <interfaces>
                        <interface>
                            <name>ae6</name>
                        </interface>
                    </interfaces>
                </configuration>
            </filter>
        """)).and_return(a_configuration(""))
        self.netconf_mock.should_receive("edit_config").once().with_args(target="candidate", config=is_xml("""
            <config>
                <configuration>
                    <interfaces>
                        <interface>
                            <name>ae6</name>
                            <aggregated-ether-options>
                                <lacp>
                                    <active/>
                                    <periodic>slow</periodic>
                                </lacp>
                            </aggregated-ether-options>
                        </interface>
                    </interfaces>
                </configuration>
            </config>
        """)).and_return(an_ok_response())
        self.switch.add_bond(6)
    def test_add_bond_already_created_raises(self):
        """add_bond on an existing aeN raises BondAlreadyExist and sends no edit_config."""
        self.netconf_mock.should_receive("get_config").with_args(source="candidate", filter=is_xml("""
            <filter>
                <configuration>
                    <interfaces>
                        <interface>
                            <name>ae6</name>
                        </interface>
                    </interfaces>
                </configuration>
            </filter>
        """)).and_return(a_configuration("""
            <interfaces>
                <interface>
                    <name>ae6</name>
                </interface>
            </interfaces>
        """))
        self.netconf_mock.should_receive("edit_config").never()
        with self.assertRaises(BondAlreadyExist) as expect:
            self.switch.add_bond(6)
        assert_that(str(expect.exception), equal_to("Bond 6 already exists"))
    def test_add_bond_bad_bond_number(self):
        """A 'device value outside range' RPCError is translated into BadBondNumber."""
        self.netconf_mock.should_receive("get_config").with_args(source="candidate", filter=is_xml("""
            <filter>
                <configuration>
                    <interfaces>
                        <interface>
                            <name>ae9000</name>
                        </interface>
                    </interfaces>
                </configuration>
            </filter>
        """)).and_return(a_configuration(""))
        self.netconf_mock.should_receive("edit_config").once().with_args(target="candidate", config=is_xml("""
            <config>
                <configuration>
                    <interfaces>
                        <interface>
                            <name>ae9000</name>
                            <aggregated-ether-options>
                                <lacp>
                                    <active/>
                                    <periodic>slow</periodic>
                                </lacp>
                            </aggregated-ether-options>
                        </interface>
                    </interfaces>
                </configuration>
            </config>
        """)).and_raise(RPCError(to_ele(textwrap.dedent("""
            <rpc-error xmlns="urn:ietf:params:xml:ns:netconf:base:1.0" xmlns:junos="http://xml.juniper.net/junos/11.4R1/junos" xmlns:nc="urn:ietf:params:xml:ns:netconf:base:1.0">
            <error-severity>error</error-severity>
            <error-message>
                device value outside range 0..31 for '9000' in 'ae9000'
            </error-message>
            </rpc-error>
        """))))
        with self.assertRaises(BadBondNumber) as expect:
            self.switch.add_bond(9000)
        assert_that(str(expect.exception), equal_to("Bond number is invalid"))
    def test_remove_bond(self):
        """remove_bond deletes the aeN interface; no rstp cleanup when none is configured."""
        self.netconf_mock.should_receive("get_config").with_args(source="candidate", filter=is_xml("""
            <filter>
                <configuration>
                    <interfaces/>
                    <protocols>
                        <rstp>
                            <interface>
                                <name>ae10</name>
                            </interface>
                        </rstp>
                    </protocols>
                </configuration>
            </filter>
        """)).and_return(a_configuration("""
            <interfaces>
                <interface>
                    <name>ae10</name>
                </interface>
                <interface>
                    <name>ge-4/3/3</name>
                </interface>
            </interfaces>
        """))
        self.netconf_mock.should_receive("edit_config").once().with_args(target="candidate", config=is_xml("""
            <config>
                <configuration>
                    <interfaces>
                        <interface operation="delete">
                            <name>ae10</name>
                        </interface>
                    </interfaces>
                </configuration>
            </config>
        """)).and_return(an_ok_response())
        self.switch.remove_bond(10)
    def test_remove_bond_also_removes_rstp_protocol(self):
        """remove_bond also deletes the bond's rstp protocol entry when one exists."""
        self.netconf_mock.should_receive("get_config").with_args(source="candidate", filter=is_xml("""
            <filter>
                <configuration>
                    <interfaces/>
                    <protocols>
                        <rstp>
                            <interface>
                                <name>ae10</name>
                            </interface>
                        </rstp>
                    </protocols>
                </configuration>
            </filter>
        """)).and_return(a_configuration("""
            <interfaces>
                <interface>
                    <name>ae10</name>
                </interface>
                <interface>
                    <name>ge-4/3/3</name>
                </interface>
            </interfaces>
            <protocols>
                <rstp>
                    <interface>
                        <name>ae10</name>
                        <edge/>
                        <no-root-port/>
                    </interface>
                </rstp>
            </protocols>
        """))
        self.netconf_mock.should_receive("edit_config").once().with_args(target="candidate", config=is_xml("""
            <config>
                <configuration>
                    <interfaces>
                        <interface operation="delete">
                            <name>ae10</name>
                        </interface>
                    </interfaces>
                    <protocols>
                        <rstp>
                            <interface operation="delete">
                                <name>ae10</name>
                            </interface>
                        </rstp>
                    </protocols>
                </configuration>
            </config>
        """)).and_return(an_ok_response())
        self.switch.remove_bond(10)
def test_remove_bond_invalid_number_raises(self):
self.netconf_mock.should_receive("get_config").with_args(source="candidate", filter=is_xml("""
<filter>
<configuration>
<interfaces/>
<protocols>
<rstp>
<interface>
<name>ae7</name>
</interface>
</rstp>
</protocols>
</configuration>
</filter>
""")).and_return(a_configuration())
with self.assertRaises(UnknownBond) as expect:
self.switch.remove_bond(007)
assert_that(str(expect.exception), equal_to("Bond 7 not found"))
    def test_remove_bond_delete_slaves_and_interface_at_same_time(self):
        """remove_bond detaches slave members (802.3ad bundle) in the same edit as the bond delete."""
        self.netconf_mock.should_receive("get_config").with_args(source="candidate", filter=is_xml("""
            <filter>
                <configuration>
                    <interfaces />
                    <protocols>
                        <rstp>
                            <interface>
                                <name>ae10</name>
                            </interface>
                        </rstp>
                    </protocols>
                </configuration>
            </filter>
        """)).and_return(a_configuration("""
            <interfaces>
                <interface>
                    <name>ae10</name>
                </interface>
                <interface>
                    <name>ge-0/0/1</name>
                    <ether-options>
                        <ieee-802.3ad>
                            <bundle>ae10</bundle>
                        </ieee-802.3ad>
                    </ether-options>
                </interface>
                <interface>
                    <name>ge-0/0/2</name>
                </interface>
            </interfaces>
        """))
        self.netconf_mock.should_receive("edit_config").once().with_args(target="candidate", config=is_xml("""
            <config>
                <configuration>
                    <interfaces>
                        <interface operation="delete">
                            <name>ae10</name>
                        </interface>
                        <interface>
                            <name>ge-0/0/1</name>
                            <ether-options>
                                <ieee-802.3ad operation="delete" />
                            </ether-options>
                        </interface>
                    </interfaces>
                </configuration>
            </config>""")).and_return(an_ok_response())
        self.switch.remove_bond(10)
    def test_add_interface_to_bond(self):
        """add_interface_to_bond replaces the member interface config with an 802.3ad bundle ref."""
        self.netconf_mock.should_receive("get_config").with_args(source="candidate", filter=is_xml("""
            <filter>
                <configuration>
                    <interfaces/>
                    <vlans/>
                    <protocols>
                        <rstp>
                            <interface />
                        </rstp>
                    </protocols>
                </configuration>
            </filter>
        """)).and_return(a_configuration("""
            <interfaces>
                <interface>
                    <name>ae10</name>
                </interface>
                <interface>
                    <name>ge-0/0/1</name>
                </interface>
            </interfaces>
            <vlans/>
        """))
        self.netconf_mock.should_receive("edit_config").once().with_args(target="candidate", config=is_xml("""
            <config>
                <configuration>
                    <interfaces>
                        <interface operation="replace">
                            <name>ge-0/0/1</name>
                            <ether-options>
                                <ieee-802.3ad>
                                    <bundle>ae10</bundle>
                                </ieee-802.3ad>
                            </ether-options>
                        </interface>
                    </interfaces>
                </configuration>
            </config>""")).and_return(an_ok_response())
        self.switch.add_interface_to_bond('ge-0/0/1', 10)
    def test_add_interface_to_bond_gets_up_to_speed_and_removes_existing_rstp_protocol(self):
        """Joining a bond with a link-speed forces the member's speed and drops its rstp entry."""
        self.netconf_mock.should_receive("get_config").with_args(source="candidate", filter=is_xml("""
            <filter>
                <configuration>
                    <interfaces/>
                    <vlans/>
                    <protocols>
                        <rstp>
                            <interface />
                        </rstp>
                    </protocols>
                </configuration>
            </filter>
        """)).and_return(a_configuration("""
            <interfaces>
                <interface>
                    <name>ae10</name>
                    <aggregated-ether-options>
                        <link-speed>1g</link-speed>
                    </aggregated-ether-options>
                </interface>
                <interface>
                    <name>ge-0/0/1</name>
                </interface>
            </interfaces>
            <vlans/>
            <protocols>
                <rstp>
                    <interface>
                        <name>ge-0/0/1</name>
                        <edge />
                    </interface>
                </rstp>
            </protocols>
        """))
        self.netconf_mock.should_receive("edit_config").once().with_args(target="candidate", config=is_xml("""
            <config>
                <configuration>
                    <interfaces>
                        <interface operation="replace">
                            <name>ge-0/0/1</name>
                            <ether-options>
                                <ieee-802.3ad>
                                    <bundle>ae10</bundle>
                                </ieee-802.3ad>
                                <speed>
                                    <ethernet-1g/>
                                </speed>
                            </ether-options>
                        </interface>
                    </interfaces>
                    <protocols>
                        <rstp>
                            <interface operation="delete">
                                <name>ge-0/0/1</name>
                            </interface>
                        </rstp>
                    </protocols>
                </configuration>
            </config>""")).and_return(an_ok_response())
        self.switch.add_interface_to_bond('ge-0/0/1', 10)
    def test_add_interface_to_bond_without_bond(self):
        """add_interface_to_bond raises UnknownBond when the target aeN is not configured."""
        self.netconf_mock.should_receive("get_config").with_args(source="candidate", filter=is_xml("""
            <filter>
                <configuration>
                    <interfaces/>
                    <vlans/>
                    <protocols>
                        <rstp>
                            <interface />
                        </rstp>
                    </protocols>
                </configuration>
            </filter>
        """)).and_return(a_configuration("""
            <interfaces>
                <interface>
                    <name>ge-0/0/1</name>
                </interface>
            </interfaces>
            <vlans/>
        """))
        with self.assertRaises(UnknownBond):
            self.switch.add_interface_to_bond('ge-0/0/1', 10)
    def test_add_interface_to_bond_without_interface(self):
        """add_interface_to_bond maps a 'port value outside range' RPCError to UnknownInterface."""
        self.netconf_mock.should_receive("get_config").with_args(source="candidate", filter=is_xml("""
            <filter>
                <configuration>
                    <interfaces/>
                    <vlans/>
                    <protocols>
                        <rstp>
                            <interface />
                        </rstp>
                    </protocols>
                </configuration>
            </filter>
        """)).and_return(a_configuration("""
            <interfaces>
                <interface>
                    <name>ae10</name>
                </interface>
            </interfaces>
            <vlans/>
        """))
        self.netconf_mock.should_receive("edit_config").once().with_args(target="candidate", config=is_xml("""
            <config>
                <configuration>
                    <interfaces>
                        <interface operation="replace">
                            <name>ge-0/0/99</name>
                            <ether-options>
                                <ieee-802.3ad>
                                    <bundle>ae10</bundle>
                                </ieee-802.3ad>
                            </ether-options>
                        </interface>
                    </interfaces>
                </configuration>
            </config>""")).and_raise(
                RPCError(to_ele(textwrap.dedent("""
        <rpc-error xmlns="urn:ietf:params:xml:ns:netconf:base:1.0" xmlns:junos="http://xml.juniper.net/junos/11.4R1/junos" xmlns:nc="urn:ietf:params:xml:ns:netconf:base:1.0">
            <error-severity>error</error-severity>
            <error-message>
                port value outside range 0..47 for '99' in 'ge-0/0/99'
            </error-message>
            </rpc-error>"""))))
        with self.assertRaises(UnknownInterface):
            self.switch.add_interface_to_bond('ge-0/0/99', 10)
    def test_remove_interface_from_bond(self):
        """remove_interface_from_bond deletes the member's ieee-802.3ad bundle reference."""
        self.netconf_mock.should_receive("edit_config").once().with_args(target="candidate", config=is_xml("""
            <config>
                <configuration>
                    <interfaces>
                        <interface>
                            <name>ge-0/0/1</name>
                            <ether-options>
                                <ieee-802.3ad operation="delete" />
                            </ether-options>
                        </interface>
                    </interfaces>
                </configuration>
            </config>""")).and_return(an_ok_response())
        self.switch.remove_interface_from_bond('ge-0/0/1')
    def test_remove_interface_from_bond_not_in_bond(self):
        """'statement not found' on a port that DOES exist (per terse listing) raises InterfaceNotInBond."""
        self.netconf_mock.should_receive("edit_config").once().with_args(target="candidate", config=is_xml("""
            <config>
                <configuration>
                    <interfaces>
                        <interface>
                            <name>ge-0/0/1</name>
                            <ether-options>
                                <ieee-802.3ad operation="delete" />
                            </ether-options>
                        </interface>
                    </interfaces>
                </configuration>
            </config>""")).and_raise(
                RPCError(to_ele(textwrap.dedent("""
            <rpc-error xmlns="urn:ietf:params:xml:ns:netconf:base:1.0" xmlns:junos="http://xml.juniper.net/junos/11.4R1/junos" xmlns:nc="urn:ietf:params:xml:ns:netconf:base:1.0">
            <error-severity>error</error-severity>
            <error-message>
                statement not found: 802.3ad
            </error-message>
            </rpc-error>"""))))
        self.netconf_mock.should_receive("rpc").with_args(is_xml("""
            <get-interface-information>
              <terse/>
            </get-interface-information>
        """)).and_return(an_rpc_response(textwrap.dedent("""
            <interface-information style="terse">
              <physical-interface>
                <name>
                  ge-0/0/1
                </name>
                <admin-status>
                  up
                </admin-status>
                <oper-status>
                  down
                </oper-status>
              </physical-interface>
            </interface-information>
        """)))
        with self.assertRaises(InterfaceNotInBond):
            self.switch.remove_interface_from_bond('ge-0/0/1')
    def test_remove_interface_from_bond_unknown_interface_raises(self):
        """'statement not found' on a port ABSENT from the terse listing raises UnknownInterface."""
        self.netconf_mock.should_receive("edit_config").once().with_args(target="candidate", config=is_xml("""
            <config>
                <configuration>
                    <interfaces>
                        <interface>
                            <name>ge-0/0/27</name>
                            <ether-options>
                                <ieee-802.3ad operation="delete" />
                            </ether-options>
                        </interface>
                    </interfaces>
                </configuration>
            </config>""")).and_raise(
                RPCError(to_ele(textwrap.dedent("""
            <rpc-error xmlns="urn:ietf:params:xml:ns:netconf:base:1.0" xmlns:junos="http://xml.juniper.net/junos/11.4R1/junos" xmlns:nc="urn:ietf:params:xml:ns:netconf:base:1.0">
            <error-severity>error</error-severity>
            <error-message>
                statement not found: 802.3ad
            </error-message>
            </rpc-error>"""))))
        self.netconf_mock.should_receive("rpc").with_args(is_xml("""
            <get-interface-information>
              <terse/>
            </get-interface-information>
        """)).and_return(an_rpc_response(textwrap.dedent("""
            <interface-information style="terse">
              <physical-interface>
                <name>
                  ge-0/0/1
                </name>
                <admin-status>
                  up
                </admin-status>
                <oper-status>
                  down
                </oper-status>
              </physical-interface>
            </interface-information>
        """)))
        with self.assertRaises(UnknownInterface):
            self.switch.remove_interface_from_bond('ge-0/0/27')
def test_change_bond_speed_update_slaves_and_interface_at_same_time(self):
self.netconf_mock.should_receive("get_config").with_args(source="candidate", filter=is_xml("""
<filter>
<configuration>
<interfaces />
</configuration>
</filter>
""")).and_return(a_configuration("""
<interfaces>
<interface>
<name>ae10</name>
</interface>
<interface>
<name>ge-0/0/1</name>
<ether-options>
<ieee-802.3ad>
<bundle>ae10</bundle>
</ieee-802.3ad>
</ether-options>
</interface>
<interface>
<name>ge-0/0/2</name>
</interface>
</interfaces>
"""))
self.netconf_mock.should_receive("edit_config").once().with_args(target="candidate", config=is_xml("""
<config>
<configuration>
<interfaces>
<interface>
<name>ae10</name>
<aggregated-ether-options>
<link-speed>1g</link-speed>
</ether-options>
</interface>
<interface>
<name>ge-0/0/1</name>
<ether-options>
<speed>
<ethernet-1g/>
</speed>
</ether-options>
</interface>
</interfaces>
</configuration>
</config>""")).and_return(an_ok_response())
self.switch.set_bond_link_speed(10, '1g')
    def test_change_bond_speed_on_unknown_bond(self):
        """set_bond_link_speed raises UnknownBond when the bond number is not configured."""
        self.netconf_mock.should_receive("get_config").with_args(source="candidate", filter=is_xml("""
            <filter>
                <configuration>
                    <interfaces />
                </configuration>
            </filter>
        """)).and_return(a_configuration("""
            <interfaces>
                <interface>
                    <name>ae10</name>
                </interface>
                <interface>
                    <name>ge-0/0/1</name>
                    <ether-options>
                        <ieee-802.3ad>
                            <bundle>ae10</bundle>
                        </ieee-802.3ad>
                    </ether-options>
                </interface>
                <interface>
                    <name>ge-0/0/2</name>
                </interface>
            </interfaces>
        """))
        with self.assertRaises(UnknownBond):
            self.switch.set_bond_link_speed(20, '1g')
    def test_get_bond(self):
        """get_bond parses speed, trunk mode, vlans (ranges expanded), native vlan and members."""
        # Outside a transaction the driver must read from "running", not "candidate".
        self.switch.in_transaction = False
        self.netconf_mock.should_receive("get_config").with_args(source="running", filter=is_xml("""
            <filter>
                <configuration>
                    <interfaces/>
                    <vlans/>
                </configuration>
            </filter>
        """)).and_return(a_configuration("""
            <interfaces>
                <interface>
                    <name>ae3</name>
                    <aggregated-ether-options>
                        <link-speed>1g</link-speed>
                        <lacp>
                            <active/>
                            <periodic>slow</periodic>
                        </lacp>
                    </aggregated-ether-options>
                    <unit>
                        <name>0</name>
                        <family>
                            <ethernet-switching>
                                <port-mode>trunk</port-mode>
                                <vlan>
                                    <members>999-1001</members>
                                    <members>1000</members>
                                </vlan>
                                <native-vlan-id>2000</native-vlan-id>
                            </ethernet-switching>
                        </family>
                    </unit>
                </interface>
                <interface>
                    <name>ge-1/0/1</name>
                    <ether-options>
                        <speed>
                            <ethernet-100m/>
                        </speed>
                        <ieee-802.3ad>
                            <bundle>ae3</bundle>
                        </ieee-802.3ad>
                    </ether-options>
                </interface>
            </interfaces>
        """))
        if3 = self.switch.get_bond(3)
        assert_that(if3.number, equal_to(3))
        assert_that(if3.link_speed, equal_to('1g'))
        assert_that(if3.port_mode, equal_to(TRUNK))
        assert_that(if3.access_vlan, equal_to(None))
        assert_that(if3.trunk_native_vlan, equal_to(2000))
        assert_that(if3.trunk_vlans, equal_to([999, 1000, 1001]))
        assert_that(if3.members, equal_to(['ge-1/0/1']))
    def test_get_unknown_bond(self):
        """get_bond raises UnknownBond on an empty configuration (reads candidate in transaction)."""
        self.switch.in_transaction = True
        self.netconf_mock.should_receive("get_config").with_args(source="candidate", filter=is_xml("""
            <filter>
                <configuration>
                    <interfaces/>
                    <vlans/>
                </configuration>
            </filter>
        """)).and_return(a_configuration(""))
        with self.assertRaises(UnknownBond):
            self.switch.get_bond(3)
    def test_get_bonds(self):
        """get_bonds returns only aeN interfaces (not ge-* or vlan), fully parsed."""
        self.switch.in_transaction = False
        self.netconf_mock.should_receive("get_config").with_args(source="running", filter=is_xml("""
            <filter>
                <configuration>
                    <interfaces/>
                    <vlans/>
                </configuration>
            </filter>
        """)).and_return(a_configuration("""
            <interfaces>
                <interface>
                    <name>ae1</name>
                    <aggregated-ether-options>
                        <lacp>
                            <active/>
                            <periodic>slow</periodic>
                        </lacp>
                    </aggregated-ether-options>
                    <unit>
                        <name>0</name>
                        <family>
                            <ethernet-switching>
                            </ethernet-switching>
                        </family>
                    </unit>
                </interface>
                <interface>
                    <name>ae2</name>
                    <disable />
                    <description>Howdy</description>
                    <mtu>5000</mtu>
                    <aggregated-ether-options>
                        <link-speed>10g</link-speed>
                        <lacp>
                            <active/>
                            <periodic>slow</periodic>
                        </lacp>
                    </aggregated-ether-options>
                    <unit>
                        <name>0</name>
                        <family>
                            <ethernet-switching>
                                <vlan>
                                    <members>1000</members>
                                </vlan>
                            </ethernet-switching>
                        </family>
                    </unit>
                </interface>
                <interface>
                    <name>ae3</name>
                    <aggregated-ether-options>
                        <link-speed>1g</link-speed>
                        <lacp>
                            <active/>
                            <periodic>slow</periodic>
                        </lacp>
                    </aggregated-ether-options>
                    <unit>
                        <name>0</name>
                        <family>
                            <ethernet-switching>
                                <port-mode>trunk</port-mode>
                                <vlan>
                                    <members>999-1001</members>
                                    <members>1000</members>
                                </vlan>
                                <native-vlan-id>2000</native-vlan-id>
                            </ethernet-switching>
                        </family>
                    </unit>
                </interface>
                <interface>
                    <name>ge-0/0/4</name>
                    <unit>
                        <name>0</name>
                        <family>
                            <ethernet-switching>
                                <port-mode>trunk</port-mode>
                            </ethernet-switching>
                        </family>
                    </unit>
                </interface>
                <interface>
                    <name>ge-1/0/1</name>
                    <ether-options>
                        <speed>
                            <ethernet-100m/>
                        </speed>
                        <ieee-802.3ad>
                            <bundle>ae3</bundle>
                        </ieee-802.3ad>
                    </ether-options>
                </interface>
                <interface>
                    <name>vlan</name>
                    <unit>
                        <name>40</name>
                    </unit>
                </interface>
            </interfaces>
        """))
        if1, if2, if3 = self.switch.get_bonds()
        assert_that(if1.number, equal_to(1))
        assert_that(if1.link_speed, equal_to(None))
        assert_that(if1.shutdown, equal_to(False))
        assert_that(if1.port_mode, equal_to(ACCESS))
        assert_that(if1.access_vlan, equal_to(None))
        assert_that(if1.trunk_native_vlan, equal_to(None))
        assert_that(if1.trunk_vlans, equal_to([]))
        assert_that(if1.mtu, equal_to(None))
        assert_that(if1.members, equal_to([]))
        assert_that(if2.number, equal_to(2))
        assert_that(if2.link_speed, equal_to('10g'))
        assert_that(if2.shutdown, equal_to(True))
        assert_that(if2.port_mode, equal_to(ACCESS))
        assert_that(if2.access_vlan, equal_to(1000))
        assert_that(if2.trunk_native_vlan, equal_to(None))
        assert_that(if2.trunk_vlans, equal_to([]))
        assert_that(if2.mtu, equal_to(5000))
        assert_that(if2.members, equal_to([]))
        assert_that(if3.number, equal_to(3))
        assert_that(if3.link_speed, equal_to('1g'))
        assert_that(if3.port_mode, equal_to(TRUNK))
        assert_that(if3.access_vlan, equal_to(None))
        assert_that(if3.trunk_native_vlan, equal_to(2000))
        assert_that(if3.trunk_vlans, equal_to([999, 1000, 1001]))
        assert_that(if3.members, equal_to(['ge-1/0/1']))
    def test_set_interface_lldp_state_from_nothing(self):
        """Enabling lldp with no prior lldp config creates the interface entry under <lldp>."""
        self.netconf_mock.should_receive("get_config").with_args(source="candidate", filter=is_xml("""
            <filter>
                <configuration>
                    <interfaces>
                        <interface>
                            <name>ge-0/0/6</name>
                        </interface>
                    </interfaces>
                    <protocols>
                        <lldp>
                            <interface>
                                <name>ge-0/0/6</name>
                            </interface>
                        </lldp>
                    </protocols>
                </configuration>
            </filter>
        """)).and_return(a_configuration("""
            <interfaces>
                <interface>
                    <name>ge-0/0/6</name>
                </interface>
            </interfaces>
        """))
        self.netconf_mock.should_receive("edit_config").once().with_args(target="candidate", config=is_xml("""
            <config>
                <configuration>
                    <protocols>
                        <lldp>
                            <interface>
                                <name>ge-0/0/6</name>
                            </interface>
                        </lldp>
                    </protocols>
                </configuration>
            </config>
        """)).and_return(an_ok_response())
        self.switch.set_interface_lldp_state('ge-0/0/6', True)
    def test_set_interface_lldp_state_from_default_interface(self):
        """Enabling lldp works even when the interface itself has no explicit config."""
        self.netconf_mock.should_receive("get_config").with_args(source="candidate", filter=is_xml("""
            <filter>
                <configuration>
                    <interfaces>
                        <interface>
                            <name>ge-0/0/6</name>
                        </interface>
                    </interfaces>
                    <protocols>
                        <lldp>
                            <interface>
                                <name>ge-0/0/6</name>
                            </interface>
                        </lldp>
                    </protocols>
                </configuration>
            </filter>
        """)).and_return(a_configuration("""
            <interfaces/>
        """))
        self.netconf_mock.should_receive("edit_config").once().with_args(target="candidate", config=is_xml("""
            <config>
                <configuration>
                    <protocols>
                        <lldp>
                            <interface>
                                <name>ge-0/0/6</name>
                            </interface>
                        </lldp>
                    </protocols>
                </configuration>
            </config>
        """)).and_return(an_ok_response())
        self.switch.set_interface_lldp_state('ge-0/0/6', True)
    def test_set_interface_lldp_state_from_unknown_interface_raises(self):
        """Enabling lldp on a nonexistent port raises UnknownInterface from the RPC error."""
        self.netconf_mock.should_receive("get_config").with_args(source="candidate", filter=is_xml("""
            <filter>
                <configuration>
                    <interfaces>
                        <interface>
                            <name>ge-0/0/99</name>
                        </interface>
                    </interfaces>
                    <protocols>
                        <lldp>
                            <interface>
                                <name>ge-0/0/99</name>
                            </interface>
                        </lldp>
                    </protocols>
                </configuration>
            </filter>
        """)).and_return(a_configuration("""
            <interfaces/>
        """))
        self.netconf_mock.should_receive("edit_config").once().with_args(target="candidate", config=is_xml("""
            <config>
                <configuration>
                    <protocols>
                        <lldp>
                            <interface>
                                <name>ge-0/0/99</name>
                            </interface>
                        </lldp>
                    </protocols>
                </configuration>
            </config>
        """)).and_raise(a_port_value_outside_range_rpc_error())
        with self.assertRaises(UnknownInterface) as expect:
            self.switch.set_interface_lldp_state('ge-0/0/99', True)
        assert_that(str(expect.exception), contains_string("Unknown interface ge-0/0/99"))
    def test_set_interface_lldp_state_when_disabled(self):
        """Enabling lldp on an interface with <disable/> deletes that disable statement."""
        self.netconf_mock.should_receive("get_config").with_args(source="candidate", filter=is_xml("""
            <filter>
                <configuration>
                    <interfaces>
                        <interface>
                            <name>ge-0/0/6</name>
                        </interface>
                    </interfaces>
                    <protocols>
                        <lldp>
                            <interface>
                                <name>ge-0/0/6</name>
                            </interface>
                        </lldp>
                    </protocols>
                </configuration>
            </filter>
        """)).and_return(a_configuration("""
            <interfaces>
                <interface>
                    <name>ge-0/0/6</name>
                </interface>
            </interfaces>
            <protocols>
                <lldp>
                    <interface>
                        <name>ge-0/0/6</name>
                        <disable/>
                    </interface>
                </lldp>
            </protocols>
        """))
        self.netconf_mock.should_receive("edit_config").once().with_args(target="candidate", config=is_xml("""
            <config>
                <configuration>
                    <protocols>
                        <lldp>
                            <interface>
                                <name>ge-0/0/6</name>
                                <disable operation="delete"/>
                            </interface>
                        </lldp>
                    </protocols>
                </configuration>
            </config>
        """)).and_return(an_ok_response())
        self.switch.set_interface_lldp_state('ge-0/0/6', True)
    def test_disable_lldp_when_disabled(self):
        """Disabling lldp when it is already disabled must send no edit_config."""
        self.netconf_mock.should_receive("get_config").with_args(source="candidate", filter=is_xml("""
            <filter>
                <configuration>
                    <interfaces>
                        <interface>
                            <name>ge-0/0/6</name>
                        </interface>
                    </interfaces>
                    <protocols>
                        <lldp>
                            <interface>
                                <name>ge-0/0/6</name>
                            </interface>
                        </lldp>
                    </protocols>
                </configuration>
            </filter>
        """)).and_return(a_configuration("""
            <interfaces>
                <interface>
                    <name>ge-0/0/6</name>
                </interface>
            </interfaces>
            <protocols>
                <lldp>
                    <interface>
                        <name>ge-0/0/6</name>
                        <disable/>
                    </interface>
                </lldp>
            </protocols>
        """))
        self.netconf_mock.should_receive("edit_config").never()
        self.switch.set_interface_lldp_state('ge-0/0/6', False)
    def test_disable_lldp_when_enabled(self):
        """Disabling lldp on an interface with no lldp config adds a <disable/> statement."""
        self.netconf_mock.should_receive("get_config").with_args(source="candidate", filter=is_xml("""
            <filter>
                <configuration>
                    <interfaces>
                        <interface>
                            <name>ge-0/0/6</name>
                        </interface>
                    </interfaces>
                    <protocols>
                        <lldp>
                            <interface>
                                <name>ge-0/0/6</name>
                            </interface>
                        </lldp>
                    </protocols>
                </configuration>
            </filter>
        """)).and_return(a_configuration("""
            <interfaces>
                <interface>
                    <name>ge-0/0/6</name>
                </interface>
            </interfaces>
        """))
        self.netconf_mock.should_receive("edit_config").once().with_args(target="candidate", config=is_xml("""
            <config>
                <configuration>
                    <protocols>
                        <lldp>
                            <interface>
                                <name>ge-0/0/6</name>
                                <disable />
                            </interface>
                        </lldp>
                    </protocols>
                </configuration>
            </config>
        """)).and_return(an_ok_response())
        self.switch.set_interface_lldp_state('ge-0/0/6', False)
    def test_set_interface_mtu_success(self):
        """set_interface_mtu pushes an <mtu> element on the interface."""
        self.netconf_mock.should_receive("edit_config").once().with_args(target="candidate", config=is_xml("""
            <config>
                <configuration>
                    <interfaces>
                        <interface>
                            <name>ge-0/0/6</name>
                            <mtu>5000</mtu>
                        </interface>
                    </interfaces>
                </configuration>
            </config>
        """)).and_return(an_ok_response())
        self.switch.set_interface_mtu('ge-0/0/6', 5000)
def test_set_interface_mtu_wrong_value_raises(self):
    """An out-of-range MTU maps the Junos RPC error onto InvalidMtuSize."""
    self.netconf_mock.should_receive("edit_config").once().with_args(target="candidate", config=is_xml("""
<config>
<configuration>
<interfaces>
<interface>
<name>ge-0/0/6</name>
<mtu>100</mtu>
</interface>
</interfaces>
</configuration>
</config>
""")).and_raise(RPCError(to_ele(textwrap.dedent("""
<rpc-error xmlns="urn:ietf:params:xml:ns:netconf:base:1.0" xmlns:junos="http://xml.juniper.net/junos/11.4R1/junos" xmlns:nc="urn:ietf:params:xml:ns:netconf:base:1.0">
<error-severity>error</error-severity>
<error-message>
Value 100 is not within range (256..9216)
</error-message>
</rpc-error>"""))))
    with self.assertRaises(InvalidMtuSize) as expect:
        self.switch.set_interface_mtu('ge-0/0/6', 100)
    # The device's own message must be surfaced to the caller.
    assert_that(str(expect.exception), contains_string("Value 100 is not within range (256..9216)"))
def test_set_interface_mtu_unknown_interface_raises(self):
    """An out-of-range port number maps the RPC error onto UnknownInterface."""
    # Consistency: reuse the module-level a_port_value_outside_range_rpc_error()
    # fixture instead of duplicating the identical <rpc-error> XML inline.
    self.netconf_mock.should_receive("edit_config").once().with_args(target="candidate", config=is_xml("""
<config>
<configuration>
<interfaces>
<interface>
<name>ge-0/0/99</name>
<mtu>5000</mtu>
</interface>
</interfaces>
</configuration>
</config>
""")).and_raise(a_port_value_outside_range_rpc_error())
    with self.assertRaises(UnknownInterface):
        self.switch.set_interface_mtu('ge-0/0/99', 5000)
def test_unset_interface_mtu_success(self):
    """Unsetting the MTU emits an <mtu operation="delete"/> edit_config."""
    self.netconf_mock.should_receive("edit_config").once().with_args(target="candidate", config=is_xml("""
<config>
<configuration>
<interfaces>
<interface>
<name>ge-0/0/6</name>
<mtu operation="delete" />
</interface>
</interfaces>
</configuration>
</config>
""")).and_return(an_ok_response())
    self.switch.unset_interface_mtu('ge-0/0/6')
def test_unset_interface_mtu_unknown_intercace_raises(self):
    """Unsetting the MTU of a non-existent port raises UnknownInterface.

    NOTE(review): the method name contains a typo ("intercace"); it is kept
    as-is because unittest discovers tests by name.
    """
    # Consistency: reuse the module-level a_port_value_outside_range_rpc_error()
    # fixture instead of duplicating the identical <rpc-error> XML inline.
    self.netconf_mock.should_receive("edit_config").once().with_args(target="candidate", config=is_xml("""
<config>
<configuration>
<interfaces>
<interface>
<name>ge-0/0/99</name>
<mtu operation="delete" />
</interface>
</interfaces>
</configuration>
</config>
""")).and_raise(a_port_value_outside_range_rpc_error())
    with self.assertRaises(UnknownInterface):
        self.switch.unset_interface_mtu('ge-0/0/99')
def test_bond_port_mode_access(self):
    """set_bond_access_mode(6) delegates to set_access_mode('ae6')."""
    switch = juniper.standard.netconf(SwitchDescriptor(model='', hostname=''))
    switch.set_access_mode = mock.Mock()
    switch.set_bond_access_mode(6)
    switch.set_access_mode.assert_called_with('ae6')
def test_bond_port_mode_trunk(self):
    """set_bond_trunk_mode(6) delegates to set_trunk_mode('ae6')."""
    switch = juniper.standard.netconf(SwitchDescriptor(model='', hostname=''))
    switch.set_trunk_mode = mock.Mock()
    switch.set_bond_trunk_mode(6)
    switch.set_trunk_mode.assert_called_with('ae6')
def test_set_bond_description_succeeds(self):
    """set_bond_description delegates to set_interface_description on 'aeN'."""
    switch = juniper.standard.netconf(SwitchDescriptor(model='', hostname=''))
    switch.set_interface_description = mock.Mock()
    switch.set_bond_description(6, "Resistance is futile")
    switch.set_interface_description.assert_called_with('ae6', "Resistance is futile")
def test_unset_bond_description_succeeds(self):
    """unset_bond_description delegates to unset_interface_description on 'aeN'."""
    switch = juniper.standard.netconf(SwitchDescriptor(model='', hostname=''))
    switch.unset_interface_description = mock.Mock()
    switch.unset_bond_description(6)
    switch.unset_interface_description.assert_called_with('ae6')
def test_set_bond_mtu_succeeds(self):
    """set_bond_mtu delegates to set_interface_mtu on 'aeN'."""
    switch = juniper.standard.netconf(SwitchDescriptor(model='', hostname=''))
    switch.set_interface_mtu = mock.Mock()
    switch.set_bond_mtu(6, 5000)
    switch.set_interface_mtu.assert_called_with('ae6', 5000)
def test_unset_bond_mtu_succeeds(self):
    """unset_bond_mtu delegates to unset_interface_mtu on 'aeN'."""
    switch = juniper.standard.netconf(SwitchDescriptor(model='', hostname=''))
    switch.unset_interface_mtu = mock.Mock()
    switch.unset_bond_mtu(6)
    switch.unset_interface_mtu.assert_called_with('ae6')
def test_add_bond_trunk_vlan(self):
    """add_bond_trunk_vlan delegates to add_trunk_vlan on 'aeN'."""
    switch = juniper.standard.netconf(SwitchDescriptor(model='', hostname=''))
    switch.add_trunk_vlan = mock.Mock()
    switch.add_bond_trunk_vlan(6, 1000)
    switch.add_trunk_vlan.assert_called_with('ae6', 1000)
def test_remove_bond_trunk_vlan(self):
    """remove_bond_trunk_vlan delegates to remove_trunk_vlan on 'aeN'."""
    switch = juniper.standard.netconf(SwitchDescriptor(model='', hostname=''))
    switch.remove_trunk_vlan = mock.Mock()
    switch.remove_bond_trunk_vlan(6, 1000)
    switch.remove_trunk_vlan.assert_called_with('ae6', 1000)
def test_set_bond_native_vlan(self):
    """set_bond_native_vlan delegates to set_interface_native_vlan on 'aeN'."""
    switch = juniper.standard.netconf(SwitchDescriptor(model='', hostname=''))
    switch.set_interface_native_vlan = mock.Mock()
    switch.set_bond_native_vlan(6, 1000)
    switch.set_interface_native_vlan.assert_called_with('ae6', 1000)
def test_unset_bond_native_vlan(self):
    """unset_bond_native_vlan delegates to unset_interface_native_vlan on 'aeN'."""
    switch = juniper.standard.netconf(SwitchDescriptor(model='', hostname=''))
    switch.unset_interface_native_vlan = mock.Mock()
    switch.unset_bond_native_vlan(6)
    switch.unset_interface_native_vlan.assert_called_with('ae6')
def test_edit_bond_spanning_tree(self):
    """edit_bond_spanning_tree forwards keyword args to edit_interface_spanning_tree."""
    switch = juniper.standard.netconf(SwitchDescriptor(model='', hostname=''))
    switch.edit_interface_spanning_tree = mock.Mock()
    switch.edit_bond_spanning_tree(6, edge=False)
    switch.edit_interface_spanning_tree.assert_called_with('ae6', edge=False)
@mock.patch("ncclient.manager.connect")
def test_connect(self, connect_mock):
    """connect() passes descriptor credentials, port and timeout to ncclient."""
    connect_mock.return_value = self.netconf_mock
    self.netconf_mock._session = mock.Mock()
    self.switch = Juniper(
        SwitchDescriptor(model='juniper', hostname="toto", username="tutu", password="titi", port=8000),
        custom_strategies=JuniperCustomStrategies(), timeout=120)
    self.switch.connect()
    connect_mock.assert_called_with(
        host="toto",
        username="tutu",
        password="titi",
        hostkey_verify=False,
        device_params={'name':'junos'},
        port=8000,
        timeout=120
    )
@mock.patch("ncclient.manager.connect")
def test_connect_without_port_uses_default(self, connect_mock):
    """Without an explicit port, connect() omits the port kwarg entirely."""
    connect_mock.return_value = self.netconf_mock
    self.netconf_mock._session = mock.Mock()
    self.switch = Juniper(
        SwitchDescriptor(model='juniper', hostname="toto", username="tutu", password="titi"),
        custom_strategies=JuniperCustomStrategies(), timeout=120)
    self.switch.connect()
    # No "port=" here: ncclient's own default is expected to apply.
    connect_mock.assert_called_with(
        host="toto",
        username="tutu",
        password="titi",
        hostkey_verify=False,
        device_params={'name':'junos'},
        timeout=120
    )
def test_disconnect(self):
    """disconnect() closes the netconf session exactly once."""
    self.netconf_mock.should_receive("close_session").once().ordered()
    self.switch.disconnect()
def test_disconnect_doesnt_fail_if_close_session_does(self):
    """A timeout while closing the session is swallowed by disconnect()."""
    self.netconf_mock.should_receive("close_session").once().ordered().and_raise(TimeoutExpiredError)
    self.switch.disconnect()
def test_start_transaction_locks_the_candidate(self):
    """start_transaction() takes the netconf lock on the candidate datastore."""
    self.netconf_mock.should_receive("lock").with_args(target="candidate").once().ordered()
    self.switch.start_transaction()
def test_start_transaction_fails_discard_changes_and_retries(self):
    """A 'database modified' lock failure triggers discard_changes and one retry."""
    self.netconf_mock.should_receive("lock").with_args(target="candidate").twice()\
        .and_raise(RPCError(to_ele(textwrap.dedent("""
<rpc-error xmlns="urn:ietf:params:xml:ns:netconf:base:1.0" xmlns:junos="http://xml.juniper.net/junos/11.4R1/junos" xmlns:nc="urn:ietf:params:xml:ns:netconf:base:1.0">
<error-severity>error</error-severity>
<error-message>
configuration database modified
</error-message>
<database-status-information>
<database-status>
<user>admin</user>
<terminal>p0</terminal>
<pid>9511</pid>
<start-time junos:seconds="1416432176">2014-11-19 16:22:56 EST</start-time>
<idle-time junos:seconds="197">00:03:17</idle-time>
<edit-path>[edit]</edit-path>
</database-status>
</database-status-information>
</rpc-error>"""))))\
        .and_return()
    # The stale candidate edits must be discarded before re-acquiring the lock.
    self.netconf_mock.should_receive("discard_changes").with_args().once().and_return(an_ok_response())
    self.switch.start_transaction()
def test_start_transaction_locking_fails_already_in_use_raises(self):
    """A 'database is already open' lock failure maps to LockedSwitch."""
    self.netconf_mock.should_receive("lock").with_args(target="candidate").once().ordered().and_raise(RPCError(to_ele(textwrap.dedent("""
<rpc-error xmlns="urn:ietf:params:xml:ns:netconf:base:1.0" xmlns:junos="http://xml.juniper.net/junos/11.4R1/junos" xmlns:nc="urn:ietf:params:xml:ns:netconf:base:1.0">
<error-severity>error</error-severity>
<error-message>
Configuration database is already open
</error-message>
</rpc-error>
"""))))
    with self.assertRaises(LockedSwitch) as expect:
        self.switch.start_transaction()
    assert_that(str(expect.exception), equal_to("Switch is locked and can't be modified"))
def test_start_transaction_locking_fails_of_unknown_reason_raises(self):
    """Unrecognized lock failures are re-raised as the original RPCError."""
    self.netconf_mock.should_receive("lock").with_args(target="candidate").once().ordered().and_raise(RPCError(to_ele(textwrap.dedent("""
<rpc-error xmlns="urn:ietf:params:xml:ns:netconf:base:1.0" xmlns:junos="http://xml.juniper.net/junos/11.4R1/junos" xmlns:nc="urn:ietf:params:xml:ns:netconf:base:1.0">
<error-severity>error</error-severity>
<error-message>
Whatever right?
</error-message>
</rpc-error>
"""))))
    with self.assertRaises(RPCError) as expect:
        self.switch.start_transaction()
    assert_that(str(expect.exception), contains_string("Whatever right?"))
def test_end_transaction(self):
    """end_transaction() releases the candidate datastore lock."""
    self.netconf_mock.should_receive("unlock").with_args(target="candidate").once().ordered()
    self.switch.end_transaction()
def test_commit_succeeds(self):
    """commit_transaction() issues a netconf commit."""
    self.netconf_mock.should_receive("commit").with_args().once().ordered()
    self.switch.commit_transaction()
def test_commit_transaction_failing_to_commit_discard_changes_and_raises(self):
    """A failed commit surfaces as OperationNotCompleted with the device message."""
    self.netconf_mock.should_receive("commit").with_args().once().ordered().and_raise(RPCError(to_ele(textwrap.dedent("""
<rpc-error xmlns="urn:ietf:params:xml:ns:netconf:base:1.0" xmlns:junos="http://xml.juniper.net/junos/11.4R1/junos" xmlns:nc="urn:ietf:params:xml:ns:netconf:base:1.0">
<error-severity>error</error-severity>
<source-daemon>
eswd
</source-daemon>
<error-message>
tag value 1000 is being used by more than one vlan <VLAN1000> and <SOMETHINGELSE>
</error-message>
</rpc-error>
"""))))
    with self.assertRaises(OperationNotCompleted) as expect:
        self.switch.commit_transaction()
    assert_that(str(expect.exception), equal_to("An error occured while completing operation, no modifications have been applied : tag value 1000 is being used by more than one vlan <VLAN1000> and <SOMETHINGELSE>"))
def test_rollback_succeeds(self):
    """rollback_transaction() discards the pending candidate changes."""
    self.netconf_mock.should_receive("discard_changes").with_args().once().ordered()
    self.switch.rollback_transaction()
def a_configuration(inner_data=""):
    """Wrap *inner_data* in a <data><configuration> get-config reply."""
    template = """
<data>
<configuration>{}</configuration>
</data>
"""
    return an_rpc_response(template.format(inner_data))
def an_ok_response():
    """A bare <ok/> reply, as returned for a successful edit_config."""
    body = textwrap.dedent("""
<ok/>
""")
    return an_rpc_response(body)
def an_rpc_response(data):
    """Wrap *data* in an <rpc-reply> and parse it with the Junos reply transform."""
    return NCElement(textwrap.dedent("""
<rpc-reply message-id="urn:uuid:34c41736-bed3-11e4-8c40-7c05070fe456">
{}
</rpc-reply>""".format(data)), JunosDeviceHandler(None).transform_reply())
def a_port_value_outside_range_rpc_error():
    """The RPCError Junos emits when a port index exceeds the chassis range."""
    return RPCError(to_ele(textwrap.dedent("""
<rpc-error xmlns="urn:ietf:params:xml:ns:netconf:base:1.0" xmlns:junos="http://xml.juniper.net/junos/11.4R1/junos" xmlns:nc="urn:ietf:params:xml:ns:netconf:base:1.0">
<error-severity>error</error-severity>
<error-message>
port value outside range 0..63 for '99' in 'ge-0/0/99'
</error-message>
</rpc-error>""")))
def is_xml(string):
    """Flexmock argument matcher comparing XML structurally, not textually."""
    return IsXmlFlexmockArgMatcher(string)
class IsXmlFlexmockArgMatcher(object):
    """Equality shim for flexmock: matches XML by structure and content.

    ``__eq__`` parses the other side (string or element), recursively compares
    tags, text, child counts and attributes, and logs a diff when they differ.
    """
    def __init__(self, expected):
        self.expected = to_ele(expected)

    def __eq__(self, other):
        # ``basestring`` keeps this Python-2 compatible (str + unicode inputs).
        otherxml = other if not isinstance(other, basestring) else to_ele(other)
        try:
            self.compare(self.expected, otherxml)
            return True
        except AssertionError as e:
            # Mismatches are logged rather than raised so flexmock can report
            # a plain "argument did not match".
            logging.warning("Given XML : \n" + to_xml(otherxml, pretty_print=True) +
                            "\n\ndiffers from expected : \n" + to_xml(self.expected, pretty_print=True) +
                            "Because : " + str(e))
            return False

    def compare(self, expected, actual):
        """Recursively assert that *actual* mirrors *expected* child by child."""
        for i, node in enumerate(expected):
            assert_that(node.tag, equal_to(unqualify(actual[i].tag)))
            # Same number of children and attributes on both sides.
            assert_that(node, has_length(len(actual[i])))
            assert_that(actual[i].attrib, has_length(len(node.attrib)))
            if node.text is not None:
                # Whitespace-only text counts the same as no text at all.
                if node.text.strip() == "":
                    assert_that(actual[i].text is None or actual[i].text.strip() == "")
                else:
                    assert_that(actual[i].text is not None, "Node is " + node.tag)
                    assert_that(node.text.strip(), equal_to(actual[i].text.strip()))
            for name, value in node.attrib.items():
                assert_that(actual[i].attrib, has_key(name))
                assert_that(actual[i].attrib[name], equal_to(value))
            self.compare(node, actual[i])
def unqualify(tag):
    """Strip the Clark-notation namespace ('{uri}local' -> 'local') from *tag*.

    The pattern is a raw string: the previous plain literal relied on Python
    passing unknown escapes like ``\{`` through, which emits a
    DeprecationWarning / SyntaxWarning on modern Python 3.
    """
    return re.sub(r"\{[^\}]*\}", "", tag)
| {
"content_hash": "dcbc877afdbfa2bf698530313dbfff80",
"timestamp": "",
"source": "github",
"line_count": 6612,
"max_line_length": 219,
"avg_line_length": 36.117816091954026,
"alnum_prop": 0.4347664052325898,
"repo_name": "idjaw/netman",
"id": "2011317c2d42a405fd95c8c1f635618c881d50d1",
"size": "239385",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/adapters/switches/juniper_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "7048"
},
{
"name": "Python",
"bytes": "1335541"
}
],
"symlink_target": ""
} |
from pysmt.operators import new_node_type

# Register one fresh pySMT node type per LTL operator
# (X, F, G, U: future-time; Y, Z, O, H, S: past-time operators).
LTL_X = new_node_type(node_str="LTL_X")
LTL_Y = new_node_type(node_str="LTL_Y")
LTL_Z = new_node_type(node_str="LTL_Z")
LTL_F = new_node_type(node_str="LTL_F")
LTL_G = new_node_type(node_str="LTL_G")
LTL_O = new_node_type(node_str="LTL_O")
LTL_H = new_node_type(node_str="LTL_H")
LTL_U = new_node_type(node_str="LTL_U")
LTL_S = new_node_type(node_str="LTL_S")
# All LTL node types, used when registering walker handlers in bulk.
ALL_LTL = (LTL_X, LTL_Y, LTL_Z,
           LTL_F, LTL_G, LTL_O, LTL_H,
           LTL_U, LTL_S)
# The FormulaManager needs to be extended to be able to use these
# operators. Notice that additional checks, and some simplifications
# can be performed at construction time. We keep this example simple.
import pysmt.environment
import pysmt.formula
class FormulaManager(pysmt.formula.FormulaManager):
    """Extension of FormulaManager to handle LTL Operators."""

    # Unary temporal operators: each wraps a single sub-formula.
    def X(self, formula):
        return self.create_node(node_type=LTL_X, args=(formula,))

    def Y(self, formula):
        return self.create_node(node_type=LTL_Y, args=(formula,))

    def Z(self, formula):
        return self.create_node(node_type=LTL_Z, args=(formula,))

    def F(self, formula):
        return self.create_node(node_type=LTL_F, args=(formula,))

    def G(self, formula):
        return self.create_node(node_type=LTL_G, args=(formula,))

    def O(self, formula):
        return self.create_node(node_type=LTL_O, args=(formula,))

    def H(self, formula):
        return self.create_node(node_type=LTL_H, args=(formula,))

    # Binary temporal operators: left/right sub-formulae.
    def S(self, left, right):
        return self.create_node(node_type=LTL_S, args=(left, right))

    def U(self, left, right):
        return self.create_node(node_type=LTL_U, args=(left, right))

    # We can also add syntactic sugar, by creating constructors that
    # are not mapped directly to a new node type. For example X^n:
    def Xn(self, n, formula):
        """Apply the X (next) operator *n* times to *formula*."""
        X_ = self.X
        res = formula
        for _ in range(n):
            res = X_(res)
        return res
#
# For the system to work, we need to extend a few walkers.
#
# The first extension is the TypeChecker. The typechecker provides
# several convenience methods for many types of operators. In this
# case, all the LTL operators have boolean argument and boolean return
# value, therefore we map them all to walk_bool_to_bool.
#
# This is an example of how to extend an existing class
# (SimpleTypeChecker) in order to deal with new node-types, by calling
# an existing method (walk_bool_to_bool).
from pysmt.type_checker import SimpleTypeChecker
# Every LTL operator takes boolean arguments and returns boolean, so the
# existing bool->bool handler covers them all.
SimpleTypeChecker.set_handler(SimpleTypeChecker.walk_bool_to_bool, *ALL_LTL)

from pysmt.oracles import FreeVarsOracle
# Free variables of an LTL node are simply those of its children.
FreeVarsOracle.set_handler(FreeVarsOracle.walk_simple_args, *ALL_LTL)
# An alternative approach is to subclass the walker that we are
# interested in. For example, the HRPrinter has utility methods for
# the nary operators. For the unary operators, we define a unique
# function. The walk_* method that we override needs to have the same
# name as the string used in new_node_type (lowercase): for LTL_G, we
# override walk_ltl_g.
import pysmt.printers
from pysmt.walkers.generic import handles
# Surface syntax for the unary LTL operators used by the printer below.
# (LTL_U and LTL_S are binary and have dedicated walk methods.)
# Bug fix: LTL_G previously mapped to "F", so G-formulas printed as "(F ...)".
LTL_TYPE_TO_STR = {LTL_X: "X", LTL_Y: "Y", LTL_Z: "Z",
                   LTL_F: "F", LTL_G: "G", LTL_O: "O", LTL_H: "H"}
class HRPrinter(pysmt.printers.HRPrinter):
    """Human-readable printer extended with the LTL operators."""

    def walk_ltl_s(self, formula):
        # Binary "since": reuse the printer's n-ary infix helper.
        return self.walk_nary(formula, " S ")

    def walk_ltl_u(self, formula):
        # Binary "until".
        return self.walk_nary(formula, " U ")

    @handles(LTL_X, LTL_Y, LTL_Z, LTL_F, LTL_G, LTL_O, LTL_H)
    def walk_ltl(self, formula):
        """Print any unary LTL operator as "(<op> <arg>)".

        Printer walkers are generators: yielding a sub-formula asks the
        walker to print it in place.
        """
        node_type = formula.node_type()
        op_symbol = LTL_TYPE_TO_STR[node_type]
        self.stream.write("(%s " % op_symbol)
        yield formula.arg(0)
        self.stream.write(")")
# EOC HRPrinter
class HRSerializer(pysmt.printers.HRSerializer):
    """Serializer that uses the LTL-aware printer defined above."""
    PrinterClass = HRPrinter
# EOC HRSerialize
# Finally, a third option is to define new methods and attach them to
# existing classes. We do so for the IdentityDagWalker
from pysmt.walkers import IdentityDagWalker
# IdentityDagWalker hook: rebuild an LTL_X node from already-walked children.
def walk_ltl_x(self, formula, args, **kwargs): return self.mgr.X(args[0])
# Rebuild an LTL_Y node from already-walked children.
def walk_ltl_y(self, formula, args, **kwargs): return self.mgr.Y(args[0])
# Rebuild a binary LTL_U (until) node from its two walked children.
def walk_ltl_u(self, formula, args, **kwargs): return self.mgr.U(args[0], args[1])
# Rebuild a binary LTL_S (since) node from its two walked children.
def walk_ltl_s(self, formula, args, **kwargs): return self.mgr.S(args[0], args[1])
# Rebuild an LTL_F node from already-walked children.
def walk_ltl_f(self, formula, args, **kwargs): return self.mgr.F(args[0])
# Rebuild an LTL_G node from already-walked children.
def walk_ltl_g(self, formula, args, **kwargs): return self.mgr.G(args[0])
# Rebuild an LTL_O node from already-walked children.
def walk_ltl_o(self, formula, args, **kwargs): return self.mgr.O(args[0])
# Rebuild an LTL_H node from already-walked children.
def walk_ltl_h(self, formula, args, **kwargs): return self.mgr.H(args[0])
# Attach the free functions above as IdentityDagWalker handlers, one per
# LTL node type, so substitution/rewriting walkers can traverse LTL formulae.
IdentityDagWalker.set_handler(walk_ltl_x, LTL_X)
IdentityDagWalker.set_handler(walk_ltl_y, LTL_Y)
IdentityDagWalker.set_handler(walk_ltl_u, LTL_U)
IdentityDagWalker.set_handler(walk_ltl_s, LTL_S)
IdentityDagWalker.set_handler(walk_ltl_f, LTL_F)
IdentityDagWalker.set_handler(walk_ltl_g, LTL_G)
IdentityDagWalker.set_handler(walk_ltl_o, LTL_O)
IdentityDagWalker.set_handler(walk_ltl_h, LTL_H)
# EOC IdentityDagWalker
from pysmt.environment import Environment, pop_env, get_env
from pysmt.environment import push_env as pysmt_push_env
class EnvironmentLTL(Environment):
    """Extension of pySMT environment."""
    # Only specify new classes. Classes that have been extended
    # directly do not need to be redefined here (e.g., TypeChecker)
    FormulaManagerClass = FormulaManager
    HRSerializerClass = HRSerializer
# EOC EnvironmentLTL
def push_env(env=None):
    """Push *env* onto the stack, defaulting to a fresh EnvironmentLTL."""
    chosen = EnvironmentLTL() if env is None else env
    return pysmt_push_env(env=chosen)
def reset_env():
    """Overload reset_env to use the new push_env().

    Pops the current environment, installs a fresh EnvironmentLTL and
    returns it.
    """
    pop_env()
    push_env()
    return get_env()
# Create the default environment
reset_env()

if __name__ == "__main__":
    # Smoke test: build G(X x) and show it plus its free variables.
    with EnvironmentLTL() as env:
        mgr = env.formula_manager
        f = mgr.X(mgr.Symbol("x"))
        g = mgr.G(f)
        print(g)
        print(g.get_free_variables())
| {
"content_hash": "c87e57dc6381c135f81667acb2580c7a",
"timestamp": "",
"source": "github",
"line_count": 170,
"max_line_length": 82,
"avg_line_length": 35.77647058823529,
"alnum_prop": 0.6857941466622821,
"repo_name": "pysmt/pysmt",
"id": "b3fbc75e27b7d2b3f10e4f31e42c92e2b3d2ebdc",
"size": "6387",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/ltl.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1227123"
},
{
"name": "Shell",
"bytes": "8632"
}
],
"symlink_target": ""
} |
from WebStudioLib import *
from WebStudioUtil import *
from WebStudioBase import *
# Registry of the sqlite tables managed by sqlOp(): table name -> ordered
# column list. The first column of every table is 'name', which sqlOp()
# uses as the lookup key for delete/detail.
sqlDataType = {}
sqlDataType['counter_view'] = {
    'elems': [
        {'name': 'name', 'type': 'text'},
        {'name': 'author', 'type': 'text'},
        {'name': 'description', 'type': 'text'},
        {'name': 'counterList', 'type': 'text'},
        {'name': 'graphtype', 'type': 'text'},
        {'name': 'interval', 'type': 'text'},
    ],
}
sqlDataType['app_package'] = {
    'elems': [
        {'name': 'name', 'type': 'text'},
        {'name': 'author', 'type': 'text'},
        {'name': 'description', 'type': 'text'},
        {'name': 'register_state', 'type': 'text'},
        {'name': 'cluster_type', 'type': 'text'},
        {'name': 'schema_info', 'type': 'text'},
        {'name': 'schema_type', 'type': 'text'},
        {'name': 'server_type', 'type': 'text'},
        {'name': 'parameters', 'type': 'text'},
        {'name': 'if_stateful', 'type': 'text'},
    ],
}
sqlDataType['cmd_scenario'] = {
    'elems': [
        {'name': 'name', 'type': 'text'},
        {'name': 'author', 'type': 'text'},
        {'name': 'description', 'type': 'text'},
        {'name': 'machines', 'type': 'text'},
        {'name': 'cmdtext', 'type': 'text'},
        {'name': 'interval', 'type': 'text'},
        {'name': 'times', 'type': 'text'},
    ],
}
# Jinja templates that render raw SQL text.
# SECURITY NOTE: these interpolate values directly into the SQL string with
# no escaping or parameter binding — any value containing a quote breaks the
# statement, and request-supplied values can inject SQL. Callers should bind
# values with DB-API placeholders instead wherever possible.
TCreate = jinja2.Template('CREATE TABLE IF NOT EXISTS {{dataType}} ({% for elem in elems %}{{elem.name}} {{elem.type}}{% if not loop.last %},{% endif %}{% endfor %});')
TDelete = jinja2.Template("DELETE FROM {{dataType}} WHERE name = '{{dataName}}';")
TDeleteall = jinja2.Template('DELETE FROM {{dataType}};')
TInsert = jinja2.Template("INSERT INTO {{dataType}} VALUES ({% for val in val_list %}'{{val}}'{% if not loop.last %},{% endif %}{% endfor %});")
TSelect = jinja2.Template('SELECT * FROM {{dataType}}')
TSelectone = jinja2.Template("SELECT * FROM {{dataType}} WHERE name = '{{dataName}}';")
TUpdate = jinja2.Template("UPDATE {{dataType}} SET {{updateColumn}}='{{updateValue}}' WHERE name='{{dataName}}';")
def sqlOp(op='', dataType='', dataName='', val_list=''):
    """Run one operation against the local sqlite store.

    op: 'save' (replace all rows with one new row), 'load' (return all rows),
        'delete' (drop the row named *dataName*), 'detail' (return that row).
    dataType: a key of ``sqlDataType`` naming the table (raises KeyError
        otherwise, as before).
    Returns a list of row tuples for 'load', a list of column values for
    'detail' ([] when the row is missing), otherwise None.
    """
    res = None
    local_dir = os.path.join(GetWebStudioDirPath(), 'local')
    if not os.path.exists(local_dir):
        os.makedirs(local_dir)
    conn = sqlite3.connect(os.path.join(local_dir, 'data.db'))
    try:
        c = conn.cursor()
        # Table/column identifiers cannot be bound as SQL parameters; they come
        # from the hard-coded sqlDataType registry above, never from users.
        c.execute(TCreate.render({'dataType': dataType,
                                  'elems': sqlDataType[dataType]['elems']}))
        if op == 'save':
            # Values are bound with '?' placeholders instead of being rendered
            # into the SQL text, fixing SQL injection and breakage on values
            # that contain quotes.
            c.execute('DELETE FROM %s;' % dataType)
            placeholders = ','.join('?' * len(val_list))
            c.execute('INSERT INTO %s VALUES (%s);' % (dataType, placeholders),
                      list(val_list))
        elif op == 'load':
            res = list(c.execute('SELECT * FROM %s' % dataType))
        elif op == 'delete':
            c.execute('DELETE FROM %s WHERE name = ?;' % dataType, (dataName,))
        elif op == 'detail':
            c.execute('SELECT * FROM %s WHERE name = ?;' % dataType, (dataName,))
            row = c.fetchone()
            # Previously a missing row crashed on list(None); return [] so
            # callers can test for "not found".
            res = list(row) if row is not None else []
        conn.commit()
    finally:
        # Always release the connection, even when a statement fails.
        conn.close()
    return res
class ApiBashHandler(BaseHandler):
    """Run a client-supplied shell command and return its stdout."""
    def get(self):
        # SECURITY: ``command`` comes straight from the request and is executed
        # with shell=True — this endpoint is remote command execution by design
        # and must only be reachable by trusted operators.
        command = self.request.get('command');
        queryRes = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE).stdout.read()
        self.response.write(queryRes)
class ApiPsutilHandler(BaseHandler):
    """Report host CPU/memory/disk/network statistics as JSON."""
    def get(self):
        stats = {
            'cpu': psutil.cpu_percent(interval=1),
            'memory': psutil.virtual_memory()[2],
            'disk': psutil.disk_usage('/')[3],
            'networkio': psutil.net_io_counters(),
        }
        self.response.write(json.dumps(stats))
class ApiSaveViewHandler(BaseHandler):
    """Persist a counter view definition posted by the UI."""
    def post(self):
        # Column order must match sqlDataType['counter_view']['elems'].
        sqlOp(op='save', dataType='counter_view', val_list=[
            self.request.get('name'),
            self.request.get('author'),
            self.request.get('description'),
            self.request.get('counterList'),
            self.request.get('graphtype'),
            self.request.get('interval')
        ])
        self.response.write('view "' + self.request.get('name') + '" is successfully saved!')
class ApiLoadViewHandler(BaseHandler):
    """Return every saved counter view as a JSON list."""
    def post(self):
        columns = ('name', 'author', 'description', 'counterList', 'graphtype', 'interval')
        viewList = [dict(zip(columns, row))
                    for row in sqlOp(op='load', dataType='counter_view')]
        self.SendJson(viewList)
class ApiDelViewHandler(BaseHandler):
    """Delete one saved counter view by name."""
    def post(self):
        sqlOp(op='delete', dataType='counter_view', dataName=self.request.get('name'))
        self.response.write('success')
class ApiLoadPackHandler(BaseHandler):
    """List registered app packages, ensuring the on-disk pack dir exists."""
    def post(self):
        # Side effect: create local/pack so later upload/delete handlers have
        # a directory to work with.
        pack_dir = os.path.join(GetWebStudioDirPath(), 'local', 'pack')
        if not os.path.exists(pack_dir):
            os.makedirs(pack_dir)
        packList = []
        # Indices follow sqlDataType['app_package']['elems'] column order.
        for pack in sqlOp(op='load', dataType='app_package'):
            packList.append({'name': pack[0], 'author': pack[1], 'description': pack[2], 'register_state': pack[3], 'cluster_type': pack[4], 'if_stateful': pack[9]})
        self.SendJson(packList)
class ApiPackDetailHandler(BaseHandler):
    """Return schema/parameter details for one registered app package."""
    def post(self):
        # Bug fix: the not-found branch referenced an undefined name
        # ``pack_id``, raising NameError instead of reporting the miss.
        pack_id = self.request.get('id')
        pack_info = sqlOp(op='detail', dataType='app_package', dataName=pack_id)
        if not pack_info:
            self.response.write('cannot find the package : ' + pack_id)
            return
        # Indices follow sqlDataType['app_package']['elems'] column order.
        ret = {'name': pack_info[0],
               'schema_info': pack_info[5],
               'schema_type': pack_info[6],
               'server_type': pack_info[7],
               'parameters': pack_info[8]}
        self.SendJson(ret)
class ApiDelPackHandler(BaseHandler):
    """Delete a package's database row and its on-disk artifacts."""
    def post(self):
        packName = self.request.get('name')
        sqlOp(op='delete', dataType='app_package', dataName=packName)
        # Bug fix: ``pack_dir`` was never defined in this scope, so every
        # request raised NameError, which the bare except turned into an
        # unconditional 'fail'. Build the same path ApiLoadPackHandler uses.
        pack_dir = os.path.join(GetWebStudioDirPath(), 'local', 'pack')
        try:
            shutil.rmtree(os.path.join(pack_dir, packName))
            os.remove(os.path.join(pack_dir, packName + '.jpg'))
            os.remove(os.path.join(pack_dir, packName + '.7z'))
            os.remove(os.path.join(pack_dir, packName + '.thrift'))
            self.response.write('success')
        except OSError:
            # Narrowed from a bare ``except``: missing/locked files are the
            # expected failure mode here.
            self.response.write('fail')
class ApiSaveScenarioHandler(BaseHandler):
    """Persist a command scenario posted by the UI."""
    def post(self):
        # Column order must match sqlDataType['cmd_scenario']['elems'].
        sqlOp(op='save', dataType='cmd_scenario', val_list=[
            self.request.get('name'),
            self.request.get('author'),
            self.request.get('description'),
            self.request.get('machines'),
            self.request.get('cmdtext'),
            self.request.get('interval'),
            self.request.get('times')
        ])
        self.response.write('success')
class ApiLoadScenarioHandler(BaseHandler):
    """Return every saved command scenario as a JSON list."""
    def post(self):
        columns = ('name', 'author', 'description', 'machines', 'cmdtext', 'interval', 'times')
        scenarioList = [dict(zip(columns, row))
                        for row in sqlOp(op='load', dataType='cmd_scenario')]
        self.SendJson(scenarioList)
class ApiDelScenarioHandler(BaseHandler):
    """Delete one saved command scenario by name."""
    def post(self):
        sqlOp(op='delete', dataType='cmd_scenario', dataName=self.request.get('name'))
        self.response.write('success')
| {
"content_hash": "e67b750fff47c2d583ca023e0e74a93c",
"timestamp": "",
"source": "github",
"line_count": 195,
"max_line_length": 188,
"avg_line_length": 36.92307692307692,
"alnum_prop": 0.58375,
"repo_name": "glglwty/rDSN",
"id": "dd88bc928f77ccb949e90663f8d9b1cbd875b87d",
"size": "7200",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/tools/webstudio/app_package/WebStudioApi.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "25290"
},
{
"name": "C",
"bytes": "110032"
},
{
"name": "C++",
"bytes": "3149292"
},
{
"name": "CMake",
"bytes": "55282"
},
{
"name": "Objective-C",
"bytes": "3073"
},
{
"name": "PHP",
"bytes": "88752"
},
{
"name": "Protocol Buffer",
"bytes": "331"
},
{
"name": "Python",
"bytes": "11100"
},
{
"name": "Shell",
"bytes": "52656"
},
{
"name": "Thrift",
"bytes": "13577"
}
],
"symlink_target": ""
} |
"""All piple creation subrutines tests"""
# =============================================================================
# IMPORTS
# =============================================================================
import tempfile
import shutil
import os
from corral import creator
from corral.exceptions import ValidationError
from .base import BaseTest
# =============================================================================
# BASE CLASS
# =============================================================================
class ValidateName(BaseTest):
    """creator.validate_name accepts identifiers and rejects reserved names."""

    def test_validate_name(self):
        # Plain identifiers pass silently.
        for acceptable in ("fooo", "_fooo", "fooo111"):
            creator.validate_name(acceptable)
        # Keywords, builtins, corral-reserved words and non-identifiers fail.
        rejected = ("try", "import", "corral", "True", "models",
                    "int", "1a", "pipeline", "load")
        for bad in rejected:
            with self.assertRaises(ValidationError):
                creator.validate_name(bad)
class CreatePipeline(BaseTest):
    """Integration tests for creator.create_pipeline on a temp directory."""

    def setup(self):
        # Fresh scratch directory per test; removed again in teardown().
        self.path = tempfile.mkdtemp("_corral_tests")
        self.pipeline_name = "example"
        # NOTE(review): pipeline_path and container_path are built from the
        # same join and are therefore identical — confirm that is intended.
        self.pipeline_path = os.path.join(self.path, self.pipeline_name)
        self.container_path = os.path.join(self.path, self.pipeline_name)

    def teardown(self):
        if os.path.isdir(self.path):
            shutil.rmtree(self.path)

    def test_create_pipeline(self):
        """create_pipeline lays out the package dir plus the in_corral.py manager."""
        self.assertEqual(os.listdir(self.path), [])
        creator.create_pipeline(self.pipeline_path)
        expected = ['in_corral.py', 'example']
        self.assertCountEqual(os.listdir(self.container_path), expected)
        self.assertTrue(os.path.isdir(self.pipeline_path))
        manager_path = os.path.join(self.container_path, "in_corral.py")
        self.assertTrue(os.path.isfile(manager_path))

    def test_directory_exists_failure(self):
        """Creating a pipeline over an existing directory is rejected."""
        with self.assertRaises(ValidationError):
            creator.create_pipeline(self.path)
| {
"content_hash": "5bc5f6543e371a14cb52c4a408e23787",
"timestamp": "",
"source": "github",
"line_count": 82,
"max_line_length": 79,
"avg_line_length": 30.926829268292682,
"alnum_prop": 0.5772870662460567,
"repo_name": "toros-astro/corral",
"id": "010af6b4bbdc497c1d9c891970c87363c9a3ee6f",
"size": "4316",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_creator.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Mako",
"bytes": "988"
},
{
"name": "Python",
"bytes": "279543"
},
{
"name": "TeX",
"bytes": "228160"
}
],
"symlink_target": ""
} |
from pymoku import Moku, MokuException, NoDataException
from pymoku.instruments import *
import time, logging, traceback

logging.basicConfig(format='%(asctime)s:%(name)s:%(levelname)s::%(message)s')
logging.getLogger('pymoku').setLevel(logging.INFO)

# Use Moku.get_by_serial() or get_by_name() if you don't know the IP
m = Moku.get_by_name('example')
i = Oscilloscope()
m.attach_instrument(i)

try:
    # Slow roll-mode acquisition (sample rate 10 — presumably Hz; see pymoku
    # docs) suits a live network stream.
    i.set_samplerate(10)
    i.set_xmode(OSC_ROLL)
    i.commit()
    time.sleep(1)

    # Stop any stale session, then stream 100 s of channel-1 samples over
    # the network ('net') rather than logging to the SD card.
    i.datalogger_stop()
    i.datalogger_start(start=0, duration=100, use_sd=False, ch1=True, ch2=False, filetype='net')

    while True:
        ch, idx, d = i.datalogger_get_samples(timeout=5)
        print("Received samples %d to %d from channel %d" % (idx, idx + len(d) - 1, ch))
except NoDataException as e:
    # This will be raised if we try and get samples but the session has finished.
    print(e)
except Exception as e:
    print(traceback.format_exc())
finally:
    # Always stop the logging session and release the device.
    i.datalogger_stop()
    m.close()
| {
"content_hash": "8f2459ea08161e5f380d5ece27e62d06",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 93,
"avg_line_length": 28,
"alnum_prop": 0.7184873949579832,
"repo_name": "benizl/pymoku",
"id": "bd48e4fcdc498b34f30295f6d66e5e2f3b758765",
"size": "952",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/livestream_datalogger.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "219652"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Squashed migration creating the Habitica credentials model.

    Replaces wh_habitica migrations 0001_initial and 0002_auto_20151226_1552.
    Field definitions must not be edited retroactively.
    """

    replaces = [(b'wh_habitica', '0001_initial'), (b'wh_habitica', '0002_auto_20151226_1552')]

    initial = True

    dependencies = [
        # Depends on whichever user model AUTH_USER_MODEL points at.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Habitica',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('user_id', models.CharField(blank=True, max_length=255, verbose_name='User ID')),
                ('name', models.CharField(blank=True, max_length=255, verbose_name='Name')),
                ('email', models.EmailField(blank=True, max_length=254, verbose_name='Email')),
                ('api_token', models.CharField(blank=True, max_length=255, verbose_name='API Token')),
                ('created_at', models.DateTimeField(auto_now_add=True, verbose_name='Created at')),
                ('modified_at', models.DateTimeField(auto_now=True, verbose_name='Modified at')),
                # One Habitica credential set per user.
                ('owner', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='habitica', to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| {
"content_hash": "feff69eb5e215584500cc65274dccf78",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 147,
"avg_line_length": 43.3125,
"alnum_prop": 0.6284271284271284,
"repo_name": "passuf/WunderHabit",
"id": "86da73ec42034f5ee0a0cb11c80d39ffba4b8ec1",
"size": "1458",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "wh_habitica/migrations/0001_squashed_0002_auto_20151226_1552.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2631"
},
{
"name": "HTML",
"bytes": "22419"
},
{
"name": "Python",
"bytes": "73618"
}
],
"symlink_target": ""
} |
import copy
from oslo_log import log as logging
import six
from heat.common import exception
from heat.common.i18n import _
from heat.common.i18n import _LI
from heat.common.i18n import _LW
from heat.engine import attributes
from heat.engine.clients import progress
from heat.engine import constraints
from heat.engine import properties
from heat.engine import resource
from heat.engine.resources import scheduler_hints as sh
LOG = logging.getLogger(__name__)
class Instance(resource.Resource, sh.SchedulerHintsMixin):
PROPERTIES = (
IMAGE_ID, INSTANCE_TYPE, KEY_NAME, AVAILABILITY_ZONE,
DISABLE_API_TERMINATION, KERNEL_ID, MONITORING,
PLACEMENT_GROUP_NAME, PRIVATE_IP_ADDRESS, RAM_DISK_ID,
SECURITY_GROUPS, SECURITY_GROUP_IDS, NETWORK_INTERFACES,
SOURCE_DEST_CHECK, SUBNET_ID, TAGS, NOVA_SCHEDULER_HINTS, TENANCY,
USER_DATA, VOLUMES, BLOCK_DEVICE_MAPPINGS
) = (
'ImageId', 'InstanceType', 'KeyName', 'AvailabilityZone',
'DisableApiTermination', 'KernelId', 'Monitoring',
'PlacementGroupName', 'PrivateIpAddress', 'RamDiskId',
'SecurityGroups', 'SecurityGroupIds', 'NetworkInterfaces',
'SourceDestCheck', 'SubnetId', 'Tags', 'NovaSchedulerHints', 'Tenancy',
'UserData', 'Volumes', 'BlockDeviceMappings'
)
_TAG_KEYS = (
TAG_KEY, TAG_VALUE,
) = (
'Key', 'Value',
)
_NOVA_SCHEDULER_HINT_KEYS = (
NOVA_SCHEDULER_HINT_KEY, NOVA_SCHEDULER_HINT_VALUE,
) = (
'Key', 'Value',
)
_VOLUME_KEYS = (
VOLUME_DEVICE, VOLUME_ID,
) = (
'Device', 'VolumeId',
)
_BLOCK_DEVICE_MAPPINGS_KEYS = (
DEVICE_NAME, EBS, NO_DEVICE, VIRTUAL_NAME,
) = (
'DeviceName', 'Ebs', 'NoDevice', 'VirtualName',
)
_EBS_KEYS = (
DELETE_ON_TERMINATION, IOPS, SNAPSHOT_ID, VOLUME_SIZE,
VOLUME_TYPE,
) = (
'DeleteOnTermination', 'Iops', 'SnapshotId', 'VolumeSize',
'VolumeType'
)
ATTRIBUTES = (
AVAILABILITY_ZONE_ATTR, PRIVATE_DNS_NAME, PUBLIC_DNS_NAME, PRIVATE_IP,
PUBLIC_IP,
) = (
'AvailabilityZone', 'PrivateDnsName', 'PublicDnsName', 'PrivateIp',
'PublicIp',
)
properties_schema = {
IMAGE_ID: properties.Schema(
properties.Schema.STRING,
_('Glance image ID or name.'),
constraints=[
constraints.CustomConstraint('glance.image')
],
required=True
),
# AWS does not require InstanceType but Heat does because the nova
# create api call requires a flavor
INSTANCE_TYPE: properties.Schema(
properties.Schema.STRING,
_('Nova instance type (flavor).'),
required=True,
update_allowed=True,
constraints=[
constraints.CustomConstraint('nova.flavor')
]
),
KEY_NAME: properties.Schema(
properties.Schema.STRING,
_('Optional Nova keypair name.'),
constraints=[
constraints.CustomConstraint("nova.keypair")
]
),
AVAILABILITY_ZONE: properties.Schema(
properties.Schema.STRING,
_('Availability zone to launch the instance in.')
),
DISABLE_API_TERMINATION: properties.Schema(
properties.Schema.STRING,
_('Not Implemented.'),
implemented=False
),
KERNEL_ID: properties.Schema(
properties.Schema.STRING,
_('Not Implemented.'),
implemented=False
),
MONITORING: properties.Schema(
properties.Schema.BOOLEAN,
_('Not Implemented.'),
implemented=False
),
PLACEMENT_GROUP_NAME: properties.Schema(
properties.Schema.STRING,
_('Not Implemented.'),
implemented=False
),
PRIVATE_IP_ADDRESS: properties.Schema(
properties.Schema.STRING,
_('Not Implemented.'),
implemented=False
),
RAM_DISK_ID: properties.Schema(
properties.Schema.STRING,
_('Not Implemented.'),
implemented=False
),
SECURITY_GROUPS: properties.Schema(
properties.Schema.LIST,
_('Security group names to assign.')
),
SECURITY_GROUP_IDS: properties.Schema(
properties.Schema.LIST,
_('Security group IDs to assign.')
),
NETWORK_INTERFACES: properties.Schema(
properties.Schema.LIST,
_('Network interfaces to associate with instance.'),
update_allowed=True
),
SOURCE_DEST_CHECK: properties.Schema(
properties.Schema.BOOLEAN,
_('Not Implemented.'),
implemented=False
),
SUBNET_ID: properties.Schema(
properties.Schema.STRING,
_('Subnet ID to launch instance in.'),
update_allowed=True
),
TAGS: properties.Schema(
properties.Schema.LIST,
_('Tags to attach to instance.'),
schema=properties.Schema(
properties.Schema.MAP,
schema={
TAG_KEY: properties.Schema(
properties.Schema.STRING,
required=True
),
TAG_VALUE: properties.Schema(
properties.Schema.STRING,
required=True
),
},
),
update_allowed=True
),
NOVA_SCHEDULER_HINTS: properties.Schema(
properties.Schema.LIST,
_('Scheduler hints to pass to Nova (Heat extension).'),
schema=properties.Schema(
properties.Schema.MAP,
schema={
NOVA_SCHEDULER_HINT_KEY: properties.Schema(
properties.Schema.STRING,
required=True
),
NOVA_SCHEDULER_HINT_VALUE: properties.Schema(
properties.Schema.STRING,
required=True
),
},
)
),
TENANCY: properties.Schema(
properties.Schema.STRING,
_('Not Implemented.'),
constraints=[
constraints.AllowedValues(['dedicated', 'default']),
],
implemented=False
),
USER_DATA: properties.Schema(
properties.Schema.STRING,
_('User data to pass to instance.')
),
VOLUMES: properties.Schema(
properties.Schema.LIST,
_('Volumes to attach to instance.'),
default=[],
schema=properties.Schema(
properties.Schema.MAP,
schema={
VOLUME_DEVICE: properties.Schema(
properties.Schema.STRING,
_('The device where the volume is exposed on the '
'instance. This assignment may not be honored and '
'it is advised that the path '
'/dev/disk/by-id/virtio-<VolumeId> be used '
'instead.'),
required=True
),
VOLUME_ID: properties.Schema(
properties.Schema.STRING,
_('The ID of the volume to be attached.'),
required=True,
constraints=[
constraints.CustomConstraint('cinder.volume')
]
),
}
)
),
BLOCK_DEVICE_MAPPINGS: properties.Schema(
properties.Schema.LIST,
_('Block device mappings to attach to instance.'),
schema=properties.Schema(
properties.Schema.MAP,
schema={
DEVICE_NAME: properties.Schema(
properties.Schema.STRING,
_('A device name where the volume will be '
'attached in the system at /dev/device_name.'
'e.g. vdb'),
required=True,
),
EBS: properties.Schema(
properties.Schema.MAP,
_('The ebs volume to attach to the instance.'),
schema={
DELETE_ON_TERMINATION: properties.Schema(
properties.Schema.BOOLEAN,
_('Indicate whether the volume should be '
'deleted when the instance is terminated.'),
default=True
),
IOPS: properties.Schema(
properties.Schema.NUMBER,
_('The number of I/O operations per second '
'that the volume supports.'),
implemented=False
),
SNAPSHOT_ID: properties.Schema(
properties.Schema.STRING,
_('The ID of the snapshot to create '
'a volume from.'),
constraints=[
constraints.CustomConstraint(
'cinder.snapshot')
]
),
VOLUME_SIZE: properties.Schema(
properties.Schema.STRING,
_('The size of the volume, in GB. Must be '
'equal or greater than the size of the '
'snapshot. It is safe to leave this blank '
'and have the Compute service infer '
'the size.'),
),
VOLUME_TYPE: properties.Schema(
properties.Schema.STRING,
_('The volume type.'),
implemented=False
),
},
),
NO_DEVICE: properties.Schema(
properties.Schema.MAP,
_('The can be used to unmap a defined device.'),
implemented=False
),
VIRTUAL_NAME: properties.Schema(
properties.Schema.STRING,
_('The name of the virtual device. The name must be '
'in the form ephemeralX where X is a number '
'starting from zero (0); for example, ephemeral0.'),
implemented=False
),
},
),
),
}
attributes_schema = {
AVAILABILITY_ZONE_ATTR: attributes.Schema(
_('The Availability Zone where the specified instance is '
'launched.'),
type=attributes.Schema.STRING
),
PRIVATE_DNS_NAME: attributes.Schema(
_('Private DNS name of the specified instance.'),
type=attributes.Schema.STRING
),
PUBLIC_DNS_NAME: attributes.Schema(
_('Public DNS name of the specified instance.'),
type=attributes.Schema.STRING
),
PRIVATE_IP: attributes.Schema(
_('Private IP address of the specified instance.'),
type=attributes.Schema.STRING
),
PUBLIC_IP: attributes.Schema(
_('Public IP address of the specified instance.'),
type=attributes.Schema.STRING
),
}
# Server host name limit to 53 characters by due to typical default
# linux HOST_NAME_MAX of 64, minus the .novalocal appended to the name
physical_resource_name_limit = 53
default_client_name = 'nova'
def __init__(self, name, json_snippet, stack):
    super(Instance, self).__init__(name, json_snippet, stack)
    # Cached first IP address of the server; populated lazily by
    # _set_ipaddress() / _ipaddress().
    self.ipaddress = None
def _set_ipaddress(self, networks):
    """Cache the server's first IP address on self.ipaddress.

    ``networks`` is the Nova-provided mapping of network name to a list
    of addresses; the first address of the first non-empty list (network
    names visited in reverse-sorted order) wins.
    """
    for net_name in sorted(networks, reverse=True):
        addresses = networks[net_name]
        if addresses:
            self.ipaddress = addresses[0]
            break
def _ipaddress(self):
    """Return the server's IP address, fetching it from Nova on demand.

    Falls back to '0.0.0.0' when no address can be determined.
    """
    cached = self.ipaddress
    if cached is None:
        cached = self.client_plugin().server_to_ipaddress(
            self.resource_id)
        self.ipaddress = cached
    return cached if cached else '0.0.0.0'
def _availability_zone(self):
    """Return the server's availability zone.

    Uses the AvailabilityZone property when set; otherwise asks Nova
    via the 'OS-EXT-AZ:availability_zone' server attribute. Returns
    None when the server cannot be found.
    """
    availability_zone = self.properties[self.AVAILABILITY_ZONE]
    if availability_zone is None:
        try:
            server = self.client().servers.get(self.resource_id)
            availability_zone = getattr(server,
                                        'OS-EXT-AZ:availability_zone')
        except Exception as e:
            # Presumably ignore_not_found swallows NotFound and
            # re-raises everything else — confirm against the client
            # plugin; a missing server thus yields None.
            self.client_plugin().ignore_not_found(e)
            return
    return availability_zone
def _resolve_attribute(self, name):
    """Resolve one of this resource's ATTRIBUTES.

    AvailabilityZone is fetched separately; every other attribute
    resolves to the instance's IP address. Returns None for falsy
    results.
    """
    if name == self.AVAILABILITY_ZONE_ATTR:
        res = self._availability_zone()
    elif name in self.ATTRIBUTES[1:]:
        res = self._ipaddress()
    else:
        res = None

    LOG.info(_LI('%(name)s._resolve_attribute(%(attname)s) == %(res)s'),
             {'name': self.name, 'attname': name, 'res': res})
    return six.text_type(res) if res else None
def _port_data_delete(self):
    """Delete the Neutron port this resource implicitly created.

    The port id is stored in resource data by _build_nics(); both the
    port and the data entry are removed here. No-op when no port was
    recorded.
    """
    # delete the port data which implicit-created
    port_id = self.data().get('port_id')
    if port_id:
        # NOTE(review): ignore_not_found is used here as a context
        # manager (no call parentheses) — confirm the neutron client
        # plugin supports this form.
        with self.client_plugin('neutron').ignore_not_found:
            self.neutron().delete_port(port_id)
        self.data_delete('port_id')
def _build_nics(self, network_interfaces,
                security_groups=None, subnet_id=None):
    """Build the Nova ``nics`` argument from template properties.

    NetworkInterfaces entries (plain port-id strings or dicts with
    'NetworkInterfaceId'/'DeviceIndex') take precedence and are ordered
    by DeviceIndex. Otherwise, when ``subnet_id`` is given, a port is
    created on that subnet (its id is stored in resource data so it can
    be cleaned up on delete). Returns None when neither is supplied.
    """
    nics = None

    if network_interfaces:
        unsorted_nics = []
        for entry in network_interfaces:
            # Bare strings are port ids; assign them sequential
            # device indexes in the order they appear.
            nic = (entry
                   if not isinstance(entry, six.string_types)
                   else {'NetworkInterfaceId': entry,
                         'DeviceIndex': len(unsorted_nics)})
            unsorted_nics.append(nic)
        sorted_nics = sorted(unsorted_nics,
                             key=lambda nic: int(nic['DeviceIndex']))
        nics = [{'port-id': snic['NetworkInterfaceId']}
                for snic in sorted_nics]
    else:
        # if SubnetId property in Instance, ensure subnet exists
        if subnet_id:
            neutronclient = self.neutron()
            network_id = self.client_plugin(
                'neutron').network_id_from_subnet_id(subnet_id)
            # if subnet verified, create a port to use this subnet
            # if port is not created explicitly, nova will choose
            # the first subnet in the given network.
            if network_id:
                fixed_ip = {'subnet_id': subnet_id}
                props = {
                    'admin_state_up': True,
                    'network_id': network_id,
                    'fixed_ips': [fixed_ip]
                }

                if security_groups:
                    props['security_groups'] = self.client_plugin(
                        'neutron').get_secgroup_uuids(security_groups)

                port = neutronclient.create_port({'port': props})['port']

                # after create the port, set the port-id to
                # resource data, so that the port can be deleted on
                # instance delete.
                self.data_set('port_id', port['id'])

                nics = [{'port-id': port['id']}]

    return nics
def _get_security_groups(self):
    """Merge SecurityGroups and SecurityGroupIds into one list.

    Returns None instead of an empty list when neither property is set.
    """
    groups = []
    for prop_name in (self.SECURITY_GROUPS, self.SECURITY_GROUP_IDS):
        values = self.properties.get(prop_name)
        if values is not None:
            groups.extend(values)
    return groups or None
def _build_block_device_mapping(self, bdm):
    """Convert BlockDeviceMappings into Nova's shorthand bdm dict.

    Produces {device_name: "<id>:<type>:<size>:<delete>"} strings in
    the colon-separated format Nova's legacy block_device_mapping
    argument expects. Returns None when no mappings are given.
    """
    if not bdm:
        return None
    bdm_dict = {}
    for mapping in bdm:
        device_name = mapping.get(self.DEVICE_NAME)
        ebs = mapping.get(self.EBS)
        if ebs:
            mapping_parts = []
            snapshot_id = ebs.get(self.SNAPSHOT_ID)
            volume_size = ebs.get(self.VOLUME_SIZE)
            delete = ebs.get(self.DELETE_ON_TERMINATION)

            if snapshot_id:
                mapping_parts.append(snapshot_id)
                # 'snap' marks the id as a snapshot source.
                mapping_parts.append('snap')

            if volume_size:
                mapping_parts.append(str(volume_size))
            else:
                # Keep the positional slot so 'delete' stays in the
                # right colon-separated field.
                mapping_parts.append('')

            if delete is not None:
                mapping_parts.append(str(delete))

            bdm_dict[device_name] = ':'.join(mapping_parts)

    return bdm_dict
def _get_nova_metadata(self, properties):
    """Turn the Tags property into a Nova metadata dict (or None)."""
    if properties is None or properties.get(self.TAGS) is None:
        return None

    return {tag[self.TAG_KEY]: tag[self.TAG_VALUE]
            for tag in properties[self.TAGS]}
def handle_create(self):
    """Create the Nova server and start volume attachments.

    Returns a (ServerCreateProgress, tuple of VolumeAttachProgress)
    cookie that check_create_complete() polls to completion.
    """
    security_groups = self._get_security_groups()

    userdata = self.properties[self.USER_DATA] or ''
    flavor = self.properties[self.INSTANCE_TYPE]
    availability_zone = self.properties[self.AVAILABILITY_ZONE]

    image_name = self.properties[self.IMAGE_ID]

    image_id = self.client_plugin('glance').get_image_id(image_name)

    flavor_id = self.client_plugin().get_flavor_id(flavor)

    scheduler_hints = {}
    if self.properties[self.NOVA_SCHEDULER_HINTS]:
        for tm in self.properties[self.NOVA_SCHEDULER_HINTS]:
            # adopted from novaclient shell
            hint = tm[self.NOVA_SCHEDULER_HINT_KEY]
            hint_value = tm[self.NOVA_SCHEDULER_HINT_VALUE]
            if hint in scheduler_hints:
                # Repeated hint keys accumulate into a list of values.
                if isinstance(scheduler_hints[hint], six.string_types):
                    scheduler_hints[hint] = [scheduler_hints[hint]]
                scheduler_hints[hint].append(hint_value)
            else:
                scheduler_hints[hint] = hint_value
    else:
        scheduler_hints = None
    scheduler_hints = self._scheduler_hints(scheduler_hints)

    nics = self._build_nics(self.properties[self.NETWORK_INTERFACES],
                            security_groups=security_groups,
                            subnet_id=self.properties[self.SUBNET_ID])

    block_device_mapping = self._build_block_device_mapping(
        self.properties.get(self.BLOCK_DEVICE_MAPPINGS))

    server = None

    try:
        server = self.client().servers.create(
            name=self.physical_resource_name(),
            image=image_id,
            flavor=flavor_id,
            key_name=self.properties[self.KEY_NAME],
            security_groups=security_groups,
            userdata=self.client_plugin().build_userdata(
                self.metadata_get(), userdata, 'ec2-user'),
            meta=self._get_nova_metadata(self.properties),
            scheduler_hints=scheduler_hints,
            nics=nics,
            availability_zone=availability_zone,
            block_device_mapping=block_device_mapping)
    finally:
        # Avoid a race condition where the thread could be cancelled
        # before the ID is stored
        if server is not None:
            self.resource_id_set(server.id)

    creator = progress.ServerCreateProgress(server.id)
    attachers = []
    for vol_id, device in self.volumes():
        attachers.append(progress.VolumeAttachProgress(self.resource_id,
                                                       vol_id, device))
    return creator, tuple(attachers)
def check_create_complete(self, cookie):
    """Poll the (creator, attachers) cookie from handle_create.

    Returns True once the server is ACTIVE and all volume attachments
    have completed.
    """
    creator, attachers = cookie
    if not creator.complete:
        creator.complete = self.client_plugin()._check_active(
            creator.server_id, 'Instance')
        if creator.complete:
            # Cache the server's networks for attribute resolution.
            server = self.client_plugin().get_server(creator.server_id)
            self._set_ipaddress(server.networks)
            # NOTE(pas-ha) small optimization,
            # return True if there are no volumes to attach
            # to save one check_create_complete call
            return not len(attachers)
        else:
            return False
    return self._attach_volumes(attachers)
def _attach_volumes(self, attachers):
    """Drive VolumeAttachProgress objects; True when all are complete.

    Each poll either starts the next un-started attachment (and
    returns False) or advances at most one pending completion check.
    """
    for attacher in attachers:
        if not attacher.called:
            # Kick off one attachment per poll, then come back later.
            self.client_plugin().attach_volume(attacher.srv_id,
                                               attacher.vol_id,
                                               attacher.device)
            attacher.called = True
            return False
    for attacher in attachers:
        if not attacher.complete:
            # Check only one pending attachment per poll.
            attacher.complete = self.client_plugin(
                'cinder').check_attach_volume_complete(attacher.vol_id)
            break
    out = all(attacher.complete for attacher in attachers)
    return out
def volumes(self):
    """Iterate over (volume_id, device) pairs to attach.

    Yields one tuple per entry of the Volumes property.
    """
    vols = self.properties[self.VOLUMES]
    return ((entry[self.VOLUME_ID], entry[self.VOLUME_DEVICE])
            for entry in vols)
def _remove_matched_ifaces(self, old_network_ifaces, new_network_ifaces):
    """Drop entries common to both interface lists, mutating both in place."""
    # Iterate over a deep copy so removing from the live old list while
    # looping stays safe.
    for candidate in copy.deepcopy(old_network_ifaces):
        if candidate in new_network_ifaces:
            old_network_ifaces.remove(candidate)
            new_network_ifaces.remove(candidate)
def handle_check(self):
    """Raise unless the underlying server is ACTIVE."""
    server = self.client().servers.get(self.resource_id)
    is_active = self.client_plugin()._check_active(server, 'Instance')
    if not is_active:
        raise exception.Error(_("Instance is not ACTIVE (was: %s)") %
                              server.status.strip())
def _update_instance_type(self, prop_diff):
    """Build resize and verify progress trackers for a flavor change."""
    new_flavor = prop_diff[self.INSTANCE_TYPE]
    new_flavor_id = self.client_plugin().get_flavor_id(new_flavor)

    resize = progress.ServerUpdateProgress(
        self.resource_id, 'resize',
        handler_extra={'args': (new_flavor_id,)},
        checker_extra={'args': (new_flavor_id, new_flavor)})
    verify = progress.ServerUpdateProgress(self.resource_id,
                                           'verify_resize')
    return resize, verify
def _update_network_interfaces(self, server, prop_diff):
    """Compute interface detach/attach tasks for a property update.

    Handles changes to NetworkInterfaces and/or SubnetId: unchanged
    interfaces are matched up so only the delta is detached/attached.
    Returns a list of ServerUpdateProgress tasks for
    check_update_complete() to drive.
    """
    updaters = []
    new_network_ifaces = prop_diff.get(self.NETWORK_INTERFACES)
    old_network_ifaces = self.properties.get(self.NETWORK_INTERFACES)

    # if there is entrys in old_network_ifaces and new_network_ifaces,
    # remove the same entrys from old and new ifaces
    if old_network_ifaces and new_network_ifaces:
        # there are four situations:
        # 1.old includes new, such as: old = 2,3, new = 2
        # 2.new includes old, such as: old = 2,3, new = 1,2,3
        # 3.has overlaps, such as: old = 2,3, new = 1,2
        # 4.different, such as: old = 2,3, new = 1,4
        # detach unmatched ones in old, attach unmatched ones in new
        self._remove_matched_ifaces(old_network_ifaces,
                                    new_network_ifaces)
        if old_network_ifaces:
            old_nics = self._build_nics(old_network_ifaces)
            for nic in old_nics:
                updaters.append(
                    progress.ServerUpdateProgress(
                        self.resource_id, 'interface_detach',
                        complete=True,
                        handler_extra={'args': (nic['port-id'],)})
                )
        if new_network_ifaces:
            new_nics = self._build_nics(new_network_ifaces)
            for nic in new_nics:
                handler_kwargs = {'port_id': nic['port-id']}
                updaters.append(
                    progress.ServerUpdateProgress(
                        self.resource_id, 'interface_attach',
                        complete=True,
                        handler_extra={'kwargs': handler_kwargs})
                )
    # if there is no change of 'NetworkInterfaces', do nothing,
    # keep the behavior as creation
    elif (old_network_ifaces and
            (self.NETWORK_INTERFACES not in prop_diff)):
        # LOG.warn() is a deprecated alias of LOG.warning().
        LOG.warning(_LW('There is no change of "%(net_interfaces)s" '
                        'for instance %(server)s, do nothing '
                        'when updating.'),
                    {'net_interfaces': self.NETWORK_INTERFACES,
                     'server': self.resource_id})
    # if the interfaces not come from property 'NetworkInterfaces',
    # the situation is somewhat complex, so to detach the old ifaces,
    # and then attach the new ones.
    else:
        subnet_id = (prop_diff.get(self.SUBNET_ID) or
                     self.properties.get(self.SUBNET_ID))
        security_groups = self._get_security_groups()
        if not server:
            server = self.client().servers.get(self.resource_id)
        interfaces = server.interface_list()
        for iface in interfaces:
            updaters.append(
                progress.ServerUpdateProgress(
                    self.resource_id, 'interface_detach',
                    complete=True,
                    handler_extra={'args': (iface.port_id,)})
            )
        # first to delete the port which implicit-created by heat
        self._port_data_delete()
        nics = self._build_nics(new_network_ifaces,
                                security_groups=security_groups,
                                subnet_id=subnet_id)
        # 'SubnetId' property is empty(or None) and
        # 'NetworkInterfaces' property is empty(or None),
        # _build_nics() will return nics = None,we should attach
        # first free port, according to similar behavior during
        # instance creation
        if not nics:
            updaters.append(
                progress.ServerUpdateProgress(
                    self.resource_id, 'interface_attach', complete=True)
            )
        else:
            for nic in nics:
                updaters.append(
                    progress.ServerUpdateProgress(
                        self.resource_id, 'interface_attach',
                        complete=True,
                        handler_extra={'kwargs':
                                       {'port_id': nic['port-id']}})
                )

    return updaters
def handle_update(self, json_snippet, tmpl_diff, prop_diff):
    """Apply template/property changes.

    Metadata and Tags changes are applied immediately; flavor and
    network changes are returned as a list of progress tasks that
    check_update_complete() drives in order.
    """
    if 'Metadata' in tmpl_diff:
        self.metadata_set(tmpl_diff['Metadata'])

    updaters = []
    server = None
    if self.TAGS in prop_diff:
        server = self.client().servers.get(self.resource_id)
        self.client_plugin().meta_update(
            server, self._get_nova_metadata(prop_diff))

    if self.INSTANCE_TYPE in prop_diff:
        updaters.extend(self._update_instance_type(prop_diff))

    if (self.NETWORK_INTERFACES in prop_diff or
            self.SUBNET_ID in prop_diff):
        updaters.extend(self._update_network_interfaces(server, prop_diff))

    # NOTE(pas-ha) optimization is possible (starting first task
    # right away), but we'd rather not, as this method already might
    # have called several APIs
    return updaters
def check_update_complete(self, updaters):
    """Push all updaters to completion in list order."""
    for prg in updaters:
        if not prg.called:
            # Start the next pending handler (one per poll).
            handler = getattr(self.client_plugin(), prg.handler)
            prg.called = handler(*prg.handler_args,
                                 **prg.handler_kwargs)
            return False
        if not prg.complete:
            # Advance at most one completion check per poll.
            check_complete = getattr(self.client_plugin(), prg.checker)
            prg.complete = check_complete(*prg.checker_args,
                                          **prg.checker_kwargs)
            break
    return all(prg.complete for prg in updaters)
def metadata_update(self, new_metadata=None):
    """Refresh stored metadata from the template when none is supplied."""
    if new_metadata is not None:
        return
    self.metadata_set(self.t.metadata())
def validate(self):
    """Validate any of the provided params.

    Rejects combining security-group properties with NetworkInterfaces,
    requires every BlockDeviceMappings entry to carry an Ebs map with a
    SnapshotId, and warns when SubnetId will be ignored in favour of
    NetworkInterfaces.
    """
    res = super(Instance, self).validate()
    if res:
        return res
    # check validity of security groups vs. network interfaces
    security_groups = self._get_security_groups()
    network_interfaces = self.properties.get(self.NETWORK_INTERFACES)
    if security_groups and network_interfaces:
        raise exception.ResourcePropertyConflict(
            '/'.join([self.SECURITY_GROUPS, self.SECURITY_GROUP_IDS]),
            self.NETWORK_INTERFACES)

    # check bdm property
    # now we don't support without snapshot_id in bdm
    bdm = self.properties.get(self.BLOCK_DEVICE_MAPPINGS)
    if bdm:
        for mapping in bdm:
            ebs = mapping.get(self.EBS)
            if ebs:
                snapshot_id = ebs.get(self.SNAPSHOT_ID)
                if not snapshot_id:
                    msg = _("SnapshotId is missing, this is required "
                            "when specifying BlockDeviceMappings.")
                    raise exception.StackValidationFailed(message=msg)
            else:
                msg = _("Ebs is missing, this is required "
                        "when specifying BlockDeviceMappings.")
                raise exception.StackValidationFailed(message=msg)

    subnet_id = self.properties.get(self.SUBNET_ID)
    if network_interfaces and subnet_id:
        # consider the old templates, we only to log to warn user
        # NetworkInterfaces has higher priority than SubnetId
        # LOG.warn() is a deprecated alias of LOG.warning().
        LOG.warning(_LW('"%(subnet)s" will be ignored if specified '
                        '"%(net_interfaces)s". So if you specified the '
                        '"%(net_interfaces)s" property, '
                        'do not specify "%(subnet)s" property.'),
                    {'subnet': self.SUBNET_ID,
                     'net_interfaces': self.NETWORK_INTERFACES})
def handle_delete(self):
    """Delete the server and any implicitly-created port.

    Returns the server id as the cookie for check_delete_complete(),
    or None when there is nothing (left) to delete.
    """
    # make sure to delete the port which implicit-created by heat
    self._port_data_delete()

    if self.resource_id is None:
        return
    try:
        self.client().servers.delete(self.resource_id)
    except Exception as e:
        # Already gone is success for a delete.
        self.client_plugin().ignore_not_found(e)
        return
    return self.resource_id
def check_delete_complete(self, server_id):
    """Return True once the server is gone (or was never created)."""
    return (not server_id or
            self.client_plugin().check_delete_server_complete(server_id))
def handle_suspend(self):
    """Suspend an instance.

    Note we do not wait for the SUSPENDED state, this is polled for by
    check_suspend_complete in a similar way to the create logic so we can
    take advantage of coroutines.
    """
    if self.resource_id is None:
        raise exception.Error(_('Cannot suspend %s, resource_id not set') %
                              self.name)

    try:
        server = self.client().servers.get(self.resource_id)
    except Exception as e:
        if self.client_plugin().is_not_found(e):
            raise exception.NotFound(_('Failed to find instance %s') %
                                     self.resource_id)
        else:
            raise
    else:
        # if the instance has been suspended successful,
        # no need to suspend again
        if self.client_plugin().get_status(server) != 'SUSPENDED':
            LOG.debug("suspending instance %s" % self.resource_id)
            server.suspend()
        # Cookie for check_suspend_complete().
        return server.id
def check_suspend_complete(self, server_id):
    """Poll until the server reaches SUSPENDED.

    Returns False while the server is still transitioning, True once it
    is SUSPENDED, and raises ResourceUnknownStatus if it lands in any
    state outside the expected set.
    """
    cp = self.client_plugin()
    server = cp.fetch_server(server_id)
    if not server:
        return False
    status = cp.get_status(server)
    LOG.debug('%(name)s check_suspend_complete status = %(status)s'
              % {'name': self.name, 'status': status})
    # No need to copy the status sequence into a list just for a
    # membership test.
    if status in cp.deferred_server_statuses + ['ACTIVE']:
        return status == 'SUSPENDED'
    else:
        exc = exception.ResourceUnknownStatus(
            result=_('Suspend of instance %s failed') % server.name,
            resource_status=status)
        raise exc
def handle_resume(self):
    """Resume an instance.

    Note we do not wait for the ACTIVE state, this is polled for by
    check_resume_complete in a similar way to the create logic so we can
    take advantage of coroutines.
    """
    if self.resource_id is None:
        raise exception.Error(_('Cannot resume %s, resource_id not set') %
                              self.name)

    try:
        server = self.client().servers.get(self.resource_id)
    except Exception as e:
        if self.client_plugin().is_not_found(e):
            raise exception.NotFound(_('Failed to find instance %s') %
                                     self.resource_id)
        else:
            raise
    else:
        # if the instance has been resumed successful,
        # no need to resume again
        if self.client_plugin().get_status(server) != 'ACTIVE':
            LOG.debug("resuming instance %s" % self.resource_id)
            server.resume()
        # Cookie for check_resume_complete().
        return server.id
def check_resume_complete(self, server_id):
    """Return True once the resumed server is ACTIVE again."""
    plugin = self.client_plugin()
    return plugin._check_active(server_id, 'Instance')
def resource_mapping():
    """Map AWS resource type names to their Heat resource classes."""
    mapping = {'AWS::EC2::Instance': Instance}
    return mapping
| {
"content_hash": "f0a7780c1c4e52411274a29babb69f8d",
"timestamp": "",
"source": "github",
"line_count": 897,
"max_line_length": 79,
"avg_line_length": 40.09141583054627,
"alnum_prop": 0.5172960347032979,
"repo_name": "dragorosson/heat",
"id": "0eef1d8cb27419184ce7247c4f0e45938a0f2cd1",
"size": "36537",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "heat/engine/resources/aws/ec2/instance.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "7196510"
},
{
"name": "Shell",
"bytes": "32271"
}
],
"symlink_target": ""
} |
import time
import uuid
import fixtures
from lxml import etree
import six
from nova.compute import arch
from nova.virt.libvirt import config as vconfig
# Module-level state for the fake libvirt connection; _reset() restores
# the default-URI flag between tests.

# Allow passing None to the various connect methods
# (i.e. allow the client to rely on default URLs)
allow_default_uri_connection = True

# Has libvirt connection been used at least once
connection_used = False
def _reset():
    """Restore the module-level default-URI flag to its initial state."""
    global allow_default_uri_connection
    allow_default_uri_connection = True
# Fake copies of libvirt's numeric constants, so tests can run without
# the real libvirt-python bindings installed.

# virDomainState
VIR_DOMAIN_NOSTATE = 0
VIR_DOMAIN_RUNNING = 1
VIR_DOMAIN_BLOCKED = 2
VIR_DOMAIN_PAUSED = 3
VIR_DOMAIN_SHUTDOWN = 4
VIR_DOMAIN_SHUTOFF = 5
VIR_DOMAIN_CRASHED = 6

# NOTE(mriedem): These values come from include/libvirt/libvirt-domain.h
VIR_DOMAIN_XML_SECURE = 1
VIR_DOMAIN_XML_INACTIVE = 2
VIR_DOMAIN_XML_UPDATE_CPU = 4
VIR_DOMAIN_XML_MIGRATABLE = 8

# blockRebase flags
VIR_DOMAIN_BLOCK_REBASE_SHALLOW = 1
VIR_DOMAIN_BLOCK_REBASE_REUSE_EXT = 2
VIR_DOMAIN_BLOCK_REBASE_COPY = 8

# blockJobAbort flags
VIR_DOMAIN_BLOCK_JOB_ABORT_ASYNC = 1
VIR_DOMAIN_BLOCK_JOB_ABORT_PIVOT = 2

# Domain event ids / lifecycle event values
VIR_DOMAIN_EVENT_ID_LIFECYCLE = 0

VIR_DOMAIN_EVENT_DEFINED = 0
VIR_DOMAIN_EVENT_UNDEFINED = 1
VIR_DOMAIN_EVENT_STARTED = 2
VIR_DOMAIN_EVENT_SUSPENDED = 3
VIR_DOMAIN_EVENT_RESUMED = 4
VIR_DOMAIN_EVENT_STOPPED = 5
VIR_DOMAIN_EVENT_SHUTDOWN = 6
VIR_DOMAIN_EVENT_PMSUSPENDED = 7

VIR_DOMAIN_UNDEFINE_MANAGED_SAVE = 1

# Domain modification impact flags
VIR_DOMAIN_AFFECT_CURRENT = 0
VIR_DOMAIN_AFFECT_LIVE = 1
VIR_DOMAIN_AFFECT_CONFIG = 2

# CPU comparison results
VIR_CPU_COMPARE_ERROR = -1
VIR_CPU_COMPARE_INCOMPATIBLE = 0
VIR_CPU_COMPARE_IDENTICAL = 1
VIR_CPU_COMPARE_SUPERSET = 2

# Authentication credential types
VIR_CRED_USERNAME = 1
VIR_CRED_AUTHNAME = 2
VIR_CRED_LANGUAGE = 3
VIR_CRED_CNONCE = 4
VIR_CRED_PASSPHRASE = 5
VIR_CRED_ECHOPROMPT = 6
VIR_CRED_NOECHOPROMPT = 7
VIR_CRED_REALM = 8
VIR_CRED_EXTERNAL = 9

# Migration flags
VIR_MIGRATE_LIVE = 1
VIR_MIGRATE_PEER2PEER = 2
VIR_MIGRATE_TUNNELLED = 4
VIR_MIGRATE_PERSIST_DEST = 8
VIR_MIGRATE_UNDEFINE_SOURCE = 16
VIR_MIGRATE_NON_SHARED_INC = 128

VIR_NODE_CPU_STATS_ALL_CPUS = -1

VIR_DOMAIN_START_PAUSED = 1

# libvirtError enums
# (Intentionally different from what's in libvirt. We do this to check,
# that consumers of the library are using the symbolic names rather than
# hardcoding the numerical values)
VIR_FROM_QEMU = 100
VIR_FROM_DOMAIN = 200
VIR_FROM_NWFILTER = 330
VIR_FROM_REMOTE = 340
VIR_FROM_RPC = 345
VIR_FROM_NODEDEV = 666
VIR_ERR_INVALID_ARG = 8
VIR_ERR_NO_SUPPORT = 3
VIR_ERR_XML_DETAIL = 350
VIR_ERR_NO_DOMAIN = 420
VIR_ERR_OPERATION_FAILED = 510
VIR_ERR_OPERATION_INVALID = 55
VIR_ERR_OPERATION_TIMEOUT = 68
VIR_ERR_NO_NWFILTER = 620
VIR_ERR_SYSTEM_ERROR = 900
VIR_ERR_INTERNAL_ERROR = 950
VIR_ERR_CONFIG_UNSUPPORTED = 951
VIR_ERR_NO_NODE_DEVICE = 667
VIR_ERR_NO_SECRET = 66

# Readonly
VIR_CONNECT_RO = 1

# virConnectBaselineCPU flags
VIR_CONNECT_BASELINE_CPU_EXPAND_FEATURES = 1

# snapshotCreateXML flags
VIR_DOMAIN_SNAPSHOT_CREATE_NO_METADATA = 4
VIR_DOMAIN_SNAPSHOT_CREATE_DISK_ONLY = 16
VIR_DOMAIN_SNAPSHOT_CREATE_REUSE_EXT = 32
VIR_DOMAIN_SNAPSHOT_CREATE_QUIESCE = 64

# blockCommit flags
VIR_DOMAIN_BLOCK_COMMIT_RELATIVE = 4

# blockRebase flags
VIR_DOMAIN_BLOCK_REBASE_RELATIVE = 8

# Domain listing filters
VIR_CONNECT_LIST_DOMAINS_ACTIVE = 1
VIR_CONNECT_LIST_DOMAINS_INACTIVE = 2

# secret type
VIR_SECRET_USAGE_TYPE_NONE = 0
VIR_SECRET_USAGE_TYPE_VOLUME = 1
VIR_SECRET_USAGE_TYPE_CEPH = 2
VIR_SECRET_USAGE_TYPE_ISCSI = 3

# Libvirt version to match MIN_LIBVIRT_VERSION in driver.py
FAKE_LIBVIRT_VERSION = 1002001

# Libvirt version to match MIN_QEMU_VERSION in driver.py
FAKE_QEMU_VERSION = 1005003
class HostInfo(object):
    """Fake description of host hardware for the fake libvirt driver."""

    def __init__(self, arch=arch.X86_64, kB_mem=4096,
                 cpus=2, cpu_mhz=800, cpu_nodes=1,
                 cpu_sockets=1, cpu_cores=2,
                 cpu_threads=1, cpu_model="Penryn",
                 cpu_vendor="Intel", numa_topology='',
                 cpu_disabled=None):
        """Create a new Host Info object

        :param arch: (string) indicating the CPU arch
                     (eg 'i686' or whatever else uname -m might return)
        :param kB_mem: (int) memory size in KBytes
        :param cpus: (int) the number of active CPUs
        :param cpu_mhz: (int) expected CPU frequency
        :param cpu_nodes: (int) the number of NUMA cell, 1 for unusual
                          NUMA topologies or uniform
        :param cpu_sockets: (int) number of CPU sockets per node if nodes > 1,
                            total number of CPU sockets otherwise
        :param cpu_cores: (int) number of cores per socket
        :param cpu_threads: (int) number of threads per core
        :param cpu_model: CPU model
        :param cpu_vendor: CPU vendor
        :param numa_topology: Numa topology
        :param cpu_disabled: List of disabled cpus
        """
        self.arch = arch
        self.kB_mem = kB_mem
        self.cpus = cpus
        self.cpu_mhz = cpu_mhz
        self.cpu_nodes = cpu_nodes
        self.cpu_cores = cpu_cores
        self.cpu_threads = cpu_threads
        self.cpu_sockets = cpu_sockets
        self.cpu_model = cpu_model
        self.cpu_vendor = cpu_vendor
        self.numa_topology = numa_topology
        self.disabled_cpus_list = cpu_disabled or []

    @classmethod
    def _gen_numa_topology(cls, cpu_nodes, cpu_sockets, cpu_cores,
                           cpu_threads, kb_mem, numa_mempages_list=None):
        # Fix: the first parameter of a classmethod receives the class,
        # so it is named `cls` rather than the misleading `self`.
        """Build a LibvirtConfigCapsNUMATopology for the given layout.

        Cells get equal shares of ``kb_mem`` and sequentially-numbered
        CPUs with sibling sets derived from ``cpu_threads``.
        """
        topology = vconfig.LibvirtConfigCapsNUMATopology()

        cpu_count = 0
        for cell_count in range(cpu_nodes):
            cell = vconfig.LibvirtConfigCapsNUMACell()
            cell.id = cell_count
            # NOTE(review): true division under Python 3 makes this a
            # float when kb_mem is not divisible by cpu_nodes — confirm
            # whether integer memory is required downstream.
            cell.memory = kb_mem / cpu_nodes
            for socket_count in range(cpu_sockets):
                for cpu_num in range(cpu_cores * cpu_threads):
                    cpu = vconfig.LibvirtConfigCapsNUMACPU()
                    cpu.id = cpu_count
                    cpu.socket_id = cell_count
                    cpu.core_id = cpu_num // cpu_threads
                    # All hyperthread siblings of this CPU, including itself.
                    cpu.siblings = set([cpu_threads *
                                       (cpu_count // cpu_threads) + thread
                                        for thread in range(cpu_threads)])
                    cell.cpus.append(cpu)

                    cpu_count += 1
            # Set mempages per numa cell. if numa_mempages_list is empty
            # we will set only the default 4K pages.
            if numa_mempages_list:
                mempages = numa_mempages_list[cell_count]
            else:
                mempages = vconfig.LibvirtConfigCapsNUMAPages()
                mempages.size = 4
                mempages.total = cell.memory / mempages.size
                mempages = [mempages]
            cell.mempages = mempages
            topology.cells.append(cell)

        return topology

    def get_numa_topology(self):
        """Return the topology passed at construction time."""
        return self.numa_topology
# Domain job states (as reported by virDomainGetJobInfo)
VIR_DOMAIN_JOB_NONE = 0
VIR_DOMAIN_JOB_BOUNDED = 1
VIR_DOMAIN_JOB_UNBOUNDED = 2
VIR_DOMAIN_JOB_COMPLETED = 3
VIR_DOMAIN_JOB_FAILED = 4
VIR_DOMAIN_JOB_CANCELLED = 5
def _parse_disk_info(element):
disk_info = {}
disk_info['type'] = element.get('type', 'file')
disk_info['device'] = element.get('device', 'disk')
driver = element.find('./driver')
if driver is not None:
disk_info['driver_name'] = driver.get('name')
disk_info['driver_type'] = driver.get('type')
source = element.find('./source')
if source is not None:
disk_info['source'] = source.get('file')
if not disk_info['source']:
disk_info['source'] = source.get('dev')
if not disk_info['source']:
disk_info['source'] = source.get('path')
target = element.find('./target')
if target is not None:
disk_info['target_dev'] = target.get('dev')
disk_info['target_bus'] = target.get('bus')
return disk_info
def disable_event_thread(self):
    """Stop the nova libvirt driver from starting its event thread.

    The driver normally spawns a native thread which polls the libvirt
    event channel in a sleep(1) loop.  In unit tests that floating
    thread is a hazard: cases asserting on specific sleep/retry patterns
    can fail when it happens to fire.  Patch the initializer away.
    """
    # We are patching a method on a class, so MonkeyPatch cannot
    # auto-import the module; import it explicitly or the patching may
    # silently fail.
    import nova.virt.libvirt.host  # noqa
    self.useFixture(fixtures.MonkeyPatch(
        'nova.virt.libvirt.host.Host._init_events',
        lambda *args, **kwargs: None))
class libvirtError(Exception):
    """Copy of (and stand-in for) `libvirt-python:libvirt-override.py`'s
    libvirtError.

    A test environment that has the real `libvirt-python` installed uses
    its `libvirtError` instead of this fake, so this class must stay
    strictly compatible with the original: same `__init__` arguments and
    the same instance attributes.

    Usage::

        exc = libvirtError('my message')
        exc.err = (libvirt.VIR_ERR_NO_SUPPORT,)

    ``self.err`` is a tuple of the form::

        (error_code, error_domain, error_message, error_level,
         str1, str2, str3, int1, int2)

    The `make_libvirtError` convenience function can set all of these in
    one shot.
    """

    def __init__(self, defmsg, conn=None, dom=None, net=None, pool=None,
                 vol=None):
        Exception.__init__(self, defmsg)
        self.err = None

    def _err_field(self, index):
        # One element of the 9-tuple in self.err, or None when no error
        # details have been attached yet.
        if self.err is None:
            return None
        return self.err[index]

    def get_error_code(self):
        return self._err_field(0)

    def get_error_domain(self):
        return self._err_field(1)

    def get_error_message(self):
        return self._err_field(2)

    def get_error_level(self):
        return self._err_field(3)

    def get_str1(self):
        return self._err_field(4)

    def get_str2(self):
        return self._err_field(5)

    def get_str3(self):
        return self._err_field(6)

    def get_int1(self):
        return self._err_field(7)

    def get_int2(self):
        return self._err_field(8)
class NWFilter(object):
    """Minimal stand-in for libvirt's virNWFilter objects."""

    def __init__(self, connection, xml):
        self._connection = connection
        self._xml = xml
        self._parse_xml(xml)

    def _parse_xml(self, xml):
        # Only the filter name matters to the fake Connection, which
        # keys its registry on it.
        self._name = etree.fromstring(xml).find('.').get('name')

    def undefine(self):
        self._connection._remove_filter(self)
class NodeDevice(object):
    """Minimal stand-in for libvirt's virNodeDevice objects."""

    def __init__(self, connection, xml=None):
        self._connection = connection
        self._xml = xml
        if xml is not None:
            self._parse_xml(xml)

    def _parse_xml(self, xml):
        # Only the device name matters to the fake Connection's registry.
        self._name = etree.fromstring(xml).find('.').get('name')

    def attach(self):
        pass

    # NOTE: "dettach" (sic) mirrors the real libvirt-python binding's
    # misspelled method name — keep it for compatibility.
    def dettach(self):
        pass

    def reset(self):
        pass
class Domain(object):
    """Fake virDomain.

    Implements just enough of the libvirt domain API for the Nova
    libvirt driver unit tests: lifecycle transitions, XML definition
    parsing and round-tripping via XMLDesc(), device attach/detach, and
    canned statistics/job values.
    """

    def __init__(self, connection, xml, running=False, transient=False):
        self._connection = connection
        if running:
            connection._mark_running(self)

        self._state = VIR_DOMAIN_RUNNING if running else VIR_DOMAIN_SHUTOFF
        self._transient = transient
        self._def = self._parse_definition(xml)
        self._has_saved_state = False
        self._snapshots = {}
        # _mark_running() (above) registered us under the previous value
        # of the counter and then incremented it.
        self._id = self._connection._id_counter

    def _parse_definition(self, xml):
        """Parse a domain XML document into the dict held in self._def.

        Raises libvirtError (VIR_ERR_XML_DETAIL) on malformed XML, as
        the real libvirt would.
        """
        try:
            tree = etree.fromstring(xml)
        except etree.ParseError:
            raise make_libvirtError(
                libvirtError, "Invalid XML.",
                error_code=VIR_ERR_XML_DETAIL,
                error_domain=VIR_FROM_DOMAIN)

        definition = {}

        name = tree.find('./name')
        if name is not None:
            definition['name'] = name.text

        uuid_elem = tree.find('./uuid')
        if uuid_elem is not None:
            definition['uuid'] = uuid_elem.text
        else:
            # Real libvirt generates a UUID when none is supplied.
            definition['uuid'] = str(uuid.uuid4())

        vcpu = tree.find('./vcpu')
        if vcpu is not None:
            definition['vcpu'] = int(vcpu.text)

        memory = tree.find('./memory')
        if memory is not None:
            definition['memory'] = int(memory.text)

        # Named os_info to avoid shadowing the stdlib ``os`` module.
        os_info = {}
        os_type = tree.find('./os/type')
        if os_type is not None:
            os_info['type'] = os_type.text
            os_info['arch'] = os_type.get('arch',
                                          self._connection.host_info.arch)

        os_kernel = tree.find('./os/kernel')
        if os_kernel is not None:
            os_info['kernel'] = os_kernel.text

        os_initrd = tree.find('./os/initrd')
        if os_initrd is not None:
            os_info['initrd'] = os_initrd.text

        os_cmdline = tree.find('./os/cmdline')
        if os_cmdline is not None:
            os_info['cmdline'] = os_cmdline.text

        os_boot = tree.find('./os/boot')
        if os_boot is not None:
            os_info['boot_dev'] = os_boot.get('dev')

        definition['os'] = os_info

        features = {}
        acpi = tree.find('./features/acpi')
        if acpi is not None:
            features['acpi'] = True
        definition['features'] = features

        devices = {}
        device_nodes = tree.find('./devices')
        if device_nodes is not None:
            disks_info = []
            disks = device_nodes.findall('./disk')
            for disk in disks:
                disks_info += [_parse_disk_info(disk)]
            devices['disks'] = disks_info

            nics_info = []
            nics = device_nodes.findall('./interface')
            for nic in nics:
                nic_info = {}
                nic_info['type'] = nic.get('type')

                mac = nic.find('./mac')
                if mac is not None:
                    nic_info['mac'] = mac.get('address')

                source = nic.find('./source')
                if source is not None:
                    # The source attribute name depends on the NIC type.
                    if nic_info['type'] == 'network':
                        nic_info['source'] = source.get('network')
                    elif nic_info['type'] == 'bridge':
                        nic_info['source'] = source.get('bridge')

                nics_info += [nic_info]

            devices['nics'] = nics_info

        definition['devices'] = devices

        return definition

    def create(self):
        self.createWithFlags(0)

    def createWithFlags(self, flags):
        # FIXME: Not handling flags at the moment
        self._state = VIR_DOMAIN_RUNNING
        self._connection._mark_running(self)
        # Starting a domain consumes any managed save image.
        self._has_saved_state = False

    def isActive(self):
        return int(self._state == VIR_DOMAIN_RUNNING)

    def undefine(self):
        self._connection._undefine(self)

    def isPersistent(self):
        return True

    def undefineFlags(self, flags):
        self.undefine()
        if flags & VIR_DOMAIN_UNDEFINE_MANAGED_SAVE:
            if self.hasManagedSaveImage(0):
                self.managedSaveRemove()

    def destroy(self):
        self._state = VIR_DOMAIN_SHUTOFF
        self._connection._mark_not_running(self)

    def ID(self):
        return self._id

    def name(self):
        return self._def['name']

    def UUIDString(self):
        return self._def['uuid']

    def interfaceStats(self, device):
        # (rx_bytes, rx_packets, rx_errs, rx_drop,
        #  tx_bytes, tx_packets, tx_errs, tx_drop)
        return [10000242400, 1234, 0, 2, 213412343233, 34214234, 23, 3]

    def blockStats(self, device):
        # (rd_req, rd_bytes, wr_req, wr_bytes, errs)
        return [2, 10000242400, 234, 2343424234, 34]

    def suspend(self):
        self._state = VIR_DOMAIN_PAUSED

    def shutdown(self):
        self._state = VIR_DOMAIN_SHUTDOWN
        self._connection._mark_not_running(self)

    def reset(self, flags):
        # FIXME: Not handling flags at the moment
        self._state = VIR_DOMAIN_RUNNING
        self._connection._mark_running(self)

    def info(self):
        # (state, maxMem, memory, nrVirtCpu, cpuTime)
        return [self._state,
                int(self._def['memory']),
                int(self._def['memory']),
                self._def['vcpu'],
                123456789]

    def migrateToURI(self, desturi, flags, dname, bandwidth):
        raise make_libvirtError(
                libvirtError,
                "Migration always fails for fake libvirt!",
                error_code=VIR_ERR_INTERNAL_ERROR,
                error_domain=VIR_FROM_QEMU)

    def migrateToURI2(self, dconnuri, miguri, dxml, flags, dname, bandwidth):
        raise make_libvirtError(
                libvirtError,
                "Migration always fails for fake libvirt!",
                error_code=VIR_ERR_INTERNAL_ERROR,
                error_domain=VIR_FROM_QEMU)

    def migrateToURI3(self, dconnuri, params, logical_sum):
        raise make_libvirtError(
                libvirtError,
                "Migration always fails for fake libvirt!",
                error_code=VIR_ERR_INTERNAL_ERROR,
                error_domain=VIR_FROM_QEMU)

    def migrateSetMaxDowntime(self, downtime):
        pass

    def attachDevice(self, xml):
        disk_info = _parse_disk_info(etree.fromstring(xml))
        disk_info['_attached'] = True
        self._def['devices']['disks'] += [disk_info]
        return True

    def attachDeviceFlags(self, xml, flags):
        if (flags & VIR_DOMAIN_AFFECT_LIVE and
                self._state != VIR_DOMAIN_RUNNING):
            raise make_libvirtError(
                libvirtError,
                "AFFECT_LIVE only allowed for running domains!",
                error_code=VIR_ERR_INTERNAL_ERROR,
                error_domain=VIR_FROM_QEMU)
        self.attachDevice(xml)

    def detachDevice(self, xml):
        # NOTE: only reports whether such an attached device exists; it
        # does not actually remove it from the definition.
        disk_info = _parse_disk_info(etree.fromstring(xml))
        disk_info['_attached'] = True
        return disk_info in self._def['devices']['disks']

    def detachDeviceFlags(self, xml, flags):
        self.detachDevice(xml)

    def setUserPassword(self, user, password, flags=0):
        pass

    def XMLDesc(self, flags):
        """Render the parsed definition back into a domain XML document."""
        disks = ''
        for disk in self._def['devices']['disks']:
            disks += '''<disk type='%(type)s' device='%(device)s'>
      <driver name='%(driver_name)s' type='%(driver_type)s'/>
      <source file='%(source)s'/>
      <target dev='%(target_dev)s' bus='%(target_bus)s'/>
      <address type='drive' controller='0' bus='0' unit='0'/>
    </disk>''' % disk

        nics = ''
        for nic in self._def['devices']['nics']:
            nics += '''<interface type='%(type)s'>
      <mac address='%(mac)s'/>
      <source %(type)s='%(source)s'/>
      <address type='pci' domain='0x0000' bus='0x00' slot='0x03'
               function='0x0'/>
    </interface>''' % nic

        return '''<domain type='kvm'>
  <name>%(name)s</name>
  <uuid>%(uuid)s</uuid>
  <memory>%(memory)s</memory>
  <currentMemory>%(memory)s</currentMemory>
  <vcpu>%(vcpu)s</vcpu>
  <os>
    <type arch='%(arch)s' machine='pc-0.12'>hvm</type>
    <boot dev='hd'/>
  </os>
  <features>
    <acpi/>
    <apic/>
    <pae/>
  </features>
  <clock offset='localtime'/>
  <on_poweroff>destroy</on_poweroff>
  <on_reboot>restart</on_reboot>
  <on_crash>restart</on_crash>
  <devices>
    <emulator>/usr/bin/kvm</emulator>
    %(disks)s
    <controller type='ide' index='0'>
      <address type='pci' domain='0x0000' bus='0x00' slot='0x01'
               function='0x1'/>
    </controller>
    %(nics)s
    <serial type='file'>
      <source path='dummy.log'/>
      <target port='0'/>
    </serial>
    <serial type='pty'>
      <source pty='/dev/pts/27'/>
      <target port='1'/>
    </serial>
    <serial type='tcp'>
      <source host="-1" service="-1" mode="bind"/>
    </serial>
    <console type='file'>
      <source path='dummy.log'/>
      <target port='0'/>
    </console>
    <input type='tablet' bus='usb'/>
    <input type='mouse' bus='ps2'/>
    <graphics type='vnc' port='-1' autoport='yes'/>
    <graphics type='spice' port='-1' autoport='yes'/>
    <video>
      <model type='cirrus' vram='9216' heads='1'/>
      <address type='pci' domain='0x0000' bus='0x00' slot='0x02'
               function='0x0'/>
    </video>
    <memballoon model='virtio'>
      <address type='pci' domain='0x0000' bus='0x00' slot='0x04'
               function='0x0'/>
    </memballoon>
  </devices>
</domain>''' % {'name': self._def['name'],
                'uuid': self._def['uuid'],
                'memory': self._def['memory'],
                'vcpu': self._def['vcpu'],
                'arch': self._def['os']['arch'],
                'disks': disks,
                'nics': nics}

    def managedSave(self, flags):
        self._connection._mark_not_running(self)
        self._has_saved_state = True

    def managedSaveRemove(self, flags=0):
        # flags defaults to 0 (like the real binding) so that
        # undefineFlags() can call this without arguments.
        self._has_saved_state = False

    def hasManagedSaveImage(self, flags):
        return int(self._has_saved_state)

    def resume(self):
        self._state = VIR_DOMAIN_RUNNING

    def snapshotCreateXML(self, xml, flags):
        tree = etree.fromstring(xml)
        name = tree.find('./name').text
        snapshot = DomainSnapshot(name, self)
        self._snapshots[name] = snapshot
        return snapshot

    def vcpus(self):
        # (per-vcpu info tuples, per-vcpu pinning maps)
        vcpus = ([], [])
        for i in range(0, self._def['vcpu']):
            vcpus[0].append((i, 1, 120405, i))
            vcpus[1].append((True, True, True, True))
        return vcpus

    def memoryStats(self):
        return {}

    def maxMemory(self):
        return self._def['memory']

    def blockJobInfo(self, disk, flags):
        return {}

    def blockJobAbort(self, disk, flags):
        pass

    def blockResize(self, disk, size):
        pass

    def blockRebase(self, disk, base, bandwidth=0, flags=0):
        # Test the flag bit with ``&``; the previous logical ``and``
        # made ANY non-zero flags value trigger this error.
        if (not base) and (flags & VIR_DOMAIN_BLOCK_REBASE_RELATIVE):
            raise make_libvirtError(
                libvirtError,
                'flag VIR_DOMAIN_BLOCK_REBASE_RELATIVE is '
                'valid only with non-null base',
                error_code=VIR_ERR_INVALID_ARG,
                error_domain=VIR_FROM_QEMU)
        return 0

    def blockCommit(self, disk, base, top, flags):
        return 0

    def jobInfo(self):
        # NOTE(danms): This is an array of 12 integers, so just report
        # something to avoid an IndexError if we look at this
        return [0] * 12

    def jobStats(self, flags=0):
        return {}

    def injectNMI(self, flags=0):
        return 0

    def abortJob(self):
        pass

    def fsFreeze(self):
        pass

    def fsThaw(self):
        pass
class DomainSnapshot(object):
    """Trivial stand-in for virDomainSnapshot; the owning Domain keeps
    the registry of snapshots."""

    def __init__(self, name, domain):
        self._name = name
        self._domain = domain

    def delete(self, flags):
        # Deleting just forgets the snapshot on the owning domain;
        # flags are ignored by the fake.
        self._domain._snapshots.pop(self._name)
class Connection(object):
    """Fake virConnect.

    Tracks defined and running fake Domains, nwfilters and node
    devices, and delivers lifecycle events to registered callbacks.
    """

    def __init__(self, uri=None, readonly=False, version=FAKE_LIBVIRT_VERSION,
                 hv_version=FAKE_QEMU_VERSION, host_info=None):
        if not uri or uri == '':
            if allow_default_uri_connection:
                uri = 'qemu:///session'
            else:
                raise ValueError("URI was None, but fake libvirt is "
                                 "configured to not accept this.")

        uri_whitelist = ['qemu:///system',
                         'qemu:///session',
                         'lxc:///',     # from LibvirtDriver._uri()
                         'xen:///',     # from LibvirtDriver._uri()
                         'uml:///system',
                         'test:///default',
                         'parallels:///system']

        if uri not in uri_whitelist:
            raise make_libvirtError(
                    libvirtError,
                   "libvirt error: no connection driver "
                   "available for No connection for URI %s" % uri,
                   error_code=5, error_domain=0)

        self.readonly = readonly
        self._uri = uri
        self._vms = {}             # name -> Domain, all defined domains
        self._running_vms = {}     # id -> Domain, running domains only
        self._id_counter = 1  # libvirt reserves 0 for the hypervisor.
        self._nwfilters = {}
        self._nodedevs = {}
        self._event_callbacks = {}
        self.fakeLibVersion = version
        self.fakeVersion = hv_version
        self.host_info = host_info or HostInfo()

    def _add_filter(self, nwfilter):
        self._nwfilters[nwfilter._name] = nwfilter

    def _remove_filter(self, nwfilter):
        del self._nwfilters[nwfilter._name]

    def _add_nodedev(self, nodedev):
        self._nodedevs[nodedev._name] = nodedev

    def _remove_nodedev(self, nodedev):
        del self._nodedevs[nodedev._name]

    def _mark_running(self, dom):
        self._running_vms[self._id_counter] = dom
        self._emit_lifecycle(dom, VIR_DOMAIN_EVENT_STARTED, 0)
        self._id_counter += 1

    def _mark_not_running(self, dom):
        if dom._transient:
            self._undefine(dom)

        dom._id = -1

        for (k, v) in six.iteritems(self._running_vms):
            if v == dom:
                # Returning right after the delete keeps the
                # mutation-during-iteration safe.
                del self._running_vms[k]
                self._emit_lifecycle(dom, VIR_DOMAIN_EVENT_STOPPED, 0)
                return

    def _undefine(self, dom):
        del self._vms[dom.name()]
        if not dom._transient:
            self._emit_lifecycle(dom, VIR_DOMAIN_EVENT_UNDEFINED, 0)

    def getInfo(self):
        return [self.host_info.arch,
                self.host_info.kB_mem,
                self.host_info.cpus,
                self.host_info.cpu_mhz,
                self.host_info.cpu_nodes,
                self.host_info.cpu_sockets,
                self.host_info.cpu_cores,
                self.host_info.cpu_threads]

    def numOfDomains(self):
        return len(self._running_vms)

    def listDomainsID(self):
        return list(self._running_vms.keys())

    def lookupByID(self, id):
        if id in self._running_vms:
            return self._running_vms[id]
        raise make_libvirtError(
                libvirtError,
                'Domain not found: no domain with matching id %d' % id,
                error_code=VIR_ERR_NO_DOMAIN,
                error_domain=VIR_FROM_QEMU)

    def lookupByName(self, name):
        if name in self._vms:
            return self._vms[name]
        raise make_libvirtError(
                libvirtError,
                'Domain not found: no domain with matching name "%s"' % name,
                error_code=VIR_ERR_NO_DOMAIN,
                error_domain=VIR_FROM_QEMU)

    def listAllDomains(self, flags):
        """Return Domain objects filtered by the ACTIVE/INACTIVE flags.

        Fixed to iterate the Domain objects (dict values) and test
        their ``_state`` attribute; the previous version iterated the
        dict keys (domain names) and read a non-existent ``state``
        attribute, raising AttributeError whenever a filter flag was
        set.
        """
        vms = []
        for vm in self._vms.values():
            if flags & VIR_CONNECT_LIST_DOMAINS_ACTIVE:
                if vm._state != VIR_DOMAIN_SHUTOFF:
                    vms.append(vm)
            if flags & VIR_CONNECT_LIST_DOMAINS_INACTIVE:
                if vm._state == VIR_DOMAIN_SHUTOFF:
                    vms.append(vm)
        return vms

    def _emit_lifecycle(self, dom, event, detail):
        if VIR_DOMAIN_EVENT_ID_LIFECYCLE not in self._event_callbacks:
            return

        cbinfo = self._event_callbacks[VIR_DOMAIN_EVENT_ID_LIFECYCLE]
        callback = cbinfo[0]
        opaque = cbinfo[1]
        callback(self, dom, event, detail, opaque)

    def defineXML(self, xml):
        dom = Domain(connection=self, running=False, transient=False, xml=xml)
        self._vms[dom.name()] = dom
        self._emit_lifecycle(dom, VIR_DOMAIN_EVENT_DEFINED, 0)
        return dom

    def createXML(self, xml, flags):
        dom = Domain(connection=self, running=True, transient=True, xml=xml)
        self._vms[dom.name()] = dom
        self._emit_lifecycle(dom, VIR_DOMAIN_EVENT_STARTED, 0)
        return dom

    def getType(self):
        # Only the system URI identifies as QEMU; otherwise None.
        if self._uri == 'qemu:///system':
            return 'QEMU'

    def getLibVersion(self):
        return self.fakeLibVersion

    def getVersion(self):
        return self.fakeVersion

    def getHostname(self):
        return 'compute1'

    def domainEventRegisterAny(self, dom, eventid, callback, opaque):
        self._event_callbacks[eventid] = [callback, opaque]

    def registerCloseCallback(self, cb, opaque):
        pass

    def getCPUMap(self):
        """Return calculated CPU map from HostInfo, by default showing 2
        online CPUs.
        """
        active_cpus = self.host_info.cpus
        total_cpus = active_cpus + len(self.host_info.disabled_cpus_list)
        cpu_map = [True if cpu_num not in self.host_info.disabled_cpus_list
                   else False for cpu_num in range(total_cpus)]
        return (total_cpus, cpu_map, active_cpus)

    def getCapabilities(self):
        """Return spoofed capabilities."""
        numa_topology = self.host_info.get_numa_topology()
        # Accept either a config object or a pre-rendered XML string.
        if isinstance(numa_topology, vconfig.LibvirtConfigCapsNUMATopology):
            numa_topology = numa_topology.to_xml()

        return '''<capabilities>
  <host>
    <uuid>cef19ce0-0ca2-11df-855d-b19fbce37686</uuid>
    <cpu>
      <arch>x86_64</arch>
      <model>Penryn</model>
      <vendor>Intel</vendor>
      <topology sockets='%(sockets)s' cores='%(cores)s' threads='%(threads)s'/>
      <feature name='xtpr'/>
      <feature name='tm2'/>
      <feature name='est'/>
      <feature name='vmx'/>
      <feature name='ds_cpl'/>
      <feature name='monitor'/>
      <feature name='pbe'/>
      <feature name='tm'/>
      <feature name='ht'/>
      <feature name='ss'/>
      <feature name='acpi'/>
      <feature name='ds'/>
      <feature name='vme'/>
    </cpu>
    <migration_features>
      <live/>
      <uri_transports>
        <uri_transport>tcp</uri_transport>
      </uri_transports>
    </migration_features>
    %(topology)s
    <secmodel>
      <model>apparmor</model>
      <doi>0</doi>
    </secmodel>
  </host>
  <guest>
    <os_type>hvm</os_type>
    <arch name='i686'>
      <wordsize>32</wordsize>
      <emulator>/usr/bin/qemu</emulator>
      <machine>pc-0.14</machine>
      <machine canonical='pc-0.14'>pc</machine>
      <machine>pc-0.13</machine>
      <machine>pc-0.12</machine>
      <machine>pc-0.11</machine>
      <machine>pc-0.10</machine>
      <machine>isapc</machine>
      <domain type='qemu'>
      </domain>
      <domain type='kvm'>
        <emulator>/usr/bin/kvm</emulator>
        <machine>pc-0.14</machine>
        <machine canonical='pc-0.14'>pc</machine>
        <machine>pc-0.13</machine>
        <machine>pc-0.12</machine>
        <machine>pc-0.11</machine>
        <machine>pc-0.10</machine>
        <machine>isapc</machine>
      </domain>
    </arch>
    <features>
      <cpuselection/>
      <deviceboot/>
      <pae/>
      <nonpae/>
      <acpi default='on' toggle='yes'/>
      <apic default='on' toggle='no'/>
    </features>
  </guest>
  <guest>
    <os_type>hvm</os_type>
    <arch name='x86_64'>
      <wordsize>64</wordsize>
      <emulator>/usr/bin/qemu-system-x86_64</emulator>
      <machine>pc-0.14</machine>
      <machine canonical='pc-0.14'>pc</machine>
      <machine>pc-0.13</machine>
      <machine>pc-0.12</machine>
      <machine>pc-0.11</machine>
      <machine>pc-0.10</machine>
      <machine>isapc</machine>
      <domain type='qemu'>
      </domain>
      <domain type='kvm'>
        <emulator>/usr/bin/kvm</emulator>
        <machine>pc-0.14</machine>
        <machine canonical='pc-0.14'>pc</machine>
        <machine>pc-0.13</machine>
        <machine>pc-0.12</machine>
        <machine>pc-0.11</machine>
        <machine>pc-0.10</machine>
        <machine>isapc</machine>
      </domain>
    </arch>
    <features>
      <cpuselection/>
      <deviceboot/>
      <acpi default='on' toggle='yes'/>
      <apic default='on' toggle='no'/>
    </features>
  </guest>
  <guest>
    <os_type>hvm</os_type>
    <arch name='armv7l'>
      <wordsize>32</wordsize>
      <emulator>/usr/bin/qemu-system-arm</emulator>
      <machine>integratorcp</machine>
      <machine>vexpress-a9</machine>
      <machine>syborg</machine>
      <machine>musicpal</machine>
      <machine>mainstone</machine>
      <machine>n800</machine>
      <machine>n810</machine>
      <machine>n900</machine>
      <machine>cheetah</machine>
      <machine>sx1</machine>
      <machine>sx1-v1</machine>
      <machine>beagle</machine>
      <machine>beaglexm</machine>
      <machine>tosa</machine>
      <machine>akita</machine>
      <machine>spitz</machine>
      <machine>borzoi</machine>
      <machine>terrier</machine>
      <machine>connex</machine>
      <machine>verdex</machine>
      <machine>lm3s811evb</machine>
      <machine>lm3s6965evb</machine>
      <machine>realview-eb</machine>
      <machine>realview-eb-mpcore</machine>
      <machine>realview-pb-a8</machine>
      <machine>realview-pbx-a9</machine>
      <machine>versatilepb</machine>
      <machine>versatileab</machine>
      <domain type='qemu'>
      </domain>
    </arch>
    <features>
      <deviceboot/>
    </features>
  </guest>
  <guest>
    <os_type>hvm</os_type>
    <arch name='mips'>
      <wordsize>32</wordsize>
      <emulator>/usr/bin/qemu-system-mips</emulator>
      <machine>malta</machine>
      <machine>mipssim</machine>
      <machine>magnum</machine>
      <machine>pica61</machine>
      <machine>mips</machine>
      <domain type='qemu'>
      </domain>
    </arch>
    <features>
      <deviceboot/>
    </features>
  </guest>
  <guest>
    <os_type>hvm</os_type>
    <arch name='mipsel'>
      <wordsize>32</wordsize>
      <emulator>/usr/bin/qemu-system-mipsel</emulator>
      <machine>malta</machine>
      <machine>mipssim</machine>
      <machine>magnum</machine>
      <machine>pica61</machine>
      <machine>mips</machine>
      <domain type='qemu'>
      </domain>
    </arch>
    <features>
      <deviceboot/>
    </features>
  </guest>
  <guest>
    <os_type>hvm</os_type>
    <arch name='sparc'>
      <wordsize>32</wordsize>
      <emulator>/usr/bin/qemu-system-sparc</emulator>
      <machine>SS-5</machine>
      <machine>leon3_generic</machine>
      <machine>SS-10</machine>
      <machine>SS-600MP</machine>
      <machine>SS-20</machine>
      <machine>Voyager</machine>
      <machine>LX</machine>
      <machine>SS-4</machine>
      <machine>SPARCClassic</machine>
      <machine>SPARCbook</machine>
      <machine>SS-1000</machine>
      <machine>SS-2000</machine>
      <machine>SS-2</machine>
      <domain type='qemu'>
      </domain>
    </arch>
  </guest>
  <guest>
    <os_type>hvm</os_type>
    <arch name='ppc'>
      <wordsize>32</wordsize>
      <emulator>/usr/bin/qemu-system-ppc</emulator>
      <machine>g3beige</machine>
      <machine>virtex-ml507</machine>
      <machine>mpc8544ds</machine>
      <machine canonical='bamboo-0.13'>bamboo</machine>
      <machine>bamboo-0.13</machine>
      <machine>bamboo-0.12</machine>
      <machine>ref405ep</machine>
      <machine>taihu</machine>
      <machine>mac99</machine>
      <machine>prep</machine>
      <domain type='qemu'>
      </domain>
    </arch>
    <features>
      <deviceboot/>
    </features>
  </guest>
</capabilities>''' % {'sockets': self.host_info.cpu_sockets,
                      'cores': self.host_info.cpu_cores,
                      'threads': self.host_info.cpu_threads,
                      'topology': numa_topology}

    def compareCPU(self, xml, flags):
        tree = etree.fromstring(xml)

        arch_node = tree.find('./arch')
        if arch_node is not None:
            if arch_node.text not in [arch.X86_64,
                                      arch.I686]:
                return VIR_CPU_COMPARE_INCOMPATIBLE

        model_node = tree.find('./model')
        if model_node is not None:
            if model_node.text != self.host_info.cpu_model:
                return VIR_CPU_COMPARE_INCOMPATIBLE

        vendor_node = tree.find('./vendor')
        if vendor_node is not None:
            if vendor_node.text != self.host_info.cpu_vendor:
                return VIR_CPU_COMPARE_INCOMPATIBLE

        # The rest of the stuff libvirt implements is rather complicated
        # and I don't think it adds much value to replicate it here.
        return VIR_CPU_COMPARE_IDENTICAL

    def getCPUStats(self, cpuNum, flag):
        if cpuNum < 2:
            return {'kernel': 5664160000000,
                    'idle': 1592705190000000,
                    'user': 26728850000000,
                    'iowait': 6121490000000}
        else:
            raise make_libvirtError(
                    libvirtError,
                    "invalid argument: Invalid cpu number",
                    error_code=VIR_ERR_INTERNAL_ERROR,
                    error_domain=VIR_FROM_QEMU)

    def nwfilterLookupByName(self, name):
        try:
            return self._nwfilters[name]
        except KeyError:
            raise make_libvirtError(
                    libvirtError,
                    "no nwfilter with matching name %s" % name,
                    error_code=VIR_ERR_NO_NWFILTER,
                    error_domain=VIR_FROM_NWFILTER)

    def nwfilterDefineXML(self, xml):
        nwfilter = NWFilter(self, xml)
        self._add_filter(nwfilter)

    def nodeDeviceLookupByName(self, name):
        try:
            return self._nodedevs[name]
        except KeyError:
            raise make_libvirtError(
                    libvirtError,
                    "no nodedev with matching name %s" % name,
                    error_code=VIR_ERR_NO_NODE_DEVICE,
                    error_domain=VIR_FROM_NODEDEV)

    def listDefinedDomains(self):
        return []

    def listDevices(self, cap, flags):
        return []

    def baselineCPU(self, cpu, flag):
        """Add new libvirt API."""
        return """<cpu mode='custom' match='exact'>
  <model>Penryn</model>
  <vendor>Intel</vendor>
  <feature name='xtpr'/>
  <feature name='tm2'/>
  <feature name='est'/>
  <feature name='vmx'/>
  <feature name='ds_cpl'/>
  <feature name='monitor'/>
  <feature name='pbe'/>
  <feature name='tm'/>
  <feature name='ht'/>
  <feature name='ss'/>
  <feature name='acpi'/>
  <feature name='ds'/>
  <feature name='vme'/>
  <feature policy='require' name='aes'/>
</cpu>"""

    def secretLookupByUsage(self, usage_type_obj, usage_id):
        pass

    def secretDefineXML(self, xml):
        pass
def openAuth(uri, auth, flags=0):
    """Fake of libvirt.openAuth().

    ``auth`` follows the real binding's convention:
    ``[credtype_list, callback, ...]`` — a list whose first element is a
    list of accepted credential types and whose second is a callable.
    """
    if not isinstance(auth, list):
        raise Exception("Expected a list for 'auth' parameter")

    # auth[0] is the list of credential types; the old error message
    # wrongly said "function" here.
    if not isinstance(auth[0], list):
        raise Exception("Expected a list in 'auth[0]' parameter")

    if not callable(auth[1]):
        raise Exception("Expected a function in 'auth[1]' parameter")

    return Connection(uri, (flags == VIR_CONNECT_RO))
def virEventRunDefaultImpl():
    """Fake one iteration of libvirt's default event loop: just block
    briefly, like the real implementation does while waiting for events."""
    time.sleep(1)
def virEventRegisterDefaultImpl():
    """Mimic libvirt's requirement that the default event implementation
    be registered before any connection is used.

    ``connection_used`` is a module-level flag (set elsewhere in this
    file) recording whether a fake connection has been exercised.
    """
    if connection_used:
        raise Exception("virEventRegisterDefaultImpl() must be "
                        "called before connection is used.")
def registerErrorHandler(handler, ctxt):
    """No-op: the fake never delivers asynchronous errors to a handler."""
    pass
def make_libvirtError(error_class, msg, error_code=None,
                      error_domain=None, error_message=None,
                      error_level=None, str1=None, str2=None, str3=None,
                      int1=None, int2=None):
    """Build a `libvirtError` with its ``err`` tuple filled in, sparing
    callers from assembling the 9-tuple by hand.

    ``error_class`` is passed in because it may be either
    `libvirt.libvirtError` or `fakelibvirt.libvirtError`, depending on
    whether the real `libvirt-python` is installed.
    """
    err_details = (error_code, error_domain, error_message, error_level,
                   str1, str2, str3, int1, int2)
    exc = error_class(msg)
    exc.err = err_details
    return exc
# Aliases matching the public class names exported by the real
# libvirt-python binding, so test code can use either spelling.
virDomain = Domain
virNodeDevice = NodeDevice
virConnect = Connection
class FakeLibvirtFixture(fixtures.Fixture):
    """Performs global setup/stubbing for all libvirt tests.
    """

    def setUp(self):
        super(FakeLibvirtFixture, self).setUp()
        # Stop the driver's native event thread so tests that assert on
        # sleep/retry behaviour aren't disturbed by it firing.
        disable_event_thread(self)
| {
"content_hash": "2b03f8647a00913ca73fd761a5cc15e0",
"timestamp": "",
"source": "github",
"line_count": 1345,
"max_line_length": 79,
"avg_line_length": 30.136802973977694,
"alnum_prop": 0.5768737356293482,
"repo_name": "bigswitch/nova",
"id": "8761e69c1716e11c137f4d203ef729c1b1d25749",
"size": "41150",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nova/tests/unit/virt/libvirt/fakelibvirt.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "17220528"
},
{
"name": "Shell",
"bytes": "36658"
},
{
"name": "Smarty",
"bytes": "291947"
}
],
"symlink_target": ""
} |
import argparse
import runeclan.xp_tracker as tracker
import runescape.clan_stats as stats
import template.state as state
import progressbar
import os
def __parse_arguments():
    """Parse command line options for the clan XP report generator."""
    parser = argparse.ArgumentParser()
    parser.add_argument('-c', '--clan', action='append', help='The official name(s) of the clan', required=True)
    # type=int: argparse passes CLI values through as strings by default,
    # so '-x 5' / '-t 10' would otherwise yield str where the defaults
    # (and downstream numeric use) expect int.
    parser.add_argument('-x', '--xp', type=int, default=1, help='The minimum amount of XP to be considered active')
    parser.add_argument('-s', '--skill', action='append', help='The RuneClan identifier of the skill(s) to show', required=True)
    parser.add_argument('-t', '--top', type=int, default=5, help='The amount of people to show in the top list for each skill')
    parser.add_argument('-m', '--mode', default='normal', choices=['normal', 'preview', 'dxpw', 'year'], help='The type of report you want to generate.')
    parser.add_argument('-d', '--dir', default='out', help='Location of the generated HTML file')
    return parser.parse_args()
def __get_highscores(clans, skills, top, mode):
    """Collect per-clan stat averages and per-skill top lists, updating
    a progress bar as each (clan, skill) pair is fetched."""
    bar = progressbar.ProgressBar(max_value=len(clans) * len(skills))
    completed = 0
    results = []
    for clan in clans:
        entry = {
            'clan': clan,
            'highscores': [],
            'averages': stats.get_clan_stats(clan),
        }
        results.append(entry)
        for skill in skills:
            entry['highscores'].append(tracker.get_users(clan, skill, top, mode))
            completed += 1
            bar.update(completed)
    return results
def main():
    """Entry point: parse CLI options, gather highscores, render HTML."""
    args = __parse_arguments()
    highscores = __get_highscores(args.clan, args.skill, args.top, args.mode)
    # Resolve templates/output relative to this file so the script works
    # regardless of the current working directory.
    base = os.path.dirname(os.path.abspath(__file__))
    state.render(highscores, args.xp, args.top, args.mode, base, args.dir)


if __name__ == '__main__':
    main()
"content_hash": "0ad5150d85e4e20679e539471fc68186",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 153,
"avg_line_length": 43.5,
"alnum_prop": 0.6586206896551724,
"repo_name": "TotalF2PSkillers/f2p-state",
"id": "1587223ca6e5bec7765f902dab43fe22d21c67bc",
"size": "1740",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "index.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "2207"
},
{
"name": "Python",
"bytes": "8459"
}
],
"symlink_target": ""
} |
from plotly.basedatatypes import BaseTraceType as _BaseTraceType
import copy as _copy
class Parcoords(_BaseTraceType):
# class properties
# --------------------
_parent_path_str = ""
_path_str = "parcoords"
_valid_props = {
"customdata",
"customdatasrc",
"dimensiondefaults",
"dimensions",
"domain",
"ids",
"idssrc",
"labelangle",
"labelfont",
"labelside",
"line",
"meta",
"metasrc",
"name",
"rangefont",
"stream",
"tickfont",
"type",
"uid",
"uirevision",
"visible",
}
    # customdata
    # ----------
    @property
    def customdata(self):
        """
        Assigns extra data to each datum. This may be useful when
        listening to hover, click and selection events. Note that
        "scatter" traces also append customdata items in the markers
        DOM elements.

        The 'customdata' property is an array that may be specified as a tuple,
        list, numpy array, or pandas Series

        Returns
        -------
        numpy.ndarray
        """
        return self["customdata"]

    @customdata.setter
    def customdata(self, val):
        self["customdata"] = val
    # customdatasrc
    # -------------
    @property
    def customdatasrc(self):
        """
        Sets the source reference on Chart Studio Cloud for
        `customdata`.

        The 'customdatasrc' property must be specified as a string or
        as a plotly.grid_objs.Column object

        Returns
        -------
        str
        """
        return self["customdatasrc"]

    @customdatasrc.setter
    def customdatasrc(self, val):
        self["customdatasrc"] = val
# dimensions
# ----------
@property
def dimensions(self):
"""
The dimensions (variables) of the parallel coordinates chart.
2..60 dimensions are supported.
The 'dimensions' property is a tuple of instances of
Dimension that may be specified as:
- A list or tuple of instances of plotly.graph_objs.parcoords.Dimension
- A list or tuple of dicts of string/value properties that
will be passed to the Dimension constructor
Supported dict properties:
constraintrange
The domain range to which the filter on the
dimension is constrained. Must be an array of
`[fromValue, toValue]` with `fromValue <=
toValue`, or if `multiselect` is not disabled,
you may give an array of arrays, where each
inner array is `[fromValue, toValue]`.
label
The shown name of the dimension.
multiselect
Do we allow multiple selection ranges or just a
single range?
name
When used in a template, named items are
created in the output figure in addition to any
items the figure already has in this array. You
can modify these items in the output figure by
making your own item with `templateitemname`
matching this `name` alongside your
modifications (including `visible: false` or
`enabled: false` to hide it). Has no effect
outside of a template.
range
The domain range that represents the full,
shown axis extent. Defaults to the `values`
extent. Must be an array of `[fromValue,
toValue]` with finite numbers as elements.
templateitemname
Used to refer to a named item in this array in
the template. Named items from the template
will be created even without a matching item in
the input figure, but you can modify one by
making an item with `templateitemname` matching
its `name`, alongside your modifications
(including `visible: false` or `enabled: false`
to hide it). If there is no template or no
matching item, this item will be hidden unless
you explicitly show it with `visible: true`.
tickformat
Sets the tick label formatting rule using d3
formatting mini-languages which are very
similar to those in Python. For numbers, see:
https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format
And for dates see:
https://github.com/d3/d3-3.x-api-
reference/blob/master/Time-Formatting.md#format
We add one item to d3's date formatter: "%{n}f"
for fractional seconds with n digits. For
example, *2016-10-13 09:15:23.456* with
tickformat "%H~%M~%S.%2f" would display
"09~15~23.46"
ticktext
Sets the text displayed at the ticks position
via `tickvals`.
ticktextsrc
Sets the source reference on Chart Studio Cloud
for ticktext .
tickvals
Sets the values at which ticks on this axis
appear.
tickvalssrc
Sets the source reference on Chart Studio Cloud
for tickvals .
values
Dimension values. `values[n]` represents the
value of the `n`th point in the dataset,
therefore the `values` vector for all
dimensions must be the same (longer vectors
will be truncated). Each value must be a finite
number.
valuessrc
Sets the source reference on Chart Studio Cloud
for values .
visible
Shows the dimension when set to `true` (the
default). Hides the dimension for `false`.
Returns
-------
tuple[plotly.graph_objs.parcoords.Dimension]
"""
return self["dimensions"]
@dimensions.setter
def dimensions(self, val):
    # Store the new dimension list via this object's item assignment.
    self["dimensions"] = val
# dimensiondefaults
# -----------------
@property
def dimensiondefaults(self):
    """
    Default property values to use for elements of
    parcoords.dimensions when this trace is used in a template (as
    layout.template.data.parcoords.dimensiondefaults).

    May be set to an instance of
    :class:`plotly.graph_objs.parcoords.Dimension` or a dict of
    properties for its constructor.

    Returns
    -------
    plotly.graph_objs.parcoords.Dimension
    """
    return self["dimensiondefaults"]

@dimensiondefaults.setter
def dimensiondefaults(self, val):
    self["dimensiondefaults"] = val
# domain
# ------
@property
def domain(self):
    """
    The trace's layout domain.

    May be set to an instance of
    :class:`plotly.graph_objs.parcoords.Domain` or a dict of
    properties for its constructor (`row`/`column` for layout-grid
    placement, `x`/`y` for the horizontal/vertical extent in plot
    fraction).

    Returns
    -------
    plotly.graph_objs.parcoords.Domain
    """
    return self["domain"]

@domain.setter
def domain(self, val):
    self["domain"] = val
# ids
# ---
@property
def ids(self):
    """
    Id labels assigned to each datum, used for object constancy of
    data points during animation. Should be an array of strings,
    not numbers or any other type (may be given as a tuple, list,
    numpy array, or pandas Series).

    Returns
    -------
    numpy.ndarray
    """
    return self["ids"]

@ids.setter
def ids(self, val):
    self["ids"] = val
# idssrc
# ------
@property
def idssrc(self):
    """
    Source reference on Chart Studio Cloud for `ids`.

    Must be specified as a string or as a plotly.grid_objs.Column
    object.

    Returns
    -------
    str
    """
    return self["idssrc"]

@idssrc.setter
def idssrc(self, val):
    self["idssrc"] = val
# labelangle
# ----------
@property
def labelangle(self):
    """
    Angle of the dimension labels with respect to the horizontal
    (e.g. -90 draws them vertically). Tilted labels may fit margins
    better when `labelside` is "bottom".

    An angle in degrees between -180 and 180; numeric values
    outside this range are wrapped to the equivalent angle (e.g.
    270 becomes -90).

    Returns
    -------
    int|float
    """
    return self["labelangle"]

@labelangle.setter
def labelangle(self, val):
    self["labelangle"] = val
# labelfont
# ---------
@property
def labelfont(self):
    """
    Font used for the `dimension` labels.

    May be set to an instance of
    :class:`plotly.graph_objs.parcoords.Labelfont` or a dict of
    properties (`color`, `family`, `size`) for its constructor.

    Returns
    -------
    plotly.graph_objs.parcoords.Labelfont
    """
    return self["labelfont"]

@labelfont.setter
def labelfont(self, val):
    self["labelfont"] = val
# labelside
# ---------
@property
def labelside(self):
    """
    Location of the dimension labels: "top" positions them above
    the graph (next to the title), "bottom" below it. Tilted labels
    (see `labelangle`) may fit margins better with "bottom".

    One of the enumeration values ['top', 'bottom'].

    Returns
    -------
    Any
    """
    return self["labelside"]

@labelside.setter
def labelside(self, val):
    self["labelside"] = val
# line
# ----
@property
def line(self):
    """
    Line styling/coloring for this trace.

    May be set to an instance of
    :class:`plotly.graph_objs.parcoords.Line` or a dict of
    properties for its constructor, e.g. `color` (a color or a
    numerical array mapped through the colorscale), `colorscale`,
    `autocolorscale`, `cauto`/`cmin`/`cmid`/`cmax`, `coloraxis`,
    `colorbar`, `colorsrc`, `reversescale` and `showscale`.

    Returns
    -------
    plotly.graph_objs.parcoords.Line
    """
    return self["line"]

@line.setter
def line(self, val):
    self["line"] = val
# meta
# ----
@property
def meta(self):
    """
    Extra meta information associated with this trace, usable in
    various text attributes (trace `name`, graph/axis/colorbar
    `title.text`, annotation `text`, `rangeselector`,
    `updatemenues` and `sliders` `label` text) via the template
    syntax `%{meta[i]}` inside the trace, or `%{data[n].meta[i]}`
    in layout attributes. Accepts values of any type.

    Returns
    -------
    Any|numpy.ndarray
    """
    return self["meta"]

@meta.setter
def meta(self, val):
    self["meta"] = val
# metasrc
# -------
@property
def metasrc(self):
    """
    Source reference on Chart Studio Cloud for `meta`.

    Must be specified as a string or as a plotly.grid_objs.Column
    object.

    Returns
    -------
    str
    """
    return self["metasrc"]

@metasrc.setter
def metasrc(self, val):
    self["metasrc"] = val
# name
# ----
@property
def name(self):
    """
    Trace name, shown as the legend item and on hover.

    Must be a string (a number will be converted to a string).

    Returns
    -------
    str
    """
    return self["name"]

@name.setter
def name(self, val):
    self["name"] = val
# rangefont
# ---------
@property
def rangefont(self):
    """
    Font used for the `dimension` range values.

    May be set to an instance of
    :class:`plotly.graph_objs.parcoords.Rangefont` or a dict of
    properties (`color`, `family`, `size`) for its constructor.

    Returns
    -------
    plotly.graph_objs.parcoords.Rangefont
    """
    return self["rangefont"]

@rangefont.setter
def rangefont(self, val):
    self["rangefont"] = val
# stream
# ------
@property
def stream(self):
    """
    Streaming configuration for this trace.

    May be set to an instance of
    :class:`plotly.graph_objs.parcoords.Stream` or a dict of
    properties (`maxpoints`, `token`) for its constructor.

    Returns
    -------
    plotly.graph_objs.parcoords.Stream
    """
    return self["stream"]

@stream.setter
def stream(self, val):
    self["stream"] = val
# tickfont
# --------
@property
def tickfont(self):
    """
    Font used for the `dimension` tick values.

    May be set to an instance of
    :class:`plotly.graph_objs.parcoords.Tickfont` or a dict of
    properties (`color`, `family`, `size`) for its constructor.

    Returns
    -------
    plotly.graph_objs.parcoords.Tickfont
    """
    return self["tickfont"]

@tickfont.setter
def tickfont(self, val):
    self["tickfont"] = val
# uid
# ---
@property
def uid(self):
    """
    Id assigned to this trace, used to provide object constancy
    between traces during animations and transitions.

    Must be a string (a number will be converted to a string).

    Returns
    -------
    str
    """
    return self["uid"]

@uid.setter
def uid(self, val):
    self["uid"] = val
# uirevision
# ----------
@property
def uirevision(self):
    """
    Controls persistence of some user-driven changes to the trace
    (`constraintrange` in `parcoords` traces, plus some
    `editable: true` modifications such as `name` and
    `colorbar.title`). Defaults to `layout.uirevision`. Trace
    changes are tracked by `uid`, falling back on trace index when
    no `uid` is provided, so assigning a stable `uid` preserves
    user-driven changes across reordering of the `data` array.
    Accepts values of any type.

    Returns
    -------
    Any
    """
    return self["uirevision"]

@uirevision.setter
def uirevision(self, val):
    self["uirevision"] = val
# visible
# -------
@property
def visible(self):
    """
    Whether this trace is visible. With "legendonly" the trace is
    not drawn, but can still appear as a legend item (provided the
    legend itself is visible).

    One of the enumeration values [True, False, 'legendonly'].

    Returns
    -------
    Any
    """
    return self["visible"]

@visible.setter
def visible(self, val):
    self["visible"] = val
# type
# ----
@property
def type(self):
    """The trace type literal; always "parcoords" for this class."""
    return self._props["type"]
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
    # Generated plain-text summary of every settable property on this
    # trace; one entry per property, indented description beneath it.
    return """\
        customdata
            Assigns extra data each datum. This may be useful when
            listening to hover, click and selection events. Note
            that, "scatter" traces also appends customdata items in
            the markers DOM elements
        customdatasrc
            Sets the source reference on Chart Studio Cloud for
            customdata .
        dimensions
            The dimensions (variables) of the parallel coordinates
            chart. 2..60 dimensions are supported.
        dimensiondefaults
            When used in a template (as
            layout.template.data.parcoords.dimensiondefaults), sets
            the default property values to use for elements of
            parcoords.dimensions
        domain
            :class:`plotly.graph_objects.parcoords.Domain` instance
            or dict with compatible properties
        ids
            Assigns id labels to each datum. These ids for object
            constancy of data points during animation. Should be an
            array of strings, not numbers or any other type.
        idssrc
            Sets the source reference on Chart Studio Cloud for
            ids .
        labelangle
            Sets the angle of the labels with respect to the
            horizontal. For example, a `tickangle` of -90 draws the
            labels vertically. Tilted labels with "labelangle" may
            be positioned better inside margins when
            `labelposition` is set to "bottom".
        labelfont
            Sets the font for the `dimension` labels.
        labelside
            Specifies the location of the `label`. "top" positions
            labels above, next to the title "bottom" positions
            labels below the graph Tilted labels with "labelangle"
            may be positioned better inside margins when
            `labelposition` is set to "bottom".
        line
            :class:`plotly.graph_objects.parcoords.Line` instance
            or dict with compatible properties
        meta
            Assigns extra meta information associated with this
            trace that can be used in various text attributes.
            Attributes such as trace `name`, graph, axis and
            colorbar `title.text`, annotation `text`
            `rangeselector`, `updatemenues` and `sliders` `label`
            text all support `meta`. To access the trace `meta`
            values in an attribute in the same trace, simply use
            `%{meta[i]}` where `i` is the index or key of the
            `meta` item in question. To access trace `meta` in
            layout attributes, use `%{data[n[.meta[i]}` where `i`
            is the index or key of the `meta` and `n` is the trace
            index.
        metasrc
            Sets the source reference on Chart Studio Cloud for
            meta .
        name
            Sets the trace name. The trace name appear as the
            legend item and on hover.
        rangefont
            Sets the font for the `dimension` range values.
        stream
            :class:`plotly.graph_objects.parcoords.Stream` instance
            or dict with compatible properties
        tickfont
            Sets the font for the `dimension` tick values.
        uid
            Assign an id to this trace, Use this to provide object
            constancy between traces during animations and
            transitions.
        uirevision
            Controls persistence of some user-driven changes to the
            trace: `constraintrange` in `parcoords` traces, as well
            as some `editable: true` modifications such as `name`
            and `colorbar.title`. Defaults to `layout.uirevision`.
            Note that other user-driven trace attribute changes are
            controlled by `layout` attributes: `trace.visible` is
            controlled by `layout.legend.uirevision`,
            `selectedpoints` is controlled by
            `layout.selectionrevision`, and `colorbar.(x|y)`
            (accessible with `config: {editable: true}`) is
            controlled by `layout.editrevision`. Trace changes are
            tracked by `uid`, which only falls back on trace index
            if no `uid` is provided. So if your app can add/remove
            traces before the end of the `data` array, such that
            the same trace has a different index, you can still
            preserve user-driven changes if you give each trace a
            `uid` that stays with it as it moves.
        visible
            Determines whether or not this trace is visible. If
            "legendonly", the trace is not drawn, but can appear as
            a legend item (provided that the legend itself is
            visible).
        """
def __init__(
    self,
    arg=None,
    customdata=None,
    customdatasrc=None,
    dimensions=None,
    dimensiondefaults=None,
    domain=None,
    ids=None,
    idssrc=None,
    labelangle=None,
    labelfont=None,
    labelside=None,
    line=None,
    meta=None,
    metasrc=None,
    name=None,
    rangefont=None,
    stream=None,
    tickfont=None,
    uid=None,
    uirevision=None,
    visible=None,
    **kwargs
):
    """
    Construct a new Parcoords object.

    Parallel coordinates for multidimensional exploratory data
    analysis. The samples are specified in `dimensions`. The colors
    are set in `line.color`.

    Parameters
    ----------
    arg
        dict of properties compatible with this constructor or
        an instance of :class:`plotly.graph_objs.Parcoords`

    The remaining keyword arguments correspond one-to-one to the
    trace properties of the same name; see the matching property
    docstrings on this class for their accepted values.

    Returns
    -------
    Parcoords
    """
    super(Parcoords, self).__init__("parcoords")

    # Internal re-parenting shortcut: adopt the parent and skip all
    # normal initialization.
    if "_parent" in kwargs:
        self._parent = kwargs["_parent"]
        return

    # Normalize `arg` into a plain dict we can consume destructively.
    if arg is None:
        arg = {}
    elif isinstance(arg, self.__class__):
        arg = arg.to_plotly_json()
    elif isinstance(arg, dict):
        arg = _copy.copy(arg)
    else:
        raise ValueError(
            """\
The first argument to the plotly.graph_objs.Parcoords
constructor must be a dict or
an instance of :class:`plotly.graph_objs.Parcoords`"""
        )

    # Handle skip_invalid / validation flags.
    self._skip_invalid = kwargs.pop("skip_invalid", False)
    self._validate = kwargs.pop("_validate", True)

    # Populate data dict with properties. An explicit keyword
    # argument takes precedence over the same key in `arg`; the key
    # is consumed from `arg` either way.
    for prop, explicit in (
        ("customdata", customdata),
        ("customdatasrc", customdatasrc),
        ("dimensions", dimensions),
        ("dimensiondefaults", dimensiondefaults),
        ("domain", domain),
        ("ids", ids),
        ("idssrc", idssrc),
        ("labelangle", labelangle),
        ("labelfont", labelfont),
        ("labelside", labelside),
        ("line", line),
        ("meta", meta),
        ("metasrc", metasrc),
        ("name", name),
        ("rangefont", rangefont),
        ("stream", stream),
        ("tickfont", tickfont),
        ("uid", uid),
        ("uirevision", uirevision),
        ("visible", visible),
    ):
        from_arg = arg.pop(prop, None)
        value = explicit if explicit is not None else from_arg
        if value is not None:
            self[prop] = value

    # Read-only literals.
    self._props["type"] = "parcoords"
    arg.pop("type", None)

    # Anything left in `arg`, plus unknown kwargs, goes through the
    # generic kwarg processor.
    self._process_kwargs(**dict(arg, **kwargs))

    # Reset skip_invalid.
    self._skip_invalid = False
| {
"content_hash": "a9c0ba5b57d156bd0cdf962b913823af",
"timestamp": "",
"source": "github",
"line_count": 1137,
"max_line_length": 81,
"avg_line_length": 37.08795074758135,
"alnum_prop": 0.542436386919301,
"repo_name": "plotly/python-api",
"id": "ce614d190c0f2878e5d1dc542d1e6f2484f9e4e8",
"size": "42169",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/graph_objs/_parcoords.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "6870"
},
{
"name": "Makefile",
"bytes": "1708"
},
{
"name": "Python",
"bytes": "823245"
},
{
"name": "Shell",
"bytes": "3238"
}
],
"symlink_target": ""
} |
from django.http import HttpResponse
from django.views.generic import TemplateView
class About(TemplateView):
    """Render the static About page (no extra context)."""
    template_name = 'about.html'
| {
"content_hash": "87136b97aefd451f24b1a388cb20ca6b",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 45,
"avg_line_length": 28.8,
"alnum_prop": 0.7986111111111112,
"repo_name": "fsxfreak/club-suite",
"id": "a931665aa6776922367ed287b5820871410b8804",
"size": "144",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "clubsuite/suite/views/view_about.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "63477"
},
{
"name": "HTML",
"bytes": "77966"
},
{
"name": "JavaScript",
"bytes": "2779"
},
{
"name": "Python",
"bytes": "165393"
}
],
"symlink_target": ""
} |
try: # Python 2.7 compat
from collections.abc import Mapping, Iterable
except ImportError:
from collections import Mapping, Iterable
from itertools import chain
from functools import reduce
from .common import _raise
def empty(_=None):
    """Return an empty pure (closure-based, immutable) dict.

    The ignored parameter allows `empty` to also be invoked as a
    method through the ``empty = empty`` attribute on
    ``puredict_base`` (the instance is then discarded).
    """
    class __puredict(puredict_base):
        def __getitem__(self, key):
            return _raise(KeyError())

        def __contains__(self, key):
            return False

        def __iter__(self):
            return iter(())

        def items(self):
            return iter(())

    return __puredict()
def insert(parent, key, value):
    """Return a new pure dict: `parent` with `key` mapped to `value`.

    The result is a lazy view over `parent`; the new binding shadows
    any existing entry for `key`.
    """
    class __puredict(puredict_base):
        def __getitem__(self, k):
            if key == k:
                return value
            return parent[k]

        def __contains__(self, k):
            return (key == k) or (k in parent)

        def __iter__(self):
            return chain((key,), (k for k in parent if not key == k))

        def items(self):
            return chain(
                ((key, value),),
                ((k, v) for (k, v) in parent.items() if not key == k),
            )

    return __puredict()
def delete(parent, key):
    """Return a new pure dict: `parent` with `key` removed.

    The result is a lazy view over `parent`; looking up `key` raises
    KeyError even if `parent` still contains it.
    """
    class __puredict(puredict_base):
        def __getitem__(self, k):
            if key == k:
                return _raise(KeyError())
            return parent[k]

        def __contains__(self, k):
            return (not key == k) and (k in parent)

        def __iter__(self):
            return (k for k in parent if not key == k)

        def items(self):
            return ((k, v) for (k, v) in parent.items() if not key == k)

    return __puredict()
class puredict_base(Mapping):
    """Shared behaviour for the closure-based pure dicts.

    The module-level `empty`, `insert` and `delete` are re-exposed as
    attributes so they can be called as methods on an instance (the
    instance then plays the role of the first argument).
    """
    empty = empty
    insert = insert
    delete = delete

    def __len__(self):
        # No stored size: count by exhausting items().
        return sum(1 for _ in self.items())

    def __hash__(self):
        # Content-based hash, mixed with the class hash.
        return hash(frozenset(self.items())) ^ hash(puredict_base)

    def __repr__(self):
        return repr(dict(self))
class puredict(puredict_base):
    """Public constructor type.

    ``puredict(mapping)``, ``puredict(iterable_of_pairs)`` and
    ``puredict(**kwargs)`` all build a pure dict by folding `insert`
    over the pairs, starting from `empty()`.
    """

    @staticmethod
    def from_iterable(it):
        return reduce(lambda acc, pair: insert(acc, *pair), it, empty())

    @staticmethod
    def from_mapping(mapping):
        return puredict.from_iterable(mapping.items())

    def __new__(cls, *args, **kwargs):
        if args and isinstance(args[0], Mapping):
            return cls.from_mapping(args[0])
        if args and isinstance(args[0], Iterable):
            return cls.from_iterable(args[0])
        return cls.from_mapping(kwargs)
| {
"content_hash": "0bd72c7a1c1e5e4bb97e0aa43fc89646",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 103,
"avg_line_length": 43.020833333333336,
"alnum_prop": 0.5941888619854722,
"repo_name": "maweki/more-collections",
"id": "904706fb72da3a81bb117ee90a7437b1a485d160",
"size": "2065",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "more_collections/puredict.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "22711"
}
],
"symlink_target": ""
} |
from nmrmint.GUI.history import Subspectrum
def test_subspectrum_instantiated_inactive():
    """A freshly constructed Subspectrum defaults to inactive."""
    assert not Subspectrum().active
def test_subspectrum_toggle_true():
    """toggle_active flips an inactive Subspectrum to active."""
    subspectrum = Subspectrum()
    subspectrum.toggle_active()
    assert subspectrum.active
def test_subspectrum_toggle_false():
    """toggle_active flips an active Subspectrum back to inactive."""
    subspectrum = Subspectrum(activity=True)
    assert subspectrum.active
    subspectrum.toggle_active()
    assert not subspectrum.active
| {
"content_hash": "efc7119853abf5005669df402db2175a",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 45,
"avg_line_length": 20.68421052631579,
"alnum_prop": 0.6972010178117048,
"repo_name": "sametz/nmrmint",
"id": "90026b2cb66fc5aa6526f9ba972cfa2df69a7ad0",
"size": "393",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/GUI/test_subspectrum.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "321947"
}
],
"symlink_target": ""
} |
from enum import Enum
from typing import NamedTuple, Optional
from pyrsistent import pmap, pvector
from pyrsistent.typing import PVector, PMap
from typeit import type_constructor
class AppConfig(NamedTuple):
    """Configuration entry for one application: its name, the URL
    prefix it is mounted under, and optional setup steps."""
    name: str
    url_prefix: str
    # Ordered setup steps; each step is a persistent mapping.
    setup: PVector[PMap] = pvector([])
class Session(NamedTuple):
    """Session-cookie settings."""
    cookie_name: str
    # Restrict the cookie to HTTPS connections.
    cookie_secure: bool
    # Hide the cookie from client-side JavaScript.
    cookie_httponly: bool
class Redis(NamedTuple):
    """Redis connection settings with local-development defaults."""
    host: str = '127.0.0.1'
    port: int = 6379
    db: int = 0
    # Connection-pool bounds.
    min_connections: int = 1
    max_connections: int = 10
class Postgresql(NamedTuple):
    """PostgreSQL connection settings with local-development defaults."""
    user: str = 'solo'
    dbname: str = 'solo'
    password: str = 'solo'
    host: str = '127.0.0.1'
    port: int = 5432
    # Connection-pool bounds.
    min_connections: int = 1
    max_connections: int = 10
class EventLoopType(Enum):
    """Selectable event-loop implementation."""
    ASYNCIO = 'asyncio'
    UVLOOP = 'uvloop'
class Server(NamedTuple):
    """HTTP server settings."""
    # Externally visible base URL of this server.
    public_uri: str = 'http://127.0.0.1:8000'
    host: str = '127.0.0.1'
    port: int = 8000
    keep_alive: bool = True
    keep_alive_timeout: int = 30
    # asyncio/uvloop
    event_loop: EventLoopType = EventLoopType.ASYNCIO
class Testing(NamedTuple):
    """Settings used when running the test suite."""
    docker_pull: bool = True
    """ Pull images from registry if they are not available locally yet
    """
class Config(NamedTuple):
    """Top-level application configuration aggregating all sections."""
    server: Server
    session: Session
    apps: PVector[AppConfig] = pvector([])
    # Passed to logging.config.dictConfig-style consumers; schema version 1.
    logging: PMap = pmap({'version': 1})
    debug: bool = True
    postgresql: Postgresql = Postgresql()
    redis: Redis = Redis()
    testing: Testing = Testing()
# typeit derives a (parser, serializer) pair from Config's type annotations:
# mk_config builds a Config from a plain dict, dict_config does the reverse.
mk_config, dict_config = type_constructor ^ Config
| {
"content_hash": "f16584e4a5b616185adbc66d1f61e304",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 71,
"avg_line_length": 21.569444444444443,
"alnum_prop": 0.6555054732775274,
"repo_name": "avanov/solo",
"id": "b1add68d86613812567a0dbbe71df2bc9b189715",
"size": "1553",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "solo/config/app.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "273"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Python",
"bytes": "162984"
},
{
"name": "RAML",
"bytes": "903"
}
],
"symlink_target": ""
} |
"""
pySpatialTools package.
"""
from version import *
from tests import test
# Release data
import release

# Expose release metadata as the conventional package-level dunders.
__author__ = release.author
__license__ = release.license
__date__ = release.date
__version__ = release.version

# NOTE(review): subpackage imports below are kept as an inert string
# (commented out); presumably disabled to avoid eager import of the whole
# package tree -- confirm before re-enabling.
'''
import Feature_engineering
import Geo_tools
import io
import Preprocess
import Recommender
import Retrieve
import Sampling
import Simulations
import Testers
import tests
import tools
import utils
'''
| {
"content_hash": "203cd157be4d02f75ece87d6ec010aff",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 29,
"avg_line_length": 13.4375,
"alnum_prop": 0.7604651162790698,
"repo_name": "tgquintela/pySpatialTools",
"id": "a8281c9c8610e1675e08cb1d75a87c5a516ad919",
"size": "431",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pySpatialTools/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1231973"
},
{
"name": "Shell",
"bytes": "4091"
}
],
"symlink_target": ""
} |
"""Generates a .cc file that embeds the V8 snapshot.
This defines the following function
void shaka::SetupV8Snapshots();
"""
import argparse
import sys
import embed_utils
def _SetupStartupData(writer, var_name):
"""Writes the code to setup a StartupData variable."""
writer.Write(
'%s_startup_data.data = reinterpret_cast<const char*>(%s_uncompressed);',
var_name, var_name)
writer.Write('%s_startup_data.raw_size = %s_uncompressed_size;', var_name,
var_name)
def _GenerateFile(output):
    """Generates a C++ file which embeds the snapshot files."""
    # Assumes the current working directory contains the V8 snapshot blob.
    with open('snapshot_blob.bin', 'rb') as f:
        snapshot_data = f.read()

    writer = embed_utils.CompressedCodeWriter(output)
    writer.Write('#include <v8.h>')
    writer.Write()
    writer.Write('#include "src/util/utils.h"')
    writer.Write()

    with writer.Namespace('shaka'):
        # Anonymous namespace keeps the compressed data and the StartupData
        # instance internal to the generated translation unit.
        with writer.Namespace():
            writer.CompressedVariable('snapshot', snapshot_data)
            writer.Write('v8::StartupData snapshot_startup_data;')
        writer.Write()
        writer.Write('void SetupV8Snapshots();')
        writer.Write()
        with writer.Block('void SetupV8Snapshots()'):
            writer.Decompress('snapshot')
            _SetupStartupData(writer, 'snapshot')
            writer.Write('v8::V8::SetSnapshotDataBlob(&snapshot_startup_data);')
def main(args):
    """Parse command-line arguments and emit the generated C++ file.

    Returns 0 on success (used as the process exit status).
    """
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument('--output', dest='output',
                        help='The filename to output to.')
    parsed = parser.parse_args(args)

    with open(parsed.output, 'w') as out_file:
        _GenerateFile(out_file)
    return 0
if __name__ == '__main__':
    # Propagate main()'s return value as the process exit status.
    sys.exit(main(sys.argv[1:]))
| {
"content_hash": "87eb3adf3d627c3bca65f07f716959f1",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 79,
"avg_line_length": 27.098360655737704,
"alnum_prop": 0.6684815486993345,
"repo_name": "shaka-project/shaka-player-embedded",
"id": "1fbb479c87f5ecb69ab77cb002fdf8a2a139c1cd",
"size": "2248",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "shaka/tools/embed_v8_snapshot.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "4629"
},
{
"name": "C++",
"bytes": "1577496"
},
{
"name": "JavaScript",
"bytes": "96384"
},
{
"name": "Makefile",
"bytes": "1354"
},
{
"name": "Objective-C",
"bytes": "120570"
},
{
"name": "Objective-C++",
"bytes": "65339"
},
{
"name": "Python",
"bytes": "176793"
},
{
"name": "Shell",
"bytes": "1985"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from rest_framework import viewsets
from rest_framework import permissions
from .models import Topic
from .serializers import TopicSerializer
from .views import get_all_topics
class TopicViewSet(viewsets.ReadOnlyModelViewSet):
    """Read-only REST endpoints (list/retrieve) for Topic objects."""
    queryset = Topic.objects.all()
    serializer_class = TopicSerializer
    permission_classes = (permissions.IsAuthenticatedOrReadOnly,)

    def get_queryset(self):
        # Restrict the base queryset to topics visible to the requesting user.
        user = self.request.user
        return get_all_topics(user)
| {
"content_hash": "762ebb4c329475be2b0e2cae97e926f0",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 65,
"avg_line_length": 28.27777777777778,
"alnum_prop": 0.7662082514734774,
"repo_name": "JiaruZhang/Five",
"id": "935cc54a3e05e778b752bba90fce26e8cd032630",
"size": "533",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lbforum/api.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "99227"
},
{
"name": "HTML",
"bytes": "165128"
},
{
"name": "JavaScript",
"bytes": "7623"
},
{
"name": "Python",
"bytes": "108883"
}
],
"symlink_target": ""
} |
import serial
import pika
import json
import threading
import thread
import time
import logging
from cleaner.cleaner import KinematicsSolver
from functools import partial
import cleaner.constants as const
from random import uniform, randint, randrange
# Module logger: DEBUG-level output to both log/glove.log and the console.
log = logging.getLogger("glove")
log.setLevel(logging.DEBUG)
fh = logging.FileHandler("log/glove.log")
fh.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
# Only the console handler gets the name/level/message format.
formatter = logging.Formatter('%(name)-10s: %(levelname)-8s %(message)s')
ch.setFormatter(formatter)
log.addHandler(fh)
log.addHandler(ch)
def read_from_serial(s, connection, channel, kinematics, debug):
    """Pump sensor samples to the 'touchvision' exchange (routing key 'glove').

    Live mode (debug False): reads packets from the serial port, feeds them
    through the kinematics solver and publishes every 10th reading.
    Debug mode (debug True): publishes random samples at ~10 Hz.
    Runs forever; intended to be the target of a daemon thread.
    """
    if not debug:
        i = 1
        # Discard serial output until the device reports its DMP is ready.
        log.info("Waiting for Serial Port to Clear Out")
        while True:
            incoming_data = s.readline()
            log.debug(incoming_data)
            if "DMP ready! Waiting for first interrupt..." in incoming_data:
                break
        log.info("Serial Port Cleared out")
        start = time.time()
        data_points = 0
        while True:
            stdin = s.readline()
            packet = s
tdin.split(const.PACKET_DELIMINATOR) if False else stdin.split(const.PACKET_DELIMINATOR)
            # Only every 10th line is processed/published (simple decimation).
            if i % 10 == 0:
                for sub_packet in packet:
                    # print(sub_packet)
                    # Fields: [ax, ay, az, t, pressure] -- presumably; confirm
                    # against the firmware's packet layout.
                    data = sub_packet.split(const.SUB_PACKET_DELIMINATOR)
                    kinematics.process_acceleration_sample([float(data[0]), float(data[1]), float(data[2])], float(data[3]), float(data[4]))
                    [t, x, y, z, pressure] = kinematics.get_latest_measurements()
                    payload = {}
                    payload['timestamp'] = int(time.time()*1000)
                    payload['x'] = x
                    payload['y'] = y
                    payload['z'] = z
                    # Clamp pressure to the 0-5 scale expected downstream.
                    payload['pressure'] = min(5, pressure)
                    p = json.dumps(payload, sort_keys=True);
                    log.debug(p)
                    data_points += 1
                    # NOTE(review): this is seconds-per-point, i.e. the
                    # inverse of throughput -- the name/label is misleading.
                    throughput = (time.time() - start) / data_points
                    log.debug("Throughput: %f",throughput)
                    channel.basic_publish(exchange='touchvision', routing_key='glove', body=p)
            i += 1
    else:
        t = 0
        start = time.time()
        data_points = 0
        while True:
            try:
                # Synthesize a random sample in the same payload shape.
                payload = {}
                payload['timestamp'] = int(time.time() * 1000)
                payload['x'] = int(uniform(0,100))
                payload['y'] = int(uniform(0,100))
                payload['z'] = int(uniform(0,100))
                payload['pressure'] = int(uniform(0,5))
                p = json.dumps(payload, sort_keys=True);
                log.debug(p)
                data_points += 1
                throughput = (time.time() - start) / data_points
                # NOTE(review): '%d' truncates sub-second values to 0.
                log.debug("Throughput: %d", throughput)
                channel.basic_publish(exchange='touchvision', routing_key='glove', body=p)
                t += 1
                time.sleep(.1)
            except Exception as e:
                print e
class Glove(object):
    """Bridges the sensor glove (serial or simulated) to a RabbitMQ exchange.

    Publishes samples on routing key 'glove' and listens on 'glove-in' for
    kinematics-reset commands.
    """

    def __init__(self, exchange, tty, debug):
        if not debug:
            log.info("Glove in Live Mode")
            # 115200 baud matches the glove firmware's serial rate.
            self.serial = serial.Serial(tty, 115200)
        else:
            log.info("Glove in Debug Mode")
            self.serial = None
        self.debug = debug
        self.kinematics = KinematicsSolver(1)
        self.connection = pika.BlockingConnection(pika.ConnectionParameters('localhost'))
        self.channel = self.connection.channel()
        # channel.queue_declare(queue='glove')
        self.channel.exchange_declare(exchange=exchange, type='direct')
        # Exclusive, auto-named queue bound to 'glove-in' for reset commands.
        self.glove = self.channel.queue_declare(exclusive=True)
        self.glove_queue_name = self.glove.method.queue
        self.channel.queue_bind(exchange=exchange,
                                routing_key="glove-in",
                                queue=self.glove_queue_name)
        self.channel.basic_consume(self.glove_callback, queue=self.glove_queue_name, no_ack=True)
        # self.channel.basic_consume(partial(self.glove_callback, kin=self.kinematics), queue=self.glove_queue_name, no_ack=True)

    def start(self):
        """Spawn the daemon thread that reads samples and publishes them."""
        log.info("Starting Glove Thread")
        self.thread = threading.Thread(target=read_from_serial, args=(self.serial, self.connection, self.channel, self.kinematics, self.debug))
        self.thread.daemon = True
        self.thread.start()
        log.info("Glove Up and Running")

    def stop(self):
        # NOTE(review): join(0) returns immediately; the daemon thread is
        # simply abandoned and dies with the process.
        self.thread.join(0)
        self.connection.close()

    def glove_callback(self, ch, method, properties, body):
        """Handle a 'glove-in' message by resetting the kinematics state."""
        log.warn("Resetting Kinematics...")
        self.kinematics.reset()
        log.info("Kinematics Reset Complete")
| {
"content_hash": "e855c68813edea8f7bf28bead834df59",
"timestamp": "",
"source": "github",
"line_count": 131,
"max_line_length": 143,
"avg_line_length": 36.17557251908397,
"alnum_prop": 0.5691074066258704,
"repo_name": "GEverding/touchVision",
"id": "ecedc10ed35a702a3b9ec38a1d87b0a7e958b51d",
"size": "4739",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "io/glove.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "3232"
},
{
"name": "Clojure",
"bytes": "83580"
},
{
"name": "HTML",
"bytes": "1071"
},
{
"name": "PLpgSQL",
"bytes": "1593"
},
{
"name": "Python",
"bytes": "33773"
},
{
"name": "Ruby",
"bytes": "954"
},
{
"name": "Shell",
"bytes": "69"
}
],
"symlink_target": ""
} |
import json
import logging
from django.http import HttpResponse
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext as _
from desktop.lib.django_util import JsonResponse
from desktop.models import Document2, Document
from spark.models import get_api, Notebook, QueryExpired
from spark.decorators import api_error_handler
# Module-level logger, named after this module per logging convention.
LOG = logging.getLogger(__name__)
@api_error_handler
def create_session(request):
    """Open a new session for the snippet's language and return it."""
    response = {'status': -1}

    # Both payloads are parsed for parity with the other endpoints, even
    # though only the snippet is used here.
    notebook = json.loads(request.POST.get('notebook', '{}'))
    snippet = json.loads(request.POST.get('snippet', '{}'))

    api = get_api(request.user, snippet)
    response['session'] = api.create_session(lang=snippet['type'])
    response['status'] = 0
    return JsonResponse(response)
@api_error_handler
def execute(request):
    """Execute the snippet and return a handle for polling its status."""
    response = {'status': -1}

    notebook = json.loads(request.POST.get('notebook', '{}'))
    snippet = json.loads(request.POST.get('snippet', '{}'))

    api = get_api(request.user, snippet)
    response['handle'] = api.execute(notebook, snippet)
    response['status'] = 0
    return JsonResponse(response)
@api_error_handler
def check_status(request):
    """Return the execution status of a previously submitted snippet."""
    response = {'status': -1}

    notebook = json.loads(request.POST.get('notebook', '{}'))
    snippet = json.loads(request.POST.get('snippet', '{}'))

    api = get_api(request.user, snippet)
    response['query_status'] = api.check_status(notebook, snippet)
    response['status'] = 0
    return JsonResponse(response)
@api_error_handler
def fetch_result_data(request):
    """Fetch up to `rows` result rows for a snippet.

    POST params: notebook, snippet (JSON), rows (int, default 100),
    startOver (bool, default false).
    """
    response = {'status': -1}

    notebook = json.loads(request.POST.get('notebook', '{}'))
    snippet = json.loads(request.POST.get('snippet', '{}'))
    # Bug fix: the defaults must be JSON *strings* -- json.loads(100) and
    # json.loads(False) raise TypeError whenever the parameter is absent.
    rows = json.loads(request.POST.get('rows', '100'))
    start_over = json.loads(request.POST.get('startOver', 'false'))

    response['result'] = get_api(request.user, snippet).fetch_result(notebook, snippet, rows, start_over)
    response['status'] = 0

    return JsonResponse(response)
@api_error_handler
def fetch_result_metadata(request):
    """Return metadata (e.g. column info) for a snippet's result set."""
    response = {'status': -1}

    notebook = json.loads(request.POST.get('notebook', '{}'))
    snippet = json.loads(request.POST.get('snippet', '{}'))

    api = get_api(request.user, snippet)
    response['result'] = api.fetch_result_metadata(notebook, snippet)
    response['status'] = 0
    return JsonResponse(response)
@api_error_handler
def cancel_statement(request):
    """Cancel a running snippet execution."""
    response = {'status': -1}

    notebook = json.loads(request.POST.get('notebook', '{}'))
    snippet = json.loads(request.POST.get('snippet', '{}'))

    api = get_api(request.user, snippet)
    response['result'] = api.cancel(notebook, snippet)
    response['status'] = 0
    return JsonResponse(response)
@api_error_handler
def get_logs(request):
    """Return execution logs, a progress percentage and job-browser links."""
    response = {'status': -1}

    notebook = json.loads(request.POST.get('notebook', '{}'))
    snippet = json.loads(request.POST.get('snippet', '{}'))

    db = get_api(request.user, snippet)
    response['logs'] = db.get_log(snippet)
    # A snippet whose status is 'available' has finished; report 100%.
    response['progress'] = db._progress(snippet, response['logs']) if snippet['status'] != 'available' else 100
    # One link per job id parsed out of the logs.
    response['job_urls'] = [{
        'name': job,
        'url': reverse('jobbrowser.views.single_job', kwargs={'job': job})
    } for job in db._get_jobs(response['logs'])]
    response['status'] = 0

    return JsonResponse(response)
def save_notebook(request):
    """Create or update a notebook Document2 from the POSTed JSON payload."""
    response = {'status': -1}

    notebook = json.loads(request.POST.get('notebook', '{}')) # TODO perms

    if notebook.get('id'):
        notebook_doc = Document2.objects.get(id=notebook['id'])
    else:
        notebook_doc = Document2.objects.create(name=notebook['name'], type='notebook', owner=request.user)
        # Link a Document wrapper so the new notebook appears in listings.
        Document.objects.link(notebook_doc, owner=notebook_doc.owner, name=notebook_doc.name, description=notebook_doc.description, extra='notebook')
    notebook_doc.update_data(notebook)
    notebook_doc.name = notebook['name']
    notebook_doc.save()

    response['status'] = 0
    response['id'] = notebook_doc.id
    response['message'] = _('Notebook saved !')

    return JsonResponse(response)
def open_notebook(request):
    """Load a notebook by id and return its JSON representation."""
    response = {'status': -1}

    notebook_id = request.GET.get('notebook')
    notebook = Notebook(document=Document2.objects.get(id=notebook_id)) # Todo perms

    response['status'] = 0
    response['notebook'] = notebook.get_json()
    # Bug fix: this endpoint loads a notebook; the message previously said
    # 'Notebook saved !' (copy-paste from save_notebook).
    response['message'] = _('Notebook loaded !')

    return JsonResponse(response)
def close_notebook(request):
    """Close every snippet in the notebook, ignoring already-expired queries."""
    response = {'status': -1}

    notebook = json.loads(request.POST.get('notebook', '{}')) # Todo perms
    response['status'] = 0

    for snippet in notebook['snippets']:
        try:
            # Only snippets that were actually executed have a handle to close.
            if snippet['result']['handle']:
                get_api(request.user, snippet).close(snippet)
        except QueryExpired:
            # Server already discarded the query; nothing to close.
            pass

    response['message'] = _('Notebook closed !')

    return JsonResponse(response)
def close_statement(request):
    """Close a single snippet's statement; expired queries are a no-op."""
    response = {'status': -1}

    notebook = json.loads(request.POST.get('notebook', '{}')) # Todo perms
    snippet = json.loads(request.POST.get('snippet', '{}'))

    try:
        response['result'] = get_api(request.user, snippet).close(snippet)
    except QueryExpired:
        # The server already discarded the query; treat as success.
        pass

    response['status'] = 0
    return JsonResponse(response)
| {
"content_hash": "ff49b6bc089f5fa1e2712ea3f84568c0",
"timestamp": "",
"source": "github",
"line_count": 181,
"max_line_length": 145,
"avg_line_length": 27.933701657458563,
"alnum_prop": 0.685126582278481,
"repo_name": "erickt/hue",
"id": "15d39dec2852a52032c8900a4fe1a5c8660b1f23",
"size": "5848",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "apps/spark/src/spark/api.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ApacheConf",
"bytes": "9315"
},
{
"name": "Assembly",
"bytes": "207947"
},
{
"name": "C",
"bytes": "10789304"
},
{
"name": "C++",
"bytes": "178518"
},
{
"name": "CSS",
"bytes": "501761"
},
{
"name": "Emacs Lisp",
"bytes": "14875"
},
{
"name": "GAP",
"bytes": "11337"
},
{
"name": "Genshi",
"bytes": "946"
},
{
"name": "Gettext Catalog",
"bytes": "13534784"
},
{
"name": "Groff",
"bytes": "14877"
},
{
"name": "HTML",
"bytes": "21550731"
},
{
"name": "Java",
"bytes": "3080564"
},
{
"name": "JavaScript",
"bytes": "2675808"
},
{
"name": "Makefile",
"bytes": "86291"
},
{
"name": "Mako",
"bytes": "2035662"
},
{
"name": "Myghty",
"bytes": "936"
},
{
"name": "PLSQL",
"bytes": "13774"
},
{
"name": "Perl",
"bytes": "161801"
},
{
"name": "PigLatin",
"bytes": "328"
},
{
"name": "Prolog",
"bytes": "4590"
},
{
"name": "Python",
"bytes": "31452674"
},
{
"name": "Scala",
"bytes": "60295"
},
{
"name": "Shell",
"bytes": "48346"
},
{
"name": "Smarty",
"bytes": "130"
},
{
"name": "TeX",
"bytes": "129526"
},
{
"name": "Thrift",
"bytes": "100994"
},
{
"name": "XSLT",
"bytes": "342237"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add a 'weight' ordering field to Page and order pages by it."""

    dependencies = [
        ('content', '0002_auto_20160120_1345'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='page',
            options={'ordering': ['weight']},
        ),
        migrations.AddField(
            model_name='page',
            name='weight',
            field=models.IntegerField(default=0, help_text='Pages in the menu will be ordered by weight', verbose_name='weight'),
        ),
    ]
| {
"content_hash": "6930d08f57a5cfda97d78e72b33bbd8c",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 129,
"avg_line_length": 26.045454545454547,
"alnum_prop": 0.5846422338568935,
"repo_name": "DesjardinsLab/event-kiosk",
"id": "207e0eb941452c7f95355884cf9854c835ef116b",
"size": "645",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "backend/event_kiosk/event_kiosk/content/migrations/0003_auto_20160120_1422.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "14630"
},
{
"name": "HTML",
"bytes": "1790"
},
{
"name": "JavaScript",
"bytes": "44007"
},
{
"name": "Python",
"bytes": "50632"
},
{
"name": "Shell",
"bytes": "1035"
}
],
"symlink_target": ""
} |
"""Includes bioportalSearchWidgets class.
bioportalSearchWidgets uses ipywidgets to create search boxes for the purpose
of collecting metadata via searching the Bioportal ontology API.
"""
import ipywidgets as widgets
import requests
from IPython.display import display
from metadataCollector import MetadataCollector
class BioportalSearchWidgets:
    """Uses ipywidgets to create search boxes.

    Provides a template for a search box and a results box.
    Connects to the bioportal REST API to return ontology information.
    Use of this class must adhere to a strict call order as follows.
    1) Initialize object to provide callback.
    2) add_search_widget, this may be called as many times as needed to
    add the necessary metadata collecting widgets.
    3) display_widgets, this displays the already created widgets which step
    2 created.
    """

    # NOTE(review): a default API key is embedded in source; confirm whether
    # it should be rotated/removed before distributing this module.
    def __init__(self, submit_callback,
                 bioportal_api_key='efa3babf-b23c-4399-89f7-689bb9d576fb'):
        """Initialize variables, provide valid api key for bioportal.

        param: submit_callback: Callback to be executed on submit. The
        single parameter to the callback is a dictionary whose keys
        are the topics and whose values are a dictionary whose keys
        are selected keywords and whose values are bioportal responses
        for the keyword.
        """
        self._widgets = []
        self._submit_callback = submit_callback
        self._apply_widget = None
        self._api_url = 'http://data.bioontology.org/'
        self._key = bioportal_api_key
        self._headers = {'Authorization': 'apikey token=' + self._key}

    def add_search_widget(self, topic, ontologies, required=False):
        """Add a MetadataCollector widget for `topic` over `ontologies`."""
        mc = MetadataCollector(topic, ontologies, required, self.__value_changed_callback)
        self._widgets.append(mc)

    def GET(self, url, params=None):
        """Convenient method for requests.get().

        Headers already included in call. JSON response data is returned.
        :param url: The website to access JSON data from.
        :param params: Parameters for the REST request.
        """
        request = requests.get(url, headers=self._headers, params=params)
        return request.json()

    def display_widgets(self):
        """Render all collector widgets plus a Submit button (initially disabled)."""
        self._apply_widget = widgets.Button(description='Submit',
                                            disabled=True)
        for widget in self._widgets:
            widget.display()
        display(self._apply_widget)
        self._apply_widget.on_click(self.__on_apply_clicked)

    def __value_changed_callback(self):
        # Enable Submit only once every *required* widget has results.
        for widget in self._widgets:
            if widget.is_required():
                if not widget.has_results():
                    self._apply_widget.disabled = True
                    return
        self._apply_widget.disabled = False

    def __on_apply_clicked(self, change):
        # Gather {topic: results} from widgets that have data, then notify.
        final_results = dict()
        for widget in self._widgets:
            if widget.has_results():
                results = widget.get_results()
                topic = widget.get_topic()
                final_results[topic] = results
        self._submit_callback(final_results)
| {
"content_hash": "d55c49c8cf68b05c4813643faefd4296",
"timestamp": "",
"source": "github",
"line_count": 89,
"max_line_length": 90,
"avg_line_length": 36.17977528089887,
"alnum_prop": 0.639751552795031,
"repo_name": "chapmanbe/pymitools",
"id": "c332f51846d37a22c470e2eaf744c7b129a32102",
"size": "3220",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pymitools/ontologies/bioportalSearchWidgets.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "60910"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from flask import current_app as app
from flask import Blueprint, render_template, abort, redirect, request
from www.content import repository, exceptions
from www.decorators import html_minify, cache_headers
from www.main.serializers import serialize
# Blueprint served on the 'james' subdomain with its own static folder.
views = Blueprint(
    'main-views',
    __name__,
    subdomain='james',
    static_folder='static'
)
@views.route('/browserconfig.xml')
def browser_config(extension=''):
    """Redirect /browserconfig.xml to the static copy under MEDIA_URL.

    Bug fix: the route declares no ``<extension>`` placeholder, so Flask
    calls this view with no arguments -- the previously *required*
    ``extension`` parameter made every request fail with a TypeError.
    It now defaults to the empty string (kept for backward compatibility).
    """
    return redirect(
        app.config.get('MEDIA_URL') + 'browserconfig.xml' + extension
    )
@views.route('/favicon.<extension>')
def favicon(extension):
    """Redirect favicon requests to the static asset under MEDIA_URL."""
    media_url = app.config.get('MEDIA_URL')
    return redirect(media_url + 'favi.' + extension)
@views.route('/')
@views.route('/<path:path>/')
@cache_headers(seconds=21600)
@html_minify
def index(path=''):
    """Render a directory listing plus its optional 'index' page."""
    # Only branches whitelisted in BRANCHES_PUBLIC may be served.
    branch = request.args.get('branch', app.config.get('BRANCHES_DEFAULT'))
    if branch not in app.config.get('BRANCHES_PUBLIC'):
        abort(403)
    try:
        assert app.config.get('CONTENT_ROOT', None), 'No CONTENT_ROOT'
        repo = repository(app.config.get('CONTENT_ROOT')).changeset(branch)
    except exceptions.RepositoryError:
        abort(404)
    try:
        directory = repo.get_directory(path)
    except exceptions.NodeDoesNotExistError:
        abort(404)
    try:
        # An 'index' file (in any renderable format) is optional.
        page = repo.find_file(
            path,
            'index',
            app.config.get('FILE_RENDERERS', {}).keys()
        )
    except exceptions.NodeDoesNotExistError:
        page = None
    return render_template(
        'index.html',
        index=serialize(directory, config=app.config),
        page=serialize(page, config=app.config)
    )
@views.route('/<name>')
@views.route('/<path:path>/<name>')
@cache_headers(seconds=21600)
@html_minify
def file(name, path=''):
    """Render a single named file; the enclosing directory listing is optional."""
    # Only branches whitelisted in BRANCHES_PUBLIC may be served.
    branch = request.args.get('branch', app.config.get('BRANCHES_DEFAULT'))
    if branch not in app.config.get('BRANCHES_PUBLIC'):
        abort(403)
    try:
        repo = repository(app.config.get('CONTENT_ROOT')).changeset(branch)
    except exceptions.RepositoryError:
        abort(404)
    try:
        directory = repo.get_directory(path)
    except exceptions.NodeDoesNotExistError:
        # Unlike index(), a missing directory is tolerated here.
        directory = None
    try:
        page = repo.find_file(
            path,
            name,
            app.config.get('FILE_RENDERERS', {}).keys()
        )
    except exceptions.NodeDoesNotExistError:
        abort(404)
    return render_template(
        'file.html',
        index=serialize(directory, config=app.config),
        page=serialize(page, config=app.config)
    )
| {
"content_hash": "788e43e792980c70220283084a1c5335",
"timestamp": "",
"source": "github",
"line_count": 93,
"max_line_length": 75,
"avg_line_length": 27.70967741935484,
"alnum_prop": 0.6418315871168024,
"repo_name": "spryle/james.spry-leverton.com",
"id": "decbd51cb6897f6cd68f9f5299694fc4bfea4fbe",
"size": "2597",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "www/main/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "76264"
},
{
"name": "JavaScript",
"bytes": "66998"
},
{
"name": "Python",
"bytes": "33129"
}
],
"symlink_target": ""
} |
import unittest
import mock
from sync_settings import sync_version as version
class TestSyncVersion(unittest.TestCase):
    """Unit tests for sync_version: local/remote version lookup and upgrade."""

    @mock.patch('requests.get')
    def test_get_remote_version_failed(self, get_mock):
        # A non-200 response yields an empty version dict.
        response = mock.Mock()
        response.status_code = 404
        get_mock.return_value = response
        self.assertDictEqual({}, version.get_remote_version())

    @mock.patch('sync_settings.libs.gist.Gist.commits')
    def test_get_remote_version(self, commits_mock):
        commits_mock.return_value = [{
            'version': '123123123',
            'committed_at': '2019-01-11T02:15:15Z'
        }]
        v = version.get_remote_version()
        self.assertDictEqual({'hash': '123123123', 'created_at': '2019-01-11T02:15:15Z'}, v)

    @mock.patch('sync_settings.libs.path.exists', mock.MagicMock(return_value=False))
    def test_get_local_version_no_file(self):
        v = version.get_local_version()
        self.assertDictEqual({}, v)

    @mock.patch('sync_settings.libs.path.exists', mock.MagicMock(return_value=True))
    @mock.patch('sync_settings.sync_version.open', mock.mock_open(read_data='plain text'))
    def test_get_local_version_invalid_content(self):
        # Non-JSON file content is treated as no version.
        self.assertDictEqual({}, version.get_local_version())

    @mock.patch('sync_settings.libs.path.exists', mock.MagicMock(return_value=True))
    @mock.patch('sync_settings.sync_version.open', mock.mock_open(read_data='{}'))
    def test_get_local_version_empty_json(self):
        self.assertDictEqual({}, version.get_local_version())

    @mock.patch('sync_settings.libs.path.exists', mock.MagicMock(return_value=True))
    @mock.patch('sync_settings.sync_version.open', mock.mock_open(
        read_data='{"created_at": "2019-01-11T02:15:15Z", "hash": "123123123"}'))
    def test_get_local_version_with_content(self):
        v = version.get_local_version()
        self.assertDictEqual({'hash': '123123123', 'created_at': '2019-01-11T02:15:15Z'}, v)

    @mock.patch('sync_settings.libs.path.exists', mock.MagicMock(return_value=True))
    @mock.patch(
        'sync_settings.sync_version.open',
        mock.mock_open(
            read_data='{"created_at": "2019-01-11T02:15:15Z", /* some comment */"hash": "123123123"}'
        ),
    )
    def test_get_local_version_with_commented_content(self):
        # Comments inside the JSON file are tolerated (Sublime-style settings).
        v = version.get_local_version()
        self.assertDictEqual({"hash": "123123123", "created_at": "2019-01-11T02:15:15Z"}, v)

    @mock.patch('sublime.yes_no_cancel_dialog', mock.MagicMock(return_value=1))
    def test_show_update_dialog(self):
        # Dialog answer 1 (yes) triggers the on_done callback.
        def on_done():
            on_done.called = True

        on_done.called = False
        version.show_update_dialog(on_done)
        self.assertTrue(on_done.called)

    @mock.patch('sync_settings.sync_version.get_local_version', mock.MagicMock(return_value={}))
    @mock.patch('sync_settings.sync_version.show_update_dialog')
    def test_upgrade_without_local_version(self, dialog_mock):
        version.upgrade()
        self.assertTrue(dialog_mock.called)

    @mock.patch('sync_settings.sync_version.get_local_version', mock.MagicMock(return_value={
        'hash': '123123123',
        'created_at': '2019-01-11T02:15:15Z'
    }))
    @mock.patch('sync_settings.sync_version.get_remote_version', mock.MagicMock(return_value={}))
    @mock.patch('sync_settings.sync_version.show_update_dialog')
    def test_upgrade_without_remote_version(self, dialog_mock):
        version.upgrade()
        self.assertFalse(dialog_mock.called)

    @mock.patch('sync_settings.sync_version.get_local_version', mock.MagicMock(return_value={
        'hash': '123123123',
        'created_at': '2019-01-11T02:15:15Z'
    }))
    @mock.patch('sync_settings.sync_version.get_remote_version', mock.MagicMock(return_value={
        'hash': '123123123',
        'created_at': '2019-01-11T02:15:15Z'
    }))
    @mock.patch('sync_settings.sync_version.show_update_dialog')
    def test_upgrade_same_version(self, dialog_mock):
        version.upgrade()
        self.assertFalse(dialog_mock.called)

    @mock.patch('sync_settings.sync_version.get_local_version', mock.MagicMock(return_value={
        'hash': '123123123',
        'created_at': '2019-01-11T02:15:15Z'
    }))
    @mock.patch('sync_settings.sync_version.get_remote_version', mock.MagicMock(return_value={
        'hash': '123123124',
        'created_at': '2019-01-12T02:15:15Z'
    }))
    @mock.patch('sync_settings.sync_version.show_update_dialog')
    def test_upgrade_outdated_version(self, dialog_mock):
        # A newer remote version prompts the user to update.
        version.upgrade()
        self.assertTrue(dialog_mock.called)
| {
"content_hash": "2a203f3b8d904b0a244dee6e474c5345",
"timestamp": "",
"source": "github",
"line_count": 105,
"max_line_length": 101,
"avg_line_length": 43.80952380952381,
"alnum_prop": 0.6573913043478261,
"repo_name": "mfuentesg/SyncSettings",
"id": "e518ce891398a2603acdf14cb7a49beaa68a5a6b",
"size": "4600",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_sync_version.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "53167"
}
],
"symlink_target": ""
} |
"""
Copyright (C), 2013, The Schilduil Team. All rights reserved.
"""
import logging
import inspect
import time
from collections import OrderedDict
from operator import add
from functools import wraps
__all__ = [
    "logging",
    "get_timings",
    "init_timings",
    "disable_timings",
    "timings_report",
    "loguse",
]

# Global timing table: callable -> (call_count, cumulative_seconds).
# None means timing collection is disabled.
timings = None
def get_timings():
    """ Returns the gathered timings. """
    # May be None when timing collection is disabled.
    return timings
def init_timings():
    """ Initiates/Resets the timings. """
    global timings
    timings = {}
def disable_timings():
    """ Disables the gathering of timings. """
    global timings
    timings = None
def add_timing(f, time):
    """Accumulate one execution time for *f* in the global timings table.

    Each entry is a (call_count, cumulative_time) tuple; a no-op when
    timing collection is disabled (timings is None).
    """
    global timings
    if timings is None:
        return
    count, total = timings.get(f, (0, 0))
    timings[f] = (count + 1, total + time)
def timings_report():
    """Generate an OrderedDict of (count, total, average) per callable.

    Entries are sorted by average execution time, slowest first.
    Returns None when timing collection is disabled.
    """
    if timings is None:
        return None
    averages = {f: total / count for f, (count, total) in timings.items()}
    ordered = OrderedDict()
    for f in sorted(averages, key=averages.get, reverse=True):
        ordered[f] = timings[f] + (averages[f],)
    return ordered
def loguse(param=None):
    """When in debug it will log entering and exiting a function or object methods.

    WARNING: Some callables are broken when you use this (e.g. Thread.__init__)
    Upon entering the callable it will also log all arguments.
    You can specify arguments to this decorator: a string, an int or a list of
    string and/or integers. A string causes not to log that named argument, an
    int causes not to log that positional argument.
    Example:
    @loguse # will log all arguments.
    @loguse('arg0') # will log all but not a named argument named 'arg0'.
    @loguse(0) # will log all but the first positional argument.
    @loguse(['arg0',0]) # will log all but the first pos. and 'arg0'.
    """
    # Checking the param
    # If it is a callable then just return what real_loguse(f) would return.
    # If it is a parameter (even if None) return the real_loguse function.
    # Param:
    # It could be None => empty list
    # It could be a string => list with that string as element
    # It could be an iterable => ok
    start_time = time.time()
    # NOTE(review): these two outer bindings are shadowed by local
    # assignments inside decorator() below and are therefore never read.
    start_time_callable = 0.0
    end_time_callable = 0.0
    f = None
    ignore_parameters = []
    if param is None:
        ignore_parameters = []
    elif callable(param):
        # Bare @loguse usage: param is the decorated callable itself.
        f = param
    elif isinstance(param, str):
        ignore_parameters = [param]
    elif isinstance(param, int):
        ignore_parameters = [param]
    elif hasattr(param, "__iter__"):
        ignore_parameters = param
    # Looking for the classname.
    classname = "?"
    try:
        # We don't want this weird stuff messing in the log decorator
        # halting our code. And that is a real possibility as this
        # stuff is in CPtython but does not have to present in other
        # python implementation. More info on inspect:
        # http://docs.python.org/3/library/inspect.html
        classname = inspect.getouterframes(inspect.currentframe())[1][3]
    except:
        pass

    def real_loguse(f):
        # Logger named after the decorated function's module.
        log = logging.getLogger(f.__module__)

        @wraps(f)
        def decorator(*args, **kwargs):
            start_time_logdecorator = time.time()
            # Copies so that redacted logging never mutates the real args.
            l_args = list(args)
            l_kwargs = dict(kwargs)
            if log.isEnabledFor(logging.DEBUG):
                # Sort descending so popping positional indexes stays valid.
                ignore_parameters.sort(key=str, reverse=True)
                if ignore_parameters:
                    # Deleting any parameters so they are not logged.
                    for param in ignore_parameters:
                        if isinstance(param, int):
                            try:
                                l_args.pop(param)
                            except:
                                pass
                        else:
                            try:
                                del l_kwargs[str(param)]
                            except:
                                pass
                if classname == "<module>":
                    log.debug("> %s(%r, %r)", f.__name__, tuple(l_args), l_kwargs)
                else:
                    log.debug(
                        "> %s.%s(%r, %r)",
                        classname,
                        f.__name__,
                        tuple(l_args),
                        l_kwargs,
                    )
            # Time only the wrapped callable itself.
            start_time_callable = time.time()
            result = f(*args, **kwargs)
            end_time_callable = time.time()
            add_timing(f, end_time_callable - start_time_callable)
            if log.isEnabledFor(logging.DEBUG):
                # "@" in the ignore list means: do not log the return value.
                if "@" in ignore_parameters:
                    if classname == "<module>":
                        log.debug("< %s", f.__name__)
                    else:
                        log.debug("< %s.%s", classname, f.__name__)
                else:
                    if classname == "<module>":
                        log.debug("< %s: %r", f.__name__, result)
                    else:
                        log.debug("< %s.%s: %r", classname, f.__name__, result)
            end_time_logdecorator = time.time()
            # Record the decorator's own overhead (excluding the callable).
            add_timing(
                "loguse function call overhead",
                end_time_logdecorator
                - end_time_callable
                + start_time_callable
                - start_time_logdecorator,
            )
            return result

        return decorator

    end_time = time.time()
    add_timing("loguse function initialization overhead", end_time - start_time)
    if f:
        # Bare @loguse: decorate immediately.
        return real_loguse(f)
    else:
        # Parameterized @loguse(...): return the real decorator.
        return real_loguse
| {
"content_hash": "d215e73bf4abc81cf4776c8b9206752e",
"timestamp": "",
"source": "github",
"line_count": 184,
"max_line_length": 83,
"avg_line_length": 32.20108695652174,
"alnum_prop": 0.530464135021097,
"repo_name": "schilduil/suapp",
"id": "5709a55817a8771fa4de6f0e109702446763a59b",
"size": "5949",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "suapp/logdecorator.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "5285"
},
{
"name": "Python",
"bytes": "222509"
},
{
"name": "Shell",
"bytes": "2835"
}
],
"symlink_target": ""
} |
from . import google_translator_toolkit
from . import google_sheets
from grow.common import extensions
from grow.common import utils
# Registry mapping a translator KIND string to its implementing class.
_kinds_to_classes = {}

# Translator implementations shipped with grow itself.
_builtins = (
    google_translator_toolkit.GoogleTranslatorToolkitTranslator,
    google_sheets.GoogleSheetsTranslator,
)
def install_translator(translator):
    """Register *translator* in the module registry under its KIND."""
    _kinds_to_classes.update({translator.KIND: translator})
def install_builtins():
    """Register every built-in translator class.

    BUG FIX: the original declared ``global _destination_kinds_to_classes``,
    a name that does not exist anywhere in this module (the registry is
    ``_kinds_to_classes``, and nothing here assigns to a global anyway);
    the dead statement has been removed.
    """
    for builtin in _builtins:
        install_translator(builtin)
def create_translator(pod, kind, config, inject=False,
                      project_title=None, instructions=None):
    """Instantiate the translator class registered for *kind*.

    Raises:
        ValueError: if *kind* is not a registered translator kind.
    """
    install_builtins()
    if kind not in _kinds_to_classes:
        raise ValueError('No translator exists: "{}"'.format(kind))
    translator_class = _kinds_to_classes[kind]
    return translator_class(pod=pod, config=config, inject=inject,
                            project_title=project_title,
                            instructions=instructions)
def register_extensions(extension_paths, pod_root):
    """Import each extension path and register it as a translator."""
    for extension_path in extension_paths:
        extension_class = extensions.import_extension(extension_path, [pod_root])
        install_translator(extension_class)
| {
"content_hash": "a30a641908c8577b5784d46f2b492a1b",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 77,
"avg_line_length": 30.027027027027028,
"alnum_prop": 0.7083708370837084,
"repo_name": "grow/pygrow",
"id": "7a64cf75e3719a155ee6dc5ab6e0b02c1b2f15bc",
"size": "1111",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "grow/translators/translators.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "527"
},
{
"name": "HTML",
"bytes": "8714"
},
{
"name": "Python",
"bytes": "309004"
},
{
"name": "Shell",
"bytes": "4219"
}
],
"symlink_target": ""
} |
from google.cloud import artifactregistry_v1
def sample_delete_repository():
    """Delete an Artifact Registry repository and block until done."""
    # Create a client.
    client = artifactregistry_v1.ArtifactRegistryClient()

    # Build the request; "name_value" is a placeholder resource name.
    request = artifactregistry_v1.DeleteRepositoryRequest(
        name="name_value",
    )

    # delete_repository returns a long-running operation.
    operation = client.delete_repository(request=request)

    print("Waiting for operation to complete...")

    # Block until the server finishes the deletion.
    response = operation.result()

    # Handle the response.
    print(response)
# [END artifactregistry_v1_generated_ArtifactRegistry_DeleteRepository_sync]
| {
"content_hash": "f91b09576da033e390d3a7be6f904d44",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 76,
"avg_line_length": 25.304347826086957,
"alnum_prop": 0.7250859106529209,
"repo_name": "googleapis/python-artifact-registry",
"id": "439a59642879ce463e2f578e3c041b9554414ed6",
"size": "1995",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "samples/generated_samples/artifactregistry_v1_generated_artifact_registry_delete_repository_sync.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "1577437"
},
{
"name": "Shell",
"bytes": "30693"
}
],
"symlink_target": ""
} |
import cPickle
import errno
import itertools
import os
import sys
import tempfile
import traceback
from conary import files, trove, callbacks
from conary.deps import deps
from conary.lib import util, openpgpfile, sha1helper, openpgpkey
from conary.repository import changeset, errors, filecontents
from conary.repository.datastore import DataStoreRepository, DataStore
from conary.repository.datastore import DataStoreSet
from conary.repository.repository import AbstractRepository
from conary.repository.repository import ChangeSetJob
from conary.repository import netclient
from conary.server import schema
class FilesystemChangeSetJob(ChangeSetJob):
    """ChangeSetJob that commits a changeset into a FilesystemRepository,
    wrapping the base commit with trove-set start/done bookkeeping."""

    def __init__(self, repos, cs, *args, **kw):
        self.mirror = kw.get('mirror', False)
        # Popped (not just read): the base ChangeSetJob does not accept it.
        self.requireSigs = kw.pop('requireSigs', False)
        self.callback = kw.get('callback', False)
        # Pre-register the names this commit will touch, run the commit,
        # then let the store finish its batched bookkeeping.
        self.addTroveSetStart(repos, cs)
        ChangeSetJob.__init__(self, repos, cs, *args, **kw)
        repos.troveStore.addTroveSetDone(self.callback)

    def addTroveSetStart(self, repos, cs):
        """Tell the trove store which troves and path pieces are coming."""
        newDirNames = set()
        newBaseNames = set()
        oldTroves = []
        # NOTE(review): the enumerate() index is unused.
        for i, csTrove in enumerate(cs.iterNewTroveList()):
            if csTrove.getOldVersion():
                oldTroves.append(csTrove.getOldNameVersionFlavor())
            for fileInfo in itertools.chain(
                    csTrove.getNewFileList(raw = True),
                    csTrove.getChangedFileList(raw = True)):
                if fileInfo[1] is None:
                    continue
                # fileInfo[1]/[2] appear to be (dirName, baseName) in raw
                # form — TODO confirm against ChangeSet.getNewFileList(raw=True).
                newDirNames.add(fileInfo[1])
                newBaseNames.add(fileInfo[2])
        repos.troveStore.addTroveSetStart(oldTroves, newDirNames, newBaseNames)

    def _containsFileContents(self, sha1iter):
        # Delegate contents presence checks to the trove store.
        return self.repos.troveStore.hasFileContents(sha1iter)

    def markTroveRemoved(self, name, version, flavor):
        self.repos.markTroveRemoved(name, version, flavor)

    def checkTroveCompleteness(self, trv):
        """Reject troves missing checksums (non-mirror) or too new a schema."""
        if not self.mirror and not trv.troveInfo.sigs.sha1():
            raise errors.TroveChecksumMissing(trv.getName(), trv.getVersion(),
                                              trv.getFlavor())
        if trv.troveInfo.incomplete():
            if trv.troveInfo.troveVersion() > trove.TROVE_VERSION:
                # Written by a newer trove schema than this code understands.
                raise errors.TroveSchemaError(trv.getName(), trv.getVersion(),
                                              trv.getFlavor(),
                                              trv.troveInfo.troveVersion(),
                                              trove.TROVE_VERSION)
            else:
                nvf = trv.getName(), trv.getVersion(), trv.getFlavor(),
                err = 'Attempted to commit incomplete trove %s=%s[%s]' % nvf
                raise errors.TroveIntegrityError(error=err, *nvf)

    def checkTroveSignatures(self, trv, callback):
        """Verify digital signatures, enforcing requireSigs when set."""
        assert(hasattr(callback, 'verifyTroveSignatures'))
        # Lazily hand the callback a key cache if it has none.
        if callback.keyCache is None:
            callback.keyCache = openpgpkey.getKeyCache()
        for fingerprint, timestamp, sig in trv.troveInfo.sigs.digitalSigs.iter():
            try:
                pubKey = callback.keyCache.getPublicKey(fingerprint)
                if pubKey.isRevoked():
                    raise openpgpfile.IncompatibleKey('Key %s is revoked'
                                                      %pubKey.getFingerprint())
                expirationTime = pubKey.getTimestamp()
                if expirationTime and expirationTime < timestamp:
                    raise openpgpfile.IncompatibleKey('Key %s is expired'
                                                      %pubKey.getFingerprint())
            except openpgpfile.KeyNotFound:
                # missing keys could be okay; that depends on the threshold
                # we've set. it's the callbacks problem in any case.
                pass
        res = ChangeSetJob.checkTroveSignatures(self, trv, callback)
        # res[1] holds fingerprints of keys the cache did not recognize.
        if len(res[1]) and self.requireSigs:
            raise openpgpfile.KeyNotFound('Repository does not recognize '
                                          'key: %s'% res[1][0])
class UpdateCallback(callbacks.UpdateCallback):
    """Commit callback that checkpoints its progress to a status file."""

    def __init__(self, statusPath, trustThreshold, keyCache):
        self.path = statusPath
        if statusPath:
            # Temp files are created in the same directory as statusPath so
            # the rename in _dumpStatus stays on one filesystem (atomic).
            self.tmpDir = os.path.dirname(statusPath)
        callbacks.UpdateCallback.__init__(self, trustThreshold, keyCache)

    def _dumpStatus(self, *args):
        """Pickle *args* to the status file via write-temp-then-rename."""
        if self.path:
            # make the new status dump in a temp location
            # for atomicity
            (fd, path) = tempfile.mkstemp(dir = self.tmpDir,
                                          suffix = '.commit-status')
            buf = cPickle.dumps(args)
            os.write(fd, buf)
            os.close(fd)
            # Atomic replace of the previous status file.
            os.rename(path, self.path)

    def creatingDatabaseTransaction(self, *args):
        self._dumpStatus('creatingDatabaseTransaction', *args)

    def updatingDatabase(self, *args):
        self._dumpStatus('updatingDatabase', *args)
class FilesystemRepository(DataStoreRepository, AbstractRepository):
def __init__(self, serverNameList, troveStore, contentsDir, repositoryMap,
             requireSigs = False, paranoidCommits = False):
    """Build a repository over *troveStore* plus one or more content dirs.

    contentsDir is a list of paths; a single path uses a plain DataStore,
    several paths use a DataStoreSet spanning them.
    """
    self.serverNameList = serverNameList
    self.paranoidCommits = paranoidCommits
    # BUG FIX (hygiene): locals were named ``map`` and ``dir``, shadowing
    # the builtins; renamed with no behavior change.
    repoMap = dict(repositoryMap)
    for serverName in serverNameList:
        # This repository serves its own server names directly.
        repoMap[serverName] = self
    # XXX this client needs to die
    from conary import conarycfg
    self.reposSet = netclient.NetworkRepositoryClient(repoMap,
                                    conarycfg.UserInformation())
    self.troveStore = troveStore
    self.requireSigs = requireSigs
    for contentsPath in contentsDir:
        util.mkdirChain(contentsPath)
    if len(contentsDir) == 1:
        store = DataStore(contentsDir[0])
    else:
        storeList = [ DataStore(contentsPath)
                      for contentsPath in contentsDir ]
        store = DataStoreSet(*storeList)
    DataStoreRepository.__init__(self, dataStore = store)
    AbstractRepository.__init__(self)
def close(self):
    """Close the underlying trove store database (idempotent)."""
    if self.troveStore is None:
        return
    self.troveStore.db.close()
    self.troveStore = None
### Package access functions

def thawFlavor(self, flavor):
    """Thaw a frozen flavor string into a Flavor object."""
    return deps.ThawFlavor(flavor)

def hasTrove(self, pkgName, version, flavor):
    return self.troveStore.hasTrove(pkgName, troveVersion = version,
                                    troveFlavor = flavor)

def getTrove(self, pkgName, version, flavor, pristine = True,
             withFiles = True, hidden = False):
    # NOTE(review): 'pristine' is accepted but never forwarded to the store.
    return self.troveStore.getTrove(
        pkgName, version, flavor, withFiles = withFiles,
        hidden = hidden)

def iterTroves(self, troveList, withFiles = True, hidden = False):
    return self.troveStore.iterTroves(troveList, withFiles = withFiles,
                                      hidden = hidden)

def getParentTroves(self, troveList):
    return self.troveStore.getParentTroves(troveList)

def addTrove(self, trv, trvCs, hidden = False, oldTroveSpec = None):
    # NOTE(review): 'oldTroveSpec' is accepted but unused here.
    return self.troveStore.addTrove(trv, trvCs, hidden = hidden)

def addTroveDone(self, pkg, mirror=False):
    self.troveStore.addTroveDone(pkg, mirror=mirror)
### File functions
def getFileVersion(self, pathId, fileId, fileVersion, withContents = 0):
    """Return the file object for one file, optionally with its contents.

    Returns ``fileObj``, or ``(fileObj, cont)`` when withContents is set
    (cont is None for files without contents).
    """
    # the get trove netclient provides doesn't work with a
    # FilesystemRepository (it needs to create a change set which gets
    # passed)
    if fileVersion.getHost() not in self.serverNameList:
        # XXX This code is not needed as of version 1.0.14 of the client.
        assert(not withContents)
        return self.reposSet.getFileVersion(pathId, fileId, fileVersion)

    fileObj = self.troveStore.getFile(pathId, fileId)
    if withContents:
        if fileObj.hasContents:
            # BUG FIX: previously read ``file.contents.sha1()`` — the
            # builtin ``file``, not the looked-up ``fileObj`` — which
            # raised AttributeError whenever contents were requested.
            cont = filecontents.FromDataStore(self.contentsStore,
                                              fileObj.contents.sha1())
        else:
            cont = None

        return (fileObj, cont)

    return fileObj
def getFileVersions(self, fileList, withContents = False):
    """Yield one result per (pathId, fileId, fileVersion) entry.

    Each yielded value is ``fileObj``, or ``(fileObj, cont)`` when
    withContents is set — mirroring getFileVersion's return shape.
    """
    # this is for compatibility with <= 1.0.13
    crossRepos = False
    for (pathId, fileId, fileVersion) in fileList:
        if fileVersion.getHost() not in self.serverNameList:
            crossRepos = True

    if crossRepos:
        for x in fileList:
            yield self.getFileVersion(withContents = withContents, *x)
    else:
        fileDict = self.troveStore.getFiles(fileList)
        for x in fileList:
            # (pathId, fileId) lookup
            try:
                fileObj = fileDict[x[0:2]]
            except KeyError:
                raise errors.FileStreamMissing(x[1])

            if withContents:
                # BUG FIX: previously tested ``file.hasContents`` and read
                # ``file.contents.sha1()`` — the builtin ``file`` instead
                # of ``fileObj`` — raising AttributeError.
                if fileObj.hasContents:
                    cont = filecontents.FromDataStore(self.contentsStore,
                                                      fileObj.contents.sha1())
                else:
                    cont = None
                yield (fileObj, cont)
            else:
                # BUG FIX: this yield was unconditional, so withContents
                # callers got two values per requested file (the tuple AND
                # the bare object), breaking the one-result-per-entry
                # contract the crossRepos branch follows.
                yield fileObj
def addFileVersion(self, troveInfo, pathId, path, fileId,
                   fileVersion, fileStream = None, withContents = True):
    """Record one file on the trove currently being committed."""
    troveInfo.addFile(pathId, path, fileId, fileVersion,
                      fileStream = fileStream, withContents = withContents)
###
def commitChangeSet(self, cs, mirror=False, hidden=False, serialize=False,
                    excludeCapsuleContents = False, callback = None,
                    statusPath = None):
    """Commit changeset *cs* inside one store transaction.

    Rolls back on any failure; with paranoidCommits, re-reads every
    committed trove and verifies its digests before committing.
    """
    # when we add troves (no removals) we disable constraints on
    # the TroveFiles table; it speeds up large commits massively on
    # postgres
    enableConstraints = True

    # let's make sure committing this change set is a sane thing to attempt
    for trvCs in cs.iterNewTroveList():
        if trvCs.troveType() == trove.TROVE_TYPE_REMOVED:
            # Removals present: keep table constraints enabled.
            enableConstraints = False
        v = trvCs.getNewVersion()
        if v.isOnLocalHost():
            label = v.branch().label()
            raise errors.CommitError('can not commit items on '
                                     '%s label' %(label.asString()))
    self.troveStore.begin(serialize)
    if enableConstraints:
        # Reuses the flag: becomes a handle with .enable() (re-enabled
        # below), or falsy if the backend does not support disabling.
        enableConstraints = self.troveStore.db.disableTableConstraints(
            'TroveFiles')

    if self.requireSigs:
        threshold = openpgpfile.TRUST_FULL
    else:
        threshold = openpgpfile.TRUST_UNTRUSTED

    # Callback for signature verification and progress
    if statusPath:
        assert not callback
        callback = UpdateCallback(statusPath=statusPath,
                                  trustThreshold=threshold,
                                  keyCache=self.troveStore.keyTable.keyCache)

    try:
        # reset time stamps only if we're not mirroring.
        FilesystemChangeSetJob(self, cs, self.serverNameList,
                               resetTimestamps = not mirror,
                               callback=callback,
                               mirror = mirror,
                               hidden = hidden,
                               excludeCapsuleContents =
                                    excludeCapsuleContents,
                               requireSigs = self.requireSigs)
    except openpgpfile.KeyNotFound:
        # don't be quite so noisy, this is a common error
        self.troveStore.rollback()
        raise
    except:
        # Broad on purpose: log the traceback, roll back, and re-raise.
        print >> sys.stderr, "exception occurred while committing change set"
        print >> sys.stderr, ''.join(traceback.format_exception(*sys.exc_info()))
        print >> sys.stderr, "attempting rollback"
        self.troveStore.rollback()
        raise
    else:
        if self.paranoidCommits:
            # Re-read every newly committed trove and verify its digests.
            for trvCs in cs.iterNewTroveList():
                newTuple = trvCs.getNewNameVersionFlavor()
                if newTuple[1] is None:
                    continue

                trv = self.getTrove(withFiles = True, *newTuple)
                assert(trv.verifyDigests())
        if enableConstraints:
            # Re-enable the TroveFiles constraints disabled above.
            enableConstraints.enable()
        self.troveStore.commit()
def markTroveRemoved(self, name, version, flavor):
    """Mark a trove removed and delete its now-unreferenced contents."""
    sha1s = self.troveStore.markTroveRemoved(name, version, flavor)
    for sha1 in sha1s:
        try:
            self.contentsStore.removeFile(sha1helper.sha1ToString(sha1))
        except OSError, e:
            # Already gone is fine; anything else is a real error.
            if e.errno != errno.ENOENT:
                raise
def getFileContents(self, itemList):
    """Return a contents object for each (fileId, fileVersion, fileObj) item."""
    contents = []

    for item in itemList:
        (fileId, fileVersion) = item[0:2]

        # the get trove netclient provides doesn't work with a
        # FilesystemRepository (it needs to create a change set which gets
        # passed)
        if fileVersion.getHost() in self.serverNameList:
            # Local file: item[2] is the thawed file object.
            fileObj = item[2]
            cont = filecontents.FromDataStore(self.contentsStore,
                                              fileObj.contents.sha1())
        else:
            # XXX This code is not needed as of version 1.0.14 of the
            # client.
            #
            # a bit of sleight of hand here... we look for this file in
            # the trove it was first built in
            #
            # this could cause us to run out of file descriptors on large
            # troves. it might be better to close the file and return
            # a filecontents object?
            cont = self.reposSet.getFileContents([ item ])[0]

        contents.append(cont)

    return contents
def createChangeSet(self, origTroveList, recurse = True,
                    withFiles = True, withFileContents = True,
                    excludeCapsuleContents = False,
                    excludeAutoSource = False,
                    mirrorMode = False, roleIds = None):
    """
    @param origTroveList: a list of
    C{(troveName, flavor, oldVersion, newVersion, absolute)} tuples.

    If C{oldVersion == None} and C{absolute == 0}, then the trove is
    assumed to be new for the purposes of the change set.

    If C{newVersion == None} then the trove is being removed.

    If recurse is set, this yields one result for the entire troveList.

    If recurse is not set, it yields one result per troveList entry.

    @param excludeCapsuleContents: If True, troves which include capsules
    have all of their content excluded from the changeset no matter how
    withFileContents is set.
    """
    cs = changeset.ChangeSet()
    externalTroveList = []
    externalFileList = []
    removedTroveList = []
    dupFilter = set()
    resultList = []  # NOTE(review): never used in this method.

    # make a copy to remove things from
    troveList = origTroveList[:]

    # def createChangeSet begins here
    troveWrapper = _TroveListWrapper(troveList, self.troveStore, withFiles,
                                     roleIds = roleIds)

    for (job, old, new, streams) in troveWrapper:
        (troveName, (oldVersion, oldFlavor),
         (newVersion, newFlavor), absolute) = job

        # make sure we haven't already generated this changeset; since
        # troves can be included from other troves we could try
        # to generate quite a few duplicates
        if job in dupFilter:
            continue
        else:
            dupFilter.add(job)

        done = False
        if not newVersion:
            # Pure removal job.
            if oldVersion.getHost() not in self.serverNameList:
                externalTroveList.append((troveName,
                                          (oldVersion, oldFlavor),
                                          (None, None), absolute))
            else:
                # remove this trove and any trove contained in it
                cs.oldTrove(troveName, oldVersion, oldFlavor)
                for (name, version, flavor) in \
                                old.iterTroveList(strongRefs=True):
                    troveWrapper.append((name, (version, flavor),
                                         (None, None), absolute),
                                        False)
            done = True
        elif (newVersion.getHost() not in self.serverNameList
            or (oldVersion and
                oldVersion.getHost() not in self.serverNameList)):
            # don't try to make changesets between repositories; the
            # client can do that itself
            # we don't generate changesets between removed and
            # present troves; that's up to the client
            externalTroveList.append((troveName, (oldVersion, oldFlavor),
                                      (newVersion, newFlavor), absolute))
            done = True
        elif (oldVersion and old.type() == trove.TROVE_TYPE_REMOVED):
            removedTroveList.append((troveName, (oldVersion, oldFlavor),
                                     (newVersion, newFlavor), absolute))
            done = True

        if done:
            # Per-entry yield when not recursing, then reset accumulators.
            if not recurse:
                yield (cs, externalTroveList, externalFileList,
                       removedTroveList)

                cs = changeset.ChangeSet()
                externalTroveList = []
                externalFileList = []
                removedTroveList = []

            continue

        (troveChgSet, filesNeeded, pkgsNeeded) = \
                    new.diff(old, absolute = absolute)

        if recurse:
            # Queue referenced troves; foreign ones become "external".
            for refJob in pkgsNeeded:
                refOldVersion = refJob[1][0]
                refNewVersion = refJob[2][0]
                if (refNewVersion and
                       (refNewVersion.getHost() not in self.serverNameList)
                    or (refOldVersion and
                        refOldVersion.getHost() not in self.serverNameList)
                   ):
                    # don't try to make changesets between repositories; the
                    # client can do that itself
                    externalTroveList.append(refJob)
                else:
                    troveWrapper.append(refJob, True)

        cs.newTrove(troveChgSet)

        if job in origTroveList and job[2][0] is not None:
            # add the primary w/ timestamps on the version
            try:
                primary = troveChgSet.getNewNameVersionFlavor()
                cs.addPrimaryTrove(*primary)
            except KeyError:
                # primary troves could be in the externalTroveList, in
                # which case they aren't primaries
                pass

        # sort the set of files we need into bins based on the server
        # name
        serverIdx = {}  # NOTE(review): unused.
        getList = []    # NOTE(review): appended to below but never read.
        localFilesNeeded = []

        for (pathId, oldFileId, oldFileVersion, newFileId, newFileVersion) in filesNeeded:
            # if either the old or new file version is on a different
            # repository, creating this diff is someone else's problem
            if (newFileVersion.getHost() not in self.serverNameList
                or (oldFileVersion and
                    oldFileVersion.getHost() not in self.serverNameList)):
                externalFileList.append((pathId, troveName,
                     (oldVersion, oldFlavor, oldFileId, oldFileVersion),
                     (newVersion, newFlavor, newFileId, newFileVersion)))
            else:
                localFilesNeeded.append((pathId, oldFileId, oldFileVersion,
                                         newFileId, newFileVersion))
                if oldFileVersion:
                    getList.append((pathId, oldFileId, oldFileVersion))
                getList.append((pathId, newFileId, newFileVersion))

        # Walk this in reverse order. This may seem odd, but the
        # order in the final changeset is set by sorting that happens
        # in the change set object itself. The only reason we sort
        # here at all is to make sure PTR file types come before the
        # file they refer to. Reverse sorting makes this a bit easier.
        localFilesNeeded.sort()
        localFilesNeeded.reverse()

        ptrTable = {}
        for (pathId, oldFileId, oldFileVersion, newFileId, \
             newFileVersion) in localFilesNeeded:
            oldFile = None
            if oldFileVersion:
                #oldFile = idIdx[(pathId, oldFileId)]
                oldFile = files.ThawFile(streams[oldFileId], pathId)

            oldCont = None
            newCont = None

            #newFile = idIdx[(pathId, newFileId)]
            newFile = files.ThawFile(streams[newFileId], pathId)

            if mirrorMode:
                # Mirror mode diffs against nothing: absolute file change.
                (filecs, contentsHash) = changeset.fileChangeSet(pathId,
                                                                 None,
                                                                 newFile)
            else:
                (filecs, contentsHash) = changeset.fileChangeSet(pathId,
                                                                 oldFile,
                                                                 newFile)

            cs.addFile(oldFileId, newFileId, filecs)

            if (excludeCapsuleContents and new.troveInfo.capsule.type and
                new.troveInfo.capsule.type()):
                continue

            if (not withFileContents
                or (excludeAutoSource and newFile.flags.isAutoSource())
                or (newFile.flags.isEncapsulatedContent()
                    and not newFile.flags.isCapsuleOverride())):
                continue

            # this test catches files which have changed from not
            # config files to config files; these need to be included
            # unconditionally so we always have the pristine contents
            # to include in the local database
            if ((mirrorMode and newFile.hasContents) or contentsHash or
                (oldFile and newFile.flags.isConfig()
                 and not oldFile.flags.isConfig())):
                if oldFileVersion and oldFile.hasContents:
                    oldCont = self.getFileContents(
                        [ (oldFileId, oldFileVersion, oldFile) ])[0]

                newCont = self.getFileContents(
                    [ (newFileId, newFileVersion, newFile) ])[0]

                (contType, cont) = changeset.fileContentsDiff(oldFile,
                                        oldCont, newFile, newCont,
                                        mirrorMode = mirrorMode)

                # we don't let config files be ptr types; if they were
                # they could be ptrs to things which aren't config files,
                # which would completely hose the sort order we use. this
                # could be relaxed someday to let them be ptr's to other
                # config files
                if not newFile.flags.isConfig() and \
                            contType == changeset.ChangedFileTypes.file:
                    contentsHash = newFile.contents.sha1()
                    ptr = ptrTable.get(contentsHash, None)
                    if ptr is not None:
                        # Duplicate contents: point at the first copy.
                        contType = changeset.ChangedFileTypes.ptr
                        cont = filecontents.FromString(ptr)
                    else:
                        ptrTable[contentsHash] = pathId + newFileId

                if not newFile.flags.isConfig() and \
                            contType == changeset.ChangedFileTypes.file:
                    # Serve non-config file contents pre-compressed.
                    cont = filecontents.CompressedFromDataStore(
                                  self.contentsStore,
                                  newFile.contents.sha1())
                    compressed = True
                else:
                    compressed = False

                # ptr entries are not compressed, whether or not they
                # are config files. override the compressed rule from
                # above
                if contType == changeset.ChangedFileTypes.ptr:
                    compressed = False

                cs.addFileContents(pathId, newFileId, contType, cont,
                                   newFile.flags.isConfig(),
                                   compressed = compressed)

        if not recurse:
            yield cs, externalTroveList, externalFileList, removedTroveList

            cs = changeset.ChangeSet()
            externalTroveList = []
            externalFileList = []
            removedTroveList = []

    if recurse:
        # Single combined result for the whole request.
        yield cs, externalTroveList, externalFileList, removedTroveList
class _TroveListWrapper:
    """Iterator yielding (job, oldTrove, newTrove, streams) for a job list.

    Jobs may be appended while iterating; newly appended jobs are batched
    into a fresh troveStore.iterTroves() query once the current batch is
    drained.  Python 2 iterator protocol (next()).
    """

    def _handleJob(self, job, recursed, idx):
        """Fetch the next trove from the store iterator for job side *idx*
        (1 = old, 2 = new); synthesize a removed trove for recursed misses."""
        t = self.trvIterator.next()

        if t is not None:
            if self.withFiles:
                # iterTroves(withFileStreams=True) yields (trove, streams).
                t, streams = t
            else:
                streams = {}

        if t is None:
            if recursed:
                # synthesize a removed trove for this missing
                # trove
                t = trove.Trove(job[0], job[idx][0], job[idx][1],
                                type=trove.TROVE_TYPE_REMOVED)
                t.setIsMissing(True)
                t.computeDigests()

                # synthesize empty filestreams
                streams = {}
            else:
                # drain the iterator, in order to complete
                # the sql queries
                for x in self.trvIterator: pass
                raise errors.TroveMissing(job[0], job[idx][0])

        return t, streams

    def next(self):
        if not self.l and self.new:
            # self.l (and self.trvIterator) are empty; look to
            # self.new for new jobs we need

            troveList = []
            for job, recursed in self.new:
                # do we need the old trove?
                if job[1][0] is not None:
                    troveList.append((job[0], job[1][0], job[1][1]))

                # do we need the new trove?
                if job[2][0] is not None:
                    troveList.append((job[0], job[2][0], job[2][1]))

            # flip to the new job set and its trove iterator, and
            # reset self.new for later additions
            self.trvIterator = self.troveStore.iterTroves(
                        troveList, withFiles = self.withFiles,
                        withFileStreams = self.withFiles,
                        permCheckFilter = self._permCheck,
                        hidden=True,
                        )
            self.l = self.new
            self.new = []

        if self.l:
            job, recursed = self.l.pop(0)

            # Does it have an old job?
            if job[1][0] is None:
                old = None
                oldStreams = {}
            else:
                old, oldStreams = self._handleJob(job, recursed, 1)

            # Does it have a new job
            if job[2][0] is None:
                new = None
                newStreams = {}
            else:
                new, newStreams = self._handleJob(job, recursed, 2)

            # Merged streams dict covers both sides of the job.
            newStreams.update(oldStreams)
            return job, old, new, newStreams
        else:
            raise StopIteration

    def _permCheck(self, cu, instanceTblName):
        # returns a list of instance id's we're allowed to see
        # roleIds are formatted with %d (integers only), so the IN-list
        # interpolation cannot inject SQL.
        sql = """
        DELETE FROM %s WHERE instanceId NOT IN
           (SELECT DISTINCT ugi.instanceId
            FROM %s JOIN UserGroupInstancesCache as ugi ON
                %s.instanceId = ugi.instanceId
            WHERE
                ugi.userGroupId IN (%s))
        """ % (instanceTblName, instanceTblName, instanceTblName,
               ",".join("%d" % x for x in self.roleIds))
        cu.execute(sql, start_transaction = False)

    def __iter__(self):
        # Adapt the py2-style next() into a generator-based iterator.
        while True:
            yield self.next()

    def append(self, item, recurse):
        """Queue another job; it joins the next batched store query."""
        self.new.append((item, recurse))

    def __init__(self, l, troveStore, withFiles, roleIds = None):
        self.trvIterator = None
        # Initial jobs are non-recursed.
        self.new = [ (x, False) for x in l ]
        self.l = []
        self.troveStore = troveStore
        self.withFiles = withFiles
        self.roleIds = roleIds
| {
"content_hash": "52b198f1c34dd43f67f31162c6e3e936",
"timestamp": "",
"source": "github",
"line_count": 702,
"max_line_length": 94,
"avg_line_length": 41.851851851851855,
"alnum_prop": 0.531075561606535,
"repo_name": "fedora-conary/conary",
"id": "cf918dc3bb658aff0cd1fe1bd2467ef619f532d7",
"size": "30003",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "conary/repository/netrepos/fsrepos.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "481681"
},
{
"name": "C++",
"bytes": "8244"
},
{
"name": "CSS",
"bytes": "3920"
},
{
"name": "Erlang",
"bytes": "477"
},
{
"name": "Perl",
"bytes": "45629"
},
{
"name": "Python",
"bytes": "10586616"
},
{
"name": "Shell",
"bytes": "4657"
},
{
"name": "Standard ML",
"bytes": "2756"
}
],
"symlink_target": ""
} |
"""Tests for the Google Chrome extension activity database plugin."""
import unittest
from plaso.formatters import chrome_extension_activity # pylint: disable=unused-import
from plaso.lib import eventdata
from plaso.lib import timelib
from plaso.parsers import sqlite
from plaso.parsers.sqlite_plugins import chrome_extension_activity
from tests import test_lib as shared_test_lib
from tests.parsers.sqlite_plugins import test_lib
class ChromeExtensionActivityPluginTest(test_lib.SQLitePluginTestCase):
  """Tests for the Google Chrome extension activity database plugin."""

  @shared_test_lib.skipUnlessHasTestFile([u'Extension Activity'])
  def testProcess(self):
    """Tests the Process function on a Chrome extension activity database."""
    plugin_object = chrome_extension_activity.ChromeExtensionActivityPlugin()
    cache = sqlite.SQLiteCache()
    storage_writer = self._ParseDatabaseFileWithPlugin(
        [u'Extension Activity'], plugin_object, cache=cache)

    # The test database is expected to produce 56 events.
    self.assertEqual(len(storage_writer.events), 56)

    # Spot-check every attribute of the first event.
    event_object = storage_writer.events[0]

    self.assertEqual(
        event_object.timestamp_desc, eventdata.EventTimestamp.UNKNOWN)

    expected_timestamp = timelib.Timestamp.CopyFromString(
        u'2014-11-25 21:08:23.698737')
    self.assertEqual(event_object.timestamp, expected_timestamp)

    expected_extension_id = u'ognampngfcbddbfemdapefohjiobgbdl'
    self.assertEqual(event_object.extension_id, expected_extension_id)

    self.assertEqual(event_object.action_type, 1)
    self.assertEqual(event_object.activity_id, 48)
    self.assertEqual(event_object.api_name, u'browserAction.onClicked')

    # Verify both the long and the short rendered message strings.
    expected_msg = (
        u'Chrome extension: ognampngfcbddbfemdapefohjiobgbdl '
        u'Action type: API event callback (type 1) '
        u'Activity identifier: 48 '
        u'API name: browserAction.onClicked')
    expected_short = (
        u'ognampngfcbddbfemdapefohjiobgbdl browserAction.onClicked')

    self._TestGetMessageStrings(event_object, expected_msg, expected_short)


if __name__ == '__main__':
  unittest.main()
| {
"content_hash": "a86aadae5d7090a44c69f2a3fe1f34c2",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 87,
"avg_line_length": 37.17857142857143,
"alnum_prop": 0.7512007684918348,
"repo_name": "dc3-plaso/plaso",
"id": "39bb79d57e54e585712e9cce2e984924493f0c57",
"size": "2124",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/parsers/sqlite_plugins/chrome_extension_activity.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1683"
},
{
"name": "Makefile",
"bytes": "1151"
},
{
"name": "Python",
"bytes": "3875098"
},
{
"name": "Shell",
"bytes": "17861"
}
],
"symlink_target": ""
} |
"""
Copyright 2022 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from typing import TypeVar
# https://peps.python.org/pep-0484/#annotating-instance-and-class-methods
T = TypeVar("T", bound="EnvironmentAttributeDiff")
class EnvironmentAttributeDiff:
    """Comparison of one attribute ("anchor") across two environments."""

    def __init__(
        self,
        category_of_diff: str,
        diff_anchor: str,
        env_1_anchor_value: str,
        env_2_anchor_value: str,
    ) -> None:
        """Store both values and precompute whether they match.

        Args:
            category_of_diff: grouping label for this difference.
            diff_anchor: name of the attribute being compared.
            env_1_anchor_value: value observed in environment 1.
            env_2_anchor_value: value observed in environment 2.
        """
        # CLEANUP: the ``self: T`` TypeVar annotations were dropped — no
        # method returns the instance, so the pre-PEP-673 "Self" TypeVar
        # served no purpose.  Attribute annotations are now applied
        # consistently (previously only category_of_diff was annotated).
        self.category_of_diff: str = category_of_diff
        self.diff_anchor: str = diff_anchor
        self.env_1_anchor_value: str = env_1_anchor_value
        self.env_2_anchor_value: str = env_2_anchor_value
        # True when the attribute has the same value in both environments.
        self.values_match: bool = self.env_1_anchor_value == self.env_2_anchor_value
| {
"content_hash": "62af9a85a67c58a4f446a47aec9016bd",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 78,
"avg_line_length": 34.6,
"alnum_prop": 0.7010734929810074,
"repo_name": "GoogleCloudPlatform/composer-utilities",
"id": "b63e03c003bdfa8d337c2663505836895f6c5522",
"size": "1211",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "cloudcomposerdiff/src/cloudcomposerdiff/lib/difference.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "34038"
}
],
"symlink_target": ""
} |
"""
Convert h264 videos (in the current folder) to mp4 using MP4Box
"""
import os
import subprocess
# Collect every .h264 capture in the working directory, then convert each
# to an .mp4 container with MP4Box.
h264_files = [entry for entry in os.listdir(".") if entry.endswith(".h264")]
for h264_file in h264_files:
    print("Converting {}".format(h264_file))
    # "clip.h264" -> "clip.mp4": slicing off "h264" keeps the trailing dot.
    mp4_name = h264_file[:-4] + "mp4"
    ok = subprocess.call(['MP4Box', '-add', h264_file, mp4_name])
| {
"content_hash": "1d8f3acd77a25aad5ba8509d9b118d9e",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 63,
"avg_line_length": 24.818181818181817,
"alnum_prop": 0.6410256410256411,
"repo_name": "sergionr2/RacingRobot",
"id": "ea68aa78644c38b824fdf84bcfb74d2064b9dd44",
"size": "273",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "train/convert_video.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "3062"
},
{
"name": "C++",
"bytes": "5014"
},
{
"name": "Makefile",
"bytes": "2557"
},
{
"name": "Python",
"bytes": "84061"
},
{
"name": "Shell",
"bytes": "802"
}
],
"symlink_target": ""
} |
import os
import sys
import shutil
import unittest
sys.path.append(os.path.join(os.path.dirname(__file__), "../"))
from PIL import Image
import numpy as np
from model.environment import Environment
from model.agent import Agent
from model.dqn_agent import DQNAgent, Q
class FormatAgent(Agent):
    """Agent that saves each formatted observation as a PNG instead of
    learning; used to eyeball what DQNAgent._format produces."""

    def __init__(self, path):
        self.agent = DQNAgent((0, 1))
        self.path = path  # directory the PNGs are written into
        self._step = 0

    def act(self, observation, reward):
        arr = self.agent._format(observation)
        img = Image.fromarray((arr * 255).astype(np.uint8))  # because 0/1 value
        img.save(os.path.join(self.path, "image_{0}.png".format(self._step)), "png")
        self._step += 1
        # Always take action 0; this agent only exists to dump images.
        return 0
class TestDQNAgent(unittest.TestCase):
    """Exercises DQNAgent observation formatting and state bookkeeping."""

    # Directory where FormatAgent dumps images; created/removed per class.
    IMG_PATH = ""

    @classmethod
    def setUpClass(cls):
        cls.IMG_PATH = os.path.join(os.path.dirname(__file__), "./images")
        if not os.path.exists(cls.IMG_PATH):
            os.mkdir(cls.IMG_PATH)

    @classmethod
    def tearDownClass(cls):
        shutil.rmtree(cls.IMG_PATH)

    def test_format_image(self):
        """Formatted observations are written as images of the Q input size."""
        agent = FormatAgent(self.IMG_PATH)
        env = Environment()
        for ep, s, r in env.play(agent, episode=1):
            pass

        img = Image.open(os.path.join(self.IMG_PATH, "image_0.png"))
        self.assertTrue(img)
        arr = np.asarray(img)
        # BUG FIX: was ``assertTrue(arr.shape, (Q.SIZE, Q.SIZE))`` — the
        # second argument of assertTrue is the failure *message*, so the
        # shape was never actually compared.
        self.assertEqual(arr.shape, (Q.SIZE, Q.SIZE))

    def test_save_state(self):
        """get_state() keeps n_history frames of max-pooled observations."""
        env = Environment()
        agent = DQNAgent(env.actions)
        zeros = np.zeros((agent.q.SIZE, agent.q.SIZE), np.float32)
        pre_state = None
        for ep, s, r in env.play(agent, episode=1):
            state = agent.get_state()
            self.assertEqual(agent.q.n_history, len(state))
            # Frames are max-pooled across consecutive observations.
            last_state = np.maximum(agent._observations[0], agent._observations[-1])
            if s == 0:
                # after first action
                self.assertEqual(0, np.sum(zeros != agent._observations[-1]))
                self.assertEqual(1, len(agent._state))

            if s < agent.q.n_history:
                # until n_history
                self.assertEqual(0, np.sum(last_state != state[s]))
                if pre_state is not None:
                    self.assertEqual(0, np.sum(pre_state != state[s - 1]))
            else:
                # over n_history
                self.assertEqual(0, np.sum(last_state != state[-1]))
                if pre_state is not None:
                    self.assertEqual(0, np.sum(pre_state != state[-2]))

            pre_state = last_state.copy()


if __name__ == "__main__":
    unittest.main()
| {
"content_hash": "99efacb608bb9e7f67ba997301a28398",
"timestamp": "",
"source": "github",
"line_count": 85,
"max_line_length": 84,
"avg_line_length": 30.776470588235295,
"alnum_prop": 0.5649847094801224,
"repo_name": "icoxfog417/chainer_pong",
"id": "9ca16cd78449a1625d680bbb95353ce2b069d2fa",
"size": "2616",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_dqn_agent.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "22962"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'community'}
# Module documentation consumed by ansible-doc; must be valid YAML.
DOCUMENTATION = r'''
---
module: ucs_query
short_description: Queries UCS Manager objects by class or distinguished name
description:
- Queries UCS Manager objects by class or distinguished name.
- Examples can be used with the UCS Platform Emulator U(https://cs.co/ucspe).
extends_documentation_fragment: ucs
options:
class_ids:
description:
- One or more UCS Manager Class IDs to query.
- As a comma separated list
type: str
distinguished_names:
description:
- One or more UCS Manager Distinguished Names to query.
- As a comma separated list
type: str
delegate_to:
description:
- Where the module will be run
default: localhost
type: str
requirements:
- ucsmsdk
author:
- John McDonough (@movinalot)
- CiscoUcs (@CiscoUcs)
version_added: "2.10"
'''
EXAMPLES = r'''
- name: Query UCS Class ID
  ucs_query:
    hostname: "{{ ucs_hostname }}"
    username: "{{ ucs_username }}"
    password: "{{ ucs_password }}"
    class_ids: computeBlade
    delegate_to: localhost
- name: Query UCS Class IDs
  ucs_query:
    hostname: "{{ ucs_hostname }}"
    username: "{{ ucs_username }}"
    password: "{{ ucs_password }}"
    class_ids: computeBlade, fabricVlan
    delegate_to: localhost
- name: Query UCS Distinguished Name
  ucs_query:
    hostname: "{{ ucs_hostname }}"
    username: "{{ ucs_username }}"
    password: "{{ ucs_password }}"
    distinguished_names: org-root
    delegate_to: localhost
- name: Query UCS Distinguished Names
  ucs_query:
    hostname: "{{ ucs_hostname }}"
    username: "{{ ucs_username }}"
    password: "{{ ucs_password }}"
    distinguished_names: org-root, sys/rack-unit-1, sys/chassis-1/blade-2
    delegate_to: localhost
'''
RETURN = r'''
#
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.remote_management.ucs import UCSModule, ucs_argument_spec
def retrieve_class_id(class_id, ucs):
    """Return every managed object of the given UCS class ID via the login handle."""
    handle = ucs.login_handle
    return handle.query_classid(class_id)
def retrieve_distinguished_name(distinguished_name, ucs):
    """Return the single managed object addressed by a UCS distinguished name."""
    handle = ucs.login_handle
    return handle.query_dn(distinguished_name)
def make_mo_dict(ucs_mo):
    """Flatten a UCS managed object into a plain dict.

    Keys are the property names listed in the object's ``prop_map`` values;
    each value is read off the object with ``getattr``.
    """
    return {
        mo_property: getattr(ucs_mo, mo_property)
        for mo_property in ucs_mo.prop_map.values()
    }
def main():
    """Ansible entry point: query UCS objects by class ID or distinguished name.

    Results are placed under the ``objects`` key of the module result.
    ``class_ids`` and ``distinguished_names`` are mutually exclusive
    comma-separated lists.
    """
    # Copy the shared base spec so we do not mutate the imported
    # module-level ucs_argument_spec dict in place.
    argument_spec = dict(ucs_argument_spec)
    argument_spec.update(
        class_ids=dict(type='str'),
        distinguished_names=dict(type='str'),
        delegate_to=dict(type='str', default='localhost'),
    )
    module = AnsibleModule(
        argument_spec,
        supports_check_mode=False,
        mutually_exclusive=[
            ['class_ids', 'distinguished_names'],
        ],
    )
    # UCSModule verifies ucsmsdk is present and exits on failure.
    # Imports are below for UCS object creation.
    ucs = UCSModule(module)
    err = False
    query_result = {}
    try:
        if module.params['class_ids']:
            class_ids = [
                x.strip() for x in module.params['class_ids'].split(',')
            ]
            for class_id in class_ids:
                # Each class ID maps to the (possibly empty) list of its objects.
                query_result[class_id] = []
                ucs_mos = retrieve_class_id(class_id, ucs)
                if ucs_mos:
                    for ucs_mo in ucs_mos:
                        query_result[class_id].append(make_mo_dict(ucs_mo))
            ucs.result['objects'] = query_result
        elif module.params['distinguished_names']:
            distinguished_names = [
                x.strip()
                for x in module.params['distinguished_names'].split(',')
            ]
            for distinguished_name in distinguished_names:
                # Each DN maps to a single object dict (empty if not found).
                query_result[distinguished_name] = {}
                ucs_mo = retrieve_distinguished_name(distinguished_name, ucs)
                if ucs_mo:
                    query_result[distinguished_name] = make_mo_dict(ucs_mo)
            ucs.result['objects'] = query_result
    except Exception as e:
        err = True
        ucs.result['msg'] = "setup error: %s " % str(e)
    if err:
        module.fail_json(**ucs.result)
    # Queries never change managed state.
    ucs.result['changed'] = False
    module.exit_json(**ucs.result)
# Standard Ansible module entry point.
if __name__ == '__main__':
    main()
| {
"content_hash": "fda840661a861adbe87d139d3e4aa56f",
"timestamp": "",
"source": "github",
"line_count": 167,
"max_line_length": 83,
"avg_line_length": 26.74251497005988,
"alnum_prop": 0.6018808777429467,
"repo_name": "thaim/ansible",
"id": "de35b4a17ff03a21c16aa60e0a0c513bae5f88b5",
"size": "4603",
"binary": false,
"copies": "11",
"ref": "refs/heads/fix-broken-link",
"path": "lib/ansible/modules/remote_management/ucs/ucs_query.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7"
},
{
"name": "Shell",
"bytes": "246"
}
],
"symlink_target": ""
} |
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from resource_management import *
from ambari_commons.constants import AMBARI_SUDO_BINARY
from resource_management.libraries.functions import format
from resource_management.libraries.functions import conf_select
from resource_management.libraries.functions import stack_select
from resource_management.libraries.functions import StackFeature
from resource_management.libraries.functions import upgrade_summary
from resource_management.libraries.functions.stack_features import check_stack_feature
from resource_management.libraries.functions.default import default
from resource_management.libraries.functions import get_kinit_path
from resource_management.libraries.functions import get_port_from_url
from resource_management.libraries.functions.get_not_managed_resources import get_not_managed_resources
from resource_management.libraries.functions.setup_atlas_hook import has_atlas_in_cluster
from resource_management.libraries.script.script import Script
from resource_management.libraries.functions.lzo_utils import should_install_lzo
from resource_management.libraries.functions.expect import expect
from resource_management.libraries.functions.get_architecture import get_architecture
from resource_management.libraries.functions.stack_features import get_stack_feature_version
from resource_management.libraries.functions.stack_tools import get_stack_name
from resource_management.libraries.functions.version import get_major_version
from resource_management.core.utils import PasswordString
from ambari_commons.credential_store_helper import get_password_from_credential_store
from urlparse import urlparse
import status_params
import os
import re  # explicit import for the OOZIE_HTTPS_PORT regex (was relying on star imports)
# server configurations
config = Script.get_config()
tmp_dir = Script.get_tmp_dir()
sudo = AMBARI_SUDO_BINARY
architecture = get_architecture()
# Needed since this writes out the Atlas Hive Hook config file.
cluster_name = config['clusterName']
serviceName = config['serviceName']
role = config['role']
hostname = config["hostname"]
# New Cluster Stack Version that is defined during the RESTART of a Rolling Upgrade
version = default("/commandParams/version", None)
stack_name = status_params.stack_name
stack_name_uppercase = stack_name.upper()
# 'upgrade' or 'downgrade' during an upgrade command; None otherwise.
upgrade_direction = default("/commandParams/upgrade_direction", None)
agent_stack_retry_on_unavailability = config['hostLevelParams']['agent_stack_retry_on_unavailability']
agent_stack_retry_count = expect("/hostLevelParams/agent_stack_retry_count", int)
stack_root = status_params.stack_root
# The source stack will be present during a cross-stack upgrade.
# E.g., BigInsights-4.2.5 or HDP-2.6
source_stack = default("/commandParams/source_stack", None)
if source_stack is None:
  source_stack = upgrade_summary.get_source_stack("OOZIE")
# This variable name is important, do not change
source_stack_name = get_stack_name(source_stack)
stack_version_unformatted = status_params.stack_version_unformatted
stack_version_formatted = status_params.stack_version_formatted
major_stack_version = get_major_version(stack_version_formatted)
version_for_stack_feature_checks = get_stack_feature_version(config)
# Hadoop locations resolved for the currently selected stack version.
hadoop_conf_dir = conf_select.get_hadoop_conf_dir()
hadoop_bin_dir = stack_select.get_hadoop_dir("bin")
hadoop_lib_home = stack_select.get_hadoop_dir("lib")
if stack_version_formatted and check_stack_feature(StackFeature.ROLLING_UPGRADE,stack_version_formatted):
stack_version = None
upgrade_stack = stack_select._get_upgrade_stack()
if upgrade_stack is not None and len(upgrade_stack) == 2 and upgrade_stack[1] is not None:
stack_version = upgrade_stack[1]
# oozie-server or oozie-client, depending on role
oozie_root = status_params.component_directory
# using the correct oozie root dir, format the correct location
oozie_lib_dir = format("{stack_root}/current/{oozie_root}")
oozie_setup_sh = format("{stack_root}/current/{oozie_root}/bin/oozie-setup.sh")
oozie_webapps_dir = format("{stack_root}/current/{oozie_root}/oozie-server/webapps")
oozie_webapps_conf_dir = format("{stack_root}/current/{oozie_root}/oozie-server/conf")
oozie_libext_dir = format("{stack_root}/current/{oozie_root}/libext")
oozie_server_dir = format("{stack_root}/current/{oozie_root}/oozie-server")
oozie_shared_lib = format("{stack_root}/current/{oozie_root}/share")
oozie_home = format("{stack_root}/current/{oozie_root}")
oozie_bin_dir = format("{stack_root}/current/{oozie_root}/bin")
oozie_examples_regex = format("{stack_root}/current/{oozie_root}/doc")
# set the falcon home for copying JARs; if in an upgrade, then use the version of falcon that
# matches the version of oozie
falcon_home = format("{stack_root}/current/falcon-client")
if stack_version is not None:
falcon_home = '{0}/{1}/falcon'.format(stack_root, stack_version)
conf_dir = format("{stack_root}/current/{oozie_root}/conf")
hive_conf_dir = format("{conf_dir}/action-conf/hive")
else:
oozie_lib_dir = "/var/lib/oozie"
oozie_setup_sh = "/usr/lib/oozie/bin/oozie-setup.sh"
oozie_webapps_dir = "/var/lib/oozie/oozie-server/webapps/"
oozie_webapps_conf_dir = "/var/lib/oozie/oozie-server/conf"
oozie_libext_dir = "/usr/lib/oozie/libext"
oozie_server_dir = "/var/lib/oozie/oozie-server"
oozie_shared_lib = "/usr/lib/oozie/share"
oozie_home = "/usr/lib/oozie"
oozie_bin_dir = "/usr/bin"
falcon_home = '/usr/lib/falcon'
conf_dir = "/etc/oozie/conf"
hive_conf_dir = "/etc/oozie/conf/action-conf/hive"
oozie_examples_regex = "/usr/share/doc/oozie-*"
execute_path = oozie_bin_dir + os.pathsep + hadoop_bin_dir
# Service accounts and smoke-test user.
oozie_user = config['configurations']['oozie-env']['oozie_user']
smokeuser = config['configurations']['cluster-env']['smokeuser']
smokeuser_principal = config['configurations']['cluster-env']['smokeuser_principal_name']
smoke_hdfs_user_mode = 0770
service_check_queue_name = default('/configurations/yarn-env/service_check.queue.name', 'default')
# This config actually contains {oozie_user}
oozie_admin_users = format(config['configurations']['oozie-env']['oozie_admin_users'])
user_group = config['configurations']['cluster-env']['user_group']
jdk_location = config['hostLevelParams']['jdk_location']
check_db_connection_jar_name = "DBConnectionVerification.jar"
check_db_connection_jar = format("/usr/lib/ambari-agent/{check_db_connection_jar_name}")
# NOTE(review): path lacks the leading '/' used by every other default() call;
# default() splits on '/' and drops empty segments, so it appears equivalent — confirm.
oozie_tmp_dir = default("configurations/oozie-env/oozie_tmp_dir", "/var/tmp/oozie")
oozie_hdfs_user_dir = format("/user/{oozie_user}")
oozie_pid_dir = status_params.oozie_pid_dir
pid_file = status_params.pid_file
hadoop_jar_location = "/usr/lib/hadoop/"
java_share_dir = "/usr/share/java"
java64_home = config['hostLevelParams']['java_home']
java_exec = format("{java64_home}/bin/java")
# This variable name is important, do not change
ext_js_file = "ext-2.2.zip"
# During a cross-stack migration, the source location will be different
# This variable name is important, do not change
ext_js_path = format("/usr/share/{stack_name_uppercase}-oozie/{ext_js_file}")
security_enabled = config['configurations']['cluster-env']['security_enabled']
oozie_heapsize = config['configurations']['oozie-env']['oozie_heapsize']
oozie_permsize = config['configurations']['oozie-env']['oozie_permsize']
limits_conf_dir = "/etc/security/limits.d"
oozie_user_nofile_limit = config['configurations']['oozie-env']['oozie_user_nofile_limit']
oozie_user_nproc_limit = config['configurations']['oozie-env']['oozie_user_nproc_limit']
# Kerberos-related paths and principals.
kinit_path_local = get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
oozie_service_keytab = config['configurations']['oozie-site']['oozie.service.HadoopAccessorService.keytab.file']
oozie_principal = config['configurations']['oozie-site']['oozie.service.HadoopAccessorService.kerberos.principal']
http_principal = config['configurations']['oozie-site']['oozie.authentication.kerberos.principal']
oozie_site = config['configurations']['oozie-site']
# Need this for yarn.nodemanager.recovery.dir in yarn-site
yarn_log_dir_prefix = config['configurations']['yarn-env']['yarn_log_dir_prefix']
yarn_resourcemanager_address = config['configurations']['yarn-site']['yarn.resourcemanager.address']
zk_namespace = default('/configurations/oozie-site/oozie.zookeeper.namespace', 'oozie')
zk_connection_string = default('/configurations/oozie-site/oozie.zookeeper.connection.string', None)
jaas_file = os.path.join(conf_dir, 'zkmigrator_jaas.conf')
stack_supports_zk_security = check_stack_feature(StackFeature.SECURE_ZOOKEEPER, version_for_stack_feature_checks)
credential_store_enabled = False
if 'credentialStoreEnabled' in config:
  credential_store_enabled = config['credentialStoreEnabled']
# When kerberized, work on a *copy* of oozie-site so HA-specific principal
# and keytab overrides do not leak back into the shared config dict.
if security_enabled:
  oozie_site = dict(config['configurations']['oozie-site'])
  oozie_principal_with_host = oozie_principal.replace('_HOST', hostname)
  # If a user-supplied oozie.ha.authentication.kerberos.principal property exists in oozie-site,
  # use it to replace the existing oozie.authentication.kerberos.principal value. This is to ensure
  # that any special principal name needed for HA is used rather than the Ambari-generated value
  if "oozie.ha.authentication.kerberos.principal" in oozie_site:
    oozie_site['oozie.authentication.kerberos.principal'] = oozie_site['oozie.ha.authentication.kerberos.principal']
    http_principal = oozie_site['oozie.authentication.kerberos.principal']
  # If a user-supplied oozie.ha.authentication.kerberos.keytab property exists in oozie-site,
  # use it to replace the existing oozie.authentication.kerberos.keytab value. This is to ensure
  # that any special keytab file needed for HA is used rather than the Ambari-generated value
  if "oozie.ha.authentication.kerberos.keytab" in oozie_site:
    oozie_site['oozie.authentication.kerberos.keytab'] = oozie_site['oozie.ha.authentication.kerberos.keytab']
  if stack_version_formatted and check_stack_feature(StackFeature.OOZIE_HOST_KERBEROS, stack_version_formatted):
    #older versions of oozie have problems when using _HOST in principal
    oozie_site['oozie.service.HadoopAccessorService.kerberos.principal'] = oozie_principal_with_host
    oozie_site['oozie.authentication.kerberos.principal'] = http_principal.replace('_HOST', hostname)
smokeuser_keytab = config['configurations']['cluster-env']['smokeuser_keytab']
oozie_keytab = default("/configurations/oozie-env/oozie_keytab", oozie_service_keytab)
oozie_env_sh_template = config['configurations']['oozie-env']['content']
oracle_driver_jar_name = "ojdbc6.jar"
oozie_metastore_user_name = config['configurations']['oozie-site']['oozie.service.JPAService.jdbc.username']
# JDBC password comes from the credential store when enabled, otherwise from
# plain config; a missing provider path with the store enabled is fatal.
if credential_store_enabled:
  if 'hadoop.security.credential.provider.path' in config['configurations']['oozie-site']:
    cs_lib_path = config['configurations']['oozie-site']['credentialStoreClassPath']
    java_home = config['hostLevelParams']['java_home']
    alias = 'oozie.service.JPAService.jdbc.password'
    provider_path = config['configurations']['oozie-site']['hadoop.security.credential.provider.path']
    oozie_metastore_user_passwd = PasswordString(get_password_from_credential_store(alias, provider_path, cs_lib_path, java_home, jdk_location))
  else:
    raise Exception("hadoop.security.credential.provider.path property should be set")
else:
  oozie_metastore_user_passwd = default("/configurations/oozie-site/oozie.service.JPAService.jdbc.password","")
oozie_jdbc_connection_url = default("/configurations/oozie-site/oozie.service.JPAService.jdbc.url", "")
oozie_log_dir = config['configurations']['oozie-env']['oozie_log_dir']
oozie_data_dir = config['configurations']['oozie-env']['oozie_data_dir']
oozie_server_port = get_port_from_url(config['configurations']['oozie-site']['oozie.base.url'])
oozie_server_admin_port = config['configurations']['oozie-env']['oozie_admin_port']
# Any https-related setting in oozie-env or oozie-site switches setup to secure mode.
if 'export OOZIE_HTTPS_PORT' in oozie_env_sh_template or 'oozie.https.port' in config['configurations']['oozie-site'] or 'oozie.https.keystore.file' in config['configurations']['oozie-site'] or 'oozie.https.keystore.pass' in config['configurations']['oozie-site']:
  oozie_secure = '-secure'
else:
  oozie_secure = ''
https_port = None
# try to get https port from oozie-env content
# (regex compiled once instead of on every template line)
_https_port_re = re.compile(r"export\s+OOZIE_HTTPS_PORT=(\d+)")
for line in oozie_env_sh_template.splitlines():
  result = _https_port_re.match(line)
  if result is not None:
    https_port = result.group(1)
# or from oozie-site.xml
if https_port is None and 'oozie.https.port' in config['configurations']['oozie-site']:
  https_port = config['configurations']['oozie-site']['oozie.https.port']
oozie_base_url = config['configurations']['oozie-site']['oozie.base.url']
service_check_job_name = default("/configurations/oozie-env/service_check_job_name", "no-op")
# construct proper url for https
if https_port is not None:
  parsed_url = urlparse(oozie_base_url)
  oozie_base_url = oozie_base_url.replace(parsed_url.scheme, "https")
  if parsed_url.port is None:
    # BUG FIX: str.replace returns a new string; the result was previously
    # discarded, so the https port was never appended to the base URL.
    oozie_base_url = oozie_base_url.replace(parsed_url.hostname, ":".join([parsed_url.hostname, str(https_port)]))
  else:
    oozie_base_url = oozie_base_url.replace(str(parsed_url.port), str(https_port))
oozie_setup_sh_current = oozie_setup_sh
hdfs_site = config['configurations']['hdfs-site']
fs_root = config['configurations']['core-site']['fs.defaultFS']
# Newer stacks publish the sharelib through oozie-setup.sh; older ones
# copy the local share directory into HDFS directly.
if stack_version_formatted and check_stack_feature(StackFeature.OOZIE_SETUP_SHARED_LIB, stack_version_formatted):
  put_shared_lib_to_hdfs_cmd = format("{oozie_setup_sh} sharelib create -fs {fs_root} -locallib {oozie_shared_lib}")
# for older
else:
  put_shared_lib_to_hdfs_cmd = format("hadoop --config {hadoop_conf_dir} dfs -put {oozie_shared_lib} {oozie_hdfs_user_dir}")
# Driver class -> default connector jar name, used when no custom jar is configured.
default_connectors_map = { "com.microsoft.sqlserver.jdbc.SQLServerDriver":"sqljdbc4.jar",
                           "com.mysql.jdbc.Driver":"mysql-connector-java.jar",
                           "org.postgresql.Driver":"postgresql-jdbc.jar",
                           "oracle.jdbc.driver.OracleDriver":"ojdbc.jar",
                           "sap.jdbc4.sqlanywhere.IDriver":"sajdbc4.jar"}
jdbc_driver_name = default("/configurations/oozie-site/oozie.service.JPAService.jdbc.driver", "")
# NOT SURE THAT IT'S A GOOD IDEA TO USE PATH TO CLASS IN DRIVER, MAYBE IT WILL BE BETTER TO USE DB TYPE.
# BECAUSE PATH TO CLASSES COULD BE CHANGED
sqla_db_used = False
previous_jdbc_jar_name = None
# Resolve the connector jar (and the previous one kept for upgrades) per driver class.
if jdbc_driver_name == "com.microsoft.sqlserver.jdbc.SQLServerDriver":
  jdbc_driver_jar = default("/hostLevelParams/custom_mssql_jdbc_name", None)
  previous_jdbc_jar_name = default("/hostLevelParams/previous_custom_mssql_jdbc_name", None)
elif jdbc_driver_name == "com.mysql.jdbc.Driver":
  jdbc_driver_jar = default("/hostLevelParams/custom_mysql_jdbc_name", None)
  previous_jdbc_jar_name = default("/hostLevelParams/previous_custom_mysql_jdbc_name", None)
elif jdbc_driver_name == "org.postgresql.Driver":
  jdbc_driver_jar = format("{oozie_home}/libserver/postgresql-9.0-801.jdbc4.jar") #oozie using it's own postgres jdbc
  previous_jdbc_jar_name = None
elif jdbc_driver_name == "oracle.jdbc.driver.OracleDriver":
  jdbc_driver_jar = default("/hostLevelParams/custom_oracle_jdbc_name", None)
  previous_jdbc_jar_name = default("/hostLevelParams/previous_custom_oracle_jdbc_name", None)
elif jdbc_driver_name == "sap.jdbc4.sqlanywhere.IDriver":
  jdbc_driver_jar = default("/hostLevelParams/custom_sqlanywhere_jdbc_name", None)
  previous_jdbc_jar_name = default("/hostLevelParams/previous_custom_sqlanywhere_jdbc_name", None)
  sqla_db_used = True
else:
  jdbc_driver_jar = ""
  jdbc_symlink_name = ""
  previous_jdbc_jar_name = None
  # (removed a dead `default("/hostLevelParams/custom_sqlanywhere_jdbc_name", None)`
  # call whose result was discarded)
driver_curl_source = format("{jdk_location}/{jdbc_driver_jar}")
downloaded_custom_connector = format("{tmp_dir}/{jdbc_driver_jar}")
if jdbc_driver_name == "org.postgresql.Driver":
  # Postgres uses oozie's bundled driver in place; nothing is copied to libext.
  target = jdbc_driver_jar
  previous_jdbc_jar = None
else:
  target = format("{oozie_libext_dir}/{jdbc_driver_jar}")
  previous_jdbc_jar = format("{oozie_libext_dir}/{previous_jdbc_jar_name}")
#constants for type2 jdbc
jdbc_libs_dir = format("{oozie_libext_dir}/native/lib64")
lib_dir_available = os.path.exists(jdbc_libs_dir)
if sqla_db_used:
  jars_path_in_archive = format("{tmp_dir}/sqla-client-jdbc/java/*")
  libs_path_in_archive = format("{tmp_dir}/sqla-client-jdbc/native/lib64/*")
  downloaded_custom_connector = format("{tmp_dir}/{jdbc_driver_jar}")
hdfs_share_dir = format("{oozie_hdfs_user_dir}/share")
ambari_server_hostname = config['clusterHostInfo']['ambari_server_host'][0]
falcon_host = default("/clusterHostInfo/falcon_server_hosts", [])
has_falcon_host = len(falcon_host) > 0
oozie_server_hostnames = default("/clusterHostInfo/oozie_server", [])
oozie_server_hostnames = sorted(oozie_server_hostnames)
# NOTE(review): path lacks the leading '/' used elsewhere; default() splits on
# '/' and drops empty segments, so it appears equivalent — confirm.
oozie_log_maxhistory = default('configurations/oozie-log4j/oozie_log_maxhistory',720)
#oozie-log4j.properties
if (('oozie-log4j' in config['configurations']) and ('content' in config['configurations']['oozie-log4j'])):
  log4j_props = config['configurations']['oozie-log4j']['content']
else:
  log4j_props = None
oozie_hdfs_user_mode = 0775
hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
hdfs_principal_name = config['configurations']['hadoop-env']['hdfs_principal_name']
hdfs_site = config['configurations']['hdfs-site']
default_fs = config['configurations']['core-site']['fs.defaultFS']
dfs_type = default("/commandParams/dfs_type", "")
########################################################
############# Atlas related params #####################
########################################################
#region Atlas Hooks needed by Hive on Oozie
hive_atlas_application_properties = default('/configurations/hive-atlas-application.properties', {})
if has_atlas_in_cluster():
  atlas_hook_filename = default('/configurations/atlas-env/metadata_conf_file', 'atlas-application.properties')
#endregion
import functools
#create partial functions with common arguments for every HdfsResource call
#to create/delete hdfs directory/file/copyfromlocal we need to call params.HdfsResource in code
HdfsResource = functools.partial(
  HdfsResource,
  user=hdfs_user,
  hdfs_resource_ignore_file = "/var/lib/ambari-agent/data/.hdfs_resource_ignore",
  security_enabled = security_enabled,
  keytab = hdfs_user_keytab,
  kinit_path_local = kinit_path_local,
  hadoop_bin_dir = hadoop_bin_dir,
  hadoop_conf_dir = hadoop_conf_dir,
  principal_name = hdfs_principal_name,
  hdfs_site = hdfs_site,
  default_fs = default_fs,
  immutable_paths = get_not_managed_resources(),
  dfs_type = dfs_type
)
is_webhdfs_enabled = config['configurations']['hdfs-site']['dfs.webhdfs.enabled']
lzo_enabled = should_install_lzo()
| {
"content_hash": "8d0271cad221091fe47427a33db702d4",
"timestamp": "",
"source": "github",
"line_count": 389,
"max_line_length": 264,
"avg_line_length": 50.210796915167094,
"alnum_prop": 0.7489248412860946,
"repo_name": "arenadata/ambari",
"id": "8f388433644781d655c64875c8b68e05b3c2a80c",
"size": "19554",
"binary": false,
"copies": "1",
"ref": "refs/heads/branch-adh-1.6",
"path": "ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/params_linux.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "46700"
},
{
"name": "C",
"bytes": "331204"
},
{
"name": "C#",
"bytes": "215907"
},
{
"name": "C++",
"bytes": "257"
},
{
"name": "CSS",
"bytes": "343739"
},
{
"name": "CoffeeScript",
"bytes": "8465"
},
{
"name": "Dockerfile",
"bytes": "6387"
},
{
"name": "EJS",
"bytes": "777"
},
{
"name": "FreeMarker",
"bytes": "2654"
},
{
"name": "Gherkin",
"bytes": "990"
},
{
"name": "Groovy",
"bytes": "15882"
},
{
"name": "HTML",
"bytes": "717983"
},
{
"name": "Handlebars",
"bytes": "1819641"
},
{
"name": "Java",
"bytes": "29172298"
},
{
"name": "JavaScript",
"bytes": "18571926"
},
{
"name": "Jinja",
"bytes": "1490416"
},
{
"name": "Less",
"bytes": "412933"
},
{
"name": "Makefile",
"bytes": "11111"
},
{
"name": "PHP",
"bytes": "149648"
},
{
"name": "PLpgSQL",
"bytes": "287501"
},
{
"name": "PowerShell",
"bytes": "2090340"
},
{
"name": "Python",
"bytes": "18507704"
},
{
"name": "R",
"bytes": "3943"
},
{
"name": "Ruby",
"bytes": "38590"
},
{
"name": "SCSS",
"bytes": "40072"
},
{
"name": "Shell",
"bytes": "924115"
},
{
"name": "Stylus",
"bytes": "820"
},
{
"name": "TSQL",
"bytes": "42351"
},
{
"name": "Vim script",
"bytes": "5813"
},
{
"name": "sed",
"bytes": "2303"
}
],
"symlink_target": ""
} |
import logging
import base64, io
from struct import unpack
from xml.dom.minidom import parseString
import zipfile
import olefile
from . import base
from .common import _parse_encryptionheader, _parse_encryptionverifier
from ..method.ecma376_agile import ECMA376Agile
from ..method.ecma376_standard import ECMA376Standard
# Module-level logger; NullHandler keeps the library silent unless the
# embedding application configures logging itself.
logger = logging.getLogger(__name__)
logger.addHandler(logging.NullHandler())
def _parseinfo_standard(ole):
    """Parse a standard (ECMA-376) EncryptionInfo stream into header/verifier dicts.

    *ole* is positioned just past the 4-byte version field; the layout is:
    header flags (4 bytes), header size (4 bytes), the EncryptionHeader blob,
    then the EncryptionVerifier blob (remainder of the stream).
    """
    headerFlags, = unpack('<I', ole.read(4))  # flags currently unused; read advances the stream
    encryptionHeaderSize, = unpack('<I', ole.read(4))
    block = ole.read(encryptionHeaderSize)
    blob = io.BytesIO(block)
    header = _parse_encryptionheader(blob)
    block = ole.read()
    blob = io.BytesIO(block)
    # (removed an unused algIdMap literal that was never referenced)
    # algId & 0xFF00 == 0x6600 marks the AES family; anything else is assumed RC4.
    verifier = _parse_encryptionverifier(blob, "AES" if header['algId'] & 0xFF00 == 0x6600 else "RC4")  # TODO: Fix
    info = {
        'header': header,
        'verifier': verifier,
    }
    return info
def _parseinfo_agile(ole):
ole.seek(8)
xml = parseString(ole.read())
keyDataSalt = base64.b64decode(xml.getElementsByTagName('keyData')[0].getAttribute('saltValue'))
keyDataHashAlgorithm = xml.getElementsByTagName('keyData')[0].getAttribute('hashAlgorithm')
keyDataBlockSize = int(xml.getElementsByTagName('keyData')[0].getAttribute('blockSize'))
encryptedHmacKey = base64.b64decode(xml.getElementsByTagName('dataIntegrity')[0].getAttribute('encryptedHmacKey'))
encryptedHmacValue = base64.b64decode(xml.getElementsByTagName('dataIntegrity')[0].getAttribute('encryptedHmacValue'))
password_node = xml.getElementsByTagNameNS("http://schemas.microsoft.com/office/2006/keyEncryptor/password", 'encryptedKey')[0]
spinValue = int(password_node.getAttribute('spinCount'))
encryptedKeyValue = base64.b64decode(password_node.getAttribute('encryptedKeyValue'))
encryptedVerifierHashInput = base64.b64decode(password_node.getAttribute('encryptedVerifierHashInput'))
encryptedVerifierHashValue = base64.b64decode(password_node.getAttribute('encryptedVerifierHashValue'))
passwordSalt = base64.b64decode(password_node.getAttribute('saltValue'))
passwordHashAlgorithm = password_node.getAttribute('hashAlgorithm')
passwordKeyBits = int(password_node.getAttribute('keyBits'))
info = {
'keyDataSalt': keyDataSalt,
'keyDataHashAlgorithm': keyDataHashAlgorithm,
'keyDataBlockSize': keyDataBlockSize,
'encryptedHmacKey': encryptedHmacKey,
'encryptedHmacValue': encryptedHmacValue,
'encryptedVerifierHashInput': encryptedVerifierHashInput,
'encryptedVerifierHashValue': encryptedVerifierHashValue,
'encryptedKeyValue': encryptedKeyValue,
'spinValue': spinValue,
'passwordSalt': passwordSalt,
'passwordHashAlgorithm': passwordHashAlgorithm,
'passwordKeyBits': passwordKeyBits,
}
return info
def _parseinfo(ole):
versionMajor, versionMinor = unpack('<HH', ole.read(4))
if versionMajor == 4 and versionMinor == 4: # Agile
return 'agile', _parseinfo_agile(ole)
elif versionMajor in [2, 3, 4] and versionMinor == 2: # Standard
return 'standard', _parseinfo_standard(ole)
elif versionMajor in [3, 4] and versionMinor == 3: # Extensible
raise Exception("Unsupported EncryptionInfo version (Extensible Encryption)")
class OOXMLFile(base.BaseOfficeFile):
    """An OOXML (ECMA-376) document, possibly password/key encrypted."""

    def __init__(self, file):
        """Wrap *file* (a binary file-like object).

        An OLE container means the document is encrypted (payload and
        encryption descriptor live in OLE streams); a plain zip means an
        ordinary unencrypted OOXML package.
        """
        self.format = "ooxml"
        file.seek(0)  # TODO: Investigate the effect (required for olefile.isOleFile)
        # olefile cannot process non password protected ooxml files.
        # TODO: this code is duplicate of OfficeFile(). Merge?
        if olefile.isOleFile(file):
            ole = olefile.OleFileIO(file)
            self.file = ole
            # The 'EncryptionInfo' stream identifies the scheme and its parameters.
            with self.file.openstream('EncryptionInfo') as stream:
                self.type, self.info = _parseinfo(stream)
            logger.debug("OOXMLFile.type: {}".format(self.type))
            self.secret_key = None
            if self.type == 'agile':
                # TODO: Support aliases?
                self.keyTypes = ('password', 'private_key', 'secret_key')
            elif self.type == 'standard':
                self.keyTypes = ('password', 'secret_key')
            elif self.type == 'extensible':
                pass
        elif zipfile.is_zipfile(file):
            # Unencrypted package; decrypt() is not applicable.
            self.file = file
            self.type, self.info = None, None
            self.secret_key = None
        else:
            raise Exception("Unsupported file format")

    def load_key(self, password=None, private_key=None, secret_key=None, verify_password=False):
        """Derive and store the secret key from a password, private key, or raw key.

        Exactly one of *password*, *private_key*, *secret_key* is used, in
        that order of precedence. With ``verify_password=True`` the derived
        key is checked against the stored verifier and an Exception is
        raised on mismatch.
        """
        if password:
            if self.type == 'agile':
                self.secret_key = ECMA376Agile.makekey_from_password(
                    password,
                    self.info['passwordSalt'],
                    self.info['passwordHashAlgorithm'],
                    self.info['encryptedKeyValue'],
                    self.info['spinValue'],
                    self.info['passwordKeyBits']
                )
                if verify_password:
                    verified = ECMA376Agile.verify_password(
                        password,
                        self.info['passwordSalt'],
                        self.info['passwordHashAlgorithm'],
                        self.info['encryptedVerifierHashInput'],
                        self.info['encryptedVerifierHashValue'],
                        self.info['spinValue'],
                        self.info['passwordKeyBits']
                    )
                    if not verified:
                        raise Exception("Key verification failed")
            elif self.type == 'standard':
                self.secret_key = ECMA376Standard.makekey_from_password(
                    password,
                    self.info['header']['algId'],
                    self.info['header']['algIdHash'],
                    self.info['header']['providerType'],
                    self.info['header']['keySize'],
                    self.info['verifier']['saltSize'],
                    self.info['verifier']['salt']
                )
                if verify_password:
                    verified = ECMA376Standard.verifykey(
                        self.secret_key,
                        self.info['verifier']['encryptedVerifier'],
                        self.info['verifier']['encryptedVerifierHash']
                    )
                    if not verified:
                        raise Exception("Key verification failed")
            elif self.type == 'extensible':
                pass
        elif private_key:
            if self.type == 'agile':
                self.secret_key = ECMA376Agile.makekey_from_privkey(private_key, self.info['encryptedKeyValue'])
            else:
                raise Exception("Unsupported key type for the encryption method")
        elif secret_key:
            self.secret_key = secret_key

    def decrypt(self, ofile, verify_integrity=False):
        """Decrypt the 'EncryptedPackage' stream and write the plaintext to *ofile*.

        NOTE(review): the zip-validity check runs *after* the plaintext has
        been written, so on a wrong key garbage may already be in *ofile*
        when the Exception is raised — confirm whether callers rely on this.
        """
        if self.type == 'agile':
            with self.file.openstream('EncryptedPackage') as stream:
                if verify_integrity:
                    verified = ECMA376Agile.verify_integrity(
                        self.secret_key,
                        self.info['keyDataSalt'],
                        self.info['keyDataHashAlgorithm'],
                        self.info['keyDataBlockSize'],
                        self.info['encryptedHmacKey'],
                        self.info['encryptedHmacValue'],
                        stream,
                    )
                    if not verified:
                        raise Exception('Payload integrity verification failed')
                obuf = ECMA376Agile.decrypt(
                    self.secret_key, self.info['keyDataSalt'],
                    self.info['keyDataHashAlgorithm'],
                    stream
                )
                ofile.write(obuf)
        elif self.type == 'standard':
            with self.file.openstream('EncryptedPackage') as stream:
                obuf = ECMA376Standard.decrypt(self.secret_key, stream)
                ofile.write(obuf)

        # If the file is successfully decrypted, there must be a valid OOXML file, i.e. a valid zip file
        if not zipfile.is_zipfile(io.BytesIO(obuf)):
            raise Exception("The file could not be decrypted with this password")

    def is_encrypted(self):
        """Heuristic: an OLE container implies the package is encrypted."""
        # Heuristic
        if isinstance(self.file, olefile.OleFileIO):
            return True
        else:
            return False
| {
"content_hash": "c8da53bfc08034683a224066278d1e55",
"timestamp": "",
"source": "github",
"line_count": 196,
"max_line_length": 131,
"avg_line_length": 43.97959183673469,
"alnum_prop": 0.5926914153132251,
"repo_name": "nolze/ms-offcrypto-tool",
"id": "35d1593aa89cb2579f35ecbcf32eca0d853aaaf9",
"size": "8620",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "msoffcrypto/format/ooxml.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "5517"
},
{
"name": "Shell",
"bytes": "158"
}
],
"symlink_target": ""
} |
# Django test settings: start from the base settings and layer on
# CI/test-only additions.
from settings import *
# django_hudson integrates with Jenkins/Hudson CI (test reports, coverage).
INSTALLED_APPS += ('django_hudson', )
# Apps whose tests and coverage the CI tasks should target.
PROJECT_APPS = ('jamsession', )
HUDSON_TASKS = ('coverage', 'tests')
import logging
# Silence INFO/DEBUG noise during test runs.
logging.basicConfig(level=logging.WARNING)
| {
"content_hash": "54453d7042ed3ec63a450f12bc843b3b",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 42,
"avg_line_length": 19.2,
"alnum_prop": 0.71875,
"repo_name": "cmheisel/django-jamsession",
"id": "5d057359f2f08ecec976396554c16bfdf518ea55",
"size": "192",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "example/test_settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "33657"
},
{
"name": "Shell",
"bytes": "248"
}
],
"symlink_target": ""
} |
# WSGI entry point: exposes the Flask app as `application` for mod_wsgi.
import sys
# Make the deployment directory importable so the `ashflash` package resolves.
sys.path.append('/home/dopefactor2/webapps/flask/htdocs')
from ashflash import app as application
# Side-effect import: registers the route handlers on the app.
import views
# --- original mod_wsgi "hello world" handler, kept for reference ---
# import sys
# def application(environ, start_response):
#     output = 'Welcome to your mod_wsgi website! It uses:\n\nPython %s' % sys.version
#     response_headers = [
#         ('Content-Length', str(len(output))),
#         ('Content-Type', 'text/plain'),
#     ]
#     start_response('200 OK', response_headers)
#
#     return [output]
| {
"content_hash": "f6e04348eb863d13c1aa7f44ba578278",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 85,
"avg_line_length": 27.058823529411764,
"alnum_prop": 0.6652173913043479,
"repo_name": "Acour83/ashflashtheorig",
"id": "8fcfa1c08fcd6dbcd8c3010a402507b5337d8b70",
"size": "460",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "index.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "12225"
},
{
"name": "JavaScript",
"bytes": "24465"
},
{
"name": "Python",
"bytes": "6929"
}
],
"symlink_target": ""
} |
import csv
import datetime
from flask import g, jsonify, request
from server import app, sqldb
from server.auth import auth
from server.models import Account, DiningTransaction
@app.route("/dining/transactions", methods=["POST"])
@auth()
def save_dining_dollar_transactions():
    """Parse a CSV of dining-dollar transactions from the request form and
    persist any rows newer than the account's last stored transaction.

    Expects a ``transactions`` form field containing CSV text with a header
    row and columns: date (``%m/%d/%Y %I:%M%p``), description, amount,
    balance.  Returns ``{"success": True, "error": None}`` on success, or a
    400 JSON error response.
    """
    account = g.account
    if not account:
        # DEPRECATED
        try:
            account = Account.get_account()
        except ValueError as e:
            return jsonify({"success": False, "error": str(e)}), 400
    # Date of the most recent stored transaction; older rows are skipped so
    # repeated uploads stay idempotent.
    last_transaction = (
        sqldb.session.query(DiningTransaction.date)
        .filter_by(account_id=account.id)
        .order_by(DiningTransaction.date.desc())
        .first()
    )
    decoded_content = request.form.get("transactions")
    # Robustness fix: a missing form field previously crashed with
    # AttributeError on None.splitlines().
    if decoded_content is None:
        return jsonify({"success": False, "error": "No transactions provided."}), 400
    cr = csv.reader(decoded_content.splitlines(), delimiter=",")
    # Create list of rows, remove headers, and reverse so in order of date
    row_list = list(cr)
    if row_list:
        # Robustness fix: pop(0) on an empty upload raised IndexError.
        row_list.pop(0)
    row_list.reverse()
    for row in row_list:
        # Guard clauses: skip malformed rows and the "no history" sentinel.
        if len(row) != 4:
            continue
        if row[0] == "No transaction history found for this date range.":
            continue
        date = datetime.datetime.strptime(row[0], "%m/%d/%Y %I:%M%p")
        if last_transaction is None or date > last_transaction.date:
            transaction = DiningTransaction(
                account_id=account.id,
                date=date,
                description=row[1],
                amount=float(row[2]),
                balance=float(row[3]),
            )
            sqldb.session.add(transaction)
            sqldb.session.commit()
    return jsonify({"success": True, "error": None})
@app.route("/dining/transactions", methods=["GET"])
@auth(nullable=True)
def get_dining_dollar_transactions():
    """Return the account's dining-dollar transactions, newest first, as JSON."""
    account = g.account
    if not account:
        # DEPRECATED
        try:
            account = Account.get_account()
        except ValueError as e:
            return jsonify({"success": False, "error": str(e)}), 400
    query = (
        sqldb.session.query(DiningTransaction)
        .filter_by(account_id=account.id)
        .order_by(DiningTransaction.date.desc())
    )
    # Serialize each row; dates are emitted as ISO-8601 (second precision).
    results = [
        {
            "date": datetime.datetime.strftime(txn.date, "%Y-%m-%dT%H:%M:%S"),
            "description": txn.description,
            "amount": txn.amount,
            "balance": txn.balance,
        }
        for txn in query
    ]
    return jsonify({"results": results})
| {
"content_hash": "effdee20649c4c5121fa2deafa1da85a",
"timestamp": "",
"source": "github",
"line_count": 87,
"max_line_length": 80,
"avg_line_length": 30.310344827586206,
"alnum_prop": 0.5676905574516496,
"repo_name": "pennlabs/penn-mobile-server",
"id": "01a2c74c91fd2907d30478740773a95bd4c86c70",
"size": "2637",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "server/dining/transactions.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "229483"
},
{
"name": "Python",
"bytes": "67531"
}
],
"symlink_target": ""
} |
"""
Filename: calc_global_metric.py
Author: Damien Irving, irving.damien@gmail.com
Description: Calculate global metric
"""
# Import general Python modules
import sys, os, pdb
import argparse
import numpy
import iris
import iris.analysis.cartography
from iris.experimental.equalise_cubes import equalise_attributes
# Import my modules
cwd = os.getcwd()
repo_dir = '/'
# Walk the cwd components upward-from-root until the 'ocean-analysis' repo
# directory is found, so its 'modules' dir can be put on sys.path.
for directory in cwd.split('/')[1:]:
    repo_dir = os.path.join(repo_dir, directory)
    if directory == 'ocean-analysis':
        break
modules_dir = os.path.join(repo_dir, 'modules')
sys.path.append(modules_dir)
try:
    import general_io as gio
    import convenient_universal as uconv
    import timeseries
except ImportError:
    raise ImportError('Must run this script from anywhere within the ocean-analysis git repo')
# Define functions
# Global accumulator for the 'history' attribute of each loaded input file
# (populated by the save_history iris load callback).
history = []
def save_history(cube, field, filename):
    """Save the history attribute when reading the data.

    (This is required because the history attribute differs between input
    files and is therefore deleted upon equalising attributes.)

    Used as an iris load callback, hence the fixed
    ``(cube, field, filename)`` signature; ``field`` and ``filename`` are
    unused. Appends to the module-level ``history`` list.
    """
    history.append(cube.attributes['history'])
def read_area(area_file):
    """Load the optional cell-area cube, or return None when no file given."""
    return iris.load_cube(area_file) if area_file else None
def set_attributes(inargs, data_cube, area_cube, clim_cube):
    """Build the output cube attributes, recording input-file provenance."""
    atts = data_cube.attributes
    # Map each input file to the history string it carried.
    infile_history = {
        inargs.infiles[0]: history[0],
        inargs.climatology: clim_cube.attributes['history'],
    }
    if area_cube:
        infile_history[inargs.area_file] = area_cube.attributes['history']
    atts['history'] = gio.write_metadata(file_info=infile_history)
    return atts
def calc_mean_anomaly(data_cube, clim_cube, sign, grid_areas):
    """Calculate the mean of all the positive or negative anomalies.

    Args:
        data_cube: cube to average; its data mask is MUTATED in place, which
            is why callers pass ``.copy()``.
        clim_cube: climatology whose sign selects which cells contribute.
        sign: 'positive' or 'negative' — any other value leaves ``new_mask``
            unbound and raises NameError.
        grid_areas: 2D cell-area weights for the spatial mean.

    Returns:
        The area-weighted spatial mean over the selected cells, with the
        horizontal coordinates removed (leaving the time series).
    """
    # Broadcast the 2D climatology/areas across the leading (time) axis
    # so they align with data_cube — assumes (t, lat, lon); TODO confirm.
    clim_data = uconv.broadcast_array(clim_cube.data, [1, 2], data_cube.shape)
    grid_areas = uconv.broadcast_array(grid_areas, [1, 2], data_cube.shape)
    # Keep a cell only if it was already unmasked AND the climatological
    # anomaly there has the requested sign.
    if sign == 'positive':
        new_mask = numpy.where((data_cube.data.mask == False) & (clim_data > 0.0), False, True)
    elif sign == 'negative':
        new_mask = numpy.where((data_cube.data.mask == False) & (clim_data < 0.0), False, True)
    data_cube.data.mask = new_mask
    data_cube = data_cube.collapsed(['longitude', 'latitude'], iris.analysis.MEAN, weights=grid_areas)
    # Drop the collapsed horizontal coords so results can be subtracted.
    data_cube.remove_coord('longitude')
    data_cube.remove_coord('latitude')
    return data_cube
def calc_amplification_metric(data_cube, clim_cube, grid_areas, atts):
    """Calculate the salinity amplification metric.

    Definition: difference between the average positive and average
    negative spatial anomaly (relative to the climatological field mean).
    """
    assert data_cube.standard_name in ['sea_surface_salinity', 'sea_water_salinity']
    # Spatial anomaly of the climatology relative to its field mean.
    clim_fldmean = clim_cube.collapsed(['longitude', 'latitude'], iris.analysis.MEAN, weights=grid_areas)
    clim_spatial_anom = clim_cube - clim_fldmean
    # Average the data over positive- and negative-anomaly regions
    # separately (copies are passed because the helper mutates masks).
    mean_anom = {}
    for sign in ('positive', 'negative'):
        mean_anom[sign] = calc_mean_anomaly(
            data_cube.copy(), clim_spatial_anom.copy(), sign, grid_areas.copy()
        )
    metric = mean_anom['positive'] - mean_anom['negative']
    # Carry the variable metadata over from the input cube.
    for attr_name in ('var_name', 'standard_name', 'long_name', 'units'):
        setattr(metric, attr_name, getattr(data_cube, attr_name))
    metric.attributes = atts
    return metric
def get_area_weights(cube, area_cube):
    """Get area weights for averaging.

    Uses the supplied cell-area cube when available; otherwise derives the
    weights from the cube's lat/lon coordinates (guessing bounds if needed).
    """
    if area_cube:
        return area_cube.data
    # No area file: compute weights from coordinate bounds.
    for coord_name in ('latitude', 'longitude'):
        coord = cube.coord(coord_name)
        if not coord.has_bounds():
            coord.guess_bounds()
    return iris.analysis.cartography.area_weights(cube)
def smooth_data(cube, smooth_type):
    """Apply temporal smoothing to a data cube.

    ``smooth_type`` is either 'annual' (resample to annual means) or
    'annual_running_mean' (12-point running mean).
    """
    assert smooth_type in ['annual', 'annual_running_mean']
    if smooth_type == 'annual':
        return timeseries.convert_to_annual(cube)
    # annual_running_mean: 12-point running mean along the time axis.
    return cube.rolling_window('time', iris.analysis.MEAN, 12)
def main(inargs):
    """Run the program.

    Loads and concatenates the input files, optionally smooths them in
    time, and writes the amplification metric to ``inargs.outfile``.
    """
    # Optional depth selection (no constraint when --depth is not given).
    if inargs.depth:
        level_constraint = iris.Constraint(depth=inargs.depth)
    else:
        level_constraint = iris.Constraint()
    # The save_history callback stashes each file's history attribute
    # before equalise_attributes() strips it.
    cube = iris.load(inargs.infiles, inargs.var & level_constraint, callback=save_history)
    equalise_attributes(cube)
    iris.util.unify_time_units(cube)
    cube = cube.concatenate_cube()
    cube = gio.check_time_units(cube)
    area_cube = read_area(inargs.area_file)
    clim_cube = iris.load_cube(inargs.climatology, inargs.var & level_constraint)
    atts = set_attributes(inargs, cube, area_cube, clim_cube)
    if inargs.smoothing:
        cube = smooth_data(cube, inargs.smoothing)
    # Weights come from the climatology grid (or the supplied area file).
    area_weights = get_area_weights(clim_cube, area_cube)
    metric = calc_amplification_metric(cube, clim_cube, area_weights, atts)
    iris.save(metric, inargs.outfile)
if __name__ == '__main__':
    # Shown verbatim in --help output (RawDescriptionHelpFormatter below).
    extra_info ="""
author:
  Damien Irving, irving.damien@gmail.com
"""
    description='Calculate a global metric'
    # argument_default=SUPPRESS: options not given on the command line are
    # omitted from the namespace rather than set to None.
    parser = argparse.ArgumentParser(description=description,
                                     epilog=extra_info,
                                     argument_default=argparse.SUPPRESS,
                                     formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument("infiles", type=str, nargs='*', help="Input data files (can merge on time)")
    parser.add_argument("var", type=str, help="Input variable name (i.e. the standard_name)")
    parser.add_argument("climatology", type=str, help="Climatology file")
    parser.add_argument("outfile", type=str, help="Output file name")
    parser.add_argument("--area_file", type=str, default=None,
                        help="Input cell area file")
    parser.add_argument("--smoothing", type=str, choices=('annual', 'annual_running_mean'), default=None,
                        help="Apply smoothing to data")
    parser.add_argument("--depth", type=float, default=None,
                        help="Level selection")
    args = parser.parse_args()
    main(args)
| {
"content_hash": "221da1cb7b8e0cd617394b3dd3d9e7e9",
"timestamp": "",
"source": "github",
"line_count": 207,
"max_line_length": 111,
"avg_line_length": 31.333333333333332,
"alnum_prop": 0.6577243293246994,
"repo_name": "DamienIrving/ocean-analysis",
"id": "efd597015e8d15a7b96cd7425b32497573384307",
"size": "6486",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "data_processing/metrics/calc_my_salinity_amp.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "39906464"
},
{
"name": "Makefile",
"bytes": "171683"
},
{
"name": "Python",
"bytes": "887747"
},
{
"name": "Shell",
"bytes": "114403"
}
],
"symlink_target": ""
} |
import random
# ver 1.4
# Rebecca Iversen
# Draw five random card values and suits.

# Bug fix: the previous version shuffled `valor` FIRST and then assigned
# "Knekt"/"Dronning"/"Konge"/"Ess" by index (valor[10], valor[11], ...),
# which renamed whatever values happened to land at those positions instead
# of 11, 12, 13 and 1. Naming the face cards up front fixes that.
valor = ["Ess", 2, 3, 4, 5, 6, 7, 8, 9, 10, "Knekt", "Dronning", "Konge"]
farge = ["klover", "hjerter", "spar", "ruter"]

# Note: random.choice() already picks uniformly at random, so the old
# random.shuffle(..., random=None) calls (a deprecated argument form) were
# unnecessary and have been removed.
for _ in range(5):
    print(random.choice(valor)),
    print(random.choice(farge))
"content_hash": "ba859ad2af60f95b17eb6f97f6b22ced",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 46,
"avg_line_length": 21.964285714285715,
"alnum_prop": 0.71869918699187,
"repo_name": "github4321/IS-105_2016_Gruppe92",
"id": "d8defca6563bcd95d6c7a8a03a4138e28bcfd319",
"size": "640",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "uke04/card3.5.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "27243"
}
],
"symlink_target": ""
} |
from . import context
| {
"content_hash": "b19ec21675481e16553ca0c8f207944d",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 21,
"avg_line_length": 22,
"alnum_prop": 0.7727272727272727,
"repo_name": "brianjbuck/robie",
"id": "4d057a203df694ed726bdba5ef0950f88c1cf0ea",
"size": "22",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "258"
},
{
"name": "Python",
"bytes": "35905"
}
],
"symlink_target": ""
} |
from google.cloud.devtools import cloudbuild_v1
async def sample_update_build_trigger():
    """Demonstrate updating a Cloud Build trigger with the async client.

    Generated API sample: placeholder IDs must be replaced with real
    project/trigger values before use.
    NOTE(review): the matching "[START ...]" region tag appears to have
    been stripped; only the "[END ...]" tag below remains.
    """
    # Create a client
    client = cloudbuild_v1.CloudBuildAsyncClient()
    # Initialize request argument(s): enable build-config autodetection.
    trigger = cloudbuild_v1.BuildTrigger()
    trigger.autodetect = True
    request = cloudbuild_v1.UpdateBuildTriggerRequest(
        project_id="project_id_value",
        trigger_id="trigger_id_value",
        trigger=trigger,
    )
    # Make the request
    response = await client.update_build_trigger(request=request)
    # Handle the response
    print(response)
# [END cloudbuild_v1_generated_CloudBuild_UpdateBuildTrigger_async]
| {
"content_hash": "3713834b4a66d3bb91addd79dd865f41",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 67,
"avg_line_length": 26.916666666666668,
"alnum_prop": 0.7105263157894737,
"repo_name": "googleapis/python-cloudbuild",
"id": "e2a0b44151d8ad48c13dde51f1309c63dae4bf43",
"size": "2040",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "samples/generated_samples/cloudbuild_v1_generated_cloud_build_update_build_trigger_async.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "729451"
},
{
"name": "Shell",
"bytes": "30672"
}
],
"symlink_target": ""
} |
"""
Common distribution functions for plasmas, such as the Maxwellian or
Kappa distributions. Functionality is intended to include generation,
fitting and calculation.
"""
__all__ = [
"Maxwellian_1D",
"Maxwellian_velocity_2D",
"Maxwellian_velocity_3D",
"Maxwellian_speed_1D",
"Maxwellian_speed_2D",
"Maxwellian_speed_3D",
"kappa_velocity_1D",
"kappa_velocity_3D",
]
import astropy.units as u
import numpy as np
from scipy.special import gamma
from plasmapy.formulary.speeds import kappa_thermal_speed, thermal_speed
from plasmapy.particles import particle_input, ParticleLike
from plasmapy.utils._units_definitions import (
SPEED_DISTRIBUTION_UNITS_1D,
SPEED_DISTRIBUTION_UNITS_2D,
SPEED_DISTRIBUTION_UNITS_3D,
SPEED_UNITS,
)
def _v_drift_conversion(v_drift):
    """Return *v_drift* as a bare number in SPEED_UNITS.

    Quantities are converted and stripped of units; plain numbers (e.g. the
    default ``0``) pass through unchanged.
    """
    if not isinstance(v_drift, u.Quantity):
        return v_drift
    return v_drift.to_value(SPEED_UNITS)
@particle_input
def Maxwellian_1D(
    v, T, particle: ParticleLike = "e", v_drift=0, vTh=np.nan, units="units"
):
    r"""
    Probability distribution function of velocity for a 1D Maxwellian
    distribution.

    Evaluates the probability density at velocity ``v`` for particles of
    species ``particle`` in a plasma of temperature ``T`` following the
    Maxwellian distribution, optionally drifting at ``v_drift``.

    Parameters
    ----------
    v : `~astropy.units.Quantity`
        The velocity in units convertible to m/s.
    T : `~astropy.units.Quantity`
        The temperature in kelvin.
    particle : `str`, optional
        Particle species (e.g. ``'p'``, ``'D+'``, ``'He-4 +1'``); defaults
        to electrons.
    v_drift : `~astropy.units.Quantity`, optional
        The drift velocity in units convertible to m/s.
    vTh : `~astropy.units.Quantity`, optional
        Thermal velocity (most probable velocity) in m/s. Supply it to
        avoid recomputation, e.g. when integrating over velocity space.
    units : `str`, optional
        ``"units"`` (default) runs with unit checks and returns a Quantity;
        ``"unitless"`` skips unit handling entirely and is substantially
        faster for intensive computations.

    Returns
    -------
    f : `~astropy.units.Quantity`
        Probability density in units of velocity\ :sup:`-1`\ , normalized
        so that :math:`\int_{-∞}^{+∞} f(v) dv = 1`.

    Raises
    ------
    `TypeError`
        If a parameter is not a Quantity and cannot be converted into one.
    `~astropy.units.UnitConversionError`
        If a parameter is not in appropriate units.
    `ValueError`
        If the temperature is negative, or the particle mass or charge
        state cannot be found.

    Notes
    -----
    .. math::

        f = \sqrt{\frac{m}{2 \pi k_B T}} e^{-\frac{m}{2 k_B T} (v-V)^2}
        \equiv \frac{1}{\sqrt{\pi v_{Th}^2}} e^{-(v - v_{drift})^2 / v_{Th}^2}

    where :math:`v_{Th} = \sqrt{2 k_B T / m}` is the thermal speed.

    Examples
    --------
    >>> from astropy import units as u
    >>> v=1*u.m/u.s
    >>> Maxwellian_1D(v=v, T=30000 * u.K, particle='e', v_drift=0 * u.m / u.s)
    <Quantity 5.9163...e-07 s / m>
    """
    with_units = units == "units"
    if with_units:
        # Strip units up front so the arithmetic below runs on bare floats.
        v = v.to_value(SPEED_UNITS)
        v_drift = _v_drift_conversion(v_drift)
        T = T.to_value(u.K, equivalencies=u.temperature_energy())
        if not np.isnan(vTh):
            vTh = vTh.to_value(SPEED_UNITS)
    if np.isnan(vTh):
        # Thermal speed not supplied; derive it from the temperature.
        vTh = thermal_speed(
            T << u.K, particle=particle, method="most_probable"
        ).to_value(SPEED_UNITS)
    vth_sq = vTh**2
    rel_sq = (v - v_drift) ** 2
    # Normalized 1D Gaussian in the relative velocity.
    norm = (vth_sq * np.pi) ** (-1 / 2)
    density = norm * np.exp(-rel_sq / vth_sq)
    if with_units:
        return density << SPEED_DISTRIBUTION_UNITS_1D
    elif units == "unitless":
        return density
@particle_input
def Maxwellian_velocity_2D(
    vx,
    vy,
    T,
    particle: ParticleLike = "e",
    vx_drift=0,
    vy_drift=0,
    vTh=np.nan,
    units="units",
):
    r"""
    Probability distribution function of velocity for a 2D Maxwellian
    distribution.

    Evaluates the probability density of finding a particle with velocity
    components ``vx`` and ``vy`` (Cartesian) in an equilibrium plasma of
    temperature ``T`` following the 2D Maxwellian distribution.

    Parameters
    ----------
    vx, vy : `~astropy.units.Quantity`
        Velocity components in units convertible to m/s.
    T : `~astropy.units.Quantity`
        The temperature, preferably in kelvin.
    particle : `str`, optional
        Particle species (e.g. ``'p'``, ``'D+'``, ``'He-4 +1'``); defaults
        to electrons.
    vx_drift, vy_drift : `~astropy.units.Quantity`, optional
        Drift velocity components in units convertible to m/s.
    vTh : `~astropy.units.Quantity`, optional
        Thermal velocity (most probable) in m/s. Supply it to avoid
        recomputation, e.g. when integrating over velocity space.
    units : `str`, optional
        ``"units"`` (default) runs with unit checks and returns a Quantity;
        ``"unitless"`` skips unit handling and is substantially faster.

    Returns
    -------
    f : `~astropy.units.Quantity`
        Probability density in velocity\ :sup:`-2`\ , normalized so that
        :math:`\iiint_{0}^∞ f(\vec{v}) d\vec{v} = 1`.

    Raises
    ------
    `TypeError`
        If a parameter is not a Quantity and cannot be converted into one.
    `~astropy.units.UnitConversionError`
        If a parameter is not in appropriate units.
    `ValueError`
        If the temperature is negative, or the particle mass or charge
        state cannot be found.

    Notes
    -----
    .. math::

        f = (\pi v_{Th}^2)^{-1} \exp \left [-(\vec{v} -
        \vec{V}_{drift})^2 / v_{Th}^2 \right ]

    where :math:`v_{Th} = \sqrt{2 k_B T / m}` is the thermal speed.

    See Also
    --------
    Maxwellian_1D

    Examples
    --------
    >>> from astropy import units as u
    >>> v=1 * u.m / u.s
    >>> Maxwellian_velocity_2D(vx=v,
    ... vy=v,
    ... T=30000*u.K,
    ... particle='e',
    ... vx_drift=0 * u.m / u.s,
    ... vy_drift=0 * u.m / u.s)
    <Quantity 3.5002...e-13 s2 / m2>
    """
    with_units = units == "units"
    if with_units:
        # Strip units up front so the arithmetic below runs on bare floats.
        vx = vx.to_value(SPEED_UNITS)
        vy = vy.to_value(SPEED_UNITS)
        vx_drift = _v_drift_conversion(vx_drift)
        vy_drift = _v_drift_conversion(vy_drift)
        T = T.to_value(u.K, equivalencies=u.temperature_energy())
        if not np.isnan(vTh):
            vTh = vTh.to_value(SPEED_UNITS)
    if np.isnan(vTh):
        # Thermal speed not supplied; derive it from the temperature.
        vTh = thermal_speed(
            T << u.K, particle=particle, method="most_probable"
        ).to_value(SPEED_UNITS)
    vth_sq = vTh**2
    rel_sq = (vx - vx_drift) ** 2 + (vy - vy_drift) ** 2
    # Normalized 2D Gaussian in the relative velocity.
    norm = (vth_sq * np.pi) ** (-1)
    density = norm * np.exp(-rel_sq / vth_sq)
    if with_units:
        return density << SPEED_DISTRIBUTION_UNITS_2D
    elif units == "unitless":
        return density
@particle_input
def Maxwellian_velocity_3D(
    vx,
    vy,
    vz,
    T,
    particle: ParticleLike = "e",
    vx_drift=0,
    vy_drift=0,
    vz_drift=0,
    vTh=np.nan,
    units="units",
):
    r"""
    Probability distribution function of velocity for a 3D Maxwellian
    distribution.

    Evaluates the probability density of finding a particle with velocity
    components ``vx``, ``vy``, and ``vz`` (Cartesian) in an equilibrium
    plasma of temperature ``T`` following the 3D Maxwellian distribution.

    Parameters
    ----------
    vx, vy, vz : `~astropy.units.Quantity`
        Velocity components in units convertible to m/s.
    T : `~astropy.units.Quantity`
        The temperature, preferably in kelvin.
    particle : `str`, optional
        Particle species (e.g. ``'p'``, ``'D+'``, ``'He-4 +1'``); defaults
        to electrons.
    vx_drift, vy_drift, vz_drift : `~astropy.units.Quantity`, optional
        Drift velocity components in units convertible to m/s.
    vTh : `~astropy.units.Quantity`, optional
        Thermal velocity (most probable) in m/s. Supply it to avoid
        recomputation, e.g. when integrating over velocity space.
    units : `str`, optional
        ``"units"`` (default) runs with unit checks and returns a Quantity;
        ``"unitless"`` skips unit handling and is substantially faster.

    Returns
    -------
    f : `~astropy.units.Quantity`
        Probability density in velocity\ :sup:`-3`\ , normalized so that
        :math:`\iiint_{0}^∞ f(\vec{v}) d\vec{v} = 1`.

    Raises
    ------
    `TypeError`
        If a parameter is not a Quantity and cannot be converted into one.
    `~astropy.units.UnitConversionError`
        If a parameter is not in appropriate units.
    `ValueError`
        If the temperature is negative, or the particle mass or charge
        state cannot be found.

    Notes
    -----
    .. math::

        f = (\pi v_{Th}^2)^{-3/2} \exp \left [-(\vec{v} -
        \vec{V}_{drift})^2 / v_{Th}^2 \right ]

    where :math:`v_{Th} = \sqrt{2 k_B T / m}` is the thermal speed.

    See Also
    --------
    Maxwellian_1D

    Examples
    --------
    >>> from astropy import units as u
    >>> v=1 * u.m / u.s
    >>> Maxwellian_velocity_3D(vx=v,
    ... vy=v,
    ... vz=v,
    ... T=30000 * u.K,
    ... particle='e',
    ... vx_drift=0 * u.m / u.s,
    ... vy_drift=0 * u.m / u.s,
    ... vz_drift=0 * u.m / u.s)
    <Quantity 2.0708...e-19 s3 / m3>
    """
    with_units = units == "units"
    if with_units:
        # Strip units up front so the arithmetic below runs on bare floats.
        vx = vx.to_value(SPEED_UNITS)
        vy = vy.to_value(SPEED_UNITS)
        vz = vz.to_value(SPEED_UNITS)
        vx_drift = _v_drift_conversion(vx_drift)
        vy_drift = _v_drift_conversion(vy_drift)
        vz_drift = _v_drift_conversion(vz_drift)
        T = T.to_value(u.K, equivalencies=u.temperature_energy())
        if not np.isnan(vTh):
            vTh = vTh.to_value(SPEED_UNITS)
    if np.isnan(vTh):
        # Thermal speed not supplied; derive it from the temperature.
        vTh = thermal_speed(
            T << u.K, particle=particle, method="most_probable"
        ).to_value(SPEED_UNITS)
    vth_sq = vTh**2
    rel_sq = (vx - vx_drift) ** 2 + (vy - vy_drift) ** 2 + (vz - vz_drift) ** 2
    # Normalized 3D Gaussian in the relative velocity.
    norm = (vth_sq * np.pi) ** (-3 / 2)
    density = norm * np.exp(-rel_sq / vth_sq)
    if with_units:
        return density << SPEED_DISTRIBUTION_UNITS_3D
    elif units == "unitless":
        return density
@particle_input
def Maxwellian_speed_1D(
    v, T, particle: ParticleLike = "e", v_drift=0, vTh=np.nan, units="units"
):
    r"""
    Probability distribution function of speed for a 1D Maxwellian
    distribution.

    Evaluates the probability density of finding a particle with speed
    ``v`` in an equilibrium plasma of temperature ``T`` following the
    Maxwellian speed distribution.

    Parameters
    ----------
    v : `~astropy.units.Quantity`
        The speed in units convertible to m/s.
    T : `~astropy.units.Quantity`
        The temperature, preferably in kelvin.
    particle : `str`, optional
        Particle species (e.g. ``'p'``, ``'D+'``, ``'He-4 +1'``); defaults
        to electrons.
    v_drift : `~astropy.units.Quantity`
        The drift speed in units convertible to m/s.
    vTh : `~astropy.units.Quantity`, optional
        Thermal velocity (most probable) in m/s. Supply it to avoid
        recomputation, e.g. when integrating over velocity space.
    units : `str`, optional
        ``"units"`` (default) runs with unit checks and returns a Quantity;
        ``"unitless"`` skips unit handling and is substantially faster.

    Returns
    -------
    f : `~astropy.units.Quantity`
        Probability density in speed\ :sup:`-1`\ , normalized so that
        :math:`\int_{0}^∞ f(v) dv = 1`.

    Raises
    ------
    `TypeError`
        If a parameter is not a Quantity and cannot be converted into one.
    `~astropy.units.UnitConversionError`
        If a parameter is not in appropriate units.
    `ValueError`
        If the temperature is negative, or the particle mass or charge
        state cannot be found.

    Notes
    -----
    .. math::

        f(v) = 2 \frac{1}{(π v_{Th}^2)^{1/2}} \exp(-(v - V_{drift})^2 / v_{Th}^2 )

    where :math:`v_{Th} = \sqrt{2 k_B T / m}` is the thermal speed.

    Examples
    --------
    >>> from astropy import units as u
    >>> v=1 * u.m / u.s
    >>> Maxwellian_speed_1D(v=v, T=30000 * u.K, particle='e', v_drift=0 * u.m / u.s)
    <Quantity 1.1832...e-06 s / m>
    """
    with_units = units == "units"
    if with_units:
        # Strip units up front so the arithmetic below runs on bare floats.
        v = v.to_value(SPEED_UNITS)
        v_drift = _v_drift_conversion(v_drift)
        T = T.to_value(u.K, equivalencies=u.temperature_energy())
        if not np.isnan(vTh):
            vTh = vTh.to_value(SPEED_UNITS)
    if np.isnan(vTh):
        # Thermal speed not supplied; derive it from the temperature.
        vTh = thermal_speed(
            T << u.K, particle=particle, method="most_probable"
        ).to_value(SPEED_UNITS)
    vth_sq = vTh**2
    rel_sq = (v - v_drift) ** 2
    # Twice the half-Gaussian: speeds are non-negative in 1D.
    norm = 2 * (vth_sq * np.pi) ** (-1 / 2)
    density = norm * np.exp(-rel_sq / vth_sq)
    if with_units:
        return density << SPEED_DISTRIBUTION_UNITS_1D
    elif units == "unitless":
        return density
@particle_input
def Maxwellian_speed_2D(
    v, T, particle: ParticleLike = "e", v_drift=0, vTh=np.nan, units="units"
):
    r"""
    Probability distribution function of speed for a 2D Maxwellian
    distribution.

    Evaluates the probability density of finding a particle with speed
    ``v`` in an equilibrium plasma of temperature ``T`` following the 2D
    Maxwellian speed distribution (Cartesian coordinates assumed).

    Parameters
    ----------
    v : `~astropy.units.Quantity`
        The speed in units convertible to m/s.
    T : `~astropy.units.Quantity`
        The temperature, preferably in kelvin.
    particle : `str`, optional
        Particle species (e.g. ``'p'``, ``'D+'``, ``'He-4 +1'``); defaults
        to electrons.
    v_drift : `~astropy.units.Quantity`
        The drift speed in units convertible to m/s. Only the zero default
        is currently supported.
    vTh : `~astropy.units.Quantity`, optional
        Thermal velocity (most probable) in m/s. Supply it to avoid
        recomputation, e.g. when integrating over velocity space.
    units : `str`, optional
        ``"units"`` (default) runs with unit checks and returns a Quantity;
        ``"unitless"`` skips unit handling and is substantially faster.

    Returns
    -------
    f : `~astropy.units.Quantity`
        Probability density in speed\ :sup:`-1`\ , normalized so that
        :math:`\iiint_{0}^∞ f(\vec{v}) d\vec{v} = 1`.

    Raises
    ------
    `TypeError`
        If a parameter is not a Quantity and cannot be converted into one.
    `~astropy.units.UnitConversionError`
        If a parameter is not in appropriate units.
    `ValueError`
        If the temperature is negative, or the particle mass or charge
        state cannot be found.
    `NotImplementedError`
        If a non-zero ``v_drift`` is supplied.

    Notes
    -----
    .. math::

        f = 2 π v (π v_{Th}^2)^{-1} \exp(-v^2 / v_{Th}^2)

    where :math:`v_{Th} = \sqrt{2 k_B T / m}` is the thermal speed.

    See Also
    --------
    Maxwellian_speed_1D

    Examples
    --------
    >>> from astropy import units as u
    >>> v=1 * u.m / u.s
    >>> Maxwellian_speed_2D(v=v, T=30000 * u.K, particle='e', v_drift=0 * u.m / u.s)
    <Quantity 2.199...e-12 s / m>
    """
    # Drift is not yet supported here; fail fast before any unit handling.
    if v_drift != 0:
        raise NotImplementedError("Non-zero drift speed is work in progress.")
    with_units = units == "units"
    if with_units:
        # Strip units up front so the arithmetic below runs on bare floats.
        v = v.to_value(SPEED_UNITS)
        v_drift = _v_drift_conversion(v_drift)
        T = T.to_value(u.K, equivalencies=u.temperature_energy())
        if not np.isnan(vTh):
            vTh = vTh.to_value(SPEED_UNITS)
    if np.isnan(vTh):
        # Thermal speed not supplied; derive it from the temperature.
        vTh = thermal_speed(
            T << u.K, particle=particle, method="most_probable"
        ).to_value(SPEED_UNITS)
    vth_sq = vTh**2
    rel_speed = v - v_drift
    # Gaussian normalization times the 2D "ring" factor 2πv.
    norm = (np.pi * vth_sq) ** (-1)
    ring = 2 * np.pi * rel_speed
    density = norm * ring * np.exp(-rel_speed**2 / vth_sq)
    if with_units:
        return density << SPEED_DISTRIBUTION_UNITS_1D
    elif units == "unitless":
        return density
@particle_input
def Maxwellian_speed_3D(
    v, T, particle: ParticleLike = "e", v_drift=0, vTh=np.nan, units="units"
):
    r"""
    Probability distribution function of speed for a Maxwellian
    distribution in 3D.

    Return the probability density of finding a particle with speed ``v``
    in m/s in an equilibrium plasma of temperature ``T`` which follows the
    3D Maxwellian speed distribution function. This function assumes
    Cartesian coordinates.

    Parameters
    ----------
    v : `~astropy.units.Quantity`
        The speed in units convertible to m/s.
    T : `~astropy.units.Quantity`
        The temperature, preferably in kelvin.
    particle : `str`, optional
        Representation of the particle species (e.g., ``'p'`` for protons,
        ``'D+'`` for deuterium, or ``'He-4 +1'`` for :math:`He_4^{+1}`
        (singly ionized helium-4)), which defaults to electrons.
    v_drift : `~astropy.units.Quantity`
        The drift speed in units convertible to m/s. Only the default of
        zero drift is currently implemented.
    vTh : `~astropy.units.Quantity`, optional
        Thermal velocity (most probable) in m/s. This is used for
        optimization purposes to avoid re-calculating ``vTh``, for example
        when integrating over velocity-space.
    units : `str`, optional
        Selects whether to run function with units and unit checks (when
        equal to ``"units"``) or to run as unitless (when equal to
        ``"unitless"``). The unitless version is substantially faster for
        intensive computations.

    Returns
    -------
    f : `~astropy.units.Quantity`
        Probability density in speed\ :sup:`-1`\ , normalized so that:
        :math:`\iiint_0^∞ f(\vec{v}) d\vec{v} = 1`.

    Raises
    ------
    `TypeError`
        A parameter argument is not a `~astropy.units.Quantity` and
        cannot be converted into a `~astropy.units.Quantity`.
    `~astropy.units.UnitConversionError`
        If a parameter is not in appropriate units.
    `ValueError`
        If the temperature is negative, or the particle mass or charge state
        cannot be found.
    `NotImplementedError`
        If ``v_drift`` is nonzero.

    Notes
    -----
    In 3D, the Maxwellian speed distribution function describing
    the distribution of particles with speed :math:`v` in a plasma with
    temperature :math:`T` is given by:

    .. math::

        f = 4 π v^{2} (π v_{Th}^2)^{-3/2} \exp(-v^{2} / v_{Th}^2)

    where :math:`v_{Th} = \sqrt{2 k_B T / m}` is the thermal speed.

    See Also
    --------
    Maxwellian_speed_1D

    Examples
    --------
    >>> from astropy import units as u
    >>> v=1 * u.m / u.s
    >>> Maxwellian_speed_3D(v=v, T=30000*u.K, particle='e', v_drift=0 * u.m / u.s)
    <Quantity 2.60235...e-18 s / m>
    """
    # A nonzero drift would shift the speed distribution; not supported yet.
    if v_drift != 0:
        raise NotImplementedError("Non-zero drift speed is work in progress.")
    if units == "units":
        # Unit checks and conversions: strip units and work with raw floats
        # from here on (much faster), re-attaching units on return.
        v = v.to_value(SPEED_UNITS)
        # v_drift may still carry the unitless default of 0; give it units
        # before the subtraction below.
        v_drift = _v_drift_conversion(v_drift)
        # Accept temperatures given as energies via the temperature_energy
        # equivalency; T is a plain float in kelvin afterwards.
        T = T.to_value(u.K, equivalencies=u.temperature_energy())
        if not np.isnan(vTh):
            # A caller-supplied thermal speed is also reduced to a float.
            vTh = vTh.to_value(SPEED_UNITS)
    if np.isnan(vTh):
        # No thermal speed supplied: derive the most-probable thermal speed
        # from T (re-attaching kelvin since T is a bare float here).
        vTh = thermal_speed(
            T << u.K, particle=particle, method="most_probable"
        ).to_value(SPEED_UNITS)
    # square of the thermal speed
    vThSq = vTh**2
    # square of the particle speed relative to the drift
    vSq = (v - v_drift) ** 2
    # distribution function: f = 4π v² (π vTh²)^{-3/2} exp(-v²/vTh²)
    coeff1 = (np.pi * vThSq) ** (-3 / 2)
    coeff2 = 4 * np.pi * vSq
    expTerm = np.exp(-vSq / vThSq)
    distFunc = coeff1 * coeff2 * expTerm
    if units == "units":
        return distFunc << SPEED_DISTRIBUTION_UNITS_1D
    elif units == "unitless":
        return distFunc
@particle_input
def kappa_velocity_1D(
    v, T, kappa, particle: ParticleLike = "e", v_drift=0, vTh=np.nan, units="units"
):
    r"""
    Return the probability density at the velocity ``v`` in m/s
    to find a particle ``particle`` in a plasma of temperature ``T``
    following the Kappa distribution function in 1D. The slope of the
    tail of the Kappa distribution function is set by ``kappa``, which
    must be greater than :math:`3/2`.

    Parameters
    ----------
    v : `~astropy.units.Quantity`
        The velocity in units convertible to m/s.
    T : `~astropy.units.Quantity`
        The temperature in kelvin.
    kappa : `~astropy.units.Quantity`
        The kappa parameter is a dimensionless number which sets the slope
        of the energy spectrum of suprathermal particles forming the tail
        of the Kappa velocity distribution function. Kappa must be greater
        than :math:`3/2`.
    particle : `str`, optional
        Representation of the particle species (e.g., ``'p'`` for protons,
        ``'D+'`` for deuterium, or ``'He-4 +1'`` for :math:`He_4^{+1}`
        (singly ionized helium-4)), which defaults to electrons.
    v_drift : `~astropy.units.Quantity`, optional
        The drift velocity in units convertible to m/s.
    vTh : `~astropy.units.Quantity`, optional
        Thermal velocity (most probable) in m/s. This is used for
        optimization purposes to avoid re-calculating ``vTh``, for example
        when integrating over velocity-space.
    units : `str`, optional
        Selects whether to run function with units and unit checks (when
        equal to ``"units"``) or to run as unitless (when equal to
        ``"unitless"``). The unitless version is substantially faster for
        intensive computations.

    Returns
    -------
    f : `~astropy.units.Quantity`
        Probability density in velocity\ :sup:`-1`\ , normalized so that
        :math:`\int_{-∞}^{+∞} f(v) dv = 1`.

    Raises
    ------
    `TypeError`
        A parameter argument is not a `~astropy.units.Quantity` and
        cannot be converted into a `~astropy.units.Quantity`.
    `~astropy.units.UnitConversionError`
        If a parameter is not in appropriate units.
    `ValueError`
        If ``kappa`` is not greater than :math:`3/2`, the temperature is
        negative, or the particle mass or charge state cannot be found.

    Notes
    -----
    In one dimension, the Kappa velocity distribution function describing
    the distribution of particles with speed :math:`v` in a plasma with
    temperature :math:`T` and suprathermal parameter :math:`κ` is
    given by:

    .. math::

        f = A_κ \left(1 + \frac{(\vec{v} -
        \vec{V_{drift}})^2}{κ v_{Th},κ^2}\right)^{-κ}

    where :math:`v_{Th},κ` is the kappa thermal speed and

    .. math::

        A_κ = \frac{1}{\sqrt{π} κ^{3/2} v_{Th},κ}
        \frac{Γ(κ + 1)}{Γ(κ - 1/2)}

    is the normalization constant.

    As :math:`κ → ∞`, the kappa distribution function converges to the
    Maxwellian distribution function.

    Examples
    --------
    >>> from astropy import units as u
    >>> v=1 * u.m / u.s
    >>> kappa_velocity_1D(v=v, T=30000*u.K, kappa=4, particle='e', v_drift=0 * u.m / u.s)
    <Quantity 6.75549...e-07 s / m>

    See Also
    --------
    kappa_velocity_3D
    ~plasmapy.formulary.speeds.kappa_thermal_speed
    """
    # The distribution is only valid (normalizable) for kappa > 3/2.
    if kappa <= 3 / 2:
        raise ValueError(f"Must have κ > 3/2, instead of {kappa}.")
    if units == "units":
        # Unit checks and conversions: strip units and work with raw floats
        # from here on, re-attaching units on return.
        v = v.to_value(SPEED_UNITS)
        # v_drift may still carry the unitless default of 0; give it units.
        v_drift = _v_drift_conversion(v_drift)
        # Accept temperatures given as energies via the temperature_energy
        # equivalency; T is a plain float in kelvin afterwards.
        T = T.to_value(u.K, equivalencies=u.temperature_energy())
        if not np.isnan(vTh):
            # A caller-supplied thermal speed is also reduced to a float.
            vTh = vTh.to_value(SPEED_UNITS)
    if np.isnan(vTh):
        # No thermal speed supplied: derive the kappa thermal speed from T
        # (re-attaching kelvin since T is a bare float here).
        vTh = kappa_thermal_speed(T << u.K, kappa, particle=particle).to_value(
            SPEED_UNITS
        )
    # square of the thermal speed
    vThSq = vTh**2
    # square of the particle velocity relative to the drift
    vSq = (v - v_drift) ** 2
    # distribution function: f = A_κ (1 + v²/(κ vTh²))^{-κ}
    expTerm = (1 + vSq / (kappa * vThSq)) ** (-kappa)
    coeff1 = 1 / (np.sqrt(np.pi) * kappa ** (3 / 2) * vTh)
    coeff2 = gamma(kappa + 1) / (gamma(kappa - 1 / 2))
    distFunc = coeff1 * coeff2 * expTerm
    if units == "units":
        return distFunc << SPEED_DISTRIBUTION_UNITS_1D
    elif units == "unitless":
        return distFunc
@particle_input
def kappa_velocity_3D(
    vx,
    vy,
    vz,
    T,
    kappa,
    particle: ParticleLike = "e",
    vx_drift=0,
    vy_drift=0,
    vz_drift=0,
    vTh=np.nan,
    units="units",
):
    r"""
    Return the probability density function for finding a particle with
    velocity components ``vx``, ``vy``, and ``vz`` in m/s in a suprathermal
    plasma of temperature ``T`` and parameter ``kappa`` which follows the
    3D Kappa distribution function. This function assumes Cartesian
    coordinates.

    Parameters
    ----------
    vx : `~astropy.units.Quantity`
        The velocity in x-direction units convertible to m/s.
    vy : `~astropy.units.Quantity`
        The velocity in y-direction units convertible to m/s.
    vz : `~astropy.units.Quantity`
        The velocity in z-direction units convertible to m/s.
    T : `~astropy.units.Quantity`
        The temperature, preferably in kelvin.
    kappa : `~astropy.units.Quantity`
        The kappa parameter is a dimensionless number which sets the slope
        of the energy spectrum of suprathermal particles forming the tail
        of the Kappa velocity distribution function. ``kappa`` must be
        greater than :math:`3/2`.
    particle : `str`, optional
        Representation of the particle species (e.g., ``'p'`` for protons,
        ``'D+'`` for deuterium, or ``'He-4 +1'`` for :math:`He_4^{+1}`
        (singly ionized helium-4)), which defaults to electrons.
    vx_drift : `~astropy.units.Quantity`, optional
        The drift velocity in x-direction units convertible to m/s.
    vy_drift : `~astropy.units.Quantity`, optional
        The drift velocity in y-direction units convertible to m/s.
    vz_drift : `~astropy.units.Quantity`, optional
        The drift velocity in z-direction units convertible to m/s.
    vTh : `~astropy.units.Quantity`, optional
        Thermal velocity (most probable) in m/s. This is used for
        optimization purposes to avoid re-calculating ``vTh``, for example
        when integrating over velocity-space.
    units : `str`, optional
        Selects whether to run function with units and unit checks (when
        equal to ``"units"``) or to run as unitless (when equal to
        ``"unitless"``). The unitless version is substantially faster for
        intensive computations.

    Returns
    -------
    f : `~astropy.units.Quantity`
        Probability density in velocity\ :sup:`-3`\ , normalized so that:
        :math:`\iiint_{0}^∞ f(\vec{v}) d\vec{v} = 1`

    Raises
    ------
    `TypeError`
        If any of the parameters is not a `~astropy.units.Quantity` and
        cannot be converted into one.
    `~astropy.units.UnitConversionError`
        If a parameter is not in appropriate units.
    `ValueError`
        If ``kappa`` is not greater than :math:`3/2`, the temperature is
        negative, or the particle mass or charge state cannot be found.

    Notes
    -----
    In three dimensions, the Kappa velocity distribution function describing
    the distribution of particles with speed :math:`v` in a plasma with
    temperature :math:`T` and suprathermal parameter :math:`κ` is given by:

    .. math::

        f = A_κ \left(1 + \frac{(\vec{v} -
        \vec{V_{drift}})^2}{κ v_{Th},κ^2}\right)^{-(κ + 1)}

    where :math:`v_{Th},κ` is the kappa thermal speed
    and :math:`A_κ = \frac{1}{2 π (κ v_{Th},κ^2)^{3/2}}
    \frac{Γ(κ + 1)}{Γ(κ - 1/2) Γ(3/2)}` is the
    normalization constant.

    As :math:`κ → ∞`, the kappa distribution function converges to the
    Maxwellian distribution function.

    See Also
    --------
    kappa_velocity_1D
    ~plasmapy.formulary.speeds.kappa_thermal_speed

    Examples
    --------
    >>> from astropy import units as u
    >>> v=1 * u.m / u.s
    >>> kappa_velocity_3D(vx=v,
    ...                   vy=v,
    ...                   vz=v,
    ...                   T=30000 * u.K,
    ...                   kappa=4,
    ...                   particle='e',
    ...                   vx_drift=0 * u.m / u.s,
    ...                   vy_drift=0 * u.m / u.s,
    ...                   vz_drift=0 * u.m / u.s)
    <Quantity 3.7833...e-19 s3 / m3>
    """
    # The distribution is only valid (normalizable) for kappa > 3/2.
    if kappa <= 3 / 2:
        raise ValueError(f"Must have kappa > 3/2, instead of {kappa}.")
    if units == "units":
        # Unit checks and conversions: strip units and work with raw floats
        # from here on, re-attaching units on return.
        vx = vx.to_value(SPEED_UNITS)
        vy = vy.to_value(SPEED_UNITS)
        vz = vz.to_value(SPEED_UNITS)
        # Drift velocities may still carry the unitless default of 0;
        # give them units before the subtractions below.
        vx_drift = _v_drift_conversion(vx_drift)
        vy_drift = _v_drift_conversion(vy_drift)
        vz_drift = _v_drift_conversion(vz_drift)
        # Accept temperatures given as energies via the temperature_energy
        # equivalency; T is a plain float in kelvin afterwards.
        T = T.to_value(u.K, equivalencies=u.temperature_energy())
        if not np.isnan(vTh):
            # A caller-supplied thermal speed is also reduced to a float.
            vTh = vTh.to_value(SPEED_UNITS)
    if np.isnan(vTh):
        # No thermal speed supplied: derive the kappa thermal speed from T
        # (re-attaching kelvin since T is a bare float here).
        vTh = kappa_thermal_speed(T << u.K, kappa, particle=particle).to_value(
            SPEED_UNITS
        )
    # square of the thermal speed
    vThSq = vTh**2
    # squared magnitude of the velocity relative to the drift
    vSq = (vx - vx_drift) ** 2 + (vy - vy_drift) ** 2 + (vz - vz_drift) ** 2
    # distribution function: f = A_κ (1 + v²/(κ vTh²))^{-(κ+1)}
    expTerm = (1 + vSq / (kappa * vThSq)) ** (-(kappa + 1))
    coeff1 = 1 / (2 * np.pi * (kappa * vThSq) ** (3 / 2))
    coeff2 = gamma(kappa + 1) / (gamma(kappa - 1 / 2) * gamma(3 / 2))
    distFunc = coeff1 * coeff2 * expTerm
    if units == "units":
        return distFunc << SPEED_DISTRIBUTION_UNITS_3D
    elif units == "unitless":
        return distFunc
| {
"content_hash": "410cd6a7fccedf9e6a74c8ec00c567d9",
"timestamp": "",
"source": "github",
"line_count": 1080,
"max_line_length": 89,
"avg_line_length": 33.06018518518518,
"alnum_prop": 0.6158801288334967,
"repo_name": "StanczakDominik/PlasmaPy",
"id": "8bdce97e1c3d5a24a51d2eb0aa843a110963145d",
"size": "35770",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "plasmapy/formulary/distribution.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "1285"
},
{
"name": "Python",
"bytes": "2148684"
}
],
"symlink_target": ""
} |
from ducktape.mark import parametrize
from ducktape.mark.resource import cluster
from kafkatest.services.console_consumer import ConsoleConsumer
from kafkatest.services.kafka import KafkaService
from kafkatest.services.kafka import config_property
from kafkatest.services.verifiable_producer import VerifiableProducer
from kafkatest.services.zookeeper import ZookeeperService
from kafkatest.tests.end_to_end import EndToEndTest
from kafkatest.utils import is_int
from kafkatest.version import LATEST_0_9, LATEST_0_10, LATEST_0_10_0, LATEST_0_10_1, LATEST_0_10_2, LATEST_0_11_0, LATEST_1_0, LATEST_1_1, LATEST_2_0, LATEST_2_1, LATEST_2_2, LATEST_2_3, V_0_9_0_0, V_0_11_0_0, DEV_BRANCH, KafkaVersion
class TestDowngrade(EndToEndTest):
    """End-to-end upgrade-then-downgrade test for a three-broker cluster."""

    TOPIC_CONFIG = {
        "partitions": 3,
        "replication-factor": 3,
        "configs": {"min.insync.replicas": 2}
    }

    def __init__(self, test_context):
        super(TestDowngrade, self).__init__(test_context=test_context, topic_config=self.TOPIC_CONFIG)

    def _roll_cluster(self, reconfigure):
        """Restart each broker in turn, applying ``reconfigure`` while it is down."""
        for broker in self.kafka.nodes:
            self.kafka.stop_node(broker)
            reconfigure(broker)
            self.kafka.start_node(broker)

    def upgrade_from(self, kafka_version):
        """Roll the cluster onto the dev branch, pinning the inter-broker
        protocol and message-format versions to ``kafka_version``."""
        def pin_versions(broker):
            broker.version = DEV_BRANCH
            broker.config[config_property.INTER_BROKER_PROTOCOL_VERSION] = str(kafka_version)
            broker.config[config_property.MESSAGE_FORMAT_VERSION] = str(kafka_version)

        self._roll_cluster(pin_versions)

    def downgrade_to(self, kafka_version):
        """Roll the cluster back to ``kafka_version``, removing the pinned
        protocol and message-format overrides."""
        def unpin_versions(broker):
            broker.version = kafka_version
            del broker.config[config_property.INTER_BROKER_PROTOCOL_VERSION]
            del broker.config[config_property.MESSAGE_FORMAT_VERSION]

        self._roll_cluster(unpin_versions)

    def setup_services(self, kafka_version, compression_types, security_protocol):
        """Start ZooKeeper, a three-broker cluster, and the background
        producer/consumer pair, all at ``kafka_version``."""
        self.create_zookeeper()
        self.zk.start()

        self.create_kafka(
            num_nodes=3,
            security_protocol=security_protocol,
            interbroker_security_protocol=security_protocol,
            version=kafka_version,
        )
        self.kafka.start()

        self.create_producer(
            log_level="DEBUG",
            compression_types=compression_types,
            version=kafka_version,
        )
        self.producer.start()

        self.create_consumer(log_level="DEBUG", version=kafka_version)
        self.consumer.start()

    @cluster(num_nodes=7)
    @parametrize(version=str(LATEST_2_3), compression_types=["none"])
    @parametrize(version=str(LATEST_2_3), compression_types=["zstd"], security_protocol="SASL_SSL")
    @parametrize(version=str(LATEST_2_2), compression_types=["none"])
    @parametrize(version=str(LATEST_2_2), compression_types=["zstd"], security_protocol="SASL_SSL")
    @parametrize(version=str(LATEST_2_1), compression_types=["none"])
    @parametrize(version=str(LATEST_2_1), compression_types=["lz4"], security_protocol="SASL_SSL")
    @parametrize(version=str(LATEST_2_0), compression_types=["none"])
    @parametrize(version=str(LATEST_2_0), compression_types=["snappy"], security_protocol="SASL_SSL")
    @parametrize(version=str(LATEST_1_1), compression_types=["none"])
    @parametrize(version=str(LATEST_1_1), compression_types=["lz4"], security_protocol="SASL_SSL")
    def test_upgrade_and_downgrade(self, version, compression_types, security_protocol="PLAINTEXT"):
        """Test upgrade and downgrade of a Kafka cluster.

        ``version`` is the Kafka version to upgrade from and downgrade back to.
        Downgrades are supported to any version at or above the current
        `inter.broker.protocol.version` (IBP): e.g. a user upgrading from 1.1
        to 2.3 who leaves the IBP at 1.1 may downgrade to 1.1 or higher.

        Verifies that producers and consumers keep working throughout:
        - Start a 3-node broker cluster on ``version``
        - Start a producer and consumer in the background
        - Roll the cluster to the current version with the IBP pinned to ``version``
        - Roll the cluster back down to ``version``
        - Validate that every message acked by the producer was consumed
        """
        kafka_version = KafkaVersion(version)
        self.setup_services(kafka_version, compression_types, security_protocol)
        self.await_startup()

        self.logger.info("First pass bounce - rolling upgrade")
        self.upgrade_from(kafka_version)
        self.run_validation()

        self.logger.info("Second pass bounce - rolling downgrade")
        self.downgrade_to(kafka_version)
        self.run_validation()
| {
"content_hash": "fea97089edbf695559809ae8d3687161",
"timestamp": "",
"source": "github",
"line_count": 100,
"max_line_length": 234,
"avg_line_length": 48.54,
"alnum_prop": 0.6761433868974042,
"repo_name": "noslowerdna/kafka",
"id": "0a7322d2f763af8c1c547cac02403946d3d6b8ff",
"size": "5635",
"binary": false,
"copies": "1",
"ref": "refs/heads/trunk",
"path": "tests/kafkatest/tests/core/downgrade_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "30250"
},
{
"name": "Dockerfile",
"bytes": "6334"
},
{
"name": "HTML",
"bytes": "3739"
},
{
"name": "Java",
"bytes": "19769473"
},
{
"name": "Python",
"bytes": "909872"
},
{
"name": "Scala",
"bytes": "6861865"
},
{
"name": "Shell",
"bytes": "96905"
},
{
"name": "XSLT",
"bytes": "7116"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import os
import shutil
import tempfile
import numpy as np
import tensorflow as tf
import tensorflowjs as tfjs
from tensorflow import keras
# Absolute directory containing this script.
curr_dir = os.path.dirname(os.path.realpath(__file__))
# Directory holding the saved-model artifacts and the input/output tensor
# JSON files read and written by this script.
_tmp_dir = os.path.join(curr_dir, 'create_save_predict_data')
def _load_predict_save(model_path):
    """Load a Keras model from tensorflow.js artifacts, predict, save outputs.

    Reads the input tensors (values and shapes) stored next to the model
    artifacts, runs inference, and writes the prediction values and shapes
    back out as JSON files.

    Args:
      model_path: Path to the model JSON file.
    """
    def artifact_path(suffix):
        # All per-model files share the model_path prefix inside _tmp_dir.
        return os.path.join(_tmp_dir, model_path + suffix)

    with open(artifact_path('.xs-shapes.json'), 'rt') as f:
        xs_shapes = json.load(f)
    with open(artifact_path('.xs-data.json'), 'rt') as f:
        xs_values = json.load(f)

    xs = [np.array(values, dtype=np.float32).reshape(shape)
          for values, shape in zip(xs_values, xs_shapes)]
    if len(xs) == 1:
        xs = xs[0]

    # TF2 keeps Session under tf.compat.v1; TF1 exposes it directly.
    session = tf.compat.v1.Session() if not hasattr(tf, 'Session') else tf.Session()
    with tf.Graph().as_default(), session:
        model_json_path = os.path.join(_tmp_dir, model_path, 'model.json')
        print('Loading model from path %s' % model_json_path)
        model = tfjs.converters.load_keras_model(model_json_path)
        ys = model.predict(xs)

        if isinstance(ys, list):
            ys_data = [y.tolist() for y in ys]
            ys_shape = [list(y.shape) for y in ys]
        else:
            ys_data = ys.tolist()
            ys_shape = [list(ys.shape)]

        with open(artifact_path('.ys-data.json'), 'w') as f:
            f.write(json.dumps(ys_data))
        with open(artifact_path('.ys-shapes.json'), 'w') as f:
            f.write(json.dumps(ys_shape))
def main():
    """Run the load/predict/save cycle for every model topology under test."""
    model_names = (
        'mlp',
        'cnn',
        'depthwise_cnn',
        'simple_rnn',
        'gru',
        'bidirectional_lstm',
        'time_distributed_lstm',
        'one_dimensional',
        'functional_merge',
    )
    for name in model_names:
        _load_predict_save(name)


if __name__ == '__main__':
    main()
| {
"content_hash": "a2770f354f844c24fe4142ec164f50df",
"timestamp": "",
"source": "github",
"line_count": 78,
"max_line_length": 78,
"avg_line_length": 29.82051282051282,
"alnum_prop": 0.6496130696474635,
"repo_name": "tensorflow/tfjs",
"id": "d55caf6dab512ca9237c74a4e96b4432a6de5ac7",
"size": "2816",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "e2e/integration_tests/create_save_predict.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "2165"
},
{
"name": "C",
"bytes": "1149"
},
{
"name": "C++",
"bytes": "511030"
},
{
"name": "CSS",
"bytes": "27067"
},
{
"name": "Dockerfile",
"bytes": "1840"
},
{
"name": "HTML",
"bytes": "132169"
},
{
"name": "Java",
"bytes": "4081"
},
{
"name": "JavaScript",
"bytes": "1200362"
},
{
"name": "Objective-C",
"bytes": "5247"
},
{
"name": "Python",
"bytes": "518704"
},
{
"name": "Ruby",
"bytes": "1981"
},
{
"name": "Shell",
"bytes": "76252"
},
{
"name": "Starlark",
"bytes": "176198"
},
{
"name": "TypeScript",
"bytes": "10878537"
}
],
"symlink_target": ""
} |
"""
Semantic Machines\N{TRADE MARK SIGN} software.
Creates BeliefStateTrackerDatum objects from TRADE-processed dialogues.
"""
import argparse
import json
from typing import Any, Dict, Iterator, List
from dataflow.core.io_utils import save_jsonl_file
from dataflow.multiwoz.belief_state_tracker_datum import (
BeliefState,
BeliefStateTrackerDatum,
Slot,
sort_slots,
)
from dataflow.multiwoz.ontology import DATAFLOW_SLOT_NAMES_FOR_DOMAIN
from dataflow.multiwoz.trade_dst_utils import (
flatten_belief_state,
get_domain_and_slot_name,
)
def build_belief_state_from_belief_dict(
    belief_dict: Dict[str, str], strict: bool
) -> BeliefState:
    """Convert a flat {slot_fullname: value} mapping into a BeliefState.

    With strict=True, a slot missing from the ontology is an assertion
    failure; otherwise it is reported but kept in the belief state.
    """
    slots_for_domain: Dict[str, List[Slot]] = {}
    for slot_fullname, slot_value in belief_dict.items():
        domain, slot_name = get_domain_and_slot_name(slot_fullname)
        if strict:
            assert (
                slot_name in DATAFLOW_SLOT_NAMES_FOR_DOMAIN[domain]
            ), 'slot "{}" is not in ontology for domain "{}"'.format(slot_name, domain)
        elif slot_name not in DATAFLOW_SLOT_NAMES_FOR_DOMAIN[domain]:
            # Warn only; the slot still participates in evaluation. If the
            # tracker knew the ontology in advance we could drop it instead.
            print(
                'slot "{}" is not in ontology for domain "{}"'.format(slot_name, domain)
            )
        slots_for_domain.setdefault(domain, []).append(
            Slot(name=slot_name, value=slot_value)
        )
    sort_slots(slots_for_domain)
    return BeliefState(slots_for_domain=slots_for_domain)
def build_belief_state_from_trade_turn(trade_turn: Dict[str, Any]) -> BeliefState:
    """Build a BeliefState from a single TRADE turn without dropping anything."""
    # Keep all domains and do not remove "none" values or rewrite any slot.
    flattened = flatten_belief_state(
        belief_state=trade_turn["belief_state"],
        keep_all_domains=True,
        remove_none=False,
    )
    return build_belief_state_from_belief_dict(belief_dict=flattened, strict=True)
def build_belief_state_tracker_data_from_trade_dialogue(
    trade_dialogue: Dict[str, Any],
) -> Iterator[BeliefStateTrackerDatum]:
    """Yield one BeliefStateTrackerDatum per turn of a TRADE dialogue."""
    dialogue_id = trade_dialogue["dialogue_idx"]
    for turn in trade_dialogue["dialogue"]:
        yield BeliefStateTrackerDatum(
            dialogue_id=dialogue_id,
            turn_index=int(turn["turn_idx"]),
            belief_state=build_belief_state_from_trade_turn(turn),
            prev_agent_utterance=turn["system_transcript"],
            curr_user_utterance=turn["transcript"],
        )
def main(trade_data_file: str, belief_state_tracker_data_file: str) -> None:
    """Convert TRADE dialogues into BeliefStateTrackerDatum JSONL output."""
    with open(trade_data_file) as fp:
        trade_dialogues = json.loads(fp.read().strip())

    belief_state_tracker_data = []
    for trade_dialogue in trade_dialogues:
        belief_state_tracker_data.extend(
            build_belief_state_tracker_data_from_trade_dialogue(trade_dialogue)
        )

    save_jsonl_file(
        data=belief_state_tracker_data,
        data_jsonl=belief_state_tracker_data_file,
        remove_null=True,
    )
def add_arguments(argument_parser: argparse.ArgumentParser) -> None:
    """Register this script's command-line flags on the given parser."""
    flags = [
        ("--trade_data_file", "TRADE processed dialogues file"),
        ("--belief_state_tracker_data_file", "output jsonl file of BeliefStateTrackerDatum"),
    ]
    for flag, help_text in flags:
        argument_parser.add_argument(flag, help=help_text)
if __name__ == "__main__":
    # Build the CLI (module docstring doubles as the help text), parse the
    # flags, and run the conversion end to end.
    cmdline_parser = argparse.ArgumentParser(
        description=__doc__, formatter_class=argparse.RawTextHelpFormatter
    )
    add_arguments(cmdline_parser)
    args = cmdline_parser.parse_args()
    print("Semantic Machines\N{TRADE MARK SIGN} software.")
    main(
        trade_data_file=args.trade_data_file,
        belief_state_tracker_data_file=args.belief_state_tracker_data_file,
    )
| {
"content_hash": "c5aad39f037120358b04fa0b8699b8fc",
"timestamp": "",
"source": "github",
"line_count": 111,
"max_line_length": 88,
"avg_line_length": 36.11711711711712,
"alnum_prop": 0.6667498129209279,
"repo_name": "microsoft/task_oriented_dialogue_as_dataflow_synthesis",
"id": "da13d0af94deedca6d0af852595621bbce3b6e5d",
"size": "4084",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/dataflow/multiwoz/create_belief_state_tracker_data.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "718"
},
{
"name": "Python",
"bytes": "308065"
},
{
"name": "Shell",
"bytes": "2886"
}
],
"symlink_target": ""
} |
"""Module implementing RWA cells with customizable attention spans.
This module provides an implementation of a recurrent weighted average (RWA)
model (https://arxiv.org/abs/1703.01253). The RWACell extends the `RNNCell`
class to create a model that conforms with the recurrent neural network
framework in TensorFlow.
"""
import tensorflow as tf
class RWACell(tf.contrib.rnn.RNNCell):
    """Recurrent weighted average cell (https://arxiv.org/abs/1703.01253).

    The output is a running weighted average of per-step features, with
    weights given by exponentiated attention scores. The state carries the
    running numerator ``n``, denominator ``d``, output ``h``, and maximum
    score ``a_max`` so the average can be updated online in a numerically
    stable (log-sum-exp style) way. A per-unit ``decay_rate`` discounts
    older terms, which bounds the cell's effective attention span.
    """

    def __init__(self, num_units, decay_rate=0.0):
        """Initialize the RWA cell.

        Args:
            num_units: int, The number of units in the RWA cell.
            decay_rate: (optional) If this is a float it sets the
                decay rate for every unit. If this is a list or
                tensor of shape `[num_units]` it sets the decay
                rate for each individual unit. The decay rate is
                defined as `ln(2.0)/hl` where `hl` is the desired
                half-life of the memory.
        """
        self.num_units = num_units
        # Leave a learnable (tf.Variable) decay rate untouched; any other
        # value is frozen into a constant tensor.
        if type(decay_rate) is not tf.Variable:
            decay_rate = tf.convert_to_tensor(decay_rate)
        self.decay_rate = decay_rate
        self.activation = tf.nn.tanh

    def zero_state(self, batch_size, dtype):
        """`zero_state` is overridden to return non-zero values and
        parameters that must be learned."""
        num_units = self.num_units
        activation = self.activation

        n = tf.zeros([batch_size, num_units], dtype=dtype)
        d = tf.zeros([batch_size, num_units], dtype=dtype)
        h = tf.zeros([batch_size, num_units], dtype=dtype)
        # Start off with a large negative number with room for this value
        # to decay.
        a_max = -float('inf')*tf.ones([batch_size, num_units], dtype=dtype)

        # The scope for the RWACell is hard-coded into `RWACell.zero_state`.
        # This is done because the initial state is learned and some of the
        # model parameters must be defined here. These parameters require a
        # scope and because `RWACell.zero_state` does not accept the scope
        # as an argument, it must be hard-coded.
        with tf.variable_scope('RWACell'):
            # s_0 is the learned initial "summary"; the initial output is
            # its activation, broadcast across the batch.
            s_0 = tf.get_variable('s_0', [num_units], initializer=tf.random_normal_initializer(stddev=1.0))
            h += activation(tf.expand_dims(s_0, 0))

        return (n, d, h, a_max)

    def __call__(self, inputs, state, scope=None):
        """Run one RWA step.

        Args:
            inputs: `[batch_size, num_inputs]` tensor for the current step.
            state: Tuple `(n, d, h, a_max)` from `zero_state` or a previous
                call.
            scope: Deprecated; must be `None` (the scope is hard-coded, see
                `RWACell.zero_state`).

        Returns:
            A pair `(h, (n, d, h, a_max))` of the new output and new state.
        """
        num_inputs = inputs.get_shape()[1]
        num_units = self.num_units
        decay_rate = self.decay_rate
        activation = self.activation
        x = inputs
        n, d, h, a_max = state

        if scope is not None:
            raise ValueError(
                "The argument `scope` for `RWACell.__call__` is deprecated and "
                "no longer works. The scope is hard-coded to make the initial "
                "state learnable. See `RWACell.zero_state` for more details."
            )

        with tf.variable_scope('RWACell'):
            # W_u/b_u feed the unbounded feature u, W_g/b_g the gate g, and
            # W_a the (bias-free) attention score a.
            W_u = tf.get_variable('W_u', [num_inputs, num_units], initializer=tf.contrib.layers.xavier_initializer())
            b_u = tf.get_variable('b_u', [num_units], initializer=tf.constant_initializer(0.0))
            W_g = tf.get_variable('W_g', [num_inputs+num_units, num_units], initializer=tf.contrib.layers.xavier_initializer())
            b_g = tf.get_variable('b_g', [num_units], initializer=tf.constant_initializer(0.0))
            W_a = tf.get_variable('W_a', [num_inputs+num_units, num_units], initializer=tf.contrib.layers.xavier_initializer())

        xh = tf.concat([x, h], 1)
        u = tf.matmul(x, W_u)+b_u
        g = tf.matmul(xh, W_g)+b_g
        # The bias term when factored out of the numerator and denominator
        # cancels and is unnecessary.
        a = tf.matmul(xh, W_a)
        z = tf.multiply(u, tf.nn.tanh(g))

        # Discount the running average: the max score decays additively in
        # log-space while numerator and denominator decay multiplicatively.
        a_decay = a_max-decay_rate
        n_decay = tf.multiply(n, tf.exp(-decay_rate))
        d_decay = tf.multiply(d, tf.exp(-decay_rate))

        # Rescale all exponentials by the new running maximum so they stay
        # in a representable range (log-sum-exp trick).
        a_newmax = tf.maximum(a_decay, a)
        exp_diff = tf.exp(a_max-a_newmax)
        exp_scaled = tf.exp(a-a_newmax)
        n = tf.multiply(n_decay, exp_diff)+tf.multiply(z, exp_scaled)  # Numerically stable update of numerator
        d = tf.multiply(d_decay, exp_diff)+exp_scaled  # Numerically stable update of denominator
        h = activation(tf.div(n, d))
        a_max = a_newmax

        return h, (n, d, h, a_max)

    @property
    def output_size(self):
        # Size of the per-step output `h`.
        return self.num_units

    @property
    def state_size(self):
        # Sizes of the state components `(n, d, h, a_max)`.
        return (self.num_units, self.num_units, self.num_units, self.num_units)
| {
"content_hash": "5b7c8ee8f06927af7c96158435b6b364",
"timestamp": "",
"source": "github",
"line_count": 117,
"max_line_length": 144,
"avg_line_length": 38.82051282051282,
"alnum_prop": 0.6897842360193748,
"repo_name": "jostmey/cas",
"id": "fff104fd7d691cb2e6b608df6bac3d8731cddf49",
"size": "4862",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "RWACell.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "8656"
}
],
"symlink_target": ""
} |
"""Support for Verizon FiOS Quantum Gateways."""
import logging
from quantum_gateway import QuantumGatewayScanner
from requests.exceptions import RequestException
import voluptuous as vol
from homeassistant.components.device_tracker import (
DOMAIN,
PLATFORM_SCHEMA,
DeviceScanner,
)
from homeassistant.const import CONF_HOST, CONF_PASSWORD, CONF_SSL
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)

# Default hostname used to reach the gateway when none is configured.
DEFAULT_HOST = "myfiosgateway.com"

# Extend the shared device-tracker schema: host and SSL usage are optional
# (SSL on by default); the gateway admin password is required.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
    {
        vol.Optional(CONF_HOST, default=DEFAULT_HOST): cv.string,
        vol.Optional(CONF_SSL, default=True): cv.boolean,
        vol.Required(CONF_PASSWORD): cv.string,
    }
)
def get_scanner(hass, config):
    """Validate the configuration and return a Quantum Gateway scanner."""
    device_scanner = QuantumGatewayDeviceScanner(config[DOMAIN])
    # Only hand back a scanner that managed to log in.
    if not device_scanner.success_init:
        return None
    return device_scanner
class QuantumGatewayDeviceScanner(DeviceScanner):
    """Device scanner backed by a Verizon FiOS Quantum Gateway."""

    def __init__(self, config):
        """Read the configuration and attempt to log in to the gateway."""
        self.host = config[CONF_HOST]
        self.password = config[CONF_PASSWORD]
        self.use_https = config[CONF_SSL]
        _LOGGER.debug("Initializing")

        self.success_init = False
        try:
            self.quantum = QuantumGatewayScanner(
                self.host, self.password, self.use_https
            )
        except RequestException:
            _LOGGER.error("Unable to connect to gateway. Check host.")
        else:
            self.success_init = self.quantum.success_init

        if not self.success_init:
            _LOGGER.error("Unable to login to gateway. Check password and host.")

    def scan_devices(self):
        """Scan for new devices and return a list of found MACs."""
        try:
            return self.quantum.scan_devices()
        except RequestException:
            _LOGGER.error("Unable to scan devices. Check connection to router")
        return []

    def get_device_name(self, device):
        """Return the name of the given device, or None if unknown."""
        return self.quantum.get_device_name(device)
| {
"content_hash": "81ff4315b7c8ab0925cb04dff820b088",
"timestamp": "",
"source": "github",
"line_count": 70,
"max_line_length": 84,
"avg_line_length": 32.114285714285714,
"alnum_prop": 0.6690391459074733,
"repo_name": "leppa/home-assistant",
"id": "97eb8eedfd3915c8bf147dd62ac89dc716345a71",
"size": "2248",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "homeassistant/components/quantum_gateway/device_tracker.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "18957740"
},
{
"name": "Shell",
"bytes": "6846"
}
],
"symlink_target": ""
} |
"""
A simple file to test the mlb_standings
Could do a bunch more tests but really, this is an example.
Nothing more.
DTH
2015-11-18
"""
from mlb_standings import MLBStandings
import unittest
class MLBStandingsTest(unittest.TestCase):
    """Example test exercising MLBStandings over a season date range."""

    def __init__(self, testname, startDate, endDate):
        """Create the named test case and build its standings fixture."""
        super(MLBStandingsTest, self).__init__(testname)
        self.setup(startDate, endDate)

    def setup(self, startDate, endDate):
        """Construct the MLBStandings object under test."""
        self.mlb_stats = MLBStandings(startDate, endDate)

    def testFrameLength(self):
        """Confirm every expected row flowed from the web into the frame."""
        expected_rows = 5460
        standings = self.mlb_stats.getMasterStandings()
        self.assertEqual(len(standings), expected_rows)
if __name__ == '__main__':
    # 2015 regular season: opening day through the final day.
    season_start = '2015-04-05'
    season_end = '2015-10-04'
    suite = unittest.TestSuite()
    suite.addTest(MLBStandingsTest('testFrameLength', season_start, season_end))
    # Run the assembled suite.
    unittest.TextTestRunner().run(suite)
| {
"content_hash": "6c75c4f4e1dfa47b665af9d73dc66975",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 78,
"avg_line_length": 29.324324324324323,
"alnum_prop": 0.6921658986175115,
"repo_name": "dtherrick/dataviz_baseball",
"id": "7a8ea503dda15e840aa2c1a42d1927ce2ba4ccfb",
"size": "1085",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "modules/tests.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "259699"
},
{
"name": "Python",
"bytes": "4671"
}
],
"symlink_target": ""
} |
from __future__ import print_function
from __future__ import absolute_import
import functools
import inspect
import os
import sys
import time
import platform
def GetCatapultDir():
  """Return the normalized path of the directory three levels above this file."""
  this_dir = os.path.dirname(__file__)
  return os.path.normpath(
      os.path.join(this_dir, os.pardir, os.pardir, os.pardir))
def IsRunningOnCrosDevice():
  """Returns True if we're on a ChromeOS device."""
  lsb_release = '/etc/lsb-release'
  if not sys.platform.startswith('linux') or not os.path.exists(lsb_release):
    return False
  with open(lsb_release, 'r') as f:
    contents = f.read()
  # ChromeOS identifies itself in lsb-release.
  return 'CHROMEOS_RELEASE_NAME' in contents
def GetHostOsName():
  """Map the current platform to 'chromeos'/'linux'/'mac'/'win' (or None)."""
  if IsRunningOnCrosDevice():
    return 'chromeos'
  prefix_to_name = (
      ('linux', 'linux'),
      ('darwin', 'mac'),
      ('win32', 'win'),
  )
  for prefix, name in prefix_to_name:
    if sys.platform.startswith(prefix):
      return name
  return None
def GetHostArchName():
  """Return the machine architecture string, e.g. 'x86_64' or 'arm64'."""
  arch = platform.machine()
  return arch
def _ExecutableExtensions():
  """Return the upper-cased executable extensions from %PATHEXT%.

  PATHEXT looks like '.com;.exe;.bat;.cmd'; the result strips the leading
  dots, e.g. ['COM', 'EXE', 'BAT', 'CMD'].

  Falls back to the standard Windows default list when PATHEXT is unset,
  so this no longer raises AttributeError (os.getenv returned None) in
  stripped-down environments.
  """
  pathext = os.getenv('PATHEXT', '.COM;.EXE;.BAT;.CMD')
  return [ext[1:].upper() for ext in pathext.split(';')]
def IsExecutable(path):
  """Return True if |path| is a file the current user may execute."""
  if not os.path.isfile(path):
    return False
  if getattr(os, 'name', None) == 'nt':
    # Windows decides executability by file extension.
    return path.split('.')[-1].upper() in _ExecutableExtensions()
  return os.access(path, os.X_OK)
def _AddDirToPythonPath(*path_parts):
  """Insert the joined |path_parts| directory into sys.path, if needed.

  The path goes in at index 1, not 0: some callsites that use telemetry
  assume sys.path[0] is the directory containing the running script.
  Nonexistent or already-present directories are ignored.
  """
  # pylint: disable=no-value-for-parameter
  candidate = os.path.abspath(os.path.join(*path_parts))
  if not os.path.isdir(candidate) or candidate in sys.path:
    return
  sys.path.insert(1, candidate)
# Make the sibling catapult projects importable before importing from them.
_AddDirToPythonPath(os.path.join(GetCatapultDir(), 'devil'))
_AddDirToPythonPath(os.path.join(GetCatapultDir(), 'dependency_manager'))
_AddDirToPythonPath(os.path.join(GetCatapultDir(), 'third_party', 'mock'))
# mox3 is needed for pyfakefs usage, but not for pylint.
_AddDirToPythonPath(os.path.join(GetCatapultDir(), 'third_party', 'mox3'))
_AddDirToPythonPath(
    os.path.join(GetCatapultDir(), 'third_party', 'pyfakefs'))
# These imports rely on the sys.path manipulation above, hence the
# wrong-import-position suppressions.
from devil.utils import timeout_retry  # pylint: disable=wrong-import-position
from devil.utils import reraiser_thread  # pylint: disable=wrong-import-position
# Decorator that adds timeout functionality to a function.
def Timeout(default_timeout):
  """Return a decorator wrapping a function with TimeoutDeco(default_timeout)."""
  def _Decorate(func):
    return TimeoutDeco(func, default_timeout)
  return _Decorate
# Note: "timeout" is the only keyword argument the decorated function will
# be given, but we must still accept **kwargs: the decorator wraps functions
# of varying positional arity via *args, and (in Python 2) a single named
# keyword argument cannot follow *args (e.g. 'def foo(*args, bar=42):' is a
# syntax error there).
def TimeoutDeco(func, default_timeout):
  """Wrap |func| so it runs under timeout_retry.Run with a 'timeout' kwarg.

  On timeout, prints a message and returns False instead of raising.
  """
  @functools.wraps(func)
  def RunWithTimeout(*args, **kwargs):
    effective_timeout = kwargs.get('timeout', default_timeout)
    try:
      # Retries=0: a single attempt, bounded by the timeout.
      return timeout_retry.Run(func, effective_timeout, 0, args=args)
    except reraiser_thread.TimeoutError:
      print('%s timed out.' % func.__name__)
      return False
  return RunWithTimeout
# Bounds for WaitFor's adaptive poll interval (elapsed_time / 10, clamped).
MIN_POLL_INTERVAL_IN_SECONDS = 0.1
MAX_POLL_INTERVAL_IN_SECONDS = 5
# How often (at most) WaitFor refreshes its progress-output bookkeeping.
OUTPUT_INTERVAL_IN_SECONDS = 300
def WaitFor(condition, timeout):
  """Waits for up to |timeout| secs for the function |condition| to return True.

  Polling frequency is (elapsed_time / 10), with a min of .1s and max of 5s.

  Returns:
    Result of |condition| function (if present).

  Raises:
    TimeoutException: if |condition| stayed falsy for |timeout| seconds.
  """
  def GetConditionString():
    # Prefer the lambda's source text for a readable timeout message; fall
    # back to __name__ when the source cannot be retrieved.
    if condition.__name__ == '<lambda>':
      try:
        return inspect.getsource(condition).strip()
      except IOError:
        pass
    return condition.__name__
  # Do an initial check to see if its true.
  res = condition()
  if res:
    return res
  start_time = time.time()
  last_output_time = start_time
  elapsed_time = time.time() - start_time
  while elapsed_time < timeout:
    res = condition()
    if res:
      return res
    now = time.time()
    elapsed_time = now - start_time
    last_output_elapsed_time = now - last_output_time
    if last_output_elapsed_time > OUTPUT_INTERVAL_IN_SECONDS:
      # NOTE(review): only the timestamp is refreshed here — nothing is
      # actually printed/logged. Looks like a periodic progress message was
      # stripped out; confirm against upstream before relying on output.
      last_output_time = time.time()
    # Poll adaptively: 1/10th of elapsed time, clamped to [0.1s, 5s].
    poll_interval = min(max(elapsed_time / 10., MIN_POLL_INTERVAL_IN_SECONDS),
                        MAX_POLL_INTERVAL_IN_SECONDS)
    time.sleep(poll_interval)
  raise TimeoutException('Timed out while waiting %ds for %s.' %
                         (timeout, GetConditionString()))
class TimeoutException(Exception):
  """Raised when an operation fails to complete within its allotted time.

  It is possible that waiting for a longer period of time would result in
  a successful operation.
  """
| {
"content_hash": "481d749cec7799f102a405352a016423",
"timestamp": "",
"source": "github",
"line_count": 149,
"max_line_length": 80,
"avg_line_length": 32.147651006711406,
"alnum_prop": 0.687473903966597,
"repo_name": "catapult-project/catapult",
"id": "b4c22a21627e30111171ee5adf0b1f418d840151",
"size": "4980",
"binary": false,
"copies": "6",
"ref": "refs/heads/main",
"path": "common/py_utils/py_utils/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "1324"
},
{
"name": "C++",
"bytes": "46069"
},
{
"name": "CSS",
"bytes": "23376"
},
{
"name": "Dockerfile",
"bytes": "1541"
},
{
"name": "Go",
"bytes": "114396"
},
{
"name": "HTML",
"bytes": "12394298"
},
{
"name": "JavaScript",
"bytes": "1559584"
},
{
"name": "Makefile",
"bytes": "1774"
},
{
"name": "Python",
"bytes": "6778695"
},
{
"name": "Shell",
"bytes": "2288"
}
],
"symlink_target": ""
} |
from time import strftime;
import os, sys, urllib;
import time, datetime;
import pytz;
def f(inputfile, outputfile):
    """Fetch Sina quotes for the stock codes in inputfile, append to outputfile.

    inputfile: path to a file of stock codes; newlines are stripped so the
        whole file becomes one 'list=' query parameter.
    outputfile: path of the file the raw quote text is appended to.
    """
    with open(inputfile, "r") as myfile:
        code = myfile.read().replace('\n', '')
    url = "http://hq.sinajs.cn/list=%s" % (code)
    write_data = urllib.urlopen(url).read().strip().strip('"')
    # BUG FIX: the original used mode "awr", which is not a valid mode
    # string (it only worked where the C fopen ignores the extra chars).
    # Per the original comment, append ("a") is the intended mode.
    fout = open(outputfile, "a")
    try:
        fout.write(write_data)
    finally:
        fout.close()
# argv[1] for input_code_file; argv[2] for output_path; argv[3] for output_prefix
if __name__=="__main__":
    # Stamp the output file with today's date in the Shanghai market timezone.
    tz = pytz.timezone('Asia/Shanghai')
    local_now = datetime.datetime.now(tz)
    date_stamp = sys.argv[3] + local_now.strftime('%Y-%m-%d')
    output_file = sys.argv[2] + "/" + date_stamp + ".txt"
    # Poll once a minute for 15 minutes.
    for _ in range(0, 15):
        f(sys.argv[1], output_file)
        time.sleep(60)
| {
"content_hash": "0f73c8bcf3777c65e6982a8e08ccdec0",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 81,
"avg_line_length": 35.375,
"alnum_prop": 0.5874558303886925,
"repo_name": "xiatian122/stockquote",
"id": "6d6270dd51bf307091e81207740b39cc2984463a",
"size": "1132",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "compete.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "4673"
},
{
"name": "Python",
"bytes": "2971"
},
{
"name": "Shell",
"bytes": "2826"
}
],
"symlink_target": ""
} |
"""Multivariate Gaussian contour classifier.
"""
import numpy as np
from scipy.stats import boxcox
from scipy.stats import multivariate_normal
from motif.core import ContourClassifier
# Offset added to features before the Box-Cox transform, which requires
# strictly positive inputs.
EPS = 1.0
# Cap applied to likelihood ratios (also used when the denominator is zero).
MAX_SCORE = 10000.0
class MvGaussian(ContourClassifier):
    '''Multivariate Gaussian contour classifier.

    Scores contours with the likelihood ratio of two class-conditional
    multivariate Gaussians fit on Box-Cox-transformed features.

    Attributes
    ----------
    rv_pos : scipy.stats._multivariate.multivariate_normal_gen
        A multivariate gaussian modeling the positive (melody) class
    rv_neg : scipy.stats._multivariate.multivariate_normal_gen
        A multivariate gaussian modeling the negative (non-melody) class
    n_feats : int
        The number of features
    lmbda : np.array
        Array of length n_features with the optimal Box-Cox lambda per
        feature, learned during fitting.
    '''
    def __init__(self):
        ContourClassifier.__init__(self)
        # All of the following are populated by fit().
        self.rv_pos = None
        self.rv_neg = None
        self.n_feats = None
        self.lmbda = None

    def predict(self, X):
        """ Compute melodiness score.

        Parameters
        ----------
        X : np.array [n_samples, n_features]
            Features.

        Returns
        -------
        p : np.array [n_samples]
            melodiness scores: the ratio p_pos(x) / p_neg(x), with special
            cases clipped into [0, MAX_SCORE].
        """
        if self.rv_pos is None:
            raise ReferenceError(
                "fit must be called before predict can be called"
            )
        transformed_feats = self._transform(X)
        numerator = self.rv_pos.pdf(transformed_feats)
        denominator = self.rv_neg.pdf(transformed_feats)
        ratio = np.zeros(numerator.shape)
        nonzero_denom = np.where(denominator != 0)[0]
        zero_denom = np.where(denominator == 0)[0]
        # Plain likelihood ratio where the denominator is nonzero.
        ratio[nonzero_denom] = (
            numerator[nonzero_denom] / denominator[nonzero_denom]
        )
        # Zero denominator -> maximal score.
        ratio[zero_denom] = MAX_SCORE
        # Zero numerator always yields score 0 — this assignment must run
        # AFTER the zero-denominator one so it takes precedence when both
        # densities are zero.
        ratio[np.where(numerator == 0)[0]] = 0.0
        # Finally cap any remaining large ratios.
        ratio[ratio > MAX_SCORE] = MAX_SCORE
        return ratio

    def fit(self, X, Y):
        """ Fit class-dependent multivariate gaussians on the training set.

        Also fits the per-feature Box-Cox transform (see `_fit_boxcox`),
        whose parameters are reused at predict time.

        Parameters
        ----------
        X : np.array [n_samples, n_features]
            Training features (raw; transformed internally).
        Y : np.array [n_samples]
            Training labels (1 = melody, 0 = non-melody).
        """
        X_boxcox = self._fit_boxcox(X)
        pos_idx = np.where(Y == 1)[0]
        mu_pos = np.mean(X_boxcox[pos_idx, :], axis=0)
        # rowvar=0: rows are observations, columns are variables.
        cov_pos = np.cov(X_boxcox[pos_idx, :], rowvar=0)
        neg_idx = np.where(Y == 0)[0]
        mu_neg = np.mean(X_boxcox[neg_idx, :], axis=0)
        cov_neg = np.cov(X_boxcox[neg_idx, :], rowvar=0)
        # allow_singular guards against degenerate covariance estimates.
        rv_pos = multivariate_normal(
            mean=mu_pos, cov=cov_pos, allow_singular=True
        )
        rv_neg = multivariate_normal(
            mean=mu_neg, cov=cov_neg, allow_singular=True
        )
        self.rv_pos = rv_pos
        self.rv_neg = rv_neg

    @property
    def threshold(self):
        """ The likelihood-ratio threshold determining the positive class.

        Returns
        -------
        threshold : float
            Scores above this value are classified as melody.
        """
        return 1.0

    @classmethod
    def get_id(cls):
        """ The ContourClassifier identifier

        Returns
        -------
        id : string
            class identifier
        """
        return 'mv_gaussian'

    def _fit_boxcox(self, X):
        """ Fit and apply a per-feature Box-Cox transform.

        Stores the learned lambda for each feature in `self.lmbda` so that
        `_transform` can reproduce the identical mapping at predict time.

        Parameters
        ----------
        X : np.array [n_samples, n_features]
            Untransformed training features.

        Returns
        -------
        X_boxcox : np.array [n_samples, n_features]
            Transformed training features.
        """
        _, self.n_feats = X.shape
        X_boxcox = np.zeros(X.shape)
        lmbda_opt = np.zeros((self.n_feats,))
        for i in range(self.n_feats):
            # EPS keeps inputs strictly positive, as Box-Cox requires.
            X_boxcox[:, i], lmbda_opt[i] = boxcox(
                X[:, i] + EPS
            )
        self.lmbda = lmbda_opt
        return X_boxcox

    def _transform(self, X):
        """ Transform an input feature matrix using the trained boxcox
        parameters.

        Parameters
        ----------
        X : np.array [n_samples, n_features]
            Input features.

        Returns
        -------
        X_boxcox : np.array [n_samples, n_features]
            Transformed features.
        """
        X_boxcox = np.zeros(X.shape)
        for i in range(self.n_feats):
            X_boxcox[:, i] = boxcox(
                X[:, i] + EPS, lmbda=self.lmbda[i]
            )
        return X_boxcox
| {
"content_hash": "6cf575c9e827c80e9746816f49c3fbe2",
"timestamp": "",
"source": "github",
"line_count": 168,
"max_line_length": 75,
"avg_line_length": 27.958333333333332,
"alnum_prop": 0.5439642324888226,
"repo_name": "rabitt/motif",
"id": "642814402d0170f644c1e7f222456d081ffb77de",
"size": "4697",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "motif/contour_classifiers/mv_gaussian.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "7794"
},
{
"name": "Python",
"bytes": "205056"
},
{
"name": "Shell",
"bytes": "1022"
}
],
"symlink_target": ""
} |
import unittest
from colander import Invalid
from jsonresume.schema.validators import is_valid_country_code
class TestCountryCodeValidation(unittest.TestCase):
    """Tests for is_valid_country_code from jsonresume.schema.validators."""

    def test_all(self):
        """Alpha-2 codes validate; alpha-3 and empty strings raise Invalid."""
        for country_code, exc_class in [
            ('JP', None),
            ('FR', None),
            ('USA', Invalid),
            ('MEX', Invalid),
            ('', Invalid)
        ]:
            if exc_class is None:
                self.assertIsNone(is_valid_country_code('', country_code))
            else:
                # BUG FIX: the original guarded this branch with
                # isinstance(exc_class, Exception), which is False for
                # exception *classes* (Invalid is a class, not an
                # instance), so the failure cases were silently skipped.
                self.assertRaises(
                    exc_class,
                    is_valid_country_code, '', country_code
                )
| {
"content_hash": "06ab29b724f2fd83367514c8f275105a",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 74,
"avg_line_length": 28.458333333333332,
"alnum_prop": 0.5285505124450952,
"repo_name": "kelvintaywl/jsonresume-validator",
"id": "2b3df2b01bb05a4aa180d8db8fe25bcbc5def377",
"size": "708",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "jsonresume/tests/test_validators.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "11471"
}
],
"symlink_target": ""
} |
import unittest
from mockito import mock, unstub, when
from Selenium2Library.keywords import BrowserManagementKeywords
class KeywordArgumentsElementTest(unittest.TestCase):
    """Keyword-argument handling tests for BrowserManagementKeywords."""

    def setUp(self):
        """Build the keyword library around a mocked context/browser."""
        context = mock()
        context._browser = mock()
        self.browser_management = BrowserManagementKeywords(context)

    def tearDown(self):
        unstub()

    def test_open_browser(self):
        """open_browser returns None with and without a remote URL."""
        url = 'https://github.com/robotframework'
        remote_url = '"http://localhost:4444/wd/hub"'
        browser = mock()
        when(self.browser_management)._make_browser(
            'firefox', None, None, False).thenReturn(browser)
        self.assertEqual(self.browser_management.open_browser(url), None)
        when(self.browser_management)._make_browser(
            'firefox', None, None, remote_url).thenReturn(browser)
        self.assertEqual(
            self.browser_management.open_browser(
                url, alias='None', remote_url=remote_url),
            None)
| {
"content_hash": "a729d3fc140ebdab5ddbe98c195447b0",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 78,
"avg_line_length": 33.16129032258065,
"alnum_prop": 0.5885214007782101,
"repo_name": "SergiuTudos/Selenium2Library",
"id": "ce4586e958fc4b7a687b65ef7b20f912693fcd47",
"size": "1028",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/unit/keywords/test_keyword_arguments_browsermanagement.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "1322"
},
{
"name": "HTML",
"bytes": "47061"
},
{
"name": "JavaScript",
"bytes": "9665"
},
{
"name": "Python",
"bytes": "277136"
},
{
"name": "RobotFramework",
"bytes": "116398"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import os
import os.path as op
# --- Author and site identity ---
AUTHOR = u'Benjamin Laken'
AUTHOR_EMAIL = 'benlaken@gmail.com'
SITENAME = u'Benjamin Laken'
# Alternate SITEURL values kept for reference (GitHub Pages / local dev).
# SITEURL = 'https://benlaken.github.io/blogsite'
SITEURL = 'https://benlaken.com'
# SITEURL = 'http://localhost:8000'
TAGLINE = u'Earth science meanderings'
DISQUS_SITENAME = 'www-benlaken-com'
DISPLAY_PAGES_ON_MENU = True
# --- Content layout and images ---
PATH = 'content'
# STATIC_PATHS = ['theme/static/', './theme/static/images/favicon.ico']
# EXTRA_PATH_METADATA = {'favicon.ico': {'path': 'theme/static/images/favicon.ico'}}
PROFILE_IMAGE_URL = "https://www.gravatar.com/avatar/69dcde5b90cca46e5111cd8c306d155d"
COVER_IMG_URL = "images/tenerife.jpg"
TIMEZONE = 'Europe/Madrid'
DEFAULT_LANG = 'en'
# --- Plugins (checked out into pelican-plugins/) ---
PLUGIN_PATHS = ['pelican-plugins']
PLUGINS = ['gravatar', 'liquid_tags.img', 'liquid_tags.video',
           'liquid_tags.youtube', 'liquid_tags.vimeo',
           'liquid_tags.include_code', "tag_cloud", "simple_footnotes",
           'share_post']
# --- Feeds ---
# Feed generation is usually not desired when developing; the None
# variants below disable it.
# FEED_ALL_ATOM = None
# CATEGORY_FEED_ATOM = None
# TRANSLATION_FEED_ATOM = None
# AUTHOR_FEED_ATOM = None
# AUTHOR_FEED_RSS = None
FEED_ALL_ATOM = u'feeds/all.atom.xml'
CATEGORY_FEED_ATOM = u'feeds/%s.atom.xml'
# TRANSLATION_FEED_ATOM = None
FEED_ALL_RSS = u'feeds/all.rss.xml'
CATEGORY_FEED_RSS = u'feeds/%s.rss.xml'
# Blogroll
LINKS = (('Pelican', 'http://getpelican.com/'),
         ('Python.org', 'http://python.org/'),
         ('Jinja2', 'http://jinja.pocoo.org/'),
         ('You can modify those links in your config file', '#'),)
# Social widget: (font-awesome icon name, URL) pairs.
SOCIAL = (
    ('github-square', 'https://github.com/benlaken'),
    ('twitter-square', 'https://twitter.com/benlaken'),
    ('linkedin-square', 'https://www.linkedin.com/in/benjamin-laken-a3089087'),
    ('book', 'https://impactstory.org/u/0000-0003-2021-6258/publications'),
)
DEFAULT_PAGINATION = 5
# Tag cloud settings
TAG_CLOUD_STEPS = 4
TAG_CLOUD_MAX_ITEMS = 100
TAG_CLOUD_SORTING = 'random'
TAG_CLOUD_BADGE = True
# Document-relative URLs are enabled, which is convenient when developing.
RELATIVE_URLS = True
THEME = "pure"
OUTPUT_PATH = 'output'
| {
"content_hash": "472d82f0199a7951d53990361010f43a",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 86,
"avg_line_length": 30.416666666666668,
"alnum_prop": 0.6812785388127854,
"repo_name": "benlaken/blogsite",
"id": "eefabb26c8bcfd4b4fdab638fb8c72eb1a657f29",
"size": "2239",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pelicanconf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "4582"
},
{
"name": "Python",
"bytes": "2913"
},
{
"name": "Shell",
"bytes": "2202"
}
],
"symlink_target": ""
} |
from collections.abc import Iterable
import os
def normalize_lineseps(lines):
    r"""Normalize and then return the given lines to all use '\n'."""
    # Collapse the platform separator first, then any stray '\r\n' left
    # behind when capturing I/O from an emulated tty (this happens even
    # on Linux and macOS).
    normalized = lines.replace(os.linesep, '\n')
    return normalized.replace('\r\n', '\n')
def build_line_match(expected_lines, *, strict=False):
    """
    Make a callable to match lines with ``expected_lines``.

    :param expected_lines: line expectation to match against; a plain
        substring, a compiled regex pattern, or an iterable mixing both
        (matched in order).
    :param strict: if True, every remaining line must match (and multi-part
        expectations must match consecutively).
    :type expected_lines: str or List[str] or List[Union[str, regex pattern]]
    :return: a callable taking ``(actual_lines, start=0)`` that returns the
        matched ``(start, end)`` index pair, or None on no match.
    :rtype: Callable[bool, [str]]
    :raises ValueError: if ``expected_lines`` is none of the above.
    """
    if isinstance(expected_lines, str):
        def _match(actual_lines, start=0):
            if strict:
                # Every remaining line must equal the expectation exactly.
                if all(expected_lines == line for line in actual_lines[start:]):
                    return start, start + len(actual_lines[start:])
                return None
            # First line containing the expectation as a substring.
            return next((
                (i, i + 1) for i, line in enumerate(
                    actual_lines[start:], start=start
                ) if expected_lines in line
            ), None)
        return _match
    if hasattr(expected_lines, 'match'):
        # BUG FIX: 'start' previously had no default in this branch, unlike
        # the other two branches here and the regex branch of
        # build_text_match, so pattern-based matchers could not be called as
        # plain `match(lines)`.
        def _match(actual_lines, start=0):
            if strict:
                if all(expected_lines.match(line) for line in actual_lines[start:]):
                    return start, start + len(actual_lines[start:])
                return None
            return next((
                (i, i + 1) for i, line in enumerate(
                    actual_lines[start:], start=start
                ) if expected_lines.match(line)
            ), None)
        return _match
    if isinstance(expected_lines, Iterable):
        # Sub-matchers are built non-strict; strictness is enforced here by
        # requiring consecutive matches.
        head_match, *tail_matches = [
            build_line_match(line, strict=False) for line in expected_lines
        ]

        def _match(actual_lines, start=0):
            next_start, end = head_match(actual_lines, start) or (-1, -1)
            if next_start < start or (strict and next_start != start):
                return None
            start = next_start
            for match in tail_matches:
                next_start, next_end = match(actual_lines, end) or (-1, -1)
                if next_start < end or (strict and next_start != end):
                    return None
                end = next_end
            return start, end
        return _match
    raise ValueError('Unknown format for expected lines')
def build_text_match(expected_text, *, strict=False):
    """
    Make a callable to match text with ``expected_text``.

    :param expected_text: text expectation to match against
    :type expected_text: str or regex pattern or List[Union[str, regex pattern]]
    :return: a callable that matches text against the expectation.
    :rtype: Callable[bool, [str]]
    """
    if isinstance(expected_text, str):
        def _match(actual_text, start=0):
            actual_text = normalize_lineseps(actual_text)
            if strict:
                # The remainder of the text must be exactly the expectation.
                if actual_text[start:] != expected_text:
                    return None
                return start, start + len(expected_text)
            found_at = actual_text.find(expected_text, start)
            if found_at < 0:
                return None
            return found_at, found_at + len(expected_text)
        return _match
    if hasattr(expected_text, 'search') and hasattr(expected_text, 'match'):
        def _match(actual_text, start=0):
            actual_text = normalize_lineseps(actual_text)
            # strict anchors at `start`; otherwise scan forward.
            matcher = expected_text.match if strict else expected_text.search
            found = matcher(actual_text, start)
            if found is None:
                return None
            return found.start(), found.end()
        return _match
    if isinstance(expected_text, Iterable):
        # Sub-matchers are non-strict; strictness is enforced below by
        # requiring consecutive spans.
        head_match, *tail_matches = [
            build_text_match(text, strict=False) for text in expected_text
        ]

        def _match(actual_text, start=0):
            actual_text = normalize_lineseps(actual_text)
            next_start, end = head_match(actual_text, start) or (-1, -1)
            if next_start < start or (strict and next_start != start):
                return None
            start = next_start
            for match in tail_matches:
                next_start, next_end = match(actual_text, end) or (-1, -1)
                if next_start < end or (strict and next_start != end):
                    return None
                end = next_end
            return start, end
        return _match
    raise ValueError('Unknown format for expected text')
| {
"content_hash": "e0794f78a777ac5397908d177b5705c3",
"timestamp": "",
"source": "github",
"line_count": 124,
"max_line_length": 84,
"avg_line_length": 37.58870967741935,
"alnum_prop": 0.560394765071873,
"repo_name": "ros2/launch",
"id": "61fdd7a95a53ff551037a4c8a9a7d25fca3b6ccc",
"size": "5263",
"binary": false,
"copies": "1",
"ref": "refs/heads/rolling",
"path": "launch_testing/launch_testing/tools/text.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "857"
},
{
"name": "C++",
"bytes": "1468"
},
{
"name": "CMake",
"bytes": "8807"
},
{
"name": "Makefile",
"bytes": "607"
},
{
"name": "Python",
"bytes": "1063971"
},
{
"name": "Shell",
"bytes": "85"
}
],
"symlink_target": ""
} |
from .streamable_archive_tests import *
from .delivery_collection_tests import *
from .importutils import *
from .test_passed_in_previous_period import *
from .test_groups_groupedby_relatedstudent_and_assignment import GroupsGroupedByRelatedStudentAndAssignmentTest
| {
"content_hash": "53d7ab03444fb17b7e1623d7cc5a788d",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 111,
"avg_line_length": 53.2,
"alnum_prop": 0.8421052631578947,
"repo_name": "devilry/devilry-django",
"id": "1d7bf30417e3b328f207b1d46883218466cde875",
"size": "266",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "devilry/utils/tests/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "513510"
},
{
"name": "Dockerfile",
"bytes": "211"
},
{
"name": "HTML",
"bytes": "421969"
},
{
"name": "JavaScript",
"bytes": "756713"
},
{
"name": "Less",
"bytes": "166670"
},
{
"name": "PLpgSQL",
"bytes": "397986"
},
{
"name": "Python",
"bytes": "6507968"
},
{
"name": "Shell",
"bytes": "10328"
}
],
"symlink_target": ""
} |
import _plotly_utils.basevalidators
class ParcatsValidator(_plotly_utils.basevalidators.CompoundArrayValidator):
    """Auto-generated validator for ``parcats`` entries of
    ``layout.template.data``; all real validation logic lives in the
    CompoundArrayValidator base class.
    """

    def __init__(
        self, plotly_name="parcats", parent_name="layout.template.data", **kwargs
    ):
        # data_class_str names the trace class used to build each element;
        # data_docs defaults to an (effectively empty) placeholder string.
        super(ParcatsValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            data_class_str=kwargs.pop("data_class_str", "Parcats"),
            data_docs=kwargs.pop(
                "data_docs",
                """
            """,
            ),
            **kwargs
        )
| {
"content_hash": "9ff9fa7ee218c9b76a98c5032ed24844",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 81,
"avg_line_length": 30.333333333333332,
"alnum_prop": 0.5512820512820513,
"repo_name": "plotly/python-api",
"id": "b1aab66788669c33ab4d0aa29619412e153c4d6c",
"size": "546",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/layout/template/data/_parcats.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "6870"
},
{
"name": "Makefile",
"bytes": "1708"
},
{
"name": "Python",
"bytes": "823245"
},
{
"name": "Shell",
"bytes": "3238"
}
],
"symlink_target": ""
} |
"""
DocuSign REST API
The DocuSign REST API provides you with a powerful, convenient, and simple Web services API for interacting with DocuSign. # noqa: E501
OpenAPI spec version: v2.1
Contact: devcenter@docusign.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from docusign_esign.client.configuration import Configuration
class SmartContractInformation(object):
    """Swagger-generated model holding smart-contract ``code`` and ``uri``.

    NOTE: This class is auto generated by the swagger code generator
    program. Do not edit the class manually.
    """

    # Attribute name -> swagger type.
    swagger_types = {
        'code': 'str',
        'uri': 'str'
    }

    # Attribute name -> JSON key in the API definition.
    attribute_map = {
        'code': 'code',
        'uri': 'uri'
    }

    def __init__(self, _configuration=None, **kwargs):  # noqa: E501
        """SmartContractInformation - a model defined in Swagger"""  # noqa: E501
        if _configuration is None:
            _configuration = Configuration()
        self._configuration = _configuration

        self._code = kwargs.get('code', None)
        self._uri = kwargs.get('uri', None)
        self.discriminator = None

    @property
    def code(self):
        """Gets the code of this SmartContractInformation.  # noqa: E501

        :return: The code of this SmartContractInformation.
        :rtype: str
        """
        return self._code

    @code.setter
    def code(self, code):
        """Sets the code of this SmartContractInformation.

        :param code: The code of this SmartContractInformation.
        :type: str
        """
        self._code = code

    @property
    def uri(self):
        """Gets the uri of this SmartContractInformation.  # noqa: E501

        :return: The uri of this SmartContractInformation.
        :rtype: str
        """
        return self._uri

    @uri.setter
    def uri(self, uri):
        """Sets the uri of this SmartContractInformation.

        :param uri: The uri of this SmartContractInformation.
        :type: str
        """
        self._uri = uri

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [
                    item.to_dict() if hasattr(item, "to_dict") else item
                    for item in value
                ]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {
                    key: val.to_dict() if hasattr(val, "to_dict") else val
                    for key, val in value.items()
                }
            else:
                result[attr] = value
        # Generated models that subclass dict also merge their own items.
        if issubclass(SmartContractInformation, dict):
            for key, value in self.items():
                result[key] = value
        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, SmartContractInformation):
            return False
        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, SmartContractInformation):
            return True
        return not self == other
| {
"content_hash": "a7696d54ba6ce30f43eba06add532e4e",
"timestamp": "",
"source": "github",
"line_count": 149,
"max_line_length": 140,
"avg_line_length": 28.053691275167786,
"alnum_prop": 0.5555023923444976,
"repo_name": "docusign/docusign-python-client",
"id": "de17cefa7d5cb7d9912eb037fa928aeb353370ee",
"size": "4197",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docusign_esign/models/smart_contract_information.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "9687716"
}
],
"symlink_target": ""
} |
"""
Model classes for AppDynamics REST API
.. moduleauthor:: Todd Radel <tradel@appdynamics.com>
"""
from . import JsonObject, JsonList
class Node(JsonObject):
FIELDS = {'id': '', 'name': '', 'type': '', 'machine_id': 'machineId', 'machine_name': 'machineName',
'tier_id': 'tierId', 'tier_name': 'tierName', 'unique_id': 'nodeUniqueLocalId',
'os_type': 'machineOSType', 'has_app_agent': 'appAgentPresent', 'app_agent_version': 'appAgentVersion',
'has_machine_agent': 'machineAgentPresent', 'machine_agent_version': 'machineAgentVersion'}
def __init__(self, node_id=0, name='', node_type='', machine_id=0, machine_name='', os_type='',
unique_local_id='', tier_id=0, tier_name='', has_app_agent=False, app_agent_version='',
has_machine_agent=False, machine_agent_version=''):
(self.id, self.name, self.type, self.machine_id, self.machine_name, self.os_type, self.unique_local_id,
self.tier_id, self.tier_name, self.has_app_agent, self.app_agent_version, self.has_machine_agent,
self.machine_agent_version) = (node_id, name, node_type, machine_id, machine_name, os_type, unique_local_id,
tier_id, tier_name, has_app_agent, app_agent_version,
has_machine_agent, machine_agent_version)
class Nodes(JsonList):
def __init__(self, initial_list=None):
super(Nodes, self).__init__(Node, initial_list)
def __getitem__(self, i):
"""
:rtype: Node
"""
return self.data[i]
def by_machine_name(self, name):
"""
Filters a Nodes collection to return only the nodes matching the given hostname.
:param str name: Hostname to match against.
:returns: a Nodes collection filtered by hostname.
:rtype: Nodes
"""
return Nodes([x for x in self.data if x.machineName == name])
def by_machine_id(self, machine_id):
"""
Filters a Nodes collection to return only the nodes matching the given machine instance ID.
:param int machine_id: Machine ID to match against.
:returns: a Nodes collection filtered by machine ID.
:rtype: Nodes
"""
return Nodes([x for x in self.data if x.machineId == machine_id])
def by_tier_name(self, name):
"""
Filters a Nodes collection to return only the nodes belonging to the given tier.
:param str name: Tier name to match against.
:returns: a Nodes collection filtered by tier.
:rtype: Nodes
"""
return Nodes([x for x in self.data if x.tier_name == name])
def by_tier_id(self, tier_id):
"""
Filters a Nodes collection to return only the nodes belonging to the given tier ID.
:param int tier_id: Tier ID to match against.
:returns: a Nodes collection filtered by tier.
:rtype: Nodes
"""
return Nodes([x for x in self.data if x.tier_id == tier_id])
| {
"content_hash": "5266b978a7bf100b9b70496f625f9696",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 117,
"avg_line_length": 42.236111111111114,
"alnum_prop": 0.6037487668530089,
"repo_name": "tradel/AppDynamicsREST",
"id": "ce995c875781fcb59a77d035f0a338ddc89c1cd6",
"size": "3041",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "appd/model/node.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "6640"
},
{
"name": "Makefile",
"bytes": "1728"
},
{
"name": "Python",
"bytes": "64907"
}
],
"symlink_target": ""
} |
"""Functions for collecting diagnostic information on Kubernetes cluster."""
import enum
from typing import List, Text
from kfp.deprecated.cli.diagnose_me import utility
class Commands(enum.Enum):
    """Enumerates the kubectl operations supported by this module.

    Members are assigned consecutive integer values starting at 1, in
    declaration order, matching the keys of _command_string.
    """
    GET_CONFIGURED_CONTEXT = enum.auto()
    GET_PODS = enum.auto()
    GET_PVCS = enum.auto()
    GET_PVS = enum.auto()
    GET_SECRETS = enum.auto()
    GET_SERVICES = enum.auto()
    GET_KUBECTL_VERSION = enum.auto()
    GET_CONFIG_MAPS = enum.auto()
# Maps each Commands member to the kubectl sub-command string that
# implements it. Callers split the string on spaces before appending it
# to the ['kubectl'] argument list.
_command_string = {
    Commands.GET_CONFIGURED_CONTEXT: 'config view',
    Commands.GET_PODS: 'get pods',
    Commands.GET_PVCS: 'get pvc',
    Commands.GET_PVS: 'get pv',
    Commands.GET_SECRETS: 'get secrets',
    Commands.GET_SERVICES: 'get services',
    Commands.GET_KUBECTL_VERSION: 'version',
    Commands.GET_CONFIG_MAPS: 'get configmaps',
}
def execute_kubectl_command(
    kubectl_command_list: List[Text],
    human_readable: bool = False) -> utility.ExecutorResponse:
    """Invokes the kubectl command.

    Args:
      kubectl_command_list: kubectl arguments as a string list, e.g.
        ['config', 'view'].
      human_readable: if False, '-o json' is appended so output is JSON;
        otherwise output is left in kubectl's human readable format.

    Returns:
      utility.ExecutorResponse with outputs from stdout, stderr and the
      execution code.
    """
    full_command = ['kubectl', *kubectl_command_list]
    if not human_readable:
        full_command += ['-o', 'json']
    return utility.ExecutorResponse().execute_command(full_command)
def get_kubectl_configuration(
    configuration: Commands,
    kubernetes_context: Text = None,
    namespace: Text = None,
    human_readable: bool = False) -> utility.ExecutorResponse:
    """Captures the specified environment configuration.

    Captures the environment state for the specified setting such as current
    context, active pods, etc. and returns it in a dictionary format. If no
    context is specified the system will use the current_context, or error out
    if none is configured.

    Args:
      configuration:
        - K8_CONFIGURED_CONTEXT: returns all k8 configuration available in the
          current env including current_context.
        - PODS: returns all pods and their status details.
        - PVCS: returns all PersistentVolumeClaim and their status details.
        - SECRETS: returns all accessible k8 secrets.
        - PVS: returns all PersistentVolume and their status details.
        - SERVICES: returns all services and their status details.
      kubernetes_context: Context to use to retrieve cluster specific commands,
        if set to None calls will rely on current_context configured.
      namespace: default namespace to be used for the command; if not specified
        --all-namespaces will be used.
      human_readable: If true all output will be in human readable form instead
        of Json.

    Returns:
      A list of dictionaries matching gcloud / gsutil output for the specified
      configuration, or an error message if any occurs during execution.
    """
    # Version and context queries are cluster-independent, so no context
    # or namespace flags apply to them.
    if configuration in (Commands.GET_CONFIGURED_CONTEXT,
                         Commands.GET_KUBECTL_VERSION):
        return execute_kubectl_command(
            (_command_string[configuration]).split(' '), human_readable)

    execution_command = _command_string[configuration].split(' ')
    if kubernetes_context:
        execution_command.extend(['--context', kubernetes_context])
    if namespace:
        execution_command.extend(['--namespace', namespace])
    else:
        # No namespace given: query across all namespaces.
        execution_command.extend(['--all-namespaces'])
    return execute_kubectl_command(execution_command, human_readable)
def _get_kfp_runtime() -> Text:
    """Captures the current version of KFP in the k8 cluster.

    Returns:
      Returns the run-time version of KFP as a string.

    Raises:
      NotImplementedError: always, until the lookup is implemented.
    """
    # TODO(chavoshi) needs to be implemented.
    raise NotImplementedError
| {
"content_hash": "b595896bc629ca01b9cca096c51af04e",
"timestamp": "",
"source": "github",
"line_count": 110,
"max_line_length": 82,
"avg_line_length": 35.68181818181818,
"alnum_prop": 0.6853503184713375,
"repo_name": "kubeflow/pipelines",
"id": "43d44adfdbd83305ffe58a4c3adcfe828d291cbd",
"size": "4547",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sdk/python/kfp/deprecated/cli/diagnose_me/kubernetes_cluster.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "799"
},
{
"name": "CSS",
"bytes": "2171"
},
{
"name": "Dockerfile",
"bytes": "49331"
},
{
"name": "Go",
"bytes": "1903937"
},
{
"name": "HTML",
"bytes": "3656"
},
{
"name": "JavaScript",
"bytes": "544297"
},
{
"name": "Jinja",
"bytes": "938"
},
{
"name": "Jupyter Notebook",
"bytes": "359548"
},
{
"name": "Makefile",
"bytes": "22164"
},
{
"name": "Mustache",
"bytes": "23652"
},
{
"name": "PowerShell",
"bytes": "3194"
},
{
"name": "Python",
"bytes": "5684887"
},
{
"name": "Shell",
"bytes": "264595"
},
{
"name": "Smarty",
"bytes": "8295"
},
{
"name": "Starlark",
"bytes": "553"
},
{
"name": "TypeScript",
"bytes": "4294958"
}
],
"symlink_target": ""
} |
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class authorizationpolicy_binding(base_resource):
    """Binding class showing the resources that can be bound to
    authorizationpolicy_binding.
    """
    def __init__(self):
        # Object identifier plus one list per bindable resource type;
        # the payload formatter populates these from API responses.
        self._name = ""
        self.authorizationpolicy_csvserver_binding = []
        self.authorizationpolicy_lbvserver_binding = []
        self.authorizationpolicy_aaauser_binding = []
        self.authorizationpolicy_authorizationpolicylabel_binding = []
        self.authorizationpolicy_aaagroup_binding = []

    @property
    def name(self):
        """Name of the authorization policy.<br/>Minimum length = 1.
        """
        try:
            return self._name
        except Exception as e:
            raise e

    @name.setter
    def name(self, name):
        """Name of the authorization policy.<br/>Minimum length = 1
        """
        try:
            self._name = name
        except Exception as e:
            raise e

    # BUGFIX: the generated getters below previously read the
    # underscore-prefixed attributes (e.g. self._authorizationpolicy_
    # lbvserver_binding) which are never assigned anywhere -- __init__
    # sets the non-underscore names -- so every access raised
    # AttributeError. They now return the attributes actually set.
    @property
    def authorizationpolicy_lbvserver_bindings(self):
        """lbvserver that can be bound to authorizationpolicy.
        """
        try:
            return self.authorizationpolicy_lbvserver_binding
        except Exception as e:
            raise e

    @property
    def authorizationpolicy_aaagroup_bindings(self):
        """aaagroup that can be bound to authorizationpolicy.
        """
        try:
            return self.authorizationpolicy_aaagroup_binding
        except Exception as e:
            raise e

    @property
    def authorizationpolicy_csvserver_bindings(self):
        """csvserver that can be bound to authorizationpolicy.
        """
        try:
            return self.authorizationpolicy_csvserver_binding
        except Exception as e:
            raise e

    @property
    def authorizationpolicy_authorizationpolicylabel_bindings(self):
        """authorizationpolicylabel that can be bound to authorizationpolicy.
        """
        try:
            return self.authorizationpolicy_authorizationpolicylabel_binding
        except Exception as e:
            raise e

    @property
    def authorizationpolicy_aaauser_bindings(self):
        """aaauser that can be bound to authorizationpolicy.
        """
        try:
            return self.authorizationpolicy_aaauser_binding
        except Exception as e:
            raise e

    def _get_nitro_response(self, service, response):
        """Converts nitro response into object and returns the object array
        in case of get request.
        """
        try:
            result = service.payload_formatter.string_to_resource(authorizationpolicy_binding_response, response, self.__class__.__name__)
            if result.errorcode != 0:
                # Error code 444 means the session is no longer valid.
                if result.errorcode == 444:
                    service.clear_session(self)
                if result.severity:
                    if result.severity == "ERROR":
                        raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
                else:
                    raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
            return result.authorizationpolicy_binding
        except Exception as e:
            raise e

    def _get_object_name(self):
        """Returns the value of object identifier argument.
        """
        try:
            if self.name:
                return str(self.name)
            return None
        except Exception as e:
            raise e

    @classmethod
    def get(self, service, name):
        """Use this API to fetch authorizationpolicy_binding resource.

        Accepts either a single policy name or a list of names; returns a
        single resource or a list of resources accordingly.
        """
        try:
            if type(name) is not list:
                obj = authorizationpolicy_binding()
                obj.name = name
                response = obj.get_resource(service)
            else:
                # BUGFIX: 'response' was previously used without being
                # initialized in this branch, raising UnboundLocalError
                # whenever a list of names was passed.
                response = [None] * len(name)
                if name and len(name) > 0:
                    obj = [authorizationpolicy_binding() for _ in range(len(name))]
                    for i in range(len(name)):
                        obj[i].name = name[i]
                        response[i] = obj[i].get_resource(service)
            return response
        except Exception as e:
            raise e
class authorizationpolicy_binding_response(base_response):
    """Response envelope for authorizationpolicy_binding API calls.

    Pre-allocates ``length`` empty binding objects that the payload
    formatter fills in when a response is parsed.
    """
    def __init__(self, length=1):
        self.errorcode = 0
        self.message = ""
        self.severity = ""
        self.sessionid = ""
        # The original code first assigned an empty list here and then
        # immediately overwrote it; the dead store has been removed.
        self.authorizationpolicy_binding = [authorizationpolicy_binding() for _ in range(length)]
| {
"content_hash": "04cdaf170d084c5e7844aba457923d73",
"timestamp": "",
"source": "github",
"line_count": 138,
"max_line_length": 129,
"avg_line_length": 29.869565217391305,
"alnum_prop": 0.7195536147501213,
"repo_name": "mahabs/nitro",
"id": "3a2102aa7ac9f053c0ba26c081e83aabd2ed6401",
"size": "4736",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nssrc/com/citrix/netscaler/nitro/resource/config/authorization/authorizationpolicy_binding.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "498"
},
{
"name": "Python",
"bytes": "10647176"
}
],
"symlink_target": ""
} |
"""
WSGI config for agent project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "agent.settings")
application = get_wsgi_application()
| {
"content_hash": "0d6c69a461a62faa0acbc59ef4c129f1",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 78,
"avg_line_length": 24.25,
"alnum_prop": 0.7680412371134021,
"repo_name": "kinnevo/kic_alone",
"id": "004b0b544ffddb09b5d32af4735baca9a208b11c",
"size": "388",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "agent/wsgi.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "47377"
},
{
"name": "HTML",
"bytes": "100064"
},
{
"name": "JavaScript",
"bytes": "97109"
},
{
"name": "Python",
"bytes": "43182"
},
{
"name": "Shell",
"bytes": "609"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
from caffe2.python import core
from caffe2.python.test_util import rand_array
import caffe2.python.hypothesis_test_util as hu
from hypothesis import given
import hypothesis.strategies as st
class TestScatterOps(hu.HypothesisTestCase):
    """Property-based tests for the Scatter* operators, checked against
    pure-NumPy reference implementations."""
    # TODO(dzhulgakov): add test cases for failure scenarios
    @given(num_args=st.integers(1, 5),
           first_dim=st.integers(1, 20),
           index_dim=st.integers(1, 10),
           extra_dims=st.lists(st.integers(1, 4), min_size=0, max_size=3),
           ind_type=st.sampled_from([np.int32, np.int64]),
           **hu.gcs)
    def testScatterWeightedSum(
            self, num_args, first_dim, index_dim, extra_dims, ind_type, gc, dc):
        # Build the input name list: data, w0, indices, then num_args
        # (x_i, w_i) pairs.
        ins = ['data', 'w0', 'indices']
        for i in range(1, num_args + 1):
            ins.extend(['x' + str(i), 'w' + str(i)])
        op = core.CreateOperator(
            'ScatterWeightedSum',
            ins,
            ['data'],
            device_option=gc)

        def ref(d, w0, ind, *args):
            # Reference: scale the indexed rows by w0, then for each
            # (x, w) pair accumulate r[ind[i]] += w * x[i]. Duplicate
            # indices accumulate sequentially.
            r = d.copy()
            for i in ind:
                r[i] *= w0
            for i in range(0, len(args), 2):
                x = args[i]
                w = args[i+1]
                for i, j in enumerate(ind):
                    r[j] += w * x[i]
            return [r]

        d = rand_array(first_dim, *extra_dims)
        # Indices may repeat (randint with replacement).
        ind = np.random.randint(0, first_dim, index_dim).astype(ind_type)
        # ScatterWeightedSumOp only supports w0=1.0 in CUDAContext
        if(gc == hu.gpu_do):
            w0 = np.array(1.0).astype(np.float32)
        else:
            w0 = rand_array()
        inputs = [d, w0, ind]
        for _ in range(1, num_args + 1):
            x = rand_array(index_dim, *extra_dims)
            w = rand_array()
            inputs.extend([x,w])
        self.assertReferenceChecks(gc, op, inputs, ref, threshold=1e-3)

    @given(first_dim=st.integers(1, 20),
           index_dim=st.integers(1, 10),
           extra_dims=st.lists(st.integers(1, 4), min_size=0, max_size=3),
           data_type=st.sampled_from([np.float16, np.float32, np.int32, np.int64]),
           ind_type=st.sampled_from([np.int32, np.int64]),
           **hu.gcs)
    def testScatterAssign(
            self, first_dim, index_dim, extra_dims, data_type, ind_type, gc, dc):
        op = core.CreateOperator('ScatterAssign',
                                 ['data', 'indices', 'slices'], ['data'])

        def ref(d, ind, x):
            # Reference: plain assignment into the indexed rows.
            r = d.copy()
            r[ind] = x
            return [r]

        # let's have indices unique
        if first_dim < index_dim:
            first_dim, index_dim = index_dim, first_dim
        d = (rand_array(first_dim, *extra_dims) * 10).astype(data_type)
        ind = np.random.choice(first_dim, index_dim,
                               replace=False).astype(ind_type)
        x = (rand_array(index_dim, *extra_dims) * 10).astype(data_type)
        self.assertReferenceChecks(gc, op, [d, ind, x], ref, threshold=1e-3)
if __name__ == "__main__":
import unittest
unittest.main()
| {
"content_hash": "33aaf9aa2486e2fca77ae67e07fe6699",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 83,
"avg_line_length": 39.45679012345679,
"alnum_prop": 0.5472465581977471,
"repo_name": "xzturn/caffe2",
"id": "38dfdf3764ab29d74ad9004fdf341ba2cb068e9e",
"size": "3196",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "caffe2/python/operator_test/sparse_ops_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "3296"
},
{
"name": "C",
"bytes": "678918"
},
{
"name": "C++",
"bytes": "5480393"
},
{
"name": "CMake",
"bytes": "323261"
},
{
"name": "CSS",
"bytes": "2196"
},
{
"name": "Cuda",
"bytes": "2013333"
},
{
"name": "HTML",
"bytes": "5203"
},
{
"name": "Makefile",
"bytes": "15290"
},
{
"name": "Metal",
"bytes": "41257"
},
{
"name": "Objective-C",
"bytes": "4053"
},
{
"name": "Objective-C++",
"bytes": "249566"
},
{
"name": "Python",
"bytes": "3658352"
},
{
"name": "Shell",
"bytes": "65206"
}
],
"symlink_target": ""
} |
from django.contrib import admin
from models import Source, Feed, Entry, MetaData
class FeedAdmin(admin.ModelAdmin):
    """Admin configuration for Feed; currently default behavior only."""
    # NOTE(review): slug prepopulation is commented out -- confirm whether
    # Feed still has a slug field before re-enabling.
    #prepopulated_fields = {"slug": ("title",)}
    pass
class EntryAdmin(admin.ModelAdmin):
    """Admin configuration for Entry rows."""
    #date_hierarchy = 'date'
    list_display = ('title', 'feed')  # columns shown in the changelist
    search_fields = ['title', 'description']  # fields scanned by the admin search box
# Register the models with the admin site; Feed and Entry use the
# customized ModelAdmin classes defined above.
admin.site.register(Source)
admin.site.register(Feed, FeedAdmin)
admin.site.register(Entry, EntryAdmin)
admin.site.register(MetaData)
| {
"content_hash": "1a98e49e7efb3af2e6c0508a9a85a02d",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 48,
"avg_line_length": 28.625,
"alnum_prop": 0.7248908296943232,
"repo_name": "florentin/django-niftyurls",
"id": "65b796f8f73d5b13df0492c7d09573b6d4a5395f",
"size": "458",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "niftyurls/admin.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "44096"
},
{
"name": "Python",
"bytes": "22839"
}
],
"symlink_target": ""
} |
import asyncio
import time
def callback(n, loop):
    """Print the callback id and the loop time at which it ran."""
    message = 'callback {} invoked at {}'.format(n, loop.time())
    print(message)
async def main(loop):
    """Register callbacks at absolute loop times and wait for them to fire."""
    start = loop.time()
    print('clock time: {}'.format(time.time()))
    print('loop time: {}'.format(start))
    print('registering callbacks')
    # Registration order does not affect firing order: call_soon fires
    # first, then the call_at callbacks in time order (+0.1 before +0.2).
    loop.call_soon(callback, 3, loop)
    loop.call_at(start + 0.1, callback, 2, loop)
    loop.call_at(start + 0.2, callback, 1, loop)
    await asyncio.sleep(1)
# NOTE(review): asyncio.get_event_loop() at module level is deprecated in
# modern Python; asyncio.run(main(...)) is the current idiom -- kept here
# to match the original example.
event_loop = asyncio.get_event_loop()
try:
    print('entering event loop')
    event_loop.run_until_complete(main(event_loop))
finally:
    # Always close the loop, even if main() raises.
    print('closing event loop')
    event_loop.close()
"content_hash": "f0d011725689d826d1d4d156c1fb5b55",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 61,
"avg_line_length": 22.75,
"alnum_prop": 0.6436420722135008,
"repo_name": "scotthuang1989/Python-3-Module-of-the-Week",
"id": "72db460b7766e9087080f17dbaf5a71255454a89",
"size": "637",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "concurrency/asyncio/asyncio_call_at.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "913525"
},
{
"name": "Python",
"bytes": "53855"
},
{
"name": "Shell",
"bytes": "91"
}
],
"symlink_target": ""
} |
"""Global constants and classes for LogicInference dataset generation.
"""
import dataclasses
from typing import Any, List
# Global variables:
NEXT_RENAME_INDEX = 1
# Symbol pools used when generating formulas.
PROPOSITION_NAMES = ["p", "q", "r", "s", "t", "u", "w"]
FUNCTOR_NAMES = ["P", "Q", "R", "S", "T", "U", "W"]
CONSTANT_NAMES = ["a", "b", "c", "d", "e", "f", "g", "h", "i", "j"]
VARIABLE_NAMES = ["var x", "var y", "var z"]

# Each rule has 6 fields (see InferenceRule below):
# (premises, inferences, contradictions, unrelated, propositions, rule_name)
PROPOSITIONAL_INFERENCE_RULES = []
QUANTIFIED_INFERENCE_RULES = []
ALL_INFERENCE_RULES = []
ALL_RULE_NAMES = []

# Counters to keep track of how many examples of each type are actually
# generated. And for each problem type, we also count the number of times we
# generate examples with contradictions, and with certain other special cases.
EXAMPLE_PROBLEM_TYPES = [
    "1",
    "2a", "2a-cont", "2a-empty",
    "2b", "2b-cont", "2b-empty",
    "3a", "3a-cont", "3a-premise", "3a-no", "3a-no-1",
    "3a-unrelated",
    "3b", "3b-cont", "3b-premise", "3b-no", "3b-no-1",
    "3b-unrelated"]
EXAMPLE_TYPE_STATS = {}
# Common logic inference rules. For each rule we provide:
# - the premises (list of clauses)
# - the conclusions that can be drawn (list of clauses)
# - a collection of clauses that would contradict the premises
# - a collection of clauses that cannot be inferred from the premises, and
# are hence, unrelated.
# - the set of atomic clauses (propositions) that appear in the lists above.
# - the rule name.
# These lists are non-exhaustive, and are just used to generate candidate
# inferences for the training examples.
@dataclasses.dataclass
class InferenceRule:
    """A logic inference rule template.

    The clause lists are non-exhaustive and are used to generate candidate
    inferences for training examples.
    """
    premises: List[Any]        # clauses assumed to hold
    inferences: List[Any]      # conclusions that can be drawn from the premises
    contradictions: List[Any]  # clauses that would contradict the premises
    unrelated: List[Any]       # clauses that cannot be inferred from the premises
    propositions: List[Any]    # atomic clauses appearing in the lists above
    rule_name: str             # identifier of the rule
# Each inference problem is a 6-tuple consisting of:
# - premises
# - inferences: [clause, reasoning chain]. Where reasoning chains is
# a ("premises, inferences, name") list where premises is a subset
# of original premises + inferences already obtained.
# - contradictions: same as above, with the last step being the contradiction.
# - unrelated: just a plain list of clauses.
# - propositions: list of all atomic clauses appearning in all the lists before.
# - contains_contradiction: whether the premises lead to a contradiction or not.
@dataclasses.dataclass
class InferenceProblem:
    """A concrete inference problem built from premises and derived clauses."""
    premises: List[Any]
    inferences: List[Any]      # [clause, reasoning chain] entries derivable from premises
    contradictions: List[Any]  # like inferences, with the last step being a contradiction
    unrelated: List[Any]       # plain list of clauses unrelated to the premises
    propositions: List[Any]    # all atomic clauses appearing in the lists above
    contains_contradiction: bool  # whether the premises lead to a contradiction
@dataclasses.dataclass
class Example:
    """A single (input text, target text) training example."""
    inputs: str               # model input string
    targets: str              # expected model output string
    example_type: str         # one of EXAMPLE_PROBLEM_TYPES
    problem: InferenceProblem  # the underlying inference problem
| {
"content_hash": "5bf662e61dd715a8abe877ebf5b6afa8",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 80,
"avg_line_length": 33.58024691358025,
"alnum_prop": 0.6970588235294117,
"repo_name": "google-research/google-research",
"id": "3452b6487b451352f5547415a6f6641c9c8b8f63",
"size": "3328",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "logic_inference_dataset/logic_inference_lib.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "9817"
},
{
"name": "C++",
"bytes": "4166670"
},
{
"name": "CMake",
"bytes": "6412"
},
{
"name": "CSS",
"bytes": "27092"
},
{
"name": "Cuda",
"bytes": "1431"
},
{
"name": "Dockerfile",
"bytes": "7145"
},
{
"name": "Gnuplot",
"bytes": "11125"
},
{
"name": "HTML",
"bytes": "77599"
},
{
"name": "ImageJ Macro",
"bytes": "50488"
},
{
"name": "Java",
"bytes": "487585"
},
{
"name": "JavaScript",
"bytes": "896512"
},
{
"name": "Julia",
"bytes": "67986"
},
{
"name": "Jupyter Notebook",
"bytes": "71290299"
},
{
"name": "Lua",
"bytes": "29905"
},
{
"name": "MATLAB",
"bytes": "103813"
},
{
"name": "Makefile",
"bytes": "5636"
},
{
"name": "NASL",
"bytes": "63883"
},
{
"name": "Perl",
"bytes": "8590"
},
{
"name": "Python",
"bytes": "53790200"
},
{
"name": "R",
"bytes": "101058"
},
{
"name": "Roff",
"bytes": "1208"
},
{
"name": "Rust",
"bytes": "2389"
},
{
"name": "Shell",
"bytes": "730444"
},
{
"name": "Smarty",
"bytes": "5966"
},
{
"name": "Starlark",
"bytes": "245038"
}
],
"symlink_target": ""
} |
"Supporting objects and functions to convert Matplotlib objects into Bokeh."
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2014, Continuum Analytics, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import absolute_import
import itertools
import warnings
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from .models.glyphs import (Asterisk, Circle, Cross, Diamond, InvertedTriangle,
Line, MultiLine, Patches, Square, Text, Triangle, X)
from .mplexporter.exporter import Exporter
from .mplexporter.renderers import Renderer
from .mpl_helpers import (convert_dashes, delete_last_col, get_props_cycled,
is_ax_end, xkcd_line)
from .models import (ColumnDataSource, DataRange1d, DatetimeAxis, GlyphRenderer,
Grid, GridPlot, LinearAxis, PanTool, Plot, PreviewSaveTool,
ResetTool, WheelZoomTool)
from .plotting import (curdoc, output_file, output_notebook, output_server,
DEFAULT_TOOLS)
from .plotting_helpers import _process_tools_arg
# Names that we want in this namespace (fool pyflakes)
(PanTool, ResetTool, PreviewSaveTool, WheelZoomTool)
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
class BokehRenderer(Renderer):
def __init__(self, pd_obj, xkcd):
"Initial setup."
self.fig = None
self.pd_obj = pd_obj
self.xkcd = xkcd
self.source = ColumnDataSource()
self.xdr = DataRange1d()
self.ydr = DataRange1d()
self.non_text = [] # to save the text we don't want to convert by draw_text
def open_figure(self, fig, props):
"Get the main plot properties and create the plot."
self.width = int(props['figwidth'] * props['dpi'])
self.height = int(props['figheight'] * props['dpi'])
self.plot = Plot(x_range=self.xdr,
y_range=self.ydr,
plot_width=self.width,
plot_height=self.height)
def close_figure(self, fig):
"Complete the plot: add tools."
# Add tools
tool_objs = _process_tools_arg(self.plot, DEFAULT_TOOLS)
self.plot.add_tools(*tool_objs)
# Simple or Grid plot setup
if len(fig.axes) <= 1:
self.fig = self.plot
else:
# This list comprehension splits the plot.renderers list at the "marker"
# points returning small sublists corresponding with each subplot.
subrends = [list(x[1]) for x in itertools.groupby(
self.plot.renderers, lambda x: is_ax_end(x)) if not x[0]]
plots = []
for i, axes in enumerate(fig.axes):
# create a new plot for each subplot
_plot = Plot(x_range=self.xdr,
y_range=self.ydr,
plot_width=self.width,
plot_height=self.height)
_plot.title = ""
# and add new tools
_tool_objs = _process_tools_arg(_plot, DEFAULT_TOOLS)
_plot.add_tools(*_tool_objs)
# clean the plot ref from axis and grids
_plot_rends = subrends[i]
for r in _plot_rends:
if not isinstance(r, GlyphRenderer):
r.plot = None
# add all the renderers into the new subplot
_plot.add_layout(_plot_rends[0], 'below') # xaxis
_plot.add_layout(_plot_rends[1], 'left') # yaxis
_plot.add_layout(_plot_rends[2]) # xgrid
_plot.add_layout(_plot_rends[3]) # ygrid
for r in _plot_rends[4:]: # all the glyphs
_plot.renderers.append(r)
plots.append(_plot)
(a, b, c) = fig.axes[0].get_geometry()
p = np.array(plots)
n = np.resize(p, (a, b))
grid = GridPlot(children=n.tolist())
self.fig = grid
def open_axes(self, ax, props):
"Get axes data and create the axes and grids"
# Get axes, title and grid into class attributes.
self.ax = ax
self.plot.title = ax.get_title()
# to avoid title conversion by draw_text later
self.non_text.append(self.plot.title)
self.grid = ax.get_xgridlines()[0]
# Add axis
bxaxis = self.make_axis(ax.xaxis, "below", props['xscale'])
byaxis = self.make_axis(ax.yaxis, "left", props['yscale'])
# Add grids
self.make_grid(bxaxis, 0)
self.make_grid(byaxis, 1)
# Setup collections info
nones = ("", " ", "None", "none", None)
cols = [col for col in self.ax.collections if col.get_paths() not in nones]
# Add collections renderers
[self.make_line_collection(col) for col in cols if isinstance(col, mpl.collections.LineCollection)]
[self.make_poly_collection(col) for col in cols if isinstance(col, mpl.collections.PolyCollection)]
def close_axes(self, ax):
"Complete the axes adding axes-dependent plot props"
background_fill = ax.get_axis_bgcolor()
if background_fill == 'w':
background_fill = 'white'
self.plot.background_fill = background_fill
if self.xkcd:
self.plot.title_text_font = "Comic Sans MS, Textile, cursive"
self.plot.title_text_font_style = "bold"
self.plot.title_text_color = "black"
# Add a "marker" Glyph to help the plot.renderers splitting in the GridPlot build
dummy_source = ColumnDataSource(data=dict(name="ax_end"))
self.plot.renderers.append(GlyphRenderer(data_source=dummy_source, glyph=X()))
def open_legend(self, legend, props):
pass
def close_legend(self, legend):
pass
def draw_line(self, data, coordinates, style, label, mplobj=None):
"Given a mpl line2d instance create a Bokeh Line glyph."
_x = data[:, 0]
if self.pd_obj is True:
try:
x = [pd.Period(ordinal=int(i), freq=self.ax.xaxis.freq).to_timestamp() for i in _x]
except AttributeError: # we probably can make this one more intelligent later
x = _x
else:
x = _x
y = data[:, 1]
if self.xkcd:
x, y = xkcd_line(x, y)
line = Line()
line.x = self.source.add(x)
line.y = self.source.add(y)
line.line_color = style['color']
line.line_width = style['linewidth']
line.line_alpha = style['alpha']
line.line_dash = [int(i) for i in style['dasharray'].split(",")] # str2list(int)
#style['zorder'] # not in Bokeh
#line.line_join = line2d.get_solid_joinstyle() # not in mplexporter
#line.line_cap = cap_style_map[line2d.get_solid_capstyle()] # not in mplexporter
if self.xkcd:
line.line_width = 3
self.plot.add_glyph(self.source, line)
def draw_markers(self, data, coordinates, style, label, mplobj=None):
"Given a mpl line2d instance create a Bokeh Marker glyph."
x = data[:, 0]
y = data[:, 1]
marker_map = {
"o": Circle,
"s": Square,
"+": Cross,
"^": Triangle,
"v": InvertedTriangle,
"x": X,
"D": Diamond,
"*": Asterisk,
}
# Not all matplotlib markers are currently handled; fall back to Circle if we encounter an
# unhandled marker. See http://matplotlib.org/api/markers_api.html for a list of markers.
try:
marker = marker_map[style['marker']]()
except KeyError:
warnings.warn("Unable to handle marker: %s; defaulting to Circle" % style['marker'])
marker = Circle()
marker.x = self.source.add(x)
marker.y = self.source.add(y)
marker.line_color = style['edgecolor']
marker.fill_color = style['facecolor']
marker.line_width = style['edgewidth']
marker.size = style['markersize']
marker.fill_alpha = marker.line_alpha = style['alpha']
#style['zorder'] # not in Bokeh
self.plot.add_glyph(self.source, marker)
def draw_path_collection(self, paths, path_coordinates, path_transforms,
offsets, offset_coordinates, offset_order,
styles, mplobj=None):
"""Path not implemented in Bokeh, but we have our own line ans poly
collection implementations, so passing here to avoid the NonImplemented
error.
"""
pass
def draw_text(self, text, position, coordinates, style,
text_type=None, mplobj=None):
"Given a mpl text instance create a Bokeh Text glyph."
# mpl give you the title and axes names as a text object (with specific locations)
# inside the plot itself. That does not make sense inside Bokeh, so we
# just skip the title and axes names from the conversion and covert any other text.
if text not in self.non_text:
x, y = position
text = Text(x=x, y=y, text=[text])
alignment_map = {"center": "middle", "top": "top", "bottom": "bottom", "baseline": "bottom"}
# baseline not implemented in Bokeh, deafulting to bottom.
text.text_alpha = style['alpha']
text.text_font_size = "%dpx" % style['fontsize']
text.text_color = style['color']
text.text_align = style['halign']
text.text_baseline = alignment_map[style['valign']]
text.angle = style['rotation']
#style['zorder'] # not in Bokeh
## Using get_fontname() works, but it's oftentimes not available in the browser,
## so it's better to just use the font family here.
#text.text_font = mplText.get_fontname()) not in mplexporter
#text.text_font = mplText.get_fontfamily()[0] # not in mplexporter
#text.text_font_style = fontstyle_map[mplText.get_fontstyle()] # not in mplexporter
## we don't really have the full range of font weights, but at least handle bold
#if mplText.get_weight() in ("bold", "heavy"):
#text.text_font_style = bold
self.plot.add_glyph(self.source, text)
def draw_image(self, imdata, extent, coordinates, style, mplobj=None):
pass
def make_axis(self, ax, location, scale):
"Given a mpl axes instance, returns a Bokeh LinearAxis object."
# TODO:
# * handle log scaling
# * map `labelpad` to `major_label_standoff`
# * deal with minor ticks once BokehJS supports them
# * handle custom tick locations once that is added to bokehJS
# we need to keep the current axes names to avoid writing them in draw_text
self.non_text.append(ax.get_label_text())
if scale == "linear":
laxis = LinearAxis(axis_label=ax.get_label_text())
elif scale == "date":
laxis = DatetimeAxis(axis_label=ax.get_label_text())
self.plot.add_layout(laxis, location)
# First get the label properties by getting an mpl.Text object
#label = ax.get_label()
#self.text_props(label, laxis, prefix="axis_label_")
#self.draw_text(label, position, coordinates, style, text_type="axis_label_")
# To get the tick label format, we look at the first of the tick labels
# and assume the rest are formatted similarly.
#ticktext = ax.get_ticklabels()[0]
#self.text_props(ticktext, laxis, prefix="major_label_")
#self.draw_text(ticktext, position, coordinates, style, text_type="major_label_")
#newaxis.bounds = axis.get_data_interval() # I think this is the right func...
if self.xkcd:
laxis.axis_line_width = 3
laxis.axis_label_text_font = "Comic Sans MS, Textile, cursive"
laxis.axis_label_text_font_style = "bold"
laxis.axis_label_text_color = "black"
laxis.major_label_text_font = "Comic Sans MS, Textile, cursive"
laxis.major_label_text_font_style = "bold"
laxis.major_label_text_color = "black"
return laxis
def make_grid(self, baxis, dimension):
"Given a mpl axes instance, returns a Bokeh Grid object."
lgrid = Grid(dimension=dimension,
ticker=baxis.ticker,
grid_line_color=self.grid.get_color(),
grid_line_width=self.grid.get_linewidth())
self.plot.add_layout(lgrid)
def make_line_collection(self, col):
"Given a mpl collection instance create a Bokeh MultiLine glyph."
xydata = col.get_segments()
t_xydata = [np.transpose(seg) for seg in xydata]
xs = [t_xydata[x][0] for x in range(len(t_xydata))]
ys = [t_xydata[x][1] for x in range(len(t_xydata))]
if self.xkcd:
xkcd_xs = [xkcd_line(xs[i], ys[i])[0] for i in range(len(xs))]
xkcd_ys = [xkcd_line(xs[i], ys[i])[1] for i in range(len(ys))]
xs = xkcd_xs
ys = xkcd_ys
multiline = MultiLine()
multiline.xs = self.source.add(xs)
multiline.ys = self.source.add(ys)
self.multiline_props(multiline, col)
self.plot.add_glyph(self.source, multiline)
def make_poly_collection(self, col):
"Given a mpl collection instance create a Bokeh Patches glyph."
paths = col.get_paths()
polygons = [paths[i].to_polygons() for i in range(len(paths))]
polygons = [np.transpose(delete_last_col(polygon)) for polygon in polygons]
xs = [polygons[i][0] for i in range(len(polygons))]
ys = [polygons[i][1] for i in range(len(polygons))]
patches = Patches()
patches.xs = self.source.add(xs)
patches.ys = self.source.add(ys)
self.patches_props(patches, col)
self.plot.add_glyph(self.source, patches)
def multiline_props(self, multiline, col):
    """Transfer line styling (color, width, alpha, dashing) from a mpl
    collection onto a Bokeh MultiLine glyph.
    """
    hex_colors = get_props_cycled(col, col.get_colors(), fx=lambda c: mpl.colors.rgb2hex(c))
    line_widths = get_props_cycled(col, col.get_linewidth())
    multiline.line_color = self.source.add(hex_colors)
    multiline.line_width = self.source.add(line_widths)
    multiline.line_alpha = col.get_alpha()
    # mpl stores dashing as (offset, on_off_pattern); an empty/None pattern
    # means a solid line.
    dash_offset, dash_pattern = col.get_linestyle()[0]
    on_off = [int(step) for step in dash_pattern] if dash_pattern else []
    multiline.line_dash_offset = convert_dashes(dash_offset)
    multiline.line_dash = list(convert_dashes(tuple(on_off)))
def patches_props(self, patches, col):
    """Transfer fill and outline styling from a mpl collection onto a
    Bokeh Patches glyph.
    """
    fill_colors = get_props_cycled(col, col.get_facecolors(), fx=lambda c: mpl.colors.rgb2hex(c))
    patches.fill_color = self.source.add(fill_colors)
    edge_colors = get_props_cycled(col, col.get_edgecolors(), fx=lambda c: mpl.colors.rgb2hex(c))
    patches.line_color = self.source.add(edge_colors)
    edge_widths = get_props_cycled(col, col.get_linewidth())
    patches.line_width = self.source.add(edge_widths)
    patches.line_alpha = col.get_alpha()
    # mpl stores dashing as (offset, on_off_pattern); an empty/None pattern
    # means a solid outline.
    dash_offset, dash_pattern = col.get_linestyle()[0]
    on_off = [int(step) for step in dash_pattern] if dash_pattern else []
    patches.line_dash_offset = convert_dashes(dash_offset)
    patches.line_dash = list(convert_dashes(tuple(on_off)))
def to_bokeh(fig=None, name=None, server=None, notebook=False, pd_obj=True, xkcd=False):
    """Display a Matplotlib Figure through Bokeh.

    The resulting Bokeh plot can be stored in a standalone HTML file, as a
    document on a Bokeh plot server, or embedded directly into an IPython
    Notebook output cell.

    Parameters
    ----------
    fig: matplotlib.figure.Figure
        The figure to display. If None or not specified, the current figure
        is used.
    name: str (default=None)
        If provided, the Bokeh figure is saved into this HTML file and then
        displayed in a web browser.
    server: str (default=None)
        Fully specified URL of a Bokeh plot server. The default Bokeh plot
        server URL is "http://localhost:5006", or simply "default".
    notebook: bool (default=False)
        Return an HTML object that the IPython notebook can display. Can
        also be combined with a plot server by specifying its URL.
    pd_obj: bool (default=True)
        The implementation assumes you are plotting using pandas. Set to
        False to plot a datetime x-axis with non-pandas interfaces.
    xkcd: bool (default=False)
        If True, the Bokeh figure is rendered in xkcd style.
    """
    if fig is None:
        fig = plt.gcf()

    # Route the output: named file, plot server, notebook, or a default file.
    if name:
        if server:
            output_server(name, url=server)
        else:
            output_file(name + ".html")
    elif server:
        if notebook:
            output_notebook(url=server)
        else:
            output_server("unnameuuuuuuuuuuuuuud", url=server)
    elif notebook:
        output_notebook()
    else:
        output_file("Unnamed.html")

    doc = curdoc()
    renderer = BokehRenderer(pd_obj, xkcd)
    exporter = Exporter(renderer)
    exporter.run(fig)
    doc._current_plot = renderer.fig  # TODO (bev) do not rely on private attrs
    doc.add(renderer.fig)
    return renderer.fig
| {
"content_hash": "36b748ee192776e1110f4c0fed90e179",
"timestamp": "",
"source": "github",
"line_count": 452,
"max_line_length": 107,
"avg_line_length": 41.04203539823009,
"alnum_prop": 0.5792679639911595,
"repo_name": "canavandl/bokeh",
"id": "a915d211f224709a07b5edcd247fe41518bc8dac",
"size": "18551",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bokeh/mpl.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "5455"
},
{
"name": "CSS",
"bytes": "413395"
},
{
"name": "CoffeeScript",
"bytes": "1951961"
},
{
"name": "HTML",
"bytes": "1551595"
},
{
"name": "JavaScript",
"bytes": "349644"
},
{
"name": "Makefile",
"bytes": "5785"
},
{
"name": "Python",
"bytes": "1645659"
},
{
"name": "Scala",
"bytes": "29550"
},
{
"name": "Shell",
"bytes": "18120"
}
],
"symlink_target": ""
} |
from ._core import Insn
from .basic import *
from .control_flow import *
from .events import *
from .execute import *
from .func_prop import *
from .nbt import *
from .score import *
from .text import *
from .type_ctors import *
| {
"content_hash": "244a4bd0ca5a25053cc0682a621d745a",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 27,
"avg_line_length": 20.90909090909091,
"alnum_prop": 0.7217391304347827,
"repo_name": "simon816/Command-Block-Assembly",
"id": "47a6196b6655346c1f2070f84b68a5e46008c9c1",
"size": "230",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cmd_ir/instructions/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "3901"
},
{
"name": "Python",
"bytes": "558560"
},
{
"name": "Shell",
"bytes": "315"
}
],
"symlink_target": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.