from os import getenv
from raygun4py.raygunprovider import RaygunSender
from sanic import Sanic
from sanic.exceptions import SanicException
from sanic.handlers import ErrorHandler
class RaygunExceptionReporter(ErrorHandler):
def __init__(self, raygun_api_key=None):
super().__init__()
if raygun_api_key is None:
raygun_api_key = getenv("RAYGUN_API_KEY")
self.sender = RaygunSender(raygun_api_key)
def default(self, request, exception):
self.sender.send_exception(exception=exception)
return super().default(request, exception)
raygun_error_reporter = RaygunExceptionReporter()
app = Sanic("Example", error_handler=raygun_error_reporter)
@app.route("/raise")
async def test(request):
raise SanicException("You Broke It!")
if __name__ == "__main__":
    app.run(host="0.0.0.0", port=int(getenv("PORT", 8080)))
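# A hedged usage sketch (not part of the original example): with Sanic's test
# client the handler above can be exercised without a running server; hitting
# /raise invokes RaygunExceptionReporter.default(), which forwards the
# SanicException to Raygun before the standard error response is returned.
#
#     _, response = app.test_client.get("/raise")
#     assert response.status == 500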
|
{
"content_hash": "daf646f11925ada7439dba797131e846",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 59,
"avg_line_length": 26.636363636363637,
"alnum_prop": 0.6973833902161547,
"repo_name": "ashleysommer/sanic",
"id": "2270ea9d18276e4d2ab53a1ccb4143eb3917551c",
"size": "879",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/raygun_example.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "159"
},
{
"name": "Go",
"bytes": "482"
},
{
"name": "HTML",
"bytes": "1173"
},
{
"name": "Makefile",
"bytes": "2412"
},
{
"name": "Python",
"bytes": "962491"
}
],
"symlink_target": ""
}
|
from unittest import TestCase
import py3utils
class TestUnicodeUtils(TestCase):
def test_get_str(self):
        # GBK-encoded bytes should round-trip back to the original string
data = '世界与中国'
res = py3utils.UnicodeUtils.get_str(data.encode('gbk'))
self.assertTrue(res == data)
|
{
"content_hash": "da4cd9cb933653f8d10c7a38b4e1619d",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 63,
"avg_line_length": 22.545454545454547,
"alnum_prop": 0.6451612903225806,
"repo_name": "hezhiming/py3utils",
"id": "73e5aa17a3100aa26c786d799ab845898b9ab69f",
"size": "360",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_unicodeUtils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "18207"
},
{
"name": "Shell",
"bytes": "858"
}
],
"symlink_target": ""
}
|
"""Main entry point into the Token Persistence service."""
import abc
import copy
from oslo_config import cfg
from oslo_log import log
from oslo_utils import timeutils
import six
from keystone.common import cache
from keystone.common import dependency
from keystone.common import manager
from keystone import exception
from keystone.i18n import _LW
from keystone.token import utils
CONF = cfg.CONF
LOG = log.getLogger(__name__)
MEMOIZE = cache.get_memoization_decorator(section='token')
REVOCATION_MEMOIZE = cache.get_memoization_decorator(
section='token', expiration_section='revoke')
@dependency.requires('assignment_api', 'identity_api', 'resource_api',
'token_provider_api', 'trust_api')
class PersistenceManager(manager.Manager):
"""Default pivot point for the Token Persistence backend.
See :mod:`keystone.common.manager.Manager` for more details on how this
dynamically calls the backend.
"""
driver_namespace = 'keystone.token.persistence'
def __init__(self):
super(PersistenceManager, self).__init__(CONF.token.driver)
def _assert_valid(self, token_id, token_ref):
"""Raise TokenNotFound if the token is expired."""
current_time = timeutils.normalize_time(timeutils.utcnow())
expires = token_ref.get('expires')
if not expires or current_time > timeutils.normalize_time(expires):
raise exception.TokenNotFound(token_id=token_id)
def get_token(self, token_id):
if not token_id:
# NOTE(morganfainberg): There are cases when the
            # context['token_id'] will in fact be None. This also saves
# a round-trip to the backend if we don't have a token_id.
raise exception.TokenNotFound(token_id='')
unique_id = utils.generate_unique_id(token_id)
token_ref = self._get_token(unique_id)
# NOTE(morganfainberg): Lift expired checking to the manager, there is
# no reason to make the drivers implement this check. With caching,
# self._get_token could return an expired token. Make sure we behave
# as expected and raise TokenNotFound on those instances.
self._assert_valid(token_id, token_ref)
return token_ref
@MEMOIZE
def _get_token(self, token_id):
# Only ever use the "unique" id in the cache key.
return self.driver.get_token(token_id)
def create_token(self, token_id, data):
unique_id = utils.generate_unique_id(token_id)
data_copy = copy.deepcopy(data)
data_copy['id'] = unique_id
ret = self.driver.create_token(unique_id, data_copy)
if MEMOIZE.should_cache(ret):
# NOTE(morganfainberg): when doing a cache set, you must pass the
# same arguments through, the same as invalidate (this includes
# "self"). First argument is always the value to be cached
self._get_token.set(ret, self, unique_id)
return ret
def delete_token(self, token_id):
if not CONF.token.revoke_by_id:
return
unique_id = utils.generate_unique_id(token_id)
self.driver.delete_token(unique_id)
self._invalidate_individual_token_cache(unique_id)
self.invalidate_revocation_list()
def delete_tokens(self, user_id, tenant_id=None, trust_id=None,
consumer_id=None):
if not CONF.token.revoke_by_id:
return
token_list = self.driver.delete_tokens(user_id, tenant_id, trust_id,
consumer_id)
for token_id in token_list:
unique_id = utils.generate_unique_id(token_id)
self._invalidate_individual_token_cache(unique_id)
self.invalidate_revocation_list()
@REVOCATION_MEMOIZE
def list_revoked_tokens(self):
return self.driver.list_revoked_tokens()
def invalidate_revocation_list(self):
# NOTE(morganfainberg): Note that ``self`` needs to be passed to
# invalidate() because of the way the invalidation method works on
# determining cache-keys.
self.list_revoked_tokens.invalidate(self)
def delete_tokens_for_domain(self, domain_id):
"""Delete all tokens for a given domain.
It will delete all the project-scoped tokens for the projects
that are owned by the given domain, as well as any tokens issued
to users that are owned by this domain.
However, deletion of domain_scoped tokens will still need to be
implemented as stated in TODO below.
"""
if not CONF.token.revoke_by_id:
return
projects = self.resource_api.list_projects()
for project in projects:
if project['domain_id'] == domain_id:
for user_id in self.assignment_api.list_user_ids_for_project(
project['id']):
self.delete_tokens_for_user(user_id, project['id'])
# TODO(morganfainberg): implement deletion of domain_scoped tokens.
users = self.identity_api.list_users(domain_id)
user_ids = (user['id'] for user in users)
self.delete_tokens_for_users(user_ids)
def delete_tokens_for_user(self, user_id, project_id=None):
"""Delete all tokens for a given user or user-project combination.
This method adds in the extra logic for handling trust-scoped token
revocations in a single call instead of needing to explicitly handle
trusts in the caller's logic.
"""
if not CONF.token.revoke_by_id:
return
self.delete_tokens(user_id, tenant_id=project_id)
for trust in self.trust_api.list_trusts_for_trustee(user_id):
# Ensure we revoke tokens associated to the trust / project
# user_id combination.
self.delete_tokens(user_id, trust_id=trust['id'],
tenant_id=project_id)
for trust in self.trust_api.list_trusts_for_trustor(user_id):
# Ensure we revoke tokens associated to the trust / project /
# user_id combination where the user_id is the trustor.
# NOTE(morganfainberg): This revocation is a bit coarse, but it
# covers a number of cases such as disabling of the trustor user,
# deletion of the trustor user (for any number of reasons). It
# might make sense to refine this and be more surgical on the
# deletions (e.g. don't revoke tokens for the trusts when the
# trustor changes password). For now, to maintain previous
# functionality, this will continue to be a bit overzealous on
# revocations.
self.delete_tokens(trust['trustee_user_id'], trust_id=trust['id'],
tenant_id=project_id)
def delete_tokens_for_users(self, user_ids, project_id=None):
"""Delete all tokens for a list of user_ids.
:param user_ids: list of user identifiers
:param project_id: optional project identifier
"""
if not CONF.token.revoke_by_id:
return
for user_id in user_ids:
self.delete_tokens_for_user(user_id, project_id=project_id)
def _invalidate_individual_token_cache(self, token_id):
        # NOTE(morganfainberg): invalidate takes the exact same arguments as
        # the normal method, which means we need to pass "self" in (which gets
# stripped off).
# FIXME(morganfainberg): Does this cache actually need to be
# invalidated? We maintain a cached revocation list, which should be
# consulted before accepting a token as valid. For now we will
# do the explicit individual token invalidation.
self._get_token.invalidate(self, token_id)
self.token_provider_api.invalidate_individual_token_cache(token_id)
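# A minimal illustrative flow (a hypothetical helper, never called by
# keystone) that strings together the manager calls above to show how cache
# priming in create_token() pairs with invalidation in delete_token(). The
# token data must carry a future 'expires' value, otherwise get_token() raises
# TokenNotFound via _assert_valid().
def _example_token_lifecycle(manager, token_id, data):
    ref = manager.create_token(token_id, data)   # primes the _get_token cache
    fetched = manager.get_token(token_id)        # served from cache when enabled
    assert fetched['id'] == ref['id']
    manager.delete_token(token_id)               # invalidates the cached entry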
@dependency.requires('token_provider_api')
@dependency.provider('token_api')
class Manager(object):
"""The token_api provider.
This class is a proxy class to the token_provider_api's persistence
manager.
"""
def __init__(self):
# NOTE(morganfainberg): __init__ is required for dependency processing.
super(Manager, self).__init__()
def __getattr__(self, item):
"""Forward calls to the `token_provider_api` persistence manager."""
# NOTE(morganfainberg): Prevent infinite recursion, raise an
# AttributeError for 'token_provider_api' ensuring that the dep
# injection doesn't infinitely try and lookup self.token_provider_api
# on _process_dependencies. This doesn't need an exception string as
# it should only ever be hit on instantiation.
if item == 'token_provider_api':
raise AttributeError()
f = getattr(self.token_provider_api._persistence, item)
LOG.warning(_LW('`token_api.%s` is deprecated as of Juno in favor of '
'utilizing methods on `token_provider_api` and may be '
'removed in Kilo.'), item)
setattr(self, item, f)
return f
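# A generic illustration of the lazy-forwarding pattern used by Manager above
# (the class and names below are illustrative, not keystone APIs): the guard
# in __getattr__ stops recursion before the target attribute is resolved, and
# setattr() caches the result so later lookups never re-enter __getattr__.
class _ExampleForwardingProxy(object):
    def __init__(self, target):
        self._target = target
    def __getattr__(self, item):
        if item == '_target':
            raise AttributeError(item)
        value = getattr(self._target, item)
        setattr(self, item, value)
        return value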
@six.add_metaclass(abc.ABCMeta)
class TokenDriverV8(object):
"""Interface description for a Token driver."""
@abc.abstractmethod
def get_token(self, token_id):
"""Get a token by id.
:param token_id: identity of the token
:type token_id: string
:returns: token_ref
:raises: keystone.exception.TokenNotFound
"""
raise exception.NotImplemented() # pragma: no cover
@abc.abstractmethod
def create_token(self, token_id, data):
"""Create a token by id and data.
:param token_id: identity of the token
:type token_id: string
:param data: dictionary with additional reference information
::
{
expires=''
id=token_id,
user=user_ref,
tenant=tenant_ref,
metadata=metadata_ref
}
:type data: dict
:returns: token_ref or None.
"""
raise exception.NotImplemented() # pragma: no cover
@abc.abstractmethod
def delete_token(self, token_id):
"""Deletes a token by id.
:param token_id: identity of the token
:type token_id: string
:returns: None.
:raises: keystone.exception.TokenNotFound
"""
raise exception.NotImplemented() # pragma: no cover
@abc.abstractmethod
def delete_tokens(self, user_id, tenant_id=None, trust_id=None,
consumer_id=None):
"""Deletes tokens by user.
If the tenant_id is not None, only delete the tokens by user id under
the specified tenant.
If the trust_id is not None, it will be used to query tokens and the
user_id will be ignored.
If the consumer_id is not None, only delete the tokens by consumer id
that match the specified consumer id.
:param user_id: identity of user
:type user_id: string
:param tenant_id: identity of the tenant
:type tenant_id: string
:param trust_id: identity of the trust
:type trust_id: string
:param consumer_id: identity of the consumer
:type consumer_id: string
:returns: The tokens that have been deleted.
:raises: keystone.exception.TokenNotFound
"""
if not CONF.token.revoke_by_id:
return
token_list = self._list_tokens(user_id,
tenant_id=tenant_id,
trust_id=trust_id,
consumer_id=consumer_id)
for token in token_list:
try:
self.delete_token(token)
except exception.NotFound:
pass
return token_list
@abc.abstractmethod
def _list_tokens(self, user_id, tenant_id=None, trust_id=None,
consumer_id=None):
"""Returns a list of current token_id's for a user
This is effectively a private method only used by the ``delete_tokens``
method and should not be called by anything outside of the
``token_api`` manager or the token driver itself.
:param user_id: identity of the user
:type user_id: string
:param tenant_id: identity of the tenant
:type tenant_id: string
:param trust_id: identity of the trust
:type trust_id: string
:param consumer_id: identity of the consumer
:type consumer_id: string
        :returns: list of token_ids
"""
raise exception.NotImplemented() # pragma: no cover
@abc.abstractmethod
def list_revoked_tokens(self):
"""Returns a list of all revoked tokens
:returns: list of token_id's
"""
raise exception.NotImplemented() # pragma: no cover
@abc.abstractmethod
def flush_expired_tokens(self):
"""Archive or delete tokens that have expired.
"""
raise exception.NotImplemented() # pragma: no cover
Driver = manager.create_legacy_driver(TokenDriverV8)
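# A minimal in-memory sketch of the TokenDriverV8 interface above. It is
# illustrative only (keystone provides its own backend drivers), keeps
# everything in plain dicts/lists, and its _list_tokens() ignores the
# tenant/trust/consumer filters for brevity.
class _ExampleInMemoryTokenDriver(TokenDriverV8):
    def __init__(self):
        self._tokens = {}
        self._revoked = []
    def get_token(self, token_id):
        try:
            return self._tokens[token_id]
        except KeyError:
            raise exception.TokenNotFound(token_id=token_id)
    def create_token(self, token_id, data):
        self._tokens[token_id] = data
        return data
    def delete_token(self, token_id):
        self._revoked.append(self.get_token(token_id))
        del self._tokens[token_id]
    def delete_tokens(self, user_id, tenant_id=None, trust_id=None,
                      consumer_id=None):
        # Reuse the base-class implementation, which walks _list_tokens().
        return super(_ExampleInMemoryTokenDriver, self).delete_tokens(
            user_id, tenant_id, trust_id, consumer_id)
    def _list_tokens(self, user_id, tenant_id=None, trust_id=None,
                     consumer_id=None):
        return [token_id for token_id, ref in self._tokens.items()
                if ref.get('user', {}).get('id') == user_id]
    def list_revoked_tokens(self):
        return self._revoked
    def flush_expired_tokens(self):
        pass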
|
{
"content_hash": "9c68a8f2389b89d86e1312f13e60c67f",
"timestamp": "",
"source": "github",
"line_count": 348,
"max_line_length": 79,
"avg_line_length": 37.91954022988506,
"alnum_prop": 0.6235222794786299,
"repo_name": "tobegit3hub/keystone_docker",
"id": "e68970ace5f192111303257999b9a178f269a7ae",
"size": "13782",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "keystone/token/persistence/core.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "665"
},
{
"name": "Python",
"bytes": "4048945"
},
{
"name": "Shell",
"bytes": "88"
}
],
"symlink_target": ""
}
|
from __future__ import print_function # this is here for the version check to work on Python 2.
import sys
if sys.version_info < (3, 5):
print("#" * 49, file=sys.stderr)
print("# mitmproxy only supports Python 3.5 and above! #", file=sys.stderr)
print("#" * 49, file=sys.stderr)
import os # noqa
import signal # noqa
from mitmproxy.tools import cmdline # noqa
from mitmproxy import exceptions # noqa
from mitmproxy import options # noqa
from mitmproxy import optmanager # noqa
from mitmproxy.proxy import config # noqa
from mitmproxy.proxy import server # noqa
from mitmproxy.utils import version_check # noqa
from mitmproxy.utils import debug # noqa
def assert_utf8_env():
spec = ""
for i in ["LANG", "LC_CTYPE", "LC_ALL"]:
spec += os.environ.get(i, "").lower()
if "utf" not in spec:
print(
"Error: mitmproxy requires a UTF console environment.",
file=sys.stderr
)
print(
"Set your LANG enviroment variable to something like en_US.UTF-8",
file=sys.stderr
)
sys.exit(1)
def process_options(parser, opts, args):
if args.version:
print(debug.dump_system_info())
sys.exit(0)
if args.quiet or args.options:
args.verbosity = 0
args.flow_detail = 0
adict = {}
for n in dir(args):
if n in opts:
adict[n] = getattr(args, n)
opts.merge(adict)
pconf = config.ProxyConfig(opts)
if opts.server:
try:
return server.ProxyServer(pconf)
except exceptions.ServerException as v:
print(str(v), file=sys.stderr)
sys.exit(1)
else:
return server.DummyServer(pconf)
def run(MasterKlass, args, extra=None): # pragma: no cover
"""
extra: Extra argument processing callable which returns a dict of
options.
"""
version_check.check_pyopenssl_version()
debug.register_info_dumpers()
opts = options.Options()
parser = cmdline.mitmdump(opts)
args = parser.parse_args(args)
master = None
try:
unknown = optmanager.load_paths(opts, args.conf)
server = process_options(parser, opts, args)
master = MasterKlass(opts, server)
master.addons.configure_all(opts, opts.keys())
remaining = opts.update_known(**unknown)
if remaining and opts.verbosity > 1:
print("Ignored options: %s" % remaining)
if args.options:
print(optmanager.dump_defaults(opts))
sys.exit(0)
opts.set(*args.setoptions)
if extra:
opts.update(**extra(args))
def cleankill(*args, **kwargs):
master.shutdown()
signal.signal(signal.SIGTERM, cleankill)
master.run()
except exceptions.OptionsError as e:
print("%s: %s" % (sys.argv[0], e), file=sys.stderr)
sys.exit(1)
except (KeyboardInterrupt, RuntimeError):
pass
return master
def mitmproxy(args=None): # pragma: no cover
if os.name == "nt":
print("Error: mitmproxy's console interface is not supported on Windows. "
"You can run mitmdump or mitmweb instead.", file=sys.stderr)
sys.exit(1)
assert_utf8_env()
from mitmproxy.tools import console
run(console.master.ConsoleMaster, args)
def mitmdump(args=None): # pragma: no cover
from mitmproxy.tools import dump
def extra(args):
if args.filter_args:
v = " ".join(args.filter_args)
return dict(
view_filter = v,
streamfile_filter = v,
)
return {}
m = run(dump.DumpMaster, args, extra)
if m and m.errorcheck.has_errored:
sys.exit(1)
def mitmweb(args=None): # pragma: no cover
from mitmproxy.tools import web
run(web.master.WebMaster, args)
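# A hedged sketch of the `extra` hook documented in run() above: any callable
# that takes the parsed args and returns a dict of option values can be
# passed, mirroring the filter-handling callable defined inside mitmdump().
# The option name below is illustrative and not verified against this
# mitmproxy version.
#
#     def _extra(args):
#         return {"anticache": True}
#
#     run(dump.DumpMaster, args, _extra)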
|
{
"content_hash": "c3231b233173e6af4e21a107cd14c68c",
"timestamp": "",
"source": "github",
"line_count": 133,
"max_line_length": 96,
"avg_line_length": 29.112781954887218,
"alnum_prop": 0.6082128099173554,
"repo_name": "xaxa89/mitmproxy",
"id": "6db232fc98cb2de0a8b0e4216586328fa8b09721",
"size": "3872",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mitmproxy/tools/main.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "17714"
},
{
"name": "HTML",
"bytes": "4270"
},
{
"name": "JavaScript",
"bytes": "150625"
},
{
"name": "PowerShell",
"bytes": "494"
},
{
"name": "Python",
"bytes": "1535155"
},
{
"name": "Shell",
"bytes": "3660"
}
],
"symlink_target": ""
}
|
"""
Set up the plot figures, axes, and items to be done for each frame.
This module is imported by the plotting routines and then the
function setplot is called to set the plot parameters.
"""
#--------------------------
def setplot(plotdata):
#--------------------------
"""
Specify what is to be plotted at each frame.
Input: plotdata, an instance of pyclaw.data.ClawPlotData.
Output: a modified version of plotdata.
"""
from pyclaw.plotters.frametools import var_limits
plimits = [-1., 1.]
ulimits = [-1., 1.]
xlimits = 'auto' # choose automatically
# Pressure:
# ---------
plotfigure = plotdata.new_plotfigure(name='Pressure', figno=1)
plotaxes = plotfigure.new_plotaxes(name='Pressure')
plotaxes.axescmd = 'subplot(1,1,1)'
plotaxes.xlimits = xlimits
plotaxes.ylimits = plimits
plotitem = plotaxes.new_plotitem(name='Pressure',plot_type='1d')
plotitem.plot_var = 0 # q[0] is the pressure
plotitem.plotstyle = '-'
plotitem.color = 'b'
# Velocity:
# ---------
plotfigure = plotdata.new_plotfigure(name='Velocity', figno=2)
plotaxes = plotfigure.new_plotaxes(name='Velocity')
plotaxes.axescmd = 'subplot(1,1,1)'
plotaxes.xlimits = xlimits
plotaxes.ylimits = ulimits
plotitem = plotaxes.new_plotitem(name='Velocity',plot_type='1d')
plotitem.plot_var = 1 # q[1] is the velocity
plotitem.plotstyle = '-'
plotitem.color = 'b'
# Parameters used only when creating html and/or latex hardcopy
# e.g., via pyclaw.plotters.frametools.printframes:
plotdata.printfigs = True # print figures
plotdata.print_format = 'png' # file format
plotdata.print_framenos = 'all' # list of frames to print
plotdata.print_fignos = 'all' # list of figures to print
plotdata.html = True # create html files of plots?
plotdata.latex = True # create latex file of plots?
plotdata.latex_figsperline = 2 # layout of plots
plotdata.latex_framesperline = 1 # layout of plots
plotdata.latex_makepdf = False # also run pdflatex?
return plotdata
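# A hedged usage sketch based on the docstrings above: the plotting driver
# builds a ClawPlotData instance, passes it through setplot(), and then
# renders the requested frames. The import path below assumes the Clawpack
# 4.x layout and may differ between versions.
#
#     from pyclaw.plotters.data import ClawPlotData
#     plotdata = ClawPlotData()
#     plotdata = setplot(plotdata)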
|
{
"content_hash": "c140746dd9c11ef4ffbf589b91dba332",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 74,
"avg_line_length": 33,
"alnum_prop": 0.6136363636363636,
"repo_name": "clawpack/clawpack-4.x",
"id": "16952c0da7b3d0e7ba6f7a14db764511fc172452",
"size": "2245",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "book/chap3/acousimple/setplot.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Fortran",
"bytes": "1413468"
},
{
"name": "HTML",
"bytes": "1032"
},
{
"name": "Limbo",
"bytes": "135"
},
{
"name": "M",
"bytes": "123"
},
{
"name": "Makefile",
"bytes": "153571"
},
{
"name": "Matlab",
"bytes": "311883"
},
{
"name": "Objective-C",
"bytes": "36"
},
{
"name": "Python",
"bytes": "1242190"
},
{
"name": "Shell",
"bytes": "1579"
}
],
"symlink_target": ""
}
|
"""Libraries for unified and simple console output."""
|
{
"content_hash": "5a2bfbc5150643f7030c5d5a1e1a70c4",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 54,
"avg_line_length": 55,
"alnum_prop": 0.7454545454545455,
"repo_name": "KaranToor/MA450",
"id": "86002a86c56e8ea2b8a3030f10380e24c76b683a",
"size": "651",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "google-cloud-sdk/.install/.backup/lib/googlecloudsdk/core/console/__init__.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "3162"
},
{
"name": "CSS",
"bytes": "1930"
},
{
"name": "HTML",
"bytes": "13381"
},
{
"name": "Java",
"bytes": "151442"
},
{
"name": "JavaScript",
"bytes": "4906"
},
{
"name": "Makefile",
"bytes": "1636"
},
{
"name": "Objective-C",
"bytes": "13335"
},
{
"name": "PHP",
"bytes": "9086"
},
{
"name": "Pascal",
"bytes": "62"
},
{
"name": "Python",
"bytes": "19710731"
},
{
"name": "Roff",
"bytes": "2069494"
},
{
"name": "Ruby",
"bytes": "690"
},
{
"name": "Shell",
"bytes": "32272"
},
{
"name": "Smarty",
"bytes": "4968"
},
{
"name": "SourcePawn",
"bytes": "616"
},
{
"name": "Swift",
"bytes": "14225"
}
],
"symlink_target": ""
}
|
'''
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
'''
import sys
from .traversal import Traversal
from .traversal import TraversalStrategies
from .strategies import VertexProgramStrategy
from .traversal import Bytecode
from ..driver.remote_connection import RemoteStrategy
from .. import statics
from ..statics import long
class GraphTraversalSource(object):
def __init__(self, graph, traversal_strategies, bytecode=None):
self.graph = graph
self.traversal_strategies = traversal_strategies
if bytecode is None:
bytecode = Bytecode()
self.bytecode = bytecode
def __repr__(self):
return "graphtraversalsource[" + str(self.graph) + "]"
def withBulk(self, *args):
source = GraphTraversalSource(self.graph, TraversalStrategies(self.traversal_strategies), Bytecode(self.bytecode))
source.bytecode.add_source("withBulk", *args)
return source
def withPath(self, *args):
source = GraphTraversalSource(self.graph, TraversalStrategies(self.traversal_strategies), Bytecode(self.bytecode))
source.bytecode.add_source("withPath", *args)
return source
def withSack(self, *args):
source = GraphTraversalSource(self.graph, TraversalStrategies(self.traversal_strategies), Bytecode(self.bytecode))
source.bytecode.add_source("withSack", *args)
return source
def withSideEffect(self, *args):
source = GraphTraversalSource(self.graph, TraversalStrategies(self.traversal_strategies), Bytecode(self.bytecode))
source.bytecode.add_source("withSideEffect", *args)
return source
def withStrategies(self, *args):
source = GraphTraversalSource(self.graph, TraversalStrategies(self.traversal_strategies), Bytecode(self.bytecode))
source.bytecode.add_source("withStrategies", *args)
return source
def withoutStrategies(self, *args):
source = GraphTraversalSource(self.graph, TraversalStrategies(self.traversal_strategies), Bytecode(self.bytecode))
source.bytecode.add_source("withoutStrategies", *args)
return source
def withRemote(self, remote_connection):
source = GraphTraversalSource(self.graph, TraversalStrategies(self.traversal_strategies), Bytecode(self.bytecode))
source.traversal_strategies.add_strategies([RemoteStrategy(remote_connection)])
return source
def withBindings(self, bindings):
return self
    def withComputer(self, graph_computer=None, workers=None, result=None, persist=None, vertices=None, edges=None, configuration=None):
        return self.withStrategies(VertexProgramStrategy(graph_computer, workers, result, persist, vertices, edges, configuration))
def E(self, *args):
traversal = GraphTraversal(self.graph, self.traversal_strategies, Bytecode(self.bytecode))
traversal.bytecode.add_step("E", *args)
return traversal
def V(self, *args):
traversal = GraphTraversal(self.graph, self.traversal_strategies, Bytecode(self.bytecode))
traversal.bytecode.add_step("V", *args)
return traversal
def addV(self, *args):
traversal = GraphTraversal(self.graph, self.traversal_strategies, Bytecode(self.bytecode))
traversal.bytecode.add_step("addV", *args)
return traversal
def inject(self, *args):
traversal = GraphTraversal(self.graph, self.traversal_strategies, Bytecode(self.bytecode))
traversal.bytecode.add_step("inject", *args)
return traversal
class GraphTraversal(Traversal):
def __init__(self, graph, traversal_strategies, bytecode):
Traversal.__init__(self, graph, traversal_strategies, bytecode)
def __getitem__(self, index):
if isinstance(index, int):
return self.range(long(index), long(index + 1))
elif isinstance(index, slice):
return self.range(long(0) if index.start is None else long(index.start), long(sys.maxsize) if index.stop is None else long(index.stop))
else:
raise TypeError("Index must be int or slice")
def __getattr__(self, key):
return self.values(key)
def V(self, *args):
self.bytecode.add_step("V", *args)
return self
def addE(self, *args):
self.bytecode.add_step("addE", *args)
return self
def addInE(self, *args):
self.bytecode.add_step("addInE", *args)
return self
def addOutE(self, *args):
self.bytecode.add_step("addOutE", *args)
return self
def addV(self, *args):
self.bytecode.add_step("addV", *args)
return self
def aggregate(self, *args):
self.bytecode.add_step("aggregate", *args)
return self
def and_(self, *args):
self.bytecode.add_step("and", *args)
return self
def as_(self, *args):
self.bytecode.add_step("as", *args)
return self
def barrier(self, *args):
self.bytecode.add_step("barrier", *args)
return self
def both(self, *args):
self.bytecode.add_step("both", *args)
return self
def bothE(self, *args):
self.bytecode.add_step("bothE", *args)
return self
def bothV(self, *args):
self.bytecode.add_step("bothV", *args)
return self
def branch(self, *args):
self.bytecode.add_step("branch", *args)
return self
def by(self, *args):
self.bytecode.add_step("by", *args)
return self
def cap(self, *args):
self.bytecode.add_step("cap", *args)
return self
def choose(self, *args):
self.bytecode.add_step("choose", *args)
return self
def coalesce(self, *args):
self.bytecode.add_step("coalesce", *args)
return self
def coin(self, *args):
self.bytecode.add_step("coin", *args)
return self
def constant(self, *args):
self.bytecode.add_step("constant", *args)
return self
def count(self, *args):
self.bytecode.add_step("count", *args)
return self
def cyclicPath(self, *args):
self.bytecode.add_step("cyclicPath", *args)
return self
def dedup(self, *args):
self.bytecode.add_step("dedup", *args)
return self
def drop(self, *args):
self.bytecode.add_step("drop", *args)
return self
def emit(self, *args):
self.bytecode.add_step("emit", *args)
return self
def filter(self, *args):
self.bytecode.add_step("filter", *args)
return self
def flatMap(self, *args):
self.bytecode.add_step("flatMap", *args)
return self
def fold(self, *args):
self.bytecode.add_step("fold", *args)
return self
def from_(self, *args):
self.bytecode.add_step("from", *args)
return self
def group(self, *args):
self.bytecode.add_step("group", *args)
return self
def groupCount(self, *args):
self.bytecode.add_step("groupCount", *args)
return self
def groupV3d0(self, *args):
self.bytecode.add_step("groupV3d0", *args)
return self
def has(self, *args):
self.bytecode.add_step("has", *args)
return self
def hasId(self, *args):
self.bytecode.add_step("hasId", *args)
return self
def hasKey(self, *args):
self.bytecode.add_step("hasKey", *args)
return self
def hasLabel(self, *args):
self.bytecode.add_step("hasLabel", *args)
return self
def hasNot(self, *args):
self.bytecode.add_step("hasNot", *args)
return self
def hasValue(self, *args):
self.bytecode.add_step("hasValue", *args)
return self
def id(self, *args):
self.bytecode.add_step("id", *args)
return self
def identity(self, *args):
self.bytecode.add_step("identity", *args)
return self
def inE(self, *args):
self.bytecode.add_step("inE", *args)
return self
def inV(self, *args):
self.bytecode.add_step("inV", *args)
return self
def in_(self, *args):
self.bytecode.add_step("in", *args)
return self
def inject(self, *args):
self.bytecode.add_step("inject", *args)
return self
def is_(self, *args):
self.bytecode.add_step("is", *args)
return self
def key(self, *args):
self.bytecode.add_step("key", *args)
return self
def label(self, *args):
self.bytecode.add_step("label", *args)
return self
def limit(self, *args):
self.bytecode.add_step("limit", *args)
return self
def local(self, *args):
self.bytecode.add_step("local", *args)
return self
def loops(self, *args):
self.bytecode.add_step("loops", *args)
return self
def map(self, *args):
self.bytecode.add_step("map", *args)
return self
def mapKeys(self, *args):
self.bytecode.add_step("mapKeys", *args)
return self
def mapValues(self, *args):
self.bytecode.add_step("mapValues", *args)
return self
def match(self, *args):
self.bytecode.add_step("match", *args)
return self
def max(self, *args):
self.bytecode.add_step("max", *args)
return self
def mean(self, *args):
self.bytecode.add_step("mean", *args)
return self
def min(self, *args):
self.bytecode.add_step("min", *args)
return self
def not_(self, *args):
self.bytecode.add_step("not", *args)
return self
def option(self, *args):
self.bytecode.add_step("option", *args)
return self
def optional(self, *args):
self.bytecode.add_step("optional", *args)
return self
def or_(self, *args):
self.bytecode.add_step("or", *args)
return self
def order(self, *args):
self.bytecode.add_step("order", *args)
return self
def otherV(self, *args):
self.bytecode.add_step("otherV", *args)
return self
def out(self, *args):
self.bytecode.add_step("out", *args)
return self
def outE(self, *args):
self.bytecode.add_step("outE", *args)
return self
def outV(self, *args):
self.bytecode.add_step("outV", *args)
return self
def pageRank(self, *args):
self.bytecode.add_step("pageRank", *args)
return self
def path(self, *args):
self.bytecode.add_step("path", *args)
return self
def peerPressure(self, *args):
self.bytecode.add_step("peerPressure", *args)
return self
def profile(self, *args):
self.bytecode.add_step("profile", *args)
return self
def program(self, *args):
self.bytecode.add_step("program", *args)
return self
def project(self, *args):
self.bytecode.add_step("project", *args)
return self
def properties(self, *args):
self.bytecode.add_step("properties", *args)
return self
def property(self, *args):
self.bytecode.add_step("property", *args)
return self
def propertyMap(self, *args):
self.bytecode.add_step("propertyMap", *args)
return self
def range(self, *args):
self.bytecode.add_step("range", *args)
return self
def repeat(self, *args):
self.bytecode.add_step("repeat", *args)
return self
def sack(self, *args):
self.bytecode.add_step("sack", *args)
return self
def sample(self, *args):
self.bytecode.add_step("sample", *args)
return self
def select(self, *args):
self.bytecode.add_step("select", *args)
return self
def sideEffect(self, *args):
self.bytecode.add_step("sideEffect", *args)
return self
def simplePath(self, *args):
self.bytecode.add_step("simplePath", *args)
return self
def store(self, *args):
self.bytecode.add_step("store", *args)
return self
def subgraph(self, *args):
self.bytecode.add_step("subgraph", *args)
return self
def sum(self, *args):
self.bytecode.add_step("sum", *args)
return self
def tail(self, *args):
self.bytecode.add_step("tail", *args)
return self
def timeLimit(self, *args):
self.bytecode.add_step("timeLimit", *args)
return self
def times(self, *args):
self.bytecode.add_step("times", *args)
return self
def to(self, *args):
self.bytecode.add_step("to", *args)
return self
def toE(self, *args):
self.bytecode.add_step("toE", *args)
return self
def toV(self, *args):
self.bytecode.add_step("toV", *args)
return self
def tree(self, *args):
self.bytecode.add_step("tree", *args)
return self
def unfold(self, *args):
self.bytecode.add_step("unfold", *args)
return self
def union(self, *args):
self.bytecode.add_step("union", *args)
return self
def until(self, *args):
self.bytecode.add_step("until", *args)
return self
def value(self, *args):
self.bytecode.add_step("value", *args)
return self
def valueMap(self, *args):
self.bytecode.add_step("valueMap", *args)
return self
def values(self, *args):
self.bytecode.add_step("values", *args)
return self
def where(self, *args):
self.bytecode.add_step("where", *args)
return self
class __(object):
@staticmethod
def V(*args):
return GraphTraversal(None, None, Bytecode()).V(*args)
@staticmethod
def __(*args):
return GraphTraversal(None, None, Bytecode()).__(*args)
@staticmethod
def addE(*args):
return GraphTraversal(None, None, Bytecode()).addE(*args)
@staticmethod
def addInE(*args):
return GraphTraversal(None, None, Bytecode()).addInE(*args)
@staticmethod
def addOutE(*args):
return GraphTraversal(None, None, Bytecode()).addOutE(*args)
@staticmethod
def addV(*args):
return GraphTraversal(None, None, Bytecode()).addV(*args)
@staticmethod
def aggregate(*args):
return GraphTraversal(None, None, Bytecode()).aggregate(*args)
@staticmethod
def and_(*args):
return GraphTraversal(None, None, Bytecode()).and_(*args)
@staticmethod
def as_(*args):
return GraphTraversal(None, None, Bytecode()).as_(*args)
@staticmethod
def barrier(*args):
return GraphTraversal(None, None, Bytecode()).barrier(*args)
@staticmethod
def both(*args):
return GraphTraversal(None, None, Bytecode()).both(*args)
@staticmethod
def bothE(*args):
return GraphTraversal(None, None, Bytecode()).bothE(*args)
@staticmethod
def bothV(*args):
return GraphTraversal(None, None, Bytecode()).bothV(*args)
@staticmethod
def branch(*args):
return GraphTraversal(None, None, Bytecode()).branch(*args)
@staticmethod
def cap(*args):
return GraphTraversal(None, None, Bytecode()).cap(*args)
@staticmethod
def choose(*args):
return GraphTraversal(None, None, Bytecode()).choose(*args)
@staticmethod
def coalesce(*args):
return GraphTraversal(None, None, Bytecode()).coalesce(*args)
@staticmethod
def coin(*args):
return GraphTraversal(None, None, Bytecode()).coin(*args)
@staticmethod
def constant(*args):
return GraphTraversal(None, None, Bytecode()).constant(*args)
@staticmethod
def count(*args):
return GraphTraversal(None, None, Bytecode()).count(*args)
@staticmethod
def cyclicPath(*args):
return GraphTraversal(None, None, Bytecode()).cyclicPath(*args)
@staticmethod
def dedup(*args):
return GraphTraversal(None, None, Bytecode()).dedup(*args)
@staticmethod
def drop(*args):
return GraphTraversal(None, None, Bytecode()).drop(*args)
@staticmethod
def emit(*args):
return GraphTraversal(None, None, Bytecode()).emit(*args)
@staticmethod
def filter(*args):
return GraphTraversal(None, None, Bytecode()).filter(*args)
@staticmethod
def flatMap(*args):
return GraphTraversal(None, None, Bytecode()).flatMap(*args)
@staticmethod
def fold(*args):
return GraphTraversal(None, None, Bytecode()).fold(*args)
@staticmethod
def group(*args):
return GraphTraversal(None, None, Bytecode()).group(*args)
@staticmethod
def groupCount(*args):
return GraphTraversal(None, None, Bytecode()).groupCount(*args)
@staticmethod
def groupV3d0(*args):
return GraphTraversal(None, None, Bytecode()).groupV3d0(*args)
@staticmethod
def has(*args):
return GraphTraversal(None, None, Bytecode()).has(*args)
@staticmethod
def hasId(*args):
return GraphTraversal(None, None, Bytecode()).hasId(*args)
@staticmethod
def hasKey(*args):
return GraphTraversal(None, None, Bytecode()).hasKey(*args)
@staticmethod
def hasLabel(*args):
return GraphTraversal(None, None, Bytecode()).hasLabel(*args)
@staticmethod
def hasNot(*args):
return GraphTraversal(None, None, Bytecode()).hasNot(*args)
@staticmethod
def hasValue(*args):
return GraphTraversal(None, None, Bytecode()).hasValue(*args)
@staticmethod
def id(*args):
return GraphTraversal(None, None, Bytecode()).id(*args)
@staticmethod
def identity(*args):
return GraphTraversal(None, None, Bytecode()).identity(*args)
@staticmethod
def inE(*args):
return GraphTraversal(None, None, Bytecode()).inE(*args)
@staticmethod
def inV(*args):
return GraphTraversal(None, None, Bytecode()).inV(*args)
@staticmethod
def in_(*args):
return GraphTraversal(None, None, Bytecode()).in_(*args)
@staticmethod
def inject(*args):
return GraphTraversal(None, None, Bytecode()).inject(*args)
@staticmethod
def is_(*args):
return GraphTraversal(None, None, Bytecode()).is_(*args)
@staticmethod
def key(*args):
return GraphTraversal(None, None, Bytecode()).key(*args)
@staticmethod
def label(*args):
return GraphTraversal(None, None, Bytecode()).label(*args)
@staticmethod
def limit(*args):
return GraphTraversal(None, None, Bytecode()).limit(*args)
@staticmethod
def local(*args):
return GraphTraversal(None, None, Bytecode()).local(*args)
@staticmethod
def loops(*args):
return GraphTraversal(None, None, Bytecode()).loops(*args)
@staticmethod
def map(*args):
return GraphTraversal(None, None, Bytecode()).map(*args)
@staticmethod
def mapKeys(*args):
return GraphTraversal(None, None, Bytecode()).mapKeys(*args)
@staticmethod
def mapValues(*args):
return GraphTraversal(None, None, Bytecode()).mapValues(*args)
@staticmethod
def match(*args):
return GraphTraversal(None, None, Bytecode()).match(*args)
@staticmethod
def max(*args):
return GraphTraversal(None, None, Bytecode()).max(*args)
@staticmethod
def mean(*args):
return GraphTraversal(None, None, Bytecode()).mean(*args)
@staticmethod
def min(*args):
return GraphTraversal(None, None, Bytecode()).min(*args)
@staticmethod
def not_(*args):
return GraphTraversal(None, None, Bytecode()).not_(*args)
@staticmethod
def optional(*args):
return GraphTraversal(None, None, Bytecode()).optional(*args)
@staticmethod
def or_(*args):
return GraphTraversal(None, None, Bytecode()).or_(*args)
@staticmethod
def order(*args):
return GraphTraversal(None, None, Bytecode()).order(*args)
@staticmethod
def otherV(*args):
return GraphTraversal(None, None, Bytecode()).otherV(*args)
@staticmethod
def out(*args):
return GraphTraversal(None, None, Bytecode()).out(*args)
@staticmethod
def outE(*args):
return GraphTraversal(None, None, Bytecode()).outE(*args)
@staticmethod
def outV(*args):
return GraphTraversal(None, None, Bytecode()).outV(*args)
@staticmethod
def path(*args):
return GraphTraversal(None, None, Bytecode()).path(*args)
@staticmethod
def project(*args):
return GraphTraversal(None, None, Bytecode()).project(*args)
@staticmethod
def properties(*args):
return GraphTraversal(None, None, Bytecode()).properties(*args)
@staticmethod
def property(*args):
return GraphTraversal(None, None, Bytecode()).property(*args)
@staticmethod
def propertyMap(*args):
return GraphTraversal(None, None, Bytecode()).propertyMap(*args)
@staticmethod
def range(*args):
return GraphTraversal(None, None, Bytecode()).range(*args)
@staticmethod
def repeat(*args):
return GraphTraversal(None, None, Bytecode()).repeat(*args)
@staticmethod
def sack(*args):
return GraphTraversal(None, None, Bytecode()).sack(*args)
@staticmethod
def sample(*args):
return GraphTraversal(None, None, Bytecode()).sample(*args)
@staticmethod
def select(*args):
return GraphTraversal(None, None, Bytecode()).select(*args)
@staticmethod
def sideEffect(*args):
return GraphTraversal(None, None, Bytecode()).sideEffect(*args)
@staticmethod
def simplePath(*args):
return GraphTraversal(None, None, Bytecode()).simplePath(*args)
@staticmethod
def start(*args):
return GraphTraversal(None, None, Bytecode()).start(*args)
@staticmethod
def store(*args):
return GraphTraversal(None, None, Bytecode()).store(*args)
@staticmethod
def subgraph(*args):
return GraphTraversal(None, None, Bytecode()).subgraph(*args)
@staticmethod
def sum(*args):
return GraphTraversal(None, None, Bytecode()).sum(*args)
@staticmethod
def tail(*args):
return GraphTraversal(None, None, Bytecode()).tail(*args)
@staticmethod
def timeLimit(*args):
return GraphTraversal(None, None, Bytecode()).timeLimit(*args)
@staticmethod
def times(*args):
return GraphTraversal(None, None, Bytecode()).times(*args)
@staticmethod
def to(*args):
return GraphTraversal(None, None, Bytecode()).to(*args)
@staticmethod
def toE(*args):
return GraphTraversal(None, None, Bytecode()).toE(*args)
@staticmethod
def toV(*args):
return GraphTraversal(None, None, Bytecode()).toV(*args)
@staticmethod
def tree(*args):
return GraphTraversal(None, None, Bytecode()).tree(*args)
@staticmethod
def unfold(*args):
return GraphTraversal(None, None, Bytecode()).unfold(*args)
@staticmethod
def union(*args):
return GraphTraversal(None, None, Bytecode()).union(*args)
@staticmethod
def until(*args):
return GraphTraversal(None, None, Bytecode()).until(*args)
@staticmethod
def value(*args):
return GraphTraversal(None, None, Bytecode()).value(*args)
@staticmethod
def valueMap(*args):
return GraphTraversal(None, None, Bytecode()).valueMap(*args)
@staticmethod
def values(*args):
return GraphTraversal(None, None, Bytecode()).values(*args)
@staticmethod
def where(*args):
return GraphTraversal(None, None, Bytecode()).where(*args)
def V(*args):
return __.V(*args)
statics.add_static('V', V)
def addE(*args):
return __.addE(*args)
statics.add_static('addE', addE)
def addInE(*args):
return __.addInE(*args)
statics.add_static('addInE', addInE)
def addOutE(*args):
return __.addOutE(*args)
statics.add_static('addOutE', addOutE)
def addV(*args):
return __.addV(*args)
statics.add_static('addV', addV)
def aggregate(*args):
return __.aggregate(*args)
statics.add_static('aggregate', aggregate)
def and_(*args):
return __.and_(*args)
statics.add_static('and_', and_)
def as_(*args):
return __.as_(*args)
statics.add_static('as_', as_)
def barrier(*args):
return __.barrier(*args)
statics.add_static('barrier', barrier)
def both(*args):
return __.both(*args)
statics.add_static('both', both)
def bothE(*args):
return __.bothE(*args)
statics.add_static('bothE', bothE)
def bothV(*args):
return __.bothV(*args)
statics.add_static('bothV', bothV)
def branch(*args):
return __.branch(*args)
statics.add_static('branch', branch)
def cap(*args):
return __.cap(*args)
statics.add_static('cap', cap)
def choose(*args):
return __.choose(*args)
statics.add_static('choose', choose)
def coalesce(*args):
return __.coalesce(*args)
statics.add_static('coalesce', coalesce)
def coin(*args):
return __.coin(*args)
statics.add_static('coin', coin)
def constant(*args):
return __.constant(*args)
statics.add_static('constant', constant)
def count(*args):
return __.count(*args)
statics.add_static('count', count)
def cyclicPath(*args):
return __.cyclicPath(*args)
statics.add_static('cyclicPath', cyclicPath)
def dedup(*args):
return __.dedup(*args)
statics.add_static('dedup', dedup)
def drop(*args):
return __.drop(*args)
statics.add_static('drop', drop)
def emit(*args):
return __.emit(*args)
statics.add_static('emit', emit)
def filter(*args):
return __.filter(*args)
statics.add_static('filter', filter)
def flatMap(*args):
return __.flatMap(*args)
statics.add_static('flatMap', flatMap)
def fold(*args):
return __.fold(*args)
statics.add_static('fold', fold)
def group(*args):
return __.group(*args)
statics.add_static('group', group)
def groupCount(*args):
return __.groupCount(*args)
statics.add_static('groupCount', groupCount)
def groupV3d0(*args):
return __.groupV3d0(*args)
statics.add_static('groupV3d0', groupV3d0)
def has(*args):
return __.has(*args)
statics.add_static('has', has)
def hasId(*args):
return __.hasId(*args)
statics.add_static('hasId', hasId)
def hasKey(*args):
return __.hasKey(*args)
statics.add_static('hasKey', hasKey)
def hasLabel(*args):
return __.hasLabel(*args)
statics.add_static('hasLabel', hasLabel)
def hasNot(*args):
return __.hasNot(*args)
statics.add_static('hasNot', hasNot)
def hasValue(*args):
return __.hasValue(*args)
statics.add_static('hasValue', hasValue)
def id(*args):
return __.id(*args)
statics.add_static('id', id)
def identity(*args):
return __.identity(*args)
statics.add_static('identity', identity)
def inE(*args):
return __.inE(*args)
statics.add_static('inE', inE)
def inV(*args):
return __.inV(*args)
statics.add_static('inV', inV)
def in_(*args):
return __.in_(*args)
statics.add_static('in_', in_)
def inject(*args):
return __.inject(*args)
statics.add_static('inject', inject)
def is_(*args):
return __.is_(*args)
statics.add_static('is_', is_)
def key(*args):
return __.key(*args)
statics.add_static('key', key)
def label(*args):
return __.label(*args)
statics.add_static('label', label)
def limit(*args):
return __.limit(*args)
statics.add_static('limit', limit)
def local(*args):
return __.local(*args)
statics.add_static('local', local)
def loops(*args):
return __.loops(*args)
statics.add_static('loops', loops)
def map(*args):
return __.map(*args)
statics.add_static('map', map)
def mapKeys(*args):
return __.mapKeys(*args)
statics.add_static('mapKeys', mapKeys)
def mapValues(*args):
return __.mapValues(*args)
statics.add_static('mapValues', mapValues)
def match(*args):
return __.match(*args)
statics.add_static('match', match)
def max(*args):
return __.max(*args)
statics.add_static('max', max)
def mean(*args):
return __.mean(*args)
statics.add_static('mean', mean)
def min(*args):
return __.min(*args)
statics.add_static('min', min)
def not_(*args):
return __.not_(*args)
statics.add_static('not_', not_)
def optional(*args):
return __.optional(*args)
statics.add_static('optional', optional)
def or_(*args):
return __.or_(*args)
statics.add_static('or_', or_)
def order(*args):
return __.order(*args)
statics.add_static('order', order)
def otherV(*args):
return __.otherV(*args)
statics.add_static('otherV', otherV)
def out(*args):
return __.out(*args)
statics.add_static('out', out)
def outE(*args):
return __.outE(*args)
statics.add_static('outE', outE)
def outV(*args):
return __.outV(*args)
statics.add_static('outV', outV)
def path(*args):
return __.path(*args)
statics.add_static('path', path)
def project(*args):
return __.project(*args)
statics.add_static('project', project)
def properties(*args):
return __.properties(*args)
statics.add_static('properties', properties)
def property(*args):
return __.property(*args)
statics.add_static('property', property)
def propertyMap(*args):
return __.propertyMap(*args)
statics.add_static('propertyMap', propertyMap)
def range(*args):
return __.range(*args)
statics.add_static('range', range)
def repeat(*args):
return __.repeat(*args)
statics.add_static('repeat', repeat)
def sack(*args):
return __.sack(*args)
statics.add_static('sack', sack)
def sample(*args):
return __.sample(*args)
statics.add_static('sample', sample)
def select(*args):
return __.select(*args)
statics.add_static('select', select)
def sideEffect(*args):
return __.sideEffect(*args)
statics.add_static('sideEffect', sideEffect)
def simplePath(*args):
return __.simplePath(*args)
statics.add_static('simplePath', simplePath)
def start(*args):
return __.start(*args)
statics.add_static('start', start)
def store(*args):
return __.store(*args)
statics.add_static('store', store)
def subgraph(*args):
return __.subgraph(*args)
statics.add_static('subgraph', subgraph)
def sum(*args):
return __.sum(*args)
statics.add_static('sum', sum)
def tail(*args):
return __.tail(*args)
statics.add_static('tail', tail)
def timeLimit(*args):
return __.timeLimit(*args)
statics.add_static('timeLimit', timeLimit)
def times(*args):
return __.times(*args)
statics.add_static('times', times)
def to(*args):
return __.to(*args)
statics.add_static('to', to)
def toE(*args):
return __.toE(*args)
statics.add_static('toE', toE)
def toV(*args):
return __.toV(*args)
statics.add_static('toV', toV)
def tree(*args):
return __.tree(*args)
statics.add_static('tree', tree)
def unfold(*args):
return __.unfold(*args)
statics.add_static('unfold', unfold)
def union(*args):
return __.union(*args)
statics.add_static('union', union)
def until(*args):
return __.until(*args)
statics.add_static('until', until)
def value(*args):
return __.value(*args)
statics.add_static('value', value)
def valueMap(*args):
return __.valueMap(*args)
statics.add_static('valueMap', valueMap)
def values(*args):
return __.values(*args)
statics.add_static('values', values)
def where(*args):
return __.where(*args)
statics.add_static('where', where)
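# A minimal illustrative sketch (not part of the module): every step method
# above only appends an instruction to the traversal's Bytecode, so a
# traversal can be composed and inspected without a graph or remote
# connection attached.
def _example_anonymous_traversal():
    traversal = __.out("knows").values("name").limit(10)
    # traversal.bytecode now holds the accumulated out/values/limit steps.
    return traversal.bytecode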
|
{
"content_hash": "d53db88046393d255b7ac4255a8d858f",
"timestamp": "",
"source": "github",
"line_count": 1138,
"max_line_length": 143,
"avg_line_length": 26.557117750439367,
"alnum_prop": 0.6734828932565681,
"repo_name": "samiunn/incubator-tinkerpop",
"id": "6165ed33384c662f490821be78c7e29b7c65c0d0",
"size": "30222",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "gremlin-python/src/main/jython/gremlin_python/process/graph_traversal.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "4544"
},
{
"name": "Groovy",
"bytes": "316683"
},
{
"name": "Java",
"bytes": "5483560"
},
{
"name": "Python",
"bytes": "1481"
},
{
"name": "Shell",
"bytes": "15070"
}
],
"symlink_target": ""
}
|
"""distutils.file_util
Utility functions for operating on single files.
"""
import os
from distutils.errors import DistutilsFileError
from distutils import log
# for generating verbose output in 'copy_file()'
_copy_action = { None: 'copying',
'hard': 'hard linking',
'sym': 'symbolically linking' }
def _copy_file_contents(src, dst, buffer_size=16*1024):
"""Copy the file 'src' to 'dst'; both must be filenames. Any error
opening either file, reading from 'src', or writing to 'dst', raises
DistutilsFileError. Data is read/written in chunks of 'buffer_size'
bytes (default 16k). No attempt is made to handle anything apart from
regular files.
"""
# Stolen from shutil module in the standard library, but with
# custom error-handling added.
fsrc = None
fdst = None
try:
try:
fsrc = open(src, 'rb')
except OSError as e:
raise DistutilsFileError("could not open '%s': %s" % (src, e.strerror))
if os.path.exists(dst):
try:
os.unlink(dst)
except OSError as e:
raise DistutilsFileError(
"could not delete '%s': %s" % (dst, e.strerror))
try:
fdst = open(dst, 'wb')
except OSError as e:
raise DistutilsFileError(
"could not create '%s': %s" % (dst, e.strerror))
while True:
try:
buf = fsrc.read(buffer_size)
except OSError as e:
raise DistutilsFileError(
"could not read from '%s': %s" % (src, e.strerror))
if not buf:
break
try:
fdst.write(buf)
except OSError as e:
raise DistutilsFileError(
"could not write to '%s': %s" % (dst, e.strerror))
finally:
if fdst:
fdst.close()
if fsrc:
fsrc.close()
def copy_file(src, dst, preserve_mode=1, preserve_times=1, update=0,
link=None, verbose=1, dry_run=0):
"""Copy a file 'src' to 'dst'. If 'dst' is a directory, then 'src' is
copied there with the same name; otherwise, it must be a filename. (If
the file exists, it will be ruthlessly clobbered.) If 'preserve_mode'
is true (the default), the file's mode (type and permission bits, or
whatever is analogous on the current platform) is copied. If
'preserve_times' is true (the default), the last-modified and
last-access times are copied as well. If 'update' is true, 'src' will
only be copied if 'dst' does not exist, or if 'dst' does exist but is
older than 'src'.
'link' allows you to make hard links (os.link) or symbolic links
(os.symlink) instead of copying: set it to "hard" or "sym"; if it is
None (the default), files are copied. Don't set 'link' on systems that
don't support it: 'copy_file()' doesn't check if hard or symbolic
linking is available. If hardlink fails, falls back to
_copy_file_contents().
Under Mac OS, uses the native file copy function in macostools; on
other systems, uses '_copy_file_contents()' to copy file contents.
Return a tuple (dest_name, copied): 'dest_name' is the actual name of
the output file, and 'copied' is true if the file was copied (or would
have been copied, if 'dry_run' true).
"""
# XXX if the destination file already exists, we clobber it if
# copying, but blow up if linking. Hmmm. And I don't know what
# macostools.copyfile() does. Should definitely be consistent, and
# should probably blow up if destination exists and we would be
# changing it (ie. it's not already a hard/soft link to src OR
# (not update) and (src newer than dst).
from distutils.dep_util import newer
from stat import ST_ATIME, ST_MTIME, ST_MODE, S_IMODE
if not os.path.isfile(src):
raise DistutilsFileError(
"can't copy '%s': doesn't exist or not a regular file" % src)
if os.path.isdir(dst):
dir = dst
dst = os.path.join(dst, os.path.basename(src))
else:
dir = os.path.dirname(dst)
if update and not newer(src, dst):
if verbose >= 1:
log.debug("not copying %s (output up-to-date)", src)
return (dst, 0)
try:
action = _copy_action[link]
except KeyError:
raise ValueError("invalid value '%s' for 'link' argument" % link)
if verbose >= 1:
if os.path.basename(dst) == os.path.basename(src):
log.info("%s %s -> %s", action, src, dir)
else:
log.info("%s %s -> %s", action, src, dst)
if dry_run:
return (dst, 1)
# If linking (hard or symbolic), use the appropriate system call
# (Unix only, of course, but that's the caller's responsibility)
elif link == 'hard':
if not (os.path.exists(dst) and os.path.samefile(src, dst)):
try:
os.link(src, dst)
return (dst, 1)
except OSError:
# If hard linking fails, fall back on copying file
# (some special filesystems don't support hard linking
# even under Unix, see issue #8876).
pass
elif link == 'sym':
if not (os.path.exists(dst) and os.path.samefile(src, dst)):
os.symlink(src, dst)
return (dst, 1)
# Otherwise (non-Mac, not linking), copy the file contents and
# (optionally) copy the times and mode.
_copy_file_contents(src, dst)
if preserve_mode or preserve_times:
st = os.stat(src)
# According to David Ascher <da@ski.org>, utime() should be done
# before chmod() (at least under NT).
if preserve_times:
os.utime(dst, (st[ST_ATIME], st[ST_MTIME]))
if preserve_mode:
os.chmod(dst, S_IMODE(st[ST_MODE]))
return (dst, 1)
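# A hedged usage sketch of the behaviour documented above (paths are
# illustrative): copying into a directory keeps the source basename, and
# update=1 skips the copy when the destination is not older than the source.
#
#     copy_file("setup.py", "build/", update=1)      # -> ("build/setup.py", 0 or 1)
#     copy_file("README.txt", "docs/", link="hard")  # hard link where supported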
# XXX I suspect this is Unix-specific -- need porting help!
def move_file (src, dst,
verbose=1,
dry_run=0):
"""Move a file 'src' to 'dst'. If 'dst' is a directory, the file will
be moved into it with the same name; otherwise, 'src' is just renamed
to 'dst'. Return the new full name of the file.
Handles cross-device moves on Unix using 'copy_file()'. What about
other systems???
"""
from os.path import exists, isfile, isdir, basename, dirname
import errno
if verbose >= 1:
log.info("moving %s -> %s", src, dst)
if dry_run:
return dst
if not isfile(src):
raise DistutilsFileError("can't move '%s': not a regular file" % src)
if isdir(dst):
dst = os.path.join(dst, basename(src))
elif exists(dst):
raise DistutilsFileError(
"can't move '%s': destination '%s' already exists" %
(src, dst))
if not isdir(dirname(dst)):
raise DistutilsFileError(
"can't move '%s': destination '%s' not a valid path" %
(src, dst))
copy_it = False
try:
os.rename(src, dst)
except OSError as e:
(num, msg) = e.args
if num == errno.EXDEV:
copy_it = True
else:
raise DistutilsFileError(
"couldn't move '%s' to '%s': %s" % (src, dst, msg))
if copy_it:
copy_file(src, dst, verbose=verbose)
try:
os.unlink(src)
except OSError as e:
(num, msg) = e.args
try:
os.unlink(dst)
except OSError:
pass
raise DistutilsFileError(
"couldn't move '%s' to '%s' by copy/delete: "
"delete '%s' failed: %s"
% (src, dst, src, msg))
return dst
def write_file (filename, contents):
"""Create a file with the specified name and write 'contents' (a
sequence of strings without line terminators) to it.
"""
f = open(filename, "w")
try:
for line in contents:
f.write(line + "\n")
finally:
f.close()
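# Illustrative only: write_file() appends a newline to every entry it is
# given, and move_file() falls back to copy-and-delete when a plain
# os.rename() would cross devices (errno.EXDEV).
#
#     write_file("MANIFEST", ["include README.txt", "include setup.py"])
#     move_file("MANIFEST", "dist/")   # returns "dist/MANIFEST"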
|
{
"content_hash": "3aba93b37bfa805919909152ab3eb18a",
"timestamp": "",
"source": "github",
"line_count": 720,
"max_line_length": 83,
"avg_line_length": 34.144444444444446,
"alnum_prop": 0.5710218027985682,
"repo_name": "ArcherSys/ArcherSys",
"id": "aca36bc9bcacac22ce337638db2228ce57c99911",
"size": "24584",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Lib/distutils/file_util.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
}
|
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting field 'DeadLink.date'
db.delete_column('smra_portal_deadlink', 'date')
# Adding field 'DeadLink.date_created'
db.add_column('smra_portal_deadlink', 'date_created', self.gf('django.db.models.fields.DateField')(auto_now_add=True, default=datetime.date(2011, 7, 19), blank=True), keep_default=False)
# Adding field 'DeadLink.reason'
db.add_column('smra_portal_deadlink', 'reason', self.gf('django.db.models.fields.CharField')(default=datetime.date(2011, 7, 19), max_length=400), keep_default=False)
def backwards(self, orm):
# Adding field 'DeadLink.date'
db.add_column('smra_portal_deadlink', 'date', self.gf('django.db.models.fields.DateField')(default=datetime.date(2011, 7, 19)), keep_default=False)
# Deleting field 'DeadLink.date_created'
db.delete_column('smra_portal_deadlink', 'date_created')
# Deleting field 'DeadLink.reason'
db.delete_column('smra_portal_deadlink', 'reason')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'smra_portal.deadlink': {
'Meta': {'object_name': 'DeadLink'},
'date_created': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'parameter': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['smra_portal.ParameterName']"}),
'reason': ('django.db.models.fields.CharField', [], {'max_length': '400'})
},
'smra_portal.mediaobject': {
'Meta': {'object_name': 'MediaObject'},
'description': ('django.db.models.fields.CharField', [], {'max_length': '400'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'repos': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['smra_portal.Repository']"}),
'schemas': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['smra_portal.Schema']", 'through': "orm['smra_portal.MediaObjectParameterSet']", 'symmetrical': 'False'})
},
'smra_portal.mediaobjectparameter': {
'Meta': {'ordering': "('name',)", 'object_name': 'MediaObjectParameter'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['smra_portal.ParameterName']"}),
'paramset': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['smra_portal.MediaObjectParameterSet']"}),
'value': ('django.db.models.fields.TextField', [], {})
},
'smra_portal.mediaobjectparameterset': {
'Meta': {'ordering': "['-ranking']", 'object_name': 'MediaObjectParameterSet'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'media_object': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['smra_portal.MediaObject']"}),
'ranking': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'schema': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['smra_portal.Schema']"})
},
'smra_portal.parametername': {
'Meta': {'ordering': "['-ranking']", 'object_name': 'ParameterName'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'ranking': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'schema': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['smra_portal.Schema']"}),
'type': ('django.db.models.fields.IntegerField', [], {'default': '1'})
},
'smra_portal.repository': {
'Meta': {'object_name': 'Repository'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '80'}),
'system': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['smra_portal.System']"})
},
'smra_portal.schema': {
'Meta': {'object_name': 'Schema'},
'description': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '80'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.SlugField', [], {'default': "''", 'max_length': '50', 'db_index': 'True'}),
'namespace': ('django.db.models.fields.URLField', [], {'max_length': '400'})
},
'smra_portal.system': {
'Meta': {'object_name': 'System'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '80'})
},
'smra_portal.userprofile': {
'Meta': {'object_name': 'UserProfile'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'unique': 'True'})
},
'smra_portal.virtualcollection': {
'Meta': {'object_name': 'VirtualCollection'},
'description': ('django.db.models.fields.CharField', [], {'max_length': '400'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '400'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '400'}),
'owned_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['smra_portal.UserProfile']"})
}
}
complete_apps = ['smra_portal']
|
{
"content_hash": "81d94eae13d04fc612380de3d0101f51",
"timestamp": "",
"source": "github",
"line_count": 138,
"max_line_length": 196,
"avg_line_length": 66.17391304347827,
"alnum_prop": 0.558694699956198,
"repo_name": "tectronics/mavrec",
"id": "83ddb267bfdbe0aa00b1ed18970ee0861ea52756",
"size": "9150",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "smra/smra_portal/migrations/0010_auto__del_field_deadlink_date__add_field_deadlink_date_created__add_fi.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "30199"
},
{
"name": "HTML",
"bytes": "42761"
},
{
"name": "JavaScript",
"bytes": "10786"
},
{
"name": "Python",
"bytes": "875078"
}
],
"symlink_target": ""
}
|
from django.conf.urls import patterns, url
urlpatterns = patterns(
'jobsub.views',
# The base view is the "list" view, which we alias as /
url(r'^$', 'list_designs'),
# Not available on Hue 4
url(r'^not_available$', 'not_available'),
# Actions: get, save, clone, delete, submit, new.
url(r'^designs$', 'list_designs'),
url(r'^designs/(?P<design_id>\d+)$', 'get_design'),
url(r'^designs/(?P<node_type>\w+)/new$', 'new_design'),
url(r'^designs/(?P<design_id>\d+)/save$', 'save_design'),
url(r'^designs/(?P<design_id>\d+)/clone$', 'clone_design'),
url(r'^designs/(?P<design_id>\d+)/delete$', 'delete_design'),
url(r'^designs/(?P<design_id>\d+)/restore$', 'restore_design'),
)
|
{
"content_hash": "0e58ab634bd2c17cd9892c3d84645b77",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 65,
"avg_line_length": 35.15,
"alnum_prop": 0.6187766714082503,
"repo_name": "xq262144/hue",
"id": "2229531e497cf38631d0a70b3967aac8499b54d3",
"size": "1495",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "apps/jobsub/src/jobsub/urls.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "3096"
},
{
"name": "Batchfile",
"bytes": "41710"
},
{
"name": "C",
"bytes": "2692409"
},
{
"name": "C++",
"bytes": "199897"
},
{
"name": "CSS",
"bytes": "521820"
},
{
"name": "Emacs Lisp",
"bytes": "11704"
},
{
"name": "Genshi",
"bytes": "946"
},
{
"name": "Go",
"bytes": "6671"
},
{
"name": "Groff",
"bytes": "16669"
},
{
"name": "HTML",
"bytes": "24188238"
},
{
"name": "Java",
"bytes": "575404"
},
{
"name": "JavaScript",
"bytes": "4987047"
},
{
"name": "M4",
"bytes": "1377"
},
{
"name": "Makefile",
"bytes": "144341"
},
{
"name": "Mako",
"bytes": "3052598"
},
{
"name": "Myghty",
"bytes": "936"
},
{
"name": "PLSQL",
"bytes": "13774"
},
{
"name": "PLpgSQL",
"bytes": "3646"
},
{
"name": "Perl",
"bytes": "3499"
},
{
"name": "PigLatin",
"bytes": "328"
},
{
"name": "Python",
"bytes": "44291483"
},
{
"name": "Shell",
"bytes": "44147"
},
{
"name": "Smarty",
"bytes": "130"
},
{
"name": "Thrift",
"bytes": "278712"
},
{
"name": "Visual Basic",
"bytes": "2884"
},
{
"name": "XSLT",
"bytes": "518588"
}
],
"symlink_target": ""
}
|
__author__ = 'Sergey Krivohatskiy'
window_width = 800
window_height = 600
window_vsync = True
window_resizable = True
background_color_r = 23
background_color_g = 23
background_color_b = 23
bullet_image = 'bullet.png'
bullet_max_velocity = 20
bullet_velocity_power_coefficient = 3
bullet_half_width = 5
robot_body_image = 'robot.png'
robot_gun_image = 'gun.png'
robot_radar_image = 'radar.png'
robot_initial_gun_heat = 3
robot_max_gun_turn = 20
robot_max_radar_turn = 45
robot_max_idle_body_turn = 10
robot_velocity_body_turn_coefficient = 0.75
robot_max_velocity = 8
robot_max_acceleration = 1
robot_max_brake_acceleration = 2
robot_initial_energy = 100
robot_half_width = 50
robot_gun_cooling = 0.1
robot_radar_scan_length = 1200
|
{
"content_hash": "bae349aeb802699c896017416f1fc3be",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 43,
"avg_line_length": 23.70967741935484,
"alnum_prop": 0.7428571428571429,
"repo_name": "SergeyKrivohatskiy/robocode_python",
"id": "3ce003ea8939b651e72ccb7ef8f945cedc0b2793",
"size": "757",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "constants.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "24167"
}
],
"symlink_target": ""
}
|
from http import HTTPStatus
from waterbutler.core.exceptions import ProviderError
class OsfStorageQuotaExceededError(ProviderError):
def __init__(self, dummy) -> None:
"""``dummy`` argument is because children of ``WaterButlerError`` must be instantiable with
a single integer argument. See :class:`waterbutler.core.exceptions.WaterButlerError` for
details.
"""
super().__init__('The quota on this osfstorage project has been exceeded',
code=HTTPStatus.INSUFFICIENT_STORAGE)
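# --- Editor's note: hedged usage sketch, not part of the original module.
# The positional argument only satisfies WaterButlerError's "single integer
# argument" contract and is otherwise ignored, so raising the error can look
# like this:
#
#     raise OsfStorageQuotaExceededError(0)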
|
{
"content_hash": "d7d491086d707c4832d185e83d26dab7",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 99,
"avg_line_length": 36.666666666666664,
"alnum_prop": 0.6854545454545454,
"repo_name": "felliott/waterbutler",
"id": "c347d8d47285143495fd86db9cdd1d267845e7e0",
"size": "550",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "waterbutler/providers/osfstorage/exceptions.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "922"
},
{
"name": "Python",
"bytes": "1673806"
}
],
"symlink_target": ""
}
|
"""Run the Chrome WebUI presubmit scripts on our test javascript.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details about the presubmit API built into depot_tools, and see
https://chromium.googlesource.com/chromium/src/+/main/styleguide/web/web.md
for the rules we're checking against here.
"""
import os
USE_PYTHON3 = True
def GetPathsToPrepend(input_api):
web_dev_style_path = input_api.os_path.join(
input_api.change.RepositoryRoot(),
'tools')
return [input_api.PresubmitLocalPath(), web_dev_style_path]
def RunWithPrependedPath(prepended_path, fn, *args):
import sys
old_path = sys.path
try:
sys.path = prepended_path + old_path
return fn(*args)
finally:
sys.path = old_path
def CheckChangeOnUpload(input_api, output_api):
def go():
results = []
results.extend(_CommonChecks(input_api, output_api))
return results
return RunWithPrependedPath(GetPathsToPrepend(input_api), go)
def CheckChangeOnCommit(input_api, output_api):
def go():
results = []
results.extend(_CommonChecks(input_api, output_api))
return results
return RunWithPrependedPath(GetPathsToPrepend(input_api), go)
def _CommonChecks(input_api, output_api):
resources = input_api.PresubmitLocalPath()
def _html_css_js_resource(p):
return p.endswith(('.js')) and p.startswith(resources)
def is_resource(maybe_resource):
return _html_css_js_resource(maybe_resource.AbsoluteLocalPath())
from web_dev_style import js_checker
results = []
results.extend(js_checker.JSChecker(
input_api, output_api, file_filter=is_resource).RunChecks())
return results
|
{
"content_hash": "621b56759066469337f060d260ebaefc",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 75,
"avg_line_length": 28.23728813559322,
"alnum_prop": 0.7274909963985594,
"repo_name": "chromium/chromium",
"id": "63b9ce0d8f2c8744967ab8658403b0fe626087cb",
"size": "1807",
"binary": false,
"copies": "6",
"ref": "refs/heads/main",
"path": "chrome/test/data/extensions/api_test/activity_log_private/PRESUBMIT.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
import sys
from pathlib import Path
sys.path.append(str(Path(__file__).absolute().parent.parent / 'src/'))
|
{
"content_hash": "6ad5ee0fc650045f5b33219389dcd49d",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 70,
"avg_line_length": 21.8,
"alnum_prop": 0.7064220183486238,
"repo_name": "wartalker/BlogSpider",
"id": "ed3d47e43d3b6421496afe5c86a4a96157acb99b",
"size": "135",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "www/tests/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "5575"
},
{
"name": "HTML",
"bytes": "3893"
},
{
"name": "JavaScript",
"bytes": "30840"
},
{
"name": "Makefile",
"bytes": "156"
},
{
"name": "Nginx",
"bytes": "1628"
},
{
"name": "Python",
"bytes": "69955"
},
{
"name": "Shell",
"bytes": "127"
}
],
"symlink_target": ""
}
|
from .models.base import Base, engine, session
from .queries import make_query
from .models.group import Group
from .models.user import User
def commit():
'''Commit the changes to database'''
session.commit()
def close():
'''Close the session connection with the database'''
session.close()
def commit_and_close():
commit()
close()
def remove_from_db(value):
'''
Remove some value from database
value -- value to delete
'''
session.delete(value)
commit_and_close()
def create_tables():
'''
see:
http://docs.sqlalchemy.org/en/latest/core/metadata.html#creating-and-dropping-database-tables
'''
Base.metadata.create_all(engine)
def addto_db(table):
'''
Get a table and add to db
'''
session.add(table)
def addsto_db(tables):
'''
Get a list of tables and add to db
'''
for table in tables:
session.add(table)
def add_user(first_name, user_id, group_id):
'''Add a user to the db
first_name -- name of the user
user_id -- id of the user
group_id -- group id for the user to be added
Return a user object
'''
user = User(first_name, user_id)
group = make_query(Group, Group.group_id == group_id)[0]
group.users = [user]
commit_and_close()
return user
def _current_session_obj(o):
'''
SqlAlchemy stuff
see: https://stackoverflow.com/questions/24291933/sqlalchemy-object-already-attached-to-session
'''
curr_session = session.object_session(o)
curr_session.add(o)
curr_session.commit()
curr_session.close()
def update_value(group_id, field, value):
'''
Update a column of the table Group filtered by its id
group_id -- id of the group
field -- column of the table Group to update
value -- value to insert
'''
session.query(Group).filter(Group.group_id == group_id).update({field: value})
def set_welcome_msg(group_id, text):
'''Set the welcome message of the group
group_id -- id of the group
text -- text to add to the db
'''
update_value(group_id, 'welcome_msg', text)
commit_and_close()
def set_rules(group_id, text):
update_value(group_id, 'rules', text)
commit_and_close()
def set_chat_link(group_id, link):
update_value(group_id, 'link', link)
commit_and_close()
def set_max_warn(group_id, value):
update_value(group_id, 'max_warns', value)
commit_and_close()
def warn_user(group_id, user_id):
for user in make_query(Group, Group.group_id == group_id)[0].users:
if user.user_id == user_id:
user.total_warns += 1
commit_and_close()
def unwarn_user(group_id, user_id):
for user in make_query(Group, Group.group_id == group_id)[0].users:
if user.user_id == user_id:
user.total_warns -= 1
commit_and_close()
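# --- Editor's note: hedged usage sketch, not part of the original module.
# It is left as a comment because it needs the package's engine/session to be
# configured; the group and user ids below are made up.
#
#     create_tables()
#     # assuming a Group row with group_id=-100123 already exists:
#     user = add_user('Alice', 42, -100123)
#     set_welcome_msg(-100123, 'Welcome aboard!')
#     warn_user(-100123, 42)
#     close()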
|
{
"content_hash": "dcbe36c43dfd1bf6e17ea05c76a559ce",
"timestamp": "",
"source": "github",
"line_count": 124,
"max_line_length": 99,
"avg_line_length": 23,
"alnum_prop": 0.6339410939691444,
"repo_name": "Pygrameiros/bot",
"id": "550fa2df2e7f5f61886733e0a40d3b8d03f99e18",
"size": "2852",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "db/inserts.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "6150"
}
],
"symlink_target": ""
}
|
"""
Default settings for the CPPPATH support of the Automoc feature:
it is enabled and uses the CPPPATH list of the current environment.
"""
import TestSCons
test = TestSCons.TestSCons()
test.dir_fixture('image')
test.file_fixture('SConscript','SConscript')
test.file_fixture('../../../qtenv.py')
test.file_fixture('../../../../__init__.py','site_scons/site_tools/qt4/__init__.py')
test.run()
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
{
"content_hash": "dd5724e7b2f8737949bea35b17e2d799",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 84,
"avg_line_length": 23.772727272727273,
"alnum_prop": 0.6998087954110899,
"repo_name": "mbychawski/traffic-simulator",
"id": "ccf202e6e6b68209a67c25f2fa7856e93bafe741",
"size": "1656",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "site_scons/site_tools/qt4/test/moc/cpppath/default/sconstest-default.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "2028"
},
{
"name": "C++",
"bytes": "116749"
},
{
"name": "CSS",
"bytes": "7616"
},
{
"name": "HTML",
"bytes": "35945"
},
{
"name": "Python",
"bytes": "251420"
},
{
"name": "QMake",
"bytes": "1753"
},
{
"name": "XSLT",
"bytes": "8010"
}
],
"symlink_target": ""
}
|
import logging
# Our imports
import emission.analysis.intake.segmentation.section_segmentation_methods.smoothed_high_confidence_motion as eaisms
import emission.core.wrapper.motionactivity as ecwm
import emission.core.wrapper.location as ecwl
class SmoothedHighConfidenceMotionWithVisitTransitions(eaisms.SmoothedHighConfidenceMotion):
def create_unknown_section(self, location_points_df):
assert(len(location_points_df) > 0)
return (location_points_df.iloc[0], location_points_df.iloc[-1], ecwm.MotionTypes.UNKNOWN)
def get_section_if_applicable(self, timeseries, time_query, location_points):
# We don't have any motion changes. So let's check to see if we
# have a visit transition, which will help us distinguish between
# real and fake trips.
# Yech! This feels really hacky, but if we have a really short trip,
# then we may get the visit ending message after the trip has ended.
# So let's expand the time query by 5 minutes.
# This is based on the 10:06 -> 10:07 trip from the 22 Feb test case
time_query.endTs = time_query.endTs + 5 * 60
transition_df = timeseries.get_data_df('statemachine/transition', time_query)
if len(transition_df) == 0:
logging.debug("there are no transitions, which means no visit transitions, not creating a section")
return None
visit_ended_transition_df = transition_df[transition_df.transition == 14]
if len(visit_ended_transition_df) == 0:
logging.debug("there are some transitions, but none of them are visit, not creating a section")
return None
# We have a visit transition, so we have a pretty good idea that
# this is a real section. So let's create a dummy section for it and return
logging.debug("found visit transition %s, returning dummy section" % visit_ended_transition_df[["transition", "fmt_time"]])
return self.create_unknown_section(location_points)
def extend_activity_to_location(self, motion_change, location_point):
new_mc = ecwm.Motionactivity({
'type': motion_change.type,
'confidence': motion_change.confidence,
'ts': location_point.data.ts,
'local_dt': location_point.data.local_dt,
'fmt_time': location_point.data.fmt_time
})
return new_mc
def segment_into_sections(self, timeseries, time_query):
"""
Determine locations within the specified time that represent segmentation points for a trip.
:param timeseries: the time series for this user
:param time_query: the range to consider for segmentation
:return: a list of tuples [(start1, end1), (start2, end2), ...] that represent the start and end of sections
in this time range. end[n] and start[n+1] are typically assumed to be adjacent.
"""
motion_changes = self.segment_into_motion_changes(timeseries, time_query)
location_points = timeseries.get_data_df("background/filtered_location", time_query)
if len(location_points) == 0:
logging.debug("There are no points in the trip. How the heck did we segment it?")
return []
if len(motion_changes) == 0:
dummy_sec = self.get_section_if_applicable(timeseries, time_query, location_points)
if dummy_sec is not None:
return [dummy_sec]
else:
return []
# Now, we know that we have location points and we have motion_changes.
section_list = []
# Sometimes, on iOS, we have no overlap between motion detection
# and location points.
# In a concrete example, the motion points are:
# 13 100 high 10 2016-02-22T15:36:06.491621-08:00
# 14 100 high 0 2016-02-22T15:36:09.353743-08:00
# 15 100 high 10 2016-02-22T15:36:13.169997-08:00
# 16 75 medium 0 2016-02-22T15:36:13.805993-08:00
# while the trip points are 2016-02-22T15:36:00 and then
# 2016-02-22T15:36:23. So there are no location points within
# that very narrow range. And there are no more motion points
# until the trip end at 15:37:35. This is because, unlike android,
# we cannot specify a sampling frequency for the motion activity
# So let us extend the first motion change to the beginning of the
# trip, and the last motion change to the end of the trip
motion_changes[0] = (self.extend_activity_to_location(motion_changes[0][0],
timeseries.df_row_to_entry("background/filtered_location",
location_points.iloc[0])),
motion_changes[0][1])
motion_changes[-1] = (motion_changes[-1][0],
self.extend_activity_to_location(motion_changes[-1][1],
timeseries.df_row_to_entry("background/filtered_location",
location_points.iloc[-1])))
for (start_motion, end_motion) in motion_changes:
logging.debug("Considering %s from %s -> %s" %
(start_motion.type, start_motion.fmt_time, end_motion.fmt_time))
# Find points that correspond to this section
raw_section_df = location_points[(location_points.ts >= start_motion.ts) &
(location_points.ts <= end_motion.ts)]
if len(raw_section_df) == 0:
logging.warn("Found no location points between %s and %s" % (start_motion, end_motion))
else:
logging.debug("with iloc, section start point = %s, section end point = %s" %
(ecwl.Location(raw_section_df.iloc[0]), ecwl.Location(raw_section_df.iloc[-1])))
section_list.append((raw_section_df.iloc[0], raw_section_df.iloc[-1], start_motion.type))
# if this lack of overlap is part of an existing set of sections,
# then it is fine, because in the section segmentation code, we
# will mark it as a transition
return section_list
|
{
"content_hash": "de2c6ecb4cb9cd4244f34ba831247372",
"timestamp": "",
"source": "github",
"line_count": 110,
"max_line_length": 131,
"avg_line_length": 57.127272727272725,
"alnum_prop": 0.6195098663271802,
"repo_name": "yw374cornell/e-mission-server",
"id": "e676d7a3dfbd7e319673a9b3d0989f76dc3f5c55",
"size": "6303",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "emission/analysis/intake/segmentation/section_segmentation_methods/smoothed_high_confidence_with_visit_transitions.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "445"
},
{
"name": "CSS",
"bytes": "717871"
},
{
"name": "HTML",
"bytes": "114875"
},
{
"name": "JavaScript",
"bytes": "7620696"
},
{
"name": "Jupyter Notebook",
"bytes": "97095629"
},
{
"name": "Python",
"bytes": "1584848"
},
{
"name": "Shell",
"bytes": "2299"
},
{
"name": "Smarty",
"bytes": "3456"
}
],
"symlink_target": ""
}
|
import _plotly_utils.basevalidators
class ColorbarValidator(_plotly_utils.basevalidators.CompoundValidator):
def __init__(self, plotly_name="colorbar", parent_name="parcats.line", **kwargs):
super(ColorbarValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
data_class_str=kwargs.pop("data_class_str", "ColorBar"),
data_docs=kwargs.pop(
"data_docs",
"""
bgcolor
Sets the color of padded area.
bordercolor
Sets the axis line color.
borderwidth
Sets the width (in px) or the border enclosing
this color bar.
dtick
Sets the step in-between ticks on this axis.
Use with `tick0`. Must be a positive number, or
special strings available to "log" and "date"
axes. If the axis `type` is "log", then ticks
are set every 10^(n*dtick) where n is the tick
number. For example, to set a tick mark at 1,
10, 100, 1000, ... set dtick to 1. To set tick
marks at 1, 100, 10000, ... set dtick to 2. To
set tick marks at 1, 5, 25, 125, 625, 3125, ...
set dtick to log_10(5), or 0.69897000433. "log"
has several special values; "L<f>", where `f`
is a positive number, gives ticks linearly
spaced in value (but not position). For example
`tick0` = 0.1, `dtick` = "L0.5" will put ticks
at 0.1, 0.6, 1.1, 1.6 etc. To show powers of 10
plus small digits between, use "D1" (all
digits) or "D2" (only 2 and 5). `tick0` is
ignored for "D1" and "D2". If the axis `type`
is "date", then you must convert the time to
milliseconds. For example, to set the interval
between ticks to one day, set `dtick` to
86400000.0. "date" also has special values
"M<n>" gives ticks spaced by a number of
months. `n` must be a positive integer. To set
ticks on the 15th of every third month, set
`tick0` to "2000-01-15" and `dtick` to "M3". To
set ticks every 4 years, set `dtick` to "M48"
exponentformat
Determines a formatting rule for the tick
exponents. For example, consider the number
1,000,000,000. If "none", it appears as
1,000,000,000. If "e", 1e+9. If "E", 1E+9. If
"power", 1x10^9 (with 9 in a super script). If
"SI", 1G. If "B", 1B.
len
Sets the length of the color bar This measure
excludes the padding of both ends. That is, the
color bar length is this length minus the
padding on both ends.
lenmode
Determines whether this color bar's length
(i.e. the measure in the color variation
direction) is set in units of plot "fraction"
or in *pixels. Use `len` to set the value.
nticks
Specifies the maximum number of ticks for the
particular axis. The actual number of ticks
will be chosen automatically to be less than or
equal to `nticks`. Has an effect only if
`tickmode` is set to "auto".
outlinecolor
Sets the axis line color.
outlinewidth
Sets the width (in px) of the axis line.
separatethousands
If "true", even 4-digit integers are separated
showexponent
If "all", all exponents are shown besides their
significands. If "first", only the exponent of
the first tick is shown. If "last", only the
exponent of the last tick is shown. If "none",
no exponents appear.
showticklabels
Determines whether or not the tick labels are
drawn.
showtickprefix
If "all", all tick labels are displayed with a
prefix. If "first", only the first tick is
displayed with a prefix. If "last", only the
last tick is displayed with a suffix. If
"none", tick prefixes are hidden.
showticksuffix
Same as `showtickprefix` but for tick suffixes.
thickness
Sets the thickness of the color bar This
measure excludes the size of the padding, ticks
and labels.
thicknessmode
Determines whether this color bar's thickness
(i.e. the measure in the constant color
direction) is set in units of plot "fraction"
or in "pixels". Use `thickness` to set the
value.
tick0
Sets the placement of the first tick on this
axis. Use with `dtick`. If the axis `type` is
"log", then you must take the log of your
starting tick (e.g. to set the starting tick to
100, set the `tick0` to 2) except when
`dtick`=*L<f>* (see `dtick` for more info). If
the axis `type` is "date", it should be a date
string, like date data. If the axis `type` is
"category", it should be a number, using the
scale where each category is assigned a serial
number from zero in the order it appears.
tickangle
Sets the angle of the tick labels with respect
to the horizontal. For example, a `tickangle`
of -90 draws the tick labels vertically.
tickcolor
Sets the tick color.
tickfont
Sets the color bar's tick label font
tickformat
Sets the tick label formatting rule using d3
formatting mini-languages which are very
similar to those in Python. For numbers, see:
https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format
And for dates see:
https://github.com/d3/d3-3.x-api-
reference/blob/master/Time-Formatting.md#format
We add one item to d3's date formatter: "%{n}f"
for fractional seconds with n digits. For
example, *2016-10-13 09:15:23.456* with
tickformat "%H~%M~%S.%2f" would display
"09~15~23.46"
tickformatstops
A tuple of :class:`plotly.graph_objects.parcats
.line.colorbar.Tickformatstop` instances or
dicts with compatible properties
tickformatstopdefaults
When used in a template (as layout.template.dat
a.parcats.line.colorbar.tickformatstopdefaults)
, sets the default property values to use for
elements of
parcats.line.colorbar.tickformatstops
ticklen
Sets the tick length (in px).
tickmode
Sets the tick mode for this axis. If "auto",
the number of ticks is set via `nticks`. If
"linear", the placement of the ticks is
determined by a starting position `tick0` and a
tick step `dtick` ("linear" is the default
value if `tick0` and `dtick` are provided). If
"array", the placement of the ticks is set via
`tickvals` and the tick text is `ticktext`.
("array" is the default value if `tickvals` is
provided).
tickprefix
Sets a tick label prefix.
ticks
Determines whether ticks are drawn or not. If
"", this axis' ticks are not drawn. If
"outside" ("inside"), this axis' are drawn
outside (inside) the axis lines.
ticksuffix
Sets a tick label suffix.
ticktext
Sets the text displayed at the ticks position
via `tickvals`. Only has an effect if
`tickmode` is set to "array". Used with
`tickvals`.
ticktextsrc
Sets the source reference on Chart Studio Cloud
for ticktext .
tickvals
Sets the values at which ticks on this axis
appear. Only has an effect if `tickmode` is set
to "array". Used with `ticktext`.
tickvalssrc
Sets the source reference on Chart Studio Cloud
for tickvals .
tickwidth
Sets the tick width (in px).
title
:class:`plotly.graph_objects.parcats.line.color
bar.Title` instance or dict with compatible
properties
titlefont
Deprecated: Please use
parcats.line.colorbar.title.font instead. Sets
this color bar's title font. Note that the
title's font used to be set by the now
deprecated `titlefont` attribute.
titleside
Deprecated: Please use
parcats.line.colorbar.title.side instead.
Determines the location of color bar's title
with respect to the color bar. Note that the
title's location used to be set by the now
deprecated `titleside` attribute.
x
Sets the x position of the color bar (in plot
fraction).
xanchor
Sets this color bar's horizontal position
anchor. This anchor binds the `x` position to
the "left", "center" or "right" of the color
bar.
xpad
Sets the amount of padding (in px) along the x
direction.
y
Sets the y position of the color bar (in plot
fraction).
yanchor
Sets this color bar's vertical position anchor
This anchor binds the `y` position to the
"top", "middle" or "bottom" of the color bar.
ypad
Sets the amount of padding (in px) along the y
direction.
""",
),
**kwargs
)
|
{
"content_hash": "9eb2ae29347ad46ec5364dbe3f47d580",
"timestamp": "",
"source": "github",
"line_count": 228,
"max_line_length": 85,
"avg_line_length": 47.49561403508772,
"alnum_prop": 0.5228552959645396,
"repo_name": "plotly/python-api",
"id": "bf5d856f0a541dd7188481f5f5a183c75eabb585",
"size": "10829",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/parcats/line/_colorbar.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "6870"
},
{
"name": "Makefile",
"bytes": "1708"
},
{
"name": "Python",
"bytes": "823245"
},
{
"name": "Shell",
"bytes": "3238"
}
],
"symlink_target": ""
}
|
from .gui import (
Widget,
Button,
TextInput,
SpinBox,
Label,
GenericDialog,
InputDialog,
ListView,
ListItem,
DropDown,
DropDownItem,
Image,
Table,
TableRow,
TableItem,
TableTitle,
Input,
Slider,
ColorPicker,
Date,
GenericObject,
FileFolderNavigator,
FileFolderItem,
FileSelectionDialog,
Menu,
MenuItem,
FileUploader,
FileDownloader,
VideoPlayer,
)
from .server import App, Server, start
from pkg_resources import get_distribution, DistributionNotFound
try:
__version__ = get_distribution(__name__).version
except DistributionNotFound:
# package is not installed
pass
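# --- Editor's note: hedged usage sketch, not part of the original module,
# left commented out so that importing the package stays side-effect free.
#
#     class HelloApp(App):
#         def main(self):
#             return Label('Hello, remi!')
#
#     start(HelloApp, port=8081)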
|
{
"content_hash": "fcf041db11fb236895ee55c31a5f7a1d",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 64,
"avg_line_length": 17.45,
"alnum_prop": 0.6547277936962751,
"repo_name": "dddomodossola/remi",
"id": "74378e4f7a4ffc49aba02403945c571ba3cd51e4",
"size": "698",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "remi/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "17384"
},
{
"name": "Jupyter Notebook",
"bytes": "34372"
},
{
"name": "Python",
"bytes": "523102"
},
{
"name": "Shell",
"bytes": "612"
}
],
"symlink_target": ""
}
|
import base64
import urllib.request
import json
def decodeB64(code):
bas64text = base64.b64decode(code)
baseUTF = bas64text.decode('utf-8')
crop = baseUTF.split('\"url\":\"')[1]
url = crop[:-4]
print(url)
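# --- Editor's note: hedged alternative, not part of the original script. The
# decoded payload is a JSON document whose skin URL normally sits under
# textures -> SKIN -> url, so it can also be extracted with the json module
# instead of the string slicing above.
def decode_with_json(code):
    payload = json.loads(base64.b64decode(code).decode('utf-8'))
    return payload['textures']['SKIN']['url']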
def getSkin(UUID):
page = urllib.request.urlopen('https://sessionserver.mojang.com/session/minecraft/profile/' + UUID)
content = page.read().decode('utf-8')
json_obj = json.loads(content)
list_d = json_obj['properties']
return list_d[0]['value']
decodeB64(getSkin(input('Enter UUID > ')))
|
{
"content_hash": "93e00bcd897c83920590d8bdc0a97c4e",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 103,
"avg_line_length": 28.1,
"alnum_prop": 0.6334519572953736,
"repo_name": "kevinchu-sg/Minecraft-Skin-Decoder",
"id": "502a49b6d64b4152cc5b16effc1b7e04c4f6cb07",
"size": "562",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "decode.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "562"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import, print_function, division
from numba import unittest_support as unittest
from numba import guvectorize
from numba import void, float32
import numpy as np
import numpy.core.umath_tests as ut
def matmulcore(A, B, C):
m, n = A.shape
n, p = B.shape
for i in range(m):
for j in range(p):
C[i, j] = 0
for k in range(n):
C[i, j] += A[i, k] * B[k, j]
class TestVectorizeDecor(unittest.TestCase):
def test_cpu_guvectorize(self):
target = 'cpu'
gufunc = guvectorize([void(float32[:,:], float32[:,:], float32[:,:])],
'(m,n),(n,p)->(m,p)',
target=target)(matmulcore)
matrix_ct = 1001 # an odd number to test thread/block division in CUDA
A = np.arange(matrix_ct * 2 * 4, dtype=np.float32).reshape(matrix_ct, 2, 4)
B = np.arange(matrix_ct * 4 * 5, dtype=np.float32).reshape(matrix_ct, 4, 5)
C = gufunc(A, B)
Gold = ut.matrix_multiply(A, B)
self.assertTrue(np.allclose(C, Gold))
if __name__ == '__main__':
unittest.main()
|
{
"content_hash": "c85b21bf8f16cd3298cb5228f0c319a4",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 83,
"avg_line_length": 29.435897435897434,
"alnum_prop": 0.563588850174216,
"repo_name": "pombredanne/numba",
"id": "d49350c43b8399a86f41b54df4b4420d7d4fedff",
"size": "1148",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "numba/tests/npyufunc/test_guvectorize_decor.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Batchfile",
"bytes": "2212"
},
{
"name": "C",
"bytes": "249112"
},
{
"name": "C++",
"bytes": "17024"
},
{
"name": "Cuda",
"bytes": "214"
},
{
"name": "HTML",
"bytes": "98846"
},
{
"name": "PowerShell",
"bytes": "3153"
},
{
"name": "Python",
"bytes": "3320040"
},
{
"name": "Shell",
"bytes": "120"
}
],
"symlink_target": ""
}
|
import subprocess as sp
import os
from .safe_call import safe_call
def init_parser(parser):
parser.add_argument('name', type=str, help='Cluster name.')
parser.add_argument('service', type=str,
choices=['notebook', 'nb', 'spark-ui', 'ui', 'spark-ui1', 'ui1',
'spark-ui2', 'ui2', 'spark-history', 'hist'],
help='Web service to launch.')
parser.add_argument('--port', '-p', default='10000', type=str,
help='Local port to use for SSH tunnel to master node (default: %(default)s).')
parser.add_argument('--zone', '-z', default='us-central1-b', type=str,
help='Compute zone for Dataproc cluster (default: %(default)s).')
def main(args):
print("Connecting to cluster '{}'...".format(args.name))
# shortcut mapping
shortcut = {
'ui': 'spark-ui',
'ui1': 'spark-ui1',
'ui2': 'spark-ui2',
'hist': 'history',
'nb': 'notebook'
}
service = args.service
if service in shortcut:
service = shortcut[service]
# Dataproc port mapping
dataproc_ports = {
'spark-ui': 4040,
'spark-ui1': 4041,
'spark-ui2': 4042,
'spark-history': 18080,
'notebook': 8123
}
connect_port = dataproc_ports[service]
# open SSH tunnel to master node
sp.check_call(
['gcloud',
'compute',
'ssh',
'{}-m'.format(args.name),
'--zone={}'.format(args.zone),
'--ssh-flag=-D {}'.format(args.port),
'--ssh-flag=-N',
'--ssh-flag=-f',
'--ssh-flag=-n'],
stderr=sp.STDOUT
)
# open Chrome with SOCKS proxy configuration
with open(os.devnull, 'w') as f:
sp.Popen([
r'/Applications/Google Chrome.app/Contents/MacOS/Google Chrome',
'http://localhost:{}'.format(connect_port),
'--proxy-server=socks5://localhost:{}'.format(args.port),
'--host-resolver-rules=MAP * 0.0.0.0 , EXCLUDE localhost',
'--proxy-bypass-list=<-loopback>', # https://chromium.googlesource.com/chromium/src/+/da790f920bbc169a6805a4fb83b4c2ab09532d91
'--user-data-dir=/tmp/'
], stdout=f, stderr=f)
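# --- Editor's note: hedged usage sketch, not part of the original module.
# This file only defines init_parser()/main(); a hypothetical wrapper inside
# the same package could drive them like this (cluster name is made up):
#
#     import argparse
#     parser = argparse.ArgumentParser()
#     init_parser(parser)
#     main(parser.parse_args(['my-cluster', 'notebook', '--port', '10000']))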
|
{
"content_hash": "b3f9787fcb3afa6b14395bfac93d0d0b",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 138,
"avg_line_length": 35.07692307692308,
"alnum_prop": 0.5434210526315789,
"repo_name": "Nealelab/cloudtools",
"id": "4b9bdc6c79140277f82f43c2ade0e6de8e85f740",
"size": "2280",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cloudtools/connect.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "605"
},
{
"name": "Makefile",
"bytes": "944"
},
{
"name": "Python",
"bytes": "46863"
},
{
"name": "Shell",
"bytes": "2536"
}
],
"symlink_target": ""
}
|
"""
This module is the central entity that determines which implementation of the
API is used.
"""
__author__ = 'petar@googlepb.com (Petar Petrov)'
import os
# This environment variable can be used to switch to a certain implementation
# of the Python API. Right now only 'python' and 'cpp' are valid values. Any
# other value will be ignored.
_implementation_type = os.getenv('PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION',
'python')
if _implementation_type != 'python':
# For now, by default use the pure-Python implementation.
# The code below checks if the C extension is available and
# uses it if it is available.
_implementation_type = 'cpp'
## Determine automatically which implementation to use.
#try:
# from googlepb.protobuf.internal import cpp_message
# _implementation_type = 'cpp'
#except ImportError, e:
# _implementation_type = 'python'
# This environment variable can be used to switch between the two
# 'cpp' implementations. Right now only 1 and 2 are valid values. Any
# other value will be ignored.
_implementation_version_str = os.getenv(
'PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION_VERSION',
'1')
if _implementation_version_str not in ('1', '2'):
raise ValueError(
"unsupported PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION_VERSION: '" +
_implementation_version_str + "' (supported versions: 1, 2)"
)
_implementation_version = int(_implementation_version_str)
# Usage of this function is discouraged. Clients shouldn't care which
# implementation of the API is in use. Note that there is no guarantee
# that differences between APIs will be maintained.
# Please don't use this function if possible.
def Type():
return _implementation_type
# See comment on 'Type' above.
def Version():
return _implementation_version
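# --- Editor's note: hedged usage sketch, not part of the original module.
# Although querying the backend is discouraged (see above), a caller that must
# branch on it could do so like this:
#
#     from googlepb.protobuf.internal import api_implementation
#     if api_implementation.Type() == 'cpp':
#         print('C++ backend, API version %d' % api_implementation.Version())
#     else:
#         print('pure-Python backend')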
|
{
"content_hash": "ba5e9ce88b88c62d0e7f2b6259b48a48",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 77,
"avg_line_length": 32,
"alnum_prop": 0.7198464912280702,
"repo_name": "beschulz/ved-decoder",
"id": "daa340c91efe23786c671099f6e01f38c162cc89",
"size": "3444",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "lib/googlepb/protobuf/internal/api_implementation.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "273876"
}
],
"symlink_target": ""
}
|
import datetime
import mwparserfromhell
import pywikibot
import re
import sys
SOFT_REDIR_CATS = "Wikipedia soft redirected categories"
NUM_PAGES = 2
SUMMARY = "[[Wikipedia:Bots/Requests for approval/EnterpriseyBot 10|Bot]] removing the article class assessment"
DATA_FILE = "current-progress.txt"
WP_BANNER_SHELL = "WikiProject banner shell"
# Don't touch parameters of these banners
UNTOUCHABLE_BANNERS = ("WikiProject Anime and manga")
def verify_redirect_age(site, page):
"""Returns True iff the page was a redirect/nonexistent a week ago."""
a_week_ago = site.server_time() - datetime.timedelta(days=7)
for each_rev_info in page.getVersionHistory():
if each_rev_info.timestamp <= a_week_ago:
text_a_week_ago = page.getOldVersion(each_rev_info.revid)
return "#REDIRECT" in text_a_week_ago
# If we're here, the page didn't exist a week ago
earliest_revid = page.getVersionHistory(reverse=True)[0].revid
earliest_text = page.getOldVersion(earliest_revid)
return "#REDIRECT" in earliest_text
class TemplateChecker:
def __init__(self, site):
"""Initializes the internal list of templates to avoid
changing."""
wpbs_redirects = get_template_redirects(site, WP_BANNER_SHELL)
        untouchable_templates = [redir
                                 for banner in UNTOUCHABLE_BANNERS
                                 for redir in get_template_redirects(site, banner)]
        self.names_to_avoid = set(wpbs_redirects +
                                  untouchable_templates)
    def check(self, template):
        """Returns True if we are allowed to alter the parameters of the
        given template, and False otherwise."""
        sanitized_name = unicode(template.name).lower().strip()
        return (sanitized_name.startswith("wikiproject") and
                sanitized_name not in self.names_to_avoid)
def get_template_redirects(site, template_name):
"""Gets the names of all of the template-space redirects to the
provided template. The names come without namespaces.
Example, if `site` is a enwiki site object:
    >>> get_template_redirects(site, "Hexadecimal")
[u'hexdigit']
"""
template_page = pywikibot.Page(site, "Template:" + template_name)
return [page.title(withNamespace=False).lower()
for page
in template_page.getReferences(redirectsOnly=True)
if page.namespace() == 10]
def main():
print("Starting redirect-banners at " + datetime.datetime.utcnow().isoformat())
site = pywikibot.Site("en", "wikipedia")
site.login()
i = 0
    checker = TemplateChecker(site)
# If we have a data file, pick up where we left off
try:
with open(DATA_FILE) as data_file:
start_sort = data_file.read()
print(start_sort)
except IOError:
start_sort = ""
# We always write our progress to the previous category, to avoid
# skipping any pages
previous_category = None
# Because PWB won't let us use hex keys, build our own generator.
# Random argument keys come from site.py in Pywikibot (specifically,
# the site.categorymembers() and site._generator() functions)
gen_args = {"gcmtitle": "Category:All_redirect_categories",
"gcmprop": "title",
"gcmstartsortkeyprefix": start_sort}
members_gen = pywikibot.data.api.PageGenerator("categorymembers", site=site, parameters=gen_args)
for redirect_cat in members_gen:
if redirect_cat.title(withNamespace=False) == SOFT_REDIR_CATS:
continue
# Record which subcat we were in for the next run
if previous_category:
with open(DATA_FILE, "w") as data_file:
data_file.write(previous_category.title(withNamespace=False))
for each_article in redirect_cat.articles(recurse=True, namespaces=(0)):
print("Considering \"{}\".".format(each_article.title().encode("utf-8")))
if not verify_redirect_age(site, each_article): continue
talk_page = each_article.toggleTalkPage()
if not talk_page.exists() or talk_page.isRedirectPage(): continue
talk_text = talk_page.get()
parse_result = mwparserfromhell.parse(talk_text)
original_talk_text = talk_text
talk_banners = filter(checker.check, parse_result.filter_templates())
if not talk_banners: continue
for each_template in talk_banners:
class_params = [x for x in each_template.params
if ("class" in x.lower() and
"formerly assessed as" not in x.lower())]
if class_params:
if len(class_params) != 1:
print("Multiple class params in " + talk_page.title(withNamespace=True))
else:
current_unicode = unicode(each_template)
each_template.remove(class_params[0].partition("=")[0])
old_quality = class_params[0].partition("=")[2]
if not re.match("\w+$", old_quality.strip()):
print("Invalid class!")
continue
print(current_unicode)
new_unicode = unicode(each_template)
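                    # Keep a record of the removed assessment as an HTML
                    # comment so the old class can still be recovered later.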
new_unicode += " <!-- Formerly assessed as " + old_quality.strip() + "-class -->"
print(new_unicode)
talk_text = talk_text.replace(current_unicode, new_unicode)
if talk_page.text != talk_text:
talk_page.text = talk_text
talk_page.save(summary=SUMMARY)
i += 1
print("{} out of {} done so far.".format(i, NUM_PAGES))
if i >= NUM_PAGES:
break
previous_category = redirect_cat
if i >= NUM_PAGES:
break
if __name__ == "__main__":
main()
|
{
"content_hash": "967067756a114c52e06cdafea5a44961",
"timestamp": "",
"source": "github",
"line_count": 143,
"max_line_length": 112,
"avg_line_length": 41.74125874125874,
"alnum_prop": 0.6049589545987603,
"repo_name": "APerson241/EnterpriseyBot",
"id": "31a22eed6035d5a715c02771a33eabbf43acd0c3",
"size": "5969",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "redirect-banners/bot.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "243"
},
{
"name": "HTML",
"bytes": "443"
},
{
"name": "Python",
"bytes": "95993"
},
{
"name": "Ruby",
"bytes": "5547"
},
{
"name": "Shell",
"bytes": "53"
}
],
"symlink_target": ""
}
|
from django.contrib import admin
from models import EmailConfirmation, EmailAddress
class EmailAddressAdmin(admin.ModelAdmin):
list_display = ('email', 'user', 'primary', 'verified')
list_filter = ('primary', 'verified')
search_fields = ('email',
'user__username',
'user__first_name',
'user__last_name')
raw_id_fields = ('user',)
class EmailConfirmationAdmin(admin.ModelAdmin):
list_display = ('email_address', 'created', 'sent', 'key')
list_filter = ('sent',)
raw_id_fields = ('email_address',)
admin.site.register(EmailConfirmation, EmailConfirmationAdmin)
admin.site.register(EmailAddress, EmailAddressAdmin)
|
{
"content_hash": "e3699204ab8c45e586df2053ced489cf",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 62,
"avg_line_length": 33.80952380952381,
"alnum_prop": 0.643661971830986,
"repo_name": "GinnyN/towerofdimensions-django",
"id": "b722f10f01a635042189bcaf79b9c441a7cc6470",
"size": "710",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "django-allauth/build/lib/allauth/account/admin.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "83182"
},
{
"name": "JavaScript",
"bytes": "133482"
},
{
"name": "Perl",
"bytes": "36399"
},
{
"name": "Python",
"bytes": "16134977"
},
{
"name": "Racket",
"bytes": "212"
},
{
"name": "Ruby",
"bytes": "140"
},
{
"name": "Scala",
"bytes": "70604"
},
{
"name": "Shell",
"bytes": "7871"
}
],
"symlink_target": ""
}
|
import pickle
import numpy as np
from teafacto.util import argprun, tokenize
def run(trainp="fb_train.tsv", testp="fb_test.tsv", validp="fb_valid.tsv", outp="datamat.wordchar.pkl", maxchar=30):
worddic = {"<RARE>": 0}
chardic = {}
entdic = {}
reldic = {}
acc = {}
acc["train"] = getdata(trainp, worddic, chardic, entdic, reldic, maxchar=maxchar)
acc["valid"] = getdata(validp, worddic, chardic, entdic, reldic, maxchar=maxchar)
acc["test"] = getdata(testp, worddic, chardic, entdic, reldic, maxchar=maxchar)
acc["worddic"] = worddic
acc["chardic"] = chardic
numents = len(entdic)
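    # Relation ids are shifted past the entity ids below so that entities and
    # relations can share a single combined dictionary (entdic).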
acc["train"][1][:, 1] += numents
acc["valid"][1][:, 1] += numents
acc["test"][1][:, 1] += numents
reldic = {k: v+numents for k, v in reldic.items()}
entdic.update(reldic)
print len(entdic)
acc["entdic"] = entdic
acc["numents"] = numents
pickle.dump(acc, open(outp, "w"))
def getdata(p, worddic, chardic, entdic, reldic, maxc=np.infty, maxchar=30):
data = []
gold = []
maxlen = 0
maxwordlen = 0
c = 0
for line in open(p):
q, a = (line[:-1] if line[-1] == "\n" else line).split("\t")
s, p = a.split()
words = tokenize(q)
maxlen = max(maxlen, len(words))
for word in words:
maxwordlen = max(maxwordlen, len(word))
if word not in worddic:
worddic[word] = len(worddic)
if s not in entdic:
entdic[s] = len(entdic)
if p not in reldic:
reldic[p] = len(reldic)
data.append(words)
gold.append([entdic[s], reldic[p]])
c += 1
if c > maxc:
break
print maxwordlen
maxchar = min(maxchar, maxwordlen)
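    # wordmat holds one word id per token position; charten additionally holds
    # up to `maxchar` character ids per token. Both use -1 as padding.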
wordmat = np.zeros((c, maxlen)).astype("int32") - 1
charten = np.zeros((c, maxlen, maxchar)).astype("int32") - 1
goldmat = np.zeros((c, 2)).astype("int32")
i = 0
for sent in data:
j = 0
for word in sent:
if len(word) > maxchar:
print word
wordmat[i, j] = worddic[word]
chars = map(ord, word)
charten[i, j, :min(len(chars), maxchar)] = chars[:min(len(chars), maxchar)]
j += 1
i += 1
i = 0
for x in gold:
goldmat[i, :] = x
i += 1
# making chardic and transforming through chardic
thischardic = dict(map(lambda (x,y): (ord(x), y), chardic.items()))
nextid = 0
while nextid in thischardic.values():
nextid += 1
uniquechars = np.unique(charten)
for uniquechar in list(uniquechars):
if not uniquechar in thischardic and uniquechar >= 0:
thischardic[uniquechar] = nextid
while nextid in thischardic.values():
nextid += 1
chardic.update(dict(map(lambda (x, y): (chr(x), y), thischardic.items())))
print len(chardic), chardic
charten = np.vectorize(lambda x: thischardic[x] if x >= 0 else x)(charten)
datamat = np.concatenate([wordmat.reshape(wordmat.shape + (1,)), charten], axis=2)
return datamat, goldmat
if __name__ == "__main__":
argprun(run)
|
{
"content_hash": "196ea6da4b1abbce29dd633720f822aa",
"timestamp": "",
"source": "github",
"line_count": 94,
"max_line_length": 116,
"avg_line_length": 33.351063829787236,
"alnum_prop": 0.5655502392344498,
"repo_name": "lukovnikov/teafacto",
"id": "e368fb3a970d5bfd4d517801495432ee55f12b0a",
"size": "3135",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "data/simplequestions/tomat.wordchar.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "56665"
},
{
"name": "Python",
"bytes": "819448"
},
{
"name": "Shell",
"bytes": "102"
}
],
"symlink_target": ""
}
|
import sys
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
# Future is needed for pip distribution for python 3 support
dependencies = ['pyyaml', 'pycurl']
test_dependencies = ['django==1.6.5','django-tastypie==0.12.1','jsonpath','jmespath']
# Add additional compatibility shims
if sys.version_info[0] > 2:
dependencies.append('future') # Only works with direct local installs, not via pip
else:
test_dependencies.append('mock')
test_dependencies.append('discover')
setup(name='pyresttest',
version='1.7.2.dev',
description='Python RESTful API Testing & Microbenchmarking Tool',
long_description='Python RESTful API Testing & Microbenchmarking Tool \n Documentation at https://github.com/svanoort/pyresttest',
author='Sam Van Oort',
author_email='samvanoort@gmail.com',
url='https://github.com/svanoort/pyresttest',
keywords=['rest', 'web', 'http', 'testing'],
classifiers=[
'Environment :: Console',
'License :: OSI Approved :: Apache Software License',
'Natural Language :: English',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Topic :: Software Development :: Testing',
'Topic :: Software Development :: Quality Assurance',
'Topic :: Utilities'
],
py_modules=['pyresttest.resttest', 'pyresttest.generators', 'pyresttest.binding',
'pyresttest.parsing', 'pyresttest.validators', 'pyresttest.contenthandling',
'pyresttest.benchmarks', 'pyresttest.tests',
'pyresttest.six',
'pyresttest.ext.validator_jsonschema',
'pyresttest.ext.extractor_jmespath',
'pyresttest.workflow'],
license='Apache License, Version 2.0',
install_requires=dependencies,
tests_require=test_dependencies,
extras_require= {
'JSONSchema': ['jsonschema'],
'JMESPath': ['jmespath']
},
# Make this executable from command line when installed
scripts=['util/pyresttest', 'util/resttest.py'],
provides=['pyresttest']
)
|
{
"content_hash": "1cc7893f32e8a98b96e816d9491e3235",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 136,
"avg_line_length": 41.81818181818182,
"alnum_prop": 0.6334782608695653,
"repo_name": "satish-suradkar/pyresttest",
"id": "040178479362b2b0b22f1388e34361f0ac35f8fe",
"size": "2300",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Groovy",
"bytes": "16054"
},
{
"name": "Python",
"bytes": "315258"
},
{
"name": "Shell",
"bytes": "5999"
}
],
"symlink_target": ""
}
|
"""An implementation of MultiPerspectiveLayer for Bimpm model."""
import tensorflow as tf
from keras import backend as K
from keras.engine import Layer
from matchzoo.contrib.layers.attention_layer import AttentionLayer
class MultiPerspectiveLayer(Layer):
"""
A keras implementation of multi-perspective layer of BiMPM.
For detailed information, see Bilateral Multi-Perspective
Matching for Natural Language Sentences, section 3.2.
Examples:
>>> import matchzoo as mz
>>> perspective={'full': True, 'max-pooling': True,
... 'attentive': True, 'max-attentive': True}
>>> layer = mz.contrib.layers.MultiPerspectiveLayer(
... att_dim=50, mp_dim=20, perspective=perspective)
>>> layer.compute_output_shape(
... [(32, 10, 100), (32, 50), None, (32, 50), None,
... [(32, 40, 100), (32, 50), None, (32, 50), None]])
(32, 10, 83)
"""
def __init__(self,
att_dim: int,
mp_dim: int,
perspective: dict):
"""Class initialization."""
super(MultiPerspectiveLayer, self).__init__()
self._att_dim = att_dim
self._mp_dim = mp_dim
self._perspective = perspective
@classmethod
def list_available_perspectives(cls) -> list:
"""List available strategy for multi-perspective matching."""
return ['full', 'max-pooling', 'attentive', 'max-attentive']
@property
def num_perspective(self):
"""Get the number of perspectives that is True."""
return sum(self._perspective.values())
def build(self, input_shape: list):
"""Input shape."""
# The shape of the weights is l * d.
if self._perspective.get('full'):
self.full_match = MpFullMatch(self._mp_dim)
if self._perspective.get('max-pooling'):
self.max_pooling_match = MpMaxPoolingMatch(self._mp_dim)
if self._perspective.get('attentive'):
self.attentive_match = MpAttentiveMatch(self._att_dim,
self._mp_dim)
if self._perspective.get('max-attentive'):
self.max_attentive_match = MpMaxAttentiveMatch(self._att_dim)
self.built = True
def call(self, x: list, **kwargs):
"""Call."""
seq_lt, seq_rt = x[:5], x[5:]
# unpack seq_left and seq_right
# all hidden states, last hidden state of forward pass,
# last cell state of forward pass, last hidden state of
# backward pass, last cell state of backward pass.
lstm_reps_lt, forward_h_lt, _, backward_h_lt, _ = seq_lt
lstm_reps_rt, forward_h_rt, _, backward_h_rt, _ = seq_rt
match_tensor_list = []
match_dim = 0
if self._perspective.get('full'):
            # Each forward & backward contextual embedding is compared
            # with the last time step of the other sentence.
h_lt = tf.concat([forward_h_lt, backward_h_lt], axis=-1)
full_match_tensor = self.full_match([h_lt, lstm_reps_rt])
match_tensor_list.append(full_match_tensor)
match_dim += self._mp_dim + 1
if self._perspective.get('max-pooling'):
            # Each contextual embedding is compared with every contextual
            # embedding of the other sentence; retain the maximum of each dimension.
max_match_tensor = self.max_pooling_match([lstm_reps_lt,
lstm_reps_rt])
match_tensor_list.append(max_match_tensor)
match_dim += self._mp_dim
if self._perspective.get('attentive'):
            # Each contextual embedding is compared with every contextual
            # embedding of the other sentence; retain the sum of the weighted
            # mean of each dimension.
attentive_tensor = self.attentive_match([lstm_reps_lt,
lstm_reps_rt])
match_tensor_list.append(attentive_tensor)
match_dim += self._mp_dim + 1
if self._perspective.get('max-attentive'):
            # Each contextual embedding is compared with every contextual
            # embedding of the other sentence; retain the max of the weighted
            # mean of each dimension.
relevancy_matrix = _calc_relevancy_matrix(lstm_reps_lt,
lstm_reps_rt)
max_attentive_tensor = self.max_attentive_match([lstm_reps_lt,
lstm_reps_rt,
relevancy_matrix])
match_tensor_list.append(max_attentive_tensor)
match_dim += self._mp_dim + 1
mp_tensor = tf.concat(match_tensor_list, axis=-1)
return mp_tensor
def compute_output_shape(self, input_shape: list):
"""Compute output shape."""
shape_a = input_shape[0]
match_dim = 0
if self._perspective.get('full'):
match_dim += self._mp_dim + 1
if self._perspective.get('max-pooling'):
match_dim += self._mp_dim
if self._perspective.get('attentive'):
match_dim += self._mp_dim + 1
if self._perspective.get('max-attentive'):
match_dim += self._mp_dim + 1
return shape_a[0], shape_a[1], match_dim
class MpFullMatch(Layer):
"""Mp Full Match Layer."""
def __init__(self, mp_dim):
"""Init."""
super(MpFullMatch, self).__init__()
self.mp_dim = mp_dim
def build(self, input_shapes):
"""Build."""
# input_shape = input_shapes[0]
self.built = True
def call(self, x, **kwargs):
"""Call.
"""
rep_lt, reps_rt = x
att_lt = tf.expand_dims(rep_lt, 1)
match_tensor, match_dim = _multi_perspective_match(self.mp_dim,
reps_rt,
att_lt)
# match_tensor => [b, len_rt, mp_dim+1]
return match_tensor
def compute_output_shape(self, input_shape):
"""Compute output shape."""
return input_shape[1][0], input_shape[1][1], self.mp_dim + 1
class MpMaxPoolingMatch(Layer):
"""MpMaxPoolingMatch."""
def __init__(self, mp_dim):
"""Init."""
super(MpMaxPoolingMatch, self).__init__()
self.mp_dim = mp_dim
def build(self, input_shapes):
"""Build."""
d = input_shapes[0][-1]
self.kernel = self.add_weight(name='kernel',
shape=(1, 1, 1, self.mp_dim, d),
initializer='uniform',
trainable=True)
self.built = True
def call(self, x, **kwargs):
"""Call."""
reps_lt, reps_rt = x
# kernel: [1, 1, 1, mp_dim, d]
# lstm_lt => [b, len_lt, 1, 1, d]
reps_lt = tf.expand_dims(reps_lt, axis=2)
reps_lt = tf.expand_dims(reps_lt, axis=2)
reps_lt = reps_lt * self.kernel
# lstm_rt -> [b, 1, len_rt, 1, d]
reps_rt = tf.expand_dims(reps_rt, axis=2)
reps_rt = tf.expand_dims(reps_rt, axis=1)
match_tensor = _cosine_distance(reps_lt, reps_rt, cosine_norm=False)
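        # cosine scores have shape [b, len_lt, len_rt, mp_dim]; keep the
        # maximum over the left sequence for every right-hand position.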
max_match_tensor = tf.reduce_max(match_tensor, axis=1)
# match_tensor => [b, len_rt, m]
return max_match_tensor
def compute_output_shape(self, input_shape):
"""Compute output shape."""
return input_shape[1][0], input_shape[1][1], self.mp_dim
class MpAttentiveMatch(Layer):
"""
MpAttentiveMatch Layer.
Reference:
https://github.com/zhiguowang/BiMPM/blob/master/src/match_utils.py#L188-L193
Examples:
>>> import matchzoo as mz
>>> layer = mz.contrib.layers.multi_perspective_layer.MpAttentiveMatch(
... att_dim=30, mp_dim=20)
>>> layer.compute_output_shape([(32, 10, 100), (32, 40, 100)])
(32, 40, 20)
"""
def __init__(self, att_dim, mp_dim):
"""Init."""
super(MpAttentiveMatch, self).__init__()
self.att_dim = att_dim
self.mp_dim = mp_dim
def build(self, input_shapes):
"""Build."""
# input_shape = input_shapes[0]
self.built = True
def call(self, x, **kwargs):
"""Call."""
reps_lt, reps_rt = x[0], x[1]
# attention prob matrix
attention_layer = AttentionLayer(self.att_dim)
attn_prob = attention_layer([reps_rt, reps_lt])
# attention reps
att_lt = K.batch_dot(attn_prob, reps_lt)
# mp match
attn_match_tensor, match_dim = _multi_perspective_match(self.mp_dim,
reps_rt,
att_lt)
return attn_match_tensor
def compute_output_shape(self, input_shape):
"""Compute output shape."""
return input_shape[1][0], input_shape[1][1], self.mp_dim
class MpMaxAttentiveMatch(Layer):
"""MpMaxAttentiveMatch."""
def __init__(self, mp_dim):
"""Init."""
super(MpMaxAttentiveMatch, self).__init__()
self.mp_dim = mp_dim
def build(self, input_shapes):
"""Build."""
# input_shape = input_shapes[0]
self.built = True
def call(self, x):
"""Call."""
reps_lt, reps_rt = x[0], x[1]
relevancy_matrix = x[2]
max_att_lt = cal_max_question_representation(reps_lt, relevancy_matrix)
max_attentive_tensor, match_dim = _multi_perspective_match(self.mp_dim,
reps_rt,
max_att_lt)
return max_attentive_tensor
def cal_max_question_representation(reps_lt, attn_scores):
"""
Calculate max_question_representation.
:param reps_lt: [batch_size, passage_len, hidden_size]
:param attn_scores: []
:return: [batch_size, passage_len, hidden_size].
"""
attn_positions = tf.argmax(attn_scores, axis=2)
max_reps_lt = collect_representation(reps_lt, attn_positions)
return max_reps_lt
def collect_representation(representation, positions):
"""
Collect_representation.
:param representation: [batch_size, node_num, feature_dim]
:param positions: [batch_size, neighbour_num]
:return: [batch_size, neighbour_num]?
"""
return collect_probs(representation, positions)
def collect_final_step_of_lstm(lstm_representation, lengths):
"""
Collect final step of lstm.
:param lstm_representation: [batch_size, len_rt, dim]
:param lengths: [batch_size]
:return: [batch_size, dim]
"""
lengths = tf.maximum(lengths, K.zeros_like(lengths))
batch_size = tf.shape(lengths)[0]
# shape (batch_size)
batch_nums = tf.range(0, limit=batch_size)
# shape (batch_size, 2)
indices = tf.stack((batch_nums, lengths), axis=1)
result = tf.gather_nd(lstm_representation, indices,
name='last-forwar-lstm')
# [batch_size, dim]
return result
def collect_probs(probs, positions):
"""
Collect Probabilities.
Reference:
https://github.com/zhiguowang/BiMPM/blob/master/src/layer_utils.py#L128-L140
:param probs: [batch_size, chunks_size]
:param positions: [batch_size, pair_size]
:return: [batch_size, pair_size]
"""
batch_size = tf.shape(probs)[0]
pair_size = tf.shape(positions)[1]
# shape (batch_size)
batch_nums = K.arange(0, batch_size)
# [batch_size, 1]
batch_nums = tf.reshape(batch_nums, shape=[-1, 1])
# [batch_size, pair_size]
batch_nums = K.tile(batch_nums, [1, pair_size])
# shape (batch_size, pair_size, 2)
    # Cast to int32 so positions matches the dtype of batch_nums in tf.stack.
positions = tf.cast(positions, tf.int32)
indices = tf.stack([batch_nums, positions], axis=2)
pair_probs = tf.gather_nd(probs, indices)
# pair_probs = tf.reshape(pair_probs, shape=[batch_size, pair_size])
return pair_probs
def _multi_perspective_match(mp_dim, reps_rt, att_lt,
with_cosine=True, with_mp_cosine=True):
"""
The core function of zhiguowang's implementation.
reference:
https://github.com/zhiguowang/BiMPM/blob/master/src/match_utils.py#L207-L223
:param mp_dim: about 20
:param reps_rt: [batch, len_rt, dim]
:param att_lt: [batch, len_rt, dim]
:param with_cosine: True
:param with_mp_cosine: True
:return: [batch, len, 1 + mp_dim]
"""
shape_rt = tf.shape(reps_rt)
batch_size = shape_rt[0]
len_lt = shape_rt[1]
match_dim = 0
match_result_list = []
if with_cosine:
cosine_tensor = _cosine_distance(reps_rt, att_lt, False)
cosine_tensor = tf.reshape(cosine_tensor,
[batch_size, len_lt, 1])
match_result_list.append(cosine_tensor)
match_dim += 1
if with_mp_cosine:
mp_cosine_layer = MpCosineLayer(mp_dim)
mp_cosine_tensor = mp_cosine_layer([reps_rt, att_lt])
mp_cosine_tensor = tf.reshape(mp_cosine_tensor,
[batch_size, len_lt, mp_dim])
match_result_list.append(mp_cosine_tensor)
match_dim += mp_cosine_layer.mp_dim
match_result = tf.concat(match_result_list, 2)
return match_result, match_dim
class MpCosineLayer(Layer):
"""
Implementation of Multi-Perspective Cosine Distance.
Reference:
https://github.com/zhiguowang/BiMPM/blob/master/src/match_utils.py#L121-L129
Examples:
>>> import matchzoo as mz
>>> layer = mz.contrib.layers.multi_perspective_layer.MpCosineLayer(
... mp_dim=50)
>>> layer.compute_output_shape([(32, 10, 100), (32, 10, 100)])
(32, 10, 50)
"""
def __init__(self, mp_dim, **kwargs):
"""Init."""
self.mp_dim = mp_dim
super(MpCosineLayer, self).__init__(**kwargs)
def build(self, input_shape):
"""Build."""
self.kernel = self.add_weight(name='kernel',
shape=(1, 1, self.mp_dim,
input_shape[0][-1]),
initializer='uniform',
trainable=True)
super(MpCosineLayer, self).build(input_shape)
def call(self, x, **kwargs):
"""Call."""
v1, v2 = x
v1 = tf.expand_dims(v1, 2) * self.kernel # [b, s_lt, m, d]
v2 = tf.expand_dims(v2, 2) # [b, s_lt, 1, d]
return _cosine_distance(v1, v2, False)
def compute_output_shape(self, input_shape):
"""Compute output shape."""
return input_shape[0][0], input_shape[0][1], self.mp_dim
def _calc_relevancy_matrix(reps_lt, reps_rt):
reps_lt = tf.expand_dims(reps_lt, 1) # [b, 1, len_lt, d]
reps_rt = tf.expand_dims(reps_rt, 2) # [b, len_rt, 1, d]
relevancy_matrix = _cosine_distance(reps_lt, reps_rt)
# => [b, len_rt, len_lt, d]
return relevancy_matrix
def _mask_relevancy_matrix(relevancy_matrix, mask_lt, mask_rt):
"""
Mask relevancy matrix.
:param relevancy_matrix: [b, len_rt, len_lt]
:param mask_lt: [b, len_lt]
:param mask_rt: [b, len_rt]
:return: masked_matrix: [b, len_rt, len_lt]
"""
if mask_lt is not None:
relevancy_matrix = relevancy_matrix * tf.expand_dims(mask_lt, 1)
relevancy_matrix = relevancy_matrix * tf.expand_dims(mask_rt, 2)
return relevancy_matrix
def _cosine_distance(v1, v2, cosine_norm=True, eps=1e-6):
"""
    Cosine similarity along the last axis; the numerator only requires
    `tf.reduce_sum(v1 * v2, axis=-1)`.
:param v1: [batch, time_steps(v1), 1, m, d]
:param v2: [batch, 1, time_steps(v2), m, d]
:param cosine_norm: True
:param eps: 1e-6
:return: [batch, time_steps(v1), time_steps(v2), m]
"""
cosine_numerator = tf.reduce_sum(v1 * v2, axis=-1)
if not cosine_norm:
return K.tanh(cosine_numerator)
v1_norm = K.sqrt(tf.maximum(tf.reduce_sum(tf.square(v1), axis=-1), eps))
v2_norm = K.sqrt(tf.maximum(tf.reduce_sum(tf.square(v2), axis=-1), eps))
return cosine_numerator / v1_norm / v2_norm
|
{
"content_hash": "1dbf58b4dea01bb7fe9849bc25af8b89",
"timestamp": "",
"source": "github",
"line_count": 468,
"max_line_length": 80,
"avg_line_length": 34.72435897435897,
"alnum_prop": 0.5573810842409698,
"repo_name": "faneshion/MatchZoo",
"id": "64cfd33868917a14c049613d90b21b7be0d12274",
"size": "16251",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "matchzoo/contrib/layers/multi_perspective_layer.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "201"
},
{
"name": "Python",
"bytes": "249119"
},
{
"name": "Shell",
"bytes": "2746"
}
],
"symlink_target": ""
}
|
import urllib,urllib2,re,xbmc,xbmcplugin,xbmcgui
import os,sys,datetime
import demjson
import BeautifulSoup
DATELOOKUP = "http://www.thedailyshow.com/feeds/timeline/coordinates/"
pluginhandle = int(sys.argv[1])
shownail = xbmc.translatePath(os.path.join(os.getcwd().replace(';', ''),"icon.png"))
fanart = xbmc.translatePath(os.path.join(os.getcwd().replace(';', ''),'fanart.jpg'))
xbmcplugin.setPluginFanart(pluginhandle, fanart, color2='0xFFFF3300')
TVShowTitle = 'The Daily Show'
if xbmcplugin.getSetting(pluginhandle,"sort") == '0':
SORTORDER = 'date'
elif xbmcplugin.getSetting(pluginhandle,"sort") == '1':
SORTORDER = 'views'
elif xbmcplugin.getSetting(pluginhandle,"sort") == '2':
SORTORDER = 'rating'
################################ Common
def getURL( url ):
try:
print 'The Daily Show --> getURL :: url = '+url
txdata = None
txheaders = {
'Referer': 'http://www.thedailyshow.com/videos/',
'X-Forwarded-For': '12.13.14.15',
'User-Agent': 'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US;rv:1.9.2.3) Gecko/20100401 Firefox/3.6.3 ( .NET CLR 3.5.30729)',
}
req = urllib2.Request(url, txdata, txheaders)
#req = urllib2.Request(url)
#req.addheaders = [('Referer', 'http://www.thedailyshow.com/videos'),
# ('Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.2.3) Gecko/20100401 Firefox/3.6.3 ( .NET CLR 3.5.30729)')]
response = urllib2.urlopen(req)
link=response.read()
response.close()
except urllib2.URLError, e:
error = 'Error code: '+ str(e.code)
xbmcgui.Dialog().ok(error,error)
print 'Error code: ', e.code
return False
else:
return link
def addLink(name,url,iconimage='',plot=''):
ok=True
liz=xbmcgui.ListItem(name, iconImage="DefaultVideo.png", thumbnailImage=iconimage)
liz.setInfo( type="Video", infoLabels={ "Title": name, "Plot":plot, "TVShowTitle":TVShowTitle})
liz.setProperty('fanart_image',fanart)
ok=xbmcplugin.addDirectoryItem(handle=pluginhandle,url=url,listitem=liz)
return ok
def addDir(name,url,mode,iconimage=shownail,plot=''):
u=sys.argv[0]+"?url="+urllib.quote_plus(url)+"&mode="+str(mode)+"&name="+urllib.quote_plus(name)
ok=True
liz=xbmcgui.ListItem(name, iconImage="DefaultFolder.png", thumbnailImage=iconimage)
liz.setInfo( type="Video", infoLabels={ "Title": name, "Plot":plot, "TVShowTitle":TVShowTitle})
liz.setProperty('fanart_image',fanart)
ok=xbmcplugin.addDirectoryItem(handle=pluginhandle,url=u,listitem=liz,isFolder=True)
return ok
def pageFragments(url):
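    # The page number is the last character of the URL; build "next"/"previous"
    # URLs from it and only list a next page if probing it returns results.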
pageNum = int(url[-1])
nextPage = pageNum + 1
nurl = url.replace('page='+str(pageNum),'page='+str(nextPage))
prevPage = pageNum - 1
purl = url.replace('page='+str(pageNum),'page='+str(prevPage))
if '/box' in nurl or '/box' in purl:
nurl = nurl.replace('/box','')
purl = purl.replace('/box','')
data = getURL(nurl)
if 'Your search returned zero results' not in data:
addDir('Next Page ('+str(nextPage)+')',nurl,7)
if prevPage >= 1:
addDir('Previous Page ('+str(prevPage)+')',purl,7)
LISTVIDEOS(url)
xbmcplugin.endOfDirectory(pluginhandle,updateListing=True)
################################ Root listing
def ROOT():
addDir('Full Episodes','full',5)
addDir('Guests','guests',3)
xbmcplugin.endOfDirectory(pluginhandle)
def FULLEPISODES():
xbmcplugin.setContent(pluginhandle, 'episodes')
xbmcplugin.addSortMethod(pluginhandle, xbmcplugin.SORT_METHOD_NONE)
full = 'http://www.thedailyshow.com/full-episodes/'
data = getURL(full)
weeks = re.compile('<a id="(.+?)" class="seaso.+?" href="#">(.+?)</a>').findall(data)
for url, week in weeks:
data = getURL(url)
episodes=re.compile('<span class="date"><a href="(.+?)">(.+?)</a></span>').findall(data)
thumbnails=re.compile("<img width='.+?' height='.+?' src='(.+?)'.+?/>").findall(data)
descriptions=re.compile('<span class="description">(.+?)</span>').findall(data)
airdates=re.compile('<span class="date">Aired: (.+?)</span>').findall(data)
epNumbers=re.compile('<span class="id">Episode (.+?)</span>').findall(data)
listings = []
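        # Combine the parallel regex results into one record per episode:
        # [name, link, thumbnail, description, airdate, episode number].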
for link, name in episodes:
listing = []
listing.append(name)
listing.append(link)
listings.append(listing)
for thumbnail in thumbnails:
marker = thumbnails.index(thumbnail)
listings[marker].append(thumbnail)
for description in descriptions:
marker = descriptions.index(description)
listings[marker].append(description)
for airdate in airdates:
marker = airdates.index(airdate)
listings[marker].append(airdate)
for epNumber in epNumbers:
marker = epNumbers.index(epNumber)
listings[marker].append(epNumber)
print listings
for name, link, thumbnail, plot, date, seasonepisode in listings:
mode = 10
season = int(seasonepisode[:-3])
episode = int(seasonepisode[-3:])
u=sys.argv[0]+"?url="+urllib.quote_plus(link)+"&mode="+str(mode)+"&name="+urllib.quote_plus(name)
u += "&season="+urllib.quote_plus(str(season))
u += "&episode="+urllib.quote_plus(str(episode))
u += "&premiered="+urllib.quote_plus(date)
u += "&plot="+urllib.quote_plus(plot)
u += "&thumbnail="+urllib.quote_plus(thumbnail)
liz=xbmcgui.ListItem(name, iconImage="DefaultFolder.png", thumbnailImage=thumbnail)
liz.setInfo( type="Video", infoLabels={ "Title": name,
"Plot":plot,
"Season":season,
"Episode": episode,
"premiered":date,
"TVShowTitle":TVShowTitle})
liz.setProperty('IsPlayable', 'true')
liz.setProperty('fanart_image',fanart)
xbmcplugin.addDirectoryItem(handle=pluginhandle,url=u,listitem=liz)
xbmcplugin.endOfDirectory(pluginhandle)
class Guest(object):
def __init__(self,data):
self.soup = data
def day(self):
raw_text = self.soup('a',{'class' : 'full-episode-url'})[0].getText()
raw_text = raw_text.replace('Full Episode Available','')
m = re.search(r'(.*) - .*', raw_text)
return m.group(1)
def name(self):
return self.soup('span', {'class' : 'title'})[0].getText().replace('Exclusive - ','')
def url(self):
return self.soup('a', {'class' : 'imageHolder'})[0]['href']
def GUESTS():
gurl = "http://www.thedailyshow.com/feeds/search?keywords=&tags=interviews&sortOrder=desc&sortBy=date&page=1"
data = getURL(gurl).replace('</pre>','</div>')
soup = BeautifulSoup.BeautifulSoup(data)
guest_items = soup('div', {'class' : 'entry'})
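    # Each 'entry' div describes one interview; Guest pulls out the guest's
    # name, the air date and the watch URL.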
mode = 10
for item in guest_items:
g = Guest(item)
liz=xbmcgui.ListItem(g.name(), iconImage='', thumbnailImage='')
liz.setInfo( type="Video", infoLabels={ "Title": g.name(),
"TVShowTitle":'The Daily Show'})
liz.setProperty('IsPlayable', 'true')
liz.setProperty('fanart_image',fanart)
u=sys.argv[0]+"?url="+g.url()+"&mode="+str(mode)+"&name="+g.name()
xbmcplugin.addDirectoryItem(handle=pluginhandle,url=u, listitem=liz)
xbmcplugin.endOfDirectory(pluginhandle)
################################ List Videos
def LISTVIDEOS(url):
xbmcplugin.setContent(pluginhandle, 'episodes')
data = getURL(url)
playbackUrls=re.compile('<a href="http://www.thedailyshow.com/watch/(.+?)".+?>').findall(data)
thumbnails=re.compile("<img width='.+?' height='.+?' src='(.+?)'").findall(data)
names=re.compile('<span class="title"><a href=".+?">(.+?)</a></span>').findall(data)
descriptions=re.compile('<span class="description">(.+?)\(.+?</span>').findall(data)
durations=re.compile('<span class="description">.+?\((.+?)</span>').findall(data)
epNumbers=re.compile('<span class="episode">Episode #(.+?)</span>').findall(data)
airdates=re.compile('<span>Aired.+?</span>(.+?)</div>').findall(data)
for pb in playbackUrls:
url = "http://www.thedailyshow.com/watch/"+pb
marker = playbackUrls.index(pb)
        print 'marker --> %s' % marker
        print 'names --> %s' % names
thumbnail = thumbnails[marker]
fname = names[marker]
description = descriptions[marker]
duration = durations[marker].replace(')','')
try:
seasonepisode = epNumbers[marker]
season = int(seasonepisode[:-3])
episode = int(seasonepisode[-3:])
except:
season = 0
episode = 0
date = airdates[marker]
u=sys.argv[0]+"?url="+urllib.quote_plus(url)+"&mode="+str(13)+"&name="+urllib.quote_plus(fname)
u += "&season="+urllib.quote_plus(str(season))
u += "&episode="+urllib.quote_plus(str(episode))
u += "&premiered="+urllib.quote_plus(date)
u += "&plot="+urllib.quote_plus(plot)
u += "&thumbnail="+urllib.quote_plus(thumbnail)
liz=xbmcgui.ListItem(fname, iconImage="DefaultVideo.png", thumbnailImage=thumbnail)
liz.setInfo( type="Video", infoLabels={ "Title": fname,
"Episode":episode,
"Season":season,
"Plot":description,
"premiered":date,
"Duration":duration,
"TVShowTitle":TVShowTitle})
liz.setProperty('IsPlayable', 'true')
liz.setProperty('fanart_image',fanart)
xbmcplugin.addDirectoryItem(handle=pluginhandle,url=u,listitem=liz)
################################ Play Video
def PLAYVIDEO(name,url):
data = getURL(url)
uri = re.compile('"http://media.mtvnservices.com/(.+?)"/>').findall(data)[0].replace('fb/','').replace('.swf','')
print 'uri --> %s' % uri
rtmp = GRAB_RTMP(uri)
item = xbmcgui.ListItem(name, iconImage="DefaultVideo.png", thumbnailImage=thumbnail, path=rtmp)
item.setInfo( type="Video", infoLabels={ "Title": name,
"Plot":plot,
"premiered":premiered,
"Season":int(season),
"Episode":int(episode),
"TVShowTitle":TVShowTitle})
item.setProperty('fanart_image',fanart)
print 'item --> %s' % item
xbmcplugin.setResolvedUrl(pluginhandle, True, item)
################################ Play Full Episode
def PLAYFULLEPISODE(name,url):
data = getURL(url)
uri = re.compile('http://media.mtvnservices.com/(mgid:cms:episode:thedailyshow.com:\d{6}|mgid:cms:video:thedailyshow.com:\d{6})').findall(data)[0]
#url = 'http://media.mtvnservices.com/player/config.jhtml?uri='+uri+'&group=entertainment&type=network&site=thedailyshow.com'
url = 'http://shadow.comedycentral.com/feeds/video_player/mrss/?uri='+uri
data = getURL(url)
uris=re.compile('<guid isPermaLink="false">(.+?)</guid>').findall(data)
stacked_url = 'stack://'
for uri in uris:
rtmp = GRAB_RTMP(uri)
stacked_url += rtmp.replace(',',',,')+' , '
stacked_url = stacked_url[:-3]
print 'stacked_url --> %s' % stacked_url
item = xbmcgui.ListItem(name, iconImage="DefaultVideo.png", thumbnailImage=thumbnail, path=stacked_url)
print 'item --> %s' % item
item.setInfo( type="Video", infoLabels={ "Title": name,
"Plot":plot,
"premiered":premiered,
"Season":int(season),
"Episode":int(episode),
"TVShowTitle":TVShowTitle})
item.setProperty('fanart_image',fanart)
xbmcplugin.setResolvedUrl(pluginhandle, True, item)
################################ Grab rtmp
def GRAB_RTMP(uri):
swfurl = 'http://media.mtvnservices.com/player/release/?v=4.5.3'
url = 'http://www.comedycentral.com/global/feeds/entertainment/media/mediaGenEntertainment.jhtml?uri='+uri+'&showTicker=true'
mp4_url = "http://mtvnmobile.vo.llnwd.net/kip0/_pxn=0+_pxK=18639+_pxE=/44620/mtvnorigin"
data = getURL(url)
widths = re.compile('width="(.+?)"').findall(data)
heights = re.compile('height="(.+?)"').findall(data)
bitrates = re.compile('bitrate="(.+?)"').findall(data)
rtmps = re.compile('<src>rtmp(.+?)</src>').findall(data)
print 'rtmps --> %s' % rtmps
mpixels = 0
mbitrate = 0
lbitrate = 0
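    # Map the add-on's bitrate setting to an upper bound on the stream bitrate
    # (0 means no limit).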
if xbmcplugin.getSetting(pluginhandle,"bitrate") == '0':
lbitrate = 0
elif xbmcplugin.getSetting(pluginhandle,"bitrate") == '1':
lbitrate = 1720
elif xbmcplugin.getSetting(pluginhandle,"bitrate") == '2':
lbitrate = 1300
elif xbmcplugin.getSetting(pluginhandle,"bitrate") == '3':
lbitrate = 960
elif xbmcplugin.getSetting(pluginhandle,"bitrate") == '4':
lbitrate = 640
elif xbmcplugin.getSetting(pluginhandle,"bitrate") == '5':
lbitrate = 450
for rtmp in rtmps:
print 'processing rtmp: %s' % rtmp
marker = rtmps.index(rtmp)
w = int(widths[marker])
h = int(heights[marker])
bitrate = int(bitrates[marker])
if bitrate == 0:
continue
elif bitrate > lbitrate and lbitrate <> 0:
continue
elif lbitrate <= bitrate or lbitrate == 0:
pixels = w * h
if pixels > mpixels or bitrate > mbitrate:
mpixels = pixels
mbitrate = bitrate
furl = mp4_url + rtmp.split('viacomccstrm')[2]
#rtmpsplit = rtmp.split('/ondemand')
#server = rtmpsplit[0]
#path = rtmpsplit[1].replace('.flv','')
#if '.mp4' in path:
# path = 'mp4:' + path
#port = ':1935'
#app = '/ondemand?ovpfv=2.1.4'
#furl = 'rtmp'+server+port+app+path+" playpath="+path+" swfurl="+swfurl+" swfvfy=true"
print 'furl --> %s' % furl
return furl
def get_params():
param=[]
paramstring=sys.argv[2]
if len(paramstring)>=2:
params=sys.argv[2]
cleanedparams=params.replace('?','')
if (params[len(params)-1]=='/'):
params=params[0:len(params)-2]
pairsofparams=cleanedparams.split('&')
param={}
for i in range(len(pairsofparams)):
splitparams={}
splitparams=pairsofparams[i].split('=')
if (len(splitparams))==2:
param[splitparams[0]]=splitparams[1]
return param
params=get_params()
url=None
name=None
mode=None
try:
url=urllib.unquote_plus(params["url"])
except:
pass
try:
name=urllib.unquote_plus(params["name"])
except:
pass
try:
mode=int(params["mode"])
except:
pass
try:
thumbnail=urllib.unquote_plus(params["thumbnail"])
except:
thumbnail=''
try:
season=int(params["season"])
except:
season=0
try:
episode=int(params["episode"])
except:
episode=0
try:
premiered=urllib.unquote_plus(params["premiered"])
except:
premiered=''
try:
plot=urllib.unquote_plus(params["plot"])
except:
plot=''
print "Mode: "+str(mode)
print "URL: "+str(url)
print "Name: "+str(name)
if mode==None or url==None or len(url)<1:
ROOT()
elif mode==3:
GUESTS()
elif mode==5:
FULLEPISODES()
elif mode==7:
pageFragments(url)
elif mode==9:
LISTVIDEOS(url)
elif mode==10:
PLAYFULLEPISODE(name,url)
elif mode==13:
PLAYVIDEO(name,url)
|
{
"content_hash": "001bde5e2828368520bbd18c703ca44c",
"timestamp": "",
"source": "github",
"line_count": 423,
"max_line_length": 150,
"avg_line_length": 39.652482269503544,
"alnum_prop": 0.5510642103380433,
"repo_name": "adamsb6/xbmc-dailyshow",
"id": "6b83cc78402e197ba5fefbcbe027fd56c6a1c5c7",
"size": "16773",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "default.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "186200"
}
],
"symlink_target": ""
}
|
"""
Copyright (2010-2014) INCUBAID BVBA
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import socket
import logging
import RemoteControlProtocol as RCP
def collapse(ip, port, clusterId, n):
"""
tell the node listening on (ip, port) to collapse, and keep n tlog files
@type ip: string
@type port: int
@type n: int > 0
@type clusterId:string
@param clusterId: must match cluster id of the node
"""
if n < 1:
raise ValueError("%i is not acceptable" % n)
s = RCP.make_socket(ip, port)
try:
RCP._prologue(clusterId, s)
cmd = RCP._int_to(RCP._COLLAPSE_TLOGS | RCP._MAGIC)
cmd += RCP._int_to(n)
s.send(cmd)
RCP.check_error_code(s)
collapse_count = RCP._receive_int(s)
logging.debug("collapse_count = %i", collapse_count)
for i in range(collapse_count):
logging.debug("i=%i", i)
RCP.check_error_code(s)
took = RCP._receive_int64(s)
logging.info("took %i", took)
finally:
s.close()
def downloadDb(ip, port, clusterId, location):
s = RCP.make_socket(ip, port)
try:
with open(location,'w+b') as db_file:
RCP._prologue(clusterId, s)
cmd = RCP._int_to(RCP._DOWNLOAD_DB | RCP._MAGIC)
s.send(cmd)
RCP.check_error_code(s)
db_size = RCP._receive_int64(s)
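            # Stream the database in chunks of at most 4 KiB until the
            # announced size has been fully received.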
while (db_size > 0 ) :
chunkSize = min(4*1024, db_size)
chunk = RCP._receive_all(s, chunkSize)
db_size -= len(chunk)
db_file.write(chunk)
finally:
s.close()
def copyDbToHead(ip, port, clusterId, tlogsToKeep):
s = RCP.make_socket(ip, port)
try:
RCP._prologue(clusterId, s)
cmd = RCP._int_to(RCP._COPY_DB_TO_HEAD | RCP._MAGIC)
cmd += RCP._int_to(tlogsToKeep)
s.send(cmd)
RCP.check_error_code(s)
finally:
s.close()
def _simple_cmd(ip,port,clusterId, code):
s = RCP.make_socket(ip, port)
try:
RCP._prologue(clusterId, s)
cmd = RCP._int_to(code | RCP._MAGIC)
s.send(cmd)
RCP.check_error_code(s)
finally:
s.close()
def optimizeDb(ip, port, clusterId):
_simple_cmd(ip,port,clusterId, RCP._OPTIMIZE_DB)
def defragDb(ip,port,clusterId):
_simple_cmd(ip,port,clusterId, RCP._DEFRAG_DB)
def dropMaster(ip,port,clusterId):
_simple_cmd(ip,port,clusterId, RCP._DROP_MASTER)
def flushStore(ip,port,clusterId):
_simple_cmd(ip,port,clusterId, RCP._FLUSH_STORE)
def setInterval(cluster_id, ip, port, pub_start, pub_end, priv_start, priv_end):
s = RCP.make_socket(ip,port)
try:
RCP._prologue(cluster_id, s)
cmd = RCP._int_to(RCP._SET_INTERVAL | RCP._MAGIC)
cmd += RCP._string_option_to (pub_start)
cmd += RCP._string_option_to (pub_end)
cmd += RCP._string_option_to (priv_start)
cmd += RCP._string_option_to (priv_end)
s.send(cmd)
RCP.check_error_code(s)
finally:
s.close()
|
{
"content_hash": "d149c71d77224f0151150960a73ef34d",
"timestamp": "",
"source": "github",
"line_count": 114,
"max_line_length": 80,
"avg_line_length": 31.07017543859649,
"alnum_prop": 0.6084133258046301,
"repo_name": "sql-analytics/openvstorage",
"id": "edb8d017b4555930e3be052754d13a3da584c7e1",
"size": "3542",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "ovs/extensions/db/arakoon/arakoon/ArakoonRemoteControl.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ApacheConf",
"bytes": "23516"
},
{
"name": "CSS",
"bytes": "10475"
},
{
"name": "Diff",
"bytes": "816"
},
{
"name": "HTML",
"bytes": "186526"
},
{
"name": "JavaScript",
"bytes": "710424"
},
{
"name": "Makefile",
"bytes": "1269"
},
{
"name": "Python",
"bytes": "1633348"
},
{
"name": "Shell",
"bytes": "10567"
}
],
"symlink_target": ""
}
|
"""
Author: Isabel Restrepo
May 2, 2012
A script to parse training features from all categories and stack them into a singe array
"""
#*******************The Main Algorithm ************************#
if __name__=="__main__":
import os
import sys
import time
import numpy as np
from bmvc12_adaptor import *
from sklearn.cluster import MiniBatchKMeans
from sklearn.externals import joblib
from optparse import OptionParser
parser = OptionParser()
parser.add_option("-r", "--radius", action="store", type="int", dest="radius", help="radius (multiple of resolution)");
parser.add_option("-p", "--percent", action="store", type="int", dest="percentile", help="percentile of original samples");
parser.add_option("-t", "--trial", action="store", type="int", dest="trial", help="trial number- corresponding to a validation set");
parser.add_option("-d", "--descriptor", action="store", type="string", dest="descriptor_type", help="name of the descriptor i.e FPFH");
(opts, args) = parser.parse_args()
#all categories
class_names = ["planes", "cars", "residential", "buildings", "parking", "background"];
radius = opts.radius;
percentile = opts.percentile;
trial=opts.trial;
ft=opts.descriptor_type
feature_name = ft + "_" + str(radius);
#paths of all trainig objects
train_paths = [];
all_feature_paths = [];
#collect all paths
for obj_class in class_names:
parse_train_objects_no_object(obj_class, feature_name, percentile, trial, train_paths, all_feature_paths);
print "Number of training objects: " + str(len(all_feature_paths));
start_time = time.time();
#add al descriptors to huge matrix
fis = open(all_feature_paths[0], 'rb')
train_descriptors=np.load(fis);
nan_rows = np.sum(np.isnan(train_descriptors), axis=1) > 0;
print train_descriptors.shape
if (np.sum(nan_rows)>0):
print "Number of NAN rows: " + str(sum(nan_rows))
train_descriptors = train_descriptors[np.logical_not(nan_rows)];
print train_descriptors.shape
fis.close();
for p in range(1,len(all_feature_paths)):
print all_feature_paths[p]
if not os.path.exists(all_feature_paths[p]):
print "Warning: File not found!"
continue;
fis = open(all_feature_paths[p], 'rb')
descriptors= np.load(fis);
#Detect NAN rows
nan_rows = np.sum(np.isnan(descriptors), axis=1) > 0;
print descriptors.shape
if (np.sum(nan_rows)>0):
print "Number of NAN rows: " + str(sum(nan_rows))
descriptors = descriptors[np.logical_not(nan_rows)];
print descriptors.shape
fis.close()
train_descriptors =np.vstack((train_descriptors, descriptors));
rows, cols = train_descriptors.shape;
#**************Save to File ****************#
descriptor_dir = "/Users/isa/Experiments/bof_bmvc12" + "/trial_" + str(trial) + "/" + feature_name + "/percentile_" + str(percentile)
if not os.path.exists(descriptor_dir + "/"):
os.makedirs(descriptor_dir);
descriptors_file = descriptor_dir + "/train_descriptors_no_object.npy" ;
print "Dimension of training data: (" + str(rows) + ',' + str(cols) + ")" ;
print "Saving file to : " + descriptors_file;
fos = open(descriptors_file, 'wb')
np.save(fos, train_descriptors);
fos.close();
print("Time stacking descriptors")
print(time.time() - start_time);
print train_descriptors;
|
{
"content_hash": "60b310dd9ccc1755f8489878c7c79507",
"timestamp": "",
"source": "github",
"line_count": 99,
"max_line_length": 137,
"avg_line_length": 33.83838383838384,
"alnum_prop": 0.6582089552238806,
"repo_name": "mirestrepo/voxels-at-lems",
"id": "2b6695b3a0a363d4741467e0a458d5c711132dd1",
"size": "3390",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bmvc12/utils/stack_train_features_no_object.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "1426982"
},
{
"name": "Shell",
"bytes": "360033"
},
{
"name": "TeX",
"bytes": "568"
},
{
"name": "nesC",
"bytes": "374"
}
],
"symlink_target": ""
}
|
"""Header encoding and decoding functionality."""
import re
import binascii
import email.quopriMIME
import email.base64MIME
from email.Errors import HeaderParseError
from email.Charset import Charset
NL = '\n'
SPACE = ' '
USPACE = u' '
SPACE8 = ' ' * 8
UEMPTYSTRING = u''
MAXLINELEN = 76
USASCII = Charset('us-ascii')
UTF8 = Charset('utf-8')
# Match encoded-word strings in the form =?charset?q?Hello_World?=
ecre = re.compile(r'''
=\? # literal =?
(?P<charset>[^?]*?) # non-greedy up to the next ? is the charset
\? # literal ?
(?P<encoding>[qb]) # either a "q" or a "b", case insensitive
\? # literal ?
(?P<encoded>.*?) # non-greedy up to the next ?= is the encoded string
\?= # literal ?=
''', re.VERBOSE | re.IGNORECASE)
# Field name regexp, including trailing colon, but not separating whitespace,
# according to RFC 2822. Character range is from tilde to exclamation mark.
# For use with .match()
fcre = re.compile(r'[\041-\176]+:$')
# Helpers
_max_append = email.quopriMIME._max_append
def decode_header(header):
"""Decode a message header value without converting charset.
Returns a list of (decoded_string, charset) pairs containing each of the
decoded parts of the header. Charset is None for non-encoded parts of the
header, otherwise a lower-case string containing the name of the character
set specified in the encoded string.
    An email.Errors.HeaderParseError may be raised when certain decoding
    errors occur (e.g. a base64 decoding exception).
"""
# If no encoding, just return the header
header = str(header)
if not ecre.search(header):
return [(header, None)]
decoded = []
dec = ''
for line in header.splitlines():
# This line might not have an encoding in it
if not ecre.search(line):
decoded.append((line, None))
continue
parts = ecre.split(line)
while parts:
unenc = parts.pop(0).strip()
if unenc:
# Should we continue a long line?
if decoded and decoded[-1][1] is None:
decoded[-1] = (decoded[-1][0] + SPACE + unenc, None)
else:
decoded.append((unenc, None))
if parts:
charset, encoding = [s.lower() for s in parts[0:2]]
encoded = parts[2]
dec = None
if encoding == 'q':
dec = email.quopriMIME.header_decode(encoded)
elif encoding == 'b':
try:
dec = email.base64MIME.decode(encoded)
except binascii.Error:
# Turn this into a higher level exception. BAW: Right
# now we throw the lower level exception away but
# when/if we get exception chaining, we'll preserve it.
raise HeaderParseError
if dec is None:
dec = encoded
if decoded and decoded[-1][1] == charset:
decoded[-1] = (decoded[-1][0] + dec, decoded[-1][1])
else:
decoded.append((dec, charset))
del parts[0:3]
return decoded
def make_header(decoded_seq, maxlinelen=None, header_name=None,
continuation_ws=' '):
"""Create a Header from a sequence of pairs as returned by decode_header()
decode_header() takes a header value string and returns a sequence of
pairs of the format (decoded_string, charset) where charset is the string
name of the character set.
This function takes one of those sequence of pairs and returns a Header
instance. Optional maxlinelen, header_name, and continuation_ws are as in
the Header constructor.
"""
h = Header(maxlinelen=maxlinelen, header_name=header_name,
continuation_ws=continuation_ws)
for s, charset in decoded_seq:
# None means us-ascii but we can simply pass it on to h.append()
if charset is not None and not isinstance(charset, Charset):
charset = Charset(charset)
h.append(s, charset)
return h
class Header:
def __init__(self, s=None, charset=None,
maxlinelen=None, header_name=None,
continuation_ws=' ', errors='strict'):
"""Create a MIME-compliant header that can contain many character sets.
Optional s is the initial header value. If None, the initial header
value is not set. You can later append to the header with .append()
method calls. s may be a byte string or a Unicode string, but see the
.append() documentation for semantics.
Optional charset serves two purposes: it has the same meaning as the
charset argument to the .append() method. It also sets the default
character set for all subsequent .append() calls that omit the charset
argument. If charset is not provided in the constructor, the us-ascii
charset is used both as s's initial charset and as the default for
subsequent .append() calls.
        The maximum line length can be specified explicitly via maxlinelen. For
splitting the first line to a shorter value (to account for the field
header which isn't included in s, e.g. `Subject') pass in the name of
the field in header_name. The default maxlinelen is 76.
continuation_ws must be RFC 2822 compliant folding whitespace (usually
either a space or a hard tab) which will be prepended to continuation
lines.
errors is passed through to the .append() call.
"""
if charset is None:
charset = USASCII
if not isinstance(charset, Charset):
charset = Charset(charset)
self._charset = charset
self._continuation_ws = continuation_ws
cws_expanded_len = len(continuation_ws.replace('\t', SPACE8))
# BAW: I believe `chunks' and `maxlinelen' should be non-public.
self._chunks = []
if s is not None:
self.append(s, charset, errors)
if maxlinelen is None:
maxlinelen = MAXLINELEN
if header_name is None:
# We don't know anything about the field header so the first line
# is the same length as subsequent lines.
self._firstlinelen = maxlinelen
else:
# The first line should be shorter to take into account the field
# header. Also subtract off 2 extra for the colon and space.
self._firstlinelen = maxlinelen - len(header_name) - 2
# Second and subsequent lines should subtract off the length in
# columns of the continuation whitespace prefix.
self._maxlinelen = maxlinelen - cws_expanded_len
def __str__(self):
"""A synonym for self.encode()."""
return self.encode()
def __unicode__(self):
"""Helper for the built-in unicode function."""
uchunks = []
lastcs = None
for s, charset in self._chunks:
# We must preserve spaces between encoded and non-encoded word
# boundaries, which means for us we need to add a space when we go
# from a charset to None/us-ascii, or from None/us-ascii to a
# charset. Only do this for the second and subsequent chunks.
nextcs = charset
if uchunks:
if lastcs not in (None, 'us-ascii'):
if nextcs in (None, 'us-ascii'):
uchunks.append(USPACE)
nextcs = None
elif nextcs not in (None, 'us-ascii'):
uchunks.append(USPACE)
lastcs = nextcs
uchunks.append(unicode(s, str(charset)))
return UEMPTYSTRING.join(uchunks)
# Rich comparison operators for equality only. BAW: does it make sense to
# have or explicitly disable <, <=, >, >= operators?
def __eq__(self, other):
# other may be a Header or a string. Both are fine so coerce
# ourselves to a string, swap the args and do another comparison.
return other == self.encode()
def __ne__(self, other):
return not self == other
def append(self, s, charset=None, errors='strict'):
"""Append a string to the MIME header.
Optional charset, if given, should be a Charset instance or the name
of a character set (which will be converted to a Charset instance). A
value of None (the default) means that the charset given in the
constructor is used.
s may be a byte string or a Unicode string. If it is a byte string
(i.e. isinstance(s, str) is true), then charset is the encoding of
that byte string, and a UnicodeError will be raised if the string
cannot be decoded with that charset. If s is a Unicode string, then
charset is a hint specifying the character set of the characters in
the string. In this case, when producing an RFC 2822 compliant header
using RFC 2047 rules, the Unicode string will be encoded using the
following charsets in order: us-ascii, the charset hint, utf-8. The
first character set not to provoke a UnicodeError is used.
Optional `errors' is passed as the third argument to any unicode() or
ustr.encode() call.
"""
if charset is None:
charset = self._charset
elif not isinstance(charset, Charset):
charset = Charset(charset)
# If the charset is our faux 8bit charset, leave the string unchanged
if charset <> '8bit':
# We need to test that the string can be converted to unicode and
# back to a byte string, given the input and output codecs of the
# charset.
if isinstance(s, str):
# Possibly raise UnicodeError if the byte string can't be
# converted to a unicode with the input codec of the charset.
incodec = charset.input_codec or 'us-ascii'
ustr = unicode(s, incodec, errors)
# Now make sure that the unicode could be converted back to a
# byte string with the output codec, which may be different
                # than the input codec. Still, use the original byte string.
outcodec = charset.output_codec or 'us-ascii'
ustr.encode(outcodec, errors)
elif isinstance(s, unicode):
# Now we have to be sure the unicode string can be converted
# to a byte string with a reasonable output codec. We want to
# use the byte string in the chunk.
for charset in USASCII, charset, UTF8:
try:
outcodec = charset.output_codec or 'us-ascii'
s = s.encode(outcodec, errors)
break
except UnicodeError:
pass
else:
assert False, 'utf-8 conversion failed'
self._chunks.append((s, charset))
def _split(self, s, charset, maxlinelen, splitchars):
# Split up a header safely for use with encode_chunks.
splittable = charset.to_splittable(s)
encoded = charset.from_splittable(splittable, True)
elen = charset.encoded_header_len(encoded)
        # If the line's encoded length fits, just return it
if elen <= maxlinelen:
return [(encoded, charset)]
# If we have undetermined raw 8bit characters sitting in a byte
# string, we really don't know what the right thing to do is. We
# can't really split it because it might be multibyte data which we
# could break if we split it between pairs. The least harm seems to
# be to not split the header at all, but that means they could go out
# longer than maxlinelen.
if charset == '8bit':
return [(s, charset)]
# BAW: I'm not sure what the right test here is. What we're trying to
# do is be faithful to RFC 2822's recommendation that ($2.2.3):
#
# "Note: Though structured field bodies are defined in such a way that
# folding can take place between many of the lexical tokens (and even
# within some of the lexical tokens), folding SHOULD be limited to
# placing the CRLF at higher-level syntactic breaks."
#
# For now, I can only imagine doing this when the charset is us-ascii,
# although it's possible that other charsets may also benefit from the
# higher-level syntactic breaks.
elif charset == 'us-ascii':
return self._split_ascii(s, charset, maxlinelen, splitchars)
# BAW: should we use encoded?
elif elen == len(s):
# We can split on _maxlinelen boundaries because we know that the
# encoding won't change the size of the string
splitpnt = maxlinelen
first = charset.from_splittable(splittable[:splitpnt], False)
last = charset.from_splittable(splittable[splitpnt:], False)
else:
# Binary search for split point
first, last = _binsplit(splittable, charset, maxlinelen)
# first is of the proper length so just wrap it in the appropriate
# chrome. last must be recursively split.
fsplittable = charset.to_splittable(first)
fencoded = charset.from_splittable(fsplittable, True)
chunk = [(fencoded, charset)]
return chunk + self._split(last, charset, self._maxlinelen, splitchars)
def _split_ascii(self, s, charset, firstlen, splitchars):
chunks = _split_ascii(s, firstlen, self._maxlinelen,
self._continuation_ws, splitchars)
return zip(chunks, [charset]*len(chunks))
def _encode_chunks(self, newchunks, maxlinelen):
# MIME-encode a header with many different charsets and/or encodings.
#
# Given a list of pairs (string, charset), return a MIME-encoded
# string suitable for use in a header field. Each pair may have
# different charsets and/or encodings, and the resulting header will
# accurately reflect each setting.
#
# Each encoding can be email.Utils.QP (quoted-printable, for
# ASCII-like character sets like iso-8859-1), email.Utils.BASE64
# (Base64, for non-ASCII like character sets like KOI8-R and
# iso-2022-jp), or None (no encoding).
#
# Each pair will be represented on a separate line; the resulting
# string will be in the format:
#
# =?charset1?q?Mar=EDa_Gonz=E1lez_Alonso?=\n
# =?charset2?b?SvxyZ2VuIEL2aW5n?="
chunks = []
for header, charset in newchunks:
if not header:
continue
if charset is None or charset.header_encoding is None:
s = header
else:
s = charset.header_encode(header)
# Don't add more folding whitespace than necessary
if chunks and chunks[-1].endswith(' '):
extra = ''
else:
extra = ' '
_max_append(chunks, s, maxlinelen, extra)
joiner = NL + self._continuation_ws
return joiner.join(chunks)
def encode(self, splitchars=';, '):
"""Encode a message header into an RFC-compliant format.
There are many issues involved in converting a given string for use in
an email header. Only certain character sets are readable in most
email clients, and as header strings can only contain a subset of
7-bit ASCII, care must be taken to properly convert and encode (with
Base64 or quoted-printable) header strings. In addition, there is a
75-character length limit on any given encoded header field, so
line-wrapping must be performed, even with double-byte character sets.
This method will do its best to convert the string to the correct
character set used in email, and encode and line wrap it safely with
the appropriate scheme for that character set.
If the given charset is not known or an error occurs during
conversion, this function will return the header untouched.
Optional splitchars is a string containing characters to split long
ASCII lines on, in rough support of RFC 2822's `highest level
syntactic breaks'. This doesn't affect RFC 2047 encoded lines.
"""
newchunks = []
maxlinelen = self._firstlinelen
lastlen = 0
for s, charset in self._chunks:
# The first bit of the next chunk should be just long enough to
# fill the next line. Don't forget the space separating the
# encoded words.
targetlen = maxlinelen - lastlen - 1
if targetlen < charset.encoded_header_len(''):
# Stick it on the next line
targetlen = maxlinelen
newchunks += self._split(s, charset, targetlen, splitchars)
lastchunk, lastcharset = newchunks[-1]
lastlen = lastcharset.encoded_header_len(lastchunk)
return self._encode_chunks(newchunks, maxlinelen)
def _split_ascii(s, firstlen, restlen, continuation_ws, splitchars):
lines = []
maxlen = firstlen
for line in s.splitlines():
# Ignore any leading whitespace (i.e. continuation whitespace) already
# on the line, since we'll be adding our own.
line = line.lstrip()
if len(line) < maxlen:
lines.append(line)
maxlen = restlen
continue
# Attempt to split the line at the highest-level syntactic break
# possible. Note that we don't have a lot of smarts about field
# syntax; we just try to break on semi-colons, then commas, then
# whitespace.
for ch in splitchars:
if ch in line:
break
else:
# There's nothing useful to split the line on, not even spaces, so
# just append this line unchanged
lines.append(line)
maxlen = restlen
continue
# Now split the line on the character plus trailing whitespace
cre = re.compile(r'%s\s*' % ch)
if ch in ';,':
eol = ch
else:
eol = ''
joiner = eol + ' '
joinlen = len(joiner)
wslen = len(continuation_ws.replace('\t', SPACE8))
this = []
linelen = 0
for part in cre.split(line):
curlen = linelen + max(0, len(this)-1) * joinlen
partlen = len(part)
onfirstline = not lines
# We don't want to split after the field name, if we're on the
# first line and the field name is present in the header string.
if ch == ' ' and onfirstline and \
len(this) == 1 and fcre.match(this[0]):
this.append(part)
linelen += partlen
elif curlen + partlen > maxlen:
if this:
lines.append(joiner.join(this) + eol)
# If this part is longer than maxlen and we aren't already
# splitting on whitespace, try to recursively split this line
# on whitespace.
                if partlen > maxlen and ch != ' ':
subl = _split_ascii(part, maxlen, restlen,
continuation_ws, ' ')
lines.extend(subl[:-1])
this = [subl[-1]]
else:
this = [part]
linelen = wslen + len(this[-1])
maxlen = restlen
else:
this.append(part)
linelen += partlen
# Put any left over parts on a line by themselves
if this:
lines.append(joiner.join(this))
return lines
def _binsplit(splittable, charset, maxlinelen):
i = 0
j = len(splittable)
while i < j:
# Invariants:
# 1. splittable[:k] fits for all k <= i (note that we *assume*,
# at the start, that splittable[:0] fits).
# 2. splittable[:k] does not fit for any k > j (at the start,
# this means we shouldn't look at any k > len(splittable)).
# 3. We don't know about splittable[:k] for k in i+1..j.
# 4. We want to set i to the largest k that fits, with i <= k <= j.
#
m = (i+j+1) >> 1 # ceiling((i+j)/2); i < m <= j
chunk = charset.from_splittable(splittable[:m], True)
chunklen = charset.encoded_header_len(chunk)
if chunklen <= maxlinelen:
# m is acceptable, so is a new lower bound.
i = m
else:
# m is not acceptable, so final i must be < m.
j = m - 1
# i == j. Invariant #1 implies that splittable[:i] fits, and
# invariant #2 implies that splittable[:i+1] does not fit, so i
# is what we're looking for.
first = charset.from_splittable(splittable[:i], False)
last = charset.from_splittable(splittable[i:], False)
return first, last
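# A hedged usage sketch (not part of the original module): it exercises the
# public Header API described in the docstrings above -- per-chunk charsets
# via append() and RFC 2047 encoding/folding via encode().  The sample
# strings are invented for illustration only.
if __name__ == '__main__':
    demo = Header('Hello', charset='us-ascii', header_name='Subject')
    # A non-ASCII chunk; append() will settle on iso-8859-1 for it.
    demo.append(u'Gr\xfc\xdfe aus M\xfcnchen', 'iso-8859-1')
    # encode() folds the header and RFC 2047-encodes each chunk with its charset.
    print demo.encode()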
|
{
"content_hash": "a90c395045f5bf3e9282d0799d89216f",
"timestamp": "",
"source": "github",
"line_count": 497,
"max_line_length": 79,
"avg_line_length": 43.183098591549296,
"alnum_prop": 0.5916503587736465,
"repo_name": "MalloyPower/parsing-python",
"id": "5e24afede013f775e836a5931c35f6ca3edfd1a9",
"size": "21587",
"binary": false,
"copies": "10",
"ref": "refs/heads/master",
"path": "front-end/testsuite-python-lib/Python-2.4.3/Lib/email/Header.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1963"
},
{
"name": "Lex",
"bytes": "238458"
},
{
"name": "Makefile",
"bytes": "4513"
},
{
"name": "OCaml",
"bytes": "412695"
},
{
"name": "Python",
"bytes": "17319"
},
{
"name": "Rascal",
"bytes": "523063"
},
{
"name": "Yacc",
"bytes": "429659"
}
],
"symlink_target": ""
}
|
'''
Send Cluster EFK checks to Zagg
'''
# vim: expandtab:tabstop=4:shiftwidth=4
#
# Copyright 2015 Red Hat Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#This is not a module, but pylint thinks it is. This is a command.
#pylint: disable=invalid-name
#If a check throws an exception it is failed and should alert
#pylint: disable=bare-except
import argparse
import ssl
import urllib2
import json
# pylint: disable=import-error
from openshift_tools.monitoring.ocutil import OCUtil
from openshift_tools.monitoring.metric_sender import MetricSender
# pylint: enable=import-error
# pylint thinks check_fluentd is too complex
#pylint: disable=too-many-branches
#pylint: disable=too-many-arguments
#pylint: disable=no-init
class RedirectHandler(urllib2.HTTPRedirectHandler):
    """Redirect handler that passes 301/302 responses through instead of following them."""
def http_error_301(self, req, fp, code, msg, headers):
        '''pass the 301 response through directly'''
pass
def http_error_302(self, req, fp, code, msg, headers):
        '''pass the 302 response through directly'''
pass
class OpenshiftLoggingStatus(object):
'''
This is a check for the entire EFK stack shipped with OCP
'''
def __init__(self):
''' Initialize OpenShiftLoggingStatus class '''
self.metric_sender = None
self.oc = None
self.args = None
self.es_pods = []
self.fluentd_pods = []
es_cert = '/etc/elasticsearch/secret/admin-'
self.es_curl = "curl -s --cert {}cert --key {}key --cacert {}ca -XGET ".format(es_cert, es_cert, es_cert)
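        # For illustration (hedged note, not executed code): the prefix built
        # above expands to
        #   curl -s --cert /etc/elasticsearch/secret/admin-cert
        #        --key /etc/elasticsearch/secret/admin-key
        #        --cacert /etc/elasticsearch/secret/admin-ca -XGET
        # and is later combined with endpoints such as
        #   https://localhost:9200/_cluster/health?pretty=true
        # inside an `oc exec` against an elasticsearch pod.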
def parse_args(self):
''' Parse arguments passed to the script '''
parser = argparse.ArgumentParser(description='OpenShift Cluster Logging Checker')
parser.add_argument('-v', '--verbose', action='store_true', default=None, help='Verbose output')
parser.add_argument('--debug', action='store_true', default=None, help='Debug?')
parser.add_argument('--use-service-ip', action='store_true', default=False,\
help='use this if kibana can be access by service ip')
self.args = parser.parse_args()
def get_pods(self):
''' Get all pods and filter them in one pass '''
pods = self.oc.get_pods()
for pod in pods['items']:
if pod['status']['phase'] == 'Failed':
continue
if 'component' in pod['metadata']['labels']:
# Get ES pods
if pod['metadata']['labels']['component'] == 'es':
self.es_pods.append(pod)
elif pod['metadata']['labels']['component'] == 'fluentd':
self.fluentd_pods.append(pod)
def check_elasticsearch(self):
''' Various checks for elasticsearch '''
es_status = {}
es_status['single_master'] = None
es_master_name = None
es_status['pods'] = {}
for pod in self.es_pods:
pod_dc = pod['metadata']['labels']['deploymentconfig']
pod_name = pod['metadata']['name']
es_status['pods'][pod_dc] = {}
cluster_health = self.check_elasticsearch_cluster_health(pod_name)
if cluster_health['status'] == 'green':
health_n = 2
elif cluster_health['status'] == 'yellow':
health_n = 1
else:
health_n = 0
es_status['pods'][pod_dc]['elasticsearch_health'] = health_n
es_status['pods'][pod_dc]['elasticsearch_active_primary_shards'] = cluster_health['active_primary_shards']
es_status['pods'][pod_dc]['elasticsearch_pending_task_queue_depth'] = \
cluster_health['number_of_pending_tasks']
es_status['pods'][pod_dc]['disk'] = self.check_elasticsearch_diskspace(pod_name)
# Compare the master across all ES nodes to see if we have split brain
curl_cmd = "{} 'https://localhost:9200/_cat/master'".format(self.es_curl)
es_master = "exec -c elasticsearch -ti {} -- {}".format(pod_name, curl_cmd)
master_name = self.oc.run_user_cmd(es_master).split(' ')[1]
if es_status['single_master'] is None:
es_status['single_master'] = 1
es_master_name = master_name
elif es_master_name != master_name:
es_status['single_master'] = 0
        # Fix for 3.4 logging where es_master_name gets set to an IP address.
        # Wrap the lookup in a try block so it keeps working for 3.3 if it fails.
for pod in self.es_pods:
try:
if pod['status']['podIP'] == es_master_name:
es_master_name = pod['metadata']['name']
except:
continue
# get cluster nodes
curl_cmd = "{} 'https://localhost:9200/_nodes'".format(self.es_curl)
node_cmd = "exec -c elasticsearch -ti {} -- {}".format(es_master_name, curl_cmd)
cluster_nodes = json.loads(self.oc.run_user_cmd(node_cmd))['nodes']
es_status['all_nodes_registered'] = 1
# The internal ES node name is a random string we do not track anywhere
# pylint: disable=unused-variable
for node, data in cluster_nodes.items():
# pylint: enable=unused-variable
has_matched = False
for pod in self.es_pods:
if data['host'] == pod['metadata']['name'] or data['host'] == pod['status']['podIP']:
has_matched = True
break
if has_matched is False:
es_status['all_nodes_registered'] = 0
return es_status
def check_elasticsearch_cluster_health(self, es_pod):
''' Exec into the elasticsearch pod and check the cluster health '''
try:
curl_cmd = "{} 'https://localhost:9200/_cluster/health?pretty=true'".format(self.es_curl)
cluster_health = "exec -c elasticsearch -ti {} -- {}".format(es_pod, curl_cmd)
health_res = json.loads(self.oc.run_user_cmd(cluster_health))
return health_res
except:
# The check failed so ES is in a bad state
return 0
def check_elasticsearch_diskspace(self, es_pod):
''' Exec into a elasticsearch pod and query the diskspace '''
results = {}
try:
disk_used = 0
disk_free = 0
trash_var = 0
disk_output = self.oc.run_user_cmd("exec -c elasticsearch -ti {} -- df".format(es_pod)).split(' ')
disk_output = [x for x in disk_output if x]
for item in disk_output:
if "/elasticsearch/persistent" not in item:
disk_used = disk_free
disk_free = trash_var
trash_var = item
else:
break
results['used'] = int(disk_used)
results['free'] = int(disk_free)
except:
results['used'] = int(0)
results['free'] = int(0)
return results
def check_fluentd(self):
''' Verify fluentd is running '''
fluentd_status = {}
# Get all nodes with fluentd label
nodes = self.oc.get_nodes()
fluentd_nodes = []
for node in nodes['items']:
if 'logging-infra-fluentd' in node['metadata']['labels']:
if node['metadata']['labels']['logging-infra-fluentd'] == 'true':
fluentd_nodes.append(node)
# Make sure fluentd is on all the nodes and the pods are running
fluentd_status['number_expected_pods'] = len(fluentd_nodes)
fluentd_status['number_pods'] = len(self.fluentd_pods)
fluentd_status['node_mismatch'] = 0
fluentd_status['running'] = 1
for pod in self.fluentd_pods:
node_matched = False
try:
if pod['status']['containerStatuses'][0]['ready'] is False:
fluentd_status['running'] = 0
                # a pod that is OutOfcpu may lack containerStatuses; do not dump too much info
except KeyError:
fluentd_status['running'] = 0
# If there is already a problem don't worry about looping over the remaining pods/nodes
for node in fluentd_nodes:
internal_ip = ""
for address in node['status']['addresses']:
if address['type'] == "InternalIP":
internal_ip = address['address']
try:
if node['metadata']['labels']['kubernetes.io/hostname'] == pod['spec']['host']:
node_matched = True
break
raise ValueError('')
except:
if internal_ip == pod['spec']['nodeName'] or node['metadata']['name'] == pod['spec']['nodeName']:
node_matched = True
break
if node_matched is False:
fluentd_status['node_mismatch'] = 1
break
return fluentd_status
def get_kibana_url(self):
''' Get the kibana url to access '''
kibana_url = ""
if self.args.use_service_ip:
service = self.oc.get_service('logging-kibana')['spec']['clusterIP']
kibana_url = "https://{}/".format(service)
else:
route = self.oc.get_route('logging-kibana')['status']['ingress'][0]['host']
kibana_url = "https://{}/".format(route)
return kibana_url
def check_kibana(self):
''' Check to see if kibana is up and working '''
kibana_status = {}
# Get logging url
kibana_url = self.get_kibana_url()
# Disable SSL to work around self signed clusters
ctx = ssl.create_default_context()
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
# Verify that the url is returning a valid response
kibana_status['site_up'] = 0
kibana_status['response_code'] = 0
debug_handler = urllib2.HTTPHandler(debuglevel=1)
opener = urllib2.build_opener(urllib2.HTTPSHandler(context=ctx), debug_handler, RedirectHandler)
try:
# get response code from kibana
            u = opener.open(kibana_url, timeout=10)
            kibana_status['response_code'] = u.getcode()
        except urllib2.HTTPError, e:
            kibana_status['response_code'] = e.code
        except urllib2.URLError, e:
            kibana_status['response_code'] = '100'
# accept 401 because we can't auth: https://bugzilla.redhat.com/show_bug.cgi?id=1466496
if 200 <= kibana_status['response_code'] <= 401:
kibana_status['site_up'] = 1
return kibana_status
def report_to_zabbix(self, logging_status):
''' Report all of our findings to zabbix '''
self.metric_sender.add_dynamic_metric('openshift.logging.elasticsearch.pods',
'#OSO_METRICS',
logging_status['elasticsearch']['pods'].keys())
for item, data in logging_status.iteritems():
if item == "fluentd":
self.metric_sender.add_metric({
'openshift.logging.fluentd.running': data['running'],
'openshift.logging.fluentd.number_pods': data['number_pods'],
'openshift.logging.fluentd.node_mismatch': data['node_mismatch'],
'openshift.logging.fluentd.number_expected_pods': data['number_expected_pods']
})
elif item == "kibana":
self.metric_sender.add_metric({'openshift.logging.kibana.response_code': data['response_code']})
self.metric_sender.add_metric({'openshift.logging.kibana.site_up': data['site_up']})
elif item == "elasticsearch":
self.metric_sender.add_metric({
'openshift.logging.elasticsearch.single_master': data['single_master'],
'openshift.logging.elasticsearch.all_nodes_registered': data['all_nodes_registered']
})
for pod, value in data['pods'].iteritems():
self.metric_sender.add_metric({
"openshift.logging.elasticsearch.pod_health[%s]" %(pod): value['elasticsearch_health'],
"openshift.logging.elasticsearch.pod_active_primary_shards[%s]" %(pod): \
value['elasticsearch_active_primary_shards'],
"openshift.logging.elasticsearch.pod_pending_task_queue_depth[%s]" %(pod): \
value['elasticsearch_pending_task_queue_depth'],
"openshift.logging.elasticsearch.disk_free_pct[%s]" %(pod): \
value['disk']['free'] * 100 / (value['disk']['free'] + value['disk']['used'] + 1)
})
self.metric_sender.send_metrics()
def run(self):
''' Main function that runs the check '''
self.parse_args()
self.metric_sender = MetricSender(verbose=self.args.verbose, debug=self.args.debug)
self.oc = OCUtil(namespace='logging', config_file='/tmp/admin.kubeconfig', verbose=self.args.verbose)
self.get_pods()
logging_status = {}
logging_status['elasticsearch'] = self.check_elasticsearch()
logging_status['fluentd'] = self.check_fluentd()
logging_status['kibana'] = self.check_kibana()
self.report_to_zabbix(logging_status)
if __name__ == '__main__':
OSLS = OpenshiftLoggingStatus()
OSLS.run()
|
{
"content_hash": "3b4014a1c6d851e5b833674808e8431d",
"timestamp": "",
"source": "github",
"line_count": 341,
"max_line_length": 118,
"avg_line_length": 41.78885630498534,
"alnum_prop": 0.5666666666666667,
"repo_name": "ivanhorvath/openshift-tools",
"id": "56a1f3f34ae985e2e30cd6829fe1a6052a187a1e",
"size": "14272",
"binary": false,
"copies": "1",
"ref": "refs/heads/prod",
"path": "scripts/monitoring/cron-send-logging-checks.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "588"
},
{
"name": "Dockerfile",
"bytes": "70267"
},
{
"name": "Go",
"bytes": "382164"
},
{
"name": "Groovy",
"bytes": "6322"
},
{
"name": "HTML",
"bytes": "146500"
},
{
"name": "JavaScript",
"bytes": "2380"
},
{
"name": "Makefile",
"bytes": "3324"
},
{
"name": "PHP",
"bytes": "35793"
},
{
"name": "Python",
"bytes": "37739486"
},
{
"name": "Shell",
"bytes": "1643890"
},
{
"name": "Vim script",
"bytes": "1836"
}
],
"symlink_target": ""
}
|
from pyqtgraph import ROI
from pyqtgraph import QtGui
from pyqtgraph import QtCore
from pyqtgraph import TextItem
from pyqtgraph import LayoutWidget
from .utils import delete_content
from .utils import get_bigger_bbox
from .color import JChooseColor
from .color import setup_color
from .remove_item import JRemoveItem
class Jtext(TextItem):
def __init__(self, text):
TextItem.__init__(self, text, angle=0)
self.setPos(0, 0)
class JtextROI(ROI, JRemoveItem, JChooseColor):
def __init__(self, position, text, viewbox=None, screen_bbox=None,
size=10, transpose=False):
ROI.__init__(self, position, size=[1, 1])
self.handlePen.setColor(QtGui.QColor(0, 0, 0))
dx = screen_bbox[0][1] - screen_bbox[0][0]
dy = screen_bbox[1][1] - screen_bbox[1][0]
self._character_width = dx * 0.01
self._character_height = dy * 0.04
self.text = text
self.width = None
self.height = None
self.size = size
self._bbox = None
self._bigger_bbox = None
self.text.setParentItem(self)
self.info_dock = viewbox.info_dock
self._menu = self._build_menu()
self.transpose = False
JRemoveItem.__init__(self, viewbox)
self._viewbox = viewbox
self._first_transform = None
self._last_transform = None
self._transpose_check_box = None
self._text_line_edit = None
self._text_spin_box = None
JChooseColor.__init__(self)
self._set_width_and_height()
self._display_info_dock()
self.set_black_color()
self._changed_size()
if transpose:
self._toggle_transpose()
self._transpose_check_box.setChecked(transpose)
@classmethod
def load(cls, s, viewbox=None):
if "*JText" not in s:
print("Error reading a Text from string %s" % s)
s = s.replace("*JText", "")
if s[0] != "{" or s[-1] != "}":
print("Error the string is in the wrong format")
data = eval(s)
text = Jtext(data["text"])
viewbox.addItem(text)
text_roi = JtextROI(data["position"], text, viewbox=viewbox,
screen_bbox=viewbox.viewRange(),
size=data["size"],
transpose=data["transpose"])
setup_color(text_roi, data["color"])
if viewbox is not None:
viewbox.label.setText("Text loaded.")
return text_roi
def save(self, file):
data = {
"position": [self.pos().x(), self.pos().y()],
"text": self.text.textItem.toPlainText(),
"size": self.size,
"transpose": self.transpose,
"color": self.color
}
file.write("*JText\n")
file.write(str(data) + "\n")
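    # Hedged illustration (not part of the original class): save() writes a
    # "*JText" marker line followed by str(data), which is the exact shape
    # load() parses back via eval().  The values below are made up, and the
    # color entry depends on JChooseColor (defined elsewhere):
    #
    #   *JText
    #   {'position': [10.0, 5.0], 'text': 'axis label', 'size': 12, 'transpose': False, 'color': ...}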
# does not work properly
# def compute_bbox(self):
# return compute_bbox_of_points(self._get_bbox())
def _build_menu(self):
menu = QtGui.QMenu()
menu.setTitle("Text")
menu.addAction("Transpose", self._click_transpose)
menu.addAction("Remove", self.remove_item)
return menu
def _click_transpose(self):
b = self._transpose_check_box.isChecked()
self._transpose_check_box.setChecked(not b)
def _toggle_transpose(self):
self.transpose = not self.transpose
if self.transpose:
self.text.textItem.setRotation(270)
else:
self.text.textItem.setRotation(0)
self._build_bbox()
view_box = self.viewbox
view_box.update()
def _set_width_and_height(self):
text = self.text.textItem.toPlainText()
n = len(text)
factor = self.size / 10
self.width = factor * n * self._character_width
self.height = factor * self._character_height
self._build_bbox()
def _build_bbox(self):
x, y = self._get_position()
bbox = []
width = self.width
height = self.height
if self.transpose:
bbox.append([x + height, y])
bbox.append([x + height, y + width])
bbox.append([x, y + width])
bbox.append([x, y])
bbox.append([x + height, y])
else:
bbox.append([x, y - height])
bbox.append([x + width, y - height])
bbox.append([x + width, y])
bbox.append([x, y])
bbox.append([x, y - height])
self._bbox = bbox
def _get_bbox(self):
if self._bbox is None:
self._build_bbox()
return self._bbox
def _change_bbox(self, f1, f2):
self._build_bbox()
        bbox = [[x * f1, y * f2] for x, y in self._bbox]
self._bbox = bbox
def _get_position(self):
pt = self.pos()
x, y = pt.x(), pt.y()
pt = self.mapFromParent(x, y)
return pt.x(), pt.y()
def shape(self):
p = QtGui.QPainterPath()
bbox = self._get_bbox()
big_bbox = self._bigger_bbox
if big_bbox is None:
big_bbox = bbox
else:
big_bbox = get_bigger_bbox(big_bbox, bbox)
p.moveTo(big_bbox[0][0], big_bbox[0][1])
for pt in big_bbox[1:]:
p.lineTo(pt[0], pt[1])
self._bigger_bbox = bbox
return p
def boundingRect(self):
return self.shape().boundingRect()
def paint(self, p, *args):
t = p.transform()
if self._first_transform is None:
self._first_transform = t
if self._first_transform != t:
f1 = self._first_transform.m11() / t.m11()
f2 = self._first_transform.m22() / t.m22()
self._change_bbox(f1, f2)
bbox = self._get_bbox()
points = [QtCore.QPointF(pt[0], pt[1]) for pt in bbox]
self.currentPen.setWidth(2)
p.setPen(self.currentPen)
for i in range(len(points) - 1):
p.drawLine(points[i], points[i + 1])
def mouseClickEvent(self, ev):
self._display_info_dock()
if ev.button() == QtCore.Qt.RightButton:
self._raise_menu(ev)
def _raise_menu(self, event):
pos = event.screenPos()
self._menu.popup(QtCore.QPoint(pos.x(), pos.y()))
def _dock_toggle_transpose(self):
b = self._transpose_check_box.isChecked()
if self.transpose != b:
self._toggle_transpose()
def _transpose_dock_widget(self):
layout = LayoutWidget()
label = QtGui.QLabel("Transpose")
layout.addWidget(label, row=0, col=0)
check_box = QtGui.QCheckBox()
layout.addWidget(check_box, row=0, col=1)
check_box.setChecked(self.transpose)
check_box.toggled.connect(self._dock_toggle_transpose)
self._transpose_check_box = check_box
layout.layout.setContentsMargins(0, 0, 0, 5)
return layout
def _changed_text(self):
text = self._text_line_edit.text()
if text == "":
return
else:
self.text.setText(text, self._color_rgb)
self._set_width_and_height()
self.update()
def _changed_size(self):
self.size = self._text_spin_box.value()
self.text.textItem.setScale(self.size / 10)
self._set_width_and_height()
self.update()
def _text_dock_widget(self):
layout = LayoutWidget()
label = QtGui.QLabel("Text:")
layout.addWidget(label, row=0, col=0)
line_edit = QtGui.QLineEdit(self.text.textItem.toPlainText())
layout.addWidget(line_edit, row=0, col=1)
line_edit.textChanged.connect(self._changed_text)
self._text_line_edit = line_edit
label1 = QtGui.QLabel("Size:")
layout.addWidget(label1, row=1, col=0)
spin_box = QtGui.QSpinBox()
layout.addWidget(spin_box, row=1, col=1)
spin_box.setMinimum(6)
spin_box.setMaximum(20)
spin_box.setValue(self.size)
spin_box.valueChanged.connect(self._changed_size)
self._text_spin_box = spin_box
layout.layout.setContentsMargins(0, 0, 0, 5)
return layout
def set_red_color(self):
super().set_red_color()
self._changed_text()
def set_blue_color(self):
super().set_blue_color()
self._changed_text()
def set_green_color(self):
super().set_green_color()
self._changed_text()
def set_white_color(self):
super().set_white_color()
self._changed_text()
def set_black_color(self):
super().set_black_color()
self._changed_text()
def _display_info_dock(self):
if self.info_dock is None:
return
delete_content(self.info_dock)
container = LayoutWidget()
label = QtGui.QLabel("Text")
container.addWidget(label, row=0, col=0)
text_dock_widget = self._text_dock_widget()
container.addWidget(text_dock_widget, row=1, col=0)
transpose_dock_widget = self._transpose_dock_widget()
container.addWidget(transpose_dock_widget, row=2, col=0)
choose_color_widget = self.get_color_dock_widget()
container.addWidget(choose_color_widget, row=3, col=0)
remove_item_widget = self.get_remove_item_dock_widget()
container.addWidget(remove_item_widget, row=4, col=0)
vertical_spacer = QtGui.QSpacerItem(1, 1, QtGui.QSizePolicy.Minimum,
QtGui.QSizePolicy.Expanding)
container.layout.addItem(vertical_spacer, 4, 0)
self.info_dock.addWidget(container)
|
{
"content_hash": "a4578531c8227de7237e0a4a84a893b3",
"timestamp": "",
"source": "github",
"line_count": 337,
"max_line_length": 76,
"avg_line_length": 28.735905044510385,
"alnum_prop": 0.560512185047501,
"repo_name": "jakaspeh/JDesigner",
"id": "fdcc1c319bd1e5e57757141234f691d9304f4094",
"size": "9684",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "jdesigner/jtext.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "60262"
}
],
"symlink_target": ""
}
|
"""List User Device access."""
import click
import SoftLayer
from SoftLayer.CLI import environment
from SoftLayer.CLI import formatting
@click.command(cls=SoftLayer.CLI.command.SLCommand, )
@click.argument('identifier')
@environment.pass_env
def cli(env, identifier):
"""User Device access."""
mgr = SoftLayer.UserManager(env.client)
all_permissions = mgr.get_user_permissions(identifier)
    # Build nested tables: the outer table wraps the permission and device tables
table = formatting.Table(['Name', 'Value'])
permission_table = formatting.Table(['KeyName', 'Name'])
for permission in all_permissions:
if 'ALL_' in permission['key']:
permission_table.add_row([permission.get('keyName'), permission.get('name')])
hardwares = mgr.get_user_hardware(identifier)
dedicatedhosts = mgr.get_user_dedicated_host(identifier)
virtual_guests = mgr.get_user_virtuals(identifier)
hardware_table = formatting.KeyValueTable(['Id', 'Device Name', 'Device type', 'Public Ip', 'Private Ip', 'notes'])
virtual_table = formatting.KeyValueTable(['Id', 'Device Name', 'Device type', 'Public Ip', 'Private Ip', 'notes'])
dedicated_table = formatting.KeyValueTable(['Id', 'Device Name', 'Device type', 'notes'])
hardware_table.align['Device Name'] = 'l'
dedicated_table.align['Device Name'] = 'l'
virtual_table.align['Device Name'] = 'l'
for hardware in hardwares:
hardware_table.add_row([hardware.get('id'),
hardware.get('fullyQualifiedDomainName'),
'Bare Metal',
hardware.get('primaryIpAddress'),
hardware.get('primaryBackendIpAddress'),
hardware.get('notes') or '-'])
for host in dedicatedhosts:
dedicated_table.add_row([host.get('id'),
host.get('name'),
'Dedicated Host',
host.get('notes') or '-'])
for virtual in virtual_guests:
virtual_table.add_row([virtual.get('id'),
virtual.get('fullyQualifiedDomainName'),
                               'Virtual Guest',
virtual.get('primaryIpAddress'),
virtual.get('primaryBackendIpAddress'),
virtual.get('notes') or '-'])
table.add_row(['Permission', permission_table])
table.add_row(['Hardware', hardware_table])
table.add_row(['Dedicated Host', dedicated_table])
table.add_row(['Virtual Guest', virtual_table])
env.fout(table)
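# Hedged usage note (not part of the original command): once registered in the
# slcli routing table, this command is run against a user id or username, e.g.
# `slcli user <device-access-subcommand> 123456`; the exact subcommand name is
# defined by the CLI routes, not by this file.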
|
{
"content_hash": "bd1fe8425365bfd76ef4775743d23c20",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 119,
"avg_line_length": 43.916666666666664,
"alnum_prop": 0.5836812144212524,
"repo_name": "allmightyspiff/softlayer-python",
"id": "4ad0f2951ff95ba94f42edd7cb2fd6fa6f99d81b",
"size": "2635",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "SoftLayer/CLI/user/device_access.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "DIGITAL Command Language",
"bytes": "854"
},
{
"name": "Makefile",
"bytes": "7458"
},
{
"name": "Python",
"bytes": "2657752"
}
],
"symlink_target": ""
}
|
from osqp.interface import OSQP
|
{
"content_hash": "904af8ead269a094766258f1e4ad3ec8",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 31,
"avg_line_length": 32,
"alnum_prop": 0.84375,
"repo_name": "oxfordcontrol/osqp-python",
"id": "19ebfaa23d845dcd79fcce24991d651de2a7f329",
"size": "32",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "module/tests/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "92076"
},
{
"name": "C++",
"bytes": "13734"
},
{
"name": "CMake",
"bytes": "4187"
},
{
"name": "Python",
"bytes": "165589"
}
],
"symlink_target": ""
}
|
class Experiment:
function_set = None
terminal_set = None
@classmethod
def get_terminal_set(cls):
return cls.terminal_set.get()
@classmethod
def get_function_set(cls):
return cls.function_set.get()
def function_lookup(self, name):
return getattr(self, name)
def index(self):
return None
def target_data(self):
self.initialize()
samples = []
loop = True
while loop:
sample = {'value': self.error(0)}
            if self.index() is not None:
sample['index'] = self.index()
samples.append(sample)
loop = self.next()
return samples
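# Hedged illustration (not part of the original engine): target_data() relies
# on initialize()/error()/next() -- and optionally index() -- being supplied by
# a concrete experiment.  The linear target below is invented purely to show
# that interface; function_set/terminal_set stay unset because target_data()
# never touches them.
class _ExampleLinearExperiment(Experiment):
    def initialize(self):
        self._step = 0
    def index(self):
        return self._step
    def error(self, _value):
        # value recorded for the current sample
        return 2.0 * self._step
    def next(self):
        self._step += 1
        return self._step < 10  # stop after ten samples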
|
{
"content_hash": "bb2af1d7e6ad0192cd7d766f598acff4",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 39,
"avg_line_length": 20.79310344827586,
"alnum_prop": 0.6235489220563848,
"repo_name": "dougsc/gp",
"id": "7dc193a687415fcd519f461905953c14913de3d7",
"size": "604",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "engine/experiment.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "2018"
},
{
"name": "Python",
"bytes": "29498"
}
],
"symlink_target": ""
}
|
import time
import json
from flask import g, request, Response
from flask.ext.classy import FlaskView
from werkzeug.exceptions import BadRequest, NotAcceptable
from app.authorization import login_required
import app.config
import app.database
class NotificationView(FlaskView):
'''
Send notifications using Server-Sent Events (SSE).
Based on this:
http://stackoverflow.com/questions/13386681/streaming-data-with-python-and-flask
'''
CHANNELS = (
'archive',
'group',
'result',
'site',
'worker',
)
__should_quit = False
@classmethod
def quit_notifications(cls):
'''A helper function to end long-running notification threads. '''
cls.__should_quit = True
@login_required
def index(self):
''' Open an SSE stream. '''
if request.headers.get('Accept') == 'text/event-stream':
redis = app.database.get_redis(dict(g.config.items('redis')))
pubsub = redis.pubsub(ignore_subscribe_messages=True)
pubsub.subscribe(*self.__class__.CHANNELS)
client_id = request.args.get('client-id', '')
if client_id.strip() == '':
raise BadRequest('`client-id` query parameter is required.')
return Response(self._stream(pubsub, client_id), content_type='text/event-stream')
else:
message = 'This endpoint is only for use with server-sent ' \
'events (SSE).'
raise NotAcceptable(message)
def _stream(self, pubsub, client_id):
'''
Stream events.
If an event has a source_client_id key set, then it is *not* sent to that client.
'''
# Prime the stream. (This forces headers to be sent. Otherwise the
# client will think the stream is not open yet.)
yield ''
# Now send real events from the Redis pubsub channel.
while True:
if self.__class__.__should_quit:
break
message = pubsub.get_message()
if message is not None:
data = json.loads(message['data'].decode('utf8'))
source_client_id = data.pop('source_client_id', '')
if source_client_id != client_id:
channel = message['channel'].decode('utf8')
data_str = json.dumps(data)
yield 'event: {}\ndata: {}\n\n'.format(channel, data_str)
else:
time.sleep(0.2)
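# Hedged illustration (not part of the original view): events reach connected
# SSE clients -- which open the index() endpoint above with an
# "Accept: text/event-stream" header and a client-id query parameter -- by
# publishing JSON on one of the CHANNELS listed above.  The payload fields are
# invented for the example; a client whose client-id equals source_client_id
# will not receive the event.
def _example_publish(redis_connection):
    '''Publish a sample "site" event through Redis pub/sub.'''
    payload = {'id': 1, 'action': 'updated', 'source_client_id': 'client-123'}
    redis_connection.publish('site', json.dumps(payload))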
|
{
"content_hash": "4b134c804277b7e09001943b542632cc",
"timestamp": "",
"source": "github",
"line_count": 82,
"max_line_length": 94,
"avg_line_length": 30.74390243902439,
"alnum_prop": 0.5763585878619596,
"repo_name": "TeamHG-Memex/hgprofiler",
"id": "5cf7d8ab471807185f3f889a78658350b464d4c9",
"size": "2521",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/app/views/notification.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "22445"
},
{
"name": "Dart",
"bytes": "157800"
},
{
"name": "HTML",
"bytes": "66599"
},
{
"name": "JavaScript",
"bytes": "448280"
},
{
"name": "Python",
"bytes": "213096"
}
],
"symlink_target": ""
}
|
"""<MyProject>, a CherryPy application.
Use this as a base for creating new CherryPy applications. When you want
to make a new app, copy and paste this folder to some other location
(maybe site-packages) and rename it to the name of your project,
then tweak as desired.
Even before any tweaking, this should serve a few demonstration pages.
Change to this directory and run:
cherryd -c site.conf
"""
import cherrypy
from cherrypy import tools, url
import os
local_dir = os.path.join(os.getcwd(), os.path.dirname(__file__))
@cherrypy.config(**{'tools.log_tracebacks.on': True})
class Root:
"""Declaration of the CherryPy app URI structure."""
@cherrypy.expose
def index(self):
"""Render HTML-template at the root path of the web-app."""
return """<html>
<body>Try some <a href='%s?a=7'>other</a> path,
or a <a href='%s?n=14'>default</a> path.<br />
Or, just look at the pretty picture:<br />
<img src='%s' />
</body></html>""" % (url('other'), url('else'),
url('files/made_with_cherrypy_small.png'))
@cherrypy.expose
def default(self, *args, **kwargs):
"""Render catch-all args and kwargs."""
return 'args: %s kwargs: %s' % (args, kwargs)
@cherrypy.expose
def other(self, a=2, b='bananas', c=None):
"""Render number of fruits based on third argument."""
cherrypy.response.headers['Content-Type'] = 'text/plain'
if c is None:
return 'Have %d %s.' % (int(a), b)
else:
return 'Have %d %s, %s.' % (int(a), b, c)
files = tools.staticdir.handler(
section='/files',
dir=os.path.join(local_dir, 'static'),
# Ignore .php files, etc.
match=r'\.(css|gif|html?|ico|jpe?g|js|png|swf|xml)$',
)
root = Root()
# Uncomment the following to use your own favicon instead of CP's default.
# favicon_path = os.path.join(local_dir, "favicon.ico")
# root.favicon_ico = tools.staticfile.handler(filename=favicon_path)
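# Hedged usage sketch (not part of the original scaffold): besides running it
# under ``cherryd -c site.conf`` as described in the module docstring, the same
# ``root`` object can be served directly.  The port below is an assumption made
# only for this example.
if __name__ == '__main__':
    cherrypy.config.update({'server.socket_port': 8080})
    cherrypy.quickstart(root, '/')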
|
{
"content_hash": "992b06c34f237e80b0dabd6bc5a782be",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 74,
"avg_line_length": 31.6984126984127,
"alnum_prop": 0.628943415122684,
"repo_name": "cherrypy/cherrypy",
"id": "bcddba2db85b0b509b26371ef4046b62e6c688f4",
"size": "1997",
"binary": false,
"copies": "14",
"ref": "refs/heads/main",
"path": "cherrypy/scaffold/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "17"
},
{
"name": "HTML",
"bytes": "510"
},
{
"name": "Python",
"bytes": "984166"
}
],
"symlink_target": ""
}
|
from neutron.api.rpc.agentnotifiers import l3_rpc_agent_api
from neutron.api.rpc.handlers import l3_rpc
from neutron.common import rpc as n_rpc
from neutron.common import topics
from neutron.db import common_db_mixin
from neutron.db import dns_db
#from neutron.db import l3_gwmode_db
from oslo_config import cfg
from oslo_utils import importutils
from networking_cisco import backwards_compatibility as bc
import networking_cisco.plugins
from networking_cisco.plugins.cisco.common import cisco_constants
from networking_cisco.plugins.cisco.db.l3 import ha_db
from networking_cisco.plugins.cisco.db.l3 import l3_router_appliance_db
from networking_cisco.plugins.cisco.db.l3 import routertype_db
from networking_cisco.plugins.cisco.db.scheduler import (
l3_routertype_aware_schedulers_db as router_sch_db)
from networking_cisco.plugins.cisco.extensions import ha
from networking_cisco.plugins.cisco.extensions import routerhostingdevice
from networking_cisco.plugins.cisco.extensions import routerrole
from networking_cisco.plugins.cisco.extensions import routertype
from networking_cisco.plugins.cisco.extensions import routertypeawarescheduler
from networking_cisco.plugins.cisco.l3.rpc import (
l3_router_cfg_agent_rpc_cb as l3cfg_rpc)
from networking_cisco.plugins.cisco.l3.rpc import l3_router_rpc_cfg_agent_api
class CiscoRouterPlugin(common_db_mixin.CommonDbMixin,
routertype_db.RoutertypeDbMixin,
ha_db.HA_db_mixin,
l3_router_appliance_db.L3RouterApplianceDBMixin,
#l3_gwmode_db.L3_NAT_db_mixin,
router_sch_db.L3RouterTypeAwareSchedulerDbMixin,
dns_db.DNSDbMixin):
"""Implementation of Cisco L3 Router Service Plugin for Neutron.
This class implements a L3 service plugin that provides
router and floatingip resources and manages associated
request/response.
All DB functionality is implemented in class
l3_router_appliance_db.L3RouterApplianceDBMixin.
"""
supported_extension_aliases = [
"router", # "ext-gw-mode",
"standard-attr-description",
"extraroute", "l3_agent_scheduler",
routerhostingdevice.ROUTERHOSTINGDEVICE_ALIAS,
routerrole.ROUTERROLE_ALIAS,
routertype.ROUTERTYPE_ALIAS,
routertypeawarescheduler.ROUTERTYPE_AWARE_SCHEDULER_ALIAS,
ha.HA_ALIAS,
"dns-integration"]
def __init__(self):
self.setup_rpc()
basepath = networking_cisco.plugins.__path__[0]
ext_paths = [basepath + '/cisco/extensions']
cp = cfg.CONF.api_extensions_path
to_add = ""
for ext_path in ext_paths:
if cp.find(ext_path) == -1:
to_add += ':' + ext_path
if to_add != "":
cfg.CONF.set_override('api_extensions_path', cp + to_add)
self.router_scheduler = importutils.import_object(
cfg.CONF.routing.router_type_aware_scheduler_driver)
self.l3agent_scheduler = importutils.import_object(
cfg.CONF.router_scheduler_driver)
def setup_rpc(self):
# RPC support
self.topic = topics.L3PLUGIN
self.conn = n_rpc.create_connection()
self.agent_notifiers[bc.constants.AGENT_TYPE_L3] = (
l3_rpc_agent_api.L3AgentNotifyAPI())
self.agent_notifiers[cisco_constants.AGENT_TYPE_L3_CFG] = (
l3_router_rpc_cfg_agent_api.L3RouterCfgAgentNotifyAPI(self))
self.endpoints = [l3_rpc.L3RpcCallback(),
l3cfg_rpc.L3RouterCfgRpcCallback(self)]
self.conn.create_consumer(self.topic, self.endpoints,
fanout=False)
# Consume from all consumers in threads
self.conn.consume_in_threads()
def get_plugin_type(self):
return bc.constants.L3
def get_plugin_description(self):
return ("Cisco Router Service Plugin for basic L3 forwarding"
" between (L2) Neutron networks and access to external"
" networks via a NAT gateway.")
def create_floatingip(self, context, floatingip):
"""Create floating IP.
:param context: Neutron request context
:param floatingip: data for the floating IP being created
:returns: A floating IP object on success
As the l3 router plugin asynchronously creates floating IPs
leveraging the l3 agent and l3 cfg agent, the initial status for the
floating IP object will be DOWN.
"""
return super(CiscoRouterPlugin, self).create_floatingip(
context, floatingip,
initial_status=bc.constants.FLOATINGIP_STATUS_DOWN)
@property
def _core_plugin(self):
try:
return self._plugin
except AttributeError:
self._plugin = bc.get_plugin()
return self._plugin
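# Hedged deployment note (not part of the original plugin): the class above is
# enabled by listing it among Neutron's service plugins, e.g. in neutron.conf:
#
#   [DEFAULT]
#   service_plugins = networking_cisco.plugins.cisco.service_plugins.cisco_router_plugin.CiscoRouterPlugin
#
# A shorter alias may also be registered through entry points; the dotted path
# shown here is simply this module's own import path.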
|
{
"content_hash": "3d683fc6ec7394e3ee51c89ce6bb3d38",
"timestamp": "",
"source": "github",
"line_count": 116,
"max_line_length": 78,
"avg_line_length": 42.241379310344826,
"alnum_prop": 0.6744897959183673,
"repo_name": "Tehsmash/networking-cisco",
"id": "85bad7c93404715b89da27d03bc05049f67fef61",
"size": "5534",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "networking_cisco/plugins/cisco/service_plugins/cisco_router_plugin.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "1043"
},
{
"name": "Python",
"bytes": "3715465"
},
{
"name": "Shell",
"bytes": "35749"
}
],
"symlink_target": ""
}
|
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
import warnings
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'PeopleInPovertyState'
db.create_table('data_peopleinpovertystate', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('year', self.gf('django.db.models.fields.IntegerField')()),
('state', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['data.State'])),
('total_population', self.gf('django.db.models.fields.IntegerField')()),
('value', self.gf('django.db.models.fields.IntegerField')()),
('value_standard_error', self.gf('django.db.models.fields.IntegerField')()),
('percent', self.gf('django.db.models.fields.DecimalField')(max_digits=5, decimal_places=2)),
('percent_standard_error', self.gf('django.db.models.fields.DecimalField')(max_digits=5, decimal_places=2)),
('create_date', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
('update_date', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
))
db.send_create_signal('data', ['PeopleInPovertyState'])
# Adding model 'ChildrenPovertyState'
db.create_table('data_childrenpovertystate', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('year', self.gf('django.db.models.fields.IntegerField')()),
('state', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['data.State'])),
('children_total', self.gf('django.db.models.fields.IntegerField')()),
('children_total_moe', self.gf('django.db.models.fields.IntegerField')()),
('children_poverty', self.gf('django.db.models.fields.IntegerField')()),
('children_poverty_moe', self.gf('django.db.models.fields.IntegerField')()),
('children_poverty_percent', self.gf('django.db.models.fields.DecimalField')(max_digits=5, decimal_places=2)),
('children_poverty_percent_moe', self.gf('django.db.models.fields.DecimalField')(max_digits=5, decimal_places=2)),
('create_date', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
('update_date', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
))
db.send_create_signal('data', ['ChildrenPovertyState'])
# Adding model 'FamiliesPovertyState'
db.create_table('data_familiespovertystate', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('year', self.gf('django.db.models.fields.IntegerField')()),
('state', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['data.State'])),
('families_total', self.gf('django.db.models.fields.IntegerField')()),
('families_total_moe', self.gf('django.db.models.fields.IntegerField')()),
('families_poverty_percent', self.gf('django.db.models.fields.DecimalField')(max_digits=5, decimal_places=2)),
('families_poverty_percent_moe', self.gf('django.db.models.fields.DecimalField')(max_digits=5, decimal_places=2)),
('create_date', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
('update_date', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
))
db.send_create_signal('data', ['FamiliesPovertyState'])
# Changing field 'FamiliesPoverty.families_poverty_percent'
with warnings.catch_warnings():
warnings.simplefilter("ignore")
db.alter_column('data_familiespoverty', 'families_poverty_percent', self.gf('django.db.models.fields.DecimalField')(max_digits=5, decimal_places=2))
# Changing field 'FamiliesPoverty.families_poverty_percent_moe'
with warnings.catch_warnings():
warnings.simplefilter("ignore")
db.alter_column('data_familiespoverty', 'families_poverty_percent_moe', self.gf('django.db.models.fields.DecimalField')(max_digits=5, decimal_places=2))
# Adding field 'PeopleInPoverty.create_date'
db.add_column('data_peopleinpoverty', 'create_date', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, default=datetime.date(2012, 12, 5), blank=True), keep_default=False)
# Adding field 'PeopleInPoverty.update_date'
db.add_column('data_peopleinpoverty', 'update_date', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, default=datetime.date(2012, 12, 5), blank=True), keep_default=False)
# Changing field 'PeopleInPoverty.percent_standard_error'
with warnings.catch_warnings():
warnings.simplefilter("ignore")
db.alter_column('data_peopleinpoverty', 'percent_standard_error', self.gf('django.db.models.fields.DecimalField')(max_digits=5, decimal_places=2))
# Changing field 'PeopleInPoverty.percent'
with warnings.catch_warnings():
warnings.simplefilter("ignore")
db.alter_column('data_peopleinpoverty', 'percent', self.gf('django.db.models.fields.DecimalField')(max_digits=5, decimal_places=2))
# Changing field 'ChildrenPoverty.children_poverty_percent_moe'
with warnings.catch_warnings():
warnings.simplefilter("ignore")
db.alter_column('data_childrenpoverty', 'children_poverty_percent_moe', self.gf('django.db.models.fields.DecimalField')(max_digits=5, decimal_places=2))
# Changing field 'ChildrenPoverty.children_poverty_percent'
with warnings.catch_warnings():
warnings.simplefilter("ignore")
db.alter_column('data_childrenpoverty', 'children_poverty_percent', self.gf('django.db.models.fields.DecimalField')(max_digits=5, decimal_places=2))
def backwards(self, orm):
# Deleting model 'PeopleInPovertyState'
db.delete_table('data_peopleinpovertystate')
# Deleting model 'ChildrenPovertyState'
db.delete_table('data_childrenpovertystate')
# Deleting model 'FamiliesPovertyState'
db.delete_table('data_familiespovertystate')
# Changing field 'FamiliesPoverty.families_poverty_percent'
db.alter_column('data_familiespoverty', 'families_poverty_percent', self.gf('django.db.models.fields.FloatField')())
# Changing field 'FamiliesPoverty.families_poverty_percent_moe'
db.alter_column('data_familiespoverty', 'families_poverty_percent_moe', self.gf('django.db.models.fields.FloatField')())
# Deleting field 'PeopleInPoverty.create_date'
db.delete_column('data_peopleinpoverty', 'create_date')
# Deleting field 'PeopleInPoverty.update_date'
db.delete_column('data_peopleinpoverty', 'update_date')
# Changing field 'PeopleInPoverty.percent_standard_error'
db.alter_column('data_peopleinpoverty', 'percent_standard_error', self.gf('django.db.models.fields.FloatField')())
# Changing field 'PeopleInPoverty.percent'
db.alter_column('data_peopleinpoverty', 'percent', self.gf('django.db.models.fields.FloatField')())
# Changing field 'ChildrenPoverty.children_poverty_percent_moe'
db.alter_column('data_childrenpoverty', 'children_poverty_percent_moe', self.gf('django.db.models.fields.FloatField')())
# Changing field 'ChildrenPoverty.children_poverty_percent'
db.alter_column('data_childrenpoverty', 'children_poverty_percent', self.gf('django.db.models.fields.FloatField')())
models = {
'data.agegroup': {
'Meta': {'object_name': 'AgeGroup'},
'age_group_desc': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'age_group_name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '20'}),
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
'data.alternativefuelvehicles': {
'Meta': {'object_name': 'AlternativeFuelVehicles'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'value': ('django.db.models.fields.IntegerField', [], {}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.annualstateenergyexpenditures': {
'Meta': {'object_name': 'AnnualStateEnergyExpenditures'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'msn': ('django.db.models.fields.CharField', [], {'max_length': '5'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'value': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.ansicountystate': {
'Meta': {'object_name': 'AnsiCountyState'},
'ansi_class': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'ansi_state': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'code': ('django.db.models.fields.CharField', [], {'max_length': '3'}),
'county': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '2'})
},
'data.ansistate': {
'Meta': {'object_name': 'AnsiState'},
'ansi_state': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'gnisid': ('django.db.models.fields.CharField', [], {'max_length': '8'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'state_name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'data.atcodes': {
'Meta': {'object_name': 'AtCodes'},
'assistance_type': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'code': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'data.averageteachersalary': {
'Meta': {'object_name': 'AverageTeacherSalary'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'value': ('django.db.models.fields.IntegerField', [], {}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.bilingualedspending': {
'Meta': {'object_name': 'BilingualEdSpending'},
'agency_id': ('django.db.models.fields.CharField', [], {'max_length': '7'}),
'agency_name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'amount': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.budgetcategorysubfunctions': {
'Meta': {'object_name': 'BudgetCategorySubfunctions'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'npp_budget_category': ('django.db.models.fields.TextField', [], {'max_length': '64'}),
'subfunction': ('django.db.models.fields.TextField', [], {'max_length': '3'})
},
'data.category': {
'Meta': {'object_name': 'Category'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '32'})
},
'data.cffr': {
'Meta': {'unique_together': "(('year', 'state', 'county', 'cffrprogram'),)", 'object_name': 'Cffr'},
'amount': ('django.db.models.fields.BigIntegerField', [], {}),
'amount_per_capita': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '20', 'decimal_places': '2'}),
'cffrprogram': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.CffrProgram']"}),
'county': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.County']"}),
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.State']"}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.cffragency': {
'Meta': {'object_name': 'CffrAgency'},
'agency_code': ('django.db.models.fields.CharField', [], {'max_length': '4'}),
'agency_name': ('django.db.models.fields.CharField', [], {'max_length': '90'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.cffrgeo': {
'Meta': {'object_name': 'CffrGeo'},
'congress_district': ('django.db.models.fields.CharField', [], {'max_length': '34', 'null': 'True'}),
'county_code': ('django.db.models.fields.CharField', [], {'max_length': '3'}),
'county_gu': ('django.db.models.fields.CharField', [], {'max_length': '3'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'place_code': ('django.db.models.fields.CharField', [], {'max_length': '5'}),
'place_gu': ('django.db.models.fields.CharField', [], {'max_length': '3'}),
'place_name': ('django.db.models.fields.CharField', [], {'max_length': '24'}),
'population': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'split_gu': ('django.db.models.fields.CharField', [], {'max_length': '3'}),
'state_code': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'state_gu': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'type_gu': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.cffrindividualcounty': {
'Meta': {'unique_together': "(('year', 'state', 'county'),)", 'object_name': 'CffrIndividualCounty'},
'amount': ('django.db.models.fields.BigIntegerField', [], {}),
'amount_per_capita': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '20', 'decimal_places': '2'}),
'county': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.County']"}),
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.State']"}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.cffrindividualstate': {
'Meta': {'unique_together': "(('year', 'state'),)", 'object_name': 'CffrIndividualState'},
'amount': ('django.db.models.fields.BigIntegerField', [], {}),
'amount_per_capita': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '20', 'decimal_places': '2'}),
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.State']"}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.cffrobjectcode': {
'Meta': {'object_name': 'CffrObjectCode'},
'category': ('django.db.models.fields.CharField', [], {'max_length': '80'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_code': ('django.db.models.fields.CharField', [], {'max_length': '2'})
},
'data.cffrprogram': {
'Meta': {'unique_together': "(('year', 'program_code'),)", 'object_name': 'CffrProgram'},
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'program_code': ('django.db.models.fields.CharField', [], {'max_length': '6'}),
'program_desc': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'program_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.cffrprogramraw': {
'Meta': {'object_name': 'CffrProgramRaw'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'program_id_code': ('django.db.models.fields.CharField', [], {'max_length': '6'}),
'program_name': ('django.db.models.fields.CharField', [], {'max_length': '74'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.cffrraw': {
'Meta': {'object_name': 'CffrRaw'},
'agency_code': ('django.db.models.fields.CharField', [], {'max_length': '4'}),
'amount': ('django.db.models.fields.BigIntegerField', [], {}),
'amount_adjusted': ('django.db.models.fields.BigIntegerField', [], {'null': 'True'}),
'congress_district': ('django.db.models.fields.CharField', [], {'max_length': '34', 'null': 'True'}),
'county_code': ('django.db.models.fields.CharField', [], {'max_length': '3'}),
'county_name': ('django.db.models.fields.CharField', [], {'max_length': '24', 'null': 'True'}),
'funding_sign': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_type': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'place_code': ('django.db.models.fields.CharField', [], {'max_length': '5'}),
'place_name': ('django.db.models.fields.CharField', [], {'max_length': '24', 'null': 'True'}),
'population': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'program_code': ('django.db.models.fields.CharField', [], {'max_length': '6'}),
'state_code': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'state_postal': ('django.db.models.fields.CharField', [], {'max_length': '2', 'null': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.cffrstate': {
'Meta': {'unique_together': "(('year', 'state', 'cffrprogram'),)", 'object_name': 'CffrState'},
'amount': ('django.db.models.fields.BigIntegerField', [], {}),
'amount_per_capita': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '20', 'decimal_places': '2'}),
'cffrprogram': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.CffrProgram']"}),
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.State']"}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.childrenpoverty': {
'Meta': {'object_name': 'ChildrenPoverty'},
'children_poverty': ('django.db.models.fields.IntegerField', [], {}),
'children_poverty_moe': ('django.db.models.fields.IntegerField', [], {}),
'children_poverty_percent': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '2'}),
'children_poverty_percent_moe': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '2'}),
'children_total': ('django.db.models.fields.IntegerField', [], {}),
'children_total_moe': ('django.db.models.fields.IntegerField', [], {}),
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.childrenpovertystate': {
'Meta': {'object_name': 'ChildrenPovertyState'},
'children_poverty': ('django.db.models.fields.IntegerField', [], {}),
'children_poverty_moe': ('django.db.models.fields.IntegerField', [], {}),
'children_poverty_percent': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '2'}),
'children_poverty_percent_moe': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '2'}),
'children_total': ('django.db.models.fields.IntegerField', [], {}),
'children_total_moe': ('django.db.models.fields.IntegerField', [], {}),
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.State']"}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.county': {
'Meta': {'unique_together': "(('state', 'county_ansi'),)", 'object_name': 'County'},
'county_abbr': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'county_ansi': ('django.db.models.fields.CharField', [], {'max_length': '3'}),
'county_desc': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'county_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'sort_order': ('django.db.models.fields.SmallIntegerField', [], {'null': 'True'}),
'state': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.State']"}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
'data.diplomarecipienttotal': {
'Meta': {'object_name': 'DiplomaRecipientTotal'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'value': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.dropoutsrace': {
'Meta': {'object_name': 'DropoutsRace'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'value': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.drugfreeschoolspending': {
'Meta': {'object_name': 'DrugFreeSchoolSpending'},
'agency_id': ('django.db.models.fields.CharField', [], {'max_length': '7'}),
'agency_name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'amount': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.educationalattainment': {
'Meta': {'object_name': 'EducationalAttainment'},
'category': ('django.db.models.fields.TextField', [], {'max_length': '64'}),
'gender': ('django.db.models.fields.TextField', [], {'max_length': '16'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.TextField', [], {'max_length': '32'}),
'value': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'value_type': ('django.db.models.fields.TextField', [], {'max_length': '16'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.electricemissionsstate': {
'Meta': {'unique_together': "(('year', 'state', 'producer_type', 'energy_source'),)", 'object_name': 'ElectricEmissionsState'},
'co2': ('django.db.models.fields.BigIntegerField', [], {}),
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'energy_source': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nox': ('django.db.models.fields.BigIntegerField', [], {}),
'producer_type': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'so2': ('django.db.models.fields.BigIntegerField', [], {}),
'state': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.State']"}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.electricemissionsstateraw': {
'Meta': {'object_name': 'ElectricEmissionsStateRaw'},
'co2': ('django.db.models.fields.BigIntegerField', [], {}),
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'energy_source': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nox': ('django.db.models.fields.BigIntegerField', [], {}),
'producer_type': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'so2': ('django.db.models.fields.BigIntegerField', [], {}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '16'}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.ellstudentsdistrict': {
'Meta': {'object_name': 'EllStudentsDistrict'},
'agency_id': ('django.db.models.fields.CharField', [], {'max_length': '7'}),
'agency_name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'amount': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.employment': {
'Meta': {'object_name': 'Employment'},
'black_civilian_labor_force': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'black_unemployed': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'hispanic_civilian_labor_force': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'hispanic_unemployed': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'total_civilian_labor_force': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'white_civilian_labor_force': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'white_unemployed': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.energyconsumptionstate': {
'Meta': {'unique_together': "(('year', 'state', 'msn'),)", 'object_name': 'EnergyConsumptionState'},
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'msn': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.Msn']"}),
'state': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.State']"}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'value': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.energyconsumptionstateraw': {
'Meta': {'object_name': 'EnergyConsumptionStateRaw'},
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'msn': ('django.db.models.fields.CharField', [], {'max_length': '5'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'value': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.energyproductionstate': {
'Meta': {'unique_together': "(('year', 'state', 'msn'),)", 'object_name': 'EnergyProductionState'},
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'msn': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.Msn']"}),
'state': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.State']"}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'value': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.energyproductionstateraw': {
'Meta': {'object_name': 'EnergyProductionStateRaw'},
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'msn': ('django.db.models.fields.CharField', [], {'max_length': '5'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'value': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.enrolledstudentsdistrict': {
'Meta': {'object_name': 'EnrolledStudentsDistrict'},
'agency_name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'value': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'year': ('django.db.models.fields.CharField', [], {'max_length': '9'})
},
'data.enrollmentrace': {
'Meta': {'object_name': 'EnrollmentRace'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'value': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.ethnicity': {
'Meta': {'object_name': 'Ethnicity'},
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'ethnicity_abbr': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '5'}),
'ethnicity_desc': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'ethnicity_name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
'data.expenditureperpupil': {
'Meta': {'object_name': 'ExpenditurePerPupil'},
'agency_id': ('django.db.models.fields.CharField', [], {'max_length': '7'}),
'agency_name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'amount': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.familiespoverty': {
'Meta': {'object_name': 'FamiliesPoverty'},
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'families_poverty_percent': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '2'}),
'families_poverty_percent_moe': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '2'}),
'families_total': ('django.db.models.fields.IntegerField', [], {}),
'families_total_moe': ('django.db.models.fields.IntegerField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.familiespovertystate': {
'Meta': {'object_name': 'FamiliesPovertyState'},
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'families_poverty_percent': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '2'}),
'families_poverty_percent_moe': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '2'}),
'families_total': ('django.db.models.fields.IntegerField', [], {}),
'families_total_moe': ('django.db.models.fields.IntegerField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.State']"}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.fcnaspending': {
'Meta': {'object_name': 'FcnaSpending'},
'agency_id': ('django.db.models.fields.CharField', [], {'max_length': '7'}),
'agency_name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'amount': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.federalimpactaid': {
'Meta': {'object_name': 'FederalImpactAid'},
'agency_id': ('django.db.models.fields.CharField', [], {'max_length': '7'}),
'agency_name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'amount': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.federaltaxcollectionstate': {
'Meta': {'unique_together': "(('year', 'state'),)", 'object_name': 'FederalTaxCollectionState'},
'business_income': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '20', 'decimal_places': '2'}),
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'estate': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '20', 'decimal_places': '2'}),
'estate_trust_income': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '20', 'decimal_places': '2'}),
'excise': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '20', 'decimal_places': '2'}),
'gift': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '20', 'decimal_places': '2'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'individual_total': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '20', 'decimal_places': '2'}),
'notwitheld_income_and_seca': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '20', 'decimal_places': '2'}),
'railroad_retirement': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '20', 'decimal_places': '2'}),
'state': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.State']"}),
'total': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '20', 'decimal_places': '2'}),
'unemployment_insurance': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '20', 'decimal_places': '2'}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'witheld_income_and_fica': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '20', 'decimal_places': '2'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.federaltaxcollectionstateraw': {
'Meta': {'object_name': 'FederalTaxCollectionStateRaw'},
'business_income_taxes': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '20', 'decimal_places': '2'}),
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'estate_tax': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '20', 'decimal_places': '2'}),
'estate_trust_income_tax': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '20', 'decimal_places': '2'}),
'excise_taxes': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '20', 'decimal_places': '2'}),
'gift_tax': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '20', 'decimal_places': '2'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'income_employment_estate_trust_total': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '20', 'decimal_places': '2'}),
'individual_notwitheld_seca': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '20', 'decimal_places': '2'}),
'individual_witheld_fica': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '20', 'decimal_places': '2'}),
'railroad_retirement': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '20', 'decimal_places': '2'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'total_collections': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '20', 'decimal_places': '2'}),
'unemployment_insurance': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '20', 'decimal_places': '2'}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.fipscountycongressdistrict': {
'Meta': {'object_name': 'FipsCountyCongressDistrict'},
'congress': ('django.db.models.fields.IntegerField', [], {}),
'county_code': ('django.db.models.fields.CharField', [], {'max_length': '3'}),
'district_code': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state_code': ('django.db.models.fields.CharField', [], {'max_length': '2'})
},
'data.fipsstate': {
'Meta': {'object_name': 'FipsState'},
'code': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '2'})
},
'data.foodsecuritystate': {
'Meta': {'unique_together': "(('year', 'state'),)", 'object_name': 'FoodSecurityState'},
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'food_insecure': ('django.db.models.fields.IntegerField', [], {}),
'food_insecure_percent': ('django.db.models.fields.FloatField', [], {}),
'food_secure': ('django.db.models.fields.IntegerField', [], {}),
'food_secure_high': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'food_secure_high_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'food_secure_low': ('django.db.models.fields.IntegerField', [], {}),
'food_secure_low_percent': ('django.db.models.fields.FloatField', [], {}),
'food_secure_marginal': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'food_secure_marginal_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'food_secure_percent': ('django.db.models.fields.FloatField', [], {}),
'food_secure_very_low': ('django.db.models.fields.IntegerField', [], {}),
'food_secure_very_low_percent': ('django.db.models.fields.FloatField', [], {}),
'household_total': ('django.db.models.fields.IntegerField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'no_response': ('django.db.models.fields.IntegerField', [], {}),
'state': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.State']"}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.foodsecuritystateraw': {
'Meta': {'object_name': 'FoodSecurityStateRaw'},
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'food_secure': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'food_secure_high': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'food_secure_low': ('django.db.models.fields.IntegerField', [], {}),
'food_secure_marginal': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'food_secure_very_low': ('django.db.models.fields.IntegerField', [], {}),
'household_total': ('django.db.models.fields.IntegerField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'no_response': ('django.db.models.fields.IntegerField', [], {}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.freeluncheligible': {
'Meta': {'object_name': 'FreeLunchEligible'},
'agency_id': ('django.db.models.fields.CharField', [], {'max_length': '7'}),
'agency_name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'amount': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.freereducedluncheligible': {
'Meta': {'object_name': 'FreeReducedLunchEligible'},
'agency_id': ('django.db.models.fields.CharField', [], {'max_length': '7'}),
'agency_name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'amount': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.freereducedluncheligiblecounty': {
'Meta': {'object_name': 'FreeReducedLunchEligibleCounty'},
'amount': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'county_name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '2', 'null': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.gender': {
'Meta': {'object_name': 'Gender'},
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'gender_abbr': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '1'}),
'gender_name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '10'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
'data.halfpints': {
'Meta': {'object_name': 'HalfPints'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'value': ('django.db.models.fields.IntegerField', [], {}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.headstartenrollment': {
'Meta': {'object_name': 'HeadStartEnrollment'},
'enrollment': ('django.db.models.fields.IntegerField', [], {}),
'funding': ('django.db.models.fields.IntegerField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.healthinsurance': {
'Meta': {'object_name': 'HealthInsurance'},
'all_people': ('django.db.models.fields.IntegerField', [], {}),
'covered': ('django.db.models.fields.IntegerField', [], {}),
'covered_pct': ('django.db.models.fields.FloatField', [], {}),
'covered_pct_se': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'covered_se': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'direct_purchase': ('django.db.models.fields.IntegerField', [], {}),
'direct_purchase_pct': ('django.db.models.fields.FloatField', [], {}),
'direct_purchase_pct_se': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'direct_purchase_se': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'govt': ('django.db.models.fields.IntegerField', [], {}),
'govt_pct': ('django.db.models.fields.FloatField', [], {}),
'govt_pct_se': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'govt_se': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'medicaid': ('django.db.models.fields.IntegerField', [], {}),
'medicaid_pct': ('django.db.models.fields.FloatField', [], {}),
'medicaid_pct_se': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'medicaid_se': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'medicare': ('django.db.models.fields.IntegerField', [], {}),
'medicare_pct': ('django.db.models.fields.FloatField', [], {}),
'medicare_pct_se': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'medicare_se': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'military': ('django.db.models.fields.IntegerField', [], {}),
'military_pct': ('django.db.models.fields.FloatField', [], {}),
'military_pct_se': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'military_se': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'not_covered': ('django.db.models.fields.IntegerField', [], {}),
'not_covered_pct': ('django.db.models.fields.FloatField', [], {}),
'not_covered_pct_se': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'not_covered_se': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'private': ('django.db.models.fields.IntegerField', [], {}),
'private_employment': ('django.db.models.fields.IntegerField', [], {}),
'private_employment_pct': ('django.db.models.fields.FloatField', [], {}),
'private_employment_pct_se': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'private_employment_se': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'private_pct': ('django.db.models.fields.FloatField', [], {}),
'private_pct_se': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'private_se': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.highschooldropouts': {
'Meta': {'object_name': 'HighSchoolDropouts'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'value': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.highschoolother': {
'Meta': {'object_name': 'HighSchoolOther'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'value': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.housingoccupancystate': {
'Meta': {'unique_together': "(('year', 'state'),)", 'object_name': 'HousingOccupancyState'},
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'occupied_units': ('django.db.models.fields.IntegerField', [], {}),
'occupied_units_moe': ('django.db.models.fields.IntegerField', [], {}),
'occupied_units_percent': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '2'}),
'occupied_units_percent_moe': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '2'}),
'owner_occupied': ('django.db.models.fields.IntegerField', [], {}),
'owner_occupied_moe': ('django.db.models.fields.IntegerField', [], {}),
'owner_occupied_percent': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '2'}),
'owner_occupied_percent_moe': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '2'}),
'owner_vacancy_rate': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '2'}),
'owner_vacancy_rate_moe': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '2'}),
'renter_occupied': ('django.db.models.fields.IntegerField', [], {}),
'renter_occupied_moe': ('django.db.models.fields.IntegerField', [], {}),
'renter_occupied_percent': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '2'}),
'renter_occupied_percent_moe': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '2'}),
'renter_vacancy_rate': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '2'}),
'renter_vacancy_rate_moe': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '2'}),
'state': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.State']"}),
'total_units': ('django.db.models.fields.IntegerField', [], {}),
'total_units_moe': ('django.db.models.fields.IntegerField', [], {}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'vacant_units': ('django.db.models.fields.IntegerField', [], {}),
'vacant_units_moe': ('django.db.models.fields.IntegerField', [], {}),
'vacant_units_percent': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '2'}),
'vacant_units_percent_moe': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '2'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.housingoccupancystateraw': {
'Meta': {'object_name': 'HousingOccupancyStateRaw'},
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'occupied_units': ('django.db.models.fields.IntegerField', [], {}),
'occupied_units_moe': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'occupied_units_percent': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '5', 'decimal_places': '2'}),
'occupied_units_percent_moe': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True'}),
'owner_occupied': ('django.db.models.fields.IntegerField', [], {}),
'owner_occupied_moe': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'owner_occupied_percent': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '5', 'decimal_places': '2'}),
'owner_occupied_percent_moe': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True'}),
'owner_vacancy_rate': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '2'}),
'owner_vacancy_rate_moe': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'renter_occupied': ('django.db.models.fields.IntegerField', [], {}),
'renter_occupied_moe': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'renter_occupied_percent': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '5', 'decimal_places': '2'}),
'renter_occupied_percent_moe': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True'}),
'renter_vacancy_rate': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '2'}),
'renter_vacancy_rate_moe': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'state_fips': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'total_units': ('django.db.models.fields.IntegerField', [], {}),
'total_units_moe': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'vacant_units': ('django.db.models.fields.IntegerField', [], {}),
'vacant_units_moe': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'vacant_units_percent': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '5', 'decimal_places': '2'}),
'vacant_units_percent_moe': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.individualeducationprograms': {
'Meta': {'object_name': 'IndividualEducationPrograms'},
'agency_id': ('django.db.models.fields.CharField', [], {'max_length': '7'}),
'agency_name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'amount': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.kidshealthinsurance': {
'Meta': {'object_name': 'KidsHealthInsurance'},
'all_people': ('django.db.models.fields.IntegerField', [], {}),
'covered': ('django.db.models.fields.IntegerField', [], {}),
'covered_pct': ('django.db.models.fields.FloatField', [], {}),
'covered_pct_se': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'covered_se': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'direct_purchase': ('django.db.models.fields.IntegerField', [], {}),
'direct_purchase_pct': ('django.db.models.fields.FloatField', [], {}),
'direct_purchase_pct_se': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'direct_purchase_se': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'govt': ('django.db.models.fields.IntegerField', [], {}),
'govt_pct': ('django.db.models.fields.FloatField', [], {}),
'govt_pct_se': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'govt_se': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'medicaid': ('django.db.models.fields.IntegerField', [], {}),
'medicaid_pct': ('django.db.models.fields.FloatField', [], {}),
'medicaid_pct_se': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'medicaid_se': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'medicare': ('django.db.models.fields.IntegerField', [], {}),
'medicare_pct': ('django.db.models.fields.FloatField', [], {}),
'medicare_pct_se': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'medicare_se': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'military': ('django.db.models.fields.IntegerField', [], {}),
'military_pct': ('django.db.models.fields.FloatField', [], {}),
'military_pct_se': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'military_se': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'not_covered': ('django.db.models.fields.IntegerField', [], {}),
'not_covered_pct': ('django.db.models.fields.FloatField', [], {}),
'not_covered_pct_se': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'not_covered_se': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'private': ('django.db.models.fields.IntegerField', [], {}),
'private_employment': ('django.db.models.fields.IntegerField', [], {}),
'private_employment_pct': ('django.db.models.fields.FloatField', [], {}),
'private_employment_pct_se': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'private_employment_se': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'private_pct': ('django.db.models.fields.FloatField', [], {}),
'private_pct_se': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'private_se': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.laborforcecounty': {
'Meta': {'unique_together': "(('year', 'state', 'county'),)", 'object_name': 'LaborForceCounty'},
'county': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.County']"}),
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'employment_total': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'labor_force_total': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'laus_code': ('django.db.models.fields.CharField', [], {'max_length': '8'}),
'state': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.State']"}),
'unemployment_rate': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'unemployment_total': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.laborforcecountyraw': {
'Meta': {'object_name': 'LaborForceCountyRaw'},
'county_fips': ('django.db.models.fields.CharField', [], {'max_length': '3'}),
'county_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'employed': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'labor_force': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'laus_code': ('django.db.models.fields.CharField', [], {'max_length': '8'}),
'state_fips': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'unemployed': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'unemployment_rate': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.laborforcestate': {
'Meta': {'unique_together': "(('year', 'state'),)", 'object_name': 'LaborForceState'},
'civilian_noninstitutional_pop': ('django.db.models.fields.IntegerField', [], {}),
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'employment_pop_rate': ('django.db.models.fields.FloatField', [], {}),
'employment_total': ('django.db.models.fields.IntegerField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'labor_force_participation_rate': ('django.db.models.fields.FloatField', [], {}),
'labor_force_total': ('django.db.models.fields.IntegerField', [], {}),
'state': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.State']"}),
'unemployment_rate': ('django.db.models.fields.FloatField', [], {}),
'unemployment_total': ('django.db.models.fields.IntegerField', [], {}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.laborforcestateraw': {
'Meta': {'object_name': 'LaborForceStateRaw'},
'area': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'area_fips': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'civilian_noninstitutional_pop': ('django.db.models.fields.IntegerField', [], {}),
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'employment_pop_rate': ('django.db.models.fields.FloatField', [], {}),
'employment_total': ('django.db.models.fields.IntegerField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'labor_force_participation_rate': ('django.db.models.fields.FloatField', [], {}),
'labor_force_total': ('django.db.models.fields.IntegerField', [], {}),
'unemployment_rate': ('django.db.models.fields.FloatField', [], {}),
'unemployment_total': ('django.db.models.fields.IntegerField', [], {}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.laborunderutilizationstate': {
'Meta': {'unique_together': "(('year', 'state'),)", 'object_name': 'LaborUnderutilizationState'},
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.State']"}),
'u1': ('django.db.models.fields.FloatField', [], {}),
'u2': ('django.db.models.fields.FloatField', [], {}),
'u3': ('django.db.models.fields.FloatField', [], {}),
'u4': ('django.db.models.fields.FloatField', [], {}),
'u5': ('django.db.models.fields.FloatField', [], {}),
'u6': ('django.db.models.fields.FloatField', [], {}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.laborunderutilizationstateraw': {
'Meta': {'object_name': 'LaborUnderutilizationStateRaw'},
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'u1': ('django.db.models.fields.FloatField', [], {}),
'u2': ('django.db.models.fields.FloatField', [], {}),
'u3': ('django.db.models.fields.FloatField', [], {}),
'u4': ('django.db.models.fields.FloatField', [], {}),
'u5': ('django.db.models.fields.FloatField', [], {}),
'u6': ('django.db.models.fields.FloatField', [], {}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.mathsciencespending': {
'Meta': {'object_name': 'MathScienceSpending'},
'agency_id': ('django.db.models.fields.CharField', [], {'max_length': '7'}),
'agency_name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'amount': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.medianhouseholdincomestateraw': {
'Meta': {'object_name': 'MedianHouseholdIncomeStateRaw'},
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'median_household_income': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'median_household_income_moe': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'state_fips': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.medianincomestate': {
'Meta': {'unique_together': "(('year', 'state'),)", 'object_name': 'MedianIncomeState'},
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'median_household_income': ('django.db.models.fields.FloatField', [], {}),
'median_household_income_moe': ('django.db.models.fields.IntegerField', [], {}),
'state': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.State']"}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.medicaidparticipation': {
'Meta': {'object_name': 'MedicaidParticipation'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'value': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.medicareenrollment': {
'Meta': {'object_name': 'MedicareEnrollment'},
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'population': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'value': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.migrantstudents': {
'Meta': {'object_name': 'MigrantStudents'},
'agency_id': ('django.db.models.fields.CharField', [], {'max_length': '7'}),
'agency_name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'amount': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.militarypersonnel': {
'Meta': {'object_name': 'MilitaryPersonnel'},
'civilian_personnel': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'military_personnel': ('django.db.models.fields.IntegerField', [], {}),
'reserve_national_guard_personnel': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.msn': {
'Meta': {'object_name': 'Msn'},
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'msn_code': ('django.db.models.fields.CharField', [], {'max_length': '5'}),
'msn_desc': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'msn_unit': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
'data.nativeedspending': {
'Meta': {'object_name': 'NativeEdSpending'},
'agency_id': ('django.db.models.fields.CharField', [], {'max_length': '7'}),
'agency_name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'amount': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.ncesschooldistrict': {
'Meta': {'object_name': 'NcesSchoolDistrict'},
'congress_code': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'county_code': ('django.db.models.fields.CharField', [], {'max_length': '4'}),
'county_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'district_code': ('django.db.models.fields.CharField', [], {'max_length': '6'}),
'district_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'state_code': ('django.db.models.fields.CharField', [], {'max_length': '2'})
},
'data.newaidscases': {
'Meta': {'object_name': 'NewAidsCases'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'value': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.otherfederalrevenue': {
'Meta': {'object_name': 'OtherFederalRevenue'},
'agency_id': ('django.db.models.fields.CharField', [], {'max_length': '7'}),
'agency_name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'amount': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'year': ('django.db.models.fields.CharField', [], {'max_length': '16'})
},
'data.ownersrenters': {
'Meta': {'object_name': 'OwnersRenters'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'not_in_universe': ('django.db.models.fields.IntegerField', [], {}),
'owned': ('django.db.models.fields.IntegerField', [], {}),
'rented': ('django.db.models.fields.IntegerField', [], {}),
'rented_no_cash': ('django.db.models.fields.IntegerField', [], {}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'total': ('django.db.models.fields.IntegerField', [], {}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.peopleinpoverty': {
'Meta': {'object_name': 'PeopleInPoverty'},
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'percent': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '2'}),
'percent_standard_error': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '2'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'total_population': ('django.db.models.fields.IntegerField', [], {}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'value': ('django.db.models.fields.IntegerField', [], {}),
'value_standard_error': ('django.db.models.fields.IntegerField', [], {}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.peopleinpovertystate': {
'Meta': {'object_name': 'PeopleInPovertyState'},
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'percent': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '2'}),
'percent_standard_error': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '2'}),
'state': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.State']"}),
'total_population': ('django.db.models.fields.IntegerField', [], {}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'value': ('django.db.models.fields.IntegerField', [], {}),
'value_standard_error': ('django.db.models.fields.IntegerField', [], {}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.populationagecounty': {
'Meta': {'unique_together': "(('year', 'state', 'county'),)", 'object_name': 'PopulationAgeCounty'},
'age_0_19': ('django.db.models.fields.IntegerField', [], {}),
'age_0_19_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'age_0_4': ('django.db.models.fields.IntegerField', [], {}),
'age_0_4_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'age_10_14': ('django.db.models.fields.IntegerField', [], {}),
'age_10_14_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'age_15_19': ('django.db.models.fields.IntegerField', [], {}),
'age_15_19_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'age_20_24': ('django.db.models.fields.IntegerField', [], {}),
'age_20_24_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'age_25_29': ('django.db.models.fields.IntegerField', [], {}),
'age_25_29_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'age_30_34': ('django.db.models.fields.IntegerField', [], {}),
'age_30_34_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'age_35_39': ('django.db.models.fields.IntegerField', [], {}),
'age_35_39_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'age_40_44': ('django.db.models.fields.IntegerField', [], {}),
'age_40_44_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'age_45_49': ('django.db.models.fields.IntegerField', [], {}),
'age_45_49_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'age_50_54': ('django.db.models.fields.IntegerField', [], {}),
'age_50_54_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'age_55_59': ('django.db.models.fields.IntegerField', [], {}),
'age_55_59_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'age_5_9': ('django.db.models.fields.IntegerField', [], {}),
'age_5_9_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'age_60_64': ('django.db.models.fields.IntegerField', [], {}),
'age_60_64_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'age_65_69': ('django.db.models.fields.IntegerField', [], {}),
'age_65_69_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'age_65_over': ('django.db.models.fields.IntegerField', [], {}),
'age_65_over_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'age_70_74': ('django.db.models.fields.IntegerField', [], {}),
'age_70_74_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'age_75_79': ('django.db.models.fields.IntegerField', [], {}),
'age_75_79_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'age_80_84': ('django.db.models.fields.IntegerField', [], {}),
'age_80_84_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'age_85_over': ('django.db.models.fields.IntegerField', [], {}),
'age_85_over_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'county': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.County']"}),
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.State']"}),
'total': ('django.db.models.fields.IntegerField', [], {}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.populationagestate': {
'Meta': {'unique_together': "(('year', 'state'),)", 'object_name': 'PopulationAgeState'},
'age_0_19': ('django.db.models.fields.IntegerField', [], {}),
'age_0_19_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'age_0_4': ('django.db.models.fields.IntegerField', [], {}),
'age_0_4_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'age_10_14': ('django.db.models.fields.IntegerField', [], {}),
'age_10_14_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'age_15_19': ('django.db.models.fields.IntegerField', [], {}),
'age_15_19_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'age_20_24': ('django.db.models.fields.IntegerField', [], {}),
'age_20_24_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'age_25_29': ('django.db.models.fields.IntegerField', [], {}),
'age_25_29_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'age_30_34': ('django.db.models.fields.IntegerField', [], {}),
'age_30_34_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'age_35_39': ('django.db.models.fields.IntegerField', [], {}),
'age_35_39_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'age_40_44': ('django.db.models.fields.IntegerField', [], {}),
'age_40_44_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'age_45_49': ('django.db.models.fields.IntegerField', [], {}),
'age_45_49_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'age_50_54': ('django.db.models.fields.IntegerField', [], {}),
'age_50_54_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'age_55_59': ('django.db.models.fields.IntegerField', [], {}),
'age_55_59_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'age_5_9': ('django.db.models.fields.IntegerField', [], {}),
'age_5_9_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'age_60_64': ('django.db.models.fields.IntegerField', [], {}),
'age_60_64_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'age_65_69': ('django.db.models.fields.IntegerField', [], {}),
'age_65_69_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'age_65_over': ('django.db.models.fields.IntegerField', [], {}),
'age_65_over_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'age_70_74': ('django.db.models.fields.IntegerField', [], {}),
'age_70_74_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'age_75_79': ('django.db.models.fields.IntegerField', [], {}),
'age_75_79_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'age_80_84': ('django.db.models.fields.IntegerField', [], {}),
'age_80_84_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'age_85_over': ('django.db.models.fields.IntegerField', [], {}),
'age_85_over_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.State']"}),
'total': ('django.db.models.fields.IntegerField', [], {}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.populationcongressionaldistrict': {
'Meta': {'object_name': 'PopulationCongressionalDistrict'},
'american_indian_alaskan_alone': ('django.db.models.fields.IntegerField', [], {}),
'asian_alone': ('django.db.models.fields.IntegerField', [], {}),
'black_alone': ('django.db.models.fields.IntegerField', [], {}),
'district': ('django.db.models.fields.IntegerField', [], {}),
'hawaiian_pacific_island_alone': ('django.db.models.fields.IntegerField', [], {}),
'households': ('django.db.models.fields.IntegerField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'other_alone': ('django.db.models.fields.IntegerField', [], {}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'two_or_more_races': ('django.db.models.fields.IntegerField', [], {}),
'white_alone': ('django.db.models.fields.IntegerField', [], {}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.populationest00raw': {
'Meta': {'unique_together': "(('state', 'county', 'gender', 'ethnic_origin', 'race'),)", 'object_name': 'PopulationEst00Raw'},
'census2010pop': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'county': ('django.db.models.fields.CharField', [], {'max_length': '3'}),
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'ctyname': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'estimatesbase2000': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'ethnic_origin': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'gender': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'popestimate2000': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'popestimate2001': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'popestimate2002': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'popestimate2003': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'popestimate2004': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'popestimate2005': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'popestimate2006': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'popestimate2007': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'popestimate2008': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'popestimate2009': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'popestimate2010': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'race': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'stname': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'sumlev': ('django.db.models.fields.CharField', [], {'max_length': '3'}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
'data.populationest90raw': {
'Meta': {'unique_together': "(('year', 'state', 'county', 'agegrp', 'race_gender', 'ethnic_origin'),)", 'object_name': 'PopulationEst90Raw'},
'agegrp': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'county': ('django.db.models.fields.CharField', [], {'max_length': '3'}),
'create_date': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'ethnic_origin': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'population': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'race_gender': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'update_date': ('django.db.models.fields.DateField', [], {'auto_now': 'True', 'blank': 'True'}),
'year': ('django.db.models.fields.CharField', [], {'max_length': '2'})
},
'data.populationfamilies': {
'Meta': {'object_name': 'PopulationFamilies'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'value': ('django.db.models.fields.IntegerField', [], {}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.populationgendercounty': {
'Meta': {'unique_together': "(('year', 'state', 'county'),)", 'object_name': 'PopulationGenderCounty'},
'county': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.County']"}),
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'female': ('django.db.models.fields.IntegerField', [], {}),
'female_percent': ('django.db.models.fields.FloatField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'male': ('django.db.models.fields.IntegerField', [], {}),
'male_percent': ('django.db.models.fields.FloatField', [], {}),
'state': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.State']"}),
'total': ('django.db.models.fields.IntegerField', [], {}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.populationgenderstate': {
'Meta': {'unique_together': "(('year', 'state'),)", 'object_name': 'PopulationGenderState'},
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'female': ('django.db.models.fields.IntegerField', [], {}),
'female_percent': ('django.db.models.fields.FloatField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'male': ('django.db.models.fields.IntegerField', [], {}),
'male_percent': ('django.db.models.fields.FloatField', [], {}),
'state': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.State']"}),
'total': ('django.db.models.fields.IntegerField', [], {}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.populationracecounty': {
'Meta': {'unique_together': "(('year', 'state', 'county'),)", 'object_name': 'PopulationRaceCounty'},
'asian_alone': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'asian_alone_hispanic': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'asian_alone_hispanic_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'asian_alone_nonhispanic': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'asian_alone_nonhispanic_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'asian_alone_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'asian_other': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'asian_other_hispanic': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'asian_other_hispanic_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'asian_other_nonhispanic': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'asian_other_nonhispanic_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'asian_other_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'asian_pacific_islander_alone': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'asian_pacific_islander_alone_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'black_alone': ('django.db.models.fields.IntegerField', [], {}),
'black_alone_hispanic': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'black_alone_hispanic_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'black_alone_nonhispanic': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'black_alone_nonhispanic_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'black_alone_percent': ('django.db.models.fields.FloatField', [], {}),
'black_other': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'black_other_hispanic': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'black_other_hispanic_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'black_other_nonhispanic': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'black_other_nonhispanic_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'black_other_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'county': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.County']"}),
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'hispanic': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'hispanic_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'multiple_race': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'multiple_race_hispanic': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'multiple_race_hispanic_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'multiple_race_nonhispanic': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'multiple_race_nonhispanic_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'multiple_race_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'native_alone': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'native_alone_hispanic': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'native_alone_hispanic_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'native_alone_nonhispanic': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'native_alone_nonhispanic_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'native_alone_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'native_other': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'native_other_hispanic': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'native_other_hispanic_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'native_other_nonhispanic': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'native_other_nonhispanic_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'native_other_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'nonhispanic': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'nonhispanic_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'other': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'other_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'pacific_islander_alone': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'pacific_islander_alone_hispanic': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'pacific_islander_alone_hispanic_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'pacific_islander_alone_nonhispanic': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'pacific_islander_alone_nonhispanic_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'pacific_islander_alone_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'pacific_islander_other': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'pacific_islander_other_hispanic': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'pacific_islander_other_hispanic_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'pacific_islander_other_nonhispanic': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'pacific_islander_other_nonhispanic_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'pacific_islander_other_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'state': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.State']"}),
'total': ('django.db.models.fields.IntegerField', [], {}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'white_alone': ('django.db.models.fields.IntegerField', [], {}),
'white_alone_hispanic': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'white_alone_hispanic_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'white_alone_nonhispanic': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'white_alone_nonhispanic_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'white_alone_percent': ('django.db.models.fields.FloatField', [], {}),
'white_other': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'white_other_hispanic': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'white_other_hispanic_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'white_other_nonhispanic': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'white_other_nonhispanic_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'white_other_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.populationracestate': {
'Meta': {'unique_together': "(('year', 'state'),)", 'object_name': 'PopulationRaceState'},
'asian_alone': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'asian_alone_hispanic': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'asian_alone_hispanic_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'asian_alone_nonhispanic': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'asian_alone_nonhispanic_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'asian_alone_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'asian_other': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'asian_other_hispanic': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'asian_other_hispanic_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'asian_other_nonhispanic': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'asian_other_nonhispanic_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'asian_other_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'asian_pacific_islander_alone': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'asian_pacific_islander_alone_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'black_alone': ('django.db.models.fields.IntegerField', [], {}),
'black_alone_hispanic': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'black_alone_hispanic_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'black_alone_nonhispanic': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'black_alone_nonhispanic_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'black_alone_percent': ('django.db.models.fields.FloatField', [], {}),
'black_other': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'black_other_hispanic': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'black_other_hispanic_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'black_other_nonhispanic': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'black_other_nonhispanic_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'black_other_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'hispanic': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'hispanic_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'multiple_race': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'multiple_race_hispanic': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'multiple_race_hispanic_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'multiple_race_nonhispanic': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'multiple_race_nonhispanic_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'multiple_race_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'native_alone': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'native_alone_hispanic': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'native_alone_hispanic_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'native_alone_nonhispanic': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'native_alone_nonhispanic_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'native_alone_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'native_other': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'native_other_hispanic': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'native_other_hispanic_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'native_other_nonhispanic': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'native_other_nonhispanic_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'native_other_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'nonhispanic': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'nonhispanic_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'other': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'other_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'pacific_islander_alone': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'pacific_islander_alone_hispanic': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'pacific_islander_alone_hispanic_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'pacific_islander_alone_nonhispanic': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'pacific_islander_alone_nonhispanic_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'pacific_islander_alone_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'pacific_islander_other': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'pacific_islander_other_hispanic': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'pacific_islander_other_hispanic_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'pacific_islander_other_nonhispanic': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'pacific_islander_other_nonhispanic_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'pacific_islander_other_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'state': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.State']"}),
'total': ('django.db.models.fields.IntegerField', [], {}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'white_alone': ('django.db.models.fields.IntegerField', [], {}),
'white_alone_hispanic': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'white_alone_hispanic_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'white_alone_nonhispanic': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'white_alone_nonhispanic_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'white_alone_percent': ('django.db.models.fields.FloatField', [], {}),
'white_other': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'white_other_hispanic': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'white_other_hispanic_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'white_other_nonhispanic': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'white_other_nonhispanic_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'white_other_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.presidentsbudget': {
'Meta': {'object_name': 'PresidentsBudget'},
'account_code': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'account_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'agency_code': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'agency_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'bea_category': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'budget_type': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'bureau_code': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'bureau_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'grant_non_grant': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'on_off_budget': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'source_category_code': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'source_category_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'source_subcategory_code': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'source_subcategory_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'subfunction_code': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'subfunction_title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'treasury_agency_code': ('django.db.models.fields.IntegerField', [], {'null': 'True'})
},
'data.presidentsbudgetyear': {
'Meta': {'object_name': 'PresidentsBudgetYear'},
'budget': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'years'", 'to': "orm['data.PresidentsBudget']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'value': ('django.db.models.fields.IntegerField', [], {}),
'year': ('django.db.models.fields.CharField', [], {'max_length': '4'})
},
'data.pupilteacherdistrict': {
'Meta': {'object_name': 'PupilTeacherDistrict'},
'agency_id': ('django.db.models.fields.CharField', [], {'max_length': '7'}),
'agency_name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'amount': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.pupilteacherstate': {
'Meta': {'unique_together': "(('year', 'state'),)", 'object_name': 'PupilTeacherState'},
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ratio': ('django.db.models.fields.FloatField', [], {}),
'state': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.State']"}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.pupilteacherstateraw': {
'Meta': {'object_name': 'PupilTeacherStateRaw'},
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ratio': ('django.db.models.fields.FloatField', [], {}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.race': {
'Meta': {'object_name': 'Race'},
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'race_abbr': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '20'}),
'race_desc': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'race_name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50'}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
'data.racecombo': {
'Meta': {'object_name': 'RaceCombo'},
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'race_combo_flag': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'race_combo_name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '20'}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
'data.retireddisablednilf': {
'Meta': {'object_name': 'RetiredDisabledNilf'},
'disabled_not_in_labor_force': ('django.db.models.fields.IntegerField', [], {}),
'employed_absent': ('django.db.models.fields.IntegerField', [], {}),
'employed_at_work': ('django.db.models.fields.IntegerField', [], {}),
'employed_on_layoff': ('django.db.models.fields.IntegerField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'other_not_in_labor_force': ('django.db.models.fields.IntegerField', [], {}),
'retired_not_in_labor_force': ('django.db.models.fields.IntegerField', [], {}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'total': ('django.db.models.fields.IntegerField', [], {}),
'unemployed_looking': ('django.db.models.fields.IntegerField', [], {}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.saipecountystate': {
'Meta': {'object_name': 'SaipeCountyState'},
'age_0_17_poverty': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'age_0_17_poverty_90_lower': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'age_0_17_poverty_90_upper': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'age_0_17_poverty_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'age_0_17_poverty_percent_90_lower': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'age_0_17_poverty_percent_90_upper': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'age_0_5_poverty': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'age_0_5_poverty_90_lower': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'age_0_5_poverty_90_upper': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'age_0_5_poverty_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'age_0_5_poverty_percent_90_lower': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'age_0_5_poverty_percent_90_upper': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'age_5_17_related_poverty': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'age_5_17_related_poverty_90_lower': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'age_5_17_related_poverty_90_upper': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'age_5_17_related_poverty_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'age_5_17_related_poverty_percent_90_lower': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'age_5_17_related_poverty_percent_90_upper': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'all_age_poverty': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'all_age_poverty_90_lower': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'all_age_poverty_90_upper': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'all_age_poverty_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'all_age_poverty_percent_90_lower': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'all_age_poverty_percent_90_upper': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'file_tag': ('django.db.models.fields.CharField', [], {'max_length': '22'}),
'fips_county': ('django.db.models.fields.CharField', [], {'max_length': '3'}),
'fips_state': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'median_household_income': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'median_household_income_90_lower': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'median_household_income_90_upper': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'state_county_name': ('django.db.models.fields.CharField', [], {'max_length': '45'}),
'state_postal_abbreviation': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.saipeschool': {
'Meta': {'object_name': 'SaipeSchool'},
'ccd_district_id': ('django.db.models.fields.CharField', [], {'max_length': '5'}),
'district_name': ('django.db.models.fields.CharField', [], {'max_length': '65'}),
'file_stamp': ('django.db.models.fields.CharField', [], {'max_length': '21'}),
'fips_state': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'population': ('django.db.models.fields.IntegerField', [], {}),
'relevant_population': ('django.db.models.fields.IntegerField', [], {}),
'relevant_population_poverty': ('django.db.models.fields.IntegerField', [], {}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.schipenrollmentstate': {
'Meta': {'unique_together': "(('year', 'state'),)", 'object_name': 'SchipEnrollmentState'},
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.State']"}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'value': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.schipenrollmentstateraw': {
'Meta': {'object_name': 'SchipEnrollmentStateRaw'},
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'value': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.schoolbreakfastparticipationstate': {
'Meta': {'unique_together': "(('year', 'state'),)", 'object_name': 'SchoolBreakfastParticipationState'},
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.State']"}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'value': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.schoolbreakfastparticipationstateraw': {
'Meta': {'object_name': 'SchoolBreakfastParticipationStateRaw'},
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'value': ('django.db.models.fields.IntegerField', [], {}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.schoollunchparticipationstate': {
'Meta': {'unique_together': "(('year', 'state'),)", 'object_name': 'SchoolLunchParticipationState'},
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.State']"}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'value': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.schoollunchparticipationstateraw': {
'Meta': {'object_name': 'SchoolLunchParticipationStateRaw'},
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'value': ('django.db.models.fields.IntegerField', [], {}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.shelterpopulation': {
'Meta': {'object_name': 'ShelterPopulation'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'value': ('django.db.models.fields.IntegerField', [], {}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.snapbenefitsrecipients': {
'Meta': {'object_name': 'SnapBenefitsRecipients'},
'county_fips': ('django.db.models.fields.CharField', [], {'max_length': '3'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'state_fips': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'value': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.snapmonthlybenefitspersonstate': {
'Meta': {'unique_together': "(('year', 'state'),)", 'object_name': 'SnapMonthlyBenefitsPersonState'},
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.State']"}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'value': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.snapmonthlybenefitspersonstateraw': {
'Meta': {'object_name': 'SnapMonthlyBenefitsPersonStateRaw'},
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'value': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.snapparticipationhouseholdsstate': {
'Meta': {'unique_together': "(('year', 'state'),)", 'object_name': 'SnapParticipationHouseholdsState'},
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.State']"}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'value': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.snapparticipationhouseholdsstateraw': {
'Meta': {'object_name': 'SnapParticipationHouseholdsStateRaw'},
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'value': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.snapparticipationpeoplestate': {
'Meta': {'unique_together': "(('year', 'state'),)", 'object_name': 'SnapParticipationPeopleState'},
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.State']"}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'value': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.snapparticipationpeoplestateraw': {
'Meta': {'object_name': 'SnapParticipationPeopleStateRaw'},
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'value': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.source': {
'Meta': {'object_name': 'Source'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.Category']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'string_id': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
'data.specialedfunding': {
'Meta': {'object_name': 'SpecialEdFunding'},
'agency_id': ('django.db.models.fields.CharField', [], {'max_length': '7'}),
'agency_name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'amount': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.state': {
'Meta': {'object_name': 'State'},
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'sort_order': ('django.db.models.fields.SmallIntegerField', [], {'null': 'True'}),
'state_abbr': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '2'}),
'state_ansi': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '2'}),
'state_desc': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'state_gnisid': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True'}),
'state_name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
'data.statecompletionrate': {
'Meta': {'object_name': 'StateCompletionRate'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '16'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'value': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.stategdp': {
'Meta': {'object_name': 'StateGdp'},
'component': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'component_code': ('django.db.models.fields.IntegerField', [], {}),
'fips': ('django.db.models.fields.IntegerField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'industry': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'industry_code': ('django.db.models.fields.IntegerField', [], {}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'value': ('django.db.models.fields.IntegerField', [], {}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.stategdppre97': {
'Meta': {'object_name': 'StateGdpPre97'},
'component': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'component_code': ('django.db.models.fields.IntegerField', [], {}),
'fips': ('django.db.models.fields.IntegerField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'industry': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'industry_code': ('django.db.models.fields.IntegerField', [], {}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'value': ('django.db.models.fields.IntegerField', [], {}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.statepostalcodes': {
'Meta': {'object_name': 'StatePostalCodes'},
'code': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '32'})
},
'data.staterenewableenergy': {
'Meta': {'object_name': 'StateRenewableEnergy'},
'fossil_coal': ('django.db.models.fields.FloatField', [], {}),
'fossil_gas': ('django.db.models.fields.FloatField', [], {}),
'fossil_oil': ('django.db.models.fields.FloatField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nuclear_electric': ('django.db.models.fields.FloatField', [], {}),
'renewable_biofuels': ('django.db.models.fields.FloatField', [], {}),
'renewable_other': ('django.db.models.fields.FloatField', [], {}),
'renewable_total': ('django.db.models.fields.FloatField', [], {}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'total': ('django.db.models.fields.FloatField', [], {}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.subfunctionscffr': {
'Meta': {'object_name': 'SubfunctionsCffr'},
'at_code_1': ('django.db.models.fields.TextField', [], {'max_length': '1', 'null': 'True'}),
'at_code_2': ('django.db.models.fields.TextField', [], {'max_length': '1', 'null': 'True'}),
'at_code_3': ('django.db.models.fields.TextField', [], {'max_length': '1', 'null': 'True'}),
'at_code_4': ('django.db.models.fields.TextField', [], {'max_length': '1', 'null': 'True'}),
'at_code_5': ('django.db.models.fields.TextField', [], {'max_length': '1', 'null': 'True'}),
'at_code_6': ('django.db.models.fields.TextField', [], {'max_length': '1', 'null': 'True'}),
'at_code_7': ('django.db.models.fields.TextField', [], {'max_length': '1', 'null': 'True'}),
'at_code_8': ('django.db.models.fields.TextField', [], {'max_length': '1', 'null': 'True'}),
'cfda_program_code': ('django.db.models.fields.TextField', [], {'max_length': '8'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'program_name': ('django.db.models.fields.TextField', [], {'max_length': '64'}),
'subfunction_name': ('django.db.models.fields.TextField', [], {'max_length': '64'}),
'subfunction_number': ('django.db.models.fields.TextField', [], {'max_length': '3'})
},
'data.summerlunchparticipationstate': {
'Meta': {'unique_together': "(('year', 'state'),)", 'object_name': 'SummerLunchParticipationState'},
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.State']"}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'value': ('django.db.models.fields.IntegerField', [], {}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.summerlunchparticipationstateraw': {
'Meta': {'object_name': 'SummerLunchParticipationStateRaw'},
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'value': ('django.db.models.fields.IntegerField', [], {}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.tanffamilystateraw': {
'Meta': {'object_name': 'TanfFamilyStateRaw'},
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'value': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.tanfparticipationstate': {
'Meta': {'unique_together': "(('year', 'state'),)", 'object_name': 'TanfParticipationState'},
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'family': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'person': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'state': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.State']"}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.tanfparticipationstateraw': {
'Meta': {'object_name': 'TanfParticipationStateRaw'},
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'value': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.titleifunding': {
'Meta': {'object_name': 'TitleIFunding'},
'agency_id': ('django.db.models.fields.CharField', [], {'max_length': '7'}),
'agency_name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'amount': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.totalstudents': {
'Meta': {'object_name': 'TotalStudents'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'value': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.vehicleregistrations': {
'Meta': {'object_name': 'VehicleRegistrations'},
'all_private': ('django.db.models.fields.IntegerField', [], {}),
'all_public': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'all_total': ('django.db.models.fields.IntegerField', [], {}),
'auto_private': ('django.db.models.fields.IntegerField', [], {}),
'auto_public': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'auto_total': ('django.db.models.fields.IntegerField', [], {}),
'buses_private': ('django.db.models.fields.IntegerField', [], {}),
'buses_public': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'buses_total': ('django.db.models.fields.IntegerField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'motorcycle_private': ('django.db.models.fields.IntegerField', [], {}),
'motorcycle_public': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'private_commercial_per_capita': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'trucks_private': ('django.db.models.fields.IntegerField', [], {}),
'trucks_public': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'trucks_total': ('django.db.models.fields.IntegerField', [], {}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.vocationaledspending': {
'Meta': {'object_name': 'VocationalEdSpending'},
'agency_id': ('django.db.models.fields.CharField', [], {'max_length': '7'}),
'agency_name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'amount': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.wicbenefitsstate': {
'Meta': {'unique_together': "(('year', 'state'),)", 'object_name': 'WicBenefitsState'},
'amount': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.State']"}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.wicbenefitsstateraw': {
'Meta': {'object_name': 'WicBenefitsStateRaw'},
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'place': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'value': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.wicparticipationstate': {
'Meta': {'unique_together': "(('year', 'state'),)", 'object_name': 'WicParticipationState'},
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.State']"}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'value': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'data.wicparticipationstateraw': {
'Meta': {'object_name': 'WicParticipationStateRaw'},
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'place': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'value': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {})
}
}
complete_apps = ['data']
|
{
"content_hash": "0f77680d9cf8b94d4962e1ae26cb93f7",
"timestamp": "",
"source": "github",
"line_count": 1941,
"max_line_length": 197,
"avg_line_length": 79.17516743946419,
"alnum_prop": 0.5528341543086563,
"repo_name": "npp/npp-api",
"id": "ff186e3d7fde240f39a1336e29935c633b4cdab7",
"size": "153697",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "data/migrations/0021_auto__add_peopleinpovertystate__add_childrenpovertystate__add_families.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "5982539"
},
{
"name": "Shell",
"bytes": "36"
}
],
"symlink_target": ""
}
|
import sys
from time import sleep
import logging
import os
import string
import re
import datetime
from commands import *
# Initialize logging
logging.basicConfig(format='%(asctime)s [%(levelname)s] %(name)s - %(message)s', level=logging.WARNING)
_log = logging.getLogger()
_log.setLevel(logging.INFO)
class Gbp_Config(object):
def __init__( self ):
"""
Init def
"""
self.err_strings=['Unable','Conflict','Bad Request','Error', 'Unknown','Exception','Invalid','read-only','not supported','prefix greater than subnet mask']
def keystone_creds(self):
creds={}
creds['username'] = os.environ['OS_USERNAME']
creds['password'] = os.environ['OS_PASSWORD']
creds['auth_url'] = os.environ['OS_AUTH_URL']
creds['tenant_name'] = os.environ['OS_TENANT_NAME']
return creds
def get_uuid(self,cmd_out):
'''
Extracts UUID of a gbp object
'''
match=re.search("\\bid\\b\s+\| (.*) \|",cmd_out,re.I)
if match != None:
obj_uuid = match.group(1)
#_log.info( "UUID:\n%s " %(obj_uuid))
return obj_uuid.rstrip()
else:
return 0
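    # Illustrative note (an assumption, not part of the original source): get_uuid()
    # expects the tabular output that the gbp/neutron CLIs print on create, roughly:
    #
    #   +-----------+--------------------------------------+
    #   | Field     | Value                                |
    #   +-----------+--------------------------------------+
    #   | id        | 3b4f6a22-....                        |
    #   +-----------+--------------------------------------+
    #
    # The regex picks the value from the 'id' row and rstrip() drops the padding
    # spaces captured before the trailing '|'. Exact column widths may vary.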
def gbp_action_config(self,cmd_val,name_uuid,**kwargs):
"""
-- cmd_val== 0:delete; 1:create; 2:update
-- name_uuid == UUID or name_string
Create/Update/Delete Policy Action
Returns assigned UUID on Create
kwargs addresses the need for passing required/optional params
"""
if cmd_val == '' or name_uuid == '':
_log.info('''Function Usage: gbp_action_config 0 "abc"\n
--cmd_val == 0:delete; 1:create; 2:update\n
-- name_uuid == UUID or name_string\n''')
return 0
#Build the command with mandatory param 'name_uuid'
if cmd_val == 0:
cmd = 'gbp policy-action-delete '+str(name_uuid)
if cmd_val == 1:
cmd = 'gbp policy-action-create '+str(name_uuid)
if cmd_val == 2:
cmd = 'gbp policy-action-update '+str(name_uuid)
# Build the cmd string for optional/non-default args/values
for arg, value in kwargs.items():
cmd = cmd + " --" + "".join( '%s %s' %(arg, value ))
_log.info(cmd)
# Execute the policy-action-config-cmd
cmd_out = getoutput(cmd)
_log.info(cmd_out)
        # Check for non-exception error strings, even though the command itself succeeded
if self.cmd_error_check(cmd_out) == 0:
return 0
# If "create" cmd succeeded then parse the cmd_out to extract the UUID
if cmd_val==1:
action_uuid = self.get_uuid(cmd_out)
return action_uuid
def gbp_classif_config(self,cmd_val,classifier_name,**kwargs):
"""
-- cmd_val== 0:delete; 1:create; 2:update
-- classifier_name == UUID or name_string
Create/Update/Delete Policy Classifier
Returns assigned UUID on Create
kwargs addresses the need for passing required/optional params
"""
if cmd_val == '' or classifier_name == '':
_log.info('''Function Usage: gbp_classifier_config 0 "abc"\n
--cmd_val == 0:delete; 1:create; 2:update\n
-- classifier_name == UUID or name_string\n''')
return 0
#Build the command with mandatory param 'classifier_name'
if cmd_val == 0:
cmd = 'gbp policy-classifier-delete '+str(classifier_name)
if cmd_val == 1:
cmd = 'gbp policy-classifier-create '+str(classifier_name)
if cmd_val == 2:
cmd = 'gbp policy-classifier-update '+str(classifier_name)
# Build the cmd string for optional/non-default args/values
for arg, value in kwargs.items():
cmd = cmd + " --" + "".join( '%s %s' %(arg, value ))
#_log.info(cmd)
# Execute the policy-classifier-config-cmd
cmd_out = getoutput(cmd)
        # Check for non-exception error strings, even though the command itself succeeded
if self.cmd_error_check(cmd_out) == 0:
return 0
        # If the "create" cmd succeeded, parse the cmd_out to extract the UUID
if cmd_val==1:
classifier_uuid = self.get_uuid(cmd_out)
return classifier_uuid
def gbp_policy_cfg_all(self,cmd_val,cfgobj,name_uuid,**kwargs):
"""
        --cfgobj == policy-* (where * = action, classifier, rule, ruleset, group, target, ...)
--cmd_val== 0:delete; 1:create; 2:update
--name_uuid == UUID or name_string
Create/Update/Delete Policy Object
Returns assigned UUID on Create
kwargs addresses the need for passing required/optional params
"""
cfgobj_dict={"action":"policy-action","classifier":"policy-classifier","rule":"policy-rule",
"ruleset":"policy-rule-set","group":"policy-target-group","target":"policy-target",
"l2p":"l2policy","l3p":"l3policy","nsp":"network-service-policy",
"extseg":"external-segment","extpol":"external-policy","natpool":"nat-pool"}
if cfgobj != '':
if cfgobj not in cfgobj_dict:
raise KeyError
if cmd_val == '' or name_uuid == '':
_log.info('''Function Usage: gbp_policy_cfg_all 'rule' 0 "abc"\n
--cmd_val == 0:delete; 1:create; 2:update\n
-- name_uuid == UUID or name_string\n''')
return 0
#Build the command with mandatory params
if cmd_val == 0:
cmd = 'gbp %s-delete ' % cfgobj_dict[cfgobj]+str(name_uuid)
if cmd_val == 1:
cmd = 'gbp %s-create ' % cfgobj_dict[cfgobj]+str(name_uuid)
if cmd_val == 2:
cmd = 'gbp %s-update ' % cfgobj_dict[cfgobj]+str(name_uuid)
# Build the cmd string for optional/non-default args/values
for arg, value in kwargs.items():
if '_' in arg:
arg=string.replace(arg,'_','-')
cmd = cmd + " --" + "".join( '%s=%s' %(arg, value ))
_log.info(cmd)
# Execute the cmd
cmd_out = getoutput(cmd)
#_log.info(cmd_out)
        # Check for non-exception error strings, even though the command itself succeeded
if self.cmd_error_check(cmd_out) == 0:
return 0
# If try clause succeeds for "create" cmd then parse the cmd_out to extract the UUID of the object
try:
if cmd_val==1 and cfgobj=="group":
obj_uuid = self.get_uuid(cmd_out)
match = re.search("\\bl2_policy_id\\b\s+\| (.*) \|",cmd_out,re.I)
l2pid = match.group(1)
match = re.search("\\bsubnets\\b\s+\| (.*) \|",cmd_out,re.I)
subnetid = match.group(1)
return obj_uuid,l2pid.rstrip(),subnetid.rstrip()
if cmd_val==1 and cfgobj=="target":
obj_uuid = self.get_uuid(cmd_out)
match = re.search("\\bport_id\\b\s+\| (.*) \|",cmd_out,re.I)
neutr_port_id = match.group(1)
return obj_uuid.rstrip(),neutr_port_id.rstrip()
if cmd_val==1 and cfgobj=="l2p":
obj_uuid = self.get_uuid(cmd_out)
              match = re.search("\\bl3_policy_id\\b\s+\| (.*) \|",cmd_out,re.I)
l3p_uuid = match.group(1)
return obj_uuid.rstrip(),l3p_uuid.rstrip()
if cmd_val==1:
obj_uuid = self.get_uuid(cmd_out)
return obj_uuid.rstrip()
except Exception as e:
exc_type, exc_value, exc_traceback = sys.exc_info()
_log.info('Exception Type = %s, Exception Object = %s' %(exc_type,exc_value))
return 0
return 1
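    # Hedged usage sketch (illustrative names and arguments, not from the original
    # library): the return shape of gbp_policy_cfg_all() depends on the object type,
    # so callers unpack accordingly.
    #
    #   cfg = Gbp_Config()
    #   act_id = cfg.gbp_policy_cfg_all(1, 'action', 'allow_all', action_type='allow')
    #   ptg_id, l2p_id, subnet_id = cfg.gbp_policy_cfg_all(1, 'group', 'web_ptg')
    #   pt_id, port_id = cfg.gbp_policy_cfg_all(1, 'target', 'web_pt',
    #                                           policy_target_group=ptg_id)
    #   cfg.gbp_policy_cfg_all(0, 'group', ptg_id)   # delete the group again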
def gbp_policy_cfg_upd_all(self,cfgobj,name_uuid,attr):
"""
        --cfgobj == policy-* (where * = action, classifier, rule, ruleset, group, target, ...)
--name_uuid == UUID or name_string
--attr == MUST be a dict, where key: attribute_name, while val: attribute's value(new value to update)
Updates Policy Objects' editable attributes
"""
cfgobj_dict={"action":"policy-action","classifier":"policy-classifier","rule":"policy-rule",
"ruleset":"policy-rule-set","group":"policy-target-group","target":"policy-target",
"l2p":"l2policy","l3p":"l3policy","nsp":"network-service-policy",
"extseg":"external-segment","extpol":"external-policy","natpool":"nat-pool"}
if cfgobj != '':
if cfgobj not in cfgobj_dict:
raise KeyError
if name_uuid == '' or not isinstance(attr,dict):
_log.info('''Function Usage: gbp_policy_cfg_upd_all 'rule' "abc" {attr:attr_val}\n
--cmd_val == 0:delete; 1:create; 2:update\n
-- name_uuid == UUID or name_string\n''')
return 0
#Build the command with mandatory params
cmd = 'gbp %s-update ' % cfgobj_dict[cfgobj]+str(name_uuid)
# Build the cmd string for optional/non-default args/values
for arg, value in attr.iteritems():
if '_' in arg:
arg=string.replace(arg,'_','-')
cmd = cmd + " --" + "".join( '%s %s' %(arg, value ))
_log.info(cmd)
# Execute the update cmd
cmd_out = getoutput(cmd)
#_log.info(cmd_out)
        # Check for non-exception error strings, even though the command itself succeeded
if self.cmd_error_check(cmd_out) == 0:
return 0
return 1
def gbp_del_all_anyobj(self,cfgobj):
"""
This function deletes all entries for any policy-object
"""
cfgobj_dict={"action":"policy-action","classifier":"policy-classifier","rule":"policy-rule",
"ruleset":"policy-rule-set","group":"group","target":"policy-target",
"l2p":"l2policy","l3p":"l3policy","nsp":"network-service-policy",
"node":"servicechain-node","spec":"servicechain-spec",
"extseg":"external-segment","extpol":"external-policy","natpool":"nat-pool"}
if cfgobj != '':
if cfgobj not in cfgobj_dict:
raise KeyError
#Build the command with mandatory params
cmd = 'gbp %s-list -c id ' % cfgobj_dict[cfgobj]
cmd_out = getoutput(cmd)
uuid_list=[]
_out=cmd_out.split('\n')
final_out = _out[3:len(_out)-1]
_log.info("\nThe Policy Object %s to be deleted = \n%s" %(cfgobj_dict[cfgobj],cmd_out))
for item in final_out:
item = item.strip(' |')
cmd = 'gbp %s-delete ' % cfgobj_dict[cfgobj]+str(item)
cmd_out = getoutput(cmd)
_log.info(cmd_out)
return 1
def gbp_sc_cfg_all(self,cmd_val,cfgobj,name_uuid,nodes="",svc_type='lb'):
"""
::cmd_val= 0: delete; 1:create
::cfgobj = servicechain-*(where *=node;spec)
::name_uuid = UUID or name_string
::svc_type = LOADBALANCER or FIREWALL, defaulted to LB
Create/Update/Delete Policy Object
Returns assigned UUID on Create
kwargs addresses the need for passing required/optional params
"""
cfgobj_dict={"node":"servicechain-node","spec":"servicechain-spec"}
if cfgobj != '':
if cfgobj not in cfgobj_dict:
raise KeyError
if cmd_val == '' or name_uuid == '':
_log.info('''Function Usage: gbp_sc_cfg_all(0,"node","name or uuid")\n''')
return 0
#Build the command with mandatory params
if cmd_val == 0:
cmd = 'gbp %s-delete ' % cfgobj_dict[cfgobj]+str(name_uuid)
if cmd_val == 1 and cfgobj == 'spec':
cmd = 'gbp %s-create ' % cfgobj_dict[cfgobj]+str(name_uuid)+' --nodes "%s"' %(nodes)
if cmd_val == 1 and cfgobj == 'node':
if svc_type == 'lb':
service='LOADBALANCER'
else:
service='FIREWALL'
cmd = 'gbp %s-create ' % cfgobj_dict[cfgobj]+str(name_uuid)+' --template-file %s.template' %(svc_type)+' --servicetype '+service
_log.info(cmd)
# Execute the policy-rule-config-cmd
cmd_out = getoutput(cmd)
#_log.info(cmd_out)
        # Check for non-exception error strings, even though the command itself succeeded
if self.cmd_error_check(cmd_out) == 0:
return 0
if cmd_val==1:
obj_uuid = self.get_uuid(cmd_out)
return obj_uuid
    def neutron_cfg(self,cmd_val,cfgobj,name_uuid,**kwargs):
"""
--cfgobj== net,subnet
--cmd_val== 0:delete; 1:create; 2:update
--name_uuid == UUID or name_string
Create/Update/Delete Policy Object
Returns assigned UUID on Create
kwargs addresses the need for passing required/optional params
"""
cfgobj_dict={"network":"net","subnet":"subnet"}
if cfgobj != '':
if cfgobj not in cfgobj_dict:
raise KeyError
if cmd_val == '' or name_uuid == '':
_log.info('''Function Usage: gbp_policy_cfg_all 'rule' 0 "abc"\n
--cmd_val == 0:delete; 1:create; 2:update\n
-- name_uuid == UUID or name_string\n''')
return 0
#Build the command with mandatory params
if cmd_val == 0:
cmd = 'neutron %s-delete ' % cfgobj_dict[cfgobj]+str(name_uuid)
if cmd_val == 1:
cmd = 'neutron %s-create ' % cfgobj_dict[cfgobj]+str(name_uuid)
if cmd_val == 2:
cmd = 'neutron %s-update ' % cfgobj_dict[cfgobj]+str(name_uuid)
# Build the cmd string for optional/non-default args/values
for arg, value in kwargs.items():
if '_' in arg:
arg=string.replace(arg,'_','-')
cmd = cmd + " --" + "".join( '%s=%s' %(arg, value ))
_log.info(cmd)
# Execute the cmd
cmd_out = getoutput(cmd)
#_log.info(cmd_out)
        # Check for non-exception error strings, even though the command itself succeeded
if self.cmd_error_check(cmd_out) == 0:
return 0
if cmd_val==1:
obj_uuid = self.get_uuid(cmd_out)
return obj_uuid
def cmd_error_check(self,cmd_out):
"""
Verifies whether executed cmd has any known error string
"""
for err in self.err_strings:
if re.search('\\b%s\\b' %(err), cmd_out, re.I):
_log.info(cmd_out)
_log.info("Cmd execution failed! with this Return Error: \n%s" %(cmd_out))
return 0
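# Hedged end-to-end sketch (not part of the original file; names and CLI argument
# names are examples only and may differ per GBP version): a typical chain creates
# an action and a classifier, then wires them into a rule and a rule-set.
#
#   cfg = Gbp_Config()
#   act = cfg.gbp_action_config(1, 'demo_allow', action_type='allow')
#   cls = cfg.gbp_classif_config(1, 'demo_icmp', protocol='icmp', direction='bi')
#   rule = cfg.gbp_policy_cfg_all(1, 'rule', 'demo_rule', classifier=cls, actions=act)
#   ruleset = cfg.gbp_policy_cfg_all(1, 'ruleset', 'demo_ruleset', policy_rules=rule)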
|
{
"content_hash": "3f1388d0ac48544c5ac93033cb18afb7",
"timestamp": "",
"source": "github",
"line_count": 343,
"max_line_length": 161,
"avg_line_length": 42.48396501457726,
"alnum_prop": 0.5534586878945924,
"repo_name": "tbachman/group-based-policy",
"id": "c8b1da7bb0c86f83e7fa37df95bcefa10c6fdcfd",
"size": "15144",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gbpservice/tests/contrib/gbpfunctests/libs/config_libs.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "2130911"
},
{
"name": "Shell",
"bytes": "28973"
}
],
"symlink_target": ""
}
|
import argparse
import uuid
import fixtures
import mock
from oslo_config import cfg
from keystoneauth1.auth import base
from keystoneauth1.auth import cli
from keystoneauth1.tests.unit.auth import utils
class TesterPlugin(base.BaseAuthPlugin):
def get_token(self, *args, **kwargs):
return None
@classmethod
def get_options(cls):
# NOTE(jamielennox): this is kind of horrible. If you specify this as
# a deprecated_name= value it will convert - to _ which is not what we
# want for a CLI option.
deprecated = [cfg.DeprecatedOpt('test-other')]
return [
cfg.StrOpt('test-opt', help='tester', deprecated_opts=deprecated)
]
class CliTests(utils.TestCase):
def setUp(self):
super(CliTests, self).setUp()
self.p = argparse.ArgumentParser()
def env(self, name, value=None):
if value is not None:
# environment variables are always strings
value = str(value)
return self.useFixture(fixtures.EnvironmentVariable(name, value))
def test_creating_with_no_args(self):
ret = cli.register_argparse_arguments(self.p, [])
self.assertIsNone(ret)
self.assertIn('--os-auth-plugin', self.p.format_usage())
def test_load_with_nothing(self):
cli.register_argparse_arguments(self.p, [])
opts = self.p.parse_args([])
self.assertIsNone(cli.load_from_argparse_arguments(opts))
@utils.mock_plugin
def test_basic_params_added(self, m):
name = uuid.uuid4().hex
argv = ['--os-auth-plugin', name]
ret = cli.register_argparse_arguments(self.p, argv)
self.assertIs(utils.MockPlugin, ret)
for n in ('--os-a-int', '--os-a-bool', '--os-a-float'):
self.assertIn(n, self.p.format_usage())
m.assert_called_once_with(name)
@utils.mock_plugin
def test_param_loading(self, m):
name = uuid.uuid4().hex
argv = ['--os-auth-plugin', name,
'--os-a-int', str(self.a_int),
'--os-a-float', str(self.a_float),
'--os-a-bool', str(self.a_bool)]
klass = cli.register_argparse_arguments(self.p, argv)
self.assertIs(utils.MockPlugin, klass)
opts = self.p.parse_args(argv)
self.assertEqual(name, opts.os_auth_plugin)
a = cli.load_from_argparse_arguments(opts)
self.assertTestVals(a)
self.assertEqual(name, opts.os_auth_plugin)
self.assertEqual(str(self.a_int), opts.os_a_int)
self.assertEqual(str(self.a_float), opts.os_a_float)
self.assertEqual(str(self.a_bool), opts.os_a_bool)
@utils.mock_plugin
def test_default_options(self, m):
name = uuid.uuid4().hex
argv = ['--os-auth-plugin', name,
'--os-a-float', str(self.a_float)]
klass = cli.register_argparse_arguments(self.p, argv)
self.assertIs(utils.MockPlugin, klass)
opts = self.p.parse_args(argv)
self.assertEqual(name, opts.os_auth_plugin)
a = cli.load_from_argparse_arguments(opts)
self.assertEqual(self.a_float, a['a_float'])
self.assertEqual(3, a['a_int'])
@utils.mock_plugin
def test_with_default_string_value(self, m):
name = uuid.uuid4().hex
klass = cli.register_argparse_arguments(self.p, [], default=name)
self.assertIs(utils.MockPlugin, klass)
m.assert_called_once_with(name)
@utils.mock_plugin
def test_overrides_default_string_value(self, m):
name = uuid.uuid4().hex
default = uuid.uuid4().hex
argv = ['--os-auth-plugin', name]
klass = cli.register_argparse_arguments(self.p, argv, default=default)
self.assertIs(utils.MockPlugin, klass)
m.assert_called_once_with(name)
@utils.mock_plugin
def test_with_default_type_value(self, m):
klass = cli.register_argparse_arguments(self.p, [],
default=utils.MockPlugin)
self.assertIs(utils.MockPlugin, klass)
self.assertEqual(0, m.call_count)
@utils.mock_plugin
def test_overrides_default_type_value(self, m):
# using this test plugin would fail if called because there
# is no get_options() function
class TestPlugin(object):
pass
name = uuid.uuid4().hex
argv = ['--os-auth-plugin', name]
klass = cli.register_argparse_arguments(self.p, argv,
default=TestPlugin)
self.assertIs(utils.MockPlugin, klass)
m.assert_called_once_with(name)
@utils.mock_plugin
def test_env_overrides_default_opt(self, m):
name = uuid.uuid4().hex
val = uuid.uuid4().hex
self.env('OS_A_STR', val)
klass = cli.register_argparse_arguments(self.p, [], default=name)
opts = self.p.parse_args([])
a = klass.load_from_argparse_arguments(opts)
self.assertEqual(val, a['a_str'])
def test_deprecated_cli_options(self):
TesterPlugin.register_argparse_arguments(self.p)
val = uuid.uuid4().hex
opts = self.p.parse_args(['--os-test-other', val])
self.assertEqual(val, opts.os_test_opt)
def test_deprecated_multi_cli_options(self):
TesterPlugin.register_argparse_arguments(self.p)
val1 = uuid.uuid4().hex
val2 = uuid.uuid4().hex
        # argparse rules say that the last specified wins.
opts = self.p.parse_args(['--os-test-other', val2,
'--os-test-opt', val1])
self.assertEqual(val1, opts.os_test_opt)
def test_deprecated_env_options(self):
val = uuid.uuid4().hex
with mock.patch.dict('os.environ', {'OS_TEST_OTHER': val}):
TesterPlugin.register_argparse_arguments(self.p)
opts = self.p.parse_args([])
self.assertEqual(val, opts.os_test_opt)
def test_deprecated_env_multi_options(self):
val1 = uuid.uuid4().hex
val2 = uuid.uuid4().hex
with mock.patch.dict('os.environ', {'OS_TEST_OPT': val1,
'OS_TEST_OTHER': val2}):
TesterPlugin.register_argparse_arguments(self.p)
opts = self.p.parse_args([])
self.assertEqual(val1, opts.os_test_opt)
|
{
"content_hash": "ba16873a0ef061250b2976b8a0c17baa",
"timestamp": "",
"source": "github",
"line_count": 184,
"max_line_length": 78,
"avg_line_length": 34.43478260869565,
"alnum_prop": 0.6025883838383839,
"repo_name": "citrix-openstack-build/keystoneauth",
"id": "f2cbde40702d56564c809d3a17080522cb23c4d7",
"size": "6882",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "keystoneauth1/tests/unit/auth/test_cli.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "393336"
}
],
"symlink_target": ""
}
|
from numba import jit
import numpy as np
from functools import lru_cache as cache
@cache()
@jit
def invertPoisson(x, mi):
""" Calculates the value that would be found in a
poisson distribution with lambda = mi at probability
value X
"""
if(mi >= 0):
if(x >= 0):
if(x < 1):
L = np.exp(- mi)
k = 1
prob = 1 * x
while(prob > L):
k += 1
prob = prob * x
return k
@jit
def calcNumberBins(lambda_i, omega_i, weights=1, adjusting=0):
""" Transform a set of real valued bins (0..1) into
a set of integer bins, using the value of real data
(omega) as the mean for the poisson distribution"""
invP = np.vectorize(invertPoisson)
bin = (invP(lambda_i, omega_i * weights) - adjusting).tolist()
invertPoisson.cache_clear()
return bin
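# Hedged example (illustrative, not part of the original module): calcNumberBins()
# maps real-valued bins in (0..1) to integer counts via invertPoisson(), using the
# observed values as the Poisson means.
#
#   lambda_i = [0.05, 0.50, 0.95]        # probability values per bin
#   omega_i = [2.0, 2.0, 2.0]            # observed rates used as Poisson means
#   calcNumberBins(lambda_i, omega_i)    # -> increasing integer counts
#                                        #    (roughly 1, 3 and ~40 here): larger
#                                        #    probability values yield larger bins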
def calcNumberBinsOld(lambda_i, omega_i, weights=1, adjusting=0):
""" Transform a set of real valued bins (0..1) into
a set of integer bins, using the value of real data
(omega) as the mean for the poisson distribution"""
bin = []
    if weights == 1:
for lam, om in zip(lambda_i, omega_i):
bin.append(invertPoisson(lam, om) - adjusting)
else:
for lam, om, weight in zip(lambda_i, omega_i, weights):
bin.append(invertPoisson(lam, om * weight) - adjusting)
return bin
def normalize(auxList):
""" Normalize the number of observations, to a value between 0 and 1"""
sumValue = sum(auxList)
# divide each entry by this sumValue
aux2List = []
aux2List[:] = [12 / sumValue if x >= 12 else x / sumValue for x in auxList]
return aux2List
def percentile(value, sample):
""" Defines how many observations are less or igual to the sample
It sorts the vector sample, and advances it until we find a
value in it that is bigger than the sample
"""
numberOfSamples = len(sample)
sampleCopy = sample.tolist()
sampleCopy.sort()
for i in range(numberOfSamples):
if value <= sampleCopy[i]:
return float(i / numberOfSamples)
return 1.0
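# Hedged worked example (not part of the original module): with
# sample = np.array([1, 2, 3, 4]), percentile(3, sample) sorts the sample, stops at
# the first entry >= 3 (index 2) and returns 2 / 4 = 0.5.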
def calcFactorial(filename, limit=200):
for i in range(limit):
with open(filename, 'a') as f:
fact = np.math.factorial(i)
f.write(str(fact))
f.write(str("\n"))
def loadFactorial(filename):
fact = list()
f = open(filename, "r")
i = 1
for line in f:
line = line.strip('\n')
fact.insert(i, int(line))
i += 1
return fact
|
{
"content_hash": "7fa5d558c57d1d3398f67e8eb7ff2fd0",
"timestamp": "",
"source": "github",
"line_count": 94,
"max_line_length": 79,
"avg_line_length": 27.69148936170213,
"alnum_prop": 0.5908567038033039,
"repo_name": "PyQuake/earthquakemodels",
"id": "f9b3fc9a3a21953699997d176c5b4516a5055924",
"size": "2603",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "code/models/mathUtil.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
import sys
import os
import time
import re
import random
import weakref
import subprocess
import socket
import asyncore
import inspect
#----------------------------------------------------------------------------------------------------------------------
# wpantund properties
WPAN_STATE = 'NCP:State'
WPAN_NAME = 'Network:Name'
WPAN_PANID = 'Network:PANID'
WPAN_XPANID = 'Network:XPANID'
WPAN_KEY = 'Network:Key'
WPAN_KEY_INDEX = 'Network:KeyIndex'
WPAN_CHANNEL = 'NCP:Channel'
WPAN_HW_ADDRESS = 'NCP:HardwareAddress'
WPAN_EXT_ADDRESS = 'NCP:ExtendedAddress'
WPAN_POLL_INTERVAL = 'NCP:SleepyPollInterval'
WPAN_NODE_TYPE = 'Network:NodeType'
WPAN_ROLE = 'Network:Role'
WPAN_PARTITION_ID = 'Network:PartitionId'
WPAN_NCP_VERSION = 'NCP:Version'
WPAN_NCP_MCU_POWER_STATE = "NCP:MCUPowerState"
WPAN_NETWORK_ALLOW_JOIN = 'com.nestlabs.internal:Network:AllowingJoin'
WPAN_NETWORK_PASSTHRU_PORT = 'com.nestlabs.internal:Network:PassthruPort'
WPAN_IP6_LINK_LOCAL_ADDRESS = "IPv6:LinkLocalAddress"
WPAN_IP6_MESH_LOCAL_ADDRESS = "IPv6:MeshLocalAddress"
WPAN_IP6_MESH_LOCAL_PREFIX = "IPv6:MeshLocalPrefix"
WPAN_IP6_ALL_ADDRESSES = "IPv6:AllAddresses"
WPAN_IP6_MULTICAST_ADDRESSES = "IPv6:MulticastAddresses"
WPAN_THREAD_RLOC16 = "Thread:RLOC16"
WPAN_THREAD_ROUTER_ID = "Thread:RouterID"
WPAN_THREAD_LEADER_ADDRESS = "Thread:Leader:Address"
WPAN_THREAD_LEADER_ROUTER_ID = "Thread:Leader:RouterID"
WPAN_THREAD_LEADER_WEIGHT = "Thread:Leader:Weight"
WPAN_THREAD_LEADER_LOCAL_WEIGHT = "Thread:Leader:LocalWeight"
WPAN_THREAD_LEADER_NETWORK_DATA = "Thread:Leader:NetworkData"
WPAN_THREAD_STABLE_LEADER_NETWORK_DATA = "Thread:Leader:StableNetworkData"
WPAN_THREAD_NETWORK_DATA = "Thread:NetworkData"
WPAN_THREAD_CHILD_TABLE = "Thread:ChildTable"
WPAN_THREAD_CHILD_TABLE_ASVALMAP = "Thread:ChildTable:AsValMap"
WPAN_THREAD_CHILD_TABLE_ADDRESSES = "Thread:ChildTable:Addresses"
WPAN_THREAD_NEIGHBOR_TABLE = "Thread:NeighborTable"
WPAN_THREAD_NEIGHBOR_TABLE_ASVALMAP = "Thread:NeighborTable:AsValMap"
WPAN_THREAD_NEIGHBOR_TABLE_ERR_RATES = "Thread:NeighborTable:ErrorRates"
WPAN_THREAD_NEIGHBOR_TABLE_ERR_RATES_AVVALMAP = "Thread:NeighborTable:ErrorRates:AsValMap"
WPAN_THREAD_ROUTER_TABLE = "Thread:RouterTable"
WPAN_THREAD_ROUTER_TABLE_ASVALMAP = "Thread:RouterTable:AsValMap"
WPAN_THREAD_CHILD_TIMEOUT = "Thread:ChildTimeout"
WPAN_THREAD_PARENT = "Thread:Parent"
WPAN_THREAD_PARENT_ASVALMAP = "Thread:Parent:AsValMap"
WPAN_THREAD_NETWORK_DATA_VERSION = "Thread:NetworkDataVersion"
WPAN_THREAD_STABLE_NETWORK_DATA = "Thread:StableNetworkData"
WPAN_THREAD_STABLE_NETWORK_DATA_VERSION = "Thread:StableNetworkDataVersion"
WPAN_THREAD_PREFERRED_ROUTER_ID = "Thread:PreferredRouterID"
WPAN_THREAD_COMMISSIONER_ENABLED = "Thread:Commissioner:Enabled"
WPAN_THREAD_DEVICE_MODE = "Thread:DeviceMode"
WPAN_THREAD_OFF_MESH_ROUTES = "Thread:OffMeshRoutes"
WPAN_THREAD_ON_MESH_PREFIXES = "Thread:OnMeshPrefixes"
WPAN_THREAD_ROUTER_ROLE_ENABLED = "Thread:RouterRole:Enabled"
WPAN_THREAD_CONFIG_FILTER_RLOC_ADDRESSES = "Thread:Config:FilterRLOCAddresses"
WPAN_THREAD_ROUTER_UPGRADE_THRESHOLD = "Thread:RouterUpgradeThreshold"
WPAN_THREAD_ROUTER_DOWNGRADE_THRESHOLD = "Thread:RouterDowngradeThreshold"
WPAN_THREAD_ACTIVE_DATASET = "Thread:ActiveDataset"
WPAN_THREAD_ACTIVE_DATASET_ASVALMAP = "Thread:ActiveDataset:AsValMap"
WPAN_THREAD_PENDING_DATASET = "Thread:PendingDataset"
WPAN_THREAD_PENDING_DATASET_ASVALMAP = "Thread:PendingDataset:AsValMap"
WPAN_THREAD_ADDRESS_CACHE_TABLE = "Thread:AddressCacheTable"
WPAN_THREAD_ADDRESS_CACHE_TABLE_ASVALMAP = "Thread:AddressCacheTable:AsValMap"
WPAN_OT_LOG_LEVEL = "OpenThread:LogLevel"
WPAN_OT_STEERING_DATA_ADDRESS = "OpenThread:SteeringData:Address"
WPAN_OT_STEERING_DATA_SET_WHEN_JOINABLE = "OpenThread:SteeringData:SetWhenJoinable"
WPAN_OT_MSG_BUFFER_COUNTERS = "OpenThread:MsgBufferCounters"
WPAN_OT_MSG_BUFFER_COUNTERS_AS_STRING = "OpenThread:MsgBufferCounters:AsString"
WPAN_OT_DEBUG_TEST_ASSERT = "OpenThread:Debug:TestAssert"
WPAN_OT_DEBUG_TEST_WATCHDOG = "OpenThread:Debug:TestWatchdog"
WPAN_NCP_COUNTER_ALL_MAC = "NCP:Counter:AllMac"
WPAN_NCP_COUNTER_ALL_MAC_ASVALMAP = "NCP:Counter:AllMac:AsValMap"
WPAN_MAC_WHITELIST_ENABLED = "MAC:Whitelist:Enabled"
WPAN_MAC_WHITELIST_ENTRIES = "MAC:Whitelist:Entries"
WPAN_MAC_WHITELIST_ENTRIES_ASVALMAP = "MAC:Whitelist:Entries:AsValMap"
WPAN_MAC_BLACKLIST_ENABLED = "MAC:Blacklist:Enabled"
WPAN_MAC_BLACKLIST_ENTRIES = "MAC:Blacklist:Entries"
WPAN_MAC_BLACKLIST_ENTRIES_ASVALMAP = "MAC:Blacklist:Entries:AsValMap"
WPAN_CHILD_SUPERVISION_INTERVAL = "ChildSupervision:Interval"
WPAN_CHILD_SUPERVISION_CHECK_TIMEOUT = "ChildSupervision:CheckTimeout"
WPAN_JAM_DETECTION_STATUS = "JamDetection:Status"
WPAN_JAM_DETECTION_ENABLE = "JamDetection:Enable"
WPAN_JAM_DETECTION_RSSI_THRESHOLD = "JamDetection:RssiThreshold"
WPAN_JAM_DETECTION_WINDOW = "JamDetection:Window"
WPAN_JAM_DETECTION_BUSY_PERIOD = "JamDetection:BusyPeriod"
WPAN_JAM_DETECTION_DEBUG_HISTORY_BITMAP = "JamDetection:Debug:HistoryBitmap"
WPAN_CHANNEL_MONITOR_SAMPLE_INTERVAL = "ChannelMonitor:SampleInterval"
WPAN_CHANNEL_MONITOR_RSSI_THRESHOLD = "ChannelMonitor:RssiThreshold"
WPAN_CHANNEL_MONITOR_SAMPLE_WINDOW = "ChannelMonitor:SampleWindow"
WPAN_CHANNEL_MONITOR_SAMPLE_COUNT = "ChannelMonitor:SampleCount"
WPAN_CHANNEL_MONITOR_CHANNEL_QUALITY = "ChannelMonitor:ChannelQuality"
WPAN_CHANNEL_MONITOR_CHANNEL_QUALITY_ASVALMAP = "ChannelMonitor:ChannelQuality:AsValMap"
WPAN_CHANNEL_MANAGER_NEW_CHANNEL = "ChannelManager:NewChannel"
WPAN_CHANNEL_MANAGER_DELAY = "ChannelManager:Delay"
WPAN_CHANNEL_MANAGER_CHANNEL_SELECT = "ChannelManager:ChannelSelect"
WPAN_CHANNEL_MANAGER_AUTO_SELECT_ENABLED = "ChannelManager:AutoSelect:Enabled"
WPAN_CHANNEL_MANAGER_AUTO_SELECT_INTERVAL = "ChannelManager:AutoSelect:Interval"
WPAN_CHANNEL_MANAGER_SUPPORTED_CHANNEL_MASK = "ChannelManager:SupportedChannelMask"
WPAN_CHANNEL_MANAGER_FAVORED_CHANNEL_MASK = "ChannelManager:FavoredChannelMask"
#----------------------------------------------------------------------------------------------------------------------
# Valid state values
STATE_UNINITIALIZED = '"uninitialized"'
STATE_FAULT = '"uninitialized:fault"'
STATE_UPGRADING = '"uninitialized:upgrading"'
STATE_DEEP_SLEEP = '"offline:deep-sleep"'
STATE_OFFLINE = '"offline"'
STATE_COMMISSIONED = '"offline:commissioned"'
STATE_ASSOCIATING = '"associating"'
STATE_CREDENTIALS_NEEDED = '"associating:credentials-needed"'
STATE_ASSOCIATED = '"associated"'
STATE_ISOLATED = '"associated:no-parent"'
STATE_NETWAKE_ASLEEP = '"associated:netwake-asleep"'
STATE_NETWAKE_WAKING = '"associated:netwake-waking"'
#-----------------------------------------------------------------------------------------------------------------------
# MCU Power state from `WPAN_NCP_MCU_POWER_STATE`
MCU_POWER_STATE_ON = '"on"'
MCU_POWER_STATE_LOW_POWER = '"low-power"'
MCU_POWER_STATE_OFF = '"off"'
#-----------------------------------------------------------------------------------------------------------------------
# Node types (from `WPAN_NODE_TYPE` property)
NODE_TYPE_UNKNOWN = '"unknown"'
NODE_TYPE_LEADER = '"leader"'
NODE_TYPE_ROUTER = '"router"'
NODE_TYPE_END_DEVICE = '"end-device"'
NODE_TYPE_SLEEPY_END_DEVICE = '"sleepy-end-device"'
NODE_TYPE_COMMISSIONER = '"commissioner"'
NODE_TYPE_NEST_LURKER = '"nl-lurker"'
#-----------------------------------------------------------------------------------------------------------------------
# Node types used by `Node.join()`
JOIN_TYPE_ROUTER = 'r'
JOIN_TYPE_END_DEVICE = 'e'
JOIN_TYPE_SLEEPY_END_DEVICE = 's'
#-----------------------------------------------------------------------------------------------------------------------
# Bit Flags for Thread Device Mode `WPAN_THREAD_DEVICE_MODE`
THREAD_MODE_FLAG_FULL_NETWORK_DATA = (1 << 0)
THREAD_MODE_FLAG_FULL_THREAD_DEV = (1 << 1)
THREAD_MODE_FLAG_SECURE_DATA_REQUEST = (1 << 2)
THREAD_MODE_FLAG_RX_ON_WHEN_IDLE = (1 << 3)
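# Illustrative note (an assumption, not from the original file): these flags are OR-ed
# together into the value reported by WPAN_THREAD_DEVICE_MODE. For example, a full
# Thread device that keeps its radio on carries all four bits:
#
#   THREAD_MODE_FLAG_FULL_NETWORK_DATA | THREAD_MODE_FLAG_FULL_THREAD_DEV | \
#       THREAD_MODE_FLAG_SECURE_DATA_REQUEST | THREAD_MODE_FLAG_RX_ON_WHEN_IDLE   # == 0x0F
#
# while a sleepy end device clears the rx-on-when-idle and full-thread-dev bits.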
_OT_BUILDDIR = os.getenv('top_builddir', '../..')
_WPANTUND_PREFIX = os.getenv('WPANTUND_PREFIX', '/usr/local')
#-----------------------------------------------------------------------------------------------------------------------
def _log(text, new_line=True, flush=True):
sys.stdout.write(text)
if new_line:
sys.stdout.write('\n')
if flush:
sys.stdout.flush()
#-----------------------------------------------------------------------------------------------------------------------
# Node class
class Node(object):
""" A wpantund OT NCP instance """
_VERBOSE = False # defines the default verbosity setting (can be changed per `Node`)
_SPEED_UP_FACTOR = 1 # defines the default time speed up factor
# path to `wpantund`, `wpanctl`, `ot-ncp-ftd`,`ot-ncp` and `ot-ncp-radio`
_WPANTUND = '%s/sbin/wpantund' % _WPANTUND_PREFIX
_WPANCTL = '%s/bin/wpanctl' % _WPANTUND_PREFIX
_OT_NCP_FTD = '%s/examples/apps/ncp/ot-ncp-ftd' % _OT_BUILDDIR
_OT_NCP_FTD_POSIX_APP = '%s/src/posix/ot-ncp' % _OT_BUILDDIR
_OT_NCP_RADIO = '%s/examples/apps/ncp/ot-ncp-radio' % _OT_BUILDDIR
# Environment variable used to determine how to run OpenThread
# If set to 1, then posix-app (`ot-ncp`) is used along with a posix RCP `ot-ncp-radio`.
# Otherwise, the posix NCP `ot-ncp-ftd` is used
_POSIX_APP_ENV_VAR = 'TORANJ_POSIX_APP_RCP_MODEL'
_TUND_LOG_TO_FILE = True # determines if the wpantund logs are saved in file or sent to stdout
    _TUND_LOG_FNAME = 'wpantund-logs'               # name of wpantund log file (used when _TUND_LOG_TO_FILE is True)
# interface name
_INTFC_NAME_PREFIX = 'utun' if sys.platform == 'darwin' else 'wpan'
_START_INDEX = 4 if sys.platform == 'darwin' else 1
_cur_index = _START_INDEX
_all_nodes = weakref.WeakSet()
def __init__(self, verbose=_VERBOSE):
"""Creates a new `Node` instance"""
index = Node._cur_index
Node._cur_index += 1
self._index = index
self._interface_name = self._INTFC_NAME_PREFIX + str(index)
self._verbose = verbose
# Check if env variable `TORANJ_POSIX_APP_RCP_MODEL` is defined
        # and use it to determine whether to operate in posix-app (NCP over RCP) mode.
if self._POSIX_APP_ENV_VAR in os.environ:
use_posix_app_with_rcp = (os.environ[self._POSIX_APP_ENV_VAR] in ['1', 'yes'])
else:
use_posix_app_with_rcp = False
if use_posix_app_with_rcp:
ncp_socket_path = 'system:{} -s {} {} {}'.format(self._OT_NCP_FTD_POSIX_APP, self._SPEED_UP_FACTOR,
self._OT_NCP_RADIO, index)
else:
ncp_socket_path = 'system:{} {} {}'.format(self._OT_NCP_FTD, index, self._SPEED_UP_FACTOR)
cmd = self._WPANTUND + \
' -o Config:NCP:SocketPath \"{}\"'.format(ncp_socket_path) + \
' -o Config:TUN:InterfaceName {}'.format(self._interface_name) + \
' -o Config:NCP:DriverName spinel' + \
' -o Daemon:SyslogMask \"all -debug\"'
if Node._TUND_LOG_TO_FILE:
self._tund_log_file = open(self._TUND_LOG_FNAME + str(index) + '.log', 'wb')
else:
self._tund_log_file = None
if self._verbose:
_log('$ Node{}.__init__() cmd: {}'.format(index, cmd))
self._wpantund_process = subprocess.Popen(cmd, shell=True, stderr=self._tund_log_file)
self._wpanctl_cmd = self._WPANCTL + ' -I ' + self._interface_name + ' '
self._recvers = weakref.WeakValueDictionary() # map from local_port to `AsyncReceiver` object
Node._all_nodes.add(self)
def __del__(self):
self._wpantund_process.poll()
if self._wpantund_process.returncode is None:
self._wpantund_process.terminate()
self._wpantund_process.wait()
def __repr__(self):
return 'Node (index={}, interface_name={})'.format(self._index, self._interface_name)
@property
def index(self):
return self._index
@property
def interface_name(self):
return self._interface_name
@property
def tund_log_file(self):
return self._tund_log_file
#------------------------------------------------------------------------------------------------------------------
# Executing a `wpanctl` command
def wpanctl(self, cmd):
""" Runs a wpanctl command on the given wpantund/OT-NCP instance and returns the output """
if self._verbose:
_log('$ Node{}.wpanctl(\'{}\')'.format(self._index, cmd), new_line=False)
result = subprocess.check_output(self._wpanctl_cmd + cmd, shell=True, stderr=subprocess.STDOUT)
if len(result) >= 1 and result[-1] == '\n': # remove the last char if it is '\n',
result = result[:-1]
if self._verbose:
if '\n' in result:
_log(':')
for line in result.splitlines():
_log(' ' + line)
else:
_log(' -> \'{}\''.format(result))
return result
#------------------------------------------------------------------------------------------------------------------
# APIs matching `wpanctl` commands.
def get(self, prop_name, value_only=True):
return self.wpanctl('get ' + ('-v ' if value_only else '') + prop_name)
def set(self, prop_name, value, binary_data=False):
return self._update_prop('set', prop_name, value, binary_data)
def add(self, prop_name, value, binary_data=False):
return self._update_prop('add', prop_name, value, binary_data)
def remove(self, prop_name, value, binary_data=False):
return self._update_prop('remove', prop_name, value, binary_data)
def _update_prop(self, action, prop_name, value, binary_data):
return self.wpanctl(action + ' ' + prop_name + ' ' + ('-d ' if binary_data else '') +
'-v ' + value) # use -v to handle values starting with `-`.
def reset(self):
return self.wpanctl('reset')
def status(self):
return self.wpanctl('status')
def leave(self):
return self.wpanctl('leave')
def form(self, name, channel=None, channel_mask=None, panid=None, xpanid=None, key=None, key_index=None,
node_type=None, mesh_local_prefix=None, legacy_prefix=None):
return self.wpanctl('form \"' + name + '\"' +
(' -c {}'.format(channel) if channel is not None else '') +
(' -m {}'.format(channel_mask) if channel_mask is not None else '') +
(' -p {}'.format(panid) if panid is not None else '') +
(' -x {}'.format(xpanid) if xpanid is not None else '') +
(' -k {}'.format(key) if key is not None else '') +
(' -i {}'.format(key_index) if key_index is not None else '') +
(' -T {}'.format(node_type) if node_type is not None else '') +
(' -M {}'.format(mesh_local_prefix) if mesh_local_prefix is not None else '') +
(' -L {}'.format(legacy_prefix) if legacy_prefix is not None else ''))
def join(self, name, channel=None, node_type=None, panid=None, xpanid=None, key=None):
return self.wpanctl('join \"' + name + '\"' +
(' -c {}'.format(channel) if channel is not None else '') +
(' -T {}'.format(node_type) if node_type is not None else '') +
(' -p {}'.format(panid) if panid is not None else '') +
(' -x {}'.format(xpanid) if xpanid is not None else '') +
(' -k {}'.format(key) if key is not None else '') +
(' -n'))
def active_scan(self, channel=None):
return self.wpanctl('scan' +
(' -c {}'.format(channel) if channel is not None else ''))
def energy_scan(self, channel=None):
return self.wpanctl('scan -e' +
(' -c {}'.format(channel) if channel is not None else ''))
def discover_scan(self, channel=None, joiner_only=False, enable_filtering=False, panid_filter=None):
return self.wpanctl('scan -d' +
(' -c {}'.format(channel) if channel is not None else '') +
(' -j' if joiner_only else '') +
(' -e' if enable_filtering else '') +
(' -p {}'.format(panid_filter) if panid_filter is not None else ''))
def permit_join(self, duration_sec=None, port=None, udp=True, tcp=True):
if not udp and not tcp: # incorrect use!
return ''
traffic_type = ''
if udp and not tcp:
traffic_type = ' --udp'
if tcp and not udp:
traffic_type = ' --tcp'
if port is not None and duration_sec is None:
duration_sec = '240'
return self.wpanctl('permit-join' +
(' {}'.format(duration_sec) if duration_sec is not None else '') +
(' {}'.format(port) if port is not None else '') +
traffic_type)
def config_gateway(self, prefix, default_route=False, priority=None):
return self.wpanctl('config-gateway ' + prefix +
(' -d' if default_route else '') +
(' -P {}'.format(priority) if priority is not None else ''))
def add_prefix(self, prefix, prefix_len=None, priority=None, stable=True, on_mesh=False, slaac=False, dhcp=False,
configure=False, default_route=False, preferred=False):
return self.wpanctl('add-prefix ' + prefix +
(' -l {}'.format(prefix_len) if prefix_len is not None else '') +
(' -P {}'.format(priority) if priority is not None else '') +
(' -s' if stable else '') +
(' -f' if preferred else '') +
(' -a' if slaac else '') +
(' -d' if dhcp else '') +
(' -c' if configure else '') +
(' -r' if default_route else '') +
(' -o' if on_mesh else ''))
def remove_prefix(self, prefix, prefix_len=None):
return self.wpanctl('remove-prefix ' + prefix +
(' -l {}'.format(prefix_len) if prefix_len is not None else ''))
def add_route(self, route_prefix, prefix_len=None, priority=None, stable=True):
"""route priority [(>0 for high, 0 for medium, <0 for low)]"""
return self.wpanctl('add-route ' + route_prefix +
(' -l {}'.format(prefix_len) if prefix_len is not None else '') +
(' -p {}'.format(priority) if priority is not None else '') +
('' if stable else '-n'))
def remove_route(self, route_prefix, prefix_len=None, priority=None, stable=True):
"""route priority [(>0 for high, 0 for medium, <0 for low)]"""
return self.wpanctl('remove-route ' + route_prefix +
(' -l {}'.format(prefix_len) if prefix_len is not None else '') +
(' -p {}'.format(priority) if priority is not None else ''))
#------------------------------------------------------------------------------------------------------------------
# Helper methods
def is_associated(self):
return self.get(WPAN_STATE) == STATE_ASSOCIATED
def join_node(self, node, node_type=JOIN_TYPE_ROUTER, should_set_key=True):
"""Join a network specified by another node, `node` should be a Node"""
if not node.is_associated():
return "{} is not associated".format(node)
return self.join(
node.get(WPAN_NAME)[1:-1],
channel=node.get(WPAN_CHANNEL),
node_type=node_type,
panid=node.get(WPAN_PANID),
xpanid=node.get(WPAN_XPANID),
key=node.get(WPAN_KEY)[1:-1] if should_set_key else None)
def whitelist_node(self, node):
"""Adds a given node (of type `Node`) to the whitelist of `self` and enables whitelisting on `self`"""
self.add(WPAN_MAC_WHITELIST_ENTRIES, node.get(WPAN_EXT_ADDRESS)[1:-1])
self.set(WPAN_MAC_WHITELIST_ENABLED, '1')
def un_whitelist_node(self, node):
"""Removes a given node (of node `Node) from the whitelist"""
self.remove(WPAN_MAC_WHITELIST_ENTRIES, node.get(WPAN_EXT_ADDRESS)[1:-1])
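    # Hedged usage sketch (illustrative, not from the original file): a typical
    # two-node topology whitelists both sides before joining:
    #
    #   node1, node2 = Node(), Node()
    #   Node.init_all_nodes()
    #   node1.form('demo-net')
    #   node1.whitelist_node(node2)
    #   node2.whitelist_node(node1)
    #   node2.join_node(node1, node_type=JOIN_TYPE_END_DEVICE)
    #   verify(node2.is_associated())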
def is_in_scan_result(self, scan_result):
"""Checks if node is in the scan results
`scan_result` must be an array of `ScanResult` object (see `parse_scan_result`).
"""
joinable = (self.get(WPAN_NETWORK_ALLOW_JOIN) == 'true')
panid = self.get(WPAN_PANID)
xpanid = self.get(WPAN_XPANID)[2:]
name = self.get(WPAN_NAME)[1:-1]
channel = self.get(WPAN_CHANNEL)
ext_address = self.get(WPAN_EXT_ADDRESS)[1:-1]
for item in scan_result:
if all( [item.network_name == name,
item.panid == panid,
item.xpanid == xpanid,
item.channel == channel,
item.ext_address == ext_address,
(item.type == ScanResult.TYPE_DISCOVERY_SCAN) or (item.joinable == joinable) ] ):
return True
return False
def find_ip6_address_with_prefix(self, prefix):
"""Find an IPv6 address on node matching a given prefix.
        `prefix` should be a string containing the prefix.
Returns a string containing the IPv6 address matching the prefix or empty string if no address found.
"""
if len(prefix) > 2 and prefix[-1] == ':' and prefix[-2] == ':':
prefix = prefix[:-1]
all_addrs = parse_list(self.get(WPAN_IP6_ALL_ADDRESSES))
matched_addr = [addr for addr in all_addrs if addr.startswith(prefix)]
return matched_addr[0] if len(matched_addr) >= 1 else ''
def add_ip6_address_on_interface(self, address, prefix_len=64):
"""Adds an IPv6 interface on the network interface.
`address` should be string containing the IPv6 address.
`prefix_len` is an `int` specifying the prefix length.
NOTE: this method uses linux `ip` command.
"""
cmd = 'ip -6 addr add '+ address + '/{} dev '.format(prefix_len) + self.interface_name
if self._verbose:
_log('$ Node{} \'{}\')'.format(self._index, cmd))
result = subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)
return result
def remove_ip6_address_on_interface(self, address, prefix_len=64):
"""Removes an IPv6 interface on the network interface.
`address` should be string containing the IPv6 address.
`prefix_len` is an `int` specifying the prefix length.
NOTE: this method uses linux `ip` command.
"""
cmd = 'ip -6 addr del '+ address + '/{} dev '.format(prefix_len) + self.interface_name
if self._verbose:
_log('$ Node{} \'{}\')'.format(self._index, cmd))
result = subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)
return result
#------------------------------------------------------------------------------------------------------------------
# class methods
@classmethod
def init_all_nodes(cls, disable_logs=True, wait_time=15):
"""Issues a `wpanctl.leave` on all `Node` objects and waits for them to be ready"""
random.seed(123456)
time.sleep(0.5)
for node in Node._all_nodes:
start_time = time.time()
while True:
try:
node._wpantund_process.poll()
if node._wpantund_process.returncode is not None:
print 'Node {} wpantund instance has terminated unexpectedly'.format(node)
if disable_logs:
node.set(WPAN_OT_LOG_LEVEL, '0')
node.leave()
except subprocess.CalledProcessError as e:
if (node._verbose):
_log(' -> \'{}\' exit code: {}'.format(e.output, e.returncode))
interval = time.time() - start_time
if interval > wait_time:
print 'Took too long to init node {} ({}>{} sec)'.format(node, interval, wait_time)
raise
except:
raise
else:
break
time.sleep(0.4)
@classmethod
def finalize_all_nodes(cls):
"""Finalizes all previously created `Node` instances (stops the wpantund process)"""
for node in Node._all_nodes:
node._wpantund_process.terminate()
node._wpantund_process.wait()
@classmethod
def set_time_speedup_factor(cls, factor):
"""Sets up the time speed up factor - should be set before creating any `Node` objects"""
if len(Node._all_nodes) != 0:
raise Node._NodeError('set_time_speedup_factor() cannot be called after creating a `Node`')
Node._SPEED_UP_FACTOR = factor
#------------------------------------------------------------------------------------------------------------------
# IPv6 message Sender and Receiver class
class _NodeError(Exception):
pass
def prepare_tx(self, src, dst, data=40, count=1, mcast_hops=None):
"""Prepares an IPv6 msg transmission.
- `src` and `dst` can be either a string containing IPv6 address, or a tuple (ipv6 address as string, port),
if no port is given, a random port number is used.
- `data` can be either a string containing the message to be sent, or an int indicating size of the message (a
random message with the given length will be used).
- `count` gives number of times the message will be sent (default is 1).
- `mcast_hops` specifies multicast hop limit (only applicable for multicast tx).
Returns an `AsyncSender` object.
"""
if isinstance(src, tuple):
src_addr = src[0]
src_port = src[1]
else:
src_addr = src
src_port = random.randint(49152, 65535)
if isinstance(dst, tuple):
dst_addr = dst[0]
dst_port = dst[1]
else:
dst_addr = dst
dst_port = random.randint(49152, 65535)
if isinstance(data, int):
# create a random message with the given length.
all_chars = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.,><?;:[]=-+)(*&^%$#@'
msg = ''.join(random.choice(all_chars) for _ in range(data))
else:
msg = data
return AsyncSender(self, src_addr, src_port, dst_addr, dst_port, msg, count, mcast_hops)
def _get_receiver(self, local_port):
# Gets or creates a receiver (an `AsyncReceiver`) tied to given port number
if local_port in self._recvers:
receiver = self._recvers[local_port]
else:
receiver = AsyncReceiver(self, local_port)
self._recvers[local_port] = receiver
return receiver
def _remove_recver(self, recvr):
# Removes a receiver from weak dictionary - called when the receiver is done and its socket is closed
local_port = recvr.local_port
if local_port in self._recvers:
del self._recvers[local_port]
def prepare_rx(self, sender):
"""Prepare to receive messages from a sender (an `AsyncSender`)"""
receiver = self._get_receiver(sender.dst_port)
receiver._add_sender(sender.src_addr, sender.src_port, sender.msg, sender.count)
return receiver
    def prepare_listener(self, local_port, timeout=1):
"""Prepares a listener (an `AsyncReceiver`) listening on the given `local_port` for given `timeout` (sec)"""
receiver = self._get_receiver(local_port)
receiver._set_listen_timeout(timeout)
return receiver
@staticmethod
def perform_async_tx_rx(timeout=20):
"""Called to perform all previously prepared async rx/listen and tx operations"""
try:
start_time = time.time()
while asyncore.socket_map:
elapsed_time = time.time() - start_time
if elapsed_time > timeout:
                    print 'Performing async tx/rx took too long ({}>{} sec)'.format(elapsed_time, timeout)
raise Node._NodeError('perform_tx_rx timed out ({}>{} sec)'.format(elapsed_time, timeout))
# perform a single asyncore loop
asyncore.loop(timeout=0.5, count=1)
except:
print 'Failed to perform async rx/tx'
raise
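    # Hedged usage sketch (illustrative, not from the original file): a single UDP
    # message exchange between two associated nodes would look roughly like:
    #
    #   src = node1.find_ip6_address_with_prefix('fd')   # pick suitable addresses
    #   dst = node2.find_ip6_address_with_prefix('fd')
    #   sender = node1.prepare_tx((src, 1234), (dst, 1234), 'hello', count=1)
    #   recver = node2.prepare_rx(sender)
    #   Node.perform_async_tx_rx()
    #   verify(sender.was_successful and recver.was_successful)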
#-----------------------------------------------------------------------------------------------------------------------
# `AsyncSender` and `AsyncReceiver` classes
_SO_BINDTODEVICE = 25
def _is_ipv6_addr_link_local(ip_addr):
"""Indicates if a given IPv6 address is link-local"""
return ip_addr.lower().startswith('fe80::')
def _create_socket_address(ip_address, port):
"""Convert a given IPv6 address (string) and port number into a socket address"""
# `socket.getaddrinfo()` returns a list of `(family, socktype, proto, canonname, sockaddr)` where `sockaddr`
# (at index 4) can be used as input in socket methods (like `sendto()`, `bind()`, etc.).
return socket.getaddrinfo(ip_address, port)[0][4]
class AsyncSender(asyncore.dispatcher):
""" An IPv6 async message sender - use `Node.prepare_tx()` to create one"""
def __init__(self, node, src_addr, src_port, dst_addr, dst_port, msg, count, mcast_hops=None):
self._node = node
self._src_addr = src_addr
self._src_port = src_port
self._dst_addr = dst_addr
self._dst_port = dst_port
self._msg = msg
self._count = count
self._dst_sock_addr = _create_socket_address(dst_addr, dst_port)
self._tx_buffer = self._msg
self._tx_counter = 0
# Create a socket, bind it to the node's interface
sock = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
sock.setsockopt(socket.SOL_SOCKET, _SO_BINDTODEVICE, node.interface_name + '\0')
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
# Set the IPV6_MULTICAST_HOPS
if mcast_hops is not None:
sock.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_MULTICAST_HOPS, mcast_hops)
# Bind the socket to the given src address
if _is_ipv6_addr_link_local(src_addr):
# If src is a link local address it requires the interface name to be specified.
src_sock_addr = _create_socket_address(src_addr + '%' + node.interface_name, src_port)
else:
src_sock_addr = _create_socket_address(src_addr, src_port)
sock.bind(src_sock_addr)
asyncore.dispatcher.__init__(self, sock)
# Property getters
@property
def node(self):
return self._node
@property
def src_addr(self):
return self._src_addr
@property
def src_port(self):
return self._src_port
@property
def dst_addr(self):
return self._dst_addr
@property
def dst_port(self):
return self._dst_port
@property
def msg(self):
return self._msg
@property
def count(self):
return self._count
@property
def was_successful(self):
"""Indicates if the transmission of IPv6 messages finished successfully"""
return self._tx_counter == self._count
# asyncore.dispatcher callbacks
def readable(self):
return False
def writable(self):
return True
def handle_write(self):
sent_len = self.sendto(self._tx_buffer, self._dst_sock_addr)
if self._node._verbose:
if sent_len < 30:
info_text = '{} bytes ("{}")'.format(sent_len, self._tx_buffer[:sent_len])
else:
info_text = '{} bytes'.format(sent_len)
_log('- Node{} sent {} to [{}]:{} from [{}]:{}'.format(self._node._index, info_text,
self._dst_addr, self._dst_port,
self._src_addr, self._src_port))
self._tx_buffer = self._tx_buffer[sent_len:]
if len(self._tx_buffer) == 0:
self._tx_counter += 1
if self._tx_counter < self._count:
self._tx_buffer = self._msg
else:
self.handle_close()
def handle_close(self):
self.close()
#- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
class AsyncReceiver(asyncore.dispatcher):
""" An IPv6 async message receiver - use `prepare_rx()` to create one"""
_MAX_RECV_SIZE = 2048
class _SenderInfo(object):
def __init__(self, sender_addr, sender_port, msg, count):
self._sender_addr = sender_addr
self._sender_port = sender_port
self._msg = msg
self._count = count
self._rx_counter = 0
def _check_received(self, msg, sender_addr, sender_port):
if self._msg == msg and self._sender_addr == sender_addr and self._sender_port == sender_port:
self._rx_counter += 1
return self._did_recv_all()
def _did_recv_all(self):
return self._rx_counter >= self._count
def __init__(self, node, local_port):
self._node = node
self._local_port = local_port
self._senders = [] # list of `_SenderInfo` objects
self._all_rx = [] # contains all received messages as a list of (pkt, (src_addr, src_port))
self._timeout = 0 # listen timeout (zero means forever)
self._started = False
self._start_time = 0
# Create a socket, bind it to the node's interface
sock = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
sock.setsockopt(socket.SOL_SOCKET, _SO_BINDTODEVICE, node.interface_name + '\0')
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
# Bind the socket to any IPv6 address with the given local port
local_sock_addr = _create_socket_address('::', local_port)
sock.bind(local_sock_addr)
asyncore.dispatcher.__init__(self, sock)
def _add_sender(self, sender_addr, sender_port, msg, count):
self._senders.append(AsyncReceiver._SenderInfo(sender_addr, sender_port, msg, count))
def _set_listen_timeout(self, timeout):
self._timeout = timeout
# Property getters
@property
def node(self):
return self._node
@property
def local_port(self):
return self._local_port
@property
def all_rx_msg(self):
"""returns all received messages as a list of (msg, (src_addr, src_port))"""
return self._all_rx
@property
def was_successful(self):
"""Indicates if all expected IPv6 messages were received successfully"""
return len(self._senders) == 0 or all([sender._did_recv_all() for sender in self._senders])
# asyncore.dispatcher callbacks
def readable(self):
if not self._started:
self._start_time = time.time()
self._started = True
if self._timeout != 0 and time.time() - self._start_time >= self._timeout:
self.handle_close()
if self._node._verbose:
_log('- Node{} finished listening on port {} for {} sec, received {} msg(s)'.format(
self._node._index, self._local_port, self._timeout, len(self._all_rx)))
return False
return True
def writable(self):
return False
def handle_read(self):
(msg, src_sock_addr) = self.recvfrom(AsyncReceiver._MAX_RECV_SIZE)
src_addr = src_sock_addr[0]
src_port = src_sock_addr[1]
if (_is_ipv6_addr_link_local(src_addr)):
if '%' in src_addr:
src_addr = src_addr.split('%')[0] # remove the interface name from address
if self._node._verbose:
if len(msg) < 30:
info_text = '{} bytes ("{}")'.format(len(msg), msg)
else:
info_text = '{} bytes'.format(len(msg))
_log('- Node{} received {} on port {} from [{}]:{}'.format(self._node._index, info_text,
self._local_port,
src_addr, src_port))
self._all_rx.append((msg, (src_addr, src_port)))
if all([sender._check_received(msg, src_addr, src_port) for sender in self._senders]):
self.handle_close()
def handle_close(self):
self.close()
# remove the receiver from the node once the socket is closed
self._node._remove_recver(self)
#-----------------------------------------------------------------------------------------------------------------------
class VerifyError(Exception):
pass
_is_in_verify_within = False
def verify(condition):
"""Verifies that a `condition` is true, otherwise raises a VerifyError"""
global _is_in_verify_within
if not condition:
calling_frame = inspect.currentframe().f_back
error_message = 'verify() failed at line {} in "{}"'.format(calling_frame.f_lineno, calling_frame.f_code.co_filename)
if not _is_in_verify_within:
print error_message
raise VerifyError(error_message)
def verify_within(condition_checker_func, wait_time, delay_time=0.1):
"""Verifies that a given function `condition_checker_func` passes successfully within a given wait timeout.
`wait_time` is maximum time waiting for condition_checker to pass (in seconds).
`delay_time` specifies a delay interval added between failed attempts (in seconds).
"""
global _is_in_verify_within
start_time = time.time()
old_is_in_verify_within = _is_in_verify_within
_is_in_verify_within = True
while True:
try:
condition_checker_func()
except VerifyError as e:
if time.time() - start_time > wait_time:
print 'Took too long to pass the condition ({}>{} sec)'.format(time.time() - start_time, wait_time)
print e.message
raise e
except:
raise
else:
break
if delay_time != 0:
time.sleep(delay_time)
_is_in_verify_within = old_is_in_verify_within
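# Example usage (illustrative sketch only, not part of the original helpers):
# re-run a checker that calls verify() until it passes or the timeout expires;
# `receiver` is assumed to be an AsyncReceiver instance.
#
#     def check_all_msgs_received():
#         verify(receiver.was_successful)
#
#     verify_within(check_all_msgs_received, wait_time=10, delay_time=0.5)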
#-----------------------------------------------------------------------------------------------------------------------
# Parsing `wpanctl` output
#- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
class ScanResult(object):
""" This object encapsulates a scan result (active/discover/energy scan)"""
TYPE_ACTIVE_SCAN = 'active-scan'
TYPE_DISCOVERY_SCAN = 'discover-scan'
TYPE_ENERGY_SCAN = 'energy-scan'
def __init__(self, result_text):
items = [item.strip() for item in result_text.split('|')]
if len(items) == 8:
self._type = ScanResult.TYPE_ACTIVE_SCAN
self._index = items[0]
self._joinable = (items[1] == 'YES')
self._network_name = items[2][1:-1]
self._panid = items[3]
self._channel = items[4]
self._xpanid = items[5]
self._ext_address = items[6]
self._rssi = items[7]
elif len(items) == 7:
self._type = ScanResult.TYPE_DISCOVERY_SCAN
self._index = items[0]
self._network_name = items[1][1:-1]
self._panid = items[2]
self._channel = items[3]
self._xpanid = items[4]
self._ext_address = items[5]
self._rssi = items[6]
elif len(items) == 2:
self._type = ScanResult.TYPE_ENERGY_SCAN
self._channel = items[0]
self._rssi = items[1]
else:
            raise ValueError('"{}" does not seem to be a valid scan result string'.format(result_text))
@property
def type(self):
return self._type
@property
def joinable(self):
return self._joinable
@property
def network_name(self):
return self._network_name
@property
def panid(self):
return self._panid
@property
def channel(self):
return self._channel
@property
def xpanid(self):
return self._xpanid
@property
def ext_address(self):
return self._ext_address
@property
def rssi(self):
return self._rssi
def __repr__(self):
return 'ScanResult({})'.format(self.__dict__)
def parse_scan_result(scan_result):
""" Parses scan result string and returns an array of `ScanResult` objects"""
return [ ScanResult(item) for item in scan_result.split('\n')[2:] ] # skip first two lines which are table headers
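# For illustration (hypothetical row, not captured from a real device): an
# active-scan line such as
#
#     1 | NO | "my-network" | 0x1234 | 11 | 0xDEAD00BEEF00CAFE | 0123456789ABCDEF | -20
#
# parses into a ScanResult with type TYPE_ACTIVE_SCAN, joinable False,
# network_name 'my-network', panid '0x1234', channel '11' and rssi '-20'.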
def parse_list(list_string):
"""
Parses IPv6/prefix/route list string (output of wpanctl get for properties WPAN_IP6_ALL_ADDRESSES,
IP6_MULTICAST_ADDRESSES, WPAN_THREAD_ON_MESH_PREFIXES, ...)
Returns an array of strings each containing an IPv6/prefix/route entry.
"""
# List string example (get(WPAN_IP6_ALL_ADDRESSES) output):
#
# '[\n
# \t"fdf4:5632:4940:0:8798:8701:85d4:e2be prefix_len:64 origin:ncp valid:forever preferred:forever"\n
# \t"fe80::2092:9358:97ea:71c6 prefix_len:64 origin:ncp valid:forever preferred:forever"\n
# ]'
#
# We split the lines ('\n' as separator) and skip the first and last lines which are '[' and ']'.
# For each line, skip the first two characters (which are '\t"') and last character ('"'), then split the string
# using whitespace as separator. The first entry is the IPv6 address.
#
return [line[2:-1].split()[0] for line in list_string.split('\n')[1:-1]]
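# For the example string shown in the docstring above, parse_list() returns:
#
#     ['fdf4:5632:4940:0:8798:8701:85d4:e2be', 'fe80::2092:9358:97ea:71c6']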
#- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
class OnMeshPrefix(object):
""" This object encapsulates an on-mesh prefix"""
def __init__(self, text):
# Example of expected text:
#
# '\t"fd00:abba:cafe:: prefix_len:64 origin:user stable:yes flags:0x31'
# ' [on-mesh:1 def-route:0 config:0 dhcp:0 slaac:1 pref:1 prio:med] rloc:0x0000"'
m = re.match('\t"([0-9a-fA-F:]+)\s*prefix_len:(\d+)\s+origin:(\w*)\s+stable:(\w*).* \[' +
'on-mesh:(\d)\s+def-route:(\d)\s+config:(\d)\s+dhcp:(\d)\s+slaac:(\d)\s+pref:(\d)\s+prio:(\w*)\]' +
'\s+rloc:(0x[0-9a-fA-F]+)',
text)
verify(m is not None)
data = m.groups()
self._prefix = data[0]
self._prefix_len = data[1]
self._origin = data[2]
self._stable = (data[3] == 'yes')
self._on_mesh = (data[4] == '1')
self._def_route = (data[5] == '1')
self._config = (data[6] == '1')
self._dhcp = (data[7] == '1')
self._slaac = (data[8] == '1')
self._preferred = (data[9] == '1')
self._priority = (data[10])
self._rloc16 = (data[11])
@property
def prefix(self):
return self._prefix
@property
def prefix_len(self):
return self._prefix_len
@property
def origin(self):
return self._origin
@property
def priority(self):
return self._priority
def is_stable(self):
return self._stable
def is_on_mesh(self):
return self._on_mesh
def is_def_route(self):
return self._def_route
def is_config(self):
return self._config
def is_dhcp(self):
return self._dhcp
def is_slaac(self):
return self._slaac
def is_preferred(self):
return self._preferred
def rloc16(self):
return self._rloc16
def __repr__(self):
return 'OnMeshPrefix({})'.format(self.__dict__)
def parse_on_mesh_prefix_result(on_mesh_prefix_list):
""" Parses on-mesh prefix list string and returns an array of `OnMeshPrefix` objects"""
return [ OnMeshPrefix(item) for item in on_mesh_prefix_list.split('\n')[1:-1] ]
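# For the example prefix text shown in OnMeshPrefix.__init__ above, the parsed
# entry reports prefix 'fd00:abba:cafe::', prefix_len '64', origin 'user',
# is_stable() True, is_on_mesh() True and is_slaac() True.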
#- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
class ChildEntry(object):
""" This object encapsulates a child entry"""
def __init__(self, text):
# Example of expected text:
#
# `\t"E24C5F67F4B8CBB9, RLOC16:d402, NetDataVer:175, LQIn:3, AveRssi:-20, LastRssi:-20, Timeout:120, Age:0, `
# `RxOnIdle:no, FTD:no, SecDataReq:yes, FullNetData:yes"`
#
# We get rid of the first two chars `\t"' and last char '"', split the rest using whitespace as separator.
# Then remove any ',' at end of items in the list.
        items = [item[:-1] if item[-1] == ',' else item for item in text[2:-1].split()]
# First item in the extended address
self._ext_address = items[0]
# Convert the rest into a dictionary by splitting using ':' as separator
dict = {item.split(':')[0] : item.split(':')[1] for item in items[1:]}
self._rloc16 = dict['RLOC16']
self._timeout = dict['Timeout']
self._rx_on_idle = (dict['RxOnIdle'] == 'yes')
self._ftd = (dict['FTD'] == 'yes')
self._sec_data_req = (dict['SecDataReq'] == 'yes')
self._full_net_data = (dict['FullNetData'] == 'yes')
@property
def ext_address(self):
return self._ext_address
@property
def rloc16(self):
return self._rloc16
@property
def timeout(self):
return self._timeout
def is_rx_on_when_idle(self):
return self._rx_on_idle
def is_ftd(self):
return self._ftd
def is_sec_data_req(self):
return self._sec_data_req
def is_full_net_data(self):
return self._full_net_data
def __repr__(self):
return 'ChildEntry({})'.format(self.__dict__)
def parse_child_table_result(child_table_list):
""" Parses child table list string and returns an array of `ChildEntry` objects"""
return [ ChildEntry(item) for item in child_table_list.split('\n')[1:-1] ]
#- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
class NeighborEntry(object):
""" This object encapsulates a neighbor entry"""
def __init__(self, text):
# Example of expected text:
#
# `\t"5AC95ED4646D6565, RLOC16:9403, LQIn:3, AveRssi:-20, LastRssi:-20, Age:0, LinkFC:8, MleFC:0, IsChild:yes, '
# 'RxOnIdle:no, FTD:no, SecDataReq:yes, FullNetData:yes"'
#
# We get rid of the first two chars `\t"' and last char '"', split the rest using whitespace as separator.
# Then remove any ',' at end of items in the list.
        items = [item[:-1] if item[-1] == ',' else item for item in text[2:-1].split()]
# First item in the extended address
self._ext_address = items[0]
# Convert the rest into a dictionary by splitting the text using ':' as separator
dict = {item.split(':')[0] : item.split(':')[1] for item in items[1:]}
self._rloc16 = dict['RLOC16']
self._is_child = (dict['IsChild'] == 'yes')
self._rx_on_idle = (dict['RxOnIdle'] == 'yes')
self._ftd = (dict['FTD'] == 'yes')
@property
def ext_address(self):
return self._ext_address
@property
def rloc16(self):
return self._rloc16
def is_rx_on_when_idle(self):
return self._rx_on_idle
def is_ftd(self):
return self._ftd
def is_child(self):
return self._is_child
def __repr__(self):
return 'NeighborEntry({})'.format(self.__dict__)
def parse_neighbor_table_result(neighbor_table_list):
""" Parses neighbor table list string and returns an array of `NeighborEntry` objects"""
return [ NeighborEntry(item) for item in neighbor_table_list.split('\n')[1:-1] ]
#- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
class RouterTableEntry(object):
""" This object encapsulates a router table entry"""
def __init__(self, text):
# Example of expected text:
#
# `\t"8A970B3251810826, RLOC16:4000, RouterId:16, NextHop:43, PathCost:1, LQIn:3, LQOut:3, Age:3, LinkEst:yes"`
#
# We get rid of the first two chars `\t"' and last char '"', split the rest using whitespace as separator.
# Then remove any ',' at end of items in the list.
        items = [item[:-1] if item[-1] == ',' else item for item in text[2:-1].split()]
# First item in the extended address
self._ext_address = items[0]
# Convert the rest into a dictionary by splitting the text using ':' as separator
dict = {item.split(':')[0] : item.split(':')[1] for item in items[1:]}
self._rloc16 = int(dict['RLOC16'], 16)
self._router_id = int(dict['RouterId'], 0)
self._next_hop = int(dict['NextHop'], 0)
self._path_cost = int(dict['PathCost'], 0)
self._age = int(dict['Age'], 0)
self._le = (dict['LinkEst'] == 'yes')
@property
def ext_address(self):
return self._ext_address
@property
def rloc16(self):
return self._rloc16
@property
def router_id(self):
return self._router_id
@property
def next_hop(self):
return self._next_hop
@property
def path_cost(self):
return self._path_cost
def is_link_established(self):
return self._le
def __repr__(self):
return 'RouterTableEntry({})'.format(self.__dict__)
def parse_router_table_result(router_table_list):
""" Parses router table list string and returns an array of `RouterTableEntry` objects"""
return [ RouterTableEntry(item) for item in router_table_list.split('\n')[1:-1] ]
#- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
class AddressCacheEntry(object):
""" This object encapsulates an address cache entry"""
def __init__(self, text):
# Example of expected text:
#
# '\t"fd00:1234::d427:a1d9:6204:dbae -> 0x9c00, age:0"'
#
# We get rid of the first two chars `\t"' and last char '"', split the rest using whitespace as separator.
# Then remove any ',' at end of items in the list.
        items = [item[:-1] if item[-1] == ',' else item for item in text[2:-1].split()]
# First item in the extended address
self._address = items[0]
self._rloc16 = int(items[2], 16)
# Convert the rest into a dictionary by splitting the text using ':' as separator
dict = {item.split(':')[0] : item.split(':')[1] for item in items[3:]}
self._age = int(dict['age'], 0)
@property
def address(self):
return self._address
@property
def rloc16(self):
return self._rloc16
@property
def age(self):
return self._age
def __repr__(self):
return 'AddressCacheEntry({})'.format(self.__dict__)
def parse_address_cache_table_result(addr_cache_table_list):
""" Parses address cache table list string and returns an array of `AddressCacheEntry` objects"""
return [ AddressCacheEntry(item) for item in addr_cache_table_list.split('\n')[1:-1] ]
|
{
"content_hash": "fde58bcedf698ff684efea1d65f0bc05",
"timestamp": "",
"source": "github",
"line_count": 1298,
"max_line_length": 125,
"avg_line_length": 42.15100154083205,
"alnum_prop": 0.5400643368913584,
"repo_name": "erja-gp/openthread",
"id": "874259e0833bd69a3c25af536c2a3aa78c31f1a8",
"size": "56316",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/toranj/wpan.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "15850"
},
{
"name": "C",
"bytes": "940119"
},
{
"name": "C#",
"bytes": "18077"
},
{
"name": "C++",
"bytes": "4306681"
},
{
"name": "Dockerfile",
"bytes": "6256"
},
{
"name": "M4",
"bytes": "63303"
},
{
"name": "Makefile",
"bytes": "133368"
},
{
"name": "Python",
"bytes": "2012919"
},
{
"name": "Ruby",
"bytes": "3397"
},
{
"name": "Shell",
"bytes": "74907"
}
],
"symlink_target": ""
}
|
from supriya.tools.ugentools.UGen import UGen
class TRand(UGen):
r'''A triggered random number generator.
::
>>> trigger = ugentools.Impulse.ar()
>>> t_rand = ugentools.TRand.ar(
... minimum=-1.,
... maximum=1.,
... trigger=trigger,
... )
>>> t_rand
TRand.ar()
'''
### CLASS VARIABLES ###
__documentation_section__ = 'Noise UGens'
__slots__ = ()
_ordered_input_names = (
'minimum',
'maximum',
'trigger',
)
_valid_calculation_rates = None
### INITIALIZER ###
def __init__(
self,
calculation_rate=None,
maximum=1,
minimum=0,
trigger=0,
):
UGen.__init__(
self,
calculation_rate=calculation_rate,
maximum=maximum,
minimum=minimum,
trigger=trigger,
)
### PUBLIC METHODS ###
@classmethod
def ar(
cls,
maximum=1,
minimum=0,
trigger=0,
):
r'''Constructs an audio-rate triggered random number generator.
::
>>> trigger = ugentools.Impulse.ar()
>>> t_rand = ugentools.TRand.ar(
... minimum=-1.,
... maximum=[0, 2],
... trigger=trigger,
... )
>>> t_rand
UGenArray({2})
Returns ugen graph.
'''
from supriya.tools import synthdeftools
calculation_rate = synthdeftools.CalculationRate.AUDIO
ugen = cls._new_expanded(
calculation_rate=calculation_rate,
maximum=maximum,
minimum=minimum,
trigger=trigger,
)
return ugen
@classmethod
def kr(
cls,
maximum=1,
minimum=0,
trigger=0,
):
r'''Constructs a control-rate triggered random number generator.
::
>>> trigger = ugentools.Impulse.kr()
>>> t_rand = ugentools.TRand.kr(
... minimum=-1.,
... maximum=[0, 2],
... trigger=trigger,
... )
>>> t_rand
UGenArray({2})
Returns ugen graph.
'''
from supriya.tools import synthdeftools
calculation_rate = synthdeftools.CalculationRate.CONTROL
ugen = cls._new_expanded(
calculation_rate=calculation_rate,
maximum=maximum,
minimum=minimum,
trigger=trigger,
)
return ugen
### PUBLIC PROPERTIES ###
@property
def maximum(self):
r'''Gets `maximum` input of TRand.
::
>>> trigger = ugentools.Impulse.ar()
>>> t_rand = ugentools.TRand.ar(
... minimum=-1.,
... maximum=1.,
... trigger=trigger,
... )
>>> t_rand.maximum
1.0
Returns ugen input.
'''
index = self._ordered_input_names.index('maximum')
return self._inputs[index]
@property
def minimum(self):
r'''Gets `minimum` input of TRand.
::
>>> trigger = ugentools.Impulse.ar()
>>> t_rand = ugentools.TRand.ar(
... minimum=-1.,
... maximum=1.,
... trigger=trigger,
... )
>>> t_rand.minimum
-1.0
Returns ugen input.
'''
index = self._ordered_input_names.index('minimum')
return self._inputs[index]
@property
def trigger(self):
r'''Gets `trigger` input of TRand.
::
>>> trigger = ugentools.Impulse.ar()
>>> t_rand = ugentools.TRand.ar(
... minimum=-1.,
... maximum=1.,
... trigger=trigger,
... )
>>> t_rand.trigger
OutputProxy(
source=Impulse(
calculation_rate=CalculationRate.AUDIO,
frequency=440.0,
phase=0.0
),
output_index=0
)
Returns ugen input.
'''
index = self._ordered_input_names.index('trigger')
return self._inputs[index]
|
{
"content_hash": "d3f55d753097a2be7bb3761c05e2f1fb",
"timestamp": "",
"source": "github",
"line_count": 185,
"max_line_length": 72,
"avg_line_length": 23.605405405405406,
"alnum_prop": 0.44882070070986946,
"repo_name": "andrewyoung1991/supriya",
"id": "085a021291b4ddf4ff1ee5d05e6df752f968bfd9",
"size": "4393",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "supriya/tools/ugentools/TRand.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "6712"
},
{
"name": "CSS",
"bytes": "446"
},
{
"name": "HTML",
"bytes": "1083"
},
{
"name": "JavaScript",
"bytes": "6163"
},
{
"name": "Makefile",
"bytes": "6775"
},
{
"name": "Python",
"bytes": "2693776"
}
],
"symlink_target": ""
}
|
from ._commons import validate_yes_no, MODELNAME_template
def create_admin(model_name):
MODELNAME_template('admin', model_name)
return
def request(model_name):
while True:
admin_required = input('Does this model require an admin.py [y/n] ')
answer = validate_yes_no(admin_required)
if answer:
create_admin(model_name)
return
elif answer is None:
pass
elif not answer:
return
|
{
"content_hash": "487ded56f2b2aa64ff7f47dc546a1863",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 70,
"avg_line_length": 23.647058823529413,
"alnum_prop": 0.7189054726368159,
"repo_name": "cidadania/ecidadania-ng",
"id": "b9c09ca839047052a6e6d762fc0210991ae5c2f4",
"size": "402",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/apps/managecommands/management/commands/resource_addnewapp/_admin.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "41262"
},
{
"name": "HTML",
"bytes": "85966"
},
{
"name": "JavaScript",
"bytes": "3818"
},
{
"name": "Python",
"bytes": "148480"
},
{
"name": "Ruby",
"bytes": "946"
}
],
"symlink_target": ""
}
|
from django import forms
from cms.models import Page
from cms.utils.urlutils import static_with_version
from .wizard_pool import entry_choices
def step2_form_factory(mixin_cls, entry_form_class, attrs=None):
"""
Combines a form mixin with a form class, sets attrs to the resulting class.
This is used to provide a common behavior/logic for all wizard content
forms.
"""
if attrs is None:
attrs = {}
# class name is hardcoded to be consistent with the step 1 form.
# this is meant to be used only in the context of the form wizard.
class_name = 'WizardStep2Form'
meta_class = type(entry_form_class)
FormClass = meta_class(class_name, (mixin_cls, entry_form_class), attrs)
return FormClass
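# Example (sketch; ArticleCreateForm is a hypothetical content form, not part
# of this module): building the step-2 form used by the wizard for one entry.
#
#     ArticleWizardForm = step2_form_factory(
#         mixin_cls=WizardStep2BaseForm,
#         entry_form_class=ArticleCreateForm,
#     )
#
# The resulting class behaves like ArticleCreateForm but also accepts the
# wizard_page/wizard_user/wizard_language kwargs handled by BaseFormMixin below.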
class BaseFormMixin(object):
has_separate_optional_fields = False
def __init__(self, *args, **kwargs):
self.page = kwargs.pop('wizard_page', None)
self.user = kwargs.pop('wizard_user', None)
self.language_code = kwargs.pop('wizard_language')
super(BaseFormMixin, self).__init__(*args, **kwargs)
@property
def required_fields(self):
return [f for f in self.visible_fields() if f.field.required]
@property
def optional_fields(self):
return [f for f in self.visible_fields() if not f.field.required]
class WizardStep1Form(BaseFormMixin, forms.Form):
class Media:
css = {
'all': (
static_with_version('cms/css/cms.wizard.css'),
)
}
js = (
'cms/js/dist/bundle.admin.base.min.js',
'cms/js/modules/cms.wizards.js',
)
page = forms.ModelChoiceField(
queryset=Page.objects.all(),
required=False,
widget=forms.HiddenInput
)
language = forms.CharField(widget=forms.HiddenInput)
entry = forms.ChoiceField(choices=[], widget=forms.RadioSelect())
def __init__(self, *args, **kwargs):
super(WizardStep1Form, self).__init__(*args, **kwargs)
# set the entries here to get an up to date list of entries.
self.fields['entry'].choices = entry_choices(user=self.user,
page=self.page)
class WizardStep2BaseForm(BaseFormMixin):
user = None
|
{
"content_hash": "5f5d8ac438aaa89a92d584f26d19134d",
"timestamp": "",
"source": "github",
"line_count": 73,
"max_line_length": 79,
"avg_line_length": 31.15068493150685,
"alnum_prop": 0.6270888302550571,
"repo_name": "netzkolchose/django-cms",
"id": "95d5f11dd67670a25f134d3f774bdbd0ab17d92e",
"size": "2299",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "cms/wizards/forms.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "141578"
},
{
"name": "HTML",
"bytes": "182815"
},
{
"name": "JavaScript",
"bytes": "1253800"
},
{
"name": "Python",
"bytes": "2213767"
},
{
"name": "Shell",
"bytes": "447"
}
],
"symlink_target": ""
}
|
from random import random
import csv
import sys
# Splits inputfile into two files: one for testing and one for training
# Prerequisite: inputfile, a csv generated from extract.py
# Parameters:
# inputfile - a csv that is in the format NLC expects (see extract.py)
# outputtrainfile - the output file as CSV in the same format, to be used for training
# outputtestfile - the output file as CSV in the same format, to be used for testing
# percent_train - the percent of questions to reserve for training
#TODO make split.csv optimized to prevent class imbalances
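# Example invocation (hypothetical file names), reserving 80% of the questions
# for training:
#
#     python split.py questions.csv 80 train.csv test.csv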
def main(argv):
if len(argv) != 4:
print 'split.py inputfile percent_train outputtrainfile outputtestfile'
else:
train_classes = set()
test_classes = set()
all_classes = set()
num_instances = 0
csvFile = open(argv[0],'rb')
trainCsv = open(argv[2],'w')
testCsv = open(argv[3],'w')
csvTrainWriter = csv.writer(trainCsv, delimiter=',')
csvTestWriter = csv.writer(testCsv, delimiter=',')
with open(argv[0]) as f:
total_data = csv.reader(csvFile, delimiter=',')
percent_as_decimal = float(argv[1])/100
for row in total_data:
num_instances +=1
if random() < percent_as_decimal:
train_classes.add(row[1])
csvTrainWriter.writerow([row[0], row[1]])
else:
test_classes.add(row[1])
csvTestWriter.writerow([row[0], row[1]])
all_classes.add(row[1])
print "\n#########" + "DATA STATISTICS" + "#########"
        print num_instances, " total instances"
print len(all_classes), " classes"
print len(train_classes), " classes in the training set"
print len(test_classes), " classes in the testing set"
train_count = 0
for item in train_classes:
if not item in test_classes:
train_count += 1
print train_count, "classes ONLY in the training set"
test_count = 0
for item in test_classes:
if not item in train_classes:
test_count += 1
print test_count, "classes ONLY in the testing set"
print "\n**If you have lots of classes only in the training or only in the testing set, you are going to get bad results. If you test on something you've never seen before, you have no chance of getting it right. To fix this, make sure each class has at least 2 instances (preferrably 8)**"
print "#########" + "##############" + "#########\n"
trainCsv.close()
testCsv.close()
csvFile.close()
if __name__ == "__main__":
main(sys.argv[1:])
|
{
"content_hash": "9a6719936b30d5ed6d638cc6f60490d9",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 302,
"avg_line_length": 40.96923076923077,
"alnum_prop": 0.6060833646263613,
"repo_name": "mkaufmann1/Lightning",
"id": "27bdf74978f4ee28cf93a054230112c7a29f9224",
"size": "2937",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "python/split.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "27871"
}
],
"symlink_target": ""
}
|
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
"""
Verify the Jar() behavior when we have no JARCHDIR set (it should
automatically use the classdir that was deduced from the Java() call)
and when we explicitly set it to None (it should not use the Java()
classdir attribute at all).
"""
import TestSCons
test = TestSCons.TestSCons()
where_javac, java_version = test.java_where_javac()
where_jar = test.java_where_jar()
test.subdir('src')
test.write(['src', 'a.java'], """\
package foo.bar;
public class a {}
""")
test.write(['src', 'b.java'], """\
package foo.bar;
public class b {}
""")
test.write('SConstruct', """\
env = Environment(tools = ['javac', 'jar'],
JAVAC = r'%(where_javac)s',
JAR = r'%(where_jar)s')
jar = env.Jar('x.jar', env.Java(target = 'classes', source = 'src'))
""" % locals())
test.run(arguments = '.')
test.run(program = where_jar, arguments = 'tf x.jar')
expect = """\
foo/bar/a.class
foo/bar/b.class
"""
test.must_contain_all_lines(test.stdout(), [expect])
test.run(arguments = '-c')
test.write('SConstruct', """\
env = Environment(tools = ['javac', 'jar'],
JAVAC = r'%(where_javac)s',
JAR = r'%(where_jar)s',
JARCHDIR = None)
jar = env.Jar('x.jar', env.Java(target = 'classes', source = 'src'))
""" % locals())
test.run(arguments = '.')
test.run(program = where_jar, arguments = 'tf x.jar')
expect = """\
classes/foo/bar/a.class
classes/foo/bar/b.class
"""
test.must_contain_all_lines(test.stdout(), [expect])
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
{
"content_hash": "3f8245dd651b43a1c353cb2f3528ee8b",
"timestamp": "",
"source": "github",
"line_count": 90,
"max_line_length": 69,
"avg_line_length": 18.966666666666665,
"alnum_prop": 0.6051552431165788,
"repo_name": "andrewyoung1991/scons",
"id": "20375249ddb7cfb42b19fea778c80d647dbcde55",
"size": "2809",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "test/Java/no-JARCHDIR.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "2437"
},
{
"name": "C",
"bytes": "746"
},
{
"name": "C++",
"bytes": "518"
},
{
"name": "CSS",
"bytes": "18502"
},
{
"name": "D",
"bytes": "1817"
},
{
"name": "DTrace",
"bytes": "180"
},
{
"name": "HTML",
"bytes": "857084"
},
{
"name": "Java",
"bytes": "6860"
},
{
"name": "JavaScript",
"bytes": "215495"
},
{
"name": "Makefile",
"bytes": "3795"
},
{
"name": "Perl",
"bytes": "44714"
},
{
"name": "Python",
"bytes": "7385906"
},
{
"name": "Ruby",
"bytes": "10888"
},
{
"name": "Shell",
"bytes": "52194"
},
{
"name": "XSLT",
"bytes": "7567242"
}
],
"symlink_target": ""
}
|
import sys, os, subprocess
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('.'))
sys.path.insert(0, os.path.abspath('..'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx',
'sphinx.ext.doctest', 'sphinx.ext.extlinks',
'sphinx.ext.autosummary',
'numpydoc',
# Optional
'sphinx.ext.graphviz',
]
extlinks = dict(issue=('https://github.com/blaze/blaze/issues/%s', '#'))
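# With this shortcut, writing :issue:`123` in the docs renders as a link
# labelled "#123" pointing at https://github.com/blaze/blaze/issues/123
# (standard sphinx.ext.extlinks behaviour for this generation of Sphinx).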
# -- Math ---------------------------------------------------------------------
try:
subprocess.call(["pdflatex", "--version"])
extensions += ['sphinx.ext.pngmath']
except OSError:
extensions += ['sphinx.ext.mathjax']
# -- Docstrings ---------------------------------------------------------------
import numpydoc
# 'numpydoc' is already registered in the extensions list above.
numpydoc_show_class_members = False
# -- Diagrams -----------------------------------------------------------------
# TODO: check about the legal requirements of putting this in the
# tree. sphinx-ditaa is BSD so should be fine...
#try:
#sys.path.append(os.path.abspath('sphinxext'))
#extensions += ['sphinxext.ditaa']
#diagrams = True
#except ImportError:
#diagrams = False
# -----------------------------------------------------------------------------
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Blaze'
copyright = u'2012, Continuum Analytics'
#------------------------------------------------------------------------
# Path Munging
#------------------------------------------------------------------------
# This is beautiful... yeah
sys.path.append(os.path.abspath('.'))
sys.path.append(os.path.abspath('..'))
sys.path.append(os.path.abspath('../..'))
from blaze import __version__ as version
#------------------------------------------------------------------------
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version is the same as the long version
#version = '0.x.x'
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd: # only import and set the theme if we're building docs locally
try:
import sphinx_rtd_theme
except ImportError:
html_theme = 'default'
html_theme_path = []
else:
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# The name of the Pygments (syntax highlighting) style to use.
highlight_language = 'python'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#html_theme = ''
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = os.path.join('svg', 'blaze.ico')
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
html_show_sphinx = False
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'blazedoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'blaze.tex', u'Blaze Documentation',
u'Continuum', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'blaze', u'Blaze Documentation',
[u'Continuum'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'blaze', u'Blaze Documentation',
u'Continuum Analytics', 'blaze', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
intersphinx_mapping = {
'http://docs.python.org/dev': None,
}
doctest_global_setup = "import blaze"
|
{
"content_hash": "9f81905f54ba9745fe50e2d6b65144f6",
"timestamp": "",
"source": "github",
"line_count": 300,
"max_line_length": 80,
"avg_line_length": 31.55666666666667,
"alnum_prop": 0.6520545051230591,
"repo_name": "alexmojaki/blaze",
"id": "a49350076c8417f526d5bb00e2dcaec8dd2d30b0",
"size": "9883",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "docs/source/conf.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "37"
},
{
"name": "Python",
"bytes": "752644"
},
{
"name": "Shell",
"bytes": "35"
}
],
"symlink_target": ""
}
|
# Copyright (C) 2013 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Generate template values for methods.
Extends IdlArgument with property |default_cpp_value|.
Extends IdlTypeBase and IdlUnionType with property |union_arguments|.
Design doc: http://www.chromium.org/developers/design-documents/idl-compiler
"""
from idl_definitions import IdlArgument, IdlOperation
from idl_types import IdlTypeBase, IdlUnionType, inherits_interface
from v8_globals import includes
import v8_types
import v8_utilities
from v8_utilities import (has_extended_attribute_value, is_unforgeable,
is_legacy_interface_type_checking)
# Methods with any of these require custom method registration code in the
# interface's configure*Template() function.
CUSTOM_REGISTRATION_EXTENDED_ATTRIBUTES = frozenset([
'DoNotCheckSecurity',
'DoNotCheckSignature',
'NotEnumerable',
'Unforgeable',
])
def use_local_result(method):
extended_attributes = method.extended_attributes
idl_type = method.idl_type
return (has_extended_attribute_value(method, 'CallWith', 'ScriptState') or
'ImplementedInPrivateScript' in extended_attributes or
'RaisesException' in extended_attributes or
idl_type.is_union_type or
idl_type.is_explicit_nullable)
def method_context(interface, method, is_visible=True):
arguments = method.arguments
extended_attributes = method.extended_attributes
idl_type = method.idl_type
is_static = method.is_static
name = method.name
if is_visible:
idl_type.add_includes_for_type(extended_attributes)
this_cpp_value = cpp_value(interface, method, len(arguments))
def function_template():
if is_static:
return 'functionTemplate'
if is_unforgeable(interface, method):
return 'instanceTemplate'
return 'prototypeTemplate'
is_implemented_in_private_script = 'ImplementedInPrivateScript' in extended_attributes
if is_implemented_in_private_script:
includes.add('bindings/core/v8/PrivateScriptRunner.h')
includes.add('core/frame/LocalFrame.h')
includes.add('platform/ScriptForbiddenScope.h')
# [OnlyExposedToPrivateScript]
is_only_exposed_to_private_script = 'OnlyExposedToPrivateScript' in extended_attributes
is_call_with_script_arguments = has_extended_attribute_value(method, 'CallWith', 'ScriptArguments')
if is_call_with_script_arguments:
includes.update(['bindings/core/v8/ScriptCallStackFactory.h',
'core/inspector/ScriptArguments.h'])
is_call_with_script_state = has_extended_attribute_value(method, 'CallWith', 'ScriptState')
is_call_with_this_value = has_extended_attribute_value(method, 'CallWith', 'ThisValue')
if is_call_with_script_state or is_call_with_this_value:
includes.add('bindings/core/v8/ScriptState.h')
is_check_security_for_node = 'CheckSecurity' in extended_attributes
if is_check_security_for_node:
includes.add('bindings/core/v8/BindingSecurity.h')
is_custom_element_callbacks = 'CustomElementCallbacks' in extended_attributes
if is_custom_element_callbacks:
includes.add('core/dom/custom/CustomElementProcessingStack.h')
is_do_not_check_security = 'DoNotCheckSecurity' in extended_attributes
is_check_security_for_frame = (
has_extended_attribute_value(interface, 'CheckSecurity', 'Frame') and
not is_do_not_check_security)
is_check_security_for_window = (
has_extended_attribute_value(interface, 'CheckSecurity', 'Window') and
not is_do_not_check_security)
is_raises_exception = 'RaisesException' in extended_attributes
is_custom_call_prologue = has_extended_attribute_value(method, 'Custom', 'CallPrologue')
is_custom_call_epilogue = has_extended_attribute_value(method, 'Custom', 'CallEpilogue')
is_post_message = 'PostMessage' in extended_attributes
if is_post_message:
includes.add('bindings/core/v8/SerializedScriptValueFactory.h')
includes.add('core/dom/DOMArrayBuffer.h')
includes.add('core/dom/MessagePort.h')
if 'LenientThis' in extended_attributes:
raise Exception('[LenientThis] is not supported for operations.')
return {
'activity_logging_world_list': v8_utilities.activity_logging_world_list(method), # [ActivityLogging]
'arguments': [argument_context(interface, method, argument, index, is_visible=is_visible)
for index, argument in enumerate(arguments)],
'argument_declarations_for_private_script':
argument_declarations_for_private_script(interface, method),
'conditional_string': v8_utilities.conditional_string(method),
'cpp_type': (v8_types.cpp_template_type('Nullable', idl_type.cpp_type)
if idl_type.is_explicit_nullable else idl_type.cpp_type),
'cpp_value': this_cpp_value,
'cpp_type_initializer': idl_type.cpp_type_initializer,
'custom_registration_extended_attributes':
CUSTOM_REGISTRATION_EXTENDED_ATTRIBUTES.intersection(
extended_attributes.iterkeys()),
'deprecate_as': v8_utilities.deprecate_as(method), # [DeprecateAs]
'exposed_test': v8_utilities.exposed(method, interface), # [Exposed]
'function_template': function_template(),
'has_custom_registration':
is_static or
is_unforgeable(interface, method) or
v8_utilities.has_extended_attribute(
method, CUSTOM_REGISTRATION_EXTENDED_ATTRIBUTES),
'has_exception_state':
is_raises_exception or
is_check_security_for_frame or
is_check_security_for_window or
any(argument for argument in arguments
if (argument.idl_type.name == 'SerializedScriptValue' or
argument_conversion_needs_exception_state(method, argument))),
'idl_type': idl_type.base_type,
'is_call_with_execution_context': has_extended_attribute_value(method, 'CallWith', 'ExecutionContext'),
'is_call_with_script_arguments': is_call_with_script_arguments,
'is_call_with_script_state': is_call_with_script_state,
'is_call_with_this_value': is_call_with_this_value,
'is_check_security_for_frame': is_check_security_for_frame,
'is_check_security_for_node': is_check_security_for_node,
'is_check_security_for_window': is_check_security_for_window,
'is_custom': 'Custom' in extended_attributes and
not (is_custom_call_prologue or is_custom_call_epilogue),
'is_custom_call_prologue': is_custom_call_prologue,
'is_custom_call_epilogue': is_custom_call_epilogue,
'is_custom_element_callbacks': is_custom_element_callbacks,
'is_do_not_check_security': is_do_not_check_security,
'is_do_not_check_signature': 'DoNotCheckSignature' in extended_attributes,
'is_explicit_nullable': idl_type.is_explicit_nullable,
'is_implemented_in_private_script': is_implemented_in_private_script,
'is_partial_interface_member':
'PartialInterfaceImplementedAs' in extended_attributes,
'is_per_world_bindings': 'PerWorldBindings' in extended_attributes,
'is_post_message': is_post_message,
'is_raises_exception': is_raises_exception,
'is_read_only': is_unforgeable(interface, method),
'is_static': is_static,
'is_variadic': arguments and arguments[-1].is_variadic,
'measure_as': v8_utilities.measure_as(method, interface), # [MeasureAs]
'name': name,
'number_of_arguments': len(arguments),
'number_of_required_arguments': len([
argument for argument in arguments
if not (argument.is_optional or argument.is_variadic)]),
'number_of_required_or_variadic_arguments': len([
argument for argument in arguments
if not argument.is_optional]),
'on_instance': v8_utilities.on_instance(interface, method),
'on_interface': v8_utilities.on_interface(interface, method),
'on_prototype': v8_utilities.on_prototype(interface, method),
'only_exposed_to_private_script': is_only_exposed_to_private_script,
'private_script_v8_value_to_local_cpp_value': idl_type.v8_value_to_local_cpp_value(
extended_attributes, 'v8Value', 'cppValue', isolate='scriptState->isolate()', bailout_return_value='false'),
'property_attributes': property_attributes(interface, method),
'returns_promise': method.returns_promise,
'runtime_enabled_function': v8_utilities.runtime_enabled_function_name(method), # [RuntimeEnabled]
'should_be_exposed_to_script': not (is_implemented_in_private_script and is_only_exposed_to_private_script),
'signature': 'v8::Local<v8::Signature>()' if is_static or 'DoNotCheckSignature' in extended_attributes else 'defaultSignature',
'use_output_parameter_for_result': idl_type.use_output_parameter_for_result,
'use_local_result': use_local_result(method),
'v8_set_return_value': v8_set_return_value(interface.name, method, this_cpp_value),
'v8_set_return_value_for_main_world': v8_set_return_value(interface.name, method, this_cpp_value, for_main_world=True),
'visible': is_visible,
'world_suffixes': ['', 'ForMainWorld'] if 'PerWorldBindings' in extended_attributes else [''], # [PerWorldBindings],
}
def argument_context(interface, method, argument, index, is_visible=True):
extended_attributes = argument.extended_attributes
idl_type = argument.idl_type
if is_visible:
idl_type.add_includes_for_type(extended_attributes)
this_cpp_value = cpp_value(interface, method, index)
is_variadic_wrapper_type = argument.is_variadic and idl_type.is_wrapper_type
# [TypeChecking=Interface] / [LegacyInterfaceTypeChecking]
has_type_checking_interface = (
not is_legacy_interface_type_checking(interface, method) and
idl_type.is_wrapper_type)
if ('ImplementedInPrivateScript' in extended_attributes and
not idl_type.is_wrapper_type and
not idl_type.is_basic_type):
raise Exception('Private scripts supports only primitive types and DOM wrappers.')
set_default_value = argument.set_default_value
this_cpp_type = idl_type.cpp_type_args(extended_attributes=extended_attributes,
raw_type=True,
used_as_variadic_argument=argument.is_variadic)
return {
'cpp_type': (
v8_types.cpp_template_type('Nullable', this_cpp_type)
if idl_type.is_explicit_nullable and not argument.is_variadic
else this_cpp_type),
'cpp_value': this_cpp_value,
# FIXME: check that the default value's type is compatible with the argument's
'set_default_value': set_default_value,
'enum_type': idl_type.enum_type,
'enum_values': idl_type.enum_values,
'handle': '%sHandle' % argument.name,
# FIXME: remove once [Default] removed and just use argument.default_value
'has_default': 'Default' in extended_attributes or set_default_value,
'has_type_checking_interface': has_type_checking_interface,
# Dictionary is special-cased, but arrays and sequences shouldn't be
'idl_type': idl_type.base_type,
'idl_type_object': idl_type,
'index': index,
'is_callback_function': idl_type.is_callback_function,
'is_callback_interface': idl_type.is_callback_interface,
# FIXME: Remove generic 'Dictionary' special-casing
'is_dictionary': idl_type.is_dictionary or idl_type.base_type == 'Dictionary',
'is_explicit_nullable': idl_type.is_explicit_nullable,
'is_nullable': idl_type.is_nullable,
'is_optional': argument.is_optional,
'is_variadic': argument.is_variadic,
'is_variadic_wrapper_type': is_variadic_wrapper_type,
'is_wrapper_type': idl_type.is_wrapper_type,
'name': argument.name,
'private_script_cpp_value_to_v8_value': idl_type.cpp_value_to_v8_value(
argument.name, isolate='scriptState->isolate()',
creation_context='scriptState->context()->Global()'),
'use_permissive_dictionary_conversion': 'PermissiveDictionaryConversion' in extended_attributes,
'v8_set_return_value': v8_set_return_value(interface.name, method, this_cpp_value),
'v8_set_return_value_for_main_world': v8_set_return_value(interface.name, method, this_cpp_value, for_main_world=True),
'v8_value_to_local_cpp_value': v8_value_to_local_cpp_value(method, argument, index),
}
def argument_declarations_for_private_script(interface, method):
argument_declarations = ['LocalFrame* frame']
argument_declarations.append('%s* holderImpl' % interface.name)
argument_declarations.extend(['%s %s' % (argument.idl_type.cpp_type_args(
used_as_rvalue_type=True), argument.name) for argument in method.arguments])
if method.idl_type.name != 'void':
argument_declarations.append('%s* %s' % (method.idl_type.cpp_type, 'result'))
return argument_declarations
################################################################################
# Value handling
################################################################################
def cpp_value(interface, method, number_of_arguments):
def cpp_argument(argument):
idl_type = argument.idl_type
if idl_type.name == 'EventListener':
return argument.name
if (idl_type.name in ['NodeFilter', 'NodeFilterOrNull',
'XPathNSResolver', 'XPathNSResolverOrNull']):
# FIXME: remove this special case
return '%s.release()' % argument.name
return argument.name
# Truncate omitted optional arguments
arguments = method.arguments[:number_of_arguments]
cpp_arguments = []
if 'ImplementedInPrivateScript' in method.extended_attributes:
cpp_arguments.append('toLocalFrame(toFrameIfNotDetached(info.GetIsolate()->GetCurrentContext()))')
cpp_arguments.append('impl')
if method.is_constructor:
call_with_values = interface.extended_attributes.get('ConstructorCallWith')
else:
call_with_values = method.extended_attributes.get('CallWith')
cpp_arguments.extend(v8_utilities.call_with_arguments(call_with_values))
# Members of IDL partial interface definitions are implemented in C++ as
# static member functions, which for instance members (non-static members)
# take *impl as their first argument
if ('PartialInterfaceImplementedAs' in method.extended_attributes and
not 'ImplementedInPrivateScript' in method.extended_attributes and
not method.is_static):
cpp_arguments.append('*impl')
cpp_arguments.extend(cpp_argument(argument) for argument in arguments)
if 'ImplementedInPrivateScript' in method.extended_attributes:
if method.idl_type.name != 'void':
cpp_arguments.append('&result')
elif ('RaisesException' in method.extended_attributes or
(method.is_constructor and
has_extended_attribute_value(interface, 'RaisesException', 'Constructor'))):
cpp_arguments.append('exceptionState')
# If a method returns an IDL dictionary or union type, the return value is
# passed as an argument to impl classes.
idl_type = method.idl_type
if idl_type and idl_type.use_output_parameter_for_result:
cpp_arguments.append('result')
if method.name == 'Constructor':
base_name = 'create'
elif method.name == 'NamedConstructor':
base_name = 'createForJSConstructor'
elif 'ImplementedInPrivateScript' in method.extended_attributes:
base_name = '%sMethod' % method.name
else:
base_name = v8_utilities.cpp_name(method)
cpp_method_name = v8_utilities.scoped_name(interface, method, base_name)
return '%s(%s)' % (cpp_method_name, ', '.join(cpp_arguments))
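# Rough illustration (hypothetical IDL, not taken from the Blink sources): for
# an instance operation "void foo(long x)" marked [RaisesException], the call
# string built here looks roughly like
#
#     impl->foo(x, exceptionState)
#
# with extra leading arguments (script state, *impl, ...) or a trailing
# 'result' argument added depending on the extended attributes and return type.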
def v8_set_return_value(interface_name, method, cpp_value, for_main_world=False):
idl_type = method.idl_type
extended_attributes = method.extended_attributes
if not idl_type or idl_type.name == 'void':
# Constructors and void methods don't have a return type
return None
if ('ImplementedInPrivateScript' in extended_attributes and
not idl_type.is_wrapper_type and
not idl_type.is_basic_type):
raise Exception('Private scripts supports only primitive types and DOM wrappers.')
release = False
# [CallWith=ScriptState], [RaisesException]
if use_local_result(method):
if idl_type.is_explicit_nullable:
# result is of type Nullable<T>
cpp_value = 'result.get()'
else:
cpp_value = 'result'
release = idl_type.release
script_wrappable = 'impl' if inherits_interface(interface_name, 'Node') else ''
return idl_type.v8_set_return_value(cpp_value, extended_attributes, script_wrappable=script_wrappable, release=release, for_main_world=for_main_world, is_static=method.is_static)
def v8_value_to_local_cpp_variadic_value(method, argument, index, return_promise):
assert argument.is_variadic
idl_type = argument.idl_type
this_cpp_type = idl_type.cpp_type
if method.returns_promise:
check_expression = 'exceptionState.hadException()'
else:
check_expression = 'exceptionState.throwIfNeeded()'
if idl_type.is_dictionary or idl_type.is_union_type:
vector_type = 'HeapVector'
else:
vector_type = 'Vector'
return {
'assign_expression': 'toImplArguments<%s<%s>>(info, %s, exceptionState)' % (vector_type, this_cpp_type, index),
'check_expression': check_expression,
'cpp_type': this_cpp_type,
'cpp_name': argument.name,
'declare_variable': False,
}
def v8_value_to_local_cpp_value(method, argument, index, return_promise=False, restricted_float=False):
extended_attributes = argument.extended_attributes
idl_type = argument.idl_type
name = argument.name
if argument.is_variadic:
return v8_value_to_local_cpp_variadic_value(method, argument, index, return_promise)
return idl_type.v8_value_to_local_cpp_value(extended_attributes, 'info[%s]' % index,
name, index=index, declare_variable=False,
use_exception_state=method.returns_promise,
restricted_float=restricted_float)
################################################################################
# Auxiliary functions
################################################################################
# [NotEnumerable]
def property_attributes(interface, method):
extended_attributes = method.extended_attributes
property_attributes_list = []
if 'NotEnumerable' in extended_attributes:
property_attributes_list.append('v8::DontEnum')
if is_unforgeable(interface, method):
property_attributes_list.append('v8::ReadOnly')
if property_attributes_list:
property_attributes_list.insert(0, 'v8::DontDelete')
return property_attributes_list
def argument_set_default_value(argument):
idl_type = argument.idl_type
default_value = argument.default_value
if not default_value:
return None
if idl_type.is_dictionary:
if not argument.default_value.is_null:
raise Exception('invalid default value for dictionary type')
return None
if idl_type.is_array_or_sequence_type:
if default_value.value != '[]':
raise Exception('invalid default value for sequence type: %s' % default_value.value)
# Nothing to do when we set an empty sequence as default value, but we
# need to return non-empty value so that we don't generate method calls
# without this argument.
return '/* Nothing to do */'
if idl_type.is_union_type:
if argument.default_value.is_null:
if not idl_type.includes_nullable_type:
raise Exception('invalid default value for union type: null for %s'
% idl_type.name)
# Union container objects are "null" initially.
return '/* null default value */'
if isinstance(default_value.value, basestring):
member_type = idl_type.string_member_type
elif isinstance(default_value.value, (int, float)):
member_type = idl_type.numeric_member_type
elif isinstance(default_value.value, bool):
member_type = idl_type.boolean_member_type
else:
member_type = None
if member_type is None:
raise Exception('invalid default value for union type: %r for %s'
% (default_value.value, idl_type.name))
member_type_name = (member_type.inner_type.name
if member_type.is_nullable else
member_type.name)
return '%s.set%s(%s)' % (argument.name, member_type_name,
member_type.literal_cpp_value(default_value))
return '%s = %s' % (argument.name,
idl_type.literal_cpp_value(default_value))
IdlArgument.set_default_value = property(argument_set_default_value)
def method_returns_promise(method):
return method.idl_type and method.idl_type.name == 'Promise'
IdlOperation.returns_promise = property(method_returns_promise)
def argument_conversion_needs_exception_state(method, argument):
idl_type = argument.idl_type
return (idl_type.v8_conversion_needs_exception_state or
argument.is_variadic or
(method.returns_promise and idl_type.is_string_type))
|
{
"content_hash": "93168d24e161bac159e9da6f6be8c2fb",
"timestamp": "",
"source": "github",
"line_count": 483,
"max_line_length": 182,
"avg_line_length": 48.54451345755694,
"alnum_prop": 0.6676333859342346,
"repo_name": "Pluto-tv/blink-crosswalk",
"id": "7db6e4ea4c0a38b2448d9e86d7f9af7f0018d715",
"size": "23447",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "Source/bindings/scripts/v8_methods.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ApacheConf",
"bytes": "1835"
},
{
"name": "Assembly",
"bytes": "14768"
},
{
"name": "Batchfile",
"bytes": "35"
},
{
"name": "C",
"bytes": "128002"
},
{
"name": "C++",
"bytes": "45337051"
},
{
"name": "CSS",
"bytes": "596289"
},
{
"name": "CoffeeScript",
"bytes": "163"
},
{
"name": "GLSL",
"bytes": "11578"
},
{
"name": "Groff",
"bytes": "28067"
},
{
"name": "HTML",
"bytes": "64824312"
},
{
"name": "Java",
"bytes": "109377"
},
{
"name": "JavaScript",
"bytes": "25099309"
},
{
"name": "Objective-C",
"bytes": "45096"
},
{
"name": "Objective-C++",
"bytes": "302371"
},
{
"name": "PHP",
"bytes": "220636"
},
{
"name": "Perl",
"bytes": "115958"
},
{
"name": "Python",
"bytes": "3879209"
},
{
"name": "Ruby",
"bytes": "73952"
},
{
"name": "Shell",
"bytes": "10282"
},
{
"name": "XSLT",
"bytes": "50203"
},
{
"name": "Yacc",
"bytes": "10148"
}
],
"symlink_target": ""
}
|
"""Tests for the Google Drive snapshots event formatter."""
import unittest
from plaso.formatters import gdrive
from tests.formatters import test_lib
class GDriveCloudEntryFormatterTest(test_lib.EventFormatterTestCase):
"""Tests for the Google Drive snapshot cloud event formatter."""
def testInitialization(self):
"""Tests the initialization."""
event_formatter = gdrive.GDriveCloudEntryFormatter()
self.assertNotEqual(event_formatter, None)
def testGetFormatStringAttributeNames(self):
"""Tests the GetFormatStringAttributeNames function."""
event_formatter = gdrive.GDriveCloudEntryFormatter()
expected_attribute_names = [
u'path', u'shared', u'size', u'url', u'document_type']
self._TestGetFormatStringAttributeNames(
event_formatter, expected_attribute_names)
# TODO: add test for GetMessages.
class GDriveLocalEntryFormatterTest(test_lib.EventFormatterTestCase):
"""Tests for the Google Drive snapshot local event formatter."""
def testInitialization(self):
"""Tests the initialization."""
event_formatter = gdrive.GDriveLocalEntryFormatter()
self.assertNotEqual(event_formatter, None)
def testGetFormatStringAttributeNames(self):
"""Tests the GetFormatStringAttributeNames function."""
event_formatter = gdrive.GDriveLocalEntryFormatter()
expected_attribute_names = [u'path', u'size']
self._TestGetFormatStringAttributeNames(
event_formatter, expected_attribute_names)
# TODO: add test for GetMessages.
if __name__ == '__main__':
unittest.main()
|
{
"content_hash": "9d53b93be6fee50eda153139d8af93c2",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 69,
"avg_line_length": 30.192307692307693,
"alnum_prop": 0.7439490445859872,
"repo_name": "jorik041/plaso",
"id": "2e12ac35a243a787a78a6ac79425d07436379297",
"size": "1612",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tests/formatters/gdrive.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1276"
},
{
"name": "Makefile",
"bytes": "1151"
},
{
"name": "Protocol Buffer",
"bytes": "13905"
},
{
"name": "Python",
"bytes": "3032632"
},
{
"name": "Shell",
"bytes": "45900"
}
],
"symlink_target": ""
}
|
"""
Running or runtime configuration related to Virtual Routing and Forwarding
tables (VRFs).
"""
import abc
import logging
from ryu.lib.packet.bgp import RF_IPv4_UC
from ryu.lib.packet.bgp import RF_IPv6_UC
from ryu.lib.packet.bgp import BGPPathAttributeExtendedCommunities
from ryu.services.protocols.bgp.utils import validation
from ryu.services.protocols.bgp.base import get_validator
from ryu.services.protocols.bgp.rtconf.base import BaseConf
from ryu.services.protocols.bgp.rtconf.base import BaseConfListener
from ryu.services.protocols.bgp.rtconf.base import ConfigTypeError
from ryu.services.protocols.bgp.rtconf.base import ConfigValueError
from ryu.services.protocols.bgp.rtconf.base import ConfWithId
from ryu.services.protocols.bgp.rtconf.base import ConfWithIdListener
from ryu.services.protocols.bgp.rtconf.base import ConfWithStats
from ryu.services.protocols.bgp.rtconf.base import ConfWithStatsListener
from ryu.services.protocols.bgp.rtconf.base import MAX_NUM_EXPORT_RT
from ryu.services.protocols.bgp.rtconf.base import MAX_NUM_IMPORT_RT
from ryu.services.protocols.bgp.rtconf.base import MULTI_EXIT_DISC
from ryu.services.protocols.bgp.rtconf.base import RuntimeConfigError
from ryu.services.protocols.bgp.rtconf.base import SITE_OF_ORIGINS
from ryu.services.protocols.bgp.rtconf.base import validate
from ryu.services.protocols.bgp.rtconf.base import validate_med
from ryu.services.protocols.bgp.rtconf.base import validate_soo_list
LOG = logging.getLogger('bgpspeaker.rtconf.vrfs')
# Configuration setting names.
ROUTE_DISTINGUISHER = 'route_dist'
IMPORT_RTS = 'import_rts'
EXPORT_RTS = 'export_rts'
VRF_NAME = 'vrf_name'
VRF_DESC = 'vrf_desc'
VRF_RF = 'route_family'
IMPORT_MAPS = 'import_maps'
# Two supported VRF route-families
VRF_RF_IPV6 = 'ipv6'
VRF_RF_IPV4 = 'ipv4'
SUPPORTED_VRF_RF = (VRF_RF_IPV4, VRF_RF_IPV6)
# Default configuration values.
DEFAULT_VRF_NAME = 'no-vrf-name'
DEFAULT_VRF_DESC = 'no-vrf-desc'
@validate(name=IMPORT_RTS)
def validate_import_rts(import_rts):
if not isinstance(import_rts, list):
raise ConfigTypeError(conf_name=IMPORT_RTS, conf_value=import_rts)
if not (len(import_rts) <= MAX_NUM_IMPORT_RT):
raise ConfigValueError(desc='Max. import RT is limited to %s' %
MAX_NUM_IMPORT_RT)
if not all(validation.is_valid_ext_comm_attr(rt) for rt in import_rts):
raise ConfigValueError(conf_name=IMPORT_RTS, conf_value=import_rts)
# Check if we have duplicates
unique_rts = set(import_rts)
if len(unique_rts) != len(import_rts):
        raise ConfigValueError(desc='Duplicate value provided in %s' %
(import_rts))
return import_rts
@validate(name=EXPORT_RTS)
def validate_export_rts(export_rts):
if not isinstance(export_rts, list):
raise ConfigTypeError(conf_name=EXPORT_RTS, conf_value=export_rts)
if not (len(export_rts) <= MAX_NUM_EXPORT_RT):
        raise ConfigValueError(desc='Max. export RT is limited to %s' %
MAX_NUM_EXPORT_RT)
if not all(validation.is_valid_ext_comm_attr(rt) for rt in export_rts):
raise ConfigValueError(conf_name=EXPORT_RTS, conf_value=export_rts)
# Check if we have duplicates
unique_rts = set(export_rts)
if len(unique_rts) != len(export_rts):
raise ConfigValueError(desc='Duplicate value provided in %s' %
(export_rts))
return export_rts
@validate(name=ROUTE_DISTINGUISHER)
def validate_rd(route_dist):
if not validation.is_valid_route_dist(route_dist):
raise ConfigValueError(conf_name=ROUTE_DISTINGUISHER,
conf_value=route_dist)
return route_dist
@validate(name=VRF_RF)
def validate_vrf_rf(vrf_rf):
if vrf_rf not in SUPPORTED_VRF_RF:
        raise ConfigValueError(desc='Given VRF route family %s is not '
'supported.' % vrf_rf)
return vrf_rf
class VrfConf(ConfWithId, ConfWithStats):
"""Class that encapsulates configurations for one VRF."""
VRF_CHG_EVT = 'vrf_chg_evt'
VALID_EVT = frozenset([VRF_CHG_EVT])
REQUIRED_SETTINGS = frozenset([ROUTE_DISTINGUISHER,
IMPORT_RTS,
EXPORT_RTS])
OPTIONAL_SETTINGS = frozenset(
[VRF_NAME, MULTI_EXIT_DISC, SITE_OF_ORIGINS, VRF_RF, IMPORT_MAPS]
)
def __init__(self, **kwargs):
"""Create an instance of VRF runtime configuration."""
super(VrfConf, self).__init__(**kwargs)
def _init_opt_settings(self, **kwargs):
super(VrfConf, self)._init_opt_settings(**kwargs)
# We do not have valid default MED value.
# If no MED attribute is provided then we do not have to use MED.
# If MED attribute is provided we have to validate it and use it.
med = kwargs.pop(MULTI_EXIT_DISC, None)
if med and validate_med(med):
self._settings[MULTI_EXIT_DISC] = med
# We do not have valid default SOO value.
# If no SOO attribute is provided then we do not have to use SOO.
# If SOO attribute is provided we have to validate it and use it.
soos = kwargs.pop(SITE_OF_ORIGINS, None)
if soos and validate_soo_list(soos):
self._settings[SITE_OF_ORIGINS] = soos
        # Currently we only support VRF for IPv4 and IPv6, with IPv4 as the default
vrf_rf = kwargs.pop(VRF_RF, VRF_RF_IPV4)
if vrf_rf and validate_vrf_rf(vrf_rf):
self._settings[VRF_RF] = vrf_rf
import_maps = kwargs.pop(IMPORT_MAPS, [])
self._settings[IMPORT_MAPS] = import_maps
# =========================================================================
# Required attributes
# =========================================================================
@property
def route_dist(self):
return self._settings[ROUTE_DISTINGUISHER]
# =========================================================================
# Optional attributes with valid defaults.
# =========================================================================
@property
def import_rts(self):
return list(self._settings[IMPORT_RTS])
@property
def export_rts(self):
return list(self._settings[EXPORT_RTS])
@property
def soo_list(self):
soos = self._settings.get(SITE_OF_ORIGINS)
if soos:
soos = list(soos)
else:
soos = []
return soos
@property
def multi_exit_disc(self):
"""Returns configured value of MED, else None.
        This configuration does not have a default value.
"""
return self._settings.get(MULTI_EXIT_DISC)
@property
def route_family(self):
"""Returns configured route family for this VRF
This configuration does not change.
"""
return self._settings.get(VRF_RF)
@property
def rd_rf_id(self):
return VrfConf.create_rd_rf_id(self.route_dist, self.route_family)
@property
def import_maps(self):
return self._settings.get(IMPORT_MAPS)
@staticmethod
def create_rd_rf_id(route_dist, route_family):
return route_dist, route_family
@staticmethod
def vrf_rf_2_rf(vrf_rf):
if vrf_rf == VRF_RF_IPV4:
return RF_IPv4_UC
elif vrf_rf == VRF_RF_IPV6:
return RF_IPv6_UC
else:
raise ValueError('Unsupported VRF route family given %s' % vrf_rf)
@staticmethod
def rf_2_vrf_rf(route_family):
if route_family == RF_IPv4_UC:
return VRF_RF_IPV4
elif route_family == RF_IPv6_UC:
return VRF_RF_IPV6
else:
raise ValueError('No supported mapping for route family '
'to vrf_route_family exists for %s' %
route_family)
@property
def settings(self):
"""Returns a copy of current settings.
As some of the attributes are themselves containers, we clone the
settings to provide clones for those containers as well.
"""
# Shallow copy first
cloned_setting = self._settings.copy()
# Don't want clone to link to same RT containers
cloned_setting[IMPORT_RTS] = self.import_rts
cloned_setting[EXPORT_RTS] = self.export_rts
cloned_setting[SITE_OF_ORIGINS] = self.soo_list
return cloned_setting
@classmethod
def get_opt_settings(cls):
self_confs = super(VrfConf, cls).get_opt_settings()
self_confs.update(VrfConf.OPTIONAL_SETTINGS)
return self_confs
@classmethod
def get_req_settings(cls):
self_confs = super(VrfConf, cls).get_req_settings()
self_confs.update(VrfConf.REQUIRED_SETTINGS)
return self_confs
@classmethod
def get_valid_evts(cls):
self_valid_evts = super(VrfConf, cls).get_valid_evts()
self_valid_evts.update(VrfConf.VALID_EVT)
return self_valid_evts
def update(self, **kwargs):
"""Updates this `VrfConf` settings.
Notifies listeners if any settings changed. Returns `True` if update
        was successful. This VRF's route family, id and route dist settings
cannot be updated/changed.
"""
# Update inherited configurations
super(VrfConf, self).update(**kwargs)
vrf_id = kwargs.get(ConfWithId.ID)
vrf_rd = kwargs.get(ROUTE_DISTINGUISHER)
vrf_rf = kwargs.get(VRF_RF)
if (vrf_id != self.id or
vrf_rd != self.route_dist or
vrf_rf != self.route_family):
raise ConfigValueError(desc='id/route-distinguisher/route-family'
' do not match configured value.')
# Validate and update individual settings
new_imp_rts, old_imp_rts = \
self._update_import_rts(**kwargs)
export_rts_changed = self._update_export_rts(**kwargs)
soos_list_changed = self._update_soo_list(**kwargs)
med_changed = self._update_med(**kwargs)
re_export_needed = (export_rts_changed or
soos_list_changed or
med_changed)
import_maps = kwargs.get(IMPORT_MAPS, [])
re_import_needed = self._update_importmaps(import_maps)
        # If any of the settings changed in value, we notify listeners.
if (new_imp_rts is not None or
old_imp_rts is not None or
re_export_needed or re_import_needed):
evt_value = (
new_imp_rts,
old_imp_rts,
import_maps,
re_export_needed,
re_import_needed
)
self._notify_listeners(VrfConf.VRF_CHG_EVT, evt_value)
return True
def _update_import_rts(self, **kwargs):
import_rts = kwargs.get(IMPORT_RTS)
get_validator(IMPORT_RTS)(import_rts)
curr_import_rts = set(self._settings[IMPORT_RTS])
import_rts = set(import_rts)
if not import_rts.symmetric_difference(curr_import_rts):
return (None, None)
# Get the difference between current and new RTs
new_import_rts = import_rts - curr_import_rts
old_import_rts = curr_import_rts - import_rts
# Update current RTs and notify listeners.
self._settings[IMPORT_RTS] = import_rts
return (new_import_rts, old_import_rts)
def _update_export_rts(self, **kwargs):
export_rts = kwargs.get(EXPORT_RTS)
get_validator(EXPORT_RTS)(export_rts)
curr_export_rts = set(self._settings[EXPORT_RTS])
if curr_export_rts.symmetric_difference(export_rts):
# Update current RTs and notify listeners.
self._settings[EXPORT_RTS] = list(export_rts)
return True
return False
def _update_soo_list(self, **kwargs):
soo_list = kwargs.get(SITE_OF_ORIGINS, [])
get_validator(SITE_OF_ORIGINS)(soo_list)
curr_soos = set(self.soo_list)
# If given list is different from existing settings, we update it
if curr_soos.symmetric_difference(soo_list):
self._settings[SITE_OF_ORIGINS] = soo_list[:]
return True
return False
def _update_med(self, **kwargs):
multi_exit_disc = kwargs.get(MULTI_EXIT_DISC, None)
if multi_exit_disc:
get_validator(MULTI_EXIT_DISC)(multi_exit_disc)
if multi_exit_disc != self.multi_exit_disc:
self._settings[MULTI_EXIT_DISC] = multi_exit_disc
return True
return False
def _update_importmaps(self, import_maps):
if set(self._settings[IMPORT_MAPS]).symmetric_difference(import_maps):
self._settings[IMPORT_MAPS] = import_maps
return True
return False
def __repr__(self):
return ('<%s(route_dist: %r, import_rts: %r, export_rts: %r, '
'soo_list: %r)>' % (self.__class__.__name__,
self.route_dist, self.import_rts,
self.export_rts, self.soo_list))
def __str__(self):
return ('VrfConf-%s' % (self.route_dist))
class VrfsConf(BaseConf):
"""Container for all VRF configurations."""
ADD_VRF_CONF_EVT, REMOVE_VRF_CONF_EVT = range(2)
VALID_EVT = frozenset([ADD_VRF_CONF_EVT, REMOVE_VRF_CONF_EVT])
def __init__(self):
super(VrfsConf, self).__init__()
self._vrfs_by_rd_rf = {}
self._vrfs_by_id = {}
def _init_opt_settings(self, **kwargs):
pass
@property
def vrf_confs(self):
"""Returns a list of configured `VrfConf`s
"""
return self._vrfs_by_rd_rf.values()
@property
def vrf_interested_rts(self):
interested_rts = set()
for vrf_conf in self._vrfs_by_id.values():
interested_rts.update(vrf_conf.import_rts)
return interested_rts
def update(self, **kwargs):
raise NotImplementedError('Use either add/remove_vrf_conf'
' methods instead.')
def add_vrf_conf(self, vrf_conf):
if vrf_conf.rd_rf_id in self._vrfs_by_rd_rf.keys():
raise RuntimeConfigError(
desc='VrfConf with rd_rf %s already exists'
% str(vrf_conf.rd_rf_id)
)
if vrf_conf.id in self._vrfs_by_id:
raise RuntimeConfigError(
desc='VrfConf with id %s already exists' % str(vrf_conf.id)
)
self._vrfs_by_rd_rf[vrf_conf.rd_rf_id] = vrf_conf
self._vrfs_by_id[vrf_conf.id] = vrf_conf
self._notify_listeners(VrfsConf.ADD_VRF_CONF_EVT, vrf_conf)
def remove_vrf_conf(self, route_dist=None, vrf_id=None,
vrf_rf=None):
"""Removes any matching `VrfConf` for given `route_dist` or `vrf_id`
        Parameters:
- `route_dist`: (str) route distinguisher of a configured VRF
- `vrf_id`: (str) vrf ID
- `vrf_rf`: (str) route family of the VRF configuration
If only `route_dist` is given, removes `VrfConf`s for all supported
        address families for this `route_dist`. If `vrf_rf` is given, then only
removes `VrfConf` for that specific route family. If only `vrf_id` is
given, matching `VrfConf` will be removed.
"""
if route_dist is None and vrf_id is None:
raise RuntimeConfigError(desc='To delete supply route_dist or id.')
# By default we remove all VRFs for given Id or RD
vrf_rfs = SUPPORTED_VRF_RF
# If asked to delete specific route family vrf conf.
if vrf_rf:
            vrf_rfs = (vrf_rf,)  # tuple, not the bare string
        # For every VRF route family asked to be deleted, collect the removed
        # VrfConfs.
removed_vrf_confs = []
for route_family in vrf_rfs:
if route_dist is not None:
rd_rf_id = VrfConf.create_rd_rf_id(route_dist, route_family)
vrf_conf = self._vrfs_by_rd_rf.pop(rd_rf_id, None)
if vrf_conf:
self._vrfs_by_id.pop(vrf_conf.id, None)
removed_vrf_confs.append(vrf_conf)
else:
vrf_conf = self._vrfs_by_id.pop(vrf_id, None)
if vrf_conf:
                    self._vrfs_by_rd_rf.pop(vrf_conf.rd_rf_id, None)
removed_vrf_confs.append(vrf_conf)
        # We do not raise any exception if we cannot find the requested VRF.
for vrf_conf in removed_vrf_confs:
self._notify_listeners(VrfsConf.REMOVE_VRF_CONF_EVT, vrf_conf)
return removed_vrf_confs
def get_vrf_conf(self, route_dist, vrf_rf, vrf_id=None):
if route_dist is None and vrf_id is None:
raise RuntimeConfigError(desc='To get VRF supply route_dist '
'or vrf_id.')
vrf = None
if route_dist is not None and vrf_id is not None:
vrf1 = self._vrfs_by_id.get(vrf_id)
rd_rf_id = VrfConf.create_rd_rf_id(route_dist, vrf_rf)
vrf2 = self._vrfs_by_rd_rf.get(rd_rf_id)
if vrf1 is not vrf2:
raise RuntimeConfigError(desc='Given VRF ID (%s) and RD (%s)'
' are not of same VRF.' %
(vrf_id, route_dist))
vrf = vrf1
elif route_dist is not None:
rd_rf_id = VrfConf.create_rd_rf_id(route_dist, vrf_rf)
vrf = self._vrfs_by_rd_rf.get(rd_rf_id)
else:
vrf = self._vrfs_by_id.get(vrf_id)
return vrf
@property
def vrfs_by_rd_rf_id(self):
return dict(self._vrfs_by_rd_rf)
@classmethod
    def get_valid_evts(cls):
        self_valid_evts = super(VrfsConf, cls).get_valid_evts()
self_valid_evts.update(VrfsConf.VALID_EVT)
return self_valid_evts
def __repr__(self):
return '<%s(%r)>' % (self.__class__.__name__, self._vrfs_by_id)
@property
def settings(self):
return [vrf.settings for vrf in self._vrfs_by_id.values()]
class VrfConfListener(ConfWithIdListener, ConfWithStatsListener):
"""Base listener for various VRF configuration change event."""
def __init__(self, vrf_conf):
super(VrfConfListener, self).__init__(vrf_conf)
vrf_conf.add_listener(VrfConf.VRF_CHG_EVT, self.on_chg_vrf_conf)
def on_chg_vrf_conf(self, evt):
raise NotImplementedError('This method should be overridden')
class VrfsConfListener(BaseConfListener):
"""Base listener for VRF container change events."""
def __init__(self, vrfs_conf):
super(VrfsConfListener, self).__init__(vrfs_conf)
vrfs_conf.add_listener(VrfsConf.ADD_VRF_CONF_EVT, self.on_add_vrf_conf)
vrfs_conf.add_listener(VrfsConf.REMOVE_VRF_CONF_EVT,
self.on_remove_vrf_conf)
@abc.abstractmethod
def on_add_vrf_conf(self, evt):
raise NotImplementedError('This method should be overridden')
@abc.abstractmethod
def on_remove_vrf_conf(self, evt):
raise NotImplementedError('This method should be overridden')
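# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): a rough illustration
# of how VrfsConf and VrfConf fit together. The route distinguisher and
# route-target values are made up for the example, and the helper is only
# defined, never invoked, so importing the module is unaffected.
def _example_vrf_configuration():
    vrfs = VrfsConf()
    vrf = VrfConf(route_dist='65000:100',
                  import_rts=['65000:100'],
                  export_rts=['65000:100'],
                  route_family=VRF_RF_IPV4)
    vrfs.add_vrf_conf(vrf)
    # Look the configuration back up by (route_dist, route_family).
    return vrfs.get_vrf_conf('65000:100', VRF_RF_IPV4)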
|
{
"content_hash": "16567f06ea844528c4e8f70ee6462b61",
"timestamp": "",
"source": "github",
"line_count": 528,
"max_line_length": 79,
"avg_line_length": 36.515151515151516,
"alnum_prop": 0.6009854771784232,
"repo_name": "ynkjm/ryu",
"id": "9cbfe998b112d5f05a73a60e2e7b5f71df70e32d",
"size": "19893",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "ryu/services/protocols/bgp/rtconf/vrfs.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "8269"
},
{
"name": "CSS",
"bytes": "306"
},
{
"name": "Erlang",
"bytes": "871862"
},
{
"name": "HTML",
"bytes": "306"
},
{
"name": "JavaScript",
"bytes": "8436"
},
{
"name": "Makefile",
"bytes": "1213"
},
{
"name": "Python",
"bytes": "5226046"
},
{
"name": "Shell",
"bytes": "14253"
}
],
"symlink_target": ""
}
|
from os import listdir
import os
lis1 = [f for f in listdir("set4 files")]
lis2 = [f for f in listdir("transcript")]
print (lis1)
for i in lis1:
if i in lis2:
os.remove('transcript/'+i)
|
{
"content_hash": "2ea701e0b6edcf86feaa566aa97ce2c3",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 41,
"avg_line_length": 23.625,
"alnum_prop": 0.6825396825396826,
"repo_name": "amudalab/concept-graphs",
"id": "1d33fead058c668628e2c122efdc2846f0b210fc",
"size": "189",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "keyphrase/keyphrase/rem.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1357"
},
{
"name": "Processing",
"bytes": "9122"
},
{
"name": "Python",
"bytes": "481015"
},
{
"name": "Shell",
"bytes": "18777"
}
],
"symlink_target": ""
}
|
from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer
sentences = ["VADER is smart, handsome, and funny.", # positive sentence example
"VADER is not smart, handsome, nor funny.", # negation sentence example
"VADER is smart, handsome, and funny!", # punctuation emphasis handled correctly (sentiment intensity adjusted)
"VADER is very smart, handsome, and funny.", # booster words handled correctly (sentiment intensity adjusted)
"VADER is VERY SMART, handsome, and FUNNY.", # emphasis for ALLCAPS handled
"VADER is VERY SMART, handsome, and FUNNY!!!",# combination of signals - VADER appropriately adjusts intensity
"VADER is VERY SMART, uber handsome, and FRIGGIN FUNNY!!!",# booster words & punctuation make this close to ceiling for score
"The book was good.", # positive sentence
"The book was kind of good.", # qualified positive sentence is handled correctly (intensity adjusted)
"The plot was good, but the characters are uncompelling and the dialog is not great.", # mixed negation sentence
"At least it isn't a horrible book.", # negated negative sentence with contraction
"Make sure you :) or :D today!", # emoticons handled
"Today SUX!", # negative slang with capitalization emphasis
"Today only kinda sux! But I'll get by, lol" # mixed sentiment example with slang and constrastive conjunction "but"
]
analyzer = SentimentIntensityAnalyzer()
for sentence in sentences:
vs = analyzer.polarity_scores(sentence)
print("{:-<65} {}".format(sentence, str(vs)))
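# ---------------------------------------------------------------------------
# Hedged follow-up (not part of the library output above): the VADER README
# conventionally buckets the normalized 'compound' score with cut-offs of
# roughly +/-0.05; treat the exact thresholds as an assumption of this
# example rather than something the analyzer itself enforces.
def label_compound(compound, threshold=0.05):
    if compound >= threshold:
        return "positive"
    if compound <= -threshold:
        return "negative"
    return "neutral"

for sentence in sentences:
    vs = analyzer.polarity_scores(sentence)
    print("{:-<65} {}".format(sentence, label_compound(vs["compound"])))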
|
{
"content_hash": "723b72f6edfc76d4376064a0225af8ae",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 133,
"avg_line_length": 71.1304347826087,
"alnum_prop": 0.6968215158924206,
"repo_name": "steinnp/Big-Data-Final",
"id": "a83bc6eff2ac01492d7045623563d601e8e50af2",
"size": "1636",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "TextCleaning/vader.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "3214"
},
{
"name": "Python",
"bytes": "51193"
}
],
"symlink_target": ""
}
|
import sys, os
#get path of script
_script_path = os.path.realpath(__file__)
_script_dir = os.path.dirname(_script_path)
pyWolfPath = _script_dir
if sys.platform == "linux" or sys.platform == "linux2":
print "Linux not tested yet"
elif sys.platform == "darwin":
print "OS X not tested yet"
elif sys.platform == "win32":
pyWolfPath = pyWolfPath + "\\..\\..\\..\\..\\bin\\x64\\Debug\\Win32\\"
if pyWolfPath != "" and (not pyWolfPath in sys.path):
sys.path.append(pyWolfPath)
import ctypes, threading, pyWolf
from PySide import QtGui, QtCore
from PySide.QtGui import *
from PySide.QtCore import *
screen_width = 800
screen_height = 600
class scene(QWidget):
def __init__(self, pContentPath, pLogPath, pAppName, parent = None):
super(scene, self).__init__(parent)
self.__exiting = False
self._game = pyWolf.framework.w_game(pContentPath, pLogPath, pAppName)
self._game.set_pre_init_callback(self.pre_init)
self._game.set_post_init_callback(self.post_init)
self._game.set_load_callback(self.load)
self._game.set_update_callback(self.update)
self._game.set_pre_render_callback(self.pre_render)
self._game.set_post_render_callback(self.post_render)
self._gDevice = None
self._viewport = pyWolf.graphics.w_viewport()
self._viewport_scissor = pyWolf.graphics.w_viewport_scissor()
self._draw_command_buffers = pyWolf.graphics.w_command_buffers()
self._draw_render_pass = pyWolf.graphics.w_render_pass()
self._draw_fence = pyWolf.graphics.w_fences()
self._draw_semaphore = pyWolf.graphics.w_semaphore()
#++++++++++++++++++++++++++++++++++++++++++++++++++++
#The following codes have been added for this project
#++++++++++++++++++++++++++++++++++++++++++++++++++++
self._shader = pyWolf.graphics.w_shader()
self._pipeline = pyWolf.graphics.w_pipeline()
#++++++++++++++++++++++++++++++++++++++++++++++++++++
#++++++++++++++++++++++++++++++++++++++++++++++++++++
_config = pyWolf.graphics.w_graphics_device_manager_configs()
_config.debug_gpu = False
self._game.set_graphics_device_manager_configs(_config)
def pre_init(self):
print "pre_init"
def post_init(self):
#get main graphics device
self._gDevice = self._game.get_graphics_device(0)
print self._gDevice.get_info()
print "post_init"
def load(self):
#initialize viewport
self._viewport.y = 0
self._viewport.width = screen_width
self._viewport.height = screen_height
self._viewport.minDepth = 0
self._viewport.maxDepth = 1
#initialize scissor of viewport
self._viewport_scissor.offset.x = 0
self._viewport_scissor.offset.y = 0
self._viewport_scissor.extent.width = screen_width
self._viewport_scissor.extent.height = screen_height
#load render pass which contains frame buffers
_render_pass_attachments = []
_output_window = self._gDevice.output_presentation_window
for _iter in _output_window.swap_chain_image_views:
# COLOR #DEPTH
_render_pass_attachments.append([_iter, _output_window.depth_buffer_image_view])
_hr = self._draw_render_pass.load(self._gDevice, self._viewport, self._viewport_scissor, _render_pass_attachments)
if _hr:
print "Error on loading render pass"
sys.exit(1)
#create one semaphore for drawing
_hr = self._draw_semaphore.initialize(self._gDevice)
if _hr:
print "Error on initializing semaphore"
sys.exit(1)
#create one fence for drawing
_hr = self._draw_fence.initialize(self._gDevice, 1)
if _hr:
print "Error on initializing fence(s)"
sys.exit(1)
#create one fence for drawing
number_of_swap_chains = self._gDevice.get_number_of_swap_chains()
_hr = self._draw_command_buffers.load(self._gDevice, number_of_swap_chains, pyWolf.graphics.w_command_buffer_level.PRIMARY)
if _hr:
print "Error on initializing draw command buffer(s)"
sys.exit(1)
#++++++++++++++++++++++++++++++++++++++++++++++++++++
#The following codes have been added for this project
#++++++++++++++++++++++++++++++++++++++++++++++++++++
#loading vertex shader
_content_path_dir = _script_dir + "/content/"
_hr = self._shader.load(self._gDevice, _content_path_dir + "shaders/shader.vert.spv", pyWolf.graphics.w_shader_stage_flag_bits.VERTEX_SHADER, "main")
if _hr:
print "Error on loading vertex shader"
sys.exit(1)
#loading fragment shader
_hr = self._shader.load(self._gDevice, _content_path_dir + "shaders/shader.frag.spv", pyWolf.graphics.w_shader_stage_flag_bits.FRAGMENT_SHADER, "main")
if _hr:
print "Error on loading fragment shader"
sys.exit(1)
#create pipeline
_vba = pyWolf.graphics.w_vertex_binding_attributes()
_hr = self._pipeline.load(self._gDevice, _vba, pyWolf.graphics.w_primitive_topology.TRIANGLE_LIST, self._draw_render_pass, self._shader, [self._viewport], [ self._viewport_scissor ])
if _hr:
print "Error on creating pipeline"
sys.exit(1)
#++++++++++++++++++++++++++++++++++++++++++++++++++++
#++++++++++++++++++++++++++++++++++++++++++++++++++++
_hr = self.build_command_buffers()
if _hr:
print "Error on building draw command buffer(s)"
sys.exit(1)
print "scene loaded successfully"
def build_command_buffers(self):
_hr = pyWolf.W_PASSED
_size = self._draw_command_buffers.get_commands_size()
for i in xrange(_size):
_cmd = self._draw_command_buffers.get_command_at(i)
_hr = self._draw_command_buffers.begin(i, pyWolf.graphics.w_command_buffer_usage_flag_bits.SIMULTANEOUS_USE_BIT)
if _hr:
print "Error on begining command buffer: " + str(i)
break
self._draw_render_pass.begin(i, _cmd, pyWolf.system.w_color.CORNFLOWER_BLUE(), 1.0, 0)
#place your draw code
self._pipeline.bind(_cmd)
self._gDevice.draw(_cmd, 3, 1, 0, 0)
self._draw_render_pass.end(_cmd)
_hr = self._draw_command_buffers.end(i)
if _hr:
print "Error on ending command buffer: " + str(i)
break
return _hr
def update(self, pGameTime):
print "fps: " + str(pGameTime.get_frames_per_second())
def pre_render(self, pGameTime):
_output_window = self._gDevice.output_presentation_window
_frame_index = _output_window.swap_chain_image_index
_wait_dst_stage_mask = [ pyWolf.graphics.w_pipeline_stage_flag_bits.COLOR_ATTACHMENT_OUTPUT_BIT ]
_wait_semaphores = [ _output_window.swap_chain_image_is_available_semaphore ]
_signal_semaphores = [ _output_window.rendering_done_semaphore ]
_cmd = self._draw_command_buffers.get_command_at(_frame_index)
_cmd_buffers = [_cmd]
#reset draw fence
self._draw_fence.reset()
_hr = self._gDevice.submit(_cmd_buffers, self._gDevice.graphics_queue, _wait_dst_stage_mask, _wait_semaphores, _signal_semaphores, self._draw_fence)
if _hr:
print "Error on submit to graphics device"
return
_hr = self._draw_fence.wait()
if _hr:
print "Error on waiting for draw fence"
return
def post_render(self, pSuccessfullyRendered):
if pSuccessfullyRendered == False:
print "Rendered Unsuccessfully"
def run(self):
#run game
_window_info = pyWolf.system.w_window_info()
_window_info.width = self.width()
_window_info.height = self.height()
_window_info.v_sync_enable = False
_window_info.is_full_screen = False
_window_info.swap_chain_format = 44 # BGRA8Unorm in VULKAN
_window_info.cpu_access_swap_chain_buffer = False
# get window handle
pycobject_hwnd = self.winId()
        #convert the window handle (HWND) to an unsigned integer pointer for C++
ctypes.pythonapi.PyCObject_AsVoidPtr.restype = ctypes.c_void_p
ctypes.pythonapi.PyCObject_AsVoidPtr.argtypes = [ctypes.py_object]
int_hwnd = ctypes.pythonapi.PyCObject_AsVoidPtr(pycobject_hwnd)
_window_info.set_win_id(int_hwnd)
#initialize game
_map_info = (0, _window_info)
while True:
if self.__exiting:
self.release()
break
self._game.run(_map_info)
print "Game exited"
def showEvent(self, event):
#run in another thread
threading.Thread(target=self.run).start()
event.accept()
def closeEvent(self, event):
self.__exiting = True
event.accept()
def release(self):
self._draw_fence.release()
self._draw_fence = None
self._draw_semaphore.release()
self._draw_semaphore = None
self._draw_command_buffers.release()
self._draw_command_buffers = None
self._draw_render_pass.release()
self._draw_render_pass = None
#++++++++++++++++++++++++++++++++++++++++++++++++++++
#The following codes have been added for this project
#++++++++++++++++++++++++++++++++++++++++++++++++++++
self._shader.release()
self._shader = None
self._pipeline.release()
self._pipeline = None
#++++++++++++++++++++++++++++++++++++++++++++++++++++
#++++++++++++++++++++++++++++++++++++++++++++++++++++
self._game.release()
self._game = None
self._gDevice = None
self._viewport = None
self._viewport_scissor = None
if __name__ == '__main__':
# Create a Qt application
app = QApplication(sys.argv)
scene = scene(pyWolfPath + "..\\..\\..\\..\\content\\",
pyWolfPath,
"py_02_shader")
scene.resize(screen_width, screen_height)
scene.setWindowTitle('Wolf.Engine')
scene.show()
sys.exit(app.exec_())
|
{
"content_hash": "a7eaf5f29a1fa09b74570a137ad228e7",
"timestamp": "",
"source": "github",
"line_count": 276,
"max_line_length": 190,
"avg_line_length": 37.61231884057971,
"alnum_prop": 0.5675753780945959,
"repo_name": "PooyaEimandar/WolfEngine",
"id": "7049b619e7a5bd7fd44906dca7c476d4d8d1741b",
"size": "10381",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "samples/02_basics/02_shader/src/02_shader.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "5545"
},
{
"name": "C++",
"bytes": "317259"
},
{
"name": "Objective-C",
"bytes": "1179"
},
{
"name": "Python",
"bytes": "1532"
}
],
"symlink_target": ""
}
|
import sys
import os
PATH = os.path.dirname(__file__)
sys.path.append(PATH + "/..")
from mobula.Defines import *
import numpy as np
import random
random.seed(1019)
np.random.seed(1019)
def test_layer_y(layer, X):
from mobula.layers import Data
data = Data(X, "data")
data.reshape()
l = layer(data, "testLayer")
l.reshape()
data.forward()
l.forward()
l.dY = np.ones(l.Y.shape)
l.backward()
return l.Y, l.dX
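# ---------------------------------------------------------------------------
# Hedged usage note (not part of the original helper): a typical call site
# looks roughly like the lines below, where SomeLayer stands for whichever
# class under mobula.layers a given test exercises (its constructor is
# assumed to accept (input_layer, name) as used above).
#
#   from mobula.layers import SomeLayer
#   X = np.random.random((4, 3))
#   Y, dX = test_layer_y(SomeLayer, X)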
|
{
"content_hash": "094b8bce434033fb03d55bb2614838f5",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 34,
"avg_line_length": 18.08,
"alnum_prop": 0.6327433628318584,
"repo_name": "wkcn/mobula",
"id": "721e5fd351925588eadfd89c007254a4ffe7c86b",
"size": "452",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/defines.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "131507"
}
],
"symlink_target": ""
}
|
import argparse
import tomviz
def main():
parser = argparse.ArgumentParser(
description='Tomviz acquisition server.')
parser.add_argument('-a', '--adapter', help='source adapter to install')
parser.add_argument('-i', '--host', default='localhost',
help='on what interface the server should run')
parser.add_argument('-p', '--port',
help='on what port the server should run', default=8080)
parser.add_argument('-d', '--debug', help='turn on debug mode',
action='store_true')
parser.add_argument('-e', '--dev', help='turn on dev mode',
action='store_true')
parser.add_argument('-r', '--redirect',
help='redirect stdout/stderr to log',
action='store_true')
args = parser.parse_args()
if args.redirect:
tomviz.setup_std_loggers()
from tomviz.acquisition import server
tomviz.setup_loggers(args.debug)
server_params = vars(args)
del server_params['redirect']
server.start(**server_params)
if __name__ == '__main__':
main()
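# Hedged usage note: with the package importable, the server can be launched
# from a shell roughly as below (flags as parsed above; the host, port and
# adapter values are purely illustrative):
#
#   python -m tomviz --host 0.0.0.0 --port 8080 --debug
#   python -m tomviz -a mypackage.MyAdapter --redirect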
|
{
"content_hash": "afa5063e01addfcb2375a120382d4315",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 80,
"avg_line_length": 32.74285714285714,
"alnum_prop": 0.5759162303664922,
"repo_name": "cjh1/tomviz",
"id": "2e5d8c18f8293f35b598bd6a67edcc0cd9243bc1",
"size": "1146",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "acquisition/tomviz/__main__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C++",
"bytes": "1339808"
},
{
"name": "CMake",
"bytes": "35873"
},
{
"name": "Python",
"bytes": "259761"
},
{
"name": "Shell",
"bytes": "302"
}
],
"symlink_target": ""
}
|
"""Handling of RSA and DSA keys.
This module is unstable.
Maintainer: U{Paul Swartz<mailto:z3p@twistedmatrix.com>}
"""
# base library imports
import base64
import string
import sha, md5
# external library imports
from Crypto.Cipher import DES3
from Crypto.PublicKey import RSA, DSA
from Crypto import Util
#twisted
from twisted.python import log
# sibling imports
import asn1, common, sexpy
class BadKeyError(Exception):
"""
raised when a key isn't what we expected from it.
XXX: we really need to check for bad keys
"""
def getPublicKeyString(filename = None, line = 0, data = ''):
"""
Return a public key string given a filename or data of a public key.
Currently handles OpenSSH and LSH keys.
@type filename: C{str}
@type line: C{int}
@type data: C{str}
@rtype: C{str}
"""
if filename:
lines = open(filename).readlines()
data = lines[line]
if data[0] == '{': # lsh key
return getPublicKeyString_lsh(data)
elif data.startswith('ssh-'): # openssh key
return getPublicKeyString_openssh(data)
else:
raise BadKeyError('unknown type of key')
def getPublicKeyString_lsh(data):
sexp = sexpy.parse(base64.decodestring(data[1:-1]))
assert sexp[0] == 'public-key'
kd = {}
for name, data in sexp[1][1:]:
kd[name] = common.NS(data)
if sexp[1][0] == 'dsa':
assert len(kd) == 4, len(kd)
return '\x00\x00\x00\x07ssh-dss' + kd['p'] + kd['q'] + kd['g'] + kd['y']
elif sexp[1][0] == 'rsa-pkcs1-sha1':
assert len(kd) == 2, len(kd)
return '\x00\x00\x00\x07ssh-rsa' + kd['e'] + kd['n']
else:
raise BadKeyError('unknown lsh key type %s' % sexp[1][0])
def getPublicKeyString_openssh(data):
fileKind, fileData = data.split()[:2]
# if fileKind != kind:
# raise BadKeyError, 'key should be %s but instead is %s' % (kind, fileKind)
return base64.decodestring(fileData)
def makePublicKeyString(obj, comment = '', kind = 'openssh'):
"""
    Return a public key string given a C{Crypto.PublicKey.pubkey.pubkey}
object.
kind is one of ('openssh', 'lsh')
@type obj: C{Crypto.PublicKey.pubkey.pubkey}
@type comment: C{str}
@type kind: C{str}
@rtype: C{str}
"""
if kind == 'lsh':
return makePublicKeyString_lsh(obj) # no comment
elif kind == 'openssh':
return makePublicKeyString_openssh(obj, comment)
else:
raise BadKeyError('bad kind %s' % kind)
def makePublicKeyString_lsh(obj):
keyType = objectType(obj)
if keyType == 'ssh-rsa':
keyData = sexpy.pack([['public-key', ['rsa-pkcs1-sha1',
['n', common.MP(obj.n)[4:]],
['e', common.MP(obj.e)[4:]]]]])
elif keyType == 'ssh-dss':
keyData = sexpy.pack([['public-key', ['dsa',
['p', common.MP(obj.p)[4:]],
['q', common.MP(obj.q)[4:]],
['g', common.MP(obj.g)[4:]],
['y', common.MP(obj.y)[4:]]]]])
else:
raise BadKeyError('bad keyType %s' % keyType)
return '{' + base64.encodestring(keyData).replace('\n','') + '}'
def makePublicKeyString_openssh(obj, comment):
keyType = objectType(obj)
if keyType == 'ssh-rsa':
keyData = common.MP(obj.e) + common.MP(obj.n)
elif keyType == 'ssh-dss':
keyData = common.MP(obj.p)
keyData += common.MP(obj.q)
keyData += common.MP(obj.g)
keyData += common.MP(obj.y)
else:
raise BadKeyError('unknown key type %s' % keyType)
b64Data = base64.encodestring(common.NS(keyType)+keyData).replace('\n', '')
return '%s %s %s' % (keyType, b64Data, comment)
def getPublicKeyObject(data):
"""
Return a C{Crypto.PublicKey.pubkey.pubkey} corresponding to the SSHv2
public key data. data is in the over-the-wire public key format.
@type data: C{str}
@rtype: C{Crypto.PublicKey.pubkey.pubkey}
"""
keyKind, rest = common.getNS(data)
if keyKind == 'ssh-rsa':
e, rest = common.getMP(rest)
n, rest = common.getMP(rest)
return RSA.construct((n, e))
elif keyKind == 'ssh-dss':
p, rest = common.getMP(rest)
q, rest = common.getMP(rest)
g, rest = common.getMP(rest)
y, rest = common.getMP(rest)
return DSA.construct((y, g, p, q))
else:
raise BadKeyError('unknown key type %s' % keyKind)
def getPrivateKeyObject(filename = None, data = '', passphrase = ''):
"""
Return a C{Crypto.PublicKey.pubkey.pubkey} object corresponding to the
private key file/data. If the private key is encrypted, passphrase B{must}
    be specified, otherwise a L{BadKeyError} will be raised.
@type filename: C{str}
@type data: C{str}
@type passphrase: C{str}
@raises BadKeyError: if the key is invalid or a passphrase is not specified
"""
if filename:
data = open(filename).readlines()
else:
data = [x+'\n' for x in data.split('\n')]
if data[0][0] == '(': # lsh key
return getPrivateKeyObject_lsh(data, passphrase)
elif data[0].startswith('-----'): # openssh key
return getPrivateKeyObject_openssh(data, passphrase)
elif data[0].startswith('ssh-'): # agent v3 private key
return getPrivateKeyObject_agentv3(data, passphrase)
else:
raise BadKeyError('unknown private key type')
def getPrivateKeyObject_lsh(data, passphrase):
#assert passphrase == ''
data = ''.join(data)
sexp = sexpy.parse(data)
assert sexp[0] == 'private-key'
kd = {}
for name, data in sexp[1][1:]:
kd[name] = common.getMP(common.NS(data))[0]
if sexp[1][0] == 'dsa':
assert len(kd) == 5, len(kd)
return DSA.construct((kd['y'], kd['g'], kd['p'], kd['q'], kd['x']))
elif sexp[1][0] == 'rsa-pkcs1':
assert len(kd) == 8, len(kd)
return RSA.construct((kd['n'], kd['e'], kd['d'], kd['p'], kd['q']))
else:
raise BadKeyError('unknown lsh key type %s' % sexp[1][0])
def getPrivateKeyObject_openssh(data, passphrase):
kind = data[0][11: 14]
if data[1].startswith('Proc-Type: 4,ENCRYPTED'): # encrypted key
ivdata = data[2].split(',')[1][:-1]
iv = ''.join([chr(int(ivdata[i:i+2],16)) for i in range(0, len(ivdata), 2)])
if not passphrase:
raise BadKeyError, 'encrypted key with no passphrase'
ba = md5.new(passphrase + iv).digest()
bb = md5.new(ba + passphrase + iv).digest()
decKey = (ba + bb)[:24]
b64Data = base64.decodestring(''.join(data[4:-1]))
keyData = DES3.new(decKey, DES3.MODE_CBC, iv).decrypt(b64Data)
removeLen = ord(keyData[-1])
keyData = keyData[:-removeLen]
else:
keyData = base64.decodestring(''.join(data[1:-1]))
try:
decodedKey = asn1.parse(keyData)
except Exception, e:
raise BadKeyError, 'something wrong with decode'
if type(decodedKey[0]) == type([]):
decodedKey = decodedKey[0] # this happens with encrypted keys
if kind == 'RSA':
n,e,d,p,q=decodedKey[1:6]
return RSA.construct((n,e,d,p,q))
elif kind == 'DSA':
p, q, g, y, x = decodedKey[1: 6]
return DSA.construct((y, g, p, q, x))
def getPrivateKeyObject_agentv3(data, passphrase):
if passphrase:
raise BadKeyError("agent v3 key should not be encrypted")
keyType, data = common.getNS(data)
if keyType == 'ssh-dss':
p, data = common.getMP(data)
q, data = common.getMP(data)
g, data = common.getMP(data)
y, data = common.getMP(data)
x, data = common.getMP(data)
return DSA.construct((y,g,p,q,x))
elif keyType == 'ssh-rsa':
e, data = common.getMP(data)
d, data = common.getMP(data)
n, data = common.getMP(data)
u, data = common.getMP(data)
p, data = common.getMP(data)
q, data = common.getMP(data)
return RSA.construct((n,e,d,p,q,u))
else:
raise BadKeyError("unknown key type %s" % keyType)
def makePrivateKeyString(obj, passphrase = None, kind = 'openssh'):
"""
    Return a private key string for a
C{Crypto.PublicKey.pubkey.pubkey} object. If passphrase is given, encrypt
the private key with it.
kind is one of ('openssh', 'lsh', 'agentv3')
@type obj: C{Crypto.PublicKey.pubkey.pubkey}
@type passphrase: C{str}/C{None}
@type kind: C{str}
@rtype: C{str}
"""
if kind == 'lsh':
return makePrivateKeyString_lsh(obj, passphrase)
elif kind == 'openssh':
return makePrivateKeyString_openssh(obj, passphrase)
elif kind == 'agentv3':
return makePrivateKeyString_agentv3(obj, passphrase)
else:
raise BadKeyError('bad kind %s' % kind)
def makePrivateKeyString_lsh(obj, passphrase):
if passphrase:
raise BadKeyError("cannot encrypt to lsh format")
keyType = objectType(obj)
if keyType == 'ssh-rsa':
p,q=obj.p,obj.q
if p > q:
(p,q)=(q,p)
return sexpy.pack([['private-key', ['rsa-pkcs1',
['n', common.MP(obj.n)[4:]],
['e', common.MP(obj.e)[4:]],
['d', common.MP(obj.d)[4:]],
['p', common.MP(q)[4:]],
['q', common.MP(p)[4:]],
['a', common.MP(obj.d%(q-1))[4:]],
['b', common.MP(obj.d%(p-1))[4:]],
['c', common.MP(Util.number.inverse(p, q))[4:]]]]])
elif keyType == 'ssh-dss':
return sexpy.pack([['private-key', ['dsa',
['p', common.MP(obj.p)[4:]],
['q', common.MP(obj.q)[4:]],
['g', common.MP(obj.g)[4:]],
['y', common.MP(obj.y)[4:]],
['x', common.MP(obj.x)[4:]]]]])
else:
raise BadKeyError('bad keyType %s' % keyType)
def makePrivateKeyString_openssh(obj, passphrase):
keyType = objectType(obj)
if keyType == 'ssh-rsa':
keyData = '-----BEGIN RSA PRIVATE KEY-----\n'
p,q=obj.p,obj.q
if p > q:
(p,q) = (q,p)
# p is less than q
objData = [0, obj.n, obj.e, obj.d, q, p, obj.d%(q-1), obj.d%(p-1),Util.number.inverse(p, q)]
elif keyType == 'ssh-dss':
keyData = '-----BEGIN DSA PRIVATE KEY-----\n'
objData = [0, obj.p, obj.q, obj.g, obj.y, obj.x]
else:
raise BadKeyError('unknown key type %s' % keyType)
if passphrase:
iv = common.entropy.get_bytes(8)
hexiv = ''.join(['%02X' % ord(x) for x in iv])
keyData += 'Proc-Type: 4,ENCRYPTED\n'
keyData += 'DEK-Info: DES-EDE3-CBC,%s\n\n' % hexiv
ba = md5.new(passphrase + iv).digest()
bb = md5.new(ba + passphrase + iv).digest()
encKey = (ba + bb)[:24]
asn1Data = asn1.pack([objData])
if passphrase:
padLen = 8 - (len(asn1Data) % 8)
asn1Data += (chr(padLen) * padLen)
asn1Data = DES3.new(encKey, DES3.MODE_CBC, iv).encrypt(asn1Data)
b64Data = base64.encodestring(asn1Data).replace('\n','')
b64Data = '\n'.join([b64Data[i:i+64] for i in range(0,len(b64Data),64)])
keyData += b64Data + '\n'
if keyType == 'ssh-rsa':
keyData += '-----END RSA PRIVATE KEY-----'
elif keyType == 'ssh-dss':
keyData += '-----END DSA PRIVATE KEY-----'
return keyData
def makePrivateKeyString_agentv3(obj, passphrase):
if passphrase:
raise BadKeyError("cannot encrypt to agent v3 format")
keyType = objectType(obj)
if keyType == 'ssh-rsa':
values = (obj.e, obj.d, obj.n, obj.u, obj.p, obj.q)
elif keyType == 'ssh-dss':
values = (obj.p, obj.q, obj.g, obj.y, obj.x)
    return common.NS(keyType) + ''.join(map(common.MP, values))
def makePublicKeyBlob(obj):
keyType = objectType(obj)
if keyType == 'ssh-rsa':
keyData = common.MP(obj.e) + common.MP(obj.n)
elif keyType == 'ssh-dss':
keyData = common.MP(obj.p)
keyData += common.MP(obj.q)
keyData += common.MP(obj.g)
keyData += common.MP(obj.y)
return common.NS(keyType)+keyData
def makePrivateKeyBlob(obj):
keyType = objectType(obj)
if keyType == 'ssh-rsa':
return common.NS(keyType) + common.MP(obj.n) + common.MP(obj.e) + \
common.MP(obj.d) + common.MP(obj.u) + common.MP(obj.q) + \
common.MP(obj.p)
elif keyType == 'ssh-dss':
return common.NS(keyType) + common.MP(obj.p) + common.MP(obj.q) + \
common.MP(obj.g) + common.MP(obj.y) + common.MP(obj.x)
else:
raise ValueError('trying to get blob for invalid key type: %s' % keyType)
def objectType(obj):
"""
Return the SSH key type corresponding to a C{Crypto.PublicKey.pubkey.pubkey}
object.
@type obj: C{Crypto.PublicKey.pubkey.pubkey}
@rtype: C{str}
"""
keyDataMapping = {
('n', 'e', 'd', 'p', 'q'): 'ssh-rsa',
('n', 'e', 'd', 'p', 'q', 'u'): 'ssh-rsa',
('y', 'g', 'p', 'q', 'x'): 'ssh-dss'
}
return keyDataMapping[tuple(obj.keydata)]
def pkcs1Pad(data, lMod):
lenPad = lMod-2-len(data)
return '\x01'+('\xff'*lenPad)+'\x00'+data
def pkcs1Digest(data, lMod):
digest = sha.new(data).digest()
return pkcs1Pad(ID_SHA1+digest, lMod)
def lenSig(obj):
return obj.size()/8
def signData(obj, data):
"""
Sign the data with the given C{Crypto.PublicKey.pubkey.pubkey} object.
@type obj: C{Crypto.PublicKey.pubkey.pubkey}
@type data: C{str}
@rtype: C{str}
"""
mapping = {
'ssh-rsa': signData_rsa,
'ssh-dss': signData_dsa
}
objType = objectType(obj)
return common.NS(objType)+mapping[objType](obj, data)
def signData_rsa(obj, data):
sigData = pkcs1Digest(data, lenSig(obj))
sig = obj.sign(sigData, '')[0]
return common.NS(Util.number.long_to_bytes(sig)) # get around adding the \x00 byte
def signData_dsa(obj, data):
sigData = sha.new(data).digest()
randData = common.entropy.get_bytes(19)
sig = obj.sign(sigData, randData)
# SSH insists that the DSS signature blob be two 160-bit integers
# concatenated together. The sig[0], [1] numbers from obj.sign are just
# numbers, and could be any length from 0 to 160 bits. Make sure they
# are padded out to 160 bits (20 bytes each)
return common.NS(Util.number.long_to_bytes(sig[0], 20) +
Util.number.long_to_bytes(sig[1], 20))
def verifySignature(obj, sig, data):
"""
Verify that the signature for the data is valid.
@type obj: C{Crypto.PublicKey.pubkey.pubkey}
@type sig: C{str}
@type data: C{str}
@rtype: C{bool}
"""
mapping = {
'ssh-rsa': verifySignature_rsa,
'ssh-dss': verifySignature_dsa,
}
objType = objectType(obj)
sigType, sigData = common.getNS(sig)
if objType != sigType: # object and signature are not of same type
return 0
return mapping[objType](obj, sigData, data)
def verifySignature_rsa(obj, sig, data):
sigTuple = [common.getMP(sig)[0]]
return obj.verify(pkcs1Digest(data, lenSig(obj)), sigTuple)
def verifySignature_dsa(obj, sig, data):
sig = common.getNS(sig)[0]
assert(len(sig) == 40)
l = len(sig)/2
sigTuple = map(Util.number.bytes_to_long, [sig[: l], sig[l:]])
return obj.verify(sha.new(data).digest(), sigTuple)
def printKey(obj):
"""
Pretty print a C{Crypto.PublicKey.pubkey.pubkey} object.
@type obj: C{Crypto.PublicKey.pubkey.pubkey}
"""
print '%s %s (%s bits)'%(objectType(obj),
obj.hasprivate()and 'Private Key'or 'Public Key',
obj.size())
for k in obj.keydata:
if hasattr(obj, k):
print 'attr', k
by = common.MP(getattr(obj, k))[4:]
while by:
m = by[: 15]
by = by[15:]
o = ''
for c in m:
o = o+'%02x:'%ord(c)
if len(m) < 15:
o = o[:-1]
print '\t'+o
ID_SHA1 = '\x30\x21\x30\x09\x06\x05\x2b\x0e\x03\x02\x1a\x05\x00\x04\x14'
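# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): a minimal sign and
# verify round trip built only from the helpers above. The key path below is
# a hypothetical placeholder for an *unencrypted* OpenSSH private key; the
# helper is defined but never called at import time.
def _exampleSignAndVerify(privateKeyFile='/path/to/id_rsa', data='payload'):
    privateKey = getPrivateKeyObject(filename=privateKeyFile)
    signature = signData(privateKey, data)
    # Rebuild a public-only key object from the wire-format blob and verify.
    publicKey = getPublicKeyObject(makePublicKeyBlob(privateKey))
    return verifySignature(publicKey, signature, data)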
|
{
"content_hash": "71ab3f1477e9bd57ef5098438337f25d",
"timestamp": "",
"source": "github",
"line_count": 466,
"max_line_length": 100,
"avg_line_length": 35.26824034334764,
"alnum_prop": 0.565378764831153,
"repo_name": "santisiri/popego",
"id": "aa766a7abba375ead13dd0373020404a9437f0e9",
"size": "16522",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "envs/ALPHA-POPEGO/lib/python2.5/site-packages/twisted/conch/ssh/keys.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "1246"
},
{
"name": "C",
"bytes": "504141"
},
{
"name": "C++",
"bytes": "26125"
},
{
"name": "CSS",
"bytes": "342653"
},
{
"name": "FORTRAN",
"bytes": "4872"
},
{
"name": "GAP",
"bytes": "13267"
},
{
"name": "Genshi",
"bytes": "407"
},
{
"name": "Groff",
"bytes": "17116"
},
{
"name": "HTML",
"bytes": "383181"
},
{
"name": "JavaScript",
"bytes": "1090769"
},
{
"name": "Makefile",
"bytes": "2441"
},
{
"name": "Mako",
"bytes": "376944"
},
{
"name": "Python",
"bytes": "20895618"
},
{
"name": "Ruby",
"bytes": "3380"
},
{
"name": "Shell",
"bytes": "23581"
},
{
"name": "Smarty",
"bytes": "522"
},
{
"name": "TeX",
"bytes": "35712"
}
],
"symlink_target": ""
}
|
"Unit test for the MplStyleManager class."
__version__ = "$Revision: #1 $"
#===========================================================================
# Required imports. Do not modify these.
import unittest
#===========================================================================
# Place all imports after here.
#
import matplotlib as mpl
mpl.use( "Agg" )
import matplotlib.axes
import matplotlib.figure
import matplotlib.patches
import matplotlib.text
import matplotlib.pyplot
import os
import os.path
import shutil
import math
import operator
import mplStyle as S
#
# Place all imports before here.
#===========================================================================
#===========================================================================
class TestMplStyleManager( unittest.TestCase ):
"""Test the MplStyleManager class."""
#-----------------------------------------------------------------------
@classmethod
def setUpClass( self ):
"""This method is called before any tests are run."""
# Save the existing STYLEPATH (if there is one)
self.outputDir = "output"
if not os.path.exists( self.outputDir ):
os.mkdir( self.outputDir )
self.stylepath = os.environ.get( "STYLEPATH", None )
os.environ[ "STYLEPATH" ] = self.outputDir
#-----------------------------------------------------------------------
@classmethod
def tearDownClass( self ):
"""This method is called after all tests are run."""
# You may place finalization code here.
if self.stylepath is not None:
os.environ[ "STYLEPATH" ] = self.stylepath
#if os.path.exists( self.outputDir ):
# shutil.rmtree( self.outputDir )
# Clean up the plot windows
S.mgr.clear()
#=======================================================================
# Add tests methods below.
# Any method whose name begins with 'test' will be run by the framework.
#-----------------------------------------------------------------------
def inputFile( self, fname ):
return os.path.join( "data-inputs", fname )
def outputFile( self, fname ):
return os.path.join( self.outputDir, fname )
def baselineFile( self, fname ):
return os.path.join( "baseline", fname )
#-----------------------------------------------------------------------
def checkPlot( self, testName, fig, msg ):
fname = "%s.png" % (testName,)
fig.savefig( self.outputFile( fname ) )
msg = "%s: Failed -- '%s'" % (testName, msg)
self.checkImage( self.baselineFile(fname),
self.outputFile(fname), 1.0e-3, msg )
#-----------------------------------------------------------------------
def checkImage( self, expected, actual, tol, msg ):
'''Compare two image files.
= INPUT VARIABLES
- expected The filename of the expected image.
- actual The filename of the actual image.
- tol The tolerance (a unitless float). This is used to
              determine the 'fuzziness' to use when comparing images.
'''
from PIL import Image, ImageOps, ImageFilter
# open the image files and remove the alpha channel (if it exists)
expectedImage = Image.open( expected ).convert("RGB")
actualImage = Image.open( actual ).convert("RGB")
# normalize the images
expectedImage = ImageOps.autocontrast( expectedImage, 2 )
actualImage = ImageOps.autocontrast( actualImage, 2 )
# compare the resulting image histogram functions
h1 = expectedImage.histogram()
h2 = actualImage.histogram()
rms = math.sqrt( reduce( operator.add, map( lambda a,b: ( a - b )**2,
h1, h2) ) / len( h1 ) )
diff = rms / 10000.0
msg += "\nError: Image files did not match.\n" \
" RMS Value: %22.15e\n" \
" Expected: %s\n" \
" Actual : %s\n" \
" Tolerance: %22.15e\n" % ( diff, expected, actual, tol )
self.assertLessEqual( diff, tol, msg )
#-----------------------------------------------------------------------
def checkStyleEq( self, testName, desired, expected ):
desiredProps = desired.propertyNames()
expectedProps = expected.propertyNames()
self.assertEqual( desiredProps, expectedProps,
msg = "%s: desired properties do not match expected " \
"properties." % (testName,) )
for propName in desiredProps:
desiredValue = getattr( desired, propName )
expectedValue = getattr( expected, propName )
if isinstance( desiredValue, S.types.SubStyle ):
self.checkStyleEq( "%s.%s" % (testName, propName),
desiredValue, expectedValue )
else:
self.assertEqual( desiredValue, expectedValue,
msg = "%s: style values do not match." % (testName,) )
#-----------------------------------------------------------------------
def testBasic( self ):
"""A basic test of MplStyleManager."""
# Setup the plot
fig, ax = matplotlib.pyplot.subplots()
patch = mpl.patches.Patch()
ax.minorticks_on()
ax.set_title( 'Axes Title' )
ax.set_xlabel( "X-Axis Label" )
ax.set_ylabel( "Y-Axis Label" )
xdata = [1, 1.5, 2, 2.5, 3, 3.5, 4, 4.5, 4.75, 5]
ydata = [1, 1.75, 2, 2.75, 3, 2.75, 2, 2.25, 2.75, 3]
line = ax.plot( xdata, ydata, color='blue' )
axText = ax.text( 4.2, 1.1, "Axes Text" )
# Get the manager
mgr = S.mgr
style1 = mgr.create( 'Style #1' )
style1.axes.labels.font.size = 8
style2 = mgr.create( 'Style #2',
{ 'figure.bgColor' : 'grey',
'axes.bgColor' : 'white' } )
style3 = mgr.create( 'Style #3', parent = style1 )
style3.axes.labels.font.size = 24
# Resolved 3 with 2
style4 = S.MplStyle( 'Style #4' )
style4.axes.labels.font.size = 24
style4.figure.bgColor = 'grey'
style4.axes.bgColor = 'yellow'
# Resolved 3 with 2 and updated 3
style5 = mgr.create( 'Style #5' )
mgr[ 'Style #5' ].axes.labels.font.size = 16
mgr[ 'Style #5' ].figure.bgColor = 'grey'
mgr[ 'Style #5' ].axes.bgColor = 'yellow'
# Copy test
newStyle = mgr.copy( style3, 'NewStyle' )
self.checkStyleEq( 'Copy - style3', style3, newStyle )
self.assertRaises( Exception, mgr.copy, 'badName', 'blah',
         msg = "Failed to raise when copying a non-existent style." )
self.assertEqual( [], mgr.getElementStyles( fig ),
msg = "Element styles should be []." )
mgr.apply( fig, style4 )
self.checkPlot( "MplStyleManager_testBasic_001", fig,
msg = "Apply by style" )
self.assertEqual( True, mgr.exists(style4),
msg = "Failed to auto add an applied style." )
# This should be identical to *_001
mgr.apply( fig, 'Style #3' )
self.checkPlot( "MplStyleManager_testBasic_002", fig,
msg = "Apply by name" )
style3.axes.labels.font.size = 16
mgr.reapply()
self.checkPlot( "MplStyleManager_testBasic_003", fig,
msg = "Re-Apply" )
mgr.set( fig, 'axes.labels.font.size', 24 )
self.checkPlot( "MplStyleManager_testBasic_004", fig,
msg = "Set by name" )
mgr.set( fig, { 'axes.labels.font.size' : 16 } )
self.checkPlot( "MplStyleManager_testBasic_005", fig,
msg = "Set by dict" )
mgr.setElementStyles( ax, [ style2.name ] )
mgr.reapply()
self.checkPlot( "MplStyleManager_testBasic_006", fig,
msg = "Manually set element styles" )
tmpStyle = S.MplStyle( "Temp Style" )
mgr.add( tmpStyle )
self.assertRaises( Exception, mgr.add, tmpStyle,
msg = "Failed to throw on multiple adds." )
mgr.erase( tmpStyle )
result = mgr.find( tmpStyle.name )
self.assertEqual( None, result,
msg = "Did not remove 'tmpStyle' from the manager." )
msg = "Failed to throw on multiple removes."
self.assertRaises( Exception, mgr.erase, tmpStyle, msg = msg )
mgr.loadFile( self.inputFile( "GoodStyle.mplstyle" ) )
mgr.apply( fig, "Good Style" )
self.checkPlot( "MplStyleManager_testBasic_007", fig,
msg = "Custom python script" )
# Check the get/set of the element name
self.assertEqual( [], mgr.getTags( fig ),
msg = "Element name should be None" )
mgr.tag( fig, 'testName' )
self.assertEqual( ['testName'], mgr.getTags( fig ),
msg = "Failed to get and set the element name." )
#-----------------------------------------------------------------------
def testPersistence( self ):
"""Test reading and writing functionality."""
mgr = S.MplStyleManager()
# Setup the style
style = mgr.create( 'Style_#1' )
style.bgColor = 'white'
style.fgColor = 'black'
# Figure
style.figure.width = 10
style.figure.height = 10
# Axes
style.axes.axisBelow = True
style.axes.leftEdge.color = 'magenta'
style.axes.leftEdge.width = 5
style.axes.leftEdge.style = '--'
style.axes.bottomEdge.color = 'magenta'
style.axes.bottomEdge.width = 5
style.axes.bottomEdge.style = 'dashed'
style.axes.topEdge.visible = False
style.axes.rightEdge.visible = False
style.axes.title.font.scale = 2.0
style.axes.title.font.family = 'sans-serif'
# X-Axis
style.axes.xAxis.autoscale = True
style.axes.xAxis.dataMargin = 0.2
style.axes.xAxis.label.font.scale = 1.2
style.axes.xAxis.majorTicks.labels.font.scale = 0.75
style.axes.xAxis.majorTicks.grid.visible = True
style.axes.xAxis.majorTicks.grid.color = '#B0B0B0'
style.axes.xAxis.majorTicks.grid.width = 1.5
style.axes.xAxis.majorTicks.grid.style = ':'
style.axes.xAxis.majorTicks.length = 15.0
style.axes.xAxis.majorTicks.width = 1.5
style.axes.xAxis.minorTicks.grid.visible = True
style.axes.xAxis.minorTicks.grid.color = '#B0B0B0'
style.axes.xAxis.minorTicks.grid.width = 0.5
style.axes.xAxis.minorTicks.grid.style = ':'
style.axes.xAxis.minorTicks.length = 5.0
style.axes.xAxis.minorTicks.width = 0.5
# Y-Axis
style.axes.yAxis = style.axes.xAxis.copy()
# Lines
style.line.color = "blue"
style.line.style = 'dash-dot'
style.line.width = 1.5
style.line.marker.color = 'red'
style.line.marker.edgeColor = 'green'
style.line.marker.size = 12
style.line.marker.style = 'circle'
style.line.marker.fill = 'bottom'
# Patches
style.patch.color = 'gold'
style.patch.filled = True
style.patch.edgeColor = 'purple'
style.patch.edgeWidth = 5
# Text
style.text.lineSpacing = 1.0
style.text.font.size = 12
style.text.font.family = 'monospace'
# Save to file
mgr.save( outdir = self.outputDir )
self.assertRaises( Exception, mgr.save, outdir=self.outputDir, overwrite=False,
msg = "Failed to raise when writing to an existing file." )
mgr2 = S.MplStyleManager()
mgr2.load()
self.checkStyleEq( "Default Load - style", style, mgr2[ style.name ] )
mgr3 = S.MplStyleManager()
mgr3.path = [ self.outputDir ]
mgr3.load()
self.checkStyleEq( "Load by path - style", style, mgr3[ style.name ] )
mgr4 = S.MplStyleManager()
mgr4.path = [ '$STYLEPATH' ]
mgr4.load()
self.checkStyleEq( "Load by path STYLEPATH - style",
style, mgr4[ style.name ] )
mgr5 = S.MplStyleManager()
mgr5.load( self.outputDir )
self.checkStyleEq( "Load by passed path - style",
style, mgr5[ style.name ] )
os.environ.pop( "STYLEPATH" )
mgr6 = S.MplStyleManager()
mgr6.load()
self.assertEqual( None, mgr6.find( style.name ),
msg = "There should be not style loaded." )
p = self.outputFile( 'Style_#1.mplstyle' )
mgr6.loadFile( p )
self.assertEqual( True, os.path.exists( p ),
msg = "Manager failed to write the style file." )
mgr6.erase( style, delete = True )
self.assertEqual( False, os.path.exists( p ),
msg = "Manager failed to remove the style file." )
#-----------------------------------------------------------------------
def testErrors( self ):
"""Test error conditions."""
mgr = S.MplStyleManager()
self.assertRaises( Exception, mgr.loadFile, "Invalid File",
msg = "Failed to throw exception for non-existent file." )
self.assertRaises( Exception, mgr.loadFile,
self.inputFile( "BadStyle1.mplstyle" ),
msg = "Failed to throw for missing 'style' in file." )
self.assertRaises( Exception, mgr.loadFile,
self.inputFile( "BadStyle2.mplstyle" ),
msg = "Failed to throw for invalid 'style' in file." )
self.assertRaises( Exception, mgr.loadFile,
self.inputFile( "BadCustom.mplstyle" ),
msg = "Failed to throw for bad custom file." )
#-----------------------------------------------------------------------
|
{
"content_hash": "cf86fd565d8d9477332c6cea53634a44",
"timestamp": "",
"source": "github",
"line_count": 378,
"max_line_length": 85,
"avg_line_length": 35.97883597883598,
"alnum_prop": 0.5415441176470588,
"repo_name": "nasa/mplStyle",
"id": "4997262562c28e91d67fb67e0104c3ccf08d859d",
"size": "15402",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mplStyle/test/test_MplStyleManager.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "578438"
}
],
"symlink_target": ""
}
|
from datetime import timedelta
from flask import make_response, request, current_app
from functools import update_wrapper
def crossdomain(origin=None, methods=None, headers=None,
max_age=21600, attach_to_all=True,
automatic_options=True):
if methods is not None:
methods = ', '.join(sorted(x.upper() for x in methods))
if headers is not None and not isinstance(headers, basestring):
headers = ', '.join(x.upper() for x in headers)
if not isinstance(origin, basestring):
origin = ', '.join(origin)
if isinstance(max_age, timedelta):
max_age = max_age.total_seconds()
def get_methods():
if methods is not None:
return methods
options_resp = current_app.make_default_options_response()
return options_resp.headers['allow']
def decorator(f):
def wrapped_function(*args, **kwargs):
if automatic_options and request.method == 'OPTIONS':
resp = current_app.make_default_options_response()
else:
resp = make_response(f(*args, **kwargs))
if not attach_to_all and request.method != 'OPTIONS':
return resp
h = resp.headers
h['Access-Control-Allow-Origin'] = origin
h['Access-Control-Allow-Methods'] = get_methods()
h['Access-Control-Max-Age'] = str(max_age)
if headers is not None:
h['Access-Control-Allow-Headers'] = headers
return resp
f.provide_automatic_options = False
return update_wrapper(wrapped_function, f)
return decorator
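# Illustrative sketch (not part of the original snippet): one way the decorator
# above might be wired onto a Flask view. The route, response text and port are
# invented, and, like the snippet itself, this assumes a Python 2 runtime
# because crossdomain() references `basestring`.
if __name__ == '__main__':
    from flask import Flask
    app = Flask(__name__)
    @app.route('/api/hello')
    @crossdomain(origin='*', headers=['Content-Type'])
    def hello():
        return make_response('hello, cross-origin world')
    app.run(port=5000)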
|
{
"content_hash": "abd5b936bef0b51201a22964a9fca312",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 67,
"avg_line_length": 36.77777777777778,
"alnum_prop": 0.6030211480362537,
"repo_name": "paked/WPC-fix",
"id": "2e8cef02091d776d3f3693d37d46d7d5771989d0",
"size": "1699",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "crossdomain.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "535"
},
{
"name": "HTML",
"bytes": "14118"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "59878"
}
],
"symlink_target": ""
}
|
class Person:
def __init__(self, name):
self.name = name
# Getter function
@property
def name(self):
return self._name
# Setter function
@name.setter
def name(self, value):
if not isinstance(value, str):
raise TypeError('Expected a string')
self._name = value
@name.deleter
def name(self):
raise AttributeError("Can't delete attribute")
class SubPerson(Person):
@property
def name(self):
print('Getting name')
return super().name
@name.setter
def name(self, value):
print('Setting name to', value)
super(SubPerson, SubPerson).name.__set__(self, value)
@name.deleter
def name(self):
print('Deleting name')
super(SubPerson, SubPerson).name.__delete__(self)
class SubPerson2(Person):
@Person.name.setter
def name(self, value):
print('Setting name to', value)
super(SubPerson2, SubPerson2).name.__set__(self, value)
class SubPerson3(Person):
#@property
@Person.name.getter
def name(self):
print('Getting name')
return super().name
if __name__ == '__main__':
a = Person('Guido')
print(a.name)
a.name = 'Dave'
print(a.name)
try:
a.name = 42
except TypeError as e:
print(e)
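    # Illustrative extension (not in the cookbook source): exercise the
    # subclasses above so the overridden getters/setters can be seen firing.
    s = SubPerson('Guido')   # prints 'Setting name to Guido'
    print(s.name)            # prints 'Getting name', then 'Guido'
    s2 = SubPerson2('Dave')  # prints 'Setting name to Dave'
    print(s2.name)           # 'Dave' (getter is not overridden here)
    s3 = SubPerson3('Lewis') # plain Person setter, no message
    print(s3.name)           # prints 'Getting name', then 'Lewis'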
|
{
"content_hash": "62797e434a8e4fe8974596e20288a9fc",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 63,
"avg_line_length": 22.75862068965517,
"alnum_prop": 0.5856060606060606,
"repo_name": "tuanavu/python-cookbook-3rd",
"id": "763f8c6deef3dca7ca59d203afb5d3e33e0d08b6",
"size": "1368",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/8/extending_a_property_in_a_subclass/example.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "20265"
},
{
"name": "CSS",
"bytes": "184"
},
{
"name": "Jupyter Notebook",
"bytes": "219413"
},
{
"name": "Makefile",
"bytes": "231"
},
{
"name": "Python",
"bytes": "250592"
},
{
"name": "Shell",
"bytes": "179"
}
],
"symlink_target": ""
}
|
"""Device tracker for Synology SRM routers.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/device_tracker.synology_srm/
"""
import logging
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.components.device_tracker import (
DOMAIN, PLATFORM_SCHEMA, DeviceScanner)
from homeassistant.const import (
CONF_HOST, CONF_USERNAME, CONF_PASSWORD,
CONF_PORT, CONF_SSL, CONF_VERIFY_SSL)
REQUIREMENTS = ['synology-srm==0.0.4']
_LOGGER = logging.getLogger(__name__)
DEFAULT_USERNAME = 'admin'
DEFAULT_PORT = 8001
DEFAULT_SSL = True
DEFAULT_VERIFY_SSL = False
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_HOST): cv.string,
vol.Required(CONF_USERNAME, default=DEFAULT_USERNAME): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
vol.Optional(CONF_SSL, default=DEFAULT_SSL): cv.boolean,
vol.Optional(CONF_VERIFY_SSL, default=DEFAULT_VERIFY_SSL): cv.boolean,
})
def get_scanner(hass, config):
"""Validate the configuration and return Synology SRM scanner."""
scanner = SynologySrmDeviceScanner(config[DOMAIN])
return scanner if scanner.success_init else None
class SynologySrmDeviceScanner(DeviceScanner):
"""This class scans for devices connected to a Synology SRM router."""
def __init__(self, config):
"""Initialize the scanner."""
import synology_srm
self.client = synology_srm.Client(
host=config[CONF_HOST],
port=config[CONF_PORT],
username=config[CONF_USERNAME],
password=config[CONF_PASSWORD],
https=config[CONF_SSL]
)
if not config[CONF_VERIFY_SSL]:
self.client.http.disable_https_verify()
self.last_results = []
self.success_init = self._update_info()
_LOGGER.info("Synology SRM scanner initialized")
def scan_devices(self):
"""Scan for new devices and return a list with found device IDs."""
self._update_info()
return [device['mac'] for device in self.last_results]
def get_device_name(self, device):
"""Return the name of the given device or None if we don't know."""
filter_named = [result['hostname'] for result in self.last_results if
result['mac'] == device]
if filter_named:
return filter_named[0]
return None
def _update_info(self):
"""Check the router for connected devices."""
_LOGGER.debug("Scanning for connected devices")
devices = self.client.mesh.network_wifidevice()
last_results = []
for device in devices:
last_results.append({
'mac': device['mac'],
'hostname': device['hostname']
})
_LOGGER.debug(
"Found %d device(s) connected to the router",
len(devices)
)
self.last_results = last_results
return True
|
{
"content_hash": "53e9438043e770b42d9cbb07f807d110",
"timestamp": "",
"source": "github",
"line_count": 100,
"max_line_length": 77,
"avg_line_length": 30.77,
"alnum_prop": 0.6454338641533962,
"repo_name": "PetePriority/home-assistant",
"id": "5c7ac9a5d00dffd9979430c195e91dd7a593504a",
"size": "3077",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "homeassistant/components/device_tracker/synology_srm.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1175"
},
{
"name": "Dockerfile",
"bytes": "1073"
},
{
"name": "Python",
"bytes": "13985647"
},
{
"name": "Ruby",
"bytes": "745"
},
{
"name": "Shell",
"bytes": "17364"
}
],
"symlink_target": ""
}
|
from django.conf.urls.defaults import *
from cyder.cydns.urls import cydns_urls
urlpatterns = cydns_urls('mx')
|
{
"content_hash": "d4a8a0d4ae96d64f028cbd81cb59c37f",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 39,
"avg_line_length": 19,
"alnum_prop": 0.7631578947368421,
"repo_name": "zeeman/cyder",
"id": "b806f2cdaf74c464176e8690f8724e113b55a5d3",
"size": "114",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "cyder/cydns/mx/urls.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ApacheConf",
"bytes": "1401"
},
{
"name": "CSS",
"bytes": "143911"
},
{
"name": "CoffeeScript",
"bytes": "4769"
},
{
"name": "HTML",
"bytes": "109090"
},
{
"name": "JavaScript",
"bytes": "344874"
},
{
"name": "Makefile",
"bytes": "11293"
},
{
"name": "Puppet",
"bytes": "6422"
},
{
"name": "Python",
"bytes": "2336377"
},
{
"name": "Shell",
"bytes": "16783"
}
],
"symlink_target": ""
}
|
"""
Find heights of temperature etc. on pressure levels from models and save.
Takes calculated pressure level heights from geopotential_interpolate_multiple.py.
"""
import os,sys
import iris
import iris.coords as coords
import iris.unit as unit
#from tempfile import mkdtemp
import numpy as np
import os.path as path
import datetime
import time
import h5py
#from scipy.interpolate import interp1d
from scipy.interpolate import InterpolatedUnivariateSpline
def main():
#experiment_ids = ['djzny', 'djznq', 'djznw', 'djzns', 'dkbhu', 'dkjxq', 'dklyu', 'dkmbq', 'dklwu', 'dklzq' ]
experiment_ids = ['dklwu', 'dklzq']
for experiment_id in experiment_ids:
print "======================================"
print 'Start of interpolate script for temp/specific humidity'
print experiment_id
sys.stdout.flush()
start_time = time.clock()
try:
p_levels = [1000, 950, 925, 850, 700, 500, 400, 300, 250, 200, 150, 100, 70, 50, 30, 20, 10]
expmin1 = experiment_id[:-1]
fname_heights = '/projects/cascade/pwille/moose_retrievals/%s/%s/15101.pp'% (expmin1, experiment_id)
fname_temp = '/projects/cascade/pwille/moose_retrievals/%s/%s/4.pp' % (expmin1, experiment_id)
fname_sp_hum= '/projects/cascade/pwille/moose_retrievals/%s/%s/10.pp' % (expmin1, experiment_id)
path_tempfile='/projects/cascade/pwille/temp/'
load_name_i='/408_pressure_levels_interp_pressure_%s' % experiment_id
save_name_temp='/temp_pressure_levels_interp_%s' % experiment_id
save_name_sp_hum='/sp_hum_pressure_levels_interp_%s' % experiment_id
temp_p_heights_file = path.join("%s%s" % (path_tempfile, save_name_temp))
sp_hum_p_heights_file = path.join("%s%s" % (path_tempfile, save_name_sp_hum))
p_heights_file = path.join("%s%s" % (path_tempfile, load_name_i))
hl = iris.load_cube(fname_heights)
temp_cube = iris.load_cube(fname_temp)
sp_hum_cube = iris.load_cube(fname_sp_hum)
no_of_times = temp_cube.coord('time').points.size
lat_len = temp_cube.coord('grid_latitude').points.size
lon_len = temp_cube.coord('grid_longitude').points.size
shape_alltime = (no_of_times,lat_len,lon_len, len(p_levels))
shape_1time = (lat_len,lon_len, len(p_levels))
print shape_alltime
te = np.empty((shape_1time), dtype=float)
sh = np.empty((shape_1time), dtype=float)
sys.stdout.flush()
heights = hl.slices(['model_level_number', 'grid_latitude', 'grid_longitude']).next().data
with h5py.File(p_heights_file, 'r') as i:
p_hsf = i['interps']
with h5py.File(temp_p_heights_file, 'w') as tf:
temps = tf.create_dataset('t_on_p', dtype='float32', shape=shape_alltime)
for t, time_cube in enumerate(temp_cube.slices(['model_level_number', 'grid_latitude', 'grid_longitude'])):
p_hs=p_hsf[t,:,:,:]
tc = time_cube.data
for f in range(lat_len):
for y in range(lon_len):
i_func_t = InterpolatedUnivariateSpline(heights[:,f,y], tc[:,f,y])
te[f,y,:] = i_func_t(p_hs[f,y,:])
temps[t,:,:,:] = te
with h5py.File(sp_hum_p_heights_file, 'w') as s:
sphums = s.create_dataset('sh_on_p', dtype='float32', shape=shape_alltime)
for t, time_cube in enumerate(sp_hum_cube.slices(['model_level_number', 'grid_latitude', 'grid_longitude'])):
p_hs=p_hsf[t,:,:,:]
sphc = time_cube.data
for f in range(lat_len):
for y in range(lon_len):
i_func_sh = InterpolatedUnivariateSpline(heights[:,f,y], sphc[:,f,y])
sh[f,y,:] = i_func_sh(p_hs[f,y,:])
sphums[t,:,:,:] = sh
end_time=time.clock()
print(('%s time elapsed: {0}' % experiment_id).format(end_time - start_time))
# End of try for experiment_id loop
except Exception,e:
print e
print sys.exc_traceback.tb_lineno
print 'Failed to run interpolate script for %s' % experiment_id
sys.stdout.flush()
#pass
if __name__ == '__main__':
main()
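# Illustrative sketch (not called by main() above): the per-column spline
# interpolation used in the loops, reduced to a single synthetic profile.
# All values and names in this helper are invented for demonstration only.
def _interp_profile_example():
    import numpy as np
    from scipy.interpolate import InterpolatedUnivariateSpline
    model_heights = np.linspace(0.0, 20000.0, 38)   # metres, one model column
    model_temps = 288.0 - 0.0065 * model_heights    # idealised lapse rate, K
    p_level_heights = np.array([110.0, 540.0, 1460.0, 3010.0, 5570.0])
    i_func = InterpolatedUnivariateSpline(model_heights, model_temps)
    return i_func(p_level_heights)                  # temperatures at p-level heights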
|
{
"content_hash": "1af261aea1f9c0b4a958ea3c7144ff2c",
"timestamp": "",
"source": "github",
"line_count": 119,
"max_line_length": 117,
"avg_line_length": 36.49579831932773,
"alnum_prop": 0.5804743265024177,
"repo_name": "peterwilletts24/Monsoon-Python-Scripts",
"id": "e6ac2c118904987c8d9cab0b769bfcc48dc9ca62",
"size": "4343",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "geopotential/temps_etc_on_p_levels_12km.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "576592"
}
],
"symlink_target": ""
}
|
import os
from getpass import getpass
from addic7ed_cli.error import Error
from addic7ed_cli.util import remove_extension, file_to_query, string_set
from addic7ed_cli.episode import search
from addic7ed_cli.compat import echo, input
from addic7ed_cli.login import login, get_current_user
from addic7ed_cli.version import Version
class UI(object):
def __init__(self, args):
self.args = args
@property
def batch(self):
return self.args.batch or self.args.brute_batch
def select(self, choices):
if not choices:
raise Error("Internal error: no choices!")
chosen_index = None
skipping = False
if len(choices) == 1 or self.batch:
chosen_index = 1
else:
just = len(str(len(choices)))
index = 1
for choice in choices:
echo(" {} : {}".format(str(index).rjust(just), choice))
index += 1
echo(" S : Skip")
while True:
answer = input('[1] > ')
if not answer:
chosen_index = 1
elif answer.lower() == "s":
skipping = True
else:
try:
chosen_index = int(answer)
except ValueError:
pass
if skipping or (chosen_index and
1 <= chosen_index <= len(choices)):
break
else:
echo("Bad response")
if skipping:
echo("Skipping")
return None
result = choices[chosen_index - 1]
echo("{}".format(result))
return result
def confirm(self, question, default=None):
responses = 'yn' if default is None else 'Yn' if default else 'yN'
question += ' [{}] > '.format(responses)
if self.batch:
return True
while True:
answer = input(question).lower()
if answer in ('y', 'n'):
return answer == 'y'
elif answer == '' and default is not None:
return default
else:
echo('Bad answer')
class SearchUI(UI):
def episode(self, episode, languages=[], releases=[]):
episode.fetch_versions()
versions = episode.filter_versions(languages, releases, True,
self.args.hearing_impaired)
return self.select(versions)
def should_ignore_file(self, filename):
ignore = False
echo('Target SRT file: {}'.format(filename))
if os.path.isfile(filename):
if self.args.ignore or (not self.args.overwrite and
not self.confirm('Overwrite?', True)):
echo('File exists. Ignoring.')
ignore = True
else:
echo('File exists. Overwriting.')
return ignore
def launch_file(self, filename):
args = self.args
query, release = file_to_query(filename)
if args.query:
query = args.query
if args.release:
release = string_set(args.release)
if args.verbose:
echo('Using query "{query}" and release "{release}"'.format(
release=' '.join(release),
query=query
))
search_results = search(query)
if not search_results:
echo('No result')
return
if self.args.batch and len(search_results) > 1:
raise Error('More than one result, aborting')
episode = self.select(search_results)
return episode and self.episode(episode, args.language, release)
def iter_files(self):
for file_arg in self.args.file:
try:
if not self.args.lang_suffix:
output_file = remove_extension(file_arg) + '.srt'
if self.should_ignore_file(output_file):
continue
version = self.launch_file(file_arg)
if version:
if self.args.lang_suffix:
output_file = "{}.{}.srt".format(
remove_extension(file_arg),
version.iso639_language,
)
if self.should_ignore_file(output_file):
continue
yield version, output_file
echo()
except Error as e:
echo('Error: {}'.format(e))
def launch(self):
use_multidownload = bool(get_current_user()) and \
len(self.args.file) > 1
files = list(self.iter_files())
if not files:
echo('Nothing to download')
elif use_multidownload:
echo('Using multi-download')
Version.multidownload(files)
else:
for (version, output_file) in files:
version.download(output_file)
class LoginUI(UI):
def launch(self):
user = input('User: ')
password = getpass('Password: ')
self.args.session = login(user, password)
self.args.save_session()
class LogoutUI(UI):
def launch(self):
self.args.session = None
self.args.save_session()
echo('Logged out')
|
{
"content_hash": "59589802f5df08045a03abf389da2d47",
"timestamp": "",
"source": "github",
"line_count": 200,
"max_line_length": 74,
"avg_line_length": 27.205,
"alnum_prop": 0.5034001102738467,
"repo_name": "BenoitZugmeyer/addic7ed-cli",
"id": "3f96a46a496a51af30b762f19fb059d214b82714",
"size": "5442",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "addic7ed_cli/ui.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "37028"
},
{
"name": "Shell",
"bytes": "600"
}
],
"symlink_target": ""
}
|
"""
Predefined ttk style
"""
import os
import logging
from tkinter import ttk
LOGGER = logging.getLogger(__name__)
class TTKStyle(ttk.Style):
def __init__(self, style_name, theme='default', **kwargs):
super().__init__()
self.theme_use(theme)
self.configure(style_name, **kwargs)
def init_css():
theme = 'default'
if os.name == 'posix':
theme = 'alt'
TTKStyle('H1.TLabel', theme=theme, font=('',32, 'bold'), background='white')
TTKStyle('H2.TLabel', theme=theme, font=('',24, 'bold'), background='white')
TTKStyle('H3.TLabel', theme=theme, font=('',18), background='gray82')
TTKStyle('H4.TLabel', theme=theme, font=('',16), background='gray82')
TTKStyle('H5.TLabel', theme=theme, font=('',13), background='gray82')
TTKStyle('H6.TLabel', theme=theme, font=('',10), background='gray82')
TTKStyle('H1.TButton', theme=theme, font=('',32, 'bold'), background='white')
TTKStyle('H2.TButton', theme=theme, font=('',24, 'bold'), background='white')
TTKStyle('H3.TButton', theme=theme, font=('',18), background='gray82')
TTKStyle('H4.TButton', theme=theme, font=('',16), background='gray82')
TTKStyle('H5.TButton', theme=theme, font=('',13), background='gray82')
TTKStyle('H6.TButton', theme=theme, font=('',10), background='gray82')
TTKStyle('H1.TLabelframe', theme=theme, background='white')
TTKStyle('H1.TLabelframe.Label', theme=theme, font=('', 32, 'bold'), background='white')
TTKStyle('H2.TLabelframe', theme=theme, background='white')
TTKStyle('H2.TLabelframe.Label', theme=theme, font=('', 24, 'bold'), background='white')
TTKStyle('H3.TLabelframe', theme=theme, background='gray82')
TTKStyle('H3.TLabelframe.Label', theme=theme, font=('', 18), background='gray82')
TTKStyle('H4.TLabelframe', theme=theme, background='gray82')
TTKStyle('H4.TLabelframe.Label', theme=theme, font=('', 16), background='gray82')
TTKStyle('H5.TLabelframe', theme=theme, background='gray82')
TTKStyle('H5.TLabelframe.Label', theme=theme, font=('', 13, 'bold'), background='gray82')
TTKStyle('H6.TLabelframe', theme=theme, background='gray82')
TTKStyle('H6.TLabelframe.Label', theme=theme, font=('', 10), background='gray82')
TTKStyle('H1.TCheckbutton', theme=theme, font=('',32, 'bold'), background='gray82')
TTKStyle('H2.TCheckbutton', theme=theme, font=('',24, 'bold'), background='gray82')
TTKStyle('H3.TCheckbutton', theme=theme, font=('',18), background='gray82')
TTKStyle('H4.TCheckbutton', theme=theme, font=('',16), background='gray82')
TTKStyle('H5.TCheckbutton', theme=theme, font=('',13), background='gray82')
TTKStyle('H6.TCheckbutton', theme=theme, font=('',10), background='gray82')
TTKStyle('H1.TRadiobutton', theme=theme, font=('', 32, 'bold'), background='gray82')
TTKStyle('H2.TRadiobutton', theme=theme, font=('', 24, 'bold'), background='gray82')
TTKStyle('H3.TRadiobutton', theme=theme, font=('', 18), background='gray82')
TTKStyle('H4.TRadiobutton', theme=theme, font=('', 16), background='gray82')
TTKStyle('H5.TRadiobutton', theme=theme, font=('', 13), background='gray82')
TTKStyle('H6.TRadiobutton', theme=theme, font=('', 10), background='gray82')
TTKStyle('Gray.Horizontal.TScale', theme=theme, padding=20, background='gray82')
if __name__ == '__main__':
import tkinter
root = tkinter.Tk()
root.wm_title('DEMO - {}'.format(__file__))
init_css()
ttk.Label(root, text='H1 header - text size 32', style='H1.TLabel').grid(row=0, column=0, sticky='w')
ttk.Label(root, text='H2 header - text size 24', style='H2.TLabel').grid(row=1, column=0, sticky='w')
ttk.Label(root, text='H3 header - text size 18', style='H3.TLabel').grid(row=2, column=0, sticky='w')
ttk.Label(root, text='H4 header - text size 16', style='H4.TLabel').grid(row=3, column=0, sticky='w')
ttk.Label(root, text='H5 header - text size 13', style='H5.TLabel').grid(row=4, column=0, sticky='w')
ttk.Label(root, text='H6 header - text size 10', style='H6.TLabel').grid(row=5, column=0, sticky='w')
ttk.Label(root, text='H1 checkbtn - text size 32', style='H1.TCheckbutton').grid(row=0, column=1, sticky='w')
ttk.Label(root, text='H2 checkbtn - text size 24', style='H2.TCheckbutton').grid(row=1, column=1, sticky='w')
ttk.Label(root, text='H3 checkbtn - text size 18', style='H3.TCheckbutton').grid(row=2, column=1, sticky='w')
ttk.Label(root, text='H4 checkbtn - text size 16', style='H4.TCheckbutton').grid(row=3, column=1, sticky='w')
ttk.Label(root, text='H5 checkbtn - text size 13', style='H5.TCheckbutton').grid(row=4, column=1, sticky='w')
ttk.Label(root, text='H6 checkbtn - text size 10', style='H6.TCheckbutton').grid(row=5, column=1, sticky='w')
root.mainloop()
|
{
"content_hash": "43d57b119145f2e8172062a64d1cd773",
"timestamp": "",
"source": "github",
"line_count": 76,
"max_line_length": 113,
"avg_line_length": 63.19736842105263,
"alnum_prop": 0.6570893191755153,
"repo_name": "afunTW/moth-graphcut",
"id": "3e8fe287050b0ebf109cb4800b0989c8cac0c52e",
"size": "4803",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/view/ttkstyle.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "232794"
}
],
"symlink_target": ""
}
|
import functools
from neutron.api import extensions
from neutron.common import exceptions
from neutron import manager
from oslo_log import log as logging
LOG = logging.getLogger(__name__)
class Diagnostician(object):
def __init__(self, plugin):
self.plugin = plugin
def diag_not_implemented(self, res, id, input):
LOG.warning("Diagnostics not implemented on resource %ss." % res)
raise exceptions.ServiceUnavailable()
def diagnose(self, res, input, req, id):
LOG.debug("Requested diagnostics fields %s on resource %s with id %s"
% (input['diag'], res, id))
return getattr(
self.plugin, 'diagnose_%s' % res.replace('-', '_'),
functools.partial(self.diag_not_implemented, res))(
req.context, id, input['diag'])
class Diagnostics(extensions.ExtensionDescriptor):
def get_name(self):
return "Diagnostics"
def get_alias(self):
return "diagnostics"
def get_description(self):
return "Diagnostics extension"
def get_namespace(self):
return "None"
def get_updated(self):
return "never"
def get_actions(self):
diagnose = Diagnostician(manager.NeutronManager.get_plugin()).diagnose
resources = ['port', 'subnet', 'network']
return (extensions.ActionExtension('%ss' % res, 'diag',
functools.partial(diagnose, res)) for res in resources)
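# Illustrative sketch (not part of this extension): a plugin that wants to
# answer these 'diag' actions would expose methods named diagnose_<resource>.
# The class and return payload below are invented to show the expected shape.
#
#   class MyPlugin(object):
#       def diagnose_port(self, context, id, fields):
#           return {'port': {'id': id, 'fields_checked': list(fields)}}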
|
{
"content_hash": "ccf9d5777b614e70af34cf434d6c33a8",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 78,
"avg_line_length": 30.893617021276597,
"alnum_prop": 0.6377410468319559,
"repo_name": "Cerberus98/quark",
"id": "edaaec6b0b6e2fc92aca0965feecc907983d3627",
"size": "2042",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "quark/api/extensions/diagnostics.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "1267108"
},
{
"name": "Shell",
"bytes": "861"
}
],
"symlink_target": ""
}
|
import re
from aiohttp import web
from components.eternity import config
from utils.abstract import AbsWebView
from utils.period import todate
from utils.shortcut import paginate, render
from utils.response import geass, http_400_response
class IndexView(AbsWebView):
async def get(self):
page = self._get('page', None)
        # Handle pinned articles. TODO: store them in a variable and load at initialization
if page == 'full':
# Return all articles
data = await self.redis.get_list('Article', istop=True)
else:
key = self._get('search', None)
if key is None:
if page is None:
page = 1
status = await paginate(self.request, page=page, istop=True)
if status['exit'] == 0:
data = status['data']
else:
return await http_400_response(self.request)
return geass({
'articles': data,
'page': int(page),
'total': status['total']
}, self.request, 'public/catalog.html')
else:
data = []
result = await self.redis.get_list('Article')
for item in result:
if re.search(key, item['text']) or re.search(key, item['title']) or re.search(key, item['tags']):
data.append(item)
return geass({
'articles': data,
'page': 1,
'total': 1
}, self.request, 'public/catalog.html')
class ListView(AbsWebView):
async def get(self):
page = self._get('page', None)
category = self.match['category'].lower()
data_list = await self.redis.lget('Category.' + category)
if page == 'full':
data = await self.redis.get_list('Article', data_list)
elif len(data_list) == 0:
data = []
else:
if page is None:
page = 1
status = await paginate(self.request, page=page, keys_array=data_list)
if status['exit'] == 0:
data = status['data']
else:
return await http_400_response(self.request)
return geass({
'articles': data,
'page': int(page),
'total': status['total'],
'category': category
}, self.request, 'public/catalog.html')
return geass({
'articles': data,
'page': 1
}, self.request, 'public/catalog.html')
class ArticleView(AbsWebView):
async def get(self):
id = self.match['id']
if id.isdigit() is False:
raise web.HTTPNotFound()
data = await self.redis.get('Article', id)
if data is None:
raise web.HTTPNotFound()
        # Format the created/updated timestamps
data['created_date'] = todate(data['created_time'], '%Y-%m-%d %H:%M:%S')
data['updated_date'] = todate(data['updated_time'], '%Y-%m-%d %H:%M:%S')
        # Citations
try:
data['citations'] = [render(item)[3:-5] for item in data.get('citation').split('|')]
except AttributeError:
data['citations'] = []
data['tags'] = [item for item in data.get('tag').split('|')]
if len(re.findall('[$]{1,2}', data['text'])) > 0:
math = True
else:
math = False
return geass({
'article': data,
'math': math,
'PAGE_IDENTIFIER': self.request.app.router['article'].url(
parts={'id': id}
),
'dev': not config.dev,
'comment': True
}, self.request, 'public/article.html')
class ArchiveView(AbsWebView):
async def get(self):
data = await self.redis.lget('Archive', isdict=True, reverse=True)
dit = {}
data.sort(key=lambda x: int(x['created_time']), reverse=True)
for idx, item in enumerate(data):
date = todate(item['created_time'], '%Y年|%m月')
year, month = date.split('|')
if year not in dit:
dit[year] = {}
if month not in dit[year]:
dit[year][month] = {
'length': 0,
'post': [],
'open': True if idx < 30 else False
}
item['date'] = todate(item['created_time'], '%b.%d %Y')
dit[year][month]['length'] += 1
dit[year][month]['post'].append(item)
return geass({
'archive': dit,
'profile': await self.redis.get('Profile'),
'identifier': 'archive'
}, self.request, 'public/archive.html')
class LinkView(AbsWebView):
async def get(self):
value = await self.redis.lget('Link', isdict=True, reverse=False)
profile = await self.redis.get('Profile')
data = []
if value is not None:
for link in value:
if link['hide'] != 'true':
data.append(link)
return geass({
'friends': data,
'blog': {
'name': profile['name'],
'link': config.rss['link'],
'desc': (await self.redis.get('Profile'))['link_desc']
},
'identifier': 'links',
'comment': True
}, self.request, 'public/links.html')
class ProfileView(AbsWebView):
async def get(self):
data = await self.redis.get('Profile')
words = await self.redis.get('Data.WordCount')
return geass({
'profile': data,
'word_count': words,
'identifier': 'about',
'comment': True
}, self.request, 'public/about.html')
class GuestBookView(AbsWebView):
async def get(self):
data = await self.redis.lget('GuestBook', isdict=True, reverse=True)
if data is None:
data = []
return geass({
'notes': data,
'identifier': 'guest-book',
'comment': True
}, self.request, 'public/guestbook.html')
class CommentView(AbsWebView):
async def get(self):
return geass({
'host': config.comment['host']
}, self.request, 'public/comment.html')
class TagView(AbsWebView):
async def get(self):
tag = self.match['tag'].lower()
data = await self.redis.get_list('Article')
articles = []
for article in data:
if tag in [item.lower() for item in article.get('tag').split('|')]:
articles.append(article)
return geass({
'articles': articles,
'page': 1
}, self.request, 'public/catalog.html')
|
{
"content_hash": "0f4ec0d1462c9e88cafd93c24b555420",
"timestamp": "",
"source": "github",
"line_count": 202,
"max_line_length": 117,
"avg_line_length": 33.24752475247525,
"alnum_prop": 0.4994044073853484,
"repo_name": "chiaki64/Windless",
"id": "eaf54881c5ffac28402714deea38f3635b37ba84",
"size": "6812",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "core/views/public.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "504635"
},
{
"name": "Shell",
"bytes": "197"
}
],
"symlink_target": ""
}
|
from typing import List, Union
import torch
import torch.fx
from torch import nn, Tensor
from torch.jit.annotations import BroadcastingList2
from torch.nn.modules.utils import _pair
from torchvision.extension import _assert_has_ops
from ..utils import _log_api_usage_once
from ._utils import check_roi_boxes_shape, convert_boxes_to_roi_format
@torch.fx.wrap
def roi_pool(
input: Tensor,
boxes: Union[Tensor, List[Tensor]],
output_size: BroadcastingList2[int],
spatial_scale: float = 1.0,
) -> Tensor:
"""
Performs Region of Interest (RoI) Pool operator described in Fast R-CNN
Args:
input (Tensor[N, C, H, W]): The input tensor, i.e. a batch with ``N`` elements. Each element
contains ``C`` feature maps of dimensions ``H x W``.
boxes (Tensor[K, 5] or List[Tensor[L, 4]]): the box coordinates in (x1, y1, x2, y2)
format where the regions will be taken from.
The coordinate must satisfy ``0 <= x1 < x2`` and ``0 <= y1 < y2``.
If a single Tensor is passed, then the first column should
contain the index of the corresponding element in the batch, i.e. a number in ``[0, N - 1]``.
If a list of Tensors is passed, then each Tensor will correspond to the boxes for an element i
in the batch.
output_size (int or Tuple[int, int]): the size of the output after the cropping
is performed, as (height, width)
spatial_scale (float): a scaling factor that maps the box coordinates to
the input coordinates. For example, if your boxes are defined on the scale
of a 224x224 image and your input is a 112x112 feature map (resulting from a 0.5x scaling of
the original image), you'll want to set this to 0.5. Default: 1.0
Returns:
Tensor[K, C, output_size[0], output_size[1]]: The pooled RoIs.
"""
if not torch.jit.is_scripting() and not torch.jit.is_tracing():
_log_api_usage_once(roi_pool)
_assert_has_ops()
check_roi_boxes_shape(boxes)
rois = boxes
output_size = _pair(output_size)
if not isinstance(rois, torch.Tensor):
rois = convert_boxes_to_roi_format(rois)
output, _ = torch.ops.torchvision.roi_pool(input, rois, spatial_scale, output_size[0], output_size[1])
return output
class RoIPool(nn.Module):
"""
See :func:`roi_pool`.
"""
def __init__(self, output_size: BroadcastingList2[int], spatial_scale: float):
super().__init__()
_log_api_usage_once(self)
self.output_size = output_size
self.spatial_scale = spatial_scale
def forward(self, input: Tensor, rois: Union[Tensor, List[Tensor]]) -> Tensor:
return roi_pool(input, rois, self.output_size, self.spatial_scale)
def __repr__(self) -> str:
s = f"{self.__class__.__name__}(output_size={self.output_size}, spatial_scale={self.spatial_scale})"
return s
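# Illustrative usage sketch (not part of the torchvision source); the shapes
# and box coordinates below are arbitrary and only show the call pattern.
#
#   >>> feats = torch.rand(1, 8, 32, 32)
#   >>> boxes = torch.tensor([[0.0, 4.0, 4.0, 20.0, 20.0]])  # (batch_idx, x1, y1, x2, y2)
#   >>> roi_pool(feats, boxes, output_size=(7, 7), spatial_scale=1.0).shape
#   torch.Size([1, 8, 7, 7])
#   >>> RoIPool(output_size=(7, 7), spatial_scale=1.0)(feats, boxes).shape
#   torch.Size([1, 8, 7, 7])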
|
{
"content_hash": "b830de0de0487bc491b7c5bd04c7b925",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 108,
"avg_line_length": 40.875,
"alnum_prop": 0.6449201495073055,
"repo_name": "pytorch/vision",
"id": "96282418f0769bc91018a872dbd0a106742bf884",
"size": "2943",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "torchvision/ops/roi_pool.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "20242"
},
{
"name": "C",
"bytes": "930"
},
{
"name": "C++",
"bytes": "366825"
},
{
"name": "CMake",
"bytes": "18266"
},
{
"name": "Cuda",
"bytes": "90174"
},
{
"name": "Dockerfile",
"bytes": "1608"
},
{
"name": "Java",
"bytes": "21833"
},
{
"name": "Objective-C",
"bytes": "2715"
},
{
"name": "Objective-C++",
"bytes": "3284"
},
{
"name": "PowerShell",
"bytes": "2874"
},
{
"name": "Python",
"bytes": "3952070"
},
{
"name": "Ruby",
"bytes": "1086"
},
{
"name": "Shell",
"bytes": "35660"
}
],
"symlink_target": ""
}
|
"""Runs two benchmark programs and compares their results."""
from __future__ import print_function
import argparse
import subprocess
import sys
try:
xrange # Python 2
except NameError:
xrange = range # Python 3
parser = argparse.ArgumentParser()
parser.add_argument('prog1')
parser.add_argument('prog2')
parser.add_argument('--runs', default=1, type=int,
help='number of times to run each program')
def main(args):
results1 = _RunBenchmark(args.prog1)
benchmarks = set(results1.keys())
results2 = {}
for _ in xrange(args.runs - 1):
_MergeResults(results1, _RunBenchmark(args.prog1), benchmarks)
_MergeResults(results2, _RunBenchmark(args.prog2), benchmarks)
_MergeResults(results2, _RunBenchmark(args.prog2), benchmarks)
for b in sorted(benchmarks):
print(b, '{:+.1%}'.format(results2[b] / results1[b] - 1))
def _MergeResults(merged, results, benchmarks):
benchmarks = set(benchmarks)
for k, v in results.iteritems():
if k not in benchmarks:
_Die('unmatched benchmark: {}', k)
merged[k] = max(merged.get(k, 0), v)
benchmarks.remove(k)
if benchmarks:
_Die('missing benchmark(s): {}', ', '.join(benchmarks))
def _RunBenchmark(prog):
"""Executes prog and returns a dict mapping benchmark name -> result."""
try:
p = subprocess.Popen([prog], shell=True, stdout=subprocess.PIPE)
except OSError as e:
_Die(e)
out, _ = p.communicate()
if p.returncode:
_Die('{} exited with status: {}', prog, p.returncode)
results = {}
for line in out.splitlines():
line = line.strip()
if not line:
continue
try:
name, status, result = line.split()
except ValueError:
_Die('invalid benchmark output: {}', line)
if status != 'PASSED':
_Die('benchmark failed: {}', line)
try:
result = float(result)
except ValueError:
_Die('invalid benchmark result: {}', line)
results[name] = result
return results
def _Die(msg, *args):
if args:
msg = msg.format(*args)
print(msg, file=sys.stderr)
sys.exit(1)
if __name__ == '__main__':
main(parser.parse_args())
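# Illustrative note (not in the original script): each benchmark program is
# expected to print one line per benchmark in the form "<name> PASSED <float>",
# for example:
#
#   bench_fib PASSED 0.0123
#   bench_sort PASSED 0.4567
#
# main() then prints, per benchmark, the relative change
# results2[b] / results1[b] - 1, e.g. "bench_fib +3.2%".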
|
{
"content_hash": "6bd858ef1c5d28211348895993121742",
"timestamp": "",
"source": "github",
"line_count": 80,
"max_line_length": 74,
"avg_line_length": 26.625,
"alnum_prop": 0.6460093896713615,
"repo_name": "pombredanne/grumpy",
"id": "eced9512e9217e70285cef3e9e4d022035938120",
"size": "2750",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "grumpy-tools-src/grumpy_tools/benchcmp.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Go",
"bytes": "1015748"
},
{
"name": "Makefile",
"bytes": "13386"
},
{
"name": "Python",
"bytes": "352996"
}
],
"symlink_target": ""
}
|
"""Describe job command."""
from googlecloudsdk.api_lib.dataproc import util
from googlecloudsdk.calliope import base
class Describe(base.Command):
"""View the details of a job."""
detailed_help = {
'DESCRIPTION': '{description}',
'EXAMPLES': """\
To view the details of a job, run:
$ {command} job_id
""",
}
@staticmethod
def Args(parser):
parser.add_argument(
'id',
metavar='JOB_ID',
help='The ID of the job to describe.')
@util.HandleHttpError
def Run(self, args):
client = self.context['dataproc_client']
job_ref = util.ParseJob(args.id, self.context)
request = job_ref.Request()
job = client.projects_regions_jobs.Get(request)
return job
def Display(self, args, result):
self.format(result)
|
{
"content_hash": "f91c55311266ddc76c4cce429082fa62",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 51,
"avg_line_length": 22.10810810810811,
"alnum_prop": 0.6210268948655256,
"repo_name": "flgiordano/netcash",
"id": "0b6f363c5ccceb1c3174c81ec54fff2991aa50e1",
"size": "1414",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "+/google-cloud-sdk/lib/surface/dataproc/jobs/describe.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "622"
},
{
"name": "HTML",
"bytes": "33831"
},
{
"name": "JavaScript",
"bytes": "13859"
},
{
"name": "Shell",
"bytes": "2716"
}
],
"symlink_target": ""
}
|
from setuptools import setup
setup(name='YourAppName',
version='1.0',
description='OpenShift App',
author='Your Name',
author_email='example@example.com',
url='https://www.python.org/community/sigs/current/distutils-sig',
install_requires=['Flask>=0.7.2', 'MarkupSafe'],
)
|
{
"content_hash": "e0e118c6b4056dbff47d51f91cc99895",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 72,
"avg_line_length": 31.5,
"alnum_prop": 0.6476190476190476,
"repo_name": "waco001/abhishekgorti.me-flask",
"id": "cebfd5bf0b50f9d76420b07d842ae4c6466e8183",
"size": "315",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "11734"
},
{
"name": "HTML",
"bytes": "3031"
},
{
"name": "JavaScript",
"bytes": "15056"
},
{
"name": "Python",
"bytes": "44850"
},
{
"name": "Shell",
"bytes": "63"
}
],
"symlink_target": ""
}
|
import flask
def create_valid_response(response_id=None, result=None, error=None, jsonrpc_version='2.0'):
response = {}
if response_id is not None:
response['id'] = response_id
if error is not None: # always return an error
response['error'] = error
elif result is not None:
response['result'] = result
response['jsonrpc'] = jsonrpc_version
return flask.jsonify(response)
def register_remote_object(path, obj, app):
@app.route(path, methods=['POST'])
def jsonrpc(**kwargs):
req = flask.request.json
try:
name = req['method']
method = getattr(obj, name)
params = req.get('params', [])
if isinstance(params, dict):
params.update(kwargs)
result = method(**params)
else:
result = method(*params, **kwargs)
if not isinstance(result, (list, dict, str, unicode, int, float, bool)):
result = str(result)
print "jsonrpc -", result
return create_valid_response(req.get('id'), result)
except AttributeError as e:
error = {'code': -32601, 'message': 'Method not found'}
return create_valid_response(req.get('id'), error=error)
except TypeError as e:
error = {'code': -32602, 'message': 'Invalid params'}
return create_valid_response(req.get('id'), error=error)
except Exception as e:
error = {'code': -32000, 'message': e.message}
return create_valid_response(req.get('id'), error=error)
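# Illustrative sketch (not in the original module): exposing a small object
# over JSON-RPC. The Calculator class, '/calc' path and port are invented;
# like the module above, this assumes Python 2 (it relies on `unicode` and
# print statements elsewhere in the file).
if __name__ == '__main__':
    class Calculator(object):
        def add(self, a, b):
            return a + b
    app = flask.Flask(__name__)
    register_remote_object('/calc', Calculator(), app)
    # POST {"jsonrpc": "2.0", "id": 1, "method": "add", "params": [2, 3]}
    # to /calc and the response body carries {"result": 5, ...}.
    app.run(port=5000)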
|
{
"content_hash": "aafc32fda50d3a2b6ae1c8e66d86f1d0",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 92,
"avg_line_length": 32.36,
"alnum_prop": 0.5673671199011124,
"repo_name": "alexander-svendsen/ev3-python",
"id": "03d957ec8f38435d0eb0373e7dc007b3debc39d8",
"size": "1642",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "server/jsonrpc.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "6656"
},
{
"name": "JavaScript",
"bytes": "1163811"
},
{
"name": "Python",
"bytes": "128642"
}
],
"symlink_target": ""
}
|
"""
views.py
.. moduleauthor:: Steve Androulakis <steve.androulakis@monash.edu>
.. moduleauthor:: Gerson Galang <gerson.galang@versi.edu.au>
.. moduleauthor:: Ulrich Felzmaann <ulrich.felzmann@versi.edu.au>
"""
from tardis.tardis_portal.auth.decorators import has_datafile_download_access,\
has_experiment_write, has_dataset_write
from base64 import b64decode
import urllib2
from urllib import urlencode, urlopen
from os import path
import logging
import json
from operator import itemgetter
from django.template import Context
from django.conf import settings
from django.db import transaction
from django.db.models import Q, Sum
from django.shortcuts import render_to_response, redirect
from django.contrib.auth.models import User, Group, AnonymousUser
from django.http import HttpResponseRedirect, HttpResponse, HttpResponseForbidden, HttpResponseNotFound
from django.contrib.auth.decorators import login_required, permission_required
from django.core.urlresolvers import reverse
from django.core.paginator import Paginator, InvalidPage, EmptyPage
from django.core.exceptions import PermissionDenied
from django.forms.models import model_to_dict
from django.views.decorators.http import require_POST
from django.views.decorators.cache import never_cache
from django.contrib.sites.models import Site
from django.core.validators import validate_email
from django.core.exceptions import ValidationError
from django.template.defaultfilters import pluralize, filesizeformat
from tardis.urls import getTardisApps
from tardis.tardis_portal.ProcessExperiment import ProcessExperiment
from tardis.tardis_portal.forms import ExperimentForm, DatasetForm, \
createSearchDatafileForm, createSearchDatafileSelectionForm, \
LoginForm, RegisterExperimentForm, createSearchExperimentForm, \
ChangeGroupPermissionsForm, ChangeUserPermissionsForm, \
ImportParamsForm, create_parameterset_edit_form, \
save_datafile_edit_form, create_datafile_add_form,\
save_datafile_add_form, MXDatafileSearchForm, RightsForm,\
ManageAccountForm, CreateGroupPermissionsForm,\
CreateUserPermissionsForm
from tardis.tardis_portal.errors import UnsupportedSearchQueryTypeError
from tardis.tardis_portal.staging import get_full_staging_path, \
write_uploaded_file_to_dataset, get_staging_url_and_size, \
staging_list
from tardis.tardis_portal.tasks import create_staging_datafiles,\
create_staging_datafile
from tardis.tardis_portal.models import Experiment, ExperimentParameter, \
DatafileParameter, DatasetParameter, ExperimentACL, Dataset_File, \
DatafileParameterSet, ParameterName, GroupAdmin, Schema, \
Dataset, ExperimentParameterSet, DatasetParameterSet, \
License, UserProfile, UserAuthentication, Token
from tardis.tardis_portal import constants
from tardis.tardis_portal.auth.localdb_auth import django_user, django_group
from tardis.tardis_portal.auth.localdb_auth import auth_key as localdb_auth_key
from tardis.tardis_portal.auth import decorators as authz
from tardis.tardis_portal.auth import auth_service
from tardis.tardis_portal.shortcuts import render_response_index, \
return_response_error, return_response_not_found, \
render_response_search, render_error_message, \
get_experiment_referer
from tardis.tardis_portal.metsparser import parseMets
from tardis.tardis_portal.creativecommonshandler import CreativeCommonsHandler
from tardis.tardis_portal.hacks import oracle_dbops_hack
from tardis.tardis_portal.util import render_public_access_badge
from haystack.views import SearchView
from haystack.query import SearchQuerySet
from tardis.tardis_portal.search_query import FacetFixedSearchQuery
from tardis.tardis_portal.forms import RawSearchForm
from tardis.tardis_portal.search_backend import HighlightSearchBackend
from django.contrib.auth import logout as django_logout
logger = logging.getLogger(__name__)
def get_dataset_info(dataset, include_thumbnail=False):
def get_thumbnail_url(datafile):
return reverse('tardis.tardis_portal.iiif.download_image',
kwargs={'datafile_id': datafile.id,
'region': 'full',
'size': '100,',
'rotation': 0,
'quality': 'native',
'format': 'jpg'})
obj = model_to_dict(dataset)
obj['datafiles'] = list(dataset.dataset_file_set.values_list('id', flat=True))
obj['url'] = dataset.get_absolute_url()
obj['size'] = dataset.get_size()
obj['size_human_readable'] = filesizeformat(dataset.get_size())
if include_thumbnail:
try:
obj['thumbnail'] = get_thumbnail_url(dataset.image)
except AttributeError:
pass
return obj
class HttpResponseMethodNotAllowed(HttpResponse):
    status_code = 405
    def __init__(self, *args, **kwargs):
        # 'allow' is not a keyword HttpResponse.__init__ accepts, so pop it
        # off before delegating to the parent constructor.
        allow = kwargs.pop('allow', 'GET')
        super(HttpResponseMethodNotAllowed, self).__init__(*args, **kwargs)
        self["Allow"] = allow
class HttpResponseSeeAlso(HttpResponseRedirect):
status_code=303
def _redirect_303(*args, **kwargs):
response = redirect(*args, **kwargs)
response.status_code = 303
return response
def getNewSearchDatafileSelectionForm(initial=None):
DatafileSelectionForm = createSearchDatafileSelectionForm(initial)
return DatafileSelectionForm()
def logout(request):
if 'datafileResults' in request.session:
del request.session['datafileResults']
c = Context({})
return HttpResponse(render_response_index(request,
'tardis_portal/index.html', c))
def index(request):
status = ''
c = Context({'status': status})
return HttpResponse(render_response_index(request,
'tardis_portal/index.html', c))
def site_settings(request):
if request.method == 'POST':
if 'username' in request.POST and 'password' in request.POST:
user = auth_service.authenticate(request=request,
authMethod=localdb_auth_key)
if user is not None:
if user.is_staff:
x509 = open(settings.GRID_PROXY_FILE, 'r')
c = Context({'baseurl': request.build_absolute_uri('/'),
'proxy': x509.read(), 'filestorepath':
settings.FILE_STORE_PATH})
return HttpResponse(render_response_index(request,
'tardis_portal/site_settings.xml', c),
mimetype='application/xml')
return return_response_error(request)
@never_cache
def load_image(request, parameter):
file_path = path.abspath(path.join(settings.FILE_STORE_PATH,
parameter.string_value))
from django.core.servers.basehttp import FileWrapper
wrapper = FileWrapper(file(file_path))
return HttpResponse(wrapper, mimetype=parameter.name.units)
def load_experiment_image(request, parameter_id):
parameter = ExperimentParameter.objects.get(pk=parameter_id)
experiment_id = parameter.parameterset.experiment.id
if authz.has_experiment_access(request, experiment_id):
return load_image(request, parameter)
else:
return return_response_error(request)
def load_dataset_image(request, parameter_id):
parameter = DatafileParameter.objects.get(pk=parameter_id)
dataset = parameter.parameterset.dataset
if authz.has_dataset_access(request, dataset.id):
return load_image(request, parameter)
else:
return return_response_error(request)
def load_datafile_image(request, parameter_id):
parameter = DatafileParameter.objects.get(pk=parameter_id)
dataset_file = parameter.parameterset.dataset_file
if authz.has_datafile_access(request, dataset_file.id):
return load_image(request, parameter)
else:
return return_response_error(request)
@authz.experiment_access_required
def display_experiment_image(
request, experiment_id, parameterset_id, parameter_name):
# TODO handle not exist
if not authz.has_experiment_access(request, experiment_id):
return return_response_error(request)
image = ExperimentParameter.objects.get(name__name=parameter_name,
parameterset=parameterset_id)
return HttpResponse(b64decode(image.string_value), mimetype='image/jpeg')
@authz.dataset_access_required
def display_dataset_image(
request, dataset_id, parameterset_id, parameter_name):
# TODO handle not exist
if not authz.has_dataset_access(request, dataset_id):
return return_response_error(request)
image = DatasetParameter.objects.get(name__name=parameter_name,
parameterset=parameterset_id)
return HttpResponse(b64decode(image.string_value), mimetype='image/jpeg')
@authz.datafile_access_required
def display_datafile_image(
request, dataset_file_id, parameterset_id, parameter_name):
# TODO handle not exist
if not authz.has_datafile_access(request, dataset_file_id):
return return_response_error(request)
image = DatafileParameter.objects.get(name__name=parameter_name,
parameterset=parameterset_id)
return HttpResponse(b64decode(image.string_value), mimetype='image/jpeg')
def about(request):
c = Context({'subtitle': 'About',
'about_pressed': True,
'nav': [{'name': 'About', 'link': '/about/'}]})
return HttpResponse(render_response_index(request,
'tardis_portal/about.html', c))
def experiment_index(request):
if request.user.is_authenticated():
return redirect('tardis_portal.experiment_list_mine')
else:
return redirect('tardis_portal.experiment_list_public')
@login_required
def experiment_list_mine(request):
c = Context({
'subtitle': 'My Experiments',
'can_see_private': True,
'experiments': authz.get_owned_experiments(request)\
.order_by('-update_time'),
})
# TODO actually change loaders to load this based on stuff
return HttpResponse(render_response_search(request,
'tardis_portal/experiment/list_mine.html', c))
@login_required
def experiment_list_shared(request):
c = Context({
'subtitle': 'Shared Experiments',
'can_see_private': True,
'experiments': authz.get_shared_experiments(request) \
.order_by('-update_time'),
})
# TODO actually change loaders to load this based on stuff
return HttpResponse(render_response_search(request,
'tardis_portal/experiment/list_shared.html', c))
def experiment_list_public(request):
private_filter = Q(public_access=Experiment.PUBLIC_ACCESS_NONE)
c = Context({
'subtitle': 'Public Experiments',
'can_see_private': False,
'experiments': Experiment.objects.exclude(private_filter) \
.order_by('-update_time'),
})
return HttpResponse(render_response_search(request,
'tardis_portal/experiment/list_public.html', c))
@authz.experiment_access_required
def view_experiment(request, experiment_id,
template_name='tardis_portal/view_experiment.html'):
"""View an existing experiment.
:param request: a HTTP Request instance
:type request: :class:`django.http.HttpRequest`
:param experiment_id: the ID of the experiment to be edited
:type experiment_id: string
:rtype: :class:`django.http.HttpResponse`
"""
c = Context({})
try:
experiment = Experiment.safe.get(request, experiment_id)
except PermissionDenied:
return return_response_error(request)
except Experiment.DoesNotExist:
return return_response_not_found(request)
c['experiment'] = experiment
c['has_write_permissions'] = \
authz.has_write_permissions(request, experiment_id)
c['has_download_permissions'] = \
authz.has_experiment_download_access(request, experiment_id)
if request.user.is_authenticated():
c['is_owner'] = authz.has_experiment_ownership(request, experiment_id)
c['subtitle'] = experiment.title
c['nav'] = [{'name': 'Data', 'link': '/experiment/view/'},
{'name': experiment.title,
'link': experiment.get_absolute_url()}]
if 'status' in request.POST:
c['status'] = request.POST['status']
if 'error' in request.POST:
c['error'] = request.POST['error']
if 'query' in request.GET:
c['search_query'] = SearchQueryString(request.GET['query'])
if 'search' in request.GET:
c['search'] = request.GET['search']
if 'load' in request.GET:
c['load'] = request.GET['load']
# Download protocols
c['protocol'] = []
download_urls = experiment.get_download_urls()
for key, value in download_urls.iteritems():
c['protocol'] += [[key, value]]
import sys
appnames = []
appurls = []
for app in getTardisApps():
try:
appnames.append(sys.modules['%s.%s.settings'
% (settings.TARDIS_APP_ROOT, app)].NAME)
appurls.append('%s.%s.views.index' % (settings.TARDIS_APP_ROOT,
app))
except:
logger.debug("No tab for %s" % app)
pass
c['apps'] = zip(appurls, appnames)
return HttpResponse(render_response_index(request, template_name, c))
@authz.experiment_access_required
def experiment_description(request, experiment_id):
"""View an existing experiment's description. To be loaded via ajax.
:param request: a HTTP Request instance
:type request: :class:`django.http.HttpRequest`
:param experiment_id: the ID of the experiment to be edited
:type experiment_id: string
:rtype: :class:`django.http.HttpResponse`
"""
c = Context({})
try:
experiment = Experiment.safe.get(request, experiment_id)
except PermissionDenied:
return return_response_error(request)
except Experiment.DoesNotExist:
return return_response_not_found(request)
c['experiment'] = experiment
c['subtitle'] = experiment.title
c['nav'] = [{'name': 'Data', 'link': '/experiment/view/'},
{'name': experiment.title,
'link': experiment.get_absolute_url()}]
c['authors'] = experiment.author_experiment_set.all()
c['datafiles'] = \
Dataset_File.objects.filter(dataset__experiments=experiment_id)
c['owners'] = experiment.get_owners()
# calculate the sum of the datafile sizes
c['size'] = Dataset_File.sum_sizes(c['datafiles'])
c['has_read_or_owner_ACL'] = \
authz.has_read_or_owner_ACL(request, experiment_id)
c['has_download_permissions'] = \
authz.has_experiment_download_access(request, experiment_id)
c['has_write_permissions'] = \
authz.has_write_permissions(request, experiment_id)
if request.user.is_authenticated():
c['is_owner'] = authz.has_experiment_ownership(request, experiment_id)
c['protocol'] = []
download_urls = experiment.get_download_urls()
for key, value in download_urls.iteritems():
c['protocol'] += [[key, value]]
if 'status' in request.GET:
c['status'] = request.GET['status']
if 'error' in request.GET:
c['error'] = request.GET['error']
return HttpResponse(render_response_index(request,
'tardis_portal/ajax/experiment_description.html', c))
#
# Class to manage switching between space separated search queries and
# '+' separated search queries (for addition to urls
#
# TODO This would probably be better handled with filters
#
class SearchQueryString():
def __init__(self, query_string):
import re
# remove extra spaces around colons
stripped_query = re.sub('\s*?:\s*', ':', query_string)
# create a list of terms which can be easily joined by
# spaces or pluses
self.query_terms = stripped_query.split()
def __unicode__(self):
return ' '.join(self.query_terms)
def url_safe_query(self):
return '+'.join(self.query_terms)
def query_string(self):
return self.__unicode__()
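# Illustrative sketch (not part of the original views): SearchQueryString
# round-trips between the space-separated form shown in templates and the
# '+'-separated form embedded in URLs. The query text is invented.
#
#   sqs = SearchQueryString('title: diffraction  author :smith')
#   sqs.query_string()    # -> 'title:diffraction author:smith'
#   sqs.url_safe_query()  # -> 'title:diffraction+author:smith'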
@authz.dataset_access_required
def view_dataset(request, dataset_id):
dataset = Dataset.objects.get(id=dataset_id)
def get_datafiles_page():
# pagination was removed by someone in the interface but not here.
# need to fix.
pgresults = 100
paginator = Paginator(dataset.dataset_file_set.all(), pgresults)
try:
page = int(request.GET.get('page', '1'))
except ValueError:
page = 1
# If page request (9999) is out of range, deliver last page of results.
try:
return paginator.page(page)
except (EmptyPage, InvalidPage):
return paginator.page(paginator.num_pages)
c = Context({
'dataset': dataset,
'datafiles': get_datafiles_page(),
'parametersets': dataset.getParameterSets()
.exclude(schema__hidden=True),
'has_download_permissions':
authz.has_dataset_download_access(request, dataset_id),
'has_write_permissions':
authz.has_dataset_write(request, dataset_id),
'from_experiment': \
get_experiment_referer(request, dataset_id),
'other_experiments': \
authz.get_accessible_experiments_for_dataset(request, dataset_id)
})
return HttpResponse(render_response_index(request,
'tardis_portal/view_dataset.html', c))
@never_cache
@authz.experiment_access_required
def experiment_datasets(request, experiment_id):
return view_experiment(request, experiment_id=experiment_id,
template_name='tardis_portal/ajax/experiment_datasets.html')
@never_cache
@authz.dataset_access_required
def dataset_json(request, experiment_id=None, dataset_id=None):
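    """Return JSON metadata for a dataset, optionally in the context of
    an experiment. PUT adds the dataset to the experiments listed in the
    request body; DELETE removes it from the given experiment, provided
    it is not the dataset's last experiment.
    :param request: a HTTP Request instance
    :type request: :class:`django.http.HttpRequest`
    :param experiment_id: the ID of an experiment the dataset belongs to
    :type experiment_id: string
    :param dataset_id: the ID of the dataset
    :type dataset_id: string
    :rtype: :class:`django.http.HttpResponse`
    """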
# Experiment ID is optional (but dataset_id is not)!
    dataset = Dataset.objects.get(id=dataset_id)
    # Guard against requests that don't supply an experiment_id
    experiment = None
    if experiment_id:
try:
# PUT is fine for non-existing resources, but GET/DELETE is not
if request.method == 'PUT':
experiment = Experiment.objects.get(id=experiment_id)
else:
experiment = dataset.experiments.get(id=experiment_id)
except Experiment.DoesNotExist:
return HttpResponseNotFound()
# Convenience methods for permissions
def can_update():
return authz.has_dataset_ownership(request, dataset_id)
can_delete = can_update
def add_experiments(updated_experiments):
current_experiments = \
frozenset(dataset.experiments.values_list('id', flat=True))
# Get all the experiments that currently aren't associated
for experiment_id in updated_experiments - current_experiments:
# You must own the experiment to assign datasets to it
if authz.has_experiment_ownership(request, experiment_id):
experiment = Experiment.safe.get(request, experiment_id)
logger.info("Adding dataset #%d to experiment #%d" %
(dataset.id, experiment.id))
dataset.experiments.add(experiment)
# Update this experiment to add it to more experiments
if request.method == 'PUT':
# Obviously you can't do this if you don't own the dataset
if not can_update():
return HttpResponseForbidden()
data = json.loads(request.body)
# Detect if any experiments are new, and add the dataset to them
add_experiments(frozenset(data['experiments']))
# Include the experiment we PUT to, as it may also be new
        if experiment is not None:
add_experiments(frozenset((experiment.id,)))
dataset.save()
# Remove this dataset from the given experiment
if request.method == 'DELETE':
# First, we need an experiment
        if experiment_id is None:
# As the experiment is in the URL, this method will never be allowed
if can_update():
return HttpResponseMethodNotAllowed(allow="GET PUT")
return HttpResponseMethodNotAllowed(allow="GET")
# Cannot remove if this is the last experiment
if not can_delete() or dataset.experiments.count() < 2:
return HttpResponseForbidden()
dataset.experiments.remove(experiment)
dataset.save()
has_download_permissions = \
authz.has_dataset_download_access(request, dataset_id)
return HttpResponse(json.dumps(get_dataset_info(dataset,
has_download_permissions)),
mimetype='application/json')
@never_cache
@authz.experiment_access_required
def experiment_datasets_json(request, experiment_id):
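    """Return a JSON list of dataset summaries for an experiment.
    :param request: a HTTP Request instance
    :type request: :class:`django.http.HttpRequest`
    :param experiment_id: the ID of the experiment
    :type experiment_id: string
    :rtype: :class:`django.http.HttpResponse`
    """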
try:
experiment = Experiment.safe.get(request, experiment_id)
except Experiment.DoesNotExist:
return return_response_not_found(request)
has_download_permissions = \
authz.has_experiment_download_access(request, experiment_id)
objects = [ get_dataset_info(ds, has_download_permissions) \
for ds in experiment.datasets.all() ]
return HttpResponse(json.dumps(objects), mimetype='application/json')
@never_cache
@authz.experiment_access_required
def experiment_dataset_transfer(request, experiment_id):
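    """Render the dataset transfer dialog, listing the user's other
    owned experiments. To be loaded via ajax.
    :param request: a HTTP Request instance
    :type request: :class:`django.http.HttpRequest`
    :param experiment_id: the ID of the experiment to exclude
    :type experiment_id: string
    :rtype: :class:`django.http.HttpResponse`
    """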
experiments = Experiment.safe.owned(request)
def get_json_url_pattern():
placeholder = '314159'
return reverse('tardis.tardis_portal.views.experiment_datasets_json',
args=[placeholder]).replace(placeholder,
'{{experiment_id}}')
c = Context({
'experiments': experiments.exclude(id=experiment_id),
'url_pattern': get_json_url_pattern()
    })
return HttpResponse(render_response_index(request,
'tardis_portal/ajax/experiment_dataset_transfer.html',
c))
@authz.dataset_access_required
def retrieve_dataset_metadata(request, dataset_id):
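    """Render the parameter sets attached to a dataset. To be loaded via ajax.
    :param request: a HTTP Request instance
    :type request: :class:`django.http.HttpRequest`
    :param dataset_id: the ID of the dataset
    :type dataset_id: string
    :rtype: :class:`django.http.HttpResponse`
    """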
dataset = Dataset.objects.get(pk=dataset_id)
has_write_permissions = authz.has_dataset_write(request, dataset_id)
parametersets = dataset.datasetparameterset_set.exclude(schema__hidden=True)
c = Context({'dataset': dataset,
'parametersets': parametersets,
'has_write_permissions': has_write_permissions})
return HttpResponse(render_response_index(request,
'tardis_portal/ajax/dataset_metadata.html', c))
@never_cache
@authz.experiment_access_required
def retrieve_experiment_metadata(request, experiment_id):
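    """Render the parameter sets attached to an experiment. To be loaded
    via ajax.
    :param request: a HTTP Request instance
    :type request: :class:`django.http.HttpRequest`
    :param experiment_id: the ID of the experiment
    :type experiment_id: string
    :rtype: :class:`django.http.HttpResponse`
    """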
experiment = Experiment.objects.get(pk=experiment_id)
has_write_permissions = \
authz.has_write_permissions(request, experiment_id)
parametersets = experiment.experimentparameterset_set\
.exclude(schema__hidden=True)
c = Context({'experiment': experiment,
'parametersets': parametersets,
'has_write_permissions': has_write_permissions})
return HttpResponse(render_response_index(request,
'tardis_portal/ajax/experiment_metadata.html', c))
@permission_required('tardis_portal.add_experiment')
@login_required
def create_experiment(request,
template_name='tardis_portal/create_experiment.html'):
"""Create a new experiment view.
:param request: a HTTP Request instance
:type request: :class:`django.http.HttpRequest`
:param template_name: the path of the template to render
:type template_name: string
:rtype: :class:`django.http.HttpResponse`
"""
c = Context({
'subtitle': 'Create Experiment',
'user_id': request.user.id,
})
if request.method == 'POST':
form = ExperimentForm(request.POST)
if form.is_valid():
full_experiment = form.save(commit=False)
# group/owner assignment stuff, soon to be replaced
experiment = full_experiment['experiment']
experiment.created_by = request.user
full_experiment.save_m2m()
            # add default ACL
acl = ExperimentACL(experiment=experiment,
pluginId=django_user,
entityId=str(request.user.id),
canRead=True,
canWrite=True,
canDelete=True,
isOwner=True,
aclOwnershipType=ExperimentACL.OWNER_OWNED)
acl.save()
request.POST = {'status': "Experiment Created."}
return HttpResponseSeeAlso(reverse(
'tardis.tardis_portal.views.view_experiment',
args=[str(experiment.id)]) + "#created")
c['status'] = "Errors exist in form."
c["error"] = 'true'
else:
form = ExperimentForm(extra=1)
c['form'] = form
c['default_institution'] = settings.DEFAULT_INSTITUTION
return HttpResponse(render_response_index(request, template_name, c))
@never_cache
@authz.experiment_access_required
def metsexport_experiment(request, experiment_id):
force_http_urls = 'force_http_urls' in request.GET
from django.contrib.sites.models import Site
if force_http_urls:
protocol = ""
if request.is_secure():
protocol = "s"
force_http_urls = "http%s://%s" % (protocol, Site.objects.get_current().domain)
from os.path import basename
from django.core.servers.basehttp import FileWrapper
from tardis.tardis_portal.metsexporter import MetsExporter
exporter = MetsExporter()
filename = exporter.export(experiment_id,
force_http_urls=force_http_urls)
response = HttpResponse(FileWrapper(file(filename)),
                            mimetype='application/xml')
response['Content-Disposition'] = \
'attachment; filename="%s"' % basename(filename)
return response
@login_required
@permission_required('tardis_portal.change_experiment')
@authz.write_permissions_required
def edit_experiment(request, experiment_id,
template="tardis_portal/create_experiment.html"):
"""Edit an existing experiment.
:param request: a HTTP Request instance
:type request: :class:`django.http.HttpRequest`
:param experiment_id: the ID of the experiment to be edited
:type experiment_id: string
    :param template: the path of the template to render
    :type template: string
:rtype: :class:`django.http.HttpResponse`
"""
experiment = Experiment.objects.get(id=experiment_id)
c = Context({'subtitle': 'Edit Experiment',
'experiment_id': experiment_id,
})
if request.method == 'POST':
form = ExperimentForm(data=request.POST, instance=experiment, extra=0)
if form.is_valid():
full_experiment = form.save(commit=False)
experiment = full_experiment['experiment']
experiment.created_by = request.user
full_experiment.save_m2m()
request.POST = {'status': "Experiment Saved."}
return HttpResponseSeeAlso(reverse(
'tardis.tardis_portal.views.view_experiment',
args=[str(experiment.id)]) + "#saved")
c['status'] = "Errors exist in form."
c["error"] = 'true'
else:
form = ExperimentForm(instance=experiment, extra=0)
c['form'] = form
return HttpResponse(render_response_index(request, template, c))
# todo complete....
def login(request):
from tardis.tardis_portal.auth import login, auth_service
if request.user.is_authenticated():
# redirect the user to the home page if he is trying to go to the
# login page
return HttpResponseRedirect('/')
# TODO: put me in SETTINGS
if 'username' in request.POST and \
'password' in request.POST:
authMethod = request.POST['authMethod']
if 'next' not in request.GET:
next = '/'
else:
next = request.GET['next']
user = auth_service.authenticate(
authMethod=authMethod, request=request)
if user:
user.backend = 'django.contrib.auth.backends.ModelBackend'
login(request, user)
return HttpResponseRedirect(next)
c = Context({'status': "Sorry, username and password don't match.",
'error': True,
'loginForm': LoginForm()})
return HttpResponseForbidden( \
render_response_index(request, 'tardis_portal/login.html', c))
c = Context({'loginForm': LoginForm()})
return HttpResponse(render_response_index(request,
'tardis_portal/login.html', c))
@permission_required('tardis_portal.change_userauthentication')
@login_required()
def manage_auth_methods(request):
'''Manage the user's authentication methods using AJAX.'''
from tardis.tardis_portal.auth.authentication import add_auth_method, \
merge_auth_method, remove_auth_method, edit_auth_method, \
list_auth_methods
if request.method == 'POST':
operation = request.POST['operation']
if operation == 'addAuth':
return add_auth_method(request)
elif operation == 'mergeAuth':
return merge_auth_method(request)
elif operation == 'removeAuth':
return remove_auth_method(request)
else:
return edit_auth_method(request)
else:
# if GET, we'll just give the initial list of auth methods for the user
return list_auth_methods(request)
# TODO removed username from arguments
@transaction.commit_on_success
def _registerExperimentDocument(filename, created_by, expid=None,
owners=[], username=None):
'''
Register the experiment document and return the experiment id.
:param filename: path of the document to parse (METS or notMETS)
:type filename: string
:param created_by: a User instance
:type created_by: :py:class:`django.contrib.auth.models.User`
:param expid: the experiment ID to use
:type expid: int
:param owners: a list of owners
    :type owners: list
:param username: **UNUSED**
:rtype: int
'''
f = open(filename)
firstline = f.readline()
f.close()
sync_root = ''
if firstline.startswith('<experiment'):
logger.debug('processing simple xml')
processExperiment = ProcessExperiment()
eid, sync_root = processExperiment.process_simple(filename,
created_by,
expid)
else:
logger.debug('processing METS')
eid, sync_root = parseMets(filename, created_by, expid)
auth_key = ''
try:
auth_key = settings.DEFAULT_AUTH
except AttributeError:
logger.error('no default authentication for experiment ownership set (settings.DEFAULT_AUTH)')
force_user_create = False
try:
force_user_create = settings.DEFAULT_AUTH_FORCE_USER_CREATE
except AttributeError:
pass
if auth_key:
for owner in owners:
# for each PI
if not owner:
continue
owner_username = None
if '@' in owner:
owner_username = auth_service.getUsernameByEmail(auth_key,
owner)
if not owner_username:
owner_username = owner
owner_user = auth_service.getUser(auth_key, owner_username,
force_user_create=force_user_create)
# if exist, create ACL
if owner_user:
logger.debug('registering owner: ' + owner)
e = Experiment.objects.get(pk=eid)
acl = ExperimentACL(experiment=e,
pluginId=django_user,
entityId=str(owner_user.id),
canRead=True,
canWrite=True,
canDelete=True,
isOwner=True,
aclOwnershipType=ExperimentACL.OWNER_OWNED)
acl.save()
return (eid, sync_root)
# web service
def register_experiment_ws_xmldata(request):
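    """Web service endpoint for registering an experiment from an
    uploaded METS (or simple XML) document. On success the response body
    contains the sync path and the Location header points to the new
    experiment.
    :param request: a HTTP Request instance
    :type request: :class:`django.http.HttpRequest`
    :rtype: :class:`django.http.HttpResponse`
    """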
status = ''
if request.method == 'POST': # If the form has been submitted...
# A form bound to the POST data
form = RegisterExperimentForm(request.POST, request.FILES)
if form.is_valid(): # All validation rules pass
xmldata = request.FILES['xmldata']
username = form.cleaned_data['username']
origin_id = form.cleaned_data['originid']
from_url = form.cleaned_data['from_url']
user = auth_service.authenticate(request=request,
authMethod=localdb_auth_key)
if user:
if not user.is_active:
return return_response_error(request)
else:
return return_response_error(request)
e = Experiment(
title='Placeholder Title',
approved=True,
created_by=user,
)
e.save()
local_id = e.id
filename = path.join(e.get_or_create_directory(),
'mets_upload.xml')
f = open(filename, 'wb+')
for chunk in xmldata.chunks():
f.write(chunk)
f.close()
logger.info('=== processing experiment: START')
owners = request.POST.getlist('experiment_owner')
try:
_, sync_path = _registerExperimentDocument(filename=filename,
created_by=user,
expid=local_id,
owners=owners,
username=username)
logger.info('=== processing experiment %s: DONE' % local_id)
except:
logger.exception('=== processing experiment %s: FAILED!' % local_id)
return return_response_error(request)
if from_url:
logger.info('Sending received_remote signal')
from tardis.tardis_portal.signals import received_remote
received_remote.send(sender=Experiment,
instance=e,
uid=origin_id,
from_url=from_url,
sync_path=sync_path)
response = HttpResponse(str(sync_path), status=200)
response['Location'] = request.build_absolute_uri(
'/experiment/view/' + str(local_id))
return response
else:
form = RegisterExperimentForm() # An unbound form
c = Context({
'form': form,
'status': status,
'subtitle': 'Register Experiment',
'searchDatafileSelectionForm': getNewSearchDatafileSelectionForm()})
return HttpResponse(render_response_index(request,
'tardis_portal/register_experiment.html', c))
@never_cache
@authz.datafile_access_required
def retrieve_parameters(request, dataset_file_id):
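    """Render the parameter sets attached to a datafile. To be loaded via ajax.
    :param request: a HTTP Request instance
    :type request: :class:`django.http.HttpRequest`
    :param dataset_file_id: the ID of the datafile
    :type dataset_file_id: string
    :rtype: :class:`django.http.HttpResponse`
    """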
parametersets = DatafileParameterSet.objects.all()
parametersets = parametersets.filter(dataset_file__pk=dataset_file_id)\
.exclude(schema__hidden=True)
datafile = Dataset_File.objects.get(id=dataset_file_id)
dataset_id = datafile.dataset.id
has_write_permissions = authz.has_dataset_write(request, dataset_id)
c = Context({'parametersets': parametersets,
'datafile': datafile,
'has_write_permissions': has_write_permissions,
'has_download_permissions':
authz.has_dataset_download_access(request, dataset_id) })
return HttpResponse(render_response_index(request,
'tardis_portal/ajax/parameters.html', c))
@never_cache
@authz.dataset_access_required
def retrieve_datafile_list(request, dataset_id, template_name='tardis_portal/ajax/datafile_list.html'):
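    """Render a paginated list of a dataset's datafiles, optionally
    restricted to search results. To be loaded via ajax.
    :param request: a HTTP Request instance
    :type request: :class:`django.http.HttpRequest`
    :param dataset_id: the ID of the dataset
    :type dataset_id: string
    :param template_name: the path of the template to render
    :type template_name: string
    :rtype: :class:`django.http.HttpResponse`
    """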
params = {}
query = None
highlighted_dsf_pks = []
if 'query' in request.GET:
search_query = FacetFixedSearchQuery(backend=HighlightSearchBackend())
sqs = SearchQuerySet(query=search_query)
query = SearchQueryString(request.GET['query'])
results = sqs.raw_search(query.query_string() + ' AND dataset_id_stored:%i' % (int(dataset_id))).load_all()
highlighted_dsf_pks = [int(r.pk) for r in results if r.model_name == 'dataset_file' and r.dataset_id_stored == int(dataset_id)]
params['query'] = query.query_string()
elif 'datafileResults' in request.session and 'search' in request.GET:
highlighted_dsf_pks = [r.pk for r in request.session['datafileResults']]
dataset_results = \
Dataset_File.objects.filter(
dataset__pk=dataset_id,
).order_by('filename')
if request.GET.get('limit', False) and len(highlighted_dsf_pks):
dataset_results = \
dataset_results.filter(pk__in=highlighted_dsf_pks)
params['limit'] = request.GET['limit']
filename_search = None
if 'filename' in request.GET and len(request.GET['filename']):
filename_search = request.GET['filename']
dataset_results = \
dataset_results.filter(url__icontains=filename_search)
params['filename'] = filename_search
# pagination was removed by someone in the interface but not here.
# need to fix.
pgresults = 100
paginator = Paginator(dataset_results, pgresults)
try:
page = int(request.GET.get('page', '1'))
except ValueError:
page = 1
# If page request (9999) is out of range, deliver last page of results.
try:
dataset = paginator.page(page)
except (EmptyPage, InvalidPage):
dataset = paginator.page(paginator.num_pages)
is_owner = False
has_download_permissions = authz.has_dataset_download_access(request,
dataset_id)
has_write_permissions = False
if request.user.is_authenticated():
is_owner = authz.has_dataset_ownership(request, dataset_id)
has_write_permissions = authz.has_dataset_write(request, dataset_id)
immutable = Dataset.objects.get(id=dataset_id).immutable
params = urlencode(params)
c = Context({
'datafiles': dataset,
'paginator': paginator,
'immutable': immutable,
'dataset': Dataset.objects.get(id=dataset_id),
'filename_search': filename_search,
'is_owner': is_owner,
'highlighted_dataset_files': highlighted_dsf_pks,
'has_download_permissions': has_download_permissions,
'has_write_permissions': has_write_permissions,
'search_query' : query,
'params' : params
})
return HttpResponse(render_response_index(request, template_name, c))
@login_required()
def control_panel(request):
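    """List the experiments owned by the current user, for management.
    :param request: a HTTP Request instance
    :type request: :class:`django.http.HttpRequest`
    :rtype: :class:`django.http.HttpResponse`
    """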
experiments = Experiment.safe.owned(request)
if experiments:
experiments = experiments.order_by('title')
c = Context({'experiments': experiments,
'subtitle': 'Experiment Control Panel'})
return HttpResponse(render_response_index(request,
'tardis_portal/control_panel.html', c))
@oracle_dbops_hack
def search_experiment(request):
"""Either show the search experiment form or the result of the search
experiment query.
"""
if len(request.GET) == 0:
return __forwardToSearchExperimentFormPage(request)
form = __getSearchExperimentForm(request)
experiments = __processExperimentParameters(request, form)
# check if the submitted form is valid
if experiments is not None:
bodyclass = 'list'
else:
return __forwardToSearchExperimentFormPage(request)
# remove information from previous searches from session
if 'datafileResults' in request.session:
del request.session['datafileResults']
results = []
for e in experiments:
result = {}
result['sr'] = e
result['dataset_hit'] = False
result['dataset_file_hit'] = False
result['experiment_hit'] = True
results.append(result)
c = Context({'header': 'Search Experiment',
'experiments': results,
'bodyclass': bodyclass})
url = 'tardis_portal/search_experiment_results.html'
return HttpResponse(render_response_search(request, url, c))
def search_quick(request):
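    """Perform a simple substring search of experiments by title,
    institution, author name or PDB ID.
    :param request: a HTTP Request instance
    :type request: :class:`django.http.HttpRequest`
    :rtype: :class:`django.http.HttpResponse`
    """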
get = False
experiments = Experiment.objects.all().order_by('title')
if 'results' in request.GET:
get = True
if 'quicksearch' in request.GET \
and len(request.GET['quicksearch']) > 0:
experiments = \
experiments.filter(
title__icontains=request.GET['quicksearch']) | \
experiments.filter(
institution_name__icontains=request.GET['quicksearch']) | \
experiments.filter(
author_experiment__author__name__icontains=request.GET[
'quicksearch']) | \
experiments.filter(
pdbid__pdbid__icontains=request.GET['quicksearch'])
experiments = experiments.distinct()
logger.debug(experiments)
c = Context({'submitted': get, 'experiments': experiments,
'subtitle': 'Search Experiments'})
return HttpResponse(render_response_index(request,
'tardis_portal/search_experiment.html', c))
def __getFilteredDatafiles(request, searchQueryType, searchFilterData):
"""Filter the list of datafiles for the provided searchQueryType using the
cleaned up searchFilterData.
Arguments:
request -- the HTTP request
searchQueryType -- the type of query, 'mx' or 'saxs'
searchFilterData -- the cleaned up search form data
Returns:
A list of datafiles as a result of the query or None if the provided search
request is invalid
"""
datafile_results = authz.get_accessible_datafiles_for_user(request)
logger.info('__getFilteredDatafiles: searchFilterData {0}'.
format(searchFilterData))
# there's no need to do any filtering if we didn't find any
# datafiles that the user has access to
if not datafile_results:
logger.info("""__getFilteredDatafiles: user {0} doesn\'t have
access to any experiments""".format(request.user))
return datafile_results
datafile_results = \
datafile_results.filter(
datafileparameterset__datafileparameter__name__schema__namespace__in=Schema
.getNamespaces(
Schema.DATAFILE, searchQueryType)).distinct()
# if filename is searchable which i think will always be the case...
if searchFilterData['filename'] != '':
datafile_results = \
datafile_results.filter(
filename__icontains=searchFilterData['filename'])
# TODO: might need to cache the result of this later on
# get all the datafile parameters for the given schema
parameters = [p for p in
ParameterName.objects.filter(
schema__namespace__in=Schema.getNamespaces(Schema.DATAFILE,
searchQueryType))]
datafile_results = __filterParameters(parameters, datafile_results,
searchFilterData, 'datafileparameterset__datafileparameter')
# get all the dataset parameters for given schema
parameters = [p for p in
ParameterName.objects.filter(
schema__namespace__in=Schema.getNamespaces(Schema.DATASET,
searchQueryType))]
datafile_results = __filterParameters(parameters, datafile_results,
searchFilterData, 'dataset__datasetparameterset__datasetparameter')
# let's sort it in the end
if datafile_results:
datafile_results = datafile_results.order_by('filename')
logger.debug("results: {0}".format(datafile_results))
return datafile_results
def __getFilteredExperiments(request, searchFilterData):
"""Filter the list of experiments using the cleaned up searchFilterData.
Arguments:
request -- the HTTP request
searchFilterData -- the cleaned up search experiment form data
Returns:
A list of experiments as a result of the query or None if the provided
search request is invalid
"""
experiments = authz.get_accessible_experiments(request)
if experiments is None:
return []
# search for the default experiment fields
if searchFilterData['title'] != '':
experiments = \
experiments.filter(title__icontains=searchFilterData['title'])
if searchFilterData['description'] != '':
experiments = \
experiments.filter(
description__icontains=searchFilterData['description'])
if searchFilterData['institutionName'] != '':
experiments = \
experiments.filter(
institution_name__icontains=searchFilterData['institutionName'])
if searchFilterData['creator'] != '':
experiments = \
experiments.filter(
author_experiment__author__icontains=searchFilterData['creator'])
date = searchFilterData['date']
    if date is not None:
experiments = \
experiments.filter(start_time__lt=date, end_time__gt=date)
# get all the experiment parameters
exp_schema_namespaces = Schema.getNamespaces(Schema.EXPERIMENT)
parameters = ParameterName.objects.filter(
schema__namespace__in=exp_schema_namespaces, is_searchable=True)
experiments = __filterParameters(parameters, experiments,
searchFilterData, 'experimentparameterset__experimentparameter')
# let's sort it in the end
experiments = experiments.order_by('title')
return experiments
def __filterParameters(parameters, datafile_results,
searchFilterData, paramType):
"""Go through each parameter and apply it as a filter (together with its
specified comparator) on the provided list of datafiles.
:param parameters: list of ParameterNames model
:type parameters: list containing
:py:class:`tardis.tardis_portal.models.ParameterNames`
:param datafile_results: list of datafile to apply the filter
:param searchFilterData: the cleaned up search form data
:param paramType: either ``datafile`` or ``dataset``
:type paramType: :py:class:`tardis.tardis_portal.models.Dataset` or
:py:class:`tardis.tardis_portal.models.Dataset_File`
:returns: A list of datafiles as a result of the query or None if the
provided search request is invalid
"""
for parameter in parameters:
fieldName = parameter.getUniqueShortName()
kwargs = {paramType + '__name__id': parameter.id}
try:
# if parameter is a string...
if not parameter.data_type == ParameterName.NUMERIC:
if searchFilterData[fieldName] != '':
# let's check if this is a field that's specified to be
# displayed as a dropdown menu in the form
if parameter.choices != '':
if searchFilterData[fieldName] != '-':
kwargs[paramType + '__string_value__iexact'] = \
searchFilterData[fieldName]
else:
if parameter.comparison_type == \
ParameterName.EXACT_VALUE_COMPARISON:
kwargs[paramType + '__string_value__iexact'] = \
searchFilterData[fieldName]
elif parameter.comparison_type == \
ParameterName.CONTAINS_COMPARISON:
# we'll implement exact comparison as 'icontains'
# for now
kwargs[paramType + '__string_value__icontains'] = \
searchFilterData[fieldName]
else:
# if comparison_type on a string is a comparison
# type that can only be applied to a numeric value,
# we'll default to just using 'icontains'
# comparison
kwargs[paramType + '__string_value__icontains'] = \
searchFilterData[fieldName]
else:
pass
else: # parameter.isNumeric():
if parameter.comparison_type == \
ParameterName.RANGE_COMPARISON:
fromParam = searchFilterData[fieldName + 'From']
toParam = searchFilterData[fieldName + 'To']
if fromParam is None and toParam is None:
pass
else:
# if parameters are provided and we want to do a range
# comparison
# note that we're using '1' as the lower range as using
# '0' in the filter would return all the data
# TODO: investigate on why the oddness above is
# happening
# TODO: we should probably move the static value here
# to the constants module
kwargs[paramType + '__numerical_value__range'] = \
(fromParam is None and
constants.FORM_RANGE_LOWEST_NUM or fromParam,
toParam is not None and toParam or
constants.FORM_RANGE_HIGHEST_NUM)
elif searchFilterData[fieldName] is not None:
                    # if parameter is a number and we want to handle other
                    # types of number comparisons
if parameter.comparison_type == \
ParameterName.EXACT_VALUE_COMPARISON:
kwargs[paramType + '__numerical_value__exact'] = \
searchFilterData[fieldName]
# TODO: is this really how not equal should be declared?
# elif parameter.comparison_type ==
# ParameterName.NOT_EQUAL_COMPARISON:
# datafile_results = \
# datafile_results.filter(
# datafileparameter__name__name__icontains=parameter.name)
# .filter(
# ~Q(datafileparameter__numerical_value=searchFilterData[
# parameter.name]))
elif parameter.comparison_type == \
ParameterName.GREATER_THAN_COMPARISON:
kwargs[paramType + '__numerical_value__gt'] = \
searchFilterData[fieldName]
elif parameter.comparison_type == \
ParameterName.GREATER_THAN_EQUAL_COMPARISON:
kwargs[paramType + '__numerical_value__gte'] = \
searchFilterData[fieldName]
elif parameter.comparison_type == \
ParameterName.LESS_THAN_COMPARISON:
kwargs[paramType + '__numerical_value__lt'] = \
searchFilterData[fieldName]
elif parameter.comparison_type == \
ParameterName.LESS_THAN_EQUAL_COMPARISON:
kwargs[paramType + '__numerical_value__lte'] = \
searchFilterData[fieldName]
else:
# if comparison_type on a numeric is a comparison type
# that can only be applied to a string value, we'll
# default to just using 'exact' comparison
kwargs[paramType + '__numerical_value__exact'] = \
searchFilterData[fieldName]
else:
# ignore...
pass
# we will only update datafile_results if we have an additional
# filter (based on the 'passed' condition) in addition to the
# initial value of kwargs
if len(kwargs) > 1:
logger.debug(kwargs)
datafile_results = datafile_results.filter(**kwargs)
except KeyError:
pass
return datafile_results
def __forwardToSearchDatafileFormPage(request, searchQueryType,
searchForm=None):
"""Forward to the search data file form page."""
# TODO: remove this later on when we have a more generic search form
if searchQueryType == 'mx':
url = 'tardis_portal/search_datafile_form_mx.html'
searchForm = MXDatafileSearchForm()
c = Context({'header': 'Search Datafile',
'searchForm': searchForm})
return HttpResponse(render_response_search(request, url, c))
url = 'tardis_portal/search_datafile_form.html'
if not searchForm:
#if searchQueryType == 'saxs':
SearchDatafileForm = createSearchDatafileForm(searchQueryType)
searchForm = SearchDatafileForm()
#else:
# # TODO: what do we need to do if the user didn't provide a page to
# display?
# pass
from itertools import groupby
# sort the fields in the form as it will make grouping the related fields
# together in the next step easier
sortedSearchForm = sorted(searchForm, lambda x, y: cmp(x.name, y.name))
# modifiedSearchForm will be used to customise how the range type of fields
# will be displayed. range type of fields will be displayed side by side.
modifiedSearchForm = [list(g) for k, g in groupby(
sortedSearchForm, lambda x: x.name.rsplit('To')[0].rsplit('From')[0])]
# the searchForm will be used by custom written templates whereas the
# modifiedSearchForm will be used by the 'generic template' that the
# dynamic search datafiles form uses.
c = Context({'header': 'Search Datafile',
'searchForm': searchForm,
'modifiedSearchForm': modifiedSearchForm})
return HttpResponse(render_response_search(request, url, c))
def __forwardToSearchExperimentFormPage(request):
"""Forward to the search experiment form page."""
searchForm = __getSearchExperimentForm(request)
c = Context({'searchForm': searchForm})
url = 'tardis_portal/search_experiment_form.html'
return HttpResponse(render_response_search(request, url, c))
def __getSearchDatafileForm(request, searchQueryType):
"""Create the search datafile form based on the HTTP GET request.
:param request: a HTTP Request instance
:type request: :class:`django.http.HttpRequest`
:param searchQueryType: The search query type: 'mx' or 'saxs'
:raises:
:py:class:`tardis.tardis_portal.errors.UnsupportedSearchQueryTypeError`
        if the provided searchQueryType is not supported.
:returns: The supported search datafile form
"""
try:
SearchDatafileForm = createSearchDatafileForm(searchQueryType)
form = SearchDatafileForm(request.GET)
return form
except UnsupportedSearchQueryTypeError, e:
raise e
def __getSearchExperimentForm(request):
"""Create the search experiment form.
:param request: a HTTP Request instance
:type request: :class:`django.http.HttpRequest`
:returns: The search experiment form.
"""
SearchExperimentForm = createSearchExperimentForm()
form = SearchExperimentForm(request.GET)
return form
def __processDatafileParameters(request, searchQueryType, form):
"""Validate the provided datafile search request and return search results.
:param request: a HTTP Request instance
:type request: :class:`django.http.HttpRequest`
:param searchQueryType: The search query type
:param form: The search form to use
:raises:
:py:class:`tardis.tardis_portal.errors.SearchQueryTypeUnprovidedError`
if searchQueryType is not in the HTTP GET request
:raises:
:py:class:`tardis.tardis_portal.errors.UnsupportedSearchQueryTypeError`
        if the provided searchQueryType is not supported
:returns: A list of datafiles as a result of the query or None if the
provided search request is invalid.
:rtype: list of :py:class:`tardis.tardis_portal.models.Dataset_Files` or
None
"""
if form.is_valid():
datafile_results = __getFilteredDatafiles(request,
searchQueryType, form.cleaned_data)
# let's cache the query with all the filters in the session so
# we won't have to keep running the query all the time it is needed
# by the paginator
request.session['datafileResults'] = datafile_results
return datafile_results
else:
return None
def __processExperimentParameters(request, form):
"""Validate the provided experiment search request and return search
results.
:param request: a HTTP Request instance
:type request: :class:`django.http.HttpRequest`
:param form: The search form to use
:returns: A list of experiments as a result of the query or None if the
provided search request is invalid.
"""
if form.is_valid():
experiments = __getFilteredExperiments(request, form.cleaned_data)
# let's cache the query with all the filters in the session so
# we won't have to keep running the query all the time it is needed
# by the paginator
request.session['experiments'] = experiments
return experiments
else:
return None
def search_datafile(request):
"""Either show the search datafile form or the result of the search
datafile query.
"""
if 'type' in request.GET:
searchQueryType = request.GET.get('type')
else:
# for now we'll default to MX if nothing is provided
# TODO: should we forward the page to experiment search page if
# nothing is provided in the future?
searchQueryType = 'mx'
logger.info('search_datafile: searchQueryType {0}'.format(searchQueryType))
# TODO: check if going to /search/datafile will flag an error in unit test
bodyclass = None
if 'page' not in request.GET and 'type' in request.GET and \
len(request.GET) > 1:
# display the 1st page of the results
form = __getSearchDatafileForm(request, searchQueryType)
datafile_results = __processDatafileParameters(
request, searchQueryType, form)
if datafile_results is not None:
bodyclass = 'list'
else:
return __forwardToSearchDatafileFormPage(
request, searchQueryType, form)
else:
if 'page' in request.GET:
# succeeding pages of pagination
if 'datafileResults' in request.session:
datafile_results = request.session['datafileResults']
else:
form = __getSearchDatafileForm(request, searchQueryType)
datafile_results = __processDatafileParameters(request,
searchQueryType, form)
if datafile_results is not None:
bodyclass = 'list'
else:
return __forwardToSearchDatafileFormPage(request,
searchQueryType, form)
else:
# display the form
if 'datafileResults' in request.session:
del request.session['datafileResults']
return __forwardToSearchDatafileFormPage(request, searchQueryType)
# process the files to be displayed by the paginator...
#paginator = Paginator(datafile_results,
# constants.DATAFILE_RESULTS_PER_PAGE)
#try:
# page = int(request.GET.get('page', '1'))
#except ValueError:
# page = 1
# If page request (9999) is out of :range, deliver last page of results.
#try:
# datafiles = paginator.page(page)
#except (EmptyPage, InvalidPage):
# datafiles = paginator.page(paginator.num_pages)
import re
    cleanedUpQueryString = re.sub(r'&page=\d+', '',
request.META['QUERY_STRING'])
# get experiments associated with datafiles
if datafile_results:
experiment_pks = list(set(datafile_results.values_list('dataset__experiments', flat=True)))
experiments = Experiment.safe.in_bulk(experiment_pks)
else:
experiments = {}
results = []
for key, e in experiments.items():
result = {}
result['sr'] = e
result['dataset_hit'] = False
result['dataset_file_hit'] = True
result['experiment_hit'] = False
results.append(result)
c = Context({
'experiments': results,
'datafiles': datafile_results,
#'paginator': paginator,
'query_string': cleanedUpQueryString,
'subtitle': 'Search Datafiles',
'nav': [{'name': 'Search Datafile', 'link': '/search/datafile/'}],
'bodyclass': bodyclass,
'search_pressed': True,
'searchDatafileSelectionForm': getNewSearchDatafileSelectionForm()})
url = 'tardis_portal/search_experiment_results.html'
return HttpResponse(render_response_search(request, url, c))
@never_cache
@login_required()
def retrieve_user_list(request):
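    """Return a JSON list of users matching the 'q' query parameter, for
    autocompletion in access control forms.
    :param request: a HTTP Request instance
    :type request: :class:`django.http.HttpRequest`
    :rtype: :class:`django.http.HttpResponse`
    """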
# TODO: Hook this up to authservice.searchUsers() to actually get
# autocompletion data directly from auth backends.
# The following local DB query would be moved to auth.localdb_auth.SearchUsers.
query = request.GET.get('q', '')
limit = int(request.GET.get('limit', '10'))
# Search all user fields and also the UserAuthentication username.
q = Q(username__icontains=query) | \
Q(email__icontains=query) | \
Q(userprofile__userauthentication__username__icontains=query)
# Tokenize query string so "Bob Sm" matches (first_name~=Bob & last_name~=Smith).
tokens = query.split()
if len(tokens) < 2:
q |= Q(first_name__icontains=query.strip())
q |= Q(last_name__icontains=query.strip())
else:
q |= Q(first_name__icontains=' '.join(tokens[:-1])) & Q(last_name__icontains=tokens[-1])
q_tokenuser = Q(username=settings.TOKEN_USERNAME)
users_query = User.objects.exclude(q_tokenuser).filter(q).distinct().select_related('userprofile')
# HACK FOR ORACLE - QUERY GENERATED DOES NOT WORK WITH LIMIT SO USING ITERATOR INSTEAD
from itertools import islice
first_n_users = list(islice(users_query, limit))
user_auths = list(UserAuthentication.objects.filter(userProfile__user__in=first_n_users))
auth_methods = dict( (ap[0], ap[1]) for ap in settings.AUTH_PROVIDERS)
"""
users = [ {
"username": "ksr",
"first_name": "Kieran",
"last_name": "Spear",
"email": "email@address.com",
"auth_methods": [ "ksr:vbl:VBL", "ksr:localdb:Local DB" ]
} , ... ]
"""
users = []
for u in users_query:
fields = ('first_name', 'last_name', 'username', 'email')
# Convert attributes to dictionary keys and make sure all values
# are strings.
user = dict( [ (k, str(getattr(u, k))) for k in fields ] )
try:
user['auth_methods'] = [ '%s:%s:%s' % \
(ua.username, ua.authenticationMethod, \
auth_methods[ua.authenticationMethod]) \
for ua in user_auths if ua.userProfile == u.get_profile() ]
except UserProfile.DoesNotExist:
user['auth_methods'] = []
if not user['auth_methods']:
user['auth_methods'] = [ '%s:localdb:%s' % \
(u.username, auth_methods['localdb']) ]
users.append(user)
users.sort(key=itemgetter('first_name'))
return HttpResponse(json.dumps(users))
@never_cache
@login_required()
def retrieve_group_list(request):
grouplist = ' ~ '.join(map(str, Group.objects.all().order_by('name')))
return HttpResponse(grouplist)
def retrieve_field_list(request):
from tardis.tardis_portal.search_indexes import DatasetFileIndex
# Get all of the fields in the indexes
#
# TODO: these should be onl read from registered indexes
#
allFields = DatasetFileIndex.fields.items()
users = User.objects.all()
usernames = [u.first_name + ' ' + u.last_name + ':username' for u in users]
# Collect all of the indexed (searchable) fields, except
# for the main search document ('text')
    searchableFields = [key + ':search_field' for key, f in allFields
                        if f.indexed and key != 'text']
auto_list = usernames + searchableFields
fieldList = '+'.join([str(fn) for fn in auto_list])
return HttpResponse(fieldList)
@never_cache
@authz.experiment_ownership_required
def retrieve_access_list_user(request, experiment_id):
from tardis.tardis_portal.forms import AddUserPermissionsForm
user_acls = Experiment.safe.user_acls(request, experiment_id)
c = Context({ 'user_acls': user_acls, 'experiment_id': experiment_id,
'addUserPermissionsForm': AddUserPermissionsForm() })
return HttpResponse(render_response_index(request,
'tardis_portal/ajax/access_list_user.html', c))
@never_cache
def retrieve_access_list_user_readonly(request, experiment_id):
from tardis.tardis_portal.forms import AddUserPermissionsForm
user_acls = Experiment.safe.user_acls(request, experiment_id)
c = Context({ 'user_acls': user_acls, 'experiment_id': experiment_id })
return HttpResponse(render_response_index(request,
'tardis_portal/ajax/access_list_user_readonly.html', c))
@never_cache
@authz.experiment_ownership_required
def retrieve_access_list_group(request, experiment_id):
from tardis.tardis_portal.forms import AddGroupPermissionsForm
group_acls_system_owned = Experiment.safe.group_acls_system_owned(request,
experiment_id)
group_acls_user_owned = Experiment.safe.group_acls_user_owned(request,
experiment_id)
c = Context({'group_acls_user_owned': group_acls_user_owned,
'group_acls_system_owned': group_acls_system_owned,
'experiment_id': experiment_id,
'addGroupPermissionsForm': AddGroupPermissionsForm()})
return HttpResponse(render_response_index(request,
'tardis_portal/ajax/access_list_group.html', c))
@never_cache
def retrieve_access_list_group_readonly(request, experiment_id):
group_acls_system_owned = Experiment.safe.group_acls_system_owned(request,
experiment_id)
group_acls_user_owned = Experiment.safe.group_acls_user_owned(request,
experiment_id)
c = Context({'experiment_id': experiment_id,
'group_acls_system_owned': group_acls_system_owned,
'group_acls_user_owned': group_acls_user_owned })
return HttpResponse(render_response_index(request,
'tardis_portal/ajax/access_list_group_readonly.html', c))
@never_cache
@authz.experiment_ownership_required
def retrieve_access_list_external(request, experiment_id):
groups = Experiment.safe.external_users(request, experiment_id)
c = Context({'groups': groups, 'experiment_id': experiment_id})
return HttpResponse(render_response_index(request,
'tardis_portal/ajax/access_list_external.html', c))
@never_cache
@authz.experiment_download_required
def retrieve_access_list_tokens(request, experiment_id):
tokens = Token.objects.filter(experiment=experiment_id)
tokens = [{'expiry_date': token.expiry_date,
'user': token.user,
'url': request.build_absolute_uri(token.get_absolute_url()),
'id': token.id,
'experiment_id': experiment_id,
} for token in tokens]
c = Context({'tokens': tokens})
return HttpResponse(render_response_index(request,
'tardis_portal/ajax/access_list_tokens.html', c))
@never_cache
@authz.group_ownership_required
def retrieve_group_userlist(request, group_id):
from tardis.tardis_portal.forms import ManageGroupPermissionsForm
users = User.objects.filter(groups__id=group_id)
c = Context({'users': users, 'group_id': group_id,
'manageGroupPermissionsForm': ManageGroupPermissionsForm()})
return HttpResponse(render_response_index(request,
'tardis_portal/ajax/group_user_list.html', c))
@never_cache
def retrieve_group_userlist_readonly(request, group_id):
from tardis.tardis_portal.forms import ManageGroupPermissionsForm
users = User.objects.filter(groups__id=group_id)
c = Context({'users': users, 'group_id': group_id,
'manageGroupPermissionsForm': ManageGroupPermissionsForm()})
return HttpResponse(render_response_index(request,
'tardis_portal/ajax/group_user_list_readonly.html', c))
@never_cache
def retrieve_group_list_by_user(request):
groups = Group.objects.filter(groupadmin__user=request.user)
c = Context({'groups': groups})
return HttpResponse(render_response_index(request,
'tardis_portal/ajax/group_list.html', c))
@never_cache
@permission_required('auth.change_group')
@login_required()
def manage_groups(request):
c = Context({})
return HttpResponse(render_response_index(request,
'tardis_portal/manage_group_members.html', c))
@never_cache
@authz.group_ownership_required
def add_user_to_group(request, group_id, username):
if username == settings.TOKEN_USERNAME:
return HttpResponse('User does not exist: %s' % username)
authMethod = localdb_auth_key
isAdmin = False
if 'isAdmin' in request.GET:
if request.GET['isAdmin'] == 'true':
isAdmin = True
try:
authMethod = request.GET['authMethod']
if authMethod == localdb_auth_key:
user = User.objects.get(username=username)
else:
user = UserAuthentication.objects.get(username=username,
authenticationMethod=authMethod).userProfile.user
    except (User.DoesNotExist, UserAuthentication.DoesNotExist):
        return return_response_error(request)
try:
group = Group.objects.get(pk=group_id)
except Group.DoesNotExist:
return HttpResponse('Group does not exist.')
if user.groups.filter(name=group.name).count() > 0:
        return HttpResponse('User %s is already a member of that group.'
                            % username)
user.groups.add(group)
user.save()
if isAdmin:
groupadmin = GroupAdmin(user=user, group=group)
groupadmin.save()
c = Context({'user': user, 'group_id': group_id, 'isAdmin': isAdmin})
return HttpResponse(render_response_index(request,
'tardis_portal/ajax/add_user_to_group_result.html', c))
@never_cache
@authz.group_ownership_required
def remove_user_from_group(request, group_id, username):
try:
user = User.objects.get(username=username)
except User.DoesNotExist:
return HttpResponse('User %s does not exist.' % username)
try:
group = Group.objects.get(pk=group_id)
except Group.DoesNotExist:
return HttpResponse('Group does not exist.')
if user.groups.filter(name=group.name).count() == 0:
        return HttpResponse('User %s is not a member of that group.'
                            % username)
if request.user == user:
return HttpResponse('You cannot remove yourself from that group.')
user.groups.remove(group)
user.save()
    # QuerySet.delete() on an empty result is a no-op, so no exception
    # handling is needed here.
    GroupAdmin.objects.filter(user=user, group=group).delete()
return HttpResponse('OK')
@never_cache
@transaction.commit_on_success
@authz.experiment_ownership_required
def add_experiment_access_user(request, experiment_id, username):
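    """Grant a user access to an experiment by creating an ExperimentACL
    with the permissions given in the query string.
    :param request: a HTTP Request instance
    :type request: :class:`django.http.HttpRequest`
    :param experiment_id: the ID of the experiment
    :type experiment_id: string
    :param username: the username being granted access
    :type username: string
    :rtype: :class:`django.http.HttpResponse`
    """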
canRead = False
canWrite = False
canDelete = False
isOwner = False
if 'canRead' in request.GET:
if request.GET['canRead'] == 'true':
canRead = True
if 'canWrite' in request.GET:
if request.GET['canWrite'] == 'true':
canWrite = True
if 'canDelete' in request.GET:
if request.GET['canDelete'] == 'true':
canDelete = True
if 'isOwner' in request.GET:
if request.GET['isOwner'] == 'true':
isOwner = True
authMethod = request.GET['authMethod']
user = auth_service.getUser(authMethod, username)
if user is None or username == settings.TOKEN_USERNAME:
return HttpResponse('User %s does not exist.' % (username))
try:
experiment = Experiment.objects.get(pk=experiment_id)
except Experiment.DoesNotExist:
        return HttpResponse('Experiment (id=%s) does not exist.'
                            % (experiment_id))
acl = ExperimentACL.objects.filter(
experiment=experiment,
pluginId=django_user,
entityId=str(user.id),
aclOwnershipType=ExperimentACL.OWNER_OWNED)
if acl.count() == 0:
acl = ExperimentACL(experiment=experiment,
pluginId=django_user,
entityId=str(user.id),
canRead=canRead,
canWrite=canWrite,
canDelete=canDelete,
isOwner=isOwner,
aclOwnershipType=ExperimentACL.OWNER_OWNED)
acl.save()
c = Context({'authMethod': authMethod,
'user': user,
'user_acl': acl,
'username': username,
'experiment_id': experiment_id})
return HttpResponse(render_response_index(request,
'tardis_portal/ajax/add_user_result.html', c))
return HttpResponse('User already has experiment access.')
@never_cache
@authz.experiment_ownership_required
def remove_experiment_access_user(request, experiment_id, username):
try:
user = User.objects.get(username=username)
except User.DoesNotExist:
return HttpResponse('User %s does not exist' % username)
try:
experiment = Experiment.objects.get(pk=experiment_id)
except Experiment.DoesNotExist:
return HttpResponse('Experiment does not exist')
acl = ExperimentACL.objects.filter(
experiment=experiment,
pluginId=django_user,
entityId=str(user.id),
aclOwnershipType=ExperimentACL.OWNER_OWNED)
if acl.count() == 1:
if int(acl[0].entityId) == request.user.id:
return HttpResponse('Cannot remove your own user access.')
acl[0].delete()
return HttpResponse('OK')
elif acl.count() == 0:
return HttpResponse(
'The user %s does not have access to this experiment.' % username)
else:
return HttpResponse('Multiple ACLs found')
@never_cache
@authz.experiment_ownership_required
def change_user_permissions(request, experiment_id, username):
try:
user = User.objects.get(username=username)
except User.DoesNotExist:
return return_response_error(request)
try:
experiment = Experiment.objects.get(pk=experiment_id)
except Experiment.DoesNotExist:
return return_response_error(request)
try:
acl = ExperimentACL.objects.get(
experiment=experiment,
pluginId=django_user,
entityId=str(user.id),
aclOwnershipType=ExperimentACL.OWNER_OWNED)
except ExperimentACL.DoesNotExist:
return return_response_error(request)
if request.method == 'POST':
form = ChangeUserPermissionsForm(request.POST, instance=acl)
        if form.is_valid():
form.save()
url = reverse('tardis.tardis_portal.views.control_panel')
return HttpResponseRedirect(url)
else:
form = ChangeUserPermissionsForm(instance=acl)
c = Context({'form': form,
'header':
"Change User Permissions for '%s'" % user.username})
return HttpResponse(render_response_index(request,
'tardis_portal/form_template.html', c))
@never_cache
@authz.experiment_ownership_required
def change_group_permissions(request, experiment_id, group_id):
try:
group = Group.objects.get(pk=group_id)
except Group.DoesNotExist:
return return_response_error(request)
try:
experiment = Experiment.objects.get(pk=experiment_id)
except Experiment.DoesNotExist:
return return_response_error(request)
try:
acl = ExperimentACL.objects.get(
experiment=experiment,
pluginId=django_group,
entityId=str(group.id),
aclOwnershipType=ExperimentACL.OWNER_OWNED)
except ExperimentACL.DoesNotExist:
return return_response_error(request)
if request.method == 'POST':
form = ChangeGroupPermissionsForm(request.POST)
if form.is_valid():
acl.canRead = form.cleaned_data['canRead']
acl.canWrite = form.cleaned_data['canWrite']
acl.canDelete = form.cleaned_data['canDelete']
acl.effectiveDate = form.cleaned_data['effectiveDate']
acl.expiryDate = form.cleaned_data['expiryDate']
acl.save()
return HttpResponseRedirect('/experiment/control_panel/')
else:
form = ChangeGroupPermissionsForm(
initial={'canRead': acl.canRead,
'canWrite': acl.canWrite,
'canDelete': acl.canDelete,
'effectiveDate': acl.effectiveDate,
'expiryDate': acl.expiryDate})
c = Context({'form': form,
'header': "Change Group Permissions for '%s'" % group.name})
return HttpResponse(render_response_index(request,
'tardis_portal/form_template.html', c))
@transaction.commit_on_success
@never_cache
def create_group(request):
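    """Create a new group and optionally add an initial admin user.
    Without a 'group' query parameter the creation form is rendered
    instead.
    :param request: a HTTP Request instance
    :type request: :class:`django.http.HttpRequest`
    :rtype: :class:`django.http.HttpResponse`
    """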
    if 'group' not in request.GET:
c = Context({'createGroupPermissionsForm': CreateGroupPermissionsForm() })
response = HttpResponse(render_response_index(request,
'tardis_portal/ajax/create_group.html', c))
return response
authMethod = localdb_auth_key
admin = None
groupname = None
if 'group' in request.GET:
groupname = request.GET['group']
if 'admin' in request.GET:
admin = request.GET['admin']
if 'authMethod' in request.GET:
authMethod = request.GET['authMethod']
try:
group = Group(name=groupname)
group.save()
except:
transaction.rollback()
return HttpResponse('Could not create group %s ' \
'(It is likely that it already exists)' % (groupname))
adminuser = None
if admin:
if admin == settings.TOKEN_USERNAME:
transaction.rollback()
return HttpResponse('User %s does not exist' % (settings.TOKEN_USERNAME))
try:
authMethod = request.GET['authMethod']
if authMethod == localdb_auth_key:
adminuser = User.objects.get(username=admin)
else:
adminuser = UserAuthentication.objects.get(username=admin,
authenticationMethod=authMethod).userProfile.user
        except (User.DoesNotExist, UserAuthentication.DoesNotExist):
            transaction.rollback()
            return HttpResponse('User %s does not exist' % (admin))
# create admin for this group and add it to the group
groupadmin = GroupAdmin(user=adminuser, group=group)
groupadmin.save()
adminuser.groups.add(group)
adminuser.save()
# add the current user as admin as well for newly created groups
if not request.user == adminuser:
user = request.user
groupadmin = GroupAdmin(user=user, group=group)
groupadmin.save()
user.groups.add(group)
user.save()
c = Context({'group': group})
transaction.commit()
response = HttpResponse(render_response_index(request,
'tardis_portal/ajax/create_group.html', c))
return response
@never_cache
@transaction.commit_manually
@authz.experiment_ownership_required
def add_experiment_access_group(request, experiment_id, groupname):
create = False
canRead = False
canWrite = False
canDelete = False
isOwner = False
authMethod = localdb_auth_key
admin = None
if 'canRead' in request.GET:
if request.GET['canRead'] == 'true':
canRead = True
if 'canWrite' in request.GET:
if request.GET['canWrite'] == 'true':
canWrite = True
if 'canDelete' in request.GET:
if request.GET['canDelete'] == 'true':
canDelete = True
if 'isOwner' in request.GET:
if request.GET['isOwner'] == 'true':
isOwner = True
try:
experiment = Experiment.objects.get(pk=experiment_id)
except Experiment.DoesNotExist:
transaction.rollback()
        return HttpResponse('Experiment (id=%s) does not exist' %
(experiment_id))
try:
group = Group.objects.get(name=groupname)
except Group.DoesNotExist:
transaction.rollback()
return HttpResponse('Group %s does not exist' % (groupname))
acl = ExperimentACL.objects.filter(
experiment=experiment,
pluginId=django_group,
entityId=str(group.id),
aclOwnershipType=ExperimentACL.OWNER_OWNED)
if acl.count() > 0:
# An ACL already exists for this experiment/group.
transaction.rollback()
return HttpResponse('Could not create group %s ' \
'(It is likely that it already exists)' % (groupname))
acl = ExperimentACL(experiment=experiment,
pluginId=django_group,
entityId=str(group.id),
canRead=canRead,
canWrite=canWrite,
canDelete=canDelete,
isOwner=isOwner,
aclOwnershipType=ExperimentACL.OWNER_OWNED)
acl.save()
c = Context({'group': group,
'group_acl': acl,
'experiment_id': experiment_id})
response = HttpResponse(render_response_index(request,
'tardis_portal/ajax/add_group_result.html', c))
transaction.commit()
return response
@never_cache
@authz.experiment_ownership_required
def remove_experiment_access_group(request, experiment_id, group_id):
try:
group = Group.objects.get(pk=group_id)
except Group.DoesNotExist:
return HttpResponse('Group does not exist')
try:
experiment = Experiment.objects.get(pk=experiment_id)
except Experiment.DoesNotExist:
return HttpResponse('Experiment does not exist')
acl = ExperimentACL.objects.filter(
experiment=experiment,
pluginId=django_group,
entityId=str(group.id),
aclOwnershipType=ExperimentACL.OWNER_OWNED)
if acl.count() == 1:
acl[0].delete()
return HttpResponse('OK')
elif acl.count() == 0:
        return HttpResponse('No ACL available. '
                            'It is likely the group does not have access to '
                            'this experiment.')
else:
return HttpResponse('Multiple ACLs found')
return HttpResponse('')
def stats(request):
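    """Display counts of experiments, datasets and datafiles, and the
    total datafile size.
    :param request: a HTTP Request instance
    :type request: :class:`django.http.HttpRequest`
    :rtype: :class:`django.http.HttpResponse`
    """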
# using count() is more efficient than using len() on a query set
c = Context({
'experiment_count': Experiment.objects.all().count(),
'dataset_count': Dataset.objects.all().count(),
'datafile_count': Dataset_File.objects.all().count(),
'datafile_size': Dataset_File.sum_sizes(Dataset_File.objects.all()),
})
return HttpResponse(render_response_index(request,
'tardis_portal/stats.html', c))
@transaction.commit_on_success
@never_cache
def create_user(request):
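    """Create a new local user account via ajax. Without a 'user' POST
    parameter the creation form is rendered instead.
    :param request: a HTTP Request instance
    :type request: :class:`django.http.HttpRequest`
    :rtype: :class:`django.http.HttpResponse`
    """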
    if 'user' not in request.POST:
c = Context({'createUserPermissionsForm': CreateUserPermissionsForm() })
response = HttpResponse(render_response_index(request,
'tardis_portal/ajax/create_user.html', c))
return response
authMethod = localdb_auth_key
if 'user' in request.POST:
username = request.POST['user']
if 'authMethod' in request.POST:
authMethod = request.POST['authMethod']
if 'email' in request.POST:
email = request.POST['email']
if 'password' in request.POST:
password = request.POST['password']
try:
validate_email(email)
user = User.objects.create_user(username, email, password)
userProfile = UserProfile(user=user, isDjangoAccount=True)
userProfile.save()
authentication = UserAuthentication(userProfile=userProfile,
username=username,
authenticationMethod=authMethod)
authentication.save()
except ValidationError:
return HttpResponse('Could not create user %s ' \
'(Email address is invalid: %s)' % (username, email), status=403)
except:
transaction.rollback()
return HttpResponse('Could not create user %s ' \
'(It is likely that this username already exists)' % (username), status=403)
c = Context({'user_created': username})
transaction.commit()
response = HttpResponse(render_response_index(request,
'tardis_portal/ajax/create_user.html', c))
return response
def import_params(request):
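    """Import a schema and its parameter names from an uploaded
    definition file. The first line is a prefix, the second the schema
    namespace, and subsequent lines are '^'-separated parameter
    definitions. Requires staff credentials in the submitted form.
    :param request: a HTTP Request instance
    :type request: :class:`django.http.HttpRequest`
    :rtype: :class:`django.http.HttpResponse`
    """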
if request.method == 'POST': # If the form has been submitted...
# A form bound to the POST data
form = ImportParamsForm(request.POST, request.FILES)
if form.is_valid(): # All validation rules pass
params = request.FILES['params']
username = form.cleaned_data['username']
password = form.cleaned_data['password']
from django.contrib.auth import authenticate
user = authenticate(username=username, password=password)
if user is not None:
if not user.is_active or not user.is_staff:
return return_response_error(request)
else:
return return_response_error(request)
i = 0
for line in params:
if i == 0:
prefix = line
logger.debug(prefix)
elif i == 1:
schema = line
logger.debug(schema)
try:
Schema.objects.get(namespace=schema)
return HttpResponse('Schema already exists.')
except Schema.DoesNotExist:
schema_db = Schema(namespace=schema)
# TODO: add the extra info that the Schema instance
# needs
schema_db.save()
else:
part = line.split('^')
if len(part) == 4:
is_numeric = False
if part[3].strip(' \n\r') == 'True':
is_numeric = True
if is_numeric:
pn = ParameterName(schema=schema_db,
name=part[0], full_name=part[1],
units=part[2],
data_type=ParameterName.NUMERIC)
else:
pn = ParameterName(schema=schema_db,
name=part[0], full_name=part[1],
units=part[2],
data_type=ParameterName.STRING)
pn.save()
i = i + 1
return HttpResponse('OK')
else:
form = ImportParamsForm()
c = Context({'form': form, 'header': 'Import Parameters'})
return HttpResponse(render_response_index(request,
'tardis_portal/form_template.html', c))
def upload_complete(request,
template_name='tardis_portal/upload_complete.html'):
"""
The ajax-loaded result of a file being uploaded
:param request: a HTTP Request instance
:type request: :class:`django.http.HttpRequest`
:param template_name: the path of the template to render
:type template_name: string
:rtype: :class:`django.http.HttpResponse`
"""
c = Context({
'numberOfFiles': request.POST['filesUploaded'],
'bytes': request.POST['allBytesLoaded'],
'speed': request.POST['speed'],
'errorCount': request.POST['errorCount'],
})
return render_to_response(template_name, c)
@authz.upload_auth
@authz.dataset_write_permissions_required
def upload(request, dataset_id):
"""
Uploads a datafile to the store and datafile metadata
:param request: a HTTP Request instance
:type request: :class:`django.http.HttpRequest`
:param dataset_id: the dataset_id
:type dataset_id: integer
:returns: boolean true if successful
:rtype: bool
"""
dataset = Dataset.objects.get(id=dataset_id)
logger.debug('called upload')
if request.method == 'POST':
logger.debug('got POST')
if request.FILES:
uploaded_file_post = request.FILES['Filedata']
filepath = write_uploaded_file_to_dataset(dataset,
uploaded_file_post)
datafile = Dataset_File(dataset=dataset,
filename=uploaded_file_post.name,
url=filepath,
size=uploaded_file_post.size,
protocol='')
datafile.verify(allowEmptyChecksums=True)
datafile.save()
return HttpResponse('True')
@authz.dataset_write_permissions_required
def import_staging_files(request, dataset_id):
"""
    Creates a jstree view of the staging area of the user, and provides
    a selection mechanism for importing files.
"""
staging = get_full_staging_path(request.user.username)
if not staging:
return HttpResponseNotFound()
c = Context({
'dataset_id': dataset_id,
'staging_mount_prefix': settings.STAGING_MOUNT_PREFIX,
'staging_mount_user_suffix_enable': settings.STAGING_MOUNT_USER_SUFFIX_ENABLE
})
return render_to_response('tardis_portal/ajax/import_staging_files.html', c)
def list_staging_files(request, dataset_id):
"""
    Creates a jstree view of the staging area of the user, and provides
    a selection mechanism for importing files.
"""
staging = get_full_staging_path(request.user.username)
if not staging:
return HttpResponseNotFound()
from_path = staging
root = False
try:
path_var = request.GET.get('path', '')
if not path_var:
root = True
from_path = path.join(staging, urllib2.unquote(path_var))
except ValueError:
from_path = staging
c = Context({
'dataset_id': dataset_id,
'directory_listing': staging_list(from_path, staging, root=root),
})
return render_to_response('tardis_portal/ajax/list_staging_files.html', c)
@authz.dataset_write_permissions_required
def upload_files(request, dataset_id,
template_name='tardis_portal/ajax/upload_files.html'):
"""
Creates an Uploadify 'create files' button with a dataset
destination. `A workaround for a JQuery Dialog conflict\
<http://www.uploadify.com/forums/discussion/3348/
uploadify-in-jquery-ui-dialog-modal-causes-double-queue-item/p1>`_
:param request: a HTTP Request instance
:type request: :class:`django.http.HttpRequest`
:param template_name: the path of the template to render
:param dataset_id: the dataset_id
:type dataset_id: integer
:returns: A view containing an Uploadify *create files* button
"""
if 'message' in request.GET:
message = request.GET['message']
else:
message = "Upload Files to Dataset"
url = reverse('tardis.tardis_portal.views.upload_complete')
c = Context({'upload_complete_url': url,
'dataset_id': dataset_id,
'message': message,
'session_id': request.session.session_key
})
return render_to_response(template_name, c)
@login_required
def edit_experiment_par(request, parameterset_id):
parameterset = ExperimentParameterSet.objects.get(id=parameterset_id)
if authz.has_write_permissions(request, parameterset.experiment.id):
return edit_parameters(request, parameterset, otype="experiment")
else:
return return_response_error(request)
@login_required
def edit_dataset_par(request, parameterset_id):
parameterset = DatasetParameterSet.objects.get(id=parameterset_id)
if authz.has_dataset_write(request, parameterset.dataset.id):
return edit_parameters(request, parameterset, otype="dataset")
else:
return return_response_error(request)
@login_required
def edit_datafile_par(request, parameterset_id):
parameterset = DatafileParameterSet.objects.get(id=parameterset_id)
if authz.has_dataset_write(request, parameterset.dataset_file.dataset.id):
return edit_parameters(request, parameterset, otype="datafile")
else:
return return_response_error(request)
def edit_parameters(request, parameterset, otype):
parameternames = ParameterName.objects.filter(
schema__namespace=parameterset.schema.namespace)
success = False
valid = True
if request.method == 'POST':
class DynamicForm(create_parameterset_edit_form(
parameterset, request=request)):
pass
form = DynamicForm(request.POST)
if form.is_valid():
save_datafile_edit_form(parameterset, request)
success = True
else:
valid = False
else:
class DynamicForm(create_parameterset_edit_form(
parameterset)):
pass
form = DynamicForm()
c = Context({
'schema': parameterset.schema,
'form': form,
'parameternames': parameternames,
'type': otype,
'success': success,
'parameterset_id': parameterset.id,
'valid': valid,
})
return HttpResponse(render_response_index(request,
'tardis_portal/ajax/parameteredit.html', c))
@login_required
def add_datafile_par(request, datafile_id):
parentObject = Dataset_File.objects.get(id=datafile_id)
if authz.has_dataset_write(request, parentObject.dataset.id):
return add_par(request, parentObject,
otype="datafile", stype=Schema.DATAFILE)
else:
return return_response_error(request)
@login_required
def add_dataset_par(request, dataset_id):
parentObject = Dataset.objects.get(id=dataset_id)
if authz.has_dataset_write(request, parentObject.id):
return add_par(request, parentObject, otype="dataset",
stype=Schema.DATASET)
else:
return return_response_error(request)
@login_required
def add_experiment_par(request, experiment_id):
parentObject = Experiment.objects.get(id=experiment_id)
if authz.has_write_permissions(request, parentObject.id):
return add_par(request, parentObject, otype="experiment",
stype=Schema.EXPERIMENT)
else:
return return_response_error(request)
def add_par(request, parentObject, otype, stype):
all_schema = Schema.objects.filter(type=stype, immutable=False)
if 'schema_id' in request.GET:
schema_id = request.GET['schema_id']
elif all_schema.count() > 0:
schema_id = all_schema[0].id
else:
return HttpResponse(render_response_index(
request, 'tardis_portal/ajax/parameter_set_unavailable.html', {}))
schema = Schema.objects.get(id=schema_id)
parameternames = ParameterName.objects.filter(
schema__namespace=schema.namespace)
success = False
valid = True
if request.method == 'POST':
class DynamicForm(create_datafile_add_form(
schema.namespace, parentObject, request=request)):
pass
form = DynamicForm(request.POST)
if form.is_valid():
save_datafile_add_form(schema.namespace, parentObject, request)
success = True
else:
valid = False
else:
class DynamicForm(create_datafile_add_form(
schema.namespace, parentObject)):
pass
form = DynamicForm()
c = Context({
'schema': schema,
'form': form,
'parameternames': parameternames,
'type': otype,
'success': success,
'valid': valid,
'parentObject': parentObject,
'all_schema': all_schema,
'schema_id': schema.id,
})
return HttpResponse(render_response_index(request,
'tardis_portal/ajax/parameteradd.html', c))
class ExperimentSearchView(SearchView):
def __name__(self):
return "ExperimentSearchView"
def extra_context(self):
extra = super(ExperimentSearchView, self).extra_context()
# Results may contain Experiments, Datasets and Dataset_Files.
# Group them into experiments, noting whether or not the search
# hits were in the Dataset(s) or Dataset_File(s)
results = self.results
facets = results.facet_counts()
if facets:
experiment_facets = facets['fields']['experiment_id_stored']
experiment_ids = [ int(f[0]) for f in experiment_facets if int(f[1]) > 0 ]
else:
experiment_ids = []
access_list = []
if self.request.user.is_authenticated():
access_list.extend([e.pk for e in authz.get_accessible_experiments(self.request)])
access_list.extend([e.pk for e in Experiment.objects.exclude(public_access=Experiment.PUBLIC_ACCESS_NONE)])
ids = list(set(experiment_ids) & set(access_list))
experiments = Experiment.objects.filter(pk__in=ids).order_by('-update_time')
results = []
for e in experiments:
result = {}
result['sr'] = e
result['dataset_hit'] = False
result['dataset_file_hit'] = False
result['experiment_hit'] = False
results.append(result)
extra['experiments'] = results
return extra
# override SearchView's method in order to
# return a ResponseContext
def create_response(self):
(paginator, page) = self.build_page()
# Remove unnecessary whitespace
# TODO this should just be done in the form clean...
query = SearchQueryString(self.query)
context = {
'search_query': query,
'form': self.form,
'page': page,
'paginator' : paginator,
}
context.update(self.extra_context())
return render_response_index(self.request, self.template, context)
@login_required
def single_search(request):
search_query = FacetFixedSearchQuery(backend=HighlightSearchBackend())
sqs = SearchQuerySet(query=search_query)
sqs.highlight()
return ExperimentSearchView(
template = 'search/search.html',
searchqueryset=sqs,
form_class=RawSearchForm,
).__call__(request)
def share(request, experiment_id):
'''
Choose access rights and licence.
'''
experiment = Experiment.objects.get(id=experiment_id)
c = Context({})
c['has_write_permissions'] = \
authz.has_write_permissions(request, experiment_id)
c['has_download_permissions'] = \
authz.has_experiment_download_access(request, experiment_id)
if request.user.is_authenticated():
c['is_owner'] = authz.has_experiment_ownership(request, experiment_id)
domain = Site.objects.get_current().domain
public_link = experiment.public_access >= Experiment.PUBLIC_ACCESS_METADATA
c['experiment'] = experiment
c['public_link'] = public_link
c['domain'] = domain
return HttpResponse(render_response_index(request,
'tardis_portal/ajax/share.html', c))
@authz.experiment_ownership_required
def choose_rights(request, experiment_id):
'''
Choose access rights and licence.
'''
experiment = Experiment.objects.get(id=experiment_id)
def is_valid_owner(owner):
if not settings.REQUIRE_VALID_PUBLIC_CONTACTS:
return True
userProfile, created = UserProfile.objects.get_or_create(
user=owner)
return userProfile.isValidPublicContact()
# Forbid access if no valid owner is available (and show error message)
if not any([is_valid_owner(owner) for owner in experiment.get_owners()]):
c = Context({'no_valid_owner': True, 'experiment': experiment})
return HttpResponseForbidden(\
render_response_index(request, \
'tardis_portal/ajax/unable_to_choose_rights.html', c))
# Process form or prepopulate it
if request.method == 'POST':
form = RightsForm(request.POST)
if form.is_valid():
experiment.public_access = form.cleaned_data['public_access']
experiment.license = form.cleaned_data['license']
experiment.save()
else:
form = RightsForm({ 'public_access': experiment.public_access,
'license': experiment.license_id })
c = Context({'form': form, 'experiment': experiment})
return HttpResponse(render_response_index(request,
'tardis_portal/ajax/choose_rights.html', c))
@require_POST
@authz.experiment_ownership_required
def create_token(request, experiment_id):
experiment = Experiment.objects.get(id=experiment_id)
token = Token(experiment=experiment, user=request.user)
token.save_with_random_token()
logger.info('created token: %s' % token)
    return HttpResponse('{"success": true}', mimetype='application/json')
@require_POST
def token_delete(request, token_id):
token = Token.objects.get(id=token_id)
if authz.has_experiment_ownership(request, token.experiment_id):
token.delete()
    return HttpResponse('{"success": true}', mimetype='application/json')
def token_login(request, token):
django_logout(request)
from tardis.tardis_portal.auth import login, token_auth
logger.debug('token login')
user = token_auth.authenticate(request, token)
if not user:
return return_response_error(request)
login(request, user)
experiment = Experiment.objects.get(token__token=token)
return HttpResponseRedirect(experiment.get_absolute_url())
@authz.experiment_access_required
def view_rifcs(request, experiment_id):
"""View the rif-cs of an existing experiment.
:param request: a HTTP Request instance
:type request: :class:`django.http.HttpRequest`
:param experiment_id: the ID of the experiment to be viewed
:type experiment_id: string
:rtype: :class:`django.http.HttpResponse`
"""
try:
experiment = Experiment.safe.get(request, experiment_id)
except PermissionDenied:
return return_response_error(request)
except Experiment.DoesNotExist:
return return_response_not_found(request)
try:
rifcs_provs = settings.RIFCS_PROVIDERS
except AttributeError:
rifcs_provs = ()
from tardis.tardis_portal.publish.publishservice import PublishService
pservice = PublishService(rifcs_provs, experiment)
context = pservice.get_context()
if context is None:
# return error page or something
return return_response_error(request)
template = pservice.get_template()
return HttpResponse(render_response_index(request,
template, context), mimetype="text/xml")
def retrieve_licenses(request):
try:
type_ = int(request.REQUEST['public_access'])
licenses = License.get_suitable_licenses(type_)
except KeyError:
licenses = License.get_suitable_licenses()
return HttpResponse(json.dumps([model_to_dict(x) for x in licenses]))
def experiment_public_access_badge(request, experiment_id):
try:
experiment = Experiment.objects.get(id=experiment_id)
except Experiment.DoesNotExist:
        return HttpResponse('')
if authz.has_experiment_access(request, experiment_id):
return HttpResponse(render_public_access_badge(experiment))
else:
return HttpResponse('')
@login_required
def manage_user_account(request):
user = request.user
# Process form or prepopulate it
if request.method == 'POST':
form = ManageAccountForm(request.POST)
if form.is_valid():
user.first_name = form.cleaned_data['first_name']
user.last_name = form.cleaned_data['last_name']
user.email = form.cleaned_data['email']
user.save()
return _redirect_303('tardis.tardis_portal.views.index')
else:
form = ManageAccountForm(instance=user)
c = Context({'form': form})
return HttpResponse(render_response_index(request,
'tardis_portal/manage_user_account.html', c))
@login_required
def add_dataset(request, experiment_id):
if not has_experiment_write(request, experiment_id):
return HttpResponseForbidden()
# Process form or prepopulate it
if request.method == 'POST':
form = DatasetForm(request.POST)
if form.is_valid():
dataset = Dataset()
dataset.description = form.cleaned_data['description']
dataset.save()
experiment = Experiment.objects.get(id=experiment_id)
dataset.experiments.add(experiment)
dataset.save()
return _redirect_303('tardis.tardis_portal.views.view_dataset',
dataset.id)
else:
form = DatasetForm()
c = Context({'form': form})
return HttpResponse(render_response_index(request,
'tardis_portal/add_or_edit_dataset.html', c))
@login_required
def edit_dataset(request, dataset_id):
if not has_dataset_write(request, dataset_id):
return HttpResponseForbidden()
dataset = Dataset.objects.get(id=dataset_id)
# Process form or prepopulate it
if request.method == 'POST':
form = DatasetForm(request.POST)
if form.is_valid():
dataset.description = form.cleaned_data['description']
dataset.save()
return _redirect_303('tardis.tardis_portal.views.view_dataset',
dataset.id)
else:
form = DatasetForm(instance=dataset)
c = Context({'form': form, 'dataset': dataset})
return HttpResponse(render_response_index(request,
'tardis_portal/add_or_edit_dataset.html', c))
@login_required
def stage_files_to_dataset(request, dataset_id):
"""
Takes a JSON list of filenames to import from the staging area to this
dataset.
"""
if not has_dataset_write(request, dataset_id):
return HttpResponseForbidden()
if request.method != 'POST':
# This method only accepts POSTS, so send 405 Method Not Allowed
response = HttpResponse(status=405)
response['Allow'] = 'POST'
return response
user = request.user
# Incoming data MUST be JSON
if not request.META['CONTENT_TYPE'].startswith('application/json'):
return HttpResponse(status=400)
try:
files = json.loads(request.body)
    except ValueError:
return HttpResponse(status=400)
create_staging_datafiles.delay(files, user.id, dataset_id,
request.is_secure())
email = {'email': user.email}
return HttpResponse(json.dumps(email), status=201)
|
{
"content_hash": "a6595136e1461b6501e3f0c160a727ef",
"timestamp": "",
"source": "github",
"line_count": 3100,
"max_line_length": 135,
"avg_line_length": 35.51774193548387,
"alnum_prop": 0.6192089369238454,
"repo_name": "steveandroulakis/mytardis",
"id": "6f4d40695f281fee65c948337eafaea35297f0ec",
"size": "111856",
"binary": false,
"copies": "1",
"ref": "refs/heads/3.0",
"path": "tardis/tardis_portal/views.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "308601"
},
{
"name": "Python",
"bytes": "1673248"
},
{
"name": "Shell",
"bytes": "953"
}
],
"symlink_target": ""
}
|
""" Estimation of infection and mortality parameters from survival over time.
Parameters estimated:
(infection-related)
- p: probability that a single virion will cause infection in a host of the first group
- a,b: shape parameters for the distribution of susceptibility of the second group compared to the first
- eps: probability of ineffective challenge
(mortality-related)
- meanI: mean time to death of infected hosts
- sI: shape parameter of the distribution of time to death of infected hosts
- meanU: mean time to death from old-age (i.e. from uninfected hosts)
- sU: shape parameter of the distribution of time to death of old-age
- k: background probability of death, independent of infection or old-age
(extra)
- Ig1dX, Ig2dX: estimated number of infected hosts from group 1 (or 2) when challenged with dose number X
Assumptions:
- infected flies cannot outlive natural mortality (meanI<meanU)
- prior distributions for parameters governing natural mortality set from those estimated from control survival
"""
from matplotlib import use
use('Agg') # To save figures to disk, comment to have figures as pop-ups
import sys
import pickle
import pymc as py
# Import libraries
sys.path.append('lib')
import timeEst
# Import Data - see TimeData documentation for more information: help(timeEst.TimeData)
data=timeEst.TimeData.fromCSV(dataPath1='./data/Wneg.csv',dataPath2='./data/Wpos.csv',dataName='wolb2012')
# Initialize model - see Model documentation for more information: help(timeEst.Model)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Estimating infection parameters
niterations=5
burnin=0
thinF=1
mod=timeEst.Model.setup(data=data,bRandomIni=False, bOverWrite=True)
M=py.MCMC(mod,db='pickle', dbname=mod.saveTo+'-MCMC.pickle')
M.sample(niterations, burnin, thinF)
M.db.close()
# Check traces
#py.Matplot.plot(M,path=mod.path)
# The following can always be done in a later session using the folder to the results:
# mod= timeEst.savedModel(folder)
# Posterior calculations and plots. see mod.calcPosterior documentation for help
# Burnin can be also be set to 0 above, and thinning to 1, and be determined only after analysing the traces
# In such cases, set burnin and thinF parameters in the call below.
mod.calcPosterior()
# The posterior samples of parameter called X (see in priors) can be accessed in mod.Xs
# For example, the posterior samples of p are in mod.ps
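# A quick posterior check one might add (a sketch only; it assumes matplotlib is
# installed and that mod.path points at a writable results folder):
# import matplotlib.pyplot as plt
# plt.hist(mod.ps, bins=30)
# plt.xlabel('p (per-virion infection probability)')
# plt.savefig(mod.path + 'p_posterior_hist.png')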
|
{
"content_hash": "e4d06653214a840fb117fcd8ee2553e9",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 111,
"avg_line_length": 41.3448275862069,
"alnum_prop": 0.7668890742285238,
"repo_name": "dpessoaIGC/Dose-Invariant_Susceptibility_Estimator",
"id": "436b2172f9d0d7a366217f4f523c7a8232f746bd",
"size": "2398",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bin/runEst.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "96717"
}
],
"symlink_target": ""
}
|
import numpy as np
from PIL import Image
from os.path import *
import re
import cv2
cv2.setNumThreads(0)
cv2.ocl.setUseOpenCL(False)
TAG_CHAR = np.array([202021.25], np.float32)
def readFlow(fn):
""" Read .flo file in Middlebury format"""
# Code adapted from:
# http://stackoverflow.com/questions/28013200/reading-middlebury-flow-files-with-python-bytes-array-numpy
# WARNING: this will work on little-endian architectures (eg Intel x86) only!
# print 'fn = %s'%(fn)
with open(fn, 'rb') as f:
magic = np.fromfile(f, np.float32, count=1)
if 202021.25 != magic:
print('Magic number incorrect. Invalid .flo file')
return None
else:
w = np.fromfile(f, np.int32, count=1)
h = np.fromfile(f, np.int32, count=1)
# print 'Reading %d x %d flo file\n' % (w, h)
data = np.fromfile(f, np.float32, count=2*int(w)*int(h))
# Reshape data into 3D array (columns, rows, bands)
# The reshape here is for visualization, the original code is (w,h,2)
return np.resize(data, (int(h), int(w), 2))
def readPFM(file):
file = open(file, 'rb')
color = None
width = None
height = None
scale = None
endian = None
header = file.readline().rstrip()
if header == b'PF':
color = True
elif header == b'Pf':
color = False
else:
raise Exception('Not a PFM file.')
dim_match = re.match(rb'^(\d+)\s(\d+)\s$', file.readline())
if dim_match:
width, height = map(int, dim_match.groups())
else:
raise Exception('Malformed PFM header.')
scale = float(file.readline().rstrip())
if scale < 0: # little-endian
endian = '<'
scale = -scale
else:
endian = '>' # big-endian
data = np.fromfile(file, endian + 'f')
shape = (height, width, 3) if color else (height, width)
data = np.reshape(data, shape)
data = np.flipud(data)
return data
def writeFlow(filename,uv,v=None):
""" Write optical flow to file.
If v is None, uv is assumed to contain both u and v channels,
stacked in depth.
Original code by Deqing Sun, adapted from Daniel Scharstein.
"""
nBands = 2
if v is None:
assert(uv.ndim == 3)
assert(uv.shape[2] == 2)
u = uv[:,:,0]
v = uv[:,:,1]
else:
u = uv
assert(u.shape == v.shape)
height,width = u.shape
f = open(filename,'wb')
# write the header
f.write(TAG_CHAR)
np.array(width).astype(np.int32).tofile(f)
np.array(height).astype(np.int32).tofile(f)
# arrange into matrix form
tmp = np.zeros((height, width*nBands))
tmp[:,np.arange(width)*2] = u
tmp[:,np.arange(width)*2 + 1] = v
tmp.astype(np.float32).tofile(f)
f.close()
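# Note on the KITTI helpers below (descriptive only, based on the arithmetic they
# apply): KITTI stores flow as 16-bit PNGs holding 64*flow + 2**15 per channel,
# with a third channel marking which pixels carry valid ground truth.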
def readFlowKITTI(filename):
flow = cv2.imread(filename, cv2.IMREAD_ANYDEPTH|cv2.IMREAD_COLOR)
flow = flow[:,:,::-1].astype(np.float32)
flow, valid = flow[:, :, :2], flow[:, :, 2]
flow = (flow - 2**15) / 64.0
return flow, valid
def readDispKITTI(filename):
disp = cv2.imread(filename, cv2.IMREAD_ANYDEPTH) / 256.0
valid = disp > 0.0
flow = np.stack([-disp, np.zeros_like(disp)], -1)
return flow, valid
def writeFlowKITTI(filename, uv):
uv = 64.0 * uv + 2**15
valid = np.ones([uv.shape[0], uv.shape[1], 1])
uv = np.concatenate([uv, valid], axis=-1).astype(np.uint16)
cv2.imwrite(filename, uv[..., ::-1])
def read_gen(file_name, pil=False):
ext = splitext(file_name)[-1]
if ext == '.png' or ext == '.jpeg' or ext == '.ppm' or ext == '.jpg':
return Image.open(file_name)
elif ext == '.bin' or ext == '.raw':
return np.load(file_name)
elif ext == '.flo':
return readFlow(file_name).astype(np.float32)
elif ext == '.pfm':
flow = readPFM(file_name).astype(np.float32)
if len(flow.shape) == 2:
return flow
else:
return flow[:, :, :-1]
return []
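# Round-trip sketch for the .flo helpers above (illustrative; 'tmp.flo' is a
# hypothetical scratch path):
# uv = np.random.randn(4, 5, 2).astype(np.float32)
# writeFlow('tmp.flo', uv)
# assert np.allclose(readFlow('tmp.flo'), uv)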
|
{
"content_hash": "01a886395d96a968210cc5c833fea57f",
"timestamp": "",
"source": "github",
"line_count": 137,
"max_line_length": 109,
"avg_line_length": 29.37226277372263,
"alnum_prop": 0.577286282306163,
"repo_name": "google-research/3d-moments",
"id": "6c491135efaffc25bd61ec3ecde99d236f5deb12",
"size": "4024",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "third_party/RAFT/core/utils/frame_utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "149952"
},
{
"name": "Shell",
"bytes": "1554"
}
],
"symlink_target": ""
}
|
from mock import MagicMock
from .test_base import CliCommandTest
class UsersTest(CliCommandTest):
def setUp(self):
super(UsersTest, self).setUp()
self.use_manager()
self.client.users = MagicMock()
def test_create_users_missing_username(self):
outcome = self.invoke(
'cfy users create',
err_str_segment='2', # Exit code
exception=SystemExit
)
self.assertIn('Missing argument "username"', outcome.output)
def test_create_users_missing_password(self):
outcome = self.invoke(
'cfy users create username',
err_str_segment='2', # Exit code
exception=SystemExit
)
self.assertIn('Missing option "-p" / "--password"', outcome.output)
def test_create_users_default_role(self):
self.invoke('cfy users create username -p password')
call_list = self.client.users.method_calls[0][1]
self.assertEqual(call_list, ('username', 'password', 'user'))
def test_create_users_custom_role(self):
self.invoke('cfy users create username -p password -r suspended')
call_list = self.client.users.method_calls[0][1]
self.assertEqual(call_list, ('username', 'password', 'suspended'))
def test_create_users_invalid_role(self):
outcome = self.invoke(
'cfy users create username -p password -r invalid_role',
err_str_segment='2', # Exit code
exception=SystemExit
)
self.assertIn(
'Invalid value for "-r" / "--security-role"',
outcome.output
)
|
{
"content_hash": "ad0c046b982052a5e959e53b75041660",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 75,
"avg_line_length": 34.659574468085104,
"alnum_prop": 0.6040515653775322,
"repo_name": "isaac-s/cloudify-cli",
"id": "c9b6669e23f7833556b672e5d7f7495c4da355f9",
"size": "2266",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cloudify_cli/tests/commands/test_users.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "6346"
},
{
"name": "Inno Setup",
"bytes": "4977"
},
{
"name": "Makefile",
"bytes": "4178"
},
{
"name": "PowerShell",
"bytes": "8677"
},
{
"name": "Python",
"bytes": "1991534"
},
{
"name": "Ruby",
"bytes": "29400"
},
{
"name": "Shell",
"bytes": "11075"
}
],
"symlink_target": ""
}
|
"""Code to clean-up transform the JSON description of a dataflow.
Example clean-ups:
1. Dictionaries representing primitives with a schema will be converted to the
primitive:
Ex: { '@type': "https://schema.org/Text", 'value': "Hello" } becomes "Hello"
2. Fields that are unlikely to be human consumable may be hidden.
Ex: The serialized_fn field will be hidden, since humans are unlikely to try
to read the serialized Java object.
"""
_BLACKLISTED_PROPERTIES = set(['serialized_fn'])
_VALUE_RETRIEVERS = {
'http://schema.org/Boolean': lambda value: value.boolean_value,
'http://schema.org/Text': lambda value: value.string_value,
}
def _ExtractStep(step_msg):
"""Converts a Step message into a dict with more sensible structure.
Args:
step_msg: A Step message.
Returns:
A dict with the cleaned up information.
"""
properties = {}
if step_msg.properties:
for prop in step_msg.properties.additionalProperties:
if prop.key not in _BLACKLISTED_PROPERTIES:
properties[prop.key] = _ExtractValue(prop.value)
# TODO(user): Would it make sense to collapse properties into the
# top-level and assume there isn't a property with 'kind' or 'name' as the
# key?
return {
'kind': step_msg.kind,
'name': step_msg.name,
'properties': properties,
}
def _ExtractDecoratedObject(proto):
"""Extracts an object from the proto representation of the JSON object.
Args:
proto: A protocol representation of a JSON object.
Returns:
A clean representation of the JSON object. If it was an object
representing a primitive, then that primitive.
"""
prop_dict = {}
for prop in proto.object_value.properties:
prop_dict[prop.key] = prop.value
ty = prop_dict.get('@type', None)
retriever = ty and _VALUE_RETRIEVERS.get(ty.string_value, None)
if not ty or not retriever:
# No @type means this wasn't an object-wrapped leaf.
# No retriever means that this was created "by us", so we just want to
# output the properties. We leave the @type around since it has semantic
# value.
return dict((k, _ExtractValue(v)) for k, v in prop_dict.iteritems())
  # If we have a retriever, we can throw away everything except the value, and
# convert it to a more reasonable type. This is important since it cleans
# up the printed representation significantly.
try:
return retriever(prop_dict['value'])
except KeyError:
return 'Missing value for type [{0}] in proto [{1}]'.format(
ty.string_value, proto)
def _ExtractValue(proto):
# Values are weird, because we actually wrap JSON objects around real
# JSON values.
if proto.object_value:
return _ExtractDecoratedObject(proto)
if proto.array_value:
return [_ExtractValue(v) for v in proto.array_value.entries]
if proto.string_value:
return proto.string_value
return 'No decoding provided for: {0}'.format(proto)
def ExtractSteps(job):
"""Extract the cleaned up step dictionary for all the steps in the job.
Args:
job: A Job message.
Returns:
A list of cleaned up step dictionaries.
"""
return [_ExtractStep(step) for step in job.steps]
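# Illustrative use (assumes 'job' is a Job message already fetched from the
# Dataflow API):
#   for step in ExtractSteps(job):
#     print '%s (%s)' % (step['name'], step['kind'])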
|
{
"content_hash": "b1a36d3e3c10db343f9b62739b961783",
"timestamp": "",
"source": "github",
"line_count": 100,
"max_line_length": 78,
"avg_line_length": 31.59,
"alnum_prop": 0.7011712567268122,
"repo_name": "wemanuel/smry",
"id": "98aec9da991686b98759595ed73490770b06c36d",
"size": "3209",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "smry/server-auth/ls/google-cloud-sdk/lib/googlecloudsdk/dataflow/lib/step_json.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "3990"
},
{
"name": "Groff",
"bytes": "1221174"
},
{
"name": "HTML",
"bytes": "1873470"
},
{
"name": "JavaScript",
"bytes": "2192"
},
{
"name": "Makefile",
"bytes": "6032"
},
{
"name": "PHP",
"bytes": "16660"
},
{
"name": "Python",
"bytes": "47139164"
},
{
"name": "Shell",
"bytes": "37102"
},
{
"name": "SourcePawn",
"bytes": "1160"
}
],
"symlink_target": ""
}
|
from django.test import TestCase
from oscar.test.factories import create_product
from django.db import IntegrityError
from django.utils.translation import ugettext_lazy as _
from oscar.apps.offer import custom
class CustomRange(object):
name = "Custom range"
def contains_product(self, product):
return product.title.startswith("A")
def num_products(self):
return None
class CustomRangeLazy(object):
name = _("Custom range with ugettext_lazy")
def contains_product(self, product):
return product.title.startswith("B")
def num_products(self):
return None
class TestACustomRange(TestCase):
def test_creating_unique_custom_range(self):
custom.create_range(CustomRange)
try:
custom.create_range(CustomRange)
except IntegrityError:
self.fail(
                'IntegrityError when adding the same CustomRange as an existing one')
def test_must_have_a_text_name(self):
try:
custom.create_range(CustomRangeLazy)
except Exception:
pass
else:
self.fail("Range can't have ugettext titles")
def test_correctly_includes_match(self):
rng = custom.create_range(CustomRange)
test_product = create_product(title=u"A tale")
self.assertTrue(rng.contains_product(test_product))
def test_correctly_excludes_nonmatch(self):
rng = custom.create_range(CustomRange)
test_product = create_product(title=u"B tale")
self.assertFalse(rng.contains_product(test_product))
|
{
"content_hash": "6588d751dead8ab36ca6b1a56cb9c5bb",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 77,
"avg_line_length": 28.654545454545456,
"alnum_prop": 0.6694162436548223,
"repo_name": "marcoantoniooliveira/labweb",
"id": "19691cb9877e09ce305d16c8ce0564fe8498bb8b",
"size": "1576",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "tests/unit/offer/custom_range_tests.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "5939"
},
{
"name": "CSS",
"bytes": "1534157"
},
{
"name": "CoffeeScript",
"bytes": "21"
},
{
"name": "JavaScript",
"bytes": "2968822"
},
{
"name": "LiveScript",
"bytes": "6103"
},
{
"name": "Puppet",
"bytes": "3507"
},
{
"name": "Python",
"bytes": "30402832"
},
{
"name": "Shell",
"bytes": "10782"
},
{
"name": "TeX",
"bytes": "56626"
},
{
"name": "XSLT",
"bytes": "49764"
}
],
"symlink_target": ""
}
|
from oslo_config import cfg
import webob.exc
from neutron._i18n import _
from neutron.api import extensions
from neutron.api.v2 import attributes as attr
from neutron.common import exceptions as nexception
allowed_address_pair_opts = [
#TODO(limao): use quota framework when it support quota for attributes
cfg.IntOpt('max_allowed_address_pair', default=10,
help=_("Maximum number of allowed address pairs")),
]
cfg.CONF.register_opts(allowed_address_pair_opts)
class AllowedAddressPairsMissingIP(nexception.InvalidInput):
message = _("AllowedAddressPair must contain ip_address")
class AddressPairAndPortSecurityRequired(nexception.Conflict):
message = _("Port Security must be enabled in order to have allowed "
"address pairs on a port.")
class DuplicateAddressPairInRequest(nexception.InvalidInput):
message = _("Request contains duplicate address pair: "
"mac_address %(mac_address)s ip_address %(ip_address)s.")
class AllowedAddressPairExhausted(nexception.BadRequest):
message = _("The number of allowed address pair "
"exceeds the maximum %(quota)s.")
def _validate_allowed_address_pairs(address_pairs, valid_values=None):
unique_check = {}
try:
if len(address_pairs) > cfg.CONF.max_allowed_address_pair:
raise AllowedAddressPairExhausted(
quota=cfg.CONF.max_allowed_address_pair)
except TypeError:
raise webob.exc.HTTPBadRequest(
_("Allowed address pairs must be a list."))
for address_pair in address_pairs:
# mac_address is optional, if not set we use the mac on the port
if 'mac_address' in address_pair:
msg = attr._validate_mac_address(address_pair['mac_address'])
if msg:
raise webob.exc.HTTPBadRequest(msg)
if 'ip_address' not in address_pair:
raise AllowedAddressPairsMissingIP()
mac = address_pair.get('mac_address')
ip_address = address_pair['ip_address']
if (mac, ip_address) not in unique_check:
unique_check[(mac, ip_address)] = None
else:
raise DuplicateAddressPairInRequest(mac_address=mac,
ip_address=ip_address)
invalid_attrs = set(address_pair.keys()) - set(['mac_address',
'ip_address'])
if invalid_attrs:
msg = (_("Unrecognized attribute(s) '%s'") %
', '.join(set(address_pair.keys()) -
set(['mac_address', 'ip_address'])))
raise webob.exc.HTTPBadRequest(msg)
if '/' in ip_address:
msg = attr._validate_subnet(ip_address)
else:
msg = attr._validate_ip_address(ip_address)
if msg:
raise webob.exc.HTTPBadRequest(msg)
attr.validators['type:validate_allowed_address_pairs'] = (
_validate_allowed_address_pairs)
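# Example of a value the validator above accepts (addresses are illustrative):
#   [{"ip_address": "10.0.0.5"},
#    {"ip_address": "10.0.1.0/24", "mac_address": "fa:16:3e:aa:bb:cc"}]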
ADDRESS_PAIRS = 'allowed_address_pairs'
EXTENDED_ATTRIBUTES_2_0 = {
'ports': {
ADDRESS_PAIRS: {'allow_post': True, 'allow_put': True,
'convert_list_to':
attr.convert_kvp_list_to_dict,
'validate': {'type:validate_allowed_address_pairs':
None},
'enforce_policy': True,
'default': attr.ATTR_NOT_SPECIFIED,
'is_visible': True},
}
}
class Allowedaddresspairs(extensions.ExtensionDescriptor):
"""Extension class supporting allowed address pairs."""
@classmethod
def get_name(cls):
return "Allowed Address Pairs"
@classmethod
def get_alias(cls):
return "allowed-address-pairs"
@classmethod
def get_description(cls):
return "Provides allowed address pairs"
@classmethod
def get_updated(cls):
return "2013-07-23T10:00:00-00:00"
def get_extended_resources(self, version):
if version == "2.0":
attr.PLURALS.update({'allowed_address_pairs':
'allowed_address_pair'})
return EXTENDED_ATTRIBUTES_2_0
else:
return {}
|
{
"content_hash": "e51a59d9c2ccfa0209eb67611d2b4ccb",
"timestamp": "",
"source": "github",
"line_count": 122,
"max_line_length": 75,
"avg_line_length": 34.959016393442624,
"alnum_prop": 0.5981242672919109,
"repo_name": "dims/neutron",
"id": "fff7151d11debe8e77c7913c47d42456b959b3ec",
"size": "4892",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "neutron/extensions/allowedaddresspairs.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "1047"
},
{
"name": "Python",
"bytes": "8048836"
},
{
"name": "Shell",
"bytes": "14802"
}
],
"symlink_target": ""
}
|
# [START aiplatform_v1_generated_DatasetService_ExportData_sync]
from google.cloud import aiplatform_v1
def sample_export_data():
# Create a client
client = aiplatform_v1.DatasetServiceClient()
# Initialize request argument(s)
export_config = aiplatform_v1.ExportDataConfig()
export_config.gcs_destination.output_uri_prefix = "output_uri_prefix_value"
request = aiplatform_v1.ExportDataRequest(
name="name_value",
export_config=export_config,
)
# Make the request
operation = client.export_data(request=request)
print("Waiting for operation to complete...")
response = operation.result()
# Handle the response
print(response)
# [END aiplatform_v1_generated_DatasetService_ExportData_sync]
|
{
"content_hash": "eeb5327c0ef963d02b4c9c07ebd9c34a",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 79,
"avg_line_length": 25.962962962962962,
"alnum_prop": 0.7061340941512125,
"repo_name": "googleapis/python-aiplatform",
"id": "4a68486f43a304267c44781e23cc1ea420de8bdd",
"size": "2087",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "samples/generated_samples/aiplatform_v1_generated_dataset_service_export_data_sync.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "23977004"
},
{
"name": "Shell",
"bytes": "30668"
}
],
"symlink_target": ""
}
|
import logging
import re
from webkitpy.common.webkit_finder import WebKitFinder
from webkitpy.layout_tests.port import chromium
from webkitpy.layout_tests.port import chromium_win
from webkitpy.layout_tests.port import config
_log = logging.getLogger(__name__)
class ChromiumLinuxPort(chromium.ChromiumPort):
port_name = 'chromium-linux'
SUPPORTED_VERSIONS = ('x86', 'x86_64')
FALLBACK_PATHS = { 'x86_64': [ 'chromium-linux' ] + chromium_win.ChromiumWinPort.latest_platform_fallback_path() }
FALLBACK_PATHS['x86'] = ['chromium-linux-x86'] + FALLBACK_PATHS['x86_64']
DEFAULT_BUILD_DIRECTORIES = ('sconsbuild', 'out')
@classmethod
def _determine_driver_path_statically(cls, host, options):
config_object = config.Config(host.executive, host.filesystem)
build_directory = getattr(options, 'build_directory', None)
webkit_base = WebKitFinder(host.filesystem).webkit_base()
chromium_base = cls._chromium_base_dir(host.filesystem)
if hasattr(options, 'configuration') and options.configuration:
configuration = options.configuration
else:
configuration = config_object.default_configuration()
return cls._static_build_path(host.filesystem, build_directory, chromium_base, webkit_base, configuration, [cls.CONTENT_SHELL_NAME])
@staticmethod
def _determine_architecture(filesystem, executive, driver_path):
file_output = ''
if filesystem.exists(driver_path):
# The --dereference flag tells file to follow symlinks
file_output = executive.run_command(['file', '--brief', '--dereference', driver_path], return_stderr=True)
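        # Typical 'file' output matched below (illustrative):
        #   "ELF 64-bit LSB  executable, x86-64, version 1 (SYSV), ..."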
if re.match(r'ELF 32-bit LSB\s+executable', file_output):
return 'x86'
if re.match(r'ELF 64-bit LSB\s+executable', file_output):
return 'x86_64'
if file_output:
_log.warning('Could not determine architecture from "file" output: %s' % file_output)
        # We don't know what the architecture is; default to 'x86_64' because
# maybe we're rebaselining and the binary doesn't actually exist,
# or something else weird is going on. It's okay to do this because
# if we actually try to use the binary, check_build() should fail.
return 'x86_64'
@classmethod
def determine_full_port_name(cls, host, options, port_name):
if port_name.endswith('-linux'):
return port_name + '-' + cls._determine_architecture(host.filesystem, host.executive, cls._determine_driver_path_statically(host, options))
return port_name
def __init__(self, host, port_name, **kwargs):
chromium.ChromiumPort.__init__(self, host, port_name, **kwargs)
(base, arch) = port_name.rsplit('-', 1)
assert base == 'chromium-linux'
assert arch in self.SUPPORTED_VERSIONS
assert port_name in ('chromium-linux', 'chromium-linux-x86', 'chromium-linux-x86_64')
self._version = 'lucid' # We only support lucid right now.
self._architecture = arch
def default_baseline_search_path(self):
port_names = self.FALLBACK_PATHS[self._architecture]
return map(self._webkit_baseline_path, port_names)
def _modules_to_search_for_symbols(self):
return [self._build_path('libffmpegsumo.so')]
def check_build(self, needs_http):
result = chromium.ChromiumPort.check_build(self, needs_http)
if not result:
_log.error('For complete Linux build requirements, please see:')
_log.error('')
_log.error(' http://code.google.com/p/chromium/wiki/LinuxBuildInstructions')
return result
def operating_system(self):
return 'linux'
#
# PROTECTED METHODS
#
def _check_apache_install(self):
result = self._check_file_exists(self._path_to_apache(), "apache2")
result = self._check_file_exists(self._path_to_apache_config_file(), "apache2 config file") and result
if not result:
_log.error(' Please install using: "sudo apt-get install apache2 libapache2-mod-php5"')
_log.error('')
return result
def _check_lighttpd_install(self):
result = self._check_file_exists(
self._path_to_lighttpd(), "LigHTTPd executable")
result = self._check_file_exists(self._path_to_lighttpd_php(), "PHP CGI executable") and result
result = self._check_file_exists(self._path_to_lighttpd_modules(), "LigHTTPd modules") and result
if not result:
_log.error(' Please install using: "sudo apt-get install lighttpd php5-cgi"')
_log.error('')
return result
def _wdiff_missing_message(self):
return 'wdiff is not installed; please install using "sudo apt-get install wdiff"'
def _path_to_apache(self):
# The Apache binary path can vary depending on OS and distribution
# See http://wiki.apache.org/httpd/DistrosDefaultLayout
for path in ["/usr/sbin/httpd", "/usr/sbin/apache2"]:
if self._filesystem.exists(path):
return path
_log.error("Could not find apache. Not installed or unknown path.")
return None
def _path_to_lighttpd(self):
return "/usr/sbin/lighttpd"
def _path_to_lighttpd_modules(self):
return "/usr/lib/lighttpd"
def _path_to_lighttpd_php(self):
return "/usr/bin/php-cgi"
def _path_to_driver(self, configuration=None):
binary_name = self.driver_name()
return self._build_path_with_configuration(configuration, binary_name)
def _path_to_helper(self):
return None
|
{
"content_hash": "d2490f416e045b2a12ba3987c9d65dec",
"timestamp": "",
"source": "github",
"line_count": 136,
"max_line_length": 151,
"avg_line_length": 41.6985294117647,
"alnum_prop": 0.650326221125022,
"repo_name": "espadrine/opera",
"id": "a4a6abfb5ff4d8dd6dfba4c4a4ee2188cd66a7e3",
"size": "7201",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "chromium/src/third_party/WebKit/Tools/Scripts/webkitpy/layout_tests/port/chromium_linux.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
"""A setuptools based setup module.
See:
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
"""
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
# To use a consistent encoding
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='endymion',
# Versions should comply with PEP440. For a discussion on single-sourcing
# the version across setup.py and the project code, see
# https://packaging.python.org/en/latest/single_source_version.html
version='1.4.1',
description='A small tool to check the link validity of external Vagrant boxes on Atlas',
long_description=long_description,
# The project's main homepage.
url='https://github.com/lpancescu/endymion',
# Author details
author='Laurențiu Păncescu',
author_email='laurentiu@laurentiupancescu.com',
# Choose your license
license='MIT',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 4 - Beta',
# Indicate who your project is intended for
'Intended Audience :: Developers',
'Topic :: Utilities',
# Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: MIT License',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
],
# What does your project relate to?
keywords='vagrant atlas',
# You can just specify the packages manually here if your project is
# simple. Or you can use find_packages().
packages=find_packages(exclude=[]),
# Alternatively, if you want to distribute just a my_module.py, uncomment
# this:
# py_modules=["my_module"],
# List run-time dependencies here. These will be installed by pip when
# your project is installed. For an analysis of "install_requires" vs pip's
# requirements files see:
# https://packaging.python.org/en/latest/requirements.html
install_requires=[],
# List additional groups of dependencies here (e.g. development
# dependencies). You can install these using the following syntax,
# for example:
# $ pip install -e .[dev,test]
extras_require={},
# If there are data files included in your packages that need to be
# installed, specify them here. If using Python 2.6 or less, then these
# have to be included in MANIFEST.in as well.
package_data={},
# Although 'package_data' is the preferred approach, in some case you may
# need to place data files outside of your packages. See:
# http://docs.python.org/3.4/distutils/setupscript.html#installing-additional-files # noqa
# In this case, 'data_file' will be installed into '<sys.prefix>/my_data'
data_files=[],
# To provide executable scripts, use entry points in preference to the
# "scripts" keyword. Entry points provide cross-platform support and allow
# pip to create the appropriate form of executable for the target platform.
entry_points={
'console_scripts': [
'endymion=endymion:main',
],
},
)
|
{
"content_hash": "7da971f51cd219ef23ccd76c66cc8370",
"timestamp": "",
"source": "github",
"line_count": 106,
"max_line_length": 94,
"avg_line_length": 35.216981132075475,
"alnum_prop": 0.673720867934637,
"repo_name": "lpancescu/atlas-lint",
"id": "89570b1430028aa2e02499d00927467b8147f5f8",
"size": "3767",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "4942"
}
],
"symlink_target": ""
}
|
import os
import re
from setuptools import setup, find_packages
version = None
for line in open('./djfactory/__init__.py'):
    m = re.search(r'__version__\s*=\s*(.*)', line)
if m:
version = m.group(1).strip()[1:-1] # quotes
break
assert version
setup(
name='djfactory',
version=version,
description="Mozilla's Django app skeleton.",
long_description=open(os.path.join(os.path.dirname(__file__),
'README.rst')).read(),
author='Kumar McMillan and contributors',
author_email='',
license="BSD License",
url='https://github.com/hfeeki/djfactory',
include_package_data=True,
classifiers = [
'Intended Audience :: Developers',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2.6',
],
packages=find_packages(exclude=['tests']),
entry_points="""
[console_scripts]
djfactory = djfactory.cmd:main
""",
install_requires=[])
|
{
"content_hash": "028acfd2df93e3ab573140833183ec92",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 65,
"avg_line_length": 27.31578947368421,
"alnum_prop": 0.5934489402697495,
"repo_name": "hfeeki/djfactory",
"id": "e07f97d237d25450c98fc3122645a81ebe24612d",
"size": "1038",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
"""Dispatches tests, either sharding or replicating them.
Performs the following steps:
* Create a test collection factory, using the given tests
- If sharding: test collection factory returns the same shared test collection
to all test runners
- If replicating: test collection factory returns a unique test collection to
each test runner, with the same set of tests in each.
* Create a test runner for each device.
* Run each test runner in its own thread, grabbing tests from the test
collection until there are no tests left.
"""
# TODO(jbudorick) Deprecate and remove this class after any relevant parts have
# been ported to the new environment / test instance model.
import logging
import threading
from pylib import constants
from pylib.base import base_test_result
from pylib.base import test_collection
from pylib.device import device_errors
from pylib.utils import reraiser_thread
from pylib.utils import watchdog_timer
DEFAULT_TIMEOUT = 7 * 60 # seven minutes
class _ThreadSafeCounter(object):
"""A threadsafe counter."""
def __init__(self):
self._lock = threading.Lock()
self._value = 0
def GetAndIncrement(self):
"""Get the current value and increment it atomically.
Returns:
The value before incrementing.
"""
with self._lock:
pre_increment = self._value
self._value += 1
return pre_increment
class _Test(object):
"""Holds a test with additional metadata."""
def __init__(self, test, tries=0):
"""Initializes the _Test object.
Args:
test: The test.
tries: Number of tries so far.
"""
self.test = test
self.tries = tries
def _RunTestsFromQueue(runner, collection, out_results, watcher,
num_retries, tag_results_with_device=False):
"""Runs tests from the collection until empty using the given runner.
  Adds TestRunResults objects to the out_results list and may re-add tests to
  the collection for retries.
Args:
runner: A TestRunner object used to run the tests.
collection: A TestCollection from which to get _Test objects to run.
out_results: A list to add TestRunResults to.
watcher: A watchdog_timer.WatchdogTimer object, used as a shared timeout.
num_retries: Number of retries for a test.
tag_results_with_device: If True, appends the name of the device on which
the test was run to the test name. Used when replicating to identify
which device ran each copy of the test, and to ensure each copy of the
test is recorded separately.
"""
def TagTestRunResults(test_run_results):
"""Tags all results with the last 4 digits of the device id.
Used when replicating tests to distinguish the same tests run on different
devices. We use a set to store test results, so the hash (generated from
name and tag) must be unique to be considered different results.
"""
new_test_run_results = base_test_result.TestRunResults()
for test_result in test_run_results.GetAll():
test_result.SetName('%s_%s' % (runner.device_serial[-4:],
test_result.GetName()))
new_test_run_results.AddResult(test_result)
return new_test_run_results
for test in collection:
watcher.Reset()
try:
if not runner.device.IsOnline():
# Device is unresponsive, stop handling tests on this device.
msg = 'Device %s is unresponsive.' % runner.device_serial
logging.warning(msg)
raise device_errors.DeviceUnreachableError(msg)
result, retry = runner.RunTest(test.test)
if tag_results_with_device:
result = TagTestRunResults(result)
test.tries += 1
if retry and test.tries <= num_retries:
# Retry non-passing results, only record passing results.
pass_results = base_test_result.TestRunResults()
pass_results.AddResults(result.GetPass())
out_results.append(pass_results)
logging.warning('Will retry test %s, try #%s.', retry, test.tries)
collection.add(_Test(test=retry, tries=test.tries))
else:
# All tests passed or retry limit reached. Either way, record results.
out_results.append(result)
except:
# An unhandleable exception, ensure tests get run by another device and
# reraise this exception on the main thread.
collection.add(test)
raise
finally:
# Retries count as separate tasks so always mark the popped test as done.
collection.test_completed()
def _SetUp(runner_factory, device, out_runners, threadsafe_counter):
"""Creates a test runner for each device and calls SetUp() in parallel.
Note: if a device is unresponsive the corresponding TestRunner will not be
added to out_runners.
Args:
runner_factory: Callable that takes a device and index and returns a
TestRunner object.
device: The device serial number to set up.
out_runners: List to add the successfully set up TestRunner object.
threadsafe_counter: A _ThreadSafeCounter object used to get shard indices.
"""
try:
index = threadsafe_counter.GetAndIncrement()
logging.warning('Creating shard %s for device %s.', index, device)
runner = runner_factory(device, index)
runner.SetUp()
out_runners.append(runner)
except device_errors.DeviceUnreachableError as e:
logging.warning('Failed to create shard for %s: [%s]', device, e)
def _RunAllTests(runners, test_collection_factory, num_retries, timeout=None,
tag_results_with_device=False):
"""Run all tests using the given TestRunners.
Args:
runners: A list of TestRunner objects.
test_collection_factory: A callable to generate a TestCollection object for
each test runner.
num_retries: Number of retries for a test.
timeout: Watchdog timeout in seconds.
tag_results_with_device: If True, appends the name of the device on which
the test was run to the test name. Used when replicating to identify
which device ran each copy of the test, and to ensure each copy of the
test is recorded separately.
Returns:
A tuple of (TestRunResults object, exit code)
"""
logging.warning('Running tests with %s test runners.' % (len(runners)))
results = []
exit_code = 0
run_results = base_test_result.TestRunResults()
watcher = watchdog_timer.WatchdogTimer(timeout)
test_collections = [test_collection_factory() for _ in runners]
threads = [
reraiser_thread.ReraiserThread(
_RunTestsFromQueue,
[r, tc, results, watcher, num_retries, tag_results_with_device],
name=r.device_serial[-4:])
for r, tc in zip(runners, test_collections)]
workers = reraiser_thread.ReraiserThreadGroup(threads)
workers.StartAll()
try:
workers.JoinAll(watcher)
except device_errors.CommandFailedError:
logging.exception('Command failed on device.')
  except device_errors.CommandTimeoutError:
    logging.exception('Command timed out on device.')
except device_errors.DeviceUnreachableError:
logging.exception('Device became unreachable.')
if not all((len(tc) == 0 for tc in test_collections)):
logging.error('Only ran %d tests (all devices are likely offline).' %
len(results))
for tc in test_collections:
run_results.AddResults(base_test_result.BaseTestResult(
t, base_test_result.ResultType.UNKNOWN) for t in tc.test_names())
for r in results:
run_results.AddTestRunResults(r)
if not run_results.DidRunPass():
exit_code = constants.ERROR_EXIT_CODE
return (run_results, exit_code)
def _CreateRunners(runner_factory, devices, timeout=None):
"""Creates a test runner for each device and calls SetUp() in parallel.
Note: if a device is unresponsive the corresponding TestRunner will not be
included in the returned list.
Args:
runner_factory: Callable that takes a device and index and returns a
TestRunner object.
devices: List of device serial numbers as strings.
timeout: Watchdog timeout in seconds, defaults to the default timeout.
Returns:
A list of TestRunner objects.
"""
logging.warning('Creating %s test runners.' % len(devices))
runners = []
counter = _ThreadSafeCounter()
threads = reraiser_thread.ReraiserThreadGroup(
[reraiser_thread.ReraiserThread(_SetUp,
[runner_factory, d, runners, counter],
name=str(d)[-4:])
for d in devices])
threads.StartAll()
threads.JoinAll(watchdog_timer.WatchdogTimer(timeout))
return runners
def _TearDownRunners(runners, timeout=None):
"""Calls TearDown() for each test runner in parallel.
Args:
runners: A list of TestRunner objects.
timeout: Watchdog timeout in seconds, defaults to the default timeout.
"""
threads = reraiser_thread.ReraiserThreadGroup(
[reraiser_thread.ReraiserThread(r.TearDown, name=r.device_serial[-4:])
for r in runners])
threads.StartAll()
threads.JoinAll(watchdog_timer.WatchdogTimer(timeout))
def ApplyMaxPerRun(tests, max_per_run):
"""Rearrange the tests so that no group contains more than max_per_run tests.
Args:
    tests: A list of tests, where each entry is either a test object or a string
      of colon-joined test names.
    max_per_run: The maximum number of tests to allow in a single group.
Returns:
A list of tests with no more than max_per_run per run.
"""
tests_expanded = []
for test_group in tests:
if type(test_group) != str:
# Do not split test objects which are not strings.
tests_expanded.append(test_group)
else:
test_split = test_group.split(':')
for i in range(0, len(test_split), max_per_run):
tests_expanded.append(':'.join(test_split[i:i+max_per_run]))
return tests_expanded
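# For example, ApplyMaxPerRun(['a:b:c:d'], 2) splits the single colon-joined group
# into ['a:b', 'c:d'] (the test names here are placeholders).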
def RunTests(tests, runner_factory, devices, shard=True,
test_timeout=DEFAULT_TIMEOUT, setup_timeout=DEFAULT_TIMEOUT,
num_retries=2, max_per_run=256):
"""Run all tests on attached devices, retrying tests that don't pass.
Args:
tests: List of tests to run.
runner_factory: Callable that takes a device and index and returns a
TestRunner object.
devices: List of attached devices.
shard: True if we should shard, False if we should replicate tests.
- Sharding tests will distribute tests across all test runners through a
shared test collection.
- Replicating tests will copy all tests to each test runner through a
unique test collection for each test runner.
test_timeout: Watchdog timeout in seconds for running tests.
setup_timeout: Watchdog timeout in seconds for creating and cleaning up
test runners.
num_retries: Number of retries for a test.
max_per_run: Maximum number of tests to run in any group.
Returns:
A tuple of (base_test_result.TestRunResults object, exit code).
"""
if not tests:
logging.critical('No tests to run.')
return (base_test_result.TestRunResults(), constants.ERROR_EXIT_CODE)
tests_expanded = ApplyMaxPerRun(tests, max_per_run)
if shard:
# Generate a shared TestCollection object for all test runners, so they
# draw from a common pool of tests.
shared_test_collection = test_collection.TestCollection(
[_Test(t) for t in tests_expanded])
test_collection_factory = lambda: shared_test_collection
tag_results_with_device = False
log_string = 'sharded across devices'
else:
# Generate a unique TestCollection object for each test runner, but use
# the same set of tests.
test_collection_factory = lambda: test_collection.TestCollection(
[_Test(t) for t in tests_expanded])
tag_results_with_device = True
log_string = 'replicated on each device'
logging.info('Will run %d tests (%s): %s',
len(tests_expanded), log_string, str(tests_expanded))
runners = _CreateRunners(runner_factory, devices, setup_timeout)
try:
return _RunAllTests(runners, test_collection_factory,
num_retries, test_timeout, tag_results_with_device)
finally:
try:
_TearDownRunners(runners, setup_timeout)
except device_errors.DeviceUnreachableError as e:
logging.warning('Device unresponsive during TearDown: [%s]', e)
except Exception as e:
logging.error('Unexpected exception caught during TearDown: %s' % str(e))
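# Illustrative call; MakeRunner and attached_devices are hypothetical placeholders:
#   results, exit_code = RunTests(['FooTest.Bar:FooTest.Baz'], MakeRunner,
#                                 attached_devices, shard=True)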
|
{
"content_hash": "b9fdefc606f33aa8e32b90721126fa13",
"timestamp": "",
"source": "github",
"line_count": 331,
"max_line_length": 80,
"avg_line_length": 37.027190332326285,
"alnum_prop": 0.6941090078328982,
"repo_name": "Just-D/chromium-1",
"id": "93df28ccb45ed8dcabd392534c1a31e39bbdd963",
"size": "12419",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "build/android/pylib/base/test_dispatcher.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "AppleScript",
"bytes": "6973"
},
{
"name": "Arduino",
"bytes": "464"
},
{
"name": "Assembly",
"bytes": "37073"
},
{
"name": "Batchfile",
"bytes": "8451"
},
{
"name": "C",
"bytes": "9517927"
},
{
"name": "C++",
"bytes": "244067615"
},
{
"name": "CSS",
"bytes": "944025"
},
{
"name": "DM",
"bytes": "60"
},
{
"name": "Groff",
"bytes": "2494"
},
{
"name": "HTML",
"bytes": "27307576"
},
{
"name": "Java",
"bytes": "14757472"
},
{
"name": "JavaScript",
"bytes": "20666212"
},
{
"name": "Makefile",
"bytes": "70864"
},
{
"name": "Objective-C",
"bytes": "1772355"
},
{
"name": "Objective-C++",
"bytes": "10088862"
},
{
"name": "PHP",
"bytes": "97817"
},
{
"name": "PLpgSQL",
"bytes": "178732"
},
{
"name": "Perl",
"bytes": "63937"
},
{
"name": "Protocol Buffer",
"bytes": "485040"
},
{
"name": "Python",
"bytes": "8652947"
},
{
"name": "Shell",
"bytes": "481276"
},
{
"name": "Standard ML",
"bytes": "5106"
},
{
"name": "XSLT",
"bytes": "418"
},
{
"name": "nesC",
"bytes": "18347"
}
],
"symlink_target": ""
}
|
class ModuleDocFragment(object):
# Standard F5 documentation fragment
DOCUMENTATION = r'''
options:
provider:
description:
- A dict object containing connection details.
type: dict
version_added: '2.5'
suboptions:
password:
description:
- The password for the user account used to connect to the BIG-IP.
- You may omit this option by setting the environment variable C(F5_PASSWORD).
type: str
required: true
aliases: [ pass, pwd ]
server:
description:
- The BIG-IP host.
- You may omit this option by setting the environment variable C(F5_SERVER).
type: str
required: true
server_port:
description:
- The BIG-IP server port.
- You may omit this option by setting the environment variable C(F5_SERVER_PORT).
type: int
default: 443
user:
description:
- The username to connect to the BIG-IP with. This user must have
administrative privileges on the device.
- You may omit this option by setting the environment variable C(F5_USER).
type: str
required: true
validate_certs:
description:
- If C(no), SSL certificates are not validated. Use this only
on personally controlled sites using self-signed certificates.
- You may omit this option by setting the environment variable C(F5_VALIDATE_CERTS).
type: bool
default: yes
timeout:
description:
- Specifies the timeout in seconds for communicating with the network device
for either connecting or sending commands. If the timeout is
exceeded before the operation is completed, the module will error.
type: int
ssh_keyfile:
description:
- Specifies the SSH keyfile to use to authenticate the connection to
the remote device. This argument is only used for I(cli) transports.
- You may omit this option by setting the environment variable C(ANSIBLE_NET_SSH_KEYFILE).
type: path
transport:
description:
- Configures the transport connection to use when connecting to the
remote device.
type: str
choices: [ cli, rest ]
default: rest
auth_provider:
description:
      - Configures the auth provider used to obtain authentication tokens from the remote device.
      - This option is mainly used when working with BIG-IQ devices.
type: str
notes:
- For more information on using Ansible to manage F5 Networks devices see U(https://www.ansible.com/integrations/networks/f5).
- Requires BIG-IP software version >= 12.
- The F5 modules only manipulate the running configuration of the F5 product. To ensure that BIG-IP
specific configuration persists to disk, be sure to include at least one task that uses the
M(bigip_config) module to save the running configuration. Refer to the module's documentation for
the correct usage of the module to save your running configuration.
'''
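# Illustrative provider dict matching the suboptions documented above; the
# host and credential values are placeholders:
#   provider = dict(server='192.0.2.10', server_port=443, user='admin',
#                   password='secret', validate_certs=False)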
|
{
"content_hash": "3ef9ef333305541c279d21dad3d06832",
"timestamp": "",
"source": "github",
"line_count": 75,
"max_line_length": 128,
"avg_line_length": 42.026666666666664,
"alnum_prop": 0.6516497461928934,
"repo_name": "thaim/ansible",
"id": "e247021e3cf586b84d9e23b8e57e96c56bbff7f2",
"size": "3272",
"binary": false,
"copies": "22",
"ref": "refs/heads/fix-broken-link",
"path": "lib/ansible/plugins/doc_fragments/f5.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7"
},
{
"name": "Shell",
"bytes": "246"
}
],
"symlink_target": ""
}
|
from m5.objects import *
# Simple ALU Instructions have a latency of 1
class O3_ARM_v7a_Simple_Int(FUDesc):
opList = [ OpDesc(opClass='IntAlu', opLat=1) ]
count = 2
# Complex ALU instructions have a variable latencies
class O3_ARM_v7a_Complex_Int(FUDesc):
opList = [ OpDesc(opClass='IntMult', opLat=3, pipelined=True),
OpDesc(opClass='IntDiv', opLat=12, pipelined=False),
OpDesc(opClass='IprAccess', opLat=3, pipelined=True) ]
count = 1
# Floating point and SIMD instructions
class O3_ARM_v7a_FP(FUDesc):
opList = [ OpDesc(opClass='SimdAdd', opLat=4),
OpDesc(opClass='SimdAddAcc', opLat=4),
OpDesc(opClass='SimdAlu', opLat=4),
OpDesc(opClass='SimdCmp', opLat=4),
OpDesc(opClass='SimdCvt', opLat=3),
OpDesc(opClass='SimdMisc', opLat=3),
OpDesc(opClass='SimdMult',opLat=5),
OpDesc(opClass='SimdMultAcc',opLat=5),
OpDesc(opClass='SimdShift',opLat=3),
OpDesc(opClass='SimdShiftAcc', opLat=3),
OpDesc(opClass='SimdSqrt', opLat=9),
OpDesc(opClass='SimdFloatAdd',opLat=5),
OpDesc(opClass='SimdFloatAlu',opLat=5),
OpDesc(opClass='SimdFloatCmp', opLat=3),
OpDesc(opClass='SimdFloatCvt', opLat=3),
OpDesc(opClass='SimdFloatDiv', opLat=3),
OpDesc(opClass='SimdFloatMisc', opLat=3),
OpDesc(opClass='SimdFloatMult', opLat=3),
OpDesc(opClass='SimdFloatMultAcc',opLat=1),
OpDesc(opClass='SimdFloatSqrt', opLat=9),
OpDesc(opClass='FloatAdd', opLat=5),
OpDesc(opClass='FloatCmp', opLat=5),
OpDesc(opClass='FloatCvt', opLat=5),
OpDesc(opClass='FloatDiv', opLat=9, pipelined=False),
OpDesc(opClass='FloatSqrt', opLat=33, pipelined=False),
OpDesc(opClass='FloatMult', opLat=4) ]
count = 2
# Load/Store Units
class O3_ARM_v7a_Load(FUDesc):
opList = [ OpDesc(opClass='MemRead',opLat=2) ]
count = 1
class O3_ARM_v7a_Store(FUDesc):
opList = [OpDesc(opClass='MemWrite',opLat=2) ]
count = 1
# Functional Units for this CPU
class O3_ARM_v7a_FUP(FUPool):
FUList = [O3_ARM_v7a_Simple_Int(), O3_ARM_v7a_Complex_Int(),
O3_ARM_v7a_Load(), O3_ARM_v7a_Store(), O3_ARM_v7a_FP()]
# Bi-Mode Branch Predictor
class O3_ARM_v7a_BP(BiModeBP):
globalPredictorSize = 8192
globalCtrBits = 2
choicePredictorSize = 8192
choiceCtrBits = 2
BTBEntries = 2048
BTBTagSize = 18
RASSize = 16
instShiftAmt = 2
class O3_ARM_v7a_3(DerivO3CPU):
LQEntries = 16
SQEntries = 16
LSQDepCheckShift = 0
LFSTSize = 1024
SSITSize = 1024
decodeToFetchDelay = 1
renameToFetchDelay = 1
iewToFetchDelay = 1
commitToFetchDelay = 1
renameToDecodeDelay = 1
iewToDecodeDelay = 1
commitToDecodeDelay = 1
iewToRenameDelay = 1
commitToRenameDelay = 1
commitToIEWDelay = 1
fetchWidth = 3
fetchBufferSize = 16
fetchToDecodeDelay = 3
decodeWidth = 3
decodeToRenameDelay = 2
renameWidth = 3
renameToIEWDelay = 1
issueToExecuteDelay = 1
dispatchWidth = 6
issueWidth = 8
wbWidth = 8
fuPool = O3_ARM_v7a_FUP()
iewToCommitDelay = 1
renameToROBDelay = 1
commitWidth = 8
squashWidth = 8
trapLatency = 13
backComSize = 5
forwardComSize = 5
numPhysIntRegs = 128
numPhysFloatRegs = 192
numIQEntries = 32
numROBEntries = 40
switched_out = False
branchPred = O3_ARM_v7a_BP()
# Instruction Cache
class O3_ARM_v7a_ICache(Cache):
hit_latency = 1
response_latency = 1
mshrs = 2
tgts_per_mshr = 8
size = '32kB'
assoc = 2
is_read_only = True
# Writeback clean lines as well
writeback_clean = True
# Data Cache
class O3_ARM_v7a_DCache(Cache):
hit_latency = 2
response_latency = 2
mshrs = 6
tgts_per_mshr = 8
size = '32kB'
assoc = 2
write_buffers = 16
# Consider the L2 a victim cache also for clean lines
writeback_clean = True
# TLB Cache
# Use a cache as an L2 TLB
class O3_ARM_v7aWalkCache(Cache):
hit_latency = 4
response_latency = 4
mshrs = 6
tgts_per_mshr = 8
size = '1kB'
assoc = 8
write_buffers = 16
is_read_only = True
# Writeback clean lines as well
writeback_clean = True
# L2 Cache
class O3_ARM_v7aL2(Cache):
hit_latency = 12
response_latency = 12
mshrs = 16
tgts_per_mshr = 8
size = '1MB'
assoc = 16
write_buffers = 8
prefetch_on_access = True
clusivity = 'mostly_excl'
# Simple stride prefetcher
prefetcher = StridePrefetcher(degree=8, latency = 1)
tags = RandomRepl()
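# Minimal sketch of wiring these classes into a gem5 script; the helper
# methods are assumed to come from gem5's BaseCPU and are not defined here:
#   cpu = O3_ARM_v7a_3()
#   cpu.addPrivateSplitL1Caches(O3_ARM_v7a_ICache(), O3_ARM_v7a_DCache())
#   cpu.createInterruptController()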
|
{
"content_hash": "148fbecc03c82c933e6c47e1873f1bd8",
"timestamp": "",
"source": "github",
"line_count": 166,
"max_line_length": 70,
"avg_line_length": 28.99397590361446,
"alnum_prop": 0.6166632038229795,
"repo_name": "BellScurry/gem5-fault-injection",
"id": "a38273c103ae8f84ffda1c2af003a834a7d79a2a",
"size": "6390",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "configs/common/O3_ARM_v7a.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Assembly",
"bytes": "235599"
},
{
"name": "C",
"bytes": "1058406"
},
{
"name": "C++",
"bytes": "16334032"
},
{
"name": "CMake",
"bytes": "2202"
},
{
"name": "Emacs Lisp",
"bytes": "1969"
},
{
"name": "Groff",
"bytes": "8783"
},
{
"name": "HTML",
"bytes": "136695"
},
{
"name": "Java",
"bytes": "3179"
},
{
"name": "M4",
"bytes": "49620"
},
{
"name": "Makefile",
"bytes": "39213"
},
{
"name": "Perl",
"bytes": "33602"
},
{
"name": "Protocol Buffer",
"bytes": "11074"
},
{
"name": "Python",
"bytes": "4533632"
},
{
"name": "Shell",
"bytes": "59885"
},
{
"name": "VimL",
"bytes": "4335"
},
{
"name": "Visual Basic",
"bytes": "2884"
}
],
"symlink_target": ""
}
|
from libcloud.utils.py3 import parse_qs, urlparse
from libcloud.common.base import Connection
__all__ = ["get_response_object"]
def get_response_object(url, method="GET", headers=None, retry_failed=None):
"""
Utility function which uses libcloud's connection class to issue an HTTP
request.
:param url: URL to send the request to.
:type url: ``str``
:param method: HTTP method.
:type method: ``str``
:param headers: Optional request headers.
:type headers: ``dict``
    :param retry_failed: True to retry failed requests.
    :type retry_failed: ``bool``
:return: Response object.
:rtype: :class:`Response`.
"""
parsed_url = urlparse.urlparse(url)
parsed_qs = parse_qs(parsed_url.query)
secure = parsed_url.scheme == "https"
headers = headers or {}
method = method.upper()
con = Connection(secure=secure, host=parsed_url.netloc)
response = con.request(
action=parsed_url.path,
params=parsed_qs,
headers=headers,
method=method,
retry_failed=retry_failed,
)
return response
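# Illustrative usage; the URL is a placeholder, not a real endpoint:
#   response = get_response_object('https://example.com/api/items?limit=5')
#   print(response.status)  # HTTP status code
#   print(response.body)    # body returned by the server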
|
{
"content_hash": "35e860ca98639f509321843e7a94c2f3",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 76,
"avg_line_length": 26.170731707317074,
"alnum_prop": 0.6514445479962722,
"repo_name": "apache/libcloud",
"id": "40f4c1594c49cbdb6518db9ef85f4540df9b5440",
"size": "1855",
"binary": false,
"copies": "2",
"ref": "refs/heads/trunk",
"path": "libcloud/utils/connection.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2155"
},
{
"name": "HTML",
"bytes": "2545"
},
{
"name": "PowerShell",
"bytes": "410"
},
{
"name": "Python",
"bytes": "9105547"
},
{
"name": "Shell",
"bytes": "12994"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import models, migrations
import datetime
from django.utils import timezone
from django.utils.timezone import utc
from organizations import models as organization_models
def get_clubmaps():
return {
"American Club": "American",
"Britannia": "Britannia",
"Gard": "Gard",
"Japan Club": "Japan",
"London Club": "London",
"North of England Club": "North",
"Shipowners Club": "Shipowners",
"Standard": "Standard",
"Steamship Mutual": "Steamship",
"Swedish Club": "Swedish",
"UK P&I Club": "UK Mutual",
"West of England": "West",
"Skuld": "SKULD",
"IGPI": "IGP",
"Guru": "Guru"
}
def external_ids_for_pandiq(apps, schema_editor):
maps = get_clubmaps()
Organization = apps.get_model("organizations", "Organization")
for x in Organization.objects.filter(is_pandi_club=True):
x.external_id = maps[x.name]
x.save()
def nothing(apps, schema_editor):
return
class Migration(migrations.Migration):
dependencies = [
('organizations', '0008_auto_20160516_1306'),
]
operations = [
migrations.RunPython(
external_ids_for_pandiq,
nothing
),
]
|
{
"content_hash": "b7b707e16546513cf123127a5f20f415",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 66,
"avg_line_length": 25.26923076923077,
"alnum_prop": 0.5996955859969558,
"repo_name": "st8st8/django-organizations",
"id": "9a5bb99650078b928b9c6565b0faa3ee2eea51ba",
"size": "1338",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "organizations/migrations/0009_external_ids_for_pandiq.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "HTML",
"bytes": "8336"
},
{
"name": "Makefile",
"bytes": "2102"
},
{
"name": "Python",
"bytes": "258210"
}
],
"symlink_target": ""
}
|
import os
import shutil
import tempfile
#conary
from conary import errors
from conary.deps import deps
from conary.local import database
from conary.cmds import cscmd
from conary.repository import changeset
from conary.cmds.showchangeset import displayChangeSet
from conary import versions
#test
from conary_test import recipes
from conary_test import rephelp
class FileFromLine:
def __init__(self, line):
args = line.split()
# XXX eventually we may want to handle the optional args here
assert(len(args) in (9, 10))
if len(args) == 9:
(self.mode, self.nodes, self.owner,
self.grp, self.size, mo, day, tm, self.path) = args[0:9]
self.change = 'New'
else:
(self.change, self.mode, self.nodes, self.owner,
self.grp, self.size, mo, day, tm, self.path) = args[0:10]
def __repr__(self):
return "<%s %s>" % (self.change, self.path)
class ShowChangesetTest(rephelp.RepositoryHelper):
def _parseFileList(self, lines):
""" Takes the output of a file listing and returns tuples w/ time
removed """
files = {}
for line in lines:
if not line:
continue
newFile = FileFromLine(line)
files[newFile.path] = newFile
return files
def testAbsoluteChangeSet(self):
self.resetRepository()
d = tempfile.mkdtemp()
origDir = os.getcwd()
os.chdir(d)
self.writeFile('testcase.recipe', recipes.testRecipe1)
repos = self.openRepository()
try:
built, str = self.captureOutput(self.cookItem, repos, self.cfg,
'testcase.recipe')
cs = changeset.ChangeSetFromFile('testcase-1.0.ccs')
finally:
os.chdir(origDir)
shutil.rmtree(d)
rc, res = self.captureOutput(displayChangeSet, None, cs, None,
self.cfg)
assert(res == 'testcase=1.0-1-0.1\n')
rc, res = self.captureOutput(displayChangeSet, None, cs,
['testcase=1.0-1-0.1'], self.cfg,
showTroves=True)
assert(res == '''\
testcase=1.0-1-0.1
testcase:runtime=1.0-1-0.1
''')
rc, res = self.captureOutput(displayChangeSet, None, cs,
['testcase:runtime=1.0-1-0.1'],
self.cfg, fileVersions=True,
alwaysDisplayHeaders=True)
assert(res == '''\
testcase:runtime=1.0-1-0.1
/etc/changedconfig 1.0-1-0.1
/etc/unchangedconfig 1.0-1-0.1
/usr/bin/hello 1.0-1-0.1
/usr/share/changed 1.0-1-0.1
/usr/share/unchanged 1.0-1-0.1
''')
rc, res = self.captureOutput(displayChangeSet, None, cs,
['testcase:runtime=1.0-1-0.1'],
self.cfg, lsl=True)
lines = res.split('\n')
files = self._parseFileList(lines)
assert(len(files) == 5)
paths = files.keys()
paths.sort()
assert(paths == [ '/etc/changedconfig', '/etc/unchangedconfig',
'/usr/bin/hello', '/usr/share/changed',
'/usr/share/unchanged'])
for f in files.values():
assert f.change == 'New'
rc, res = self.captureOutput(displayChangeSet, None, cs,
['testcase:runtime=1.0-1-0.1'],
self.cfg, info=True)
# we don't test the output format of --info too closely, mostly just
# want to make sure it runs.
assert('testcase:runtime' in res)
assert('1.0-1-0.1' in res)
assert('is: x86' in res)
rc, res = self.captureOutput(displayChangeSet, None, cs,
['testcase:runtime=1.0-1-0.1'],
self.cfg, asDiff=True)
def testGitDiff(self):
# very simple test of scs --diff
t = self.addComponent('foo:run=1',
fileContents = [ ( '/foo', 'contents\n') ])
repos = self.openRepository()
cs = repos.createChangeSet([ ('foo:run', (None, None),
t.getNameVersionFlavor()[1:], False) ])
rc, res = self.captureOutput(displayChangeSet, None, cs, [],
self.cfg, asDiff=True)
self.assertEquals(res,
"diff --git a/foo b/foo\n"
"new user root\n"
"new group root\n"
"new mode 100644\n"
"--- a/dev/null\n"
"+++ b/foo\n"
"@@ -1,0 +1,1 @@\n"
"+contents\n")
def testPartialChangeSet(self):
db = database.Database(self.rootDir, self.cfg.dbPath)
self.repos = self.openRepository()
self.addTestPkg(1, content='r.Create("%(thisdocdir)s/README")')
self.cookTestPkg(1)
self.cfg.configLine('excludeTroves .*:runtime')
os.chdir(self.workDir)
cscmd.ChangeSetCommand(self.cfg, ['test1'], 'test.ccs')
cs = changeset.ChangeSetFromFile('test.ccs')
rc, res = self.captureOutput(displayChangeSet, None, cs,
['test1=1.0-1-1'], self.cfg,
showTroves=True)
assert(res == '''\
test1=1.0-1-1
test1:runtime=1.0-1-1
''')
def testEraseChangeSet(self):
db = database.Database(self.rootDir, self.cfg.dbPath)
self.repos = self.openRepository()
self.addTestPkg(1, content='r.Create("%(thisdocdir)s/README")')
self.cookTestPkg(1)
os.chdir(self.workDir)
cscmd.ChangeSetCommand(self.cfg, ['test1=1.0--'], 'test.ccs')
cs = changeset.ChangeSetFromFile('test.ccs')
rc, res = self.captureOutput(displayChangeSet, db,
cs, ['test1=1.0'], self.cfg)
assert(res == 'Erase test1=1.0-1-1\n')
def testChangedFiles(self):
# set up a recipe in a :source component
self.repos = self.openRepository()
self.addTestPkg(1, content='r.Create("/etc/foo", contents="A\\n"*1000)',
tag='myTag')
self.cookTestPkg(1)
res = self.addTestPkg(1, fileContents='a change in the file',
tag='myTag2')
self.cookTestPkg(1)
v1 = versions.VersionFromString('/localhost@rpl:linux/1.0-1-1')
v2 = versions.VersionFromString('/localhost@rpl:linux/1.0-2-1')
cs = self.repos.createChangeSet([('test1:runtime',
(v1, deps.Flavor()),
(v2, deps.Flavor()), False)])
rc, res = self.captureOutput(displayChangeSet, None, cs, None,
self.cfg, lsl=True)
lines = res.split('\n')
files = self._parseFileList(lines)
assert(files['/usr/bin/test1'].change == 'Mod')
assert(files['/etc/foo'].change == 'Del')
rc, res = self.captureOutput(displayChangeSet, None, cs,
None, self.cfg, lsl=True, showChanges=True)
lines = res.split('\n')
assert(lines[3].split()[1] == '52')
rc, res = self.captureOutput(displayChangeSet, None, cs,
None, self.cfg, lsl=True, showChanges=True,
tags=True)
lines = res.split('\n')
assert(lines[2].find('{myTag}') != -1)
assert(lines[3].find('{myTag2}') != -1)
rc, res = self.captureOutput(displayChangeSet, None, cs,
None, self.cfg, lsl=True, showChanges=True,
ids=True, sha1s=True)
lines = res.split('\n')
oldFile = lines[2].split()
newFile = lines[3].split()
# two changes btw the files -- sha1 and size -- possibly date
assert(len(newFile) in (3, 6))
assert(oldFile[2] != newFile[1])
assert(len(newFile[1]) == 40)
def testGroupChangeSet(self):
repos = self.openRepository()
os.chdir(self.workDir)
self.addQuickTestComponent('test:runtime', '1.0',
fileContents=['/usr/run1'])
self.addQuickTestComponent('test:doc', '1.0',
fileContents=['/usr/doc1'])
self.addQuickTestCollection('test', '1.0', ['test:runtime', 'test:doc'])
self.addQuickTestCollection('group-test', '1.0', ['test'])
self.changeset(repos, ['group-test'], 'group-test.ccs', recurse=False)
cs = changeset.ChangeSetFromFile('group-test.ccs')
rc, res = self.captureOutput(displayChangeSet, None, cs, [], self.cfg)
assert(res == 'group-test=1.0-1-1\n')
rc, res = self.captureOutput(displayChangeSet, None, cs, [], self.cfg,
ls=True, alwaysDisplayHeaders=True,
recurseRepos=True)
assert(res == '''\
group-test=1.0-1-1
test=1.0-1-1
test:doc=1.0-1-1
/usr/doc1
test:runtime=1.0-1-1
/usr/run1
''')
def testExactFlavor(self):
self.addComponent('foo:run[~ssl]')
repos = self.openRepository()
csPath = self.workDir + '/foo.ccs'
self.changeset(repos, ['foo:run'], csPath)
cs = changeset.ChangeSetFromFile(csPath)
self.assertRaises(errors.TroveNotFound,
displayChangeSet, None, cs, ['foo:run[ssl]'], self.cfg,
exactFlavors=True)
self.assertRaises(errors.TroveNotFound,
displayChangeSet, None, cs, ['foo:run'], self.cfg,
exactFlavors=True)
self.captureOutput(displayChangeSet, None, cs, ['foo:run[~ssl]'],
self.cfg,exactFlavors=True)
|
{
"content_hash": "c2db7fc1547273b446076862f036b422",
"timestamp": "",
"source": "github",
"line_count": 243,
"max_line_length": 80,
"avg_line_length": 41.77366255144033,
"alnum_prop": 0.5176829868978425,
"repo_name": "fedora-conary/conary",
"id": "358c27588f3d861c68e82db84b5167688ce54e04",
"size": "10738",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "conary_test/showchangesettest.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "481681"
},
{
"name": "C++",
"bytes": "8244"
},
{
"name": "CSS",
"bytes": "3920"
},
{
"name": "Erlang",
"bytes": "477"
},
{
"name": "Perl",
"bytes": "45629"
},
{
"name": "Python",
"bytes": "10586616"
},
{
"name": "Shell",
"bytes": "4657"
},
{
"name": "Standard ML",
"bytes": "2756"
}
],
"symlink_target": ""
}
|
import os
from types import FunctionType
import logging
import json
import re
import datetime
import stat
import csv, chardet, StringIO
import time
from constance import config
from django.conf import settings as dj_settings
from django.core.urlresolvers import reverse
from django.contrib import messages
from django.http import HttpResponse, Http404, HttpResponseRedirect, HttpResponseNotAllowed
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.utils import timezone
from django.utils.translation import ugettext as _
import seaserv
from seaserv import ccnet_threaded_rpc, seafserv_threaded_rpc, \
seafile_api, get_group, get_group_members
from pysearpc import SearpcError
from seahub.base.accounts import User
from seahub.base.models import UserLastLogin
from seahub.base.decorators import sys_staff_required, require_POST
from seahub.base.sudo_mode import update_sudo_mode_ts
from seahub.base.templatetags.seahub_tags import tsstr_sec, email2nickname
from seahub.auth import authenticate
from seahub.auth.decorators import login_required, login_required_ajax
from seahub.constants import GUEST_USER, DEFAULT_USER
from seahub.institutions.models import Institution, InstitutionAdmin
from seahub.utils import IS_EMAIL_CONFIGURED, string2list, is_valid_username, \
is_pro_version, send_html_email, get_user_traffic_list, get_server_id, \
clear_token, gen_file_get_url, is_org_context, handle_virus_record, \
get_virus_record_by_id, get_virus_record, FILE_AUDIT_ENABLED, \
get_max_upload_file_size
from seahub.utils.file_size import get_file_size_unit
from seahub.utils.rpc import mute_seafile_api
from seahub.utils.licenseparse import parse_license
from seahub.utils.sysinfo import get_platform_name
from seahub.utils.mail import send_html_email_with_dj_template
from seahub.utils.ms_excel import write_xls
from seahub.views.ajax import (get_related_users_by_org_repo,
get_related_users_by_repo)
from seahub.views import get_system_default_repo_id, gen_path_link
from seahub.forms import SetUserQuotaForm, AddUserForm, BatchAddUserForm
from seahub.options.models import UserOptions
from seahub.profile.models import Profile, DetailedProfile
from seahub.signals import repo_deleted
from seahub.share.models import FileShare, UploadLinkShare
import seahub.settings as settings
from seahub.settings import INIT_PASSWD, SITE_NAME, SITE_ROOT, \
SEND_EMAIL_ON_ADDING_SYSTEM_MEMBER, SEND_EMAIL_ON_RESETTING_USER_PASSWD, \
ENABLE_SYS_ADMIN_VIEW_REPO
try:
from seahub.settings import ENABLE_TRIAL_ACCOUNT
except:
ENABLE_TRIAL_ACCOUNT = False
if ENABLE_TRIAL_ACCOUNT:
from seahub_extra.trialaccount.models import TrialAccount
try:
from seahub.settings import MULTI_TENANCY
except ImportError:
MULTI_TENANCY = False
logger = logging.getLogger(__name__)
@login_required
@sys_staff_required
def sysadmin(request):
max_upload_file_size = get_max_upload_file_size()
folder_perm_enabled = True if is_pro_version() and settings.ENABLE_FOLDER_PERM else False
return render_to_response('sysadmin/sysadmin_backbone.html', {
'enable_upload_folder': settings.ENABLE_UPLOAD_FOLDER,
'enable_resumable_fileupload': settings.ENABLE_RESUMABLE_FILEUPLOAD,
'enable_thumbnail': settings.ENABLE_THUMBNAIL,
'thumbnail_default_size': settings.THUMBNAIL_DEFAULT_SIZE,
'thumbnail_size_for_grid': settings.THUMBNAIL_SIZE_FOR_GRID,
'enable_encrypted_library': config.ENABLE_ENCRYPTED_LIBRARY,
'enable_repo_history_setting': config.ENABLE_REPO_HISTORY_SETTING,
'max_upload_file_size': max_upload_file_size,
'folder_perm_enabled': folder_perm_enabled,
'is_pro': True if is_pro_version() else False,
'file_audit_enabled': FILE_AUDIT_ENABLED
}, context_instance=RequestContext(request))
@login_required
@sys_staff_required
def sys_info(request):
"""System info(members, pro, ..) page.
Arguments:
- `request`:
"""
# count repos
repos_count = mute_seafile_api.count_repos()
# count groups
try:
groups_count = len(ccnet_threaded_rpc.get_all_groups(-1, -1))
except Exception as e:
logger.error(e)
groups_count = 0
# count orgs
if MULTI_TENANCY:
try:
org_count = ccnet_threaded_rpc.count_orgs()
except Exception as e:
logger.error(e)
org_count = 0
else:
org_count = -1
# count users
try:
active_db_users = ccnet_threaded_rpc.count_emailusers('DB')
except Exception as e:
logger.error(e)
active_db_users = 0
try:
active_ldap_users = ccnet_threaded_rpc.count_emailusers('LDAP')
except Exception as e:
logger.error(e)
active_ldap_users = 0
try:
inactive_db_users = ccnet_threaded_rpc.count_inactive_emailusers('DB')
except Exception as e:
logger.error(e)
inactive_db_users = 0
try:
inactive_ldap_users = ccnet_threaded_rpc.count_inactive_emailusers('LDAP')
except Exception as e:
logger.error(e)
inactive_ldap_users = 0
active_users = active_db_users + active_ldap_users if active_ldap_users > 0 \
else active_db_users
inactive_users = inactive_db_users + inactive_ldap_users if inactive_ldap_users > 0 \
else inactive_db_users
is_pro = is_pro_version()
if is_pro:
license_file = os.path.join(settings.PROJECT_ROOT, '../../seafile-license.txt')
license_dict = parse_license(license_file)
else:
license_dict = {}
return render_to_response('sysadmin/sys_info.html', {
'users_count': active_users + inactive_users,
'active_users_count': active_users,
'repos_count': repos_count,
'groups_count': groups_count,
'org_count': org_count,
'is_pro': is_pro,
'license_dict': license_dict,
}, context_instance=RequestContext(request))
@login_required
@sys_staff_required
def sys_repo_admin(request):
# Make sure page request is an int. If not, deliver first page.
try:
current_page = int(request.GET.get('page', '1'))
per_page = int(request.GET.get('per_page', '25'))
except ValueError:
current_page = 1
per_page = 25
repos_all = seafile_api.get_repo_list(per_page * (current_page -1),
per_page + 1)
repos = repos_all[:per_page]
if len(repos_all) == per_page + 1:
page_next = True
else:
page_next = False
repos = filter(lambda r: not r.is_virtual, repos)
default_repo_id = get_system_default_repo_id()
repos = filter(lambda r: not r.repo_id == default_repo_id, repos)
for repo in repos:
try:
repo.owner = seafile_api.get_repo_owner(repo.id)
except:
repo.owner = "failed to get"
return render_to_response(
'sysadmin/sys_repo_admin.html', {
'enable_sys_admin_view_repo': ENABLE_SYS_ADMIN_VIEW_REPO,
'is_pro_version': is_pro_version(),
'repos': repos,
'current_page': current_page,
'prev_page': current_page-1,
'next_page': current_page+1,
'per_page': per_page,
'page_next': page_next,
},
context_instance=RequestContext(request))
def can_view_sys_admin_repo(repo):
default_repo_id = get_system_default_repo_id()
is_default_repo = True if repo.id == default_repo_id else False
if is_default_repo:
return True
elif repo.encrypted:
return False
elif is_pro_version() and ENABLE_SYS_ADMIN_VIEW_REPO:
return True
else:
return False
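# Summary of the branches above, checked in order: the system default repo is
# always viewable; an encrypted repo never is; otherwise viewing requires the
# pro edition with ENABLE_SYS_ADMIN_VIEW_REPO enabled.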
@login_required
@sys_staff_required
def sys_admin_repo_download_file(request, repo_id):
next = request.META.get('HTTP_REFERER', None)
if not next:
next = reverse('sys_admin_repo')
repo = seafile_api.get_repo(repo_id)
if not repo:
messages.error(request, _(u'Library does not exist'))
return HttpResponseRedirect(next)
path = request.GET.get('p', '')
obj_id = seafile_api.get_file_id_by_path(repo_id, path)
if not obj_id:
messages.error(request, _(u'Unable to download file, invalid file path'))
return HttpResponseRedirect(next)
if not can_view_sys_admin_repo(repo):
messages.error(request, _(u'Unable to view library'))
return HttpResponseRedirect(next)
try:
token = seafile_api.get_fileserver_access_token(repo_id, obj_id,
'download', request.user.username)
except SearpcError as e:
logger.error(e)
messages.error(request, _(u'Unable to view library'))
return HttpResponseRedirect(next)
file_name = os.path.basename(path.rstrip('/'))
redirect_url = gen_file_get_url(token, file_name)
return HttpResponseRedirect(redirect_url)
@login_required
@sys_staff_required
def sys_admin_repo(request, repo_id):
next = reverse('sys_repo_admin')
repo = seafile_api.get_repo(repo_id)
if not repo:
messages.error(request, _(u'Library does not exist'))
return HttpResponseRedirect(next)
if not can_view_sys_admin_repo(repo):
messages.error(request, _(u'Unable to view library'))
return HttpResponseRedirect(next)
path = request.GET.get('p', '/')
if path[-1] != '/':
path = path + '/'
dir_id = seafile_api.get_dir_id_by_path(repo_id, path)
if not dir_id:
messages.error(request, _(u'Unable to view library, wrong folder path.'))
return HttpResponseRedirect(next)
if is_org_context(request):
repo_owner = seafile_api.get_org_repo_owner(repo_id)
else:
repo_owner = seafile_api.get_repo_owner(repo_id)
try:
dirs = seafserv_threaded_rpc.list_dir_with_perm(repo_id, path,
dir_id, repo_owner,
-1, -1)
except SearpcError as e:
logger.error(e)
messages.error(request, _(u'Unable to view library'))
return HttpResponseRedirect(next)
file_list, dir_list = [], []
for dirent in dirs:
dirent.last_modified = dirent.mtime
if stat.S_ISDIR(dirent.props.mode):
dir_list.append(dirent)
else:
if repo.version == 0:
dirent.file_size = seafile_api.get_file_size(repo.store_id,
repo.version,
dirent.obj_id)
else:
dirent.file_size = dirent.size
file_list.append(dirent)
zipped = gen_path_link(path, repo.name)
default_repo_id = get_system_default_repo_id()
is_default_repo = True if repo_id == default_repo_id else False
return render_to_response('sysadmin/admin_repo_view.html', {
'repo': repo,
'repo_owner': repo_owner,
'dir_list': dir_list,
'file_list': file_list,
'path': path,
'zipped': zipped,
'is_default_repo': is_default_repo,
'max_upload_file_size': seaserv.MAX_UPLOAD_FILE_SIZE,
}, context_instance=RequestContext(request))
@login_required
@sys_staff_required
def sys_list_system(request):
"""List system repos.
"""
repos = []
sys_repo = seafile_api.get_repo(get_system_default_repo_id())
repos.append(sys_repo)
return render_to_response('sysadmin/sys_list_system.html', {
'repos': repos,
}, context_instance=RequestContext(request))
@login_required
@sys_staff_required
def sys_repo_trash(request):
""" List deleted repos (by owner) """
search_owner = request.GET.get('name', '')
if search_owner:
if is_valid_username(search_owner):
repos = seafserv_threaded_rpc.get_trash_repos_by_owner(search_owner)
return render_to_response(
'sysadmin/sys_repo_trash.html', {
'repos': repos,
'search_owner': search_owner,
}, context_instance=RequestContext(request))
else:
messages.error(request, _(u'Invalid username'))
return HttpResponseRedirect(reverse('sys_repo_trash'))
try:
current_page = int(request.GET.get('page', '1'))
per_page = int(request.GET.get('per_page', '25'))
except ValueError:
current_page = 1
per_page = 25
repos_all = seafserv_threaded_rpc.get_trash_repo_list(per_page * (current_page -1),
per_page + 1)
repos = repos_all[:per_page]
if len(repos_all) == per_page + 1:
page_next = True
else:
page_next = False
return render_to_response(
'sysadmin/sys_repo_trash.html', {
'repos': repos,
'current_page': current_page,
'prev_page': current_page-1,
'next_page': current_page+1,
'per_page': per_page,
'page_next': page_next,
}, context_instance=RequestContext(request))
@login_required
@sys_staff_required
@require_POST
def sys_repo_trash_restore(request, repo_id):
"""Restore deleted repo by id"""
referer = request.META.get('HTTP_REFERER', None)
next = reverse('sys_repo_trash') if referer is None else referer
try:
seafserv_threaded_rpc.restore_repo_from_trash(repo_id)
messages.success(request, _(u'Success'))
except SearpcError, e:
logger.error(e)
messages.error(request, _(u'Failed'))
return HttpResponseRedirect(next)
@login_required
@sys_staff_required
@require_POST
def sys_repo_trash_remove(request, repo_id):
"""Remove deleted repo by id"""
referer = request.META.get('HTTP_REFERER', None)
next = reverse('sys_repo_trash') if referer is None else referer
try:
seafserv_threaded_rpc.del_repo_from_trash(repo_id)
messages.success(request, _(u'Success'))
except SearpcError, e:
logger.error(e)
messages.error(request, _(u'Failed'))
return HttpResponseRedirect(next)
@login_required
@sys_staff_required
@require_POST
def sys_repo_trash_clear(request):
"""Clear repo trash (by owner)"""
next = reverse('sys_repo_trash')
owner = request.POST.get('owner', '')
try:
if owner:
if is_valid_username(owner):
seafserv_threaded_rpc.empty_repo_trash_by_owner(owner)
else:
messages.error(request, _(u'Invalid username'))
return HttpResponseRedirect(next)
else:
seafserv_threaded_rpc.empty_repo_trash()
except SearpcError, e:
logger.error(e)
messages.error(request, _(u'Failed'))
messages.success(request, _(u'Success'))
return HttpResponseRedirect(next)
def list_repos_by_name_and_owner(repo_name, owner):
repos = []
owned_repos = seafile_api.get_owned_repo_list(owner)
for repo in owned_repos:
if not repo.name:
continue
if repo_name in repo.name:
repo.owner = owner
repos.append(repo)
return repos
def list_repos_by_name(repo_name):
repos = []
repos_all = seafile_api.get_repo_list(-1, -1)
for repo in repos_all:
if not repo.name:
continue
if repo_name in repo.name:
try:
repo.owner = seafile_api.get_repo_owner(repo.id)
except SearpcError:
repo.owner = "failed to get"
repos.append(repo)
return repos
def list_repos_by_owner(owner):
repos = seafile_api.get_owned_repo_list(owner)
for e in repos:
e.owner = owner
return repos
@login_required
@sys_staff_required
def sys_repo_search(request):
"""Search a repo.
"""
repo_name = request.GET.get('name', '')
owner = request.GET.get('owner', '')
repos = []
    if repo_name and owner:  # search by name and owner
repos = list_repos_by_name_and_owner(repo_name, owner)
elif repo_name: # search by name
repos = list_repos_by_name(repo_name)
elif owner: # search by owner
repos = list_repos_by_owner(owner)
return render_to_response('sysadmin/sys_repo_search.html', {
'repos': repos,
'name': repo_name,
'owner': owner,
'enable_sys_admin_view_repo': ENABLE_SYS_ADMIN_VIEW_REPO,
}, context_instance=RequestContext(request))
def _populate_user_quota_usage(user):
"""Populate space/share quota to user.
Arguments:
- `user`:
"""
orgs = ccnet_threaded_rpc.get_orgs_by_user(user.email)
try:
if orgs:
user.org = orgs[0]
org_id = user.org.org_id
user.space_usage = seafserv_threaded_rpc.get_org_user_quota_usage(org_id, user.email)
user.space_quota = seafserv_threaded_rpc.get_org_user_quota(org_id, user.email)
else:
user.space_usage = seafile_api.get_user_self_usage(user.email)
user.space_quota = seafile_api.get_user_quota(user.email)
except SearpcError as e:
logger.error(e)
user.space_usage = -1
user.space_quota = -1
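# Behaviour summary of the helper above: org users get org-scoped quota and
# usage, other users get personal quota and usage, and any RPC failure leaves
# both fields set to -1.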
@login_required
@sys_staff_required
def sys_user_admin(request):
"""List all users from database.
"""
try:
from seahub_extra.plan.models import UserPlan
enable_user_plan = True
except ImportError:
enable_user_plan = False
if enable_user_plan and request.GET.get('filter', '') == 'paid':
# show paid users
users = []
ups = UserPlan.objects.all()
for up in ups:
try:
u = User.objects.get(up.username)
except User.DoesNotExist:
continue
_populate_user_quota_usage(u)
users.append(u)
last_logins = UserLastLogin.objects.filter(username__in=[x.username for x in users])
for u in users:
for e in last_logins:
if e.username == u.username:
u.last_login = e.last_login
return render_to_response('sysadmin/sys_useradmin_paid.html', {
'users': users,
'enable_user_plan': enable_user_plan,
}, context_instance=RequestContext(request))
### List all users
# Make sure page request is an int. If not, deliver first page.
try:
current_page = int(request.GET.get('page', '1'))
per_page = int(request.GET.get('per_page', '25'))
except ValueError:
current_page = 1
per_page = 25
users_plus_one = seaserv.get_emailusers('DB', per_page * (current_page - 1),
per_page + 1)
if len(users_plus_one) == per_page + 1:
page_next = True
else:
page_next = False
users = users_plus_one[:per_page]
last_logins = UserLastLogin.objects.filter(username__in=[x.email for x in users])
if ENABLE_TRIAL_ACCOUNT:
trial_users = TrialAccount.objects.filter(user_or_org__in=[x.email for x in users])
else:
trial_users = []
for user in users:
if user.email == request.user.email:
user.is_self = True
_populate_user_quota_usage(user)
# check user's role
if user.role == GUEST_USER:
user.is_guest = True
else:
user.is_guest = False
# populate user last login time
user.last_login = None
for last_login in last_logins:
if last_login.username == user.email:
user.last_login = last_login.last_login
user.trial_info = None
for trial_user in trial_users:
if trial_user.user_or_org == user.email:
user.trial_info = {'expire_date': trial_user.expire_date}
have_ldap = True if len(seaserv.get_emailusers('LDAP', 0, 1)) > 0 else False
platform = get_platform_name()
server_id = get_server_id()
pro_server = 1 if is_pro_version() else 0
return render_to_response(
'sysadmin/sys_useradmin.html', {
'users': users,
'current_page': current_page,
'prev_page': current_page-1,
'next_page': current_page+1,
'per_page': per_page,
'page_next': page_next,
'have_ldap': have_ldap,
'platform': platform,
'server_id': server_id[:8],
'default_user': DEFAULT_USER,
'guest_user': GUEST_USER,
'is_pro': is_pro_version(),
'pro_server': pro_server,
'enable_user_plan': enable_user_plan,
}, context_instance=RequestContext(request))
@login_required
@sys_staff_required
def sys_useradmin_export_excel(request):
""" Export all users from database to excel
"""
next = request.META.get('HTTP_REFERER', None)
if not next:
next = SITE_ROOT
try:
users = seaserv.get_emailusers('DB', -1, -1) + \
seaserv.get_emailusers('LDAPImport', -1, -1)
except Exception as e:
logger.error(e)
messages.error(request, _(u'Failed to export Excel'))
return HttpResponseRedirect(next)
if is_pro_version():
is_pro = True
else:
is_pro = False
if is_pro:
head = [_("Email"), _("Status"), _("Role"), _("Create At"),
_("Last Login"), _("Admin"), _("LDAP(imported)"),]
else:
head = [_("Email"), _("Status"), _("Create At"),
_("Last Login"), _("Admin"), _("LDAP(imported)"),]
data_list = []
last_logins = UserLastLogin.objects.filter(username__in=[x.email for x in users])
for user in users:
# populate user last login time
user.last_login = None
for last_login in last_logins:
if last_login.username == user.email:
user.last_login = last_login.last_login
if user.is_active:
status = _('Active')
else:
status = _('Inactive')
create_at = tsstr_sec(user.ctime) if user.ctime else ''
last_login = user.last_login.strftime("%Y-%m-%d %H:%M:%S") if \
user.last_login else ''
is_admin = _('Yes') if user.is_staff else ''
ldap_import = _('Yes') if user.source == 'LDAPImport' else ''
if is_pro:
if user.role == GUEST_USER:
role = _('Guest')
else:
role = _('Default')
row = [user.email, status, role, create_at,
last_login, is_admin, ldap_import]
else:
row = [user.email, status, create_at, last_login,
is_admin, ldap_import]
data_list.append(row)
wb = write_xls('users', head, data_list)
if not wb:
messages.error(request, _(u'Failed to export Excel'))
return HttpResponseRedirect(next)
response = HttpResponse(content_type='application/ms-excel')
response['Content-Disposition'] = 'attachment; filename=users.xlsx'
wb.save(response)
return response
@login_required
@sys_staff_required
def sys_user_admin_ldap_imported(request):
"""List all users from LDAP imported.
"""
# Make sure page request is an int. If not, deliver first page.
try:
current_page = int(request.GET.get('page', '1'))
per_page = int(request.GET.get('per_page', '25'))
except ValueError:
current_page = 1
per_page = 25
users_plus_one = seaserv.get_emailusers('LDAPImport',
per_page * (current_page - 1),
per_page + 1)
if len(users_plus_one) == per_page + 1:
page_next = True
else:
page_next = False
users = users_plus_one[:per_page]
last_logins = UserLastLogin.objects.filter(username__in=[x.email for x in users])
for user in users:
if user.email == request.user.email:
user.is_self = True
_populate_user_quota_usage(user)
# populate user last login time
user.last_login = None
for last_login in last_logins:
if last_login.username == user.email:
user.last_login = last_login.last_login
return render_to_response(
'sysadmin/sys_user_admin_ldap_imported.html', {
'users': users,
'current_page': current_page,
'prev_page': current_page-1,
'next_page': current_page+1,
'per_page': per_page,
'page_next': page_next,
'is_pro': is_pro_version(),
}, context_instance=RequestContext(request))
@login_required
@sys_staff_required
def sys_user_admin_ldap(request):
"""List all users from LDAP.
"""
# Make sure page request is an int. If not, deliver first page.
try:
current_page = int(request.GET.get('page', '1'))
per_page = int(request.GET.get('per_page', '25'))
except ValueError:
current_page = 1
per_page = 25
users_plus_one = seaserv.get_emailusers('LDAP',
per_page * (current_page - 1),
per_page + 1)
if len(users_plus_one) == per_page + 1:
page_next = True
else:
page_next = False
users = users_plus_one[:per_page]
last_logins = UserLastLogin.objects.filter(username__in=[x.email for x in users])
for user in users:
if user.email == request.user.email:
user.is_self = True
_populate_user_quota_usage(user)
# populate user last login time
user.last_login = None
for last_login in last_logins:
if last_login.username == user.email:
user.last_login = last_login.last_login
return render_to_response(
'sysadmin/sys_useradmin_ldap.html', {
'users': users,
'current_page': current_page,
'prev_page': current_page-1,
'next_page': current_page+1,
'per_page': per_page,
'page_next': page_next,
'is_pro': is_pro_version(),
},
context_instance=RequestContext(request))
@login_required
@sys_staff_required
def sys_user_admin_admins(request):
"""List all admins from database and ldap imported
"""
db_users = seaserv.get_emailusers('DB', -1, -1)
    ldap_imported_users = seaserv.get_emailusers('LDAPImport', -1, -1)
    admin_users = []
    not_admin_users = []
    for user in db_users + ldap_imported_users:
if user.is_staff is True:
admin_users.append(user)
else:
not_admin_users.append(user)
last_logins = UserLastLogin.objects.filter(username__in=[x.email for x in admin_users])
for user in admin_users:
if user.email == request.user.email:
user.is_self = True
_populate_user_quota_usage(user)
# check db user's role
if user.source == "DB":
if user.role == GUEST_USER:
user.is_guest = True
else:
user.is_guest = False
# populate user last login time
user.last_login = None
for last_login in last_logins:
if last_login.username == user.email:
user.last_login = last_login.last_login
have_ldap = True if len(seaserv.get_emailusers('LDAP', 0, 1)) > 0 else False
return render_to_response(
'sysadmin/sys_useradmin_admins.html', {
'users': admin_users,
'not_admin_users': not_admin_users,
'have_ldap': have_ldap,
'default_user': DEFAULT_USER,
'guest_user': GUEST_USER,
'is_pro': is_pro_version(),
}, context_instance=RequestContext(request))
@login_required
@sys_staff_required
def user_info(request, email):
org_name = None
space_quota = space_usage = 0
org = ccnet_threaded_rpc.get_orgs_by_user(email)
if not org:
owned_repos = mute_seafile_api.get_owned_repo_list(email,
ret_corrupted=True)
in_repos = mute_seafile_api.get_share_in_repo_list(email, -1, -1)
space_usage = mute_seafile_api.get_user_self_usage(email)
space_quota = mute_seafile_api.get_user_quota(email)
else:
org_id = org[0].org_id
org_name = org[0].org_name
space_usage = seafserv_threaded_rpc.get_org_user_quota_usage(org_id,
email)
space_quota = seafserv_threaded_rpc.get_org_user_quota(org_id, email)
owned_repos = seafile_api.get_org_owned_repo_list(org_id, email,
ret_corrupted=True)
in_repos = seafile_api.get_org_share_in_repo_list(org_id, email, -1, -1)
# get user profile
profile = Profile.objects.get_profile_by_user(email)
d_profile = DetailedProfile.objects.get_detailed_profile_by_user(email)
user_shared_links = []
# download links
p_fileshares = []
fileshares = list(FileShare.objects.filter(username=email))
for fs in fileshares:
try:
r = seafile_api.get_repo(fs.repo_id)
if not r:
fs.delete()
continue
if fs.is_file_share_link():
if seafile_api.get_file_id_by_path(r.id, fs.path) is None:
fs.delete()
continue
fs.filename = os.path.basename(fs.path)
path = fs.path.rstrip('/') # Normalize file path
obj_id = seafile_api.get_file_id_by_path(r.id, path)
fs.file_size = seafile_api.get_file_size(r.store_id,
r.version, obj_id)
else:
if seafile_api.get_dir_id_by_path(r.id, fs.path) is None:
fs.delete()
continue
if fs.path == '/':
fs.filename = '/'
else:
fs.filename = os.path.basename(fs.path.rstrip('/'))
path = fs.path
if path[-1] != '/': # Normalize dir path
path += '/'
# get dir size
dir_id = seafile_api.get_dir_id_by_commit_and_path(r.id, r.head_cmmt_id, path)
fs.dir_size = seafile_api.get_dir_size(r.store_id, r.version, dir_id)
fs.is_download = True
p_fileshares.append(fs)
except SearpcError as e:
logger.error(e)
continue
p_fileshares.sort(key=lambda x: x.view_cnt, reverse=True)
user_shared_links += p_fileshares
# upload links
uploadlinks = list(UploadLinkShare.objects.filter(username=email))
p_uploadlinks = []
for link in uploadlinks:
try:
r = seafile_api.get_repo(link.repo_id)
if not r:
link.delete()
continue
if seafile_api.get_dir_id_by_path(r.id, link.path) is None:
link.delete()
continue
if link.path == '/':
link.dir_name = '/'
else:
link.dir_name = os.path.basename(link.path.rstrip('/'))
link.is_upload = True
p_uploadlinks.append(link)
except SearpcError as e:
logger.error(e)
continue
p_uploadlinks.sort(key=lambda x: x.view_cnt, reverse=True)
user_shared_links += p_uploadlinks
try:
personal_groups = seaserv.get_personal_groups_by_user(email)
except SearpcError as e:
logger.error(e)
personal_groups = []
for g in personal_groups:
try:
is_group_staff = seaserv.check_group_staff(g.id, email)
except SearpcError as e:
logger.error(e)
is_group_staff = False
if email == g.creator_name:
g.role = _('Owner')
elif is_group_staff:
g.role = _('Admin')
else:
g.role = _('Member')
return render_to_response(
'sysadmin/userinfo.html', {
'owned_repos': owned_repos,
'space_quota': space_quota,
'space_usage': space_usage,
'in_repos': in_repos,
'email': email,
'profile': profile,
'd_profile': d_profile,
'org_name': org_name,
'user_shared_links': user_shared_links,
'enable_sys_admin_view_repo': ENABLE_SYS_ADMIN_VIEW_REPO,
'personal_groups': personal_groups,
}, context_instance=RequestContext(request))
@login_required_ajax
@sys_staff_required
def user_set_quota(request, email):
if request.method != 'POST':
raise Http404
content_type = 'application/json; charset=utf-8'
result = {}
f = SetUserQuotaForm(request.POST)
if f.is_valid():
email = f.cleaned_data['email']
space_quota_mb = f.cleaned_data['space_quota']
space_quota = space_quota_mb * get_file_size_unit('MB')
org = ccnet_threaded_rpc.get_orgs_by_user(email)
try:
if not org:
seafile_api.set_user_quota(email, space_quota)
else:
org_id = org[0].org_id
org_quota_mb = seafserv_threaded_rpc.get_org_quota(org_id) / get_file_size_unit('MB')
if space_quota_mb > org_quota_mb:
                    result['error'] = _(u'Failed to set quota: maximum quota is %d MB') % \
                            org_quota_mb
return HttpResponse(json.dumps(result), status=400, content_type=content_type)
else:
seafserv_threaded_rpc.set_org_user_quota(org_id, email, space_quota)
except:
result['error'] = _(u'Failed to set quota: internal server error')
return HttpResponse(json.dumps(result), status=500, content_type=content_type)
result['success'] = True
return HttpResponse(json.dumps(result), content_type=content_type)
else:
result['error'] = str(f.errors.values()[0])
return HttpResponse(json.dumps(result), status=400, content_type=content_type)
@login_required_ajax
@sys_staff_required
def sys_org_set_quota(request, org_id):
if request.method != 'POST':
raise Http404
content_type = 'application/json; charset=utf-8'
result = {}
org_id = int(org_id)
quota_mb = int(request.POST.get('quota', 0))
quota = quota_mb * get_file_size_unit('MB')
try:
seafserv_threaded_rpc.set_org_quota(org_id, quota)
except SearpcError as e:
logger.error(e)
result['error'] = _(u'Failed to set quota: internal server error')
return HttpResponse(json.dumps(result), status=500, content_type=content_type)
result['success'] = True
return HttpResponse(json.dumps(result), content_type=content_type)
@login_required
@sys_staff_required
@require_POST
def user_remove(request, email):
"""Remove user"""
referer = request.META.get('HTTP_REFERER', None)
next = reverse('sys_useradmin') if referer is None else referer
try:
user = User.objects.get(email=email)
org = ccnet_threaded_rpc.get_orgs_by_user(user.email)
if org:
if org[0].creator == user.email:
messages.error(request, _(u'Failed to delete: the user is an organization creator'))
return HttpResponseRedirect(next)
org_id = org[0].org_id
org_user_repos = seafile_api.get_org_owned_repo_list(org_id, user.email)
for repo in org_user_repos:
seafile_api.remove_repo(repo.id)
user.delete()
messages.success(request, _(u'Successfully deleted %s') % user.username)
except User.DoesNotExist:
messages.error(request, _(u'Failed to delete: the user does not exist'))
return HttpResponseRedirect(next)
@login_required
@sys_staff_required
@require_POST
def remove_trial(request, user_or_org):
"""Remove trial account.
Arguments:
- `request`:
"""
if not ENABLE_TRIAL_ACCOUNT:
raise Http404
referer = request.META.get('HTTP_REFERER', None)
next = reverse('sys_useradmin') if referer is None else referer
TrialAccount.objects.filter(user_or_org=user_or_org).delete()
    messages.success(request, _('Successfully removed trial for: %s') % user_or_org)
return HttpResponseRedirect(next)
# @login_required
# @sys_staff_required
# def user_make_admin(request, user_id):
# """Set user as system admin."""
# try:
# user = User.objects.get(id=int(user_id))
# user.is_staff = True
# user.save()
# messages.success(request, _(u'Successfully set %s as admin') % user.username)
# except User.DoesNotExist:
# messages.error(request, _(u'Failed to set admin: the user does not exist'))
# referer = request.META.get('HTTP_REFERER', None)
# next = reverse('sys_useradmin') if referer is None else referer
# return HttpResponseRedirect(next)
@login_required
@sys_staff_required
@require_POST
def user_remove_admin(request, email):
"""Unset user admin."""
try:
user = User.objects.get(email=email)
user.is_staff = False
user.save()
        messages.success(request, _(u'Successfully revoked the admin permission of %s') % user.username)
except User.DoesNotExist:
messages.error(request, _(u'Failed to revoke admin: the user does not exist'))
referer = request.META.get('HTTP_REFERER', None)
next = reverse('sys_useradmin') if referer is None else referer
return HttpResponseRedirect(next)
# @login_required
# @sys_staff_required
# def user_activate(request, user_id):
# try:
# user = User.objects.get(id=int(user_id))
# user.is_active = True
# user.save()
# messages.success(request, _(u'Successfully activated "%s".') % user.email)
# except User.DoesNotExist:
# messages.success(request, _(u'Failed to activate: user does not exist.'))
# next = request.META.get('HTTP_REFERER', None)
# if not next:
# next = reverse('sys_useradmin')
# return HttpResponseRedirect(next)
# @login_required
# @sys_staff_required
# def user_deactivate(request, user_id):
# try:
# user = User.objects.get(id=int(user_id))
# user.is_active = False
# user.save()
# messages.success(request, _(u'Successfully deactivated "%s".') % user.email)
# except User.DoesNotExist:
# messages.success(request, _(u'Failed to deactivate: user does not exist.'))
# next = request.META.get('HTTP_REFERER', None)
# if not next:
# next = reverse('sys_useradmin')
# return HttpResponseRedirect(next)
def email_user_on_activation(user):
"""Send an email to user when admin activate his/her account.
"""
c = {
'username': user.email,
}
send_html_email(_(u'Your account on %s is activated') % SITE_NAME,
'sysadmin/user_activation_email.html', c, None, [user.email])
@login_required_ajax
@sys_staff_required
@require_POST
def user_toggle_status(request, email):
content_type = 'application/json; charset=utf-8'
if not is_valid_username(email):
return HttpResponse(json.dumps({'success': False}), status=400,
content_type=content_type)
try:
user_status = int(request.POST.get('s', 0))
except ValueError:
user_status = 0
try:
user = User.objects.get(email)
user.is_active = bool(user_status)
result_code = user.save()
if result_code == -1:
return HttpResponse(json.dumps({'success': False}), status=403,
content_type=content_type)
if user.is_active is True:
try:
email_user_on_activation(user)
email_sent = True
except Exception as e:
logger.error(e)
email_sent = False
return HttpResponse(json.dumps({'success': True,
'email_sent': email_sent,
}), content_type=content_type)
else:
clear_token(user.email)
return HttpResponse(json.dumps({'success': True}),
content_type=content_type)
except User.DoesNotExist:
return HttpResponse(json.dumps({'success': False}), status=500,
content_type=content_type)
@login_required_ajax
@sys_staff_required
@require_POST
def user_toggle_role(request, email):
content_type = 'application/json; charset=utf-8'
if not is_valid_username(email):
return HttpResponse(json.dumps({'success': False}), status=400,
content_type=content_type)
if not is_pro_version():
return HttpResponse(json.dumps({'success': False}), status=403,
content_type=content_type)
try:
user_role = request.POST.get('r', DEFAULT_USER)
except ValueError:
user_role = DEFAULT_USER
try:
user = User.objects.get(email)
User.objects.update_role(user.email, user_role)
return HttpResponse(json.dumps({'success': True}),
content_type=content_type)
except User.DoesNotExist:
return HttpResponse(json.dumps({'success': False}), status=500,
content_type=content_type)
def send_user_reset_email(request, email, password):
"""
Send email when reset user password.
"""
c = {
'email': email,
'password': password,
}
send_html_email(_(u'Password has been reset on %s') % SITE_NAME,
'sysadmin/user_reset_email.html', c, None, [email])
@login_required
@sys_staff_required
@require_POST
def user_reset(request, email):
"""Reset password for user."""
try:
user = User.objects.get(email=email)
if isinstance(INIT_PASSWD, FunctionType):
new_password = INIT_PASSWD()
else:
new_password = INIT_PASSWD
user.set_password(new_password)
user.save()
clear_token(user.username)
if config.FORCE_PASSWORD_CHANGE:
UserOptions.objects.set_force_passwd_change(user.username)
if IS_EMAIL_CONFIGURED:
if SEND_EMAIL_ON_RESETTING_USER_PASSWD:
try:
send_user_reset_email(request, user.email, new_password)
msg = _('Successfully reset password to %(passwd)s, an email has been sent to %(user)s.') % \
{'passwd': new_password, 'user': user.email}
messages.success(request, msg)
                except Exception as e:
logger.error(str(e))
msg = _('Successfully reset password to %(passwd)s, but failed to send email to %(user)s, please check your email configuration.') % \
{'passwd':new_password, 'user': user.email}
messages.success(request, msg)
else:
messages.success(request, _(u'Successfully reset password to %(passwd)s for user %(user)s.') % \
{'passwd':new_password,'user': user.email})
else:
messages.success(request, _(u'Successfully reset password to %(passwd)s for user %(user)s. But email notification can not be sent, because Email service is not properly configured.') % \
{'passwd':new_password,'user': user.email})
except User.DoesNotExist:
msg = _(u'Failed to reset password: user does not exist')
messages.error(request, msg)
referer = request.META.get('HTTP_REFERER', None)
next = reverse('sys_useradmin') if referer is None else referer
return HttpResponseRedirect(next)
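# A minimal sketch of the two forms INIT_PASSWD may take for user_reset()
# above, inferred from the isinstance(..., FunctionType) check; both values
# below are illustrative only:
#
#     INIT_PASSWD = '123456'            # fixed initial password
#     # or
#     def INIT_PASSWD():                # callable generating a password
#         return get_random_string(10)  # hypothetical helper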
def send_user_add_mail(request, email, password):
"""Send email when add new user."""
c = {
'user': request.user.username,
'org': request.user.org,
'email': email,
'password': password,
}
send_html_email(_(u'You are invited to join %s') % SITE_NAME,
'sysadmin/user_add_email.html', c, None, [email])
@login_required_ajax
def user_add(request):
"""Add a user"""
if not request.user.is_staff or request.method != 'POST':
raise Http404
content_type = 'application/json; charset=utf-8'
post_values = request.POST.copy()
post_email = request.POST.get('email', '')
post_role = request.POST.get('role', DEFAULT_USER)
post_values.update({
'email': post_email.lower(),
'role': post_role,
})
form = AddUserForm(post_values)
if form.is_valid():
email = form.cleaned_data['email']
role = form.cleaned_data['role']
password = form.cleaned_data['password1']
try:
user = User.objects.create_user(email, password, is_staff=False,
is_active=True)
except User.DoesNotExist as e:
logger.error(e)
            err_msg = _(u'Failed to add user %s.') % email
return HttpResponse(json.dumps({'error': err_msg}), status=403, content_type=content_type)
if user:
User.objects.update_role(email, role)
if config.FORCE_PASSWORD_CHANGE:
UserOptions.objects.set_force_passwd_change(email)
if request.user.org:
org_id = request.user.org.org_id
url_prefix = request.user.org.url_prefix
ccnet_threaded_rpc.add_org_user(org_id, email, 0)
if IS_EMAIL_CONFIGURED:
try:
send_user_add_mail(request, email, password)
messages.success(request, _(u'Successfully added user %s. An email notification has been sent.') % email)
                except Exception as e:
logger.error(str(e))
                    messages.success(request, _(u'Successfully added user %s. An error occurred while sending the email notification, please check your email configuration.') % email)
else:
messages.success(request, _(u'Successfully added user %s.') % email)
return HttpResponse(json.dumps({'success': True}), content_type=content_type)
else:
if IS_EMAIL_CONFIGURED:
if SEND_EMAIL_ON_ADDING_SYSTEM_MEMBER:
try:
send_user_add_mail(request, email, password)
messages.success(request, _(u'Successfully added user %s. An email notification has been sent.') % email)
                    except Exception as e:
logger.error(str(e))
                        messages.success(request, _(u'Successfully added user %s. An error occurred while sending the email notification, please check your email configuration.') % email)
else:
messages.success(request, _(u'Successfully added user %s.') % email)
else:
messages.success(request, _(u'Successfully added user %s. But email notification can not be sent, because Email service is not properly configured.') % email)
return HttpResponse(json.dumps({'success': True}), content_type=content_type)
else:
return HttpResponse(json.dumps({'error': str(form.errors.values()[0])}), status=400, content_type=content_type)
@login_required
@sys_staff_required
def sys_group_admin(request):
# Make sure page request is an int. If not, deliver first page.
try:
current_page = int(request.GET.get('page', '1'))
per_page = int(request.GET.get('per_page', '25'))
except ValueError:
current_page = 1
per_page = 25
groups_plus_one = ccnet_threaded_rpc.get_all_groups(per_page * (current_page -1),
per_page +1)
groups = groups_plus_one[:per_page]
for grp in groups:
org_id = ccnet_threaded_rpc.get_org_id_by_group(int(grp.id))
if org_id > 0:
grp.org_id = org_id
grp.org_name = ccnet_threaded_rpc.get_org_by_id(int(org_id)).org_name
if len(groups_plus_one) == per_page + 1:
page_next = True
else:
page_next = False
return render_to_response('sysadmin/sys_group_admin.html', {
'groups': groups,
'current_page': current_page,
'prev_page': current_page-1,
'next_page': current_page+1,
'per_page': per_page,
'page_next': page_next,
}, context_instance=RequestContext(request))
@login_required
@sys_staff_required
def sys_group_admin_export_excel(request):
""" Export all groups to excel
"""
next = request.META.get('HTTP_REFERER', None)
if not next:
next = SITE_ROOT
try:
groups = ccnet_threaded_rpc.get_all_groups(-1, -1)
except Exception as e:
logger.error(e)
messages.error(request, _(u'Failed to export Excel'))
return HttpResponseRedirect(next)
head = [_("Name"), _("Creator"), _("Create At")]
data_list = []
for grp in groups:
create_at = tsstr_sec(grp.timestamp) if grp.timestamp else ''
row = [grp.group_name, grp.creator_name, create_at]
data_list.append(row)
wb = write_xls('groups', head, data_list)
if not wb:
messages.error(request, _(u'Failed to export Excel'))
return HttpResponseRedirect(next)
response = HttpResponse(content_type='application/ms-excel')
response['Content-Disposition'] = 'attachment; filename=groups.xlsx'
wb.save(response)
return response
@login_required
@sys_staff_required
def sys_admin_group_info(request, group_id):
group_id = int(group_id)
group = get_group(group_id)
org_id = request.GET.get('org_id', None)
if org_id:
repos = seafile_api.get_org_group_repos(org_id, group_id)
else:
repos = seafile_api.get_repos_by_group(group_id)
members = get_group_members(group_id)
return render_to_response('sysadmin/sys_admin_group_info.html', {
'group': group,
'repos': repos,
'members': members,
'enable_sys_admin_view_repo': ENABLE_SYS_ADMIN_VIEW_REPO,
}, context_instance=RequestContext(request))
@login_required
@sys_staff_required
def sys_org_admin(request):
# Make sure page request is an int. If not, deliver first page.
try:
current_page = int(request.GET.get('page', '1'))
per_page = int(request.GET.get('per_page', '25'))
except ValueError:
current_page = 1
per_page = 25
try:
from seahub_extra.plan.models import OrgPlan
enable_org_plan = True
except ImportError:
enable_org_plan = False
if enable_org_plan and request.GET.get('filter', '') == 'paid':
orgs = []
ops = OrgPlan.objects.all()
for e in ops:
o = ccnet_threaded_rpc.get_org_by_id(e.org_id)
if not o:
continue
o.quota_usage = seafserv_threaded_rpc.get_org_quota_usage(o.org_id)
o.total_quota = seafserv_threaded_rpc.get_org_quota(o.org_id)
o.expiration = e.expire_date
o.is_expired = True if e.expire_date < timezone.now() else False
orgs.append(o)
return render_to_response('sysadmin/sys_org_admin.html', {
'orgs': orgs,
'enable_org_plan': enable_org_plan,
'hide_paginator': True,
'paid_page': True,
}, context_instance=RequestContext(request))
orgs_plus_one = ccnet_threaded_rpc.get_all_orgs(per_page * (current_page - 1),
per_page + 1)
if len(orgs_plus_one) == per_page + 1:
page_next = True
else:
page_next = False
orgs = orgs_plus_one[:per_page]
if ENABLE_TRIAL_ACCOUNT:
trial_orgs = TrialAccount.objects.filter(user_or_org__in=[x.org_id for x in orgs])
else:
trial_orgs = []
for org in orgs:
org.quota_usage = seafserv_threaded_rpc.get_org_quota_usage(org.org_id)
org.total_quota = seafserv_threaded_rpc.get_org_quota(org.org_id)
from seahub_extra.organizations.settings import ORG_TRIAL_DAYS
if ORG_TRIAL_DAYS > 0:
from datetime import timedelta
org.expiration = datetime.datetime.fromtimestamp(org.ctime / 1e6) + timedelta(days=ORG_TRIAL_DAYS)
org.trial_info = None
for trial_org in trial_orgs:
if trial_org.user_or_org == str(org.org_id):
org.trial_info = {'expire_date': trial_org.expire_date}
if trial_org.expire_date:
org.expiration = trial_org.expire_date
if org.expiration:
org.is_expired = True if org.expiration < timezone.now() else False
else:
org.is_expired = False
return render_to_response('sysadmin/sys_org_admin.html', {
'orgs': orgs,
'current_page': current_page,
'prev_page': current_page-1,
'next_page': current_page+1,
'per_page': per_page,
'page_next': page_next,
'enable_org_plan': enable_org_plan,
'all_page': True,
}, context_instance=RequestContext(request))
@login_required
@sys_staff_required
def sys_org_search(request):
org_name = request.GET.get('name', '').lower()
creator = request.GET.get('creator', '').lower()
if not org_name and not creator:
return HttpResponseRedirect(reverse('sys_org_admin'))
orgs = []
orgs_all = ccnet_threaded_rpc.get_all_orgs(-1, -1)
if org_name and creator:
for o in orgs_all:
if org_name in o.org_name.lower() and creator in o.creator.lower():
orgs.append(o)
else:
if org_name:
for o in orgs_all:
if org_name in o.org_name.lower():
orgs.append(o)
if creator:
for o in orgs_all:
if creator in o.creator.lower():
orgs.append(o)
return render_to_response(
'sysadmin/sys_org_search.html', {
'orgs': orgs,
'name': org_name,
'creator': creator,
}, context_instance=RequestContext(request))
@login_required
@sys_staff_required
def sys_org_rename(request, org_id):
if request.method != 'POST':
raise Http404
referer = request.META.get('HTTP_REFERER', None)
next = reverse('sys_org_admin') if referer is None else referer
new_name = request.POST.get('new_name', None)
if new_name:
try:
ccnet_threaded_rpc.set_org_name(int(org_id), new_name)
messages.success(request, _(u'Success'))
except Exception as e:
logger.error(e)
messages.error(request, _(u'Failed to rename organization'))
return HttpResponseRedirect(next)
@login_required
@require_POST
@sys_staff_required
def sys_org_remove(request, org_id):
"""Remove an org and all members/repos/groups.
Arguments:
- `request`:
- `org_id`:
"""
org_id = int(org_id)
org = ccnet_threaded_rpc.get_org_by_id(org_id)
users = ccnet_threaded_rpc.get_org_emailusers(org.url_prefix, -1, -1)
for u in users:
ccnet_threaded_rpc.remove_org_user(org_id, u.email)
groups = ccnet_threaded_rpc.get_org_groups(org.org_id, -1, -1)
for g in groups:
ccnet_threaded_rpc.remove_org_group(org_id, g.gid)
# remove org repos
seafserv_threaded_rpc.remove_org_repo_by_org_id(org_id)
# remove org
ccnet_threaded_rpc.remove_org(org_id)
messages.success(request, _(u'Successfully deleted.'))
referer = request.META.get('HTTP_REFERER', None)
next = reverse('sys_org_admin') if referer is None else referer
return HttpResponseRedirect(next)
@login_required_ajax
@sys_staff_required
def sys_org_set_member_quota(request, org_id):
if request.method != 'POST':
raise Http404
content_type = 'application/json; charset=utf-8'
try:
member_quota = int(request.POST.get('member_quota', '0'))
except ValueError:
return HttpResponse(json.dumps({ 'error': _('Input should be a number')}),
status=400, content_type=content_type)
if member_quota > 0:
from seahub_extra.organizations.models import OrgMemberQuota
OrgMemberQuota.objects.set_quota(org_id, member_quota)
messages.success(request, _(u'Success'))
return HttpResponse(json.dumps({'success': True}), status=200,
content_type=content_type)
else:
return HttpResponse(json.dumps({ 'error': _('Input number should be greater than 0')}),
status=400, content_type=content_type)
def sys_get_org_base_info(org_id):
org = ccnet_threaded_rpc.get_org_by_id(org_id)
# users
users = ccnet_threaded_rpc.get_org_emailusers(org.url_prefix, -1, -1)
users_count = len(users)
# groups
groups = ccnet_threaded_rpc.get_org_groups(org_id, -1, -1)
groups_count = len(groups)
# quota
total_quota = seafserv_threaded_rpc.get_org_quota(org_id)
quota_usage = seafserv_threaded_rpc.get_org_quota_usage(org_id)
return {
"org": org,
"users": users,
"users_count": users_count,
"groups": groups,
"groups_count": groups_count,
"total_quota": total_quota,
"quota_usage": quota_usage,
}
@login_required
@sys_staff_required
def sys_org_info_user(request, org_id):
org_id = int(org_id)
org_basic_info = sys_get_org_base_info(org_id)
users = org_basic_info["users"]
last_logins = UserLastLogin.objects.filter(username__in=[x.email for x in users])
for user in users:
if user.email == request.user.email:
user.is_self = True
try:
            user.self_usage = seafserv_threaded_rpc. \
get_org_user_quota_usage(org_id, user.email)
user.quota = seafserv_threaded_rpc. \
get_org_user_quota(org_id, user.email)
except SearpcError as e:
logger.error(e)
user.self_usage = -1
user.quota = -1
# populate user last login time
user.last_login = None
for last_login in last_logins:
if last_login.username == user.email:
user.last_login = last_login.last_login
return render_to_response('sysadmin/sys_org_info_user.html',
org_basic_info, context_instance=RequestContext(request))
@login_required
@sys_staff_required
def sys_org_info_group(request, org_id):
org_id = int(org_id)
org_basic_info = sys_get_org_base_info(org_id)
return render_to_response('sysadmin/sys_org_info_group.html',
org_basic_info, context_instance=RequestContext(request))
@login_required
@sys_staff_required
def sys_org_info_library(request, org_id):
org_id = int(org_id)
org_basic_info = sys_get_org_base_info(org_id)
# library
org_repos = seafserv_threaded_rpc.get_org_repo_list(org_id, -1, -1)
for repo in org_repos:
try:
repo.owner = seafserv_threaded_rpc.get_org_repo_owner(repo.id)
except:
repo.owner = None
org_basic_info["org_repos"] = org_repos
return render_to_response('sysadmin/sys_org_info_library.html',
org_basic_info, context_instance=RequestContext(request))
@login_required
@sys_staff_required
def sys_org_info_setting(request, org_id):
org_id = int(org_id)
org_basic_info = sys_get_org_base_info(org_id)
if getattr(settings, 'ORG_MEMBER_QUOTA_ENABLED', False):
from seahub_extra.organizations.models import OrgMemberQuota
org_basic_info['org_member_quota'] = OrgMemberQuota.objects.get_quota(org_id)
else:
org_basic_info['org_member_quota'] = None
return render_to_response('sysadmin/sys_org_info_setting.html',
org_basic_info,
context_instance=RequestContext(request))
@login_required
@sys_staff_required
def sys_publink_admin(request):
# Make sure page request is an int. If not, deliver first page.
try:
current_page = int(request.GET.get('page', '1'))
per_page = int(request.GET.get('per_page', '100'))
except ValueError:
current_page = 1
per_page = 100
offset = per_page * (current_page -1)
limit = per_page + 1
sort_by = request.GET.get('sort_by', 'time_up')
if sort_by == 'time_down':
publinks = FileShare.objects.all().order_by('ctime')[offset:offset+limit]
elif sort_by == 'count_up':
publinks = FileShare.objects.all().order_by('-view_cnt')[offset:offset+limit]
elif sort_by == 'count_down':
publinks = FileShare.objects.all().order_by('view_cnt')[offset:offset+limit]
else:
publinks = FileShare.objects.all().order_by('-ctime')[offset:offset+limit]
if len(publinks) == per_page + 1:
page_next = True
else:
page_next = False
for l in publinks:
if l.is_file_share_link():
l.name = os.path.basename(l.path)
else:
l.name = os.path.dirname(l.path)
return render_to_response(
'sysadmin/sys_publink_admin.html', {
'publinks': publinks,
'current_page': current_page,
'prev_page': current_page-1,
'next_page': current_page+1,
'per_page': per_page,
'page_next': page_next,
'sort_by': sort_by,
},
context_instance=RequestContext(request))
@login_required_ajax
@sys_staff_required
@require_POST
def sys_publink_remove(request):
"""Remove share links.
"""
content_type = 'application/json; charset=utf-8'
result = {}
token = request.POST.get('t')
if not token:
result = {'error': _(u"Argument missing")}
return HttpResponse(json.dumps(result), status=400, content_type=content_type)
FileShare.objects.filter(token=token).delete()
result = {'success': True}
return HttpResponse(json.dumps(result), content_type=content_type)
@login_required_ajax
@sys_staff_required
@require_POST
def sys_upload_link_remove(request):
"""Remove shared upload links.
"""
content_type = 'application/json; charset=utf-8'
result = {}
token = request.POST.get('t')
if not token:
result = {'error': _(u"Argument missing")}
return HttpResponse(json.dumps(result), status=400, content_type=content_type)
UploadLinkShare.objects.filter(token=token).delete()
result = {'success': True}
return HttpResponse(json.dumps(result), content_type=content_type)
@login_required
@sys_staff_required
def user_search(request):
"""Search a user.
"""
email = request.GET.get('email', '')
users = ccnet_threaded_rpc.search_emailusers('DB', email, -1, -1)
ldap_users = ccnet_threaded_rpc.search_emailusers('LDAP', email, -1, -1)
users.extend(ldap_users)
last_logins = UserLastLogin.objects.filter(username__in=[x.email for x in users])
if ENABLE_TRIAL_ACCOUNT:
trial_users = TrialAccount.objects.filter(user_or_org__in=[x.email for x in users])
else:
trial_users = []
for user in users:
_populate_user_quota_usage(user)
# check user's role
if user.role == GUEST_USER:
user.is_guest = True
else:
user.is_guest = False
# populate user last login time
user.last_login = None
for last_login in last_logins:
if last_login.username == user.email:
user.last_login = last_login.last_login
user.trial_info = None
for trial_user in trial_users:
if trial_user.user_or_org == user.email:
user.trial_info = {'expire_date': trial_user.expire_date}
return render_to_response('sysadmin/user_search.html', {
'users': users,
'email': email,
'default_user': DEFAULT_USER,
'guest_user': GUEST_USER,
'is_pro': is_pro_version(),
}, context_instance=RequestContext(request))
@login_required
@sys_staff_required
@require_POST
def sys_repo_transfer(request):
"""Transfer a repo to others.
"""
repo_id = request.POST.get('repo_id', None)
new_owner = request.POST.get('email', None)
next = request.META.get('HTTP_REFERER', None)
if not next:
next = reverse(sys_repo_admin)
if not (repo_id and new_owner):
messages.error(request, _(u'Failed to transfer, invalid arguments.'))
return HttpResponseRedirect(next)
repo = seafile_api.get_repo(repo_id)
if not repo:
messages.error(request, _(u'Library does not exist'))
return HttpResponseRedirect(next)
try:
User.objects.get(email=new_owner)
except User.DoesNotExist:
messages.error(request, _(u'Failed to transfer, user %s not found') % new_owner)
return HttpResponseRedirect(next)
try:
if seafserv_threaded_rpc.get_org_id_by_repo_id(repo_id) > 0:
messages.error(request, _(u'Can not transfer organization library'))
return HttpResponseRedirect(next)
if ccnet_threaded_rpc.get_orgs_by_user(new_owner):
messages.error(request, _(u'Can not transfer library to organization user %s') % new_owner)
return HttpResponseRedirect(next)
except SearpcError: # XXX: ignore rpc not found error
pass
seafile_api.set_repo_owner(repo_id, new_owner)
    messages.success(request, _(u'Successfully transferred.'))
return HttpResponseRedirect(next)
@login_required
@sys_staff_required
@require_POST
def sys_repo_delete(request, repo_id):
"""Delete a repo.
"""
next = request.META.get('HTTP_REFERER', None)
if not next:
next = reverse(sys_repo_admin)
if get_system_default_repo_id() == repo_id:
messages.error(request, _('System library can not be deleted.'))
return HttpResponseRedirect(next)
repo = seafile_api.get_repo(repo_id)
if repo: # Handle the case that repo is `None`.
repo_name = repo.name
else:
repo_name = ''
if MULTI_TENANCY:
org_id = seafserv_threaded_rpc.get_org_id_by_repo_id(repo_id)
usernames = get_related_users_by_org_repo(org_id, repo_id)
repo_owner = seafile_api.get_org_repo_owner(repo_id)
else:
org_id = -1
usernames = get_related_users_by_repo(repo_id)
repo_owner = seafile_api.get_repo_owner(repo_id)
seafile_api.remove_repo(repo_id)
repo_deleted.send(sender=None, org_id=org_id, usernames=usernames,
repo_owner=repo_owner, repo_id=repo_id,
repo_name=repo_name)
messages.success(request, _(u'Successfully deleted.'))
return HttpResponseRedirect(next)
@login_required
@sys_staff_required
def sys_traffic_admin(request):
"""List all users from database.
"""
try:
current_page = int(request.GET.get('page', '1'))
per_page = int(request.GET.get('per_page', '25'))
except ValueError:
current_page = 1
per_page = 25
month = request.GET.get('month', '')
if not re.match(r'[\d]{6}', month):
month = datetime.datetime.now().strftime('%Y%m')
start = per_page * (current_page -1)
limit = per_page + 1
traffic_info_list = get_user_traffic_list(month, start, limit)
page_next = len(traffic_info_list) == limit
for info in traffic_info_list:
info['total'] = info['file_view'] + info['file_download'] + info['dir_download']
return render_to_response(
'sysadmin/sys_trafficadmin.html', {
'traffic_info_list': traffic_info_list,
'month': month,
'current_page': current_page,
'prev_page': current_page-1,
'next_page': current_page+1,
'per_page': per_page,
'page_next': page_next,
},
context_instance=RequestContext(request))
@login_required
@sys_staff_required
def sys_virus_scan_records(request):
"""List virus scan records.
"""
try:
current_page = int(request.GET.get('page', '1'))
per_page = int(request.GET.get('per_page', '100'))
except ValueError:
current_page = 1
per_page = 100
records_all = get_virus_record(start=per_page * (current_page - 1),
limit=per_page + 1)
if len(records_all) == per_page + 1:
page_next = True
else:
page_next = False
records = []
for r in records_all[:per_page]:
try:
repo = seafile_api.get_repo(r.repo_id)
except SearpcError as e:
logger.error(e)
continue
if not repo:
continue
r.repo = repo
r.repo.owner = seafile_api.get_repo_owner(r.repo.repo_id)
records.append(r)
return render_to_response(
'sysadmin/sys_virus_scan_records.html', {
'records': records,
'current_page': current_page,
'prev_page': current_page - 1,
'next_page': current_page + 1,
'per_page': per_page,
'page_next': page_next,
}, context_instance=RequestContext(request))
@login_required
@sys_staff_required
@require_POST
def sys_delete_virus_scan_records(request, vid):
r = get_virus_record_by_id(vid)
parent_dir = os.path.dirname(r.file_path)
dirent_name = os.path.basename(r.file_path)
try:
seafile_api.del_file(r.repo_id, parent_dir, dirent_name,
request.user.username)
handle_virus_record(vid)
messages.success(request, _('Successfully deleted.'))
except SearpcError as e:
logger.error(e)
messages.error(request, _('Failed to delete, please try again later.'))
return HttpResponseRedirect(reverse('sys_virus_scan_records'))
@login_required_ajax
@sys_staff_required
def batch_user_make_admin(request):
"""Batch make users as admins.
"""
if request.method != 'POST':
raise Http404
content_type = 'application/json; charset=utf-8'
set_admin_emails = request.POST.get('set_admin_emails')
set_admin_emails = string2list(set_admin_emails)
success = []
failed = []
for email in set_admin_emails:
try:
user = User.objects.get(email=email)
except User.DoesNotExist:
failed.append(email)
continue
user.is_staff = True
user.save()
success.append(email)
for item in success:
messages.success(request, _(u'Successfully set %s as admin.') % item)
for item in failed:
messages.error(request, _(u'Failed to set %s as admin: user does not exist.') % item)
return HttpResponse(json.dumps({'success': True,}), content_type=content_type)
@login_required
@sys_staff_required
def batch_add_user(request):
"""Batch add users. Import users from CSV file.
"""
if request.method != 'POST':
raise Http404
form = BatchAddUserForm(request.POST, request.FILES)
if form.is_valid():
content = request.FILES['file'].read()
encoding = chardet.detect(content)['encoding']
if encoding != 'utf-8':
content = content.decode(encoding, 'replace').encode('utf-8')
filestream = StringIO.StringIO(content)
reader = csv.reader(filestream)
for row in reader:
if not row:
continue
username = row[0].strip()
password = row[1].strip()
if not is_valid_username(username):
continue
if password == '':
continue
try:
User.objects.get(email=username)
continue
except User.DoesNotExist:
User.objects.create_user(username, password, is_staff=False,
is_active=True)
send_html_email_with_dj_template(
username, dj_template='sysadmin/user_batch_add_email.html',
subject=_(u'You are invited to join %s') % SITE_NAME,
context={
'user': email2nickname(request.user.username),
'email': username,
'password': password,
})
messages.success(request, _('Import succeeded'))
else:
messages.error(request, _(u'Please select a csv file first.'))
next = request.META.get('HTTP_REFERER', reverse(sys_user_admin))
return HttpResponseRedirect(next)
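# A minimal sketch of the CSV layout batch_add_user() above expects: one user
# per row, column 0 the email and column 1 the initial password; rows with an
# invalid username or an empty password are skipped. The addresses below are
# illustrative only:
#
#     alice@example.com,initial-passwd-1
#     bob@example.com,initial-passwd-2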
@login_required
def sys_sudo_mode(request):
if request.method not in ('GET', 'POST'):
        return HttpResponseNotAllowed(['GET', 'POST'])
# here we can't use @sys_staff_required
if not request.user.is_staff:
raise Http404
password_error = False
if request.method == 'POST':
password = request.POST.get('password')
if password:
user = authenticate(username=request.user.username, password=password)
if user:
update_sudo_mode_ts(request)
return HttpResponseRedirect(
request.GET.get('next', reverse('sys_useradmin')))
password_error = True
enable_shib_login = getattr(settings, 'ENABLE_SHIB_LOGIN', False)
return render_to_response(
'sysadmin/sudo_mode.html', {
'password_error': password_error,
'enable_shib_login': enable_shib_login,
},
context_instance=RequestContext(request))
@login_required
@sys_staff_required
def sys_settings(request):
"""List and change seahub settings in admin panel.
"""
if not dj_settings.ENABLE_SETTINGS_VIA_WEB:
raise Http404
DIGIT_WEB_SETTINGS = (
'DISABLE_SYNC_WITH_ANY_FOLDER', 'ENABLE_SIGNUP',
'ACTIVATE_AFTER_REGISTRATION', 'REGISTRATION_SEND_MAIL',
'LOGIN_REMEMBER_DAYS', 'REPO_PASSWORD_MIN_LENGTH',
'ENABLE_REPO_HISTORY_SETTING', 'USER_STRONG_PASSWORD_REQUIRED',
'ENABLE_ENCRYPTED_LIBRARY', 'USER_PASSWORD_MIN_LENGTH',
'USER_PASSWORD_STRENGTH_LEVEL', 'SHARE_LINK_PASSWORD_MIN_LENGTH',
'ENABLE_USER_CREATE_ORG_REPO', 'FORCE_PASSWORD_CHANGE',
'LOGIN_ATTEMPT_LIMIT', 'FREEZE_USER_ON_LOGIN_FAILED',
)
STRING_WEB_SETTINGS = ('SERVICE_URL', 'FILE_SERVER_ROOT',)
if request.is_ajax() and request.method == "POST":
content_type = 'application/json; charset=utf-8'
result = {}
key = request.POST.get('key', None)
value = request.POST.get('value', None)
if key not in dir(config) or value is None:
result['error'] = _(u'Invalid setting')
return HttpResponse(json.dumps(result), status=400, content_type=content_type)
if value.isdigit():
if key in DIGIT_WEB_SETTINGS:
value = int(value)
else:
result['error'] = _(u'Invalid value')
return HttpResponse(json.dumps(result), status=400, content_type=content_type)
if key == 'USER_PASSWORD_STRENGTH_LEVEL' and value not in (1,2,3,4):
result['error'] = _(u'Invalid value')
return HttpResponse(json.dumps(result), status=400, content_type=content_type)
else:
if key not in STRING_WEB_SETTINGS:
result['error'] = _(u'Invalid value')
return HttpResponse(json.dumps(result), status=400, content_type=content_type)
try:
setattr(config, key, value)
result['success'] = True
return HttpResponse(json.dumps(result), content_type=content_type)
except AttributeError as e:
logger.error(e)
result['error'] = _(u'Internal server error')
return HttpResponse(json.dumps(result), status=500, content_type=content_type)
config_dict = {}
for key in dir(config):
value = getattr(config, key)
config_dict[key] = value
return render_to_response('sysadmin/settings.html', {
'config_dict': config_dict,
}, context_instance=RequestContext(request))
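# A minimal sketch (assuming the URL pattern is named 'sys_settings') of the
# AJAX request sys_settings() above handles: a staff user POSTs one 'key' and
# 'value' pair, where digit values are accepted only for keys listed in
# DIGIT_WEB_SETTINGS and free-form strings only for STRING_WEB_SETTINGS:
#
#     from django.core.urlresolvers import reverse
#     from django.test import Client
#     c = Client()
#     # ... log in as an admin user first ...
#     c.post(reverse('sys_settings'), {'key': 'ENABLE_SIGNUP', 'value': '1'},
#            HTTP_X_REQUESTED_WITH='XMLHttpRequest')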
@login_required_ajax
@sys_staff_required
def sys_check_license(request):
"""Check seafile license expiration.
"""
if not is_pro_version():
raise Http404
content_type = 'application/json; charset=utf-8'
result = {}
license_file = os.path.join(settings.PROJECT_ROOT, '../../seafile-license.txt')
license_dict = parse_license(license_file)
if license_dict:
try:
expiration = license_dict['Expiration']
except KeyError as e:
logger.error(e)
result['error'] = str(e)
return HttpResponse(json.dumps(result), status=500, content_type=content_type)
struct_time = datetime.datetime.strptime(expiration, "%Y-%m-%d")
expiration_timestamp = time.mktime(struct_time.timetuple())
if time.time() > expiration_timestamp:
# already expired
result['already_expired'] = True
elif time.time() + 30 * 24 * 60 * 60 > expiration_timestamp:
# will be expired in 30 days
result['to_be_expired'] = True
result['expiration_date'] = expiration
return HttpResponse(json.dumps(result), content_type=content_type)
@login_required
@sys_staff_required
def sys_inst_admin(request):
"""List institutions.
"""
if request.method == "POST":
inst_name = request.POST.get('name').strip()
if not inst_name:
messages.error(request, 'Name is required.')
return HttpResponseRedirect(reverse('sys_inst_admin'))
Institution.objects.create(name=inst_name)
messages.success(request, _('Success'))
return HttpResponseRedirect(reverse('sys_inst_admin'))
# Make sure page request is an int. If not, deliver first page.
try:
current_page = int(request.GET.get('page', '1'))
per_page = int(request.GET.get('per_page', '100'))
except ValueError:
current_page = 1
per_page = 100
offset = per_page * (current_page - 1)
insts = Institution.objects.all()[offset:offset + per_page + 1]
if len(insts) == per_page + 1:
page_next = True
else:
page_next = False
return render_to_response(
'sysadmin/sys_inst_admin.html', {
'insts': insts[:per_page],
'current_page': current_page,
'prev_page': current_page - 1,
'next_page': current_page + 1,
'per_page': per_page,
'page_next': page_next,
}, context_instance=RequestContext(request))
@login_required
@sys_staff_required
@require_POST
def sys_inst_remove(request, inst_id):
"""Delete an institution.
"""
try:
inst = Institution.objects.get(pk=inst_id)
except Institution.DoesNotExist:
raise Http404
inst.delete()
messages.success(request, _('Success'))
return HttpResponseRedirect(reverse('sys_inst_admin'))
@login_required
@sys_staff_required
def sys_inst_info_user(request, inst_id):
"""List institution members.
"""
try:
inst = Institution.objects.get(pk=inst_id)
except Institution.DoesNotExist:
raise Http404
# Make sure page request is an int. If not, deliver first page.
try:
current_page = int(request.GET.get('page', '1'))
per_page = int(request.GET.get('per_page', '100'))
except ValueError:
current_page = 1
per_page = 100
offset = per_page * (current_page - 1)
inst_admins = [x.user for x in InstitutionAdmin.objects.filter(institution=inst)]
usernames = [x.user for x in Profile.objects.filter(institution=inst.name)[offset:offset + per_page + 1]]
if len(usernames) == per_page + 1:
page_next = True
else:
page_next = False
users = [User.objects.get(x) for x in usernames[:per_page]]
last_logins = UserLastLogin.objects.filter(username__in=[x.email for x in users])
for u in users:
_populate_user_quota_usage(u)
if u.username in inst_admins:
u.inst_admin = True
else:
u.inst_admin = False
# populate user last login time
u.last_login = None
for last_login in last_logins:
if last_login.username == u.email:
u.last_login = last_login.last_login
users_count = len(users)
return render_to_response('sysadmin/sys_inst_info_user.html', {
'inst': inst,
'users': users,
'users_count': users_count,
'current_page': current_page,
'prev_page': current_page - 1,
'next_page': current_page + 1,
'per_page': per_page,
'page_next': page_next,
}, context_instance=RequestContext(request))
@login_required
@sys_staff_required
@require_POST
def sys_inst_toggle_admin(request, inst_id, email):
"""Set or revoke an institution admin.
"""
try:
inst = Institution.objects.get(pk=inst_id)
except Institution.DoesNotExist:
raise Http404
try:
u = User.objects.get(email=email)
except User.DoesNotExist:
assert False, 'TODO'
if u.is_staff:
assert False
res = InstitutionAdmin.objects.filter(institution=inst, user=email)
if len(res) == 0:
InstitutionAdmin.objects.create(institution=inst, user=email)
elif len(res) == 1:
res[0].delete()
# todo: expire user's session
else:
assert False
messages.success(request, _('Success'))
return HttpResponseRedirect(reverse('sys_inst_info_user', args=[inst.pk]))
|
{
"content_hash": "71a1fd78b3feed5aa61facd02941a184",
"timestamp": "",
"source": "github",
"line_count": 2421,
"max_line_length": 198,
"avg_line_length": 34.0512185047501,
"alnum_prop": 0.593561221766661,
"repo_name": "saukrIppl/seahub",
"id": "d9004558d74c144f2078f7e09f8a2f0816fff0bb",
"size": "82457",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "seahub/views/sysadmin.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "329387"
},
{
"name": "CoffeeScript",
"bytes": "21"
},
{
"name": "HTML",
"bytes": "722728"
},
{
"name": "Java",
"bytes": "307193"
},
{
"name": "JavaScript",
"bytes": "7293422"
},
{
"name": "Makefile",
"bytes": "1097"
},
{
"name": "PLpgSQL",
"bytes": "19598"
},
{
"name": "Python",
"bytes": "9050702"
},
{
"name": "Shell",
"bytes": "9695"
}
],
"symlink_target": ""
}
|
from django.db import models
from localflavor.md.models import MDCompanyTypeField, MDIDNOField, MDLicensePlateField
class MDPlaceModel(models.Model):
idno = MDIDNOField()
company_type_1 = MDCompanyTypeField()
company_type_2 = MDCompanyTypeField()
license_plate = MDLicensePlateField()
|
{
"content_hash": "0b6f9e78b1c25a3d8d2660d26bf7ae0b",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 86,
"avg_line_length": 30.4,
"alnum_prop": 0.7730263157894737,
"repo_name": "rsalmaso/django-localflavor",
"id": "a2eb2b95b43cd5f4d9bf7a72d9cf726ecacf201c",
"size": "304",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "tests/test_md/models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "906251"
}
],
"symlink_target": ""
}
|
import click
import requests
from .exceptions import (
CachedPage,
WaybackRuntimeError,
BlockedByRobots
)
from urllib.parse import urljoin
from requests.utils import parse_header_links
def capture(
target_url,
user_agent="savepagenow (https://github.com/pastpages/savepagenow)",
accept_cache=False
):
"""
Archives the provided URL using archive.org's Wayback Machine.
Returns the archive.org URL where the capture is stored.
Raises a CachedPage exception if archive.org declines to conduct a new
capture and returns a previous snapshot instead.
    To silence that exception, pass True to the ``accept_cache`` keyword
argument.
"""
# Put together the URL that will save our request
domain = "https://web.archive.org"
save_url = urljoin(domain, "/save/")
request_url = save_url + target_url
# Send the capture request to archive.org
headers = {
'User-Agent': user_agent,
}
response = requests.get(request_url, headers=headers)
# If it has an error header, raise that.
has_error_header = 'X-Archive-Wayback-Runtime-Error' in response.headers
if has_error_header:
error_header = response.headers['X-Archive-Wayback-Runtime-Error']
if error_header == 'RobotAccessControlException: Blocked By Robots':
raise BlockedByRobots("archive.org returned blocked by robots.txt error")
else:
raise WaybackRuntimeError(error_header)
# If it has an error code, raise that
if response.status_code in [403, 502, 520]:
raise WaybackRuntimeError(response.headers)
# If there's a content-location header in the response, we will use that.
try:
content_location = response.headers['Content-Location']
archive_url = domain + content_location
except KeyError:
# If there's not, we will try to parse out a Link header, which is another style they use.
try:
# Parse the Link tag in the header, which points to memento URLs in Wayback
header_links = parse_header_links(response.headers['Link'])
archive_obj = [h for h in header_links if h['rel'] == 'memento'][0]
archive_url = archive_obj['url']
except Exception:
            # If neither of those things works, throw this error.
raise WaybackRuntimeError(dict(status_code=response.status_code, headers=response.headers))
# Determine if the response was cached
cached = 'X-Page-Cache' in response.headers and response.headers['X-Page-Cache'] == 'HIT'
# If it was cached ...
if cached:
# .. and we're not allowing that
if not accept_cache:
# ... throw an error
msg = "archive.org returned a cache of this page: {}".format(archive_url)
raise CachedPage(msg)
# Finally, return the archived URL
return archive_url
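# A minimal usage sketch for capture() above (the URL is illustrative and
# network access to archive.org is assumed):
#
#     from savepagenow.api import capture
#     from savepagenow.exceptions import CachedPage
#
#     try:
#         archive_url = capture("https://example.com/")
#     except CachedPage:
#         # archive.org served a recent snapshot; accept it instead
#         archive_url = capture("https://example.com/", accept_cache=True)
#     print(archive_url)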
def capture_or_cache(
target_url,
user_agent="savepagenow (https://github.com/pastpages/savepagenow)"
):
"""
Archives the provided URL using archive.org's Wayback Machine, unless
the page has been recently captured.
Returns a tuple with the archive.org URL where the capture is stored,
along with a boolean indicating if a new capture was conducted.
If the boolean is True, archive.org conducted a new capture. If it is False,
archive.org has returned a recently cached capture instead, likely taken
in the previous minutes.
"""
try:
return capture(target_url, user_agent=user_agent, accept_cache=False), True
except CachedPage:
return capture(target_url, user_agent=user_agent, accept_cache=True), False
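# Sketch of the (url, was_captured) pair described in the docstring above
# (illustrative URL):
#
#     archive_url, captured = capture_or_cache("https://example.com/")
#     if not captured:
#         print("archive.org returned a recently cached snapshot")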
@click.command()
@click.argument("url")
@click.option("-ua", "--user-agent", help="User-Agent header for the web request")
@click.option("-c", "--accept-cache", help="Accept and return cached URL", is_flag=True)
def cli(url, user_agent, accept_cache):
"""
Archives the provided URL using archive.org's Wayback Machine.
Raises a CachedPage exception if archive.org declines to conduct a new
capture and returns a previous snapshot instead.
"""
kwargs = {}
if user_agent:
kwargs['user_agent'] = user_agent
if accept_cache:
kwargs['accept_cache'] = accept_cache
archive_url = capture(url, **kwargs)
click.echo(archive_url)
if __name__ == "__main__":
cli()
|
{
"content_hash": "ceb9c57e2da462f260cfa9286dea2f93",
"timestamp": "",
"source": "github",
"line_count": 124,
"max_line_length": 103,
"avg_line_length": 35.483870967741936,
"alnum_prop": 0.6715909090909091,
"repo_name": "pastpages/savepagenow",
"id": "1b2e65eba52eda34fcbe68cdea16778793cac0c7",
"size": "4446",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "savepagenow/api.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "168"
},
{
"name": "Python",
"bytes": "7014"
}
],
"symlink_target": ""
}
|
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('projects', '0037_project_tasks'),
]
operations = [
migrations.CreateModel(
name='Integration',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('provider_key', models.TextField(help_text='The key of the provider for this integration.', verbose_name='Provider key')),
('project', models.ForeignKey(help_text='The project for this integration.', on_delete=django.db.models.deletion.CASCADE, related_name='integrations', to='projects.Project', verbose_name='Project')),
],
options={
'verbose_name': 'Integration',
'verbose_name_plural': 'Integrations',
'ordering': ('project__title',),
},
),
migrations.CreateModel(
name='IntegrationOption',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('key', models.SlugField(help_text='The key for this integration option.', max_length=128, verbose_name='Key')),
('value', models.TextField(help_text='The value for this integration option.', verbose_name='Value')),
('integration', models.ForeignKey(help_text='The integration for this integration option.', on_delete=django.db.models.deletion.CASCADE, related_name='options', to='projects.Integration', verbose_name='Integration')),
],
options={
'verbose_name': 'Integration option',
'verbose_name_plural': 'Integration options',
'ordering': ('integration__project__title',),
},
),
]
|
{
"content_hash": "4959a12bf947fd4e2b24e039937891b7",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 233,
"avg_line_length": 49.02564102564103,
"alnum_prop": 0.5920502092050209,
"repo_name": "DMPwerkzeug/DMPwerkzeug",
"id": "7454a9cb49ae1a5db6d7a6d71f6e4a023661de82",
"size": "1962",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "rdmo/projects/migrations/0038_integration_integrationoption.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "9735"
},
{
"name": "HTML",
"bytes": "126570"
},
{
"name": "JavaScript",
"bytes": "46177"
},
{
"name": "Python",
"bytes": "120676"
}
],
"symlink_target": ""
}
|
"""
A connection to a hypervisor through libvirt.
Supports KVM, LXC, QEMU, UML, XEN and Parallels.
"""
import collections
from collections import deque
import contextlib
import errno
import functools
import glob
import itertools
import mmap
import operator
import os
import shutil
import tempfile
import time
import uuid
import eventlet
from eventlet import greenthread
from eventlet import tpool
from lxml import etree
from os_brick import exception as brick_exception
from os_brick.initiator import connector
from oslo_concurrency import processutils
from oslo_log import log as logging
from oslo_serialization import jsonutils
from oslo_service import loopingcall
from oslo_utils import excutils
from oslo_utils import fileutils
from oslo_utils import importutils
from oslo_utils import strutils
from oslo_utils import timeutils
from oslo_utils import units
import six
from six.moves import range
from nova.api.metadata import base as instance_metadata
from nova import block_device
from nova.compute import power_state
from nova.compute import task_states
from nova.compute import utils as compute_utils
import nova.conf
from nova.console import serial as serial_console
from nova.console import type as ctype
from nova import context as nova_context
from nova import exception
from nova.i18n import _
from nova.i18n import _LE
from nova.i18n import _LI
from nova.i18n import _LW
from nova import image
from nova.network import model as network_model
from nova import objects
from nova.objects import fields
from nova.objects import migrate_data as migrate_data_obj
from nova.pci import manager as pci_manager
from nova.pci import utils as pci_utils
from nova import utils
from nova import version
from nova.virt import block_device as driver_block_device
from nova.virt import configdrive
from nova.virt import diagnostics
from nova.virt.disk import api as disk_api
from nova.virt.disk.vfs import guestfs
from nova.virt import driver
from nova.virt import firewall
from nova.virt import hardware
from nova.virt.image import model as imgmodel
from nova.virt import images
from nova.virt.libvirt import blockinfo
from nova.virt.libvirt import config as vconfig
from nova.virt.libvirt import firewall as libvirt_firewall
from nova.virt.libvirt import guest as libvirt_guest
from nova.virt.libvirt import host
from nova.virt.libvirt import imagebackend
from nova.virt.libvirt import imagecache
from nova.virt.libvirt import instancejobtracker
from nova.virt.libvirt import migration as libvirt_migrate
from nova.virt.libvirt.storage import dmcrypt
from nova.virt.libvirt.storage import lvm
from nova.virt.libvirt.storage import rbd_utils
from nova.virt.libvirt import utils as libvirt_utils
from nova.virt.libvirt import vif as libvirt_vif
from nova.virt.libvirt.volume import remotefs
from nova.virt import netutils
from nova.volume import cinder
from nova.volume import encryptors
libvirt = None
uefi_logged = False
LOG = logging.getLogger(__name__)
CONF = nova.conf.CONF
DEFAULT_FIREWALL_DRIVER = "%s.%s" % (
libvirt_firewall.__name__,
libvirt_firewall.IptablesFirewallDriver.__name__)
DEFAULT_UEFI_LOADER_PATH = {
"x86_64": "/usr/share/OVMF/OVMF_CODE.fd",
"aarch64": "/usr/share/AAVMF/AAVMF_CODE.fd"
}
MAX_CONSOLE_BYTES = 100 * units.Ki
# The libvirt driver will prefix any disable reason codes with this string.
DISABLE_PREFIX = 'AUTO: '
# Disable reason for the service which was enabled or disabled without reason
DISABLE_REASON_UNDEFINED = None
# Guest config console string
# hon: follow this patch for virsh console support:
# https://github.com/hyunsun/nova/commit/1cef20bed1096a11f4a5d5a24168243f6e7a41bf#diff-f4019782d93a196a0d026479e6aa61b1
# CONSOLE = "console=tty0 console=ttyS0"
CONSOLE = "console=tty0 console=ttyS0,115200"
GuestNumaConfig = collections.namedtuple(
'GuestNumaConfig', ['cpuset', 'cputune', 'numaconfig', 'numatune'])
InjectionInfo = collections.namedtuple(
'InjectionInfo', ['network_info', 'files', 'admin_pass'])
libvirt_volume_drivers = [
'iscsi=nova.virt.libvirt.volume.iscsi.LibvirtISCSIVolumeDriver',
'iser=nova.virt.libvirt.volume.iser.LibvirtISERVolumeDriver',
'local=nova.virt.libvirt.volume.volume.LibvirtVolumeDriver',
'fake=nova.virt.libvirt.volume.volume.LibvirtFakeVolumeDriver',
'rbd=nova.virt.libvirt.volume.net.LibvirtNetVolumeDriver',
'sheepdog=nova.virt.libvirt.volume.net.LibvirtNetVolumeDriver',
'nfs=nova.virt.libvirt.volume.nfs.LibvirtNFSVolumeDriver',
'smbfs=nova.virt.libvirt.volume.smbfs.LibvirtSMBFSVolumeDriver',
'aoe=nova.virt.libvirt.volume.aoe.LibvirtAOEVolumeDriver',
'glusterfs='
'nova.virt.libvirt.volume.glusterfs.LibvirtGlusterfsVolumeDriver',
'fibre_channel='
'nova.virt.libvirt.volume.fibrechannel.'
'LibvirtFibreChannelVolumeDriver',
'scality=nova.virt.libvirt.volume.scality.LibvirtScalityVolumeDriver',
'gpfs=nova.virt.libvirt.volume.gpfs.LibvirtGPFSVolumeDriver',
'quobyte=nova.virt.libvirt.volume.quobyte.LibvirtQuobyteVolumeDriver',
'hgst=nova.virt.libvirt.volume.hgst.LibvirtHGSTVolumeDriver',
'scaleio=nova.virt.libvirt.volume.scaleio.LibvirtScaleIOVolumeDriver',
'disco=nova.virt.libvirt.volume.disco.LibvirtDISCOVolumeDriver',
'vzstorage='
'nova.virt.libvirt.volume.vzstorage.LibvirtVZStorageVolumeDriver',
]
def patch_tpool_proxy():
"""eventlet.tpool.Proxy doesn't work with old-style class in __str__()
or __repr__() calls. See bug #962840 for details.
We perform a monkey patch to replace those two instance methods.
"""
def str_method(self):
return str(self._obj)
def repr_method(self):
return repr(self._obj)
tpool.Proxy.__str__ = str_method
tpool.Proxy.__repr__ = repr_method
patch_tpool_proxy()
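# After patch_tpool_proxy() runs, str() and repr() on a tpool.Proxy always
# delegate to the wrapped object (the bug #962840 workaround described above),
# e.g. (illustrative):
#
#     proxied = tpool.Proxy({'cpu': 4})
#     str(proxied)    # -> "{'cpu': 4}", via the wrapped dict's __str__
#     repr(proxied)   # -> "{'cpu': 4}", via the wrapped dict's __repr__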
# For information about when MIN_LIBVIRT_VERSION and
# NEXT_MIN_LIBVIRT_VERSION can be changed, consult
#
# https://wiki.openstack.org/wiki/LibvirtDistroSupportMatrix
#
# Currently this is effectively the min version for i686/x86_64
# + KVM/QEMU, as other architectures/hypervisors require newer
# versions. Over time, this will become a common min version
# for all architectures/hypervisors, as this value rises to
# meet them.
MIN_LIBVIRT_VERSION = (1, 2, 1)
MIN_QEMU_VERSION = (1, 5, 3)
# TODO(berrange): Re-evaluate this at start of each release cycle
# to decide if we want to plan a future min version bump.
# MIN_LIBVIRT_VERSION can be updated to match this after
# NEXT_MIN_LIBVIRT_VERSION has been at a higher value for
# one cycle
NEXT_MIN_LIBVIRT_VERSION = (1, 2, 9)
NEXT_MIN_QEMU_VERSION = (2, 1, 0)
# When the above version matches/exceeds this version
# delete it & corresponding code using it
# Relative block commit & rebase (feature is detected,
# this version is only used for messaging)
MIN_LIBVIRT_BLOCKJOB_RELATIVE_VERSION = (1, 2, 7)
# Libvirt version 1.2.17 is required for successful block live migration
# of vm booted from image with attached devices
MIN_LIBVIRT_BLOCK_LM_WITH_VOLUMES_VERSION = (1, 2, 17)
# libvirt discard feature
MIN_QEMU_DISCARD_VERSION = (1, 6, 0)
# While earlier versions could support NUMA reporting and
# NUMA placement, not until 1.2.7 was there the ability
# to pin guest nodes to host nodes, so mandate that. Without
# this the scheduler cannot make guaranteed decisions, as the
# guest placement may not match what was requested
MIN_LIBVIRT_NUMA_VERSION = (1, 2, 7)
# PowerPC based hosts that support NUMA using libvirt
MIN_LIBVIRT_NUMA_VERSION_PPC = (1, 2, 19)
# Versions of libvirt with known NUMA topology issues
# See bug #1449028
BAD_LIBVIRT_NUMA_VERSIONS = [(1, 2, 9, 2)]
# While earlier versions could support hugepage backed
# guests, not until 1.2.8 was there the ability to request
# a particular huge page size. Without this the scheduler
# cannot make guaranteed decisions, as the huge page size
# used by the guest may not match what was requested
MIN_LIBVIRT_HUGEPAGE_VERSION = (1, 2, 8)
# Versions of libvirt with broken cpu pinning support. This excludes
# versions of libvirt with broken NUMA support since pinning needs
# NUMA
# See bug #1438226
BAD_LIBVIRT_CPU_POLICY_VERSIONS = [(1, 2, 10)]
# qemu 2.1 introduces support for pinning memory on host
# NUMA nodes, along with the ability to specify hugepage
# sizes per guest NUMA node
MIN_QEMU_NUMA_HUGEPAGE_VERSION = (2, 1, 0)
# fsFreeze/fsThaw requirement
MIN_LIBVIRT_FSFREEZE_VERSION = (1, 2, 5)
# UEFI booting support
MIN_LIBVIRT_UEFI_VERSION = (1, 2, 9)
# Hyper-V paravirtualized time source
MIN_LIBVIRT_HYPERV_TIMER_VERSION = (1, 2, 2)
MIN_QEMU_HYPERV_TIMER_VERSION = (2, 0, 0)
# Virtuozzo driver support
MIN_VIRTUOZZO_VERSION = (7, 0, 0)
MIN_LIBVIRT_VIRTUOZZO_VERSION = (1, 2, 12)
# Ability to set the user guest password with Qemu
MIN_LIBVIRT_SET_ADMIN_PASSWD = (1, 2, 16)
# Ability to set the user guest password with parallels
MIN_LIBVIRT_PARALLELS_SET_ADMIN_PASSWD = (2, 0, 0)
# s/390 & s/390x architectures with KVM
MIN_LIBVIRT_KVM_S390_VERSION = (1, 2, 13)
MIN_QEMU_S390_VERSION = (2, 3, 0)
# libvirt < 1.3 reported virt_functions capability
# only when VFs are enabled.
# libvirt 1.3 fix f391889f4e942e22b9ef8ecca492de05106ce41e
MIN_LIBVIRT_PF_WITH_NO_VFS_CAP_VERSION = (1, 3, 0)
# Use the "logd" backend for handling stdout/stderr from QEMU processes.
MIN_LIBVIRT_VIRTLOGD = (1, 3, 3)
MIN_QEMU_VIRTLOGD = (2, 7, 0)
# ppc64/ppc64le architectures with KVM
# NOTE(rfolco): Same levels for Libvirt/Qemu on Big Endian and Little
# Endian giving the nuance around guest vs host architectures
MIN_LIBVIRT_KVM_PPC64_VERSION = (1, 2, 12)
MIN_QEMU_PPC64_VERSION = (2, 1, 0)
# Auto converge support
MIN_LIBVIRT_AUTO_CONVERGE_VERSION = (1, 2, 3)
MIN_QEMU_AUTO_CONVERGE = (1, 6, 0)
# Names of the types that do not get compressed during migration
NO_COMPRESSION_TYPES = ('qcow2',)
# number of serial console limit
QEMU_MAX_SERIAL_PORTS = 4
# Qemu supports 4 serial consoles, we remove 1 because of the PTY one defined
ALLOWED_QEMU_SERIAL_PORTS = QEMU_MAX_SERIAL_PORTS - 1
# realtime support
MIN_LIBVIRT_REALTIME_VERSION = (1, 2, 13)
# libvirt postcopy support
MIN_LIBVIRT_POSTCOPY_VERSION = (1, 3, 3)
# qemu postcopy support
MIN_QEMU_POSTCOPY_VERSION = (2, 5, 0)
MIN_LIBVIRT_OTHER_ARCH = {
fields.Architecture.S390: MIN_LIBVIRT_KVM_S390_VERSION,
fields.Architecture.S390X: MIN_LIBVIRT_KVM_S390_VERSION,
fields.Architecture.PPC: MIN_LIBVIRT_KVM_PPC64_VERSION,
fields.Architecture.PPC64: MIN_LIBVIRT_KVM_PPC64_VERSION,
fields.Architecture.PPC64LE: MIN_LIBVIRT_KVM_PPC64_VERSION,
}
MIN_QEMU_OTHER_ARCH = {
fields.Architecture.S390: MIN_QEMU_S390_VERSION,
fields.Architecture.S390X: MIN_QEMU_S390_VERSION,
fields.Architecture.PPC: MIN_QEMU_PPC64_VERSION,
fields.Architecture.PPC64: MIN_QEMU_PPC64_VERSION,
fields.Architecture.PPC64LE: MIN_QEMU_PPC64_VERSION,
}
# perf events support
MIN_LIBVIRT_PERF_VERSION = (2, 0, 0)
LIBVIRT_PERF_EVENT_PREFIX = 'VIR_PERF_PARAM_'
PERF_EVENTS_CPU_FLAG_MAPPING = {'cmt': 'cmt',
'mbml': 'mbm_local',
'mbmt': 'mbm_total',
}
class LibvirtDriver(driver.ComputeDriver):
capabilities = {
"has_imagecache": True,
"supports_recreate": True,
"supports_migrate_to_same_host": False,
"supports_attach_interface": True,
"supports_device_tagging": True,
}
def __init__(self, virtapi, read_only=False):
super(LibvirtDriver, self).__init__(virtapi)
global libvirt
if libvirt is None:
libvirt = importutils.import_module('libvirt')
libvirt_migrate.libvirt = libvirt
self._host = host.Host(self._uri(), read_only,
lifecycle_event_handler=self.emit_event,
conn_event_handler=self._handle_conn_event)
self._initiator = None
self._fc_wwnns = None
self._fc_wwpns = None
self._caps = None
self._supported_perf_events = []
self.firewall_driver = firewall.load_driver(
DEFAULT_FIREWALL_DRIVER,
host=self._host)
self.vif_driver = libvirt_vif.LibvirtGenericVIFDriver()
# TODO(mriedem): Long-term we should load up the volume drivers on
# demand as needed rather than doing this on startup, as there might
# be unsupported volume drivers in this list based on the underlying
# platform.
self.volume_drivers = self._get_volume_drivers()
self._disk_cachemode = None
self.image_cache_manager = imagecache.ImageCacheManager()
self.image_backend = imagebackend.Backend(CONF.use_cow_images)
self.disk_cachemodes = {}
self.valid_cachemodes = ["default",
"none",
"writethrough",
"writeback",
"directsync",
"unsafe",
]
self._conn_supports_start_paused = CONF.libvirt.virt_type in ('kvm',
'qemu')
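        # The option parsed below is a list of "disk_type=cache_mode" strings,
        # e.g. (illustrative values) disk_cachemodes = ["file=directsync",
        # "block=none"]; entries with an unknown cache mode are warned about
        # and skipped.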
for mode_str in CONF.libvirt.disk_cachemodes:
disk_type, sep, cache_mode = mode_str.partition('=')
if cache_mode not in self.valid_cachemodes:
LOG.warning(_LW('Invalid cachemode %(cache_mode)s specified '
'for disk type %(disk_type)s.'),
{'cache_mode': cache_mode, 'disk_type': disk_type})
continue
self.disk_cachemodes[disk_type] = cache_mode
self._volume_api = cinder.API()
self._image_api = image.API()
sysinfo_serial_funcs = {
'none': lambda: None,
'hardware': self._get_host_sysinfo_serial_hardware,
'os': self._get_host_sysinfo_serial_os,
'auto': self._get_host_sysinfo_serial_auto,
}
self._sysinfo_serial_func = sysinfo_serial_funcs.get(
CONF.libvirt.sysinfo_serial)
self.job_tracker = instancejobtracker.InstanceJobTracker()
self._remotefs = remotefs.RemoteFilesystem()
self._live_migration_flags = self._block_migration_flags = 0
self.active_migrations = {}
# Compute reserved hugepages from conf file at the very
# beginning to ensure any syntax error will be reported and
# avoid any re-calculation when computing resources.
self._reserved_hugepages = hardware.numa_get_reserved_huge_pages()
def _get_volume_drivers(self):
driver_registry = dict()
for driver_str in libvirt_volume_drivers:
driver_type, _sep, driver = driver_str.partition('=')
driver_class = importutils.import_class(driver)
try:
driver_registry[driver_type] = driver_class(self._host)
except brick_exception.InvalidConnectorProtocol:
LOG.debug('Unable to load volume driver %s. It is not '
'supported on this host.', driver)
return driver_registry
@property
def disk_cachemode(self):
if self._disk_cachemode is None:
# We prefer 'none' for consistent performance, host crash
# safety & migration correctness by avoiding host page cache.
# Some filesystems (eg GlusterFS via FUSE) don't support
# O_DIRECT though. For those we fallback to 'writethrough'
# which gives host crash safety, and is safe for migration
# provided the filesystem is cache coherent (cluster filesystems
# typically are, but things like NFS are not).
self._disk_cachemode = "none"
if not self._supports_direct_io(CONF.instances_path):
self._disk_cachemode = "writethrough"
return self._disk_cachemode
def _set_cache_mode(self, conf):
"""Set cache mode on LibvirtConfigGuestDisk object."""
try:
source_type = conf.source_type
driver_cache = conf.driver_cache
except AttributeError:
return
cache_mode = self.disk_cachemodes.get(source_type,
driver_cache)
conf.driver_cache = cache_mode
def _do_quality_warnings(self):
"""Warn about untested driver configurations.
This will log a warning message about untested driver or host arch
configurations to indicate to administrators that the quality is
unknown. Currently, only qemu or kvm on intel 32- or 64-bit systems
is tested upstream.
"""
caps = self._host.get_capabilities()
hostarch = caps.host.cpu.arch
if (CONF.libvirt.virt_type not in ('qemu', 'kvm') or
hostarch not in (fields.Architecture.I686,
fields.Architecture.X86_64)):
LOG.warning(_LW('The libvirt driver is not tested on '
'%(type)s/%(arch)s by the OpenStack project and '
'thus its quality can not be ensured. For more '
'information, see: http://docs.openstack.org/'
'developer/nova/support-matrix.html'),
{'type': CONF.libvirt.virt_type, 'arch': hostarch})
def _handle_conn_event(self, enabled, reason):
LOG.info(_LI("Connection event '%(enabled)d' reason '%(reason)s'"),
{'enabled': enabled, 'reason': reason})
self._set_host_enabled(enabled, reason)
def _version_to_string(self, version):
return '.'.join([str(x) for x in version])
def init_host(self, host):
self._host.initialize()
self._do_quality_warnings()
self._parse_migration_flags()
self._supported_perf_events = self._get_supported_perf_events()
if (CONF.libvirt.virt_type == 'lxc' and
not (CONF.libvirt.uid_maps and CONF.libvirt.gid_maps)):
LOG.warning(_LW("Running libvirt-lxc without user namespaces is "
"dangerous. Containers spawned by Nova will be run "
"as the host's root user. It is highly suggested "
"that user namespaces be used in a public or "
"multi-tenant environment."))
# Stop libguestfs using KVM unless we're also configured
# to use this. This solves problem where people need to
# stop Nova use of KVM because nested-virt is broken
if CONF.libvirt.virt_type != "kvm":
guestfs.force_tcg()
if not self._host.has_min_version(MIN_LIBVIRT_VERSION):
raise exception.InternalError(
_('Nova requires libvirt version %s or greater.') %
self._version_to_string(MIN_LIBVIRT_VERSION))
if (CONF.libvirt.virt_type in ("qemu", "kvm") and
not self._host.has_min_version(hv_ver=MIN_QEMU_VERSION)):
raise exception.InternalError(
_('Nova requires QEMU version %s or greater.') %
self._version_to_string(MIN_QEMU_VERSION))
if CONF.libvirt.virt_type == 'parallels':
if not self._host.has_min_version(hv_ver=MIN_VIRTUOZZO_VERSION):
raise exception.InternalError(
_('Nova requires Virtuozzo version %s or greater.') %
self._version_to_string(MIN_VIRTUOZZO_VERSION))
if not self._host.has_min_version(MIN_LIBVIRT_VIRTUOZZO_VERSION):
raise exception.InternalError(
_('Running Nova with parallels virt_type requires '
'libvirt version %s') %
self._version_to_string(MIN_LIBVIRT_VIRTUOZZO_VERSION))
# Give the cloud admin a heads up if we are intending to
# change the MIN_LIBVIRT_VERSION in the next release.
if not self._host.has_min_version(NEXT_MIN_LIBVIRT_VERSION):
LOG.warning(_LW('Running Nova with a libvirt version less than '
'%(version)s is deprecated. The required minimum '
'version of libvirt will be raised to %(version)s '
'in the next release.'),
{'version': self._version_to_string(
NEXT_MIN_LIBVIRT_VERSION)})
if (CONF.libvirt.virt_type in ("qemu", "kvm") and
not self._host.has_min_version(hv_ver=NEXT_MIN_QEMU_VERSION)):
LOG.warning(_LW('Running Nova with a QEMU version less than '
'%(version)s is deprecated. The required minimum '
'version of QEMU will be raised to %(version)s '
'in the next release.'),
{'version': self._version_to_string(
NEXT_MIN_QEMU_VERSION)})
kvm_arch = fields.Architecture.from_host()
if (CONF.libvirt.virt_type in ('kvm', 'qemu') and
kvm_arch in MIN_LIBVIRT_OTHER_ARCH and
not self._host.has_min_version(
MIN_LIBVIRT_OTHER_ARCH.get(kvm_arch),
MIN_QEMU_OTHER_ARCH.get(kvm_arch))):
raise exception.InternalError(
_('Running Nova with qemu/kvm virt_type on %(arch)s '
'requires libvirt version %(libvirt_ver)s and '
'qemu version %(qemu_ver)s, or greater') %
{'arch': kvm_arch,
'libvirt_ver': self._version_to_string(
MIN_LIBVIRT_OTHER_ARCH.get(kvm_arch)),
'qemu_ver': self._version_to_string(
MIN_QEMU_OTHER_ARCH.get(kvm_arch))})
def _prepare_migration_flags(self):
migration_flags = 0
migration_flags |= libvirt.VIR_MIGRATE_LIVE
# Adding p2p flag only if xen is not in use, because xen does not
# support p2p migrations
if CONF.libvirt.virt_type != 'xen':
migration_flags |= libvirt.VIR_MIGRATE_PEER2PEER
# Adding VIR_MIGRATE_UNDEFINE_SOURCE because, without it, migrated
# instance will remain defined on the source host
migration_flags |= libvirt.VIR_MIGRATE_UNDEFINE_SOURCE
# Adding VIR_MIGRATE_PERSIST_DEST to persist the VM on the
# destination host
migration_flags |= libvirt.VIR_MIGRATE_PERSIST_DEST
live_migration_flags = block_migration_flags = migration_flags
# Adding VIR_MIGRATE_NON_SHARED_INC, otherwise all block-migrations
# will be live-migrations instead
block_migration_flags |= libvirt.VIR_MIGRATE_NON_SHARED_INC
return (live_migration_flags, block_migration_flags)
def _handle_live_migration_tunnelled(self, migration_flags):
if (CONF.libvirt.live_migration_tunnelled is None or
CONF.libvirt.live_migration_tunnelled):
migration_flags |= libvirt.VIR_MIGRATE_TUNNELLED
return migration_flags
def _is_post_copy_available(self):
if self._host.has_min_version(lv_ver=MIN_LIBVIRT_POSTCOPY_VERSION,
hv_ver=MIN_QEMU_POSTCOPY_VERSION):
return True
return False
def _is_virtlogd_available(self):
return self._host.has_min_version(MIN_LIBVIRT_VIRTLOGD,
MIN_QEMU_VIRTLOGD)
def _handle_live_migration_post_copy(self, migration_flags):
if CONF.libvirt.live_migration_permit_post_copy:
if self._is_post_copy_available():
migration_flags |= libvirt.VIR_MIGRATE_POSTCOPY
else:
LOG.info(_LI('The live_migration_permit_post_copy is set '
'to True, but it is not supported.'))
return migration_flags
def _handle_live_migration_auto_converge(self, migration_flags):
if self._host.has_min_version(lv_ver=MIN_LIBVIRT_AUTO_CONVERGE_VERSION,
hv_ver=MIN_QEMU_AUTO_CONVERGE):
if (self._is_post_copy_available() and
(migration_flags & libvirt.VIR_MIGRATE_POSTCOPY) != 0):
LOG.info(_LI('The live_migration_permit_post_copy is set to '
'True and post copy live migration is available '
'so auto-converge will not be in use.'))
elif CONF.libvirt.live_migration_permit_auto_converge:
migration_flags |= libvirt.VIR_MIGRATE_AUTO_CONVERGE
elif CONF.libvirt.live_migration_permit_auto_converge:
LOG.info(_LI('The live_migration_permit_auto_converge is set '
'to True, but it is not supported.'))
return migration_flags
def _parse_migration_flags(self):
(live_migration_flags,
block_migration_flags) = self._prepare_migration_flags()
live_migration_flags = self._handle_live_migration_tunnelled(
live_migration_flags)
block_migration_flags = self._handle_live_migration_tunnelled(
block_migration_flags)
live_migration_flags = self._handle_live_migration_post_copy(
live_migration_flags)
block_migration_flags = self._handle_live_migration_post_copy(
block_migration_flags)
live_migration_flags = self._handle_live_migration_auto_converge(
live_migration_flags)
block_migration_flags = self._handle_live_migration_auto_converge(
block_migration_flags)
self._live_migration_flags = live_migration_flags
self._block_migration_flags = block_migration_flags
# TODO(sahid): This method is targeted for removal when the tests
# have been updated to avoid its use
#
# All libvirt API calls on the libvirt.Connect object should be
# encapsulated by methods on the nova.virt.libvirt.host.Host
# object, rather than directly invoking the libvirt APIs. The goal
# is to avoid a direct dependency on the libvirt API from the
# driver.py file.
def _get_connection(self):
return self._host.get_connection()
_conn = property(_get_connection)
@staticmethod
def _uri():
if CONF.libvirt.virt_type == 'uml':
uri = CONF.libvirt.connection_uri or 'uml:///system'
elif CONF.libvirt.virt_type == 'xen':
uri = CONF.libvirt.connection_uri or 'xen:///'
elif CONF.libvirt.virt_type == 'lxc':
uri = CONF.libvirt.connection_uri or 'lxc:///'
elif CONF.libvirt.virt_type == 'parallels':
uri = CONF.libvirt.connection_uri or 'parallels:///system'
else:
uri = CONF.libvirt.connection_uri or 'qemu:///system'
return uri
@staticmethod
def _live_migration_uri(dest):
uris = {
'kvm': 'qemu+%s://%s/system',
'qemu': 'qemu+%s://%s/system',
'xen': 'xenmigr://%s/system',
'parallels': 'parallels+tcp://%s/system',
}
virt_type = CONF.libvirt.virt_type
# TODO(pkoniszewski): Remove fetching live_migration_uri in Pike
uri = CONF.libvirt.live_migration_uri
if uri:
return uri % dest
uri = uris.get(virt_type)
if uri is None:
raise exception.LiveMigrationURINotAvailable(virt_type=virt_type)
str_format = (dest,)
if virt_type in ('kvm', 'qemu'):
scheme = CONF.libvirt.live_migration_scheme or 'tcp'
str_format = (scheme, dest)
return uris.get(virt_type) % str_format
@staticmethod
def _migrate_uri(dest):
uri = None
        # Only QEMU live migration supports the migrate-uri parameter
virt_type = CONF.libvirt.virt_type
if virt_type in ('qemu', 'kvm'):
            # QEMU accepts two schemes: tcp and rdma. By default
            # libvirt builds the URI using the remote hostname and the
            # tcp scheme.
uri = 'tcp://%s' % dest
        # Because dest might be of type unicode, we might return a unicode
        # value here as well, which the libvirt python binding does not
        # accept when Python 2.7 is in use, so convert it explicitly back to
        # str. When Python 3.x is in use, the binding accepts unicode, so the
        # str(uri) call is a harmless no-op.
return uri and str(uri)
def instance_exists(self, instance):
"""Efficient override of base instance_exists method."""
try:
self._host.get_guest(instance)
return True
except (exception.InternalError, exception.InstanceNotFound):
return False
def list_instances(self):
names = []
for guest in self._host.list_guests(only_running=False):
names.append(guest.name)
return names
def list_instance_uuids(self):
uuids = []
for guest in self._host.list_guests(only_running=False):
uuids.append(guest.uuid)
return uuids
def plug_vifs(self, instance, network_info):
"""Plug VIFs into networks."""
for vif in network_info:
self.vif_driver.plug(instance, vif)
def _unplug_vifs(self, instance, network_info, ignore_errors):
"""Unplug VIFs from networks."""
for vif in network_info:
try:
self.vif_driver.unplug(instance, vif)
except exception.NovaException:
if not ignore_errors:
raise
def unplug_vifs(self, instance, network_info):
self._unplug_vifs(instance, network_info, False)
def _teardown_container(self, instance):
inst_path = libvirt_utils.get_instance_path(instance)
container_dir = os.path.join(inst_path, 'rootfs')
rootfs_dev = instance.system_metadata.get('rootfs_device_name')
LOG.debug('Attempting to teardown container at path %(dir)s with '
'root device: %(rootfs_dev)s',
{'dir': container_dir, 'rootfs_dev': rootfs_dev},
instance=instance)
disk_api.teardown_container(container_dir, rootfs_dev)
def _destroy(self, instance, attempt=1):
try:
guest = self._host.get_guest(instance)
if CONF.serial_console.enabled:
# This method is called for several events: destroy,
# rebuild, hard-reboot, power-off - For all of these
# events we want to release the serial ports acquired
# for the guest before destroying it.
serials = self._get_serial_ports_from_guest(guest)
for hostname, port in serials:
serial_console.release_port(host=hostname, port=port)
except exception.InstanceNotFound:
guest = None
        # If the instance is already terminated, we're still happy.
        # Otherwise, destroy it.
old_domid = -1
if guest is not None:
try:
old_domid = guest.id
guest.poweroff()
except libvirt.libvirtError as e:
is_okay = False
errcode = e.get_error_code()
if errcode == libvirt.VIR_ERR_NO_DOMAIN:
# Domain already gone. This can safely be ignored.
is_okay = True
elif errcode == libvirt.VIR_ERR_OPERATION_INVALID:
# If the instance is already shut off, we get this:
# Code=55 Error=Requested operation is not valid:
# domain is not running
state = guest.get_power_state(self._host)
if state == power_state.SHUTDOWN:
is_okay = True
elif errcode == libvirt.VIR_ERR_INTERNAL_ERROR:
errmsg = e.get_error_message()
if (CONF.libvirt.virt_type == 'lxc' and
errmsg == 'internal error: '
'Some processes refused to die'):
# Some processes in the container didn't die
# fast enough for libvirt. The container will
# eventually die. For now, move on and let
# the wait_for_destroy logic take over.
is_okay = True
elif errcode == libvirt.VIR_ERR_OPERATION_TIMEOUT:
LOG.warning(_LW("Cannot destroy instance, operation time "
"out"),
instance=instance)
reason = _("operation time out")
raise exception.InstancePowerOffFailure(reason=reason)
elif errcode == libvirt.VIR_ERR_SYSTEM_ERROR:
if e.get_int1() == errno.EBUSY:
# NOTE(danpb): When libvirt kills a process it sends it
# SIGTERM first and waits 10 seconds. If it hasn't gone
# it sends SIGKILL and waits another 5 seconds. If it
# still hasn't gone then you get this EBUSY error.
# Usually when a QEMU process fails to go away upon
# SIGKILL it is because it is stuck in an
# uninterruptible kernel sleep waiting on I/O from
# some non-responsive server.
# Given the CPU load of the gate tests though, it is
# conceivable that the 15 second timeout is too short,
# particularly if the VM running tempest has a high
                    # steal time from the cloud host, i.e. 15 wallclock
                    # seconds may have passed, but the VM might only have
                    # had a few seconds of scheduled run time.
LOG.warning(_LW('Error from libvirt during destroy. '
'Code=%(errcode)s Error=%(e)s; '
'attempt %(attempt)d of 3'),
{'errcode': errcode, 'e': e,
'attempt': attempt},
instance=instance)
with excutils.save_and_reraise_exception() as ctxt:
# Try up to 3 times before giving up.
if attempt < 3:
ctxt.reraise = False
self._destroy(instance, attempt + 1)
return
if not is_okay:
with excutils.save_and_reraise_exception():
LOG.error(_LE('Error from libvirt during destroy. '
'Code=%(errcode)s Error=%(e)s'),
{'errcode': errcode, 'e': e},
instance=instance)
def _wait_for_destroy(expected_domid):
"""Called at an interval until the VM is gone."""
# NOTE(vish): If the instance disappears during the destroy
# we ignore it so the cleanup can still be
# attempted because we would prefer destroy to
# never fail.
try:
dom_info = self.get_info(instance)
state = dom_info.state
new_domid = dom_info.id
except exception.InstanceNotFound:
LOG.debug("During wait destroy, instance disappeared.",
instance=instance)
state = power_state.SHUTDOWN
if state == power_state.SHUTDOWN:
LOG.info(_LI("Instance destroyed successfully."),
instance=instance)
raise loopingcall.LoopingCallDone()
# NOTE(wangpan): If the instance was booted again after destroy,
# this may be an endless loop, so check the id of
# domain here, if it changed and the instance is
# still running, we should destroy it again.
# see https://bugs.launchpad.net/nova/+bug/1111213 for more details
if new_domid != expected_domid:
LOG.info(_LI("Instance may be started again."),
instance=instance)
kwargs['is_running'] = True
raise loopingcall.LoopingCallDone()
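        # The nested function above flips this flag if the domain was
        # restarted while we were waiting for it to be destroyed.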
kwargs = {'is_running': False}
timer = loopingcall.FixedIntervalLoopingCall(_wait_for_destroy,
old_domid)
timer.start(interval=0.5).wait()
if kwargs['is_running']:
LOG.info(_LI("Going to destroy instance again."),
instance=instance)
self._destroy(instance)
else:
# NOTE(GuanQiang): teardown container to avoid resource leak
if CONF.libvirt.virt_type == 'lxc':
self._teardown_container(instance)
def destroy(self, context, instance, network_info, block_device_info=None,
destroy_disks=True, migrate_data=None):
self._destroy(instance)
self.cleanup(context, instance, network_info, block_device_info,
destroy_disks, migrate_data)
def _undefine_domain(self, instance):
try:
guest = self._host.get_guest(instance)
try:
support_uefi = self._has_uefi_support()
guest.delete_configuration(support_uefi)
except libvirt.libvirtError as e:
with excutils.save_and_reraise_exception():
errcode = e.get_error_code()
LOG.error(_LE('Error from libvirt during undefine. '
'Code=%(errcode)s Error=%(e)s'),
{'errcode': errcode, 'e': e}, instance=instance)
except exception.InstanceNotFound:
pass
def cleanup(self, context, instance, network_info, block_device_info=None,
destroy_disks=True, migrate_data=None, destroy_vifs=True):
if destroy_vifs:
self._unplug_vifs(instance, network_info, True)
retry = True
while retry:
try:
self.unfilter_instance(instance, network_info)
except libvirt.libvirtError as e:
try:
state = self.get_info(instance).state
except exception.InstanceNotFound:
state = power_state.SHUTDOWN
if state != power_state.SHUTDOWN:
LOG.warning(_LW("Instance may be still running, destroy "
"it again."), instance=instance)
self._destroy(instance)
else:
retry = False
errcode = e.get_error_code()
LOG.exception(_LE('Error from libvirt during unfilter. '
'Code=%(errcode)s Error=%(e)s'),
{'errcode': errcode, 'e': e},
instance=instance)
reason = "Error unfiltering instance."
raise exception.InstanceTerminationFailure(reason=reason)
except Exception:
retry = False
raise
else:
retry = False
        # FIXME(wangpan): if the instance is booted again here, for example
        #                 by a soft reboot, it will become "running deleted";
        #                 should we check and destroy it at the end of this
        #                 method?
# NOTE(vish): we disconnect from volumes regardless
block_device_mapping = driver.block_device_info_get_mapping(
block_device_info)
for vol in block_device_mapping:
connection_info = vol['connection_info']
disk_dev = vol['mount_device']
if disk_dev is not None:
disk_dev = disk_dev.rpartition("/")[2]
if ('data' in connection_info and
'volume_id' in connection_info['data']):
volume_id = connection_info['data']['volume_id']
encryption = encryptors.get_encryption_metadata(
context, self._volume_api, volume_id, connection_info)
if encryption:
# The volume must be detached from the VM before
# disconnecting it from its encryptor. Otherwise, the
# encryptor may report that the volume is still in use.
encryptor = self._get_volume_encryptor(connection_info,
encryption)
encryptor.detach_volume(**encryption)
try:
self._disconnect_volume(connection_info, disk_dev)
except Exception as exc:
with excutils.save_and_reraise_exception() as ctxt:
if destroy_disks:
# Don't block on Volume errors if we're trying to
# delete the instance as we may be partially created
# or deleted
ctxt.reraise = False
LOG.warning(
_LW("Ignoring Volume Error on vol %(vol_id)s "
"during delete %(exc)s"),
{'vol_id': vol.get('volume_id'), 'exc': exc},
instance=instance)
if destroy_disks:
# NOTE(haomai): destroy volumes if needed
if CONF.libvirt.images_type == 'lvm':
self._cleanup_lvm(instance, block_device_info)
if CONF.libvirt.images_type == 'rbd':
self._cleanup_rbd(instance)
is_shared_block_storage = False
if migrate_data and 'is_shared_block_storage' in migrate_data:
is_shared_block_storage = migrate_data.is_shared_block_storage
if destroy_disks or is_shared_block_storage:
attempts = int(instance.system_metadata.get('clean_attempts',
'0'))
success = self.delete_instance_files(instance)
# NOTE(mriedem): This is used in the _run_pending_deletes periodic
# task in the compute manager. The tight coupling is not great...
instance.system_metadata['clean_attempts'] = str(attempts + 1)
if success:
instance.cleaned = True
instance.save()
self._undefine_domain(instance)
def _detach_encrypted_volumes(self, instance, block_device_info):
"""Detaches encrypted volumes attached to instance."""
disks = jsonutils.loads(self.get_instance_disk_info(instance,
block_device_info))
encrypted_volumes = filter(dmcrypt.is_encrypted,
[disk['path'] for disk in disks])
for path in encrypted_volumes:
dmcrypt.delete_volume(path)
def _get_serial_ports_from_guest(self, guest, mode=None):
"""Returns an iterator over serial port(s) configured on guest.
:param mode: Should be a value in (None, bind, connect)
"""
xml = guest.get_xml_desc()
tree = etree.fromstring(xml)
# The 'serial' device is the base for x86 platforms. Other platforms
# (e.g. kvm on system z = S390X) can only use 'console' devices.
xpath_mode = "[@mode='%s']" % mode if mode else ""
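        # e.g. "./devices/serial[@type='tcp']/source[@mode='bind']" when a
        # mode is requested.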
serial_tcp = "./devices/serial[@type='tcp']/source" + xpath_mode
console_tcp = "./devices/console[@type='tcp']/source" + xpath_mode
tcp_devices = tree.findall(serial_tcp)
if len(tcp_devices) == 0:
tcp_devices = tree.findall(console_tcp)
for source in tcp_devices:
yield (source.get("host"), int(source.get("service")))
@staticmethod
def _get_rbd_driver():
return rbd_utils.RBDDriver(
pool=CONF.libvirt.images_rbd_pool,
ceph_conf=CONF.libvirt.images_rbd_ceph_conf,
rbd_user=CONF.libvirt.rbd_user)
def _cleanup_rbd(self, instance):
# NOTE(nic): On revert_resize, the cleanup steps for the root
# volume are handled with an "rbd snap rollback" command,
# and none of this is needed (and is, in fact, harmful) so
# filter out non-ephemerals from the list
if instance.task_state == task_states.RESIZE_REVERTING:
filter_fn = lambda disk: (disk.startswith(instance.uuid) and
disk.endswith('disk.local'))
else:
filter_fn = lambda disk: disk.startswith(instance.uuid)
LibvirtDriver._get_rbd_driver().cleanup_volumes(filter_fn)
def _cleanup_lvm(self, instance, block_device_info):
"""Delete all LVM disks for given instance object."""
if instance.get('ephemeral_key_uuid') is not None:
self._detach_encrypted_volumes(instance, block_device_info)
disks = self._lvm_disks(instance)
if disks:
lvm.remove_volumes(disks)
def _lvm_disks(self, instance):
"""Returns all LVM disks for given instance object."""
if CONF.libvirt.images_volume_group:
vg = os.path.join('/dev', CONF.libvirt.images_volume_group)
if not os.path.exists(vg):
return []
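            # LVM disks belonging to the instance are prefixed with
            # "<instance uuid>_".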
pattern = '%s_' % instance.uuid
def belongs_to_instance(disk):
return disk.startswith(pattern)
def fullpath(name):
return os.path.join(vg, name)
logical_volumes = lvm.list_volumes(vg)
disks = [fullpath(disk) for disk in logical_volumes
if belongs_to_instance(disk)]
return disks
return []
def get_volume_connector(self, instance):
root_helper = utils.get_root_helper()
return connector.get_connector_properties(
root_helper, CONF.my_block_storage_ip,
CONF.libvirt.volume_use_multipath,
enforce_multipath=True,
host=CONF.host)
def _cleanup_resize(self, instance, network_info):
inst_base = libvirt_utils.get_instance_path(instance)
target = inst_base + '_resize'
if os.path.exists(target):
# Deletion can fail over NFS, so retry the deletion as required.
            # Set the maximum number of attempts to 5; in most cases the
            # directory is removed on the second attempt.
utils.execute('rm', '-rf', target, delay_on_retry=True,
attempts=5)
root_disk = self.image_backend.by_name(instance, 'disk')
# TODO(nic): Set ignore_errors=False in a future release.
# It is set to True here to avoid any upgrade issues surrounding
# instances being in pending resize state when the software is updated;
# in that case there will be no snapshot to remove. Once it can be
# reasonably assumed that no such instances exist in the wild
# anymore, it should be set back to False (the default) so it will
# throw errors, like it should.
if root_disk.exists():
root_disk.remove_snap(libvirt_utils.RESIZE_SNAPSHOT_NAME,
ignore_errors=True)
        # NOTE(mjozefcz):
        # self.image_backend.image for some backends recreates the instance
        # directory and the disk.info file - remove them here if they still
        # exist.
if os.path.exists(inst_base) and not root_disk.exists():
try:
shutil.rmtree(inst_base)
except OSError as e:
if e.errno != errno.ENOENT:
raise
if instance.host != CONF.host:
self._undefine_domain(instance)
self.unplug_vifs(instance, network_info)
self.unfilter_instance(instance, network_info)
def _get_volume_driver(self, connection_info):
driver_type = connection_info.get('driver_volume_type')
if driver_type not in self.volume_drivers:
raise exception.VolumeDriverNotFound(driver_type=driver_type)
return self.volume_drivers[driver_type]
def _connect_volume(self, connection_info, disk_info):
vol_driver = self._get_volume_driver(connection_info)
vol_driver.connect_volume(connection_info, disk_info)
def _disconnect_volume(self, connection_info, disk_dev):
vol_driver = self._get_volume_driver(connection_info)
vol_driver.disconnect_volume(connection_info, disk_dev)
def _get_volume_config(self, connection_info, disk_info):
vol_driver = self._get_volume_driver(connection_info)
return vol_driver.get_config(connection_info, disk_info)
def _get_volume_encryptor(self, connection_info, encryption):
encryptor = encryptors.get_volume_encryptor(connection_info,
**encryption)
return encryptor
def _check_discard_for_attach_volume(self, conf, instance):
"""Perform some checks for volumes configured for discard support.
If discard is configured for the volume, and the guest is using a
configuration known to not work, we will log a message explaining
the reason why.
"""
if conf.driver_discard == 'unmap' and conf.target_bus == 'virtio':
LOG.debug('Attempting to attach volume %(id)s with discard '
'support enabled to an instance using an '
'unsupported configuration. target_bus = '
'%(bus)s. Trim commands will not be issued to '
'the storage device.',
{'bus': conf.target_bus,
'id': conf.serial},
instance=instance)
def attach_volume(self, context, connection_info, instance, mountpoint,
disk_bus=None, device_type=None, encryption=None):
guest = self._host.get_guest(instance)
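        # e.g. mountpoint "/dev/vdb" -> device name "vdb"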
disk_dev = mountpoint.rpartition("/")[2]
bdm = {
'device_name': disk_dev,
'disk_bus': disk_bus,
'device_type': device_type}
# Note(cfb): If the volume has a custom block size, check that
        #            we are using QEMU/KVM and libvirt >= 0.10.2. The
# presence of a block size is considered mandatory by
# cinder so we fail if we can't honor the request.
data = {}
if ('data' in connection_info):
data = connection_info['data']
if ('logical_block_size' in data or 'physical_block_size' in data):
if ((CONF.libvirt.virt_type != "kvm" and
CONF.libvirt.virt_type != "qemu")):
msg = _("Volume sets block size, but the current "
"libvirt hypervisor '%s' does not support custom "
"block size") % CONF.libvirt.virt_type
raise exception.InvalidHypervisorType(msg)
disk_info = blockinfo.get_info_from_bdm(
instance, CONF.libvirt.virt_type, instance.image_meta, bdm)
self._connect_volume(connection_info, disk_info)
conf = self._get_volume_config(connection_info, disk_info)
self._set_cache_mode(conf)
self._check_discard_for_attach_volume(conf, instance)
try:
state = guest.get_power_state(self._host)
live = state in (power_state.RUNNING, power_state.PAUSED)
if encryption:
encryptor = self._get_volume_encryptor(connection_info,
encryption)
encryptor.attach_volume(context, **encryption)
guest.attach_device(conf, persistent=True, live=live)
except Exception as ex:
LOG.exception(_LE('Failed to attach volume at mountpoint: %s'),
mountpoint, instance=instance)
if isinstance(ex, libvirt.libvirtError):
errcode = ex.get_error_code()
if errcode == libvirt.VIR_ERR_OPERATION_FAILED:
self._disconnect_volume(connection_info, disk_dev)
raise exception.DeviceIsBusy(device=disk_dev)
with excutils.save_and_reraise_exception():
self._disconnect_volume(connection_info, disk_dev)
def _swap_volume(self, guest, disk_path, conf, resize_to):
"""Swap existing disk with a new block device."""
dev = guest.get_block_device(disk_path)
# Save a copy of the domain's persistent XML file
xml = guest.get_xml_desc(dump_inactive=True, dump_sensitive=True)
# Abort is an idempotent operation, so make sure any block
# jobs which may have failed are ended.
try:
dev.abort_job()
except Exception:
pass
try:
# NOTE (rmk): blockRebase cannot be executed on persistent
# domains, so we need to temporarily undefine it.
# If any part of this block fails, the domain is
# re-defined regardless.
if guest.has_persistent_configuration():
support_uefi = self._has_uefi_support()
guest.delete_configuration(support_uefi)
try:
# Start copy with VIR_DOMAIN_BLOCK_REBASE_REUSE_EXT flag to
# allow writing to existing external volume file. Use
# VIR_DOMAIN_BLOCK_REBASE_COPY_DEV if it's a block device to
# make sure XML is generated correctly (bug 1691195)
copy_dev = conf.source_type == 'block'
dev.rebase(conf.source_path, copy=True, reuse_ext=True,
copy_dev=copy_dev)
while not dev.is_job_complete():
time.sleep(0.5)
dev.abort_job(pivot=True)
                # NOTE(alex_xu): domain.blockJobAbort isn't a synchronous
                # call. This is a bug in libvirt, so we need to wait until
                # the pivot has finished. libvirt bug #1119173
while not dev.is_job_complete():
time.sleep(0.5)
except Exception as exc:
LOG.exception(_LE("Failure rebasing volume %(new_path)s on "
"%(new_path)s."), {'new_path': conf.source_path,
'old_path': disk_path})
raise exception.VolumeRebaseFailed(reason=six.text_type(exc))
if resize_to:
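                # resize() expects the new size in KiB, so convert from GiB.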
dev.resize(resize_to * units.Gi / units.Ki)
finally:
self._host.write_instance_config(xml)
def swap_volume(self, old_connection_info,
new_connection_info, instance, mountpoint, resize_to):
guest = self._host.get_guest(instance)
disk_dev = mountpoint.rpartition("/")[2]
if not guest.get_disk(disk_dev):
raise exception.DiskNotFound(location=disk_dev)
disk_info = {
'dev': disk_dev,
'bus': blockinfo.get_disk_bus_for_disk_dev(
CONF.libvirt.virt_type, disk_dev),
'type': 'disk',
}
# NOTE (lyarwood): new_connection_info will be modified by the
# following _connect_volume call down into the volume drivers. The
# majority of the volume drivers will add a device_path that is in turn
# used by _get_volume_config to set the source_path of the
# LibvirtConfigGuestDisk object it returns. We do not explicitly save
# this to the BDM here as the upper compute swap_volume method will
# eventually do this for us.
self._connect_volume(new_connection_info, disk_info)
conf = self._get_volume_config(new_connection_info, disk_info)
if not conf.source_path:
self._disconnect_volume(new_connection_info, disk_dev)
raise NotImplementedError(_("Swap only supports host devices"))
try:
self._swap_volume(guest, disk_dev, conf, resize_to)
except exception.VolumeRebaseFailed:
with excutils.save_and_reraise_exception():
self._disconnect_volume(new_connection_info, disk_dev)
self._disconnect_volume(old_connection_info, disk_dev)
def _get_existing_domain_xml(self, instance, network_info,
block_device_info=None):
try:
guest = self._host.get_guest(instance)
xml = guest.get_xml_desc()
except exception.InstanceNotFound:
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance,
instance.image_meta,
block_device_info)
xml = self._get_guest_xml(nova_context.get_admin_context(),
instance, network_info, disk_info,
instance.image_meta,
block_device_info=block_device_info)
return xml
def detach_volume(self, connection_info, instance, mountpoint,
encryption=None):
disk_dev = mountpoint.rpartition("/")[2]
try:
guest = self._host.get_guest(instance)
state = guest.get_power_state(self._host)
live = state in (power_state.RUNNING, power_state.PAUSED)
# The volume must be detached from the VM before disconnecting it
# from its encryptor. Otherwise, the encryptor may report that the
# volume is still in use.
wait_for_detach = guest.detach_device_with_retry(guest.get_disk,
disk_dev,
persistent=True,
live=live)
wait_for_detach()
if encryption:
encryptor = self._get_volume_encryptor(connection_info,
encryption)
encryptor.detach_volume(**encryption)
except exception.InstanceNotFound:
# NOTE(zhaoqin): If the instance does not exist, _lookup_by_name()
# will throw InstanceNotFound exception. Need to
# disconnect volume under this circumstance.
LOG.warning(_LW("During detach_volume, instance disappeared."),
instance=instance)
except exception.DeviceNotFound:
raise exception.DiskNotFound(location=disk_dev)
except libvirt.libvirtError as ex:
# NOTE(vish): This is called to cleanup volumes after live
# migration, so we should still disconnect even if
# the instance doesn't exist here anymore.
error_code = ex.get_error_code()
if error_code == libvirt.VIR_ERR_NO_DOMAIN:
LOG.warning(_LW("During detach_volume, instance disappeared."),
instance=instance)
else:
raise
self._disconnect_volume(connection_info, disk_dev)
def attach_interface(self, context, instance, image_meta, vif):
guest = self._host.get_guest(instance)
self.vif_driver.plug(instance, vif)
self.firewall_driver.setup_basic_filtering(instance, [vif])
cfg = self.vif_driver.get_config(instance, vif, image_meta,
instance.flavor,
CONF.libvirt.virt_type,
self._host)
try:
state = guest.get_power_state(self._host)
live = state in (power_state.RUNNING, power_state.PAUSED)
guest.attach_device(cfg, persistent=True, live=live)
except libvirt.libvirtError:
LOG.error(_LE('attaching network adapter failed.'),
instance=instance, exc_info=True)
self.vif_driver.unplug(instance, vif)
raise exception.InterfaceAttachFailed(
instance_uuid=instance.uuid)
def detach_interface(self, context, instance, vif):
guest = self._host.get_guest(instance)
cfg = self.vif_driver.get_config(instance, vif,
instance.image_meta,
instance.flavor,
CONF.libvirt.virt_type, self._host)
interface = guest.get_interface_by_cfg(cfg)
try:
self.vif_driver.unplug(instance, vif)
# NOTE(mriedem): When deleting an instance and using Neutron,
# we can be racing against Neutron deleting the port and
# sending the vif-deleted event which then triggers a call to
# detach the interface, so if the interface is not found then
# we can just log it as a warning.
if not interface:
mac = vif.get('address')
# The interface is gone so just log it as a warning.
LOG.warning(_LW('Detaching interface %(mac)s failed because '
'the device is no longer found on the guest.'),
{'mac': mac}, instance=instance)
return
state = guest.get_power_state(self._host)
live = state in (power_state.RUNNING, power_state.PAUSED)
guest.detach_device(interface, persistent=True, live=live)
except libvirt.libvirtError as ex:
error_code = ex.get_error_code()
if error_code == libvirt.VIR_ERR_NO_DOMAIN:
LOG.warning(_LW("During detach_interface, "
"instance disappeared."),
instance=instance)
else:
# NOTE(mriedem): When deleting an instance and using Neutron,
# we can be racing against Neutron deleting the port and
# sending the vif-deleted event which then triggers a call to
# detach the interface, so we might have failed because the
# network device no longer exists. Libvirt will fail with
# "operation failed: no matching network device was found"
# which unfortunately does not have a unique error code so we
# need to look up the interface by config and if it's not found
# then we can just log it as a warning rather than tracing an
# error.
mac = vif.get('address')
interface = guest.get_interface_by_cfg(cfg)
if interface:
LOG.error(_LE('detaching network adapter failed.'),
instance=instance, exc_info=True)
raise exception.InterfaceDetachFailed(
instance_uuid=instance.uuid)
# The interface is gone so just log it as a warning.
LOG.warning(_LW('Detaching interface %(mac)s failed because '
'the device is no longer found on the guest.'),
{'mac': mac}, instance=instance)
def _create_snapshot_metadata(self, image_meta, instance,
img_fmt, snp_name):
metadata = {'is_public': False,
'status': 'active',
'name': snp_name,
'properties': {
'kernel_id': instance.kernel_id,
'image_location': 'snapshot',
'image_state': 'available',
'owner_id': instance.project_id,
'ramdisk_id': instance.ramdisk_id,
}
}
if instance.os_type:
metadata['properties']['os_type'] = instance.os_type
# NOTE(vish): glance forces ami disk format to be ami
if image_meta.disk_format == 'ami':
metadata['disk_format'] = 'ami'
else:
metadata['disk_format'] = img_fmt
if image_meta.obj_attr_is_set("container_format"):
metadata['container_format'] = image_meta.container_format
else:
metadata['container_format'] = "bare"
return metadata
def snapshot(self, context, instance, image_id, update_task_state):
"""Create snapshot from a running VM instance.
This command only works with qemu 0.14+
"""
try:
guest = self._host.get_guest(instance)
# TODO(sahid): We are converting all calls from a
# virDomain object to use nova.virt.libvirt.Guest.
# We should be able to remove virt_dom at the end.
virt_dom = guest._domain
except exception.InstanceNotFound:
raise exception.InstanceNotRunning(instance_id=instance.uuid)
snapshot = self._image_api.get(context, image_id)
# source_format is an on-disk format
# source_type is a backend type
disk_path, source_format = libvirt_utils.find_disk(virt_dom)
source_type = libvirt_utils.get_disk_type_from_path(disk_path)
# We won't have source_type for raw or qcow2 disks, because we can't
# determine that from the path. We should have it from the libvirt
# xml, though.
if source_type is None:
source_type = source_format
# For lxc instances we won't have it either from libvirt xml
# (because we just gave libvirt the mounted filesystem), or the path,
# so source_type is still going to be None. In this case,
# root_disk is going to default to CONF.libvirt.images_type
# below, which is still safe.
image_format = CONF.libvirt.snapshot_image_format or source_type
# NOTE(bfilippov): save lvm and rbd as raw
if image_format == 'lvm' or image_format == 'rbd':
image_format = 'raw'
metadata = self._create_snapshot_metadata(instance.image_meta,
instance,
image_format,
snapshot['name'])
snapshot_name = uuid.uuid4().hex
state = guest.get_power_state(self._host)
# NOTE(dgenin): Instances with LVM encrypted ephemeral storage require
# cold snapshots. Currently, checking for encryption is
# redundant because LVM supports only cold snapshots.
# It is necessary in case this situation changes in the
# future.
if (self._host.has_min_version(hv_type=host.HV_DRIVER_QEMU)
and source_type not in ('lvm')
and not CONF.ephemeral_storage_encryption.enabled
and not CONF.workarounds.disable_libvirt_livesnapshot):
live_snapshot = True
# Abort is an idempotent operation, so make sure any block
# jobs which may have failed are ended. This operation also
# confirms the running instance, as opposed to the system as a
# whole, has a new enough version of the hypervisor (bug 1193146).
try:
guest.get_block_device(disk_path).abort_job()
except libvirt.libvirtError as ex:
error_code = ex.get_error_code()
if error_code == libvirt.VIR_ERR_CONFIG_UNSUPPORTED:
live_snapshot = False
else:
pass
else:
live_snapshot = False
# NOTE(rmk): We cannot perform live snapshots when a managedSave
# file is present, so we will use the cold/legacy method
# for instances which are shutdown.
if state == power_state.SHUTDOWN:
live_snapshot = False
self._prepare_domain_for_snapshot(context, live_snapshot, state,
instance)
root_disk = self.image_backend.by_libvirt_path(
instance, disk_path, image_type=source_type)
if live_snapshot:
LOG.info(_LI("Beginning live snapshot process"),
instance=instance)
else:
LOG.info(_LI("Beginning cold snapshot process"),
instance=instance)
update_task_state(task_state=task_states.IMAGE_PENDING_UPLOAD)
try:
update_task_state(task_state=task_states.IMAGE_UPLOADING,
expected_state=task_states.IMAGE_PENDING_UPLOAD)
metadata['location'] = root_disk.direct_snapshot(
context, snapshot_name, image_format, image_id,
instance.image_ref)
self._snapshot_domain(context, live_snapshot, virt_dom, state,
instance)
self._image_api.update(context, image_id, metadata,
purge_props=False)
except (NotImplementedError, exception.ImageUnacceptable,
exception.Forbidden) as e:
if type(e) != NotImplementedError:
LOG.warning(_LW('Performing standard snapshot because direct '
'snapshot failed: %(error)s'), {'error': e})
failed_snap = metadata.pop('location', None)
if failed_snap:
failed_snap = {'url': str(failed_snap)}
root_disk.cleanup_direct_snapshot(failed_snap,
also_destroy_volume=True,
ignore_errors=True)
update_task_state(task_state=task_states.IMAGE_PENDING_UPLOAD,
expected_state=task_states.IMAGE_UPLOADING)
# TODO(nic): possibly abstract this out to the root_disk
if source_type == 'rbd' and live_snapshot:
# Standard snapshot uses qemu-img convert from RBD which is
# not safe to run with live_snapshot.
live_snapshot = False
# Suspend the guest, so this is no longer a live snapshot
self._prepare_domain_for_snapshot(context, live_snapshot,
state, instance)
snapshot_directory = CONF.libvirt.snapshots_directory
fileutils.ensure_tree(snapshot_directory)
with utils.tempdir(dir=snapshot_directory) as tmpdir:
try:
out_path = os.path.join(tmpdir, snapshot_name)
if live_snapshot:
# NOTE(xqueralt): libvirt needs o+x in the tempdir
os.chmod(tmpdir, 0o701)
self._live_snapshot(context, instance, guest,
disk_path, out_path, source_format,
image_format, instance.image_meta)
else:
root_disk.snapshot_extract(out_path, image_format)
finally:
self._snapshot_domain(context, live_snapshot, virt_dom,
state, instance)
LOG.info(_LI("Snapshot extracted, beginning image upload"),
instance=instance)
# Upload that image to the image service
update_task_state(task_state=task_states.IMAGE_UPLOADING,
expected_state=task_states.IMAGE_PENDING_UPLOAD)
with libvirt_utils.file_open(out_path, 'rb') as image_file:
self._image_api.update(context,
image_id,
metadata,
image_file)
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(_LE("Failed to snapshot image"))
failed_snap = metadata.pop('location', None)
if failed_snap:
failed_snap = {'url': str(failed_snap)}
root_disk.cleanup_direct_snapshot(
failed_snap, also_destroy_volume=True,
ignore_errors=True)
LOG.info(_LI("Snapshot image upload complete"), instance=instance)
def _prepare_domain_for_snapshot(self, context, live_snapshot, state,
instance):
# NOTE(dkang): managedSave does not work for LXC
if CONF.libvirt.virt_type != 'lxc' and not live_snapshot:
if state == power_state.RUNNING or state == power_state.PAUSED:
self.suspend(context, instance)
def _snapshot_domain(self, context, live_snapshot, virt_dom, state,
instance):
guest = None
# NOTE(dkang): because previous managedSave is not called
# for LXC, _create_domain must not be called.
if CONF.libvirt.virt_type != 'lxc' and not live_snapshot:
if state == power_state.RUNNING:
guest = self._create_domain(domain=virt_dom)
elif state == power_state.PAUSED:
guest = self._create_domain(domain=virt_dom, pause=True)
if guest is not None:
self._attach_pci_devices(
guest, pci_manager.get_instance_pci_devs(instance))
self._attach_direct_passthrough_ports(
context, instance, guest)
def _can_set_admin_password(self, image_meta):
if CONF.libvirt.virt_type == 'parallels':
if not self._host.has_min_version(
MIN_LIBVIRT_PARALLELS_SET_ADMIN_PASSWD):
raise exception.SetAdminPasswdNotSupported()
elif CONF.libvirt.virt_type in ('kvm', 'qemu'):
if not self._host.has_min_version(
MIN_LIBVIRT_SET_ADMIN_PASSWD):
raise exception.SetAdminPasswdNotSupported()
if not image_meta.properties.get('hw_qemu_guest_agent', False):
raise exception.QemuGuestAgentNotEnabled()
else:
raise exception.SetAdminPasswdNotSupported()
def set_admin_password(self, instance, new_pass):
self._can_set_admin_password(instance.image_meta)
guest = self._host.get_guest(instance)
user = instance.image_meta.properties.get("os_admin_user")
if not user:
if instance.os_type == "windows":
user = "Administrator"
else:
user = "root"
try:
guest.set_user_password(user, new_pass)
except libvirt.libvirtError as ex:
error_code = ex.get_error_code()
msg = (_('Error from libvirt while set password for username '
'"%(user)s": [Error Code %(error_code)s] %(ex)s')
% {'user': user, 'error_code': error_code, 'ex': ex})
raise exception.InternalError(msg)
def _can_quiesce(self, instance, image_meta):
if (CONF.libvirt.virt_type not in ('kvm', 'qemu') or
not self._host.has_min_version(MIN_LIBVIRT_FSFREEZE_VERSION)):
raise exception.InstanceQuiesceNotSupported(
instance_id=instance.uuid)
if not image_meta.properties.get('hw_qemu_guest_agent', False):
raise exception.QemuGuestAgentNotEnabled()
def _set_quiesced(self, context, instance, image_meta, quiesced):
self._can_quiesce(instance, image_meta)
try:
guest = self._host.get_guest(instance)
if quiesced:
guest.freeze_filesystems()
else:
guest.thaw_filesystems()
except libvirt.libvirtError as ex:
error_code = ex.get_error_code()
msg = (_('Error from libvirt while quiescing %(instance_name)s: '
'[Error Code %(error_code)s] %(ex)s')
% {'instance_name': instance.name,
'error_code': error_code, 'ex': ex})
raise exception.InternalError(msg)
def quiesce(self, context, instance, image_meta):
"""Freeze the guest filesystems to prepare for snapshot.
        The qemu-guest-agent must be set up to execute fsfreeze.
"""
self._set_quiesced(context, instance, image_meta, True)
def unquiesce(self, context, instance, image_meta):
"""Thaw the guest filesystems after snapshot."""
self._set_quiesced(context, instance, image_meta, False)
def _live_snapshot(self, context, instance, guest, disk_path, out_path,
source_format, image_format, image_meta):
"""Snapshot an instance without downtime."""
dev = guest.get_block_device(disk_path)
# Save a copy of the domain's persistent XML file
xml = guest.get_xml_desc(dump_inactive=True, dump_sensitive=True)
# Abort is an idempotent operation, so make sure any block
# jobs which may have failed are ended.
try:
dev.abort_job()
except Exception:
pass
# NOTE (rmk): We are using shallow rebases as a workaround to a bug
# in QEMU 1.3. In order to do this, we need to create
# a destination image with the original backing file
# and matching size of the instance root disk.
src_disk_size = libvirt_utils.get_disk_size(disk_path,
format=source_format)
src_back_path = libvirt_utils.get_disk_backing_file(disk_path,
format=source_format,
basename=False)
disk_delta = out_path + '.delta'
libvirt_utils.create_cow_image(src_back_path, disk_delta,
src_disk_size)
quiesced = False
try:
self._set_quiesced(context, instance, image_meta, True)
quiesced = True
except exception.NovaException as err:
if image_meta.properties.get('os_require_quiesce', False):
raise
LOG.info(_LI('Skipping quiescing instance: %(reason)s.'),
{'reason': err}, instance=instance)
try:
# NOTE (rmk): blockRebase cannot be executed on persistent
# domains, so we need to temporarily undefine it.
# If any part of this block fails, the domain is
# re-defined regardless.
if guest.has_persistent_configuration():
support_uefi = self._has_uefi_support()
guest.delete_configuration(support_uefi)
# NOTE (rmk): Establish a temporary mirror of our root disk and
# issue an abort once we have a complete copy.
dev.rebase(disk_delta, copy=True, reuse_ext=True, shallow=True)
while not dev.is_job_complete():
time.sleep(0.5)
dev.abort_job()
libvirt_utils.chown(disk_delta, os.getuid())
finally:
self._host.write_instance_config(xml)
if quiesced:
self._set_quiesced(context, instance, image_meta, False)
# Convert the delta (CoW) image with a backing file to a flat
# image with no backing file.
libvirt_utils.extract_snapshot(disk_delta, 'qcow2',
out_path, image_format)
def _volume_snapshot_update_status(self, context, snapshot_id, status):
"""Send a snapshot status update to Cinder.
This method captures and logs exceptions that occur
since callers cannot do anything useful with these exceptions.
Operations on the Cinder side waiting for this will time out if
a failure occurs sending the update.
:param context: security context
:param snapshot_id: id of snapshot being updated
:param status: new status value
"""
try:
self._volume_api.update_snapshot_status(context,
snapshot_id,
status)
except Exception:
LOG.exception(_LE('Failed to send updated snapshot status '
'to volume service.'))
def _volume_snapshot_create(self, context, instance, guest,
volume_id, new_file):
"""Perform volume snapshot.
:param guest: VM that volume is attached to
:param volume_id: volume UUID to snapshot
:param new_file: relative path to new qcow2 file present on share
"""
xml = guest.get_xml_desc()
xml_doc = etree.fromstring(xml)
device_info = vconfig.LibvirtConfigGuest()
device_info.parse_dom(xml_doc)
disks_to_snap = [] # to be snapshotted by libvirt
network_disks_to_snap = [] # network disks (netfs, gluster, etc.)
disks_to_skip = [] # local disks not snapshotted
for guest_disk in device_info.devices:
if (guest_disk.root_name != 'disk'):
continue
if (guest_disk.target_dev is None):
continue
if (guest_disk.serial is None or guest_disk.serial != volume_id):
disks_to_skip.append(guest_disk.target_dev)
continue
# disk is a Cinder volume with the correct volume_id
disk_info = {
'dev': guest_disk.target_dev,
'serial': guest_disk.serial,
'current_file': guest_disk.source_path,
'source_protocol': guest_disk.source_protocol,
'source_name': guest_disk.source_name,
'source_hosts': guest_disk.source_hosts,
'source_ports': guest_disk.source_ports
}
# Determine path for new_file based on current path
if disk_info['current_file'] is not None:
current_file = disk_info['current_file']
new_file_path = os.path.join(os.path.dirname(current_file),
new_file)
disks_to_snap.append((current_file, new_file_path))
elif disk_info['source_protocol'] in ('gluster', 'netfs'):
network_disks_to_snap.append((disk_info, new_file))
if not disks_to_snap and not network_disks_to_snap:
msg = _('Found no disk to snapshot.')
raise exception.InternalError(msg)
snapshot = vconfig.LibvirtConfigGuestSnapshot()
for current_name, new_filename in disks_to_snap:
snap_disk = vconfig.LibvirtConfigGuestSnapshotDisk()
snap_disk.name = current_name
snap_disk.source_path = new_filename
snap_disk.source_type = 'file'
snap_disk.snapshot = 'external'
snap_disk.driver_name = 'qcow2'
snapshot.add_disk(snap_disk)
for disk_info, new_filename in network_disks_to_snap:
snap_disk = vconfig.LibvirtConfigGuestSnapshotDisk()
snap_disk.name = disk_info['dev']
snap_disk.source_type = 'network'
snap_disk.source_protocol = disk_info['source_protocol']
snap_disk.snapshot = 'external'
snap_disk.source_path = new_filename
old_dir = disk_info['source_name'].split('/')[0]
snap_disk.source_name = '%s/%s' % (old_dir, new_filename)
snap_disk.source_hosts = disk_info['source_hosts']
snap_disk.source_ports = disk_info['source_ports']
snapshot.add_disk(snap_disk)
for dev in disks_to_skip:
snap_disk = vconfig.LibvirtConfigGuestSnapshotDisk()
snap_disk.name = dev
snap_disk.snapshot = 'no'
snapshot.add_disk(snap_disk)
snapshot_xml = snapshot.to_xml()
LOG.debug("snap xml: %s", snapshot_xml, instance=instance)
try:
guest.snapshot(snapshot, no_metadata=True, disk_only=True,
reuse_ext=True, quiesce=True)
return
except libvirt.libvirtError:
LOG.exception(_LE('Unable to create quiesced VM snapshot, '
'attempting again with quiescing disabled.'),
instance=instance)
try:
guest.snapshot(snapshot, no_metadata=True, disk_only=True,
reuse_ext=True, quiesce=False)
except libvirt.libvirtError:
LOG.exception(_LE('Unable to create VM snapshot, '
'failing volume_snapshot operation.'),
instance=instance)
raise
def _volume_refresh_connection_info(self, context, instance, volume_id):
bdm = objects.BlockDeviceMapping.get_by_volume_and_instance(
context, volume_id, instance.uuid)
driver_bdm = driver_block_device.convert_volume(bdm)
if driver_bdm:
driver_bdm.refresh_connection_info(context, instance,
self._volume_api, self)
def volume_snapshot_create(self, context, instance, volume_id,
create_info):
"""Create snapshots of a Cinder volume via libvirt.
:param instance: VM instance object reference
:param volume_id: id of volume being snapshotted
:param create_info: dict of information used to create snapshots
- snapshot_id : ID of snapshot
- type : qcow2 / <other>
- new_file : qcow2 file created by Cinder which
becomes the VM's active image after
the snapshot is complete
"""
LOG.debug("volume_snapshot_create: create_info: %(c_info)s",
{'c_info': create_info}, instance=instance)
try:
guest = self._host.get_guest(instance)
except exception.InstanceNotFound:
raise exception.InstanceNotRunning(instance_id=instance.uuid)
if create_info['type'] != 'qcow2':
msg = _('Unknown type: %s') % create_info['type']
raise exception.InternalError(msg)
snapshot_id = create_info.get('snapshot_id', None)
if snapshot_id is None:
msg = _('snapshot_id required in create_info')
raise exception.InternalError(msg)
try:
self._volume_snapshot_create(context, instance, guest,
volume_id, create_info['new_file'])
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(_LE('Error occurred during '
'volume_snapshot_create, '
'sending error status to Cinder.'),
instance=instance)
self._volume_snapshot_update_status(
context, snapshot_id, 'error')
self._volume_snapshot_update_status(
context, snapshot_id, 'creating')
def _wait_for_snapshot():
snapshot = self._volume_api.get_snapshot(context, snapshot_id)
if snapshot.get('status') != 'creating':
self._volume_refresh_connection_info(context, instance,
volume_id)
raise loopingcall.LoopingCallDone()
timer = loopingcall.FixedIntervalLoopingCall(_wait_for_snapshot)
timer.start(interval=0.5).wait()
@staticmethod
def _rebase_with_qemu_img(guest, device, active_disk_object,
rebase_base):
"""Rebase a device tied to a guest using qemu-img.
:param guest:the Guest which owns the device being rebased
:type guest: nova.virt.libvirt.guest.Guest
:param device: the guest block device to rebase
:type device: nova.virt.libvirt.guest.BlockDevice
:param active_disk_object: the guest block device to rebase
:type active_disk_object: nova.virt.libvirt.config.\
LibvirtConfigGuestDisk
:param rebase_base: the new parent in the backing chain
:type rebase_base: None or string
"""
        # It is unclear how well qemu-img handles network disks for every
        # protocol, so err on the side of caution.
active_protocol = active_disk_object.source_protocol
if active_protocol is not None:
msg = _("Something went wrong when deleting a volume snapshot: "
"rebasing a %(protocol)s network disk using qemu-img "
"has not been fully tested") % {'protocol':
active_protocol}
LOG.error(msg)
raise exception.InternalError(msg)
if rebase_base is None:
# If backing_file is specified as "" (the empty string), then
# the image is rebased onto no backing file (i.e. it will exist
# independently of any backing file).
backing_file = ""
qemu_img_extra_arg = []
else:
# If the rebased image is going to have a backing file then
# explicitly set the backing file format to avoid any security
# concerns related to file format auto detection.
backing_file = rebase_base
b_file_fmt = images.qemu_img_info(backing_file).file_format
qemu_img_extra_arg = ['-F', b_file_fmt]
qemu_img_extra_arg.append(active_disk_object.source_path)
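        # Resulting command:
        #   qemu-img rebase -b <backing file> [-F <backing format>] <image>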
utils.execute("qemu-img", "rebase", "-b", backing_file,
*qemu_img_extra_arg)
def _volume_snapshot_delete(self, context, instance, volume_id,
snapshot_id, delete_info=None):
"""Note:
if file being merged into == active image:
do a blockRebase (pull) operation
else:
do a blockCommit operation
Files must be adjacent in snap chain.
:param instance: instance object reference
:param volume_id: volume UUID
:param snapshot_id: snapshot UUID (unused currently)
:param delete_info: {
'type': 'qcow2',
'file_to_merge': 'a.img',
'merge_target_file': 'b.img' or None (if merging file_to_merge into
active image)
}
"""
LOG.debug('volume_snapshot_delete: delete_info: %s', delete_info,
instance=instance)
if delete_info['type'] != 'qcow2':
msg = _('Unknown delete_info type %s') % delete_info['type']
raise exception.InternalError(msg)
try:
guest = self._host.get_guest(instance)
except exception.InstanceNotFound:
raise exception.InstanceNotRunning(instance_id=instance.uuid)
# Find dev name
my_dev = None
active_disk = None
xml = guest.get_xml_desc()
xml_doc = etree.fromstring(xml)
device_info = vconfig.LibvirtConfigGuest()
device_info.parse_dom(xml_doc)
active_disk_object = None
for guest_disk in device_info.devices:
if (guest_disk.root_name != 'disk'):
continue
if (guest_disk.target_dev is None or guest_disk.serial is None):
continue
if guest_disk.serial == volume_id:
my_dev = guest_disk.target_dev
active_disk = guest_disk.source_path
active_protocol = guest_disk.source_protocol
active_disk_object = guest_disk
break
if my_dev is None or (active_disk is None and active_protocol is None):
LOG.debug('Domain XML: %s', xml, instance=instance)
msg = (_('Disk with id: %s not found attached to instance.')
% volume_id)
raise exception.InternalError(msg)
LOG.debug("found device at %s", my_dev, instance=instance)
def _get_snap_dev(filename, backing_store):
if filename is None:
msg = _('filename cannot be None')
raise exception.InternalError(msg)
# libgfapi delete
LOG.debug("XML: %s", xml)
LOG.debug("active disk object: %s", active_disk_object)
# determine reference within backing store for desired image
filename_to_merge = filename
matched_name = None
b = backing_store
index = None
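            # source_name has the form "<volume or share>/<file name>";
            # compare only the file name component.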
current_filename = active_disk_object.source_name.split('/')[1]
if current_filename == filename_to_merge:
return my_dev + '[0]'
while b is not None:
source_filename = b.source_name.split('/')[1]
if source_filename == filename_to_merge:
LOG.debug('found match: %s', b.source_name)
matched_name = b.source_name
index = b.index
break
b = b.backing_store
if matched_name is None:
msg = _('no match found for %s') % (filename_to_merge)
raise exception.InternalError(msg)
LOG.debug('index of match (%s) is %s', b.source_name, index)
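            # libvirt accepts the "<dev>[<index>]" notation (e.g. "vda[1]")
            # to address an image within the disk's backing chain.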
my_snap_dev = '%s[%s]' % (my_dev, index)
return my_snap_dev
if delete_info['merge_target_file'] is None:
# pull via blockRebase()
# Merge the most recent snapshot into the active image
rebase_disk = my_dev
rebase_base = delete_info['file_to_merge'] # often None
if (active_protocol is not None) and (rebase_base is not None):
rebase_base = _get_snap_dev(rebase_base,
active_disk_object.backing_store)
# NOTE(deepakcs): libvirt added support for _RELATIVE in v1.2.7,
# and when available this flag _must_ be used to ensure backing
# paths are maintained relative by qemu.
#
# If _RELATIVE flag not found, continue with old behaviour
# (relative backing path seems to work for this case)
try:
libvirt.VIR_DOMAIN_BLOCK_REBASE_RELATIVE
relative = rebase_base is not None
except AttributeError:
LOG.warning(_LW(
"Relative blockrebase support was not detected. "
"Continuing with old behaviour."))
relative = False
LOG.debug(
'disk: %(disk)s, base: %(base)s, '
'bw: %(bw)s, relative: %(relative)s',
{'disk': rebase_disk,
'base': rebase_base,
'bw': libvirt_guest.BlockDevice.REBASE_DEFAULT_BANDWIDTH,
'relative': str(relative)}, instance=instance)
dev = guest.get_block_device(rebase_disk)
if guest.is_active():
result = dev.rebase(rebase_base, relative=relative)
if result == 0:
LOG.debug('blockRebase started successfully',
instance=instance)
while not dev.is_job_complete():
LOG.debug('waiting for blockRebase job completion',
instance=instance)
time.sleep(0.5)
# If the guest is not running libvirt won't do a blockRebase.
# In that case, let's ask qemu-img to rebase the disk.
else:
LOG.debug('Guest is not running so doing a block rebase '
'using "qemu-img rebase"', instance=instance)
self._rebase_with_qemu_img(guest, dev, active_disk_object,
rebase_base)
else:
# commit with blockCommit()
my_snap_base = None
my_snap_top = None
commit_disk = my_dev
# NOTE(deepakcs): libvirt added support for _RELATIVE in v1.2.7,
# and when available this flag _must_ be used to ensure backing
# paths are maintained relative by qemu.
#
# If _RELATIVE flag not found, raise exception as relative backing
# path may not be maintained and Cinder flow is broken if allowed
# to continue.
try:
libvirt.VIR_DOMAIN_BLOCK_COMMIT_RELATIVE
except AttributeError:
ver = '.'.join(
[str(x) for x in
MIN_LIBVIRT_BLOCKJOB_RELATIVE_VERSION])
msg = _("Relative blockcommit support was not detected. "
"Libvirt '%s' or later is required for online "
"deletion of file/network storage-backed volume "
"snapshots.") % ver
raise exception.Invalid(msg)
if active_protocol is not None:
my_snap_base = _get_snap_dev(delete_info['merge_target_file'],
active_disk_object.backing_store)
my_snap_top = _get_snap_dev(delete_info['file_to_merge'],
active_disk_object.backing_store)
commit_base = my_snap_base or delete_info['merge_target_file']
commit_top = my_snap_top or delete_info['file_to_merge']
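            # blockCommit merges the data held in commit_top down into
            # commit_base; passing relative=True keeps the backing file
            # references in the remaining chain relative, which the
            # Cinder-driven snapshot delete flow relies on (hence the
            # version check above).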
LOG.debug('will call blockCommit with commit_disk=%(commit_disk)s '
'commit_base=%(commit_base)s '
'commit_top=%(commit_top)s ',
{'commit_disk': commit_disk,
'commit_base': commit_base,
'commit_top': commit_top}, instance=instance)
dev = guest.get_block_device(commit_disk)
result = dev.commit(commit_base, commit_top, relative=True)
if result == 0:
LOG.debug('blockCommit started successfully',
instance=instance)
while not dev.is_job_complete():
LOG.debug('waiting for blockCommit job completion',
instance=instance)
time.sleep(0.5)
def volume_snapshot_delete(self, context, instance, volume_id, snapshot_id,
delete_info):
try:
self._volume_snapshot_delete(context, instance, volume_id,
snapshot_id, delete_info=delete_info)
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(_LE('Error occurred during '
'volume_snapshot_delete, '
'sending error status to Cinder.'),
instance=instance)
self._volume_snapshot_update_status(
context, snapshot_id, 'error_deleting')
self._volume_snapshot_update_status(context, snapshot_id, 'deleting')
self._volume_refresh_connection_info(context, instance, volume_id)
def reboot(self, context, instance, network_info, reboot_type,
block_device_info=None, bad_volumes_callback=None):
"""Reboot a virtual machine, given an instance reference."""
if reboot_type == 'SOFT':
# NOTE(vish): This will attempt to do a graceful shutdown/restart.
try:
soft_reboot_success = self._soft_reboot(instance)
except libvirt.libvirtError as e:
LOG.debug("Instance soft reboot failed: %s", e,
instance=instance)
soft_reboot_success = False
if soft_reboot_success:
LOG.info(_LI("Instance soft rebooted successfully."),
instance=instance)
return
else:
LOG.warning(_LW("Failed to soft reboot instance. "
"Trying hard reboot."),
instance=instance)
return self._hard_reboot(context, instance, network_info,
block_device_info)
def _soft_reboot(self, instance):
"""Attempt to shutdown and restart the instance gracefully.
We use shutdown and create here so we can return if the guest
responded and actually rebooted. Note that this method only
succeeds if the guest responds to acpi. Therefore we return
success or failure so we can fall back to a hard reboot if
necessary.
:returns: True if the reboot succeeded
"""
guest = self._host.get_guest(instance)
state = guest.get_power_state(self._host)
old_domid = guest.id
# NOTE(vish): This check allows us to reboot an instance that
# is already shutdown.
if state == power_state.RUNNING:
guest.shutdown()
# NOTE(vish): This actually could take slightly longer than the
# FLAG defines depending on how long the get_info
# call takes to return.
self._prepare_pci_devices_for_use(
pci_manager.get_instance_pci_devs(instance, 'all'))
for x in range(CONF.libvirt.wait_soft_reboot_seconds):
guest = self._host.get_guest(instance)
state = guest.get_power_state(self._host)
new_domid = guest.id
            # NOTE(ivoks): By checking domain IDs, we make sure we are
            #              not recreating a domain that's already running.
if old_domid != new_domid:
if state in [power_state.SHUTDOWN,
power_state.CRASHED]:
LOG.info(_LI("Instance shutdown successfully."),
instance=instance)
self._create_domain(domain=guest._domain)
timer = loopingcall.FixedIntervalLoopingCall(
self._wait_for_running, instance)
timer.start(interval=0.5).wait()
return True
else:
LOG.info(_LI("Instance may have been rebooted during soft "
"reboot, so return now."), instance=instance)
return True
greenthread.sleep(1)
return False
def _hard_reboot(self, context, instance, network_info,
block_device_info=None):
"""Reboot a virtual machine, given an instance reference.
Performs a Libvirt reset (if supported) on the domain.
If Libvirt reset is unavailable this method actually destroys and
re-creates the domain to ensure the reboot happens, as the guest
OS cannot ignore this action.
"""
self._destroy(instance)
# Domain XML will be redefined so we can safely undefine it
        # from libvirt. This ensures that processes such as creating a
        # serial console for the guest will run smoothly.
self._undefine_domain(instance)
# Convert the system metadata to image metadata
# NOTE(mdbooth): This is a workaround for stateless Nova compute
# https://bugs.launchpad.net/nova/+bug/1349978
instance_dir = libvirt_utils.get_instance_path(instance)
fileutils.ensure_tree(instance_dir)
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance,
instance.image_meta,
block_device_info)
# NOTE(vish): This could generate the wrong device_format if we are
# using the raw backend and the images don't exist yet.
# The create_images_and_backing below doesn't properly
# regenerate raw backend images, however, so when it
# does we need to (re)generate the xml after the images
# are in place.
xml = self._get_guest_xml(context, instance, network_info, disk_info,
instance.image_meta,
block_device_info=block_device_info)
# NOTE(mdbooth): context.auth_token will not be set when we call
# _hard_reboot from resume_state_on_host_boot()
if context.auth_token is not None:
# NOTE (rmk): Re-populate any missing backing files.
backing_disk_info = self._get_instance_disk_info(instance.name,
xml,
block_device_info)
self._create_images_and_backing(context, instance, instance_dir,
backing_disk_info)
# Initialize all the necessary networking, block devices and
# start the instance.
self._create_domain_and_network(context, xml, instance, network_info,
disk_info,
block_device_info=block_device_info,
reboot=True,
vifs_already_plugged=True)
self._prepare_pci_devices_for_use(
pci_manager.get_instance_pci_devs(instance, 'all'))
def _wait_for_reboot():
"""Called at an interval until the VM is running again."""
state = self.get_info(instance).state
if state == power_state.RUNNING:
LOG.info(_LI("Instance rebooted successfully."),
instance=instance)
raise loopingcall.LoopingCallDone()
timer = loopingcall.FixedIntervalLoopingCall(_wait_for_reboot)
timer.start(interval=0.5).wait()
def pause(self, instance):
"""Pause VM instance."""
self._host.get_guest(instance).pause()
def unpause(self, instance):
"""Unpause paused VM instance."""
guest = self._host.get_guest(instance)
guest.resume()
guest.sync_guest_time()
def _clean_shutdown(self, instance, timeout, retry_interval):
"""Attempt to shutdown the instance gracefully.
:param instance: The instance to be shutdown
:param timeout: How long to wait in seconds for the instance to
shutdown
:param retry_interval: How often in seconds to signal the instance
to shutdown while waiting
:returns: True if the shutdown succeeded
"""
# List of states that represent a shutdown instance
SHUTDOWN_STATES = [power_state.SHUTDOWN,
power_state.CRASHED]
try:
guest = self._host.get_guest(instance)
except exception.InstanceNotFound:
# If the instance has gone then we don't need to
# wait for it to shutdown
return True
state = guest.get_power_state(self._host)
if state in SHUTDOWN_STATES:
LOG.info(_LI("Instance already shutdown."),
instance=instance)
return True
LOG.debug("Shutting down instance from state %s", state,
instance=instance)
guest.shutdown()
retry_countdown = retry_interval
for sec in range(timeout):
guest = self._host.get_guest(instance)
state = guest.get_power_state(self._host)
if state in SHUTDOWN_STATES:
LOG.info(_LI("Instance shutdown successfully after %d "
"seconds."), sec, instance=instance)
return True
# Note(PhilD): We can't assume that the Guest was able to process
# any previous shutdown signal (for example it may
            #              have still been starting up), so within the overall
# timeout we re-trigger the shutdown every
# retry_interval
if retry_countdown == 0:
retry_countdown = retry_interval
# Instance could shutdown at any time, in which case we
# will get an exception when we call shutdown
try:
LOG.debug("Instance in state %s after %d seconds - "
"resending shutdown", state, sec,
instance=instance)
guest.shutdown()
except libvirt.libvirtError:
                    # Assume this is because it's now shutdown, so loop
# one more time to clean up.
LOG.debug("Ignoring libvirt exception from shutdown "
"request.", instance=instance)
continue
else:
retry_countdown -= 1
time.sleep(1)
LOG.info(_LI("Instance failed to shutdown in %d seconds."),
timeout, instance=instance)
return False
def power_off(self, instance, timeout=0, retry_interval=0):
"""Power off the specified instance."""
if timeout:
self._clean_shutdown(instance, timeout, retry_interval)
self._destroy(instance)
def power_on(self, context, instance, network_info,
block_device_info=None):
"""Power on the specified instance."""
# We use _hard_reboot here to ensure that all backing files,
# network, and block device connections, etc. are established
# and available before we attempt to start the instance.
self._hard_reboot(context, instance, network_info, block_device_info)
def trigger_crash_dump(self, instance):
"""Trigger crash dump by injecting an NMI to the specified instance."""
try:
self._host.get_guest(instance).inject_nmi()
except libvirt.libvirtError as ex:
error_code = ex.get_error_code()
if error_code == libvirt.VIR_ERR_NO_SUPPORT:
raise exception.TriggerCrashDumpNotSupported()
elif error_code == libvirt.VIR_ERR_OPERATION_INVALID:
raise exception.InstanceNotRunning(instance_id=instance.uuid)
LOG.exception(_LE('Error from libvirt while injecting an NMI to '
'%(instance_uuid)s: '
'[Error Code %(error_code)s] %(ex)s'),
{'instance_uuid': instance.uuid,
'error_code': error_code, 'ex': ex})
raise
def suspend(self, context, instance):
"""Suspend the specified instance."""
guest = self._host.get_guest(instance)
self._detach_pci_devices(guest,
pci_manager.get_instance_pci_devs(instance))
self._detach_direct_passthrough_ports(context, instance, guest)
guest.save_memory_state()
def resume(self, context, instance, network_info, block_device_info=None):
"""resume the specified instance."""
disk_info = blockinfo.get_disk_info(
CONF.libvirt.virt_type, instance, instance.image_meta,
block_device_info=block_device_info)
xml = self._get_existing_domain_xml(instance, network_info,
block_device_info)
guest = self._create_domain_and_network(context, xml, instance,
network_info, disk_info,
block_device_info=block_device_info,
vifs_already_plugged=True)
self._attach_pci_devices(guest,
pci_manager.get_instance_pci_devs(instance))
self._attach_direct_passthrough_ports(
context, instance, guest, network_info)
timer = loopingcall.FixedIntervalLoopingCall(self._wait_for_running,
instance)
timer.start(interval=0.5).wait()
guest.sync_guest_time()
def resume_state_on_host_boot(self, context, instance, network_info,
block_device_info=None):
"""resume guest state when a host is booted."""
# Check if the instance is running already and avoid doing
# anything if it is.
try:
guest = self._host.get_guest(instance)
state = guest.get_power_state(self._host)
ignored_states = (power_state.RUNNING,
power_state.SUSPENDED,
power_state.NOSTATE,
power_state.PAUSED)
if state in ignored_states:
return
except (exception.InternalError, exception.InstanceNotFound):
pass
# Instance is not up and could be in an unknown state.
# Be as absolute as possible about getting it back into
# a known and running state.
self._hard_reboot(context, instance, network_info, block_device_info)
def rescue(self, context, instance, network_info, image_meta,
rescue_password):
"""Loads a VM using rescue images.
A rescue is normally performed when something goes wrong with the
primary images and data needs to be corrected/recovered. Rescuing
should not edit or over-ride the original image, only allow for
data recovery.
"""
instance_dir = libvirt_utils.get_instance_path(instance)
unrescue_xml = self._get_existing_domain_xml(instance, network_info)
unrescue_xml_path = os.path.join(instance_dir, 'unrescue.xml')
libvirt_utils.write_to_file(unrescue_xml_path, unrescue_xml)
rescue_image_id = None
if image_meta.obj_attr_is_set("id"):
rescue_image_id = image_meta.id
rescue_images = {
'image_id': (rescue_image_id or
CONF.libvirt.rescue_image_id or instance.image_ref),
'kernel_id': (CONF.libvirt.rescue_kernel_id or
instance.kernel_id),
'ramdisk_id': (CONF.libvirt.rescue_ramdisk_id or
instance.ramdisk_id),
}
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance,
image_meta,
rescue=True)
injection_info = InjectionInfo(network_info=network_info,
admin_pass=rescue_password,
files=None)
gen_confdrive = functools.partial(self._create_configdrive,
context, instance, injection_info,
rescue=True)
self._create_image(context, instance, disk_info['mapping'],
injection_info=injection_info, suffix='.rescue',
disk_images=rescue_images)
xml = self._get_guest_xml(context, instance, network_info, disk_info,
image_meta, rescue=rescue_images)
self._destroy(instance)
self._create_domain(xml, post_xml_callback=gen_confdrive)
def unrescue(self, instance, network_info):
"""Reboot the VM which is being rescued back into primary images.
"""
instance_dir = libvirt_utils.get_instance_path(instance)
unrescue_xml_path = os.path.join(instance_dir, 'unrescue.xml')
xml = libvirt_utils.load_file(unrescue_xml_path)
guest = self._host.get_guest(instance)
# TODO(sahid): We are converting all calls from a
# virDomain object to use nova.virt.libvirt.Guest.
# We should be able to remove virt_dom at the end.
virt_dom = guest._domain
self._destroy(instance)
self._create_domain(xml, virt_dom)
libvirt_utils.file_delete(unrescue_xml_path)
rescue_files = os.path.join(instance_dir, "*.rescue")
for rescue_file in glob.iglob(rescue_files):
if os.path.isdir(rescue_file):
shutil.rmtree(rescue_file)
else:
libvirt_utils.file_delete(rescue_file)
# cleanup rescue volume
lvm.remove_volumes([lvmdisk for lvmdisk in self._lvm_disks(instance)
if lvmdisk.endswith('.rescue')])
if CONF.libvirt.images_type == 'rbd':
filter_fn = lambda disk: (disk.startswith(instance.uuid) and
disk.endswith('.rescue'))
LibvirtDriver._get_rbd_driver().cleanup_volumes(filter_fn)
def poll_rebooting_instances(self, timeout, instances):
pass
# NOTE(ilyaalekseyev): Implementation like in multinics
# for xenapi(tr3buchet)
def spawn(self, context, instance, image_meta, injected_files,
admin_password, network_info=None, block_device_info=None):
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance,
image_meta,
block_device_info)
injection_info = InjectionInfo(network_info=network_info,
files=injected_files,
admin_pass=admin_password)
gen_confdrive = functools.partial(self._create_configdrive,
context, instance,
injection_info)
self._create_image(context, instance, disk_info['mapping'],
injection_info=injection_info,
block_device_info=block_device_info)
# Required by Quobyte CI
self._ensure_console_log_for_instance(instance)
xml = self._get_guest_xml(context, instance, network_info,
disk_info, image_meta,
block_device_info=block_device_info)
self._create_domain_and_network(
context, xml, instance, network_info, disk_info,
block_device_info=block_device_info,
post_xml_callback=gen_confdrive,
destroy_disks_on_failure=True)
LOG.debug("Instance is running", instance=instance)
def _wait_for_boot():
"""Called at an interval until the VM is running."""
state = self.get_info(instance).state
if state == power_state.RUNNING:
LOG.info(_LI("Instance spawned successfully."),
instance=instance)
raise loopingcall.LoopingCallDone()
timer = loopingcall.FixedIntervalLoopingCall(_wait_for_boot)
timer.start(interval=0.5).wait()
def _flush_libvirt_console(self, pty):
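        # Drain whatever is currently buffered on the console pty without
        # blocking (iflag=nonblock), so the caller can append it to the
        # console log file.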
out, err = utils.execute('dd',
'if=%s' % pty,
'iflag=nonblock',
run_as_root=True,
check_exit_code=False)
return out
def _append_to_file(self, data, fpath):
LOG.info(_LI('data: %(data)r, fpath: %(fpath)r'),
{'data': data, 'fpath': fpath})
with open(fpath, 'a+') as fp:
fp.write(data)
return fpath
def _get_console_output_file(self, instance, console_log):
bytes_to_read = MAX_CONSOLE_BYTES
log_data = b"" # The last N read bytes
i = 0 # in case there is a log rotation (like "virtlogd")
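        # Rotated logs are expected next to the main file as
        # "<console_log>.0", "<console_log>.1", ...; we walk them newest
        # first until MAX_CONSOLE_BYTES have been read or no more files
        # exist.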
path = console_log
while bytes_to_read > 0 and os.path.exists(path):
libvirt_utils.chown(path, os.getuid())
with libvirt_utils.file_open(path, 'rb') as fp:
read_log_data, remaining = utils.last_bytes(fp, bytes_to_read)
# We need the log file content in chronological order,
# that's why we *prepend* the log data.
log_data = read_log_data + log_data
bytes_to_read -= len(read_log_data)
path = console_log + "." + str(i)
i += 1
if remaining > 0:
LOG.info(_LI('Truncated console log returned, '
'%d bytes ignored'), remaining,
instance=instance)
return log_data
def get_console_output(self, context, instance):
guest = self._host.get_guest(instance)
xml = guest.get_xml_desc()
tree = etree.fromstring(xml)
# If the guest has a console logging to a file prefer to use that
file_consoles = tree.findall("./devices/console[@type='file']")
if file_consoles:
for file_console in file_consoles:
source_node = file_console.find('./source')
if source_node is None:
continue
path = source_node.get("path")
if not path:
continue
if not os.path.exists(path):
LOG.info(_LI('Instance is configured with a file console, '
'but the backing file is not (yet?) present'),
instance=instance)
return ""
return self._get_console_output_file(instance, path)
# Try 'pty' types
pty_consoles = tree.findall("./devices/console[@type='pty']")
if pty_consoles:
for pty_console in pty_consoles:
source_node = pty_console.find('./source')
if source_node is None:
continue
pty = source_node.get("path")
if not pty:
continue
break
else:
raise exception.ConsoleNotAvailable()
else:
raise exception.ConsoleNotAvailable()
console_log = self._get_console_log_path(instance)
# By default libvirt chowns the console log when it starts a domain.
# We need to chown it back before attempting to read from or write
# to it.
if os.path.exists(console_log):
libvirt_utils.chown(console_log, os.getuid())
data = self._flush_libvirt_console(pty)
# NOTE(markus_z): The virt_types kvm and qemu are the only ones
# which create a dedicated file device for the console logging.
# Other virt_types like xen, lxc, uml, parallels depend on the
# flush of that pty device into the "console.log" file to ensure
# that a series of "get_console_output" calls return the complete
# content even after rebooting a guest.
fpath = self._append_to_file(data, console_log)
return self._get_console_output_file(instance, fpath)
def get_host_ip_addr(self):
ips = compute_utils.get_machine_ips()
if CONF.my_ip not in ips:
LOG.warning(_LW('my_ip address (%(my_ip)s) was not found on '
'any of the interfaces: %(ifaces)s'),
{'my_ip': CONF.my_ip, 'ifaces': ", ".join(ips)})
return CONF.my_ip
def get_vnc_console(self, context, instance):
def get_vnc_port_for_instance(instance_name):
guest = self._host.get_guest(instance)
xml = guest.get_xml_desc()
xml_dom = etree.fromstring(xml)
graphic = xml_dom.find("./devices/graphics[@type='vnc']")
if graphic is not None:
return graphic.get('port')
# NOTE(rmk): We had VNC consoles enabled but the instance in
# question is not actually listening for connections.
raise exception.ConsoleTypeUnavailable(console_type='vnc')
port = get_vnc_port_for_instance(instance.name)
host = CONF.vnc.vncserver_proxyclient_address
return ctype.ConsoleVNC(host=host, port=port)
def get_spice_console(self, context, instance):
def get_spice_ports_for_instance(instance_name):
guest = self._host.get_guest(instance)
xml = guest.get_xml_desc()
xml_dom = etree.fromstring(xml)
graphic = xml_dom.find("./devices/graphics[@type='spice']")
if graphic is not None:
return (graphic.get('port'), graphic.get('tlsPort'))
# NOTE(rmk): We had Spice consoles enabled but the instance in
# question is not actually listening for connections.
raise exception.ConsoleTypeUnavailable(console_type='spice')
ports = get_spice_ports_for_instance(instance.name)
host = CONF.spice.server_proxyclient_address
return ctype.ConsoleSpice(host=host, port=ports[0], tlsPort=ports[1])
def get_serial_console(self, context, instance):
guest = self._host.get_guest(instance)
for hostname, port in self._get_serial_ports_from_guest(
guest, mode='bind'):
return ctype.ConsoleSerial(host=hostname, port=port)
raise exception.ConsoleTypeUnavailable(console_type='serial')
@staticmethod
def _supports_direct_io(dirpath):
if not hasattr(os, 'O_DIRECT'):
LOG.debug("This python runtime does not support direct I/O")
return False
testfile = os.path.join(dirpath, ".directio.test")
hasDirectIO = True
fd = None
try:
fd = os.open(testfile, os.O_CREAT | os.O_WRONLY | os.O_DIRECT)
            # Check if the write is allowed with 512 byte alignment
align_size = 512
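            # O_DIRECT requires a suitably aligned userspace buffer; an
            # mmap'd buffer is page-aligned, which avoids a spurious EINVAL
            # (and a false "no direct I/O" result) that writing from an
            # unaligned buffer could produce even on capable filesystems.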
m = mmap.mmap(-1, align_size)
m.write(b"x" * align_size)
os.write(fd, m)
LOG.debug("Path '%(path)s' supports direct I/O",
{'path': dirpath})
except OSError as e:
if e.errno == errno.EINVAL:
LOG.debug("Path '%(path)s' does not support direct I/O: "
"'%(ex)s'", {'path': dirpath, 'ex': e})
hasDirectIO = False
else:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Error on '%(path)s' while checking "
"direct I/O: '%(ex)s'"),
{'path': dirpath, 'ex': e})
except Exception as e:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Error on '%(path)s' while checking direct I/O: "
"'%(ex)s'"), {'path': dirpath, 'ex': e})
finally:
# ensure unlink(filepath) will actually remove the file by deleting
# the remaining link to it in close(fd)
if fd is not None:
os.close(fd)
try:
os.unlink(testfile)
except Exception:
pass
return hasDirectIO
@staticmethod
def _create_ephemeral(target, ephemeral_size,
fs_label, os_type, is_block_dev=False,
context=None, specified_fs=None,
vm_mode=None):
if not is_block_dev:
if (CONF.libvirt.virt_type == "parallels" and
vm_mode == fields.VMMode.EXE):
libvirt_utils.create_ploop_image('expanded', target,
'%dG' % ephemeral_size,
specified_fs)
return
libvirt_utils.create_image('raw', target, '%dG' % ephemeral_size)
# Run as root only for block devices.
disk_api.mkfs(os_type, fs_label, target, run_as_root=is_block_dev,
specified_fs=specified_fs)
@staticmethod
def _create_swap(target, swap_mb, context=None):
"""Create a swap file of specified size."""
libvirt_utils.create_image('raw', target, '%dM' % swap_mb)
utils.mkfs('swap', target)
@staticmethod
def _get_console_log_path(instance):
return os.path.join(libvirt_utils.get_instance_path(instance),
'console.log')
def _ensure_console_log_for_instance(self, instance):
# NOTE(mdbooth): Although libvirt will create this file for us
# automatically when it starts, it will initially create it with
# root ownership and then chown it depending on the configuration of
# the domain it is launching. Quobyte CI explicitly disables the
# chown by setting dynamic_ownership=0 in libvirt's config.
# Consequently when the domain starts it is unable to write to its
# console.log. See bug https://bugs.launchpad.net/nova/+bug/1597644
#
# To work around this, we create the file manually before starting
# the domain so it has the same ownership as Nova. This works
# for Quobyte CI because it is also configured to run qemu as the same
# user as the Nova service. Installations which don't set
# dynamic_ownership=0 are not affected because libvirt will always
# correctly configure permissions regardless of initial ownership.
#
# Setting dynamic_ownership=0 is dubious and potentially broken in
# more ways than console.log (see comment #22 on the above bug), so
# Future Maintainer who finds this code problematic should check to see
# if we still support it.
console_file = self._get_console_log_path(instance)
LOG.debug('Ensure instance console log exists: %s', console_file,
instance=instance)
try:
libvirt_utils.file_open(console_file, 'a').close()
# NOTE(sfinucan): We can safely ignore permission issues here and
# assume that it is libvirt that has taken ownership of this file.
except IOError as ex:
if ex.errno != errno.EACCES:
raise
LOG.debug('Console file already exists: %s.', console_file)
@staticmethod
def _get_disk_config_image_type():
# TODO(mikal): there is a bug here if images_type has
# changed since creation of the instance, but I am pretty
# sure that this bug already exists.
return 'rbd' if CONF.libvirt.images_type == 'rbd' else 'raw'
@staticmethod
def _is_booted_from_volume(block_device_info):
"""Determines whether the VM is booting from volume
Determines whether the block device info indicates that the VM
is booting from a volume.
"""
block_device_mapping = driver.block_device_info_get_mapping(
block_device_info)
return bool(block_device.get_root_bdm(block_device_mapping))
def _inject_data(self, disk, instance, injection_info):
"""Injects data in a disk image
Helper used for injecting data in a disk image file system.
:param disk: The disk we're injecting into (an Image object)
:param instance: The instance we're injecting into
:param injection_info: Injection info
"""
        # Handles the partition that needs to be used.
        LOG.debug('Checking root disk injection %(info)s',
                  {'info': str(injection_info)}, instance=instance)
target_partition = None
if not instance.kernel_id:
target_partition = CONF.libvirt.inject_partition
if target_partition == 0:
target_partition = None
if CONF.libvirt.virt_type == 'lxc':
target_partition = None
# Handles the key injection.
if CONF.libvirt.inject_key and instance.get('key_data'):
key = str(instance.key_data)
else:
key = None
# Handles the admin password injection.
if not CONF.libvirt.inject_password:
admin_pass = None
else:
admin_pass = injection_info.admin_pass
# Handles the network injection.
net = netutils.get_injected_network_template(
injection_info.network_info,
libvirt_virt_type=CONF.libvirt.virt_type)
# Handles the metadata injection
metadata = instance.get('metadata')
if any((key, net, metadata, admin_pass, injection_info.files)):
            LOG.debug('Injecting %(info)s', {'info': str(injection_info)},
                      instance=instance)
img_id = instance.image_ref
try:
disk_api.inject_data(disk.get_model(self._conn),
key, net, metadata, admin_pass,
injection_info.files,
partition=target_partition,
mandatory=('files',))
except Exception as e:
with excutils.save_and_reraise_exception():
LOG.error(_LE('Error injecting data into image '
'%(img_id)s (%(e)s)'),
{'img_id': img_id, 'e': e},
instance=instance)
# NOTE(sileht): many callers of this method assume that this
# method doesn't fail if an image already exists but instead
# think that it will be reused (ie: (live)-migration/resize)
def _create_image(self, context, instance,
disk_mapping, injection_info=None, suffix='',
disk_images=None, block_device_info=None,
fallback_from_host=None,
ignore_bdi_for_swap=False):
booted_from_volume = self._is_booted_from_volume(block_device_info)
def image(fname, image_type=CONF.libvirt.images_type):
return self.image_backend.by_name(instance,
fname + suffix, image_type)
def raw(fname):
return image(fname, image_type='raw')
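        # image() returns the configured image backend object for the named
        # disk of this instance, while raw() forces the 'raw' backend, which
        # is how the kernel and ramdisk images below are cached.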
# ensure directories exist and are writable
fileutils.ensure_tree(libvirt_utils.get_instance_path(instance))
LOG.info(_LI('Creating image'), instance=instance)
if not disk_images:
disk_images = {'image_id': instance.image_ref,
'kernel_id': instance.kernel_id,
'ramdisk_id': instance.ramdisk_id}
if disk_images['kernel_id']:
fname = imagecache.get_cache_fname(disk_images['kernel_id'])
raw('kernel').cache(fetch_func=libvirt_utils.fetch_raw_image,
context=context,
filename=fname,
image_id=disk_images['kernel_id'])
if disk_images['ramdisk_id']:
fname = imagecache.get_cache_fname(disk_images['ramdisk_id'])
raw('ramdisk').cache(fetch_func=libvirt_utils.fetch_raw_image,
context=context,
filename=fname,
image_id=disk_images['ramdisk_id'])
inst_type = instance.get_flavor()
if CONF.libvirt.virt_type == 'uml':
libvirt_utils.chown(image('disk').path, 'root')
self._create_and_inject_local_root(context, instance,
booted_from_volume, suffix,
disk_images, injection_info,
fallback_from_host)
# Lookup the filesystem type if required
os_type_with_default = disk_api.get_fs_type_for_os_type(
instance.os_type)
# Generate a file extension based on the file system
# type and the mkfs commands configured if any
file_extension = disk_api.get_file_extension_for_os_type(
os_type_with_default)
vm_mode = fields.VMMode.get_from_instance(instance)
ephemeral_gb = instance.flavor.ephemeral_gb
if 'disk.local' in disk_mapping:
disk_image = image('disk.local')
fn = functools.partial(self._create_ephemeral,
fs_label='ephemeral0',
os_type=instance.os_type,
is_block_dev=disk_image.is_block_dev,
vm_mode=vm_mode)
fname = "ephemeral_%s_%s" % (ephemeral_gb, file_extension)
size = ephemeral_gb * units.Gi
disk_image.cache(fetch_func=fn,
context=context,
filename=fname,
size=size,
ephemeral_size=ephemeral_gb)
for idx, eph in enumerate(driver.block_device_info_get_ephemerals(
block_device_info)):
disk_image = image(blockinfo.get_eph_disk(idx))
specified_fs = eph.get('guest_format')
if specified_fs and not self.is_supported_fs_format(specified_fs):
msg = _("%s format is not supported") % specified_fs
raise exception.InvalidBDMFormat(details=msg)
fn = functools.partial(self._create_ephemeral,
fs_label='ephemeral%d' % idx,
os_type=instance.os_type,
is_block_dev=disk_image.is_block_dev,
vm_mode=vm_mode)
size = eph['size'] * units.Gi
fname = "ephemeral_%s_%s" % (eph['size'], file_extension)
disk_image.cache(fetch_func=fn,
context=context,
filename=fname,
size=size,
ephemeral_size=eph['size'],
specified_fs=specified_fs)
if 'disk.swap' in disk_mapping:
mapping = disk_mapping['disk.swap']
swap_mb = 0
if ignore_bdi_for_swap:
# This is a workaround to support legacy swap resizing,
# which does not touch swap size specified in bdm,
# but works with flavor specified size only.
# In this case we follow the legacy logic and ignore block
# device info completely.
# NOTE(ft): This workaround must be removed when a correct
# implementation of resize operation changing sizes in bdms is
# developed. Also at that stage we probably may get rid of
# the direct usage of flavor swap size here,
# leaving the work with bdm only.
swap_mb = inst_type['swap']
else:
swap = driver.block_device_info_get_swap(block_device_info)
if driver.swap_is_usable(swap):
swap_mb = swap['swap_size']
elif (inst_type['swap'] > 0 and
not block_device.volume_in_mapping(
mapping['dev'], block_device_info)):
swap_mb = inst_type['swap']
if swap_mb > 0:
size = swap_mb * units.Mi
image('disk.swap').cache(fetch_func=self._create_swap,
context=context,
filename="swap_%s" % swap_mb,
size=size,
swap_mb=swap_mb)
def _create_and_inject_local_root(self, context, instance,
booted_from_volume, suffix, disk_images,
injection_info, fallback_from_host):
# File injection only if needed
need_inject = (not configdrive.required_by(instance) and
injection_info is not None and
CONF.libvirt.inject_partition != -2)
# NOTE(ndipanov): Even if disk_mapping was passed in, which
# currently happens only on rescue - we still don't want to
# create a base image.
if not booted_from_volume:
root_fname = imagecache.get_cache_fname(disk_images['image_id'])
size = instance.flavor.root_gb * units.Gi
if size == 0 or suffix == '.rescue':
size = None
backend = self.image_backend.by_name(instance, 'disk' + suffix,
CONF.libvirt.images_type)
if instance.task_state == task_states.RESIZE_FINISH:
backend.create_snap(libvirt_utils.RESIZE_SNAPSHOT_NAME)
if backend.SUPPORTS_CLONE:
def clone_fallback_to_fetch(*args, **kwargs):
try:
backend.clone(context, disk_images['image_id'])
except exception.ImageUnacceptable:
libvirt_utils.fetch_image(*args, **kwargs)
fetch_func = clone_fallback_to_fetch
else:
fetch_func = libvirt_utils.fetch_image
self._try_fetch_image_cache(backend, fetch_func, context,
root_fname, disk_images['image_id'],
instance, size, fallback_from_host)
if need_inject:
self._inject_data(backend, instance, injection_info)
elif need_inject:
LOG.warning(_LW('File injection into a boot from volume '
'instance is not supported'), instance=instance)
def _create_configdrive(self, context, instance, injection_info,
rescue=False):
        # As this method is called right after the definition of a
# domain, but before its actual launch, device metadata will be built
# and saved in the instance for it to be used by the config drive and
# the metadata service.
instance.device_metadata = self._build_device_metadata(context,
instance)
if configdrive.required_by(instance):
LOG.info(_LI('Using config drive'), instance=instance)
name = 'disk.config'
if rescue:
name += '.rescue'
config_disk = self.image_backend.by_name(
instance, name, self._get_disk_config_image_type())
# Don't overwrite an existing config drive
if not config_disk.exists():
extra_md = {}
if injection_info.admin_pass:
extra_md['admin_pass'] = injection_info.admin_pass
inst_md = instance_metadata.InstanceMetadata(
instance, content=injection_info.files, extra_md=extra_md,
network_info=injection_info.network_info,
request_context=context)
cdb = configdrive.ConfigDriveBuilder(instance_md=inst_md)
with cdb:
# NOTE(mdbooth): We're hardcoding here the path of the
# config disk when using the flat backend. This isn't
# good, but it's required because we need a local path we
# know we can write to in case we're subsequently
# importing into rbd. This will be cleaned up when we
# replace this with a call to create_from_func, but that
# can't happen until we've updated the backends and we
# teach them not to cache config disks. This isn't
# possible while we're still using cache() under the hood.
config_disk_local_path = os.path.join(
libvirt_utils.get_instance_path(instance), name)
LOG.info(_LI('Creating config drive at %(path)s'),
{'path': config_disk_local_path},
instance=instance)
try:
cdb.make_drive(config_disk_local_path)
except processutils.ProcessExecutionError as e:
with excutils.save_and_reraise_exception():
LOG.error(_LE('Creating config drive failed '
'with error: %s'),
e, instance=instance)
try:
config_disk.import_file(
instance, config_disk_local_path, name)
finally:
# NOTE(mikal): if the config drive was imported into RBD,
# then we no longer need the local copy
if CONF.libvirt.images_type == 'rbd':
os.unlink(config_disk_local_path)
def _prepare_pci_devices_for_use(self, pci_devices):
        # kvm, qemu support managed mode
# In managed mode, the configured device will be automatically
# detached from the host OS drivers when the guest is started,
# and then re-attached when the guest shuts down.
if CONF.libvirt.virt_type != 'xen':
# we do manual detach only for xen
return
try:
for dev in pci_devices:
libvirt_dev_addr = dev['hypervisor_name']
libvirt_dev = \
self._host.device_lookup_by_name(libvirt_dev_addr)
# Note(yjiang5) Spelling for 'dettach' is correct, see
# http://libvirt.org/html/libvirt-libvirt.html.
libvirt_dev.dettach()
# Note(yjiang5): A reset of one PCI device may impact other
# devices on the same bus, thus we need two separated loops
# to detach and then reset it.
for dev in pci_devices:
libvirt_dev_addr = dev['hypervisor_name']
libvirt_dev = \
self._host.device_lookup_by_name(libvirt_dev_addr)
libvirt_dev.reset()
except libvirt.libvirtError as exc:
raise exception.PciDevicePrepareFailed(id=dev['id'],
instance_uuid=
dev['instance_uuid'],
reason=six.text_type(exc))
def _detach_pci_devices(self, guest, pci_devs):
try:
for dev in pci_devs:
guest.detach_device(self._get_guest_pci_device(dev), live=True)
# after detachDeviceFlags returned, we should check the dom to
# ensure the detaching is finished
xml = guest.get_xml_desc()
xml_doc = etree.fromstring(xml)
guest_config = vconfig.LibvirtConfigGuest()
guest_config.parse_dom(xml_doc)
for hdev in [d for d in guest_config.devices
if isinstance(d, vconfig.LibvirtConfigGuestHostdevPCI)]:
hdbsf = [hdev.domain, hdev.bus, hdev.slot, hdev.function]
dbsf = pci_utils.parse_address(dev.address)
if [int(x, 16) for x in hdbsf] ==\
[int(x, 16) for x in dbsf]:
raise exception.PciDeviceDetachFailed(reason=
"timeout",
dev=dev)
except libvirt.libvirtError as ex:
error_code = ex.get_error_code()
if error_code == libvirt.VIR_ERR_NO_DOMAIN:
LOG.warning(_LW("Instance disappeared while detaching "
"a PCI device from it."))
else:
raise
def _attach_pci_devices(self, guest, pci_devs):
try:
for dev in pci_devs:
guest.attach_device(self._get_guest_pci_device(dev))
except libvirt.libvirtError:
LOG.error(_LE('Attaching PCI devices %(dev)s to %(dom)s failed.'),
{'dev': pci_devs, 'dom': guest.id})
raise
@staticmethod
def _has_direct_passthrough_port(network_info):
for vif in network_info:
if (vif['vnic_type'] in
network_model.VNIC_TYPES_DIRECT_PASSTHROUGH):
return True
return False
def _attach_direct_passthrough_ports(
self, context, instance, guest, network_info=None):
if network_info is None:
network_info = instance.info_cache.network_info
if network_info is None:
return
if self._has_direct_passthrough_port(network_info):
for vif in network_info:
if (vif['vnic_type'] in
network_model.VNIC_TYPES_DIRECT_PASSTHROUGH):
cfg = self.vif_driver.get_config(instance,
vif,
instance.image_meta,
instance.flavor,
CONF.libvirt.virt_type,
self._host)
LOG.debug('Attaching direct passthrough port %(port)s '
'to %(dom)s', {'port': vif, 'dom': guest.id},
instance=instance)
guest.attach_device(cfg)
def _detach_direct_passthrough_ports(self, context, instance, guest):
network_info = instance.info_cache.network_info
if network_info is None:
return
if self._has_direct_passthrough_port(network_info):
# In case of VNIC_TYPES_DIRECT_PASSTHROUGH ports we create
# pci request per direct passthrough port. Therefore we can trust
# that pci_slot value in the vif is correct.
direct_passthrough_pci_addresses = [
vif['profile']['pci_slot']
for vif in network_info
if (vif['vnic_type'] in
network_model.VNIC_TYPES_DIRECT_PASSTHROUGH and
vif['profile'].get('pci_slot') is not None)
]
# use detach_pci_devices to avoid failure in case of
# multiple guest direct passthrough ports with the same MAC
# (protection use-case, ports are on different physical
# interfaces)
pci_devs = pci_manager.get_instance_pci_devs(instance, 'all')
direct_passthrough_pci_addresses = (
[pci_dev for pci_dev in pci_devs
if pci_dev.address in direct_passthrough_pci_addresses])
self._detach_pci_devices(guest, direct_passthrough_pci_addresses)
def _set_host_enabled(self, enabled,
disable_reason=DISABLE_REASON_UNDEFINED):
"""Enables / Disables the compute service on this host.
This doesn't override non-automatic disablement with an automatic
setting; thereby permitting operators to keep otherwise
healthy hosts out of rotation.
"""
status_name = {True: 'disabled',
False: 'enabled'}
disable_service = not enabled
ctx = nova_context.get_admin_context()
try:
service = objects.Service.get_by_compute_host(ctx, CONF.host)
if service.disabled != disable_service:
# Note(jang): this is a quick fix to stop operator-
# disabled compute hosts from re-enabling themselves
# automatically. We prefix any automatic reason code
# with a fixed string. We only re-enable a host
# automatically if we find that string in place.
# This should probably be replaced with a separate flag.
if not service.disabled or (
service.disabled_reason and
service.disabled_reason.startswith(DISABLE_PREFIX)):
service.disabled = disable_service
service.disabled_reason = (
DISABLE_PREFIX + disable_reason
if disable_service and disable_reason else
DISABLE_REASON_UNDEFINED)
service.save()
LOG.debug('Updating compute service status to %s',
status_name[disable_service])
else:
LOG.debug('Not overriding manual compute service '
'status with: %s',
status_name[disable_service])
except exception.ComputeHostNotFound:
LOG.warning(_LW('Cannot update service status on host "%s" '
'since it is not registered.'), CONF.host)
except Exception:
LOG.warning(_LW('Cannot update service status on host "%s" '
'due to an unexpected exception.'), CONF.host,
exc_info=True)
def _get_guest_cpu_model_config(self):
mode = CONF.libvirt.cpu_mode
model = CONF.libvirt.cpu_model
if (CONF.libvirt.virt_type == "kvm" or
CONF.libvirt.virt_type == "qemu"):
if mode is None:
mode = "host-model"
if mode == "none":
return vconfig.LibvirtConfigGuestCPU()
else:
if mode is None or mode == "none":
return None
if ((CONF.libvirt.virt_type != "kvm" and
CONF.libvirt.virt_type != "qemu")):
msg = _("Config requested an explicit CPU model, but "
"the current libvirt hypervisor '%s' does not "
"support selecting CPU models") % CONF.libvirt.virt_type
raise exception.Invalid(msg)
if mode == "custom" and model is None:
msg = _("Config requested a custom CPU model, but no "
"model name was provided")
raise exception.Invalid(msg)
elif mode != "custom" and model is not None:
msg = _("A CPU model name should not be set when a "
"host CPU model is requested")
raise exception.Invalid(msg)
LOG.debug("CPU mode '%(mode)s' model '%(model)s' was chosen",
{'mode': mode, 'model': (model or "")})
cpu = vconfig.LibvirtConfigGuestCPU()
cpu.mode = mode
cpu.model = model
return cpu
def _get_guest_cpu_config(self, flavor, image_meta,
guest_cpu_numa_config, instance_numa_topology):
cpu = self._get_guest_cpu_model_config()
if cpu is None:
return None
topology = hardware.get_best_cpu_topology(
flavor, image_meta, numa_topology=instance_numa_topology)
cpu.sockets = topology.sockets
cpu.cores = topology.cores
cpu.threads = topology.threads
cpu.numa = guest_cpu_numa_config
return cpu
def _get_guest_disk_config(self, instance, name, disk_mapping, inst_type,
image_type=None):
if CONF.libvirt.hw_disk_discard:
if not self._host.has_min_version(hv_ver=MIN_QEMU_DISCARD_VERSION,
hv_type=host.HV_DRIVER_QEMU):
msg = (_('Volume sets discard option, qemu %(qemu)s'
' or later is required.') %
{'qemu': MIN_QEMU_DISCARD_VERSION})
raise exception.Invalid(msg)
disk = self.image_backend.by_name(instance, name, image_type)
if (name == 'disk.config' and image_type == 'rbd' and
not disk.exists()):
# This is likely an older config drive that has not been migrated
# to rbd yet. Try to fall back on 'flat' image type.
# TODO(melwitt): Add online migration of some sort so we can
# remove this fall back once we know all config drives are in rbd.
            # NOTE(vladikr): make sure that the flat image exists, otherwise
# the image will be created after the domain definition.
flat_disk = self.image_backend.by_name(instance, name, 'flat')
if flat_disk.exists():
disk = flat_disk
LOG.debug('Config drive not found in RBD, falling back to the '
'instance directory', instance=instance)
disk_info = disk_mapping[name]
return disk.libvirt_info(disk_info['bus'],
disk_info['dev'],
disk_info['type'],
self.disk_cachemode,
inst_type['extra_specs'],
self._host.get_version())
def _get_guest_fs_config(self, instance, name, image_type=None):
disk = self.image_backend.by_name(instance, name, image_type)
return disk.libvirt_fs_info("/", "ploop")
def _get_guest_storage_config(self, instance, image_meta,
disk_info,
rescue, block_device_info,
inst_type, os_type):
devices = []
disk_mapping = disk_info['mapping']
block_device_mapping = driver.block_device_info_get_mapping(
block_device_info)
mount_rootfs = CONF.libvirt.virt_type == "lxc"
def _get_ephemeral_devices():
eph_devices = []
for idx, eph in enumerate(
driver.block_device_info_get_ephemerals(
block_device_info)):
diskeph = self._get_guest_disk_config(
instance,
blockinfo.get_eph_disk(idx),
disk_mapping, inst_type)
eph_devices.append(diskeph)
return eph_devices
if mount_rootfs:
fs = vconfig.LibvirtConfigGuestFilesys()
fs.source_type = "mount"
fs.source_dir = os.path.join(
libvirt_utils.get_instance_path(instance), 'rootfs')
devices.append(fs)
elif (os_type == fields.VMMode.EXE and
CONF.libvirt.virt_type == "parallels"):
if rescue:
fsrescue = self._get_guest_fs_config(instance, "disk.rescue")
devices.append(fsrescue)
fsos = self._get_guest_fs_config(instance, "disk")
fsos.target_dir = "/mnt/rescue"
devices.append(fsos)
else:
if 'disk' in disk_mapping:
fs = self._get_guest_fs_config(instance, "disk")
devices.append(fs)
devices = devices + _get_ephemeral_devices()
else:
if rescue:
diskrescue = self._get_guest_disk_config(instance,
'disk.rescue',
disk_mapping,
inst_type)
devices.append(diskrescue)
diskos = self._get_guest_disk_config(instance,
'disk',
disk_mapping,
inst_type)
devices.append(diskos)
else:
if 'disk' in disk_mapping:
diskos = self._get_guest_disk_config(instance,
'disk',
disk_mapping,
inst_type)
devices.append(diskos)
if 'disk.local' in disk_mapping:
disklocal = self._get_guest_disk_config(instance,
'disk.local',
disk_mapping,
inst_type)
devices.append(disklocal)
instance.default_ephemeral_device = (
block_device.prepend_dev(disklocal.target_dev))
devices = devices + _get_ephemeral_devices()
if 'disk.swap' in disk_mapping:
diskswap = self._get_guest_disk_config(instance,
'disk.swap',
disk_mapping,
inst_type)
devices.append(diskswap)
instance.default_swap_device = (
block_device.prepend_dev(diskswap.target_dev))
config_name = 'disk.config.rescue' if rescue else 'disk.config'
if config_name in disk_mapping:
diskconfig = self._get_guest_disk_config(
instance, config_name, disk_mapping, inst_type,
self._get_disk_config_image_type())
devices.append(diskconfig)
for vol in block_device.get_bdms_to_connect(block_device_mapping,
mount_rootfs):
connection_info = vol['connection_info']
vol_dev = block_device.prepend_dev(vol['mount_device'])
info = disk_mapping[vol_dev]
self._connect_volume(connection_info, info)
cfg = self._get_volume_config(connection_info, info)
devices.append(cfg)
vol['connection_info'] = connection_info
vol.save()
for d in devices:
self._set_cache_mode(d)
if image_meta.properties.get('hw_scsi_model'):
hw_scsi_model = image_meta.properties.hw_scsi_model
scsi_controller = vconfig.LibvirtConfigGuestController()
scsi_controller.type = 'scsi'
scsi_controller.model = hw_scsi_model
devices.append(scsi_controller)
return devices
def _get_host_sysinfo_serial_hardware(self):
"""Get a UUID from the host hardware
Get a UUID for the host hardware reported by libvirt.
This is typically from the SMBIOS data, unless it has
been overridden in /etc/libvirt/libvirtd.conf
"""
caps = self._host.get_capabilities()
return caps.host.uuid
def _get_host_sysinfo_serial_os(self):
"""Get a UUID from the host operating system
Get a UUID for the host operating system. Modern Linux
distros based on systemd provide a /etc/machine-id
file containing a UUID. This is also provided inside
systemd based containers and can be provided by other
init systems too, since it is just a plain text file.
"""
if not os.path.exists("/etc/machine-id"):
msg = _("Unable to get host UUID: /etc/machine-id does not exist")
raise exception.InternalError(msg)
with open("/etc/machine-id") as f:
# We want to have '-' in the right place
# so we parse & reformat the value
lines = f.read().split()
if not lines:
msg = _("Unable to get host UUID: /etc/machine-id is empty")
raise exception.InternalError(msg)
return str(uuid.UUID(lines[0]))
def _get_host_sysinfo_serial_auto(self):
if os.path.exists("/etc/machine-id"):
return self._get_host_sysinfo_serial_os()
else:
return self._get_host_sysinfo_serial_hardware()
def _get_guest_config_sysinfo(self, instance):
sysinfo = vconfig.LibvirtConfigGuestSysinfo()
sysinfo.system_manufacturer = version.vendor_string()
sysinfo.system_product = version.product_string()
sysinfo.system_version = version.version_string_with_package()
sysinfo.system_serial = self._sysinfo_serial_func()
sysinfo.system_uuid = instance.uuid
sysinfo.system_family = "Virtual Machine"
return sysinfo
def _get_guest_pci_device(self, pci_device):
dbsf = pci_utils.parse_address(pci_device.address)
dev = vconfig.LibvirtConfigGuestHostdevPCI()
dev.domain, dev.bus, dev.slot, dev.function = dbsf
        # only kvm and qemu support managed mode
if CONF.libvirt.virt_type in ('xen', 'parallels',):
dev.managed = 'no'
if CONF.libvirt.virt_type in ('kvm', 'qemu'):
dev.managed = 'yes'
return dev
def _get_guest_config_meta(self, context, instance):
"""Get metadata config for guest."""
meta = vconfig.LibvirtConfigGuestMetaNovaInstance()
meta.package = version.version_string_with_package()
meta.name = instance.display_name
meta.creationTime = time.time()
if instance.image_ref not in ("", None):
meta.roottype = "image"
meta.rootid = instance.image_ref
if context is not None:
ometa = vconfig.LibvirtConfigGuestMetaNovaOwner()
ometa.userid = context.user_id
ometa.username = context.user_name
ometa.projectid = context.project_id
ometa.projectname = context.project_name
meta.owner = ometa
fmeta = vconfig.LibvirtConfigGuestMetaNovaFlavor()
flavor = instance.flavor
fmeta.name = flavor.name
fmeta.memory = flavor.memory_mb
fmeta.vcpus = flavor.vcpus
fmeta.ephemeral = flavor.ephemeral_gb
fmeta.disk = flavor.root_gb
fmeta.swap = flavor.swap
meta.flavor = fmeta
return meta
def _machine_type_mappings(self):
mappings = {}
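        # Each hw_machine_type entry has the form "<arch>=<machine type>",
        # e.g. (hypothetical value) "x86_64=pc-i440fx-2.9".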
for mapping in CONF.libvirt.hw_machine_type:
host_arch, _, machine_type = mapping.partition('=')
mappings[host_arch] = machine_type
return mappings
def _get_machine_type(self, image_meta, caps):
# The underlying machine type can be set as an image attribute,
# or otherwise based on some architecture specific defaults
mach_type = None
if image_meta.properties.get('hw_machine_type') is not None:
mach_type = image_meta.properties.hw_machine_type
else:
# For ARM systems we will default to vexpress-a15 for armv7
# and virt for aarch64
if caps.host.cpu.arch == fields.Architecture.ARMV7:
mach_type = "vexpress-a15"
if caps.host.cpu.arch == fields.Architecture.AARCH64:
mach_type = "virt"
if caps.host.cpu.arch in (fields.Architecture.S390,
fields.Architecture.S390X):
mach_type = 's390-ccw-virtio'
# If set in the config, use that as the default.
if CONF.libvirt.hw_machine_type:
mappings = self._machine_type_mappings()
mach_type = mappings.get(caps.host.cpu.arch)
return mach_type
@staticmethod
def _create_idmaps(klass, map_strings):
idmaps = []
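        # Each entry in map_strings is expected to look like
        # "<start>:<target>:<count>"; in libvirt idmap terms, start is the
        # first ID inside the container and target the first host ID it is
        # mapped to, e.g. (hypothetical values) "0:1000:65536".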
if len(map_strings) > 5:
map_strings = map_strings[0:5]
LOG.warning(_LW("Too many id maps, only included first five."))
for map_string in map_strings:
try:
idmap = klass()
values = [int(i) for i in map_string.split(":")]
idmap.start = values[0]
idmap.target = values[1]
idmap.count = values[2]
idmaps.append(idmap)
except (ValueError, IndexError):
LOG.warning(_LW("Invalid value for id mapping %s"), map_string)
return idmaps
def _get_guest_idmaps(self):
id_maps = []
if CONF.libvirt.virt_type == 'lxc' and CONF.libvirt.uid_maps:
uid_maps = self._create_idmaps(vconfig.LibvirtConfigGuestUIDMap,
CONF.libvirt.uid_maps)
id_maps.extend(uid_maps)
if CONF.libvirt.virt_type == 'lxc' and CONF.libvirt.gid_maps:
gid_maps = self._create_idmaps(vconfig.LibvirtConfigGuestGIDMap,
CONF.libvirt.gid_maps)
id_maps.extend(gid_maps)
return id_maps
def _update_guest_cputune(self, guest, flavor, virt_type):
is_able = self._host.is_cpu_control_policy_capable()
cputuning = ['shares', 'period', 'quota']
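        # These correspond to the flavor extra specs "quota:cpu_shares",
        # "quota:cpu_period" and "quota:cpu_quota"; e.g. (hypothetical
        # values) cpu_quota=10000 with cpu_period=20000 caps each vCPU at
        # roughly half of a host CPU.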
wants_cputune = any([k for k in cputuning
if "quota:cpu_" + k in flavor.extra_specs.keys()])
if wants_cputune and not is_able:
raise exception.UnsupportedHostCPUControlPolicy()
if not is_able or virt_type not in ('lxc', 'kvm', 'qemu'):
return
if guest.cputune is None:
guest.cputune = vconfig.LibvirtConfigGuestCPUTune()
# Setting the default cpu.shares value to be a value
# dependent on the number of vcpus
guest.cputune.shares = 1024 * guest.vcpus
for name in cputuning:
key = "quota:cpu_" + name
if key in flavor.extra_specs:
setattr(guest.cputune, name,
int(flavor.extra_specs[key]))
def _get_cpu_numa_config_from_instance(self, instance_numa_topology,
wants_hugepages):
if instance_numa_topology:
guest_cpu_numa = vconfig.LibvirtConfigGuestCPUNUMA()
for instance_cell in instance_numa_topology.cells:
guest_cell = vconfig.LibvirtConfigGuestCPUNUMACell()
guest_cell.id = instance_cell.id
guest_cell.cpus = instance_cell.cpuset
guest_cell.memory = instance_cell.memory * units.Ki
# The vhost-user network backend requires file backed
# guest memory (ie huge pages) to be marked as shared
# access, not private, so an external process can read
# and write the pages.
#
# You can't change the shared vs private flag for an
# already running guest, and since we can't predict what
# types of NIC may be hotplugged, we have no choice but
# to unconditionally turn on the shared flag. This has
# no real negative functional effect on the guest, so
# is a reasonable approach to take
if wants_hugepages:
guest_cell.memAccess = "shared"
guest_cpu_numa.cells.append(guest_cell)
return guest_cpu_numa
def _has_cpu_policy_support(self):
for ver in BAD_LIBVIRT_CPU_POLICY_VERSIONS:
if self._host.has_version(ver):
ver_ = self._version_to_string(ver)
raise exception.CPUPinningNotSupported(reason=_(
'Invalid libvirt version %(version)s') % {'version': ver_})
return True
def _wants_hugepages(self, host_topology, instance_topology):
"""Determine if the guest / host topology implies the
use of huge pages for guest RAM backing
"""
if host_topology is None or instance_topology is None:
return False
avail_pagesize = [page.size_kb
for page in host_topology.cells[0].mempages]
avail_pagesize.sort()
# Remove smallest page size as that's not classed as a largepage
avail_pagesize = avail_pagesize[1:]
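        # e.g. a typical x86 host reports page sizes of [4, 2048, 1048576]
        # KiB, which leaves 2MB and 1GB pages as candidate large pages.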
# See if we have page size set
for cell in instance_topology.cells:
if (cell.pagesize is not None and
cell.pagesize in avail_pagesize):
return True
return False
def _get_guest_numa_config(self, instance_numa_topology, flavor,
allowed_cpus=None, image_meta=None):
"""Returns the config objects for the guest NUMA specs.
Determines the CPUs that the guest can be pinned to if the guest
specifies a cell topology and the host supports it. Constructs the
libvirt XML config object representing the NUMA topology selected
for the guest. Returns a tuple of:
(cpu_set, guest_cpu_tune, guest_cpu_numa, guest_numa_tune)
With the following caveats:
a) If there is no specified guest NUMA topology, then
all tuple elements except cpu_set shall be None. cpu_set
will be populated with the chosen CPUs that the guest
allowed CPUs fit within, which could be the supplied
allowed_cpus value if the host doesn't support NUMA
topologies.
b) If there is a specified guest NUMA topology, then
cpu_set will be None and guest_cpu_numa will be the
LibvirtConfigGuestCPUNUMA object representing the guest's
NUMA topology. If the host supports NUMA, then guest_cpu_tune
will contain a LibvirtConfigGuestCPUTune object representing
the optimized chosen cells that match the host capabilities
with the instance's requested topology. If the host does
not support NUMA, then guest_cpu_tune and guest_numa_tune
will be None.
"""
if (not self._has_numa_support() and
instance_numa_topology is not None):
# We should not get here, since we should have avoided
# reporting NUMA topology from _get_host_numa_topology
# in the first place. Just in case of a scheduler
# mess up though, raise an exception
raise exception.NUMATopologyUnsupported()
topology = self._get_host_numa_topology()
# We have instance NUMA so translate it to the config class
guest_cpu_numa_config = self._get_cpu_numa_config_from_instance(
instance_numa_topology,
self._wants_hugepages(topology, instance_numa_topology))
if not guest_cpu_numa_config:
# No NUMA topology defined for instance - let the host kernel deal
# with the NUMA effects.
# TODO(ndipanov): Attempt to spread the instance
# across NUMA nodes and expose the topology to the
# instance as an optimisation
return GuestNumaConfig(allowed_cpus, None, None, None)
else:
if topology:
# Now get the CpuTune configuration from the numa_topology
guest_cpu_tune = vconfig.LibvirtConfigGuestCPUTune()
guest_numa_tune = vconfig.LibvirtConfigGuestNUMATune()
emupcpus = []
numa_mem = vconfig.LibvirtConfigGuestNUMATuneMemory()
numa_memnodes = [vconfig.LibvirtConfigGuestNUMATuneMemNode()
for _ in guest_cpu_numa_config.cells]
vcpus_rt = set([])
wants_realtime = hardware.is_realtime_enabled(flavor)
if wants_realtime:
if not self._host.has_min_version(
MIN_LIBVIRT_REALTIME_VERSION):
raise exception.RealtimePolicyNotSupported()
# Prepare realtime config for libvirt
vcpus_rt = hardware.vcpus_realtime_topology(
flavor, image_meta)
vcpusched = vconfig.LibvirtConfigGuestCPUTuneVCPUSched()
vcpusched.vcpus = vcpus_rt
vcpusched.scheduler = "fifo"
vcpusched.priority = (
CONF.libvirt.realtime_scheduler_priority)
guest_cpu_tune.vcpusched.append(vcpusched)
for host_cell in topology.cells:
for guest_node_id, guest_config_cell in enumerate(
guest_cpu_numa_config.cells):
if guest_config_cell.id == host_cell.id:
node = numa_memnodes[guest_node_id]
node.cellid = guest_node_id
node.nodeset = [host_cell.id]
node.mode = "strict"
numa_mem.nodeset.append(host_cell.id)
object_numa_cell = (
instance_numa_topology.cells[guest_node_id]
)
for cpu in guest_config_cell.cpus:
pin_cpuset = (
vconfig.LibvirtConfigGuestCPUTuneVCPUPin())
pin_cpuset.id = cpu
# If there is pinning information in the cell
# we pin to individual CPUs, otherwise we float
# over the whole host NUMA node
if (object_numa_cell.cpu_pinning and
self._has_cpu_policy_support()):
pcpu = object_numa_cell.cpu_pinning[cpu]
pin_cpuset.cpuset = set([pcpu])
else:
pin_cpuset.cpuset = host_cell.cpuset
if not wants_realtime or cpu not in vcpus_rt:
# - If realtime IS NOT enabled, the
# emulator threads are allowed to float
# across all the pCPUs associated with
# the guest vCPUs ("not wants_realtime"
# is true, so we add all pcpus)
# - If realtime IS enabled, then at least
# 1 vCPU is required to be set aside for
# non-realtime usage. The emulator
                                    # threads are allowed to float across the
# pCPUs that are associated with the
# non-realtime VCPUs (the "cpu not in
# vcpu_rt" check deals with this
# filtering)
emupcpus.extend(pin_cpuset.cpuset)
guest_cpu_tune.vcpupin.append(pin_cpuset)
# TODO(berrange) When the guest has >1 NUMA node, it will
# span multiple host NUMA nodes. By pinning emulator threads
# to the union of all nodes, we guarantee there will be
# cross-node memory access by the emulator threads when
# responding to guest I/O operations. The only way to avoid
# this would be to pin emulator threads to a single node and
# tell the guest OS to only do I/O from one of its virtual
# NUMA nodes. This is not even remotely practical.
#
# The long term solution is to make use of a new QEMU feature
# called "I/O Threads" which will let us configure an explicit
# I/O thread for each guest vCPU or guest NUMA node. It is
# still TBD how to make use of this feature though, especially
                # how to associate IO threads with guest devices to eliminate
# cross NUMA node traffic. This is an area of investigation
# for QEMU community devs.
emulatorpin = vconfig.LibvirtConfigGuestCPUTuneEmulatorPin()
emulatorpin.cpuset = set(emupcpus)
guest_cpu_tune.emulatorpin = emulatorpin
# Sort the vcpupin list per vCPU id for human-friendlier XML
guest_cpu_tune.vcpupin.sort(key=operator.attrgetter("id"))
guest_numa_tune.memory = numa_mem
guest_numa_tune.memnodes = numa_memnodes
# normalize cell.id
for i, (cell, memnode) in enumerate(
zip(guest_cpu_numa_config.cells,
guest_numa_tune.memnodes)):
cell.id = i
memnode.cellid = i
return GuestNumaConfig(None, guest_cpu_tune,
guest_cpu_numa_config,
guest_numa_tune)
else:
return GuestNumaConfig(allowed_cpus, None,
guest_cpu_numa_config, None)
def _get_guest_os_type(self, virt_type):
"""Returns the guest OS type based on virt type."""
if virt_type == "lxc":
ret = fields.VMMode.EXE
elif virt_type == "uml":
ret = fields.VMMode.UML
elif virt_type == "xen":
ret = fields.VMMode.XEN
else:
ret = fields.VMMode.HVM
return ret
def _set_guest_for_rescue(self, rescue, guest, inst_path, virt_type,
root_device_name):
if rescue.get('kernel_id'):
guest.os_kernel = os.path.join(inst_path, "kernel.rescue")
if virt_type == "xen":
guest.os_cmdline = "ro root=%s" % root_device_name
else:
guest.os_cmdline = ("root=%s %s" % (root_device_name, CONSOLE))
if virt_type == "qemu":
guest.os_cmdline += " no_timer_check"
if rescue.get('ramdisk_id'):
guest.os_initrd = os.path.join(inst_path, "ramdisk.rescue")
def _set_guest_for_inst_kernel(self, instance, guest, inst_path, virt_type,
root_device_name, image_meta):
guest.os_kernel = os.path.join(inst_path, "kernel")
if virt_type == "xen":
guest.os_cmdline = "ro root=%s" % root_device_name
else:
guest.os_cmdline = ("root=%s %s" % (root_device_name, CONSOLE))
if virt_type == "qemu":
guest.os_cmdline += " no_timer_check"
if instance.ramdisk_id:
guest.os_initrd = os.path.join(inst_path, "ramdisk")
# we only support os_command_line with images with an explicit
# kernel set and don't want to break nova if there's an
# os_command_line property without a specified kernel_id param
if image_meta.properties.get("os_command_line"):
guest.os_cmdline = image_meta.properties.os_command_line
def _set_clock(self, guest, os_type, image_meta, virt_type):
# NOTE(mikal): Microsoft Windows expects the clock to be in
# "localtime". If the clock is set to UTC, then you can use a
# registry key to let windows know, but Microsoft says this is
# buggy in http://support.microsoft.com/kb/2687252
clk = vconfig.LibvirtConfigGuestClock()
if os_type == 'windows':
LOG.info(_LI('Configuring timezone for windows instance to '
'localtime'))
clk.offset = 'localtime'
else:
clk.offset = 'utc'
guest.set_clock(clk)
if virt_type == "kvm":
self._set_kvm_timers(clk, os_type, image_meta)
def _set_kvm_timers(self, clk, os_type, image_meta):
# TODO(berrange) One day this should be per-guest
# OS type configurable
tmpit = vconfig.LibvirtConfigGuestTimer()
tmpit.name = "pit"
tmpit.tickpolicy = "delay"
tmrtc = vconfig.LibvirtConfigGuestTimer()
tmrtc.name = "rtc"
tmrtc.tickpolicy = "catchup"
clk.add_timer(tmpit)
clk.add_timer(tmrtc)
guestarch = libvirt_utils.get_arch(image_meta)
if guestarch in (fields.Architecture.I686,
fields.Architecture.X86_64):
# NOTE(rfolco): HPET is a hardware timer for x86 arch.
# qemu -no-hpet is not supported on non-x86 targets.
tmhpet = vconfig.LibvirtConfigGuestTimer()
tmhpet.name = "hpet"
tmhpet.present = False
clk.add_timer(tmhpet)
# With new enough QEMU we can provide Windows guests
# with the paravirtualized hyperv timer source. This
# is the windows equiv of kvm-clock, allowing Windows
# guests to accurately keep time.
if (os_type == 'windows' and
self._host.has_min_version(MIN_LIBVIRT_HYPERV_TIMER_VERSION,
MIN_QEMU_HYPERV_TIMER_VERSION)):
tmhyperv = vconfig.LibvirtConfigGuestTimer()
tmhyperv.name = "hypervclock"
tmhyperv.present = True
clk.add_timer(tmhyperv)
def _set_features(self, guest, os_type, caps, virt_type):
if virt_type == "xen":
# PAE only makes sense in X86
if caps.host.cpu.arch in (fields.Architecture.I686,
fields.Architecture.X86_64):
guest.features.append(vconfig.LibvirtConfigGuestFeaturePAE())
if (virt_type not in ("lxc", "uml", "parallels", "xen") or
(virt_type == "xen" and guest.os_type == fields.VMMode.HVM)):
guest.features.append(vconfig.LibvirtConfigGuestFeatureACPI())
guest.features.append(vconfig.LibvirtConfigGuestFeatureAPIC())
if (virt_type in ("qemu", "kvm") and
os_type == 'windows'):
hv = vconfig.LibvirtConfigGuestFeatureHyperV()
hv.relaxed = True
hv.spinlocks = True
# Increase spinlock retries - value recommended by
# KVM maintainers who certify Windows guests
# with Microsoft
hv.spinlock_retries = 8191
hv.vapic = True
guest.features.append(hv)
def _check_number_of_serial_console(self, num_ports):
virt_type = CONF.libvirt.virt_type
if (virt_type in ("kvm", "qemu") and
num_ports > ALLOWED_QEMU_SERIAL_PORTS):
raise exception.SerialPortNumberLimitExceeded(
allowed=ALLOWED_QEMU_SERIAL_PORTS, virt_type=virt_type)
def _add_video_driver(self, guest, image_meta, flavor):
VALID_VIDEO_DEVICES = ("vga", "cirrus", "vmvga", "xen", "qxl")
video = vconfig.LibvirtConfigGuestVideo()
# NOTE(ldbragst): The following logic sets the video.type
# depending on supported defaults given the architecture,
# virtualization type, and features. The video.type attribute can
# be overridden by the user with image_meta.properties, which
# is carried out in the next if statement below this one.
guestarch = libvirt_utils.get_arch(image_meta)
if guest.os_type == fields.VMMode.XEN:
video.type = 'xen'
elif CONF.libvirt.virt_type == 'parallels':
video.type = 'vga'
elif guestarch in (fields.Architecture.PPC,
fields.Architecture.PPC64,
fields.Architecture.PPC64LE):
            # NOTE(ldbragst): PowerKVM doesn't support 'cirrus' by default
# so use 'vga' instead when running on Power hardware.
video.type = 'vga'
elif CONF.spice.enabled:
video.type = 'qxl'
if image_meta.properties.get('hw_video_model'):
video.type = image_meta.properties.hw_video_model
if (video.type not in VALID_VIDEO_DEVICES):
raise exception.InvalidVideoMode(model=video.type)
# Set video memory, only if the flavor's limit is set
video_ram = image_meta.properties.get('hw_video_ram', 0)
max_vram = int(flavor.extra_specs.get('hw_video:ram_max_mb', 0))
if video_ram > max_vram:
raise exception.RequestedVRamTooHigh(req_vram=video_ram,
max_vram=max_vram)
if max_vram and video_ram:
video.vram = video_ram * units.Mi / units.Ki
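            # Illustrative example (hypothetical values): an image with
            # hw_video_ram=8 (MiB) and a flavor with hw_video:ram_max_mb=64
            # results in video.vram = 8 * 1024 = 8192, i.e. 8192 KiB in the
            # generated guest XML.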
guest.add_device(video)
def _add_qga_device(self, guest, instance):
qga = vconfig.LibvirtConfigGuestChannel()
qga.type = "unix"
qga.target_name = "org.qemu.guest_agent.0"
qga.source_path = ("/var/lib/libvirt/qemu/%s.%s.sock" %
("org.qemu.guest_agent.0", instance.name))
guest.add_device(qga)
def _add_rng_device(self, guest, flavor):
rng_device = vconfig.LibvirtConfigGuestRng()
rate_bytes = flavor.extra_specs.get('hw_rng:rate_bytes', 0)
period = flavor.extra_specs.get('hw_rng:rate_period', 0)
if rate_bytes:
rng_device.rate_bytes = int(rate_bytes)
rng_device.rate_period = int(period)
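            # Illustrative example (hypothetical values): extra_specs of
            # {'hw_rng:rate_bytes': '1024', 'hw_rng:rate_period': '2000'}
            # limit the guest to reading at most 1024 bytes of entropy per
            # 2000 ms period.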
rng_path = CONF.libvirt.rng_dev_path
if (rng_path and not os.path.exists(rng_path)):
raise exception.RngDeviceNotExist(path=rng_path)
rng_device.backend = rng_path
guest.add_device(rng_device)
def _set_qemu_guest_agent(self, guest, flavor, instance, image_meta):
        # Enable the qemu guest agent only if the 'hw_qemu_guest_agent'
        # image property is set to yes
if image_meta.properties.get('hw_qemu_guest_agent', False):
LOG.debug("Qemu guest agent is enabled through image "
"metadata", instance=instance)
self._add_qga_device(guest, instance)
rng_is_virtio = image_meta.properties.get('hw_rng_model') == 'virtio'
rng_allowed_str = flavor.extra_specs.get('hw_rng:allowed', '')
rng_allowed = strutils.bool_from_string(rng_allowed_str)
if rng_is_virtio and rng_allowed:
self._add_rng_device(guest, flavor)
def _get_guest_memory_backing_config(
self, inst_topology, numatune, flavor):
wantsmempages = False
if inst_topology:
for cell in inst_topology.cells:
if cell.pagesize:
wantsmempages = True
break
wantsrealtime = hardware.is_realtime_enabled(flavor)
membacking = None
if wantsmempages:
pages = self._get_memory_backing_hugepages_support(
inst_topology, numatune)
if pages:
membacking = vconfig.LibvirtConfigGuestMemoryBacking()
membacking.hugepages = pages
if wantsrealtime:
if not membacking:
membacking = vconfig.LibvirtConfigGuestMemoryBacking()
membacking.locked = True
membacking.sharedpages = False
return membacking
def _get_memory_backing_hugepages_support(self, inst_topology, numatune):
if not self._has_hugepage_support():
# We should not get here, since we should have avoided
# reporting NUMA topology from _get_host_numa_topology
# in the first place. Just in case of a scheduler
# mess up though, raise an exception
raise exception.MemoryPagesUnsupported()
host_topology = self._get_host_numa_topology()
if host_topology is None:
# As above, we should not get here but just in case...
raise exception.MemoryPagesUnsupported()
        # Currently libvirt does not support setting the smallest
        # page size as backing memory.
# https://bugzilla.redhat.com/show_bug.cgi?id=1173507
avail_pagesize = [page.size_kb
for page in host_topology.cells[0].mempages]
avail_pagesize.sort()
smallest = avail_pagesize[0]
pages = []
for guest_cellid, inst_cell in enumerate(inst_topology.cells):
if inst_cell.pagesize and inst_cell.pagesize > smallest:
for memnode in numatune.memnodes:
if guest_cellid == memnode.cellid:
page = (
vconfig.LibvirtConfigGuestMemoryBackingPage())
page.nodeset = [guest_cellid]
page.size_kb = inst_cell.pagesize
pages.append(page)
break # Quit early...
return pages
def _get_flavor(self, ctxt, instance, flavor):
if flavor is not None:
return flavor
return instance.flavor
def _has_uefi_support(self):
# This means that the host can support uefi booting for guests
supported_archs = [fields.Architecture.X86_64,
fields.Architecture.AARCH64]
caps = self._host.get_capabilities()
return ((caps.host.cpu.arch in supported_archs) and
self._host.has_min_version(MIN_LIBVIRT_UEFI_VERSION) and
os.path.exists(DEFAULT_UEFI_LOADER_PATH[caps.host.cpu.arch]))
def _get_supported_perf_events(self):
if (len(CONF.libvirt.enabled_perf_events) == 0 or
not self._host.has_min_version(MIN_LIBVIRT_PERF_VERSION)):
return []
supported_events = []
host_cpu_info = self._get_cpu_info()
for event in CONF.libvirt.enabled_perf_events:
if self._supported_perf_event(event, host_cpu_info['features']):
supported_events.append(event)
return supported_events
def _supported_perf_event(self, event, cpu_features):
libvirt_perf_event_name = LIBVIRT_PERF_EVENT_PREFIX + event.upper()
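        # Illustrative example: a configured event of 'cmt' is looked up
        # as the libvirt constant name 'VIR_PERF_PARAM_CMT' (assuming the
        # usual 'VIR_PERF_PARAM_' prefix).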
if not hasattr(libvirt, libvirt_perf_event_name):
LOG.warning(_LW("Libvirt doesn't support event type %s."),
event)
return False
if (event in PERF_EVENTS_CPU_FLAG_MAPPING
and PERF_EVENTS_CPU_FLAG_MAPPING[event] not in cpu_features):
LOG.warning(_LW("Host does not support event type %s."), event)
return False
return True
def _configure_guest_by_virt_type(self, guest, virt_type, caps, instance,
image_meta, flavor, root_device_name):
if virt_type == "xen":
if guest.os_type == fields.VMMode.HVM:
guest.os_loader = CONF.libvirt.xen_hvmloader_path
elif virt_type in ("kvm", "qemu"):
if caps.host.cpu.arch in (fields.Architecture.I686,
fields.Architecture.X86_64):
guest.sysinfo = self._get_guest_config_sysinfo(instance)
guest.os_smbios = vconfig.LibvirtConfigGuestSMBIOS()
hw_firmware_type = image_meta.properties.get('hw_firmware_type')
if hw_firmware_type == fields.FirmwareType.UEFI:
if self._has_uefi_support():
global uefi_logged
if not uefi_logged:
                        LOG.warning(_LW("uefi support has not received "
                                        "functional testing and should "
                                        "therefore be considered "
                                        "experimental."))
uefi_logged = True
guest.os_loader = DEFAULT_UEFI_LOADER_PATH[
caps.host.cpu.arch]
guest.os_loader_type = "pflash"
else:
raise exception.UEFINotSupported()
guest.os_mach_type = self._get_machine_type(image_meta, caps)
if image_meta.properties.get('hw_boot_menu') is None:
guest.os_bootmenu = strutils.bool_from_string(
flavor.extra_specs.get('hw:boot_menu', 'no'))
else:
guest.os_bootmenu = image_meta.properties.hw_boot_menu
elif virt_type == "lxc":
guest.os_init_path = "/sbin/init"
guest.os_cmdline = CONSOLE
elif virt_type == "uml":
guest.os_kernel = "/usr/bin/linux"
guest.os_root = root_device_name
elif virt_type == "parallels":
if guest.os_type == fields.VMMode.EXE:
guest.os_init_path = "/sbin/init"
def _conf_non_lxc_uml(self, virt_type, guest, root_device_name, rescue,
instance, inst_path, image_meta, disk_info):
if rescue:
self._set_guest_for_rescue(rescue, guest, inst_path, virt_type,
root_device_name)
elif instance.kernel_id:
self._set_guest_for_inst_kernel(instance, guest, inst_path,
virt_type, root_device_name,
image_meta)
else:
guest.os_boot_dev = blockinfo.get_boot_order(disk_info)
def _create_consoles(self, virt_type, guest_cfg, instance, flavor,
image_meta):
# NOTE(markus_z): Beware! Below are so many conditionals that it is
# easy to lose track. Use this chart to figure out your case:
#
# case | is serial | has | is qemu | resulting
# | enabled? | virtlogd? | or kvm? | devices
# --------------------------------------------------
# 1 | no | no | no | pty*
# 2 | no | no | yes | file + pty
# 3 | no | yes | no | see case 1
# 4 | no | yes | yes | pty with logd
# 5 | yes | no | no | see case 1
# 6 | yes | no | yes | tcp + pty
# 7 | yes | yes | no | see case 1
# 8 | yes | yes | yes | tcp with logd
# * exception: virt_type "parallels" doesn't create a device
if virt_type == 'parallels':
pass
elif virt_type not in ("qemu", "kvm"):
log_path = self._get_console_log_path(instance)
self._create_pty_device(guest_cfg,
vconfig.LibvirtConfigGuestConsole,
log_path=log_path)
elif (virt_type in ("qemu", "kvm") and
self._is_s390x_guest(image_meta)):
self._create_consoles_s390x(guest_cfg, instance,
flavor, image_meta)
elif virt_type in ("qemu", "kvm"):
self._create_consoles_qemu_kvm(guest_cfg, instance,
flavor, image_meta)
def _is_s390x_guest(self, image_meta):
s390x_archs = (fields.Architecture.S390, fields.Architecture.S390X)
return libvirt_utils.get_arch(image_meta) in s390x_archs
def _create_consoles_qemu_kvm(self, guest_cfg, instance, flavor,
image_meta):
char_dev_cls = vconfig.LibvirtConfigGuestSerial
log_path = self._get_console_log_path(instance)
if CONF.serial_console.enabled:
if not self._serial_ports_already_defined(instance):
num_ports = hardware.get_number_of_serial_ports(flavor,
image_meta)
self._check_number_of_serial_console(num_ports)
self._create_serial_consoles(guest_cfg, num_ports,
char_dev_cls, log_path)
else:
self._create_file_device(guest_cfg, instance, char_dev_cls)
self._create_pty_device(guest_cfg, char_dev_cls, log_path=log_path)
def _create_consoles_s390x(self, guest_cfg, instance, flavor, image_meta):
char_dev_cls = vconfig.LibvirtConfigGuestConsole
log_path = self._get_console_log_path(instance)
if CONF.serial_console.enabled:
if not self._serial_ports_already_defined(instance):
num_ports = hardware.get_number_of_serial_ports(flavor,
image_meta)
self._create_serial_consoles(guest_cfg, num_ports,
char_dev_cls, log_path)
else:
self._create_file_device(guest_cfg, instance, char_dev_cls,
"sclplm")
self._create_pty_device(guest_cfg, char_dev_cls, "sclp", log_path)
def _create_pty_device(self, guest_cfg, char_dev_cls, target_type=None,
log_path=None):
def _create_base_dev():
consolepty = char_dev_cls()
consolepty.target_type = target_type
consolepty.type = "pty"
return consolepty
def _create_logd_dev():
consolepty = _create_base_dev()
log = vconfig.LibvirtConfigGuestCharDeviceLog()
log.file = log_path
consolepty.log = log
return consolepty
        # NOTE: always add a plain pty device so that "virsh console
        # <guest>" keeps working on the hypervisor.
        LOG.debug("Always adding a pty device to support virsh console")
        guest_cfg.add_device(_create_base_dev())
if CONF.serial_console.enabled:
if self._is_virtlogd_available():
return
else:
# NOTE(markus_z): You may wonder why this is necessary and
# so do I. I'm certain that this is *not* needed in any
# real use case. It is, however, useful if you want to
                # bypass the Nova API and use "virsh console <guest>" on
                # a hypervisor, as this CLI command doesn't work with TCP
                # devices (like the serial console is).
                # https://bugzilla.redhat.com/show_bug.cgi?id=781467
                # Bypassing the Nova API however is a thing we don't want.
                # Future changes should remove this and fix the unit tests
                # which rely on its existence.
guest_cfg.add_device(_create_base_dev())
else:
if self._is_virtlogd_available():
guest_cfg.add_device(_create_logd_dev())
else:
guest_cfg.add_device(_create_base_dev())
def _create_file_device(self, guest_cfg, instance, char_dev_cls,
target_type=None):
if self._is_virtlogd_available():
return
consolelog = char_dev_cls()
consolelog.target_type = target_type
consolelog.type = "file"
consolelog.source_path = self._get_console_log_path(instance)
guest_cfg.add_device(consolelog)
def _serial_ports_already_defined(self, instance):
try:
guest = self._host.get_guest(instance)
if list(self._get_serial_ports_from_guest(guest)):
                # Serial ports are already configured for the instance,
                # which means we are in the context of a migration.
return True
except exception.InstanceNotFound:
LOG.debug(
"Instance does not exist yet on libvirt, we can "
"safely pass on looking for already defined serial "
"ports in its domain XML", instance=instance)
return False
def _create_serial_consoles(self, guest_cfg, num_ports, char_dev_cls,
log_path):
for port in six.moves.range(num_ports):
console = char_dev_cls()
console.port = port
console.type = "tcp"
console.listen_host = CONF.serial_console.proxyclient_address
listen_port = serial_console.acquire_port(console.listen_host)
console.listen_port = listen_port
# NOTE: only the first serial console gets the boot messages,
# that's why we attach the logd subdevice only to that.
if port == 0 and self._is_virtlogd_available():
log = vconfig.LibvirtConfigGuestCharDeviceLog()
log.file = log_path
console.log = log
guest_cfg.add_device(console)
def _cpu_config_to_vcpu_model(self, cpu_config, vcpu_model):
"""Update VirtCPUModel object according to libvirt CPU config.
:param:cpu_config: vconfig.LibvirtConfigGuestCPU presenting the
instance's virtual cpu configuration.
:param:vcpu_model: VirtCPUModel object. A new object will be created
if None.
:return: Updated VirtCPUModel object, or None if cpu_config is None
"""
if not cpu_config:
return
if not vcpu_model:
vcpu_model = objects.VirtCPUModel()
vcpu_model.arch = cpu_config.arch
vcpu_model.vendor = cpu_config.vendor
vcpu_model.model = cpu_config.model
vcpu_model.mode = cpu_config.mode
vcpu_model.match = cpu_config.match
if cpu_config.sockets:
vcpu_model.topology = objects.VirtCPUTopology(
sockets=cpu_config.sockets,
cores=cpu_config.cores,
threads=cpu_config.threads)
else:
vcpu_model.topology = None
features = [objects.VirtCPUFeature(
name=f.name,
policy=f.policy) for f in cpu_config.features]
vcpu_model.features = features
return vcpu_model
def _vcpu_model_to_cpu_config(self, vcpu_model):
"""Create libvirt CPU config according to VirtCPUModel object.
:param:vcpu_model: VirtCPUModel object.
:return: vconfig.LibvirtConfigGuestCPU.
"""
cpu_config = vconfig.LibvirtConfigGuestCPU()
cpu_config.arch = vcpu_model.arch
cpu_config.model = vcpu_model.model
cpu_config.mode = vcpu_model.mode
cpu_config.match = vcpu_model.match
cpu_config.vendor = vcpu_model.vendor
if vcpu_model.topology:
cpu_config.sockets = vcpu_model.topology.sockets
cpu_config.cores = vcpu_model.topology.cores
cpu_config.threads = vcpu_model.topology.threads
if vcpu_model.features:
for f in vcpu_model.features:
xf = vconfig.LibvirtConfigGuestCPUFeature()
xf.name = f.name
xf.policy = f.policy
cpu_config.features.add(xf)
return cpu_config
def _get_guest_config(self, instance, network_info, image_meta,
disk_info, rescue=None, block_device_info=None,
context=None):
"""Get config data for parameters.
:param rescue: optional dictionary that should contain the key
'ramdisk_id' if a ramdisk is needed for the rescue image and
'kernel_id' if a kernel is needed for the rescue image.
"""
flavor = instance.flavor
inst_path = libvirt_utils.get_instance_path(instance)
disk_mapping = disk_info['mapping']
virt_type = CONF.libvirt.virt_type
guest = vconfig.LibvirtConfigGuest()
guest.virt_type = virt_type
guest.name = instance.name
guest.uuid = instance.uuid
# We are using default unit for memory: KiB
guest.memory = flavor.memory_mb * units.Ki
guest.vcpus = flavor.vcpus
allowed_cpus = hardware.get_vcpu_pin_set()
guest_numa_config = self._get_guest_numa_config(
instance.numa_topology, flavor, allowed_cpus, image_meta)
guest.cpuset = guest_numa_config.cpuset
guest.cputune = guest_numa_config.cputune
guest.numatune = guest_numa_config.numatune
guest.membacking = self._get_guest_memory_backing_config(
instance.numa_topology,
guest_numa_config.numatune,
flavor)
guest.metadata.append(self._get_guest_config_meta(context,
instance))
guest.idmaps = self._get_guest_idmaps()
for event in self._supported_perf_events:
guest.add_perf_event(event)
self._update_guest_cputune(guest, flavor, virt_type)
guest.cpu = self._get_guest_cpu_config(
flavor, image_meta, guest_numa_config.numaconfig,
instance.numa_topology)
# Notes(yjiang5): we always sync the instance's vcpu model with
# the corresponding config file.
instance.vcpu_model = self._cpu_config_to_vcpu_model(
guest.cpu, instance.vcpu_model)
if 'root' in disk_mapping:
root_device_name = block_device.prepend_dev(
disk_mapping['root']['dev'])
else:
root_device_name = None
if root_device_name:
# NOTE(yamahata):
# for nova.api.ec2.cloud.CloudController.get_metadata()
instance.root_device_name = root_device_name
guest.os_type = (fields.VMMode.get_from_instance(instance) or
self._get_guest_os_type(virt_type))
caps = self._host.get_capabilities()
self._configure_guest_by_virt_type(guest, virt_type, caps, instance,
image_meta, flavor,
root_device_name)
if virt_type not in ('lxc', 'uml'):
self._conf_non_lxc_uml(virt_type, guest, root_device_name, rescue,
instance, inst_path, image_meta, disk_info)
self._set_features(guest, instance.os_type, caps, virt_type)
self._set_clock(guest, instance.os_type, image_meta, virt_type)
storage_configs = self._get_guest_storage_config(
instance, image_meta, disk_info, rescue, block_device_info,
flavor, guest.os_type)
for config in storage_configs:
guest.add_device(config)
for vif in network_info:
config = self.vif_driver.get_config(
instance, vif, image_meta,
flavor, virt_type, self._host)
guest.add_device(config)
self._create_consoles(virt_type, guest, instance, flavor, image_meta)
pointer = self._get_guest_pointer_model(guest.os_type, image_meta)
if pointer:
guest.add_device(pointer)
if (CONF.spice.enabled and CONF.spice.agent_enabled and
virt_type not in ('lxc', 'uml', 'xen')):
channel = vconfig.LibvirtConfigGuestChannel()
channel.type = 'spicevmc'
channel.target_name = "com.redhat.spice.0"
guest.add_device(channel)
# NB some versions of libvirt support both SPICE and VNC
# at the same time. We're not trying to second guess which
# those versions are. We'll just let libvirt report the
# errors appropriately if the user enables both.
add_video_driver = False
if ((CONF.vnc.enabled and
virt_type not in ('lxc', 'uml'))):
graphics = vconfig.LibvirtConfigGuestGraphics()
graphics.type = "vnc"
graphics.keymap = CONF.vnc.keymap
graphics.listen = CONF.vnc.vncserver_listen
guest.add_device(graphics)
add_video_driver = True
if (CONF.spice.enabled and
virt_type not in ('lxc', 'uml', 'xen')):
graphics = vconfig.LibvirtConfigGuestGraphics()
graphics.type = "spice"
graphics.keymap = CONF.spice.keymap
graphics.listen = CONF.spice.server_listen
guest.add_device(graphics)
add_video_driver = True
if add_video_driver:
self._add_video_driver(guest, image_meta, flavor)
        # The Qemu guest agent is only supported by the 'qemu' and 'kvm'
        # hypervisors
if virt_type in ('qemu', 'kvm'):
self._set_qemu_guest_agent(guest, flavor, instance, image_meta)
if virt_type in ('xen', 'qemu', 'kvm'):
# Get all generic PCI devices (non-SR-IOV).
for pci_dev in pci_manager.get_instance_pci_devs(instance):
guest.add_device(self._get_guest_pci_device(pci_dev))
else:
            # PCI device passthrough is only supported for the 'xen',
            # 'qemu' and 'kvm' hypervisors.
pci_devs = pci_manager.get_instance_pci_devs(instance, 'all')
if len(pci_devs) > 0:
raise exception.PciDeviceUnsupportedHypervisor(
type=virt_type)
# image meta takes precedence over flavor extra specs; disable the
# watchdog action by default
watchdog_action = (flavor.extra_specs.get('hw:watchdog_action')
or 'disabled')
watchdog_action = image_meta.properties.get('hw_watchdog_action',
watchdog_action)
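        # Illustrative example (hypothetical values): a flavor with
        # hw:watchdog_action='pause' and an image with
        # hw_watchdog_action='reset' ends up with 'reset', since the image
        # property takes precedence.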
# NB(sross): currently only actually supported by KVM/QEmu
if watchdog_action != 'disabled':
if watchdog_action in fields.WatchdogAction.ALL:
bark = vconfig.LibvirtConfigGuestWatchdog()
bark.action = watchdog_action
guest.add_device(bark)
else:
raise exception.InvalidWatchdogAction(action=watchdog_action)
        # The memory balloon device is only supported by the 'qemu'/'kvm'
        # and 'xen' hypervisors
if (virt_type in ('xen', 'qemu', 'kvm') and
CONF.libvirt.mem_stats_period_seconds > 0):
balloon = vconfig.LibvirtConfigMemoryBalloon()
if virt_type in ('qemu', 'kvm'):
balloon.model = 'virtio'
else:
balloon.model = 'xen'
balloon.period = CONF.libvirt.mem_stats_period_seconds
guest.add_device(balloon)
return guest
def _get_guest_pointer_model(self, os_type, image_meta):
pointer_model = image_meta.properties.get(
'hw_pointer_model', CONF.pointer_model)
if pointer_model is None and CONF.libvirt.use_usb_tablet:
# TODO(sahid): We set pointer_model to keep compatibility
# until the next release O*. It means operators can continue
# to use the deprecated option "use_usb_tablet" or set a
# specific device to use
pointer_model = "usbtablet"
LOG.warning(_LW('The option "use_usb_tablet" has been '
'deprecated for Newton in favor of the more '
'generic "pointer_model". Please update '
'nova.conf to address this change.'))
if pointer_model == "usbtablet":
# We want a tablet if VNC is enabled, or SPICE is enabled and
# the SPICE agent is disabled. If the SPICE agent is enabled
# it provides a paravirt mouse which drastically reduces
# overhead (by eliminating USB polling).
if CONF.vnc.enabled or (
CONF.spice.enabled and not CONF.spice.agent_enabled):
return self._get_guest_usb_tablet(os_type)
else:
if CONF.pointer_model or CONF.libvirt.use_usb_tablet:
                    # For backward compatibility we don't want to break the
                    # process of booting an instance if the host is
                    # configured to use a USB tablet without VNC, or with
                    # SPICE and the SPICE agent disabled.
LOG.warning(_LW('USB tablet requested for guests by host '
'configuration. In order to accept this '
'request VNC should be enabled or SPICE '
'and SPICE agent disabled on host.'))
else:
raise exception.UnsupportedPointerModelRequested(
model="usbtablet")
def _get_guest_usb_tablet(self, os_type):
tablet = None
if os_type == fields.VMMode.HVM:
tablet = vconfig.LibvirtConfigGuestInput()
tablet.type = "tablet"
tablet.bus = "usb"
else:
if CONF.pointer_model or CONF.libvirt.use_usb_tablet:
                # For backward compatibility we don't want to break the
                # process of booting an instance if the virtual machine
                # mode is not configured as HVM.
LOG.warning(_LW('USB tablet requested for guests by host '
'configuration. In order to accept this '
'request the machine mode should be '
'configured as HVM.'))
else:
raise exception.UnsupportedPointerModelRequested(
model="usbtablet")
return tablet
def _get_guest_xml(self, context, instance, network_info, disk_info,
image_meta, rescue=None,
block_device_info=None):
# NOTE(danms): Stringifying a NetworkInfo will take a lock. Do
# this ahead of time so that we don't acquire it while also
# holding the logging lock.
network_info_str = str(network_info)
msg = ('Start _get_guest_xml '
'network_info=%(network_info)s '
'disk_info=%(disk_info)s '
'image_meta=%(image_meta)s rescue=%(rescue)s '
'block_device_info=%(block_device_info)s' %
{'network_info': network_info_str, 'disk_info': disk_info,
'image_meta': image_meta, 'rescue': rescue,
'block_device_info': block_device_info})
# NOTE(mriedem): block_device_info can contain auth_password so we
# need to sanitize the password in the message.
LOG.debug(strutils.mask_password(msg), instance=instance)
conf = self._get_guest_config(instance, network_info, image_meta,
disk_info, rescue, block_device_info,
context)
xml = conf.to_xml()
LOG.debug('End _get_guest_xml xml=%(xml)s',
{'xml': xml}, instance=instance)
return xml
def get_info(self, instance):
"""Retrieve information from libvirt for a specific instance name.
If a libvirt error is encountered during lookup, we might raise a
NotFound exception or Error exception depending on how severe the
libvirt error is.
"""
guest = self._host.get_guest(instance)
# Kind of ugly but we need to pass host to get_info as for a
# workaround, see libvirt/compat.py
return guest.get_info(self._host)
def _create_domain_setup_lxc(self, instance, image_meta,
block_device_info, disk_info):
inst_path = libvirt_utils.get_instance_path(instance)
block_device_mapping = driver.block_device_info_get_mapping(
block_device_info)
root_disk = block_device.get_root_bdm(block_device_mapping)
if root_disk:
disk_info = blockinfo.get_info_from_bdm(
instance, CONF.libvirt.virt_type, image_meta, root_disk)
self._connect_volume(root_disk['connection_info'], disk_info)
disk_path = root_disk['connection_info']['data']['device_path']
# NOTE(apmelton) - Even though the instance is being booted from a
# cinder volume, it is still presented as a local block device.
# LocalBlockImage is used here to indicate that the instance's
# disk is backed by a local block device.
image_model = imgmodel.LocalBlockImage(disk_path)
else:
root_disk = self.image_backend.by_name(instance, 'disk')
image_model = root_disk.get_model(self._conn)
container_dir = os.path.join(inst_path, 'rootfs')
fileutils.ensure_tree(container_dir)
rootfs_dev = disk_api.setup_container(image_model,
container_dir=container_dir)
try:
# Save rootfs device to disconnect it when deleting the instance
if rootfs_dev:
instance.system_metadata['rootfs_device_name'] = rootfs_dev
if CONF.libvirt.uid_maps or CONF.libvirt.gid_maps:
id_maps = self._get_guest_idmaps()
libvirt_utils.chown_for_id_maps(container_dir, id_maps)
except Exception:
with excutils.save_and_reraise_exception():
self._create_domain_cleanup_lxc(instance)
def _create_domain_cleanup_lxc(self, instance):
inst_path = libvirt_utils.get_instance_path(instance)
container_dir = os.path.join(inst_path, 'rootfs')
try:
state = self.get_info(instance).state
except exception.InstanceNotFound:
# The domain may not be present if the instance failed to start
state = None
if state == power_state.RUNNING:
# NOTE(uni): Now the container is running with its own private
# mount namespace and so there is no need to keep the container
# rootfs mounted in the host namespace
LOG.debug('Attempting to unmount container filesystem: %s',
container_dir, instance=instance)
disk_api.clean_lxc_namespace(container_dir=container_dir)
else:
disk_api.teardown_container(container_dir=container_dir)
@contextlib.contextmanager
def _lxc_disk_handler(self, instance, image_meta,
block_device_info, disk_info):
"""Context manager to handle the pre and post instance boot,
        LXC-specific disk operations.
An image or a volume path will be prepared and setup to be
used by the container, prior to starting it.
The disk will be disconnected and unmounted if a container has
failed to start.
"""
if CONF.libvirt.virt_type != 'lxc':
yield
return
self._create_domain_setup_lxc(instance, image_meta,
block_device_info, disk_info)
try:
yield
finally:
self._create_domain_cleanup_lxc(instance)
# TODO(sahid): Consider renaming this to _create_guest.
def _create_domain(self, xml=None, domain=None,
power_on=True, pause=False, post_xml_callback=None):
"""Create a domain.
Either domain or xml must be passed in. If both are passed, then
the domain definition is overwritten from the xml.
:returns guest.Guest: Guest just created
"""
if xml:
guest = libvirt_guest.Guest.create(xml, self._host)
if post_xml_callback is not None:
post_xml_callback()
else:
guest = libvirt_guest.Guest(domain)
if power_on or pause:
guest.launch(pause=pause)
if not utils.is_neutron():
guest.enable_hairpin()
return guest
def _neutron_failed_callback(self, event_name, instance):
LOG.error(_LE('Neutron Reported failure on event '
'%(event)s for instance %(uuid)s'),
{'event': event_name, 'uuid': instance.uuid},
instance=instance)
if CONF.vif_plugging_is_fatal:
raise exception.VirtualInterfaceCreateException()
def _get_neutron_events(self, network_info):
# NOTE(danms): We need to collect any VIFs that are currently
# down that we expect a down->up event for. Anything that is
# already up will not undergo that transition, and for
# anything that might be stale (cache-wise) assume it's
# already up so we don't block on it.
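        # Illustrative example (hypothetical ids): for two VIFs where only
        # the one with id 'port-2' reports active=False, this returns
        # [('network-vif-plugged', 'port-2')].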
return [('network-vif-plugged', vif['id'])
for vif in network_info if vif.get('active', True) is False]
def _cleanup_failed_start(self, context, instance, network_info,
block_device_info, guest, destroy_disks):
try:
if guest and guest.is_active():
guest.poweroff()
finally:
self.cleanup(context, instance, network_info=network_info,
block_device_info=block_device_info,
destroy_disks=destroy_disks)
def _create_domain_and_network(self, context, xml, instance, network_info,
disk_info, block_device_info=None,
power_on=True, reboot=False,
vifs_already_plugged=False,
post_xml_callback=None,
destroy_disks_on_failure=False):
"""Do required network setup and create domain."""
block_device_mapping = driver.block_device_info_get_mapping(
block_device_info)
for vol in block_device_mapping:
connection_info = vol['connection_info']
if (not reboot and 'data' in connection_info and
'volume_id' in connection_info['data']):
volume_id = connection_info['data']['volume_id']
encryption = encryptors.get_encryption_metadata(
context, self._volume_api, volume_id, connection_info)
if encryption:
encryptor = self._get_volume_encryptor(connection_info,
encryption)
encryptor.attach_volume(context, **encryption)
timeout = CONF.vif_plugging_timeout
if (self._conn_supports_start_paused and
utils.is_neutron() and not
vifs_already_plugged and power_on and timeout):
events = self._get_neutron_events(network_info)
else:
events = []
pause = bool(events)
guest = None
try:
with self.virtapi.wait_for_instance_event(
instance, events, deadline=timeout,
error_callback=self._neutron_failed_callback):
self.plug_vifs(instance, network_info)
self.firewall_driver.setup_basic_filtering(instance,
network_info)
self.firewall_driver.prepare_instance_filter(instance,
network_info)
with self._lxc_disk_handler(instance, instance.image_meta,
block_device_info, disk_info):
guest = self._create_domain(
xml, pause=pause, power_on=power_on,
post_xml_callback=post_xml_callback)
self.firewall_driver.apply_instance_filter(instance,
network_info)
except exception.VirtualInterfaceCreateException:
# Neutron reported failure and we didn't swallow it, so
# bail here
with excutils.save_and_reraise_exception():
self._cleanup_failed_start(context, instance, network_info,
block_device_info, guest,
destroy_disks_on_failure)
except eventlet.timeout.Timeout:
# We never heard from Neutron
LOG.warning(_LW('Timeout waiting for vif plugging callback for '
'instance %(uuid)s'), {'uuid': instance.uuid},
instance=instance)
if CONF.vif_plugging_is_fatal:
self._cleanup_failed_start(context, instance, network_info,
block_device_info, guest,
destroy_disks_on_failure)
raise exception.VirtualInterfaceCreateException()
except Exception:
# Any other error, be sure to clean up
LOG.error(_LE('Failed to start libvirt guest'),
instance=instance)
with excutils.save_and_reraise_exception():
self._cleanup_failed_start(context, instance, network_info,
block_device_info, guest,
destroy_disks_on_failure)
# Resume only if domain has been paused
if pause:
guest.resume()
return guest
def _get_vcpu_total(self):
"""Get available vcpu number of physical computer.
        :returns: the number of CPU cores that instances can use.
"""
try:
total_pcpus = self._host.get_cpu_count()
except libvirt.libvirtError:
            LOG.warning(_LW("Cannot get the number of CPUs, because this "
                            "function is not implemented for this platform."))
return 0
if not CONF.vcpu_pin_set:
return total_pcpus
available_ids = hardware.get_vcpu_pin_set()
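        # Illustrative example (hypothetical value): a vcpu_pin_set of
        # "4-12,^8" yields the id set {4, 5, 6, 7, 9, 10, 11, 12}, so this
        # method would report 8 usable vCPUs.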
# We get the list of online CPUs on the host and see if the requested
# set falls under these. If not, we retain the old behavior.
online_pcpus = None
try:
online_pcpus = self._host.get_online_cpus()
except libvirt.libvirtError as ex:
error_code = ex.get_error_code()
LOG.warning(
_LW("Couldn't retrieve the online CPUs due to a Libvirt "
"error: %(error)s with error code: %(error_code)s"),
{'error': ex, 'error_code': error_code})
if online_pcpus:
if not (available_ids <= online_pcpus):
                msg = (_("Invalid vcpu_pin_set config, one or more of the "
                         "specified cpuset is not online. Online cpuset(s): "
                         "%(online)s, requested cpuset(s): %(req)s") %
                       {'online': sorted(online_pcpus),
                        'req': sorted(available_ids)})
raise exception.Invalid(msg)
elif sorted(available_ids)[-1] >= total_pcpus:
raise exception.Invalid(_("Invalid vcpu_pin_set config, "
"out of hypervisor cpu range."))
return len(available_ids)
@staticmethod
def _get_local_gb_info():
"""Get local storage info of the compute node in GB.
:returns: A dict containing:
:total: How big the overall usable filesystem is (in gigabytes)
:free: How much space is free (in gigabytes)
:used: How much space is used (in gigabytes)
"""
if CONF.libvirt.images_type == 'lvm':
info = lvm.get_volume_group_info(
CONF.libvirt.images_volume_group)
elif CONF.libvirt.images_type == 'rbd':
info = LibvirtDriver._get_rbd_driver().get_pool_info()
else:
info = libvirt_utils.get_fs_info(CONF.instances_path)
for (k, v) in info.items():
info[k] = v / units.Gi
return info
def _get_vcpu_used(self):
"""Get vcpu usage number of physical computer.
:returns: The total number of vcpu(s) that are currently being used.
"""
total = 0
# Not all libvirt drivers will support the get_vcpus_info()
#
# For example, LXC does not have a concept of vCPUs, while
# QEMU (TCG) traditionally handles all vCPUs in a single
# thread. So both will report an exception when the vcpus()
# API call is made. In such a case we should report the
# guest as having 1 vCPU, since that lets us still do
# CPU over commit calculations that apply as the total
# guest count scales.
#
# It is also possible that we might see an exception if
        # the guest is just in the middle of shutting down. Technically
        # we should report 0 for vCPU usage in this case, but we
        # can't reliably distinguish the vcpu not supported
# case from the just shutting down case. Thus we don't know
# whether to report 1 or 0 for vCPU count.
#
# Under-reporting vCPUs is bad because it could conceivably
# let the scheduler place too many guests on the host. Over-
# reporting vCPUs is not a problem as it'll auto-correct on
# the next refresh of usage data.
#
# Thus when getting an exception we always report 1 as the
# vCPU count, as the least worst value.
for guest in self._host.list_guests():
try:
vcpus = guest.get_vcpus_info()
total += len(list(vcpus))
except libvirt.libvirtError:
total += 1
# NOTE(gtt116): give other tasks a chance.
greenthread.sleep(0)
return total
def _get_instance_capabilities(self):
"""Get hypervisor instance capabilities
Returns a list of tuples that describe instances the
hypervisor is capable of hosting. Each tuple consists
of the triplet (arch, hypervisor_type, vm_mode).
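        Illustrative example of the returned list (actual values depend
        on the host):
            [('x86_64', 'qemu', 'hvm'), ('x86_64', 'kvm', 'hvm'),
             ('i686', 'qemu', 'hvm')]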
:returns: List of tuples describing instance capabilities
"""
caps = self._host.get_capabilities()
instance_caps = list()
for g in caps.guests:
for dt in g.domtype:
instance_cap = (
fields.Architecture.canonicalize(g.arch),
fields.HVType.canonicalize(dt),
fields.VMMode.canonicalize(g.ostype))
instance_caps.append(instance_cap)
return instance_caps
def _get_cpu_info(self):
"""Get cpuinfo information.
Obtains cpu feature from virConnect.getCapabilities.
:return: see above description
"""
caps = self._host.get_capabilities()
cpu_info = dict()
cpu_info['arch'] = caps.host.cpu.arch
cpu_info['model'] = caps.host.cpu.model
cpu_info['vendor'] = caps.host.cpu.vendor
topology = dict()
topology['cells'] = len(getattr(caps.host.topology, 'cells', [1]))
topology['sockets'] = caps.host.cpu.sockets
topology['cores'] = caps.host.cpu.cores
topology['threads'] = caps.host.cpu.threads
cpu_info['topology'] = topology
features = set()
for f in caps.host.cpu.features:
features.add(f.name)
cpu_info['features'] = features
return cpu_info
def _get_pcidev_info(self, devname):
"""Returns a dict of PCI device."""
def _get_device_type(cfgdev, pci_address):
"""Get a PCI device's device type.
An assignable PCI device can be a normal PCI device,
a SR-IOV Physical Function (PF), or a SR-IOV Virtual
Function (VF). Only normal PCI devices or SR-IOV VFs
are assignable, while SR-IOV PFs are always owned by
hypervisor.
"""
for fun_cap in cfgdev.pci_capability.fun_capability:
if fun_cap.type == 'virt_functions':
return {
'dev_type': fields.PciDeviceType.SRIOV_PF,
}
if (fun_cap.type == 'phys_function' and
len(fun_cap.device_addrs) != 0):
phys_address = "%04x:%02x:%02x.%01x" % (
fun_cap.device_addrs[0][0],
fun_cap.device_addrs[0][1],
fun_cap.device_addrs[0][2],
fun_cap.device_addrs[0][3])
return {
'dev_type': fields.PciDeviceType.SRIOV_VF,
'parent_addr': phys_address,
}
# Note(moshele): libvirt < 1.3 reported virt_functions capability
# only when VFs are enabled. The check below is a workaround
# to get the correct report regardless of whether or not any
# VFs are enabled for the device.
if not self._host.has_min_version(
MIN_LIBVIRT_PF_WITH_NO_VFS_CAP_VERSION):
is_physical_function = pci_utils.is_physical_function(
*pci_utils.get_pci_address_fields(pci_address))
if is_physical_function:
return {'dev_type': fields.PciDeviceType.SRIOV_PF}
return {'dev_type': fields.PciDeviceType.STANDARD}
virtdev = self._host.device_lookup_by_name(devname)
xmlstr = virtdev.XMLDesc(0)
cfgdev = vconfig.LibvirtConfigNodeDevice()
cfgdev.parse_str(xmlstr)
address = "%04x:%02x:%02x.%1x" % (
cfgdev.pci_capability.domain,
cfgdev.pci_capability.bus,
cfgdev.pci_capability.slot,
cfgdev.pci_capability.function)
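        # Illustrative example (hypothetical values): a device with
        # domain 0x0, bus 0x81, slot 0x0 and function 0x1 yields the
        # address string '0000:81:00.1'.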
device = {
"dev_id": cfgdev.name,
"address": address,
"product_id": "%04x" % cfgdev.pci_capability.product_id,
"vendor_id": "%04x" % cfgdev.pci_capability.vendor_id,
}
device["numa_node"] = cfgdev.pci_capability.numa_node
        # required by the database model
device['label'] = 'label_%(vendor_id)s_%(product_id)s' % device
device.update(_get_device_type(cfgdev, address))
return device
def _get_pci_passthrough_devices(self):
"""Get host PCI devices information.
        Obtains PCI device information from libvirt and returns it as a
        JSON string.
Each device information is a dictionary, with mandatory keys
of 'address', 'vendor_id', 'product_id', 'dev_type', 'dev_id',
'label' and other optional device specific information.
Refer to the objects/pci_device.py for more idea of these keys.
:returns: a JSON string containing a list of the assignable PCI
devices information
"""
# Bail early if we know we can't support `listDevices` to avoid
# repeated warnings within a periodic task
if not getattr(self, '_list_devices_supported', True):
return jsonutils.dumps([])
try:
dev_names = self._host.list_pci_devices() or []
except libvirt.libvirtError as ex:
error_code = ex.get_error_code()
if error_code == libvirt.VIR_ERR_NO_SUPPORT:
self._list_devices_supported = False
LOG.warning(_LW("URI %(uri)s does not support "
"listDevices: %(error)s"),
{'uri': self._uri(), 'error': ex})
return jsonutils.dumps([])
else:
raise
pci_info = []
for name in dev_names:
pci_info.append(self._get_pcidev_info(name))
return jsonutils.dumps(pci_info)
def _has_numa_support(self):
# This means that the host can support LibvirtConfigGuestNUMATune
# and the nodeset field in LibvirtConfigGuestMemoryBackingPage
for ver in BAD_LIBVIRT_NUMA_VERSIONS:
if self._host.has_version(ver):
if not getattr(self, '_bad_libvirt_numa_version_warn', False):
LOG.warning(_LW('You are running with libvirt version %s '
'which is known to have broken NUMA support. '
'Consider patching or updating libvirt on '
'this host if you need NUMA support.'),
self._version_to_string(ver))
self._bad_libvirt_numa_version_warn = True
return False
support_matrix = {
(fields.Architecture.I686,
fields.Architecture.X86_64,
fields.Architecture.AARCH64): MIN_LIBVIRT_NUMA_VERSION,
(fields.Architecture.PPC64,
fields.Architecture.PPC64LE): MIN_LIBVIRT_NUMA_VERSION_PPC}
caps = self._host.get_capabilities()
is_supported = False
for archs, libvirt_ver in support_matrix.items():
if ((caps.host.cpu.arch in archs) and
self._host.has_min_version(libvirt_ver,
MIN_QEMU_NUMA_HUGEPAGE_VERSION,
host.HV_DRIVER_QEMU)):
is_supported = True
return is_supported
def _has_hugepage_support(self):
# This means that the host can support multiple values for the size
# field in LibvirtConfigGuestMemoryBackingPage
supported_archs = [fields.Architecture.I686,
fields.Architecture.X86_64,
fields.Architecture.AARCH64,
fields.Architecture.PPC64LE,
fields.Architecture.PPC64]
caps = self._host.get_capabilities()
return ((caps.host.cpu.arch in supported_archs) and
self._host.has_min_version(MIN_LIBVIRT_HUGEPAGE_VERSION,
MIN_QEMU_NUMA_HUGEPAGE_VERSION,
host.HV_DRIVER_QEMU))
def _get_host_numa_topology(self):
if not self._has_numa_support():
return
caps = self._host.get_capabilities()
topology = caps.host.topology
if topology is None or not topology.cells:
return
cells = []
allowed_cpus = hardware.get_vcpu_pin_set()
online_cpus = self._host.get_online_cpus()
if allowed_cpus:
allowed_cpus &= online_cpus
else:
allowed_cpus = online_cpus
def _get_reserved_memory_for_cell(self, cell_id, page_size):
cell = self._reserved_hugepages.get(cell_id, {})
return cell.get(page_size, 0)
for cell in topology.cells:
cpuset = set(cpu.id for cpu in cell.cpus)
siblings = sorted(map(set,
set(tuple(cpu.siblings)
if cpu.siblings else ()
for cpu in cell.cpus)
))
cpuset &= allowed_cpus
siblings = [sib & allowed_cpus for sib in siblings]
# Filter out singles and empty sibling sets that may be left
siblings = [sib for sib in siblings if len(sib) > 1]
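            # Illustrative example (hypothetical values): a cell with cpus
            # {0, 1, 2, 3} and hyperthread pairs {0, 2} and {1, 3},
            # restricted by a vcpu_pin_set of "0-2", ends up with cpuset
            # {0, 1, 2} and siblings [{0, 2}]; the {1, 3} pair shrinks to
            # a single CPU and is dropped.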
mempages = []
if self._has_hugepage_support():
mempages = [
objects.NUMAPagesTopology(
size_kb=pages.size,
total=pages.total,
used=0,
reserved=_get_reserved_memory_for_cell(
self, cell.id, pages.size))
for pages in cell.mempages]
cell = objects.NUMACell(id=cell.id, cpuset=cpuset,
memory=cell.memory / units.Ki,
cpu_usage=0, memory_usage=0,
siblings=siblings,
pinned_cpus=set([]),
mempages=mempages)
cells.append(cell)
return objects.NUMATopology(cells=cells)
def get_all_volume_usage(self, context, compute_host_bdms):
"""Return usage info for volumes attached to vms on
a given host.
"""
vol_usage = []
for instance_bdms in compute_host_bdms:
instance = instance_bdms['instance']
for bdm in instance_bdms['instance_bdms']:
mountpoint = bdm['device_name']
if mountpoint.startswith('/dev/'):
mountpoint = mountpoint[5:]
volume_id = bdm['volume_id']
LOG.debug("Trying to get stats for the volume %s",
volume_id, instance=instance)
vol_stats = self.block_stats(instance, mountpoint)
if vol_stats:
stats = dict(volume=volume_id,
instance=instance,
rd_req=vol_stats[0],
rd_bytes=vol_stats[1],
wr_req=vol_stats[2],
wr_bytes=vol_stats[3])
LOG.debug(
"Got volume usage stats for the volume=%(volume)s,"
" rd_req=%(rd_req)d, rd_bytes=%(rd_bytes)d, "
"wr_req=%(wr_req)d, wr_bytes=%(wr_bytes)d",
stats, instance=instance)
vol_usage.append(stats)
return vol_usage
def block_stats(self, instance, disk_id):
"""Note that this function takes an instance name."""
try:
guest = self._host.get_guest(instance)
# TODO(sahid): We are converting all calls from a
# virDomain object to use nova.virt.libvirt.Guest.
# We should be able to remove domain at the end.
domain = guest._domain
return domain.blockStats(disk_id)
except libvirt.libvirtError as e:
errcode = e.get_error_code()
LOG.info(_LI('Getting block stats failed, device might have '
'been detached. Instance=%(instance_name)s '
'Disk=%(disk)s Code=%(errcode)s Error=%(e)s'),
{'instance_name': instance.name, 'disk': disk_id,
'errcode': errcode, 'e': e},
instance=instance)
except exception.InstanceNotFound:
LOG.info(_LI('Could not find domain in libvirt for instance %s. '
'Cannot get block stats for device'), instance.name,
instance=instance)
def get_console_pool_info(self, console_type):
# TODO(mdragon): console proxy should be implemented for libvirt,
# in case someone wants to use it with kvm or
# such. For now return fake data.
return {'address': '127.0.0.1',
'username': 'fakeuser',
'password': 'fakepassword'}
def refresh_security_group_rules(self, security_group_id):
self.firewall_driver.refresh_security_group_rules(security_group_id)
def refresh_instance_security_rules(self, instance):
self.firewall_driver.refresh_instance_security_rules(instance)
def get_available_resource(self, nodename):
"""Retrieve resource information.
This method is called when nova-compute launches, and
as part of a periodic task that records the results in the DB.
:param nodename: unused in this driver
:returns: dictionary containing resource info
"""
disk_info_dict = self._get_local_gb_info()
data = {}
# NOTE(dprince): calling capabilities before getVersion works around
# an initialization issue with some versions of Libvirt (1.0.5.5).
# See: https://bugzilla.redhat.com/show_bug.cgi?id=1000116
# See: https://bugs.launchpad.net/nova/+bug/1215593
data["supported_instances"] = self._get_instance_capabilities()
data["vcpus"] = self._get_vcpu_total()
data["memory_mb"] = self._host.get_memory_mb_total()
data["local_gb"] = disk_info_dict['total']
data["vcpus_used"] = self._get_vcpu_used()
data["memory_mb_used"] = self._host.get_memory_mb_used()
data["local_gb_used"] = disk_info_dict['used']
data["hypervisor_type"] = self._host.get_driver_type()
data["hypervisor_version"] = self._host.get_version()
data["hypervisor_hostname"] = self._host.get_hostname()
# TODO(berrange): why do we bother converting the
# libvirt capabilities XML into a special JSON format ?
# The data format is different across all the drivers
# so we could just return the raw capabilities XML
# which 'compare_cpu' could use directly
#
# That said, arch_filter.py now seems to rely on
# the libvirt drivers format which suggests this
# data format needs to be standardized across drivers
data["cpu_info"] = jsonutils.dumps(self._get_cpu_info())
disk_free_gb = disk_info_dict['free']
disk_over_committed = self._get_disk_over_committed_size_total()
available_least = disk_free_gb * units.Gi - disk_over_committed
data['disk_available_least'] = available_least / units.Gi
data['pci_passthrough_devices'] = \
self._get_pci_passthrough_devices()
numa_topology = self._get_host_numa_topology()
if numa_topology:
data['numa_topology'] = numa_topology._to_json()
else:
data['numa_topology'] = None
return data
def check_instance_shared_storage_local(self, context, instance):
"""Check if instance files located on shared storage.
This runs check on the destination host, and then calls
back to the source host to check the results.
:param context: security context
:param instance: nova.objects.instance.Instance object
:returns:
- tempfile: A dict containing the tempfile info on the destination
host
- None:
                1. If the instance path does not exist.
                2. If the image backend is a shared block storage type.
"""
if self.image_backend.backend().is_shared_block_storage():
return None
dirpath = libvirt_utils.get_instance_path(instance)
if not os.path.exists(dirpath):
return None
fd, tmp_file = tempfile.mkstemp(dir=dirpath)
LOG.debug("Creating tmpfile %s to verify with other "
"compute node that the instance is on "
"the same shared storage.",
tmp_file, instance=instance)
os.close(fd)
return {"filename": tmp_file}
def check_instance_shared_storage_remote(self, context, data):
return os.path.exists(data['filename'])
def check_instance_shared_storage_cleanup(self, context, data):
fileutils.delete_if_exists(data["filename"])
def check_can_live_migrate_destination(self, context, instance,
src_compute_info, dst_compute_info,
block_migration=False,
disk_over_commit=False):
"""Check if it is possible to execute live migration.
This runs checks on the destination host, and then calls
back to the source host to check the results.
:param context: security context
:param instance: nova.db.sqlalchemy.models.Instance
:param block_migration: if true, prepare for block migration
:param disk_over_commit: if true, allow disk over commit
:returns: a LibvirtLiveMigrateData object
"""
disk_available_gb = dst_compute_info['disk_available_least']
disk_available_mb = (
(disk_available_gb * units.Ki) - CONF.reserved_host_disk_mb)
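        # Illustrative example (hypothetical values): with
        # disk_available_least of 100 GB and reserved_host_disk_mb of 512,
        # disk_available_mb is 100 * 1024 - 512 = 101888 MB.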
# Compare CPU
if not instance.vcpu_model or not instance.vcpu_model.model:
source_cpu_info = src_compute_info['cpu_info']
self._compare_cpu(None, source_cpu_info, instance)
else:
self._compare_cpu(instance.vcpu_model, None, instance)
# Create file on storage, to be checked on source host
filename = self._create_shared_storage_test_file(instance)
data = objects.LibvirtLiveMigrateData()
data.filename = filename
data.image_type = CONF.libvirt.images_type
data.graphics_listen_addr_vnc = CONF.vnc.vncserver_listen
data.graphics_listen_addr_spice = CONF.spice.server_listen
if CONF.serial_console.enabled:
data.serial_listen_addr = CONF.serial_console.proxyclient_address
else:
data.serial_listen_addr = None
# Notes(eliqiao): block_migration and disk_over_commit are not
# nullable, so just don't set them if they are None
if block_migration is not None:
data.block_migration = block_migration
if disk_over_commit is not None:
data.disk_over_commit = disk_over_commit
data.disk_available_mb = disk_available_mb
return data
def cleanup_live_migration_destination_check(self, context,
dest_check_data):
"""Do required cleanup on dest host after check_can_live_migrate calls
:param context: security context
"""
filename = dest_check_data.filename
self._cleanup_shared_storage_test_file(filename)
def check_can_live_migrate_source(self, context, instance,
dest_check_data,
block_device_info=None):
"""Check if it is possible to execute live migration.
This checks if the live migration can succeed, based on the
results from check_can_live_migrate_destination.
:param context: security context
:param instance: nova.db.sqlalchemy.models.Instance
:param dest_check_data: result of check_can_live_migrate_destination
:param block_device_info: result of _get_instance_block_device_info
:returns: a LibvirtLiveMigrateData object
"""
if not isinstance(dest_check_data, migrate_data_obj.LiveMigrateData):
md_obj = objects.LibvirtLiveMigrateData()
md_obj.from_legacy_dict(dest_check_data)
dest_check_data = md_obj
# Checking shared storage connectivity
# if block migration, instances_path should not be on shared storage.
source = CONF.host
dest_check_data.is_shared_instance_path = (
self._check_shared_storage_test_file(
dest_check_data.filename, instance))
dest_check_data.is_shared_block_storage = (
self._is_shared_block_storage(instance, dest_check_data,
block_device_info))
if 'block_migration' not in dest_check_data:
dest_check_data.block_migration = (
not dest_check_data.is_on_shared_storage())
if dest_check_data.block_migration:
# TODO(eliqiao): Once block_migration flag is removed from the API
# we can safely remove the if condition
if dest_check_data.is_on_shared_storage():
reason = _("Block migration can not be used "
"with shared storage.")
raise exception.InvalidLocalStorage(reason=reason, path=source)
if 'disk_over_commit' in dest_check_data:
self._assert_dest_node_has_enough_disk(context, instance,
dest_check_data.disk_available_mb,
dest_check_data.disk_over_commit,
block_device_info)
if block_device_info:
bdm = block_device_info.get('block_device_mapping')
# NOTE(pkoniszewski): libvirt from version 1.2.17 upwards
# supports selective block device migration. It means that it
# is possible to define subset of block devices to be copied
# during migration. If they are not specified - block devices
# won't be migrated. However, it does not work when live
# migration is tunnelled through libvirt.
if bdm and not self._host.has_min_version(
MIN_LIBVIRT_BLOCK_LM_WITH_VOLUMES_VERSION):
# NOTE(stpierre): if this instance has mapped volumes,
# we can't do a block migration, since that will result
# in volumes being copied from themselves to themselves,
# which is a recipe for disaster.
ver = ".".join([str(x) for x in
MIN_LIBVIRT_BLOCK_LM_WITH_VOLUMES_VERSION])
msg = (_('Cannot block migrate instance %(uuid)s with'
' mapped volumes. Selective block device'
' migration feature requires libvirt version'
' %(libvirt_ver)s') %
{'uuid': instance.uuid, 'libvirt_ver': ver})
LOG.error(msg, instance=instance)
raise exception.MigrationPreCheckError(reason=msg)
# NOTE(eliqiao): Selective disk migrations are not supported
# with tunnelled block migrations so we can block them early.
if (bdm and
(self._block_migration_flags &
libvirt.VIR_MIGRATE_TUNNELLED != 0)):
msg = (_('Cannot block migrate instance %(uuid)s with'
' mapped volumes. Selective block device'
' migration is not supported with tunnelled'
' block migrations.') % {'uuid': instance.uuid})
LOG.error(msg, instance=instance)
raise exception.MigrationPreCheckError(reason=msg)
elif not (dest_check_data.is_shared_block_storage or
dest_check_data.is_shared_instance_path):
reason = _("Shared storage live-migration requires either shared "
"storage or boot-from-volume with no local disks.")
raise exception.InvalidSharedStorage(reason=reason, path=source)
# NOTE(mikal): include the instance directory name here because it
# doesn't yet exist on the destination but we want to force that
# same name to be used
instance_path = libvirt_utils.get_instance_path(instance,
relative=True)
dest_check_data.instance_relative_path = instance_path
return dest_check_data
def _is_shared_block_storage(self, instance, dest_check_data,
block_device_info=None):
"""Check if all block storage of an instance can be shared
between source and destination of a live migration.
Returns true if the instance is volume backed and has no local disks,
or if the image backend is the same on source and destination and the
backend shares block storage between compute nodes.
:param instance: nova.objects.instance.Instance object
        :param dest_check_data: LibvirtLiveMigrateData object with fields
            image_type, is_shared_instance_path, and is_volume_backed
"""
if (dest_check_data.obj_attr_is_set('image_type') and
CONF.libvirt.images_type == dest_check_data.image_type and
self.image_backend.backend().is_shared_block_storage()):
# NOTE(dgenin): currently true only for RBD image backend
return True
if (dest_check_data.is_shared_instance_path and
self.image_backend.backend().is_file_in_instance_path()):
# NOTE(angdraug): file based image backends (Flat, Qcow2)
# place block device files under the instance path
return True
if (dest_check_data.is_volume_backed and
not bool(jsonutils.loads(
self.get_instance_disk_info(instance,
block_device_info)))):
return True
return False
def _assert_dest_node_has_enough_disk(self, context, instance,
available_mb, disk_over_commit,
block_device_info=None):
"""Checks if destination has enough disk for block migration."""
        # Libvirt supports the qcow2 disk format, which is usually compressed
        # on compute nodes.
        # The real (compressed) disk image may be enlarged up to the
        # "virtual disk size", which is specified as the maximum disk size.
        # (See qemu-img info path-to-disk)
# Scheduler recognizes destination host still has enough disk space
# if real disk size < available disk size
# if disk_over_commit is True,
# otherwise virtual disk size < available disk size.
available = 0
if available_mb:
available = available_mb * units.Mi
ret = self.get_instance_disk_info(instance,
block_device_info=block_device_info)
disk_infos = jsonutils.loads(ret)
necessary = 0
if disk_over_commit:
for info in disk_infos:
necessary += int(info['disk_size'])
else:
for info in disk_infos:
necessary += int(info['virt_disk_size'])
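        # Worked example (numbers assumed): with available_mb=20480 the
        # available space is 20 GiB; two qcow2 disks whose virt_disk_size
        # values sum to 25 GiB and disk_over_commit=False make
        # necessary > available, so the precheck below fails.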
# Check that available disk > necessary disk
if (available - necessary) < 0:
reason = (_('Unable to migrate %(instance_uuid)s: '
                        'Disk of instance is too large (available'
' on destination host:%(available)s '
'< need:%(necessary)s)') %
{'instance_uuid': instance.uuid,
'available': available,
'necessary': necessary})
raise exception.MigrationPreCheckError(reason=reason)
def _compare_cpu(self, guest_cpu, host_cpu_str, instance):
"""Check the host is compatible with the requested CPU
:param guest_cpu: nova.objects.VirtCPUModel or None
:param host_cpu_str: JSON from _get_cpu_info() method
If the 'guest_cpu' parameter is not None, this will be
validated for migration compatibility with the host.
Otherwise the 'host_cpu_str' JSON string will be used for
validation.
        :returns:
            None. If the given CPU info is not compatible with this
            host, an exception is raised.
"""
# NOTE(kchamart): Comparing host to guest CPU model for emulated
# guests (<domain type='qemu'>) should not matter -- in this
# mode (QEMU "TCG") the CPU is fully emulated in software and no
# hardware acceleration, like KVM, is involved. So, skip the CPU
# compatibility check for the QEMU domain type, and retain it for
# KVM guests.
if CONF.libvirt.virt_type not in ['kvm']:
return
if guest_cpu is None:
info = jsonutils.loads(host_cpu_str)
LOG.info(_LI('Instance launched has CPU info: %s'), host_cpu_str)
cpu = vconfig.LibvirtConfigCPU()
cpu.arch = info['arch']
cpu.model = info['model']
cpu.vendor = info['vendor']
cpu.sockets = info['topology']['sockets']
cpu.cores = info['topology']['cores']
cpu.threads = info['topology']['threads']
for f in info['features']:
cpu.add_feature(vconfig.LibvirtConfigCPUFeature(f))
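            # For illustration only (host values assumed), the config built
            # above serializes via cpu.to_xml() to roughly:
            #   <cpu><arch>x86_64</arch><model>Haswell</model>
            #        <vendor>Intel</vendor>
            #        <topology sockets="1" cores="4" threads="2"/>
            #        <feature name="aes"/>...</cpu>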
else:
cpu = self._vcpu_model_to_cpu_config(guest_cpu)
u = ("http://libvirt.org/html/libvirt-libvirt-host.html#"
"virCPUCompareResult")
m = _("CPU doesn't have compatibility.\n\n%(ret)s\n\nRefer to %(u)s")
# unknown character exists in xml, then libvirt complains
try:
cpu_xml = cpu.to_xml()
LOG.debug("cpu compare xml: %s", cpu_xml, instance=instance)
ret = self._host.compare_cpu(cpu_xml)
except libvirt.libvirtError as e:
error_code = e.get_error_code()
if error_code == libvirt.VIR_ERR_NO_SUPPORT:
LOG.debug("URI %(uri)s does not support cpu comparison. "
"It will be proceeded though. Error: %(error)s",
{'uri': self._uri(), 'error': e})
return
else:
LOG.error(m, {'ret': e, 'u': u})
raise exception.MigrationPreCheckError(
reason=m % {'ret': e, 'u': u})
if ret <= 0:
LOG.error(m, {'ret': ret, 'u': u})
raise exception.InvalidCPUInfo(reason=m % {'ret': ret, 'u': u})
def _create_shared_storage_test_file(self, instance):
"""Makes tmpfile under CONF.instances_path."""
dirpath = CONF.instances_path
fd, tmp_file = tempfile.mkstemp(dir=dirpath)
LOG.debug("Creating tmpfile %s to notify to other "
"compute nodes that they should mount "
"the same storage.", tmp_file, instance=instance)
os.close(fd)
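        # Only the basename is returned (e.g. 'tmpAbC123', name assumed);
        # the source host joins it with its own CONF.instances_path in
        # _check_shared_storage_test_file to decide whether both hosts see
        # the same filesystem.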
return os.path.basename(tmp_file)
def _check_shared_storage_test_file(self, filename, instance):
"""Confirms existence of the tmpfile under CONF.instances_path.
        If the tmpfile cannot be confirmed, returns False.
"""
# NOTE(tpatzig): if instances_path is a shared volume that is
# under heavy IO (many instances on many compute nodes),
# then checking the existence of the testfile fails,
# just because it takes longer until the client refreshes and new
# content gets visible.
# os.utime (like touch) on the directory forces the client to refresh.
os.utime(CONF.instances_path, None)
tmp_file = os.path.join(CONF.instances_path, filename)
if not os.path.exists(tmp_file):
exists = False
else:
exists = True
LOG.debug('Check if temp file %s exists to indicate shared storage '
'is being used for migration. Exists? %s', tmp_file, exists,
instance=instance)
return exists
def _cleanup_shared_storage_test_file(self, filename):
"""Removes existence of the tmpfile under CONF.instances_path."""
tmp_file = os.path.join(CONF.instances_path, filename)
os.remove(tmp_file)
def ensure_filtering_rules_for_instance(self, instance, network_info):
"""Ensure that an instance's filtering rules are enabled.
When migrating an instance, we need the filtering rules to
be configured on the destination host before starting the
migration.
Also, when restarting the compute service, we need to ensure
that filtering rules exist for all running services.
"""
self.firewall_driver.setup_basic_filtering(instance, network_info)
self.firewall_driver.prepare_instance_filter(instance,
network_info)
# nwfilters may be defined in a separate thread in the case
# of libvirt non-blocking mode, so we wait for completion
timeout_count = list(range(CONF.live_migration_retry_count))
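        # e.g. with live_migration_retry_count=30 (default assumed) this
        # polls once per second for up to 30 seconds before raising below.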
while timeout_count:
if self.firewall_driver.instance_filter_exists(instance,
network_info):
break
timeout_count.pop()
if len(timeout_count) == 0:
msg = _('The firewall filter for %s does not exist')
raise exception.InternalError(msg % instance.name)
greenthread.sleep(1)
def filter_defer_apply_on(self):
self.firewall_driver.filter_defer_apply_on()
def filter_defer_apply_off(self):
self.firewall_driver.filter_defer_apply_off()
def live_migration(self, context, instance, dest,
post_method, recover_method, block_migration=False,
migrate_data=None):
"""Spawning live_migration operation for distributing high-load.
:param context: security context
:param instance:
nova.db.sqlalchemy.models.Instance object
instance object that is migrated.
:param dest: destination host
:param post_method:
post operation method.
expected nova.compute.manager._post_live_migration.
:param recover_method:
recovery method when any exception occurs.
expected nova.compute.manager._rollback_live_migration.
:param block_migration: if true, do block migration.
:param migrate_data: a LibvirtLiveMigrateData object
"""
# 'dest' will be substituted into 'migration_uri' so ensure
        # it doesn't contain any characters that could be used to
        # exploit the URI accepted by libvirt
if not libvirt_utils.is_valid_hostname(dest):
raise exception.InvalidHostname(hostname=dest)
self._live_migration(context, instance, dest,
post_method, recover_method, block_migration,
migrate_data)
def live_migration_abort(self, instance):
"""Aborting a running live-migration.
:param instance: instance object that is in migration
"""
guest = self._host.get_guest(instance)
dom = guest._domain
try:
dom.abortJob()
except libvirt.libvirtError as e:
LOG.error(_LE("Failed to cancel migration %s"),
e, instance=instance)
raise
def _verify_serial_console_is_disabled(self):
if CONF.serial_console.enabled:
msg = _('Your destination node does not support'
' retrieving listen addresses. In order'
' for live migration to work properly you'
' must disable serial console.')
raise exception.MigrationError(reason=msg)
def _live_migration_operation(self, context, instance, dest,
block_migration, migrate_data, guest,
device_names):
"""Invoke the live migration operation
:param context: security context
:param instance:
nova.db.sqlalchemy.models.Instance object
instance object that is migrated.
:param dest: destination host
:param block_migration: if true, do block migration.
:param migrate_data: a LibvirtLiveMigrateData object
:param guest: the guest domain object
:param device_names: list of device names that are being migrated with
instance
This method is intended to be run in a background thread and will
block that thread until the migration is finished or failed.
"""
try:
if migrate_data.block_migration:
migration_flags = self._block_migration_flags
else:
migration_flags = self._live_migration_flags
serial_listen_addr = libvirt_migrate.serial_listen_addr(
migrate_data)
if not serial_listen_addr:
# In this context we want to ensure that serial console is
# disabled on source node. This is because nova couldn't
# retrieve serial listen address from destination node, so we
# consider that destination node might have serial console
# disabled as well.
self._verify_serial_console_is_disabled()
            # NOTE(aplanas): migrate_uri will have a value only when the
            # `live_migration_inbound_addr` parameter is set and the
            # migration is not tunnelled.
migrate_uri = None
if ('target_connect_addr' in migrate_data and
migrate_data.target_connect_addr is not None):
dest = migrate_data.target_connect_addr
if (migration_flags &
libvirt.VIR_MIGRATE_TUNNELLED == 0):
migrate_uri = self._migrate_uri(dest)
params = None
new_xml_str = None
if CONF.libvirt.virt_type != "parallels":
new_xml_str = libvirt_migrate.get_updated_guest_xml(
# TODO(sahid): It's not a really good idea to pass
# the method _get_volume_config and we should to find
# a way to avoid this in future.
guest, migrate_data, self._get_volume_config)
if self._host.has_min_version(
MIN_LIBVIRT_BLOCK_LM_WITH_VOLUMES_VERSION):
params = {
'bandwidth': CONF.libvirt.live_migration_bandwidth,
'destination_xml': new_xml_str,
'migrate_disks': device_names,
}
# NOTE(pkoniszewski): Because of precheck which blocks
# tunnelled block live migration with mapped volumes we
# can safely remove migrate_disks when tunnelling is on.
# Otherwise we will block all tunnelled block migrations,
# even when an instance does not have volumes mapped.
# This is because selective disk migration is not
# supported in tunnelled block live migration. Also we
# cannot fallback to migrateToURI2 in this case because of
# bug #1398999
if (migration_flags &
libvirt.VIR_MIGRATE_TUNNELLED != 0):
params.pop('migrate_disks')
# TODO(sahid): This should be in
# post_live_migration_at_source but no way to retrieve
# ports acquired on the host for the guest at this
# step. Since the domain is going to be removed from
            # libvirtd on the source host after migration, we back up the
# serial ports to release them if all went well.
serial_ports = []
if CONF.serial_console.enabled:
serial_ports = list(self._get_serial_ports_from_guest(guest))
guest.migrate(self._live_migration_uri(dest),
migrate_uri=migrate_uri,
flags=migration_flags,
params=params,
domain_xml=new_xml_str,
bandwidth=CONF.libvirt.live_migration_bandwidth)
for hostname, port in serial_ports:
serial_console.release_port(host=hostname, port=port)
except Exception as e:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Live Migration failure: %s"), e,
instance=instance)
# If 'migrateToURI' fails we don't know what state the
# VM instances on each host are in. Possibilities include
#
# 1. src==running, dst==none
#
# Migration failed & rolled back, or never started
#
# 2. src==running, dst==paused
#
# Migration started but is still ongoing
#
# 3. src==paused, dst==paused
#
# Migration data transfer completed, but switchover
# is still ongoing, or failed
#
# 4. src==paused, dst==running
#
# Migration data transfer completed, switchover
# happened but cleanup on source failed
#
# 5. src==none, dst==running
#
# Migration fully succeeded.
#
# Libvirt will aim to complete any migration operation
# or roll it back. So even if the migrateToURI call has
# returned an error, if the migration was not finished
# libvirt should clean up.
#
                # So we take the error raised here with a pinch of salt
# and rely on the domain job info status to figure out
# what really happened to the VM, which is a much more
# reliable indicator.
#
# In particular we need to try very hard to ensure that
# Nova does not "forget" about the guest. ie leaving it
# running on a different host to the one recorded in
# the database, as that would be a serious resource leak
LOG.debug("Migration operation thread has finished",
instance=instance)
def _live_migration_copy_disk_paths(self, context, instance, guest):
'''Get list of disks to copy during migration
:param context: security context
:param instance: the instance being migrated
:param guest: the Guest instance being migrated
Get the list of disks to copy during migration.
:returns: a list of local source paths and a list of device names to
copy
'''
disk_paths = []
device_names = []
block_devices = []
# TODO(pkoniszewski): Remove version check when we bump min libvirt
# version to >= 1.2.17.
if (self._block_migration_flags &
libvirt.VIR_MIGRATE_TUNNELLED == 0 and
self._host.has_min_version(
MIN_LIBVIRT_BLOCK_LM_WITH_VOLUMES_VERSION)):
bdm_list = objects.BlockDeviceMappingList.get_by_instance_uuid(
context, instance.uuid)
block_device_info = driver.get_block_device_info(instance,
bdm_list)
block_device_mappings = driver.block_device_info_get_mapping(
block_device_info)
for bdm in block_device_mappings:
device_name = str(bdm['mount_device'].rsplit('/', 1)[1])
block_devices.append(device_name)
for dev in guest.get_all_disks():
if dev.readonly or dev.shareable:
continue
if dev.source_type not in ["file", "block"]:
continue
if dev.target_dev in block_devices:
continue
disk_paths.append(dev.source_path)
device_names.append(dev.target_dev)
return (disk_paths, device_names)
def _live_migration_data_gb(self, instance, disk_paths):
'''Calculate total amount of data to be transferred
:param instance: the nova.objects.Instance being migrated
:param disk_paths: list of disk paths that are being migrated
with instance
Calculates the total amount of data that needs to be
transferred during the live migration. The actual
amount copied will be larger than this, due to the
guest OS continuing to dirty RAM while the migration
is taking place. So this value represents the minimal
data size possible.
:returns: data size to be copied in GB
'''
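        # Rough example (values assumed): a flavor with 4096 MB of RAM and a
        # single 10 GiB local disk yields max(4, 2) + max(10, 2) = 14 GB;
        # the monitor loop later multiplies this into the completion timeout.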
ram_gb = instance.flavor.memory_mb * units.Mi / units.Gi
if ram_gb < 2:
ram_gb = 2
disk_gb = 0
for path in disk_paths:
try:
size = os.stat(path).st_size
size_gb = (size / units.Gi)
if size_gb < 2:
size_gb = 2
disk_gb += size_gb
except OSError as e:
LOG.warning(_LW("Unable to stat %(disk)s: %(ex)s"),
{'disk': path, 'ex': e})
# Ignore error since we don't want to break
# the migration monitoring thread operation
return ram_gb + disk_gb
def _get_migration_flags(self, is_block_migration):
if is_block_migration:
return self._block_migration_flags
return self._live_migration_flags
def _live_migration_monitor(self, context, instance, guest,
dest, post_method,
recover_method, block_migration,
migrate_data, finish_event,
disk_paths):
on_migration_failure = deque()
data_gb = self._live_migration_data_gb(instance, disk_paths)
downtime_steps = list(libvirt_migrate.downtime_steps(data_gb))
migration = migrate_data.migration
curdowntime = None
migration_flags = self._get_migration_flags(
migrate_data.block_migration)
n = 0
start = time.time()
progress_time = start
progress_watermark = None
previous_data_remaining = -1
is_post_copy_enabled = self._is_post_copy_enabled(migration_flags)
while True:
info = guest.get_job_info()
if info.type == libvirt.VIR_DOMAIN_JOB_NONE:
# Either still running, or failed or completed,
                # let's untangle the mess
if not finish_event.ready():
LOG.debug("Operation thread is still running",
instance=instance)
else:
info.type = libvirt_migrate.find_job_type(guest, instance)
LOG.debug("Fixed incorrect job type to be %d",
info.type, instance=instance)
if info.type == libvirt.VIR_DOMAIN_JOB_NONE:
# Migration is not yet started
LOG.debug("Migration not running yet",
instance=instance)
elif info.type == libvirt.VIR_DOMAIN_JOB_UNBOUNDED:
# Migration is still running
#
# This is where we wire up calls to change live
# migration status. eg change max downtime, cancel
# the operation, change max bandwidth
libvirt_migrate.run_tasks(guest, instance,
self.active_migrations,
on_migration_failure,
migration,
is_post_copy_enabled)
now = time.time()
elapsed = now - start
if ((progress_watermark is None) or
(progress_watermark == 0) or
(progress_watermark > info.data_remaining)):
progress_watermark = info.data_remaining
progress_time = now
progress_timeout = CONF.libvirt.live_migration_progress_timeout
completion_timeout = int(
CONF.libvirt.live_migration_completion_timeout * data_gb)
if libvirt_migrate.should_abort(instance, now, progress_time,
progress_timeout, elapsed,
completion_timeout,
migration.status):
try:
guest.abort_job()
except libvirt.libvirtError as e:
LOG.warning(_LW("Failed to abort migration %s"),
e, instance=instance)
self._clear_empty_migration(instance)
raise
if (is_post_copy_enabled and
libvirt_migrate.should_switch_to_postcopy(
info.memory_iteration, info.data_remaining,
previous_data_remaining, migration.status)):
libvirt_migrate.trigger_postcopy_switch(guest,
instance,
migration)
previous_data_remaining = info.data_remaining
curdowntime = libvirt_migrate.update_downtime(
guest, instance, curdowntime,
downtime_steps, elapsed)
# We loop every 500ms, so don't log on every
# iteration to avoid spamming logs for long
# running migrations. Just once every 5 secs
# is sufficient for developers to debug problems.
# We log once every 30 seconds at info to help
# admins see slow running migration operations
# when debug logs are off.
if (n % 10) == 0:
# Ignoring memory_processed, as due to repeated
# dirtying of data, this can be way larger than
# memory_total. Best to just look at what's
# remaining to copy and ignore what's done already
#
# TODO(berrange) perhaps we could include disk
# transfer stats in the progress too, but it
# might make memory info more obscure as large
# disk sizes might dwarf memory size
remaining = 100
if info.memory_total != 0:
remaining = round(info.memory_remaining *
100 / info.memory_total)
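                        # e.g. (values assumed) memory_total of 8 GiB with
                        # 2 GiB still to transfer reports remaining == 25.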
libvirt_migrate.save_stats(instance, migration,
info, remaining)
lg = LOG.debug
if (n % 60) == 0:
lg = LOG.info
lg(_LI("Migration running for %(secs)d secs, "
"memory %(remaining)d%% remaining; "
"(bytes processed=%(processed_memory)d, "
"remaining=%(remaining_memory)d, "
"total=%(total_memory)d)"),
{"secs": n / 2, "remaining": remaining,
"processed_memory": info.memory_processed,
"remaining_memory": info.memory_remaining,
"total_memory": info.memory_total}, instance=instance)
if info.data_remaining > progress_watermark:
lg(_LI("Data remaining %(remaining)d bytes, "
"low watermark %(watermark)d bytes "
"%(last)d seconds ago"),
{"remaining": info.data_remaining,
"watermark": progress_watermark,
"last": (now - progress_time)}, instance=instance)
n = n + 1
elif info.type == libvirt.VIR_DOMAIN_JOB_COMPLETED:
# Migration is all done
LOG.info(_LI("Migration operation has completed"),
instance=instance)
post_method(context, instance, dest, block_migration,
migrate_data)
break
elif info.type == libvirt.VIR_DOMAIN_JOB_FAILED:
# Migration did not succeed
LOG.error(_LE("Migration operation has aborted"),
instance=instance)
libvirt_migrate.run_recover_tasks(self._host, guest, instance,
on_migration_failure)
recover_method(context, instance, dest, migrate_data)
break
elif info.type == libvirt.VIR_DOMAIN_JOB_CANCELLED:
# Migration was stopped by admin
LOG.warning(_LW("Migration operation was cancelled"),
instance=instance)
libvirt_migrate.run_recover_tasks(self._host, guest, instance,
on_migration_failure)
recover_method(context, instance, dest, migrate_data,
migration_status='cancelled')
break
else:
LOG.warning(_LW("Unexpected migration job type: %d"),
info.type, instance=instance)
time.sleep(0.5)
self._clear_empty_migration(instance)
def _clear_empty_migration(self, instance):
try:
del self.active_migrations[instance.uuid]
except KeyError:
LOG.warning(_LW("There are no records in active migrations "
"for instance"), instance=instance)
def _live_migration(self, context, instance, dest, post_method,
recover_method, block_migration,
migrate_data):
"""Do live migration.
:param context: security context
:param instance:
nova.db.sqlalchemy.models.Instance object
instance object that is migrated.
:param dest: destination host
:param post_method:
post operation method.
expected nova.compute.manager._post_live_migration.
:param recover_method:
recovery method when any exception occurs.
expected nova.compute.manager._rollback_live_migration.
:param block_migration: if true, do block migration.
:param migrate_data: a LibvirtLiveMigrateData object
This fires off a new thread to run the blocking migration
operation, and then this thread monitors the progress of
migration and controls its operation
"""
guest = self._host.get_guest(instance)
disk_paths = []
device_names = []
if (migrate_data.block_migration and
CONF.libvirt.virt_type != "parallels"):
disk_paths, device_names = self._live_migration_copy_disk_paths(
context, instance, guest)
opthread = utils.spawn(self._live_migration_operation,
context, instance, dest,
block_migration,
migrate_data, guest,
device_names)
finish_event = eventlet.event.Event()
self.active_migrations[instance.uuid] = deque()
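        # Tasks queued on this deque (e.g. 'force-complete' appended by
        # live_migration_force_complete below) are drained by the monitor
        # thread via libvirt_migrate.run_tasks().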
def thread_finished(thread, event):
LOG.debug("Migration operation thread notification",
instance=instance)
event.send()
opthread.link(thread_finished, finish_event)
# Let eventlet schedule the new thread right away
time.sleep(0)
try:
LOG.debug("Starting monitoring of live migration",
instance=instance)
self._live_migration_monitor(context, instance, guest, dest,
post_method, recover_method,
block_migration, migrate_data,
finish_event, disk_paths)
except Exception as ex:
LOG.warning(_LW("Error monitoring migration: %(ex)s"),
{"ex": ex}, instance=instance, exc_info=True)
raise
finally:
LOG.debug("Live migration monitoring is all done",
instance=instance)
def _is_post_copy_enabled(self, migration_flags):
if self._is_post_copy_available():
if (migration_flags & libvirt.VIR_MIGRATE_POSTCOPY) != 0:
return True
return False
def live_migration_force_complete(self, instance):
try:
self.active_migrations[instance.uuid].append('force-complete')
except KeyError:
raise exception.NoActiveMigrationForInstance(
instance_id=instance.uuid)
def _try_fetch_image(self, context, path, image_id, instance,
fallback_from_host=None):
try:
libvirt_utils.fetch_image(context, path, image_id)
except exception.ImageNotFound:
if not fallback_from_host:
raise
LOG.debug("Image %(image_id)s doesn't exist anymore on "
"image service, attempting to copy image "
"from %(host)s",
{'image_id': image_id, 'host': fallback_from_host})
libvirt_utils.copy_image(src=path, dest=path,
host=fallback_from_host,
receive=True)
def _fetch_instance_kernel_ramdisk(self, context, instance,
fallback_from_host=None):
"""Download kernel and ramdisk for instance in instance directory."""
instance_dir = libvirt_utils.get_instance_path(instance)
if instance.kernel_id:
kernel_path = os.path.join(instance_dir, 'kernel')
# NOTE(dsanders): only fetch image if it's not available at
# kernel_path. This also avoids ImageNotFound exception if
# the image has been deleted from glance
if not os.path.exists(kernel_path):
self._try_fetch_image(context,
kernel_path,
instance.kernel_id,
instance, fallback_from_host)
if instance.ramdisk_id:
ramdisk_path = os.path.join(instance_dir, 'ramdisk')
# NOTE(dsanders): only fetch image if it's not available at
# ramdisk_path. This also avoids ImageNotFound exception if
# the image has been deleted from glance
if not os.path.exists(ramdisk_path):
self._try_fetch_image(context,
ramdisk_path,
instance.ramdisk_id,
instance, fallback_from_host)
def rollback_live_migration_at_destination(self, context, instance,
network_info,
block_device_info,
destroy_disks=True,
migrate_data=None):
"""Clean up destination node after a failed live migration."""
try:
self.destroy(context, instance, network_info, block_device_info,
destroy_disks, migrate_data)
finally:
# NOTE(gcb): Failed block live migration may leave instance
# directory at destination node, ensure it is always deleted.
is_shared_instance_path = True
if migrate_data:
is_shared_instance_path = migrate_data.is_shared_instance_path
if (migrate_data.obj_attr_is_set("serial_listen_ports")
and migrate_data.serial_listen_ports):
# Releases serial ports reserved.
for port in migrate_data.serial_listen_ports:
serial_console.release_port(
host=migrate_data.serial_listen_addr, port=port)
if not is_shared_instance_path:
instance_dir = libvirt_utils.get_instance_path_at_destination(
instance, migrate_data)
if os.path.exists(instance_dir):
shutil.rmtree(instance_dir)
def pre_live_migration(self, context, instance, block_device_info,
network_info, disk_info, migrate_data):
"""Preparation live migration."""
if disk_info is not None:
disk_info = jsonutils.loads(disk_info)
LOG.debug('migrate_data in pre_live_migration: %s', migrate_data,
instance=instance)
is_shared_block_storage = migrate_data.is_shared_block_storage
is_shared_instance_path = migrate_data.is_shared_instance_path
is_block_migration = migrate_data.block_migration
if not is_shared_instance_path:
instance_dir = libvirt_utils.get_instance_path_at_destination(
instance, migrate_data)
if os.path.exists(instance_dir):
raise exception.DestinationDiskExists(path=instance_dir)
LOG.debug('Creating instance directory: %s', instance_dir,
instance=instance)
os.mkdir(instance_dir)
# Recreate the disk.info file and in doing so stop the
# imagebackend from recreating it incorrectly by inspecting the
# contents of each file when using the Raw backend.
if disk_info:
image_disk_info = {}
for info in disk_info:
image_file = os.path.basename(info['path'])
image_path = os.path.join(instance_dir, image_file)
image_disk_info[image_path] = info['type']
LOG.debug('Creating disk.info with the contents: %s',
image_disk_info, instance=instance)
image_disk_info_path = os.path.join(instance_dir,
'disk.info')
libvirt_utils.write_to_file(image_disk_info_path,
jsonutils.dumps(image_disk_info))
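                # For illustration (path assumed), disk.info ends up holding
                # something like:
                #   {"/var/lib/nova/instances/<uuid>/disk": "qcow2"}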
if not is_shared_block_storage:
# Ensure images and backing files are present.
LOG.debug('Checking to make sure images and backing files are '
'present before live migration.', instance=instance)
self._create_images_and_backing(
context, instance, instance_dir, disk_info,
fallback_from_host=instance.host)
if (configdrive.required_by(instance) and
CONF.config_drive_format == 'iso9660'):
# NOTE(pkoniszewski): Due to a bug in libvirt iso config
# drive needs to be copied to destination prior to
# migration when instance path is not shared and block
# storage is not shared. Files that are already present
# on destination are excluded from a list of files that
# need to be copied to destination. If we don't do that
# live migration will fail on copying iso config drive to
# destination and writing to read-only device.
# Please see bug/1246201 for more details.
src = "%s:%s/disk.config" % (instance.host, instance_dir)
self._remotefs.copy_file(src, instance_dir)
if not is_block_migration:
# NOTE(angdraug): when block storage is shared between source
# and destination and instance path isn't (e.g. volume backed
# or rbd backed instance), instance path on destination has to
# be prepared
# Required by Quobyte CI
self._ensure_console_log_for_instance(instance)
# if image has kernel and ramdisk, just download
# following normal way.
self._fetch_instance_kernel_ramdisk(context, instance)
# Establishing connection to volume server.
block_device_mapping = driver.block_device_info_get_mapping(
block_device_info)
if len(block_device_mapping):
LOG.debug('Connecting volumes before live migration.',
instance=instance)
for bdm in block_device_mapping:
connection_info = bdm['connection_info']
disk_info = blockinfo.get_info_from_bdm(
instance, CONF.libvirt.virt_type,
instance.image_meta, bdm)
self._connect_volume(connection_info, disk_info)
# We call plug_vifs before the compute manager calls
# ensure_filtering_rules_for_instance, to ensure bridge is set up
        # A retry is necessary because requests arrive continuously and
        # concurrent requests to iptables make it complain.
LOG.debug('Plugging VIFs before live migration.', instance=instance)
max_retry = CONF.live_migration_retry_count
for cnt in range(max_retry):
try:
self.plug_vifs(instance, network_info)
break
except processutils.ProcessExecutionError:
if cnt == max_retry - 1:
raise
else:
LOG.warning(_LW('plug_vifs() failed %(cnt)d. Retry up to '
'%(max_retry)d.'),
{'cnt': cnt,
'max_retry': max_retry},
instance=instance)
greenthread.sleep(1)
# Store vncserver_listen and latest disk device info
if not migrate_data:
migrate_data = objects.LibvirtLiveMigrateData(bdms=[])
else:
migrate_data.bdms = []
# Store live_migration_inbound_addr
migrate_data.target_connect_addr = \
CONF.libvirt.live_migration_inbound_addr
migrate_data.supported_perf_events = self._supported_perf_events
migrate_data.serial_listen_ports = []
if CONF.serial_console.enabled:
num_ports = hardware.get_number_of_serial_ports(
instance.flavor, instance.image_meta)
for port in six.moves.range(num_ports):
migrate_data.serial_listen_ports.append(
serial_console.acquire_port(
migrate_data.serial_listen_addr))
for vol in block_device_mapping:
connection_info = vol['connection_info']
if connection_info.get('serial'):
disk_info = blockinfo.get_info_from_bdm(
instance, CONF.libvirt.virt_type,
instance.image_meta, vol)
bdmi = objects.LibvirtLiveMigrateBDMInfo()
bdmi.serial = connection_info['serial']
bdmi.connection_info = connection_info
bdmi.bus = disk_info['bus']
bdmi.dev = disk_info['dev']
bdmi.type = disk_info['type']
bdmi.format = disk_info.get('format')
bdmi.boot_index = disk_info.get('boot_index')
migrate_data.bdms.append(bdmi)
return migrate_data
def _try_fetch_image_cache(self, image, fetch_func, context, filename,
image_id, instance, size,
fallback_from_host=None):
try:
image.cache(fetch_func=fetch_func,
context=context,
filename=filename,
image_id=image_id,
size=size)
except exception.ImageNotFound:
if not fallback_from_host:
raise
LOG.debug("Image %(image_id)s doesn't exist anymore "
"on image service, attempting to copy "
"image from %(host)s",
{'image_id': image_id, 'host': fallback_from_host},
instance=instance)
def copy_from_host(target):
libvirt_utils.copy_image(src=target,
dest=target,
host=fallback_from_host,
receive=True)
image.cache(fetch_func=copy_from_host,
filename=filename)
def _create_images_and_backing(self, context, instance, instance_dir,
disk_info, fallback_from_host=None):
""":param context: security context
:param instance:
nova.db.sqlalchemy.models.Instance object
instance object that is migrated.
:param instance_dir:
instance path to use, calculated externally to handle block
migrating an instance with an old style instance path
:param disk_info:
disk info specified in _get_instance_disk_info (list of dicts)
:param fallback_from_host:
host where we can retrieve images if the glance images are
not available.
"""
# Virtuozzo containers don't use backing file
if (CONF.libvirt.virt_type == "parallels" and
instance.vm_mode == fields.VMMode.EXE):
return
if not disk_info:
disk_info = []
for info in disk_info:
base = os.path.basename(info['path'])
# Get image type and create empty disk image, and
# create backing file in case of qcow2.
instance_disk = os.path.join(instance_dir, base)
if not info['backing_file'] and not os.path.exists(instance_disk):
libvirt_utils.create_image(info['type'], instance_disk,
info['virt_disk_size'])
elif info['backing_file']:
                # Creating the backing file follows the same path as
                # spawning an instance.
cache_name = os.path.basename(info['backing_file'])
disk = self.image_backend.by_name(instance, instance_disk,
CONF.libvirt.images_type)
if cache_name.startswith('ephemeral'):
# The argument 'size' is used by image.cache to
# validate disk size retrieved from cache against
# the instance disk size (should always return OK)
# and ephemeral_size is used by _create_ephemeral
# to build the image if the disk is not already
# cached.
disk.cache(
fetch_func=self._create_ephemeral,
fs_label=cache_name,
os_type=instance.os_type,
filename=cache_name,
size=info['virt_disk_size'],
ephemeral_size=info['virt_disk_size'] / units.Gi)
elif cache_name.startswith('swap'):
inst_type = instance.get_flavor()
swap_mb = inst_type.swap
disk.cache(fetch_func=self._create_swap,
filename="swap_%s" % swap_mb,
size=swap_mb * units.Mi,
swap_mb=swap_mb)
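                    # e.g. (flavor assumed) swap=512 caches a 512 MiB file
                    # named 'swap_512' that is reused by instances with the
                    # same swap size.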
else:
self._try_fetch_image_cache(disk,
libvirt_utils.fetch_image,
context, cache_name,
instance.image_ref,
instance,
info['virt_disk_size'],
fallback_from_host)
# if disk has kernel and ramdisk, just download
# following normal way.
self._fetch_instance_kernel_ramdisk(
context, instance, fallback_from_host=fallback_from_host)
def post_live_migration(self, context, instance, block_device_info,
migrate_data=None):
# Disconnect from volume server
block_device_mapping = driver.block_device_info_get_mapping(
block_device_info)
connector = self.get_volume_connector(instance)
volume_api = self._volume_api
for vol in block_device_mapping:
# Retrieve connection info from Cinder's initialize_connection API.
# The info returned will be accurate for the source server.
volume_id = vol['connection_info']['serial']
connection_info = volume_api.initialize_connection(context,
volume_id,
connector)
# TODO(leeantho) The following multipath_id logic is temporary
# and will be removed in the future once os-brick is updated
# to handle multipath for drivers in a more efficient way.
# For now this logic is needed to ensure the connection info
# data is correct.
# Pull out multipath_id from the bdm information. The
# multipath_id can be placed into the connection info
# because it is based off of the volume and will be the
# same on the source and destination hosts.
if 'multipath_id' in vol['connection_info']['data']:
multipath_id = vol['connection_info']['data']['multipath_id']
connection_info['data']['multipath_id'] = multipath_id
disk_dev = vol['mount_device'].rpartition("/")[2]
self._disconnect_volume(connection_info, disk_dev)
def post_live_migration_at_source(self, context, instance, network_info):
"""Unplug VIFs from networks at source.
:param context: security context
:param instance: instance object reference
:param network_info: instance network information
"""
self.unplug_vifs(instance, network_info)
def post_live_migration_at_destination(self, context,
instance,
network_info,
block_migration=False,
block_device_info=None):
"""Post operation of live migration at destination host.
:param context: security context
:param instance:
nova.db.sqlalchemy.models.Instance object
instance object that is migrated.
:param network_info: instance network information
:param block_migration: if true, post operation of block_migration.
"""
guest = self._host.get_guest(instance)
# TODO(sahid): In Ocata we have added the migration flag
# VIR_MIGRATE_PERSIST_DEST to libvirt, which means that the
# guest XML is going to be set in libvirtd on destination node
        # automatically. However we do not remove that part until P*
        # because, during an upgrade, an instance migrating from a node
        # running Newton still needs this code to set the guest XML in
        # libvirtd on the destination node.
# Make sure we define the migrated instance in libvirt
xml = guest.get_xml_desc()
self._host.write_instance_config(xml)
def _get_instance_disk_info(self, instance_name, xml,
block_device_info=None):
"""Get the non-volume disk information from the domain xml
:param str instance_name: the name of the instance (domain)
:param str xml: the libvirt domain xml for the instance
:param dict block_device_info: block device info for BDMs
:returns disk_info: list of dicts with keys:
* 'type': the disk type (str)
* 'path': the disk path (str)
* 'virt_disk_size': the virtual disk size (int)
* 'backing_file': backing file of a disk image (str)
* 'disk_size': physical disk size (int)
* 'over_committed_disk_size': virt_disk_size - disk_size or 0
"""
block_device_mapping = driver.block_device_info_get_mapping(
block_device_info)
volume_devices = set()
for vol in block_device_mapping:
disk_dev = vol['mount_device'].rpartition("/")[2]
volume_devices.add(disk_dev)
disk_info = []
doc = etree.fromstring(xml)
def find_nodes(doc, device_type):
return (doc.findall('.//devices/%s' % device_type),
doc.findall('.//devices/%s/source' % device_type),
doc.findall('.//devices/%s/driver' % device_type),
doc.findall('.//devices/%s/target' % device_type))
if (CONF.libvirt.virt_type == 'parallels' and
doc.find('os/type').text == fields.VMMode.EXE):
node_type = 'filesystem'
else:
node_type = 'disk'
(disk_nodes, path_nodes,
driver_nodes, target_nodes) = find_nodes(doc, node_type)
for cnt, path_node in enumerate(path_nodes):
disk_type = disk_nodes[cnt].get('type')
path = path_node.get('file') or path_node.get('dev')
if (node_type == 'filesystem'):
target = target_nodes[cnt].attrib['dir']
else:
target = target_nodes[cnt].attrib['dev']
if not path:
LOG.debug('skipping disk for %s as it does not have a path',
instance_name)
continue
if disk_type not in ['file', 'block']:
                LOG.debug('skipping disk %s because it looks like a volume',
                          path)
continue
if target in volume_devices:
LOG.debug('skipping disk %(path)s (%(target)s) as it is a '
'volume', {'path': path, 'target': target})
continue
# get the real disk size or
# raise a localized error if image is unavailable
if disk_type == 'file':
if driver_nodes[cnt].get('type') == 'ploop':
dk_size = 0
for dirpath, dirnames, filenames in os.walk(path):
for f in filenames:
fp = os.path.join(dirpath, f)
dk_size += os.path.getsize(fp)
else:
dk_size = int(os.path.getsize(path))
elif disk_type == 'block' and block_device_info:
dk_size = lvm.get_volume_size(path)
else:
LOG.debug('skipping disk %(path)s (%(target)s) - unable to '
'determine if volume',
{'path': path, 'target': target})
continue
disk_type = driver_nodes[cnt].get('type')
if disk_type in ("qcow2", "ploop"):
backing_file = libvirt_utils.get_disk_backing_file(path)
virt_size = disk_api.get_disk_size(path)
over_commit_size = int(virt_size) - dk_size
else:
backing_file = ""
virt_size = dk_size
over_commit_size = 0
disk_info.append({'type': disk_type,
'path': path,
'virt_disk_size': virt_size,
'backing_file': backing_file,
'disk_size': dk_size,
'over_committed_disk_size': over_commit_size})
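            # A typical appended entry (values assumed) looks like:
            #   {'type': 'qcow2',
            #    'path': '/var/lib/nova/instances/<uuid>/disk',
            #    'virt_disk_size': 21474836480,
            #    'backing_file': '<cache name>',
            #    'disk_size': 1610612736,
            #    'over_committed_disk_size': 19864223744}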
return disk_info
def get_instance_disk_info(self, instance,
block_device_info=None):
try:
guest = self._host.get_guest(instance)
xml = guest.get_xml_desc()
except libvirt.libvirtError as ex:
error_code = ex.get_error_code()
LOG.warning(_LW('Error from libvirt while getting description of '
'%(instance_name)s: [Error Code %(error_code)s] '
'%(ex)s'),
{'instance_name': instance.name,
'error_code': error_code,
'ex': ex},
instance=instance)
raise exception.InstanceNotFound(instance_id=instance.uuid)
return jsonutils.dumps(
self._get_instance_disk_info(instance.name, xml,
block_device_info))
def _get_disk_over_committed_size_total(self):
"""Return total over committed disk size for all instances."""
        # Disk size that all instances use: virtual_size - disk_size
disk_over_committed_size = 0
instance_domains = self._host.list_instance_domains(only_running=False)
if not instance_domains:
return disk_over_committed_size
# Get all instance uuids
instance_uuids = [dom.UUIDString() for dom in instance_domains]
ctx = nova_context.get_admin_context()
# Get instance object list by uuid filter
filters = {'uuid': instance_uuids}
# NOTE(ankit): objects.InstanceList.get_by_filters method is
# getting called twice one is here and another in the
# _update_available_resource method of resource_tracker. Since
# _update_available_resource method is synchronized, there is a
# possibility the instances list retrieved here to calculate
# disk_over_committed_size would differ to the list you would get
# in _update_available_resource method for calculating usages based
# on instance utilization.
local_instance_list = objects.InstanceList.get_by_filters(
ctx, filters, use_slave=True)
# Convert instance list to dictionary with instance uuid as key.
local_instances = {inst.uuid: inst for inst in local_instance_list}
# Get bdms by instance uuids
bdms = objects.BlockDeviceMappingList.bdms_by_instance_uuid(
ctx, instance_uuids)
for dom in instance_domains:
try:
guest = libvirt_guest.Guest(dom)
xml = guest.get_xml_desc()
block_device_info = None
if guest.uuid in local_instances \
and (bdms and guest.uuid in bdms):
# Get block device info for instance
block_device_info = driver.get_block_device_info(
local_instances[guest.uuid], bdms[guest.uuid])
disk_infos = self._get_instance_disk_info(guest.name, xml,
block_device_info=block_device_info)
if not disk_infos:
continue
for info in disk_infos:
disk_over_committed_size += int(
info['over_committed_disk_size'])
except libvirt.libvirtError as ex:
error_code = ex.get_error_code()
LOG.warning(_LW(
'Error from libvirt while getting description of '
'%(instance_name)s: [Error Code %(error_code)s] %(ex)s'
), {'instance_name': guest.name,
'error_code': error_code,
'ex': ex})
except OSError as e:
if e.errno in (errno.ENOENT, errno.ESTALE):
LOG.warning(_LW('Periodic task is updating the host stat, '
'it is trying to get disk %(i_name)s, '
'but disk file was removed by concurrent '
'operations such as resize.'),
{'i_name': guest.name})
elif e.errno == errno.EACCES:
LOG.warning(_LW('Periodic task is updating the host stat, '
'it is trying to get disk %(i_name)s, '
'but access is denied. It is most likely '
'due to a VM that exists on the compute '
'node but is not managed by Nova.'),
{'i_name': guest.name})
else:
raise
except exception.VolumeBDMPathNotFound as e:
LOG.warning(_LW('Periodic task is updating the host stats, '
'it is trying to get disk info for %(i_name)s, '
'but the backing volume block device was removed '
'by concurrent operations such as resize. '
'Error: %(error)s'),
{'i_name': guest.name,
'error': e})
# NOTE(gtt116): give other tasks a chance.
greenthread.sleep(0)
return disk_over_committed_size
def unfilter_instance(self, instance, network_info):
"""See comments of same method in firewall_driver."""
self.firewall_driver.unfilter_instance(instance,
network_info=network_info)
def get_available_nodes(self, refresh=False):
return [self._host.get_hostname()]
def get_host_cpu_stats(self):
"""Return the current CPU state of the host."""
return self._host.get_cpu_stats()
def get_host_uptime(self):
"""Returns the result of calling "uptime"."""
out, err = utils.execute('env', 'LANG=C', 'uptime')
return out
def manage_image_cache(self, context, all_instances):
"""Manage the local cache of images."""
self.image_cache_manager.update(context, all_instances)
def _cleanup_remote_migration(self, dest, inst_base, inst_base_resize,
shared_storage=False):
"""Used only for cleanup in case migrate_disk_and_power_off fails."""
try:
if os.path.exists(inst_base_resize):
utils.execute('rm', '-rf', inst_base)
utils.execute('mv', inst_base_resize, inst_base)
if not shared_storage:
self._remotefs.remove_dir(dest, inst_base)
except Exception:
pass
def _is_storage_shared_with(self, dest, inst_base):
# NOTE (rmk): There are two methods of determining whether we are
# on the same filesystem: the source and dest IP are the
# same, or we create a file on the dest system via SSH
# and check whether the source system can also see it.
# NOTE (drwahl): Actually, there is a 3rd way: if images_type is rbd,
# it will always be shared storage
if CONF.libvirt.images_type == 'rbd':
return True
shared_storage = (dest == self.get_host_ip_addr())
if not shared_storage:
tmp_file = uuid.uuid4().hex + '.tmp'
tmp_path = os.path.join(inst_base, tmp_file)
try:
self._remotefs.create_file(dest, tmp_path)
if os.path.exists(tmp_path):
shared_storage = True
os.unlink(tmp_path)
else:
self._remotefs.remove_file(dest, tmp_path)
except Exception:
pass
return shared_storage
def migrate_disk_and_power_off(self, context, instance, dest,
flavor, network_info,
block_device_info=None,
timeout=0, retry_interval=0):
LOG.debug("Starting migrate_disk_and_power_off",
instance=instance)
ephemerals = driver.block_device_info_get_ephemerals(block_device_info)
# get_bdm_ephemeral_disk_size() will return 0 if the new
# instance's requested block device mapping contain no
# ephemeral devices. However, we still want to check if
# the original instance's ephemeral_gb property was set and
# ensure that the new requested flavor ephemeral size is greater
eph_size = (block_device.get_bdm_ephemeral_disk_size(ephemerals) or
instance.flavor.ephemeral_gb)
# Checks if the migration needs a disk resize down.
root_down = flavor.root_gb < instance.flavor.root_gb
ephemeral_down = flavor.ephemeral_gb < eph_size
booted_from_volume = self._is_booted_from_volume(block_device_info)
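        # e.g. (flavors assumed) resizing an image-backed instance from
        # root_gb=20 down to root_gb=10 sets root_down=True and triggers the
        # rollback below; a volume-backed instance skips the root comparison.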
if (root_down and not booted_from_volume) or ephemeral_down:
reason = _("Unable to resize disk down.")
raise exception.InstanceFaultRollback(
exception.ResizeError(reason=reason))
# NOTE(dgenin): Migration is not implemented for LVM backed instances.
if CONF.libvirt.images_type == 'lvm' and not booted_from_volume:
reason = _("Migration is not supported for LVM backed instances")
raise exception.InstanceFaultRollback(
exception.MigrationPreCheckError(reason=reason))
# copy disks to destination
# rename instance dir to +_resize at first for using
# shared storage for instance dir (eg. NFS).
inst_base = libvirt_utils.get_instance_path(instance)
inst_base_resize = inst_base + "_resize"
shared_storage = self._is_storage_shared_with(dest, inst_base)
# try to create the directory on the remote compute node
# if this fails we pass the exception up the stack so we can catch
# failures here earlier
if not shared_storage:
try:
self._remotefs.create_dir(dest, inst_base)
except processutils.ProcessExecutionError as e:
reason = _("not able to execute ssh command: %s") % e
raise exception.InstanceFaultRollback(
exception.ResizeError(reason=reason))
self.power_off(instance, timeout, retry_interval)
block_device_mapping = driver.block_device_info_get_mapping(
block_device_info)
for vol in block_device_mapping:
connection_info = vol['connection_info']
disk_dev = vol['mount_device'].rpartition("/")[2]
self._disconnect_volume(connection_info, disk_dev)
disk_info_text = self.get_instance_disk_info(
instance, block_device_info=block_device_info)
disk_info = jsonutils.loads(disk_info_text)
try:
utils.execute('mv', inst_base, inst_base_resize)
# if we are migrating the instance with shared storage then
# create the directory. If it is a remote node the directory
# has already been created
if shared_storage:
dest = None
utils.execute('mkdir', '-p', inst_base)
on_execute = lambda process: \
self.job_tracker.add_job(instance, process.pid)
on_completion = lambda process: \
self.job_tracker.remove_job(instance, process.pid)
for info in disk_info:
# assume inst_base == dirname(info['path'])
img_path = info['path']
fname = os.path.basename(img_path)
from_path = os.path.join(inst_base_resize, fname)
# We will not copy over the swap disk here, and rely on
# finish_migration to re-create it for us. This is ok because
# the OS is shut down, and as recreating a swap disk is very
# cheap it is more efficient than copying either locally or
# over the network. This also means we don't have to resize it.
if fname == 'disk.swap':
continue
compression = info['type'] not in NO_COMPRESSION_TYPES
libvirt_utils.copy_image(from_path, img_path, host=dest,
on_execute=on_execute,
on_completion=on_completion,
compression=compression)
# Ensure disk.info is written to the new path to avoid disks being
# reinspected and potentially changing format.
src_disk_info_path = os.path.join(inst_base_resize, 'disk.info')
if os.path.exists(src_disk_info_path):
dst_disk_info_path = os.path.join(inst_base, 'disk.info')
libvirt_utils.copy_image(src_disk_info_path,
dst_disk_info_path,
host=dest, on_execute=on_execute,
on_completion=on_completion)
except Exception:
with excutils.save_and_reraise_exception():
self._cleanup_remote_migration(dest, inst_base,
inst_base_resize,
shared_storage)
return disk_info_text
def _wait_for_running(self, instance):
state = self.get_info(instance).state
if state == power_state.RUNNING:
LOG.info(_LI("Instance running successfully."), instance=instance)
raise loopingcall.LoopingCallDone()
@staticmethod
def _disk_raw_to_qcow2(path):
"""Converts a raw disk to qcow2."""
path_qcow = path + '_qcow'
utils.execute('qemu-img', 'convert', '-f', 'raw',
'-O', 'qcow2', path, path_qcow)
utils.execute('mv', path_qcow, path)
@staticmethod
def _disk_qcow2_to_raw(path):
"""Converts a qcow2 disk to raw."""
path_raw = path + '_raw'
utils.execute('qemu-img', 'convert', '-f', 'qcow2',
'-O', 'raw', path, path_raw)
utils.execute('mv', path_raw, path)
def finish_migration(self, context, migration, instance, disk_info,
network_info, image_meta, resize_instance,
block_device_info=None, power_on=True):
LOG.debug("Starting finish_migration", instance=instance)
block_disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance,
image_meta,
block_device_info)
# assume _create_image does nothing if a target file exists.
# NOTE: This has the intended side-effect of fetching a missing
# backing file.
self._create_image(context, instance, block_disk_info['mapping'],
block_device_info=block_device_info,
ignore_bdi_for_swap=True,
fallback_from_host=migration.source_compute)
# Required by Quobyte CI
self._ensure_console_log_for_instance(instance)
gen_confdrive = functools.partial(
self._create_configdrive, context, instance,
InjectionInfo(admin_pass=None, network_info=network_info,
files=None))
# Convert raw disks to qcow2 if migrating to host which uses
# qcow2 from host which uses raw.
disk_info = jsonutils.loads(disk_info)
for info in disk_info:
path = info['path']
disk_name = os.path.basename(path)
# NOTE(mdbooth): The code below looks wrong, but is actually
# required to prevent a security hole when migrating from a host
# with use_cow_images=False to one with use_cow_images=True.
# Imagebackend uses use_cow_images to select between the
# atrociously-named-Raw and Qcow2 backends. The Qcow2 backend
# writes to disk.info, but does not read it as it assumes qcow2.
# Therefore if we don't convert raw to qcow2 here, a raw disk will
# be incorrectly assumed to be qcow2, which is a severe security
# flaw. The reverse is not true, because the atrociously-named-Raw
# backend supports both qcow2 and raw disks, and will choose
# appropriately between them as long as disk.info exists and is
# correctly populated, which it is because Qcow2 writes to
# disk.info.
#
# In general, we do not yet support format conversion during
# migration. For example:
# * Converting from use_cow_images=True to use_cow_images=False
# isn't handled. This isn't a security bug, but is almost
# certainly buggy in other cases, as the 'Raw' backend doesn't
# expect a backing file.
# * Converting to/from lvm and rbd backends is not supported.
#
# This behaviour is inconsistent, and therefore undesirable for
# users. It is tightly-coupled to implementation quirks of 2
# out of 5 backends in imagebackend and defends against a severe
# security flaw which is not at all obvious without deep analysis,
# and is therefore undesirable to developers. We should aim to
# remove it. This will not be possible, though, until we can
# represent the storage layout of a specific instance
# independent of the default configuration of the local compute
# host.
# Config disks are hard-coded to be raw even when
            # use_cow_images=True (see _get_disk_config_image_type), so don't
# need to be converted.
if (disk_name != 'disk.config' and
info['type'] == 'raw' and CONF.use_cow_images):
self._disk_raw_to_qcow2(info['path'])
xml = self._get_guest_xml(context, instance, network_info,
block_disk_info, image_meta,
block_device_info=block_device_info)
# NOTE(mriedem): vifs_already_plugged=True here, regardless of whether
# or not we've migrated to another host, because we unplug VIFs locally
# and the status change in the port might go undetected by the neutron
# L2 agent (or neutron server) so neutron may not know that the VIF was
# unplugged in the first place and never send an event.
guest = self._create_domain_and_network(context, xml, instance,
network_info,
block_disk_info,
block_device_info=block_device_info,
power_on=power_on,
vifs_already_plugged=True,
post_xml_callback=gen_confdrive)
if power_on:
timer = loopingcall.FixedIntervalLoopingCall(
self._wait_for_running,
instance)
timer.start(interval=0.5).wait()
# Sync guest time after migration.
guest.sync_guest_time()
LOG.debug("finish_migration finished successfully.", instance=instance)
def _cleanup_failed_migration(self, inst_base):
"""Make sure that a failed migrate doesn't prevent us from rolling
back in a revert.
"""
try:
shutil.rmtree(inst_base)
except OSError as e:
if e.errno != errno.ENOENT:
raise
def finish_revert_migration(self, context, instance, network_info,
block_device_info=None, power_on=True):
LOG.debug("Starting finish_revert_migration",
instance=instance)
inst_base = libvirt_utils.get_instance_path(instance)
inst_base_resize = inst_base + "_resize"
# NOTE(danms): if we're recovering from a failed migration,
# make sure we don't have a left-over same-host base directory
# that would conflict. Also, don't fail on the rename if the
# failure happened early.
if os.path.exists(inst_base_resize):
self._cleanup_failed_migration(inst_base)
utils.execute('mv', inst_base_resize, inst_base)
root_disk = self.image_backend.by_name(instance, 'disk')
# Once we rollback, the snapshot is no longer needed, so remove it
# TODO(nic): Remove the try/except/finally in a future release
# To avoid any upgrade issues surrounding instances being in pending
# resize state when the software is updated, this portion of the
# method logs exceptions rather than failing on them. Once it can be
# reasonably assumed that no such instances exist in the wild
# anymore, the try/except/finally should be removed,
# and ignore_errors should be set back to False (the default) so
# that problems throw errors, like they should.
if root_disk.exists():
try:
root_disk.rollback_to_snap(libvirt_utils.RESIZE_SNAPSHOT_NAME)
except exception.SnapshotNotFound:
LOG.warning(_LW("Failed to rollback snapshot (%s)"),
libvirt_utils.RESIZE_SNAPSHOT_NAME)
finally:
root_disk.remove_snap(libvirt_utils.RESIZE_SNAPSHOT_NAME,
ignore_errors=True)
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance,
instance.image_meta,
block_device_info)
xml = self._get_guest_xml(context, instance, network_info, disk_info,
instance.image_meta,
block_device_info=block_device_info)
self._create_domain_and_network(context, xml, instance, network_info,
disk_info,
block_device_info=block_device_info,
power_on=power_on,
vifs_already_plugged=True)
if power_on:
timer = loopingcall.FixedIntervalLoopingCall(
self._wait_for_running,
instance)
timer.start(interval=0.5).wait()
LOG.debug("finish_revert_migration finished successfully.",
instance=instance)
def confirm_migration(self, context, migration, instance, network_info):
"""Confirms a resize, destroying the source VM."""
self._cleanup_resize(instance, network_info)
@staticmethod
def _get_io_devices(xml_doc):
"""get the list of io devices from the xml document."""
result = {"volumes": [], "ifaces": []}
try:
doc = etree.fromstring(xml_doc)
except Exception:
return result
blocks = [('./devices/disk', 'volumes'),
('./devices/interface', 'ifaces')]
for block, key in blocks:
section = doc.findall(block)
for node in section:
for child in node.getchildren():
if child.tag == 'target' and child.get('dev'):
result[key].append(child.get('dev'))
return result
def get_diagnostics(self, instance):
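        """Return a flat dict of per-vCPU times, per-disk and per-interface
        I/O counters, and memory statistics for the instance.
        """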
guest = self._host.get_guest(instance)
# TODO(sahid): We are converting all calls from a
# virDomain object to use nova.virt.libvirt.Guest.
# We should be able to remove domain at the end.
domain = guest._domain
output = {}
# get cpu time, might launch an exception if the method
# is not supported by the underlying hypervisor being
# used by libvirt
try:
for vcpu in guest.get_vcpus_info():
output["cpu" + str(vcpu.id) + "_time"] = vcpu.time
except libvirt.libvirtError:
pass
# get io status
xml = guest.get_xml_desc()
dom_io = LibvirtDriver._get_io_devices(xml)
for guest_disk in dom_io["volumes"]:
try:
# blockStats might launch an exception if the method
# is not supported by the underlying hypervisor being
# used by libvirt
stats = domain.blockStats(guest_disk)
output[guest_disk + "_read_req"] = stats[0]
output[guest_disk + "_read"] = stats[1]
output[guest_disk + "_write_req"] = stats[2]
output[guest_disk + "_write"] = stats[3]
output[guest_disk + "_errors"] = stats[4]
except libvirt.libvirtError:
pass
for interface in dom_io["ifaces"]:
try:
# interfaceStats might launch an exception if the method
# is not supported by the underlying hypervisor being
# used by libvirt
stats = domain.interfaceStats(interface)
output[interface + "_rx"] = stats[0]
output[interface + "_rx_packets"] = stats[1]
output[interface + "_rx_errors"] = stats[2]
output[interface + "_rx_drop"] = stats[3]
output[interface + "_tx"] = stats[4]
output[interface + "_tx_packets"] = stats[5]
output[interface + "_tx_errors"] = stats[6]
output[interface + "_tx_drop"] = stats[7]
except libvirt.libvirtError:
pass
output["memory"] = domain.maxMemory()
# memoryStats might launch an exception if the method
# is not supported by the underlying hypervisor being
# used by libvirt
try:
mem = domain.memoryStats()
for key in mem.keys():
output["memory-" + key] = mem[key]
except (libvirt.libvirtError, AttributeError):
pass
return output
def get_instance_diagnostics(self, instance):
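        """Return a structured Diagnostics object covering power state,
        uptime, memory, vCPUs, disks and network interfaces.
        """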
guest = self._host.get_guest(instance)
# TODO(sahid): We are converting all calls from a
# virDomain object to use nova.virt.libvirt.Guest.
# We should be able to remove domain at the end.
domain = guest._domain
xml = guest.get_xml_desc()
xml_doc = etree.fromstring(xml)
# TODO(sahid): Needs to use get_info but more changes have to
# be done since a mapping STATE_MAP LIBVIRT_POWER_STATE is
# needed.
(state, max_mem, mem, num_cpu, cpu_time) = \
guest._get_domain_info(self._host)
config_drive = configdrive.required_by(instance)
launched_at = timeutils.normalize_time(instance.launched_at)
uptime = timeutils.delta_seconds(launched_at,
timeutils.utcnow())
diags = diagnostics.Diagnostics(state=power_state.STATE_MAP[state],
driver='libvirt',
config_drive=config_drive,
hypervisor_os='linux',
uptime=uptime)
diags.memory_details.maximum = max_mem / units.Mi
diags.memory_details.used = mem / units.Mi
# get cpu time, might launch an exception if the method
# is not supported by the underlying hypervisor being
# used by libvirt
try:
for vcpu in guest.get_vcpus_info():
diags.add_cpu(time=vcpu.time)
except libvirt.libvirtError:
pass
# get io status
dom_io = LibvirtDriver._get_io_devices(xml)
for guest_disk in dom_io["volumes"]:
try:
# blockStats might launch an exception if the method
# is not supported by the underlying hypervisor being
# used by libvirt
stats = domain.blockStats(guest_disk)
diags.add_disk(read_bytes=stats[1],
read_requests=stats[0],
write_bytes=stats[3],
write_requests=stats[2])
except libvirt.libvirtError:
pass
for interface in dom_io["ifaces"]:
try:
# interfaceStats might launch an exception if the method
# is not supported by the underlying hypervisor being
# used by libvirt
stats = domain.interfaceStats(interface)
diags.add_nic(rx_octets=stats[0],
rx_errors=stats[2],
rx_drop=stats[3],
rx_packets=stats[1],
tx_octets=stats[4],
tx_errors=stats[6],
tx_drop=stats[7],
tx_packets=stats[5])
except libvirt.libvirtError:
pass
# Update mac addresses of interface if stats have been reported
if diags.nic_details:
nodes = xml_doc.findall('./devices/interface/mac')
for index, node in enumerate(nodes):
diags.nic_details[index].mac_address = node.get('address')
return diags
@staticmethod
def _prepare_device_bus(dev):
"""Determines the device bus and its hypervisor assigned address
"""
bus = None
address = (dev.device_addr.format_address() if
dev.device_addr else None)
if isinstance(dev.device_addr,
vconfig.LibvirtConfigGuestDeviceAddressPCI):
bus = objects.PCIDeviceBus()
elif isinstance(dev, vconfig.LibvirtConfigGuestDisk):
if dev.target_bus == 'scsi':
bus = objects.SCSIDeviceBus()
elif dev.target_bus == 'ide':
bus = objects.IDEDeviceBus()
elif dev.target_bus == 'usb':
bus = objects.USBDeviceBus()
if address is not None and bus is not None:
bus.address = address
return bus
def _build_device_metadata(self, context, instance):
"""Builds a metadata object for instance devices, that maps the user
provided tag to the hypervisor assigned device address.
"""
def _get_device_name(bdm):
return block_device.strip_dev(bdm.device_name)
network_info = instance.info_cache.network_info
vlans_by_mac = netutils.get_cached_vifs_with_vlan(network_info)
vifs = objects.VirtualInterfaceList.get_by_instance_uuid(context,
instance.uuid)
vifs_to_expose = {vif.address: vif for vif in vifs
if ('tag' in vif and vif.tag) or
vlans_by_mac.get(vif.address)}
# TODO(mriedem): We should be able to avoid the DB query here by using
# block_device_info['block_device_mapping'] which is passed into most
# methods that call this function.
bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
context, instance.uuid)
tagged_bdms = {_get_device_name(bdm): bdm for bdm in bdms if bdm.tag}
devices = []
guest = self._host.get_guest(instance)
xml = guest.get_xml_desc()
xml_dom = etree.fromstring(xml)
guest_config = vconfig.LibvirtConfigGuest()
guest_config.parse_dom(xml_dom)
for dev in guest_config.devices:
# Build network interfaces related metadata
if isinstance(dev, vconfig.LibvirtConfigGuestInterface):
vif = vifs_to_expose.get(dev.mac_addr)
if not vif:
continue
bus = self._prepare_device_bus(dev)
device = objects.NetworkInterfaceMetadata(mac=vif.address)
if 'tag' in vif and vif.tag:
device.tags = [vif.tag]
if bus:
device.bus = bus
vlan = vlans_by_mac.get(vif.address)
if vlan:
device.vlan = int(vlan)
devices.append(device)
# Build disks related metadata
if isinstance(dev, vconfig.LibvirtConfigGuestDisk):
bdm = tagged_bdms.get(dev.target_dev)
if not bdm:
continue
bus = self._prepare_device_bus(dev)
device = objects.DiskMetadata(tags=[bdm.tag])
if bus:
device.bus = bus
devices.append(device)
if devices:
dev_meta = objects.InstanceDeviceMetadata(devices=devices)
return dev_meta
def instance_on_disk(self, instance):
# ensure directories exist and are writable
instance_path = libvirt_utils.get_instance_path(instance)
LOG.debug('Checking instance files accessibility %s', instance_path,
instance=instance)
shared_instance_path = os.access(instance_path, os.W_OK)
# NOTE(flwang): For shared block storage scenario, the file system is
# not really shared by the two hosts, but the volume of evacuated
# instance is reachable.
shared_block_storage = (self.image_backend.backend().
is_shared_block_storage())
return shared_instance_path or shared_block_storage
def inject_network_info(self, instance, nw_info):
self.firewall_driver.setup_basic_filtering(instance, nw_info)
def delete_instance_files(self, instance):
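        """Atomically rename the instance directory (and any in-progress
        resize directory) out of the way and delete it, returning True once
        nothing is left on disk.
        """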
target = libvirt_utils.get_instance_path(instance)
# A resize may be in progress
target_resize = target + '_resize'
        # Other threads may attempt to rename the path, so we rename the path
        # to target + '_del' (because the rename is atomic) and iterate
# twice in the unlikely event that a concurrent rename occurs between
# the two rename attempts in this method. In general this method
# should be fairly thread-safe without these additional checks, since
# other operations involving renames are not permitted when the task
# state is not None and the task state should be set to something
# other than None by the time this method is invoked.
target_del = target + '_del'
for i in range(2):
try:
utils.execute('mv', target, target_del)
break
except Exception:
pass
try:
utils.execute('mv', target_resize, target_del)
break
except Exception:
pass
# Either the target or target_resize path may still exist if all
# rename attempts failed.
remaining_path = None
for p in (target, target_resize):
if os.path.exists(p):
remaining_path = p
break
# A previous delete attempt may have been interrupted, so target_del
# may exist even if all rename attempts during the present method
# invocation failed due to the absence of both target and
# target_resize.
if not remaining_path and os.path.exists(target_del):
self.job_tracker.terminate_jobs(instance)
LOG.info(_LI('Deleting instance files %s'), target_del,
instance=instance)
remaining_path = target_del
try:
shutil.rmtree(target_del)
except OSError as e:
LOG.error(_LE('Failed to cleanup directory %(target)s: '
'%(e)s'), {'target': target_del, 'e': e},
instance=instance)
        # It is possible that the delete failed; if so, don't mark the
        # instance as cleaned.
if remaining_path and os.path.exists(remaining_path):
LOG.info(_LI('Deletion of %s failed'), remaining_path,
instance=instance)
return False
LOG.info(_LI('Deletion of %s complete'), target_del, instance=instance)
return True
@property
def need_legacy_block_device_info(self):
return False
def default_root_device_name(self, instance, image_meta, root_bdm):
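        """Return the default root device name (e.g. /dev/vda) for the
        instance, based on the disk and cdrom buses for this virt type.
        """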
disk_bus = blockinfo.get_disk_bus_for_device_type(
instance, CONF.libvirt.virt_type, image_meta, "disk")
cdrom_bus = blockinfo.get_disk_bus_for_device_type(
instance, CONF.libvirt.virt_type, image_meta, "cdrom")
root_info = blockinfo.get_root_info(
instance, CONF.libvirt.virt_type, image_meta,
root_bdm, disk_bus, cdrom_bus)
return block_device.prepend_dev(root_info['dev'])
def default_device_names_for_instance(self, instance, root_device_name,
*block_device_lists):
block_device_mapping = list(itertools.chain(*block_device_lists))
# NOTE(ndipanov): Null out the device names so that blockinfo code
# will assign them
for bdm in block_device_mapping:
if bdm.device_name is not None:
LOG.warning(
_LW("Ignoring supplied device name: %(device_name)s. "
"Libvirt can't honour user-supplied dev names"),
{'device_name': bdm.device_name}, instance=instance)
bdm.device_name = None
block_device_info = driver.get_block_device_info(instance,
block_device_mapping)
blockinfo.default_device_names(CONF.libvirt.virt_type,
nova_context.get_admin_context(),
instance,
block_device_info,
instance.image_meta)
def get_device_name_for_instance(self, instance, bdms, block_device_obj):
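        """Generate a device name for a new block device, ignoring any
        user-supplied name since libvirt cannot honour it.
        """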
block_device_info = driver.get_block_device_info(instance, bdms)
instance_info = blockinfo.get_disk_info(
CONF.libvirt.virt_type, instance,
instance.image_meta, block_device_info=block_device_info)
suggested_dev_name = block_device_obj.device_name
if suggested_dev_name is not None:
LOG.warning(
_LW('Ignoring supplied device name: %(suggested_dev)s'),
{'suggested_dev': suggested_dev_name}, instance=instance)
# NOTE(ndipanov): get_info_from_bdm will generate the new device name
# only when it's actually not set on the bd object
block_device_obj.device_name = None
disk_info = blockinfo.get_info_from_bdm(
instance, CONF.libvirt.virt_type, instance.image_meta,
block_device_obj, mapping=instance_info['mapping'])
return block_device.prepend_dev(disk_info['dev'])
def is_supported_fs_format(self, fs_type):
return fs_type in [disk_api.FS_FORMAT_EXT2, disk_api.FS_FORMAT_EXT3,
disk_api.FS_FORMAT_EXT4, disk_api.FS_FORMAT_XFS]
|
{
"content_hash": "08474d01db484ac07c8364eca2fe62f4",
"timestamp": "",
"source": "github",
"line_count": 7872,
"max_line_length": 119,
"avg_line_length": 45.18673780487805,
"alnum_prop": 0.5521632790756515,
"repo_name": "OpenSciViz/cloudstack",
"id": "d719bdc80f870f5c872717e503a5163973d4c10c",
"size": "356637",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "openstack/src/python/nova-libvirt/backup/driver.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "12737"
},
{
"name": "Shell",
"bytes": "55241"
}
],
"symlink_target": ""
}
|
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "flavify.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
|
{
"content_hash": "b6763cfe7e34fc25fcc50499cc01070b",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 77,
"avg_line_length": 37.285714285714285,
"alnum_prop": 0.6206896551724138,
"repo_name": "swpease/Flavify",
"id": "501e5d602114a1243300ad50df461dbc9e72f544",
"size": "805",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "manage.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2013"
},
{
"name": "HTML",
"bytes": "40382"
},
{
"name": "JavaScript",
"bytes": "5361"
},
{
"name": "Python",
"bytes": "47265"
}
],
"symlink_target": ""
}
|
import os
import tkinter as tk
import tkinter.filedialog
import tkinter.messagebox
from tkinter import ttk
from streamdownloader import thread
def set_children_padding(object, padding_x, padding_y):
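    """Apply uniform grid padding to every direct child of the given widget."""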
for child in object.winfo_children():
child.grid_configure(padx=padding_x, pady=padding_y)
class ResolutionDialog(tk.Toplevel):
CHECK_INTERVAL = 100
def __init__(self, master, url):
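        """Build a modal dialog that fetches the available streams for url in
        a background thread and lets the user pick a resolution.
        """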
tk.Toplevel.__init__(self, master)
self.url = url
self.stream = None
self.streams_thread = thread.StreamsThread(url)
self.streams_thread.start()
self.master.after(self.CHECK_INTERVAL, self.check_thread)
# Label for displaying the status of the url checking thread
self.status_label = ttk.Label(self, text="Checking URL...")
self.status_label.grid(row=0, column=0, columnspan=2)
        # Selected resolution; the available options are populated in
        # check_thread once the background streams thread has finished
        self.resolution = tk.StringVar()
self.ok_button = ttk.Button(self, text="Ok", command=self.ok)
self.ok_button.grid(row=2, column=0, sticky=(tk.W, tk.E))
self.ok_button.config(state=tk.DISABLED)
self.cancel_button = ttk.Button(self, text="Cancel",
command=self.cancel)
self.cancel_button.grid(row=2, column=1, sticky=(tk.W, tk.E))
set_children_padding(self, 5, 5)
        # Expand both columns (0 and 1) equally
self.columnconfigure(0, weight=1)
self.columnconfigure(1, weight=1)
# Expand rows
for i in range(0, 3):
self.rowconfigure(i, weight=1)
w = 300
h = 100
self.geometry("%dx%d" % (w, h))
self.grid()
# Grab focus on creation
self.grab_set()
self.focus()
def check_thread(self):
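        """Poll the background streams thread and, once it finishes, either
        report an error or populate the resolution options.
        """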
if not self.streams_thread.done:
self.master.after(self.CHECK_INTERVAL, self.check_thread)
elif self.streams_thread.plugin_error is not None:
self.status_label.config(text="Error while getting streams from "
"this URL",
foreground="red")
elif self.streams_thread.no_plugin_error is not None:
self.status_label.config(text="This website is currently not "
"supported",
foreground="red")
else:
self.streams = self.streams_thread.streams
self.resolutions = list(self.streams)
self.resolutions.sort()
default_resolution = "best"
if "best" not in self.resolutions:
default_resolution = self.resolutions[0]
self.status_label.config(text="Select resolution")
self.ok_button.config(state=tk.NORMAL)
self.options = ttk.OptionMenu(self, self.resolution,
default_resolution,
*self.resolutions)
self.options.grid(row=1, column=0, columnspan=2,
sticky=(tk.W, tk.E))
def ok(self):
self.stream = self.streams[self.resolution.get()]
self.destroy()
def cancel(self):
self.destroy()
class MainWindow(ttk.Frame):
CHECK_INTERVAL = 500
task = None
def __init__(self, master=None):
ttk.Frame.__init__(self, master)
# Handle the close event
if master is not None:
self.master = master
master.protocol("WM_DELETE_WINDOW", self.handle_close)
# The url of the stream to download
self.url = tk.StringVar()
self.url_label = ttk.Label(self, text="Stream URL:")
self.url_label.grid(row=0, column=0, sticky=tk.W)
self.url_entry = ttk.Entry(self, textvariable=self.url)
self.url_entry.grid(row=0, column=1, columnspan=2,
sticky=(tk.W, tk.E))
# The file to which the stream should be downloaded
self.file_path = tk.StringVar()
self.file_label = ttk.Label(self, text="Target file:")
self.file_label.grid(row=1, column=0, sticky=tk.W)
self.file_entry = ttk.Entry(self, textvariable=self.file_path)
self.file_entry.grid(row=1, column=1, sticky=(tk.W, tk.E))
# The button to browse the target file
self.browse_button = ttk.Button(self, text="Browse...",
command=self.browse_file)
self.browse_button.grid(row=1, column=2, sticky=(tk.W, tk.E))
self.download_button = ttk.Button(self, text="Download",
command=self.download_video)
self.download_button.grid(row=2, column=0, columnspan=3,
sticky=(tk.W, tk.E))
# Label showing download progress
self.progress_label = ttk.Label(self, text="")
self.progress_label.grid(row=3, column=0, columnspan=3)
self.cancel_button = ttk.Button(self, text="Cancel",
command=self.cancel_download)
self.cancel_button.grid(row=4, column=0, columnspan=3,
sticky=(tk.W, tk.E))
set_children_padding(self, 5, 5)
# Make column 1 expand
self.columnconfigure(1, weight=1)
# Display
self.grid(row=0, column=0, sticky=(tk.W, tk.E))
def handle_close(self):
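        """Close the window only if no download is running or the user has
        confirmed cancelling it.
        """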
if self.task is None or self.cancel_download():
self.master.destroy()
def browse_file(self):
file_path = tk.filedialog.asksaveasfilename(filetypes=[
("MP4 files", ".mp4")
])
if file_path != "":
if "." not in file_path:
file_path = file_path + ".mp4"
self.file_path.set(file_path)
def download_video(self):
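        """Validate the target file, ask for a resolution, then start the
        download thread and schedule progress polling.
        """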
file_path = os.path.expanduser(self.file_path.get())
if not os.access(os.path.dirname(file_path), os.W_OK):
tk.messagebox.showerror("Invalid file",
"Cannot write to target file")
return
dialog = ResolutionDialog(self, self.url_entry.get())
self.wait_window(dialog)
stream = dialog.stream
if stream is not None:
self.url_entry.config(state=tk.DISABLED)
self.file_entry.config(state=tk.DISABLED)
self.browse_button.config(state=tk.DISABLED)
self.download_button.config(state=tk.DISABLED)
self.cancel_button.config(state=tk.NORMAL)
self.progress_label.config(text="Downloading...")
self.thread = thread.DownloadThread(stream, self.file_entry.get())
self.thread.start()
self.task = self.after(self.CHECK_INTERVAL, self.check_download)
def check_download(self):
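        """Poll the download thread, update the progress label, and restore
        the widgets when the download completes.
        """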
total_size = self.thread.total_size
size_str = "{:.2f} MiB downloaded".format(total_size / 1024 ** 2)
if not self.thread.done:
progress_text = "Downloading... {}".format(size_str)
self.progress_label.config(text=progress_text)
self.task = self.after(self.CHECK_INTERVAL, self.check_download)
else:
progress_text = "Download complete ({})".format(size_str)
self.progress_label.config(text=progress_text)
self.task = None
self.restore_widgets()
def restore_widgets(self):
self.url_entry.config(state=tk.NORMAL)
self.file_entry.config(state=tk.NORMAL)
self.browse_button.config(state=tk.NORMAL)
self.download_button.config(state=tk.NORMAL)
self.cancel_button.config(state=tk.DISABLED)
def cancel_download(self):
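        """Pause the download and ask for confirmation; cancel it (optionally
        deleting the partial file) and return True, or resume and return
        False.
        """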
if self.task is not None:
self.thread.pause()
message = ("Your download will be cancelled. Would you like to "
"delete the file as well?")
result = tk.messagebox.askyesnocancel("Cancel download?", message)
if result is not None:
self.after_cancel(self.task)
self.task = None
self.thread.cancel()
self.thread.join()
self.progress_label.config(text="Download cancelled")
self.restore_widgets()
if result:
try:
os.remove(self.file_entry.get())
except OSError:
message = ("The file could not be deleted. You may "
"remove it manually when it is no longer "
"in use.")
tk.messagebox.showwarning("Could not delete file",
message)
return True
else:
self.thread.resume()
return False
|
{
"content_hash": "5b9a36c17ca4a42a823dddc0d2748343",
"timestamp": "",
"source": "github",
"line_count": 243,
"max_line_length": 78,
"avg_line_length": 37.748971193415635,
"alnum_prop": 0.5408263381663578,
"repo_name": "stashingpixels/streamdownloader",
"id": "caa8828c98997270b28db26393380199b446e0f6",
"size": "9173",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "streamdownloader/gui.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "15617"
}
],
"symlink_target": ""
}
|
from fdfs import Fdfs

# Open the Fdfs client, upload a local image, and print the returned result.
fs = Fdfs()
fs.open()
result = fs.upload('/home/insion/Pictures/g.jpg')
print(result)
|
{
"content_hash": "36d6bd2b86b9d522b6c57191062175a6",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 42,
"avg_line_length": 18.6,
"alnum_prop": 0.7204301075268817,
"repo_name": "ptphp/PyLib",
"id": "20d2dc5185865634d4197507ae91b7ee75a28a09",
"size": "93",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/tornado/demos/lihuashu/docs/common_bak/k.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "1523"
},
{
"name": "C++",
"bytes": "7541"
},
{
"name": "CSS",
"bytes": "625731"
},
{
"name": "JavaScript",
"bytes": "4811257"
},
{
"name": "PHP",
"bytes": "34868"
},
{
"name": "Python",
"bytes": "3824172"
},
{
"name": "Ruby",
"bytes": "322"
},
{
"name": "SQL",
"bytes": "685656"
},
{
"name": "Shell",
"bytes": "4143"
}
],
"symlink_target": ""
}
|