gt stringclasses 1 value | context stringlengths 2.49k 119k |
|---|---|
# -*- coding: utf-8 -*-
"""
Core: The core functionality for Vincent to map to Vega grammar
"""
from __future__ import (print_function, division)
import json
from string import Template
from pkg_resources import resource_string
try:
import pandas as pd
except ImportError:
pd = None
try:
import numpy as np
except ImportError:
np = None
from ._compat import str_types
def initialize_notebook():
    """Initialize the IPython notebook display elements.

    Injects the d3 / vega JavaScript libraries into the notebook page and
    fires a ``vincent_libs_loaded`` DOM event once they are all available.
    Returns the IPython display handle, or None when IPython is absent.
    """
    try:
        from IPython.core.display import display, HTML
    except ImportError:
        print("IPython Notebook could not be loaded.")
        # BUG FIX: return early.  The original fell through and crashed
        # with a NameError on display/HTML at the bottom of the function.
        return

    # Thanks to @jakevdp:
    # https://github.com/jakevdp/mpld3/blob/master/mpld3/_display.py#L85
    # NOTE(review): if d3 is already loaded the callback fires but the
    # script tag is still appended (no early return in the JS) -- matches
    # the upstream snippet; confirm before "fixing".
    load_lib = """
    function vct_load_lib(url, callback){
      if(
        typeof d3 !== 'undefined' &&
        url === '//cdnjs.cloudflare.com/ajax/libs/d3/3.5.3/d3.min.js'){
        callback()
      }
      var s = document.createElement('script');
      s.src = url;
      s.async = true;
      s.onreadystatechange = s.onload = callback;
      s.onerror = function(){
        console.warn("failed to load library " + url);
        };
      document.getElementsByTagName("head")[0].appendChild(s);
    };
    var vincent_event = new CustomEvent(
      "vincent_libs_loaded",
      {bubbles: true, cancelable: true}
    );
    """
    lib_urls = [
        "'//cdnjs.cloudflare.com/ajax/libs/d3/3.5.3/d3.min.js'",
        ("'//cdnjs.cloudflare.com/ajax/libs/d3-geo-projection/0.2.9/"
         "d3.geo.projection.min.js'"),
        "'//wrobstory.github.io/d3-cloud/d3.layout.cloud.js'",
        "'//wrobstory.github.io/vega/vega.v1.3.3.js'"
    ]
    get_lib = """vct_load_lib(%s, function(){
        %s
    });"""
    load_js = get_lib
    ipy_trigger = "window.dispatchEvent(vincent_event);"
    # Nest each loader inside the previous one's callback so the libraries
    # load strictly in order; the innermost callback fires the DOM event.
    for elem in lib_urls[:-1]:
        load_js = load_js % (elem, get_lib)
    load_js = load_js % (lib_urls[-1], ipy_trigger)
    html = """
    <script>
        %s
        function load_all_libs(){
            console.log('Loading Vincent libs...')
            %s
        };
        if(typeof define === "function" && define.amd){
            if (window['d3'] === undefined ||
                window['topojson'] === undefined){
                require.config(
                    {paths: {
                        d3: '//cdnjs.cloudflare.com/ajax/libs/d3/3.5.3/d3.min',
                        topojson: '//cdnjs.cloudflare.com/ajax/libs/topojson/1.6.9/topojson.min'
                        }
                    }
                );
                require(["d3"], function(d3){
                    console.log('Loading Vincent from require.js...')
                    window.d3 = d3;
                    require(["topojson"], function(topojson){
                        window.topojson = topojson;
                        load_all_libs();
                    });
                });
            } else {
                load_all_libs();
            };
        }else{
            console.log('Require.js not found, loading manually...')
            load_all_libs();
        };
    </script>""" % (load_lib, load_js,)
    return display(HTML(html))
def _assert_is_type(name, value, value_type):
"""Assert that a value must be a given type."""
if not isinstance(value, value_type):
if type(value_type) is tuple:
types = ', '.join(t.__name__ for t in value_type)
raise ValueError('{0} must be one of ({1})'.format(name, types))
else:
raise ValueError('{0} must be {1}'
.format(name, value_type.__name__))
class ValidationError(Exception):
    """Exception raised when validation fails.

    This exception is raised only when the ``validate`` functions of classes
    that inherit from ``FieldClass`` are called. It implies that the classes
    do not contain valid Vega JSON."""
    pass
class KeyedList(list):
    """A list whose elements can also be looked up by the value of a
    configurable attribute (``name`` by default) on each element.

    String keys index by attribute value; any other key falls back to
    normal list indexing.
    """

    def __init__(self, attr_name='name', *args, **kwargs):
        self.attr_name = attr_name
        list.__init__(self, *args, **kwargs)

    def get_keys(self):
        """Return the key of every element; reject duplicate keys."""
        all_keys = [getattr(item, self.attr_name) for item in self]
        if len(set(all_keys)) != len(all_keys):
            raise ValidationError('duplicate keys found')
        return all_keys

    def __getitem__(self, key):
        if not isinstance(key, str_types):
            return list.__getitem__(self, key)
        all_keys = self.get_keys()
        if key not in all_keys:
            raise KeyError(' "{0}" is an invalid key'.format(key))
        return self[all_keys.index(key)]

    def __delitem__(self, key):
        if not isinstance(key, str_types):
            return list.__delitem__(self, key)
        all_keys = self.get_keys()
        if key not in all_keys:
            raise KeyError(' "{0}" is an invalid key'.format(key))
        list.__delitem__(self, all_keys.index(key))

    def __setitem__(self, key, value):
        if not isinstance(key, str_types):
            list.__setitem__(self, key, value)
            return
        # A string key must match the element's own key attribute.
        if not hasattr(value, self.attr_name):
            raise ValidationError(
                'object must have ' + self.attr_name + ' attribute')
        elif getattr(value, self.attr_name) != key:
            raise ValidationError(
                "key must be equal to '" + self.attr_name +
                "' attribute")
        all_keys = self.get_keys()
        if key in all_keys:
            list.__setitem__(self, all_keys.index(key), value)
        else:
            # Unknown keys append rather than raise.
            self.append(value)
def grammar(grammar_type=None, grammar_name=None):
    """Decorator to define properties that map to the ``grammar`` dict,
    the canonical representation of the Vega grammar within Vincent.

    Intended for classes that map to some pre-defined JSON structure
    (axes, data, marks, scales, etc.) and that carry a ``self.grammar``
    dict.

    Parameters
    ----------
    grammar_type : type or tuple of types, default None
        If the argument to the decorated function is not of the given
        types, a ValueError is raised. No type checking is done when None
        (default).
    grammar_name : string, default None
        Optional key used in the internal ``grammar`` dict. Defaults to
        the decorated function's name; override it when the JSON field is
        a Python keyword or has an un-Pythonic name.

    The decorated function is a validator: it takes only the value to be
    validated (no ``self``), must not modify the class, returns nothing,
    and raises on invalid Vega grammar. When it does not raise, the value
    is stored in the ``grammar`` dict. The property's doc string is taken
    from the validator's doc string.

    May be used bare (``@grammar``) or with arguments
    (``@grammar(str, 'name')``).
    """
    def build_property(validator, key):
        # Property setter: optional type check, then validation, then store.
        def setter(self, value):
            if isinstance(grammar_type, (type, tuple)):
                _assert_is_type(validator.__name__, value, grammar_type)
            validator(value)
            self.grammar[key] = value

        def getter(self):
            return self.grammar.get(key, None)

        def deleter(self):
            if key in self.grammar:
                del self.grammar[key]

        return property(getter, setter, deleter, validator.__doc__)

    if isinstance(grammar_type, (type, tuple)):
        # Called with a type argument: return a decorator.
        def type_decorator(validator):
            # Prefer the explicit grammar name when one was given.
            return build_property(
                validator, grammar_name or validator.__name__)
        return type_decorator
    elif isinstance(grammar_name, str_types):
        # Called with only a name: return a decorator using that key.
        def name_decorator(validator):
            return build_property(validator, grammar_name)
        return name_decorator
    else:
        # Bare use: grammar_type is actually the decorated validator.
        return build_property(grammar_type, grammar_type.__name__)
class GrammarDict(dict):
    """Dict subclass holding the Vega grammar.

    Calling an instance returns the grammar as plain Python data
    structures; ``str()`` returns its JSON serialization.
    """

    def __init__(self, *args, **kwargs):
        """Standard dict initialization."""
        dict.__init__(self, *args, **kwargs)

    def encoder(self, obj):
        """JSON fallback encoder: unwrap nested grammar-bearing objects."""
        return getattr(obj, 'grammar', None)

    def __call__(self):
        """Return the Vega grammar as a plain Python data structure."""
        serialized = json.dumps(self, default=self.encoder)
        return json.loads(serialized)

    def __str__(self):
        """Return the Vega grammar serialized as a JSON string."""
        return json.dumps(self, default=self.encoder)
class GrammarClass(object):
    """Base class for objects that rely on an internal ``grammar`` dict.

    This dict contains the complete Vega grammar. Use as a superclass for
    classes that map to some JSON structure; the JSON content is stored in
    an internal dict named ``grammar``.
    """

    def __init__(self, **kwargs):
        """Initialize a GrammarClass.

        **kwargs are attribute-value pairs set on initialization. These
        will generally be keys for the ``grammar`` dict. If the attribute
        does not already exist as a property, a ``ValueError`` is raised.
        """
        self.grammar = GrammarDict()
        # sorted() makes assignment order deterministic across dict orders.
        for attr, value in sorted(kwargs.items()):
            if hasattr(self, attr):
                setattr(self, attr, value)
            else:
                raise ValueError('unknown keyword argument ' + attr)

    def validate(self):
        """Validate the contents of the object.

        Calls ``setattr`` for each grammar property, re-raising any
        ``ValueError`` from a property setter as a ``ValidationError``.
        """
        for key, val in self.grammar.items():
            try:
                setattr(self, key, val)
            except ValueError as e:
                raise ValidationError('invalid contents: ' + e.args[0])

    def to_json(self, path=None, html_out=False,
                html_path='vega_template.html', validate=False,
                pretty_print=True):
        """Convert object to JSON.

        Parameters
        ----------
        path: string, default None
            Path to write JSON out. If no path is provided, JSON is
            returned as a string.
        html_out: boolean, default False
            If True, output a simple HTML scaffold to visualize the vega
            json output.
        html_path: string, default 'vega_template.html'
            Path for the html file (if html_out=True).
        validate : boolean
            If True, call the object's `validate` method before
            serializing. Default is False.
        pretty_print : boolean
            If True (default), JSON is printed with indentation.

        Returns
        -------
        string
            JSON serialization of the class's grammar properties, or None
            when written to *path*.
        """
        if validate:
            self.validate()
        if pretty_print:
            dumps_args = {'indent': 2, 'separators': (',', ': ')}
        else:
            dumps_args = {}

        def encoder(obj):
            if hasattr(obj, 'grammar'):
                return obj.grammar

        if html_out:
            # BUG FIX: resource_string returns bytes on Python 3; str()
            # would have produced the repr "b'...'" instead of the template
            # text, so decode explicitly (a no-op on Python 2 str).
            template = Template(
                resource_string('vincent', 'vega_template.html')
                .decode('utf-8'))
            with open(html_path, 'w') as f:
                # NOTE(review): if path is None the scaffold embeds the
                # literal string "None" -- confirm callers always pass path
                # together with html_out.
                f.write(template.substitute(path=path))
        if path:
            with open(path, 'w') as f:
                json.dump(self.grammar, f, default=encoder, sort_keys=True,
                          **dumps_args)
        else:
            return json.dumps(self.grammar, default=encoder, sort_keys=True,
                              **dumps_args)

    def from_json(self):
        """Load object from JSON.

        Not yet implemented.
        """
        raise NotImplementedError()
class LoadError(Exception):
    """Exception for errors on loading data from third-party objects
    (presumably pandas/numpy structures, given the optional imports above
    -- confirm against callers)."""
    pass
| |
# coding=utf-8
r"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from twilio.base import values
from twilio.base.instance_context import InstanceContext
from twilio.base.instance_resource import InstanceResource
from twilio.base.list_resource import ListResource
from twilio.base.page import Page
class FunctionVersionContentList(ListResource):
    """ PLEASE NOTE that this class contains beta products that are subject to
    change. Use them with caution. """

    def __init__(self, version, service_sid, function_sid, sid):
        """
        Initialize the FunctionVersionContentList

        :param Version version: Version that contains the resource
        :param service_sid: The SID of the Service that the Function Version resource is associated with
        :param function_sid: The SID of the Function that is the parent of the Function Version
        :param sid: The unique string that identifies the Function Version resource

        :returns: twilio.rest.serverless.v1.service.function.function_version.function_version_content.FunctionVersionContentList
        :rtype: twilio.rest.serverless.v1.service.function.function_version.function_version_content.FunctionVersionContentList
        """
        super(FunctionVersionContentList, self).__init__(version)

        # Path Solution
        self._solution = {'service_sid': service_sid, 'function_sid': function_sid, 'sid': sid, }

    def get(self):
        """
        Constructs a FunctionVersionContentContext

        :returns: twilio.rest.serverless.v1.service.function.function_version.function_version_content.FunctionVersionContentContext
        :rtype: twilio.rest.serverless.v1.service.function.function_version.function_version_content.FunctionVersionContentContext
        """
        return FunctionVersionContentContext(
            self._version,
            service_sid=self._solution['service_sid'],
            function_sid=self._solution['function_sid'],
            sid=self._solution['sid'],
        )

    def __call__(self):
        """
        Constructs a FunctionVersionContentContext

        :returns: twilio.rest.serverless.v1.service.function.function_version.function_version_content.FunctionVersionContentContext
        :rtype: twilio.rest.serverless.v1.service.function.function_version.function_version_content.FunctionVersionContentContext
        """
        # DRY: previously duplicated get()'s body verbatim; delegate instead.
        return self.get()

    def __repr__(self):
        """
        Provide a friendly representation

        :returns: Machine friendly representation
        :rtype: str
        """
        return '<Twilio.Serverless.V1.FunctionVersionContentList>'
class FunctionVersionContentPage(Page):
    """ PLEASE NOTE that this class contains beta products that are subject to
    change. Use them with caution. """

    def __init__(self, version, response, solution):
        """
        Initialize the FunctionVersionContentPage

        :param Version version: Version that contains the resource
        :param Response response: Response from the API
        :param solution: Path solution dict carrying service_sid,
            function_sid and sid for the instances on this page

        :rtype: twilio.rest.serverless.v1.service.function.function_version.function_version_content.FunctionVersionContentPage
        """
        super(FunctionVersionContentPage, self).__init__(version, response)

        # Path Solution
        self._solution = solution

    def get_instance(self, payload):
        """
        Build an instance of FunctionVersionContentInstance

        :param dict payload: Payload response from the API

        :rtype: twilio.rest.serverless.v1.service.function.function_version.function_version_content.FunctionVersionContentInstance
        """
        # The solution dict holds exactly the keyword arguments the
        # instance constructor expects (service_sid, function_sid, sid).
        return FunctionVersionContentInstance(self._version, payload, **self._solution)

    def __repr__(self):
        """
        Provide a friendly representation

        :returns: Machine friendly representation
        :rtype: str
        """
        return '<Twilio.Serverless.V1.FunctionVersionContentPage>'
class FunctionVersionContentContext(InstanceContext):
    """ PLEASE NOTE that this class contains beta products that are subject to
    change. Use them with caution. """

    def __init__(self, version, service_sid, function_sid, sid):
        """
        Initialize the FunctionVersionContentContext

        :param Version version: Version that contains the resource
        :param service_sid: The SID of the Service to fetch the Function Version content from
        :param function_sid: The SID of the Function that is the parent of the Function Version content to fetch
        :param sid: The SID that identifies the Function Version content to fetch

        :rtype: twilio.rest.serverless.v1.service.function.function_version.function_version_content.FunctionVersionContentContext
        """
        super(FunctionVersionContentContext, self).__init__(version)

        # Path Solution
        self._solution = {'service_sid': service_sid, 'function_sid': function_sid, 'sid': sid, }
        self._uri = '/Services/{service_sid}/Functions/{function_sid}/Versions/{sid}/Content'.format(**self._solution)

    def fetch(self):
        """
        Fetch the FunctionVersionContentInstance

        :returns: The fetched FunctionVersionContentInstance
        :rtype: twilio.rest.serverless.v1.service.function.function_version.function_version_content.FunctionVersionContentInstance
        """
        payload = self._version.fetch(method='GET', uri=self._uri, )

        # The solution keys line up with the instance constructor kwargs.
        return FunctionVersionContentInstance(self._version, payload, **self._solution)

    def __repr__(self):
        """
        Provide a friendly representation

        :returns: Machine friendly representation
        :rtype: str
        """
        pairs = ['{}={}'.format(k, v) for k, v in self._solution.items()]
        return '<Twilio.Serverless.V1.FunctionVersionContentContext {}>'.format(' '.join(pairs))
class FunctionVersionContentInstance(InstanceResource):
    """ PLEASE NOTE that this class contains beta products that are subject to
    change. Use them with caution. """

    def __init__(self, version, payload, service_sid, function_sid, sid):
        """
        Initialize the FunctionVersionContentInstance

        :rtype: twilio.rest.serverless.v1.service.function.function_version.function_version_content.FunctionVersionContentInstance
        """
        super(FunctionVersionContentInstance, self).__init__(version)

        # Marshaled Properties
        self._properties = {
            'sid': payload.get('sid'),
            'account_sid': payload.get('account_sid'),
            'service_sid': payload.get('service_sid'),
            'function_sid': payload.get('function_sid'),
            'content': payload.get('content'),
            'url': payload.get('url'),
        }

        # Context is built lazily on first use (see _proxy).
        self._context = None
        self._solution = {'service_sid': service_sid, 'function_sid': function_sid, 'sid': sid, }

    @property
    def _proxy(self):
        """
        Generate an instance context for the instance; all instance actions
        are proxied to the context.

        :returns: FunctionVersionContentContext for this FunctionVersionContentInstance
        :rtype: twilio.rest.serverless.v1.service.function.function_version.function_version_content.FunctionVersionContentContext
        """
        if self._context is None:
            # _solution carries the exact kwargs the context expects.
            self._context = FunctionVersionContentContext(self._version, **self._solution)
        return self._context

    @property
    def sid(self):
        """
        :returns: The unique string that identifies the Function Version resource
        :rtype: unicode
        """
        return self._properties['sid']

    @property
    def account_sid(self):
        """
        :returns: The SID of the Account that created the Function Version resource
        :rtype: unicode
        """
        return self._properties['account_sid']

    @property
    def service_sid(self):
        """
        :returns: The SID of the Service that the Function Version resource is associated with
        :rtype: unicode
        """
        return self._properties['service_sid']

    @property
    def function_sid(self):
        """
        :returns: The SID of the Function that is the parent of the Function Version
        :rtype: unicode
        """
        return self._properties['function_sid']

    @property
    def content(self):
        """
        :returns: The content of the Function Version resource
        :rtype: unicode
        """
        return self._properties['content']

    @property
    def url(self):
        """
        :returns: The url
        :rtype: unicode
        """
        return self._properties['url']

    def fetch(self):
        """
        Fetch the FunctionVersionContentInstance

        :returns: The fetched FunctionVersionContentInstance
        :rtype: twilio.rest.serverless.v1.service.function.function_version.function_version_content.FunctionVersionContentInstance
        """
        return self._proxy.fetch()

    def __repr__(self):
        """
        Provide a friendly representation

        :returns: Machine friendly representation
        :rtype: str
        """
        pairs = ['{}={}'.format(k, v) for k, v in self._solution.items()]
        return '<Twilio.Serverless.V1.FunctionVersionContentInstance {}>'.format(' '.join(pairs))
| |
# Copyright 2022 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import click
import json
import re
import csv
import time
import google.auth
import googleapiclient.discovery
try:
from constants import (
ALL_RESOURCES_IN_PROCESSING_ORDER,
MATCHER_EXPRESSION,
FORMAT_MATCHER,
)
except:
pass
from table_logger import TableLogger
from inventory import cai
# Column order shared by the CSV report file and the console table below.
log_headers = [
    "resource_type",
    "mapping_method",
    "resource",
    "role",
    "member_to_copy",
    "member_copy_result",
]
# Console table used to echo every IAM binding considered for copying.
table_output = TableLogger(
    columns=",".join(log_headers),
    colwidth={
        "resource_type": 20,
        "mapping_method": 20,
        "resource": 20,
        "role": 30,
    },
    default_colwidth=50,
)
def look_for_gcloud_org():
    """Derive the organization id from the active gcloud configuration.

    Uses Application Default Credentials to find the current project,
    then walks the project's ancestry looking for its parent
    organization.

    :returns: the organization id string
    :raises SystemExit: when no org id can be determined
    """
    click.secho(
        "No --org-id provided. Trying gcloud config...",
    )
    try:
        # creds is not used directly here; discovery.build picks up the
        # same Application Default Credentials on its own.
        creds, project_id = google.auth.default()
        client = googleapiclient.discovery.build(
            serviceName="cloudresourcemanager",
            version="v1",
            cache_discovery=False,
        )
        req = client.projects().getAncestry(projectId=project_id)
        resp = req.execute()
        # Ancestry contains the project (and any folders); pick the
        # organization entry.
        find_org = next(
            filter(
                lambda r: r["resourceId"]["type"] == "organization",
                resp["ancestor"],
            )
        )
        org_id = find_org["resourceId"]["id"]
        click.secho(
            "Using {id} from gcloud config.".format(id=org_id),
        )
        return org_id
    except Exception:
        # BUG FIX: narrowed from a bare 'except:' so KeyboardInterrupt and
        # SystemExit are no longer swallowed; auth/API/lookup failures
        # (including StopIteration from next()) still map to the friendly
        # exit message below.
        raise SystemExit(
            ( 'Could not determine org id from gcloud config. Please either '
            'pass in the org id via --org-id or ensure your current gcloud '
            'configuration has an active project set.' )
        )
def parse_csv(file):
    """Parse a two-column CSV mapping file into a dict.

    The first row is treated as a header and skipped; each remaining row
    maps column 0 -> column 1.

    :param file: path to the CSV file
    :returns: dict of first-column values to second-column values
    """
    # newline="" is required by the csv module so quoted fields containing
    # newlines are handled correctly.
    with open(file, mode="r", newline="") as inp:
        reader = csv.reader(inp)
        next(reader, None)  # skip the header row
        return {rows[0]: rows[1] for rows in reader}
# Top-level click command group; subcommands attach via @cli.command().
# (Kept docstring-free on purpose: click would surface a docstring as
# user-visible --help text.)
@click.group()
def cli():
    pass
# Subcommand: download a Cloud Asset Inventory export for the organization.
@cli.command()
@click.option("--org-id")
def generate_inventory_file(org_id):
    # Fall back to the active gcloud configuration when no org id is given.
    org_id_to_use = org_id if org_id else look_for_gcloud_org()
    cai.fetch_cai_file(org_id_to_use)
def should_keep_fix(member, manual_map, existing_bindings):
    """Decide whether *member* needs a copied binding.

    Returns ``[new_member, mapping_type]`` when a replacement identity was
    derived -- a manual mapping wins over the dynamic regex rewrite -- and
    that identity is not already present in *existing_bindings*.
    Returns None otherwise.
    """
    replacement = None
    method = None
    # Dynamic rewrite: derive the replacement member from the regex match.
    dynamic = re.match(MATCHER_EXPRESSION, member)
    if dynamic:
        replacement = FORMAT_MATCHER(dynamic)
        method = "Dynamic"
    # Manual mapping file overrides any dynamic result.
    bare_email = member.replace("user:", "")
    if bare_email in manual_map:
        replacement = "user:{email}".format(email=manual_map[bare_email])
        method = "Manual"
    # Nothing to do when the replacement is already bound.
    if replacement in existing_bindings:
        return None
    if replacement and method:
        return [replacement, method]
    return None
def execute_iam_copy(resources, dry_run, verify_permissions):
    """Apply (or dry-run) the copied IAM bindings and write a CSV report.

    :param resources: iterable of (migrator_class, bindings) pairs, where
        each binding dict carries resource/role/member details
    :param dry_run: passed through to each migrator
    :param verify_permissions: when truthy, verify before migrating
    """
    timestamp = int(time.time())
    filename = "out-{timestamp}.csv".format(timestamp=timestamp)
    # BUG FIX: use 'with' so the report is flushed and closed even if a
    # migration step raises part-way through (the handle was leaked).
    with open(filename, "a+", encoding="UTF8", newline="") as f:
        writer = csv.writer(f)
        writer.writerow(log_headers)
        for (instance, bindings) in resources:
            for binding in bindings:
                iam_migrator = instance(
                    binding["resource"],
                    binding["role"],
                    binding["new_member"],
                    dry_run,
                )
                if verify_permissions:
                    iam_migrator.verify_permissions()
                iam_migrator.migrate()
                writer.writerow(
                    [
                        binding["type"],
                        binding["mapping_type"],
                        binding["resource"],
                        binding["role"],
                        binding["old_member"],
                        binding["new_member"],
                    ]
                )
    click.secho(
        # BUG FIX: the format string had no {filename} placeholder, so the
        # keyword argument was silently ignored and the message never
        # named the output file.
        "Script Complete. {filename} created with output.".format(
            filename=filename
        ),
    )
# Subcommand: scan a CAI inventory for "tainted" IAM members, report them,
# and (after confirmation) copy the corrected bindings.
# (Docstring intentionally omitted: click would surface it as --help text.)
@cli.command()
@click.option(
    "--filename",
    default=None,
)
@click.option("--dry-run", default=False)
@click.option("--map-file", default=None)
@click.option("--org-id", envvar="ORG_ID")
@click.option("--verify-permissions", default=True)
def run(filename, dry_run, map_file, org_id, verify_permissions):
    org_id = org_id if org_id else look_for_gcloud_org()
    if not map_file:
        click.secho(
            ( 'Notice: No manual mapper provided. To provide one '
            'set the --map-file parameter.\n' ),
            fg="yellow",
        )
    if not filename:
        click.secho(
            ( 'Notice: No filename provided. To provide one set the '
            '--filename parameter. Fetching inventory file...\n' ),
            fg="yellow",
        )
    manual_map = parse_csv(map_file) if map_file else {}
    assets = []
    asset_types = []
    file_to_open = filename if filename else cai.fetch_cai_file(org_id)
    # BUG FIX: close the inventory file promptly instead of leaking the
    # handle for the remainder of the run.
    with open(file_to_open) as f:
        cai_data = json.load(f)
    for resource in ALL_RESOURCES_IN_PROCESSING_ORDER:
        filter_resources = list(
            filter(
                lambda r: resource.ASSET_TYPE == r["assetType"],
                cai_data,
            )
        )
        click.secho(
            "Processing {count} resources of type {type}...".format(
                count=len(filter_resources), type=resource.ASSET_TYPE
            ),
            fg="blue",
        )
        new_assets = []
        for res in filter_resources:
            for binding in res["policy"]["bindings"]:
                for member in binding["members"]:
                    should_fix_member = should_keep_fix(
                        member, manual_map, binding["members"]
                    )
                    if should_fix_member is not None:
                        asset = {
                            "type": resource.ASSET_TYPE.split(
                                "googleapis.com/"
                            )[1],
                            "mapping_type": should_fix_member[1],
                            "resource": res["resource"],
                            "role": binding["role"],
                            "old_member": member,
                            "new_member": should_fix_member[0],
                        }
                        new_assets.append(asset)
        assets.extend(new_assets)
        # storing assets with the corresponding resource class to process
        # later on
        if len(new_assets) > 0:
            asset_types.append((resource, new_assets))
        click.secho(
            "Found {count} tainted iam permissions on resource {type}... \n".format(
                count=len(new_assets), type=resource.ASSET_TYPE
            ),
            fg="yellow",
        )
    click.secho(
        "{count} total permissions to be copied".format(count=len(assets)),
        fg="green",
        bg="black",
    )
    for a in assets:
        table_output(*a.values())
    if dry_run:
        click.secho(
            "RUNNING AS DRY RUN. NO ACTUAL PERMISSIONS WILL BE TOUCHED.",
            fg="black",
            bg="green",
        )
    else:
        click.secho(
            ( '\n\nThis operation will copy the tainted iam permissions. '
            'There is no reversal operation. \n' ),
            fg="red",
        )
    if click.confirm("Are you sure you want to execute?"):
        execute_iam_copy(asset_types, dry_run, verify_permissions)
# Script entry point: dispatch to the click command group.
if __name__ == "__main__":
    cli()
| |
# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
import os
from zope.interface import implements
from twisted.persisted import styles
from twisted.python import log
from twisted.internet import reactor, defer
from buildbot import interfaces, util
from buildbot.status.logfile import LogFile, HTMLLogFile
class BuildStepStatus(styles.Versioned):
    """
    I represent a collection of output status for a
    L{buildbot.process.step.BuildStep}.

    Statistics contain any information gleaned from a step that is
    not in the form of a logfile. As an example, steps that run
    tests might gather statistics about the number of passed, failed,
    or skipped tests.

    @type progress: L{buildbot.status.progress.StepProgress}
    @cvar progress: tracks ETA for the step
    @type text: list of strings
    @cvar text: list of short texts that describe the command and its status
    @type text2: list of strings
    @cvar text2: list of short texts added to the overall build description
    @type logs: dict of string -> L{buildbot.status.logfile.LogFile}
    @ivar logs: logs of steps
    @type statistics: dict
    @ivar statistics: results from running this step
    """
    # note that these are created when the Build is set up, before each
    # corresponding BuildStep has started.
    implements(interfaces.IBuildStepStatus, interfaces.IStatusEvent)

    # Bumped whenever the pickled layout changes; styles.Versioned uses it
    # to run upgrade methods on old pickles (hence the class-level
    # defaults below, which old pickles fall back to).
    persistenceVersion = 4
    persistenceForgets = ( 'wasUpgraded', )

    # Class-level defaults.  The mutable ones (lists/dicts) are shadowed
    # by per-instance copies in __init__; do not mutate them at class
    # level or they would be shared across all instances.
    started = None
    finished = None
    progress = None
    text = []
    results = None
    text2 = []
    watchers = []
    updates = {}
    finishedWatchers = []
    statistics = {}
    step_number = None
    hidden = False
def __init__(self, parent, master, step_number):
assert interfaces.IBuildStatus(parent)
self.build = parent
self.step_number = step_number
self.hidden = False
self.logs = []
self.urls = {}
self.watchers = []
self.updates = {}
self.finishedWatchers = []
self.statistics = {}
self.skipped = False
self.master = master
self.waitingForLocks = False
    def getName(self):
        """Returns a short string with the name of this step. This string
        may have spaces in it."""
        return self.name

    def getBuild(self):
        """Return the BuildStatus this step belongs to."""
        return self.build

    def getTimes(self):
        """Return (start, finish) timestamps; either may be None."""
        return (self.started, self.finished)
def getExpectations(self):
"""Returns a list of tuples (name, current, target)."""
if not self.progress:
return []
ret = []
metrics = self.progress.progress.keys()
metrics.sort()
for m in metrics:
t = (m, self.progress.progress[m], self.progress.expectations[m])
ret.append(t)
return ret
    def getLogs(self):
        """Return the list of LogFiles added to this step."""
        return self.logs

    def getURLs(self):
        """Return a copy of the name->URL dict added via addURL."""
        return self.urls.copy()

    def isStarted(self):
        return (self.started is not None)

    def isSkipped(self):
        return self.skipped

    def isFinished(self):
        return (self.finished is not None)

    def isHidden(self):
        return self.hidden

    def waitUntilFinished(self):
        """Return a Deferred that fires (with self) when the step finishes.
        Fires immediately if the step is already finished."""
        if self.finished:
            d = defer.succeed(self)
        else:
            d = defer.Deferred()
            self.finishedWatchers.append(d)
        return d
    # while the step is running, the following methods make sense.
    # Afterwards they return None
    def getETA(self):
        """Return the estimated remaining time (from the progress tracker),
        or None when not running or no prediction is possible."""
        if self.started is None:
            return None # not started yet
        if self.finished is not None:
            return None # already finished
        if not self.progress:
            return None # no way to predict
        return self.progress.remaining()
    # Once you know the step has finished, the following methods are legal.
    # Before this step has finished, they all return None.
    def getText(self):
        """Returns a list of strings which describe the step. These are
        intended to be displayed in a narrow column. If more space is
        available, the caller should join them together with spaces before
        presenting them to the user."""
        return self.text

    def getResults(self):
        """Return a tuple describing the results of the step.
        'result' is one of the constants in L{buildbot.status.builder}:
        SUCCESS, WARNINGS, FAILURE, or SKIPPED.
        'strings' is an optional list of strings that the step wants to
        append to the overall build's results. These strings are usually
        more terse than the ones returned by getText(): in particular,
        successful Steps do not usually contribute any text to the
        overall build.

        @rtype: tuple of int, list of strings
        @returns: (result, strings)
        """
        return (self.results, self.text2)
def hasStatistic(self, name):
"""Return true if this step has a value for the given statistic.
"""
return self.statistics.has_key(name)
    def getStatistic(self, name, default=None):
        """Return the given statistic, if present
        """
        return self.statistics.get(name, default)

    def getStatistics(self):
        """Return a shallow copy of all statistics for this step."""
        return self.statistics.copy()
    # subscription interface
    def subscribe(self, receiver, updateInterval=10):
        # will get logStarted, logFinished, stepETAUpdate
        assert receiver not in self.watchers
        self.watchers.append(receiver)
        self.sendETAUpdate(receiver, updateInterval)

    def sendETAUpdate(self, receiver, updateInterval):
        # Clear any stale timer handle before notifying, because...
        self.updates[receiver] = None
        # they might unsubscribe during stepETAUpdate
        receiver.stepETAUpdate(self.build, self,
                               self.getETA(), self.getExpectations())
        # Only reschedule if the receiver is still subscribed.
        if receiver in self.watchers:
            self.updates[receiver] = reactor.callLater(updateInterval,
                                                       self.sendETAUpdate,
                                                       receiver,
                                                       updateInterval)

    def unsubscribe(self, receiver):
        if receiver in self.watchers:
            self.watchers.remove(receiver)
        # Cancel and forget any pending ETA timer for this receiver.
        if receiver in self.updates:
            if self.updates[receiver] is not None:
                self.updates[receiver].cancel()
            del self.updates[receiver]
    # methods to be invoked by the BuildStep
    def setName(self, stepname):
        self.name = stepname

    def setColor(self, color):
        # Deprecated no-op kept for backward compatibility with old steps.
        log.msg("BuildStepStatus.setColor is no longer supported -- ignoring color %s" % (color,))

    def setProgress(self, stepprogress):
        self.progress = stepprogress

    def setHidden(self, hidden):
        self.hidden = hidden

    def stepStarted(self):
        """Mark the step as started and notify the parent build."""
        self.started = util.now()
        if self.build:
            self.build.stepStarted(self)
def addLog(self, name):
    """Create a new LogFile for this step and announce it to watchers.

    Each watcher's logStarted() may return a receiver object; that
    receiver is subscribed to the log and unsubscribed when the log
    finishes.  The step's own logFinished() is chained last.

    @param name: display name of the new log
    @returns: the new LogFile
    """
    assert self.started # addLog before stepStarted won't notify watchers
    logfilename = self.build.generateLogfileName(self.name, name)
    log = LogFile(self, name, logfilename)
    self.logs.append(log)
    for w in self.watchers:
        receiver = w.logStarted(self.build, self, log)
        if receiver:
            log.subscribe(receiver, True)
            d = log.waitUntilFinished()
            # Bind 'receiver' as a default argument: a plain closure
            # late-binds the loop variable, so with several watchers every
            # callback would unsubscribe only the *last* receiver.
            d.addCallback(lambda log, receiver=receiver:
                          log.unsubscribe(receiver))
    d = log.waitUntilFinished()
    d.addCallback(self.logFinished)
    return log
def addHTMLLog(self, name, html):
    """Attach a pre-rendered HTML log.  The content is complete on
    arrival, so watchers see logStarted and logFinished back to back."""
    assert self.started # addLog before stepStarted won't notify watchers
    logfilename = self.build.generateLogfileName(self.name, name)
    log = HTMLLogFile(self, name, logfilename, html)
    self.logs.append(log)
    for w in self.watchers:
        w.logStarted(self.build, self, log)
        w.logFinished(self.build, self, log)
def logFinished(self, log):
    """Notify every subscribed watcher that *log* has finished."""
    for watcher in self.watchers:
        watcher.logFinished(self.build, self, log)
def addURL(self, name, url):
    """Associate a named URL with this step."""
    self.urls[name] = url
def setText(self, text):
    """Replace the step's display text and broadcast the change."""
    self.text = text
    for watcher in self.watchers:
        watcher.stepTextChanged(self.build, self, text)
def setText2(self, text):
    """Replace the terse build-summary text and broadcast the change."""
    self.text2 = text
    for watcher in self.watchers:
        watcher.stepText2Changed(self.build, self, text)
def setStatistic(self, name, value):
    """Set the given statistic. Usually called by subclasses.
    """
    self.statistics[name] = value
def setSkipped(self, skipped):
    """Record whether this step was skipped."""
    self.skipped = skipped
def stepFinished(self, results):
    """Mark the step finished: record results, close (and possibly
    compress) all logs, cancel pending ETA timers, and fire any
    waitUntilFinished deferreds.

    @param results: overall step result constant
    @returns: a DeferredList of log-compression deferreds when any log
        is being compressed, else None
    """
    self.finished = util.now()
    self.results = results
    cld = [] # deferreds for log compression
    logCompressionLimit = self.master.config.logCompressionLimit
    for loog in self.logs:
        if not loog.isFinished():
            loog.finish()
        # if log compression is on, and it's a real LogFile,
        # HTMLLogFiles aren't files
        if logCompressionLimit is not False and \
                isinstance(loog, LogFile):
            # Only compress logs large enough to be worth it.
            if os.path.getsize(loog.getFilename()) > logCompressionLimit:
                loog_deferred = loog.compressLog()
                if loog_deferred:
                    cld.append(loog_deferred)
    # Cancel outstanding ETA-update timers.
    # NOTE(review): deleting entries while iterating .keys() is safe on
    # Python 2 (keys() returns a list); Python 3 would need list(...).
    for r in self.updates.keys():
        if self.updates[r] is not None:
            self.updates[r].cancel()
        del self.updates[r]
    # Fire everyone waiting on waitUntilFinished(); reset the list first
    # so callbacks cannot re-enter it.
    watchers = self.finishedWatchers
    self.finishedWatchers = []
    for w in watchers:
        w.callback(self)
    if cld:
        return defer.DeferredList(cld)
def checkLogfiles(self):
    """Drop logs whose backing files have been deleted from disk."""
    surviving = []
    for logfile in self.logs:
        if logfile.hasContents():
            surviving.append(logfile)
    self.logs = surviving
def isWaitingForLocks(self):
    """Return whether the step is currently blocked waiting on locks."""
    return self.waitingForLocks
def setWaitingForLocks(self, waiting):
    """Record whether the step is blocked waiting on locks."""
    self.waitingForLocks = waiting
# persistence
def __getstate__(self):
    """Pickle support: strip transient fields (build/master links,
    subscribers, timers) that are re-established on load."""
    d = styles.Versioned.__getstate__(self)
    del d['build'] # filled in when loading
    # ''in'' replaces deprecated dict.has_key() (removed in Python 3).
    if 'progress' in d:
        del d['progress']
    del d['watchers']
    del d['finishedWatchers']
    del d['updates']
    del d['master']
    return d
def __setstate__(self, d):
    """Rebuild from a pickled dict; the parent must re-attach self.build."""
    styles.Versioned.__setstate__(self, d)
    # self.build must be filled in by our parent
    # Subscriber bookkeeping is never pickled; start it fresh.
    self.watchers, self.finishedWatchers, self.updates = [], [], {}
def setProcessObjects(self, build, master):
    """Re-attach live process-side objects after unpickling."""
    self.build = build
    self.master = master
    # Re-link every restored log to this step and the running master.
    for logfile in self.logs:
        logfile.step = self
        logfile.master = master
def upgradeToVersion1(self):
    """Pickle upgrade: version-0 instances lack the 'urls' attribute."""
    try:
        self.urls
    except AttributeError:
        self.urls = {}
    self.wasUpgraded = True
def upgradeToVersion2(self):
    """Pickle upgrade: version-1 instances lack 'statistics'."""
    try:
        self.statistics
    except AttributeError:
        self.statistics = {}
    self.wasUpgraded = True
def upgradeToVersion3(self):
    """Pickle upgrade: version-2 instances lack 'step_number'."""
    try:
        self.step_number
    except AttributeError:
        self.step_number = 0
    self.wasUpgraded = True
def upgradeToVersion4(self):
    """Pickle upgrade: version-3 instances lack 'hidden'."""
    try:
        self.hidden
    except AttributeError:
        self.hidden = False
    self.wasUpgraded = True
def asDict(self):
    """Serialize this step's status to a plain dict (for the web UI /
    JSON interface).  Each log entry is a [name, URL] pair."""
    result = {}
    # Constant
    result['name'] = self.getName()
    # Transient
    result['text'] = self.getText()
    result['results'] = self.getResults()
    result['isStarted'] = self.isStarted()
    result['isFinished'] = self.isFinished()
    result['statistics'] = self.statistics
    result['times'] = self.getTimes()
    result['expectations'] = self.getExpectations()
    result['eta'] = self.getETA()
    result['urls'] = self.getURLs()
    result['step_number'] = self.step_number
    result['hidden'] = self.hidden
    result['logs'] = [[l.getName(),
                       self.build.builder.status.getURLForThing(l)]
                      for l in self.getLogs()]
    return result
| |
from ..errors import InvalidVersion
from ..utils import check_resource, minimum_version
from ..utils import version_lt
from .. import utils
class NetworkApiMixin(object):
    @minimum_version('1.21')
    def networks(self, names=None, ids=None, filters=None):
        """
        List networks. Similar to the ``docker networks ls`` command.

        Args:
            names (:py:class:`list`): List of names to filter by
            ids (:py:class:`list`): List of ids to filter by
            filters (dict): Filters to be processed on the network list.
                Available filters:
                - ``driver=[<driver-name>]`` Matches a network's driver.
                - ``label=[<key>]`` or ``label=[<key>=<value>]``.
                - ``type=["custom"|"builtin"]`` Filters networks by type.

        Returns:
            (dict): List of network objects.

        Raises:
            :py:class:`docker.errors.APIError`
                If the server returns an error.
        """
        if filters is None:
            filters = {}
        if names:
            filters['name'] = names
        if ids:
            filters['id'] = ids
        params = {'filters': utils.convert_filters(filters)}
        url = self._url("/networks")
        res = self._get(url, params=params)
        return self._result(res, json=True)

    @minimum_version('1.21')
    def create_network(self, name, driver=None, options=None, ipam=None,
                       check_duplicate=None, internal=False, labels=None,
                       enable_ipv6=False, attachable=None, scope=None,
                       ingress=None):
        """
        Create a network. Similar to the ``docker network create``.

        Args:
            name (str): Name of the network
            driver (str): Name of the driver used to create the network
            options (dict): Driver options as a key-value dictionary
            ipam (IPAMConfig): Optional custom IP scheme for the network.
            check_duplicate (bool): Request daemon to check for networks with
                same name. Default: ``None``.
            internal (bool): Restrict external access to the network. Default
                ``False``.
            labels (dict): Map of labels to set on the network. Default
                ``None``.
            enable_ipv6 (bool): Enable IPv6 on the network. Default ``False``.
            attachable (bool): If enabled, and the network is in the global
                scope, non-service containers on worker nodes will be able to
                connect to the network.
            scope (str): Specify the network's scope (``local``, ``global``
                or ``swarm``)
            ingress (bool): If set, create an ingress network which provides
                the routing-mesh in swarm mode.

        Returns:
            (dict): The created network reference object

        Raises:
            :py:class:`docker.errors.APIError`
                If the server returns an error.

        Example:
            A network using the bridge driver:

                >>> client.create_network("network1", driver="bridge")

            You can also create more advanced networks with custom IPAM
            configurations. For example, setting the subnet to
            ``192.168.52.0/24`` and gateway address to ``192.168.52.254``.

            .. code-block:: python

                >>> ipam_pool = docker.types.IPAMPool(
                    subnet='192.168.52.0/24',
                    gateway='192.168.52.254'
                )
                >>> ipam_config = docker.types.IPAMConfig(
                    pool_configs=[ipam_pool]
                )
                >>> docker_client.create_network("network1", driver="bridge",
                                                 ipam=ipam_config)
        """
        if options is not None and not isinstance(options, dict):
            raise TypeError('options must be a dictionary')

        data = {
            'Name': name,
            'Driver': driver,
            'Options': options,
            'IPAM': ipam,
            'CheckDuplicate': check_duplicate,
        }

        if labels is not None:
            if version_lt(self._version, '1.23'):
                raise InvalidVersion(
                    'network labels were introduced in API 1.23'
                )
            if not isinstance(labels, dict):
                raise TypeError('labels must be a dictionary')
            data["Labels"] = labels

        if enable_ipv6:
            if version_lt(self._version, '1.23'):
                raise InvalidVersion(
                    'enable_ipv6 was introduced in API 1.23'
                )
            data['EnableIPv6'] = True

        if internal:
            if version_lt(self._version, '1.22'):
                raise InvalidVersion('Internal networks are not '
                                     'supported in API version < 1.22')
            data['Internal'] = True

        if attachable is not None:
            if version_lt(self._version, '1.24'):
                raise InvalidVersion(
                    'attachable is not supported in API version < 1.24'
                )
            data['Attachable'] = attachable

        if ingress is not None:
            if version_lt(self._version, '1.29'):
                raise InvalidVersion(
                    'ingress is not supported in API version < 1.29'
                )
            data['Ingress'] = ingress

        # Bug fix: 'scope' was accepted by the signature but silently
        # ignored; forward it to the daemon (requires API >= 1.30).
        if scope is not None:
            if version_lt(self._version, '1.30'):
                raise InvalidVersion(
                    'scope is not supported in API version < 1.30'
                )
            data['Scope'] = scope

        url = self._url("/networks/create")
        res = self._post_json(url, data=data)
        return self._result(res, json=True)

    @minimum_version('1.25')
    def prune_networks(self, filters=None):
        """
        Delete unused networks

        Args:
            filters (dict): Filters to process on the prune list.

        Returns:
            (dict): A dict containing a list of deleted network names and
                the amount of disk space reclaimed in bytes.

        Raises:
            :py:class:`docker.errors.APIError`
                If the server returns an error.
        """
        params = {}
        if filters:
            params['filters'] = utils.convert_filters(filters)
        url = self._url('/networks/prune')
        return self._result(self._post(url, params=params), True)

    @minimum_version('1.21')
    @check_resource('net_id')
    def remove_network(self, net_id):
        """
        Remove a network. Similar to the ``docker network rm`` command.

        Args:
            net_id (str): The network's id
        """
        url = self._url("/networks/{0}", net_id)
        res = self._delete(url)
        self._raise_for_status(res)

    @minimum_version('1.21')
    @check_resource('net_id')
    def inspect_network(self, net_id, verbose=None):
        """
        Get detailed information about a network.

        Args:
            net_id (str): ID of network
            verbose (bool): Show the service details across the cluster in
                swarm mode.
        """
        params = {}
        if verbose is not None:
            if version_lt(self._version, '1.28'):
                raise InvalidVersion('verbose was introduced in API 1.28')
            params['verbose'] = verbose

        url = self._url("/networks/{0}", net_id)
        res = self._get(url, params=params)
        return self._result(res, json=True)

    @check_resource('container')
    @minimum_version('1.21')
    def connect_container_to_network(self, container, net_id,
                                     ipv4_address=None, ipv6_address=None,
                                     aliases=None, links=None,
                                     link_local_ips=None):
        """
        Connect a container to a network.

        Args:
            container (str): container-id/name to be connected to the network
            net_id (str): network id
            aliases (:py:class:`list`): A list of aliases for this endpoint.
                Names in that list can be used within the network to reach the
                container. Defaults to ``None``.
            links (:py:class:`list`): A list of links for this endpoint.
                Containers declared in this list will be linked to this
                container. Defaults to ``None``.
            ipv4_address (str): The IP address of this container on the
                network, using the IPv4 protocol. Defaults to ``None``.
            ipv6_address (str): The IP address of this container on the
                network, using the IPv6 protocol. Defaults to ``None``.
            link_local_ips (:py:class:`list`): A list of link-local
                (IPv4/IPv6) addresses.
        """
        data = {
            "Container": container,
            "EndpointConfig": self.create_endpoint_config(
                aliases=aliases, links=links, ipv4_address=ipv4_address,
                ipv6_address=ipv6_address, link_local_ips=link_local_ips
            ),
        }

        url = self._url("/networks/{0}/connect", net_id)
        res = self._post_json(url, data=data)
        self._raise_for_status(res)

    @check_resource('container')
    @minimum_version('1.21')
    def disconnect_container_from_network(self, container, net_id,
                                          force=False):
        """
        Disconnect a container from a network.

        Args:
            container (str): container ID or name to be disconnected from the
                network
            net_id (str): network ID
            force (bool): Force the container to disconnect from a network.
                Default: ``False``
        """
        data = {"Container": container}
        if force:
            if version_lt(self._version, '1.22'):
                raise InvalidVersion(
                    'Forced disconnect was introduced in API 1.22'
                )
            data['Force'] = force
        url = self._url("/networks/{0}/disconnect", net_id)
        res = self._post_json(url, data=data)
        self._raise_for_status(res)
| |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
r"""
From my blog post:
<http://tanghaibao.blogspot.com/2010/02/getting-phylogeny-from-list-of.html>
Example:
>>> mylist = [3702, 3649, 3694, 3880]
>>> t = TaxIDTree(mylist)
>>> print(t)
(((Carica_papaya,Arabidopsis_thaliana)Brassicales,(Medicago_truncatula,Populus_trichocarpa)fabids)rosids);
>>> t.print_tree()
<BLANKLINE>
/-Carica_papaya
/---|
| \-Arabidopsis_thaliana
---- /---|
| /-Medicago_truncatula
\---|
\-Populus_trichocarpa
"""
import sys
import time
import logging
from functools import lru_cache
from urllib.request import urlopen
from urllib.error import HTTPError, URLError
from ete3 import Tree
from ClientForm import ParseResponse
from BeautifulSoup import BeautifulSoup
from jcvi.apps.base import OptionParser, ActionDispatcher
URL = "http://itol.embl.de/other_trees.shtml"
class TaxIDTree(object):
    """Query the iTOL web service with a list of NCBI taxids and hold the
    returned phylogeny as a newick string (``self.newick``).

    NOTE(review): ClientForm and BeautifulSoup (v3) are legacy Python-2-era
    libraries; confirm they are still importable in this environment.
    """
    def __init__(self, list_of_taxids):
        # If only one taxid provided, get full tree with nameExp
        # else, get default tree
        if isinstance(list_of_taxids, int): # single taxon
            list_of_taxids = [list_of_taxids]
            form_element_id = "nameExp"
        else:
            form_element_id = "nameCol"

        # the data to send in: one taxid per line
        form_data = "\n".join(str(x) for x in list_of_taxids)

        # Retry until iTOL responds; failures are logged and retried every
        # 5 seconds (NOTE(review): there is no cap on retries).
        success = False
        while not success:
            try:
                response = urlopen(URL)
                success = True
            except (URLError, HTTPError, RuntimeError) as e:
                logging.error(e)
                logging.debug("wait 5 seconds to reconnect...")
                time.sleep(5)

        forms = ParseResponse(response, backwards_compat=False)
        form = forms[0]

        form["ncbiIDs"] = form_data
        page = urlopen(form.click()).read()
        soup = BeautifulSoup(page)

        # The newick tree comes back inside a <textarea> whose id depends
        # on the query mode chosen above.
        self.newick = ""
        for element in soup("textarea"):
            if element["id"] == form_element_id:
                self.newick = str(element.contents[0])

        if self.newick == "":
            # Dump the raw page to help debug an empty response.
            print(soup)

    def __str__(self):
        return self.newick

    def print_tree(self):
        # format=8 keeps internal node names from the newick string.
        t = Tree(self.newick, format=8)
        print(t)
def get_names(list_of_taxids):
    """Translate taxids to scientific names via batch_taxonomy.

    >>> mylist = [3702, 3649, 3694, 3880]
    >>> get_names(mylist)
    ['Arabidopsis thaliana', 'Carica papaya', 'Populus trichocarpa', 'Medicago truncatula']
    """
    from jcvi.apps.fetch import batch_taxonomy

    return list(batch_taxonomy([str(taxid) for taxid in list_of_taxids]))
def get_taxids(list_of_names):
    """Translate scientific names to integer taxids via batch_taxids.

    >>> mylist = ['Arabidopsis thaliana', 'Carica papaya']
    >>> get_taxids(mylist)
    [1, 2]
    """
    from jcvi.apps.fetch import batch_taxids

    return [int(taxid) for taxid in batch_taxids(list_of_names)]
def MRCA(list_of_taxids):
    """
    This gets the most recent common ancestor (MRCA) for a list of taxids

    >>> mylist = [3702, 3649, 3694, 3880]
    >>> MRCA(mylist)
    'rosids'
    """
    # Fetch the tree from iTOL, re-parse it with ete3, then take the
    # common ancestor of all leaves; its node name is the MRCA label.
    t = TaxIDTree(list_of_taxids)
    t = Tree(str(t), format=8)

    ancestor = t.get_common_ancestor(*t.get_leaves())
    return ancestor.name
@lru_cache(maxsize=None)
def isPlantOrigin(taxid):
    """
    Given a taxid, this gets the expanded tree which can then be checked to
    see if the organism is a plant or not

    >>> isPlantOrigin(29760)
    True
    """
    assert isinstance(taxid, int)

    t = TaxIDTree(taxid)
    try:
        # Plant lineages include "Viridiplantae" as an internal node label
        # in the expanded newick string.
        return "Viridiplantae" in str(t)
    except AttributeError:
        # NOTE(review): str(t) returns t.newick, which __init__ always
        # sets; it is unclear how AttributeError can arise here — verify
        # whether this guard is still reachable.
        raise ValueError("{0} is not a valid ID".format(taxid))
def main():
    """Dispatch the subcommand named on the command line."""
    dispatch_table = (
        ("newick", "query a list of IDs to newick"),
        ("test", "test taxonomy module"),
    )
    ActionDispatcher(dispatch_table).dispatch(globals())
def test(args):
    """Exercise isPlantOrigin(): plant/animal IDs, cache reuse, bad ID."""
    print("Testing isPlantOrigin():")
    print(3702, isPlantOrigin(3702))  # Arabidopsis thaliana
    print(10090, isPlantOrigin(10090))  # Mus musculus

    print("\nTest cache by 10K calls:")
    for _ in range(10000):
        isPlantOrigin(3702)
        isPlantOrigin(10090)
    print("done")

    print("\nTest invalid ID:")
    print(10099, isPlantOrigin(10099))  # Wrong ID
def newick(args):
    """
    %prog newick idslist

    Query a list of IDs to retrieve phylogeny.
    """
    p = OptionParser(newick.__doc__)
    opts, args = p.parse_args(args)

    if len(args) != 1:
        sys.exit(not p.print_help())

    (idsfile,) = args
    # Use a context manager so the ids file is closed deterministically
    # (the original left the handle open).
    with open(idsfile) as fp:
        mylist = [x.strip() for x in fp if x.strip()]
    print(get_taxids(mylist))

    t = TaxIDTree(mylist)
    print(t)
# Script entry point: dispatch to the requested action.
if __name__ == "__main__":
    main()
| |
import math
import string
import sys
import struct
import matplotlib
import matplotlib.pyplot as pyplot
import numpy as np
import array
import cPickle
import asciitable
import scipy.stats as ss
import scipy as sp
import astropy.io.fits as pyfits
import cosmocalc as cc
import datetime
####
#### Name: geometry.py
#### Author: Greg Snyder gsnyder@stsci.edu
#### Purpose: Generates parameter setup file for mock survey fields from
#### continuous-volume hydrodynamical simulations using Sunrise (Jonsson 2006).
#### Disclaimer: This code is provided AS-IS with absolutely NO warranty.
#### It is largely meant as a guide rather than ideal code.
#### It can and should be replaced with other lightcone generation techniques.
#### I make no claims about the immediate usability of this code.
#### That said, I am happy to field questions and discuss issues
#### related to this code. And also to help use it.
#### License: ?
#### Credit: Users should cite Snyder et al. (2014): "Mock UDFs from Hydro Sims with Sunrise"
#### AND ALSO Kitzbichler & White 2007, Henriques et al. 2013, Overzier et al. 2013, etc.
#### These papers were used to create the lightcone generation algorithm below.
####
class Cosmology:
    """Precompute redshift <-> distance lookup grids using cosmocalc.

    The grids are consumed elsewhere via np.interp for fast conversion
    between redshift, comoving distance, and angular-diameter distance.
    """
    def __init__(self, H0=70.0, WM=0.27,WV=0.73):
        # Hubble constant [km/s/Mpc], matter density, vacuum density.
        self.H0=H0
        self.WM=WM
        self.WV=WV
        # Log-spaced redshift grid from z = 1e-3 to z = 100.
        self.redshift_grid = np.logspace(-3,2,100)
        # Comoving distance [Mpc] at each grid redshift.
        self.comoving_mpc_grid = np.asarray([(cc.cosmocalc(zf,H0=self.H0,WM=self.WM,WV=self.WV))['DCMR_Mpc'] for zf in self.redshift_grid])
        # Angular-diameter distance [Mpc] at each grid redshift.
        self.DA_mpc_grid = np.asarray([(cc.cosmocalc(zf,H0=self.H0,WM=self.WM,WV=self.WV))['DA_Mpc'] for zf in self.redshift_grid])
class ReplicatedBox:
    """One periodic replication of the simulation box along the sight line.

    Holds the box origin in the lab frame and the point where the sight
    line enters the box; further attributes are attached later by
    LightCone.ComputeBoxes.
    """
    #maybe should define some print/convert functions for this

    def __init__(self, v_lab, v_ingress):
        # Caller-facing attribute names are part of the interface.
        self.v_origin = v_lab
        self.v_ingress = v_ingress
class LightCone:
    """Build a mock-survey lightcone by replicating a periodic simulation
    box along a sight line chosen from integers (n, m), following the
    Kitzbichler & White (2007) construction.

    Python 2 code (print statements).  All distances are comoving Mpc
    unless a comment says otherwise.
    """
    def __init__(self, boxSize, cosmology, name="A Lightcone"):
        self.name=name
        self.cosmology = cosmology
        # Comoving box side length [Mpc].
        self.L=boxSize
        # Field-of-view corner vectors; filled in by BasicCone.
        self.v1=np.ndarray(shape=(3))
        self.v2=np.ndarray(shape=(3))
        self.v3=np.ndarray(shape=(3))
        self.v4=np.ndarray(shape=(3))
        # ReplicatedBox entries along the sight line; filled by ComputeBoxes.
        self.boxlist=[]
    def BasicCone(self, n, m, namelist, zlist, manual_dist_limit=0.0, manual_fov_arcmin=0.0):
        """Define the cone geometry from integers (n, m), then compute the
        per-box camera parameters.  namelist/zlist map snapshots to
        redshifts; optional overrides cap the distance or set the FOV."""
        self.n = 1.0*n
        self.m = 1.0*m
        self.namelist = namelist
        self.zlist = zlist
        # Distance at which the sight line first re-enters a replica.
        self.dist_firstRep = np.linalg.norm(np.asarray([self.n,self.m,self.n*self.m])*self.L)
        self.dist_limit = manual_dist_limit
        if manual_dist_limit==0.0:
            self.dist_limit = self.dist_firstRep
        self.redshift_firstRep = np.interp(self.dist_firstRep,self.cosmology.comoving_mpc_grid,self.cosmology.redshift_grid)
        self.numRep = int(self.n*self.m)
        self.x_com = np.asarray( [(self.n - 0.5/self.m)*self.L, (self.n + 0.5/self.m)*self.L] )
        self.y_com = np.asarray( [(self.m - 0.5/self.n)*self.L, (self.m + 0.5/self.n)*self.L] )
        self.z_com = np.asarray([self.n*self.m*self.L])
        # Angular extent of the cone in each direction [rad].
        self.delta_a_rad = (1.0/(self.n*self.m**2)) #small angle approx?
        self.delta_b_rad = (1.0/(self.m*self.n**2))
        print "WARNING: I'm pretty sure you are assuming that the survey area is small, because I am making some small-angle approximations!  If you are looking for surveys of bigger than ~degree scales, please fix me!"
        self.square_fov_rad = (manual_fov_arcmin/60.0)*(math.pi/180.0)
        if manual_fov_arcmin==0.0:
            self.square_fov_rad = self.delta_b_rad
        # NOTE(review): np.asarray is called here with three positional
        # scalars -- these look intended as 3-vectors; verify.
        self.v1 = np.asarray((self.x_com)[0],(self.y_com[0]),(self.z_com)[0])
        self.v2 = np.asarray((self.x_com)[1],(self.y_com[0]),(self.z_com)[0])
        self.v3 = np.asarray((self.x_com)[1],(self.y_com[1]),(self.z_com)[0])
        self.v4 = np.asarray((self.x_com)[0],(self.y_com[1]),(self.z_com)[0])
        self.xaxis = np.asarray([1.0,0.0,0.0])
        # u3 is the (unit) viewing direction; u1/u2 span the image plane.
        self.u3 = np.asarray([(self.n),(self.m),(self.n*self.m)])#/(self.n**2 + self.m**2 + (self.n*self.m)**2)**(0.5)
        self.u3 = self.u3/np.linalg.norm(self.u3)
        self.primaryaxis = np.asarray([0.0,0.0,1.0])
        self.u1 = np.cross(self.u3,self.xaxis)#np.cross(self.xaxis,self.u3)
        self.u1 = self.u1/np.linalg.norm(self.u1)
        self.u2 = np.cross(self.u3,self.u1)
        self.u2 = self.u2/np.linalg.norm(self.u2)
        self.origin=np.asarray([0.0,0.0,0.0])
        # Start from the lowest-redshift snapshot.
        self.snapindex = np.where(self.zlist == np.min(self.zlist))
        self.BasicInfo()
        self.ComputeBoxes()
    def BasicInfo(self):
        """Print a human-readable summary of the cone geometry."""
        print "\n"
        print "Information about: ", self.name
        print "\t Comoving Single Box L = ", self.L
        print "\t Basic info:  n,m = ", self.n, self.m
        print "\t Approx. Comoving distance at first repeat: ", round(self.dist_firstRep,2)
        print "\t Approx. Redshift at first repeat: ", round(self.redshift_firstRep,2)
        print "\t Number of replications: ", self.numRep
        print " "
        print "\t X range [Mpc] = ", self.x_com
        print "\t Y range [Mpc] = ", self.y_com
        print "\t Z height [Mpc] = ", self.z_com
        print "\n\t del A, arcmin: {:5.2f}".format(self.delta_a_rad*(180.0/math.pi)*60.0)
        print "\t del B, arcmin:  {:5.2f}".format(self.delta_b_rad*(180.0/math.pi)*60.0)
        print "\n\t Direction Unit Vector: ", self.u3
        print "\t Alpha Unit Vector: ", self.u1
        print "\t Delta Unit Vector: ", self.u2
        print "\t Test, should be Direction vector: ", np.cross(self.u1,self.u2)
        print " "
    def export_runparams(self, filename,follow=False, follow_index=60, swapxy=False , swapxz=False ):
        """Write the per-box Sunrise camera parameter file.

        follow: reuse one box's ingress/egress for every row (e.g. to track
        a single object); swapxy/swapxz permute the coordinate axes in the
        output.  Each row is also echoed to stdout.
        """
        dirvector = 1.0*self.u3
        alpha_vector = 1.0*self.u1
        delta_vector = 1.0*self.u2
        xind=0
        yind=1
        zind=2
        if swapxy==True:
            temp=dirvector[0]
            dirvector[0]=dirvector[1] ; dirvector[1]=temp
            temp=alpha_vector[0]
            alpha_vector[0]=alpha_vector[1] ; alpha_vector[1]=temp
            temp=delta_vector[0]
            delta_vector[0]=delta_vector[1] ; delta_vector[1]=temp
            xind= 1 ; yind=0 ; zind=2
        if swapxz==True:
            temp=dirvector[0]
            dirvector[0]=dirvector[2] ; dirvector[2]=temp
            temp=alpha_vector[0]
            alpha_vector[0]=alpha_vector[2] ; alpha_vector[2]=temp
            temp=delta_vector[0]
            delta_vector[0]=delta_vector[2] ; delta_vector[2]=temp
            xind= 2 ; yind=1 ; zind=0
        f = open(filename,'w')
        # Header: geometry summary as '##' comment lines.
        line = '## ' + self.name + ', LightCone Created, '+ str(datetime.date.today()) + '\n' ; f.write(line) ; print line
        line = "## Comoving Single Box L = " + str(self.L) +'\n' ; f.write(line) ; print line
        line = "## HubbleParam = " + str(self.cosmology.H0/100.0) + '\n' ; f.write(line) ; print line ; h = self.cosmology.H0/100.0
        line = "## Basic info:  n,m = " +str( self.n) + " , " + str( self.m) + '\n' ; f.write(line) ; print line
        line = "## Approx. Comoving distance at first repeat: " + str( round(self.dist_firstRep,6) ) + '\n' ; f.write(line) ; print line
        line = "## Approx. Redshift at first repeat: " + str( round(self.redshift_firstRep,6) ) + '\n' ; f.write(line) ; print line
        line = "## Number of replications: " + str( self.numRep) + '\n' ; f.write(line) ; print line
        line = "## del A, arcmin: {:10.5f}".format(self.delta_a_rad*(180.0/math.pi)*60.0) + '\n' ; f.write(line) ; print line
        line = "## del B, arcmin:  {:10.5f}".format(self.delta_b_rad*(180.0/math.pi)*60.0) + '\n' ; f.write(line) ; print line
        line = "## At 0.04 arcsec/pixel, need > {:6.1f} pixels\n".format(self.square_fov_rad*(180.0/math.pi)*3600.0/0.04) ; f.write(line) ; print line
        line = "## Direction Unit Vector: " + str( dirvector ) + '\n' ; f.write(line) ; print line
        line = "## Alpha Unit Vector: " + str( alpha_vector ) + '\n' ; f.write(line) ; print line
        line = "## Delta Unit Vector: " + str( delta_vector ) + '\n' ; f.write(line) ; print line
        line = "## Buffered Cylindricial Radius Maximum: "+str( ((self.boxlist)[-2]).cylinder_radius_approx) + '\n' ; f.write(line) ; print line
        line = "## Column 1: ID#\n" ; f.write(line)
        line = "## Column 2: Snapshot Label\n" ; f.write(line)
        line = "## Column 3: Snapshot Redshift\n" ; f.write(line)
        line = "## Column 4: v_Ingress along x [Comoving h^-1 kpc]\n" ; f.write(line)
        line = "## Column 5: v_Ingress along y [Comoving h^-1 kpc]\n" ; f.write(line)
        line = "## Column 6: v_Ingress along z [Comoving h^-1 kpc]\n" ; f.write(line)
        line = "## Column 7: v_Egress along x [Comoving h^-1 kpc]\n" ; f.write(line)
        line = "## Column 8: v_Egress along y [Comoving h^-1 kpc]\n" ; f.write(line)
        line = "## Column 9: v_Egress along z [Comoving h^-1 kpc]\n" ; f.write(line)
        line = "## Column 10: v_Ingress along x [Physical kpc]\n" ; f.write(line)
        line = "## Column 11: v_Ingress along y [Physical kpc]\n" ; f.write(line)
        line = "## Column 12: v_Ingress along z [Physical kpc]\n" ; f.write(line)
        line = "## Column 13: v_Camera along x [Physical kpc] \n" ; f.write(line)
        line = "## Column 14: v_Camera along y [Physical kpc] \n" ; f.write(line)
        line = "## Column 15: v_Camera along z [Physical kpc] \n" ; f.write(line)
        line = "## Column 16: v_Camera - v_Ingress along x [Physical kpc] \n" ; f.write(line)
        line = "## Column 17: v_Camera - v_Ingress along y [Physical kpc] \n" ; f.write(line)
        line = "## Column 18: v_Camera - v_Ingress along z [Physical kpc] \n" ; f.write(line)
        line = "## Column 19: Square Field of View (smaller axis) at v_Ingress [Physical kpc]\n" ; f.write(line)
        line = "## Column 20: Geometrically-appropriate redshift at center of box\n" ; f.write(line)
        line = "## Column 21: Radius buffered to subtend FOV [Comoving h^-1 kpc]\n" ; f.write(line)
        i=0
        MaxRadSize = ((self.boxlist)[-2]).cylinder_radius_approx
        # Last boxlist entry is a placeholder; skip it.
        for box in (self.boxlist)[:-1]:
            if follow==True:
                followbox = (self.boxlist)[follow_index]
            if follow==False:
                followbox=box
            v_in_snap = followbox.v_ingress_local*1000.0*h#np.mod(box.v_ingress, self.L)*1000.0*h  #in comoving kpc h^-1 units
            v_out_snap = followbox.v_egress_local*1000.0*h #np.mod(box.v_egress, self.L)*1000.0*h
            v_in_phys = followbox.v_ingress_local*1000.0/(1.0 + box.mid_z)  # in physical kpc
            v_out_phys = followbox.v_egress_local*1000.0/(1.0 + box.mid_z)  # in physical kpc
            v_cam_phys = v_in_phys - 1.0*box.camera_offset*1000.0*self.u3/(1.0 + box.mid_z)  # in physical kpc, laboratory frame -- does Sunrise translate camera coords too?!?!
            v_cam_cent_phys = v_cam_phys - v_in_phys  # in case we want to center on the ingress point
            fov_phys = 2.0*(box.start_distance)*math.sin(self.square_fov_rad/2.0)*1000.0/(1.0 + box.mid_z) #in physical kpc
            RadSize_snap = box.cylinder_radius_approx*1000.0*h  #MaxRadSize*1000.0*h
            line = "{:5d}  {:4s}  {:7.4f}  {:10.4f}  {:10.4f}  {:10.4f}" \
                "  {:10.4f}  {:10.4f}  {:10.4f}  {:10.4f}  {:10.4f}  {:10.4f}" \
                "  {:10.4f}  {:10.4f}  {:10.4f}  {:10.4f}  {:10.4f}  {:10.4f}" \
                "  {:10.4f}  {:7.4f}  {:10.4f}\n".format(i,box.snaplabel,box.snapredshift,
                    v_in_snap[xind], v_in_snap[yind], v_in_snap[zind],
                    v_out_snap[xind], v_out_snap[yind], v_out_snap[zind],
                    v_in_phys[xind], v_in_phys[yind], v_in_phys[zind],
                    v_cam_phys[xind], v_cam_phys[yind], v_cam_phys[zind],
                    v_cam_cent_phys[xind], v_cam_cent_phys[yind], v_cam_cent_phys[zind],
                    fov_phys, box.mid_z, RadSize_snap) ; f.write(line) ; print line
            i=i+1
        f.close()
    def ComputeBoxes(self):
        """Walk the sight line box by box until dist_limit, recording
        ingress/egress points, snapshot assignment, and per-box camera
        quantities on each ReplicatedBox."""
        print "\t Computing camera parameters for lightcone: ", self.name
        distancetraveled=0.0
        ingress_point=self.origin
        ingress_snapindex = self.snapindex
        cmpc_from_z0 = np.interp((self.zlist)[ingress_snapindex], self.cosmology.redshift_grid, self.cosmology.comoving_mpc_grid)
        print "cmpc: ", cmpc_from_z0
        self.boxlist.append(ReplicatedBox(self.origin,ingress_point))
        i=0
        # Nvec tracks the next periodic boundary (in units of L) per axis.
        Nvec = np.asarray([1.0,1.0,1.0])
        volfrac = 0.0
        while (self.dist_limit - distancetraveled > 1e-10):
            box_i = (self.boxlist)[-1]
            box_i.num = i
            testvec = Nvec*np.asarray([self.L,self.L,self.L]) #boundary to test
            ftest = (testvec - box_i.v_ingress)/self.u3  #propagate to nearest boundary
            factor = np.min(ftest)  #how far til we get one exit?
            ind_exit = np.where((ftest - factor) < 1e-10)  #which axis/es was it?
            #print i, ftest, ind_exit[0]
            box_i.v_ingress_local = box_i.v_ingress - (Nvec - 1.0)*self.L
            box_i.v_egress = box_i.v_ingress + factor*self.u3  #this is where the ray leaves this box
            box_i.v_egress_local = box_i.v_egress - (Nvec-1.0)*self.L
            Nvec[ind_exit[0]] = Nvec[ind_exit[0]] + 1.0  #iterate the boundary along these axes; note generically this could be - 1.0 if using arbitrary start/direction
            olddist = distancetraveled
            distancetraveled = np.linalg.norm(box_i.v_egress)
            mid_dist = olddist + (distancetraveled - olddist)/2.0
            mid_z = np.interp(mid_dist,self.cosmology.comoving_mpc_grid, self.cosmology.redshift_grid)
            box_i.far_z = np.interp(distancetraveled,self.cosmology.comoving_mpc_grid, self.cosmology.redshift_grid)
            box_i.near_z = np.interp(olddist,self.cosmology.comoving_mpc_grid, self.cosmology.redshift_grid)
            box_i.mid_z = mid_z  #this is used later
            box_i.mid_dist = mid_dist
            # Assign the snapshot whose redshift is closest to the box center.
            diffs = np.abs(self.zlist - mid_z)
            closest_ind = np.where(diffs == np.min(diffs))  # is this {snapshot selection} the only thing z is used for here?
            box_i.snaplabel = ((self.namelist)[closest_ind[0]])[0]
            box_i.snapredshift = ((self.zlist)[closest_ind[0]])[0]
            box_i.tot_distance_traveled_through = distancetraveled
            box_i.box_distance = (distancetraveled - olddist)
            box_i.start_distance = olddist
            box_i.camera_offset = box_i.start_distance#/(1.0 + box_i.mid_z)  actually, let's keep this in co-moving units #distancetraveled/((1.0 + box_i.snapredshift)) - box_i.box_distance/(1.0 + box_i.snapredshift)  #=~ olddist/(1+z) ...
            box_i.cylinder_radius_approx = ((self.square_fov_rad/2.0)*(2.0**0.5)*1.01)*distancetraveled
            box_i.tot_fov_comoving = (self.square_fov_rad)*distancetraveled  #small angle approx...
            #print closest_ind[0]
            self.boxlist.append(ReplicatedBox((Nvec-1.0)*self.L,box_i.v_egress))  #add the new box
            #can update/save some of its basic properties after this
            '''print i, "{:10.3f}, {:10.3f}, {:10.3f}, {:10.3f}, {:12.8f}, {:10.3f}, {:10.3f}, {:5s}".format( np.round_(distancetraveled,3),
                np.round_(self.delta_b_rad*distancetraveled,3),
                np.round_(self.delta_b_rad*np.interp(distancetraveled,self.cosmology.comoving_mpc_grid,self.cosmology.DA_mpc_grid),3),np.round_(np.interp(distancetraveled,self.cosmology.comoving_mpc_grid, self.cosmology.redshift_grid), 3), (self.L*np.round_(self.delta_b_rad*distancetraveled,3)**2)/(self.L**3),mid_dist, mid_z, (self.namelist)[closest_ind[0]])'''
            box_i.approx_volume_comoving = (self.L*np.round_(self.square_fov_rad*distancetraveled,3)**2)/(self.L**3)
            volfrac = volfrac + (self.L*np.round_(self.square_fov_rad*distancetraveled,3)**2)/(self.L**3)
            #, np.round_(box_i.v_ingress-box_i.v_origin,3)
            i=i+1
        self.volfrac = volfrac
        #print "Rough Cumulative Volume Fraction (of single box): ", self.volfrac
# Driver: build example lightcones and export Sunrise parameter files.
# Python 2 script; the active configuration is the 7x6 "Wide" cone, the
# rest are kept as commented-out presets.
if __name__=="__main__":
    print "Exploring some things about setting up lightcones..."
    h=0.704
    L = 20.0/h
    #print "L = ", L, " Mpc"
    #default HUDF-ish lightcone
    n = 15.0 ; m = 14.0
    #print "n,m = ", n,",", m
    fakez = np.logspace(-3,2,100)
    # comds = np.asarray([(cc.cosmocalc(zf))['DCMR_Mpc'] for zf in fakez])
    # Angular extents for the (n, m) cone; used for pixel-count estimates.
    delta_a_rad = (1.0/(n*m**2))
    delta_b_rad = (1.0/(m*n**2))
    skyPixel_arcsec = 0.04  #arcsec
    print "ideal ACS-ish scale: {:8.2f}".format(skyPixel_arcsec)
    Npix_A = (delta_a_rad*(180.0/math.pi)*3600.0)/skyPixel_arcsec
    Npix_B = (delta_b_rad*(180.0/math.pi)*3600.0)/skyPixel_arcsec
    print "Npix_A: {:10.1f}".format(Npix_A)
    print "Npix_B: {:10.1f}".format(Npix_B)
    GB_per_slice = 4.0*Npix_A*Npix_B/1e9
    print "GigaBytes per float: {:7.2f}".format(GB_per_slice)
    redshift = np.logspace(-3, 1, 40)
    #print z
    Nz = (redshift.shape)[0]
    #for zi in redshift:
    #    res = cc.cosmocalc(zi)
    #    print "At z= {:6.3f}, D_com= {:6.1f}; DA= {:6.1f} Mpc; DL= {:8.1f}; PS= {:3.1f} kpc/arcsec; dXz= {:5.2f}; dYz= {:5.2f}".format(
    #        round(zi,3), res['DCMR_Mpc'], res['DA_Mpc'], res['DL_Mpc'], res['PS_kpc'], delta_a_rad*res['DCMR_Mpc'], delta_b_rad*res['DCMR_Mpc'])
    #    test = cc.cosmocalc(2.0,H0=71.0,WM=0.27,WV=None)
    #data = asciitable.read('gfm_snaps.txt')
    #data = asciitable.read('snap_v_redshift.txt')
    # Snapshot table: col1 = snapshot path, col2 = redshift.
    data = asciitable.read('snap_v_redshift_Cosmo0_imagepipeline.txt')
    zlist = np.array(map(float,(data['col2'])))
    #zlist = np.array(map(float,(data['col2'])[:315]))
    #namelist = ((data['col1'])[:315])
    #namelist = np.asarray([(s)[85:89] for s in namelist])
    namelist = ((data['col1']))
    # NOTE(review): slice [89:93] extracts the 4-char snapshot label from a
    # fixed-width path string -- fragile against path changes; verify.
    namelist = np.asarray([(s)[89:93] for s in namelist])
    #namelist = np.asarray([(s)[84:88] for s in namelist])
    #print namelist#, zlist
    cosmology = Cosmology(H0=70.4,WM=0.27,WV=0.73)
    #hudf_default = LightCone(75.0/h,cosmology,"Default Deep")
    #hudf_default.BasicCone(11.0, 9.0, namelist, zlist)
    #hudf_shallow = LightCone(75.0/h,cosmology,"Default Shallow")
    #hudf_shallow.BasicCone(5.0, 4.0, namelist, zlist)
    #hudf_narrow = LightCone(25.0/h,cosmology,"Default Deep but Narrow")
    #hudf_narrow.BasicCone(11.0, 10.0, namelist, zlist, manual_fov_arcmin=1.0)
    #hudf = LightCone(25.0/h,cosmology,"Default 25mpc repeated, 136 snaps")
    #hudf.BasicCone(11.0, 10.0, namelist, zlist, manual_dist_limit=11000.0) #z~18
    #hudf_narrow = LightCone(20.0/h,cosmology,"Cluster evolution")
    #hudf_narrow.BasicCone(15.0, 14.0, namelist, zlist, manual_fov_arcmin=1.2)
    #print "Approx. Total Volume: ", hudf_shallow.volfrac
    #testbox = (hudf_shallow.boxlist)[-2]
    #note:  v=lab frame
    #note:  last box is placeholder/not used
    #print testbox.mid_z, testbox.v_origin, testbox.v_ingress, testbox.v_egress, testbox.snaplabel, testbox.snapredshift
    #i=0
    #for box in (hudf_narrow.boxlist)[:-1]:
    #    print i, box.snaplabel, np.round_(box.snapredshift,3), np.round_(box.box_distance,6), np.round_(np.linalg.norm(box.v_egress - box.v_ingress)/hudf_narrow.L,3), np.round_(box.cylinder_radius_approx/hudf_narrow.L,3), np.round_(box.cylinder_radius_approx, 3), box.v_ingress, box.v_egress
    #    i=i+1
    #I think we can put the camera at the laboratory origin in comoving units, but need this in current *physical* units for Sunrise input
    #OR option to put camera positions and FOV in comoving units?
    #hudf_narrow.export_runparams('hudf_narrow_20Mpc_15_14_followcluster.txt', follow=True, follow_index=60)
    #hudf.export_runparams('hudf_25Mpc_11_10_repeats_136snaps_mod.txt')
    #hudf_bigbox = LightCone(75.0/h,cosmology,"Default 75mpc no repeats, 136 snaps")
    #hudf_bigbox.BasicCone(11.0, 10.0, namelist, zlist, manual_dist_limit=11000.0) #z~18
    #hudf_bigbox.export_runparams('hudf_75Mpc_11_10_136snaps_fixedh_xyz_NEW.txt')
    #hudf_bigbox.export_runparams('hudf_75Mpc_11_10_136snaps_fixedh_yxz_NEW.txt', swapxy=True)
    #hudf_bigbox.export_runparams('hudf_75Mpc_11_10_136snaps_fixedh_zyx_NEW.txt', swapxz=True)
    #hudf_bigbox_nwide = LightCone(75.0/h,cosmology,"Meh Wide 75mpc repeated, 136 snaps")
    #hudf_bigbox_nwide.BasicCone(8.0, 7.0, namelist, zlist, manual_dist_limit=11000.0) #z~18
    #hudf_bigbox_nwide.export_runparams('hudfwide_75Mpc_8_7_xyz.txt')
    #hudf_bigbox_nwide.export_runparams('hudfwide_75Mpc_8_7_yxz.txt', swapxy=True)
    #hudf_bigbox_nwide.export_runparams('hudfwide_75Mpc_8_7_zyx.txt', swapxz=True)
    # Active configuration: 7x6 wide cone exported in three axis orderings.
    hudf_bigbox_wide = LightCone(75.0/h,cosmology,"Wide 75mpc repeated, 136 snaps")
    hudf_bigbox_wide.BasicCone(7.0, 6.0, namelist, zlist, manual_dist_limit=11000.0) #z~18
    hudf_bigbox_wide.export_runparams('hudfwide_75Mpc_7_6_xyz_imagepipeline.txt')
    hudf_bigbox_wide.export_runparams('hudfwide_75Mpc_7_6_yxz_imagepipeline.txt', swapxy=True)
    hudf_bigbox_wide.export_runparams('hudfwide_75Mpc_7_6_zyx_imagepipeline.txt', swapxz=True)
    #hudf_bigbox_vwide = LightCone(75.0/h,cosmology,"Very Wide 75mpc repeated, 136 snaps")
    #hudf_bigbox_vwide.BasicCone(6.0, 5.0, namelist, zlist, manual_dist_limit=11000.0) #z~18
    #hudf_bigbox_vwide.export_runparams('hudfwide_75Mpc_6_5_xyz.txt')
    #hudf_bigbox_vwide.export_runparams('hudfwide_75Mpc_6_5_yxz.txt', swapxy=True)
    #hudf_bigbox_vwide.export_runparams('hudfwide_75Mpc_6_5_zyx.txt', swapxz=True)
#hudf_default.export_runparams('hudf_default_75Mpc_11_9_wrongsnaps.txt')
mpcgrid = cosmology.comoving_mpc_grid
zgrid = cosmology.redshift_grid
print "{:6s},{:6.0f},{:6.0f},{:6.0f},{:6.0f},{:6.0f},{:6.0f}".format('box', 25.0, 50.0, 100.0, 250.0, 500.0, 1000.0)
m = 10.0 ; n = 11.0
print "{:6.1f},{:6.2f},{:6.2f},{:6.2f},{:6.2f},{:6.2f},{:6.2f}".format(1.0/(m*n**2.0)*(180.0/math.pi)*60.0,
np.interp(np.linalg.norm(np.asarray([n,m,n*m])*25.0),mpcgrid,zgrid),
np.interp(np.linalg.norm(np.asarray([n,m,n*m])*50.0),mpcgrid,zgrid),
np.interp(np.linalg.norm(np.asarray([n,m,n*m])*100.0),mpcgrid,zgrid),
np.interp(np.linalg.norm(np.asarray([n,m,n*m])*250.0),mpcgrid,zgrid),
np.interp(np.linalg.norm(np.asarray([n,m,n*m])*500.0),mpcgrid,zgrid),
np.interp(np.linalg.norm(np.asarray([n,m,n*m])*1000.0),mpcgrid,zgrid))
m = 8.0 ; n = 9.0
print "{:6.1f},{:6.2f},{:6.2f},{:6.2f},{:6.2f},{:6.2f},{:6.2f}".format(1.0/(m*n**2.0)*(180.0/math.pi)*60.0,
np.interp(np.linalg.norm(np.asarray([n,m,n*m])*25.0),mpcgrid,zgrid),
np.interp(np.linalg.norm(np.asarray([n,m,n*m])*50.0),mpcgrid,zgrid),
np.interp(np.linalg.norm(np.asarray([n,m,n*m])*100.0),mpcgrid,zgrid),
np.interp(np.linalg.norm(np.asarray([n,m,n*m])*250.0),mpcgrid,zgrid),
np.interp(np.linalg.norm(np.asarray([n,m,n*m])*500.0),mpcgrid,zgrid),
np.interp(np.linalg.norm(np.asarray([n,m,n*m])*1000.0),mpcgrid,zgrid))
m = 6.0 ; n = 7.0
print "{:6.1f},{:6.2f},{:6.2f},{:6.2f},{:6.2f},{:6.2f},{:6.2f}".format(1.0/(m*n**2.0)*(180.0/math.pi)*60.0,
np.interp(np.linalg.norm(np.asarray([n,m,n*m])*25.0),mpcgrid,zgrid),
np.interp(np.linalg.norm(np.asarray([n,m,n*m])*50.0),mpcgrid,zgrid),
np.interp(np.linalg.norm(np.asarray([n,m,n*m])*100.0),mpcgrid,zgrid),
np.interp(np.linalg.norm(np.asarray([n,m,n*m])*250.0),mpcgrid,zgrid),
np.interp(np.linalg.norm(np.asarray([n,m,n*m])*500.0),mpcgrid,zgrid),
np.interp(np.linalg.norm(np.asarray([n,m,n*m])*1000.0),mpcgrid,zgrid))
m = 4.0 ; n = 5.0
print "{:6.1f},{:6.2f},{:6.2f},{:6.2f},{:6.2f},{:6.2f},{:6.2f}".format(1.0/(m*n**2.0)*(180.0/math.pi),
np.interp(np.linalg.norm(np.asarray([n,m,n*m])*25.0),mpcgrid,zgrid),
np.interp(np.linalg.norm(np.asarray([n,m,n*m])*50.0),mpcgrid,zgrid),
np.interp(np.linalg.norm(np.asarray([n,m,n*m])*100.0),mpcgrid,zgrid),
np.interp(np.linalg.norm(np.asarray([n,m,n*m])*250.0),mpcgrid,zgrid),
np.interp(np.linalg.norm(np.asarray([n,m,n*m])*500.0),mpcgrid,zgrid),
np.interp(np.linalg.norm(np.asarray([n,m,n*m])*1000.0),mpcgrid,zgrid))
m = 2.0 ; n = 3.0
print "{:6.1f},{:6.2f},{:6.2f},{:6.2f},{:6.2f},{:6.2f},{:6.2f}".format(1.0/(m*n**2.0)*(180.0/math.pi),
np.interp(np.linalg.norm(np.asarray([n,m,n*m])*25.0),mpcgrid,zgrid),
np.interp(np.linalg.norm(np.asarray([n,m,n*m])*50.0),mpcgrid,zgrid),
np.interp(np.linalg.norm(np.asarray([n,m,n*m])*100.0),mpcgrid,zgrid),
np.interp(np.linalg.norm(np.asarray([n,m,n*m])*250.0),mpcgrid,zgrid),
np.interp(np.linalg.norm(np.asarray([n,m,n*m])*500.0),mpcgrid,zgrid),
np.interp(np.linalg.norm(np.asarray([n,m,n*m])*1000.0),mpcgrid,zgrid))
m = 2.0 ; n = 1.0
print "{:6.1f},{:6.2f},{:6.2f},{:6.2f},{:6.2f},{:6.2f},{:6.2f},{:6.2f}".format(1.0/(m*n**2.0)*(180.0/math.pi),
np.interp(np.linalg.norm(np.asarray([n,m,n*m])*25.0),mpcgrid,zgrid),
np.interp(np.linalg.norm(np.asarray([n,m,n*m])*50.0),mpcgrid,zgrid),
np.interp(np.linalg.norm(np.asarray([n,m,n*m])*100.0),mpcgrid,zgrid),
np.interp(np.linalg.norm(np.asarray([n,m,n*m])*250.0),mpcgrid,zgrid),
np.interp(np.linalg.norm(np.asarray([n,m,n*m])*500.0),mpcgrid,zgrid),
np.interp(np.linalg.norm(np.asarray([n,m,n*m])*1000.0),mpcgrid,zgrid),
np.interp(np.linalg.norm(np.asarray([n,m,n*m])*3000.0),mpcgrid,zgrid))
| |
import sys
from django import forms
from django.core.exceptions import PermissionDenied
from django.core.urlresolvers import get_resolver
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import render_to_response, render
from django.template import Context, RequestContext, TemplateDoesNotExist
from django.views.debug import technical_500_response, SafeExceptionReporterFilter
from django.views.decorators.debug import (sensitive_post_parameters,
sensitive_variables)
from django.utils.log import getLogger
from regressiontests.views import BrokenException, except_args
from models import Article
def index_page(request):
    """Serve a static placeholder page used as the test index."""
    content = '<html><body>Dummy page</body></html>'
    return HttpResponse(content)
def custom_create(request):
    """Exercise the create_object generic view with a custom ModelForm."""
    from django.views.generic.create_update import create_object

    class SlugChangingArticleForm(forms.ModelForm):
        """Form that forces the slug to a fixed value on save."""
        class Meta:
            model = Article

        def save(self, *args, **kwargs):
            # Overwrite whatever slug was submitted before delegating.
            self.instance.slug = 'some-other-slug'
            return super(SlugChangingArticleForm, self).save(*args, **kwargs)

    return create_object(
        request,
        post_save_redirect='/create_update/view/article/%(slug)s/',
        form_class=SlugChangingArticleForm)
def raises(request):
    """Return the technical 500 response with a tricky local in scope.

    A callable that raises an exception is left in the stack frame's local
    vars to make sure it won't hijack the technical 500 response. See:
    http://code.djangoproject.com/ticket/15025
    """
    # Intentionally shadows the builtin; only its presence in f_locals matters.
    def callable():
        raise Exception
    try:
        raise Exception
    except Exception:
        return technical_500_response(request, *sys.exc_info())
def raises404(request):
    """Trigger a URL-resolution failure by resolving the empty path."""
    resolver = get_resolver(None)
    resolver.resolve('')
def raises403(request):
    """Unconditionally deny access by raising PermissionDenied."""
    raise PermissionDenied
def redirect(request):
    """
    Force an HTTP redirect to the relative URL "target/".
    """
    return HttpResponseRedirect("target/")
def view_exception(request, n):
    """Raise BrokenException built from the n-th entry of except_args."""
    raise BrokenException(except_args[int(n)])
def template_exception(request, n):
    """Render debug/template_exception.html with the n-th except_args entry."""
    return render_to_response('debug/template_exception.html',
        {'arg': except_args[int(n)]})
# Some views to exercise the shortcuts
def render_to_response_view(request):
    """render_to_response() with a plain context dictionary."""
    return render_to_response('debug/render_test.html', {
        'foo': 'FOO',
        'bar': 'BAR',
    })
def render_to_response_view_with_request_context(request):
    """render_to_response() with an explicit RequestContext."""
    return render_to_response('debug/render_test.html', {
        'foo': 'FOO',
        'bar': 'BAR',
    }, context_instance=RequestContext(request))
def render_to_response_view_with_mimetype(request):
    """render_to_response() with a custom mimetype."""
    return render_to_response('debug/render_test.html', {
        'foo': 'FOO',
        'bar': 'BAR',
    }, mimetype='application/x-rendertest')
def render_view(request):
    """render() with a plain context dictionary."""
    return render(request, 'debug/render_test.html', {
        'foo': 'FOO',
        'bar': 'BAR',
    })
def render_view_with_base_context(request):
    """render() with a bare Context instead of a RequestContext."""
    return render(request, 'debug/render_test.html', {
        'foo': 'FOO',
        'bar': 'BAR',
    }, context_instance=Context())
def render_view_with_content_type(request):
    """render() with a custom content type."""
    return render(request, 'debug/render_test.html', {
        'foo': 'FOO',
        'bar': 'BAR',
    }, content_type='application/x-rendertest')
def render_view_with_status(request):
    """render() with a non-default (403) status code."""
    return render(request, 'debug/render_test.html', {
        'foo': 'FOO',
        'bar': 'BAR',
    }, status=403)
def render_view_with_current_app(request):
    """render() with an explicit current_app."""
    return render(request, 'debug/render_test.html', {
        'foo': 'FOO',
        'bar': 'BAR',
    }, current_app="foobar_app")
def render_view_with_current_app_conflict(request):
    """render() with a conflicting argument combination."""
    # This should fail because we're passing both a current_app and a
    # context_instance:
    return render(request, 'debug/render_test.html', {
        'foo': 'FOO',
        'bar': 'BAR',
    }, current_app="foobar_app", context_instance=RequestContext(request))
def raises_template_does_not_exist(request):
    """Return the technical 500 page for a TemplateDoesNotExist error."""
    # We need to inspect the HTML generated by the fancy 500 debug view but
    # the test client ignores it, so we send it explicitly.
    try:
        return render_to_response('i_dont_exist.html')
    except TemplateDoesNotExist:
        return technical_500_response(request, *sys.exc_info())
def send_log(request, exc_info):
    """Log an internal server error for `request`, bypassing the filter
    on the AdminEmailHandler so error emails are sent even with DEBUG=True.
    """
    logger = getLogger('django.request')
    # The default logging config has a logging filter to ensure admin emails
    # are only sent with DEBUG=False, but since someone might choose to remove
    # that filter, we still want to be able to test the behavior of error
    # emails with DEBUG=True. So we need to remove the filter temporarily.
    admin_email_handler = [
        h for h in logger.handlers
        if h.__class__.__name__ == "AdminEmailHandler"
    ][0]
    orig_filters = admin_email_handler.filters
    admin_email_handler.filters = []
    try:
        logger.error('Internal Server Error: %s' % request.path,
            exc_info=exc_info,
            extra={
                'status_code': 500,
                'request': request
            }
        )
    finally:
        # Always restore the original filters, even if logging itself raised;
        # otherwise one failure would leave the handler unfiltered for every
        # subsequent test.
        admin_email_handler.filters = orig_filters
def non_sensitive_view(request):
    """View with no sensitive markings: all locals may appear in the report."""
    # Do not just use plain strings for the variables' values in the code
    # so that the tests don't return false positives when the function's source
    # is displayed in the exception report.
    cooked_eggs = ''.join(['s', 'c', 'r', 'a', 'm', 'b', 'l', 'e', 'd'])
    sauce = ''.join(['w', 'o', 'r', 'c', 'e', 's', 't', 'e', 'r', 's', 'h', 'i', 'r', 'e'])
    try:
        raise Exception
    except Exception:
        exc_info = sys.exc_info()
        send_log(request, exc_info)
        return technical_500_response(request, *exc_info)
@sensitive_variables('sauce')
@sensitive_post_parameters('bacon-key', 'sausage-key')
def sensitive_view(request):
    """View whose 'sauce' local and two POST parameters are marked sensitive
    and so must be filtered out of the error report."""
    # Do not just use plain strings for the variables' values in the code
    # so that the tests don't return false positives when the function's source
    # is displayed in the exception report.
    cooked_eggs = ''.join(['s', 'c', 'r', 'a', 'm', 'b', 'l', 'e', 'd'])
    sauce = ''.join(['w', 'o', 'r', 'c', 'e', 's', 't', 'e', 'r', 's', 'h', 'i', 'r', 'e'])
    try:
        raise Exception
    except Exception:
        exc_info = sys.exc_info()
        send_log(request, exc_info)
        return technical_500_response(request, *exc_info)
@sensitive_variables()
@sensitive_post_parameters()
def paranoid_view(request):
    """With no arguments, the decorators mark every local variable and every
    POST parameter as sensitive."""
    # Do not just use plain strings for the variables' values in the code
    # so that the tests don't return false positives when the function's source
    # is displayed in the exception report.
    cooked_eggs = ''.join(['s', 'c', 'r', 'a', 'm', 'b', 'l', 'e', 'd'])
    sauce = ''.join(['w', 'o', 'r', 'c', 'e', 's', 't', 'e', 'r', 's', 'h', 'i', 'r', 'e'])
    try:
        raise Exception
    except Exception:
        exc_info = sys.exc_info()
        send_log(request, exc_info)
        return technical_500_response(request, *exc_info)
class UnsafeExceptionReporterFilter(SafeExceptionReporterFilter):
    """
    Ignores all the filtering done by its parent class.
    """
    def get_post_parameters(self, request):
        # Return the POST data unfiltered.
        return request.POST
    def get_traceback_frame_variables(self, request, tb_frame):
        # Expose every local variable of the frame, sensitive or not.
        return tb_frame.f_locals.items()
@sensitive_variables()
@sensitive_post_parameters()
def custom_exception_reporter_filter_view(request):
    """Marks everything sensitive but installs UnsafeExceptionReporterFilter
    on the request, so nothing actually gets filtered from the report."""
    # Do not just use plain strings for the variables' values in the code
    # so that the tests don't return false positives when the function's source
    # is displayed in the exception report.
    cooked_eggs = ''.join(['s', 'c', 'r', 'a', 'm', 'b', 'l', 'e', 'd'])
    sauce = ''.join(['w', 'o', 'r', 'c', 'e', 's', 't', 'e', 'r', 's', 'h', 'i', 'r', 'e'])
    request.exception_reporter_filter = UnsafeExceptionReporterFilter()
    try:
        raise Exception
    except Exception:
        exc_info = sys.exc_info()
        send_log(request, exc_info)
        return technical_500_response(request, *exc_info)
| |
# Node-construction layer of the YAML dumper: maps Python objects to nodes.
__all__ = ['BaseRepresenter', 'SafeRepresenter', 'Representer',
    'RepresenterError']

from error import *
from nodes import *

import datetime

import sys, copy_reg, types
class RepresenterError(YAMLError):
    """Raised when an object cannot be represented as a YAML node."""
    pass
class BaseRepresenter(object):
    """Convert native Python objects into a graph of YAML nodes.

    Converter callables are looked up in two class-level registries:
    `yaml_representers` (exact type match) and `yaml_multi_representers`
    (matched along the type's MRO).  Objects that occur more than once are
    reused as aliases via `represented_objects`.
    """

    # Class-level registries, copied on first write per subclass (see
    # add_representer) so registrations never leak into parent classes.
    yaml_representers = {}
    yaml_multi_representers = {}

    def __init__(self, default_style=None, default_flow_style=None):
        # Fallback scalar style / collection flow style used whenever a
        # represent_* call does not specify one explicitly.
        self.default_style = default_style
        self.default_flow_style = default_flow_style
        self.represented_objects = {}  # id(obj) -> node already built for it
        self.object_keeper = []  # keeps objects alive so their ids stay unique
        self.alias_key = None

    def represent(self, data):
        """Represent one document: build its node tree, serialize it, and
        reset the per-document alias-tracking state."""
        node = self.represent_data(data)
        # serialize() is provided by the serializer class this is mixed with
        # in the full dumper -- TODO confirm against the composing module.
        self.serialize(node)
        self.represented_objects = {}
        self.object_keeper = []
        self.alias_key = None

    def get_classobj_bases(self, cls):
        """Return `cls` plus all of its ancestors (old-style classes have no
        __mro__, so the inheritance graph is walked recursively)."""
        bases = [cls]
        for base in cls.__bases__:
            bases.extend(self.get_classobj_bases(base))
        return bases

    def represent_data(self, data):
        """Dispatch `data` to a registered representer and return its node.

        Lookup order: exact type in yaml_representers, then each type along
        the MRO in yaml_multi_representers, then the catch-all None entries;
        as a last resort the object is stringified into a plain scalar node.
        """
        if self.ignore_aliases(data):
            self.alias_key = None
        else:
            self.alias_key = id(data)
        if self.alias_key is not None:
            # Reuse the node built for a previous occurrence of this object.
            if self.alias_key in self.represented_objects:
                node = self.represented_objects[self.alias_key]
                #if node is None:
                #    raise RepresenterError("recursive objects are not allowed: %r" % data)
                return node
            #self.represented_objects[alias_key] = None
            self.object_keeper.append(data)
        data_types = type(data).__mro__
        if type(data) is types.InstanceType:
            # Old-style instance: prepend its class hierarchy by hand.
            data_types = self.get_classobj_bases(data.__class__)+list(data_types)
        if data_types[0] in self.yaml_representers:
            node = self.yaml_representers[data_types[0]](self, data)
        else:
            for data_type in data_types:
                if data_type in self.yaml_multi_representers:
                    node = self.yaml_multi_representers[data_type](self, data)
                    break
            else:
                if None in self.yaml_multi_representers:
                    node = self.yaml_multi_representers[None](self, data)
                elif None in self.yaml_representers:
                    node = self.yaml_representers[None](self, data)
                else:
                    node = ScalarNode(None, unicode(data))
        #if alias_key is not None:
        #    self.represented_objects[alias_key] = node
        return node

    def add_representer(cls, data_type, representer):
        """Register `representer` for exact instances of `data_type`."""
        if not 'yaml_representers' in cls.__dict__:
            # Copy-on-write: keep the parent class's registry untouched.
            cls.yaml_representers = cls.yaml_representers.copy()
        cls.yaml_representers[data_type] = representer
    add_representer = classmethod(add_representer)

    def add_multi_representer(cls, data_type, representer):
        """Register `representer` for `data_type` and its subclasses."""
        if not 'yaml_multi_representers' in cls.__dict__:
            cls.yaml_multi_representers = cls.yaml_multi_representers.copy()
        cls.yaml_multi_representers[data_type] = representer
    add_multi_representer = classmethod(add_multi_representer)

    def represent_scalar(self, tag, value, style=None):
        """Build a ScalarNode (style defaults to self.default_style) and
        record it for alias reuse."""
        if style is None:
            style = self.default_style
        node = ScalarNode(tag, value, style=style)
        if self.alias_key is not None:
            self.represented_objects[self.alias_key] = node
        return node

    def represent_sequence(self, tag, sequence, flow_style=None):
        """Build a SequenceNode, representing each item recursively.

        Flow style falls back to default_flow_style, else it is chosen
        automatically: flow only when every item is a plain scalar.
        """
        value = []
        node = SequenceNode(tag, value, flow_style=flow_style)
        if self.alias_key is not None:
            # Register before recursing so self-references find the node.
            self.represented_objects[self.alias_key] = node
        best_style = True
        for item in sequence:
            node_item = self.represent_data(item)
            if not (isinstance(node_item, ScalarNode) and not node_item.style):
                best_style = False
            value.append(node_item)
        if flow_style is None:
            if self.default_flow_style is not None:
                node.flow_style = self.default_flow_style
            else:
                node.flow_style = best_style
        return node

    def represent_mapping(self, tag, mapping, flow_style=None):
        """Build a MappingNode; dict-like mappings are sorted by key for
        stable output.  Flow style is chosen as in represent_sequence."""
        value = []
        node = MappingNode(tag, value, flow_style=flow_style)
        if self.alias_key is not None:
            self.represented_objects[self.alias_key] = node
        best_style = True
        if hasattr(mapping, 'items'):
            mapping = mapping.items()
            mapping.sort()
        for item_key, item_value in mapping:
            node_key = self.represent_data(item_key)
            node_value = self.represent_data(item_value)
            if not (isinstance(node_key, ScalarNode) and not node_key.style):
                best_style = False
            if not (isinstance(node_value, ScalarNode) and not node_value.style):
                best_style = False
            value.append((node_key, node_value))
        if flow_style is None:
            if self.default_flow_style is not None:
                node.flow_style = self.default_flow_style
            else:
                node.flow_style = best_style
        return node

    def ignore_aliases(self, data):
        """Hook: return True to suppress anchors/aliases for `data`.
        The base implementation never suppresses them."""
        return False
class SafeRepresenter(BaseRepresenter):
    """Representer restricted to the standard YAML tags that a safe loader
    can reconstruct (null, bool, int, float, str, seq, map, set, timestamp,
    binary)."""

    def ignore_aliases(self, data):
        # Immutable scalar values never need anchors/aliases.  Implicitly
        # returns None (falsy) for every other type.
        if data is None:
            return True
        if isinstance(data, tuple) and data == ():
            return True
        if isinstance(data, (str, unicode, bool, int, float)):
            return True

    def represent_none(self, data):
        """None -> !!null scalar."""
        return self.represent_scalar(u'tag:yaml.org,2002:null',
                u'null')

    def represent_str(self, data):
        """Byte string -> !!str when it decodes as ASCII or UTF-8; otherwise
        a base64-encoded !!binary scalar in literal ('|') style."""
        tag = None
        style = None
        try:
            data = unicode(data, 'ascii')
            tag = u'tag:yaml.org,2002:str'
        except UnicodeDecodeError:
            try:
                data = unicode(data, 'utf-8')
                tag = u'tag:yaml.org,2002:str'
            except UnicodeDecodeError:
                data = data.encode('base64')
                tag = u'tag:yaml.org,2002:binary'
                style = '|'
        return self.represent_scalar(tag, data, style=style)

    def represent_unicode(self, data):
        """Unicode string -> !!str scalar."""
        return self.represent_scalar(u'tag:yaml.org,2002:str', data)

    def represent_bool(self, data):
        """bool -> !!bool ('true'/'false')."""
        if data:
            value = u'true'
        else:
            value = u'false'
        return self.represent_scalar(u'tag:yaml.org,2002:bool', value)

    def represent_int(self, data):
        """int -> !!int."""
        return self.represent_scalar(u'tag:yaml.org,2002:int', unicode(data))

    def represent_long(self, data):
        """long -> !!int (the safe schema does not distinguish int/long)."""
        return self.represent_scalar(u'tag:yaml.org,2002:int', unicode(data))

    # Compute the platform's float infinity by repeated squaring: repr()
    # stops changing once the value overflows to inf.
    inf_value = 1e300
    while repr(inf_value) != repr(inf_value*inf_value):
        inf_value *= inf_value

    def represent_float(self, data):
        """float -> !!float, spelling NaN and +/-inf as .nan/.inf/-.inf."""
        # `data != data` holds only for NaN; the second clause is presumably
        # a guard for platforms with broken float comparisons -- it cannot
        # be true under normal IEEE semantics.
        if data != data or (data == 0.0 and data == 1.0):
            value = u'.nan'
        elif data == self.inf_value:
            value = u'.inf'
        elif data == -self.inf_value:
            value = u'-.inf'
        else:
            value = unicode(repr(data)).lower()
            # Note that in some cases `repr(data)` represents a float number
            # without the decimal parts. For instance:
            #   >>> repr(1e17)
            #   '1e17'
            # Unfortunately, this is not a valid float representation according
            # to the definition of the `!!float` tag. We fix this by adding
            # '.0' before the 'e' symbol.
            if u'.' not in value and u'e' in value:
                value = value.replace(u'e', u'.0e', 1)
        return self.represent_scalar(u'tag:yaml.org,2002:float', value)

    def represent_list(self, data):
        """list/tuple -> !!seq."""
        #pairs = (len(data) > 0 and isinstance(data, list))
        #if pairs:
        #    for item in data:
        #        if not isinstance(item, tuple) or len(item) != 2:
        #            pairs = False
        #            break
        #if not pairs:
        return self.represent_sequence(u'tag:yaml.org,2002:seq', data)
        #value = []
        #for item_key, item_value in data:
        #    value.append(self.represent_mapping(u'tag:yaml.org,2002:map',
        #        [(item_key, item_value)]))
        #return SequenceNode(u'tag:yaml.org,2002:pairs', value)

    def represent_dict(self, data):
        """dict -> !!map."""
        return self.represent_mapping(u'tag:yaml.org,2002:map', data)

    def represent_set(self, data):
        """set -> !!set (a mapping whose values are all null)."""
        value = {}
        for key in data:
            value[key] = None
        return self.represent_mapping(u'tag:yaml.org,2002:set', value)

    def represent_date(self, data):
        """datetime.date -> !!timestamp (ISO 8601)."""
        value = unicode(data.isoformat())
        return self.represent_scalar(u'tag:yaml.org,2002:timestamp', value)

    def represent_datetime(self, data):
        """datetime.datetime -> !!timestamp (ISO 8601, space separator)."""
        value = unicode(data.isoformat(' '))
        return self.represent_scalar(u'tag:yaml.org,2002:timestamp', value)

    def represent_yaml_object(self, tag, data, cls, flow_style=None):
        """Represent an application object as a mapping of its state
        (__getstate__() if defined, else a copy of __dict__)."""
        if hasattr(data, '__getstate__'):
            state = data.__getstate__()
        else:
            state = data.__dict__.copy()
        return self.represent_mapping(tag, state, flow_style=flow_style)

    def represent_undefined(self, data):
        """Catch-all: the safe schema refuses unknown object types."""
        raise RepresenterError("cannot represent an object", data)
# Register the safe converters for the built-in Python types.  The final
# None entry is the catch-all: any unregistered type is rejected.
SafeRepresenter.add_representer(type(None),
        SafeRepresenter.represent_none)

SafeRepresenter.add_representer(str,
        SafeRepresenter.represent_str)

SafeRepresenter.add_representer(unicode,
        SafeRepresenter.represent_unicode)

SafeRepresenter.add_representer(bool,
        SafeRepresenter.represent_bool)

SafeRepresenter.add_representer(int,
        SafeRepresenter.represent_int)

SafeRepresenter.add_representer(long,
        SafeRepresenter.represent_long)

SafeRepresenter.add_representer(float,
        SafeRepresenter.represent_float)

SafeRepresenter.add_representer(list,
        SafeRepresenter.represent_list)

# Tuples are emitted as plain sequences in the safe schema.
SafeRepresenter.add_representer(tuple,
        SafeRepresenter.represent_list)

SafeRepresenter.add_representer(dict,
        SafeRepresenter.represent_dict)

SafeRepresenter.add_representer(set,
        SafeRepresenter.represent_set)

SafeRepresenter.add_representer(datetime.date,
        SafeRepresenter.represent_date)

SafeRepresenter.add_representer(datetime.datetime,
        SafeRepresenter.represent_datetime)

SafeRepresenter.add_representer(None,
        SafeRepresenter.represent_undefined)
class Representer(SafeRepresenter):
    """Representer for the full (unsafe) dumper: adds Python-specific tags
    (!!python/...) for types the safe schema cannot round-trip."""

    def represent_str(self, data):
        """Byte string -> !!str (ASCII), !!python/str (non-ASCII UTF-8), or
        base64 !!binary in literal style."""
        tag = None
        style = None
        try:
            data = unicode(data, 'ascii')
            tag = u'tag:yaml.org,2002:str'
        except UnicodeDecodeError:
            try:
                data = unicode(data, 'utf-8')
                tag = u'tag:yaml.org,2002:python/str'
            except UnicodeDecodeError:
                data = data.encode('base64')
                tag = u'tag:yaml.org,2002:binary'
                style = '|'
        return self.represent_scalar(tag, data, style=style)

    def represent_unicode(self, data):
        """Unicode string -> !!python/unicode when pure ASCII (so the type
        survives a round-trip), plain !!str otherwise."""
        tag = None
        try:
            data.encode('ascii')
            tag = u'tag:yaml.org,2002:python/unicode'
        except UnicodeEncodeError:
            tag = u'tag:yaml.org,2002:str'
        return self.represent_scalar(tag, data)

    def represent_long(self, data):
        """long -> !!int, or !!python/long when the value does not survive
        int() conversion unchanged."""
        tag = u'tag:yaml.org,2002:int'
        # NOTE(review): identity (`is`) test relies on int(data) returning
        # the very same object for values that fit in a plain int -- confirm
        # this is the intended Python 2 behavior being exploited.
        if int(data) is not data:
            tag = u'tag:yaml.org,2002:python/long'
        return self.represent_scalar(tag, unicode(data))

    def represent_complex(self, data):
        """complex -> !!python/complex scalar, omitting zero components."""
        if data.imag == 0.0:
            data = u'%r' % data.real
        elif data.real == 0.0:
            data = u'%rj' % data.imag
        elif data.imag > 0:
            data = u'%r+%rj' % (data.real, data.imag)
        else:
            data = u'%r%rj' % (data.real, data.imag)
        return self.represent_scalar(u'tag:yaml.org,2002:python/complex', data)

    def represent_tuple(self, data):
        """tuple -> !!python/tuple sequence (type preserved on load)."""
        return self.represent_sequence(u'tag:yaml.org,2002:python/tuple', data)

    def represent_name(self, data):
        """Class or function -> !!python/name:<module>.<name> empty scalar."""
        name = u'%s.%s' % (data.__module__, data.__name__)
        return self.represent_scalar(u'tag:yaml.org,2002:python/name:'+name, u'')

    def represent_module(self, data):
        """Module -> !!python/module:<name> empty scalar."""
        return self.represent_scalar(
                u'tag:yaml.org,2002:python/module:'+data.__name__, u'')

    def represent_instance(self, data):
        """Old-style class instance -> !!python/object[/new] node."""
        # For instances of classic classes, we use __getinitargs__ and
        # __getstate__ to serialize the data.
        # If data.__getinitargs__ exists, the object must be reconstructed by
        # calling cls(**args), where args is a tuple returned by
        # __getinitargs__. Otherwise, the cls.__init__ method should never be
        # called and the class instance is created by instantiating a trivial
        # class and assigning to the instance's __class__ variable.
        # If data.__getstate__ exists, it returns the state of the object.
        # Otherwise, the state of the object is data.__dict__.
        # We produce either a !!python/object or !!python/object/new node.
        # If data.__getinitargs__ does not exist and state is a dictionary, we
        # produce a !!python/object node . Otherwise we produce a
        # !!python/object/new node.
        cls = data.__class__
        class_name = u'%s.%s' % (cls.__module__, cls.__name__)
        args = None
        state = None
        if hasattr(data, '__getinitargs__'):
            args = list(data.__getinitargs__())
        if hasattr(data, '__getstate__'):
            state = data.__getstate__()
        else:
            state = data.__dict__
        if args is None and isinstance(state, dict):
            return self.represent_mapping(
                    u'tag:yaml.org,2002:python/object:'+class_name, state)
        if isinstance(state, dict) and not state:
            return self.represent_sequence(
                    u'tag:yaml.org,2002:python/object/new:'+class_name, args)
        value = {}
        if args:
            value['args'] = args
        value['state'] = state
        return self.represent_mapping(
                u'tag:yaml.org,2002:python/object/new:'+class_name, value)

    def represent_object(self, data):
        """New-style object -> node built from its __reduce__ protocol."""
        # We use __reduce__ API to save the data. data.__reduce__ returns
        # a tuple of length 2-5:
        #   (function, args, state, listitems, dictitems)
        # For reconstructing, we calls function(*args), then set its state,
        # listitems, and dictitems if they are not None.
        # A special case is when function.__name__ == '__newobj__'. In this
        # case we create the object with args[0].__new__(*args).
        # Another special case is when __reduce__ returns a string - we don't
        # support it.
        # We produce a !!python/object, !!python/object/new or
        # !!python/object/apply node.
        cls = type(data)
        if cls in copy_reg.dispatch_table:
            reduce = copy_reg.dispatch_table[cls](data)
        elif hasattr(data, '__reduce_ex__'):
            reduce = data.__reduce_ex__(2)
        elif hasattr(data, '__reduce__'):
            reduce = data.__reduce__()
        else:
            raise RepresenterError("cannot represent an object", data)
        # Pad the reduce tuple to exactly five elements.
        reduce = (list(reduce)+[None]*5)[:5]
        function, args, state, listitems, dictitems = reduce
        args = list(args)
        if state is None:
            state = {}
        if listitems is not None:
            listitems = list(listitems)
        if dictitems is not None:
            dictitems = dict(dictitems)
        if function.__name__ == '__newobj__':
            function = args[0]
            args = args[1:]
            tag = u'tag:yaml.org,2002:python/object/new:'
            newobj = True
        else:
            tag = u'tag:yaml.org,2002:python/object/apply:'
            newobj = False
        function_name = u'%s.%s' % (function.__module__, function.__name__)
        if not args and not listitems and not dictitems \
                and isinstance(state, dict) and newobj:
            # Simple case: state-only mapping under !!python/object.
            return self.represent_mapping(
                    u'tag:yaml.org,2002:python/object:'+function_name, state)
        if not listitems and not dictitems  \
                and isinstance(state, dict) and not state:
            return self.represent_sequence(tag+function_name, args)
        value = {}
        if args:
            value['args'] = args
        if state or not isinstance(state, dict):
            value['state'] = state
        if listitems:
            value['listitems'] = listitems
        if dictitems:
            value['dictitems'] = dictitems
        return self.represent_mapping(tag+function_name, value)
# Register the Python-specific converters on top of the safe ones.  The two
# multi-representers at the end pick up old-style instances and, as a last
# resort, any new-style object via the __reduce__ protocol.
Representer.add_representer(str,
        Representer.represent_str)

Representer.add_representer(unicode,
        Representer.represent_unicode)

Representer.add_representer(long,
        Representer.represent_long)

Representer.add_representer(complex,
        Representer.represent_complex)

Representer.add_representer(tuple,
        Representer.represent_tuple)

Representer.add_representer(type,
        Representer.represent_name)

Representer.add_representer(types.ClassType,
        Representer.represent_name)

Representer.add_representer(types.FunctionType,
        Representer.represent_name)

Representer.add_representer(types.BuiltinFunctionType,
        Representer.represent_name)

Representer.add_representer(types.ModuleType,
        Representer.represent_module)

Representer.add_multi_representer(types.InstanceType,
        Representer.represent_instance)

Representer.add_multi_representer(object,
        Representer.represent_object)
| |
""" Virana plot tool for plotting. Part of the Virana package.
(c) 2013, Michael Zeidler, MPI for Informatics.
"""
import logging
import sys

import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
import colorsys
from numpy import arange
from matplotlib.patches import Rectangle
from matplotlib import rcParams
try:
    from plumbum import cli
except ImportError:
    # plumbum is a hard requirement for the command-line interface.
    # BUGFIX: `sys` was used here without ever being imported, so a missing
    # plumbum produced a NameError instead of this friendly message.
    import sys
    message = 'This script requires the plumbum python package\n'
    sys.stderr.write(message)
    sys.exit(1)
class Family:
    """Per-family container mapping sample names to Sample records."""

    def __init__(self, name, typ, samples=None):
        self.name = name
        # `is not None` (identity), not `!= None`; also ensures each Family
        # gets its own dict unless the caller supplies one.
        if samples is not None:
            self.samples = samples
        else:
            self.samples = {}
        self.typ = typ

    def add(self, sample, region, state, reads, bp, length):
        """Record a region hit under `sample`, creating the Sample on demand.

        Returns whatever Sample.add returns: True if the region was new for
        that sample, False if it was already present.
        """
        if sample not in self.samples:
            self.samples[sample] = Sample(sample)
        return self.samples[sample].add(region, state, reads, bp, length)

    def __str__(self):
        return "{'type':'%s', 'samples':%s}" % (self.typ, str(self.samples))

    def __repr__(self):
        return "{'type':'%s', 'samples':%s}" % (self.typ, str(self.samples))
class Sample:
    """One sequenced sample; maps region ids to Region records."""

    def __init__(self, name, regions=None):
        self.name = name
        # `is not None` (identity), not `!= None`; each Sample gets its own
        # dict unless the caller supplies one.
        if regions is not None:
            self.regions = regions
        else:
            self.regions = {}

    def add(self, region, state, reads, bp, length):
        """Register a region once; return True if newly added, False if the
        region id was already present (duplicates are ignored)."""
        if region not in self.regions:
            self.regions[region] = Region(region, state, reads, bp, length)
            return True
        return False

    def __str__(self):
        return str(self.regions)

    def __repr__(self):
        return str(self.regions)
class Region:
    """A region record: classification state plus read/bp/length counters."""

    def __init__(self, id, state, reads, bp, length):
        self.id = id
        self.state = state
        self.reads = reads
        self.bp = bp
        self.length = length

    def __str__(self):
        return ("{'state':'%s', 'reads':%s, 'bp':%s, 'length':%s}"
                % (self.state, self.reads, self.bp, self.length))

    # repr and str are intentionally identical.
    __repr__ = __str__
class CLI(cli.Application):
    """Plot."""
    PROGNAME = "vref"
    VERSION = "1.0.0"
    DESCRIPTION = \
"""DESCRIPTION: Virana vplot - plot.
The Virana plot utility ('vplot') generates plots for the statistic file output of vhom.
https://github.com/schelhorn/virana
Schelhorn S-E, Fischer M, Tolosi L, Altmueller J, Nuernberg P, et al. (2013)
Sensitive Detection of Viral Transcripts in Human Tumor Transcriptomes.
PLoS Comput Biol 9(10): e1003228. doi:10.1371/journal.pcbi.1003228"""
    USAGE = """USAGE: The program has one mode that can be accessed by
[vplot | python vplot.py] plot
"""
    def main(self, *args):
        # Positional arguments are not accepted; only subcommands are.
        if args:
            print self.DESCRIPTION
            print
            print self.USAGE
            print("ERROR: Unknown command %r" % (args[0]))
            return 1
        # Invoked without a subcommand: show the help text and fail.
        if not self.nested_command:
            print self.DESCRIPTION
            print
            print self.USAGE
            print("ERROR : No command given")
            return 1
@CLI.subcommand("plot")
class Plotter(cli.Application):
""" Generate plot for vhom statistic file."""
stat_file = cli.SwitchAttr(['-s', '--stats'],str,
mandatory=True,
help="Path to statistic file generated with vhom.")
plot_path = cli.SwitchAttr(['-o', '--output_file'], str, mandatory=True,
help="Sets the pdf output file. Note that all the plots are stored within a single pdf file.")
debug = cli.Flag(["-d", "--debug"], help="Enable debug messages")
rcParams['figure.dpi'] = 100
rcParams['axes.facecolor'] = 'white'
rcParams['font.size'] = 10
rcParams['patch.edgecolor'] = 'white'
rcParams['patch.linewidth']=0.5
rcParams['patch.facecolor'] = 'black'
rcParams['font.family'] = 'StixGeneral'
def means(self,start,stop):
list = arange(start,stop)
return (float(sum(list))/float(len(list)))
def remove_border(self,axes=None, top=False, right=False, left=True, bottom=True):
"""
Minimize chartjunk by stripping out unnecesasry plot borders and axis ticks
The top/right/left/bottom keywords toggle whether the corresponding plot border is drawn
"""
ax = axes or plt.gca()
ax.spines['top'].set_visible(top)
ax.spines['right'].set_visible(right)
ax.spines['left'].set_visible(left)
ax.spines['bottom'].set_visible(bottom)
#turn off all ticks
ax.yaxis.set_ticks_position('none')
ax.xaxis.set_ticks_position('none')
#now re-enable visibles
if top:
ax.xaxis.tick_top()
if bottom:
ax.xaxis.tick_bottom()
if left:
ax.yaxis.tick_left()
if right:
ax.yaxis.tick_right()
def customeLegend(self,color_map):
legend_map=[[],[]]
for sample in color_map:
legend_map[0].append(Rectangle((0,0),0,0,visible=False))
legend_map[1].append(sample)
for label in color_map[sample]:
box=Rectangle((0,0),1,1,color=color_map[sample][label],fill=True)
legend_map[0].append(box)
legend_map[1].append(label)
plt.figlegend(legend_map[0],legend_map[1],loc=9,ncol=len(color_map),prop={'size':8})
def rgb_color_variants(self,stat_dict):
n_samples=0
sample_names=[]
for fam,obj in stat_dict.iteritems():
if len(obj.samples)>n_samples:
n_samples=len(obj.samples)
for sample in obj.samples.iterkeys():
if sample not in sample_names:
sample_names.append(sample)
assert n_samples == len(sample_names) #iterate over samples -> color_map
rgbs={}
i=1
for sample in sample_names:
rgbs[sample]={}
h=1.*(i/n_samples)
h=float(h)/2.
l=[0.1,0.4,0.7]
rgbs[sample]["pathogen"]=colorsys.hls_to_rgb(h,l[0],1.0)
rgbs[sample]["ambiguous"]=colorsys.hls_to_rgb(h,l[1],1.0)
rgbs[sample]["human"]=colorsys.hls_to_rgb(h,l[2],1.0)
i+=1
return rgbs
def parseStatFile(self,filename):
    """Parse a tab-separated statistics file into {family name: Family}.

    The first row is treated as a header and skipped.  Each data row
    must have exactly nine tab-separated fields.

    Fixes: the old code shadowed the builtin ``file`` and wrapped each
    field in ``map(str, ...)`` even though ``str.split`` already yields
    strings.
    """
    with open(filename, 'r') as fh:
        rows = [line.split('\t') for line in fh]
    family_dict = {}
    for row in rows[1:]:
        # NOTE: the last field keeps its trailing newline, as before;
        # coverage is parsed but unused here.
        fam_type, fam, region, sample, state, reads, length, bp, coverage = row
        if fam not in family_dict:
            family_dict[fam] = Family(fam, fam_type)
        family_dict[fam].add(sample, region, state, reads, bp, length)
    return family_dict
def generatePyPlot(self,stat_dict,output_file):
    """Render stacked per-sample bar charts into a multi-page PDF.

    One page per metric ("reads", then "bp"); one group of bars per
    family, one stacked bar per sample, one segment per region colored
    by its state via ``rgb_color_variants``.

    NOTE(review): relies on Python-2-only dict.iteritems(); bar and
    tick order follow the iteration order of ``stat_dict``.
    """
    pp = PdfPages(output_file)
    color_map=self.rgb_color_variants(stat_dict)
    n_samples=len(color_map)
    for plot in ["reads","bp"]:
        fig=plt.figure()
        i=0
        for f,fam in stat_dict.iteritems():
            j=0
            for s,sample in fam.samples.iteritems():
                values = []
                color = []
                for r,region in sample.regions.iteritems():
                    values.append(int(getattr(region,plot)))
                    color.append(color_map[s][region.state])
                if(len(values)>1):
                    # Stack the segments: b accumulates the running
                    # bottom offset for each bar segment.
                    b = values[:-1]
                    b.insert(0,0);
                    for u in range(1,len(b)):
                        b[u]+=b[u-1]
                    plt.bar([i+j]*len(values),values,bottom=b,width=0.8,color=color)
                else:
                    plt.bar([i+j]*len(values),values,width=0.8,color=color)
                j+=1
            # Leave a one-bar gap between family groups.
            i+= 1+n_samples
        # Center each family label under its group of bars.
        pos=[self.means(x,x+n_samples)+0.4 for x in arange(0,i+n_samples,1+n_samples)]
        plt.xticks(pos, stat_dict.keys(), rotation='vertical')
        if(plot=="reads"):
            plt.ylabel("Cumulative reads assigned to family")
        if(plot=="bp"):
            plt.ylabel("Cumulative basepairs assigned to family")
        plt.xlabel("")
        fig.subplots_adjust(bottom=0.25,wspace=0.5,top=0.8)
        self.remove_border()
        self.customeLegend(color_map)
        pp.savefig(fig)
    pp.close()
def main(self):
    """Entry point: parse the statistics file and write the PDF plots.

    Enables DEBUG logging when ``self.debug`` is set.
    """
    if self.debug:
        logging.getLogger().setLevel(logging.DEBUG)
    stat_dict=self.parseStatFile(self.stat_file)
    self.generatePyPlot(stat_dict,self.plot_path)
# Script entry point; CLI is expected to be defined earlier in this file.
if __name__ == "__main__":
    CLI.run()
| |
# This is a copy of the Python logging.config.dictconfig module. It is
# provided here for backwards compatibility for Python versions prior to 2.7.
#
# Copyright 2009-2010 by Vinay Sajip. All Rights Reserved.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose and without fee is hereby granted,
# provided that the above copyright notice appear in all copies and that
# both that copyright notice and this permission notice appear in
# supporting documentation, and that the name of Vinay Sajip
# not be used in advertising or publicity pertaining to distribution
# of the software without specific, written prior permission.
# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR
# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import logging.handlers
import re
import sys
import types
# Case-insensitive pattern for a legal Python identifier.
IDENTIFIER = re.compile('^[a-z_][a-z0-9_]*$', re.I)
def valid_ident(s):
    """Return True if *s* is a valid Python identifier, else raise ValueError."""
    if IDENTIFIER.match(s) is None:
        raise ValueError('Not a valid Python identifier: %r' % s)
    return True
#
# This function is defined in logging only in recent versions of Python
#
try:
    from logging import _checkLevel
except ImportError:
    def _checkLevel(level):
        # Fallback mirroring logging._checkLevel: accept a numeric
        # level or a known level name and return the numeric value.
        if isinstance(level, int):
            rv = level
        elif str(level) == level:
            # NOTE(review): logging._levelNames exists on Python 2 only.
            if level not in logging._levelNames:
                raise ValueError('Unknown level: %r' % level)
            rv = logging._levelNames[level]
        else:
            raise TypeError('Level not an integer or a '
                            'valid string: %r' % level)
        return rv
# The ConvertingXXX classes are wrappers around standard Python containers,
# and they serve to convert any suitable values in the container. The
# conversion converts base dicts, lists and tuples to their wrapped
# equivalents, whereas strings which match a conversion format are converted
# appropriately.
#
# Each wrapper should have a configurator attribute holding the actual
# configurator to use for conversion.
class ConvertingDict(dict):
    """A converting dictionary wrapper.

    Values are converted lazily on access via ``self.configurator``;
    converted containers record ``parent``/``key`` back-references.
    """
    def __getitem__(self, key):
        value = dict.__getitem__(self, key)
        result = self.configurator.convert(value)
        #If the converted value is different, save for next time
        if value is not result:
            self[key] = result
            if type(result) in (ConvertingDict, ConvertingList,
                                ConvertingTuple):
                result.parent = self
                result.key = key
        return result
    def get(self, key, default=None):
        value = dict.get(self, key, default)
        result = self.configurator.convert(value)
        #If the converted value is different, save for next time
        if value is not result:
            self[key] = result
            if type(result) in (ConvertingDict, ConvertingList,
                                ConvertingTuple):
                result.parent = self
                result.key = key
        return result
    def pop(self, key, default=None):
        value = dict.pop(self, key, default)
        result = self.configurator.convert(value)
        # No write-back here: the key has already been removed.
        if value is not result:
            if type(result) in (ConvertingDict, ConvertingList,
                                ConvertingTuple):
                result.parent = self
                result.key = key
        return result
class ConvertingList(list):
    """A converting list wrapper.

    Like ConvertingDict: items are converted on access and converted
    containers are written back with parent/key bookkeeping.
    """
    def __getitem__(self, key):
        value = list.__getitem__(self, key)
        result = self.configurator.convert(value)
        #If the converted value is different, save for next time
        if value is not result:
            self[key] = result
            if type(result) in (ConvertingDict, ConvertingList,
                                ConvertingTuple):
                result.parent = self
                result.key = key
        return result
    def pop(self, idx=-1):
        value = list.pop(self, idx)
        result = self.configurator.convert(value)
        # No write-back: the item has been removed from the list.
        if value is not result:
            if type(result) in (ConvertingDict, ConvertingList,
                                ConvertingTuple):
                result.parent = self
        return result
class ConvertingTuple(tuple):
    """A converting tuple wrapper.

    Tuples are immutable, so the converted result is never stored back;
    only the parent/key back-references are recorded.
    """
    def __getitem__(self, key):
        value = tuple.__getitem__(self, key)
        result = self.configurator.convert(value)
        if value is not result:
            if type(result) in (ConvertingDict, ConvertingList,
                                ConvertingTuple):
                result.parent = self
                result.key = key
        return result
class BaseConfigurator(object):
    """
    The configurator base class which defines some useful defaults.
    """
    # Recognizes "prefix://suffix" conversion strings (e.g. ext://, cfg://).
    CONVERT_PATTERN = re.compile(r'^(?P<prefix>[a-z]+)://(?P<suffix>.*)$')
    WORD_PATTERN = re.compile(r'^\s*(\w+)\s*')
    DOT_PATTERN = re.compile(r'^\.\s*(\w+)\s*')
    INDEX_PATTERN = re.compile(r'^\[\s*(\w+)\s*\]\s*')
    DIGIT_PATTERN = re.compile(r'^\d+$')
    # Maps conversion prefixes to the method names that handle them.
    value_converters = {
        'ext' : 'ext_convert',
        'cfg' : 'cfg_convert',
    }
    # We might want to use a different one, e.g. importlib
    importer = __import__
    def __init__(self, config):
        # Wrap the raw dict so nested values are converted lazily on access.
        self.config = ConvertingDict(config)
        self.config.configurator = self
    def resolve(self, s):
        """
        Resolve strings to objects using standard import and attribute
        syntax.
        """
        name = s.split('.')
        used = name.pop(0)
        try:
            found = self.importer(used)
            for frag in name:
                used += '.' + frag
                try:
                    found = getattr(found, frag)
                except AttributeError:
                    # Attribute missing: import the longer dotted path,
                    # then retry the attribute lookup.
                    self.importer(used)
                    found = getattr(found, frag)
            return found
        except ImportError:
            e, tb = sys.exc_info()[1:]
            v = ValueError('Cannot resolve %r: %s' % (s, e))
            v.__cause__, v.__traceback__ = e, tb
            raise v
    def ext_convert(self, value):
        """Default converter for the ext:// protocol."""
        return self.resolve(value)
    def cfg_convert(self, value):
        """Default converter for the cfg:// protocol."""
        rest = value
        m = self.WORD_PATTERN.match(rest)
        if m is None:
            raise ValueError("Unable to convert %r" % value)
        else:
            rest = rest[m.end():]
            d = self.config[m.groups()[0]]
            #print d, rest
            # Walk ".attr" and "[index]" accessors until the path is consumed.
            while rest:
                m = self.DOT_PATTERN.match(rest)
                if m:
                    d = d[m.groups()[0]]
                else:
                    m = self.INDEX_PATTERN.match(rest)
                    if m:
                        idx = m.groups()[0]
                        if not self.DIGIT_PATTERN.match(idx):
                            d = d[idx]
                        else:
                            try:
                                n = int(idx) # try as number first (most likely)
                                d = d[n]
                            except TypeError:
                                d = d[idx]
                if m:
                    rest = rest[m.end():]
                else:
                    raise ValueError('Unable to convert '
                                     '%r at %r' % (value, rest))
        #rest should be empty
        return d
    def convert(self, value):
        """
        Convert values to an appropriate type. dicts, lists and tuples are
        replaced by their converting alternatives. Strings are checked to
        see if they have a conversion format and are converted if they do.
        """
        if not isinstance(value, ConvertingDict) and isinstance(value, dict):
            value = ConvertingDict(value)
            value.configurator = self
        elif not isinstance(value, ConvertingList) and isinstance(value, list):
            value = ConvertingList(value)
            value.configurator = self
        elif not isinstance(value, ConvertingTuple) and\
                 isinstance(value, tuple):
            value = ConvertingTuple(value)
            value.configurator = self
        elif isinstance(value, basestring): # str for py3k
            # NOTE(review): basestring exists on Python 2 only.
            m = self.CONVERT_PATTERN.match(value)
            if m:
                d = m.groupdict()
                prefix = d['prefix']
                converter = self.value_converters.get(prefix, None)
                if converter:
                    suffix = d['suffix']
                    converter = getattr(self, converter)
                    value = converter(suffix)
        return value
    def configure_custom(self, config):
        """Configure an object with a user-supplied factory."""
        # '()' holds the factory (callable or dotted path to resolve).
        c = config.pop('()')
        if not hasattr(c, '__call__') and hasattr(types, 'ClassType') and type(c) != types.ClassType:
            c = self.resolve(c)
        # '.' holds attributes to set on the constructed object.
        props = config.pop('.', None)
        # Check for valid identifiers
        kwargs = dict([(k, config[k]) for k in config if valid_ident(k)])
        result = c(**kwargs)
        if props:
            for name, value in props.items():
                setattr(result, name, value)
        return result
    def as_tuple(self, value):
        """Utility function which converts lists to tuples."""
        if isinstance(value, list):
            value = tuple(value)
        return value
class DictConfigurator(BaseConfigurator):
    """
    Configure logging using a dictionary-like object to describe the
    configuration.
    """
    # NOTE(review): this vendored backport uses Python-2-only syntax
    # ("except StandardError, e") and will not import on Python 3.
    def configure(self):
        """Do the configuration."""
        config = self.config
        if 'version' not in config:
            raise ValueError("dictionary doesn't specify a version")
        if config['version'] != 1:
            raise ValueError("Unsupported version: %s" % config['version'])
        incremental = config.pop('incremental', False)
        EMPTY_DICT = {}
        # Hold the logging module lock for the whole (re)configuration.
        logging._acquireLock()
        try:
            if incremental:
                handlers = config.get('handlers', EMPTY_DICT)
                # incremental handler config only if handler name
                # ties in to logging._handlers (Python 2.7)
                if sys.version_info[:2] == (2, 7):
                    for name in handlers:
                        if name not in logging._handlers:
                            raise ValueError('No handler found with '
                                             'name %r'  % name)
                        else:
                            try:
                                handler = logging._handlers[name]
                                handler_config = handlers[name]
                                level = handler_config.get('level', None)
                                if level:
                                    handler.setLevel(_checkLevel(level))
                            except StandardError, e:
                                raise ValueError('Unable to configure handler '
                                                 '%r: %s' % (name, e))
                loggers = config.get('loggers', EMPTY_DICT)
                for name in loggers:
                    try:
                        self.configure_logger(name, loggers[name], True)
                    except StandardError, e:
                        raise ValueError('Unable to configure logger '
                                         '%r: %s' % (name, e))
                root = config.get('root', None)
                if root:
                    try:
                        self.configure_root(root, True)
                    except StandardError, e:
                        raise ValueError('Unable to configure root '
                                         'logger: %s' % e)
            else:
                disable_existing = config.pop('disable_existing_loggers', True)
                logging._handlers.clear()
                del logging._handlerList[:]
                # Do formatters first - they don't refer to anything else
                formatters = config.get('formatters', EMPTY_DICT)
                for name in formatters:
                    try:
                        formatters[name] = self.configure_formatter(
                                                            formatters[name])
                    except StandardError, e:
                        raise ValueError('Unable to configure '
                                         'formatter %r: %s' % (name, e))
                # Next, do filters - they don't refer to anything else, either
                filters = config.get('filters', EMPTY_DICT)
                for name in filters:
                    try:
                        filters[name] = self.configure_filter(filters[name])
                    except StandardError, e:
                        raise ValueError('Unable to configure '
                                         'filter %r: %s' % (name, e))
                # Next, do handlers - they refer to formatters and filters
                # As handlers can refer to other handlers, sort the keys
                # to allow a deterministic order of configuration
                handlers = config.get('handlers', EMPTY_DICT)
                for name in sorted(handlers):
                    try:
                        handler = self.configure_handler(handlers[name])
                        handler.name = name
                        handlers[name] = handler
                    except StandardError, e:
                        raise ValueError('Unable to configure handler '
                                         '%r: %s' % (name, e))
                # Next, do loggers - they refer to handlers and filters
                #we don't want to lose the existing loggers,
                #since other threads may have pointers to them.
                #existing is set to contain all existing loggers,
                #and as we go through the new configuration we
                #remove any which are configured. At the end,
                #what's left in existing is the set of loggers
                #which were in the previous configuration but
                #which are not in the new configuration.
                root = logging.root
                existing = root.manager.loggerDict.keys()
                #The list needs to be sorted so that we can
                #avoid disabling child loggers of explicitly
                #named loggers. With a sorted list it is easier
                #to find the child loggers.
                existing.sort()
                #We'll keep the list of existing loggers
                #which are children of named loggers here...
                child_loggers = []
                #now set up the new ones...
                loggers = config.get('loggers', EMPTY_DICT)
                for name in loggers:
                    if name in existing:
                        i = existing.index(name)
                        prefixed = name + "."
                        pflen = len(prefixed)
                        num_existing = len(existing)
                        i = i + 1 # look at the entry after name
                        while (i < num_existing) and\
                              (existing[i][:pflen] == prefixed):
                            child_loggers.append(existing[i])
                            i = i + 1
                        existing.remove(name)
                    try:
                        self.configure_logger(name, loggers[name])
                    except StandardError, e:
                        raise ValueError('Unable to configure logger '
                                         '%r: %s' % (name, e))
                #Disable any old loggers. There's no point deleting
                #them as other threads may continue to hold references
                #and by disabling them, you stop them doing any logging.
                #However, don't disable children of named loggers, as that's
                #probably not what was intended by the user.
                for log in existing:
                    logger = root.manager.loggerDict[log]
                    if log in child_loggers:
                        logger.level = logging.NOTSET
                        logger.handlers = []
                        logger.propagate = True
                    elif disable_existing:
                        logger.disabled = True
                # And finally, do the root logger
                root = config.get('root', None)
                if root:
                    try:
                        self.configure_root(root)
                    except StandardError, e:
                        raise ValueError('Unable to configure root '
                                         'logger: %s' % e)
        finally:
            logging._releaseLock()
    def configure_formatter(self, config):
        """Configure a formatter from a dictionary."""
        if '()' in config:
            factory = config['()'] # for use in exception handler
            try:
                result = self.configure_custom(config)
            except TypeError, te:
                if "'format'" not in str(te):
                    raise
                #Name of parameter changed from fmt to format.
                #Retry with old name.
                #This is so that code can be used with older Python versions
                #(e.g. by Django)
                config['fmt'] = config.pop('format')
                config['()'] = factory
                result = self.configure_custom(config)
        else:
            fmt = config.get('format', None)
            dfmt = config.get('datefmt', None)
            result = logging.Formatter(fmt, dfmt)
        return result
    def configure_filter(self, config):
        """Configure a filter from a dictionary."""
        if '()' in config:
            result = self.configure_custom(config)
        else:
            name = config.get('name', '')
            result = logging.Filter(name)
        return result
    def add_filters(self, filterer, filters):
        """Add filters to a filterer from a list of names."""
        for f in filters:
            try:
                filterer.addFilter(self.config['filters'][f])
            except StandardError, e:
                raise ValueError('Unable to add filter %r: %s' % (f, e))
    def configure_handler(self, config):
        """Configure a handler from a dictionary."""
        formatter = config.pop('formatter', None)
        if formatter:
            try:
                formatter = self.config['formatters'][formatter]
            except StandardError, e:
                raise ValueError('Unable to set formatter '
                                 '%r: %s' % (formatter, e))
        level = config.pop('level', None)
        filters = config.pop('filters', None)
        if '()' in config:
            c = config.pop('()')
            if not hasattr(c, '__call__') and hasattr(types, 'ClassType') and type(c) != types.ClassType:
                c = self.resolve(c)
            factory = c
        else:
            klass = self.resolve(config.pop('class'))
            #Special case for handler which refers to another handler
            if issubclass(klass, logging.handlers.MemoryHandler) and\
                'target' in config:
                try:
                    config['target'] = self.config['handlers'][config['target']]
                except StandardError, e:
                    raise ValueError('Unable to set target handler '
                                     '%r: %s' % (config['target'], e))
            elif issubclass(klass, logging.handlers.SMTPHandler) and\
                'mailhost' in config:
                config['mailhost'] = self.as_tuple(config['mailhost'])
            elif issubclass(klass, logging.handlers.SysLogHandler) and\
                'address' in config:
                config['address'] = self.as_tuple(config['address'])
            factory = klass
        kwargs = dict([(k, config[k]) for k in config if valid_ident(k)])
        try:
            result = factory(**kwargs)
        except TypeError, te:
            if "'stream'" not in str(te):
                raise
            #The argument name changed from strm to stream
            #Retry with old name.
            #This is so that code can be used with older Python versions
            #(e.g. by Django)
            kwargs['strm'] = kwargs.pop('stream')
            result = factory(**kwargs)
        if formatter:
            result.setFormatter(formatter)
        if level is not None:
            result.setLevel(_checkLevel(level))
        if filters:
            self.add_filters(result, filters)
        return result
    def add_handlers(self, logger, handlers):
        """Add handlers to a logger from a list of names."""
        for h in handlers:
            try:
                logger.addHandler(self.config['handlers'][h])
            except StandardError, e:
                raise ValueError('Unable to add handler %r: %s' % (h, e))
    def common_logger_config(self, logger, config, incremental=False):
        """
        Perform configuration which is common to root and non-root loggers.
        """
        level = config.get('level', None)
        if level is not None:
            logger.setLevel(_checkLevel(level))
        if not incremental:
            #Remove any existing handlers
            for h in logger.handlers[:]:
                logger.removeHandler(h)
            handlers = config.get('handlers', None)
            if handlers:
                self.add_handlers(logger, handlers)
            filters = config.get('filters', None)
            if filters:
                self.add_filters(logger, filters)
    def configure_logger(self, name, config, incremental=False):
        """Configure a non-root logger from a dictionary."""
        logger = logging.getLogger(name)
        self.common_logger_config(logger, config, incremental)
        propagate = config.get('propagate', None)
        if propagate is not None:
            logger.propagate = propagate
    def configure_root(self, config, incremental=False):
        """Configure a root logger from a dictionary."""
        root = logging.getLogger()
        self.common_logger_config(root, config, incremental)
# Hook allowing callers to substitute a custom configurator class.
dictConfigClass = DictConfigurator
def dictConfig(config):
    """Configure logging using a dictionary."""
    dictConfigClass(config).configure()
| |
from qtpy.QtWidgets import (
QPushButton,
QHBoxLayout,
QVBoxLayout,
QCheckBox,
QLabel,
QTableWidget,
QTableWidgetItem,
QHeaderView,
QWidget,
QMessageBox,
)
from qtpy.QtGui import QBrush, QColor
from qtpy.QtCore import Qt, Slot, Signal, QThreadPool, QRunnable
from .useful_widgets import (
SecondaryWindow,
set_tooltip,
LineEditExtended,
PushButtonNamed,
CheckBoxNamed,
RangeManager,
)
import logging
# Module-level logger for this GUI module.
logger = logging.getLogger(__name__)
class WndComputeRoiMaps(SecondaryWindow):
# Signal that is sent (to main window) to update global state of the program
update_global_state = Signal()
computations_complete = Signal(object)
signal_roi_computation_complete = Signal()
signal_activate_tab_xrf_maps = Signal()
def __init__(self, *, gpc, gui_vars):
super().__init__()
# Global processing classes
self.gpc = gpc
# Global GUI variables (used for control of GUI state)
self.gui_vars = gui_vars
# Reference to the main window. The main window will hold
# references to all non-modal windows that could be opened
# from multiple places in the program.
self.ref_main_window = self.gui_vars["ref_main_window"]
self.update_global_state.connect(self.ref_main_window.update_widget_state)
self.initialize()
def initialize(self):
self.setWindowTitle("PyXRF: Compute XRF Maps Based on ROIs")
self.setMinimumWidth(600)
self.setMinimumHeight(300)
self.resize(600, 600)
header_vbox = self._setup_header()
self._setup_table()
footer_hbox = self._setup_footer()
vbox = QVBoxLayout()
vbox.addLayout(header_vbox)
vbox.addWidget(self.table)
vbox.addLayout(footer_hbox)
self.setLayout(vbox)
self._set_tooltips()
def _setup_header(self):
self.pb_clear = QPushButton("Clear")
self.pb_clear.clicked.connect(self.pb_clear_clicked)
self.pb_use_lines_for_fitting = QPushButton("Use Lines Selected For Fitting")
self.pb_use_lines_for_fitting.clicked.connect(self.pb_use_lines_for_fitting_clicked)
self.le_sel_emission_lines = LineEditExtended()
self.le_sel_emission_lines.textChanged.connect(self.le_sel_emission_lines_text_changed)
self.le_sel_emission_lines.editingFinished.connect(self.le_sel_emission_lines_editing_finished)
sample_elements = ""
self.le_sel_emission_lines.setText(sample_elements)
vbox = QVBoxLayout()
hbox = QHBoxLayout()
hbox.addWidget(QLabel("Enter emission lines, e.g. Fe_K, Gd_L "))
hbox.addStretch(1)
hbox.addWidget(self.pb_clear)
hbox.addWidget(self.pb_use_lines_for_fitting)
vbox.addLayout(hbox)
vbox.addWidget(self.le_sel_emission_lines)
return vbox
def _setup_table(self):
# Labels for horizontal header
self.tbl_labels = ["Line", "E, keV", "ROI, keV", "Show", "Reset"]
# The list of columns that stretch with the table
self.tbl_cols_stretch = ("E, keV", "ROI, keV")
# Table item representation if different from default
self.tbl_format = {"E, keV": ".3f"}
# Editable items (highlighted with lighter background)
self.tbl_cols_editable = {"ROI, keV"}
# Columns that contain Range Manager
self.tbl_cols_range_manager = ("ROI, keV",)
self.table = QTableWidget()
self.table.setColumnCount(len(self.tbl_labels))
self.table.setHorizontalHeaderLabels(self.tbl_labels)
self.table.verticalHeader().hide()
self.table.setSelectionMode(QTableWidget.NoSelection)
self.table.setStyleSheet("QTableWidget::item{color: black;}")
header = self.table.horizontalHeader()
for n, lbl in enumerate(self.tbl_labels):
# Set stretching for the columns
if lbl in self.tbl_cols_stretch:
header.setSectionResizeMode(n, QHeaderView.Stretch)
else:
header.setSectionResizeMode(n, QHeaderView.ResizeToContents)
self._table_contents = []
self.cb_list = []
self.range_manager_list = []
self.pb_default_list = []
self.fill_table(self._table_contents)
def fill_table(self, table_contents):
self.table.clearContents()
self._table_contents = table_contents # Save new table contents
for item in self.range_manager_list:
item.selection_changed.disconnect(self.range_manager_selection_changed)
self.range_manager_list = []
for cb in self.cb_list:
cb.stateChanged.disconnect(self.cb_state_changed)
self.cb_list = []
for pb in self.pb_default_list:
pb.clicked.connect(self.pb_default_clicked)
self.pb_default_list = []
self.table.setRowCount(len(table_contents))
for nr, row in enumerate(table_contents):
eline_name = row["eline"] + "a1"
energy = row["energy_center"]
energy_left = row["energy_left"]
energy_right = row["energy_right"]
range_displayed = row["range_displayed"]
table_row = [eline_name, energy, (energy_left, energy_right)]
for nc, entry in enumerate(table_row):
label = self.tbl_labels[nc]
# Set alternating background colors for the table rows
# Make background for editable items a little brighter
brightness = 240 if label in self.tbl_cols_editable else 220
if nr % 2:
rgb_bckg = (255, brightness, brightness)
else:
rgb_bckg = (brightness, 255, brightness)
if self.tbl_labels[nc] not in self.tbl_cols_range_manager:
if self.tbl_labels[nc] in self.tbl_format:
fmt = self.tbl_format[self.tbl_labels[nc]]
s = ("{:" + fmt + "}").format(entry)
else:
s = f"{entry}"
item = QTableWidgetItem(s)
if nc > 0:
item.setTextAlignment(Qt.AlignCenter)
else:
item.setTextAlignment(Qt.AlignRight | Qt.AlignVCenter)
# Set all columns not editable (unless needed)
item.setFlags(item.flags() & ~Qt.ItemIsEditable)
# Note, that there is no way to set style sheet for QTableWidgetItem
item.setBackground(QBrush(QColor(*rgb_bckg)))
self.table.setItem(nr, nc, item)
else:
spin_name = f"{nr}"
item = RangeManager(name=spin_name, add_sliders=False, selection_to_range_min=0.0001)
item.set_range(0.0, 100.0) # The range is greater than needed (in keV)
item.set_selection(value_low=entry[0], value_high=entry[1])
item.setTextColor((0, 0, 0)) # In case of dark theme
item.setAlignment(Qt.AlignHCenter | Qt.AlignVCenter)
self.range_manager_list.append(item)
item.selection_changed.connect(self.range_manager_selection_changed)
color = (rgb_bckg[0], rgb_bckg[1], rgb_bckg[2])
item.setBackground(color)
self.table.setCellWidget(nr, nc, item)
brightness = 220
if nr % 2:
rgb_bckg = (255, brightness, brightness)
else:
rgb_bckg = (brightness, 255, brightness)
item = QWidget()
cb = CheckBoxNamed(name=f"{nr}")
cb.setChecked(Qt.Checked if range_displayed else Qt.Unchecked)
self.cb_list.append(cb)
cb.stateChanged.connect(self.cb_state_changed)
item_hbox = QHBoxLayout(item)
item_hbox.addWidget(cb)
item_hbox.setAlignment(Qt.AlignCenter)
item_hbox.setContentsMargins(0, 0, 0, 0)
color_css = f"rgb({rgb_bckg[0]}, {rgb_bckg[1]}, {rgb_bckg[2]})"
item.setStyleSheet(
f"QWidget {{ background-color: {color_css}; }} "
f"QCheckBox {{ color: black; background-color: white }}"
)
self.table.setCellWidget(nr, nc + 1, item)
item = PushButtonNamed("Reset", name=f"{nr}")
item.clicked.connect(self.pb_default_clicked)
self.pb_default_list.append(item)
rgb_bckg = [_ - 35 if (_ < 255) else _ for _ in rgb_bckg]
color_css = f"rgb({rgb_bckg[0]}, {rgb_bckg[1]}, {rgb_bckg[2]})"
item.setStyleSheet(f"QPushButton {{ color: black; background-color: {color_css}; }}")
self.table.setCellWidget(nr, nc + 2, item)
def _setup_footer(self):
self.cb_subtract_baseline = QCheckBox("Subtract baseline")
self.cb_subtract_baseline.setChecked(
Qt.Checked if self.gpc.get_roi_subtract_background() else Qt.Unchecked
)
self.cb_subtract_baseline.toggled.connect(self.cb_subtract_baseline_toggled)
self.pb_compute_roi = QPushButton("Compute ROIs")
self.pb_compute_roi.clicked.connect(self.pb_compute_roi_clicked)
hbox = QHBoxLayout()
hbox.addWidget(self.cb_subtract_baseline)
hbox.addStretch(1)
hbox.addWidget(self.pb_compute_roi)
return hbox
def _set_tooltips(self):
set_tooltip(self.pb_clear, "<b>Clear</b> the list")
set_tooltip(
self.pb_use_lines_for_fitting,
"Copy the contents of <b>the list of emission lines selected for fitting</b> to the list of ROIs",
)
set_tooltip(self.le_sel_emission_lines, "The list of <b>emission lines</b> selected for ROI computation.")
set_tooltip(self.table, "The list of ROIs")
set_tooltip(
self.cb_subtract_baseline,
"<b>Subtract baseline</b> from the pixel spectra before computing ROIs. "
"Subtracting baseline slows down computations and usually have no benefit. "
"In most cases it should remain <b>unchecked</b>.",
)
set_tooltip(
self.pb_compute_roi,
"<b>Run</b> computations of the ROIs. The resulting <b>ROI</b> dataset "
"may be viewed in <b>XRF Maps</b> tab.",
)
def update_widget_state(self, condition=None):
# Update the state of the menu bar
state = not self.gui_vars["gui_state"]["running_computations"]
self.setEnabled(state)
# Hide the window if required by the program state
state_file_loaded = self.gui_vars["gui_state"]["state_file_loaded"]
state_model_exist = self.gui_vars["gui_state"]["state_model_exists"]
if not state_file_loaded or not state_model_exist:
self.hide()
if condition == "tooltips":
self._set_tooltips()
def pb_clear_clicked(self):
self.gpc.clear_roi_element_list()
self._update_displayed_element_list()
self._validate_element_list()
def pb_use_lines_for_fitting_clicked(self):
self.gpc.load_roi_element_list_from_selected()
self._update_displayed_element_list()
self._validate_element_list()
def le_sel_emission_lines_text_changed(self, text):
self._validate_element_list(text)
def le_sel_emission_lines_editing_finished(self):
text = self.le_sel_emission_lines.text()
if self._validate_element_list(text):
self.gpc.set_roi_selected_element_list(text)
self._update_table()
else:
element_list = self.gpc.get_roi_selected_element_list()
self.le_sel_emission_lines.setText(element_list)
def cb_subtract_baseline_toggled(self, state):
self.gpc.set_roi_subtract_background(bool(state))
def cb_state_changed(self, name, state):
try:
nr = int(name) # Row number
checked = state == Qt.Checked
eline = self._table_contents[nr]["eline"]
self._table_contents[nr]["range_displayed"] = checked
self.gpc.show_roi(eline, checked)
except Exception as ex:
logger.error(f"Failed to process selection change. Exception occurred: {ex}.")
def _find_spin_box(self, name):
for item in self.spin_list:
if item.getName() == name:
return item
return None
def spin_value_changed(self, name, value):
try:
nr, side = name.split(",")
nr = int(nr)
keys = {"left": "energy_left", "right": "energy_right"}
side = keys[side]
eline = self._table_contents[nr]["eline"]
if self._table_contents[nr][side] == value:
return
if side == "energy_left": # Left boundary
if value < self._table_contents[nr]["energy_right"]:
self._table_contents[nr][side] = value
else: # Right boundary
if value > self._table_contents[nr]["energy_left"]:
self._table_contents[nr][side] = value
# Update plot
left, right = self._table_contents[nr]["energy_left"], self._table_contents[nr]["energy_right"]
self.gpc.change_roi(eline, left, right)
except Exception as ex:
logger.error(f"Failed to change the ROI. Exception occurred: {ex}.")
def range_manager_selection_changed(self, left, right, name):
try:
nr = int(name)
eline = self._table_contents[nr]["eline"]
self.gpc.change_roi(eline, left, right)
except Exception as ex:
logger.error(f"Failed to change the ROI. Exception occurred: {ex}.")
def pb_default_clicked(self, name):
try:
nr = int(name)
eline = self._table_contents[nr]["eline"]
left = self._table_contents[nr]["energy_left_default"]
right = self._table_contents[nr]["energy_right_default"]
self.range_manager_list[nr].set_selection(value_low=left, value_high=right)
self.gpc.change_roi(eline, left, right)
except Exception as ex:
logger.error(f"Failed to change the ROI. Exception occurred: {ex}.")
def pb_compute_roi_clicked(self):
def cb():
try:
self.gpc.compute_rois()
success, msg = True, ""
except Exception as ex:
success, msg = False, str(ex)
return {"success": success, "msg": msg}
self._compute_in_background(cb, self.slot_compute_roi_clicked)
@Slot(object)
def slot_compute_roi_clicked(self, result):
self._recover_after_compute(self.slot_compute_roi_clicked)
success = result["success"]
if success:
self.gui_vars["gui_state"]["state_xrf_map_exists"] = True
else:
msg = result["msg"]
msgbox = QMessageBox(QMessageBox.Critical, "Failed to Compute ROIs", msg, QMessageBox.Ok, parent=self)
msgbox.exec()
self.signal_roi_computation_complete.emit()
self.update_global_state.emit()
if success:
self.signal_activate_tab_xrf_maps.emit()
def _update_displayed_element_list(self):
element_list = self.gpc.get_roi_selected_element_list()
self.le_sel_emission_lines.setText(element_list)
self._validate_element_list()
self._update_table()
def _update_table(self):
table_contents = self.gpc.get_roi_settings()
self.fill_table(table_contents)
def _validate_element_list(self, text=None):
if text is None:
text = self.le_sel_emission_lines.text()
el_list = text.split(",")
el_list = [_.strip() for _ in el_list]
if el_list == [""]:
el_list = []
valid = bool(len(el_list))
for eline in el_list:
if self.gpc.get_eline_name_category(eline) != "eline":
valid = False
self.le_sel_emission_lines.setValid(valid)
self.pb_compute_roi.setEnabled(valid)
return valid
def _compute_in_background(self, func, slot, *args, **kwargs):
"""
Run function `func` in a background thread. Send the signal
`self.computations_complete` once computation is finished.
Parameters
----------
func: function
Reference to a function that is supposed to be executed at the background.
The function return value is passed as a signal parameter once computation is
complete.
slot: qtpy.QtCore.Slot or None
Reference to a slot. If not None, then the signal `self.computation_complete`
is connected to this slot.
args, kwargs
arguments of the function `func`.
"""
signal_complete = self.computations_complete
def func_to_run(func, *args, **kwargs):
class LoadFile(QRunnable):
def run(self):
result_dict = func(*args, **kwargs)
signal_complete.emit(result_dict)
return LoadFile()
if slot is not None:
self.computations_complete.connect(slot)
self.gui_vars["gui_state"]["running_computations"] = True
self.update_global_state.emit()
QThreadPool.globalInstance().start(func_to_run(func, *args, **kwargs))
def _recover_after_compute(self, slot):
    """
    The function should be called after the signal `self.computations_complete` is
    received. The slot should be the same as the one used when calling
    `self.compute_in_background`.
    """
    # Undo the connection made by `_compute_in_background` and clear the flag.
    if slot is not None:
        self.computations_complete.disconnect(slot)
    self.gui_vars["gui_state"]["running_computations"] = False
    self.update_global_state.emit()
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Generate Pygments Documentation
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Generates a bunch of html files containing the documentation.
:copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import os
import sys
from datetime import datetime
from cgi import escape
from docutils import nodes
from docutils.parsers.rst import directives
from docutils.core import publish_parts
from docutils.writers import html4css1
from jinja2 import Template
# try to use the right Pygments to build the docs
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))
from pygments import highlight, __version__
from pygments.lexers import get_lexer_by_name
from pygments.formatters import HtmlFormatter
# reST template for one lexer entry, filled in by generate_lexer_docs() with
# (class name, docstring, short names, filename patterns, mimetypes).
LEXERDOC = '''
`%s`
%s
:Short names: %s
:Filename patterns: %s
:Mimetypes: %s
'''
def generate_lexer_docs():
from pygments.lexers import LEXERS
out = []
modules = {}
moduledocstrings = {}
for classname, data in sorted(LEXERS.iteritems(), key=lambda x: x[0]):
module = data[0]
mod = __import__(module, None, None, [classname])
cls = getattr(mod, classname)
if not cls.__doc__:
print "Warning: %s does not have a docstring." % classname
modules.setdefault(module, []).append((
classname,
cls.__doc__,
', '.join(data[2]) or 'None',
', '.join(data[3]).replace('*', '\\*').replace('_', '\\') or 'None',
', '.join(data[4]) or 'None'))
if module not in moduledocstrings:
moduledocstrings[module] = mod.__doc__
for module, lexers in sorted(modules.iteritems(), key=lambda x: x[0]):
heading = moduledocstrings[module].splitlines()[4].strip().rstrip('.')
out.append('\n' + heading + '\n' + '-'*len(heading) + '\n')
for data in lexers:
out.append(LEXERDOC % data)
return ''.join(out).decode('utf-8')
def generate_formatter_docs():
    """Build reST documentation for all builtin formatters."""
    from pygments.formatters import FORMATTERS
    pieces = []
    for cls, data in sorted(FORMATTERS.iteritems(),
                            key=lambda x: x[0].__name__):
        title = cls.__name__
        pieces.append('`' + title + '`\n' + '-'*(2+len(title)) + '\n')
        pieces.append(cls.__doc__)
        names = ', '.join(data[1]) or 'None'
        patterns = ', '.join(data[2]).replace('*', '\\*') or 'None'
        pieces.append('''
:Short names: %s
:Filename patterns: %s
''' % (names, patterns))
    return ''.join(pieces).decode('utf-8')
def generate_filter_docs():
    """Build reST documentation for all builtin filters."""
    from pygments.filters import FILTERS
    docs = []
    for name, cls in FILTERS.iteritems():
        docs.append('''
`%s`
%s
:Name: %s
''' % (cls.__name__, cls.__doc__, name))
    return ''.join(docs).decode('utf-8')
def generate_changelog():
    """Return the body of the CHANGES file with its header block stripped.

    The header is the first non-blank run of lines up to (and including)
    the blank line that follows it; everything after that is returned.
    """
    fn = os.path.abspath(os.path.join(os.path.dirname(__file__), '..',
                                      'CHANGES'))
    result = []
    in_header = False
    header = True
    # Context manager replaces the builtin `file()` + manual close() of the
    # original, so the handle is closed even if reading fails.
    with open(fn) as f:
        for line in f:
            if header:
                if not in_header and line.strip():
                    in_header = True
                elif in_header and not line.strip():
                    header = False
            else:
                result.append(line.rstrip())
    return '\n'.join(result).decode('utf-8')
def generate_authors():
    """Return the contents of the AUTHORS file as a unicode string."""
    fn = os.path.abspath(os.path.join(os.path.dirname(__file__), '..',
                                      'AUTHORS'))
    # Context manager replaces the builtin `file()` + manual close() of the
    # original, so the handle is closed even if reading fails.
    with open(fn) as f:
        return f.read().rstrip().decode('utf-8')
LEXERDOCS = generate_lexer_docs()
FORMATTERDOCS = generate_formatter_docs()
FILTERDOCS = generate_filter_docs()
CHANGELOG = generate_changelog()
AUTHORS = generate_authors()
PYGMENTS_FORMATTER = HtmlFormatter(style='pastie', cssclass='syntax')
USAGE = '''\
Usage: %s <mode> <destination> [<source.txt> ...]
Generate either python or html files out of the documentation.
Mode can either be python or html.\
''' % sys.argv[0]
TEMPLATE = '''\
<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01//EN"
"http://www.w3.org/TR/html4/strict.dtd">
<html>
<head>
<title>{{ title }} — Pygments</title>
<meta http-equiv="content-type" content="text/html; charset=utf-8">
<style type="text/css">
{{ style }}
</style>
</head>
<body>
<div id="content">
<h1 class="heading">Pygments</h1>
<h2 class="subheading">{{ title }}</h2>
{% if file_id != "index" %}
<a id="backlink" href="index.html">« Back To Index</a>
{% endif %}
{% if toc %}
<div class="toc">
<h2>Contents</h2>
<ul class="contents">
{% for key, value in toc %}
<li><a href="{{ key }}">{{ value }}</a></li>
{% endfor %}
</ul>
</div>
{% endif %}
{{ body }}
</div>
</body>
<!-- generated on: {{ generation_date }}
file id: {{ file_id }} -->
</html>\
'''
STYLESHEET = '''\
body {
background-color: #f2f2f2;
margin: 0;
padding: 0;
font-family: 'Georgia', serif;
color: #111;
}
#content {
background-color: white;
padding: 20px;
margin: 20px auto 20px auto;
max-width: 800px;
border: 4px solid #ddd;
}
h1 {
font-weight: normal;
font-size: 40px;
color: #09839A;
}
h2 {
font-weight: normal;
font-size: 30px;
color: #C73F00;
}
h1.heading {
margin: 0 0 30px 0;
}
h2.subheading {
margin: -30px 0 0 45px;
}
h3 {
margin-top: 30px;
}
table.docutils {
border-collapse: collapse;
border: 2px solid #aaa;
margin: 0.5em 1.5em 0.5em 1.5em;
}
table.docutils td {
padding: 2px;
border: 1px solid #ddd;
}
p, li, dd, dt, blockquote {
font-size: 15px;
color: #333;
}
p {
line-height: 150%;
margin-bottom: 0;
margin-top: 10px;
}
hr {
border-top: 1px solid #ccc;
border-bottom: 0;
border-right: 0;
border-left: 0;
margin-bottom: 10px;
margin-top: 20px;
}
dl {
margin-left: 10px;
}
li, dt {
margin-top: 5px;
}
dt {
font-weight: bold;
}
th {
text-align: left;
}
a {
color: #990000;
}
a:hover {
color: #c73f00;
}
pre {
background-color: #f9f9f9;
border-top: 1px solid #ccc;
border-bottom: 1px solid #ccc;
padding: 5px;
font-size: 13px;
font-family: Bitstream Vera Sans Mono,monospace;
}
tt {
font-size: 13px;
font-family: Bitstream Vera Sans Mono,monospace;
color: black;
padding: 1px 2px 1px 2px;
background-color: #f0f0f0;
}
cite {
/* abusing <cite>, it's generated by ReST for `x` */
font-size: 13px;
font-family: Bitstream Vera Sans Mono,monospace;
font-weight: bold;
font-style: normal;
}
#backlink {
float: right;
font-size: 11px;
color: #888;
}
div.toc {
margin: 0 0 10px 0;
}
div.toc h2 {
font-size: 20px;
}
''' #'
def pygments_directive(name, arguments, options, content, lineno,
                       content_offset, block_text, state, state_machine):
    """reST directive that renders a ``sourcecode`` block with Pygments."""
    try:
        lexer = get_lexer_by_name(arguments[0])
    except ValueError:
        # Unknown language: fall back to plain text.
        lexer = get_lexer_by_name('text')
    code = u'\n'.join(content)
    rendered = highlight(code, lexer, PYGMENTS_FORMATTER)
    return [nodes.raw('', rendered, format="html")]
# Directive metadata: one required argument (the lexer name), content allowed.
pygments_directive.arguments = (1, 0, 1)
pygments_directive.content = 1
directives.register_directive('sourcecode', pygments_directive)
def create_translator(link_style):
    """Return an HTMLTranslator subclass that rewrites ``*.txt`` links
    using the `link_style` callable."""
    class Translator(html4css1.HTMLTranslator):
        def visit_reference(self, node):
            uri = node.get('refuri')
            # Only rewrite local links that point at other .txt documents.
            if uri is not None and uri.endswith('.txt') and '/' not in uri:
                node['refuri'] = link_style(uri[:-4])
            html4css1.HTMLTranslator.visit_reference(self, node)
    return Translator
class DocumentationWriter(html4css1.Writer):
    """HTML writer that additionally produces a table-of-contents part."""

    def __init__(self, link_style):
        html4css1.Writer.__init__(self)
        # Translator class with .txt link rewriting supplied by `link_style`.
        self.translator_class = create_translator(link_style)

    def translate(self):
        html4css1.Writer.translate(self)
        # generate table of contents
        contents = self.build_contents(self.document)
        contents_doc = self.document.copy()
        contents_doc.children = contents
        contents_visitor = self.translator_class(contents_doc)
        contents_doc.walkabout(contents_visitor)
        self.parts['toc'] = self._generated_toc

    def build_contents(self, node, level=0):
        """Collect ('#id', caption) pairs for the trailing top-level
        sections of `node` into ``self._generated_toc``.

        Returns an empty child list so the contents copy gets no further
        processing.
        """
        sections = []
        i = len(node) - 1
        # Walk the trailing run of section nodes backwards, then restore order.
        while i >= 0 and isinstance(node[i], nodes.section):
            sections.append(node[i])
            i -= 1
        sections.reverse()
        toc = []
        for section in sections:
            try:
                reference = nodes.reference('', '', refid=section['ids'][0], *section[0])
            except IndexError:
                # Section without ids or title: skip it.
                continue
            ref_id = reference['refid']
            text = escape(reference.astext())
            toc.append((ref_id, text))
        self._generated_toc = [('#%s' % href, caption) for href, caption in toc]
        # no further processing
        return []
def generate_documentation(data, link_style):
    """Render reST `data` to HTML parts after expanding the builtin
    placeholder markers with the pre-generated fragments."""
    writer = DocumentationWriter(link_style)
    replacements = [
        ('[builtin_lexer_docs]', LEXERDOCS),
        ('[builtin_formatter_docs]', FORMATTERDOCS),
        ('[builtin_filter_docs]', FILTERDOCS),
        ('[changelog]', CHANGELOG),
        ('[authors]', AUTHORS),
    ]
    for placeholder, fragment in replacements:
        data = data.replace(placeholder, fragment)
    parts = publish_parts(
        data,
        writer=writer,
        settings_overrides={
            'initial_header_level': 3,
            'field_name_limit': 50,
        }
    )
    return {
        'title': parts['title'],
        'body': parts['body'],
        'toc': parts['toc']
    }
def handle_python(filename, fp, dst):
    """Render one source document into an importable Python module in `dst`.

    The generated module exposes DATE, TITLE, TOC and BODY constants.
    """
    now = datetime.now()
    title = os.path.basename(filename)[:-4]
    content = fp.read()

    def urlize(href):
        # create links for the pygments webpage
        if href == 'index.txt':
            return '/docs/'
        else:
            return '/docs/%s/' % href

    parts = generate_documentation(content, urlize)
    # Context manager replaces the builtin `file()` so the output file is
    # closed even if a write fails.
    with open(os.path.join(dst, title + '.py'), 'w') as result:
        result.write('# -*- coding: utf-8 -*-\n')
        result.write('"""\n Pygments Documentation - %s\n' % title)
        result.write(' %s\n\n' % ('~' * (24 + len(title))))
        result.write(' Generated on: %s\n"""\n\n' % now)
        result.write('import datetime\n')
        result.write('DATE = %r\n' % now)
        result.write('TITLE = %r\n' % parts['title'])
        result.write('TOC = %r\n' % parts['toc'])
        result.write('BODY = %r\n' % parts['body'])
def handle_html(filename, fp, dst):
    """Render one source document into a standalone HTML page in `dst`."""
    now = datetime.now()
    title = os.path.basename(filename)[:-4]
    content = fp.read().decode('utf-8')
    c = generate_documentation(content, (lambda x: './%s.html' % x))
    c['style'] = STYLESHEET + PYGMENTS_FORMATTER.get_style_defs('.syntax')
    c['generation_date'] = now
    c['file_id'] = title
    t = Template(TEMPLATE)
    # Context manager replaces the builtin `file()` + manual close() of the
    # original, so the output file is closed even if rendering fails.
    with open(os.path.join(dst, title + '.html'), 'w') as result:
        result.write(t.render(c).encode('utf-8'))
def run(handle_file, dst, sources=()):
path = os.path.abspath(os.path.join(os.path.dirname(__file__), 'src'))
if not sources:
sources = [os.path.join(path, fn) for fn in os.listdir(path)]
if not os.path.isdir(dst):
os.makedirs(dst)
print 'Making docs for Pygments %s in %s' % (__version__, dst)
for fn in sources:
if not os.path.isfile(fn):
continue
print 'Processing %s' % fn
f = open(fn)
try:
handle_file(fn, f, dst)
finally:
f.close()
def main(mode, dst='build/', *sources):
try:
handler = {
'html': handle_html,
'python': handle_python
}[mode]
except KeyError:
print 'Error: unknown mode "%s"' % mode
sys.exit(1)
run(handler, os.path.realpath(dst), sources)
if __name__ == '__main__':
    # Require at least a mode argument; otherwise show the usage message.
    if len(sys.argv) == 1:
        print USAGE
    else:
        main(*sys.argv[1:])
| |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import hashlib
import hmac
import io
import re
import packaging.specifiers
import packaging.version
import pkg_resources
import wtforms
import wtforms.validators
from rfc3986 import uri_reference
from pyramid.httpexceptions import HTTPBadRequest, HTTPForbidden, HTTPGone
from pyramid.response import Response
from pyramid.view import forbidden_view_config, view_config
from sqlalchemy import func
from sqlalchemy.orm.exc import NoResultFound
from warehouse import forms
from warehouse.classifiers.models import Classifier
from warehouse.csrf import csrf_exempt
from warehouse.packaging.interfaces import IFileStorage
from warehouse.packaging.models import (
Project, Release, Dependency, DependencyKind, Role, File, Filename,
)
from warehouse.utils.http import require_POST
MAX_FILESIZE = 60 * 1024 * 1024 # 60M
MAX_SIGSIZE = 8 * 1024 # 8K
ALLOWED_PLATFORMS = {
"any", "win32", "win-amd64", "win_amd64", "win-ia64", "win_ia64",
}
_error_message_order = ["metadata_version", "name", "version"]
_dist_file_re = re.compile(
r".+?\.(exe|tar\.gz|bz2|rpm|deb|zip|tgz|egg|dmg|msi|whl)$",
re.I,
)
_wheel_file_re = re.compile(
r"""
^
(?P<namever>(?P<name>.+?)(-(?P<ver>\d.+?))?)
(
(-(?P<build>\d.*?))?
-(?P<pyver>.+?)
-(?P<abi>.+?)
-(?P<plat>.+?)
(?:\.whl|\.dist-info)
)
$
""",
re.VERBOSE,
)
_project_name_re = re.compile(
r"^([A-Z0-9]|[A-Z0-9][A-Z0-9._-]*[A-Z0-9])$",
re.IGNORECASE,
)
_legacy_specifier_re = re.compile(
r"^(?P<name>\S+)(?: \((?P<specifier>\S+)\))?$"
)
def _exc_with_message(exc, message):
# The crappy old API that PyPI offered uses the status to pass down
# messages to the client. So this function will make that easier to do.
resp = exc(message)
resp.status = "{} {}".format(resp.status_code, message)
return resp
def _validate_pep440_version(form, field):
    """Form validator: version must be valid PEP 440, with no local segment."""
    candidate = packaging.version.parse(field.data)
    # Anything that is not a strict PEP 440 version parses to a legacy type.
    if not isinstance(candidate, packaging.version.Version):
        message = (
            "Must start and end with a letter or numeral and contain only "
            "ascii numeric and '.', '_' and '-'."
        )
        raise wtforms.validators.ValidationError(message)
    # Reject local version segments (e.g. "1.0+deadbeef").
    if candidate.local is not None:
        raise wtforms.validators.ValidationError(
            "Cannot use PEP 440 local versions."
        )
def _parse_legacy_requirement(requirement):
    """Split a legacy "name (specifier)" requirement into its two parts.

    Returns (name, specifier); specifier is None when absent.
    Raises ValueError when the requirement does not match the syntax.
    """
    match = _legacy_specifier_re.search(requirement)
    if match is None:
        raise ValueError("Invalid Requirement.")
    groups = match.groupdict()
    return groups["name"], groups["specifier"]
def _validate_pep440_specifier(specifier):
    """Raise a form ValidationError when `specifier` is not valid PEP 440."""
    try:
        packaging.specifiers.SpecifierSet(specifier)
    except packaging.specifiers.InvalidSpecifier:
        raise wtforms.validators.ValidationError(
            "Invalid specifier in requirement.") from None
def _validate_legacy_non_dist_req(requirement):
    """Validate a legacy Requires/Provides/Obsoletes entry.

    The name (with underscores stripped) must look like a Python
    identifier; any specifier must be valid PEP 440.
    """
    name, specifier = _parse_legacy_requirement(requirement)
    # Underscores are ignored for the identifier check below.
    bare = name.replace("_", "") if "_" in name else name
    if not bare.isalnum() or bare[0].isdigit():
        raise wtforms.validators.ValidationError(
            "Must be a valid Python identifier."
        )
    if specifier is not None:
        _validate_pep440_specifier(specifier)
def _validate_legacy_non_dist_req_list(form, field):
    """Apply the legacy non-dist requirement check to every list entry."""
    for entry in field.data:
        _validate_legacy_non_dist_req(entry)
def _validate_legacy_dist_req(requirement):
    """Validate a legacy *-Dist entry: valid project name plus an optional
    PEP 440 specifier."""
    name, specifier = _parse_legacy_requirement(requirement)
    if _project_name_re.search(name) is None:
        raise wtforms.validators.ValidationError(
            "Must be a valid project name."
        )
    if specifier is not None:
        _validate_pep440_specifier(specifier)
def _validate_legacy_dist_req_list(form, field):
    """Apply the legacy dist requirement check to every list entry."""
    for entry in field.data:
        _validate_legacy_dist_req(entry)
def _validate_requires_external(requirement):
    """Validate a Requires-External entry (name is unconstrained)."""
    _, specifier = _parse_legacy_requirement(requirement)
    # TODO: Is it really reasonable to parse the specifier using PEP 440?
    if specifier is not None:
        _validate_pep440_specifier(specifier)
def _validate_requires_external_list(form, field):
    """Apply the Requires-External check to every list entry."""
    for entry in field.data:
        _validate_requires_external(entry)
def _validate_project_url(value):
    """Validate one "label, URL" Project-URL entry."""
    try:
        label, url = value.split(", ", 1)
    except ValueError:
        raise wtforms.validators.ValidationError(
            "Must have both a label and an URL.",
        ) from None
    if not label:
        raise wtforms.validators.ValidationError("Must have a label.")
    if len(label) > 32:
        raise wtforms.validators.ValidationError(
            "Label must not be longer than 32 characters."
        )
    if not url:
        raise wtforms.validators.ValidationError("Must have an URL.")
    parsed = uri_reference(url).normalize()
    # Only http(s) URLs are accepted.
    if not parsed.is_valid() or parsed.scheme not in ('http', 'https'):
        raise wtforms.validators.ValidationError("Invalid URL.")
def _validate_project_url_list(form, field):
    """Apply the Project-URL check to every list entry."""
    for entry in field.data:
        _validate_project_url(entry)
def _construct_dependencies(form, types):
    """Yield a Dependency row for every entry of each listed form field.

    `types` maps form field names to DependencyKind members.
    """
    for field_name, kind in types.items():
        for entry in getattr(form, field_name).data:
            yield Dependency(kind=kind.value, specifier=entry)
class ListField(wtforms.Field):
    """Form field whose data is the list of stripped submitted values."""

    def process_formdata(self, valuelist):
        self.data = [value.strip() for value in valuelist]
# TODO: Eventually this whole validation thing should move to the packaging
# library and we should just call that. However until PEP 426 is done
# that library won't have an API for this.
# TODO: Eventually this whole validation thing should move to the packaging
#       library and we should just call that. However until PEP 426 is done
#       that library won't have an API for this.
class MetadataForm(forms.Form):
    """Validates the metadata fields submitted with a legacy upload."""

    # Metadata version
    metadata_version = wtforms.StringField(
        validators=[
            wtforms.validators.DataRequired(),
            wtforms.validators.AnyOf(
                # Note: This isn't really Metadata 2.0, however bdist_wheel
                # claims it is producing a Metadata 2.0 metadata when in
                # reality it's more like 1.2 with some extensions.
                ["1.0", "1.1", "1.2", "2.0"],
                message="Unknown Metadata Version",
            ),
        ],
    )

    # Identity Project and Release
    name = wtforms.StringField(
        validators=[
            wtforms.validators.DataRequired(),
            wtforms.validators.Regexp(
                _project_name_re,
                re.IGNORECASE,
                message=(
                    "Must start and end with a letter or numeral and contain "
                    "only ascii numeric and '.', '_' and '-'."
                ),
            ),
        ],
    )
    version = wtforms.StringField(
        validators=[
            wtforms.validators.DataRequired(),
            wtforms.validators.Regexp(
                r"^(?!\s).*(?<!\s)$",
                message="Cannot have leading or trailing whitespace.",
            ),
            _validate_pep440_version,
        ],
    )

    # Additional Release metadata
    summary = wtforms.StringField(
        validators=[
            wtforms.validators.Optional(),
            wtforms.validators.Length(max=512),
            wtforms.validators.Regexp(
                r"^.+$",  # Rely on the fact that . doesn't match a newline.
                message="Multiple lines are not allowed.",
            )
        ],
    )
    description = wtforms.StringField(
        validators=[wtforms.validators.Optional()],
    )
    author = wtforms.StringField(validators=[wtforms.validators.Optional()])
    author_email = wtforms.StringField(
        validators=[
            wtforms.validators.Optional(),
            wtforms.validators.Email(),
        ],
    )
    maintainer = wtforms.StringField(
        validators=[wtforms.validators.Optional()],
    )
    maintainer_email = wtforms.StringField(
        validators=[
            wtforms.validators.Optional(),
            wtforms.validators.Email(),
        ],
    )
    license = wtforms.StringField(validators=[wtforms.validators.Optional()])
    keywords = wtforms.StringField(validators=[wtforms.validators.Optional()])
    # Choices are filled in by the view from the Classifier table.
    classifiers = wtforms.fields.SelectMultipleField()
    platform = wtforms.StringField(validators=[wtforms.validators.Optional()])

    # URLs
    home_page = wtforms.StringField(
        validators=[
            wtforms.validators.Optional(),
            wtforms.validators.URL(),
        ],
    )
    download_url = wtforms.StringField(
        validators=[
            wtforms.validators.Optional(),
            wtforms.validators.URL(),
        ],
    )

    # Dependency Information
    requires_python = wtforms.StringField(
        validators=[
            wtforms.validators.Optional(),
            _validate_pep440_specifier,
        ],
    )

    # File information
    pyversion = wtforms.StringField(
        validators=[wtforms.validators.Optional()],
    )
    filetype = wtforms.StringField(
        validators=[
            wtforms.validators.DataRequired(),
            wtforms.validators.AnyOf(
                [
                    "bdist_dmg", "bdist_dumb", "bdist_egg", "bdist_msi",
                    "bdist_rpm", "bdist_wheel", "bdist_wininst", "sdist",
                ],
                message="Unknown type of file.",
            ),
        ]
    )
    comment = wtforms.StringField(validators=[wtforms.validators.Optional()])
    md5_digest = wtforms.StringField(
        validators=[
            wtforms.validators.DataRequired(),
        ],
    )

    # Legacy dependency information
    requires = ListField(
        validators=[
            wtforms.validators.Optional(),
            _validate_legacy_non_dist_req_list,
        ]
    )
    provides = ListField(
        validators=[
            wtforms.validators.Optional(),
            _validate_legacy_non_dist_req_list,
        ],
    )
    obsoletes = ListField(
        validators=[
            wtforms.validators.Optional(),
            _validate_legacy_non_dist_req_list,
        ],
    )

    # Newer dependency information
    requires_dist = ListField(
        validators=[
            wtforms.validators.Optional(),
            _validate_legacy_dist_req_list,
        ],
    )
    provides_dist = ListField(
        validators=[
            wtforms.validators.Optional(),
            _validate_legacy_dist_req_list,
        ],
    )
    obsoletes_dist = ListField(
        validators=[
            wtforms.validators.Optional(),
            _validate_legacy_dist_req_list,
        ],
    )
    requires_external = ListField(
        validators=[
            wtforms.validators.Optional(),
            _validate_requires_external_list,
        ],
    )

    # Newer metadata information
    project_urls = ListField(
        validators=[
            wtforms.validators.Optional(),
            _validate_project_url_list,
        ],
    )

    def full_validate(self):
        """Cross-field checks run in addition to per-field validators."""
        # All non source releases *must* have a pyversion
        if (self.filetype.data
                and self.filetype.data != "sdist" and not self.pyversion.data):
            raise wtforms.validators.ValidationError(
                "Python version is required for binary distribution uploads."
            )

        # All source releases *must* have a pyversion of "source"
        if self.filetype.data == "sdist":
            if not self.pyversion.data:
                self.pyversion.data = "source"
            elif self.pyversion.data != "source":
                raise wtforms.validators.ValidationError(
                    "The only valid Python version for a sdist is 'source'."
                )
# TODO: Uncomment the below code once the upload view is safe to be used on
# warehouse.python.org. For now, we'll disable it so people can't use
# Warehouse to upload and get broken or not properly validated data.
# @view_config(
# route_name="legacy.api.pypi.file_upload",
# decorator=[require_POST, csrf_exempt, uses_session],
# )
def file_upload(request):
    """Handle a legacy distutils/twine file upload.

    Validates the submitted metadata, creates Project/Release rows if
    needed, checks the uploaded file (name, extension, size, MD5, wheel
    platform, optional PGP signature), and stores the file via the
    configured IFileStorage service.
    """
    # Before we do anything, if there isn't an authenticated user with this
    # request, then we'll go ahead and bomb out.
    if request.authenticated_userid is None:
        raise _exc_with_message(
            HTTPForbidden,
            "Invalid or non-existent authentication information.",
        )

    # distutils "helpfully" substitutes unknown, but "required" values with the
    # string "UNKNOWN". This is basically never what anyone actually wants so
    # we'll just go ahead and delete anything whose value is UNKNOWN.
    for key in list(request.POST):
        if request.POST.get(key) == "UNKNOWN":
            del request.POST[key]

    # We require protocol_version 1, it's the only supported version however
    # passing a different version should raise an error.
    if request.POST.get("protocol_version", "1") != "1":
        raise _exc_with_message(HTTPBadRequest, "Unknown protocol version.")

    # Look up all of the valid classifiers
    all_classifiers = request.db.query(Classifier).all()

    # Validate and process the incoming metadata.
    form = MetadataForm(request.POST)
    form.classifiers.choices = [
        (c.classifier, c.classifier) for c in all_classifiers
    ]
    if not form.validate():
        # Report errors for the most important fields first.
        for field_name in _error_message_order:
            if field_name in form.errors:
                break
        else:
            field_name = sorted(form.errors.keys())[0]
        raise _exc_with_message(
            HTTPBadRequest,
            "{field}: {msgs[0]}".format(
                field=field_name,
                msgs=form.errors[field_name],
            ),
        )

    # TODO: We need a better method of blocking names rather than just
    #       hardcoding some names into source control.
    if form.name.data.lower() in {"requirements.txt", "rrequirements.txt"}:
        raise _exc_with_message(
            HTTPBadRequest,
            "The name {!r} is not allowed.".format(form.name.data),
        )

    # Ensure that we have file data in the request.
    if "content" not in request.POST:
        raise _exc_with_message(
            HTTPBadRequest,
            "Upload payload does not have a file.",
        )

    # Look up the project first before doing anything else, this is so we can
    # automatically register it if we need to and can check permissions before
    # going any further.
    try:
        project = (
            request.db.query(Project)
            .filter(
                Project.normalized_name ==
                func.normalize_pep426_name(form.name.data)).one()
        )
    except NoResultFound:
        # The project doesn't exist in our database, so we'll add it along with
        # a role setting the current user as the "Owner" of the project.
        project = Project(name=form.name.data)
        request.db.add(project)
        request.db.add(
            Role(user=request.user, project=project, role_name="Owner")
        )

    # Check that the user has permission to do things to this project, if this
    # is a new project this will act as a sanity check for the role we just
    # added above.
    if not request.has_permission("upload", project):
        raise _exc_with_message(
            HTTPForbidden,
            "You are not allowed to upload to {!r}.".format(project.name)
        )

    # Fetch (or create) the release this file belongs to.
    try:
        release = (
            request.db.query(Release)
            .filter(
                (Release.project == project) &
                (Release.version == form.version.data)).one()
        )
    except NoResultFound:
        release = Release(
            project=project,
            _classifiers=[
                c for c in all_classifiers
                if c.classifier in form.classifiers.data
            ],
            dependencies=list(_construct_dependencies(
                form,
                {
                    "requires": DependencyKind.requires,
                    "provides": DependencyKind.provides,
                    "obsoletes": DependencyKind.obsoletes,
                    "requires_dist": DependencyKind.requires_dist,
                    "provides_dist": DependencyKind.provides_dist,
                    "obsoletes_dist": DependencyKind.obsoletes_dist,
                    "requires_external": DependencyKind.requires_external,
                    "project_urls": DependencyKind.project_url,
                }
            )),
            **{
                k: getattr(form, k).data
                for k in {
                    # This is a list of all the fields in the form that we
                    # should pull off and insert into our new release.
                    "version",
                    "summary", "description", "license",
                    "author", "author_email", "maintainer", "maintainer_email",
                    "keywords", "platform",
                    "home_page", "download_url",
                    "requires_python",
                }
            }
        )
        request.db.add(release)

    # TODO: We need a better solution to this than to just do it inline inside
    #       this method. Ideally the version field would just be sortable, but
    #       at least this should be some sort of hook or trigger.
    releases = (
        request.db.query(Release)
        .filter(Release.project == project)
        .all()
    )
    for i, r in enumerate(sorted(
            releases, key=lambda x: packaging.version.parse(x.version))):
        r._pypi_ordering = i

    # Pull the filename out of our POST data.
    filename = request.POST["content"].filename

    # Make sure that the filename does not contain any path separators.
    if "/" in filename or "\\" in filename:
        raise _exc_with_message(
            HTTPBadRequest,
            "Cannot upload a file with '/' or '\\' in the name.",
        )

    # Make sure the filename ends with an allowed extension.
    if _dist_file_re.search(filename) is None:
        raise _exc_with_message(HTTPBadRequest, "Invalid file extension.")

    # Make sure that our filename matches the project that it is being uploaded
    # to.
    prefix = pkg_resources.safe_name(project.name).lower()
    if not pkg_resources.safe_name(filename).lower().startswith(prefix):
        raise _exc_with_message(
            HTTPBadRequest,
            "The filename for {!r} must start with {!r}.".format(
                project.name,
                prefix,
            )
        )

    # Check to see if the file that was uploaded exists already or not.
    if request.db.query(
            request.db.query(File)
            .filter(File.filename == filename)
            .exists()).scalar():
        raise _exc_with_message(HTTPBadRequest, "File already exists.")

    # Check to see if the file that was uploaded exists in our filename log.
    if (request.db.query(
            request.db.query(Filename)
            .filter(Filename.filename == filename)
            .exists()).scalar()):
        raise _exc_with_message(
            HTTPBadRequest,
            "This filename has previously been used, you should use a "
            "different version.",
        )

    # The project may or may not have a file size specified on the project, if
    # it does then it may or may not be smaller or larger than our global file
    # size limits.
    file_size_limit = max(filter(None, [MAX_FILESIZE, project.upload_limit]))

    # Buffer the entire file into memory, checking the hash of the file as we
    # go along.
    file_content = io.BytesIO()
    file_size = 0
    file_hash = hashlib.md5()
    for chunk in iter(lambda: request.POST["content"].file.read(8096), b""):
        file_size += len(chunk)
        if file_size > file_size_limit:
            raise _exc_with_message(HTTPBadRequest, "File too large.")
        file_content.write(chunk)
        file_hash.update(chunk)
    file_content.seek(0)

    # Get the signature if it was included.
    signature_size = 0
    if "gpg_signature" in request.POST:
        signature = io.BytesIO()
        for chunk in iter(
                lambda: request.POST["gpg_signature"].file.read(8096), b""):
            signature_size += len(chunk)
            if signature_size > MAX_SIGSIZE:
                raise _exc_with_message(HTTPBadRequest, "Signature too large.")
            signature.write(chunk)
        signature.seek(0)
    else:
        signature = None

    # Actually verify that the md5 hash of the file matches the expected md5
    # hash. We probably don't actually need to use hmac.compare_digest here
    # since both the md5_digest and the file whose file_hash we've computed
    # come from the remote user, however better safe than sorry.
    if not hmac.compare_digest(form.md5_digest.data, file_hash.hexdigest()):
        raise _exc_with_message(
            HTTPBadRequest,
            "The MD5 digest supplied does not match a digest calculated from "
            "the uploaded file."
        )

    # TODO: Check the file to make sure it is a valid distribution file.

    # Check that if it's a binary wheel, it's on a supported platform
    if filename.endswith(".whl"):
        wheel_info = _wheel_file_re.match(filename)
        plats = wheel_info.group("plat").split(".")
        if set(plats) - ALLOWED_PLATFORMS:
            raise _exc_with_message(
                HTTPBadRequest,
                "Binary wheel for an unsupported platform.",
            )

    # Check whether signature is ASCII armored
    if (signature is not None and
            not signature.getvalue().startswith(
                b"-----BEGIN PGP SIGNATURE-----")):
        raise _exc_with_message(
            HTTPBadRequest,
            "PGP signature is not ASCII armored.",
        )

    # TODO: We need some sort of trigger that will automatically add filenames
    #       to Filename instead of relying on this code running inside of our
    #       upload API.
    request.db.add(Filename(filename=filename))

    # Store the information about the file in the database.
    file_ = File(
        release=release,
        filename=filename,
        python_version=form.pyversion.data,
        packagetype=form.filetype.data,
        comment_text=form.comment.data,
        size=file_size,
        has_signature=bool(signature),
        md5_digest=form.md5_digest.data,
    )
    request.db.add(file_)

    # TODO: We need a better answer about how to make this transactional so
    #       this won't take effect until after a commit has happened, for now
    #       we'll just ignore it and save it before the transaction is
    #       committed.
    storage = request.find_service(IFileStorage)
    storage.store(file_.path, file_content)
    if signature is not None:
        storage.store(file_.pgp_path, signature)

    return Response()
@view_config(
    route_name="legacy.api.pypi.submit",
    decorator=[require_POST, csrf_exempt],
)
@view_config(
    route_name="legacy.api.pypi.submit_pkg_info",
    decorator=[require_POST, csrf_exempt],
)
def submit(request):
    """Reject the removed register/submit APIs with a 410 Gone response."""
    message = (
        "This API is no longer supported, instead simply upload the file."
    )
    return _exc_with_message(HTTPGone, message)
@view_config(
    route_name="legacy.api.pypi.doc_upload",
    decorator=[require_POST, csrf_exempt],
)
def doc_upload(request):
    """Reject documentation uploads with a 410 Gone response."""
    message = (
        "Uploading documentation is no longer supported, we recommend using "
        "https://readthedocs.org/."
    )
    return _exc_with_message(HTTPGone, message)
@view_config(route_name="legacy.api.pypi.doap")
def doap(request):
    """Reject DOAP metadata requests with a 410 Gone response."""
    message = "DOAP is no longer supported."
    return _exc_with_message(HTTPGone, message)
@forbidden_view_config(request_param=":action")
def forbidden_legacy(exc, request):
    """Return the forbidden exception unchanged for legacy API requests.

    The default forbidden handler redirects to the login view, which is
    not wanted on this API, so the exception is passed straight through.
    """
    return exc
| |
import tensorflow as tf
import numpy as np
import time
import h5py
import matplotlib.pyplot as plt
# from sklearn.metrics import confusion_matrix
# import itertools
# from copy import deepcopy
# import os
# import os.path
from collections import OrderedDict
import pickle
# import cPickle as pickle
# from tensorflow.examples.tutorials.mnist import input_data
# mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
class cnnMNIST(object):
def __init__(self):
    """Set hyperparameters and build the TensorFlow graph."""
    self.lr = 1e-3            # learning rate
    self.runname = 'grusidcnn'
    self.epochs = 10000
    self.build_graph()
def onehot_labels(self, labels, num_classes=7):
    """Convert a vector of integer class labels to one-hot rows.

    Parameters
    ----------
    labels : array-like, shape (n,)
        Class indices; may be float-typed, values are truncated to int
        exactly like the original per-element ``int()`` conversion.
    num_classes : int, optional
        Width of the one-hot encoding. Defaults to 7, the class count
        previously hard-coded in this model.

    Returns
    -------
    np.ndarray, shape (n, num_classes), dtype float64
    """
    # Vectorized replacement for the per-row loop: index the identity
    # matrix by class id (float64 output matches the original np.zeros).
    return np.eye(num_classes)[np.asarray(labels).astype(int)]
def onenothot_labels(self, labels):
    """Convert one-hot rows back to a float vector of class indices.

    Parameters
    ----------
    labels : np.ndarray, shape (n, k)
        One row per sample; the argmax column is the class index.

    Returns
    -------
    np.ndarray, shape (n,), dtype float64
    """
    # Vectorized argmax over rows; the cast keeps the original float64
    # output (the loop filled a np.zeros float array).
    return np.argmax(labels, axis=1).astype(float)
def get_data(self):
    """Open the HDF5 feature dataset and keep the train/test group handles."""
    store = h5py.File('./cnnfeatures_sequential_dataset.h5', 'r')
    self.x_train = store['train']
    self.x_test = store['test']
    # NOTE: always use the keylist to get data
    self.data_keylist = list(self.x_train.keys())
    return
def batch(self, iterable, n=1, shuffle=True, small_test=True, usethesekeys=None, shortset=False):
    """Yield (features, one-hot labels, sample weights) for each HDF5 group.

    Parameters
    ----------
    iterable : h5py.Group or mapping
        Maps keys to groups holding 'features' and 'labels' datasets.
    n, small_test
        Unused; kept for backward interface compatibility.
    shuffle : bool
        When True, ``self.shuffle()`` is called before iterating.
    usethesekeys : list or None
        Keys to iterate; defaults to ``self.data_keylist``.
    shortset : bool
        When True, only the first 1000 keys are used.
    """
    if shuffle:
        self.shuffle()
    keylist = self.data_keylist if usethesekeys is None else usethesekeys
    if shortset:
        # BUG FIX: slice the resolved key list. The original sliced
        # `usethesekeys`, which raises TypeError when it is None.
        keylist = keylist[:1000]
    for key in keylist:
        # NOTE: layout of the cnn-features sequential dataset.
        x = np.array(iterable[key]['features'])
        y = np.array(iterable[key]['labels'])
        # Weight positive samples (label >= 0.5) very heavily.
        z = np.ones((y.shape[0],))
        z[y >= 0.5] = 100000.0
        yield x, self.onehot_labels(y), z
def validation_batcher(self):
# f = h5py.File('./sequential_dataset_validation.h5', 'r')
# NOTE: for using cnnfeatures sequential dataset
f = h5py.File('./cnnfeatures_sequential_dataset.h5', 'r')
samplelist = list(f.keys())
# samplelist = samplelist[:10]
for i in range(len(samplelist)):
data = f[samplelist[i]]
yield data
def build_graph(self):
self.x = tf.placeholder(tf.float32, shape=[None, 15, 1024])
self.y_ = tf.placeholder(tf.float32, shape=[None, 7])
self.weights = tf.placeholder(tf.float32, shape=[None])
num_units = 64
num_layers = 2
lstm_in = tf.transpose(self.x, [1,0,2])
lstm_in = tf.reshape(lstm_in, [-1, 1024])
lstm_in = tf.layers.dense(lstm_in, num_units, activation=None)
lstm_in = tf.split(lstm_in, 15, 0)
lstm = tf.contrib.rnn.GRUCell(num_units)
cell = tf.contrib.rnn.MultiRNNCell([lstm] * num_layers)
batch_size = tf.shape(self.x)[0]
initial_state = cell.zero_state(batch_size, tf.float32)
output, state = tf.contrib.rnn.static_rnn(cell, lstm_in, dtype=tf.float32, initial_state=initial_state)
self.y_conv = tf.layers.dense(output[-1], 7, name='logits')
# self.loss = tf.reduce_sum(tf.nn.softmax_cross_entropy_with_logits(
# logits=self.y_conv, labels=self.y_))
# self.y_conv = tf.nn.softmax(logit) # probably a mistake here
# ratio = 1.0 / 1000000.0
# ratio = 1.0 / ratio
# class_weight = tf.constant([ratio, 1.0 - ratio])
# weighted_logits = tf.multiply(self.y_conv, class_weight) # shape [batch_size, 2]
# self.loss = tf.nn.softmax_cross_entropy_with_logits(
# logits=weighted_logits, labels=self.y_, name="xent_raw")
# NOTE: Normal gru
# self.loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=self.y_, logits=self.y_conv))
# NOTE Normal gru with summing instead of mean
self.loss = tf.reduce_sum(tf.nn.softmax_cross_entropy_with_logits(labels=self.y_, logits=self.y_conv))
# NOTE: Weighted gru
# self.loss = tf.reduce_mean(tf.nn.weighted_cross_entropy_with_logits(targets=self.y_, logits=self.y_conv, pos_weight=200.0))
# NOTE: Weighted gru with summing instead of mean
# self.loss = tf.reduce_sum(tf.nn.weighted_cross_entropy_with_logits(targets=self.y_, logits=self.y_conv, pos_weight=5.0))
# self.loss = tf.reduce_sum(tf.losses.sparse_softmax_cross_entropy(labels=self.y_), logits=self.y_conv, weights=self.weights))
self.train_step = tf.train.AdamOptimizer(self.lr).minimize(self.loss)
def shuffle(self):
np.random.shuffle(self.data_keylist)
return
def train(self):
self.sess = tf.Session()
init = tf.global_variables_initializer()
self.sess.run(init)
self.eval() # creating evaluation
a = time.time()
for i in range(self.epochs):
# batch = mnist.train.next_batch(50)
x_generator = self.batch(self.x_train, shuffle=True)
if i % 100 == 0 and i != 0:
counter = 0
sum_acc = 0
sum_loss = 0
hits = 0
meh = 0
x_generator_test = self.batch(self.x_test,
usethesekeys=list(self.x_test.keys()), shortset=True)
for j, k, z in x_generator_test:
# NOTE: quick and dirty preprocessing once again
# feedme = j / j.sum(axis=-1, keepdims=True)
accuracy, train_loss, prediction = self.sess.run([self.accuracy, self.loss, self.prediction],feed_dict={self.x: feedme,
self.y_: k,
self.weights: z})
sum_loss += np.sum(train_loss)
hits += np.sum(prediction)
sum_acc += accuracy
counter += feedme.shape[0]
meh += 1
b = time.time()
print('step {}:\navg acc {}\navg loss {}\ntotalhits {}\ntime elapsed: {} s'.format(i, sum_acc / meh, sum_loss / counter, hits, b-a))
x, y, z = next(x_generator)
# NOTE: QUick and dirty preprocessing. normalize to counts
# x = x / x.sum(axis=-1, keepdims=True)
# stop
# for j in range(x.shape[1]):
# spectra = x[7, j, :]
# fig = plt.figure()
# plt.plot(spectra)
# fig.savefig('seqspec_{}'.format(j))
# plt.close()
# print(y[7, :])
# stop
self.sess.run([self.train_step], feed_dict={
self.x: x,
self.y_: y,
self.weights: z})
# self.shuffle()
def eval(self):
# self.time_index = np.arange(self.y_conv.get_shape()[0])
self.prediction = tf.argmax(self.y_conv, 1)
truth = tf.argmax(self.y_, 1)
correct_prediction = tf.equal(self.prediction, truth)
self.accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
# def test_eval(self):
# self.eval()
# x_generator = self.batch(self.x_test, n=100, shuffle=False)
# y_generator = self.batch(self.y_test, n=100, shuffle=False)
# test_acc = []
# counter = 0
# for data in x_generator:
# test_acc += [self.sess.run(self.accuracy, feed_dict={
# self.x: data, self.y_: next(y_generator), self.keep_prob: 1.0})]
# total_test_acc = sum(test_acc) / float(len(test_acc))
# print('test accuracy %g' % total_test_acc)
def weight_variable(self, shape):
initial = tf.truncated_normal(shape, stddev=0.1)
return tf.Variable(initial)
def bias_variable(self, shape):
initial = tf.constant(0.1, shape=shape)
return tf.Variable(initial)
def hack_1dreshape(self, x):
# expand its dimensionality to fit into conv2d
tensor_expand = tf.expand_dims(x, 1)
tensor_expand = tf.expand_dims(tensor_expand, -1)
return tensor_expand
def conv2d(self, x, W):
return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')
def max_pool_2x2(self, x):
return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1], padding='SAME')
def get_label_predictions(self):
x_batcher = self.batch(self.x_test, n=1000, shuffle=False,
usethesekeys=list(self.x_test.keys()))
# y_batcher = self.batch(self.y_test, n=1000, shuffle=False)
predictions = []
correct_predictions = np.zeros((0, 7))
for x, y, z in x_batcher:
# x_features = x / x.sum(axis=-1, keepdims=True)
x_features = x
temp_predictions = self.sess.run(
self.prediction,
feed_dict={self.x: x_features})
predictions += temp_predictions.tolist()
correct_predictions = np.vstack((correct_predictions, y))
return predictions, correct_predictions
# def plot_confusion_matrix(cm, classes,
# normalize=False,
# title='Confusion matrix',
# cmap=plt.cm.Blues):
# """
# This function prints and plots the confusion matrix.
# Normalization can be applied by setting `normalize=True`.
# """
# if normalize:
# cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
# print("Normalized confusion matrix")
# else:
# print('Confusion matrix, without normalization')
#
# print(cm)
#
# plt.imshow(cm, interpolation='nearest', cmap=cmap)
# plt.title(title)
# plt.colorbar()
# tick_marks = np.arange(len(classes))
# plt.xticks(tick_marks, classes, rotation=45)
# plt.yticks(tick_marks, classes)
#
# fmt = '.2f' if normalize else 'd'
# thresh = cm.max() / 2.
# for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
# plt.text(j, i, format(cm[i, j], fmt),
# horizontalalignment="center",
# color="white" if cm[i, j] > thresh else "black")
#
# plt.tight_layout()
# plt.ylabel('True label')
# plt.xlabel('Predicted label')
def save_obj(obj, name):
    """Pickle ``obj`` to obj/<name>.pkl using the highest pickle protocol.

    ROBUSTNESS: creates the ``obj/`` directory if missing; the original
    assumed it already existed and crashed with FileNotFoundError otherwise.
    """
    import os
    os.makedirs('obj', exist_ok=True)
    with open('obj/' + name + '.pkl', 'wb') as f:
        pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)
def load_obj(name):
    """Unpickle and return the object stored at obj/<name>.pkl."""
    path = 'obj/' + name + '.pkl'
    with open(path, 'rb') as handle:
        return pickle.load(handle)
def main():
    """Train the model, dump test-set predictions to .npy files, and exit.

    The validation pass below the early ``return`` is currently disabled:
    the original aborted here on purpose by evaluating the undefined name
    ``stop`` (an uncaught NameError). It is kept, unreachable, for
    reference until it is re-enabled.
    """
    cnn = cnnMNIST()
    runname = cnn.runname
    a = time.time()
    print('Retrieving data')
    cnn.get_data()
    b = time.time()
    print('Built the data in {} s'.format(b - a))

    a = time.time()
    cnn.train()
    b = time.time()
    print('Training time: {} s'.format(b - a))

    predictions, y = cnn.get_label_predictions()
    predictions_decode = predictions
    labels_decode = cnn.onenothot_labels(y)

    np.save('{}_predictions.npy'.format(runname), predictions_decode)
    np.save('{}_ground_truth.npy'.format(runname), labels_decode)
    # BUGFIX: exit cleanly instead of crashing via the undefined `stop`.
    return

    # --- disabled validation pass (unreachable, kept for reference) ---
    validation_data = cnn.validation_batcher()
    answers = OrderedDict()
    for sample in validation_data:
        x = np.array(sample)
        x_features = x / x.sum(axis=-1, keepdims=True)
        predictions = cnn.sess.run(
            cnn.prediction,
            feed_dict={cnn.x: x_features})
        time_index = np.arange(predictions.shape[0])
        mask = predictions >= 0.5
        # NOTE(review): rebinding `runname` here means the save_obj call
        # below uses the LAST sample's name -- probably unintended; verify
        # before re-enabling this pass.
        runname = sample.name.split('/')[-1]
        if np.sum(mask) != 0:
            counts = np.sum(np.squeeze(x[:, -1, :]), axis=-1)
            t = time_index[mask]
            t = [int(i) for i in t]
            index_guess = np.argmax(counts[t])
            current_spectra = np.squeeze(x[t[index_guess], -1, :])
            current_time = t[index_guess] + 15
            answers[runname] = {'time': current_time,
                                'spectra': current_spectra}
        else:
            answers[runname] = {'time': 0,
                                'spectra': 0}
    save_obj(answers, '{}_hits'.format(runname))
    return


if __name__ == '__main__':
    # Guard the entry point so importing this module does not start training.
    main()
| |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Big Switch Networks, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Mandeep Dhami, Big Switch Networks, Inc.
# @author: Sumit Naiksatam, sumitnaiksatam@gmail.com, Big Switch Networks, Inc.
"""
Quantum REST Proxy Plug-in for Big Switch and FloodLight Controllers
QuantumRestProxy provides a generic quantum plugin that translates all plugin
function calls to equivalent authenticated REST calls to a set of redundant
external network controllers. It also keeps persistent store for all quantum
state to allow for re-sync of the external controller(s), if required.
The local state on the plugin also allows for local response and fast-fail
semantics where it can be determined based on the local persistent store.
Network controller specific code is decoupled from this plugin and expected
to reside on the controller itself (via the REST interface).
This allows for:
- independent authentication and redundancy schemes between quantum and the
network controller
- independent upgrade/development cycles between quantum and the controller
as it limits the proxy code upgrade requirement to quantum release cycle
and the controller specific code upgrade requirement to controller code
- ability to sync the controller with quantum for independent recovery/reset
External REST API used by proxy is the same API as defined for quantum (JSON
subset) with some additional parameters (gateway on network-create and macaddr
on port-attach) on an additional PUT to do a bulk dump of all persistent data.
"""
import base64
import httplib
import json
import socket
from quantum.common import exceptions
from quantum.common import rpc as q_rpc
from quantum.common import topics
from quantum import context as qcontext
from quantum.db import api as db
from quantum.db import db_base_plugin_v2
from quantum.db import dhcp_rpc_base
from quantum.db import models_v2
from quantum.openstack.common import cfg
from quantum.openstack.common import log as logging
from quantum.openstack.common import rpc
from quantum.plugins.bigswitch.version import version_string_with_vcs
LOG = logging.getLogger(__name__)
# SQLAlchemy options for the plugin's local persistent store.
database_opts = [
    cfg.StrOpt('sql_connection', default='sqlite://'),
    cfg.IntOpt('sql_max_retries', default=-1),
    cfg.IntOpt('reconnect_interval', default=2),
]
# Connection settings for the redundant external network controllers.
restproxy_opts = [
    cfg.StrOpt('servers', default='localhost:8800'),
    cfg.StrOpt('serverauth', default='username:password'),
    cfg.BoolOpt('serverssl', default=False),
    cfg.BoolOpt('syncdata', default=False),
    cfg.IntOpt('servertimeout', default=10),
]
cfg.CONF.register_opts(database_opts, "DATABASE")
cfg.CONF.register_opts(restproxy_opts, "RESTPROXY")
# The following are used to invoke the API on the external controller
NET_RESOURCE_PATH = "/tenants/%s/networks"
PORT_RESOURCE_PATH = "/tenants/%s/networks/%s/ports"
NETWORKS_PATH = "/tenants/%s/networks/%s"
PORTS_PATH = "/tenants/%s/networks/%s/ports/%s"
ATTACHMENT_PATH = "/tenants/%s/networks/%s/ports/%s/attachment"
# HTTP status classification: any 2xx counts as success; 3xx redirects are
# treated as failures so the server pool fails over to the next server.
SUCCESS_CODES = range(200, 207)
FAILURE_CODES = [0, 301, 302, 303, 400, 401, 403, 404, 500, 501, 502, 503,
                 504, 505]
SYNTAX_ERROR_MESSAGE = 'Syntax error in server config file, aborting plugin'
class RemoteRestError(exceptions.QuantumException):
    """Raised when a REST request to the backend network controller fails."""

    def __init__(self, message):
        detail = "None" if message is None else message
        prefix = _("Error in REST call to remote network "
                   "controller")
        self.message = prefix + ": " + detail
        super(RemoteRestError, self).__init__()
class ServerProxy(object):
    """REST server proxy to a network controller."""

    def __init__(self, server, port, ssl, auth, timeout, base_uri, name):
        self.server = server
        self.port = port
        self.ssl = ssl
        self.base_uri = base_uri
        self.timeout = timeout
        self.name = name
        self.success_codes = SUCCESS_CODES
        self.auth = None
        if auth:
            # Pre-compute the HTTP Basic auth header value once.
            self.auth = 'Basic ' + base64.encodestring(auth).strip()

    def rest_call(self, action, resource, data, headers):
        """Issue one HTTP(S) request and return (status, reason, body, data).

        On socket timeout/error returns (0, None, None, None); on a 2xx
        response with a JSON body, the fourth element is the parsed JSON.
        """
        uri = self.base_uri + resource
        body = json.dumps(data)
        if not headers:
            headers = {}
        headers['Content-type'] = 'application/json'
        headers['Accept'] = 'application/json'
        headers['QuantumProxy-Agent'] = self.name
        if self.auth:
            headers['Authorization'] = self.auth
        LOG.debug('ServerProxy: server=%s, port=%d, ssl=%r, action=%s' %
                  (self.server, self.port, self.ssl, action))
        LOG.debug('ServerProxy: resource=%s, data=%r, headers=%r' %
                  (resource, data, headers))

        # DEAD-CODE FIX: httplib connection constructors raise on error and
        # never return None, so the original `if conn is None` branches were
        # unreachable and have been removed.
        if self.ssl:
            conn = httplib.HTTPSConnection(
                self.server, self.port, timeout=self.timeout)
        else:
            conn = httplib.HTTPConnection(
                self.server, self.port, timeout=self.timeout)

        try:
            conn.request(action, uri, body, headers)
            response = conn.getresponse()
            respstr = response.read()
            respdata = respstr
            if response.status in self.success_codes:
                try:
                    respdata = json.loads(respstr)
                except ValueError:
                    # response was not JSON, ignore the exception
                    pass
            ret = (response.status, response.reason, respstr, respdata)
        except (socket.timeout, socket.error) as e:
            LOG.error('ServerProxy: %s failure, %r' % (action, e))
            ret = 0, None, None, None
        finally:
            # LEAK FIX: always close the connection, even if an unexpected
            # exception propagates out of the request.
            conn.close()
        LOG.debug('ServerProxy: status=%d, reason=%r, ret=%s, data=%r' % ret)
        return ret
class ServerPool(object):
    """Fail-over pool of ServerProxy objects sharing one configuration.

    The first server in the list is "sticky": it keeps receiving calls
    until it fails, at which point it is rotated to the back of the pool.
    """

    def __init__(self, servers, ssl, auth, timeout=10,
                 base_uri='/quantum/v1.0', name='QuantumRestProxy'):
        self.base_uri = base_uri
        self.timeout = timeout
        self.name = name
        self.auth = auth
        self.ssl = ssl
        self.servers = [self.server_proxy_for(*server_port)
                        for server_port in servers]

    def server_proxy_for(self, server, port):
        """Build a ServerProxy for one (host, port) using pool settings."""
        return ServerProxy(server, port, self.ssl, self.auth,
                           self.timeout, self.base_uri, self.name)

    def server_failure(self, resp):
        """Define failure codes as required.

        Note: We assume 301-303 is a failure, and try the next server in
        the server pool.
        """
        return resp[0] in FAILURE_CODES

    def action_success(self, resp):
        """Defining success codes as required.

        Note: We assume any valid 2xx as being successful response.
        """
        return resp[0] in SUCCESS_CODES

    def rest_call(self, action, resource, data, headers):
        """Try each server in order until one responds without failure."""
        failed = []
        while self.servers:
            current = self.servers[0]
            result = current.rest_call(action, resource, data, headers)
            if not self.server_failure(result):
                # Success: put any previously failed servers back in the pool.
                self.servers.extend(failed)
                return result
            LOG.error('ServerProxy: %s failure for servers: %r' % (
                action, (current.server, current.port)))
            failed.append(self.servers.pop(0))
        # All servers failed, reset server list and try again next time
        LOG.error('ServerProxy: %s failure for all servers: %r' % (
            action, tuple((s.server, s.port) for s in failed)))
        self.servers.extend(failed)
        return (0, None, None, None)

    def get(self, resource, data='', headers=None):
        return self.rest_call('GET', resource, data, headers)

    def put(self, resource, data, headers=None):
        return self.rest_call('PUT', resource, data, headers)

    def post(self, resource, data, headers=None):
        return self.rest_call('POST', resource, data, headers)

    def delete(self, resource, data='', headers=None):
        return self.rest_call('DELETE', resource, data, headers)
class RpcProxy(dhcp_rpc_base.DhcpRpcCallbackMixin):
    # Thin RPC endpoint: inherits the DHCP agent callbacks from the mixin
    # and only pins the RPC API version and builds a dispatcher over itself.
    RPC_API_VERSION = '1.0'

    def create_rpc_dispatcher(self):
        """Return a PluginRpcDispatcher that routes RPC calls to self."""
        return q_rpc.PluginRpcDispatcher([self])
class QuantumRestProxyV2(db_base_plugin_v2.QuantumDbPluginV2):
    """Quantum plugin that mirrors all CRUD calls to REST controllers.

    Each operation is first persisted through the standard Quantum DB
    plugin, then replayed to the external network controller(s) via the
    ServerPool; create/update roll back the local change and re-raise on
    remote failure, while deletes are best-effort on the remote side.
    """

    def __init__(self):
        LOG.info('QuantumRestProxy: Starting plugin. Version=%s' %
                 version_string_with_vcs())

        # init DB, proxy's persistent store defaults to in-memory sql-lite DB
        options = {"sql_connection": "%s" % cfg.CONF.DATABASE.sql_connection,
                   "sql_max_retries": cfg.CONF.DATABASE.sql_max_retries,
                   "reconnect_interval": cfg.CONF.DATABASE.reconnect_interval,
                   "base": models_v2.model_base.BASEV2}
        db.configure_db(options)

        # 'servers' is the list of network controller REST end-points
        # (used in order specified till one succeeds, and it is sticky
        # till next failure). Use 'serverauth' to encode api-key
        servers = cfg.CONF.RESTPROXY.servers
        serverauth = cfg.CONF.RESTPROXY.serverauth
        serverssl = cfg.CONF.RESTPROXY.serverssl
        syncdata = cfg.CONF.RESTPROXY.syncdata
        timeout = cfg.CONF.RESTPROXY.servertimeout

        # validate config
        assert servers is not None, 'Servers not defined. Aborting plugin'
        servers = tuple(s.rsplit(':', 1) for s in servers.split(','))
        servers = tuple((server, int(port)) for server, port in servers)
        assert all(len(s) == 2 for s in servers), SYNTAX_ERROR_MESSAGE

        # init network ctrl connections
        self.servers = ServerPool(servers, serverssl, serverauth,
                                  timeout)

        # init dhcp support
        self.topic = topics.PLUGIN
        self.conn = rpc.create_connection(new=True)
        self.callbacks = RpcProxy()
        self.dispatcher = self.callbacks.create_rpc_dispatcher()
        self.conn.create_consumer(self.topic, self.dispatcher,
                                  fanout=False)
        # Consume from all consumers in a thread
        self.conn.consume_in_thread()

        if syncdata:
            # Push the full current state to the controller at startup.
            self._send_all_data()

        LOG.debug("QuantumRestProxyV2: initialization done")

    def create_network(self, context, network):
        """Create a network, which represents an L2 network segment which
        can have a set of subnets and ports associated with it.
        :param context: quantum api request context
        :param network: dictionary describing the network

        :returns: a sequence of mappings with the following signature:
        {
            "id": UUID representing the network.
            "name": Human-readable name identifying the network.
            "tenant_id": Owner of network. NOTE: only admin user can specify
                         a tenant_id other than its own.
            "admin_state_up": Sets admin state of network.
                              if down, network does not forward packets.
            "status": Indicates whether network is currently operational
                      (values are "ACTIVE", "DOWN", "BUILD", and "ERROR")
            "subnets": Subnets associated with this network.
        }

        :raises: RemoteRestError
        """
        LOG.debug("QuantumRestProxyV2: create_network() called")

        # Validate args
        tenant_id = self._get_tenant_id_for_create(context, network["network"])
        net_name = network["network"]["name"]
        if network["network"]["admin_state_up"] is False:
            LOG.warning("Network with admin_state_up=False are not yet "
                        "supported by this plugin. Ignoring setting for "
                        "network %s", net_name)

        # create in DB
        new_net = super(QuantumRestProxyV2, self).create_network(context,
                                                                 network)

        # create on network ctrl
        try:
            resource = NET_RESOURCE_PATH % tenant_id
            data = {
                "network": {
                    "id": new_net["id"],
                    "name": new_net["name"],
                }
            }
            ret = self.servers.post(resource, data)
            if not self.servers.action_success(ret):
                raise RemoteRestError(ret[2])
        except RemoteRestError as e:
            LOG.error("QuantumRestProxyV2:Unable to create remote network:%s" %
                      e.message)
            # Roll back the local DB record so state stays consistent.
            super(QuantumRestProxyV2, self).delete_network(context,
                                                           new_net['id'])
            raise

        # return created network
        return new_net

    def update_network(self, context, net_id, network):
        """Updates the properties of a particular Virtual Network.
        :param context: quantum api request context
        :param net_id: uuid of the network to update
        :param network: dictionary describing the updates

        :returns: a sequence of mappings with the following signature:
        {
            "id": UUID representing the network.
            "name": Human-readable name identifying the network.
            "tenant_id": Owner of network. NOTE: only admin user can
                         specify a tenant_id other than its own.
            "admin_state_up": Sets admin state of network.
                              if down, network does not forward packets.
            "status": Indicates whether network is currently operational
                      (values are "ACTIVE", "DOWN", "BUILD", and "ERROR")
            "subnets": Subnets associated with this network.
        }

        :raises: exceptions.NetworkNotFound
        :raises: RemoteRestError
        """
        LOG.debug("QuantumRestProxyV2.update_network() called")

        # Validate Args
        # BUGFIX: the original nested check ("if truthy: if is False:")
        # could never fire and referenced an undefined `net_name`; test
        # for an explicit False and log the network id instead.
        if network["network"].get("admin_state_up") is False:
            LOG.warning("Network with admin_state_up=False are not yet "
                        "supported by this plugin. Ignoring setting for "
                        "network %s", net_id)

        # update DB
        orig_net = super(QuantumRestProxyV2, self).get_network(context, net_id)
        tenant_id = orig_net["tenant_id"]
        new_net = super(QuantumRestProxyV2, self).update_network(
            context, net_id, network)

        # update network on network controller
        if new_net["name"] != orig_net["name"]:
            try:
                resource = NETWORKS_PATH % (tenant_id, net_id)
                data = {
                    "network": new_net,
                }
                ret = self.servers.put(resource, data)
                if not self.servers.action_success(ret):
                    raise RemoteRestError(ret[2])
            except RemoteRestError as e:
                LOG.error(
                    "QuantumRestProxyV2: Unable to update remote network: %s" %
                    e.message)
                # reset network to original state
                # BUGFIX: pass net_id; the original passed the builtin `id`.
                super(QuantumRestProxyV2, self).update_network(
                    context, net_id, orig_net)
                raise

        # return updated network
        return new_net

    def delete_network(self, context, net_id):
        """Delete a network.
        :param context: quantum api request context
        :param id: UUID representing the network to delete.

        :returns: None

        :raises: exceptions.NetworkInUse
        :raises: exceptions.NetworkNotFound
        :raises: RemoteRestError
        """
        LOG.debug("QuantumRestProxyV2: delete_network() called")

        # Validate args
        orig_net = super(QuantumRestProxyV2, self).get_network(context, net_id)
        tenant_id = orig_net["tenant_id"]

        # delete from network ctrl. Remote error on delete is ignored
        try:
            resource = NETWORKS_PATH % (tenant_id, net_id)
            ret = self.servers.delete(resource)
            if not self.servers.action_success(ret):
                raise RemoteRestError(ret[2])
            ret_val = super(QuantumRestProxyV2, self).delete_network(context,
                                                                     net_id)
            return ret_val
        except RemoteRestError as e:
            # NOTE(review): on remote failure the local DB row is NOT
            # deleted and None is returned -- confirm this best-effort
            # behavior is intended.
            LOG.error(
                "QuantumRestProxyV2: Unable to update remote network: %s" %
                e.message)

    def create_port(self, context, port):
        """Create a port, which is a connection point of a device
        (e.g., a VM NIC) to attach to a L2 Quantum network.
        :param context: quantum api request context
        :param port: dictionary describing the port

        :returns:
        {
            "id": uuid represeting the port.
            "network_id": uuid of network.
            "tenant_id": tenant_id
            "mac_address": mac address to use on this port.
            "admin_state_up": Sets admin state of port. if down, port
                              does not forward packets.
            "status": dicates whether port is currently operational
                      (limit values to "ACTIVE", "DOWN", "BUILD", and "ERROR")
            "fixed_ips": list of subnet ID"s and IP addresses to be used on
                         this port
            "device_id": identifies the device (e.g., virtual server) using
                         this port.
        }

        :raises: exceptions.NetworkNotFound
        :raises: exceptions.StateInvalid
        :raises: RemoteRestError
        """
        LOG.debug("QuantumRestProxyV2: create_port() called")

        # Update DB: create the port DOWN first; it is flipped up only
        # after the controller accepts it.
        port["port"]["admin_state_up"] = False
        new_port = super(QuantumRestProxyV2, self).create_port(context, port)
        net = super(QuantumRestProxyV2,
                    self).get_network(context, new_port["network_id"])

        # create on network ctrl
        try:
            resource = PORT_RESOURCE_PATH % (net["tenant_id"], net["id"])
            data = {
                "port": {
                    "id": new_port["id"],
                    "state": "ACTIVE",
                }
            }
            ret = self.servers.post(resource, data)
            if not self.servers.action_success(ret):
                raise RemoteRestError(ret[2])

            # connect device to network, if present
            if port["port"].get("device_id"):
                self._plug_interface(context,
                                     net["tenant_id"], net["id"],
                                     new_port["id"], new_port["id"] + "00")
        except RemoteRestError as e:
            LOG.error("QuantumRestProxyV2: Unable to create remote port: %s" %
                      e.message)
            super(QuantumRestProxyV2, self).delete_port(context,
                                                        new_port["id"])
            raise

        # Set port state up and return that port
        port_update = {"port": {"admin_state_up": True}}
        return super(QuantumRestProxyV2, self).update_port(context,
                                                           new_port["id"],
                                                           port_update)

    def update_port(self, context, port_id, port):
        """Update values of a port.
        :param context: quantum api request context
        :param id: UUID representing the port to update.
        :param port: dictionary with keys indicating fields to update.

        :returns: a mapping sequence with the following signature:
        {
            "id": uuid represeting the port.
            "network_id": uuid of network.
            "tenant_id": tenant_id
            "mac_address": mac address to use on this port.
            "admin_state_up": sets admin state of port. if down, port
                              does not forward packets.
            "status": dicates whether port is currently operational
                      (limit values to "ACTIVE", "DOWN", "BUILD", and "ERROR")
            "fixed_ips": list of subnet ID's and IP addresses to be used on
                         this port
            "device_id": identifies the device (e.g., virtual server) using
                         this port.
        }

        :raises: exceptions.StateInvalid
        :raises: exceptions.PortNotFound
        :raises: RemoteRestError
        """
        LOG.debug("QuantumRestProxyV2: update_port() called")

        # Validate Args
        orig_port = super(QuantumRestProxyV2, self).get_port(context, port_id)

        # Update DB
        new_port = super(QuantumRestProxyV2, self).update_port(context,
                                                               port_id, port)

        # update on network ctrl
        try:
            resource = PORTS_PATH % (orig_port["tenant_id"],
                                     orig_port["network_id"], port_id)
            data = {"port": new_port, }
            ret = self.servers.put(resource, data)
            if not self.servers.action_success(ret):
                raise RemoteRestError(ret[2])

            # Re-plug the attachment if the owning device changed.
            if new_port.get("device_id") != orig_port.get("device_id"):
                if orig_port.get("device_id"):
                    self._unplug_interface(context, orig_port["tenant_id"],
                                           orig_port["network_id"],
                                           orig_port["id"])
                if new_port.get("device_id"):
                    self._plug_interface(context, new_port["tenant_id"],
                                         new_port["network_id"],
                                         new_port["id"], new_port["id"] + "00")
        except RemoteRestError as e:
            LOG.error(
                "QuantumRestProxyV2: Unable to create remote port: %s" %
                e.message)
            # reset port to original state
            super(QuantumRestProxyV2, self).update_port(context, port_id,
                                                        orig_port)
            raise

        # return new_port
        return new_port

    def delete_port(self, context, port_id):
        """Delete a port.
        :param context: quantum api request context
        :param id: UUID representing the port to delete.

        :raises: exceptions.PortInUse
        :raises: exceptions.PortNotFound
        :raises: exceptions.NetworkNotFound
        :raises: RemoteRestError
        """
        LOG.debug("QuantumRestProxyV2: delete_port() called")

        # Delete from DB
        port = super(QuantumRestProxyV2, self).get_port(context, port_id)

        # delete from network ctrl. Remote error on delete is ignored
        try:
            resource = PORTS_PATH % (port["tenant_id"], port["network_id"],
                                     port_id)
            ret = self.servers.delete(resource)
            if not self.servers.action_success(ret):
                raise RemoteRestError(ret[2])

            if port.get("device_id"):
                self._unplug_interface(context, port["tenant_id"],
                                       port["network_id"], port["id"])
            ret_val = super(QuantumRestProxyV2, self).delete_port(context,
                                                                  port_id)
            return ret_val
        except RemoteRestError as e:
            # NOTE(review): as with delete_network, a remote failure leaves
            # the local DB row in place and returns None.
            LOG.error(
                "QuantumRestProxyV2: Unable to update remote port: %s" %
                e.message)

    def _plug_interface(self, context, tenant_id, net_id, port_id,
                        remote_interface_id):
        """Attaches a remote interface to the specified port on the
        specified Virtual Network.

        :returns: None

        :raises: exceptions.NetworkNotFound
        :raises: exceptions.PortNotFound
        :raises: RemoteRestError
        """
        LOG.debug("QuantumRestProxyV2: _plug_interface() called")

        # update attachment on network controller
        try:
            port = super(QuantumRestProxyV2, self).get_port(context, port_id)
            mac = port["mac_address"]

            # Propagate the subnet gateway (if any) to the controller first.
            for ip in port["fixed_ips"]:
                if ip.get("subnet_id") is not None:
                    subnet = super(QuantumRestProxyV2, self).get_subnet(
                        context, ip["subnet_id"])
                    gateway = subnet.get("gateway_ip")
                    if gateway is not None:
                        resource = NETWORKS_PATH % (tenant_id, net_id)
                        data = {"network":
                                {"id": net_id,
                                 "gateway": gateway,
                                 }
                                }
                        ret = self.servers.put(resource, data)
                        if not self.servers.action_success(ret):
                            raise RemoteRestError(ret[2])

            if mac is not None:
                resource = ATTACHMENT_PATH % (tenant_id, net_id, port_id)
                data = {"attachment":
                        {"id": remote_interface_id,
                         "mac": mac,
                         }
                        }
                ret = self.servers.put(resource, data)
                if not self.servers.action_success(ret):
                    raise RemoteRestError(ret[2])
        except RemoteRestError as e:
            LOG.error("QuantumRestProxyV2:Unable to update remote network:%s" %
                      e.message)
            raise

    def _unplug_interface(self, context, tenant_id, net_id, port_id):
        """Detaches a remote interface from the specified port on the
        network controller

        :returns: None

        :raises: RemoteRestError
        """
        LOG.debug("QuantumRestProxyV2: _unplug_interface() called")

        # delete from network ctrl. Remote error on delete is ignored
        try:
            resource = ATTACHMENT_PATH % (tenant_id, net_id, port_id)
            ret = self.servers.delete(resource)
            if not self.servers.action_success(ret):
                raise RemoteRestError(ret[2])
        except RemoteRestError as e:
            LOG.error(
                "QuantumRestProxyV2: Unable to update remote port: %s" %
                e.message)

    def _send_all_data(self):
        """Pushes all data to network ctrl (networks/ports, ports/attachments)
        to give the controller an option to re-sync it's persistent store
        with quantum's current view of that data.
        """
        admin_context = qcontext.get_admin_context()
        networks = {}
        ports = {}

        all_networks = super(QuantumRestProxyV2,
                             self).get_networks(admin_context) or []
        for net in all_networks:
            networks[net.get('id')] = {
                'id': net.get('id'),
                'name': net.get('name'),
                'op-status': net.get('admin_state_up'),
            }

            subnets = net.get('subnets', [])
            for subnet_id in subnets:
                subnet = self.get_subnet(admin_context, subnet_id)
                gateway_ip = subnet.get('gateway_ip')
                if gateway_ip:
                    # FIX: For backward compatibility with wire protocol
                    networks[net.get('id')]['gateway'] = gateway_ip

            ports = []
            net_filter = {'network_id': [net.get('id')]}
            net_ports = super(QuantumRestProxyV2,
                              self).get_ports(admin_context,
                                              filters=net_filter) or []
            for port in net_ports:
                port_details = {
                    'id': port.get('id'),
                    'attachment': {
                        'id': port.get('id') + '00',
                        'mac': port.get('mac_address'),
                    },
                    'state': port.get('status'),
                    'op-status': port.get('admin_state_up'),
                    'mac': None
                }
                ports.append(port_details)
            networks[net.get('id')]['ports'] = ports
        try:
            resource = '/topology'
            data = {
                'networks': networks,
            }
            ret = self.servers.put(resource, data)
            if not self.servers.action_success(ret):
                raise RemoteRestError(ret[2])
            return ret
        except RemoteRestError as e:
            LOG.error(
                'QuantumRestProxy: Unable to update remote network: %s' %
                e.message)
            raise
| |
"""The test for binary_sensor device automation."""
from datetime import timedelta
import pytest
import homeassistant.components.automation as automation
from homeassistant.components.binary_sensor import DEVICE_CLASSES, DOMAIN
from homeassistant.components.binary_sensor.device_trigger import ENTITY_TRIGGERS
from homeassistant.const import CONF_PLATFORM, STATE_OFF, STATE_ON
from homeassistant.helpers import device_registry
from homeassistant.setup import async_setup_component
import homeassistant.util.dt as dt_util
from tests.common import (
MockConfigEntry,
async_fire_time_changed,
async_get_device_automation_capabilities,
async_get_device_automations,
async_mock_service,
mock_device_registry,
mock_registry,
)
from tests.components.blueprint.conftest import stub_blueprint_populate # noqa: F401
@pytest.fixture
def device_reg(hass):
    """Provide an empty, pre-loaded device registry."""
    registry = mock_device_registry(hass)
    return registry
@pytest.fixture
def entity_reg(hass):
    """Provide an empty, pre-loaded entity registry."""
    registry = mock_registry(hass)
    return registry
@pytest.fixture
def calls(hass):
    """Record invocations of the mocked test.automation service."""
    service_calls = async_mock_service(hass, "test", "automation")
    return service_calls
async def test_get_triggers(hass, device_reg, entity_reg):
    """Test we get the expected triggers from a binary_sensor.

    One entity per device class is registered against a single device; the
    device-automation API must then report exactly the triggers declared in
    ENTITY_TRIGGERS for every device class, in declaration order.
    """
    platform = getattr(hass.components, f"test.{DOMAIN}")
    platform.init()
    config_entry = MockConfigEntry(domain="test", data={})
    config_entry.add_to_hass(hass)
    # Entities must be attached to a registered device to expose triggers.
    device_entry = device_reg.async_get_or_create(
        config_entry_id=config_entry.entry_id,
        connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
    )
    for device_class in DEVICE_CLASSES:
        entity_reg.async_get_or_create(
            DOMAIN,
            "test",
            platform.ENTITIES[device_class].unique_id,
            device_id=device_entry.id,
        )
    assert await async_setup_component(hass, DOMAIN, {DOMAIN: {CONF_PLATFORM: "test"}})
    await hass.async_block_till_done()
    # One expected trigger dict per (device_class, trigger type) pair.
    expected_triggers = [
        {
            "platform": "device",
            "domain": DOMAIN,
            "type": trigger["type"],
            "device_id": device_entry.id,
            "entity_id": platform.ENTITIES[device_class].entity_id,
        }
        for device_class in DEVICE_CLASSES
        for trigger in ENTITY_TRIGGERS[device_class]
    ]
    triggers = await async_get_device_automations(hass, "trigger", device_entry.id)
    assert triggers == expected_triggers
async def test_get_trigger_capabilities(hass, device_reg, entity_reg):
    """Each binary_sensor trigger must expose the optional 'for' extra field."""
    entry = MockConfigEntry(domain="test", data={})
    entry.add_to_hass(hass)
    device = device_reg.async_get_or_create(
        config_entry_id=entry.entry_id,
        connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
    )
    entity_reg.async_get_or_create(DOMAIN, "test", "5678", device_id=device.id)
    expected = {
        "extra_fields": [
            {"name": "for", "optional": True, "type": "positive_time_period_dict"}
        ]
    }
    # Every trigger reported for the device must advertise the same schema.
    for trigger in await async_get_device_automations(hass, "trigger", device.id):
        capabilities = await async_get_device_automation_capabilities(
            hass, "trigger", trigger
        )
        assert capabilities == expected
async def test_if_fires_on_state_change(hass, calls):
    """Test for on and off triggers firing.

    Sets up two device automations on the test battery sensor (bat_low /
    not_bat_low) and verifies each fires exactly once on the matching state
    transition, rendering the trigger variables into the service data.
    """
    platform = getattr(hass.components, f"test.{DOMAIN}")
    platform.init()
    assert await async_setup_component(hass, DOMAIN, {DOMAIN: {CONF_PLATFORM: "test"}})
    await hass.async_block_till_done()
    sensor1 = platform.ENTITIES["battery"]
    assert await async_setup_component(
        hass,
        automation.DOMAIN,
        {
            automation.DOMAIN: [
                {
                    "trigger": {
                        "platform": "device",
                        "domain": DOMAIN,
                        "device_id": "",
                        "entity_id": sensor1.entity_id,
                        "type": "bat_low",
                    },
                    "action": {
                        "service": "test.automation",
                        "data_template": {
                            # Renders to "bat_low {{ trigger.platform}} - ..."
                            # joining the listed trigger variables with " - ".
                            "some": "bat_low {{ trigger.%s }}"
                            % "}} - {{ trigger.".join(
                                (
                                    "platform",
                                    "entity_id",
                                    "from_state.state",
                                    "to_state.state",
                                    "for",
                                )
                            )
                        },
                    },
                },
                {
                    "trigger": {
                        "platform": "device",
                        "domain": DOMAIN,
                        "device_id": "",
                        "entity_id": sensor1.entity_id,
                        "type": "not_bat_low",
                    },
                    "action": {
                        "service": "test.automation",
                        "data_template": {
                            "some": "not_bat_low {{ trigger.%s }}"
                            % "}} - {{ trigger.".join(
                                (
                                    "platform",
                                    "entity_id",
                                    "from_state.state",
                                    "to_state.state",
                                    "for",
                                )
                            )
                        },
                    },
                },
            ]
        },
    )
    await hass.async_block_till_done()
    # The test battery sensor starts "on" (battery low); no trigger has fired.
    assert hass.states.get(sensor1.entity_id).state == STATE_ON
    assert len(calls) == 0
    # on -> off fires the not_bat_low automation.
    hass.states.async_set(sensor1.entity_id, STATE_OFF)
    await hass.async_block_till_done()
    assert len(calls) == 1
    assert calls[0].data["some"] == "not_bat_low device - {} - on - off - None".format(
        sensor1.entity_id
    )
    # off -> on fires the bat_low automation.
    hass.states.async_set(sensor1.entity_id, STATE_ON)
    await hass.async_block_till_done()
    assert len(calls) == 2
    assert calls[1].data["some"] == "bat_low device - {} - off - on - None".format(
        sensor1.entity_id
    )
async def test_if_fires_on_state_change_with_for(hass, calls):
    """Test for triggers firing with delay.

    The turned_off trigger carries "for: 5s"; it must not fire on the state
    change itself, only after simulated time advances past the delay.
    """
    platform = getattr(hass.components, f"test.{DOMAIN}")
    platform.init()
    assert await async_setup_component(hass, DOMAIN, {DOMAIN: {CONF_PLATFORM: "test"}})
    await hass.async_block_till_done()
    sensor1 = platform.ENTITIES["battery"]
    assert await async_setup_component(
        hass,
        automation.DOMAIN,
        {
            automation.DOMAIN: [
                {
                    "trigger": {
                        "platform": "device",
                        "domain": DOMAIN,
                        "device_id": "",
                        "entity_id": sensor1.entity_id,
                        "type": "turned_off",
                        # Trigger only after the state has held for 5 seconds.
                        "for": {"seconds": 5},
                    },
                    "action": {
                        "service": "test.automation",
                        "data_template": {
                            "some": "turn_off {{ trigger.%s }}"
                            % "}} - {{ trigger.".join(
                                (
                                    "platform",
                                    "entity_id",
                                    "from_state.state",
                                    "to_state.state",
                                    "for",
                                )
                            )
                        },
                    },
                }
            ]
        },
    )
    await hass.async_block_till_done()
    assert hass.states.get(sensor1.entity_id).state == STATE_ON
    assert len(calls) == 0
    # State change alone must not fire the delayed trigger.
    hass.states.async_set(sensor1.entity_id, STATE_OFF)
    await hass.async_block_till_done()
    assert len(calls) == 0
    # Jump past the 5 second hold period; now the trigger fires once.
    async_fire_time_changed(hass, dt_util.utcnow() + timedelta(seconds=10))
    await hass.async_block_till_done()
    assert len(calls) == 1
    await hass.async_block_till_done()
    assert calls[0].data["some"] == "turn_off device - {} - on - off - 0:00:05".format(
        sensor1.entity_id
    )
| |
# Copyright (C) 2014 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import logging
import netaddr
import functools
from nose.tools import *
from ryu.lib import ofctl_v1_2
from ryu.ofproto import ofproto_v1_2, ofproto_v1_2_parser
from ryu.lib import ofctl_v1_3
from ryu.ofproto import ofproto_v1_3, ofproto_v1_3_parser
from ryu.ofproto import ofproto_protocol
from ryu.ofproto import inet
LOG = logging.getLogger('test_ofctl_v1_2, v1_3')
""" Common Functions """
def _str_to_int(src):
if isinstance(src, str):
if src.startswith("0x") or src.startswith("0X"):
dst = int(src, 16)
else:
dst = int(src)
else:
dst = src
return dst
def _to_match_eth(value):
if '/' in value:
value = value.split('/')
return value[0], value[1]
else:
return value, None
def _to_match_ip(value):
    """Split an IP match string into (network address, netmask) strings.

    A plain address is returned as (value, None); a CIDR or address/mask
    form is normalized through netaddr into dotted network/netmask strings.
    """
    if '/' not in value:
        return value, None
    net = netaddr.ip.IPNetwork(value)
    return str(net.network), str(net.netmask)
def _to_match_masked_int(value):
    """Split a "value/mask" integer match into ints; mask is None if absent."""
    if isinstance(value, str) and '/' in value:
        raw, mask = value.split('/')[0], value.split('/')[1]
        return _str_to_int(raw), _str_to_int(mask)
    return _str_to_int(value), None
# Map OpenFlow 1.0 style match field names to the OXM (1.2+) names used
# when feeding attrs into to_match().
conv_of10_to_of12_dict = {
    'dl_dst': 'eth_dst',
    'dl_src': 'eth_src',
    'dl_type': 'eth_type',
    'dl_vlan': 'vlan_vid',
    'nw_src': 'ipv4_src',
    'nw_dst': 'ipv4_dst',
    'nw_proto': 'ip_proto'
}
# Reverse map for checking match_to_str() output against the original
# attrs.  Note tcp_*/udp_* both collapse onto tp_src/tp_dst.
conv_of12_to_of10_dict = {
    'eth_src': 'dl_src',
    'eth_dst': 'dl_dst',
    'eth_type': 'dl_type',
    'ipv4_dst': 'nw_dst',
    'ipv4_src': 'nw_src',
    'ip_proto': 'nw_proto',
    'vlan_vid': 'dl_vlan',
    'tcp_src': 'tp_src',
    'tcp_dst': 'tp_dst',
    'udp_src': 'tp_src',
    'udp_dst': 'tp_dst'
}
""" Test_ofctl """
class Test_ofctl(unittest.TestCase):
    """Round-trip tests for ofctl conversions.

    Test methods are attached dynamically by _add_tests_actions() and
    _add_tests_match(); each generated method drives one of the two shared
    helpers below with a single action dict or match-attrs dict.
    """
    def __init__(self, methodName):
        super(Test_ofctl, self).__init__(methodName)
    def setUp(self):
        pass
    def tearDown(self):
        pass
    def _test_actions(self, act, test):
        """Round-trip one action dict: dict -> parser object -> string."""
        act_type = act["type"]
        to_actions = test.to_actions
        actions_to_str = test.actions_to_str
        dp = ofproto_protocol.ProtocolDesc(version=test.ver)
        act_list = []
        act_list.append(act)
        # str -> action
        result = to_actions(dp, act_list)
        insts = result[0]
        if act_type in test.supported_action:
            cls = test.supported_action[act_type]
        else:
            cls = None
        # Instruction-level types wrap no action list; everything else is
        # an apply-actions instruction whose first action is checked.
        if act_type == 'GOTO_TABLE':
            ok_(isinstance(insts, cls))
            eq_(insts.table_id, act["table_id"])
        elif act_type == 'WRITE_METADATA':
            ok_(isinstance(insts, cls))
            eq_(insts.metadata, act["metadata"])
            eq_(insts.metadata_mask, act["metadata_mask"])
        elif act_type == 'METER':
            ok_(isinstance(insts, cls))
            eq_(insts.meter_id, act["meter_id"])
        else:
            ok_(isinstance(insts.actions[0], cls))
            if act_type == 'OUTPUT':
                eq_(insts.actions[0].port, act["port"])
            elif act_type == 'SET_MPLS_TTL':
                eq_(insts.actions[0].mpls_ttl, act["mpls_ttl"])
            elif act_type in ['PUSH_VLAN', 'PUSH_MPLS',
                              'POP_MPLS', 'PUSH_PBB']:
                eq_(insts.actions[0].ethertype, act["ethertype"])
            elif act_type == 'SET_QUEUE':
                eq_(insts.actions[0].queue_id, act["queue_id"])
            elif act_type == 'GROUP':
                eq_(insts.actions[0].group_id, act["group_id"])
            elif act_type == 'SET_NW_TTL':
                eq_(insts.actions[0].nw_ttl, act["nw_ttl"])
        # action -> str
        action_str = actions_to_str(result)
        action_str_list = action_str[0].split(':')
        eq_(action_str_list[0], act_type)
        if act_type == 'GOTO_TABLE':
            eq_(int(action_str_list[1]), act["table_id"])
        elif act_type == 'WRITE_METADATA':
            # Rendered as TYPE:metadata/metadata_mask in hex.
            met = action_str_list[1].split('/')
            eq_(int(met[0], 16), act["metadata"])
            eq_(int(met[1], 16), act["metadata_mask"])
        elif act_type == 'METER':
            eq_(int(action_str_list[1]), act["meter_id"])
        else:
            if act_type == 'OUTPUT':
                eq_(int(action_str_list[1]), act["port"])
            elif act_type == 'SET_MPLS_TTL':
                eq_(int(action_str_list[1]), act["mpls_ttl"])
            elif act_type == 'PUSH_VLAN':
                eq_(int(action_str_list[1]), act["ethertype"])
            elif act_type == 'PUSH_MPLS':
                eq_(int(action_str_list[1]), act["ethertype"])
            elif act_type == 'POP_MPLS':
                eq_(int(action_str_list[1]), act["ethertype"])
            elif act_type == 'SET_QUEUE':
                eq_(int(action_str_list[1]), act["queue_id"])
            elif act_type == 'GROUP':
                eq_(int(action_str_list[1]), act["group_id"])
            elif act_type == 'SET_NW_TTL':
                eq_(int(action_str_list[1]), act["nw_ttl"])
            elif act_type == 'SET_FIELD':
                eq_(action_str_list[1].strip(' {'), act["field"])
                eq_(action_str_list[2].strip('} '), act["value"])
            elif act_type == 'PUSH_PBB':
                eq_(int(action_str_list[1]), act["ethertype"])
    def _test_to_match(self, attrs, test):
        """Round-trip one match-attrs dict: dict -> OFPMatch -> string dict."""
        to_match = test.to_match
        match_to_str = test.match_to_str
        dp = ofproto_protocol.ProtocolDesc(version=test.ver)
        ofproto = dp.ofproto
        vid_present = dp.ofproto.OFPVID_PRESENT
        # vlan_vid is special-cased: expected parsed value and expected
        # string rendering for every input form the tests exercise.
        expected_value = {
            "vlan_vid": {
                0: {"to_match": 0 | vid_present, "to_str": "0"},
                3: {"to_match": 3 | vid_present, "to_str": "3"},
                4095: {"to_match": 4095 | vid_present, "to_str": "4095"},
                "0": {"to_match": 0 | vid_present, "to_str": "0"},
                "3": {"to_match": 3 | vid_present, "to_str": "3"},
                "4095": {"to_match": 4095 | vid_present, "to_str": "4095"},
                "0x0000": {"to_match": 0x0000, "to_str": "0x0000"},
                "0x0003": {"to_match": 0x0003, "to_str": "0x0003"},
                "0x0fff": {"to_match": 0x0fff, "to_str": "0x0fff"},
                "0x1000": {"to_match": 0x1000, "to_str": "0"},
                "0x1003": {"to_match": 0x1003, "to_str": "3"},
                "0x1fff": {"to_match": 0x1fff, "to_str": "4095"},
                "4096/4096": {"to_match": (4096, 4096),
                              "to_str": "0x1000/0x1000"},
                "4096/4097": {"to_match": (4096, 4097),
                              "to_str": "0x1000/0x1001"},
                "2744/2748": {"to_match": (2744, 2748),
                              "to_str": "0x0ab8/0x0abc"},
                "2748/2748": {"to_match": (2748, 2748),
                              "to_str": "0x0abc/0x0abc"},
                "2748/2749": {"to_match": (2748, 2749),
                              "to_str": "0x0abc/0x0abd"},
                "0x1000/0x1000": {"to_match": (0x1000, 0x1000),
                                  "to_str": "0x1000/0x1000"},
                "0x1000/0x1001": {"to_match": (0x1000, 0x1001),
                                  "to_str": "0x1000/0x1001"},
                "0x0ab8/0x0abc": {"to_match": (0x0ab8, 0x0abc),
                                  "to_str": "0x0ab8/0x0abc"},
                "0x0abc/0x0abc": {"to_match": (0x0abc, 0x0abc),
                                  "to_str": "0x0abc/0x0abc"},
                "0x0abc/0x0abd": {"to_match": (0x0abc, 0x0abd),
                                  "to_str": "0x0abc/0x0abd"}
            }
        }
        # str -> match
        match = to_match(dp, attrs)
        def equal_match(key, value, match):
            # Compare one parsed match field against the input attr value.
            field_value = match[key]
            if key in ['eth_src', 'eth_dst', 'arp_sha', 'arp_tha']:
                # MAC address
                eth, mask = _to_match_eth(value)
                if mask is not None:
                    # with mask: only compare bytes covered by 'f' nibbles.
                    for i in range(0, len(mask)):
                        if mask[i] == 'f':
                            eq_(eth[i], field_value[0][i])
                    eq_(mask, field_value[1])
                else:
                    # without mask
                    eq_(eth, field_value)
                return
            elif key in ['ipv4_src', 'ipv4_dst', 'arp_spa', 'arp_tpa']:
                # IPv4 address
                ipv4, mask = _to_match_ip(value)
                if mask is not None:
                    # with mask
                    eq_(ipv4, field_value[0])
                    eq_(mask, field_value[1])
                else:
                    # without mask
                    eq_(ipv4, field_value)
                return
            elif key in ['ipv6_src', 'ipv6_dst']:
                # IPv6 address
                ipv6, mask = _to_match_ip(value)
                if mask is not None:
                    # with mask
                    eq_(ipv6, field_value[0])
                    eq_(mask, field_value[1])
                else:
                    # without mask
                    eq_(ipv6, field_value)
                return
            elif key == 'vlan_vid':
                eq_(expected_value['vlan_vid'][value]['to_match'], field_value)
                return
            elif key == 'metadata' or key == 'ipv6_exthdr':
                # Metadata or IPv6 Extension Header pseudo-field
                value, mask = _to_match_masked_int(value)
                if mask is not None:
                    # with mask
                    value &= mask
                    eq_(value, field_value[0])
                    eq_(mask, field_value[1])
                else:
                    # without mask
                    eq_(value, field_value)
                return
            else:
                eq_(value, field_value)
                return
        for key, value in attrs.items():
            if key in conv_of10_to_of12_dict:
                # For old field name
                key_new = conv_of10_to_of12_dict[key]
            elif key == 'tp_src' or key == 'tp_dst':
                # TCP/UDP port: resolve tp_* via the ip_proto in the attrs.
                conv = {inet.IPPROTO_TCP: {'tp_src': 'tcp_src',
                                           'tp_dst': 'tcp_dst'},
                        inet.IPPROTO_UDP: {'tp_src': 'udp_src',
                                           'tp_dst': 'udp_dst'}}
                ip_proto = attrs.get('nw_proto', attrs.get('ip_proto', 0))
                key_new = conv[ip_proto][key]
            else:
                key_new = key
            equal_match(key_new, value, match)
        # match -> str
        match_str = match_to_str(match)
        def equal_str(key, value, match_str):
            # Compare one stringified match field against the input value.
            field_value = match_str[key]
            if key in ['dl_src', 'dl_dst', 'arp_sha', 'arp_tha']:
                # MAC address
                eth, mask = _to_match_eth(value)
                if mask is not None:
                    # with mask
                    field_value = field_value.split('/')
                    for i in range(0, len(mask)):
                        if mask[i] == 'f':
                            eq_(eth[i], field_value[0][i])
                    eq_(mask, field_value[1])
                else:
                    # without mask
                    eq_(eth, field_value)
                return
            elif key in['nw_src', 'nw_dst', 'arp_spa', 'arp_tpa']:
                # IPv4 address
                ipv4, mask = _to_match_ip(value)
                if mask is not None:
                    # with mask
                    field_value = field_value.split('/')
                    eq_(ipv4, field_value[0])
                    eq_(mask, field_value[1])
                else:
                    # without mask
                    eq_(ipv4, field_value)
                return
            elif key in ['ipv6_src', 'ipv6_dst']:
                # IPv6 address
                ipv6, mask = _to_match_ip(value)
                if mask is not None:
                    # with mask
                    field_value = field_value.split('/')
                    eq_(ipv6, field_value[0])
                    eq_(mask, field_value[1])
                else:
                    # without mask
                    eq_(ipv6, field_value)
                return
            elif key == 'dl_vlan':
                eq_(expected_value['vlan_vid'][value]['to_str'], field_value)
                return
            elif key == 'metadata' or key == 'ipv6_exthdr':
                # Metadata or IPv6 Extension Header pseudo-field
                value, mask = _to_match_masked_int(value)
                if mask is not None:
                    # with mask
                    field_value = field_value.split('/')
                    value &= mask
                    eq_(str(value), field_value[0])
                    eq_(str(mask), field_value[1])
                else:
                    # without mask
                    eq_(str(value), field_value)
                return
            else:
                eq_(value, field_value)
                return
        for key, value in attrs.items():
            if key in conv_of12_to_of10_dict:
                key_old = conv_of12_to_of10_dict[key]
            else:
                key_old = key
            equal_str(key_old, value, match_str)
""" Test_data for of_v1_2 """
class test_data_v1_2():
    """Test fixture data for OpenFlow 1.2.

    Holds the action dicts and match-attr dicts that the dynamically
    generated Test_ofctl methods iterate over, plus the parser classes and
    ofctl callables each generated test needs.
    """
    def __init__(self):
        # Maps action/match type name -> expected parser class; filled in
        # by set_action_v1_2()/set_match_v1_2().
        self.supported_action = {}
        self.supported_match = {}
        # One generated action test per entry.
        self.act_list = [
            {'type': 'OUTPUT', 'port': 3},
            {'type': 'COPY_TTL_OUT'},
            {'type': 'COPY_TTL_IN'},
            {'type': 'SET_MPLS_TTL', 'mpls_ttl': 64},
            {'type': 'DEC_MPLS_TTL'},
            {'type': 'PUSH_VLAN', 'ethertype': 0x0800},
            {'type': 'POP_VLAN'},
            {'type': 'PUSH_MPLS', 'ethertype': 0x0800},
            {'type': 'POP_MPLS', 'ethertype': 0x0800},
            {'type': 'SET_QUEUE', 'queue_id': 7},
            {'type': 'GROUP', 'group_id': 5},
            {'type': 'SET_NW_TTL', 'nw_ttl': 64},
            {'type': 'DEC_NW_TTL'},
            {'type': 'GOTO_TABLE', 'table_id': 8},
            {'type': 'WRITE_METADATA', 'metadata': 8,
             'metadata_mask': (1 << 64) - 1},
        ]
        # One generated match test per entry; covers both OF1.0-style and
        # OXM-style field names, with and without masks.
        self.attr_list = [
            {'in_port': 7},
            {'in_phy_port': 5, 'in_port': 3},
            {'metadata': '0x1212121212121212'},
            {'metadata': '0x19af28be37fa91b/0x1010101010101010'},
            {'dl_src': "aa:bb:cc:11:22:33"},
            {'dl_src': "aa:bb:cc:11:22:33/00:00:00:00:ff:ff"},
            {'dl_dst': "aa:bb:cc:11:22:33"},
            {'dl_dst': "aa:bb:cc:11:22:33/00:00:00:00:ff:ff"},
            {'dl_type': 123},
            {'eth_src': "aa:bb:cc:11:22:33"},
            {'eth_src': "aa:bb:cc:11:22:33/00:00:00:00:ff:ff"},
            {'eth_dst': "aa:bb:cc:11:22:33"},
            {'eth_dst': "aa:bb:cc:11:22:33/00:00:00:00:ff:ff"},
            {'eth_type': 0x800},
            {'dl_vlan': 0},
            {'dl_vlan': 3},
            {'dl_vlan': 4095},
            {'dl_vlan': "0"},
            {'dl_vlan': "3"},
            {'dl_vlan': "4095"},
            {'dl_vlan': "0x0000"},
            {'dl_vlan': "0x0003"},
            {'dl_vlan': "0x0fff"},
            {'dl_vlan': "0x1000"},
            {'dl_vlan': "0x1003"},
            {'dl_vlan': "0x1fff"},
            {'dl_vlan': "4096/4096"},
            {'dl_vlan': "4096/4097"},
            {'dl_vlan': "2744/2748"},
            {'dl_vlan': "2748/2748"},
            {'dl_vlan': "2748/2749"},
            {'dl_vlan': "0x1000/0x1000"},
            {'dl_vlan': "0x1000/0x1001"},
            {'dl_vlan': "0x0ab8/0x0abc"},
            {'dl_vlan': "0x0abc/0x0abc"},
            {'dl_vlan': "0x0abc/0x0abd"},
            {'vlan_pcp': 3, 'vlan_vid': 3},
            {'ip_dscp': 3, 'eth_type': 0x0800},
            {'ip_ecn': 4, 'eth_type': 0x86dd},
            {'nw_src': "192.168.0.1", 'eth_type': 0x0800},
            {'nw_src': "192.168.0.1/24", 'eth_type': 0x0800},
            {'nw_src': "192.168.10.10/255.255.0.0", 'eth_type': 0x0800},
            {'nw_dst': "192.168.0.1", 'eth_type': 0x0800},
            {'nw_dst': "192.168.0.1/24", 'eth_type': 0x0800},
            {'nw_dst': "192.168.10.10/255.255.255.0"},
            {'nw_proto': 5, 'eth_type': 0x0800},
            {'ip_proto': 5, 'eth_type': 0x86dd},
            {'ipv4_src': "192.168.0.1", 'eth_type': 0x0800},
            {'ipv4_src': "192.168.0.1/24", 'eth_type': 0x0800},
            {'ipv4_src': "192.168.10.10/255.255.0.0", 'eth_type': 0x0800},
            {'ipv4_dst': "192.168.0.1", 'eth_type': 0x0800},
            {'ipv4_dst': "192.168.0.1/24", 'eth_type': 0x0800},
            {'ipv4_dst': "192.168.10.10/255.255.255.0", 'eth_type': 0x0800},
            {'tp_src': 1, 'ip_proto': 6},
            {'tp_dst': 2, 'ip_proto': 6},
            {'tp_src': 3, 'ip_proto': 17},
            {'tp_dst': 4, 'ip_proto': 17},
            {'vlan_vid': 0},
            {'vlan_vid': 3},
            {'vlan_vid': 4095},
            {'vlan_vid': "0"},
            {'vlan_vid': "3"},
            {'vlan_vid': "4095"},
            {'vlan_vid': "0x0000"},
            {'vlan_vid': "0x0003"},
            {'vlan_vid': "0x0fff"},
            {'vlan_vid': "0x1000"},
            {'vlan_vid': "0x1003"},
            {'vlan_vid': "0x1fff"},
            {'vlan_vid': "4096/4096"},
            {'vlan_vid': "4096/4097"},
            {'vlan_vid': "2744/2748"},
            {'vlan_vid': "2748/2748"},
            {'vlan_vid': "2748/2749"},
            {'vlan_vid': "0x1000/0x1000"},
            {'vlan_vid': "0x1000/0x1001"},
            {'vlan_vid': "0x0ab8/0x0abc"},
            {'vlan_vid': "0x0abc/0x0abc"},
            {'vlan_vid': "0x0abc/0x0abd"},
            {'tcp_src': 3, 'ip_proto': 6},
            {'tcp_dst': 5, 'ip_proto': 6},
            {'udp_src': 2, 'ip_proto': 17},
            {'udp_dst': 6, 'ip_proto': 17},
            {'sctp_src': 99, 'ip_proto': 132},
            {'sctp_dst': 99, 'ip_proto': 132},
            {'icmpv4_type': 5, 'ip_proto': 1},
            {'icmpv4_code': 6, 'ip_proto': 1},
            {'arp_op': 3, 'eth_type': 0x0806},
            {'arp_spa': "192.168.0.11", 'eth_type': 0x0806},
            {'arp_spa': "192.168.0.22/24", 'eth_type': 0x0806},
            {'arp_tpa': "192.168.0.33", 'eth_type': 0x0806},
            {'arp_tpa': "192.168.0.44/24", 'eth_type': 0x0806},
            {'arp_sha': "aa:bb:cc:11:22:33", 'eth_type': 0x0806},
            {'arp_sha': "aa:bb:cc:11:22:33/00:00:00:00:ff:ff",
             'eth_type': 0x0806},
            {'arp_tha': "aa:bb:cc:11:22:33", 'eth_type': 0x0806},
            {'arp_tha': "aa:bb:cc:11:22:33/00:00:00:00:ff:ff",
             'eth_type': 0x0806},
            {'ipv6_src': '2001::aaaa:bbbb:cccc:1111', 'eth_type': 0x86dd},
            {'ipv6_src': '2001::aaaa:bbbb:cccc:1111/64', 'eth_type': 0x86dd},
            {'ipv6_dst': '2001::ffff:cccc:bbbb:1111', 'eth_type': 0x86dd},
            {'ipv6_dst': '2001::ffff:cccc:bbbb:1111/64', 'eth_type': 0x86dd},
            {'ipv6_flabel': 2, 'eth_type': 0x86dd},
            {'icmpv6_type': 3, 'ip_proto': 58},
            {'icmpv6_code': 4, 'ip_proto': 58},
            {'ipv6_nd_target': '2001::ffff:cccc:bbbb:1111',
             'icmpv6_type': 135, 'ip_proto': 58},
            {'ipv6_nd_sll': "aa:bb:cc:11:22:33",
             'icmpv6_type': 135, 'ip_proto': 58},
            {'ipv6_nd_tll': "aa:bb:cc:11:22:33",
             'icmpv6_type': 136, 'ip_proto': 58},
            {'mpls_label': 3, 'eth_type': 0x8848},
            {'mpls_tc': 2, 'eth_type': 0x8848}
        ]
    def set_ver(self, ver):
        # OpenFlow wire version used to build the ProtocolDesc in tests.
        self.ver = ver
    def set_attr(self, ofctl):
        # Bind the ofctl module's conversion callables for this version.
        self.to_match = getattr(ofctl, "to_match")
        self.match_to_str = getattr(ofctl, "match_to_str")
        self.to_actions = getattr(ofctl, "to_actions")
        self.actions_to_str = getattr(ofctl, "actions_to_str")
    def set_action_v1_2(self, parser):
        """Register the expected OF1.2 parser class for each action type."""
        self.supported_action.update(
            {
                'OUTPUT': getattr(parser, "OFPActionOutput"),
                'COPY_TTL_OUT': getattr(parser, "OFPActionCopyTtlOut"),
                'COPY_TTL_IN': getattr(parser, "OFPActionCopyTtlIn"),
                'SET_MPLS_TTL': getattr(parser, "OFPActionSetMplsTtl"),
                'DEC_MPLS_TTL': getattr(parser, "OFPActionDecMplsTtl"),
                'PUSH_VLAN': getattr(parser, "OFPActionPushVlan"),
                'POP_VLAN': getattr(parser, "OFPActionPopVlan"),
                'PUSH_MPLS': getattr(parser, "OFPActionPushMpls"),
                'POP_MPLS': getattr(parser, "OFPActionPopMpls"),
                'SET_QUEUE': getattr(parser, "OFPActionSetQueue"),
                'GROUP': getattr(parser, "OFPActionGroup"),
                'SET_NW_TTL': getattr(parser, "OFPActionSetNwTtl"),
                'DEC_NW_TTL': getattr(parser, "OFPActionDecNwTtl"),
                'SET_FIELD': getattr(parser, "OFPActionSetField"),
                'GOTO_TABLE': getattr(parser, "OFPInstructionGotoTable"),
                'WRITE_METADATA': getattr(parser,
                                          "OFPInstructionWriteMetadata"),
            })
    def set_match_v1_2(self, parser):
        """Register the expected OF1.2 match-field class for each field name."""
        self.supported_match.update(
            {
                'in_port': getattr(parser, "MTInPort"),
                'in_phy_port': getattr(parser, "MTInPhyPort"),
                'metadata': getattr(parser, "MTMetadata"),
                'eth_dst': getattr(parser, "MTEthDst"),
                'dl_dst': getattr(parser, "MTEthDst"),
                'eth_src': getattr(parser, "MTEthSrc"),
                'dl_src': getattr(parser, "MTEthSrc"),
                'dl_type': getattr(parser, "MTEthType"),
                'eth_type': getattr(parser, "MTEthType"),
                'dl_vlan': getattr(parser, "MTVlanVid"),
                'vlan_vid': getattr(parser, "MTVlanVid"),
                'vlan_pcp': getattr(parser, "MTVlanPcp"),
                'ip_dscp': getattr(parser, "MTIPDscp"),
                'ip_ecn': getattr(parser, "MTIPECN"),
                'nw_proto': getattr(parser, "MTIPProto"),
                'ip_proto': getattr(parser, "MTIPProto"),
                'nw_src': getattr(parser, "MTIPV4Src"),
                'nw_dst': getattr(parser, "MTIPV4Dst"),
                'ipv4_src': getattr(parser, "MTIPV4Src"),
                'ipv4_dst': getattr(parser, "MTIPV4Dst"),
                'tp_src': {6: getattr(parser, "MTTCPSrc"),
                           17: getattr(parser, "MTUDPSrc")},
                'tp_dst': {6: getattr(parser, "MTTCPDst"),
                           17: getattr(parser, "MTUDPDst")},
                'tcp_src': getattr(parser, "MTTCPSrc"),
                'tcp_dst': getattr(parser, "MTTCPDst"),
                'udp_src': getattr(parser, "MTUDPSrc"),
                'udp_dst': getattr(parser, "MTUDPDst"),
                'sctp_src': getattr(parser, "MTSCTPSrc"),
                'sctp_dst': getattr(parser, "MTSCTPDst"),
                'icmpv4_type': getattr(parser, "MTICMPV4Type"),
                'icmpv4_code': getattr(parser, "MTICMPV4Code"),
                'arp_op': getattr(parser, "MTArpOp"),
                'arp_spa': getattr(parser, "MTArpSpa"),
                'arp_tpa': getattr(parser, "MTArpTpa"),
                'arp_sha': getattr(parser, "MTArpSha"),
                'arp_tha': getattr(parser, "MTArpTha"),
                'ipv6_src': getattr(parser, "MTIPv6Src"),
                'ipv6_dst': getattr(parser, "MTIPv6Dst"),
                'ipv6_flabel': getattr(parser, "MTIPv6Flabel"),
                'icmpv6_type': getattr(parser, "MTICMPV6Type"),
                'icmpv6_code': getattr(parser, "MTICMPV6Code"),
                'ipv6_nd_target': getattr(parser, "MTIPv6NdTarget"),
                'ipv6_nd_sll': getattr(parser, "MTIPv6NdSll"),
                'ipv6_nd_tll': getattr(parser, "MTIPv6NdTll"),
                'mpls_label': getattr(parser, "MTMplsLabel"),
                'mpls_tc': getattr(parser, "MTMplsTc"),
            })
""" Test_data for of_v1_3 """
class test_data_v1_3(test_data_v1_2):
    """Test fixture data for OpenFlow 1.3: the 1.2 data plus 1.3-only items."""
    def __init__(self):
        test_data_v1_2.__init__(self)
        # Actions introduced in OF1.3.
        self.act_list.extend(
            [
                {'type': 'PUSH_PBB', 'ethertype': 0x0800},
                {'type': 'POP_PBB'},
                {'type': 'METER', 'meter_id': 3},
            ]
        )
        # Match fields introduced in OF1.3.
        self.attr_list.extend(
            [
                {'mpls_bos': 3, 'eth_type': 0x8848},
                {'pbb_isid': 5, 'eth_type': 0x88E7},
                {'tunnel_id': 7},
                {'ipv6_exthdr': 3, 'eth_type': 0x86dd},
                {'ipv6_exthdr': "0x40", 'eth_type': 0x86dd},
                {'ipv6_exthdr': "0x40/0x1F0", 'eth_type': 0x86dd},
            ]
        )
    def set_action_v1_3(self, parser):
        """Register OF1.2 action classes plus the OF1.3-only ones."""
        self.set_action_v1_2(parser)
        self.supported_action.update(
            {
                'PUSH_PBB': getattr(parser, "OFPActionPushPbb"),
                'POP_PBB': getattr(parser, "OFPActionPopPbb"),
                'METER': getattr(parser, "OFPInstructionMeter"),
            })
    def set_match_v1_3(self, parser):
        """Register OF1.2 match classes plus the OF1.3-only ones."""
        self.set_match_v1_2(parser)
        self.supported_match.update(
            {
                'mpls_bos': getattr(parser, "MTMplsBos"),
                'pbb_isid': getattr(parser, "MTPbbIsid"),
                'tunnel_id': getattr(parser, "MTTunnelId"),
                'ipv6_exthdr': getattr(parser, "MTIPv6ExtHdr"),
            })
""" Test_data for of_v1_4 """
# class test_data_v1_4(test_data_v1_3):
# def __init__(self):
# test_data_v1_3.__init__(self)
# def set_action_v1_4(self, parser):
# self.set_action_v1_3(parser)
# def set_match_v1_4(self, parser):
# self.set_match_v1_3(parser)
def _add_tests_actions(cls):
    """Attach one generated action round-trip test per entry in cls.act_list."""
    for action in cls.act_list:
        name = 'test_' + str(cls.ver) + '_' + action["type"] + '_action'

        def _run(self, name, act, cls):
            # Instantiate the TestCase by method name and drive the helper.
            print('processing %s ...' % name)
            case = Test_ofctl(name)
            case._test_actions(act, cls)

        print('adding %s ...' % name)
        # partial() binds the current loop values, avoiding the
        # late-binding-closure pitfall.
        bound = functools.partial(_run, name=name, act=action, cls=cls)
        bound.func_name = name  # Python 2 compatibility
        bound.__name__ = name
        setattr(Test_ofctl, name, bound)
def _add_tests_match(cls):
    """Attach one generated match round-trip test per attr in cls.attr_list."""
    for attrs in cls.attr_list:
        for field, field_value in attrs.items():
            name = 'test_' + \
                str(cls.ver) + '_' + field + '_' + str(
                    field_value) + str(type(field_value)) + '_match'

            def _run(self, name, attr, cls):
                # Instantiate the TestCase by method name and drive the helper.
                print('processing %s ...' % name)
                case = Test_ofctl(name)
                case._test_to_match(attr, cls)

            print('adding %s ...' % name)
            # partial() binds the current loop values (late-binding pitfall).
            bound = functools.partial(
                _run, name=name, attr=attrs, cls=cls)
            bound.func_name = name  # Python 2 compatibility
            bound.__name__ = name
            setattr(Test_ofctl, name, bound)
""" Test case """
# for of12
cls = test_data_v1_2()
cls.set_action_v1_2(ofproto_v1_2_parser)
cls.set_match_v1_2(ofproto_v1_2_parser)
cls.set_ver(ofproto_v1_2.OFP_VERSION)
cls.set_attr(ofctl_v1_2)
_add_tests_actions(cls)
_add_tests_match(cls)
# for of13
cls = test_data_v1_3()
cls.set_action_v1_3(ofproto_v1_3_parser)
cls.set_match_v1_3(ofproto_v1_3_parser)
cls.set_ver(ofproto_v1_3.OFP_VERSION)
cls.set_attr(ofctl_v1_3)
_add_tests_actions(cls)
_add_tests_match(cls)
# for of14
# cls = test_data_v1_4()
# cls.set_action_v1_4(ofproto_v1_4_parser)
# cls.set_match_v1_4(ofproto_v1_4_parser)
# cls.set_ver(ofproto_v1_4.OFP_VERSION)
# cls.set_attr(ofctl_v1_4)
# _add_tests_actions(cls)
# _add_tests_match(cls)
| |
"""Treadmill master process.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import collections
import json
import logging
import os
import re
import time
import zlib
import six
from treadmill import appevents
from treadmill import scheduler
from treadmill import utils
from treadmill import zknamespace as z
from treadmill import zkutils
from treadmill.appcfg import abort as app_abort
from treadmill.apptrace import events as traceevents
from . import loader
_LOGGER = logging.getLogger(__name__)
def _time_past(when):
"""Check if time past the given timestamp."""
return time.time() > when
# Timer interval to reevaluate time events (seconds).
# TIMER_INTERVAL = 60
# Time interval between running the scheduler (seconds).
_SCHEDULER_INTERVAL = 2
# Save reports on the scheduler state to ZooKeeper every minute.
_STATE_REPORT_INTERVAL = 60
# Interval to sleep before checking if there is new event in the queue.
_CHECK_EVENT_INTERVAL = 0.5
# Check integrity of the scheduler every 5 minutes.
_INTEGRITY_INTERVAL = 5 * 60
# Check for reboots every hour.
_REBOOT_CHECK_INTERVAL = 60 * 60
# Max number of events to process before checking if scheduler is due.
_EVENT_BATCH_COUNT = 20
class Master(loader.Loader):
"""Treadmill master scheduler."""
def __init__(self, backend, cellname, events_dir=None):
super(Master, self).__init__(backend, cellname)
self.backend = backend
self.events_dir = events_dir
self.queue = collections.deque()
self.up_to_date = False
self.exit = False
# Signals that processing of a given event.
self.process_complete = dict()
self.event_handlers = {
z.SERVER_PRESENCE: self.process_server_presence,
z.SCHEDULED: self.process_scheduled,
z.EVENTS: self.process_events,
}
def create_rootns(self):
"""Create root nodes and set appropriate acls."""
root_ns = [
'/',
z.ALLOCATIONS,
z.APPMONITORS,
z.BUCKETS,
z.CELL,
z.DISCOVERY,
z.DISCOVERY_STATE,
z.IDENTITY_GROUPS,
z.PLACEMENT,
z.PARTITIONS,
z.SCHEDULED,
z.SCHEDULER,
z.SERVERS,
z.STATE_REPORTS,
z.STRATEGIES,
z.FINISHED,
z.FINISHED_HISTORY,
z.TRACE,
z.TRACE_HISTORY,
z.VERSION_ID,
z.ZOOKEEPER,
z.BLACKEDOUT_SERVERS,
z.ENDPOINTS,
z.path.endpoint_proid('root'),
z.EVENTS,
z.RUNNING,
z.SERVER_PRESENCE,
z.VERSION,
z.VERSION_HISTORY,
z.REBOOTS,
]
for path in root_ns:
self.backend.ensure_exists(path)
for path in z.trace_shards():
self.backend.ensure_exists(path)
    @utils.exit_on_unhandled
    def process(self, event):
        """Process state change event.

        *event* is a (path, children) tuple queued by a watcher; it is
        dispatched to the handler registered for that path.
        """
        path, children = event
        _LOGGER.info('processing: %r', event)
        assert path in self.event_handlers
        self.event_handlers[path](children)
        _LOGGER.info('waiting for completion.')
        # Wake the watcher callback blocked in watch() so it can renew.
        self.process_complete[path].set()
        # Any processed event may invalidate placement; force a reschedule
        # on the next scheduler tick.
        self.up_to_date = False
        _LOGGER.info('done processing events.')
def process_scheduled(self, scheduled):
"""Callback invoked when on scheduling changes."""
current = set(self.cell.apps.keys())
target = set(scheduled)
for appname in current - target:
self.remove_app(appname)
for appname in target - current:
self.load_app(appname)
# Store by-proid aggregates.
aggregate = self._calculate_aggregate(target)
self.backend.put(z.SCHEDULED_STATS, aggregate)
def process_server_presence(self, servers):
"""Callback invoked when server presence is modified."""
self.adjust_presence(set(servers))
    def process_events(self, events):
        """Callback invoked on state change/admin event."""
        # Events are sequential nodes in the form <prio>-<event>-<seq #>
        #
        # They are processed in order of (prio, seq_num, event); nodes not
        # matching the expected pattern are ignored.
        ordered = sorted([tuple([event.split('-')[i] for i in [0, 2, 1]])
                          for event in events
                          if re.match(r'\d+\-\w+\-\d+$', event)])
        for prio, seq, resource in ordered:
            _LOGGER.info('event: %s %s %s', prio, seq, resource)
            # Reconstruct the original node name from the sort tuple.
            node_name = '-'.join([prio, resource, seq])
            if resource == 'allocations':
                # Changing allocations has potential of complete
                # reshuffle, so while inefficient, reload all apps as well.
                #
                # If application is assigned to different partition, from
                # scheduler perspective is no different than host deleted. It
                # will be detected on schedule and app will be assigned new
                # host from proper partition.
                self.load_allocations()
                self.load_apps()
            elif resource == 'apps':
                # The event node contains list of apps to be re-evaluated.
                apps = self.backend.get_default(
                    z.path.event(node_name),
                    default=[])
                for app in apps:
                    self.load_app(app)
            elif resource == 'cell':
                self.load_cell()
            elif resource == 'buckets':
                self.load_buckets()
            elif resource == 'servers':
                # The event node may carry an explicit server list.
                servers = self.backend.get_default(
                    z.path.event(node_name),
                    default=[])
                if not servers:
                    # If not specified, reload all. Use union of servers in
                    # the model and in zookeeper.
                    servers = (set(self.servers.keys()) ^
                               set(self.backend.list(z.SERVERS)))
                self.reload_servers(servers)
            elif resource == 'identity_groups':
                self.load_identity_groups()
            else:
                _LOGGER.warning('Unsupported event resource: %s', resource)
        # Events are one-shot: delete every node once processed.
        for node in events:
            _LOGGER.info('Deleting event: %s', z.path.event(node))
            self.backend.delete(z.path.event(node))
    def watch(self, path):
        """Constructs a children watch on a given ZooKeeper path.

        The watch callback pushes (path, children) tuples into self.queue
        and then blocks until the main loop finishes processing, so that the
        watch is not renewed in a busy loop.
        """
        @self.backend.zkclient.ChildrenWatch(path)
        @utils.exit_on_unhandled
        def _watch(children):
            """Watch children events."""
            _LOGGER.debug('watcher begin: %s', path)
            # On the first invocation the completion event is created but
            # not waited on, as the event loop has not started yet.
            #
            # On subsequent calls, wait for processing to complete before
            # renewing the watch, to avoid busy loops.
            if path in self.process_complete:
                self.process_complete[path].clear()
            self.queue.append((path, children))
            if path in self.process_complete:
                _LOGGER.debug('watcher waiting for completion: %s', path)
                self.process_complete[path].wait()
            else:
                self.process_complete[path] = \
                    self.backend.zkclient.handler.event_object()
            _LOGGER.debug('watcher finished: %s', path)
            # Returning True keeps the ChildrenWatch registered.
            return True
def attach_watchers(self):
"""Attach watchers that push ZK children events into a queue."""
self.watch(z.SERVER_PRESENCE)
self.watch(z.SCHEDULED)
self.watch(z.EVENTS)
def store_timezone(self):
"""Store local timezone in root ZK node."""
tz = time.tzname[0]
self.backend.update('/', {'timezone': tz})
    @utils.exit_on_unhandled
    def run_loop(self):
        """Run the master loop.

        Bootstraps the namespace and model, then alternates between draining
        the watcher event queue (in batches) and running periodic tasks:
        rescheduling, state reports, integrity checks and reboot checks.
        """
        self.create_rootns()
        self.store_timezone()
        self.load_model()
        self.init_schedule()
        self.attach_watchers()
        last_sched_time = time.time()
        last_integrity_check = 0
        last_reboot_check = 0
        last_state_report = 0
        while not self.exit:
            # Process ZK children events queue, at most _EVENT_BATCH_COUNT
            # per iteration so periodic tasks are not starved.
            queue_empty = False
            for _idx in range(0, _EVENT_BATCH_COUNT):
                try:
                    event = self.queue.popleft()
                    self.process(event)
                except IndexError:
                    queue_empty = True
                    break
            # Run periodic tasks
            if _time_past(last_sched_time + _SCHEDULER_INTERVAL):
                last_sched_time = time.time()
                # Only reschedule when some processed event invalidated
                # the current placement.
                if not self.up_to_date:
                    self.reschedule()
                    self.check_placement_integrity()
            if _time_past(last_state_report + _STATE_REPORT_INTERVAL):
                last_state_report = time.time()
                self.save_state_reports()
            if _time_past(last_integrity_check + _INTEGRITY_INTERVAL):
                assert self.check_integrity()
                self.tick_reboots()
                last_integrity_check = time.time()
            if _time_past(last_reboot_check + _REBOOT_CHECK_INTERVAL):
                self.check_reboot()
                last_reboot_check = time.time()
            # Sleep only when there was nothing to process.
            if queue_empty:
                time.sleep(_CHECK_EVENT_INTERVAL)
@utils.exit_on_unhandled
def run(self):
    """Runs the master (once it is elected leader)."""
    leader_lock = zkutils.make_lock(
        self.backend.zkclient,
        z.path.election(__name__)
    )
    _LOGGER.info('Waiting for leader lock.')
    # Blocks until this process wins the election, then runs the
    # main loop for as long as leadership is held.
    with leader_lock:
        self.run_loop()
def tick_reboots(self):
    """Tick partition reboot schedulers."""
    current_time = time.time()
    for part in self.cell.partitions.values():
        part.tick(current_time)
def _schedule_reboot(self, servername):
    """Schedule server reboot."""
    reboot_node = z.path.reboot(servername)
    self.backend.ensure_exists(reboot_node)
def check_reboot(self):
    """Identify all expired servers."""
    self.cell.resolve_reboot_conflicts()

    now = time.time()
    # Expired servers are rebooted unconditionally, as they are of no
    # use anymore.
    for name, server in six.iteritems(self.servers):
        # Ignore servers that are not yet assigned to the reboot buckets.
        if server.valid_until == 0:
            _LOGGER.info(
                'Server reboot bucket not initialized: %s', name
            )
            continue
        if now > server.valid_until:
            _LOGGER.info(
                'Expired: %s at %s',
                name,
                server.valid_until
            )
            self._schedule_reboot(name)
            continue
def _placement_data(self, app):
    """Return placement data for given app."""
    app_obj = self.cell.apps[app]
    return {
        'identity': app_obj.identity,
        'expires': app_obj.placement_expiry,
    }
def _save_placement(self, placement):
    """Store latest placement as reference."""
    # Compress the JSON blob before writing; placement can be large.
    compressed = zlib.compress(json.dumps(placement).encode())
    self.backend.put(z.path.placement(), compressed)
def init_schedule(self):
    """Run scheduler first time and update scheduled data.

    Reconciles the persisted per-server placement nodes with the
    scheduler's computed placement: removes stale app nodes, creates
    missing ones, and stores the full placement snapshot.
    """
    placement = self.cell.schedule()

    for servername, server in six.iteritems(self.cell.members()):
        placement_node = z.path.placement(servername)
        self.backend.ensure_exists(placement_node)

        # Diff what is persisted under the server node against what
        # the scheduler says should be there.
        current = set(self.backend.list(placement_node))
        correct = set(server.apps.keys())

        for app in current - correct:
            _LOGGER.info('Unscheduling: %s - %s', servername, app)
            self.backend.delete(os.path.join(placement_node, app))

        for app in correct - current:
            _LOGGER.info('Scheduling: %s - %s,%s',
                         servername, app, self.cell.apps[app].identity)

            placement_data = self._placement_data(app)
            self.backend.put(
                os.path.join(placement_node, app),
                placement_data
            )

            self._update_task(app, servername, why=None)

    self._save_placement(placement)
    self.up_to_date = True
def reschedule(self):
    """Run scheduler and adjust placement.

    Applies only the placement records that actually changed; old
    placements are removed in a first pass before any new ones are
    created (see comment below), then tasks are updated with the
    reason for the move.
    """
    placement = self.cell.schedule()

    # Filter out placement records where nothing changed.
    changed_placement = [
        (app, before, exp_before, after, exp_after)
        for app, before, exp_before, after, exp_after in placement
        if before != after or exp_before != exp_after
    ]

    # We run two loops. First - remove all old placement, before creating
    # any new ones. This ensures that in the event of loop interruption
    # for any reason (like Zookeeper connection lost or master restart)
    # there are no duplicate placements.
    for app, before, _exp_before, after, _exp_after in changed_placement:
        if before and before != after:
            _LOGGER.info('Unscheduling: %s - %s', before, app)
            self.backend.delete(z.path.placement(before, app))

    for app, before, _exp_before, after, exp_after in changed_placement:
        placement_data = self._placement_data(app)

        # Derive the reason for the placement change, used in the
        # trace event posted by _update_task.
        why = ''
        if before is not None:
            if (before not in self.servers or
                    self.servers[before].state == scheduler.State.down):
                why = '{server}:down'.format(server=before)
            else:
                # TODO: it will be nice to put app utilization at the time
                #       of eviction, but this info is not readily
                #       available yet in the scheduler.
                why = 'evicted'

        if after:
            _LOGGER.info('Scheduling: %s - %s,%s, expires at: %s',
                         after,
                         app,
                         self.cell.apps[app].identity,
                         exp_after)

            self.backend.put(
                z.path.placement(after, app),
                placement_data
            )
            self._update_task(app, after, why=why)
        else:
            # App could not be placed anywhere - mark it pending.
            self._update_task(app, None, why=why)

    self._unschedule_evicted()

    self._save_placement(placement)
    self.up_to_date = True
def _unschedule_evicted(self):
    """Delete schedule once and evicted apps."""
    # Apps that were evicted and are configured to be scheduled once
    # should be removed.
    #
    # Remove will trigger rescheduling which will be harmless but
    # strictly speaking unnecessary.
    for appname, app in six.iteritems(self.cell.apps):
        if app.schedule_once and app.evicted:
            _LOGGER.info('Removing schedule_once/evicted app: %s',
                         appname)
            # Record a terminal state before deleting the scheduled
            # node, so the app's fate remains visible.
            # TODO: unfortunately app.server is already None at this point.
            self.backend.put(
                z.path.finished(appname),
                {'state': 'terminated',
                 'when': time.time(),
                 'host': None,
                 'data': 'schedule_once'},
            )
            self.backend.delete(z.path.scheduled(appname))
def _update_task(self, appname, server, why):
    """Creates/updates application task with the new placement."""
    # Servers in the cell have full control over task node.
    if not self.events_dir:
        return

    # Scheduled on a server -> Scheduled event; no server -> Pending.
    if server:
        trace_event = traceevents.ScheduledTraceEvent(
            instanceid=appname,
            where=server,
            why=why
        )
    else:
        trace_event = traceevents.PendingTraceEvent(
            instanceid=appname,
            why=why
        )
    appevents.post(self.events_dir, trace_event)
def _abort_task(self, appname, exception):
    """Set task into aborted state in case of scheduling error."""
    if not self.events_dir:
        return

    aborted = traceevents.AbortedTraceEvent(
        instanceid=appname,
        why=app_abort.SCHEDULER,
        payload=exception
    )
    appevents.post(self.events_dir, aborted)
def remove_app(self, appname):
    """Remove app from scheduler.

    Deletes the app's placement node (if placed), posts a Deleted
    trace event, records a terminal state if none exists, then
    delegates to the base class for model cleanup.
    """
    if appname not in self.cell.apps:
        return

    app = self.cell.apps[appname]

    if app.server:
        self.backend.delete(z.path.placement(app.server, appname))

    if self.events_dir:
        appevents.post(
            self.events_dir,
            traceevents.DeletedTraceEvent(
                instanceid=appname
            )
        )

    # If finished does not exist, it means app is terminated by
    # explicit request, not because it finished on the node.
    if not self.backend.exists(z.path.finished(appname)):
        self.backend.put(
            z.path.finished(appname),
            {'state': 'terminated',
             'when': time.time(),
             'host': app.server,
             'data': None},
        )

    super(Master, self).remove_app(appname)
def _calculate_aggregate(self, apps):
"""Calculate aggregate # of apps by proid."""
aggregate = collections.Counter()
for app in apps:
aggregate[app[:app.find('.')]] += 1
return dict(aggregate)
| |
#
# Copyright (c) 2015-2018 Nest Labs, Inc.
# Copyright (c) 2019-2020 Google LLC.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# @file
# BLE Central support for Weave Device Manager via OSX CoreBluetooth APIs.
#
from __future__ import absolute_import
from __future__ import print_function
import abc
import logging
import select
import socket
import sys
import six.moves.queue
import subprocess
import threading
import time
import binascii
from ctypes import *
import readline
from Foundation import *
import objc
from PyObjCTools import AppHelper
from .WeaveBleUtility import *
from .WeaveUtility import WeaveUtility
from .WeaveBleBase import WeaveBleBase
# Load CoreBluetooth, preferring the embedded copy inside IOBluetooth
# (older OS X layouts) and falling back to the standalone framework.
try:
    objc.loadBundle("CoreBluetooth", globals(),
                    bundle_path=objc.pathForFramework(u'/System/Library/Frameworks/IOBluetooth.framework/Versions/A/Frameworks/CoreBluetooth.framework'))
except Exception:
    # A bare `except:` would also swallow KeyboardInterrupt/SystemExit.
    objc.loadBundle("CoreBluetooth", globals(),
                    bundle_path=objc.pathForFramework(u'/System/Library/Frameworks/CoreBluetooth.framework'))
# 128-bit and 16-bit forms of the Weave BLE service UUID.
weave_service = CBUUID.UUIDWithString_(u'0000FEAF-0000-1000-8000-00805F9B34FB')
weave_service_short = CBUUID.UUIDWithString_(u'FEAF')
# Weave data characteristics within the service.
weave_tx = CBUUID.UUIDWithString_(u'18EE2EF5-263D-4559-959F-4F9C429F9D11')
weave_rx = CBUUID.UUIDWithString_(u'18EE2EF5-263D-4559-959F-4F9C429F9D12')
# Chromecast setup service UUIDs, included in scan filters.
chromecast_setup_service = CBUUID.UUIDWithString_(u'0000FEA0-0000-1000-8000-00805F9B34FB')
chromecast_setup_service_short = CBUUID.UUIDWithString_(u'FEA0')
def _VoidPtrToCBUUID(ptr, len):
    """Convert a void pointer to `len` raw UUID bytes into a CBUUID.

    :param ptr: void pointer to the raw UUID bytes.
    :param len: number of bytes at `ptr` (16 for a 128-bit UUID).
    :returns: a CBUUID, or None if conversion fails.
    """
    try:
        raw = WeaveUtility.VoidPtrToByteArray(ptr, len)
        hexstr = WeaveUtility.Hexlify(raw)
        # Insert dashes to produce the canonical 8-4-4-4-12 UUID form.
        uuid_str = (hexstr[:8] + '-' + hexstr[8:12] + '-' + hexstr[12:16] +
                    '-' + hexstr[16:20] + '-' + hexstr[20:])
        return CBUUID.UUIDWithString_(uuid_str)
    except Exception:
        # Narrowed from a bare `except:`, which would also swallow
        # KeyboardInterrupt/SystemExit.
        print("ERROR: failed to convert void * to CBUUID")
        return None
class CoreBluetoothManager(WeaveBleBase):
    """BLE central support for Weave Device Manager via OSX CoreBluetooth.

    Acts as delegate for both CBCentralManager and CBPeripheral; the
    delegate callbacks are invoked by the OSX runloop, and results are
    bridged onto the Weave thread through `self.weave_queue` plus
    `self.devMgr.DriveBleIO()`.
    """

    def __init__(self, devMgr, logger=None):
        if logger:
            self.logger = logger
        else:
            self.logger = logging.getLogger('WeaveBLEMgr')

            logging.basicConfig(
                level=logging.INFO,
                format='%(asctime)s %(name)-12s %(levelname)-8s %(message)s')

        self.manager = None
        self.peripheral = None
        self.service = None
        self.scan_quiet = False
        self.characteristics = {}
        self.peripheral_list = []
        self.bg_peripheral_name = None
        # Events produced by delegate callbacks, consumed by the Weave
        # thread via GetBleEvent().
        self.weave_queue = six.moves.queue.Queue()
        self.manager = CBCentralManager.alloc()
        self.manager.initWithDelegate_queue_options_(self, None, None)
        self.ready_condition = False
        self.loop_condition = False # indicates whether the cmd requirement has been met in the runloop.
        self.connect_state = False # reflects whether or not there is a connection.
        self.send_condition = False
        self.subscribe_condition = False

        # Drive the runloop until centralManagerDidUpdateState_ reports
        # BLE ready, or the 10 second timeout expires.
        self.runLoopUntil(("ready", time.time(), 10.0))

        self.orig_input_hook = None
        self.hookFuncPtr = None
        self.setInputHook(self.readlineCB)
        self.devMgr = devMgr
        self.devMgr.SetBlockingCB(self.devMgrCB)

        # Closures handed to the Weave stack so it can drive BLE I/O
        # through this manager.
        def HandleBleEventCB():
            return self.GetBleEvent()

        def HandleBleWriteCharCB(connObj, svcId, charId, buffer, length):
            return self.WriteBleCharacteristic(connObj, svcId, charId, buffer, length)

        def HandleBleSubscribeCB(connObj, svcId, charId, subscribe):
            return self.SubscribeBleCharacteristic(connObj, svcId, charId, subscribe)

        def HandleBleCloseCB(connObj):
            return self.CloseBle(connObj)

        self.devMgr.SetBleEventCB(HandleBleEventCB)
        self.devMgr.SetBleWriteCharCB(HandleBleWriteCharCB)
        self.devMgr.SetBleSubscribeCharCB(HandleBleSubscribeCB)
        self.devMgr.SetBleCloseCB(HandleBleCloseCB)

        # test if any connections currently exist (left around from a previous run) and disconnect if need be.
        peripherals = self.manager.retrieveConnectedPeripheralsWithServices_([weave_service_short, weave_service])
        if peripherals and len(peripherals):
            for periph in peripherals:
                self.logger.info("disconnecting old connection.")
                self.loop_condition = False
                self.manager.cancelPeripheralConnection_(periph)
                self.runLoopUntil(("disconnect", time.time(), 5.0))

            self.connect_state = False
            self.loop_condition = False

    def __del__(self):
        # Restore the readline input hook and detach all callbacks.
        self.disconnect()
        self.setInputHook(self.orig_input_hook)
        self.devMgr.SetBlockingCB(None)
        self.devMgr.SetBleEventCB(None)

    def devMgrCB(self):
        """ A callback used by WeaveDeviceMgr.py to drive the OSX runloop while the
        main thread waits for the Weave thread to complete its operation."""
        runLoop = NSRunLoop.currentRunLoop()
        nextfire = runLoop.limitDateForMode_(NSDefaultRunLoopMode)

    def readlineCB(self):
        """ A callback used by readline to drive the OSX runloop while the main thread
        waits for commandline input from the user."""
        runLoop = NSRunLoop.currentRunLoop()
        nextfire = runLoop.limitDateForMode_(NSDefaultRunLoopMode)

        # Chain to the previously installed hook, if any.
        if self.orig_input_hook:
            self.orig_input_hook()

    def setInputHook(self, hookFunc):
        """Set the PyOS_InputHook to call the specific function."""
        hookFunctionType = CFUNCTYPE(None)
        self.hookFuncPtr = hookFunctionType(hookFunc)
        pyos_inputhook_ptr = c_void_p.in_dll(pythonapi, "PyOS_InputHook")
        # save the original so that on del we can revert it back to the way it was.
        self.orig_input_hook = cast(pyos_inputhook_ptr.value, PYFUNCTYPE(c_int))
        # set the new hook. readLine will call this periodically as it polls for input.
        pyos_inputhook_ptr.value = cast(self.hookFuncPtr, c_void_p).value

    def shouldLoop(self, should_tuple):
        """ Used by runLoopUntil to determine whether it should exit the runloop.

        should_tuple is (condition_name, start_time, timeout) with an
        optional 4th element (peripheral name) for the "scan" condition.
        Returns True while the condition is unmet and time remains.
        """
        result = False
        time_expired = time.time() >= should_tuple[1] + should_tuple[2]

        if should_tuple[0] == "ready":
            if not self.ready_condition and not time_expired:
                result = True
        elif should_tuple[0] == "scan":
            if not time_expired:
                result = True
            # Stop early if the named peripheral has been discovered.
            for peripheral in self.peripheral_list:
                if should_tuple[3] and str(peripheral._.name) == should_tuple[3]:
                    result = False
                    break
        elif should_tuple[0] == "connect":
            if not self.loop_condition and not time_expired:
                result = True
        elif should_tuple[0] == "disconnect":
            if not self.loop_condition and not time_expired:
                result = True
        elif should_tuple[0] == "send":
            if not self.send_condition and not time_expired:
                result = True
        elif should_tuple[0] == "subscribe":
            if not self.subscribe_condition and not time_expired:
                result = True
        elif should_tuple[0] == "unsubscribe":
            if self.subscribe_condition and not time_expired:
                result = True

        return result

    def runLoopUntil(self, should_tuple):
        """ Helper function to drive OSX runloop until an expected event is received or
        the timeout expires."""
        runLoop = NSRunLoop.currentRunLoop()
        nextfire = 1

        while nextfire and self.shouldLoop(should_tuple):
            nextfire = runLoop.limitDateForMode_(NSDefaultRunLoopMode)

    def centralManagerDidUpdateState_(self, manager):
        """ IO Bluetooth initialization is successful."""
        state = manager.state()
        # NOTE(review): state > 4 presumably corresponds to
        # CBCentralManagerStatePoweredOn (5) - confirm against the
        # CoreBluetooth headers for the supported OS versions.
        string = "BLE is ready!" if state > 4 else "BLE is not ready!"
        self.logger.info(string)
        self.manager = manager
        self.ready_condition = True if state > 4 else False

    def centralManager_didDiscoverPeripheral_advertisementData_RSSI_(self, manager, peripheral, data, rssi):
        """ Called for each peripheral discovered during scan."""
        if self.bg_peripheral_name is None:
            # Foreground scan: accumulate everything discovered.
            if peripheral not in self.peripheral_list:
                if not self.scan_quiet:
                    self.logger.info("adding to scan list:")
                    self.logger.info("")
                    self.logger.info("{0:<10}{1:<80}".format("Name =", str(peripheral._.name)))
                    self.logger.info("{0:<10}{1:<80}".format("ID =", str(peripheral._.identifier.UUIDString())))
                    self.logger.info("{0:<10}{1:<80}".format("RSSI =", rssi))
                    self.logger.info("ADV data: " + repr(data))
                    self.logger.info("")

                self.peripheral_list.append(peripheral)
        else:
            # Background scan: only keep the peripheral we are waiting for.
            if peripheral._.name == self.bg_peripheral_name:
                if len(self.peripheral_list) == 0:
                    self.logger.info("found background peripheral")
                self.peripheral_list = [peripheral]

    def centralManager_didConnectPeripheral_(self, manager, peripheral):
        """Called by CoreBluetooth via runloop when a connection succeeds."""
        self.logger.debug(repr(peripheral))
        # make this class the delegate for peripheral events.
        # NOTE(review): uses self.peripheral (set by connect()) rather
        # than the callback's `peripheral` argument - confirm they are
        # always the same object.
        self.peripheral.setDelegate_(self)
        # invoke service discovery on the periph.
        self.logger.info("Discovering services")
        self.peripheral.discoverServices_([weave_service_short, weave_service])

    def centralManager_didFailToConnectPeripheral_error_(self, manager, peripheral, error):
        """Called by CoreBluetooth via runloop when a connection fails."""
        self.logger.info("Failed to connect error = " + repr(error))
        self.loop_condition = True
        self.connect_state = False

    def centralManager_didDisconnectPeripheral_error_(self, manager, peripheral, error):
        """Called by CoreBluetooth via runloop when a disconnect completes. error = None on success."""
        self.loop_condition = True
        self.connect_state = False
        if self.devMgr:
            self.logger.info("BLE disconnected, error = " + repr(error))
            # Notify the Weave thread of the remote disconnect.
            dcEvent = BleDisconnectEvent(BLE_ERROR_REMOTE_DEVICE_DISCONNECTED)
            self.weave_queue.put(dcEvent)
            self.devMgr.DriveBleIO()

    def peripheral_didDiscoverServices_(self, peripheral, services):
        """Called by CoreBluetooth via runloop when peripheral services are discovered."""
        if len(self.peripheral.services()) == 0:
            self.logger.error("Weave service not found")
            self.connect_state = False
        else:
            # in debugging, we found connect being called twice. This
            # would trigger discovering the services twice, and
            # consequently, discovering characteristics twice.  We use the
            # self.service as a flag to indicate whether the
            # characteristics need to be invalidated immediately.
            if (self.service == self.peripheral.services()[0]):
                self.logger.debug("didDiscoverServices already happened")
            else:
                self.service = self.peripheral.services()[0]
                self.characteristics[self.service.UUID()] = []

            # NOTE: currently limiting discovery to only the pair of Weave characteristics.
            self.peripheral.discoverCharacteristics_forService_([weave_rx, weave_tx], self.service)

    def peripheral_didDiscoverCharacteristicsForService_error_(self, peripheral, service, error):
        """Called by CoreBluetooth via runloop when a characteristic for a service is discovered."""
        self.logger.debug("didDiscoverCharacteristicsForService:error "+str(repr(peripheral)) + " "+ str(repr(service)))
        self.logger.debug(repr(service))
        self.logger.debug(repr(error))

        if not error:
            self.characteristics[service.UUID()] = [char for char in self.service.characteristics()]
            self.connect_state = True
        else:
            self.logger.error("ERROR: failed to discover characteristics for service.")
            self.connect_state = False

        self.loop_condition = True

    def peripheral_didWriteValueForCharacteristic_error_(self, peripheral, characteristic, error):
        """ Called by CoreBluetooth via runloop when a write to characteristic
        operation completes. error = None on success."""
        self.logger.debug("didWriteValue error = " + repr(error))
        self.send_condition = True
        # Forward the TX completion status to the Weave thread.
        charId = bytearray(characteristic.UUID().data().bytes().tobytes())
        svcId = bytearray(weave_service.data().bytes().tobytes())
        if self.devMgr:
            txEvent = BleTxEvent(charId=charId, svcId=svcId, status=True if not error else False)
            self.weave_queue.put(txEvent)
            self.devMgr.DriveBleIO()

    def peripheral_didUpdateNotificationStateForCharacteristic_error_(self, peripheral, characteristic, error):
        """ Called by CoreBluetooth via runloop when a subscribe for notification operation completes.
        Error = None on success."""
        self.logger.debug("Receiving notifications")
        charId = bytearray(characteristic.UUID().data().bytes().tobytes())
        svcId = bytearray(weave_service.data().bytes().tobytes())
        # look at error and send True/False on Success/Failure
        success = True if not error else False
        # isNotifying() tells us whether this completion was for a
        # subscribe or an unsubscribe.
        if characteristic.isNotifying():
            operation = BleSubscribeOperation_Subscribe
            self.subscribe_condition = True
        else:
            operation = BleSubscribeOperation_Unsubscribe
            self.subscribe_condition = False

        self.logger.debug("Operation = " + repr(operation))
        self.logger.debug("success = " + repr(success))

        if self.devMgr:
            subscribeEvent = BleSubscribeEvent(charId=charId, svcId=svcId, status=success, operation=operation)
            self.weave_queue.put(subscribeEvent)
            self.devMgr.DriveBleIO()

    def peripheral_didUpdateValueForCharacteristic_error_(self, peripheral, characteristic, error):
        """ Called by CoreBluetooth via runloop when a new characteristic value is received for a
        characteristic to which this device has subscribed."""
        #len = characteristic.value().length()
        bytes = bytearray(characteristic.value().bytes().tobytes())
        charId = bytearray(characteristic.UUID().data().bytes().tobytes())
        svcId = bytearray(weave_service.data().bytes().tobytes())

        # Kick Weave thread to retrieve the saved packet.
        if self.devMgr:
            # Save buffer, length, service UUID and characteristic UUID
            rxEvent = BleRxEvent(charId=charId, svcId=svcId, buffer=bytes)
            self.weave_queue.put(rxEvent)
            self.devMgr.DriveBleIO()

        self.logger.debug("received")
        # NOTE(review): `len` below is the *builtin function* (the
        # length computation above is commented out), so this logs the
        # function repr instead of the payload length - confirm and fix.
        self.logger.debug("received (" + str(len) + ") bytes: " + repr(characteristic.value().bytes().tobytes()))

    def GetBleEvent(self):
        """ Called by WeaveDeviceMgr.py on behalf of Weave to retrieve a queued message."""
        if not self.weave_queue.empty():
            ev = self.weave_queue.get()

            # NOTE(review): eventStruct is a local; returning the raw
            # address of a ctypes object whose Python reference goes out
            # of scope looks lifetime-unsafe - verify the consumer copies
            # the struct before the next GC.
            if isinstance(ev, BleRxEvent):
                eventStruct = BleRxEventStruct.fromBleRxEvent(ev)
                return cast( pointer(eventStruct), c_void_p).value
            elif isinstance(ev, BleTxEvent):
                eventStruct = BleTxEventStruct.fromBleTxEvent(ev)
                return cast( pointer(eventStruct), c_void_p).value
            elif isinstance(ev, BleSubscribeEvent):
                eventStruct = BleSubscribeEventStruct.fromBleSubscribeEvent(ev)
                return cast( pointer(eventStruct), c_void_p).value
            elif isinstance(ev, BleDisconnectEvent):
                eventStruct = BleDisconnectEventStruct.fromBleDisconnectEvent(ev)
                return cast( pointer(eventStruct), c_void_p).value

        return None

    def scan(self, line):
        """ API to initiatae BLE scanning for -t user_timeout seconds."""
        args = self.ParseInputLine(line, "scan")

        if not args:
            return
        self.scan_quiet = args[1]
        self.bg_peripheral_name = None
        del self.peripheral_list[:]
        self.peripheral_list = []

        # Filter on the service UUID Array or None to accept all scan results.
        self.manager.scanForPeripheralsWithServices_options_([weave_service_short, weave_service, chromecast_setup_service_short, chromecast_setup_service], None)
        #self.manager.scanForPeripheralsWithServices_options_(None, None)

        self.runLoopUntil(("scan", time.time(), args[0], args[2]))

        self.manager.stopScan()
        self.logger.info("scanning stopped")

    def bgScanStart(self, name):
        """ API to initiate background BLE scanning."""
        self.logger.info("scanning started")
        self.bg_peripheral_name = name
        del self.peripheral_list[:]
        self.peripheral_list = []

        # Filter on the service UUID Array or None to accept all scan results.
        self.manager.scanForPeripheralsWithServices_options_([weave_service_short, weave_service, chromecast_setup_service_short, chromecast_setup_service], None)

    def bgScanStop(self):
        """ API to stop background BLE scanning."""
        self.manager.stopScan()
        self.bg_peripheral_name = None
        self.logger.info("scanning stopped")

    def connect(self, identifier):
        """ API to initiate BLE connection to peripheral device whose identifier == identifier.

        The identifier may match either the peripheral's UUID string or
        its advertised name. Returns True on success, False otherwise.
        """
        self.logger.info("trying to connect to " + identifier)

        if self.connect_state:
            self.logger.error("ERROR: Connection to a BLE device already exists!")
        else:
            for p in self.peripheral_list:
                p_id = str(p.identifier().UUIDString())
                p_name = str(p.name())
                self.logger.debug(p_id + " vs " + str(identifier))
                self.logger.debug(p_name + " vs " + str(identifier))
                if p_id == str(identifier) or p_name == str(identifier):
                    self.loop_condition = False
                    self.peripheral = p
                    self.manager.connectPeripheral_options_(p, None)

                    self.runLoopUntil(("connect", time.time(), 15.0))

                    # Cleanup when the connect fails due to timeout,
                    # otherwise CoreBluetooth will continue to try to connect after this
                    # API exits.
                    if not self.connect_state:
                        self.manager.cancelPeripheralConnection_(p)
                        self.peripheral = None
                    break

        ret = True if self.loop_condition and self.connect_state else False

        resString = "connect " + ("success" if ret else "fail")
        self.logger.info(resString)
        return ret

    def disconnect(self):
        """ API to initiate BLE disconnect procedure."""
        self.logger.info("disconnecting")
        if self.peripheral and self.peripheral.state() != BlePeripheralState_Disconnected:
            self.loop_condition = False
            self.manager.cancelPeripheralConnection_(self.peripheral)

            self.runLoopUntil(("disconnect", time.time(), 10.0))
        resString = "disconnect " + ("success" if self.loop_condition and not self.connect_state else "fail")
        self.logger.info(resString)

        # Invalidate cached per-connection state; the scan list is kept.
        self.characteristics = {}
        #del self.peripheral_list[:]
        #self.peripheral_list = []
        self.peripheral = None
        self.service = None

    def scan_connect(self, line):
        """ API to perform both scan and connect operations in one call."""
        args = self.ParseInputLine(line, "scan-connect")

        if not args:
            return

        self.scan_quiet = args[1]
        self.scan(line)
        if len(self.peripheral_list):
            return self.connect(args[2])
        else:
            self.logger.info("Failed to scan device named: " + args[2] + ". Connection skipped.")
            return False

    def isConnected(self):
        # True while a peripheral is held and not in disconnected state.
        if self.peripheral and self.peripheral.state() != BlePeripheralState_Disconnected:
            return True

        return False

    def WriteBleCharacteristic(self, connObj, svcId, charId, buffer, length):
        """ Called by WeaveDeviceMgr.py to satisfy a request by Weave to transmit a packet over BLE."""
        result = False
        # Wrap the raw buffer as NSData for CoreBluetooth.
        bytes = WeaveUtility.VoidPtrToByteArray(buffer, length)
        bytes = NSData.dataWithBytes_length_(bytes, len(bytes)) # convert bytearray to NSData

        svcId = _VoidPtrToCBUUID(svcId, 16)
        charId = _VoidPtrToCBUUID(charId, 16)

        if self.peripheral and self.peripheral.state() != BlePeripheralState_Disconnected:
            for char in self.characteristics[svcId]:
                if char.UUID() == charId:
                    self.peripheral.writeValue_forCharacteristic_type_(bytes, char, CBCharacteristicWriteWithResponse)
                    result = True
                    break
        else:
            self.logger.warning("WARNING: peripheral is no longer connected.")

        return result

    def SubscribeBleCharacteristic(self, connObj, svcId, charId, subscribe):
        """ Called by Weave to (un-)subscribe to a characteristic of a service."""
        result = False
        svcId = _VoidPtrToCBUUID(svcId, 16)
        charId = _VoidPtrToCBUUID(charId, 16)

        if self.peripheral and self.peripheral.state() != BlePeripheralState_Disconnected:
            for char in self.characteristics[svcId]:
                if char.UUID() == charId:
                    self.peripheral.setNotifyValue_forCharacteristic_(True if subscribe else False, char)
                    result = True
                    break
        else:
            self.logger.warning("WARNING: peripheral is no longer connected.")

        return result

    def ble_debug_log(self, line):
        """Toggle logger level: '1' enables DEBUG, anything else INFO."""
        args = self.ParseInputLine(line)

        if int(args[0]) == 1:
            self.logger.setLevel(logging.DEBUG)
            self.logger.debug("current logging level is debug")
        else:
            self.logger.setLevel(logging.INFO)
            self.logger.info("current logging level is info")

        return True

    def CloseBle(self, connObj):
        """ Called by Weave to close the BLE connection."""
        if self.peripheral:
            self.manager.cancelPeripheralConnection_(self.peripheral)
            self.characteristics = {}
            #del self.peripheral_list[:]
            #self.peripheral_list = []
            self.peripheral = None
            self.service = None
            self.connect_state = False

        return True

    def updateCharacteristic(self, bytes, svcId, charId):
        # TODO: implement this for Peripheral support.
        return False
| |
import re
import os
from util.models.meta_dict import MetaDict
from util.models.regex_model import RegexModel
from util.models.synonym_model import SynonymModel
from util.models.common_example_model import CommonExampleModel
import sys
import json
class StateEnum:
    """Parser section states for CreateJson.parse_file."""
    NONE = 0     # before any section header has been seen
    META = 1     # inside [meta]
    REGEX = 2    # inside [regex_features]
    SYNONYM = 3  # inside [entity_synonyms]
    TEXT = 4     # inside [common_examples: <intent>]
class CreateJson:
META_HEADER = re.compile('\[meta\]')
REGEX_FEATURES_HEADER = re.compile('\[regex_features\]')
ENTITY_SYNONYM_HEADER = re.compile('\[entity_synonyms\]')
TEXT_HEADER = re.compile('\[common_examples:.*\]')
EMPTY_LINE = re.compile('\s*')
COMMENTED_LINE = re.compile('^#')
nlu_dict = {"rasa_nlu_data":
{
"regex_features": [
],
"common_examples": [
]
}
}
def __init__(self):
self.current_intent = ""
self.meta_list = []
self.regex_list = []
self.synonym_list = []
self.intent_list = []
def reset(self):
self.current_intent = ""
self.meta_list = []
self.regex_list = []
self.synonym_list = []
self.intent_list = []
self.meta_string = ""
"""
Takes a directory of text files and converts them to JSON files for RASA NLU training.
read_dir: Directory with txt files to be converted to JSON.
save_dir: Directory in which to save the converted JSON files. File names will be same as the source text files.
"""
def parse_directory(self, read_dir, save_dir):
read_dir = os.getcwd() + read_dir
save_dir = os.getcwd() + save_dir
print("RASA Json Creator - Parse Directory\n\t-Read Directory: {}\n\t-Save Directory: {}".format(read_dir,
save_dir))
if not os.path.exists(os.path.dirname(save_dir)):
print("\t!Directory {} does not exist, creating it".format(save_dir))
os.makedirs(os.path.dirname(save_dir))
for file in os.listdir(read_dir):
self.reset()
with open(read_dir + file, "r") as myfile:
text = myfile.read()
self.parse_file(text)
self.nlu_dict['rasa_nlu_data']['regex_features'] = self.regex_list
self.nlu_dict['rasa_nlu_data']['common_examples'] = self.intent_list
filename = save_dir + file.split('.')[0] + '.json'
with open(filename, 'w+') as fp:
json.dump(self.nlu_dict, fp)
print("\t+Writing file: {}".format(filename))
"""
Takes a single input text file to use as a base and creates JSON files for RASA NLU training for each file in the output file list.
input_file: Input text file to be used as the base.
output_file_names: A list of names for the output files. All files will have identical training data.
save_dir: Directory in which to save the JSON files. File names will be taken from the output file name list.
"""
def identical_fact_list(self, input_file, output_file_names, save_dir):
input_file = os.getcwd() + "/" + input_file
save_dir = os.getcwd() + save_dir
print(
"RASA Json Creator - Identical Fact List\n\t-Input File: {}\n\t-Output File Names: {}\n\t-Save Directory: {}"
.format(input_file, output_file_names, save_dir))
if not os.path.exists(os.path.dirname(save_dir)):
print("\t!Directory {} does not exist, creating it".format(save_dir))
os.makedirs(os.path.dirname(save_dir))
self.reset()
with open(input_file) as file:
text = file.read()
self.parse_file(text)
self.nlu_dict['rasa_nlu_data']['regex_features'] = self.regex_list
self.nlu_dict['rasa_nlu_data']['common_examples'] = self.intent_list
for filename in output_file_names:
full_filename = save_dir + filename + '.json'
with open(full_filename, 'w+') as output_file:
json.dump(self.nlu_dict, output_file)
print("\t+Writing file: {}".format(full_filename))
def parse_file(self, file):
self.state = StateEnum.NONE
lines = file.split("\n")
for line in lines:
if self.META_HEADER.search(line):
self.state = StateEnum.META
elif self.REGEX_FEATURES_HEADER.search(line):
self.state = StateEnum.REGEX
elif self.ENTITY_SYNONYM_HEADER.search(line):
self.state = StateEnum.SYNONYM
elif self.TEXT_HEADER.search(line):
intent = line.split(": ")[1]
self.current_intent = intent.replace("]", "")
self.state = StateEnum.TEXT
elif len(line) < 5:
pass
elif self.COMMENTED_LINE.search(line):
pass
elif self.state == StateEnum.META:
self.meta_list.append(self.find_meta_characters(line))
elif self.state == StateEnum.REGEX:
self.regex_list.append(self.find_regex(line))
elif self.state == StateEnum.SYNONYM:
self.synonym_list.append(self.find_synonyms(line))
elif self.state == StateEnum.TEXT:
self.intent_list.append(self.find_text(line))
def find_meta_characters(self, line):
meta_dict = MetaDict()
meta_characters = line.split('=')[0]
meta_characters = meta_characters.replace(" ", "")
meta_dict.open(meta_characters[0])
meta_dict.close(meta_characters[1])
meta_meaning = line.split('=')[1]
meta_meaning = meta_meaning.replace(" ", "")
if ',' in meta_meaning:
meta_dict.entity(meta_meaning.split(",")[0])
meta_dict.extractor(meta_meaning.split(",")[1])
else:
meta_dict.entity(meta_meaning)
return meta_dict.meta
def find_regex(self, line):
reg_dict = RegexModel()
reg_dict.name(line.split(": ")[0])
reg_dict.pattern(line.split(": ")[1])
return reg_dict.regex_dict
def find_synonyms(self, line):
syn_dict = SynonymModel()
syn_dict.entity(line.split(": ")[0])
synonyms = line.split(": ")[1]
syn_list = synonyms.split(", ")
syn_dict.synonyms(syn_list)
return syn_dict.syn_dict
def find_text(self, line):
intent_dict = CommonExampleModel().intent_dict
text = line
intent_dict['text'] = text
intent_dict['intent'] = self.current_intent
entity_list = []
for dictionary in self.meta_list:
if dictionary['open'] in line:
start = text.find(dictionary['open'])
text = text.replace(dictionary['open'], "", 1)
end = text.find(dictionary['close'])
text = text.replace(dictionary['close'], "", 1)
value = text[start:end]
entity = dictionary['entity']
try:
extractor = dictionary['extractor']
except KeyError:
extractor = ""
ent_dict = {
"start": start,
"end": end,
"value": value,
"entity": entity,
}
if extractor is not "":
ent_dict['extractor'] = extractor
entity_list.append(ent_dict)
intent_dict['entities'] = entity_list
intent_dict['text'] = text
return intent_dict
if __name__ == '__main__':
    # argv[1]: directory of txt files; argv[2]: output directory.
    parser = CreateJson()
    parser.parse_directory(sys.argv[1], sys.argv[2])
| |
import time
import numpy as np
import scipy as sp
import pandas as pd
import xgboost as xgb
import re
def get_leaf_values(tree_str):
    """Extract all leaf values from an xgboost text tree dump."""
    # Matches the value following 'leaf=' up to the newline.
    leaf_pattern = re.compile(r"(?<=leaf\=)(.+)\n")
    return np.array([float(v) for v in leaf_pattern.findall(tree_str)])
def get_all_leaves(bst):
    """Collect one leaf-value array per tree of a trained booster."""
    return [get_leaf_values(tree_text) for tree_text in bst.get_dump()]
# init begin
# Higgs data: column 0 holds the binary label, the remaining columns are
# features.  The 750/250 file suffixes are the 7.5M/2.5M-row train/valid
# split produced by the shell commands noted below.
data_train = pd.read_csv('/home/tks/download/higgs/binary.train750', header=None)
data_valid = pd.read_csv('/home/tks/download/higgs/binary.valid250', header=None)
y_train = data_train[0].values
data_train.drop(0, axis=1, inplace=True)
y_valid = data_valid[0].values
data_valid.drop(0, axis=1, inplace=True)
dtrain = xgb.DMatrix(data_train.values, label = y_train)
dvalid = xgb.DMatrix(data_valid.values, label = y_valid)
# Earlier 10M/1M train/test variant of the same pipeline, kept for reference.
# data = pd.read_csv('/home/tks/download/higgs/binary.train', header=None)
# data_test = pd.read_csv('/home/tks/download/higgs/binary.test', header=None)
# y = data[0].values
# data.drop(0, axis=1, inplace=True)
# y_test = data_test[0].values
# data_test.drop(0, axis=1, inplace=True)
# dtrain = xgb.DMatrix(data.values, label = y)
# dtest = xgb.DMatrix(data_test.values, label = y_test)
# init end
# Shell commands used to split the raw HIGGS.csv into the files above:
# split -l 10000000 HIGGS.csv out
# mv outaa binary.train
# mv outab binary.test
# split -l 7500000 binary.train out
# mv outaa binary.train750
# mv outab binary.valid250
# Exact-method baseline parameter set; immediately superseded by the
# 'approx' parameter set below (kept as part of the experiment log).
param = {'objective':'binary:logistic', 'tree_method':'exact',
         'eta':.1, 'max_depth':8, 'min_child_weight':100,
         'nthread':8, 'seed':123, 'silent':1}
# 2016/10/17
# #train=10M
param = {'objective':'binary:logistic', 'tree_method':'approx', 'sketch_eps':0.004,
         'eta':.1, 'max_depth':8, 'eval_metric':'auc',
         'nthread':8, 'seed':123, 'silent':1}
n_rounds = 500
n_nodes = []
scores = []
# Sweep min_child_weight, recording wall time and total leaf count.
for mc in [1, 100]:
    t0 = time.time()
    param['min_child_weight'] = mc
    # BUG FIX: the watchlist referenced `dtest`, which is only created in the
    # commented-out init block above and is undefined here (NameError at
    # runtime).  Evaluate on the held-out validation matrix instead.
    bst = xgb.train(param, dtrain, n_rounds, [(dtrain, 'train'), (dvalid, 'valid')])
    tmp = get_all_leaves(bst)
    n_nodes.append([len(s) for s in tmp])
    scores.append({'min_c': mc, 'time': time.time() - t0,
                   'total_leaves': np.sum(n_nodes[-1])})
    print(scores[-1])
# --- pasted session output: min_child_weight sweep ---
[499] train-auc:0.846833 test-auc:0.840352
[499] train-auc:0.845507 test-auc:0.840589
pd.DataFrame(scores)
min_c time total_leaves
0 1 3655.648553 120937
1 100 3628.614431 91478
# r004
# 2016/10/19 5.2h
# max_depth sweep (8..12) with approx histograms; per-round AUC curves and
# per-tree leaf counts are logged to log/r004_*.csv.
param = {'objective':'binary:logistic','tree_method':'approx', 'sketch_eps':0.004,
         'eta':.1, 'min_child_weight':100, 'lambda':0, 'eval_metric':'auc',
         'nthread':8, 'seed':123, 'silent':1}
n_rounds=500
n_nodes = []
scores = []
result = []
for max_depth in [8, 9, 10, 11, 12]:
    t0 = time.time()
    evals_result = {}
    param['max_depth'] = max_depth
    bst = xgb.train(param, dtrain, n_rounds, [(dtrain, 'train'), (dvalid, 'valid')],
evals_result=evals_result)
    tmp = get_all_leaves(bst)
    n_nodes.append([len(s) for s in tmp])
    scores.append({'max_depth':max_depth, 'total_leaves':np.sum(n_nodes[-1]),
'time':time.time() - t0})
    result.append(evals_result)
    print(scores[-1])
# One column per max_depth value (8..12).
df_train = pd.DataFrame({i+8:result[i]['train']['auc'] for i in range(5)})
df_valid = pd.DataFrame({i+8:result[i]['valid']['auc'] for i in range(5)})
df_leaf_cnt = pd.DataFrame({i+8:n_nodes[i] for i in range(5)})
df_train.to_csv('log/r004_train.csv')
df_valid.to_csv('log/r004_valid.csv')
df_leaf_cnt.to_csv('log/r004_leaf_cnt.csv')
print(df_valid.tail(5))
# --- pasted session output (r004) ---
8 9 10 11 12
495 0.838657 0.842426 0.846096 0.848043 0.850070
496 0.838678 0.842448 0.846115 0.848057 0.850082
497 0.838705 0.842453 0.846135 0.848068 0.850089
498 0.838722 0.842472 0.846140 0.848096 0.850113
499 0.838746 0.842518 0.846142 0.848101 0.850116
print(pd.DataFrame(scores))
max_depth time total_leaves
0 8 2724.755577 85618
1 9 3174.557975 139994
2 10 3698.960060 222218
3 11 4207.922560 319139
4 12 4858.091027 447266
# r005
# 2016/10/21 7.5m
# Short min_child_weight sweep (10 rounds) with effectively unlimited depth;
# leaf counts roughly halve as min_child_weight doubles.
param = {'objective':'binary:logistic','tree_method':'approx', 'sketch_eps':0.00392,
         'eta':.1, 'max_depth':1000, 'lambda':0, 'eval_metric':'auc',
         'nthread':8, 'seed':123, 'silent':1}
n_rounds=10
n_nodes = []
scores = []
result = []
for mc in [1000, 2000, 4000]:
    t0 = time.time()
    evals_result = {}
    param['min_child_weight'] = mc
    bst = xgb.train(param, dtrain, n_rounds, [(dtrain, 'train'), (dvalid, 'valid')],
evals_result=evals_result)
    tmp = get_all_leaves(bst)
    n_nodes.append([len(s) for s in tmp])
    scores.append({'min_child_weight':mc, 'total_leaves':np.sum(n_nodes[-1]),
'time':time.time() - t0})
    result.append(evals_result)
    print(scores[-1])
df = pd.DataFrame(scores)
# Column labels are the swept min_child_weight values (1000, 2000, 4000).
df_leaf_cnt = pd.DataFrame({2**i*1000:n_nodes[i] for i in range(3)})
df.to_csv('log/r005.csv')
df_leaf_cnt.to_csv('log/r005_leaf_cnt.csv')
print(df_leaf_cnt)
# --- pasted session output (r005) ---
1000 2000 4000
0 1464 733 368
1 1442 724 364
2 1444 721 363
3 1434 715 365
4 1410 714 359
5 1394 713 357
6 1374 702 345
7 1362 688 343
8 1348 671 342
9 1311 665 335
print(df)
min_child_weight time total_leaves
0 1000 179.808144 13983
1 2000 153.772096 7046
2 4000 116.155873 3541
# r006
# 2016/10/21 3.4h
# 'min_child_weight':1000
# Full 500-round run with unlimited depth; logloss and AUC tracked per round.
param = {'objective':'binary:logistic','tree_method':'approx', 'sketch_eps':0.00392,
         'eta':.1, 'min_child_weight':1000, 'max_depth':1000, 'lambda':0,
         'eval_metric':['logloss','auc'],
         'nthread':8, 'seed':123, 'silent':1}
n_rounds=500
n_nodes = []
scores = []
t0 = time.time()
evals_result = {}
bst = xgb.train(param, dtrain, n_rounds, [(dtrain, 'train'), (dvalid, 'valid')],
evals_result=evals_result)
tmp = get_all_leaves(bst)
n_nodes.append([len(s) for s in tmp])
scores.append({'min_child_weight':param['min_child_weight'], 'total_leaves':np.sum(n_nodes[-1]),
'time':time.time() - t0})
print(scores[-1])
df = pd.DataFrame(scores)
# Per-round metrics plus per-tree leaf counts, for later plotting.
df_auc_loss = pd.DataFrame({'auc_train':evals_result['train']['auc'],
'auc_valid':evals_result['valid']['auc'],
'loss_train':evals_result['train']['logloss'],
'loss_valid':evals_result['valid']['logloss'],
'leaf_cnt':n_nodes[0]})
# --- pasted session output (r006) ---
min_child_weight time total_leaves
0 1000 12198.695484 476780
df_auc_loss.tail(10)
auc_train auc_valid leaf_cnt loss_train loss_valid
490 0.881540 0.852546 890 0.433844 0.472947
491 0.881603 0.852561 907 0.433757 0.472926
492 0.881663 0.852573 892 0.433671 0.472909
493 0.881726 0.852588 894 0.433581 0.472889
494 0.881788 0.852603 905 0.433493 0.472869
495 0.881846 0.852610 909 0.433415 0.472859
496 0.881901 0.852613 899 0.433338 0.472854
497 0.881962 0.852626 903 0.433247 0.472834
498 0.882019 0.852633 899 0.433165 0.472824
499 0.882082 0.852651 889 0.433071 0.472797
df.to_csv('log/r006.csv')
df_auc_loss.to_csv('log/r006_auc_loss.csv')
# r008
# 'min_child_weight':1000
# 'subsample':0.5
# 2016/10/25 2.64h
# Same as r006 but with 50% row subsampling; roughly halves total leaves.
param = {'objective':'binary:logistic','tree_method':'approx', 'sketch_eps':0.00392,
         'eta':.1, 'min_child_weight':1000, 'max_depth':1000, 'lambda':0,
         'subsample':0.5,
         'eval_metric':['logloss','auc'],
         'nthread':8, 'seed':123, 'silent':1}
n_rounds=500
n_nodes = []
scores = []
t0 = time.time()
evals_result = {}
bst = xgb.train(param, dtrain, n_rounds, [(dtrain, 'train'), (dvalid, 'valid')],
evals_result=evals_result)
tmp = get_all_leaves(bst)
n_nodes.append([len(s) for s in tmp])
scores.append({'min_child_weight':param['min_child_weight'], 'total_leaves':np.sum(n_nodes[-1]),
'time':time.time() - t0})
print(scores[-1])
df = pd.DataFrame(scores)
df_auc_loss = pd.DataFrame({'auc_train':evals_result['train']['auc'],
'auc_valid':evals_result['valid']['auc'],
'loss_train':evals_result['train']['logloss'],
'loss_valid':evals_result['valid']['logloss'],
'leaf_cnt':n_nodes[0]})
# --- pasted session output (r008) ---
min_child_weight time total_leaves
0 1000 9504.795172 242552
df_auc_loss.tail(10)
auc_train auc_valid leaf_cnt loss_train loss_valid
490 0.863225 0.847655 456 0.459379 0.479772
491 0.863262 0.847664 451 0.459328 0.479758
492 0.863296 0.847672 464 0.459277 0.479748
493 0.863335 0.847684 462 0.459224 0.479730
494 0.863373 0.847694 467 0.459173 0.479718
495 0.863407 0.847703 454 0.459128 0.479705
496 0.863440 0.847713 460 0.459078 0.479691
497 0.863478 0.847722 462 0.459026 0.479678
498 0.863513 0.847729 457 0.458977 0.479668
499 0.863564 0.847754 457 0.458909 0.479633
df.to_csv('log/r008.csv')
df_auc_loss.to_csv('log/r008_auc_loss.csv')
# r009
# 2016/10/26 56m
# 'min_child_weight':1000
# 'max_depth':10
# Depth capped at 10 (vs unlimited in r006): much faster, lower valid AUC.
param = {'objective':'binary:logistic','tree_method':'approx', 'sketch_eps':0.00392,
         'eta':.1, 'min_child_weight':1000, 'max_depth':10, 'lambda':0,
         'eval_metric':['logloss','auc'],
         'nthread':8, 'seed':123, 'silent':1}
n_nodes = []
scores = []
t0 = time.time()
n_rounds = 500
evals_result = {}
bst = xgb.train(param, dtrain, n_rounds, [(dtrain, 'train'), (dvalid, 'valid')],
evals_result=evals_result)
tmp = get_all_leaves(bst)
n_nodes.append([len(s) for s in tmp])
scores.append({'min_child_weight':param['min_child_weight'], 'total_leaves':np.sum(n_nodes[-1]),
'time':time.time() - t0})
print(scores[-1])
df = pd.DataFrame(scores)
df_auc_loss = pd.DataFrame({'auc_train':evals_result['train']['auc'],
'auc_valid':evals_result['valid']['auc'],
'loss_train':evals_result['train']['logloss'],
'loss_valid':evals_result['valid']['logloss'],
'leaf_cnt':n_nodes[0]})
# --- pasted session output (r009) ---
min_child_weight time total_leaves
0 1000 3337.426304 97258
df_auc_loss.tail(10)
auc_train auc_valid leaf_cnt loss_train loss_valid
490 0.849419 0.841926 127 0.478304 0.487696
491 0.849476 0.841967 255 0.478227 0.487641
492 0.849567 0.842037 357 0.478100 0.487547
493 0.849594 0.842055 179 0.478063 0.487523
494 0.849610 0.842064 102 0.478043 0.487511
495 0.849630 0.842075 161 0.478014 0.487494
496 0.849651 0.842085 150 0.477987 0.487480
497 0.849663 0.842093 89 0.477969 0.487469
498 0.849666 0.842095 43 0.477963 0.487465
499 0.849677 0.842101 95 0.477948 0.487456
df.to_csv('log/r009.csv')
df_auc_loss.to_csv('log/r009_auc_loss.csv')
# r010
# 2016/10/26 25.4m
# 'min_child_weight':1000
# 'max_depth':4
# Very shallow trees: fastest full run, clearly underfits (valid AUC ~0.816).
param = {'objective':'binary:logistic','tree_method':'approx', 'sketch_eps':0.00392,
         'eta':.1, 'min_child_weight':1000, 'max_depth':4, 'lambda':0,
         'eval_metric':['logloss','auc'],
         'nthread':8, 'seed':123, 'silent':1}
n_nodes = []
scores = []
t0 = time.time()
n_rounds = 500
evals_result = {}
bst = xgb.train(param, dtrain, n_rounds, [(dtrain, 'train'), (dvalid, 'valid')],
evals_result=evals_result)
tmp = get_all_leaves(bst)
n_nodes.append([len(s) for s in tmp])
scores.append({'min_child_weight':param['min_child_weight'], 'total_leaves':np.sum(n_nodes[-1]),
'time':time.time() - t0})
print(scores[-1])
df = pd.DataFrame(scores)
df_auc_loss = pd.DataFrame({'auc_train':evals_result['train']['auc'],
'auc_valid':evals_result['valid']['auc'],
'loss_train':evals_result['train']['logloss'],
'loss_valid':evals_result['valid']['logloss'],
'leaf_cnt':n_nodes[0]})
# --- pasted session output (r010) ---
min_child_weight time total_leaves
0 1000 1525.824757 7389
df_auc_loss.tail(10)
auc_train auc_valid leaf_cnt loss_train loss_valid
490 0.816977 0.815593 8 0.520314 0.521733
491 0.816989 0.815604 13 0.520295 0.521716
492 0.817009 0.815624 16 0.520266 0.521688
493 0.817042 0.815657 16 0.520226 0.521648
494 0.817045 0.815659 10 0.520222 0.521645
495 0.817075 0.815687 13 0.520194 0.521619
496 0.817084 0.815694 10 0.520184 0.521610
497 0.817096 0.815705 13 0.520166 0.521594
498 0.817117 0.815723 16 0.520136 0.521568
499 0.817120 0.815726 9 0.520131 0.521564
df.to_csv('log/r010.csv')
df_auc_loss.to_csv('log/r010_auc_loss.csv')
# r011
# 2016/10/28
# cpu vs gpu
# n=4000000 ==>> grow_gpu crash (memory)
# Timing grid: data size x updater x thread count.
# NOTE: rebinds the module-level `dtrain` to a subsample of the data.
n_rounds = 10
param = {'objective':'binary:logistic',
         'max_depth':6,
         'seed':123, 'silent':1}
scores = []
for n in [1000000, 2000000]:
    dtrain = xgb.DMatrix(data_train.values[:n], label = y_train[:n])
    for updater in ['grow_colmaker', 'grow_gpu']:
        param['updater'] = updater
        for nthread in [1, 4, 8]:
            param['nthread'] = nthread
            t0 = time.time()
            bst = xgb.train(param, dtrain, n_rounds)
            scores.append({'n':n, 'updater':updater, 'nthread':nthread,
'time':time.time() - t0})
            print(scores[-1])
df = pd.DataFrame(scores)
df.to_csv('log/r011.csv')
pd.set_option('display.precision', 1)
print(df.set_index(['n', 'updater', 'nthread'])['time'].unstack())
# --- pasted session output (r011) ---
nthread 1 4 8
n updater
1000000 grow_colmaker 113.4 31.7 24.4
grow_gpu 13.4 7.4 5.4
2000000 grow_colmaker 237.8 67.7 52.1
grow_gpu 25.1 11.8 11.3
# r012
# 2016/10/28 5.6m
# sketch_eps
# Timing sensitivity to sketch_eps (= 1/r buckets); accuracy not recorded.
n = 2000000
dtrain = xgb.DMatrix(data_train.values[:n], label = y_train[:n])
n_rounds = 10
param = {'objective':'binary:logistic',
         'max_depth':6, 'tree_method':'approx',
         'seed':123, 'silent':1, 'nthread':8}
scores = []
for r_sketch_eps in [1024, 512, 256, 128, 64, 32, 16, 8, 4]:
    param['sketch_eps'] = 1./ r_sketch_eps
    t0 = time.time()
    bst = xgb.train(param, dtrain, n_rounds)
    scores.append({'r_sketch_eps':r_sketch_eps,
'time':time.time() - t0})
    print(scores[-1])
df = pd.DataFrame(scores)
df.to_csv('log/r012.csv')
pd.set_option('display.precision', 1)
print(df.set_index('r_sketch_eps'))
# --- pasted session output (r012) ---
time
r_sketch_eps
1024 44.0
512 39.9
256 37.7
128 36.1
64 35.8
32 35.5
16 35.5
8 35.9
4 35.6
## end
# --- sklearn-wrapper scratch experiments ---
from xgboost.sklearn import XGBClassifier
n = data_train.shape[0]
n=6000000
X_tr = data_train.values[:n]
y_tr = y_train[:n]
X_va = data_valid.values
y_va = y_valid
model = XGBClassifier(n_estimators=1,
                      learning_rate=0.1,
                      max_depth=1000,
                      min_child_weight=1000,
                      reg_lambda=0,
                      seed=12)
# NOTE(review): the printed label says colsample_bytree but the attribute
# being set is colsample_bylevel -- one of the two looks unintended; confirm.
for cb in [0.1, 1.]:
    print('\ncolsample_bytree: %.1f' % cb)
    model.colsample_bylevel = cb
    model.fit(X_tr, y_tr, eval_set=[(X_tr, y_tr), (X_va, y_va)],
eval_metric='auc', verbose=True)
y_train=y_train.astype(int)
n = data_train.shape[0]
n = 327690
dtrain = xgb.DMatrix(data_train.values[:n], label = y_train[:n])
# First param2 is immediately overwritten by the simpler set below.
param2 = {'objective':'binary:logistic','tree_method':'approx', 'sketch_eps':0.00392,
          'eta':.1, 'min_child_weight':10, 'max_depth':10, 'lambda':0,
          'eval_metric':['logloss','auc'],
          'nthread':2, 'seed':123, 'silent':1}
param2 = {'objective':'binary:logistic',
          'eta':.1, 'max_depth':10,# 'lambda':0,
          'eval_metric':['logloss','auc'],
          'nthread':8, 'seed':123, 'silent':1}
for cb in [0.1, 1.]:
    print('colsample_bylevel:%.1f' % cb)
    param2.update({'colsample_bylevel':cb})
    # NOTE(review): `evals_result` here is the stale dict left over from the
    # earlier runs above and is refilled on each call -- confirm intended.
    bst2 = xgb.train(param2, dtrain, 2, [(dtrain, 'train')],
evals_result=evals_result)
| |
from __future__ import absolute_import
import os
import shlex
import sys
from operator import attrgetter
from six import StringIO
from .. import mock
from .testcases import DockerClientTestCase
from compose.cli.errors import UserError
from compose.cli.main import TopLevelCommand
from compose.project import NoSuchService
class CLITestCase(DockerClientTestCase):
    def setUp(self):
        """Prepare a TopLevelCommand pointed at the simple fixture project.

        sys.exit is stubbed out so dispatch errors do not abort the test
        runner; the original is restored in tearDown.
        """
        super(CLITestCase, self).setUp()
        self.old_sys_exit = sys.exit
        sys.exit = lambda code=0: None
        self.command = TopLevelCommand()
        self.command.base_dir = 'tests/fixtures/simple-composefile'
    def tearDown(self):
        """Restore sys.exit and remove every container the test created."""
        sys.exit = self.old_sys_exit
        self.project.kill()
        self.project.remove_stopped()
        # One-off containers (from `run`) are not covered by remove_stopped.
        for container in self.project.containers(stopped=True, one_off=True):
            container.remove(force=True)
        super(CLITestCase, self).tearDown()
    @property
    def project(self):
        """Project under test; honours a test-set ``_project`` override."""
        # Hack: allow project to be overridden. This needs refactoring so that
        # the project object is built exactly once, by the command object, and
        # accessed by the test case object.
        if hasattr(self, '_project'):
            return self._project
        return self.command.get_project()
    def test_help(self):
        """`help up` exits via SystemExit carrying the subcommand usage."""
        old_base_dir = self.command.base_dir
        self.command.base_dir = 'tests/fixtures/no-composefile'
        with self.assertRaises(SystemExit) as exc_context:
            self.command.dispatch(['help', 'up'], None)
        self.assertIn('Usage: up [options] [SERVICE...]', str(exc_context.exception))
        # self.project.kill() fails during teardown
        # unless there is a composefile.
        self.command.base_dir = old_base_dir
# TODO: address the "Inappropriate ioctl for device" warnings in test output
    @mock.patch('sys.stdout', new_callable=StringIO)
    def test_ps(self, mock_stdout):
        """`ps` lists a created container by its composed name."""
        self.project.get_service('simple').create_container()
        self.command.dispatch(['ps'], None)
        self.assertIn('simplecomposefile_simple_1', mock_stdout.getvalue())
    @mock.patch('sys.stdout', new_callable=StringIO)
    def test_ps_default_composefile(self, mock_stdout):
        """Without -f, `ps` reads the default compose file only."""
        self.command.base_dir = 'tests/fixtures/multiple-composefiles'
        self.command.dispatch(['up', '-d'], None)
        self.command.dispatch(['ps'], None)
        output = mock_stdout.getvalue()
        self.assertIn('multiplecomposefiles_simple_1', output)
        self.assertIn('multiplecomposefiles_another_1', output)
        self.assertNotIn('multiplecomposefiles_yetanother_1', output)
    @mock.patch('sys.stdout', new_callable=StringIO)
    def test_ps_alternate_composefile(self, mock_stdout):
        """With -f, `ps` shows only services from the chosen compose file."""
        config_path = os.path.abspath(
            'tests/fixtures/multiple-composefiles/compose2.yml')
        # Override the project so tearDown cleans up compose2's containers.
        self._project = self.command.get_project(config_path)
        self.command.base_dir = 'tests/fixtures/multiple-composefiles'
        self.command.dispatch(['-f', 'compose2.yml', 'up', '-d'], None)
        self.command.dispatch(['-f', 'compose2.yml', 'ps'], None)
        output = mock_stdout.getvalue()
        self.assertNotIn('multiplecomposefiles_simple_1', output)
        self.assertNotIn('multiplecomposefiles_another_1', output)
        self.assertIn('multiplecomposefiles_yetanother_1', output)
    @mock.patch('compose.service.log')
    def test_pull(self, mock_logging):
        """`pull` logs one pull message per defined service."""
        self.command.dispatch(['pull'], None)
        mock_logging.info.assert_any_call('Pulling simple (busybox:latest)...')
        mock_logging.info.assert_any_call('Pulling another (busybox:latest)...')
    @mock.patch('compose.service.log')
    def test_pull_with_digest(self, mock_logging):
        """`pull` keeps the @sha256 digest in the logged image reference."""
        self.command.dispatch(['-f', 'digest.yml', 'pull'], None)
        mock_logging.info.assert_any_call('Pulling simple (busybox:latest)...')
        mock_logging.info.assert_any_call(
            'Pulling digest (busybox@'
            'sha256:38a203e1986cf79639cfb9b2e1d6e773de84002feea2d4eb006b52004ee8502d)...')
@mock.patch('sys.stdout', new_callable=StringIO)
def test_build_no_cache(self, mock_stdout):
self.command.base_dir = 'tests/fixtures/simple-dockerfile'
self.command.dispatch(['build', 'simple'], None)
mock_stdout.truncate(0)
cache_indicator = 'Using cache'
self.command.dispatch(['build', 'simple'], None)
output = mock_stdout.getvalue()
self.assertIn(cache_indicator, output)
mock_stdout.truncate(0)
self.command.dispatch(['build', '--no-cache', 'simple'], None)
output = mock_stdout.getvalue()
self.assertNotIn(cache_indicator, output)
    def test_up_detached(self):
        """`up -d` starts every service without attaching stdio."""
        self.command.dispatch(['up', '-d'], None)
        service = self.project.get_service('simple')
        another = self.project.get_service('another')
        self.assertEqual(len(service.containers()), 1)
        self.assertEqual(len(another.containers()), 1)
        # Ensure containers don't have stdin and stdout connected in -d mode
        container, = service.containers()
        self.assertFalse(container.get('Config.AttachStderr'))
        self.assertFalse(container.get('Config.AttachStdout'))
        self.assertFalse(container.get('Config.AttachStdin'))
    def test_up_attached(self):
        """Plain `up` attaches a log printer covering every container."""
        with mock.patch(
            'compose.cli.main.attach_to_logs',
            autospec=True
        ) as mock_attach:
            self.command.dispatch(['up'], None)
            # attach_to_logs(project, log_printer, names, timeout)
            _, args, kwargs = mock_attach.mock_calls[0]
            _project, log_printer, _names, _timeout = args
        service = self.project.get_service('simple')
        another = self.project.get_service('another')
        self.assertEqual(len(service.containers()), 1)
        self.assertEqual(len(another.containers()), 1)
        self.assertEqual(
            set(log_printer.containers),
            set(self.project.containers())
        )
    def test_up_with_links(self):
        """`up web` also starts the linked db but not the unrelated console."""
        self.command.base_dir = 'tests/fixtures/links-composefile'
        self.command.dispatch(['up', '-d', 'web'], None)
        web = self.project.get_service('web')
        db = self.project.get_service('db')
        console = self.project.get_service('console')
        self.assertEqual(len(web.containers()), 1)
        self.assertEqual(len(db.containers()), 1)
        self.assertEqual(len(console.containers()), 0)
    def test_up_with_no_deps(self):
        """`up --no-deps web` must not start linked services."""
        self.command.base_dir = 'tests/fixtures/links-composefile'
        self.command.dispatch(['up', '-d', '--no-deps', 'web'], None)
        web = self.project.get_service('web')
        db = self.project.get_service('db')
        console = self.project.get_service('console')
        self.assertEqual(len(web.containers()), 1)
        self.assertEqual(len(db.containers()), 0)
        self.assertEqual(len(console.containers()), 0)
    def test_up_with_force_recreate(self):
        """`up --force-recreate` replaces containers even when unchanged."""
        self.command.dispatch(['up', '-d'], None)
        service = self.project.get_service('simple')
        self.assertEqual(len(service.containers()), 1)
        old_ids = [c.id for c in service.containers()]
        self.command.dispatch(['up', '-d', '--force-recreate'], None)
        self.assertEqual(len(service.containers()), 1)
        new_ids = [c.id for c in service.containers()]
        # New container IDs prove the old containers were replaced.
        self.assertNotEqual(old_ids, new_ids)
    def test_up_with_no_recreate(self):
        """`up --no-recreate` keeps the existing containers untouched."""
        self.command.dispatch(['up', '-d'], None)
        service = self.project.get_service('simple')
        self.assertEqual(len(service.containers()), 1)
        old_ids = [c.id for c in service.containers()]
        self.command.dispatch(['up', '-d', '--no-recreate'], None)
        self.assertEqual(len(service.containers()), 1)
        new_ids = [c.id for c in service.containers()]
        # Unchanged container IDs prove nothing was recreated.
        self.assertEqual(old_ids, new_ids)
    def test_up_with_force_recreate_and_no_recreate(self):
        """The two recreate flags are mutually exclusive."""
        with self.assertRaises(UserError):
            self.command.dispatch(['up', '-d', '--force-recreate', '--no-recreate'], None)
    def test_up_with_timeout(self):
        """`up -d -t 1` starts services with a custom shutdown timeout."""
        self.command.dispatch(['up', '-d', '-t', '1'], None)
        service = self.project.get_service('simple')
        another = self.project.get_service('another')
        self.assertEqual(len(service.containers()), 1)
        self.assertEqual(len(another.containers()), 1)
        # Ensure containers don't have stdin and stdout connected in -d mode
        config = service.containers()[0].inspect()['Config']
        self.assertFalse(config['AttachStderr'])
        self.assertFalse(config['AttachStdout'])
        self.assertFalse(config['AttachStdin'])
@mock.patch('dockerpty.start')
def test_run_service_without_links(self, mock_stdout):
self.command.base_dir = 'tests/fixtures/links-composefile'
self.command.dispatch(['run', 'console', '/bin/true'], None)
self.assertEqual(len(self.project.containers()), 0)
# Ensure stdin/out was open
container = self.project.containers(stopped=True, one_off=True)[0]
config = container.inspect()['Config']
self.assertTrue(config['AttachStderr'])
self.assertTrue(config['AttachStdout'])
self.assertTrue(config['AttachStdin'])
    @mock.patch('dockerpty.start')
    def test_run_service_with_links(self, _):
        """`run web` starts the linked db but not the unrelated console."""
        self.command.base_dir = 'tests/fixtures/links-composefile'
        self.command.dispatch(['run', 'web', '/bin/true'], None)
        db = self.project.get_service('db')
        console = self.project.get_service('console')
        self.assertEqual(len(db.containers()), 1)
        self.assertEqual(len(console.containers()), 0)
    @mock.patch('dockerpty.start')
    def test_run_with_no_deps(self, _):
        """`run --no-deps` skips starting linked services."""
        self.command.base_dir = 'tests/fixtures/links-composefile'
        self.command.dispatch(['run', '--no-deps', 'web', '/bin/true'], None)
        db = self.project.get_service('db')
        self.assertEqual(len(db.containers()), 0)
    @mock.patch('dockerpty.start')
    def test_run_does_not_recreate_linked_containers(self, _):
        """`run` reuses running linked containers instead of recreating."""
        self.command.base_dir = 'tests/fixtures/links-composefile'
        self.command.dispatch(['up', '-d', 'db'], None)
        db = self.project.get_service('db')
        self.assertEqual(len(db.containers()), 1)
        old_ids = [c.id for c in db.containers()]
        self.command.dispatch(['run', 'web', '/bin/true'], None)
        self.assertEqual(len(db.containers()), 1)
        new_ids = [c.id for c in db.containers()]
        # Same IDs: the pre-existing db container was reused.
        self.assertEqual(old_ids, new_ids)
    @mock.patch('dockerpty.start')
    def test_run_without_command(self, _):
        """`run` with no command falls back to the image/service default."""
        self.command.base_dir = 'tests/fixtures/commands-composefile'
        self.check_build('tests/fixtures/simple-dockerfile', tag='composetest_test')
        # 'implicit' declares no command: the image CMD (shell form) is used.
        self.command.dispatch(['run', 'implicit'], None)
        service = self.project.get_service('implicit')
        containers = service.containers(stopped=True, one_off=True)
        self.assertEqual(
            [c.human_readable_command for c in containers],
            [u'/bin/sh -c echo "success"'],
        )
        # 'explicit' declares its own command, which wins over the image CMD.
        self.command.dispatch(['run', 'explicit'], None)
        service = self.project.get_service('explicit')
        containers = service.containers(stopped=True, one_off=True)
        self.assertEqual(
            [c.human_readable_command for c in containers],
            [u'/bin/true'],
        )
    @mock.patch('dockerpty.start')
    def test_run_service_with_entrypoint_overridden(self, _):
        """`run --entrypoint` replaces the Dockerfile ENTRYPOINT."""
        self.command.base_dir = 'tests/fixtures/dockerfile_with_entrypoint'
        name = 'service'
        self.command.dispatch(
            ['run', '--entrypoint', '/bin/echo', name, 'helloworld'],
            None
        )
        service = self.project.get_service(name)
        container = service.containers(stopped=True, one_off=True)[0]
        self.assertEqual(
            shlex.split(container.human_readable_command),
            [u'/bin/echo', u'helloworld'],
        )
    @mock.patch('dockerpty.start')
    def test_run_service_with_user_overridden(self, _):
        """`run --user=<name>` sets the container's user."""
        self.command.base_dir = 'tests/fixtures/user-composefile'
        name = 'service'
        user = 'sshd'
        args = ['run', '--user={user}'.format(user=user), name]
        self.command.dispatch(args, None)
        service = self.project.get_service(name)
        container = service.containers(stopped=True, one_off=True)[0]
        self.assertEqual(user, container.get('Config.User'))
    @mock.patch('dockerpty.start')
    def test_run_service_with_user_overridden_short_form(self, _):
        """`run -u <name>` is the short form of --user."""
        self.command.base_dir = 'tests/fixtures/user-composefile'
        name = 'service'
        user = 'sshd'
        args = ['run', '-u', user, name]
        self.command.dispatch(args, None)
        service = self.project.get_service(name)
        container = service.containers(stopped=True, one_off=True)[0]
        self.assertEqual(user, container.get('Config.User'))
    @mock.patch('dockerpty.start')
    def test_run_service_with_environement_overridden(self, _):
        """`run -e` merges CLI environment entries over the compose file's."""
        name = 'service'
        self.command.base_dir = 'tests/fixtures/environment-composefile'
        self.command.dispatch(
            ['run', '-e', 'foo=notbar', '-e', 'allo=moto=bobo',
             '-e', 'alpha=beta', name],
            None
        )
        service = self.project.get_service(name)
        container = service.containers(stopped=True, one_off=True)[0]
        # env overriden
        self.assertEqual('notbar', container.environment['foo'])
        # keep environement from yaml
        self.assertEqual('world', container.environment['hello'])
        # added option from command line
        self.assertEqual('beta', container.environment['alpha'])
        # make sure a value with a = don't crash out
        self.assertEqual('moto=bobo', container.environment['allo'])
    @mock.patch('dockerpty.start')
    def test_run_service_without_map_ports(self, _):
        """By default `run` does not publish the service's ports."""
        # create one off container
        self.command.base_dir = 'tests/fixtures/ports-composefile'
        self.command.dispatch(['run', '-d', 'simple'], None)
        container = self.project.get_service('simple').containers(one_off=True)[0]
        # get port information
        port_random = container.get_local_port(3000)
        port_assigned = container.get_local_port(3001)
        # close all one off containers we just created
        container.stop()
        # check the ports
        self.assertEqual(port_random, None)
        self.assertEqual(port_assigned, None)
    @mock.patch('dockerpty.start')
    def test_run_service_with_map_ports(self, _):
        """`run --service-ports` publishes ports exactly as `up` would."""
        # create one off container
        self.command.base_dir = 'tests/fixtures/ports-composefile'
        self.command.dispatch(['run', '-d', '--service-ports', 'simple'], None)
        container = self.project.get_service('simple').containers(one_off=True)[0]
        # get port information
        port_random = container.get_local_port(3000)
        port_assigned = container.get_local_port(3001)
        port_range = container.get_local_port(3002), container.get_local_port(3003)
        # close all one off containers we just created
        container.stop()
        # check the ports
        self.assertNotEqual(port_random, None)
        self.assertIn("0.0.0.0", port_random)
        self.assertEqual(port_assigned, "0.0.0.0:49152")
        self.assertEqual(port_range[0], "0.0.0.0:49153")
        self.assertEqual(port_range[1], "0.0.0.0:49154")
    @mock.patch('dockerpty.start')
    def test_run_service_with_explicitly_maped_ports(self, _):
        """`run -p`/`--publish` maps the given host:container ports."""
        # create one off container
        self.command.base_dir = 'tests/fixtures/ports-composefile'
        self.command.dispatch(['run', '-d', '-p', '30000:3000', '--publish', '30001:3001', 'simple'], None)
        container = self.project.get_service('simple').containers(one_off=True)[0]
        # get port information
        port_short = container.get_local_port(3000)
        port_full = container.get_local_port(3001)
        # close all one off containers we just created
        container.stop()
        # check the ports
        self.assertEqual(port_short, "0.0.0.0:30000")
        self.assertEqual(port_full, "0.0.0.0:30001")
    @mock.patch('dockerpty.start')
    def test_run_service_with_explicitly_maped_ip_ports(self, _):
        """`run -p ip:host:container` binds to the requested interface."""
        # create one off container
        self.command.base_dir = 'tests/fixtures/ports-composefile'
        self.command.dispatch(['run', '-d', '-p', '127.0.0.1:30000:3000', '--publish', '127.0.0.1:30001:3001', 'simple'], None)
        container = self.project.get_service('simple').containers(one_off=True)[0]
        # get port information
        port_short = container.get_local_port(3000)
        port_full = container.get_local_port(3001)
        # close all one off containers we just created
        container.stop()
        # check the ports
        self.assertEqual(port_short, "127.0.0.1:30000")
        self.assertEqual(port_full, "127.0.0.1:30001")
    @mock.patch('dockerpty.start')
    def test_run_with_custom_name(self, _):
        """`run --name` gives the one-off container an explicit name."""
        self.command.base_dir = 'tests/fixtures/environment-composefile'
        name = 'the-container-name'
        self.command.dispatch(['run', '--name', name, 'service'], None)
        service = self.project.get_service('service')
        container, = service.containers(stopped=True, one_off=True)
        self.assertEqual(container.name, name)
def test_rm(self):
service = self.project.get_service('simple')
service.create_container()
service.kill()
self.assertEqual(len(service.containers(stopped=True)), 1)
self.command.dispatch(['rm', '--force'], None)
self.assertEqual(len(service.containers(stopped=True)), 0)
service = self.project.get_service('simple')
service.create_container()
service.kill()
self.assertEqual(len(service.containers(stopped=True)), 1)
self.command.dispatch(['rm', '-f'], None)
self.assertEqual(len(service.containers(stopped=True)), 0)
    def test_stop(self):
        """`stop -t 1` leaves the container present but not running."""
        self.command.dispatch(['up', '-d'], None)
        service = self.project.get_service('simple')
        self.assertEqual(len(service.containers()), 1)
        self.assertTrue(service.containers()[0].is_running)
        self.command.dispatch(['stop', '-t', '1'], None)
        self.assertEqual(len(service.containers(stopped=True)), 1)
        self.assertFalse(service.containers(stopped=True)[0].is_running)
    def test_pause_unpause(self):
        """`pause` and `unpause` toggle the container's paused state."""
        self.command.dispatch(['up', '-d'], None)
        service = self.project.get_service('simple')
        self.assertFalse(service.containers()[0].is_paused)
        self.command.dispatch(['pause'], None)
        self.assertTrue(service.containers()[0].is_paused)
        self.command.dispatch(['unpause'], None)
        self.assertFalse(service.containers()[0].is_paused)
    def test_logs_invalid_service_name(self):
        """`logs` for an unknown service raises NoSuchService."""
        with self.assertRaises(NoSuchService):
            self.command.dispatch(['logs', 'madeupname'], None)
    def test_kill(self):
        """`kill` stops the running container (SIGKILL by default)."""
        self.command.dispatch(['up', '-d'], None)
        service = self.project.get_service('simple')
        self.assertEqual(len(service.containers()), 1)
        self.assertTrue(service.containers()[0].is_running)
        self.command.dispatch(['kill'], None)
        self.assertEqual(len(service.containers(stopped=True)), 1)
        self.assertFalse(service.containers(stopped=True)[0].is_running)
    def test_kill_signal_sigstop(self):
        """`kill -s SIGSTOP` suspends the process without terminating it."""
        self.command.dispatch(['up', '-d'], None)
        service = self.project.get_service('simple')
        self.assertEqual(len(service.containers()), 1)
        self.assertTrue(service.containers()[0].is_running)
        self.command.dispatch(['kill', '-s', 'SIGSTOP'], None)
        self.assertEqual(len(service.containers()), 1)
        # The container is still running. It has only been paused
        self.assertTrue(service.containers()[0].is_running)
    def test_kill_stopped_service(self):
        """`kill -s SIGKILL` terminates even a SIGSTOP-suspended container."""
        self.command.dispatch(['up', '-d'], None)
        service = self.project.get_service('simple')
        self.command.dispatch(['kill', '-s', 'SIGSTOP'], None)
        self.assertTrue(service.containers()[0].is_running)
        self.command.dispatch(['kill', '-s', 'SIGKILL'], None)
        self.assertEqual(len(service.containers(stopped=True)), 1)
        self.assertFalse(service.containers(stopped=True)[0].is_running)
    def test_restart(self):
        """`restart` stops and starts the container (state timestamps move)."""
        service = self.project.get_service('simple')
        container = service.create_container()
        service.start_container(container)
        started_at = container.dictionary['State']['StartedAt']
        self.command.dispatch(['restart', '-t', '1'], None)
        container.inspect()
        # FinishedAt leaves the zero value once the container has stopped.
        self.assertNotEqual(
            container.dictionary['State']['FinishedAt'],
            '0001-01-01T00:00:00Z',
        )
        # StartedAt changes again when the container comes back up.
        self.assertNotEqual(
            container.dictionary['State']['StartedAt'],
            started_at,
        )
    def test_scale(self):
        """`scale` grows and shrinks services to the requested counts."""
        project = self.project
        self.command.scale(project, {'SERVICE=NUM': ['simple=1']})
        self.assertEqual(len(project.get_service('simple').containers()), 1)
        self.command.scale(project, {'SERVICE=NUM': ['simple=3', 'another=2']})
        self.assertEqual(len(project.get_service('simple').containers()), 3)
        self.assertEqual(len(project.get_service('another').containers()), 2)
        self.command.scale(project, {'SERVICE=NUM': ['simple=1', 'another=1']})
        self.assertEqual(len(project.get_service('simple').containers()), 1)
        self.assertEqual(len(project.get_service('another').containers()), 1)
        # Repeating the same scale request must be a no-op (idempotent).
        self.command.scale(project, {'SERVICE=NUM': ['simple=1', 'another=1']})
        self.assertEqual(len(project.get_service('simple').containers()), 1)
        self.assertEqual(len(project.get_service('another').containers()), 1)
        self.command.scale(project, {'SERVICE=NUM': ['simple=0', 'another=0']})
        self.assertEqual(len(project.get_service('simple').containers()), 0)
        self.assertEqual(len(project.get_service('another').containers()), 0)
    def test_port(self):
        """`port` prints the host address bound to a container port."""
        self.command.base_dir = 'tests/fixtures/ports-composefile'
        self.command.dispatch(['up', '-d'], None)
        container = self.project.get_service('simple').get_container()
        @mock.patch('sys.stdout', new_callable=StringIO)
        def get_port(number, mock_stdout):
            # Capture what the `port` command writes to stdout.
            self.command.dispatch(['port', 'simple', str(number)], None)
            return mock_stdout.getvalue().rstrip()
        self.assertEqual(get_port(3000), container.get_local_port(3000))
        # Ports 3001/3002 are published to fixed host ports by the fixture.
        self.assertEqual(get_port(3001), "0.0.0.0:49152")
        self.assertEqual(get_port(3002), "0.0.0.0:49153")
    def test_port_with_scale(self):
        """`port --index=N` selects the Nth container of a scaled service."""
        self.command.base_dir = 'tests/fixtures/ports-composefile-scale'
        self.command.dispatch(['scale', 'simple=2'], None)
        # Sort by container name so --index=1 corresponds to containers[0].
        containers = sorted(
            self.project.containers(service_names=['simple']),
            key=attrgetter('name'))
        @mock.patch('sys.stdout', new_callable=StringIO)
        def get_port(number, mock_stdout, index=None):
            if index is None:
                self.command.dispatch(['port', 'simple', str(number)], None)
            else:
                self.command.dispatch(['port', '--index=' + str(index), 'simple', str(number)], None)
            return mock_stdout.getvalue().rstrip()
        # Without --index the first container is used.
        self.assertEqual(get_port(3000), containers[0].get_local_port(3000))
        self.assertEqual(get_port(3000, index=1), containers[0].get_local_port(3000))
        self.assertEqual(get_port(3000, index=2), containers[1].get_local_port(3000))
        # An unpublished port prints nothing.
        self.assertEqual(get_port(3002), "")
    def test_env_file_relative_to_compose_file(self):
        """env_file paths resolve relative to the compose file's directory."""
        config_path = os.path.abspath('tests/fixtures/env-file/docker-compose.yml')
        self.command.dispatch(['-f', config_path, 'up', '-d'], None)
        self._project = self.command.get_project(config_path)
        containers = self.project.containers(stopped=True)
        self.assertEqual(len(containers), 1)
        # FOO=1 is defined in the env file referenced by the fixture.
        self.assertIn("FOO=1", containers[0].get('Config.Env'))
    @mock.patch.dict(os.environ)
    def test_home_and_env_var_in_volume_path(self):
        """$HOME and $VARS in volume host paths expand from the environment."""
        os.environ['VOLUME_NAME'] = 'my-volume'
        os.environ['HOME'] = '/tmp/home-dir'
        expected_host_path = os.path.join(os.environ['HOME'], os.environ['VOLUME_NAME'])
        self.command.base_dir = 'tests/fixtures/volume-path-interpolation'
        self.command.dispatch(['up', '-d'], None)
        container = self.project.containers(stopped=True)[0]
        actual_host_path = container.get('Volumes')['/container-path']
        # Compare only the trailing components: the daemon may prefix the host
        # path (e.g. under boot2docker), so the full path is not stable.
        components = actual_host_path.split('/')
        self.assertTrue(components[-2:] == ['home-dir', 'my-volume'],
            msg="Last two components differ: %s, %s" % (actual_host_path, expected_host_path))
    def test_up_with_extends(self):
        """`up` on a file using `extends` merges services, links and env vars."""
        self.command.base_dir = 'tests/fixtures/extends'
        self.command.dispatch(['up', '-d'], None)
        self.assertEqual(
            set([s.name for s in self.project.services]),
            set(['mydb', 'myweb']),
        )
        # Sort by name so we get [db, web]
        containers = sorted(
            self.project.containers(stopped=True),
            key=lambda c: c.name,
        )
        self.assertEqual(len(containers), 2)
        web = containers[1]
        # The web container is linked to the db under all of its aliases.
        self.assertEqual(set(web.links()), set(['db', 'mydb_1', 'extends_mydb_1']))
        # Env vars merged from the base file and the extending file.
        expected_env = set([
            "FOO=1",
            "BAR=2",
            "BAZ=2",
        ])
        self.assertTrue(expected_env <= set(web.get('Config.Env')))
| |
# vim:ts=4:sts=4:sw=4:expandtab
from django.db import models
from django.db.models.signals import post_save
from django.dispatch import receiver
import sys
from satori.core.export.docstring import trim_docstring
from satori.core.export.type_helpers import Struct, DefineException
# Exported exception types: raised back to API clients with the message
# template filled in from the keyword fields.
BadAttributeType = DefineException('BadAttributeType', 'The requested attribute "{name}" is not a {requested_type} attribute',
        [('name', unicode, False), ('requested_type', unicode, False)])
MissingBlob = DefineException('MissingBlob', 'The requested blob does not exist', [])
# Wire struct for the attribute API.  Field tuples are (name, type, optional);
# `value` holds the string payload, or the blob hash when is_blob is True.
Attribute = Struct('Attribute', (
    ('name', str, False),
    ('is_blob', bool, False),
    ('value', str, False),
    ('filename', str, True),
    ))
# Same shape but with an optional name -- used by the *_map calls, where the
# attribute name is carried by the dictionary key instead of the struct.
AnonymousAttribute = Struct('AnonymousAttribute', (
    ('is_blob', bool, False),
    ('value', str, False),
    ('name', str, True),
    ('filename', str, True),
    ))
code_attributegroup_fixup = """
def fixup_{1}(self):
pass
# try:
# x = self.{1}
# except AttributeGroup.DoesNotExist:
# {1} = AttributeGroup()
# {1}.save()
# self.{1} = {1}
"""
code_attributegroup_methods = """
@ExportMethod(Attribute, [DjangoId('{0}'), unicode], pc_read)
def {1}_get(self, name):
\"\"\"Attribute group: {1}\"\"\"
self = self{2}
try:
return self.attributes.get(name=name)
except OpenAttribute.DoesNotExist:
return None
@ExportMethod(unicode, [DjangoId('{0}'), unicode, unicode], pc_read, [BadAttributeType])
def {1}_get_str(self, name, fallback=None):
\"\"\"Attribute group: {1}\"\"\"
oa = self.{1}_get(name)
if oa is None:
return fallback
elif oa.is_blob:
raise BadAttributeType(name=name, requested_type='string')
else:
return oa.value
@ExportMethod(unicode, [DjangoId('{0}'), unicode], pc_read, [BadAttributeType])
def {1}_get_blob(self, name):
\"\"\"Attribute group: {1}\"\"\"
oa = self.{1}_get(name)
if oa is None:
return None
elif not oa.is_blob:
raise BadAttributeType(name=name, requested_type='blob')
return Blob.open(oa.value, oa.filename)
@ExportMethod(unicode, [DjangoId('{0}'), unicode], pc_read, [BadAttributeType])
def {1}_get_blob_hash(self, name):
\"\"\"Attribute group: {1}\"\"\"
oa = self.{1}_get(name)
if oa is None:
return None
elif not oa.is_blob:
raise BadAttributeType(name=name, requested_type='blob')
else:
return oa.value
@ExportMethod(TypedList(Attribute), [DjangoId('{0}')], pc_read)
def {1}_get_list(self):
\"\"\"Attribute group: {1}\"\"\"
return self{2}.attributes.all()
@ExportMethod(TypedMap(unicode, AnonymousAttribute), [DjangoId('{0}')], pc_read)
def {1}_get_map(self):
\"\"\"Attribute group: {1}\"\"\"
return dict((oa.name, oa) for oa in self{2}.attributes.all())
@ExportMethod(NoneType, [DjangoId('{0}'), Attribute], PCAnd(pc_write, PCRawBlob('value')), [MissingBlob])
def {1}_set(self, value):
\"\"\"Attribute group: {1}\"\"\"
self = self{2}
if value.is_blob and not Blob.exists(value.value):
raise MissingBlob()
(newoa, created) = self.attributes.get_or_create(name=value.name)
newoa.is_blob = value.is_blob
newoa.value = value.value
if value.is_blob and hasattr(value, 'filename') and value.filename is not None:
newoa.filename = value.filename
else:
newoa.filename = ''
newoa.save()
@ExportMethod(NoneType, [DjangoId('{0}'), unicode, unicode], pc_write)
def {1}_set_str(self, name, value):
\"\"\"Attribute group: {1}\"\"\"
self.{1}_set(Attribute(name=name, value=value, is_blob=False))
@ExportMethod(NoneType, [DjangoId('{0}'), unicode, int, unicode], pc_write)
def {1}_set_blob(self, name, length=-1, filename=''):
\"\"\"Attribute group: {1}\"\"\"
def set_hash(hash):
self.{1}_set(OpenAttribute(name=name, value=hash, filename=filename, is_blob=True))
return Blob.create(length, set_hash)
@ExportMethod(NoneType, [DjangoId('{0}'), unicode, unicode, unicode], PCAnd(pc_write, PCGlobal('RAW_BLOB')), [MissingBlob])
def {1}_set_blob_hash(self, name, value, filename=''):
\"\"\"Attribute group: {1}\"\"\"
self.{1}_set(Attribute(name=name, value=value, filename=filename, is_blob=True))
@ExportMethod(NoneType, [DjangoId('{0}'), TypedList(Attribute)], PCAnd(pc_write, PCEach('attributes', PCRawBlob('item'))), [MissingBlob])
def {1}_add_list(self, attributes):
\"\"\"Attribute group: {1}\"\"\"
for struct in attributes:
self.{1}_set(struct)
@ExportMethod(NoneType, [DjangoId('{0}'), TypedList(Attribute)], PCAnd(pc_write, PCEach('attributes', PCRawBlob('item'))), [MissingBlob])
def {1}_set_list(self, attributes):
\"\"\"Attribute group: {1}\"\"\"
self{2}.attributes.all().delete()
self.{1}_add_list(attributes)
@ExportMethod(NoneType, [DjangoId('{0}'), TypedMap(unicode, AnonymousAttribute)], PCAnd(pc_write, PCEachValue('attributes', PCRawBlob('item'))), [MissingBlob])
def {1}_add_map(self, attributes):
\"\"\"Attribute group: {1}\"\"\"
for name, struct in attributes.items():
struct.name = name
self.{1}_set(struct)
@ExportMethod(NoneType, [DjangoId('{0}'), TypedMap(unicode, AnonymousAttribute)], PCAnd(pc_write, PCEachValue('attributes', PCRawBlob('item'))), [MissingBlob])
def {1}_set_map(self, attributes):
\"\"\"Attribute group: {1}\"\"\"
self{2}.attributes.all().delete()
self.{1}_add_map(attributes)
@ExportMethod(NoneType, [DjangoId('{0}'), unicode], pc_write)
def {1}_delete(self, name):
\"\"\"Attribute group: {1}\"\"\"
oa = self.{1}_get(name)
if oa is not None:
oa.delete()
"""
# (model class, field name, doc) triples collected while the model classes are
# being defined; consumed once by init() to extend each class docstring.
docstrings_to_append = []
class DefaultAttributeGroupField(object):
    """Pseudo-field that injects attribute-group accessor methods into a model.

    Unlike AttributeGroupField it creates no database column: the generated
    methods operate on the model instance itself (empty {2} format argument).
    Python 2 only -- relies on the `exec ... in` statement.
    """
    def __init__(self, pc_read, pc_write, doc):
        self.doc = doc
        self.pc_read = pc_read    # permission check applied to getters
        self.pc_write = pc_write  # permission check applied to setters
    def contribute_to_class(self, cls, name):
        # Django model-metaclass hook: render the accessor template and exec
        # it with satori.core.models' globals so model names resolve.
        global_dict = sys.modules['satori.core.models'].__dict__
        local_dict = {'pc_read': self.pc_read, 'pc_write': self.pc_write}
        exec compile(code_attributegroup_methods.format(cls.__name__, name, ''), '<oa methods code>', 'exec') in global_dict, local_dict
        # Everything left in local_dict besides the permission checks is a
        # freshly generated function -- attach them all to the model class.
        del local_dict['pc_read']
        del local_dict['pc_write']
        for (meth_name, meth) in local_dict.items():
            setattr(cls, meth_name, meth)
        docstrings_to_append.append((cls, name, self.doc))
class AttributeGroupField(models.OneToOneField):
    """OneToOne link to an AttributeGroup row plus generated accessor methods.

    contribute_to_class exec's the method templates into the model class and
    wires a post_save receiver that points the group back at its owner;
    pre_save creates the AttributeGroup row on first save.
    Python 2 only -- relies on the `exec ... in` statement.
    """
    def __init__(self, pc_read, pc_write, doc):
        super(AttributeGroupField, self).__init__('AttributeGroup', related_name='+', on_delete=models.CASCADE)
        self.doc = doc
        self.pc_read = pc_read    # permission check applied to getters
        self.pc_write = pc_write  # permission check applied to setters
    def contribute_to_class(self, cls, name):
        # Let Django create the real OneToOne descriptor first.
        super(AttributeGroupField, self).contribute_to_class(cls, name)
        global_dict = sys.modules['satori.core.models'].__dict__
        local_dict = {'pc_read': self.pc_read, 'pc_write': self.pc_write}
        # '.' + name: generated methods reach attributes via self.<field>.
        exec compile(code_attributegroup_methods.format(cls.__name__, name, '.' + name), '<oa methods code>', 'exec') in global_dict, local_dict
        exec compile(code_attributegroup_fixup.format(cls.__name__, name, '.' + name), '<oa methods code>', 'exec') in global_dict, local_dict
        # Everything left in local_dict besides the permission checks is a
        # freshly generated function -- attach them all to the model class.
        del local_dict['pc_read']
        del local_dict['pc_write']
        for (meth_name, meth) in local_dict.items():
            setattr(cls, meth_name, meth)
        docstrings_to_append.append((cls, name, self.doc))
        # After the owning row is first saved, point the attribute group back
        # at its enclosing entity so lookups can walk in both directions.
        @receiver(post_save, sender=cls, weak=False)
        def update_refs(sender, instance, created, **kwargs):
            if created:
                oag = getattr(instance, self.name)
                oag.enclosing_entity = instance
                oag.save()
    def pre_save(self, model_instance, add):
        # Lazily create the AttributeGroup row the first time the owner is saved.
        if add:
            ag = AttributeGroup()
            ag.save()
            setattr(model_instance, self.name, ag)
        return super(AttributeGroupField, self).pre_save(model_instance, add)
# bad, because installed after post_syncdb signal:
#    def post_create_sql(self, style, table_name):
#        trigger = """
#CREATE FUNCTION fixup_oagroup_{0}_{1}() RETURNS trigger AS $$
#  BEGIN
#    UPDATE core_attributegroup SET enclosing_entity_id = NEW.{2} WHERE parent_entity_id = NEW.{1};
#    RETURN NEW;
#  END;
#$$ LANGUAGE plpgsql;
#
#CREATE TRIGGER fixup_oagroup_{0}_{1} AFTER INSERT ON {0} FOR EACH ROW EXECUTE PROCEDURE fixup_oagroup_{0}_{1}();
#""".format(table_name, self.column, self.model._meta.pk.column)
#        return [trigger]
def init():
    """Splice the collected attribute-group docs into their model docstrings.

    Must run after satori.core.models is importable; also publishes the
    AttributeGroup model as a module global for pre_save above.
    """
    global AttributeGroup
    from satori.core.models import AttributeGroup
    for (model_cls, field_name, group_doc) in docstrings_to_append:
        class_doc = trim_docstring(model_cls.__doc__)
        if class_doc:
            class_doc = class_doc + '\n\n'
        # Add the section header only once per class.
        if 'Attribute groups:' not in class_doc:
            class_doc = class_doc + 'Attribute groups:\n\n'
        class_doc = class_doc + ' .. ars:attributegroup:: {0}\n\n    {1}'.format(field_name, group_doc)
        model_cls.__doc__ = class_doc
| |
"""
CENTRIPETAL!
Sorry for the messy code! I only found out about the contest ~2 days
before it was over, so I didn't have much time to write nice code. :(
Copyright 2012 Dillon Cower, d <last name> @ gmail . com
"""
import kivy
from kivy.app import App
from kivy.uix.widget import Widget
from kivy.properties import NumericProperty, ReferenceListProperty, ObjectProperty
from kivy.vector import Vector
from kivy.factory import Factory
from kivy.clock import Clock
from kivy.graphics import Color, Rectangle, Point, GraphicException, Line, Quad, Ellipse
from kivy.graphics.opengl import glLineWidth
from kivy.uix.gridlayout import GridLayout
from kivy.uix.floatlayout import FloatLayout
from kivy.uix.label import Label
from kivy.core.window import Window
from cmath import polar, rect
from kivy.core.audio import SoundLoader
from math import cos,sin,pi,sqrt,atan,atan2
import random
# Screen centre in pixels; filled in by CentripetalApp.build() from
# Window.center before any widget logic runs.
cx = 0
cy = 0
# The collision functions are somewhat based around code by papalazaru and Dave Eberly:
# http://www.gamedev.net/topic/546941-circle-to-polygon-collision-detection-solved/
def circleToPolygon(circle, quad):
    """Collision test between a circle-like widget and a quad Block.

    circle -- object with `pos` (x, y), radius `r` and squared radius `r2`
    quad   -- Block carrying `pts` (8 floats) and a precomputed AABB
              (lowerx/lowery/upperx/uppery)
    Returns (hit, closest_point, unit_normal); (False, None, None) on miss.
    """
    cpos = Vector(circle.pos[0], circle.pos[1])
    x = cpos[0]
    y = cpos[1]
    r = circle.r
    r2 = circle.r2
    w = quad
    # Cheap AABB rejection before the per-edge tests.
    if (x+r > w.lowerx) and (x-r < w.upperx) and (y+r > w.lowery) and (y-r < w.uppery):
        pts = quad.pts
        # Find the edge whose closest point to the circle centre is nearest
        # and within the radius.  (Removed unused `best_closest` local.)
        closest = None
        normal = None
        best_d = None
        for i in range(4):
            edge_pt1 = Vector(float(pts[i*2]), float(pts[(i*2+1)]))
            edge_pt2 = Vector(float(pts[((i+1)*2)%8]), float(pts[((i+1)*2+1)%8]))
            pt = closestPointOnEdge(cpos, edge_pt1, edge_pt2)
            d = pt.distance2(cpos)
            if d < r2 and (best_d is None or d < best_d):
                closest = pt
                # Normal is the edge direction rotated 90 degrees.
                edge = edge_pt2 - edge_pt1
                normal = Vector(-edge.y, edge.x).normalize()
                best_d = d
        if best_d is not None:
            return (True, closest, normal)
        # No edge within range: the centre may be fully inside the quad
        # (deep penetration) -- fall back to a containment test.
        if polygonContainsPoint(cpos, pts):
            return (True, cpos, cpos.normalize())
    return (False, None, None)
def polygonContainsPoint(p, quad):
    """Ray-casting (PNPOLY) point-in-polygon test for a quad.

    p    -- indexable (x, y) point
    quad -- flat list [x0, y0, x1, y1, x2, y2, x3, y3]
    Returns True when p lies strictly inside the quadrilateral.

    Fixes: the original toggled with bitwise NOT (`c = ~c`), which only
    worked by accident of truthiness (False -> -1 -> 0 -> ...) and returned
    an int; it also carried unused `flags` and `j` locals.
    """
    testx = p[0]
    testy = p[1]
    inside = False
    for i in range(4):
        x1 = float(quad[i*2])
        y1 = float(quad[(i*2+1)])
        x2 = float(quad[((i+1)*2)%8])
        y2 = float(quad[((i+1)*2+1)%8])
        # Toggle for every edge a horizontal ray from p would cross.
        if ((y1 > testy) != (y2 > testy)) and (testx < (x2 - x1) * (testy - y1) / (y2 - y1) + x1):
            inside = not inside
    return inside
def circleToCircle(a, b):
    """Collision test between two circle-like widgets (need `pos` and `r`).

    Returns (hit, contact_point, normal).  The contact point is never
    computed for circle/circle hits (always None); the normal is the unit
    vector pointing from b towards a.
    """
    offset = Vector(a.pos) - Vector(b.pos)
    if offset.length() >= a.r + b.r:
        return (False, None, None)
    return (True, None, offset.normalize())
def closestPointOnEdge(p, a, b):
    """Return the point on segment [a, b] closest to point p.

    All three arguments are kivy Vectors; the result is a Vector too.
    """
    edge = b - a
    edge_len2 = edge.length2()
    if edge_len2 == 0:
        # Degenerate segment: a and b coincide, so a is the answer.
        t = 0.0
    else:
        # Project p onto the infinite line through a and b ...
        t = (p - a).dot(edge) / edge_len2
    # ... then clamp the parameter into the segment.
    t = max(0.0, min(1.0, t))
    return a + (edge * t)
class Ball(Widget):
    """The ball: pulled towards the screen centre, leaving a 30-point trail."""
    r = 30 / 2   # collision radius in pixels
    r2 = r**2    # squared radius, for squared-distance collision tests
    # NOTE: class-level mutable default, but move() rebinds self.trail_pts to
    # an instance list on the first frame, so instances don't share state.
    trail_pts = []
    g_trail = "trail"  # canvas group tag for the trail line
    def setup(self):
        """Reset position (above centre) and velocity for a new life/level."""
        self.pos = (cx, cy+cy/2)
        self.velocity = Vector(0,5)
    def move(self, dt):
        """Advance one frame: redraw the trail, apply the pull, integrate."""
        # Redraw the trail from scratch every frame.
        self.canvas.remove_group(self.g_trail)
        with self.canvas:
            Color(1, 1, 1, 0.5, mode='rgba', group=self.g_trail)
            Line(points=sum(self.trail_pts, []), group=self.g_trail)
        to_center = (Vector(cx, cy) - Vector(self.pos))
        l = self.velocity.length()
        cen_d = to_center.length()
        # Slight damping plus a centre-seeking pull that grows with
        # sqrt(distance), so far-away balls return harder.
        self.velocity = self.velocity * 0.99 + to_center.normalize() * sqrt(cen_d) * 0.04
        if l > 25:
            # Clamp runaway speeds.
            self.velocity = self.velocity.normalize() * 20
        self.pos = Vector(self.pos) + self.velocity * dt * 30
        # Keep the most recent 30 positions, newest first.
        if len(self.trail_pts) == 0:
            self.trail_pts = [self.pos]
        else:
            self.trail_pts.insert(0, self.pos)
        while len(self.trail_pts) > 30:
            self.trail_pts.pop()
class Block(Widget):
    """One destructible quad segment of a ring; pts/AABB set by generate_level."""
    def collide_widget(self, wid):
        # wid is the ball (circle); self carries the quad geometry.
        return circleToPolygon (wid, self)
class CentripetalGame(Widget):
    """Main gameplay widget: ball, paddle, kill space and the block rings."""
    g_level = "level"  # canvas group tag for level geometry
    # Widgets wired up from the kv file.
    paddle = ObjectProperty(None)
    ball = ObjectProperty(None)
    killspace = ObjectProperty(None)
    score = NumericProperty(0)
    best_score = NumericProperty(0)
    sounds = {}   # sound-effect cache, kept across restarts
    blocks = []   # live Block widgets of the current level
    def start(self):
        """(Re)start a game: load sounds, build level 0, reset score and ball."""
        # Mirror the window size onto this widget (kv sizing may lag behind).
        self.width = Window.width
        self.height = Window.height
        self.size = Window.size
        for i in ["hit", "hit_2", "die"]:
            if i not in self.sounds:
                self.sounds[i] = SoundLoader.load(i + ".ogg")
                self.sounds[i].volume = 0.8
        self.n_rings = 3
        self.num_segments = 10
        self.generate_level()
        self.paddle.move(pi/2)
        self.ball.setup()
        self.killspace.pos = (cx, cy)
        self.score = 0
        self.level_num = 0
        with self.canvas:
            glLineWidth(3)
    def generate_level(self):
        """Replace the current blocks with freshly generated concentric rings."""
        for w in self.blocks:
            self.remove_widget(w)
        self.canvas.remove_group(CentripetalGame.g_level)
        self.blocks = []
        color = 0
        width = min(Window.width, Window.height)
        n_rings = self.n_rings
        num_segments = n_rings + 7
        # Ring thickness scales with the window so rings hug the screen edge.
        ring_spacing = width / 60.0 / n_rings * 7
        r_start = width/2 - ring_spacing * n_rings
        for ring in range(n_rings):
            r_1 = ring * ring_spacing + r_start
            r_2 = (ring+1) * ring_spacing + r_start
            step = (2*pi)/num_segments
            for i in range(num_segments):
                angle = step * i
                # Hue drifts gradually around the rings.
                color += random.uniform(0, 1.0 / (num_segments * n_rings) * 2)
                w = Block()
                self.add_widget(w)
                pts = [
                    cx + cos(angle)*r_1, cy + sin(angle)*r_1,
                    cx + cos(angle)*r_2, cy + sin(angle)*r_2,
                    cx + cos(angle + step)*r_2, cy + sin(angle + step)*r_2,
                    cx + cos(angle + step)*r_1, cy + sin(angle + step)*r_1
                ]
                w.pts = pts
                # Cache the quad's AABB for the cheap pre-collision test.
                # (Renamed the inner loop index so it no longer shadows `i`.)
                w.lowerx = pts[0]
                w.lowery = pts[1]
                w.upperx = pts[0]
                w.uppery = pts[1]
                for j in range(0, 8, 2):
                    if pts[j] < w.lowerx: w.lowerx = pts[j]
                    if pts[j] > w.upperx: w.upperx = pts[j]
                    if pts[j+1] < w.lowery: w.lowery = pts[j+1]
                    if pts[j+1] > w.uppery: w.uppery = pts[j+1]
                with w.canvas:
                    Color(color, random.uniform(0.4,1), random.uniform(0.8,1), mode='hsv', group=CentripetalGame.g_level)
                    Quad(points=pts, group=CentripetalGame.g_level)
                self.blocks.append(w)
    def on_touch_down(self, touch):
        self.on_touch_move(touch)
    def on_touch_move(self, touch):
        # Move the paddle onto the ray from the centre through the touch point.
        angle = atan2(touch.y - cy, touch.x - cx)
        self.paddle.move(angle)
    def on_touch_up(self, touch):
        self.on_touch_move(touch)
    def update(self, dt):
        """Per-frame step: move the ball and resolve all collisions."""
        self.ball.move(dt)
        # Ball vs paddle.
        col = self.paddle.collide_widget(self.ball)
        if col[0]:
            self.sounds["hit_2"].play()
            # Push the ball out of the paddle along the collision normal.
            d = self.ball.r + self.paddle.r - (Vector(self.ball.pos) - Vector(self.paddle.pos)).length()
            self.ball.pos = Vector(self.ball.pos) + col[2] * d
            v = self.ball.velocity
            normal = col[2]
            # Reflect; slow balls get a boost (m > 1) so play keeps moving.
            m = max((280-v.length2())/100,1)
            v = v - normal * 2 * (normal.dot(v)) * m
            self.ball.velocity = v
        # Ball vs the central kill space: death restarts the whole game.
        killspace_col_paddle = self.killspace.collide_widget(self.ball)
        if killspace_col_paddle[0]:
            self.sounds["die"].play()
            self.start()
        # Ball vs blocks.  BUG FIX: iterate over a snapshot because the body
        # removes entries from self.blocks; mutating the list being iterated
        # silently skipped the element after each removed block.
        for w in list(self.blocks):
            col = w.collide_widget(self.ball)
            if col[0]:
                self.sounds["hit"].play()
                closest = col[1]
                circle = self.ball
                # Minimum translation: push the ball just clear of the block.
                mtd = Vector(circle.pos[0], circle.pos[1]) - closest
                mtd = mtd.normalize()
                pos = closest + mtd * 1.05 * self.ball.size[0]/2
                self.ball.pos = pos
                v = self.ball.velocity
                normal = col[2]
                v = v - normal * 2 * (normal.dot(v))
                self.ball.velocity = v
                self.remove_widget(w)
                self.blocks.remove(w)
                self.score += 5 + 2 * self.level_num
                self.best_score = max(self.score, self.best_score)
                if len(self.blocks) == 0:
                    # Level cleared: add a ring and rebuild.
                    self.n_rings += 1
                    self.level_num += 1
                    self.ball.setup()
                    self.generate_level()
                    break
class KillSpace(Widget):
    """Central dead zone: the ball dies when it falls into this circle."""
    r = 50/2   # collision radius in pixels
    # Squared radius for squared-distance tests.  BUG FIX: was `r ** r`
    # (25 ** 25), which is meaningless as a squared radius; cf. Ball.r2.
    r2 = r**2
    def collide_widget(self, wid):
        # Delegate to the shared circle/circle test (wid must expose pos and r).
        return circleToCircle(wid, self)
class Paddle(Widget):
    """The player-controlled paddle orbiting the central kill space."""
    group = "paddle"  # canvas group tag
    r = 40/2   # collision radius in pixels
    # Squared radius.  BUG FIX: was `r ** r` (20 ** 20); clearly meant r
    # squared, matching Ball.r2.
    r2 = r**2
    def move(self, angle):
        """Place the paddle on its orbit circle at `angle` radians."""
        self.canvas.remove_group(self.group)
        # cmath.rect converts polar (radius, angle) to a complex x+yi point.
        # (Renamed the local so it no longer shadows the class attribute r.)
        offset = rect(90/2+self.r-20, angle)
        self.pos = (offset.real + cx, offset.imag + cy)
    def collide_widget(self, wid):
        return circleToCircle(wid, self)
# Unfortunately, on my PC, the looping is not seamless. :(
class Music(Widget):
    """Plays the background track, restarting it whenever it ends."""
    sound = None  # loaded lazily so constructing Music() stays cheap
    def start(self):
        if self.sound is None:
            self.sound = SoundLoader.load("music.ogg")
            self.sound.volume = 0.8
            self.sound.play()
            # Loop by replaying on stop (not gapless -- see note above).
            self.sound.on_stop = self.sound.play
class CentripetalMenu(Widget):
    """Title screen: draws the logo and bobs it up and down over time."""
    def start(self):
        self.t = 0.0  # elapsed time driving the bobbing animation
        with self.canvas:
            self.logo = Rectangle(size=(405, 153), pos=(self.size[0]/2 - 405/2, self.size[1]/2 - 153/2 + 150), source='logo.png')
    def update(self, dt):
        # Bob +-10px around a fixed vertical offset.  NOTE(review): start()
        # places the logo at +150 but the animation centre here is +157 --
        # confirm the small first-frame jump is intentional.
        self.logo.pos = (self.size[0]/2 - 405/2, self.size[1]/2 - 153/2 + 157 + sin(self.t * 3) * 10)
        self.t += dt
class CentripetalRoot(Widget):
    """Top-level widget owning the menu, the game and the music."""
    # Game-state constants (only MENU and PLAY are ever entered in this file).
    STATE_MENU = 0
    STATE_PLAY = 1
    STATE_WIN = 2
    STATE_LOSE = 3
    def start(self):
        """Show the menu (animated at 60 fps) and start the music."""
        self.state = CentripetalRoot.STATE_MENU
        self.menu = CentripetalMenu()
        self.menu.size = Window.size
        self.add_widget (self.menu)
        self.menu.start()
        Clock.schedule_interval(self.menu.update, 1.0/60.0)
        self.music = Music()
        self.add_widget (self.music)
        self.music.start()
    def start_game(self):
        """Swap the menu out for a fresh game updating at 60 fps."""
        Clock.unschedule(self.menu.update)
        self.state = CentripetalRoot.STATE_PLAY
        self.remove_widget (self.menu)
        self.game = CentripetalGame()
        self.game.start()
        self.add_widget (self.game)
        Clock.schedule_interval(self.game.update, 1.0/60.0)
    def on_touch_up(self,touch):
        # Deliberately inert: starting the game on any touch is disabled here
        # (the start control is presumably provided by the kv file instead).
        pass
        #if self.state == CentripetalRoot.STATE_MENU:
        #    self.start_game()
# Register the widget classes with the factory so the kv language can
# instantiate them by name.
Factory.register("Paddle", Paddle)
Factory.register("KillSpace", KillSpace)
Factory.register("Block", Block)
Factory.register("Ball", Ball)
Factory.register("CentripetalGame", CentripetalGame)
Factory.register("CentripetalMenu", CentripetalMenu)
Factory.register("CentripetalRoot", CentripetalRoot)
class CentripetalApp(App):
    """Kivy application entry point."""
    icon = 'icon.png'
    def build(self):
        # Record the screen centre in the module globals before any widget
        # logic runs: collision and placement code reads cx/cy directly.
        global cx,cy
        cx = Window.center[0]
        cy = Window.center[1]
        root = CentripetalRoot()
        root.size = Window.size
        root.start()
        return root
# '__android__' is the module name used by Kivy's old Android launcher.
if __name__ in ('__android__', '__main__'):
    CentripetalApp().run()
| |
import logging
# Module-level logger shared by all detonation helpers in this file.
log = logging.getLogger(__name__)
log.setLevel(logging.INFO)
from cbint.utils.daemon import CbIntegrationDaemon, ConfigurationError
from cbint.utils.detonation.binary_queue import (SqliteQueue,
SqliteFeedServer,
BinaryDatabaseController)
from cbint.utils.detonation.binary_analysis import (CbAPIHistoricalProducerThread,
CbAPIUpToDateProducerThread,
QuickScanThread,
DeepAnalysisThread)
import cbint.utils.feed
import cbint.utils.cbserver
import os.path
from threading import Event, Thread
from time import sleep
from logging.handlers import RotatingFileHandler
import datetime
import socket
import time
try:
import simplejson as json
except ImportError:
import json
from cbapi.response import CbResponseAPI, Feed
from cbapi.example_helpers import get_object_by_name_or_id
from cbapi.errors import ServerError
class IntegrationError(Exception):
    """Raised when a DetonationDaemon subclass omits a required override."""
    pass
def touch(path):
    """Update `path`'s mtime, creating the file first if it does not exist.

    Mirrors the shell `touch` utility: os.utime raises OSError when the file
    is missing (or inaccessible), in which case an empty file is created.

    Fix: the original used a bare `except:`, which also swallowed
    SystemExit/KeyboardInterrupt; narrowed to OSError (covers IOError too
    on Python 3, where IOError is an alias of OSError).
    """
    try:
        os.utime(path, None)
    except OSError:
        open(path, 'a').close()
class FeedSyncRunner(Thread):
    """Daemon thread that re-synchronizes a Cb feed when flagged dirty.

    Every `interval` seconds it checks `dirty_event`; when set, the event is
    cleared and each server-side feed named `feed_name` is told to
    synchronize.  Errors are logged and never kill the thread.
    """
    def __init__(self, cb_api, feed_name, dirty_event, interval=60):
        Thread.__init__(self)
        self.cb = cb_api
        self.feed_name = feed_name
        self.interval = int(interval)
        # NOTE(review): sync_needed/sync_supported are never read inside this
        # class -- possibly consumed externally, otherwise dead state.
        self.sync_needed = False
        self.sync_supported = False
        self.dirty_event = dirty_event
        self.daemon = True  # don't keep the process alive at shutdown
    def run(self):
        while True:
            log.info("Feed synchronizing thread sleeping for {} seconds".format(self.interval))
            sleep(self.interval)
            try:
                if self.dirty_event.is_set():
                    # Clear before syncing so changes that arrive mid-sync
                    # trigger another pass on the next wake-up.
                    self.dirty_event.clear()
                    feeds = get_object_by_name_or_id(self.cb, Feed, name=self.feed_name)
                    if not feeds:
                        log.error("Error locating feed {} for synchronization".format(self.feed_name))
                        continue
                    if len(feeds) > 1:
                        log.error("Multiple feeds found for synchronization")
                    for feed in feeds:
                        log.info("Synchronizing Feed {}...".format(feed.name))
                        feed.synchronize(False)
            except Exception as e:
                # Swallow everything so the sync thread never dies.
                # NOTE(review): e.message is Python 2 only.
                log.error(e.message)
class DetonationDaemon(CbIntegrationDaemon):
    """Base daemon for binary 'detonation' integrations with Cb Response.

    Supplies the shared plumbing: a sqlite-backed work queue of binaries,
    producer threads that discover binaries via the Cb API, quick/deep
    analysis consumer threads, a local feed HTTP(S) server, and feed
    registration/synchronization against the Cb server.  Subclasses override
    the hook block marked below.
    """
    def __init__(self, name, **kwargs):
        work_directory = kwargs.pop('work_directory', None)
        CbIntegrationDaemon.__init__(self, name, **kwargs)
        self.cb = None        # CbResponseAPI handle; set in validate_config()
        self.work_queue = None
        # NOTE(review): the default is a *relative* path ("usr/share/...");
        # it probably should start with "/usr" -- confirm before relying on it.
        self.work_directory = work_directory or os.path.join("usr", "share", "cb", "integrations", "%s" % self.name)
        self.database_file = os.path.join(self.work_directory, "sqlite.db")
        self._queue_initialized = False
        self.done = False
        # Set by analysis threads whenever feed content changes.
        self.feed_dirty = Event()
        self.feed_url = None
        self.feed_base_url = None
        self.link_base_url = None
        self.days_rescan = 365  # binaries older than this get re-analyzed
    ### Start: Functions which must be overriden in subclasses of DetonationDaemon ###
    @property
    def integration_name(self):
        """Name/version string reported to the Cb server; override it."""
        return ''
    @property
    def num_quick_scan_threads(self):
        """Number of QuickScanThread consumers to start."""
        return 1
    @property
    def num_deep_scan_threads(self):
        """Number of DeepAnalysisThread consumers to start."""
        return 5
    @property
    def filter_spec(self):
        """Cb query fragment restricting which binaries get collected."""
        return ''
    @property
    def historical_rate_limiter(self):
        """Seconds between requests for the historical (backfill) producer."""
        return 0.5
    @property
    def up_to_date_rate_limiter(self):
        """Seconds between requests for the up-to-date producer."""
        return 0.1
    def get_provider(self):
        raise IntegrationError("Integration did not provide a 'get_provider' function, which is required")
    def get_metadata(self):
        raise IntegrationError("Integration did not provide a 'get_metadata' function, which is required")
    ### End: Functions which must be overriden in subclasses of DetonationDaemon ###
    def validate_config(self):
        """Check [bridge] settings, connect to Cb Response and build feed URLs.

        Returns True on success; raises ConfigurationError otherwise.
        """
        if not self.cfg.has_section('bridge'):
            raise ConfigurationError("Configuration file does not have required section 'bridge'")
        self.check_required_options(['carbonblack_server_url', 'carbonblack_server_token'])
        ssl_verify = self.get_config_boolean("carbonblack_server_sslverify", False)
        server_url = self.cfg.get("bridge", "carbonblack_server_url")
        server_token = self.cfg.get("bridge", "carbonblack_server_token")
        #
        # There are times we need to wait for the Cb Response Server to be back up after a reboot.
        # So lets just sleep for 30 secs while we do 3 max retries
        #
        log.info(self.integration_name)
        cbinfo = None
        for i in range(3):
            try:
                self.cb = CbResponseAPI(url=server_url,
                                        token=server_token,
                                        ssl_verify=ssl_verify,
                                        integration_name=self.integration_name)
                cbinfo = self.cb.info()
                if cbinfo:
                    break
            except Exception as e:
                log.info(e.message)  # NOTE(review): e.message is Python 2 only
                log.info("Failed to connect to Cb Response Server, retrying in 30 secs...")
                time.sleep(30)
                continue
        if not cbinfo:
            raise ConfigurationError("Could not connect to Cb server at %s" % (server_url))
        if self.get_config_boolean("use_streaming", False):
            self.check_required_options(['carbonblack_streaming_host', 'carbonblack_streaming_username',
                                         'carbonblack_streaming_password'])
            self.streaming_host = self.cfg.get('bridge', 'carbonblack_streaming_host')
            self.streaming_username = self.cfg.get('bridge', 'carbonblack_streaming_username')
            self.streaming_password = self.cfg.get('bridge', 'carbonblack_streaming_password')
            self.use_streaming = True
        else:
            self.use_streaming = False
        self.cert_file = self.get_config_string('cert_file')
        self.key_file = self.get_config_string('key_file')
        # Serve the feed over https only when both a cert and a key are given.
        if self.key_file and self.cert_file:
            self.feed_base_url = "https://%s:%d" % (self.get_config_string('feed_host', '127.0.0.1'),
                                                    self.get_config_integer('listener_port', 8080))
        else:
            self.feed_base_url = "http://%s:%d" % (self.get_config_string('feed_host', '127.0.0.1'),
                                                   self.get_config_integer('listener_port', 8080))
        # Links inside feed reports may point at a different host than the
        # feed server itself.
        self.link_base_url = self.get_config_string('link_host_url')
        if not self.link_base_url:
            self.link_base_url = self.feed_base_url
        self.feed_url = "%s%s" % (self.feed_base_url, '/feed.json')
        if self.cfg.has_option('bridge', 'days_rescan'):
            self.days_rescan = self.get_config_integer('days_rescan', 365)
        return True
    def initialize_queue(self):
        """Create (once) and return the sqlite-backed binary work queue."""
        if not self._queue_initialized:
            self.work_queue = SqliteQueue(self.database_file, num_days_before_rescan=self.days_rescan)
            # Anything left in-flight by a previous run goes back to pending.
            self.work_queue.reprocess_on_restart()
            self._queue_initialized = True
        return self.work_queue
    def migrate_legacy_reports(self, legacy_directory):
        """Import feed reports from a pre-sqlite integration into the queue.

        Returns the number of reports migrated.  A '.migrated' marker file in
        the directory makes this a one-time operation.
        """
        migrated_count = 0
        if not os.path.isdir(legacy_directory):
            log.info("Legacy directory %s doesn't exist, nothing to migrate" % legacy_directory)
            return migrated_count
        if os.path.isfile(os.path.join(legacy_directory, '.migrated')):
            log.info("Feed reports from %s already migrated" % legacy_directory)
            return migrated_count
        for fn in (f for f in os.listdir(legacy_directory) if os.path.isfile(os.path.join(legacy_directory,f))):
            try:
                # NOTE(review): file handle is never explicitly closed.
                d = json.load(open(os.path.join(legacy_directory, fn), 'rb'))
                short_result = d['title']
                timestamp = int(d['timestamp'])
                iocs = d['iocs']
                score = int(d['score'])
                link = d['link']
                # NOTE: we are assuming the first md5 in the list is the md5sum of the binary.
                md5_iocs = iocs.get('md5', [])
                if not md5_iocs:
                    log.warning("No MD5 IOCs in file %s" % fn)
                    continue
                md5sum = md5_iocs[0]
                md5_iocs.remove(md5sum)
                if not md5_iocs:
                    del(iocs['md5'])
                if not iocs:
                    iocs = None
                # Negative scores marked failed analyses in the legacy format.
                succeeded = (score >= 0)
            except Exception as e:
                log.warning("Could not parse file %s: %s" % (fn, e))
                continue
            try:
                if not self.work_queue.binary_exists_in_database(md5sum):
                    self.work_queue.append(md5sum)
                    self.work_queue.mark_as_analyzed(md5sum, succeeded, 0, short_result, '', score=score, link=link,
                                                     iocs=iocs)
                    migrated_count += 1
            except Exception as e:
                log.warning("Could not migrate file %s to new database: %s" % (fn, e))
                import traceback
                log.warning(traceback.format_exc())
                continue
        # try:
        #     os.remove(os.path.join(legacy_directory, fn))
        # except IOError:
        #     log.warning("Could not remove old file %s after migration: %s" % (fn, e))
        touch(os.path.join(legacy_directory, '.migrated'))
        log.info("Migrated %d reports from %s into database" % (migrated_count, legacy_directory))
        return migrated_count
    def start_binary_collectors(self, filter_spec):
        """Start and return the producer threads that enqueue binaries."""
        collectors = []
        now = datetime.datetime.utcnow()
        # Backfill producer: walks binaries that existed before `now`.
        collectors.append(CbAPIHistoricalProducerThread(self.database_controller.register("producer"), self.cb, self.name,
                                                        sleep_between=self.get_config_integer('sleep_between_batches', 1200),
                                                        rate_limiter=self.historical_rate_limiter, start_time=now,
                                                        filter_spec=filter_spec)) # historical query
        # Forward producer: polls for binaries newer than `now`.
        collectors.append(CbAPIUpToDateProducerThread(self.database_controller.register("producer"), self.cb, self.name,
                                                      sleep_between=self.get_config_integer('sleep_between_batches', 30),
                                                      rate_limiter=self.up_to_date_rate_limiter, start_time=now,
                                                      filter_spec=filter_spec)) # constantly up-to-date query
        # if self.use_streaming:
        #     # TODO: need filter_spec for streaming
        #     collectors.append(CbStreamingProducerThread(self.database_controller.register("producer"), self.streaming_host, self.streaming_username,
        #                                                 self.streaming_password))
        for collector in collectors:
            collector.start()
        return collectors
    def start_feed_server(self, feed_metadata):
        """Start the local feed server and wait until it accepts connections."""
        self.feed_server = SqliteFeedServer(self.database_file,
                                            self.get_config_integer('listener_port', 8080),
                                            feed_metadata,
                                            self.link_base_url,
                                            self.work_directory,
                                            cert_file=self.cert_file,
                                            key_file=self.key_file,
                                            listener_address=self.get_config_string('listener_address', '0.0.0.0'))
        self.feed_server.start()
        #
        # With Cb Response 6.1, it is much faster to respond back to check if the feed exists.
        # So lets delay here a bit to make sure our feed server is running
        #
        # Poll the listener (10 tries, 5s apart) before returning.
        # NOTE(review): the probe sockets are never explicitly closed.
        for i in range(10):
            sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            result = sock.connect_ex((self.get_config_string('listener_address', '0.0.0.0'),
                                      self.get_config_integer('listener_port', 8080)))
            if result == 0:
                log.info("Feed server is running...")
                return
            else:
                log.info("Feed server isn't running yet, sleep for 5 seconds and trying again...")
                time.sleep(5)
        log.warning("Feed server doesn't seem to have started...")
    def get_or_create_feed(self, retry=3):
        """Look up this integration's feed on the Cb server, creating it if absent.

        Returns the feed id, or None when both lookup and creation fail.
        """
        feed_id = None
        for i in range(retry):
            try:
                feeds = get_object_by_name_or_id(self.cb, Feed, name=self.name)
                if not feeds:
                    log.info("Feed {} was not found, so we are going to create it".format(self.name))
                    break
                if len(feeds) > 1:
                    log.warning("Multiple feeds found, selecting Feed id {}".format(feeds[0].id))
                feed_id = feeds[0].id
                log.info("Feed {} was found as Feed ID {}".format(self.name, feed_id))
                break
            except Exception as e:
                log.info(e.message)  # NOTE(review): e.message is Python 2 only
                break
        if not feed_id:
            log.info("Creating %s feed for the first time" % self.name)
            # TODO: clarification of feed_host vs listener_address
            f = self.cb.create(Feed)
            f.feed_url = self.feed_url
            f.enabled = True
            f.use_proxy = False
            f.validate_server_cert = False
            try:
                f.save()
            except ServerError as se:
                if se.error_code == 500:
                    log.info("Could not add feed:")
                    log.info(
                        " Received error code 500 from server. This is usually because the server cannot retrieve the feed.")
                    log.info(" Check to ensure the Cb server has network connectivity and the credentials are correct.")
                else:
                    log.info("Could not add feed: {0:s}".format(str(se)))
            except Exception as e:
                log.info("Could not add feed: {0:s}".format(str(e)))
            else:
                log.info("Feed data: {0:s}".format(str(f)))
                log.info("Added feed. New feed ID is {0:d}".format(f.id))
                feed_id = f.id
        return feed_id
    def run(self):
        """Main daemon loop: start all threads, then idle until Ctrl-C."""
        try:
            work_queue = self.initialize_queue()
            # Serializes access to the sqlite database across threads.
            self.database_controller = BinaryDatabaseController(work_queue)
            self.database_controller.start()
            # Import previous work, if enabled
            legacy_feed_directory = self.get_config_string("legacy_feed_directory", None)
            if legacy_feed_directory:
                self.migrate_legacy_reports(legacy_feed_directory)
            # Prepare binary analysis ("detonation") provider
            consumer_threads = []
            provider = self.get_provider()
            for i in range(self.num_quick_scan_threads):
                database_arbiter = self.database_controller.register("consumer", quick_scan=True)
                t = QuickScanThread(database_arbiter, self.cb, provider, dirty_event=self.feed_dirty)
                consumer_threads.append(t)
                t.start()
            for i in range(self.num_deep_scan_threads):
                database_arbiter = self.database_controller.register("consumer", quick_scan=False)
                t = DeepAnalysisThread(database_arbiter, self.cb, provider, dirty_event=self.feed_dirty)
                consumer_threads.append(t)
                t.start()
            # Start feed server
            metadata = self.get_metadata()
            self.start_feed_server(metadata)
            # Start collecting binaries
            collectors = self.start_binary_collectors(self.filter_spec)
            # Synchronize feed with Carbon Black
            self.get_or_create_feed()
            # Servers older than 4.1 cannot synchronize feeds on demand.
            if cbint.utils.cbserver.is_server_at_least(self.cb, "4.1"):
                feed_synchronizer = FeedSyncRunner(self.cb, self.name, self.feed_dirty)
                feed_synchronizer.start()
        except Exception as e:
            # NOTE(review): e.message is Python 2 only; also, when setup fails
            # early, consumer_threads/collectors referenced below may be
            # unbound, turning the Ctrl-C cleanup into a NameError.
            log.error(e.message)
        try:
            while True:
                sleep(1)
        except KeyboardInterrupt:
            print('stopping...')
            # Ask every thread to stop, then wait for each to finish.
            for t in consumer_threads + collectors:
                t.stop()
            for t in consumer_threads + collectors:
                t.join()
                print('stopped %s' % t)
# ---- file boundary: the content below is a separate module (Django model formset tests) ----
from __future__ import unicode_literals
import datetime
import re
from datetime import date
from decimal import Decimal
from django import forms
from django.db import models
from django.forms.models import (_get_foreign_key, inlineformset_factory,
modelformset_factory, BaseModelFormSet)
from django.test import TestCase, skipUnlessDBFeature
from django.utils import six
from .models import (Author, BetterAuthor, Book, BookWithCustomPK,
BookWithOptionalAltEditor, AlternateBook, AuthorMeeting, CustomPrimaryKey,
Place, Owner, Location, OwnerProfile, Restaurant, Product, Price,
MexicanRestaurant, ClassyMexicanRestaurant, Repository, Revision,
Person, Membership, Team, Player, Poet, Poem, Post)
class DeletionTests(TestCase):
    """Deletion behaviour of model formsets built with ``can_delete=True``."""
    def test_deletion(self):
        """A form marked DELETE removes its object, but only on a real save."""
        PoetFormSet = modelformset_factory(Poet, fields="__all__", can_delete=True)
        poet = Poet.objects.create(name='test')
        data = {
            'form-TOTAL_FORMS': '1',
            'form-INITIAL_FORMS': '1',
            'form-MAX_NUM_FORMS': '0',
            'form-0-id': str(poet.pk),
            'form-0-name': 'test',
            'form-0-DELETE': 'on',
        }
        formset = PoetFormSet(data, queryset=Poet.objects.all())
        # commit=False must not touch the database yet.
        formset.save(commit=False)
        self.assertEqual(Poet.objects.count(), 1)
        # A committing save performs the deletion.
        formset.save()
        self.assertTrue(formset.is_valid())
        self.assertEqual(Poet.objects.count(), 0)
    def test_add_form_deletion_when_invalid(self):
        """
        Make sure that an add form that is filled out, but marked for deletion
        doesn't cause validation errors.
        """
        PoetFormSet = modelformset_factory(Poet, fields="__all__", can_delete=True)
        poet = Poet.objects.create(name='test')
        # One existing untouched and two new unvalid forms
        data = {
            'form-TOTAL_FORMS': '3',
            'form-INITIAL_FORMS': '1',
            'form-MAX_NUM_FORMS': '0',
            'form-0-id': six.text_type(poet.id),
            'form-0-name': 'test',
            'form-1-id': '',
            'form-1-name': 'x' * 1000, # Too long
            'form-2-id': six.text_type(poet.id), # Violate unique constraint
            'form-2-name': 'test2',
        }
        formset = PoetFormSet(data, queryset=Poet.objects.all())
        # Make sure this form doesn't pass validation.
        self.assertEqual(formset.is_valid(), False)
        self.assertEqual(Poet.objects.count(), 1)
        # Then make sure that it *does* pass validation and delete the object,
        # even though the data in new forms aren't actually valid.
        data['form-0-DELETE'] = 'on'
        data['form-1-DELETE'] = 'on'
        data['form-2-DELETE'] = 'on'
        formset = PoetFormSet(data, queryset=Poet.objects.all())
        self.assertEqual(formset.is_valid(), True)
        formset.save()
        self.assertEqual(Poet.objects.count(), 0)
    def test_change_form_deletion_when_invalid(self):
        """
        Make sure that a change form that is filled out, but marked for deletion
        doesn't cause validation errors.
        """
        PoetFormSet = modelformset_factory(Poet, fields="__all__", can_delete=True)
        poet = Poet.objects.create(name='test')
        data = {
            'form-TOTAL_FORMS': '1',
            'form-INITIAL_FORMS': '1',
            'form-MAX_NUM_FORMS': '0',
            'form-0-id': six.text_type(poet.id),
            'form-0-name': 'x' * 1000,
        }
        formset = PoetFormSet(data, queryset=Poet.objects.all())
        # Make sure this form doesn't pass validation.
        self.assertEqual(formset.is_valid(), False)
        self.assertEqual(Poet.objects.count(), 1)
        # Then make sure that it *does* pass validation and delete the object,
        # even though the data isn't actually valid.
        data['form-0-DELETE'] = 'on'
        formset = PoetFormSet(data, queryset=Poet.objects.all())
        self.assertEqual(formset.is_valid(), True)
        formset.save()
        self.assertEqual(Poet.objects.count(), 0)
    def test_outdated_deletion(self):
        """A DELETE-marked form whose pk no longer exists must not fail validation."""
        poet = Poet.objects.create(name='test')
        poem = Poem.objects.create(name='Brevity is the soul of wit', poet=poet)
        PoemFormSet = inlineformset_factory(Poet, Poem, fields="__all__", can_delete=True)
        # Simulate deletion of an object that doesn't exist in the database
        data = {
            'form-TOTAL_FORMS': '2',
            'form-INITIAL_FORMS': '2',
            'form-0-id': str(poem.pk),
            'form-0-name': 'foo',
            'form-1-id': str(poem.pk + 1), # doesn't exist
            'form-1-name': 'bar',
            'form-1-DELETE': 'on',
        }
        formset = PoemFormSet(data, instance=poet, prefix="form")
        # The formset is valid even though poem.pk + 1 doesn't exist,
        # because it's marked for deletion anyway
        self.assertTrue(formset.is_valid())
        formset.save()
        # Make sure the save went through correctly
        self.assertEqual(Poem.objects.get(pk=poem.pk).name, "foo")
        self.assertEqual(poet.poem_set.count(), 1)
        self.assertFalse(Poem.objects.filter(pk=poem.pk + 1).exists())
class ModelFormsetTest(TestCase):
    def test_simple_save(self):
        """
        End-to-end behaviour of a plain model formset: rendering blank forms,
        creating objects, editing existing ones, and ignoring an add-form
        that is marked for deletion.
        """
        qs = Author.objects.all()
        AuthorFormSet = modelformset_factory(Author, fields="__all__", extra=3)
        formset = AuthorFormSet(queryset=qs)
        self.assertEqual(len(formset.forms), 3)
        self.assertHTMLEqual(formset.forms[0].as_p(),
            '<p><label for="id_form-0-name">Name:</label> <input id="id_form-0-name" type="text" name="form-0-name" maxlength="100" /><input type="hidden" name="form-0-id" id="id_form-0-id" /></p>')
        self.assertHTMLEqual(formset.forms[1].as_p(),
            '<p><label for="id_form-1-name">Name:</label> <input id="id_form-1-name" type="text" name="form-1-name" maxlength="100" /><input type="hidden" name="form-1-id" id="id_form-1-id" /></p>')
        self.assertHTMLEqual(formset.forms[2].as_p(),
            '<p><label for="id_form-2-name">Name:</label> <input id="id_form-2-name" type="text" name="form-2-name" maxlength="100" /><input type="hidden" name="form-2-id" id="id_form-2-id" /></p>')
        data = {
            'form-TOTAL_FORMS': '3', # the number of forms rendered
            'form-INITIAL_FORMS': '0', # the number of forms with initial data
            'form-MAX_NUM_FORMS': '', # the max number of forms
            'form-0-name': 'Charles Baudelaire',
            'form-1-name': 'Arthur Rimbaud',
            'form-2-name': '',
        }
        formset = AuthorFormSet(data=data, queryset=qs)
        self.assertTrue(formset.is_valid())
        saved = formset.save()
        # The empty third form is ignored; only the two filled forms save.
        self.assertEqual(len(saved), 2)
        author1, author2 = saved
        self.assertEqual(author1, Author.objects.get(name='Charles Baudelaire'))
        self.assertEqual(author2, Author.objects.get(name='Arthur Rimbaud'))
        authors = list(Author.objects.order_by('name'))
        self.assertEqual(authors, [author2, author1])
        # Gah! We forgot Paul Verlaine. Let's create a formset to edit the
        # existing authors with an extra form to add him. We *could* pass in a
        # queryset to restrict the Author objects we edit, but in this case
        # we'll use it to display them in alphabetical order by name.
        qs = Author.objects.order_by('name')
        AuthorFormSet = modelformset_factory(Author, fields="__all__", extra=1, can_delete=False)
        formset = AuthorFormSet(queryset=qs)
        self.assertEqual(len(formset.forms), 3)
        self.assertHTMLEqual(formset.forms[0].as_p(),
            '<p><label for="id_form-0-name">Name:</label> <input id="id_form-0-name" type="text" name="form-0-name" value="Arthur Rimbaud" maxlength="100" /><input type="hidden" name="form-0-id" value="%d" id="id_form-0-id" /></p>' % author2.id)
        self.assertHTMLEqual(formset.forms[1].as_p(),
            '<p><label for="id_form-1-name">Name:</label> <input id="id_form-1-name" type="text" name="form-1-name" value="Charles Baudelaire" maxlength="100" /><input type="hidden" name="form-1-id" value="%d" id="id_form-1-id" /></p>' % author1.id)
        self.assertHTMLEqual(formset.forms[2].as_p(),
            '<p><label for="id_form-2-name">Name:</label> <input id="id_form-2-name" type="text" name="form-2-name" maxlength="100" /><input type="hidden" name="form-2-id" id="id_form-2-id" /></p>')
        data = {
            'form-TOTAL_FORMS': '3', # the number of forms rendered
            'form-INITIAL_FORMS': '2', # the number of forms with initial data
            'form-MAX_NUM_FORMS': '', # the max number of forms
            'form-0-id': str(author2.id),
            'form-0-name': 'Arthur Rimbaud',
            'form-1-id': str(author1.id),
            'form-1-name': 'Charles Baudelaire',
            'form-2-name': 'Paul Verlaine',
        }
        formset = AuthorFormSet(data=data, queryset=qs)
        self.assertTrue(formset.is_valid())
        # Only changed or new objects are returned from formset.save()
        saved = formset.save()
        self.assertEqual(len(saved), 1)
        author3 = saved[0]
        self.assertEqual(author3, Author.objects.get(name='Paul Verlaine'))
        authors = list(Author.objects.order_by('name'))
        self.assertEqual(authors, [author2, author1, author3])
        # This probably shouldn't happen, but it will. If an add form was
        # marked for deletion, make sure we don't save that form.
        qs = Author.objects.order_by('name')
        AuthorFormSet = modelformset_factory(Author, fields="__all__", extra=1, can_delete=True)
        formset = AuthorFormSet(queryset=qs)
        self.assertEqual(len(formset.forms), 4)
        self.assertHTMLEqual(formset.forms[0].as_p(),
            '<p><label for="id_form-0-name">Name:</label> <input id="id_form-0-name" type="text" name="form-0-name" value="Arthur Rimbaud" maxlength="100" /></p>\n'
            '<p><label for="id_form-0-DELETE">Delete:</label> <input type="checkbox" name="form-0-DELETE" id="id_form-0-DELETE" /><input type="hidden" name="form-0-id" value="%d" id="id_form-0-id" /></p>' % author2.id)
        self.assertHTMLEqual(formset.forms[1].as_p(),
            '<p><label for="id_form-1-name">Name:</label> <input id="id_form-1-name" type="text" name="form-1-name" value="Charles Baudelaire" maxlength="100" /></p>\n'
            '<p><label for="id_form-1-DELETE">Delete:</label> <input type="checkbox" name="form-1-DELETE" id="id_form-1-DELETE" /><input type="hidden" name="form-1-id" value="%d" id="id_form-1-id" /></p>' % author1.id)
        self.assertHTMLEqual(formset.forms[2].as_p(),
            '<p><label for="id_form-2-name">Name:</label> <input id="id_form-2-name" type="text" name="form-2-name" value="Paul Verlaine" maxlength="100" /></p>\n'
            '<p><label for="id_form-2-DELETE">Delete:</label> <input type="checkbox" name="form-2-DELETE" id="id_form-2-DELETE" /><input type="hidden" name="form-2-id" value="%d" id="id_form-2-id" /></p>' % author3.id)
        self.assertHTMLEqual(formset.forms[3].as_p(),
            '<p><label for="id_form-3-name">Name:</label> <input id="id_form-3-name" type="text" name="form-3-name" maxlength="100" /></p>\n'
            '<p><label for="id_form-3-DELETE">Delete:</label> <input type="checkbox" name="form-3-DELETE" id="id_form-3-DELETE" /><input type="hidden" name="form-3-id" id="id_form-3-id" /></p>')
        data = {
            'form-TOTAL_FORMS': '4', # the number of forms rendered
            'form-INITIAL_FORMS': '3', # the number of forms with initial data
            'form-MAX_NUM_FORMS': '', # the max number of forms
            'form-0-id': str(author2.id),
            'form-0-name': 'Arthur Rimbaud',
            'form-1-id': str(author1.id),
            'form-1-name': 'Charles Baudelaire',
            'form-2-id': str(author3.id),
            'form-2-name': 'Paul Verlaine',
            'form-3-name': 'Walt Whitman',
            'form-3-DELETE': 'on',
        }
        formset = AuthorFormSet(data=data, queryset=qs)
        self.assertTrue(formset.is_valid())
        # No objects were changed or saved so nothing will come back.
        self.assertEqual(formset.save(), [])
        authors = list(Author.objects.order_by('name'))
        self.assertEqual(authors, [author2, author1, author3])
        # Let's edit a record to ensure save only returns that one record.
        data = {
            'form-TOTAL_FORMS': '4', # the number of forms rendered
            'form-INITIAL_FORMS': '3', # the number of forms with initial data
            'form-MAX_NUM_FORMS': '', # the max number of forms
            'form-0-id': str(author2.id),
            'form-0-name': 'Walt Whitman',
            'form-1-id': str(author1.id),
            'form-1-name': 'Charles Baudelaire',
            'form-2-id': str(author3.id),
            'form-2-name': 'Paul Verlaine',
            'form-3-name': '',
            'form-3-DELETE': '',
        }
        formset = AuthorFormSet(data=data, queryset=qs)
        self.assertTrue(formset.is_valid())
        # One record has changed.
        saved = formset.save()
        self.assertEqual(len(saved), 1)
        self.assertEqual(saved[0], Author.objects.get(name='Walt Whitman'))
    def test_commit_false(self):
        """save(commit=False) defers row saves; m2m data waits for save_m2m()."""
        # Test the behavior of commit=False and save_m2m
        author1 = Author.objects.create(name='Charles Baudelaire')
        author2 = Author.objects.create(name='Paul Verlaine')
        author3 = Author.objects.create(name='Walt Whitman')
        meeting = AuthorMeeting.objects.create(created=date.today())
        meeting.authors = Author.objects.all()
        # create an Author instance to add to the meeting.
        author4 = Author.objects.create(name='John Steinbeck')
        AuthorMeetingFormSet = modelformset_factory(AuthorMeeting, fields="__all__", extra=1, can_delete=True)
        data = {
            'form-TOTAL_FORMS': '2', # the number of forms rendered
            'form-INITIAL_FORMS': '1', # the number of forms with initial data
            'form-MAX_NUM_FORMS': '', # the max number of forms
            'form-0-id': str(meeting.id),
            'form-0-name': '2nd Tuesday of the Week Meeting',
            'form-0-authors': [author2.id, author1.id, author3.id, author4.id],
            'form-1-name': '',
            'form-1-authors': '',
            'form-1-DELETE': '',
        }
        formset = AuthorMeetingFormSet(data=data, queryset=AuthorMeeting.objects.all())
        self.assertTrue(formset.is_valid())
        instances = formset.save(commit=False)
        # The caller finishes the save manually, then flushes m2m data.
        for instance in instances:
            instance.created = date.today()
            instance.save()
        formset.save_m2m()
        self.assertQuerysetEqual(instances[0].authors.all(), [
            '<Author: Charles Baudelaire>',
            '<Author: John Steinbeck>',
            '<Author: Paul Verlaine>',
            '<Author: Walt Whitman>',
        ])
    def test_max_num(self):
        """max_num caps extra (blank) forms but never hides existing objects."""
        # Test the behavior of max_num with model formsets. It should allow
        # all existing related objects/inlines for a given object to be
        # displayed, but not allow the creation of new inlines beyond max_num.
        Author.objects.create(name='Charles Baudelaire')
        Author.objects.create(name='Paul Verlaine')
        Author.objects.create(name='Walt Whitman')
        qs = Author.objects.order_by('name')
        AuthorFormSet = modelformset_factory(Author, fields="__all__", max_num=None, extra=3)
        formset = AuthorFormSet(queryset=qs)
        # max_num=None: 3 existing objects plus all 3 extra forms.
        self.assertEqual(len(formset.forms), 6)
        self.assertEqual(len(formset.extra_forms), 3)
        AuthorFormSet = modelformset_factory(Author, fields="__all__", max_num=4, extra=3)
        formset = AuthorFormSet(queryset=qs)
        # max_num=4 leaves room for only one of the three extra forms.
        self.assertEqual(len(formset.forms), 4)
        self.assertEqual(len(formset.extra_forms), 1)
        AuthorFormSet = modelformset_factory(Author, fields="__all__", max_num=0, extra=3)
        formset = AuthorFormSet(queryset=qs)
        # max_num=0 suppresses every extra form; existing objects still render.
        self.assertEqual(len(formset.forms), 3)
        self.assertEqual(len(formset.extra_forms), 0)
        # Regardless of max_num, the queryset always contains all objects.
        AuthorFormSet = modelformset_factory(Author, fields="__all__", max_num=None)
        formset = AuthorFormSet(queryset=qs)
        self.assertQuerysetEqual(formset.get_queryset(), [
            '<Author: Charles Baudelaire>',
            '<Author: Paul Verlaine>',
            '<Author: Walt Whitman>',
        ])
        AuthorFormSet = modelformset_factory(Author, fields="__all__", max_num=0)
        formset = AuthorFormSet(queryset=qs)
        self.assertQuerysetEqual(formset.get_queryset(), [
            '<Author: Charles Baudelaire>',
            '<Author: Paul Verlaine>',
            '<Author: Walt Whitman>',
        ])
        AuthorFormSet = modelformset_factory(Author, fields="__all__", max_num=4)
        formset = AuthorFormSet(queryset=qs)
        self.assertQuerysetEqual(formset.get_queryset(), [
            '<Author: Charles Baudelaire>',
            '<Author: Paul Verlaine>',
            '<Author: Walt Whitman>',
        ])
def test_min_num(self):
# Test the behavior of min_num with model formsets. It should be
# added to extra.
qs = Author.objects.none()
AuthorFormSet = modelformset_factory(Author, fields="__all__", extra=0)
formset = AuthorFormSet(queryset=qs)
self.assertEqual(len(formset.forms), 0)
AuthorFormSet = modelformset_factory(Author, fields="__all__", min_num=1, extra=0)
formset = AuthorFormSet(queryset=qs)
self.assertEqual(len(formset.forms), 1)
AuthorFormSet = modelformset_factory(Author, fields="__all__", min_num=1, extra=1)
formset = AuthorFormSet(queryset=qs)
self.assertEqual(len(formset.forms), 2)
def test_min_num_with_existing(self):
# Test the behavior of min_num with existing objects.
Author.objects.create(name='Charles Baudelaire')
qs = Author.objects.all()
AuthorFormSet = modelformset_factory(Author, fields="__all__", extra=0, min_num=1)
formset = AuthorFormSet(queryset=qs)
self.assertEqual(len(formset.forms), 1)
    def test_custom_save_method(self):
        """A custom ModelForm.save() is invoked for every form in the set."""
        class PoetForm(forms.ModelForm):
            def save(self, commit=True):
                # change the name to "Vladimir Mayakovsky" just to be a jerk.
                author = super(PoetForm, self).save(commit=False)
                author.name = "Vladimir Mayakovsky"
                if commit:
                    author.save()
                return author
        PoetFormSet = modelformset_factory(Poet, fields="__all__", form=PoetForm)
        data = {
            'form-TOTAL_FORMS': '3', # the number of forms rendered
            'form-INITIAL_FORMS': '0', # the number of forms with initial data
            'form-MAX_NUM_FORMS': '', # the max number of forms
            'form-0-name': 'Walt Whitman',
            'form-1-name': 'Charles Baudelaire',
            'form-2-name': '',
        }
        qs = Poet.objects.all()
        formset = PoetFormSet(data=data, queryset=qs)
        self.assertTrue(formset.is_valid())
        poets = formset.save()
        self.assertEqual(len(poets), 2)
        poet1, poet2 = poets
        # Both saved poets were renamed by the custom save().
        self.assertEqual(poet1.name, 'Vladimir Mayakovsky')
        self.assertEqual(poet2.name, 'Vladimir Mayakovsky')
def test_custom_form(self):
""" Test that model_formset respects fields and exclude parameters of
custom form
"""
class PostForm1(forms.ModelForm):
class Meta:
model = Post
fields = ('title', 'posted')
class PostForm2(forms.ModelForm):
class Meta:
model = Post
exclude = ('subtitle',)
PostFormSet = modelformset_factory(Post, form=PostForm1)
formset = PostFormSet()
self.assertFalse("subtitle" in formset.forms[0].fields)
PostFormSet = modelformset_factory(Post, form=PostForm2)
formset = PostFormSet()
self.assertFalse("subtitle" in formset.forms[0].fields)
def test_custom_queryset_init(self):
"""
Test that a queryset can be overridden in the __init__ method.
https://docs.djangoproject.com/en/dev/topics/forms/modelforms/#changing-the-queryset
"""
Author.objects.create(name='Charles Baudelaire')
Author.objects.create(name='Paul Verlaine')
class BaseAuthorFormSet(BaseModelFormSet):
def __init__(self, *args, **kwargs):
super(BaseAuthorFormSet, self).__init__(*args, **kwargs)
self.queryset = Author.objects.filter(name__startswith='Charles')
AuthorFormSet = modelformset_factory(Author, fields='__all__', formset=BaseAuthorFormSet)
formset = AuthorFormSet()
self.assertEqual(len(formset.get_queryset()), 1)
    def test_model_inheritance(self):
        """MTI child formsets expose the parent link (author_ptr) as the hidden pk."""
        BetterAuthorFormSet = modelformset_factory(BetterAuthor, fields="__all__")
        formset = BetterAuthorFormSet()
        self.assertEqual(len(formset.forms), 1)
        self.assertHTMLEqual(formset.forms[0].as_p(),
            '<p><label for="id_form-0-name">Name:</label> <input id="id_form-0-name" type="text" name="form-0-name" maxlength="100" /></p>\n'
            '<p><label for="id_form-0-write_speed">Write speed:</label> <input type="number" name="form-0-write_speed" id="id_form-0-write_speed" /><input type="hidden" name="form-0-author_ptr" id="id_form-0-author_ptr" /></p>')
        data = {
            'form-TOTAL_FORMS': '1', # the number of forms rendered
            'form-INITIAL_FORMS': '0', # the number of forms with initial data
            'form-MAX_NUM_FORMS': '', # the max number of forms
            'form-0-author_ptr': '',
            'form-0-name': 'Ernest Hemingway',
            'form-0-write_speed': '10',
        }
        formset = BetterAuthorFormSet(data)
        self.assertTrue(formset.is_valid())
        saved = formset.save()
        self.assertEqual(len(saved), 1)
        author1, = saved
        self.assertEqual(author1, BetterAuthor.objects.get(name='Ernest Hemingway'))
        hemingway_id = BetterAuthor.objects.get(name="Ernest Hemingway").pk
        formset = BetterAuthorFormSet()
        self.assertEqual(len(formset.forms), 2)
        self.assertHTMLEqual(formset.forms[0].as_p(),
            '<p><label for="id_form-0-name">Name:</label> <input id="id_form-0-name" type="text" name="form-0-name" value="Ernest Hemingway" maxlength="100" /></p>\n'
            '<p><label for="id_form-0-write_speed">Write speed:</label> <input type="number" name="form-0-write_speed" value="10" id="id_form-0-write_speed" /><input type="hidden" name="form-0-author_ptr" value="%d" id="id_form-0-author_ptr" /></p>' % hemingway_id)
        self.assertHTMLEqual(formset.forms[1].as_p(),
            '<p><label for="id_form-1-name">Name:</label> <input id="id_form-1-name" type="text" name="form-1-name" maxlength="100" /></p>\n'
            '<p><label for="id_form-1-write_speed">Write speed:</label> <input type="number" name="form-1-write_speed" id="id_form-1-write_speed" /><input type="hidden" name="form-1-author_ptr" id="id_form-1-author_ptr" /></p>')
        data = {
            'form-TOTAL_FORMS': '2', # the number of forms rendered
            'form-INITIAL_FORMS': '1', # the number of forms with initial data
            'form-MAX_NUM_FORMS': '', # the max number of forms
            'form-0-author_ptr': hemingway_id,
            'form-0-name': 'Ernest Hemingway',
            'form-0-write_speed': '10',
            'form-1-author_ptr': '',
            'form-1-name': '',
            'form-1-write_speed': '',
        }
        formset = BetterAuthorFormSet(data)
        self.assertTrue(formset.is_valid())
        # Nothing changed, so save() returns no instances.
        self.assertEqual(formset.save(), [])
    def test_inline_formsets(self):
        """Inline formsets tie child objects (Book) to a parent instance (Author)."""
        # We can also create a formset that is tied to a parent model. This is
        # how the admin system's edit inline functionality works.
        AuthorBooksFormSet = inlineformset_factory(Author, Book, can_delete=False, extra=3, fields="__all__")
        author = Author.objects.create(name='Charles Baudelaire')
        formset = AuthorBooksFormSet(instance=author)
        self.assertEqual(len(formset.forms), 3)
        self.assertHTMLEqual(formset.forms[0].as_p(),
            '<p><label for="id_book_set-0-title">Title:</label> <input id="id_book_set-0-title" type="text" name="book_set-0-title" maxlength="100" /><input type="hidden" name="book_set-0-author" value="%d" id="id_book_set-0-author" /><input type="hidden" name="book_set-0-id" id="id_book_set-0-id" /></p>' % author.id)
        self.assertHTMLEqual(formset.forms[1].as_p(),
            '<p><label for="id_book_set-1-title">Title:</label> <input id="id_book_set-1-title" type="text" name="book_set-1-title" maxlength="100" /><input type="hidden" name="book_set-1-author" value="%d" id="id_book_set-1-author" /><input type="hidden" name="book_set-1-id" id="id_book_set-1-id" /></p>' % author.id)
        self.assertHTMLEqual(formset.forms[2].as_p(),
            '<p><label for="id_book_set-2-title">Title:</label> <input id="id_book_set-2-title" type="text" name="book_set-2-title" maxlength="100" /><input type="hidden" name="book_set-2-author" value="%d" id="id_book_set-2-author" /><input type="hidden" name="book_set-2-id" id="id_book_set-2-id" /></p>' % author.id)
        data = {
            'book_set-TOTAL_FORMS': '3', # the number of forms rendered
            'book_set-INITIAL_FORMS': '0', # the number of forms with initial data
            'book_set-MAX_NUM_FORMS': '', # the max number of forms
            'book_set-0-title': 'Les Fleurs du Mal',
            'book_set-1-title': '',
            'book_set-2-title': '',
        }
        formset = AuthorBooksFormSet(data, instance=author)
        self.assertTrue(formset.is_valid())
        saved = formset.save()
        self.assertEqual(len(saved), 1)
        book1, = saved
        self.assertEqual(book1, Book.objects.get(title='Les Fleurs du Mal'))
        self.assertQuerysetEqual(author.book_set.all(), ['<Book: Les Fleurs du Mal>'])
        # Now that we've added a book to Charles Baudelaire, let's try adding
        # another one. This time though, an edit form will be available for
        # every existing book.
        AuthorBooksFormSet = inlineformset_factory(Author, Book, can_delete=False, extra=2, fields="__all__")
        author = Author.objects.get(name='Charles Baudelaire')
        formset = AuthorBooksFormSet(instance=author)
        self.assertEqual(len(formset.forms), 3)
        self.assertHTMLEqual(formset.forms[0].as_p(),
            '<p><label for="id_book_set-0-title">Title:</label> <input id="id_book_set-0-title" type="text" name="book_set-0-title" value="Les Fleurs du Mal" maxlength="100" /><input type="hidden" name="book_set-0-author" value="%d" id="id_book_set-0-author" /><input type="hidden" name="book_set-0-id" value="%d" id="id_book_set-0-id" /></p>' % (author.id, book1.id))
        self.assertHTMLEqual(formset.forms[1].as_p(),
            '<p><label for="id_book_set-1-title">Title:</label> <input id="id_book_set-1-title" type="text" name="book_set-1-title" maxlength="100" /><input type="hidden" name="book_set-1-author" value="%d" id="id_book_set-1-author" /><input type="hidden" name="book_set-1-id" id="id_book_set-1-id" /></p>' % author.id)
        self.assertHTMLEqual(formset.forms[2].as_p(),
            '<p><label for="id_book_set-2-title">Title:</label> <input id="id_book_set-2-title" type="text" name="book_set-2-title" maxlength="100" /><input type="hidden" name="book_set-2-author" value="%d" id="id_book_set-2-author" /><input type="hidden" name="book_set-2-id" id="id_book_set-2-id" /></p>' % author.id)
        data = {
            'book_set-TOTAL_FORMS': '3', # the number of forms rendered
            'book_set-INITIAL_FORMS': '1', # the number of forms with initial data
            'book_set-MAX_NUM_FORMS': '', # the max number of forms
            'book_set-0-id': str(book1.id),
            'book_set-0-title': 'Les Fleurs du Mal',
            'book_set-1-title': 'Les Paradis Artificiels',
            'book_set-2-title': '',
        }
        formset = AuthorBooksFormSet(data, instance=author)
        self.assertTrue(formset.is_valid())
        saved = formset.save()
        self.assertEqual(len(saved), 1)
        book2, = saved
        self.assertEqual(book2, Book.objects.get(title='Les Paradis Artificiels'))
        # As you can see, 'Les Paradis Artificiels' is now a book belonging to
        # Charles Baudelaire.
        self.assertQuerysetEqual(author.book_set.order_by('title'), [
            '<Book: Les Fleurs du Mal>',
            '<Book: Les Paradis Artificiels>',
        ])
    def test_inline_formsets_save_as_new(self):
        """save_as_new re-creates posted child rows against a new parent instance."""
        # The save_as_new parameter lets you re-associate the data to a new
        # instance. This is used in the admin for save_as functionality.
        AuthorBooksFormSet = inlineformset_factory(Author, Book, can_delete=False, extra=2, fields="__all__")
        Author.objects.create(name='Charles Baudelaire')
        # NOTE: the hardcoded ids below refer to books that were never created;
        # with save_as_new the posted pks are intentionally ignored.
        data = {
            'book_set-TOTAL_FORMS': '3', # the number of forms rendered
            'book_set-INITIAL_FORMS': '2', # the number of forms with initial data
            'book_set-MAX_NUM_FORMS': '', # the max number of forms
            'book_set-0-id': '1',
            'book_set-0-title': 'Les Fleurs du Mal',
            'book_set-1-id': '2',
            'book_set-1-title': 'Les Paradis Artificiels',
            'book_set-2-title': '',
        }
        formset = AuthorBooksFormSet(data, instance=Author(), save_as_new=True)
        self.assertTrue(formset.is_valid())
        new_author = Author.objects.create(name='Charles Baudelaire')
        formset = AuthorBooksFormSet(data, instance=new_author, save_as_new=True)
        saved = formset.save()
        self.assertEqual(len(saved), 2)
        book1, book2 = saved
        self.assertEqual(book1.title, 'Les Fleurs du Mal')
        self.assertEqual(book2.title, 'Les Paradis Artificiels')
        # Test using a custom prefix on an inline formset.
        formset = AuthorBooksFormSet(prefix="test")
        self.assertEqual(len(formset.forms), 2)
        self.assertHTMLEqual(formset.forms[0].as_p(),
            '<p><label for="id_test-0-title">Title:</label> <input id="id_test-0-title" type="text" name="test-0-title" maxlength="100" /><input type="hidden" name="test-0-author" id="id_test-0-author" /><input type="hidden" name="test-0-id" id="id_test-0-id" /></p>')
        self.assertHTMLEqual(formset.forms[1].as_p(),
            '<p><label for="id_test-1-title">Title:</label> <input id="id_test-1-title" type="text" name="test-1-title" maxlength="100" /><input type="hidden" name="test-1-author" id="id_test-1-author" /><input type="hidden" name="test-1-id" id="id_test-1-id" /></p>')
    def test_inline_formsets_with_custom_pk(self):
        """The custom pk field renders as an editable input, not a hidden id."""
        # Test inline formsets where the inline-edited object has a custom
        # primary key that is not the fk to the parent object.
        self.maxDiff = 1024
        AuthorBooksFormSet2 = inlineformset_factory(Author, BookWithCustomPK, can_delete=False, extra=1, fields="__all__")
        author = Author.objects.create(pk=1, name='Charles Baudelaire')
        formset = AuthorBooksFormSet2(instance=author)
        self.assertEqual(len(formset.forms), 1)
        self.assertHTMLEqual(formset.forms[0].as_p(),
            '<p><label for="id_bookwithcustompk_set-0-my_pk">My pk:</label> <input id="id_bookwithcustompk_set-0-my_pk" type="number" name="bookwithcustompk_set-0-my_pk" step="1" /></p>\n'
            '<p><label for="id_bookwithcustompk_set-0-title">Title:</label> <input id="id_bookwithcustompk_set-0-title" type="text" name="bookwithcustompk_set-0-title" maxlength="100" /><input type="hidden" name="bookwithcustompk_set-0-author" value="1" id="id_bookwithcustompk_set-0-author" /></p>')
        data = {
            'bookwithcustompk_set-TOTAL_FORMS': '1', # the number of forms rendered
            'bookwithcustompk_set-INITIAL_FORMS': '0', # the number of forms with initial data
            'bookwithcustompk_set-MAX_NUM_FORMS': '', # the max number of forms
            'bookwithcustompk_set-0-my_pk': '77777',
            'bookwithcustompk_set-0-title': 'Les Fleurs du Mal',
        }
        formset = AuthorBooksFormSet2(data, instance=author)
        self.assertTrue(formset.is_valid())
        saved = formset.save()
        self.assertEqual(len(saved), 1)
        book1, = saved
        # The user-supplied pk value is used as the primary key.
        self.assertEqual(book1.pk, 77777)
        book1 = author.bookwithcustompk_set.get()
        self.assertEqual(book1.title, 'Les Fleurs du Mal')
    def test_inline_formsets_with_multi_table_inheritance(self):
        """MTI inlines use the auto-created parent link (book_ptr) as hidden pk."""
        # Test inline formsets where the inline-edited object uses multi-table
        # inheritance, thus has a non AutoField yet auto-created primary key.
        AuthorBooksFormSet3 = inlineformset_factory(Author, AlternateBook, can_delete=False, extra=1, fields="__all__")
        author = Author.objects.create(pk=1, name='Charles Baudelaire')
        formset = AuthorBooksFormSet3(instance=author)
        self.assertEqual(len(formset.forms), 1)
        self.assertHTMLEqual(formset.forms[0].as_p(),
            '<p><label for="id_alternatebook_set-0-title">Title:</label> <input id="id_alternatebook_set-0-title" type="text" name="alternatebook_set-0-title" maxlength="100" /></p>\n'
            '<p><label for="id_alternatebook_set-0-notes">Notes:</label> <input id="id_alternatebook_set-0-notes" type="text" name="alternatebook_set-0-notes" maxlength="100" /><input type="hidden" name="alternatebook_set-0-author" value="1" id="id_alternatebook_set-0-author" /><input type="hidden" name="alternatebook_set-0-book_ptr" id="id_alternatebook_set-0-book_ptr" /></p>')
        data = {
            'alternatebook_set-TOTAL_FORMS': '1', # the number of forms rendered
            'alternatebook_set-INITIAL_FORMS': '0', # the number of forms with initial data
            'alternatebook_set-MAX_NUM_FORMS': '', # the max number of forms
            'alternatebook_set-0-title': 'Flowers of Evil',
            'alternatebook_set-0-notes': 'English translation of Les Fleurs du Mal'
        }
        formset = AuthorBooksFormSet3(data, instance=author)
        self.assertTrue(formset.is_valid())
        saved = formset.save()
        self.assertEqual(len(saved), 1)
        book1, = saved
        self.assertEqual(book1.title, 'Flowers of Evil')
        self.assertEqual(book1.notes, 'English translation of Les Fleurs du Mal')
    @skipUnlessDBFeature('supports_partially_nullable_unique_constraints')
    def test_inline_formsets_with_nullable_unique_together(self):
        """Duplicate rows are allowed when the unique_together member is NULL."""
        # Test inline formsets where the inline-edited object has a
        # unique_together constraint with a nullable member
        AuthorBooksFormSet4 = inlineformset_factory(Author, BookWithOptionalAltEditor, can_delete=False, extra=2, fields="__all__")
        author = Author.objects.create(pk=1, name='Charles Baudelaire')
        # Two identical titles for the same author: valid because the
        # alt_editor member of the unique constraint is left NULL.
        data = {
            'bookwithoptionalalteditor_set-TOTAL_FORMS': '2', # the number of forms rendered
            'bookwithoptionalalteditor_set-INITIAL_FORMS': '0', # the number of forms with initial data
            'bookwithoptionalalteditor_set-MAX_NUM_FORMS': '', # the max number of forms
            'bookwithoptionalalteditor_set-0-author': '1',
            'bookwithoptionalalteditor_set-0-title': 'Les Fleurs du Mal',
            'bookwithoptionalalteditor_set-1-author': '1',
            'bookwithoptionalalteditor_set-1-title': 'Les Fleurs du Mal',
        }
        formset = AuthorBooksFormSet4(data, instance=author)
        self.assertTrue(formset.is_valid())
        saved = formset.save()
        self.assertEqual(len(saved), 2)
        book1, book2 = saved
        self.assertEqual(book1.author_id, 1)
        self.assertEqual(book1.title, 'Les Fleurs du Mal')
        self.assertEqual(book2.author_id, 1)
        self.assertEqual(book2.title, 'Les Fleurs du Mal')
    def test_inline_formsets_with_custom_save_method(self):
        # A ModelForm with an overridden save() must have that override
        # honored when used as the form of an inline formset; also verifies
        # that a custom ``queryset`` restricts which instances the formset
        # edits and renders.
        AuthorBooksFormSet = inlineformset_factory(Author, Book, can_delete=False, extra=2, fields="__all__")
        author = Author.objects.create(pk=1, name='Charles Baudelaire')
        book1 = Book.objects.create(pk=1, author=author, title='Les Paradis Artificiels')
        book2 = Book.objects.create(pk=2, author=author, title='Les Fleurs du Mal')
        book3 = Book.objects.create(pk=3, author=author, title='Flowers of Evil')
        class PoemForm(forms.ModelForm):
            def save(self, commit=True):
                # change the name to "Brooklyn Bridge" just to be a jerk.
                poem = super(PoemForm, self).save(commit=False)
                poem.name = "Brooklyn Bridge"
                if commit:
                    poem.save()
                return poem
        PoemFormSet = inlineformset_factory(Poet, Poem, form=PoemForm, fields="__all__")
        data = {
            'poem_set-TOTAL_FORMS': '3',  # the number of forms rendered
            'poem_set-INITIAL_FORMS': '0',  # the number of forms with initial data
            'poem_set-MAX_NUM_FORMS': '',  # the max number of forms
            'poem_set-0-name': 'The Cloud in Trousers',
            'poem_set-1-name': 'I',
            'poem_set-2-name': '',
        }
        poet = Poet.objects.create(name='Vladimir Mayakovsky')
        formset = PoemFormSet(data=data, instance=poet)
        self.assertTrue(formset.is_valid())
        saved = formset.save()
        # Only the two non-empty forms save; both names are overwritten by
        # the custom PoemForm.save() above.
        self.assertEqual(len(saved), 2)
        poem1, poem2 = saved
        self.assertEqual(poem1.name, 'Brooklyn Bridge')
        self.assertEqual(poem2.name, 'Brooklyn Bridge')
        # We can provide a custom queryset to our InlineFormSet:
        custom_qs = Book.objects.order_by('-title')
        formset = AuthorBooksFormSet(instance=author, queryset=custom_qs)
        # 3 existing books ordered by the queryset + extra=2 blank forms.
        self.assertEqual(len(formset.forms), 5)
        self.assertHTMLEqual(formset.forms[0].as_p(),
            '<p><label for="id_book_set-0-title">Title:</label> <input id="id_book_set-0-title" type="text" name="book_set-0-title" value="Les Paradis Artificiels" maxlength="100" /><input type="hidden" name="book_set-0-author" value="1" id="id_book_set-0-author" /><input type="hidden" name="book_set-0-id" value="1" id="id_book_set-0-id" /></p>')
        self.assertHTMLEqual(formset.forms[1].as_p(),
            '<p><label for="id_book_set-1-title">Title:</label> <input id="id_book_set-1-title" type="text" name="book_set-1-title" value="Les Fleurs du Mal" maxlength="100" /><input type="hidden" name="book_set-1-author" value="1" id="id_book_set-1-author" /><input type="hidden" name="book_set-1-id" value="2" id="id_book_set-1-id" /></p>')
        self.assertHTMLEqual(formset.forms[2].as_p(),
            '<p><label for="id_book_set-2-title">Title:</label> <input id="id_book_set-2-title" type="text" name="book_set-2-title" value="Flowers of Evil" maxlength="100" /><input type="hidden" name="book_set-2-author" value="1" id="id_book_set-2-author" /><input type="hidden" name="book_set-2-id" value="3" id="id_book_set-2-id" /></p>')
        self.assertHTMLEqual(formset.forms[3].as_p(),
            '<p><label for="id_book_set-3-title">Title:</label> <input id="id_book_set-3-title" type="text" name="book_set-3-title" maxlength="100" /><input type="hidden" name="book_set-3-author" value="1" id="id_book_set-3-author" /><input type="hidden" name="book_set-3-id" id="id_book_set-3-id" /></p>')
        self.assertHTMLEqual(formset.forms[4].as_p(),
            '<p><label for="id_book_set-4-title">Title:</label> <input id="id_book_set-4-title" type="text" name="book_set-4-title" maxlength="100" /><input type="hidden" name="book_set-4-author" value="1" id="id_book_set-4-author" /><input type="hidden" name="book_set-4-id" id="id_book_set-4-id" /></p>')
        data = {
            'book_set-TOTAL_FORMS': '5',  # the number of forms rendered
            'book_set-INITIAL_FORMS': '3',  # the number of forms with initial data
            'book_set-MAX_NUM_FORMS': '',  # the max number of forms
            'book_set-0-id': str(book1.id),
            'book_set-0-title': 'Les Paradis Artificiels',
            'book_set-1-id': str(book2.id),
            'book_set-1-title': 'Les Fleurs du Mal',
            'book_set-2-id': str(book3.id),
            'book_set-2-title': 'Flowers of Evil',
            'book_set-3-title': 'Revue des deux mondes',
            'book_set-4-title': '',
        }
        formset = AuthorBooksFormSet(data, instance=author, queryset=custom_qs)
        self.assertTrue(formset.is_valid())
        # A narrower queryset (titles starting with 'F') limits the bound
        # forms to the single matching book plus the two extras.
        custom_qs = Book.objects.filter(title__startswith='F')
        formset = AuthorBooksFormSet(instance=author, queryset=custom_qs)
        self.assertHTMLEqual(formset.forms[0].as_p(),
            '<p><label for="id_book_set-0-title">Title:</label> <input id="id_book_set-0-title" type="text" name="book_set-0-title" value="Flowers of Evil" maxlength="100" /><input type="hidden" name="book_set-0-author" value="1" id="id_book_set-0-author" /><input type="hidden" name="book_set-0-id" value="3" id="id_book_set-0-id" /></p>')
        self.assertHTMLEqual(formset.forms[1].as_p(),
            '<p><label for="id_book_set-1-title">Title:</label> <input id="id_book_set-1-title" type="text" name="book_set-1-title" maxlength="100" /><input type="hidden" name="book_set-1-author" value="1" id="id_book_set-1-author" /><input type="hidden" name="book_set-1-id" id="id_book_set-1-id" /></p>')
        self.assertHTMLEqual(formset.forms[2].as_p(),
            '<p><label for="id_book_set-2-title">Title:</label> <input id="id_book_set-2-title" type="text" name="book_set-2-title" maxlength="100" /><input type="hidden" name="book_set-2-author" value="1" id="id_book_set-2-author" /><input type="hidden" name="book_set-2-id" id="id_book_set-2-id" /></p>')
        data = {
            'book_set-TOTAL_FORMS': '3',  # the number of forms rendered
            'book_set-INITIAL_FORMS': '1',  # the number of forms with initial data
            'book_set-MAX_NUM_FORMS': '',  # the max number of forms
            'book_set-0-id': str(book3.id),
            'book_set-0-title': 'Flowers of Evil',
            'book_set-1-title': 'Revue des deux mondes',
            'book_set-2-title': '',
        }
        formset = AuthorBooksFormSet(data, instance=author, queryset=custom_qs)
        self.assertTrue(formset.is_valid())
    def test_custom_pk(self):
        # Editable custom primary keys must be rendered as visible fields;
        # custom PKs that are ForeignKey/OneToOneField/AutoField get the
        # appropriate widget or hidden input.
        # We need to ensure that it is displayed
        CustomPrimaryKeyFormSet = modelformset_factory(CustomPrimaryKey, fields="__all__")
        formset = CustomPrimaryKeyFormSet()
        self.assertEqual(len(formset.forms), 1)
        self.assertHTMLEqual(formset.forms[0].as_p(),
            '<p><label for="id_form-0-my_pk">My pk:</label> <input id="id_form-0-my_pk" type="text" name="form-0-my_pk" maxlength="10" /></p>\n'
            '<p><label for="id_form-0-some_field">Some field:</label> <input id="id_form-0-some_field" type="text" name="form-0-some_field" maxlength="100" /></p>')
        # Custom primary keys with ForeignKey, OneToOneField and AutoField ############
        place = Place.objects.create(pk=1, name='Giordanos', city='Chicago')
        FormSet = inlineformset_factory(Place, Owner, extra=2, can_delete=False, fields="__all__")
        formset = FormSet(instance=place)
        self.assertEqual(len(formset.forms), 2)
        self.assertHTMLEqual(formset.forms[0].as_p(),
            '<p><label for="id_owner_set-0-name">Name:</label> <input id="id_owner_set-0-name" type="text" name="owner_set-0-name" maxlength="100" /><input type="hidden" name="owner_set-0-place" value="1" id="id_owner_set-0-place" /><input type="hidden" name="owner_set-0-auto_id" id="id_owner_set-0-auto_id" /></p>')
        self.assertHTMLEqual(formset.forms[1].as_p(),
            '<p><label for="id_owner_set-1-name">Name:</label> <input id="id_owner_set-1-name" type="text" name="owner_set-1-name" maxlength="100" /><input type="hidden" name="owner_set-1-place" value="1" id="id_owner_set-1-place" /><input type="hidden" name="owner_set-1-auto_id" id="id_owner_set-1-auto_id" /></p>')
        data = {
            'owner_set-TOTAL_FORMS': '2',
            'owner_set-INITIAL_FORMS': '0',
            'owner_set-MAX_NUM_FORMS': '',
            'owner_set-0-auto_id': '',
            'owner_set-0-name': 'Joe Perry',
            'owner_set-1-auto_id': '',
            'owner_set-1-name': '',
        }
        formset = FormSet(data, instance=place)
        self.assertTrue(formset.is_valid())
        saved = formset.save()
        # Only the non-empty form saves.
        self.assertEqual(len(saved), 1)
        owner1, = saved
        self.assertEqual(owner1.name, 'Joe Perry')
        self.assertEqual(owner1.place.name, 'Giordanos')
        formset = FormSet(instance=place)
        self.assertEqual(len(formset.forms), 3)
        self.assertHTMLEqual(formset.forms[0].as_p(),
            '<p><label for="id_owner_set-0-name">Name:</label> <input id="id_owner_set-0-name" type="text" name="owner_set-0-name" value="Joe Perry" maxlength="100" /><input type="hidden" name="owner_set-0-place" value="1" id="id_owner_set-0-place" /><input type="hidden" name="owner_set-0-auto_id" value="%d" id="id_owner_set-0-auto_id" /></p>'
            % owner1.auto_id)
        self.assertHTMLEqual(formset.forms[1].as_p(),
            '<p><label for="id_owner_set-1-name">Name:</label> <input id="id_owner_set-1-name" type="text" name="owner_set-1-name" maxlength="100" /><input type="hidden" name="owner_set-1-place" value="1" id="id_owner_set-1-place" /><input type="hidden" name="owner_set-1-auto_id" id="id_owner_set-1-auto_id" /></p>')
        self.assertHTMLEqual(formset.forms[2].as_p(),
            '<p><label for="id_owner_set-2-name">Name:</label> <input id="id_owner_set-2-name" type="text" name="owner_set-2-name" maxlength="100" /><input type="hidden" name="owner_set-2-place" value="1" id="id_owner_set-2-place" /><input type="hidden" name="owner_set-2-auto_id" id="id_owner_set-2-auto_id" /></p>')
        data = {
            'owner_set-TOTAL_FORMS': '3',
            'owner_set-INITIAL_FORMS': '1',
            'owner_set-MAX_NUM_FORMS': '',
            'owner_set-0-auto_id': six.text_type(owner1.auto_id),
            'owner_set-0-name': 'Joe Perry',
            'owner_set-1-auto_id': '',
            'owner_set-1-name': 'Jack Berry',
            'owner_set-2-auto_id': '',
            'owner_set-2-name': '',
        }
        formset = FormSet(data, instance=place)
        self.assertTrue(formset.is_valid())
        saved = formset.save()
        # Form 0 is unchanged, form 2 empty: only the new owner is returned.
        self.assertEqual(len(saved), 1)
        owner2, = saved
        self.assertEqual(owner2.name, 'Jack Berry')
        self.assertEqual(owner2.place.name, 'Giordanos')
        # Ensure a custom primary key that is a ForeignKey or OneToOneField get rendered for the user to choose.
        FormSet = modelformset_factory(OwnerProfile, fields="__all__")
        formset = FormSet()
        self.assertHTMLEqual(formset.forms[0].as_p(),
            '<p><label for="id_form-0-owner">Owner:</label> <select name="form-0-owner" id="id_form-0-owner">\n'
            '<option value="" selected="selected">---------</option>\n'
            '<option value="%d">Joe Perry at Giordanos</option>\n'
            '<option value="%d">Jack Berry at Giordanos</option>\n'
            '</select></p>\n'
            '<p><label for="id_form-0-age">Age:</label> <input type="number" name="form-0-age" id="id_form-0-age" min="0" /></p>'
            % (owner1.auto_id, owner2.auto_id))
        owner1 = Owner.objects.get(name='Joe Perry')
        FormSet = inlineformset_factory(Owner, OwnerProfile, max_num=1, can_delete=False, fields="__all__")
        self.assertEqual(FormSet.max_num, 1)
        formset = FormSet(instance=owner1)
        self.assertEqual(len(formset.forms), 1)
        self.assertHTMLEqual(formset.forms[0].as_p(),
            '<p><label for="id_ownerprofile-0-age">Age:</label> <input type="number" name="ownerprofile-0-age" id="id_ownerprofile-0-age" min="0" /><input type="hidden" name="ownerprofile-0-owner" value="%d" id="id_ownerprofile-0-owner" /></p>'
            % owner1.auto_id)
        data = {
            'ownerprofile-TOTAL_FORMS': '1',
            'ownerprofile-INITIAL_FORMS': '0',
            'ownerprofile-MAX_NUM_FORMS': '1',
            'ownerprofile-0-owner': '',
            'ownerprofile-0-age': '54',
        }
        formset = FormSet(data, instance=owner1)
        self.assertTrue(formset.is_valid())
        saved = formset.save()
        self.assertEqual(len(saved), 1)
        profile1, = saved
        self.assertEqual(profile1.owner, owner1)
        self.assertEqual(profile1.age, 54)
        formset = FormSet(instance=owner1)
        self.assertEqual(len(formset.forms), 1)
        self.assertHTMLEqual(formset.forms[0].as_p(),
            '<p><label for="id_ownerprofile-0-age">Age:</label> <input type="number" name="ownerprofile-0-age" value="54" id="id_ownerprofile-0-age" min="0" /><input type="hidden" name="ownerprofile-0-owner" value="%d" id="id_ownerprofile-0-owner" /></p>'
            % owner1.auto_id)
        data = {
            'ownerprofile-TOTAL_FORMS': '1',
            'ownerprofile-INITIAL_FORMS': '1',
            'ownerprofile-MAX_NUM_FORMS': '1',
            'ownerprofile-0-owner': six.text_type(owner1.auto_id),
            'ownerprofile-0-age': '55',
        }
        formset = FormSet(data, instance=owner1)
        self.assertTrue(formset.is_valid())
        saved = formset.save()
        self.assertEqual(len(saved), 1)
        profile1, = saved
        self.assertEqual(profile1.owner, owner1)
        self.assertEqual(profile1.age, 55)
    def test_unique_true_enforces_max_num_one(self):
        # ForeignKey with unique=True should enforce max_num=1
        # (a unique FK means at most one related object can exist, so the
        # inline formset renders exactly one form).
        place = Place.objects.create(pk=1, name='Giordanos', city='Chicago')
        FormSet = inlineformset_factory(Place, Location, can_delete=False, fields="__all__")
        self.assertEqual(FormSet.max_num, 1)
        formset = FormSet(instance=place)
        self.assertEqual(len(formset.forms), 1)
        self.assertHTMLEqual(formset.forms[0].as_p(),
            '<p><label for="id_location_set-0-lat">Lat:</label> <input id="id_location_set-0-lat" type="text" name="location_set-0-lat" maxlength="100" /></p>\n'
            '<p><label for="id_location_set-0-lon">Lon:</label> <input id="id_location_set-0-lon" type="text" name="location_set-0-lon" maxlength="100" /><input type="hidden" name="location_set-0-place" value="1" id="id_location_set-0-place" /><input type="hidden" name="location_set-0-id" id="id_location_set-0-id" /></p>')
def test_foreign_keys_in_parents(self):
self.assertEqual(type(_get_foreign_key(Restaurant, Owner)), models.ForeignKey)
self.assertEqual(type(_get_foreign_key(MexicanRestaurant, Owner)), models.ForeignKey)
def test_unique_validation(self):
FormSet = modelformset_factory(Product, fields="__all__", extra=1)
data = {
'form-TOTAL_FORMS': '1',
'form-INITIAL_FORMS': '0',
'form-MAX_NUM_FORMS': '',
'form-0-slug': 'car-red',
}
formset = FormSet(data)
self.assertTrue(formset.is_valid())
saved = formset.save()
self.assertEqual(len(saved), 1)
product1, = saved
self.assertEqual(product1.slug, 'car-red')
data = {
'form-TOTAL_FORMS': '1',
'form-INITIAL_FORMS': '0',
'form-MAX_NUM_FORMS': '',
'form-0-slug': 'car-red',
}
formset = FormSet(data)
self.assertFalse(formset.is_valid())
self.assertEqual(formset.errors, [{'slug': ['Product with this Slug already exists.']}])
    def test_modelformset_validate_max_flag(self):
        # If validate_max is set and max_num is less than TOTAL_FORMS in the
        # data, then throw an exception. MAX_NUM_FORMS in the data is
        # irrelevant here (it's output as a hint for the client but its
        # value in the returned data is not checked)
        data = {
            'form-TOTAL_FORMS': '2',
            'form-INITIAL_FORMS': '0',
            'form-MAX_NUM_FORMS': '2',  # should be ignored
            'form-0-price': '12.00',
            'form-0-quantity': '1',
            'form-1-price': '24.00',
            'form-1-quantity': '2',
        }
        # With validate_max=True, 2 submitted forms > max_num=1 is a
        # formset-level (non-form) error.
        FormSet = modelformset_factory(Price, fields="__all__", extra=1, max_num=1, validate_max=True)
        formset = FormSet(data)
        self.assertFalse(formset.is_valid())
        self.assertEqual(formset.non_form_errors(), ['Please submit 1 or fewer forms.'])
        # Now test the same thing without the validate_max flag to ensure
        # default behavior is unchanged
        FormSet = modelformset_factory(Price, fields="__all__", extra=1, max_num=1)
        formset = FormSet(data)
        self.assertTrue(formset.is_valid())
    def test_unique_together_validation(self):
        # A (price, quantity) pair saved once must be rejected when the same
        # pair is submitted again (unique_together on the Price model).
        FormSet = modelformset_factory(Price, fields="__all__", extra=1)
        data = {
            'form-TOTAL_FORMS': '1',
            'form-INITIAL_FORMS': '0',
            'form-MAX_NUM_FORMS': '',
            'form-0-price': '12.00',
            'form-0-quantity': '1',
        }
        formset = FormSet(data)
        self.assertTrue(formset.is_valid())
        saved = formset.save()
        self.assertEqual(len(saved), 1)
        price1, = saved
        self.assertEqual(price1.price, Decimal('12.00'))
        self.assertEqual(price1.quantity, 1)
        # Identical payload: now collides with the row saved above.
        data = {
            'form-TOTAL_FORMS': '1',
            'form-INITIAL_FORMS': '0',
            'form-MAX_NUM_FORMS': '',
            'form-0-price': '12.00',
            'form-0-quantity': '1',
        }
        formset = FormSet(data)
        self.assertFalse(formset.is_valid())
        self.assertEqual(formset.errors, [{'__all__': ['Price with this Price and Quantity already exists.']}])
    def test_unique_together_with_inlineformset_factory(self):
        # Also see bug #8882.
        repository = Repository.objects.create(name='Test Repo')
        FormSet = inlineformset_factory(Repository, Revision, extra=1, fields="__all__")
        data = {
            'revision_set-TOTAL_FORMS': '1',
            'revision_set-INITIAL_FORMS': '0',
            'revision_set-MAX_NUM_FORMS': '',
            'revision_set-0-repository': repository.pk,
            'revision_set-0-revision': '146239817507f148d448db38840db7c3cbf47c76',
            'revision_set-0-DELETE': '',
        }
        formset = FormSet(data, instance=repository)
        self.assertTrue(formset.is_valid())
        saved = formset.save()
        self.assertEqual(len(saved), 1)
        revision1, = saved
        self.assertEqual(revision1.repository, repository)
        self.assertEqual(revision1.revision, '146239817507f148d448db38840db7c3cbf47c76')
        # attempt to save the same revision against the same repo.
        data = {
            'revision_set-TOTAL_FORMS': '1',
            'revision_set-INITIAL_FORMS': '0',
            'revision_set-MAX_NUM_FORMS': '',
            'revision_set-0-repository': repository.pk,
            'revision_set-0-revision': '146239817507f148d448db38840db7c3cbf47c76',
            'revision_set-0-DELETE': '',
        }
        formset = FormSet(data, instance=repository)
        self.assertFalse(formset.is_valid())
        self.assertEqual(formset.errors, [{'__all__': ['Revision with this Repository and Revision already exists.']}])
        # unique_together with inlineformset_factory with overridden form fields
        # Also see #9494
        # Even though 'repository' is excluded from the form fields, the
        # unique_together check must still apply (the FK comes from the
        # formset's instance) and reject the duplicate revision.
        FormSet = inlineformset_factory(Repository, Revision, fields=('revision',), extra=1)
        data = {
            'revision_set-TOTAL_FORMS': '1',
            'revision_set-INITIAL_FORMS': '0',
            'revision_set-MAX_NUM_FORMS': '',
            'revision_set-0-repository': repository.pk,
            'revision_set-0-revision': '146239817507f148d448db38840db7c3cbf47c76',
            'revision_set-0-DELETE': '',
        }
        formset = FormSet(data, instance=repository)
        self.assertFalse(formset.is_valid())
    def test_callable_defaults(self):
        # Use of callable defaults (see bug #7975).
        person = Person.objects.create(name='Ringo')
        FormSet = inlineformset_factory(Person, Membership, can_delete=False, extra=1, fields="__all__")
        formset = FormSet(instance=person)
        # Django will render a hidden field for model fields that have a callable
        # default. This is required to ensure the value is tested for change correctly
        # when determine what extra forms have changed to save.
        self.assertEqual(len(formset.forms), 1)  # this formset only has one form
        form = formset.forms[0]
        now = form.fields['date_joined'].initial()
        result = form.as_p()
        # Normalize the rendered timestamps so the HTML comparison below is
        # not time-dependent.
        result = re.sub(r'\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}(?:\.\d+)?', '__DATETIME__', result)
        self.assertHTMLEqual(result,
            '<p><label for="id_membership_set-0-date_joined">Date joined:</label> <input type="text" name="membership_set-0-date_joined" value="__DATETIME__" id="id_membership_set-0-date_joined" /><input type="hidden" name="initial-membership_set-0-date_joined" value="__DATETIME__" id="initial-membership_set-0-id_membership_set-0-date_joined" /></p>\n'
            '<p><label for="id_membership_set-0-karma">Karma:</label> <input type="number" name="membership_set-0-karma" id="id_membership_set-0-karma" /><input type="hidden" name="membership_set-0-person" value="%d" id="id_membership_set-0-person" /><input type="hidden" name="membership_set-0-id" id="id_membership_set-0-id" /></p>'
            % person.id)
        # test for validation with callable defaults. Validations rely on hidden fields
        data = {
            'membership_set-TOTAL_FORMS': '1',
            'membership_set-INITIAL_FORMS': '0',
            'membership_set-MAX_NUM_FORMS': '',
            'membership_set-0-date_joined': six.text_type(now.strftime('%Y-%m-%d %H:%M:%S')),
            'initial-membership_set-0-date_joined': six.text_type(now.strftime('%Y-%m-%d %H:%M:%S')),
            'membership_set-0-karma': '',
        }
        formset = FormSet(data, instance=person)
        self.assertTrue(formset.is_valid())
        # now test for when the data changes
        one_day_later = now + datetime.timedelta(days=1)
        filled_data = {
            'membership_set-TOTAL_FORMS': '1',
            'membership_set-INITIAL_FORMS': '0',
            'membership_set-MAX_NUM_FORMS': '',
            'membership_set-0-date_joined': six.text_type(one_day_later.strftime('%Y-%m-%d %H:%M:%S')),
            'initial-membership_set-0-date_joined': six.text_type(now.strftime('%Y-%m-%d %H:%M:%S')),
            'membership_set-0-karma': '',
        }
        formset = FormSet(filled_data, instance=person)
        # The changed date makes the extra form "changed", so its required
        # fields are now validated and the formset fails.
        self.assertFalse(formset.is_valid())
        # now test with split datetime fields
        class MembershipForm(forms.ModelForm):
            date_joined = forms.SplitDateTimeField(initial=now)
            class Meta:
                model = Membership
                fields = "__all__"
            def __init__(self, **kwargs):
                super(MembershipForm, self).__init__(**kwargs)
                self.fields['date_joined'].widget = forms.SplitDateTimeWidget()
        FormSet = inlineformset_factory(Person, Membership, form=MembershipForm, can_delete=False, extra=1, fields="__all__")
        data = {
            'membership_set-TOTAL_FORMS': '1',
            'membership_set-INITIAL_FORMS': '0',
            'membership_set-MAX_NUM_FORMS': '',
            'membership_set-0-date_joined_0': six.text_type(now.strftime('%Y-%m-%d')),
            'membership_set-0-date_joined_1': six.text_type(now.strftime('%H:%M:%S')),
            'initial-membership_set-0-date_joined': six.text_type(now.strftime('%Y-%m-%d %H:%M:%S')),
            'membership_set-0-karma': '',
        }
        formset = FormSet(data, instance=person)
        self.assertTrue(formset.is_valid())
def test_inlineformset_factory_with_null_fk(self):
# inlineformset_factory tests with fk having null=True. see #9462.
# create some data that will exhibit the issue
team = Team.objects.create(name="Red Vipers")
Player(name="Timmy").save()
Player(name="Bobby", team=team).save()
PlayerInlineFormSet = inlineformset_factory(Team, Player, fields="__all__")
formset = PlayerInlineFormSet()
self.assertQuerysetEqual(formset.get_queryset(), [])
formset = PlayerInlineFormSet(instance=team)
players = formset.get_queryset()
self.assertEqual(len(players), 1)
player1, = players
self.assertEqual(player1.team, team)
self.assertEqual(player1.name, 'Bobby')
    def test_model_formset_with_custom_pk(self):
        # a formset for a Model that has a custom primary key that still needs to be
        # added to the formset automatically
        # (the 'restaurant' PK field appears even though only
        # 'tacos_are_yummy' was requested).
        FormSet = modelformset_factory(ClassyMexicanRestaurant, fields=["tacos_are_yummy"])
        self.assertEqual(sorted(FormSet().forms[0].fields.keys()), ['restaurant', 'tacos_are_yummy'])
    def test_model_formset_with_initial_model_instance(self):
        # has_changed should compare model instance and primary key
        # see #18898
        FormSet = modelformset_factory(Poem, fields='__all__')
        john_milton = Poet(name="John Milton")
        john_milton.save()
        data = {
            'form-TOTAL_FORMS': 1,
            'form-INITIAL_FORMS': 0,
            'form-MAX_NUM_FORMS': '',
            'form-0-name': '',
            # Submitted PK string must compare equal to the model instance
            # passed as initial data below.
            'form-0-poet': str(john_milton.id),
        }
        formset = FormSet(initial=[{'poet': john_milton}], data=data)
        self.assertFalse(formset.extra_forms[0].has_changed())
    def test_model_formset_with_initial_queryset(self):
        # has_changed should work with queryset and list of pk's
        # see #18898
        FormSet = modelformset_factory(AuthorMeeting, fields='__all__')
        Author.objects.create(pk=1, name='Charles Baudelaire')
        data = {
            'form-TOTAL_FORMS': 1,
            'form-INITIAL_FORMS': 0,
            'form-MAX_NUM_FORMS': '',
            'form-0-name': '',
            'form-0-created': '',
            # Submitted list of PKs must compare equal to the queryset
            # passed as initial data below.
            'form-0-authors': list(Author.objects.values_list('id', flat=True)),
        }
        formset = FormSet(initial=[{'authors': Author.objects.all()}], data=data)
        self.assertFalse(formset.extra_forms[0].has_changed())
    def test_prevent_duplicates_from_with_the_same_formset(self):
        # Duplicate values *within one submission* must be caught before any
        # database uniqueness check: plain unique fields, unique_together,
        # and unique_for_date/year/month all apply.
        FormSet = modelformset_factory(Product, fields="__all__", extra=2)
        data = {
            'form-TOTAL_FORMS': 2,
            'form-INITIAL_FORMS': 0,
            'form-MAX_NUM_FORMS': '',
            'form-0-slug': 'red_car',
            'form-1-slug': 'red_car',
        }
        formset = FormSet(data)
        self.assertFalse(formset.is_valid())
        self.assertEqual(formset._non_form_errors,
            ['Please correct the duplicate data for slug.'])
        # unique_together (price, quantity) duplicated across two forms.
        FormSet = modelformset_factory(Price, fields="__all__", extra=2)
        data = {
            'form-TOTAL_FORMS': 2,
            'form-INITIAL_FORMS': 0,
            'form-MAX_NUM_FORMS': '',
            'form-0-price': '25',
            'form-0-quantity': '7',
            'form-1-price': '25',
            'form-1-quantity': '7',
        }
        formset = FormSet(data)
        self.assertFalse(formset.is_valid())
        self.assertEqual(formset._non_form_errors,
            ['Please correct the duplicate data for price and quantity, which must be unique.'])
        # Only the price field is specified, this should skip any unique checks since
        # the unique_together is not fulfilled. This will fail with a KeyError if broken.
        FormSet = modelformset_factory(Price, fields=("price",), extra=2)
        data = {
            'form-TOTAL_FORMS': '2',
            'form-INITIAL_FORMS': '0',
            'form-MAX_NUM_FORMS': '',
            'form-0-price': '24',
            'form-1-price': '24',
        }
        formset = FormSet(data)
        self.assertTrue(formset.is_valid())
        # Duplicate titles across two *existing* books in an inline formset:
        # the error is reported on the second (offending) form.
        FormSet = inlineformset_factory(Author, Book, extra=0, fields="__all__")
        author = Author.objects.create(pk=1, name='Charles Baudelaire')
        Book.objects.create(pk=1, author=author, title='Les Paradis Artificiels')
        Book.objects.create(pk=2, author=author, title='Les Fleurs du Mal')
        Book.objects.create(pk=3, author=author, title='Flowers of Evil')
        book_ids = author.book_set.order_by('id').values_list('id', flat=True)
        data = {
            'book_set-TOTAL_FORMS': '2',
            'book_set-INITIAL_FORMS': '2',
            'book_set-MAX_NUM_FORMS': '',
            'book_set-0-title': 'The 2008 Election',
            'book_set-0-author': str(author.id),
            'book_set-0-id': str(book_ids[0]),
            'book_set-1-title': 'The 2008 Election',
            'book_set-1-author': str(author.id),
            'book_set-1-id': str(book_ids[1]),
        }
        formset = FormSet(data=data, instance=author)
        self.assertFalse(formset.is_valid())
        self.assertEqual(formset._non_form_errors,
            ['Please correct the duplicate data for title.'])
        self.assertEqual(formset.errors,
            [{}, {'__all__': ['Please correct the duplicate values below.']}])
        # unique_for_date: same title posted on the same date.
        FormSet = modelformset_factory(Post, fields="__all__", extra=2)
        data = {
            'form-TOTAL_FORMS': '2',
            'form-INITIAL_FORMS': '0',
            'form-MAX_NUM_FORMS': '',
            'form-0-title': 'blah',
            'form-0-slug': 'Morning',
            'form-0-subtitle': 'foo',
            'form-0-posted': '2009-01-01',
            'form-1-title': 'blah',
            'form-1-slug': 'Morning in Prague',
            'form-1-subtitle': 'rawr',
            'form-1-posted': '2009-01-01'
        }
        formset = FormSet(data)
        self.assertFalse(formset.is_valid())
        self.assertEqual(formset._non_form_errors,
            ['Please correct the duplicate data for title which must be unique for the date in posted.'])
        self.assertEqual(formset.errors,
            [{}, {'__all__': ['Please correct the duplicate values below.']}])
        # unique_for_year: same slug in the same year (different dates).
        data = {
            'form-TOTAL_FORMS': '2',
            'form-INITIAL_FORMS': '0',
            'form-MAX_NUM_FORMS': '',
            'form-0-title': 'foo',
            'form-0-slug': 'Morning in Prague',
            'form-0-subtitle': 'foo',
            'form-0-posted': '2009-01-01',
            'form-1-title': 'blah',
            'form-1-slug': 'Morning in Prague',
            'form-1-subtitle': 'rawr',
            'form-1-posted': '2009-08-02'
        }
        formset = FormSet(data)
        self.assertFalse(formset.is_valid())
        self.assertEqual(formset._non_form_errors,
            ['Please correct the duplicate data for slug which must be unique for the year in posted.'])
        # unique_for_month: same subtitle in the same month (different years).
        data = {
            'form-TOTAL_FORMS': '2',
            'form-INITIAL_FORMS': '0',
            'form-MAX_NUM_FORMS': '',
            'form-0-title': 'foo',
            'form-0-slug': 'Morning in Prague',
            'form-0-subtitle': 'rawr',
            'form-0-posted': '2008-08-01',
            'form-1-title': 'blah',
            'form-1-slug': 'Prague',
            'form-1-subtitle': 'rawr',
            'form-1-posted': '2009-08-02'
        }
        formset = FormSet(data)
        self.assertFalse(formset.is_valid())
        self.assertEqual(formset._non_form_errors,
            ['Please correct the duplicate data for subtitle which must be unique for the month in posted.'])
class TestModelFormsetOverridesTroughFormMeta(TestCase):
    """Widget/label/help_text/error_messages overrides passed to
    modelformset_factory / inlineformset_factory must be forwarded to the
    generated ModelForm's Meta and show up on the rendered form.
    """
    # NOTE(review): class name typo ("Trough" -> "Through") kept for
    # backward compatibility with any external references.
    def test_modelformset_factory_widgets(self):
        # The 'widgets' dict is forwarded to the generated form.
        widgets = {
            'name': forms.TextInput(attrs={'class': 'poet'})
        }
        PoetFormSet = modelformset_factory(Poet, fields="__all__", widgets=widgets)
        form = PoetFormSet.form()
        self.assertHTMLEqual(
            "%s" % form['name'],
            '<input id="id_name" maxlength="100" type="text" class="poet" name="name" />'
        )
    def test_inlineformset_factory_widgets(self):
        # Same as above, but through inlineformset_factory.
        widgets = {
            'title': forms.TextInput(attrs={'class': 'book'})
        }
        BookFormSet = inlineformset_factory(Author, Book, widgets=widgets, fields="__all__")
        form = BookFormSet.form()
        self.assertHTMLEqual(
            "%s" % form['title'],
            '<input class="book" id="id_title" maxlength="100" name="title" type="text" />'
        )
    def test_modelformset_factory_labels_overrides(self):
        BookFormSet = modelformset_factory(Book, fields="__all__", labels={
            'title': 'Name'
        })
        form = BookFormSet.form()
        self.assertHTMLEqual(form['title'].label_tag(), '<label for="id_title">Name:</label>')
    def test_inlineformset_factory_labels_overrides(self):
        BookFormSet = inlineformset_factory(Author, Book, fields="__all__", labels={
            'title': 'Name'
        })
        form = BookFormSet.form()
        self.assertHTMLEqual(form['title'].label_tag(), '<label for="id_title">Name:</label>')
    def test_modelformset_factory_help_text_overrides(self):
        BookFormSet = modelformset_factory(Book, fields="__all__", help_texts={
            'title': 'Choose carefully.'
        })
        form = BookFormSet.form()
        self.assertEqual(form['title'].help_text, 'Choose carefully.')
    def test_inlineformset_factory_help_text_overrides(self):
        BookFormSet = inlineformset_factory(Author, Book, fields="__all__", help_texts={
            'title': 'Choose carefully.'
        })
        form = BookFormSet.form()
        self.assertEqual(form['title'].help_text, 'Choose carefully.')
    def test_modelformset_factory_error_messages_overrides(self):
        # Overridden max_length message replaces the default one.
        author = Author.objects.create(pk=1, name='Charles Baudelaire')
        BookFormSet = modelformset_factory(Book, fields="__all__", error_messages={
            'title': {
                'max_length': 'Title too long!!'
            }
        })
        form = BookFormSet.form(data={'title': 'Foo ' * 30, 'author': author.id})
        form.full_clean()
        self.assertEqual(form.errors, {'title': ['Title too long!!']})
    def test_inlineformset_factory_error_messages_overrides(self):
        author = Author.objects.create(pk=1, name='Charles Baudelaire')
        BookFormSet = inlineformset_factory(Author, Book, fields="__all__", error_messages={
            'title': {
                'max_length': 'Title too long!!'
            }
        })
        form = BookFormSet.form(data={'title': 'Foo ' * 30, 'author': author.id})
        form.full_clean()
        self.assertEqual(form.errors, {'title': ['Title too long!!']})
| |
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Quotas for volumes."""
import datetime
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import importutils
from oslo_utils import timeutils
import six
from cinder import context
from cinder import db
from cinder import exception
from cinder.i18n import _, _LE
from cinder.openstack.common import versionutils
LOG = logging.getLogger(__name__)
# Quota-related configuration options. Each option defines the per-project
# default limit for a resource; the quota driver (see DbQuotaDriver below)
# falls back to these when no project- or class-level override exists.
quota_opts = [
    cfg.IntOpt('quota_volumes',
               default=10,
               help='Number of volumes allowed per project'),
    cfg.IntOpt('quota_snapshots',
               default=10,
               help='Number of volume snapshots allowed per project'),
    cfg.IntOpt('quota_consistencygroups',
               default=10,
               help='Number of consistencygroups allowed per project'),
    cfg.IntOpt('quota_gigabytes',
               default=1000,
               help='Total amount of storage, in gigabytes, allowed '
                    'for volumes and snapshots per project'),
    cfg.IntOpt('quota_backups',
               default=10,
               help='Number of volume backups allowed per project'),
    cfg.IntOpt('quota_backup_gigabytes',
               default=1000,
               help='Total amount of storage, in gigabytes, allowed '
                    'for backups per project'),
    cfg.IntOpt('reservation_expire',
               default=86400,
               help='Number of seconds until a reservation expires'),
    cfg.IntOpt('until_refresh',
               default=0,
               help='Count of reservations until usage is refreshed'),
    cfg.IntOpt('max_age',
               default=0,
               help='Number of seconds between subsequent usage refreshes'),
    cfg.StrOpt('quota_driver',
               default='cinder.quota.DbQuotaDriver',
               help='Default driver to use for quota checks'),
    cfg.BoolOpt('use_default_quota_class',
                default=True,
                help='Enables or disables use of default quota class '
                     'with default quota.'),
    cfg.IntOpt('per_volume_size_limit',
               default=-1,
               help='Max size allowed per volume, in gigabytes'), ]

CONF = cfg.CONF
# Register at import time so the options are available to all consumers.
CONF.register_opts(quota_opts)
class DbQuotaDriver(object):
    """Driver to perform check to enforcement of quotas.

    Also allows to obtain quota information.
    The default driver utilizes the local database.
    """

    def get_by_project(self, context, project_id, resource_name):
        """Get a specific quota by project."""
        return db.quota_get(context, project_id, resource_name)

    def get_by_class(self, context, quota_class, resource_name):
        """Get a specific quota by quota class."""
        return db.quota_class_get(context, quota_class, resource_name)

    def get_default(self, context, resource):
        """Get a specific default quota for a resource."""
        default_quotas = db.quota_class_get_default(context)
        return default_quotas.get(resource.name, resource.default)

    def get_defaults(self, context, resources):
        """Given a list of resources, retrieve the default quotas.

        Use the class quotas named `_DEFAULT_QUOTA_NAME` as default quotas,
        if it exists.

        :param context: The request context, for access checks.
        :param resources: A dictionary of the registered resources.
        """
        quotas = {}
        default_quotas = {}
        if CONF.use_default_quota_class:
            default_quotas = db.quota_class_get_default(context)
        for resource in resources.values():
            if resource.name not in default_quotas:
                # Falling back to the per-resource config flag is
                # deprecated; warn so operators migrate to the default
                # quota class.
                versionutils.report_deprecated_feature(LOG, _(
                    "Default quota for resource: %(res)s is set "
                    "by the default quota flag: quota_%(res)s, "
                    "it is now deprecated. Please use the "
                    "default quota class for default "
                    "quota.") % {'res': resource.name})
            quotas[resource.name] = default_quotas.get(resource.name,
                                                       resource.default)
        return quotas

    def get_class_quotas(self, context, resources, quota_class,
                         defaults=True):
        """Given list of resources, retrieve the quotas for given quota class.

        :param context: The request context, for access checks.
        :param resources: A dictionary of the registered resources.
        :param quota_class: The name of the quota class to return
                            quotas for.
        :param defaults: If True, the default value will be reported
                         if there is no specific value for the
                         resource.
        """
        quotas = {}
        default_quotas = {}
        class_quotas = db.quota_class_get_all_by_name(context, quota_class)
        if defaults:
            default_quotas = db.quota_class_get_default(context)
        for resource in resources.values():
            if resource.name in class_quotas:
                quotas[resource.name] = class_quotas[resource.name]
                continue
            if defaults:
                quotas[resource.name] = default_quotas.get(resource.name,
                                                           resource.default)
        return quotas

    def get_project_quotas(self, context, resources, project_id,
                           quota_class=None, defaults=True,
                           usages=True):
        """Given a list of resources, retrieve the quotas for the given
        project.

        :param context: The request context, for access checks.
        :param resources: A dictionary of the registered resources.
        :param project_id: The ID of the project to return quotas for.
        :param quota_class: If project_id != context.project_id, the
                            quota class cannot be determined.  This
                            parameter allows it to be specified.  It
                            will be ignored if project_id ==
                            context.project_id.
        :param defaults: If True, the quota class value (or the
                         default value, if there is no value from the
                         quota class) will be reported if there is no
                         specific value for the resource.
        :param usages: If True, the current in_use and reserved counts
                       will also be returned.
        """
        quotas = {}
        project_quotas = db.quota_get_all_by_project(context, project_id)
        if usages:
            project_usages = db.quota_usage_get_all_by_project(context,
                                                               project_id)

        # Get the quotas for the appropriate class.  If the project ID
        # matches the one in the context, we use the quota_class from
        # the context, otherwise, we use the provided quota_class (if
        # any)
        if project_id == context.project_id:
            quota_class = context.quota_class
        if quota_class:
            class_quotas = db.quota_class_get_all_by_name(context, quota_class)
        else:
            class_quotas = {}

        default_quotas = self.get_defaults(context, resources)

        for resource in resources.values():
            # Omit default/quota class values
            if not defaults and resource.name not in project_quotas:
                continue

            # Resolution order: project-specific, then class, then default.
            quotas[resource.name] = dict(
                limit=project_quotas.get(
                    resource.name,
                    class_quotas.get(resource.name,
                                     default_quotas[resource.name])),
            )

            # Include usages if desired.  This is optional because one
            # internal consumer of this interface wants to access the
            # usages directly from inside a transaction.
            if usages:
                usage = project_usages.get(resource.name, {})
                quotas[resource.name].update(
                    in_use=usage.get('in_use', 0),
                    reserved=usage.get('reserved', 0), )
        return quotas

    def _get_quotas(self, context, resources, keys, has_sync, project_id=None):
        """A helper method which retrieves the quotas for specific resources.

        This specific resource is identified by keys, and which apply to the
        current context.

        :param context: The request context, for access checks.
        :param resources: A dictionary of the registered resources.
        :param keys: A list of the desired quotas to retrieve.
        :param has_sync: If True, indicates that the resource must
                         have a sync attribute; if False, indicates
                         that the resource must NOT have a sync
                         attribute.
        :param project_id: Specify the project_id if current context
                           is admin and admin wants to impact on
                           common user's tenant.
        """
        # Filter resources on the presence/absence of a 'sync' attribute
        # (ReservableResource only attaches 'sync' when one was supplied).
        if has_sync:
            def sync_filt(x):
                return hasattr(x, 'sync')
        else:
            def sync_filt(x):
                return not hasattr(x, 'sync')
        desired = set(keys)
        sub_resources = {k: v for k, v in resources.items()
                         if k in desired and sync_filt(v)}

        # Make sure we accounted for all of them...
        if len(keys) != len(sub_resources):
            unknown = desired - set(sub_resources.keys())
            raise exception.QuotaResourceUnknown(unknown=sorted(unknown))

        # Grab and return the quotas (without usages)
        quotas = self.get_project_quotas(context, sub_resources,
                                         project_id,
                                         context.quota_class, usages=False)

        return {k: v['limit'] for k, v in quotas.items()}

    def limit_check(self, context, resources, values, project_id=None):
        """Check simple quota limits.

        For limits--those quotas for which there is no usage
        synchronization function--this method checks that a set of
        proposed values are permitted by the limit restriction.

        This method will raise a QuotaResourceUnknown exception if a
        given resource is unknown or if it is not a simple limit
        resource.

        If any of the proposed values is over the defined quota, an
        OverQuota exception will be raised with the sorted list of the
        resources which are too high.  Otherwise, the method returns
        nothing.

        :param context: The request context, for access checks.
        :param resources: A dictionary of the registered resources.
        :param values: A dictionary of the values to check against the
                       quota.
        :param project_id: Specify the project_id if current context
                           is admin and admin wants to impact on
                           common user's tenant.
        """
        # Ensure no value is less than zero
        unders = [key for key, val in values.items() if val < 0]
        if unders:
            raise exception.InvalidQuotaValue(unders=sorted(unders))

        # If project_id is None, then we use the project_id in context
        if project_id is None:
            project_id = context.project_id

        # Get the applicable quotas
        quotas = self._get_quotas(context, resources, values.keys(),
                                  has_sync=False, project_id=project_id)

        # Check the quotas and construct a list of the resources that
        # would be put over limit by the desired values.  A negative
        # quota means "unlimited" and is never over.
        overs = [key for key, val in values.items()
                 if quotas[key] >= 0 and quotas[key] < val]
        if overs:
            raise exception.OverQuota(overs=sorted(overs), quotas=quotas,
                                      usages={})

    def reserve(self, context, resources, deltas, expire=None,
                project_id=None):
        """Check quotas and reserve resources.

        For counting quotas--those quotas for which there is a usage
        synchronization function--this method checks quotas against
        current usage and the desired deltas.

        This method will raise a QuotaResourceUnknown exception if a
        given resource is unknown or if it does not have a usage
        synchronization function.

        If any of the proposed values is over the defined quota, an
        OverQuota exception will be raised with the sorted list of the
        resources which are too high.  Otherwise, the method returns a
        list of reservation UUIDs which were created.

        :param context: The request context, for access checks.
        :param resources: A dictionary of the registered resources.
        :param deltas: A dictionary of the proposed delta changes.
        :param expire: An optional parameter specifying an expiration
                       time for the reservations.  If it is a simple
                       number, it is interpreted as a number of
                       seconds and added to the current time; if it is
                       a datetime.timedelta object, it will also be
                       added to the current time.  A datetime.datetime
                       object will be interpreted as the absolute
                       expiration time.  If None is specified, the
                       default expiration time set by
                       --default-reservation-expire will be used (this
                       value will be treated as a number of seconds).
        :param project_id: Specify the project_id if current context
                           is admin and admin wants to impact on
                           common user's tenant.
        """
        # Set up the reservation expiration
        if expire is None:
            expire = CONF.reservation_expire
        # BUG FIX: this previously tested isinstance(expire, (int, long)),
        # which raises NameError on Python 3 where 'long' no longer
        # exists.  six.integer_types is (int, long) on Python 2 and
        # (int,) on Python 3; six is already imported by this module.
        if isinstance(expire, six.integer_types):
            expire = datetime.timedelta(seconds=expire)
        if isinstance(expire, datetime.timedelta):
            expire = timeutils.utcnow() + expire
        if not isinstance(expire, datetime.datetime):
            raise exception.InvalidReservationExpiration(expire=expire)

        # If project_id is None, then we use the project_id in context
        if project_id is None:
            project_id = context.project_id

        # Get the applicable quotas.
        # NOTE(Vek): We're not worried about races at this point.
        #            Yes, the admin may be in the process of reducing
        #            quotas, but that's a pretty rare thing.
        quotas = self._get_quotas(context, resources, deltas.keys(),
                                  has_sync=True, project_id=project_id)

        # NOTE(Vek): Most of the work here has to be done in the DB
        #            API, because we have to do it in a transaction,
        #            which means access to the session.  Since the
        #            session isn't available outside the DBAPI, we
        #            have to do the work there.
        return db.quota_reserve(context, resources, quotas, deltas, expire,
                                CONF.until_refresh, CONF.max_age,
                                project_id=project_id)

    def commit(self, context, reservations, project_id=None):
        """Commit reservations.

        :param context: The request context, for access checks.
        :param reservations: A list of the reservation UUIDs, as
                             returned by the reserve() method.
        :param project_id: Specify the project_id if current context
                           is admin and admin wants to impact on
                           common user's tenant.
        """
        # If project_id is None, then we use the project_id in context
        if project_id is None:
            project_id = context.project_id

        db.reservation_commit(context, reservations, project_id=project_id)

    def rollback(self, context, reservations, project_id=None):
        """Roll back reservations.

        :param context: The request context, for access checks.
        :param reservations: A list of the reservation UUIDs, as
                             returned by the reserve() method.
        :param project_id: Specify the project_id if current context
                           is admin and admin wants to impact on
                           common user's tenant.
        """
        # If project_id is None, then we use the project_id in context
        if project_id is None:
            project_id = context.project_id

        db.reservation_rollback(context, reservations, project_id=project_id)

    def destroy_by_project(self, context, project_id):
        """Destroy all limit quotas associated with a project.

        Leave usage and reservation quotas intact.

        :param context: The request context, for access checks.
        :param project_id: The ID of the project being deleted.
        """
        db.quota_destroy_by_project(context, project_id)

    def expire(self, context):
        """Expire reservations.

        Explores all currently existing reservations and rolls back
        any that have expired.

        :param context: The request context, for access checks.
        """
        db.reservation_expire(context)
class BaseResource(object):
    """A single named resource subject to quota enforcement."""

    def __init__(self, name, flag=None):
        """Initializes a Resource.

        :param name: The name of the resource, i.e., "volumes".
        :param flag: The name of the flag or configuration option
                     which specifies the default value of the quota
                     for this resource.
        """
        self.name = name
        self.flag = flag

    def quota(self, driver, context, **kwargs):
        """Resolve the effective quota for this resource.

        Lookup order is: project-specific quota, then quota-class
        quota, then the driver default.

        :param driver: A quota driver.
        :param context: The request context.
        :param project_id: The project to obtain the quota value for.
                           If not provided, it is taken from the
                           context.  If it is given as None, no
                           project-specific quota will be searched
                           for.
        :param quota_class: The quota class corresponding to the
                            project, or for which the quota is to be
                            looked up.  If not provided, it is taken
                            from the context.  If it is given as None,
                            no quota class-specific quota will be
                            searched for.  Note that the quota class
                            defaults to the value in the context,
                            which may not correspond to the project if
                            project_id is not the same as the one in
                            the context.
        """
        target_project = kwargs.get('project_id', context.project_id)
        target_class = kwargs.get('quota_class', context.quota_class)

        # Most specific first: a quota set directly on the project.
        if target_project:
            try:
                return driver.get_by_project(context, target_project,
                                             self.name)
            except exception.ProjectQuotaNotFound:
                pass

        # Next, a quota attached to the quota class.
        if target_class:
            try:
                return driver.get_by_class(context, target_class, self.name)
            except exception.QuotaClassNotFound:
                pass

        # Nothing specific found; fall back to the driver default.
        return driver.get_default(context, self)

    @property
    def default(self):
        """Return the default value of the quota (-1 when no flag is set)."""
        if self.flag:
            return CONF[self.flag]
        return -1
class ReservableResource(BaseResource):
    """Describe a reservable resource.

    Reservable resources are those resources which directly
    correspond to objects in the database, i.e., volumes, gigabytes,
    etc.  A ReservableResource must be constructed with a usage
    synchronization function, which will be called to determine the
    current counts of one or more resources.

    The usage synchronization function will be passed three
    arguments: an admin context, the project ID, and an opaque
    session object, which should in turn be passed to the
    underlying database function.  Synchronization functions
    should return a dictionary mapping resource names to the
    current in_use count for those resources; more than one
    resource and resource count may be returned.  Note that
    synchronization functions may be associated with more than one
    ReservableResource.
    """

    def __init__(self, name, sync, flag=None):
        """Initializes a ReservableResource.

        :param name: The name of the resource, i.e., "volumes".
        :param sync: A dbapi methods name which returns a dictionary
                     to resynchronize the in_use count for one or more
                     resources, as described above.
        :param flag: The name of the flag or configuration option
                     which specifies the default value of the quota
                     for this resource.
        """
        super(ReservableResource, self).__init__(name, flag=flag)
        # Attach the hook only when supplied: DbQuotaDriver._get_quotas
        # distinguishes reservable resources by the *presence* of this
        # attribute, not its value.
        if sync:
            self.sync = sync
class AbsoluteResource(BaseResource):
    """A non-reservable resource: checked against limits only, never
    reserved (it carries no usage-synchronization hook).
    """
class CountableResource(AbsoluteResource):
    """Describe a resource where counts aren't based only on the project ID.

    Countable resources are those resources which directly
    correspond to objects in the database, i.e., volumes, gigabytes,
    etc., but for which a count by project ID is inappropriate.  A
    CountableResource must be constructed with a counting
    function, which will be called to determine the current counts
    of the resource.

    The counting function will be passed the context, along with
    the extra positional and keyword arguments that are passed to
    Quota.count().  It should return an integer specifying the
    count.

    Note that this counting is not performed in a transaction-safe
    manner.  This resource class is a temporary measure to provide
    required functionality, until a better approach to solving
    this problem can be evolved.
    """

    def __init__(self, name, count, flag=None):
        """Initializes a CountableResource.

        :param name: The name of the resource, i.e., "volumes".
        :param count: A callable which returns the count of the
                      resource.  The arguments passed are as described
                      above.
        :param flag: The name of the flag or configuration option
                     which specifies the default value of the quota
                     for this resource.
        """
        super(CountableResource, self).__init__(name, flag=flag)
        # The counter callable is invoked by QuotaEngine.count().
        self.count = count
class VolumeTypeResource(ReservableResource):
    """ReservableResource scoped to one specific volume type."""

    def __init__(self, part_name, volume_type):
        """Initializes a VolumeTypeResource.

        :param part_name: The kind of resource, i.e., "volumes".
        :param volume_type: The volume type for this resource.
        """
        self.volume_type_name = volume_type['name']
        self.volume_type_id = volume_type['id']
        # Resource name is e.g. "volumes_<type name>"; the sync hook
        # name is derived from the resource kind only.
        super(VolumeTypeResource, self).__init__(
            "%s_%s" % (part_name, self.volume_type_name),
            "_sync_%s" % part_name)
class QuotaEngine(object):
    """Represent the set of recognized quotas.

    A thin facade that delegates all quota operations to a pluggable
    driver (by default the class named by CONF.quota_driver).
    """

    def __init__(self, quota_driver_class=None):
        """Initialize a Quota object."""
        if not quota_driver_class:
            quota_driver_class = CONF.quota_driver
        if isinstance(quota_driver_class, six.string_types):
            # Accept either a driver instance or a dotted class path.
            quota_driver_class = importutils.import_object(quota_driver_class)
        self._resources = {}
        self._driver = quota_driver_class

    def __contains__(self, resource):
        # Tests against self.resources (a property), which subclasses
        # may override to compute resources dynamically.
        return resource in self.resources

    def register_resource(self, resource):
        """Register a resource."""
        self._resources[resource.name] = resource

    def register_resources(self, resources):
        """Register a list of resources."""
        for resource in resources:
            self.register_resource(resource)

    def get_by_project(self, context, project_id, resource_name):
        """Get a specific quota by project."""
        return self._driver.get_by_project(context, project_id, resource_name)

    def get_by_class(self, context, quota_class, resource_name):
        """Get a specific quota by quota class."""
        return self._driver.get_by_class(context, quota_class, resource_name)

    def get_default(self, context, resource):
        """Get a specific default quota for a resource."""
        return self._driver.get_default(context, resource)

    def get_defaults(self, context):
        """Retrieve the default quotas.

        :param context: The request context, for access checks.
        """
        return self._driver.get_defaults(context, self.resources)

    def get_class_quotas(self, context, quota_class, defaults=True):
        """Retrieve the quotas for the given quota class.

        :param context: The request context, for access checks.
        :param quota_class: The name of the quota class to return
                            quotas for.
        :param defaults: If True, the default value will be reported
                         if there is no specific value for the
                         resource.
        """
        return self._driver.get_class_quotas(context, self.resources,
                                             quota_class, defaults=defaults)

    def get_project_quotas(self, context, project_id, quota_class=None,
                           defaults=True, usages=True):
        """Retrieve the quotas for the given project.

        :param context: The request context, for access checks.
        :param project_id: The ID of the project to return quotas for.
        :param quota_class: If project_id != context.project_id, the
                            quota class cannot be determined.  This
                            parameter allows it to be specified.
        :param defaults: If True, the quota class value (or the
                         default value, if there is no value from the
                         quota class) will be reported if there is no
                         specific value for the resource.
        :param usages: If True, the current in_use and reserved counts
                       will also be returned.
        """
        return self._driver.get_project_quotas(context, self.resources,
                                               project_id,
                                               quota_class=quota_class,
                                               defaults=defaults,
                                               usages=usages)

    def count(self, context, resource, *args, **kwargs):
        """Count a resource.

        For countable resources, invokes the count() function and
        returns its result.  Arguments following the context and
        resource are passed directly to the count function declared by
        the resource.

        :param context: The request context, for access checks.
        :param resource: The name of the resource, as a string.

        :raises QuotaResourceUnknown: if the resource is not registered
            or has no count() callable (i.e. is not a CountableResource).
        """
        # Get the resource
        res = self.resources.get(resource)
        if not res or not hasattr(res, 'count'):
            raise exception.QuotaResourceUnknown(unknown=[resource])

        return res.count(context, *args, **kwargs)

    def limit_check(self, context, project_id=None, **values):
        """Check simple quota limits.

        For limits--those quotas for which there is no usage
        synchronization function--this method checks that a set of
        proposed values are permitted by the limit restriction.  The
        values to check are given as keyword arguments, where the key
        identifies the specific quota limit to check, and the value is
        the proposed value.

        This method will raise a QuotaResourceUnknown exception if a
        given resource is unknown or if it is not a simple limit
        resource.

        If any of the proposed values is over the defined quota, an
        OverQuota exception will be raised with the sorted list of the
        resources which are too high.  Otherwise, the method returns
        nothing.

        :param context: The request context, for access checks.
        :param project_id: Specify the project_id if current context
                           is admin and admin wants to impact on
                           common user's tenant.
        """
        return self._driver.limit_check(context, self.resources, values,
                                        project_id=project_id)

    def reserve(self, context, expire=None, project_id=None, **deltas):
        """Check quotas and reserve resources.

        For counting quotas--those quotas for which there is a usage
        synchronization function--this method checks quotas against
        current usage and the desired deltas.  The deltas are given as
        keyword arguments, and current usage and other reservations
        are factored into the quota check.

        This method will raise a QuotaResourceUnknown exception if a
        given resource is unknown or if it does not have a usage
        synchronization function.

        If any of the proposed values is over the defined quota, an
        OverQuota exception will be raised with the sorted list of the
        resources which are too high.  Otherwise, the method returns a
        list of reservation UUIDs which were created.

        :param context: The request context, for access checks.
        :param expire: An optional parameter specifying an expiration
                       time for the reservations.  If it is a simple
                       number, it is interpreted as a number of
                       seconds and added to the current time; if it is
                       a datetime.timedelta object, it will also be
                       added to the current time.  A datetime.datetime
                       object will be interpreted as the absolute
                       expiration time.  If None is specified, the
                       default expiration time set by
                       --default-reservation-expire will be used (this
                       value will be treated as a number of seconds).
        :param project_id: Specify the project_id if current context
                           is admin and admin wants to impact on
                           common user's tenant.
        """
        reservations = self._driver.reserve(context, self.resources, deltas,
                                            expire=expire,
                                            project_id=project_id)

        LOG.debug("Created reservations %s", reservations)

        return reservations

    def commit(self, context, reservations, project_id=None):
        """Commit reservations.

        :param context: The request context, for access checks.
        :param reservations: A list of the reservation UUIDs, as
                             returned by the reserve() method.
        :param project_id: Specify the project_id if current context
                           is admin and admin wants to impact on
                           common user's tenant.
        """
        try:
            self._driver.commit(context, reservations, project_id=project_id)
        except Exception:
            # NOTE(Vek): Ignoring exceptions here is safe, because the
            # usage resynchronization and the reservation expiration
            # mechanisms will resolve the issue.  The exception is
            # logged, however, because this is less than optimal.
            LOG.exception(_LE("Failed to commit "
                              "reservations %s"), reservations)

    def rollback(self, context, reservations, project_id=None):
        """Roll back reservations.

        :param context: The request context, for access checks.
        :param reservations: A list of the reservation UUIDs, as
                             returned by the reserve() method.
        :param project_id: Specify the project_id if current context
                           is admin and admin wants to impact on
                           common user's tenant.
        """
        try:
            self._driver.rollback(context, reservations, project_id=project_id)
        except Exception:
            # NOTE(Vek): Ignoring exceptions here is safe, because the
            # usage resynchronization and the reservation expiration
            # mechanisms will resolve the issue.  The exception is
            # logged, however, because this is less than optimal.
            LOG.exception(_LE("Failed to roll back reservations "
                              "%s"), reservations)

    def destroy_by_project(self, context, project_id):
        """Destroy all quota limits associated with a project.

        :param context: The request context, for access checks.
        :param project_id: The ID of the project being deleted.
        """
        self._driver.destroy_by_project(context, project_id)

    def expire(self, context):
        """Expire reservations.

        Explores all currently existing reservations and rolls back
        any that have expired.

        :param context: The request context, for access checks.
        """
        self._driver.expire(context)

    def add_volume_type_opts(self, context, opts, volume_type_id):
        """Add volume type resource options.

        Adds elements to the opts hash for volume type quotas.
        If a resource is being reserved ('gigabytes', etc) and the volume
        type is set up for its own quotas, these reservations are copied
        into keys for 'gigabytes_<volume type name>', etc.

        :param context: The request context, for access checks.
        :param opts: The reservations options hash.
        :param volume_type_id: The volume type id for this reservation.
        """
        if not volume_type_id:
            return

        # NOTE(jdg): set inactive to True in volume_type_get, as we
        # may be operating on a volume that was created with a type
        # that has since been deleted.
        volume_type = db.volume_type_get(context, volume_type_id, True)

        for quota in ('volumes', 'gigabytes', 'snapshots'):
            if quota in opts:
                vtype_quota = "%s_%s" % (quota, volume_type['name'])
                opts[vtype_quota] = opts[quota]

    @property
    def resource_names(self):
        # Sorted for stable, predictable ordering in API output/logs.
        return sorted(self.resources.keys())

    @property
    def resources(self):
        return self._resources
class VolumeTypeQuotaEngine(QuotaEngine):
    """Represent the set of all quotas.

    Resources are computed dynamically: the fixed global quotas plus
    one resource per (kind, volume type) pair.
    """

    @property
    def resources(self):
        """Fetches all possible quota resources."""
        result = {}
        # Global quotas: (name, sync hook name, config flag) triples.
        argses = [('volumes', '_sync_volumes', 'quota_volumes'),
                  ('per_volume_gigabytes', None, 'per_volume_size_limit'),
                  ('snapshots', '_sync_snapshots', 'quota_snapshots'),
                  ('gigabytes', '_sync_gigabytes', 'quota_gigabytes'),
                  ('backups', '_sync_backups', 'quota_backups'),
                  ('backup_gigabytes', '_sync_backup_gigabytes',
                   'quota_backup_gigabytes')]
        for args in argses:
            resource = ReservableResource(*args)
            result[resource.name] = resource

        # Volume type quotas.
        # NOTE(review): this queries the database on *every* property
        # access; callers reading it in a loop should cache the result.
        # `context` here is presumably the cinder context module imported
        # at the top of the file -- confirm.
        volume_types = db.volume_type_get_all(context.get_admin_context(),
                                              False)
        for volume_type in volume_types.values():
            for part_name in ('volumes', 'gigabytes', 'snapshots'):
                resource = VolumeTypeResource(part_name, volume_type)
                result[resource.name] = resource
        return result

    def register_resource(self, resource):
        # Resources are derived from config and volume types; manual
        # registration is intentionally unsupported.
        raise NotImplementedError(_("Cannot register resource"))

    def register_resources(self, resources):
        raise NotImplementedError(_("Cannot register resources"))
class CGQuotaEngine(QuotaEngine):
    """Represent the consistencygroup quotas."""

    @property
    def resources(self):
        """Fetches all possible quota resources."""
        # Single global quota: consistency group count per project.
        resource = ReservableResource('consistencygroups',
                                      '_sync_consistencygroups',
                                      'quota_consistencygroups')
        return {resource.name: resource}

    def register_resource(self, resource):
        # Resources are fixed; manual registration is unsupported.
        raise NotImplementedError(_("Cannot register resource"))

    def register_resources(self, resources):
        raise NotImplementedError(_("Cannot register resources"))
# Module-level singleton engines: QUOTAS covers global and per-volume-type
# resources; CGQUOTAS covers consistency-group resources.
QUOTAS = VolumeTypeQuotaEngine()
CGQUOTAS = CGQuotaEngine()
| |
from uuid import uuid4
from unidecode import unidecode
from django.db import models
from django.utils.encoding import smart_unicode
from django.conf import settings
from django.template.defaultfilters import slugify
from django.utils.functional import curry
from django.utils.translation import ugettext_lazy as _, get_language
from anora.templatetags.anora import CONSONANT_SOUND, VOWEL_SOUND
from i18n.utils import normalize_language_code
from nouns.utils import get_synsets, get_lemmas, from_lemma
class Noun(models.Model):
    """A noun (concept) in a given language, enriched with WordNet
    relations (hypernyms, holonyms, antonyms) and matching keywords.
    """
    text = models.CharField(max_length=255, db_index=True)
    slug = models.SlugField(max_length=255, blank=True)
    language = models.CharField(max_length=25)
    is_active = models.BooleanField(default=True)

    class Meta:
        # A noun text is unique within each language.
        unique_together = (("text", "language"),)

    def __unicode__(self):
        return smart_unicode(self.text).title()

    def save(self, *args, **kwargs):
        # Generate a slug only on first save (when none is set).  If
        # another noun in the same language already owns the base slug,
        # disambiguate with a random uuid4 hex suffix.
        if not self.slug:
            slug = slugify(unidecode(self.text))
            duplications = Noun.objects.filter(slug=slug,
                                               language=self.language)
            if duplications.exists():
                self.slug = "%s-%s" % (slug, uuid4().hex)
            else:
                self.slug = slug
        return super(Noun, self).save(*args, **kwargs)

    @classmethod
    def from_synset(cls, synset):
        """Get or create a Noun from a WordNet synset.

        The synset's first lemma becomes the noun text; the remaining
        lemmas are attached as keywords.  Newly created nouns start
        inactive and use the current request language.
        """
        lemmas = synset.lemma_names()
        text = lemmas[0]
        keywords = lemmas[1:]
        noun, created = cls.objects.get_or_create(
            text=from_lemma(text),
            defaults={
                'is_active': False,
                'language': normalize_language_code(get_language())
            })
        for keyword in keywords:
            noun.add_keyword(from_lemma(keyword))
        return noun

    def update_with_wordnet(self, update_antonyms=True):
        """Populate relations and keywords for this noun from WordNet.

        :param update_antonyms: also create antonym relations and
            shallowly update each antonym (passed as False on the
            recursive call to stop antonym pairs recursing forever).
        """
        synsets = get_synsets(self.text)
        if not synsets:
            return
        for synset in synsets:
            # Walk the first hypernym path, chaining "is a" relations
            # from this noun up toward the root concept.
            path = synset.hypernym_paths()[0]
            parents = path[:-1]
            parent = self
            for hypernym in reversed(parents):
                noun = Noun.from_synset(hypernym)
                parent.add_hypernym(noun)
                parent = noun
            # "part of" relations from both part- and member-holonyms.
            for holonym in synset.part_holonyms():
                noun = Noun.from_synset(holonym)
                self.add_holonym(noun)
            for holonym in synset.member_holonyms():
                noun = Noun.from_synset(holonym)
                self.add_holonym(noun)
        # Any lemma other than the text itself becomes a keyword.
        for lemma in get_lemmas(self.text):
            if lemma != self.text:
                self.add_keyword(lemma)
        if not update_antonyms:
            return
        for synset in synsets:
            for lemma in synset.lemmas():
                for antonym in lemma.antonyms():
                    noun = Noun.from_synset(antonym.synset())
                    self.add_antonym(noun)
                    noun.update_with_wordnet(update_antonyms=False)

    def add_keyword(self, text):
        # Idempotent: returns the existing keyword when present.
        keyword, created = self.keywords.get_or_create(text=text)
        return keyword

    def active_keywords(self):
        return self.keywords.filter(is_active=True)

    def active_contentions(self):
        """Published contentions tagged with this noun, randomly ordered."""
        language = normalize_language_code(get_language())
        return self.contentions.filter(
            is_published=True,
            language=language
        ).order_by('?')

    def indirect_contentions(self):
        """Published contentions on nouns this noun points to via
        outgoing relations, randomly ordered."""
        from premises.models import Contention  # to avoid circular import
        language = normalize_language_code(get_language())
        nouns = self.out_relations.values_list('target', flat=True)
        return Contention.objects.filter(
            language=language,
            is_published=True,
            nouns__in=nouns
        ).order_by('?')

    @models.permalink
    def get_absolute_url(self):
        return 'nouns_detail', [self.slug]

    def serialize(self):
        return {
            'title': self.text,
            'absolute_url': self.get_absolute_url()
        }

    def hypernyms(self):
        # Outgoing "is a" relations (this noun is a kind of target).
        return self.out_relations.filter(relation_type='hypernym')

    def hyponyms(self):
        # Incoming "is a" relations (sources are kinds of this noun).
        return self.in_relations.filter(relation_type='hypernym')

    def add_relation(self, target, relation_type=None):
        relation, created = (
            self.out_relations.get_or_create(
                target=target, relation_type=relation_type)
        )
        return relation

    # Partial applications of add_relation: curry binds relation_type so
    # e.g. noun.add_hypernym(target) creates an "is a" relation.
    add_hypernym = curry(add_relation, relation_type="hypernym")
    add_holonym = curry(add_relation, relation_type="holonym")
    add_antonym = curry(add_relation, relation_type="antonym")
class Keyword(models.Model):
    """
    Keywords for matching contentions.

    Alternate surface forms (extra WordNet lemmas) attached to a Noun.
    """
    noun = models.ForeignKey(Noun, related_name="keywords")
    text = models.CharField(max_length=255)
    is_active = models.BooleanField(default=True)

    def __unicode__(self):
        return smart_unicode(self.text)
class Relation(models.Model):
    """
    Holds the relationships of contentions.

        - is a (hypernym)
        - part of (holonym)
        - opposite (antonym)
        - same_as (synonym)
    """
    HYPERNYM = "hypernym"
    HOLONYM = "holonym"
    ANTONYM = "antonym"
    # HYPONYM and MERONYM are reverse-direction types only; they are not
    # offered in TYPES and never stored in relation_type.
    HYPONYM = "hyponym"
    MERONYM = "meronym"

    TYPES = (
        (HYPERNYM, _('is a')),
        (HOLONYM, _('part of')),
        (ANTONYM, _('opposite with')),
    )

    source = models.ForeignKey(Noun, related_name="out_relations")
    target = models.ForeignKey(Noun, related_name="in_relations")
    relation_type = models.CharField(max_length=25, choices=TYPES)
    user = models.ForeignKey(settings.AUTH_USER_MODEL, blank=True, null=True)
    is_active = models.BooleanField(default=True)
    date_created = models.DateTimeField(auto_now_add=True)

    def __unicode__(self):
        return smart_unicode(self.relation_type)

    def reverse_type(self):
        # Antonymy is its own inverse; hypernym/holonym invert to
        # hyponym/meronym.
        return {
            Relation.HYPERNYM: Relation.HYPONYM,
            Relation.HOLONYM: Relation.MERONYM,
            Relation.ANTONYM: Relation.ANTONYM
        }.get(self.relation_type)

    def get_reverse_type_display(self):
        # NOTE(review): HYPONYM maps to "whole of", identical to MERONYM;
        # this looks like it may have been intended as something like
        # "kind of" -- confirm before relying on the label.
        return {
            Relation.HYPONYM: _("whole of"),
            Relation.MERONYM: _("whole of"),
            Relation.ANTONYM: _("opposite with")
        }.get(self.reverse_type())

    def relation_type_label(self):
        # For English hypernyms, pick "is a"/"is an" using anora's
        # article-sound heuristics against the target's text.
        if (self.relation_type == Relation.HYPERNYM
                and self.target.language == 'en'):
            text = unicode(self.target)
            return ('is an' if not CONSONANT_SOUND.match(text)
                    and VOWEL_SOUND.match(text)
                    else 'is a')

        return self.get_relation_type_display()
class Channel(models.Model):
    """A curated, ordered grouping of nouns shown as a browsing channel."""
    title = models.CharField(max_length=255)
    slug = models.CharField(max_length=255)
    nouns = models.ManyToManyField('Noun', null=True, blank=True)
    order = models.IntegerField()
    language = models.CharField(max_length=255, blank=True, null=True)
    # NOTE: max_length is meaningless on BooleanField but kept to avoid a
    # spurious migration diff.
    is_featured = models.BooleanField(max_length=255, default=False)

    def save(self, *args, **kwargs):
        # BUG FIX: this previously slugified self.text, but Channel has
        # no `text` field (only `title`), so saving a channel without a
        # slug raised AttributeError.  Derive the slug from the title.
        if not self.slug:
            self.slug = slugify(unidecode(self.title))
        return super(Channel, self).save(*args, **kwargs)

    def __unicode__(self):
        return smart_unicode(self.title)

    @models.permalink
    def get_absolute_url(self):
        return 'channel_detail', [self.slug]

    def serialize(self):
        return {
            'title': self.title,
            'absolute_url': self.get_absolute_url()
        }
| |
import numpy as np
import rbf.linalg
import unittest
import scipy.sparse as sp
# Fixed seed so the randomized linear-algebra tests are reproducible.
np.random.seed(1)
class Test(unittest.TestCase):
    """Tests for the solver wrappers in rbf.linalg.

    Every solver result is compared against a dense reference solution
    computed with numpy.
    """

    def _require_cholmod(self):
        # Properly *skip* (instead of silently passing, as the old
        # `return` did) when the optional cholmod dependency is missing.
        if not rbf.linalg.HAS_CHOLMOD:
            self.skipTest('cholmod is not available')

    def _partitioned_reference(self, A, B, a, b):
        # Dense reference solution of the saddle-point system
        # [[A, B], [B^T, 0]] [x; y] = [a; b].
        m = B.shape[1]
        C = np.vstack(
            (np.hstack((A, B)),
             np.hstack((B.T, np.zeros((m, m))))))
        return np.linalg.inv(C).dot(np.hstack((a, b)))

    def test_sparse_solver(self):
        n = 100
        A = sp.rand(n, n, density=0.2)
        A = A.tocsc()
        b = np.random.random((n,))
        # .toarray() instead of the deprecated sparse `.A` attribute.
        x1 = np.linalg.solve(A.toarray(), b)
        x2 = rbf.linalg._SparseSolver(A).solve(b)
        self.assertTrue(np.allclose(x1, x2))

    def test_dense_solver(self):
        n = 100
        A = np.random.random((n, n))
        b = np.random.random((n,))
        x1 = np.linalg.solve(A, b)
        x2 = rbf.linalg._DenseSolver(A).solve(b)
        self.assertTrue(np.allclose(x1, x2))

    def test_sparse_pos_def_solve(self):
        self._require_cholmod()
        n = 100
        A = sp.rand(n, n, density=0.2)
        A = A.T.dot(A).tocsc()  # A^T A is positive definite
        b = np.random.random((n,))
        factor = rbf.linalg._SparsePosDefSolver(A)
        x1 = factor.solve(b)
        x2 = np.linalg.solve(A.toarray(), b)
        self.assertTrue(np.allclose(x1, x2))

    def test_sparse_pos_def_solve_L(self):
        self._require_cholmod()
        n = 100
        A = sp.rand(n, n, density=0.2)
        A = A.T.dot(A).tocsc()
        b = np.random.random((n,))
        factor = rbf.linalg._SparsePosDefSolver(A)
        x1 = factor.solve_L(b)
        x2 = np.linalg.solve(factor.L().toarray(), b)
        self.assertTrue(np.allclose(x1, x2))

    def test_sparse_pos_def_L(self):
        self._require_cholmod()
        n = 100
        A = sp.rand(n, n, density=0.2)
        A = A.T.dot(A).tocsc()
        factor = rbf.linalg._SparsePosDefSolver(A)
        L = factor.L()
        A2 = L.dot(L.T)
        self.assertTrue(np.allclose(A.toarray(), A2.toarray()))

    def test_sparse_pos_def_log_det(self):
        self._require_cholmod()
        n = 100
        A = sp.rand(n, n, density=0.2)
        A = A.T.dot(A).tocsc()
        factor = rbf.linalg._SparsePosDefSolver(A)
        x1 = factor.log_det()
        x2 = np.log(np.linalg.det(A.toarray()))
        self.assertTrue(np.isclose(x1, x2))

    def test_dense_pos_def_solve(self):
        n = 100
        A = np.random.random((n, n))
        A = A.T.dot(A)
        b = np.random.random((n,))
        factor = rbf.linalg._DensePosDefSolver(A)
        x1 = factor.solve(b)
        x2 = np.linalg.solve(A, b)
        self.assertTrue(np.allclose(x1, x2))

    def test_dense_pos_def_solve_L(self):
        n = 100
        A = np.random.random((n, n))
        A = A.T.dot(A)
        b = np.random.random((n,))
        factor = rbf.linalg._DensePosDefSolver(A)
        x1 = factor.solve_L(b)
        x2 = np.linalg.solve(factor.L(), b)
        self.assertTrue(np.allclose(x1, x2))

    def test_dense_pos_def_L(self):
        n = 100
        A = np.random.random((n, n))
        A = A.T.dot(A)
        factor = rbf.linalg._DensePosDefSolver(A)
        L = factor.L()
        A2 = L.dot(L.T)
        self.assertTrue(np.allclose(A, A2))

    def test_dense_pos_def_log_det(self):
        n = 100
        A = np.random.random((n, n))
        A = A.T.dot(A)
        factor = rbf.linalg._DensePosDefSolver(A)
        x1 = factor.log_det()
        x2 = np.log(np.linalg.det(A))
        self.assertTrue(np.isclose(x1, x2))

    def test_solver_dense_build_inv(self):
        # Solving with and without a precomputed inverse must agree.
        A = np.random.random((4, 4))
        d = np.random.random((4,))
        solver1 = rbf.linalg.Solver(A, build_inverse=False)
        solver2 = rbf.linalg.Solver(A, build_inverse=True)
        soln1 = solver1.solve(d)
        soln2 = solver2.solve(d)
        self.assertTrue(np.allclose(soln1, soln2))

    def test_pos_def_solver_dense_build_inv(self):
        A = np.random.random((4, 4))
        A = A.T.dot(A)
        d = np.random.random((4,))
        solver1 = rbf.linalg.PosDefSolver(A, build_inverse=False)
        solver2 = rbf.linalg.PosDefSolver(A, build_inverse=True)
        soln1 = solver1.solve(d)
        soln2 = solver2.solve(d)
        self.assertTrue(np.allclose(soln1, soln2))

    def test_partitioned_solver_dense(self):
        A = np.random.random((4, 4))
        A = A.T + A  # A is now symmetric
        B = np.random.random((4, 2))
        a = np.random.random((4,))
        b = np.random.random((2,))
        Cfact = rbf.linalg.PartitionedSolver(A, B)
        soln1a, soln1b = Cfact.solve(a, b)
        soln1 = np.hstack((soln1a, soln1b))
        soln2 = self._partitioned_reference(A, B, a, b)
        self.assertTrue(np.allclose(soln1, soln2))

    def test_partitioned_solver_dense_build_inv(self):
        A = np.random.random((4, 4))
        A = A.T + A  # A is now symmetric
        B = np.random.random((4, 2))
        a = np.random.random((4,))
        b = np.random.random((2,))
        Cfact = rbf.linalg.PartitionedSolver(A, B, build_inverse=True)
        soln1a, soln1b = Cfact.solve(a, b)
        soln1 = np.hstack((soln1a, soln1b))
        soln2 = self._partitioned_reference(A, B, a, b)
        self.assertTrue(np.allclose(soln1, soln2))

    def test_partitioned_solver_dense_pos_def(self):
        A = np.random.random((4, 4))
        A = A.T.dot(A)  # A is now P.D.
        B = np.random.random((4, 2))
        a = np.random.random((4,))
        b = np.random.random((2,))
        Cfact = rbf.linalg.PartitionedPosDefSolver(A, B)
        soln1a, soln1b = Cfact.solve(a, b)
        soln1 = np.hstack((soln1a, soln1b))
        soln2 = self._partitioned_reference(A, B, a, b)
        self.assertTrue(np.allclose(soln1, soln2))

    def test_partitioned_solver_dense_pos_def_build_inv(self):
        A = np.random.random((4, 4))
        A = A.T.dot(A)  # A is now P.D.
        B = np.random.random((4, 2))
        a = np.random.random((4,))
        b = np.random.random((2,))
        Cfact = rbf.linalg.PartitionedPosDefSolver(A, B, build_inverse=True)
        soln1a, soln1b = Cfact.solve(a, b)
        soln1 = np.hstack((soln1a, soln1b))
        soln2 = self._partitioned_reference(A, B, a, b)
        self.assertTrue(np.allclose(soln1, soln2))

    def test_partitioned_solver_sparse(self):
        A = np.random.random((4, 4))
        A = A.T + A  # A is now symmetric
        A = sp.csc_matrix(A)  # A is now sparse
        B = np.random.random((4, 2))
        a = np.random.random((4,))
        b = np.random.random((2,))
        Cfact = rbf.linalg.PartitionedSolver(A, B)
        soln1a, soln1b = Cfact.solve(a, b)
        soln1 = np.hstack((soln1a, soln1b))
        soln2 = self._partitioned_reference(A.toarray(), B, a, b)
        self.assertTrue(np.allclose(soln1, soln2))

    def test_partitioned_solver_sparse_build_inv(self):
        A = np.random.random((4, 4))
        A = A.T + A  # A is now symmetric
        A = sp.csc_matrix(A)  # A is now sparse
        B = np.random.random((4, 2))
        a = np.random.random((4,))
        b = np.random.random((2,))
        Cfact = rbf.linalg.PartitionedSolver(A, B, build_inverse=True)
        soln1a, soln1b = Cfact.solve(a, b)
        soln1 = np.hstack((soln1a, soln1b))
        soln2 = self._partitioned_reference(A.toarray(), B, a, b)
        self.assertTrue(np.allclose(soln1, soln2))

    def test_partitioned_solver_sparse_pos_def(self):
        A = np.random.random((4, 4))
        A = A.T.dot(A)  # A is now P.D.
        A = sp.csc_matrix(A)  # A is now sparse
        B = np.random.random((4, 2))
        a = np.random.random((4,))
        b = np.random.random((2,))
        Cfact = rbf.linalg.PartitionedPosDefSolver(A, B)
        soln1a, soln1b = Cfact.solve(a, b)
        soln1 = np.hstack((soln1a, soln1b))
        soln2 = self._partitioned_reference(A.toarray(), B, a, b)
        self.assertTrue(np.allclose(soln1, soln2))

    def test_partitioned_solver_sparse_pos_def_build_inv(self):
        A = np.random.random((4, 4))
        A = A.T.dot(A)  # A is now P.D.
        A = sp.csc_matrix(A)  # A is now sparse
        B = np.random.random((4, 2))
        a = np.random.random((4,))
        b = np.random.random((2,))
        Cfact = rbf.linalg.PartitionedPosDefSolver(A, B, build_inverse=True)
        soln1a, soln1b = Cfact.solve(a, b)
        soln1 = np.hstack((soln1a, soln1b))
        soln2 = self._partitioned_reference(A.toarray(), B, a, b)
        self.assertTrue(np.allclose(soln1, soln2))
| |
""""""
from datetime import datetime
from typing import List
import shelve
from influxdb import InfluxDBClient
from vnpy.trader.constant import Exchange, Interval
from vnpy.trader.object import BarData, TickData
from vnpy.trader.database import (
BaseDatabase,
BarOverview,
DB_TZ,
convert_tz
)
from vnpy.trader.setting import SETTINGS
from vnpy.trader.utility import (
generate_vt_symbol,
extract_vt_symbol,
get_file_path
)
class InfluxdbDatabase(BaseDatabase):
    """vn.py database adapter backed by InfluxDB.

    Bars are stored in the "bar_data" measurement and ticks in
    "tick_data", tagged by vt_symbol (and interval for bars).
    Per-symbol bar overviews are kept in a local shelve file, since
    InfluxDB offers no convenient home for that metadata.
    """

    overview_filename = "influxdb_overview"
    overview_filepath = str(get_file_path(overview_filename))

    def __init__(self) -> None:
        """Open the InfluxDB connection from the global vn.py settings."""
        database = SETTINGS["database.database"]
        user = SETTINGS["database.user"]
        password = SETTINGS["database.password"]
        host = SETTINGS["database.host"]
        port = SETTINGS["database.port"]

        self.client = InfluxDBClient(host, port, user, password, database)
        # Creating an already-existing database is a harmless no-op.
        self.client.create_database(database)

    def save_bar_data(self, bars: List[BarData]) -> bool:
        """Write bars to InfluxDB and refresh the bar overview.

        All bars are assumed to share the vt_symbol and interval of the
        first bar in the list.
        """
        json_body = []

        bar = bars[0]
        vt_symbol = bar.vt_symbol
        interval = bar.interval

        for bar in bars:
            bar.datetime = convert_tz(bar.datetime)

            d = {
                "measurement": "bar_data",
                "tags": {
                    "vt_symbol": vt_symbol,
                    "interval": interval.value
                },
                "time": bar.datetime.isoformat(),
                "fields": {
                    "open_price": bar.open_price,
                    "high_price": bar.high_price,
                    "low_price": bar.low_price,
                    "close_price": bar.close_price,
                    "volume": bar.volume,
                    "open_interest": bar.open_interest,
                }
            }
            json_body.append(d)

        self.client.write_points(json_body, batch_size=10000)

        # Update bar overview
        symbol, exchange = extract_vt_symbol(vt_symbol)
        key = f"{vt_symbol}_{interval.value}"
        f = shelve.open(self.overview_filepath)
        overview = f.get(key, None)

        if not overview:
            overview = BarOverview(
                symbol=symbol,
                exchange=exchange,
                interval=interval
            )
            overview.count = len(bars)
            overview.start = bars[0].datetime
            overview.end = bars[-1].datetime
        else:
            overview.start = min(overview.start, bars[0].datetime)
            overview.end = max(overview.end, bars[-1].datetime)

            # Re-count from the database so overlapping saves do not
            # inflate the overview count.
            query = (
                "select count(close_price) from bar_data"
                " where vt_symbol=$vt_symbol"
                " and interval=$interval"
            )
            bind_params = {
                "vt_symbol": vt_symbol,
                "interval": interval.value
            }

            result = self.client.query(query, bind_params=bind_params)
            points = result.get_points()

            for d in points:
                overview.count = d["count"]

        f[key] = overview
        f.close()

        # Fix: the signature promises bool but the method previously
        # fell off the end and returned None.
        return True

    def save_tick_data(self, ticks: List[TickData]) -> bool:
        """Write ticks to InfluxDB; all ticks share the first vt_symbol."""
        json_body = []

        tick = ticks[0]
        vt_symbol = tick.vt_symbol

        for tick in ticks:
            tick.datetime = convert_tz(tick.datetime)

            d = {
                "measurement": "tick_data",
                "tags": {
                    "vt_symbol": vt_symbol
                },
                "time": tick.datetime.isoformat(),
                "fields": {
                    "name": tick.name,
                    "volume": tick.volume,
                    "open_interest": tick.open_interest,
                    "last_price": tick.last_price,
                    "last_volume": tick.last_volume,
                    "limit_up": tick.limit_up,
                    "limit_down": tick.limit_down,
                    "open_price": tick.open_price,
                    "high_price": tick.high_price,
                    "low_price": tick.low_price,
                    "pre_close": tick.pre_close,
                    "bid_price_1": tick.bid_price_1,
                    "bid_price_2": tick.bid_price_2,
                    "bid_price_3": tick.bid_price_3,
                    "bid_price_4": tick.bid_price_4,
                    "bid_price_5": tick.bid_price_5,
                    "ask_price_1": tick.ask_price_1,
                    "ask_price_2": tick.ask_price_2,
                    "ask_price_3": tick.ask_price_3,
                    "ask_price_4": tick.ask_price_4,
                    "ask_price_5": tick.ask_price_5,
                    "bid_volume_1": tick.bid_volume_1,
                    "bid_volume_2": tick.bid_volume_2,
                    "bid_volume_3": tick.bid_volume_3,
                    "bid_volume_4": tick.bid_volume_4,
                    "bid_volume_5": tick.bid_volume_5,
                    "ask_volume_1": tick.ask_volume_1,
                    "ask_volume_2": tick.ask_volume_2,
                    "ask_volume_3": tick.ask_volume_3,
                    "ask_volume_4": tick.ask_volume_4,
                    "ask_volume_5": tick.ask_volume_5,
                }
            }
            json_body.append(d)

        self.client.write_points(json_body, batch_size=10000)

        # Fix: return bool as annotated instead of implicit None.
        return True

    def load_bar_data(
        self,
        symbol: str,
        exchange: Exchange,
        interval: Interval,
        start: datetime,
        end: datetime
    ) -> List[BarData]:
        """Load bars for symbol/interval between start and end.

        NOTE(review): the time filter keeps only the date() part, so
        intraday precision of start/end is ignored — confirm intended.
        """
        query = (
            "select * from bar_data"
            " where vt_symbol=$vt_symbol"
            " and interval=$interval"
            f" and time >= '{start.date().isoformat()}'"
            f" and time <= '{end.date().isoformat()}';"
        )
        bind_params = {
            "vt_symbol": generate_vt_symbol(symbol, exchange),
            "interval": interval.value
        }

        result = self.client.query(query, bind_params=bind_params)
        points = result.get_points()

        bars: List[BarData] = []
        for d in points:
            # Influx returns UTC-style timestamps; re-localize to DB_TZ.
            dt = datetime.strptime(d["time"], "%Y-%m-%dT%H:%M:%SZ")
            bar = BarData(
                symbol=symbol,
                exchange=exchange,
                interval=interval,
                datetime=DB_TZ.localize(dt),
                open_price=d["open_price"],
                high_price=d["high_price"],
                low_price=d["low_price"],
                close_price=d["close_price"],
                volume=d["volume"],
                open_interest=d["open_interest"],
                gateway_name="DB"
            )
            bars.append(bar)

        return bars

    def load_tick_data(
        self,
        symbol: str,
        exchange: Exchange,
        start: datetime,
        end: datetime
    ) -> List[TickData]:
        """Load ticks for the symbol between start and end (date precision)."""
        query = (
            "select * from tick_data"
            " where vt_symbol=$vt_symbol"
            f" and time >= '{start.date().isoformat()}'"
            f" and time <= '{end.date().isoformat()}';"
        )
        bind_params = {
            "vt_symbol": generate_vt_symbol(symbol, exchange),
        }

        result = self.client.query(query, bind_params=bind_params)
        points = result.get_points()

        ticks: List[TickData] = []
        for d in points:
            dt = datetime.strptime(d["time"], "%Y-%m-%dT%H:%M:%SZ")
            tick = TickData(
                symbol=symbol,
                exchange=exchange,
                datetime=DB_TZ.localize(dt),
                name=d["name"],
                volume=d["volume"],
                open_interest=d["open_interest"],
                last_price=d["last_price"],
                last_volume=d["last_volume"],
                limit_up=d["limit_up"],
                limit_down=d["limit_down"],
                open_price=d["open_price"],
                high_price=d["high_price"],
                low_price=d["low_price"],
                pre_close=d["pre_close"],
                bid_price_1=d["bid_price_1"],
                bid_price_2=d["bid_price_2"],
                bid_price_3=d["bid_price_3"],
                bid_price_4=d["bid_price_4"],
                bid_price_5=d["bid_price_5"],
                ask_price_1=d["ask_price_1"],
                ask_price_2=d["ask_price_2"],
                ask_price_3=d["ask_price_3"],
                ask_price_4=d["ask_price_4"],
                ask_price_5=d["ask_price_5"],
                bid_volume_1=d["bid_volume_1"],
                bid_volume_2=d["bid_volume_2"],
                bid_volume_3=d["bid_volume_3"],
                bid_volume_4=d["bid_volume_4"],
                bid_volume_5=d["bid_volume_5"],
                ask_volume_1=d["ask_volume_1"],
                ask_volume_2=d["ask_volume_2"],
                ask_volume_3=d["ask_volume_3"],
                ask_volume_4=d["ask_volume_4"],
                ask_volume_5=d["ask_volume_5"],
                gateway_name="DB"
            )
            ticks.append(tick)

        return ticks

    def delete_bar_data(
        self,
        symbol: str,
        exchange: Exchange,
        interval: Interval
    ) -> int:
        """Delete all bars of symbol/interval; return the deleted count."""
        bind_params = {
            "vt_symbol": generate_vt_symbol(symbol, exchange),
            "interval": interval.value
        }

        # Query data count.  Fix: default to 0 so the return value is
        # defined even when no matching series exists (previously the
        # loop could leave `count` unbound -> NameError).
        count = 0
        query1 = (
            "select count(close_price) from bar_data"
            " where vt_symbol=$vt_symbol"
            " and interval=$interval"
        )
        result = self.client.query(query1, bind_params=bind_params)
        points = result.get_points()
        for d in points:
            count = d["count"]

        # Delete data
        query2 = (
            "drop series from bar_data"
            " where vt_symbol=$vt_symbol"
            " and interval=$interval"
        )
        self.client.query(query2, bind_params=bind_params)

        # Delete overview
        f = shelve.open(self.overview_filepath)
        vt_symbol = generate_vt_symbol(symbol, exchange)
        key = f"{vt_symbol}_{interval.value}"
        if key in f:
            f.pop(key)
        f.close()

        return count

    def delete_tick_data(
        self,
        symbol: str,
        exchange: Exchange
    ) -> int:
        """Delete all ticks of the symbol; return the deleted count."""
        bind_params = {
            "vt_symbol": generate_vt_symbol(symbol, exchange),
        }

        # Query data count (0 when no matching series; see delete_bar_data).
        count = 0
        query1 = (
            "select count(last_price) from tick_data"
            " where vt_symbol=$vt_symbol"
        )
        result = self.client.query(query1, bind_params=bind_params)
        points = result.get_points()
        for d in points:
            count = d["count"]

        # Delete data
        query2 = (
            "drop series from tick_data"
            " where vt_symbol=$vt_symbol"
        )
        self.client.query(query2, bind_params=bind_params)

        return count

    def get_bar_overview(self) -> List[BarOverview]:
        """
        Return data available in database.
        """
        # Init bar overview if it does not exist yet.
        query = "select count(close_price) from bar_data"
        result = self.client.query(query)
        points = result.get_points()

        data_count = 0
        for d in points:
            data_count = d["count"]

        f = shelve.open(self.overview_filepath)
        overview_count = len(f)

        if data_count and not overview_count:
            # Fix: rebuild through init_bar_overview's own handle, then
            # reopen ours, so the freshly written entries are actually
            # visible here (the stale handle could miss them).
            f.close()
            self.init_bar_overview()
            f = shelve.open(self.overview_filepath)

        overviews = list(f.values())
        f.close()

        return overviews

    def init_bar_overview(self) -> None:
        """
        Init overview table if not exists.
        """
        f = shelve.open(self.overview_filepath)

        # "group by *" yields one series per (vt_symbol, interval) tag set.
        query: str = "select count(close_price) from bar_data group by *"
        result = self.client.query(query)

        for k, v in result.items():
            tags = k[1]
            data = list(v)[0]

            vt_symbol = tags["vt_symbol"]
            symbol, exchange = extract_vt_symbol(vt_symbol)
            interval = Interval(tags["interval"])

            overview = BarOverview(
                symbol=symbol,
                exchange=exchange,
                interval=interval,
                count=data["count"]
            )
            overview.start = self.get_bar_datetime(vt_symbol, interval, 1)
            overview.end = self.get_bar_datetime(vt_symbol, interval, -1)

            key = f"{vt_symbol}_{interval.value}"
            f[key] = overview

        f.close()

    def get_bar_datetime(self, vt_symbol: str, interval: Interval, order: int) -> datetime:
        """Return the earliest (order > 0) or latest (order <= 0) bar time.

        Returns None when the measurement holds no matching points.
        """
        # Fix: the selectors were inverted — a positive order is used by
        # init_bar_overview to fetch overview.start, i.e. the FIRST bar.
        if order > 0:
            keyword = "first"
        else:
            keyword = "last"

        query = (
            f"select {keyword}(close_price), * from bar_data"
            " where vt_symbol=$vt_symbol"
            " and interval=$interval"
        )
        bind_params = {
            "vt_symbol": vt_symbol,
            "interval": interval.value
        }

        result = self.client.query(query, bind_params=bind_params)
        points = result.get_points()

        for d in points:
            dt = datetime.strptime(d["time"], "%Y-%m-%dT%H:%M:%SZ")
            return dt

        return None
# Module-level singleton used by the rest of vn.py as the active database.
database_manager = InfluxdbDatabase()
| |
# Copyright 2017 Balazs Nemeth, Mark Szalay, Janos Doka
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import threading
try:
# runs when mapping files are called from ESCAPE
from escape.nffg_lib.nffg import NFFG, NFFGToolBox
except ImportError:
# runs when mapping repo is cloned individually, and NFFG lib is in a
# sibling directory. WARNING: cicular import is not avioded by design.
import site
site.addsitedir('..')
from nffg_lib.nffg import NFFG, NFFGToolBox
from hybrid.WhatToOptimizeStrategy import *
from hybrid.WhenToOptimizeStrategy import *
from hybrid.ResourceSharingStrategy import *
from hybrid.WhenToApplyOptimization import *
import milp.milp_solution_in_nffg as offline_mapping
import alg1.MappingAlgorithms as online_mapping
import alg1.UnifyExceptionTypes as uet
import Queue
from memory_profiler import memory_usage
# Module-wide logger for the hybrid orchestrator.
# NOTE(review): `logging` is not among this module's visible imports;
# presumably it arrives via one of the wildcard hybrid.* imports above —
# confirm.
log = logging.getLogger(" Hybrid Orchestrator")
class ResNFFGProtector(object):
    """Readers-writer lock protecting an NFFG resource object.

    Multiple readers may hold the resource concurrently; a writer gets
    exclusive access.  Implemented with the classic two-lock
    reader-count scheme (readers preference).
    """

    def __init__(self, lock_name, do_logging=False):
        # Number of threads currently reading the protected NFFG.
        self.readers_count = 0
        # Guards readers_count itself.
        self.reader_counter_protector = threading.Lock()
        # Held by the writer, or collectively by the readers.
        self.res_nffg_protector = threading.Lock()
        self.do_logging = do_logging
        self.lock_name = lock_name

    def start_reading_res_nffg(self, read_reason):
        """Register a reader; the first reader locks out writers."""
        started_to_wait = time.time()
        with self.reader_counter_protector:
            self.readers_count += 1
            if self.readers_count == 1:
                self.res_nffg_protector.acquire()
            if self.do_logging:
                log.debug("Time spent on waiting for lock of %s: %ss" %
                          (self.lock_name, time.time() - started_to_wait))
                log.debug("Locking %s nffg for reading: \"%s\", number of current readers: %s"
                          % (self.lock_name, read_reason, self.readers_count))

    def finish_reading_res_nffg(self, read_reason):
        """Unregister a reader; the last reader lets writers in.

        Raises RuntimeError on an unbalanced release.  Fix: the check is
        done *before* decrementing, and the counter lock is released via
        the `with` block even on the error path — the original raised
        while still holding reader_counter_protector (leaking it) and
        had already corrupted readers_count to -1.
        """
        with self.reader_counter_protector:
            if self.readers_count <= 0:
                raise RuntimeError("Some thread tried to release reading right on res_online multiple times!")
            self.readers_count -= 1
            if self.readers_count == 0:
                self.res_nffg_protector.release()
            if self.do_logging:
                log.debug("Releasing %s nffg for reading: \"%s\", number of current readers: %s"
                          % (self.lock_name, read_reason, self.readers_count))

    def start_writing_res_nffg(self, write_reason):
        """Acquire exclusive (writer) access to the protected NFFG."""
        started_to_wait = time.time()
        self.res_nffg_protector.acquire()
        if self.do_logging:
            log.debug("Time spent on waiting for lock of %s: %ss" %
                      (self.lock_name, time.time() - started_to_wait))
            log.debug("Locking %s nffg for writing: \"%s\"." % (self.lock_name, write_reason))

    def finish_writing_res_nffg(self, write_reason):
        """Release exclusive (writer) access."""
        self.res_nffg_protector.release()
        if self.do_logging:
            log.debug("Releasing %s nffg for writing: \"%s\"." % (self.lock_name, write_reason))
class HybridOrchestrator():
    """Combines online (alg1) and offline (MILP) mapping of requests.

    The offline optimization runs in the background; its result is
    merged back into the online resource view when applicable.
    """
    # Lifecycle states of the background offline optimization.
    OFFLINE_STATE_INIT = 0
    OFFLINE_STATE_RUNNING = 1
    # FINISHED is set at the end of do_offline_mapping; MERGED is
    # presumably set once the result is merged into res_online
    # (transition not visible in this chunk — confirm).
    OFFLINE_STATE_FINISHED = 2
    OFFLINE_STATE_MERGED = 3
    def __init__(self, RG, config_file_path, full_log_path,
                 resource_type, remaining_request_lifetimes):
        """Set up logging, locks, strategies and MILP parameters.

        :param RG: resource graph (NFFG) to orchestrate on
        :param config_file_path: ConfigObj-style configuration file
        :param full_log_path: log file attached as a logging handler
        :param resource_type: forwarded to the what-to-optimize strategy
        :param remaining_request_lifetimes: list of dicts with an 'SG'
            key holding already present requests (read-only here)
        """
        config = ConfigObj(config_file_path)
        HybridOrchestrator.log_path = config_file_path

        formatter = logging.Formatter(
            '%(asctime)s | Hybrid Orches | %(levelname)s | \t%(message)s')
        hdlr = logging.FileHandler(full_log_path)
        hdlr.setFormatter(formatter)
        log.addHandler(hdlr)
        log.setLevel(logging.DEBUG)

        # Protects the res_online
        self.res_online_protector = ResNFFGProtector("res_online", True)
        self.res_online = None
        self.res_offline = None
        self.received_resource = None

        # Delete all NFs if there are maybe initial ones in RG.
        self.bare_resource_100 = \
            NFFGToolBox.strip_nfs_flowrules_sghops_ports(
                copy.deepcopy(RG), log)

        # list of request NFFG-s which are deleted. It is received with
        # every MAP call.
        self.deleted_services = []

        # All request in one NFFG
        # The sum of reqs needs to be accessed from Offline optimization to determine
        # what to opt and online mapping have to gather all requests there
        self.sum_req_protector = ResNFFGProtector("sum_req", True)
        self.SUM_req = NFFG()
        # if there are initial requests in the remaining lifetimes, we have
        # to gather them into SUM_req
        # Note: hybrid orchestrator may only read this parameter
        for request in remaining_request_lifetimes:
            self.merge_all_request(request['SG'])

        self.offline_mapping_thread = None
        self.offline_status = HybridOrchestrator.OFFLINE_STATE_INIT
        self.reoptimized_resource = None
        # Parsed via float first so values like "3.0" are accepted.
        self.when_to_opt_param = int(float(config['when_to_opt_parameter']))

        # NOTE: in non-multi threaded execution the online thread doesn't
        # take any locks, so all of them are free for the sequential
        # execution of the optimization.
        if 'hybrid_multi_thread' in config:
            # NOTE(review): ConfigObj values are strings, so
            # bool(config[...]) is True for any non-empty value,
            # including the string "False" — confirm this is intended.
            self.hybrid_multi_thread = bool(config['hybrid_multi_thread'])
        else:
            # defaults to True to maintain backward compatibility.
            self.hybrid_multi_thread = True

        # What to optimize strategy
        what_to_opt_strat = config['what_to_optimize']
        if what_to_opt_strat == "reqs_since_last":
            self.__what_to_opt = ReqsSinceLastOpt(full_log_path, config_file_path,
                                                  resource_type, remaining_request_lifetimes)
        elif what_to_opt_strat == "all_reqs":
            self.__what_to_opt = AllReqsOpt(full_log_path, config_file_path,
                                            resource_type, remaining_request_lifetimes)
        elif what_to_opt_strat == "reqs_lifetime":
            self.__what_to_opt = ReqsBasedOnLifetime(full_log_path, config_file_path,
                                                     resource_type, remaining_request_lifetimes)
        else:
            raise ValueError(
                'Invalid what_to_opt_strat type! Please choose one of the '
                'followings: all_reqs, reqs_since_last')
        self.reqs_under_optimization = None

        # When to optimize strategy
        when_to_opt_strat = config['when_to_optimize']
        if when_to_opt_strat == "modell_based":
            self.__when_to_opt = ModelBased(full_log_path)
        elif when_to_opt_strat == "fixed_req_count":
            self.__when_to_opt = FixedReqCount(full_log_path)
        elif when_to_opt_strat == "fixed_time":
            self.__when_to_opt = FixedTime(full_log_path)
        elif when_to_opt_strat == "periodical_model_based":
            self.__when_to_opt = PeriodicalModelBased(full_log_path)
        elif when_to_opt_strat == "always":
            self.__when_to_opt = Always(full_log_path)
        else:
            raise ValueError(
                'Invalid when_to_opt type! Please choose '
                'one of the followings: modell_based, '
                'fixed_req_count, fixed_time, '
                'periodical_model_based, always')

        # Resource sharing strategy
        resource_share_strat = config['resource_share_strat']
        if resource_share_strat == "double_hundred":
            self.__res_sharing_strat = DoubleHundred(self.bare_resource_100, full_log_path)
        elif resource_share_strat == "dynamic":
            self.__res_sharing_strat = DynamicMaxOnlineToAll(self.bare_resource_100, full_log_path)
        else:
            raise ValueError(
                'Invalid resource_share_strat type! Please choose '
                'one of the followings: double_hundred, '
                'dynamic')

        # Queue for online mapping failures (read by the caller).
        self.online_fails = Queue.Queue()

        # Set offline mapping parameters
        self.mig_handler = config['migration_handler_name']
        # NOTE(review): same bool(str) pitfall as hybrid_multi_thread.
        self.optimize_already_mapped_nfs = bool(config['optimize_already_mapped_nfs'])
        self.migration_coeff = float(config['migration_coeff'])
        self.load_balance_coeff = float(config['load_balance_coeff'])
        self.edge_cost_coeff = float(config['edge_cost_coeff'])
        self.optional_milp_params = {}
        if 'time_limit' in config:
            self.optional_milp_params['time_limit'] = float(config['time_limit'])
        if 'mip_gap_limit' in config:
            self.optional_milp_params['mip_gap_limit'] = float(config['mip_gap_limit'])
        if 'node_limit' in config:
            self.optional_milp_params['node_limit'] = int(config['node_limit'])
        self.optional_milp_params.update(**config['migration_handler_kwargs'])

        # When-to-apply-optimization policy: which offline states mean
        # "result ready" vs "still pending".
        base_when_to_apply_opt = BaseWhenToApplyOptimization(
            [HybridOrchestrator.OFFLINE_STATE_FINISHED],
            [HybridOrchestrator.OFFLINE_STATE_INIT,
             HybridOrchestrator.OFFLINE_STATE_RUNNING,
             HybridOrchestrator.OFFLINE_STATE_MERGED], log)
        if 'when_to_apply_opt' in config:
            if config['when_to_apply_opt'] == '':
                self.__when_to_apply_opt = base_when_to_apply_opt
            elif config['when_to_apply_opt'] == 'max_number':
                self.__when_to_apply_opt = MaxNumberOfCalls(
                    int(config['when_to_apply_opt_param']),
                    base_when_to_apply_opt.opt_ready_states,
                    base_when_to_apply_opt.opt_pending_states, log)
            else:
                raise ValueError('Invalid when_to_apply_opt type! Please choose'
                                 ' one of the followings: max_number')
        else:
            self.__when_to_apply_opt = base_when_to_apply_opt
        self.offline_mapping_num = 0
def merge_all_request(self, request):
self.sum_req_protector.start_writing_res_nffg("Appending new request to the "
"sum of requests")
self.SUM_req = NFFGToolBox.merge_nffgs(self.SUM_req, request, silent=True)
log.debug("Requests in SUM_req: %s"%len([r.sg_path for r in self.SUM_req.reqs]))
self.sum_req_protector.finish_writing_res_nffg("New request %s appended to "
"sum req" % request)
    def do_online_mapping(self, request):
        """Map *request* onto res_online with the online (alg1) mapper.

        Mapping failures are queued in self.online_fails and execution
        continues; any other exception is fatal and re-raised.
        """
        self.set_online_resource_graph(request)
        # keep_input_unchanged=True makes it unnecessary
        # temp_res_online = copy.deepcopy(self.res_online)
        try:
            # propagate_e2e_reqs must be turned False (so they are not tried to
            # be splitted and the e2e versions removed!) We want to keep them in
            # the res_online, so reoptimization wouldn't hurt violate them!
            self.res_online, _ = online_mapping.MAP(request, self.res_online,
                                                    bw_factor=1, res_factor=1,
                                                    lat_factor=1,
                                                    shortest_paths=None,
                                                    return_dist=False,
                                                    propagate_e2e_reqs=False,
                                                    bt_limit=6,
                                                    bt_branching_factor=3, mode=NFFG.MODE_ADD,
                                                    keep_e2e_reqs_in_output=True,
                                                    keep_input_unchanged=True)
            log.info("do_online_mapping : Successful online mapping :)")
        except uet.MappingException as error:
            log.warning("do_online_mapping : Unsuccessful online mapping :( ")
            log.warning(error.msg)
            # keep_input_unchanged=True makes it unnecessary
            # self.res_online = temp_res_online
            self.online_fails.put(error)
            # Balazs: an online failure due to mapping is natural, we continue working.
        except Exception as e:
            # Balazs: exception is not thrown when acquire didnt succeed, this exception is fatal
            # NOTE(review): e.message is Python-2-only.
            log.error(str(e.message) + str(e.__class__))
            log.error("do_online_mapping : "
                      "Unhandled exception cought during online mapping :( ")
            raise
    # Kept for optional memory profiling of this method:
    #fp = open('memory_profiler.log', 'a')
    #@profile(stream=fp)
    def do_offline_mapping(self):
        """Run one round of offline (MILP) re-optimization.

        Builds res_offline from the current state, removes the requests
        that must not be re-optimized, re-adds E2E requirements, runs
        the MILP, and (if applicable) merges the result into res_online.
        MappingException is tolerated (state reset to INIT); any other
        exception is fatal and re-raised.
        """
        mem_in_beginning = 0
        try:
            mem_in_beginning = memory_usage(-1, interval=1, timeout=1)
            log.debug("Total MEMORY usage in the beginning of the do_offline_mapping: "+ str(mem_in_beginning)+" MB")
            # WARNING: we can't lock both of them at the same time, cuz that can cause deadlock
            # If both of them needs to be locked make the order: res_online -> sum_req!
            self.set_offline_resource_graph()
            # read what shall we optimize.
            self.sum_req_protector.start_reading_res_nffg("Determine set of requests to optimize")
            self.del_exp_reqs_from_sum_req()
            self.reqs_under_optimization = self.__what_to_opt.reqs_to_optimize(self.SUM_req)
            # Snapshot taken under the read lock; used below without it.
            tmp_sum_req = copy.deepcopy(self.SUM_req)
            self.sum_req_protector.finish_reading_res_nffg("Got requests to optimize")
            log.debug("SAP count in request %s and in resource: %s, resource total size: %s" %
                      (len([s for s in self.reqs_under_optimization.saps]),
                       len([s for s in self.res_offline.saps]),
                       len(self.res_offline)))
            starting_time = datetime.datetime.now()
            # set mapped NF reoptimization True, and delete other NFs from
            # res_offline which are not in reqs_under_optimization, because
            # it is what_to_opt's responsibilty to determine the set of requests to optimize!
            # ignore_infras=True calculates the difference only on the SG.
            self.res_offline = NFFGToolBox.recreate_all_sghops(self.res_offline)
            _, reqs_not_to_be_opt = NFFGToolBox.generate_difference_of_nffgs(
                self.res_offline,
                self.reqs_under_optimization,
                ignore_infras=True)
            # Remove infras from del graph to avoid unnecessary warning during delete.
            for infra in [i for i in reqs_not_to_be_opt.infras]:
                reqs_not_to_be_opt.del_node(infra)
            if len([n for n in self.reqs_under_optimization.nfs]) == 0:
                raise uet.MappingException("Offline didn't get any requests to optimize")
            not_top_opt_nfs = [n.id for n in reqs_not_to_be_opt.nfs]
            # Even in case of all_reqs strategy this may be non zero, in
            # case a deletion happened during execution of this function.
            log.debug("Removing requests (%s NFs) from res_offline which "
                      "shouldn't be optimized! Examples: %s"%(len(not_top_opt_nfs),
                      not_top_opt_nfs[:20]))
            if len(not_top_opt_nfs) > 0:
                # NOTE: generate_difference_of_nffgs doesn't return with the
                # EdgeReqs! This is an ugly solution!!!
                for req in tmp_sum_req.reqs:
                    if req.sg_path[0] in [sg.id for sg in reqs_not_to_be_opt.sg_hops]:
                        self.res_offline.del_edge(req.src.node.id, req.dst.node.id,
                                                  id=req.id)
                self.res_offline, _ = online_mapping.MAP(reqs_not_to_be_opt,
                                                         self.res_offline,
                                                         mode=NFFG.MODE_DEL,
                                                         keep_input_unchanged=True)
                log.debug("Time spent with deleting requests not to be optimized "
                          "from res_offline %s"%
                          (datetime.datetime.now()-starting_time))
            starting_time = datetime.datetime.now()
            log.debug("Adding %s path requirements to offline resource."
                      %len([r for r in self.reqs_under_optimization.reqs]))
            for req in self.reqs_under_optimization.reqs:
                # skip adding the EdgeReq if its SGHop is in not_top_opt_nfs
                # (this may happen if a request expired since we last checked)
                if len([s for s in self.reqs_under_optimization.sg_hops]) > 0:
                    # if there are no SGHops, no problem can happen due to this
                    sghop_of_edgereq = None
                    for sghop in self.reqs_under_optimization.sg_hops:
                        if sghop.id == req.sg_path[0]:
                            sghop_of_edgereq = sghop
                            break
                    if sghop_of_edgereq.id in [sg.id for sg in reqs_not_to_be_opt.sg_hops]:
                        log.debug("Skipping adding EdgeReq on path %s to offline "
                                  "resource"%req.sg_path)
                        continue
                if not self.res_offline.network.has_edge(req.src.node.id,
                                                         req.dst.node.id, key=req.id):
                    # Bandwidth requirements of SGhops are already known by the
                    # flowrules!! IF we would keep the EdgeReqs with non-zero
                    # bandwidth, they would count as additional bw!
                    # Only the delay is important in this case!
                    req.bandwidth = 0.0
                    # port objects are set correctly by NFFG lib
                    self.res_offline.add_req(req.src, req.dst, req=req)
                    # log.debug("Adding requirement with zero-ed bandwidth on "
                    #           "path %s"%req.sg_path)
            log.debug("Time spent with adding requirement links to "
                      "res_offline %s"%(datetime.datetime.now()-starting_time))
            # we don't want to map additional requests, so set request to empty
            self.res_offline = offline_mapping.MAP(
                NFFG(), self.res_offline, True,
                self.mig_handler, self.migration_coeff, self.load_balance_coeff,
                self.edge_cost_coeff, **self.optional_milp_params)
            mem_usage = memory_usage(-1, interval=1, timeout=1)
            log.debug("Total MEMORY usage in the end of the do_offline_mapping: " + str(mem_usage) + " MB")
            log.debug("Total MEMORY difference: " + str(mem_usage[0] - mem_in_beginning[0]) + " MB")
            self.offline_status = HybridOrchestrator.OFFLINE_STATE_FINISHED
            if self.__when_to_apply_opt.is_optimization_applicable(
                    self.offline_status, just_check=True):
                # Need to del_exp_reqs_from_res_offline and merge
                # the merge MUST set the state before releasing the writing lock
                log.info("Merging online and offline immediately after "
                         "offline finished")
                self.res_online_protector.start_writing_res_nffg(
                    "Removing SC-s which are possibly migrated and merging")
                self.merge_online_offline()
                self.res_online_protector.finish_writing_res_nffg(
                    "Merged or failed during merging res_online and the "
                    "optimized res_offline")
            else:
                log.info("Skipping merging online and offline merge, and "
                         "delaying optimization application.")
            self.__what_to_opt.opt_data_handler.write_data(
                len([n for n in self.reqs_under_optimization.nfs]),
                (time.time() - self.offline_start_time ))
            self.offline_start_time = 0
            log.info("Offline mapping is ready!")
        except uet.MappingException as e:
            mem_usage = memory_usage(-1, interval=1, timeout=1)
            log.debug("Total MEMORY usage after mapping error of the do_offline_mapping: " + str(mem_usage)+" MB")
            log.debug("Total MEMORY difference: " + str(mem_usage[0] - mem_in_beginning[0]) + " MB")
            log.warn(e.msg)
            log.warn("Mapping thread: "
                     "Offline mapping: Unable to mapping offline!")
            # Balazs: in case the MILP fails with MappingException we can continue working.
            self.offline_status = HybridOrchestrator.OFFLINE_STATE_INIT
        except Exception as e:
            mem_usage = memory_usage(-1, interval=1, timeout=1)
            log.debug("Total MEMORY usage after error of the do_offline_mapping: " + str(mem_usage)+" MB")
            log.debug("Total MEMORY difference: " + str(mem_usage[0] - mem_in_beginning[0]) + " MB")
            if hasattr(e, 'msg'):
                msg = e.msg
            else:
                msg = e.message
            log.error("Offline mapping failed: with exception %s, message:"
                      " %s"%(e,msg))
            raise
def del_exp_reqs_from_nffg(self, self_nffg_name):
  """
  Removes the NFs and E2E requirement edges of the already expired services
  (collected in self.deleted_services) from the NFFG stored in the attribute
  named by `self_nffg_name` (e.g. "res_offline"), then lets the online
  mapping algorithm in DEL mode actually unmap the marked NFs.

  :param self_nffg_name: name of an NFFG-typed attribute of this object
  :return: None -- the named attribute is replaced with the pruned NFFG
  :raises uet.UnifyException: re-raised after logging if deletion mapping fails
  """
  if getattr(self, self_nffg_name) is not None:
    try:
      for i in self.deleted_services:
        delete = False
        # Mark for deletion only the NFs which are still present in the
        # target NFFG; DEL mode handles the rest.
        for j in i.nfs:
          if j.id in [nf.id for nf in getattr(self, self_nffg_name).nfs]:
            delete = True
            j.operation = NFFG.OP_DELETE
        if delete:
          log.debug("Deleting NFs from %s due to expiration during the "
                    "offline optimization: %s" %
                    (self_nffg_name, i.network.nodes()))
          # Remove the E2E requirement edges explicitly before running the
          # deletion mapping, which removes the marked NFs themselves.
          for req in i.reqs:
            getattr(self, self_nffg_name).del_edge(req.src.node.id,
                                                   req.dst.node.id, id=req.id)
            log.debug("Deleting E2E requirement from %s on path %s" %
                      (self_nffg_name, req.sg_path))
          # keep_input_unchanged: 'i' is reused for other NFFG attributes too.
          del_nffg, _ = online_mapping.MAP(i,
                                           getattr(self, self_nffg_name),
                                           mode=NFFG.MODE_DEL,
                                           keep_input_unchanged=True)
          setattr(self, self_nffg_name, del_nffg)
    except uet.UnifyException as ue:
      log.error("UnifyException catched during deleting expired "
                "requests from %s" % self_nffg_name)
      log.error(ue.msg)
      raise
    except Exception as e:
      log.error("Unhandled exception catched during deleting expired "
                "requests from %s" % self_nffg_name)
      raise
def remove_sg_from_sum_req(self, request):
  """
  Deletes all NFs and EdgeReqs of `request` from the aggregated SUM_req graph
  and prunes SAPs which became isolated. The sum_req protector lock MUST be
  held around this call!

  :param request: service graph whose elements shall be removed
  :return: None
  """
  # Removing from a plain SG collection is much simpler than the DEL-mode
  # mapping used for NFFGs which represent actual mappings.
  removed_nf_ids = []
  for vnf in request.nfs:
    self.SUM_req.del_node(vnf.id)
    removed_nf_ids.append(vnf.id)
  removed_req_ids = []
  for edge_req in request.reqs:
    self.SUM_req.del_edge(edge_req.src.node.id, edge_req.dst.node.id,
                          id=edge_req.id)
    removed_req_ids.append(edge_req.id)
  # Sanity check: NFs and their E2E requirements must be removed together.
  if removed_nf_ids and not removed_req_ids:
    raise Exception("NFs were removed from sum_req, but their EdgeReq wasn't!")
  for sap in request.saps:
    # A string sap.id could be iterated by mistake downstream; membership
    # test on the network guards against missing nodes as well.
    if sap.id not in self.SUM_req.network:
      continue
    degree = self.SUM_req.network.out_degree(sap.id) + \
             self.SUM_req.network.in_degree(sap.id)
    if degree == 0:
      self.SUM_req.del_node(sap.id)
def del_exp_reqs_from_sum_req(self):
  """Drop every expired service request from the aggregated SUM_req graph."""
  log.debug("Deleting expired requests from sum_req.")
  for expired_request in self.deleted_services:
    self.remove_sg_from_sum_req(expired_request)
def set_online_resource_graph(self, request):
  """
  Chooses the base resource for the online mapping of `request` and sets
  self.res_online via the configured resource sharing strategy. Depending on
  the offline optimization state it uses either the received resource, the
  freshly merged reoptimized resource, or triggers the merge itself.

  :param request: the request about to be mapped online (used for logging)
  :return: None -- self.res_online is (re)set as a side effect
  """
  # Resource sharing strategy
  try:
    log.debug("Setting online resource for sharing between "
              "online and offline resources")
    optimization_applicable = \
      self.__when_to_apply_opt.is_optimization_applicable(
        self.offline_status)
    # If we should already apply the optimization, but that is not ready yet, we have to wait for the thread to finish.
    if optimization_applicable and self.offline_status != HybridOrchestrator.OFFLINE_STATE_FINISHED\
       and self.offline_mapping_thread is not None:
      if self.offline_mapping_thread.is_alive():
        waiting_time = time.time()
        self.offline_mapping_thread.join()
        log.debug("Time spent for waiting for offline optimization, "
                  "when we already needed the result: %s s"%
                  (time.time()-waiting_time))
    if self.offline_status == HybridOrchestrator.OFFLINE_STATE_RUNNING or \
       self.offline_status == HybridOrchestrator.OFFLINE_STATE_INIT or \
       not optimization_applicable:
      # The online_res may be under merge OR offline reoptimization is idle because it was not needed.
      self.res_online = self.__res_sharing_strat.get_online_resource(self.received_resource,
                                                                     self.res_offline)
      log.debug("Setting online resource based on received resource "
                "for request %s!"%request.id)
    elif self.offline_status == HybridOrchestrator.OFFLINE_STATE_FINISHED and \
         optimization_applicable:
      # Remember the NF count to verify the merge does not lose/add NFs.
      number_of_nfs = len([n for n in self.received_resource.nfs])
      # we need to check if the optimization can be merged with the
      # online resource
      self.merge_online_offline()
      res_to_use = self.received_resource
      if self.offline_status == HybridOrchestrator.OFFLINE_STATE_MERGED:
        # An expiration could have happened since reoptimization and the
        # just finished mergin.
        self.del_exp_reqs_from_nffg("reoptimized_resource")
        res_to_use = self.reoptimized_resource
        log.debug("Setting online resource based on just now merged "
                  "reoptimized resource for request %s!"%request.id)
      else:
        # merge_online_offline reset the state to INIT on failure; fall
        # back to the received resource.
        log.debug(
          "Setting onlinre resource based on received resource "
          "because of optimization application failure for request %s!"
          % request.id)
      # use the sharing strategy on the right resource
      self.res_online = self.__res_sharing_strat.get_online_resource(
        res_to_use, self.res_offline)
      should_be_same_number_of_nfs = len([n for n in self.res_online.nfs])
      if should_be_same_number_of_nfs != number_of_nfs:
        log.error("NF in res_online but not in received_resource: %s"%
                  (set([n.id for n in self.res_online.nfs]) -
                   set([n.id for n in self.received_resource.nfs])))
        log.error("NF in received_resource but not in res_online: %s" % (
          set([n.id for n in self.received_resource.nfs]) - set(
          [n.id for n in self.res_online.nfs])))
        raise Exception("Merging messed up the number of NFs in "
                        "res_online: nubmer before: %s current number: %s"%
                        (number_of_nfs, should_be_same_number_of_nfs))
      # NOTE(review): applied() is called even when the merge failed and
      # the received resource was used -- confirm this is intentional.
      self.offline_status = HybridOrchestrator.OFFLINE_STATE_INIT
      self.__when_to_apply_opt.applied()
    elif self.offline_status == HybridOrchestrator.OFFLINE_STATE_MERGED:
      # An expiration could have happened while we were merging or
      # waiting for res_online setting.
      self.del_exp_reqs_from_nffg("reoptimized_resource")
      self.res_online = self.__res_sharing_strat.get_online_resource(self.reoptimized_resource,
                                                                     self.res_offline)
      log.debug("Setting online resource based on recently "
                "reoptimized resource for request %s!"%request.id)
      self.offline_status = HybridOrchestrator.OFFLINE_STATE_INIT
      self.__when_to_apply_opt.applied()
    else:
      raise Exception("Invalid offline_status: %s, optimization "
                      "applicable: %s"%
                      (self.offline_status, optimization_applicable))
  except Exception as e:
    log.error(e.message)
    log.error("Unhandled Exception catched during resource sharing.")
    raise
  log.debug("Examples of online resource capacities: %s"%
            [(i.id, i.resources) for i in self.res_online.infras][:10])
def set_offline_resource_graph(self):
  """
  Derives self.res_offline from the most recently received resource via the
  resource sharing strategy, removing already expired services from it.
  """
  # Hold the read lock: res_online must stay consistent while we derive the
  # offline view from the shared received resource.
  self.res_online_protector.start_reading_res_nffg("Setting offline resource")
  self.res_offline = self.__res_sharing_strat.get_offline_resource(
    self.received_resource, self.res_offline)
  # Services may have expired since the resource was received.
  self.del_exp_reqs_from_nffg("res_offline")
  capacity_samples = [(i.id, i.resources) for i in self.res_offline.infras]
  log.debug("Examples of offline resource capacities: %s" %
            capacity_samples[:10])
  self.res_online_protector.finish_reading_res_nffg("Offline resource was set")
def merge_online_offline(self):
  """
  Applies the finished offline optimization onto the current online mapping:
  rebuilds the mapping on the bare 100% topology, unmaps the requests which
  may have been migrated, merges the optimized res_offline on top, and
  verifies the result by recalculating available resources. Sets
  self.offline_status to OFFLINE_STATE_MERGED on success or back to
  OFFLINE_STATE_INIT on (recoverable) failure.

  :return: None -- self.reoptimized_resource and self.offline_status are set
  """
  # Outer try has only a `finally` for timing; the inner try/except decides
  # whether the merge result is resource-consistent.
  try:
    log.info("Try to merge online and offline")
    starting_time = datetime.datetime.now()
    log.info("Delete expired requests from the res_offline")
    # so we won't fail in merging due to already expired services.
    # res_offline for multi-threaded writing is also covered by the
    # res_online_protector
    self.del_exp_reqs_from_nffg("res_offline")
    # res_online always contains only the alive and currently mapped requests!
    # Put the online mapping onto the bare 100% topology, the res_online is not
    # changed, and the resource capacities of the 'target' are returned.
    self.reoptimized_resource = NFFGToolBox.merge_nffgs(
      copy.deepcopy(self.bare_resource_100), self.res_online,
      copy_shallow=True, silent=True)
    # Balazs: Delete requests from res_online, which are possibly migrated
    # NOTE: if an NF to be deleted doesn't exist in the substrate DEL mode ignores it.
    log.debug("merge_online_offline: Removing NFs to be migrated from "
              "res_online, examples: %s"%self.reqs_under_optimization.network.nodes()[:20])
    # deepcopy is not necessary here, SUM_req (at least its relevant subset) is copied
    possible_reqs_to_migrate = self.reqs_under_optimization
    for nf in possible_reqs_to_migrate.nfs:
      nf.operation = NFFG.OP_DELETE
    # if there is NF which is not in res_online anymore, DEL mode ignores it
    for req in possible_reqs_to_migrate.reqs:
      self.reoptimized_resource.del_edge(req.src.node.id, req.dst.node.id, id=req.id)
    self.reoptimized_resource, _ = online_mapping.MAP(possible_reqs_to_migrate,
                                                      self.reoptimized_resource,
                                                      mode=NFFG.MODE_DEL,
                                                      keep_input_unchanged=True)
    log.debug("Times passed with preparing merge: %s"%
              (datetime.datetime.now()-starting_time))
    starting_time = datetime.datetime.now()
    log.debug("merge_online_offline: Applying offline optimization...")
    # Overlay the optimized offline mapping on top of the pruned resource.
    self.reoptimized_resource = NFFGToolBox.merge_nffgs(self.reoptimized_resource,
                                                        self.res_offline,
                                                        copy_shallow=True,
                                                        silent=True)
    log.debug(
      "Time passed with merging online and offline resources: %s" %
      (datetime.datetime.now() - starting_time))
    starting_time = datetime.datetime.now()
    try:
      log.debug("Examples of reoptimized resource capacities: %s"%
                [(i.id, i.resources) for i in
                 self.reoptimized_resource.infras][:10])
      # Checking whether the merge was in fact successful according to resources.
      self.reoptimized_resource.calculate_available_node_res()
      self.reoptimized_resource.calculate_available_link_res([])
      log.info("merge_online_offline : "
               "Optimization applied successfully :)")
      self.offline_status = HybridOrchestrator.OFFLINE_STATE_MERGED
    # Balazs The calc res functions throw only RuntimeError if it is
    # failed due to resource reservation collision!
    except RuntimeError as e:
      log.warn(e.message)
      # We continue to work from this stage, we can try optimization again
      log.warn("Unable to merge online and offline :(")
      self.offline_status = HybridOrchestrator.OFFLINE_STATE_INIT
    except Exception as e:
      log.error(e.message)
      # Balazs: this exception is fatal
      log.error("Unhandled Exception during merge :(")
      self.offline_status = HybridOrchestrator.OFFLINE_STATE_INIT
      raise
  finally:
    log.debug("Time passed by checking merge success: %s "%
              (datetime.datetime.now() - starting_time))
def MAP(self, request, resource, deleted_services):
  """
  Maps `request` onto `resource` with the hybrid online/offline strategy:
  runs (or schedules) the online mapping, optionally kicks off the offline
  reoptimization thread, and returns the resulting resource NFFG copied onto
  the bare 100% topology.

  FIX: the thread-startup error handlers previously logged `e.message`,
  which does not exist on Python 3 exceptions (or on exceptions without a
  `message` attribute); the logging itself then raised AttributeError and
  masked the real startup failure. A safe `getattr(..., str(e))` fallback is
  used instead.

  :param request: service graph to be mapped
  :param resource: the currently received resource NFFG
  :param deleted_services: list of expired service graphs
  :return: the mapped resource NFFG (on the bare 100% topology)
  :raises uet.MappingException: if the online mapping failed
  """
  self.deleted_services = deleted_services
  # store received resource so the offline and online could use it
  # disregarding their order of initiation.
  self.received_resource = resource
  # Start online mapping thread
  online_mapping_thread = threading.Thread(None, self.do_online_mapping,
                                           "Online mapping thread", [request])
  # in case of not multi threaded operation, the incoming request would
  # be lost if there is a sequential reoptimization in this turn. So
  # the online mapping shall be executed after the optimization.
  if self.hybrid_multi_thread:
    try:
      log.info("Start online mapping!")
      # res_online surely shouldn't be modified while an online mapping
      # is in progress! Until we return with its copy where the new
      # request is also mapped.
      self.res_online_protector.start_writing_res_nffg(
        "Map a request in an online manner")
      online_mapping_thread.start()
    except Exception as e:
      # 'message' exists only on Python 2 exceptions; fall back to str(e)
      # so logging cannot raise and hide the original failure.
      log.error(getattr(e, 'message', str(e)))
      log.error("Failed to start online thread")
      raise
  # Start offline mapping thread
  # check if there is anything to optimize
  if self.res_online is not None and len([n for n in self.res_online.nfs]) > 0:
    if self.__when_to_opt.need_to_optimize(not self.offline_status==HybridOrchestrator.OFFLINE_STATE_INIT, self.when_to_opt_param):
      try:
        self.offline_mapping_thread = threading.Thread(None,
          self.do_offline_mapping, "Offline mapping thread", [])
        log.info("Start offline optimization!")
        self.offline_status = HybridOrchestrator.OFFLINE_STATE_RUNNING
        self.offline_start_time = time.time()
        self.offline_mapping_thread.start()
        if not self.hybrid_multi_thread:
          # sequential mode: the optimization must complete before the
          # online mapping may run.
          self.offline_mapping_thread.join()
      except Exception as e:
        # Same safe logging fallback as above.
        log.error(getattr(e, 'message', str(e)))
        log.error("Failed to start offline thread")
        raise
    else:
      log.info("No need to optimize!")
  if self.hybrid_multi_thread:
    online_mapping_thread.join()
  else:
    # in case of non-multi threaded execution, do online after reoptimization.
    online_mapping_thread.start()
    online_mapping_thread.join()
  if not self.online_fails.empty():
    error = self.online_fails.get()
    if self.hybrid_multi_thread:
      # Release the write lock taken before starting the online thread.
      self.res_online_protector.finish_writing_res_nffg("Online mapping failed")
    raise uet.MappingException(error.msg)
  # res_online may have less than 100% of capacities due to resource
  # sharing strategies, so we need to copy the mapping to the bare
  # resource. If an optimization has finished before this online
  # mapping was executed, the online algorithm used the reoptimized
  # mapping already
  res_online_to_return = NFFGToolBox.merge_nffgs(
    copy.deepcopy(self.bare_resource_100), self.res_online,
    copy_shallow=True, silent=True)
  log.debug("Examples of the returned resource capacities: %s"%
            [(i.id, i.resources) for i in res_online_to_return.infras][:10])
  # Collect the requests
  # NOTE: only after we know for sure, this request is mapped and the other
  # lock is released (to avoid deadlock)
  # NOTE: this also causes the offline optimization to skip this request
  # for the first time, because it will be missing from SUM_req.
  self.merge_all_request(request)
  if self.hybrid_multi_thread:
    self.res_online_protector.finish_writing_res_nffg("Online mapping finished")
  return res_online_to_return
| |
import copy
from datetime import datetime
import pytest
from bson.objectid import ObjectId
from MongoDB import convert_id_to_object_id, convert_object_id_to_str, convert_str_to_datetime, Client, search_query, \
format_sort, pipeline_query_command
# (input, expected) pairs for convert_id_to_object_id: string "_id" values --
# including those nested under query operators -- become bson.ObjectId, while
# empty dicts and non-"_id" keys pass through unchanged.
id_to_obj_inputs = [
    (
        [{"_id": "5e4412f230c5b8f63a7356ba"}],
        [{"_id": ObjectId("5e4412f230c5b8f63a7356ba")}],
    ),
    (
        {"_id": "5e4412f230c5b8f63a7356ba"},
        {"_id": ObjectId("5e4412f230c5b8f63a7356ba")},
    ),
    (
        {"_id": {"$gte": "5e4412f230c5b8f63a7356ba"}},
        {"_id": {"$gte": ObjectId("5e4412f230c5b8f63a7356ba")}},
    ),
    ({}, {}),
    ({"id": 1}, {"id": 1}),
]
@pytest.mark.parametrize("func_input, expected", id_to_obj_inputs)
def test_convert_id_to_object_id(func_input, expected):
    """String '_id' values are converted to ObjectId for every parametrized case."""
    assert convert_id_to_object_id(func_input) == expected
# (input, expected) pairs for convert_object_id_to_str: lists of ObjectId
# become lists of their hexadecimal string forms (duplicates preserved).
object_to_id = [
    ([ObjectId("5e4412f230c5b8f63a7356ba")], ["5e4412f230c5b8f63a7356ba"]),
    (
        [ObjectId("5e4412f230c5b8f63a7356ba"), ObjectId("5e4412f230c5b8f63a7356ba")],
        ["5e4412f230c5b8f63a7356ba", "5e4412f230c5b8f63a7356ba"],
    ),
]
@pytest.mark.parametrize("func_input, expected", object_to_id)
def test_convert_object_id_to_str(func_input, expected):
    """Each ObjectId in the list is rendered as its hex string."""
    assert convert_object_id_to_str(func_input) == expected
def test_normalize_id():
    """Client.normalize_id replaces the ObjectId under '_id' with its str form."""
    normalized = Client.normalize_id({'_id': ObjectId('5e4412f230c5b8f63a7356ba')})
    assert normalized['_id'] == '5e4412f230c5b8f63a7356ba'
class TestConvertStrToDatetime:
    """Tests for convert_str_to_datetime: "ISODate('...')" string values become datetimes."""
    # The second case lacks the trailing 'Z' and is expected to fail (xfail).
    dict_inputs = [
        {"testing": 123, "time": "ISODate('2020-06-12T08:23:07.000Z')"},
        pytest.param(
            {"testing": 123, "time": "ISODate('2018-06-12T08:23:07.000')"},
            marks=pytest.mark.xfail)
    ]
    @pytest.mark.parametrize("func_input", dict_inputs)
    def test_convert_str_to_datetime(self, func_input):
        # ISODate string values should be parsed into datetime objects.
        res = convert_str_to_datetime(func_input)
        assert isinstance(res['time'], datetime)
    def test_convert_str_to_datetime_no_datetime_obj(self):
        # Non-string values must pass through unchanged.
        inputs = {1: 2}
        res = convert_str_to_datetime(inputs)
        assert isinstance(res[1], int)
    def test_nested_dict(self):
        """
        Given:
            A nested dict with a timestamp
        When:
            Running a query or insert
        Then:
            Validating all keys in the dict are there and the timestamp is valid
        """
        func_input = {"k": {"$gte": "ISODate('2020-06-12T08:23:07.000Z')"}}
        res = convert_str_to_datetime(func_input)
        assert isinstance(res["k"]["$gte"], datetime)
class TestDatetimeToStr:
    """Tests for Client.datetime_to_str: datetimes (in dicts, lists, or bare) become ISO strings; other values pass through."""
    # Shared fixture: one datetime object and its expected string rendering.
    datetime_obj = datetime.strptime('2020-05-19T09:05:28.000Z', '%Y-%m-%dT%H:%M:%S.000Z')
    datetime_str = '2020-05-19T09:05:28.000Z'
    def test_datetime_to_str_dict(self):
        """
        Given:
            dict containing datetime object
        When:
            converting datetimes to strs
        Then:
            validate the value is a string.
        """
        raw = Client.datetime_to_str({'time': self.datetime_obj})
        assert self.datetime_str == raw['time']
    def test_datetime_to_str_list(self):
        """
        Given:
            list containing datetime object
        When:
            converting datetimes to strs
        Then:
            validate the value is a string.
        """
        raw = Client.datetime_to_str([self.datetime_obj])
        assert [self.datetime_str] == raw
    def test_datetime_to_str_str(self):
        """
        Given:
            datetime object
        When:
            converting datetimes to strs
        Then:
            validate the value is a string.
        """
        raw = Client.datetime_to_str(self.datetime_obj)
        assert self.datetime_str == raw
    def test_datetime_to_str_dict_no_datetime(self):
        """
        Given:
            dict containing 5 (int)
        When:
            converting datetimes to strs
        Then:
            validate the value returned is 5
        """
        raw = Client.datetime_to_str({'time': 5})
        assert 5 == raw['time']
    def test_datetime_to_str_list_no_datetime(self):
        """
        Given:
            list containing an int (5) object
        When:
            converting datetimes to strs
        Then:
            validate the value returned is 5.
        """
        raw = Client.datetime_to_str([5])
        assert [5] == raw
    def test_datetime_to_str_str_no_datetime(self):
        """
        Given:
            'str'
        When:
            converting datetimes to strs
        Then:
            validate the value returned is 'str'.
        """
        raw = Client.datetime_to_str('str')
        assert 'str' == raw
class MockedQuery:
    """Minimal stand-in for a pymongo collection: find(...).limit(...) always yields one fixed entry with a datetime and an ObjectId."""
    class Limit:
        # Mimics the cursor returned by find(); the limit argument is ignored.
        @staticmethod
        def limit(number):
            return [{'time': TestDatetimeToStr.datetime_obj, '_id': ObjectId('5e4412f230c5b8f63a7356ba')}]
    @classmethod
    def find(cls, query):
        # The query is ignored; the fixed Limit cursor is always returned.
        return cls.Limit
def test_query(mocker):
    """
    Given:
        Object with datetime and object id in it
    When:
        Querying the mocked collection
    Then:
        validate all objects returned are strs.
    """
    client = Client(['aaaaa'], 'a', 'b', 'd')
    mocker.patch.object(Client, 'get_collection', return_value=MockedQuery)
    _, _, raw_response = search_query(client, 'a', '{}', '50')
    entry = raw_response[0]
    assert isinstance(entry['_id'], str)
    assert isinstance(entry['time'], str)
class TestFormatSort:
    """Tests for format_sort: 'field:asc,field:desc' strings become pymongo sort specs."""
    def test_format_sort_correctly(self):
        """
        Given:
            a sort string in the correct format
        Then:
            Format the string in the correct format to be used in `pymongo.sort()`
        """
        expected_pairs = [('field1', 1), ('field2', -1)]
        assert format_sort("field1:asc,field2:desc") == expected_pairs
        assert format_sort("field1:asc") == [('field1', 1)]
    def test_format_sort_raises_error(self):
        """
        Given:
            a sort string in the wrong format
        Then:
            raise a ValueError
        """
        for bad_sort in ("Wrong:Type", "WrongType"):
            with pytest.raises(ValueError):
                format_sort(bad_sort)
def test_pipeline_query_command(mocker):
    """
    Given:
        collection - where to search.
        pipeline - json pipeline query
    When:
        calling `pipeline_query_command`
    Then:
        validate the readable output and context
    """
    client = Client(['aaaaa'], 'a', 'b', 'd')
    mocked_entries = [
        {'title': 'test_title', 'color': 'red', 'year': '2019', '_id': '6034a5a62f605638740dba55'},
        {'title': 'test_title', 'color': 'yellow', 'year': '2020', '_id': '6034a5c52f605638740dba57'}
    ]
    mocker.patch.object(client, 'pipeline_query', return_value=mocked_entries)
    readable_outputs, outputs, raw_response = pipeline_query_command(
        client=client,
        collection='test_collection',
        pipeline="[{\"$match\": {\"title\": \"test_title\"}}]"
    )
    # Each context entry is the raw entry tagged with its source collection.
    expected_context = [
        dict(entry, collection='test_collection')
        for entry in copy.deepcopy(raw_response)
    ]
    assert 'Total of 2 entries were found in MongoDB collection' in readable_outputs
    assert outputs.get('MongoDB.Entry(val._id === obj._id && obj.collection === val.collection)') == expected_context
| |
# -*- coding: utf-8 -*-
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType
from pyangbind.lib.yangtypes import RestrictedClassType
from pyangbind.lib.yangtypes import TypedListType
from pyangbind.lib.yangtypes import YANGBool
from pyangbind.lib.yangtypes import YANGListType
from pyangbind.lib.yangtypes import YANGDynClass
from pyangbind.lib.yangtypes import ReferenceType
from pyangbind.lib.base import PybindBase
from collections import OrderedDict
from decimal import Decimal
from bitarray import bitarray
import six
# PY3 support of some PY2 keywords (needs improved)
if six.PY3:
import builtins as __builtin__
long = int
elif six.PY2:
import __builtin__
from . import interface
class interfaces(PybindBase):
    """
    This class was auto-generated by the PythonClass plugin for PYANG
    from YANG module openconfig-network-instance - based on the path /network-instances/network-instance/protocols/protocol/ospfv2/areas/area/interfaces. Each member element of
    the container is represented as a class variable - with a specific
    YANG type.
    YANG Description: Enclosing container for a list of interfaces enabled within
    this area
    """
    # Restrict instances to the pyangbind bookkeeping slots plus the list member.
    __slots__ = ("_path_helper", "_extmethods", "__interface")
    _yang_name = "interfaces"
    _pybind_generated_by = "container"
    def __init__(self, *args, **kwargs):
        self._path_helper = False
        self._extmethods = False
        # Default value of the 'interface' YANG list, keyed on 'id'.
        self.__interface = YANGDynClass(
            base=YANGListType(
                "id",
                interface.interface,
                yang_name="interface",
                parent=self,
                is_container="list",
                user_ordered=False,
                path_helper=self._path_helper,
                yang_keys="id",
                extensions=None,
            ),
            is_container="list",
            yang_name="interface",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions=None,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="list",
            is_config=True,
        )
        load = kwargs.pop("load", None)
        # Optional single positional argument: copy element values from a
        # compatible object exposing the same pyangbind elements.
        if args:
            if len(args) > 1:
                raise TypeError("cannot create a YANG container with >1 argument")
            all_attr = True
            for e in self._pyangbind_elements:
                if not hasattr(args[0], e):
                    all_attr = False
                    break
            if not all_attr:
                raise ValueError("Supplied object did not have the correct attributes")
            for e in self._pyangbind_elements:
                nobj = getattr(args[0], e)
                if nobj._changed() is False:
                    continue
                setmethod = getattr(self, "_set_%s" % e)
                if load is None:
                    setmethod(getattr(args[0], e))
                else:
                    setmethod(getattr(args[0], e), load=load)
    def _path(self):
        # Delegate to the parent's path when attached; otherwise return the
        # absolute schema path of this container.
        if hasattr(self, "_parent"):
            return self._parent._path() + [self._yang_name]
        else:
            return [
                "network-instances",
                "network-instance",
                "protocols",
                "protocol",
                "ospfv2",
                "areas",
                "area",
                "interfaces",
            ]
    def _get_interface(self):
        """
        Getter method for interface, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/interfaces/interface (list)
        YANG Description: List of interfaces which are enabled within this area
        """
        return self.__interface
    def _set_interface(self, v, load=False):
        """
        Setter method for interface, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/interfaces/interface (list)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_interface is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_interface() directly.
        YANG Description: List of interfaces which are enabled within this area
        """
        if hasattr(v, "_utype"):
            v = v._utype(v)
        # Re-wrap the supplied value; TypeError/ValueError means it is not
        # compatible with the YANG list type.
        try:
            t = YANGDynClass(
                v,
                base=YANGListType(
                    "id",
                    interface.interface,
                    yang_name="interface",
                    parent=self,
                    is_container="list",
                    user_ordered=False,
                    path_helper=self._path_helper,
                    yang_keys="id",
                    extensions=None,
                ),
                is_container="list",
                yang_name="interface",
                parent=self,
                path_helper=self._path_helper,
                extmethods=self._extmethods,
                register_paths=True,
                extensions=None,
                namespace="http://openconfig.net/yang/network-instance",
                defining_module="openconfig-network-instance",
                yang_type="list",
                is_config=True,
            )
        except (TypeError, ValueError):
            raise ValueError(
                {
                    "error-string": """interface must be of a type compatible with list""",
                    "defined-type": "list",
                    "generated-type": """YANGDynClass(base=YANGListType("id",interface.interface, yang_name="interface", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='id', extensions=None), is_container='list', yang_name="interface", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='list', is_config=True)""",
                }
            )
        self.__interface = t
        if hasattr(self, "_set"):
            self._set()
    def _unset_interface(self):
        # Reset the list back to an empty default instance.
        self.__interface = YANGDynClass(
            base=YANGListType(
                "id",
                interface.interface,
                yang_name="interface",
                parent=self,
                is_container="list",
                user_ordered=False,
                path_helper=self._path_helper,
                yang_keys="id",
                extensions=None,
            ),
            is_container="list",
            yang_name="interface",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions=None,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="list",
            is_config=True,
        )
    interface = __builtin__.property(_get_interface, _set_interface)
    _pyangbind_elements = OrderedDict([("interface", interface)])
from . import interface
class interfaces(PybindBase):
    """
    This class was auto-generated by the PythonClass plugin for PYANG
    from YANG module openconfig-network-instance-l2 - based on the path /network-instances/network-instance/protocols/protocol/ospfv2/areas/area/interfaces. Each member element of
    the container is represented as a class variable - with a specific
    YANG type.
    YANG Description: Enclosing container for a list of interfaces enabled within
    this area
    """
    # Restrict instances to the pyangbind bookkeeping slots plus the list member.
    __slots__ = ("_path_helper", "_extmethods", "__interface")
    _yang_name = "interfaces"
    _pybind_generated_by = "container"
    def __init__(self, *args, **kwargs):
        self._path_helper = False
        self._extmethods = False
        # Default value of the 'interface' YANG list, keyed on 'id'.
        self.__interface = YANGDynClass(
            base=YANGListType(
                "id",
                interface.interface,
                yang_name="interface",
                parent=self,
                is_container="list",
                user_ordered=False,
                path_helper=self._path_helper,
                yang_keys="id",
                extensions=None,
            ),
            is_container="list",
            yang_name="interface",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions=None,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="list",
            is_config=True,
        )
        load = kwargs.pop("load", None)
        # Optional single positional argument: copy element values from a
        # compatible object exposing the same pyangbind elements.
        if args:
            if len(args) > 1:
                raise TypeError("cannot create a YANG container with >1 argument")
            all_attr = True
            for e in self._pyangbind_elements:
                if not hasattr(args[0], e):
                    all_attr = False
                    break
            if not all_attr:
                raise ValueError("Supplied object did not have the correct attributes")
            for e in self._pyangbind_elements:
                nobj = getattr(args[0], e)
                if nobj._changed() is False:
                    continue
                setmethod = getattr(self, "_set_%s" % e)
                if load is None:
                    setmethod(getattr(args[0], e))
                else:
                    setmethod(getattr(args[0], e), load=load)
    def _path(self):
        # Delegate to the parent's path when attached; otherwise return the
        # absolute schema path of this container.
        if hasattr(self, "_parent"):
            return self._parent._path() + [self._yang_name]
        else:
            return [
                "network-instances",
                "network-instance",
                "protocols",
                "protocol",
                "ospfv2",
                "areas",
                "area",
                "interfaces",
            ]
    def _get_interface(self):
        """
        Getter method for interface, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/interfaces/interface (list)
        YANG Description: List of interfaces which are enabled within this area
        """
        return self.__interface
    def _set_interface(self, v, load=False):
        """
        Setter method for interface, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/interfaces/interface (list)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_interface is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_interface() directly.
        YANG Description: List of interfaces which are enabled within this area
        """
        if hasattr(v, "_utype"):
            v = v._utype(v)
        # Re-wrap the supplied value; TypeError/ValueError means it is not
        # compatible with the YANG list type.
        try:
            t = YANGDynClass(
                v,
                base=YANGListType(
                    "id",
                    interface.interface,
                    yang_name="interface",
                    parent=self,
                    is_container="list",
                    user_ordered=False,
                    path_helper=self._path_helper,
                    yang_keys="id",
                    extensions=None,
                ),
                is_container="list",
                yang_name="interface",
                parent=self,
                path_helper=self._path_helper,
                extmethods=self._extmethods,
                register_paths=True,
                extensions=None,
                namespace="http://openconfig.net/yang/network-instance",
                defining_module="openconfig-network-instance",
                yang_type="list",
                is_config=True,
            )
        except (TypeError, ValueError):
            raise ValueError(
                {
                    "error-string": """interface must be of a type compatible with list""",
                    "defined-type": "list",
                    "generated-type": """YANGDynClass(base=YANGListType("id",interface.interface, yang_name="interface", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='id', extensions=None), is_container='list', yang_name="interface", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='list', is_config=True)""",
                }
            )
        self.__interface = t
        if hasattr(self, "_set"):
            self._set()
    def _unset_interface(self):
        # Reset the list back to an empty default instance.
        self.__interface = YANGDynClass(
            base=YANGListType(
                "id",
                interface.interface,
                yang_name="interface",
                parent=self,
                is_container="list",
                user_ordered=False,
                path_helper=self._path_helper,
                yang_keys="id",
                extensions=None,
            ),
            is_container="list",
            yang_name="interface",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions=None,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="list",
            is_config=True,
        )
    interface = __builtin__.property(_get_interface, _set_interface)
    _pyangbind_elements = OrderedDict([("interface", interface)])
| |
#!/usr/bin/env python3
## -*- coding: utf-8 -*-
from __future__ import print_function
from triton import *
from unicorn import *
from unicorn.arm_const import *
import pprint
import random
import sys
ADDR = 0x000000   # base address where the test opcodes are written
STACK = 0x100000  # stack region base (sp is initialized here)
HEAP = 0x200000   # heap region base
SIZE = 5 * 1024 * 1024  # size of the single mapping covering all regions
# Switches from Thumb to ARM and back.
IT_INSTRS = [
# ITxyz EQ -------------------------------------------------------------- #
(0x00, b"\x08\xbf", "it eq"),
(0x00, b"\x04\xbf", "itt eq"),
(0x00, b"\x0c\xbf", "ite eq"),
(0x00, b"\x02\xbf", "ittt eq"),
(0x00, b"\x0a\xbf", "itet eq"),
(0x00, b"\x06\xbf", "itte eq"),
(0x00, b"\x0e\xbf", "itee eq"),
(0x00, b"\x01\xbf", "itttt eq"),
(0x00, b"\x09\xbf", "itett eq"),
(0x00, b"\x05\xbf", "ittet eq"),
(0x00, b"\x0d\xbf", "iteet eq"),
(0x00, b"\x03\xbf", "ittte eq"),
(0x00, b"\x0b\xbf", "itete eq"),
(0x00, b"\x07\xbf", "ittee eq"),
(0x00, b"\x0f\xbf", "iteee eq"),
# ITxyz NE -------------------------------------------------------------- #
(0x00, b"\x18\xbf", "it ne"),
(0x00, b"\x1c\xbf", "itt ne"),
(0x00, b"\x14\xbf", "ite ne"),
(0x00, b"\x1e\xbf", "ittt ne"),
(0x00, b"\x16\xbf", "itet ne"),
(0x00, b"\x1a\xbf", "itte ne"),
(0x00, b"\x12\xbf", "itee ne"),
(0x00, b"\x1f\xbf", "itttt ne"),
(0x00, b"\x17\xbf", "itett ne"),
(0x00, b"\x1b\xbf", "ittet ne"),
(0x00, b"\x13\xbf", "iteet ne"),
(0x00, b"\x1d\xbf", "ittte ne"),
(0x00, b"\x15\xbf", "itete ne"),
(0x00, b"\x19\xbf", "ittee ne"),
(0x00, b"\x11\xbf", "iteee ne"),
# ITxyz HS | CS ---------------------------------------------------------- #
(0x00, b"\x28\xbf", "it hs"),
(0x00, b"\x24\xbf", "itt hs"),
(0x00, b"\x2c\xbf", "ite hs"),
(0x00, b"\x22\xbf", "ittt hs"),
(0x00, b"\x2a\xbf", "itet hs"),
(0x00, b"\x26\xbf", "itte hs"),
(0x00, b"\x2e\xbf", "itee hs"),
(0x00, b"\x21\xbf", "itttt hs"),
(0x00, b"\x29\xbf", "itett hs"),
(0x00, b"\x25\xbf", "ittet hs"),
(0x00, b"\x2d\xbf", "iteet hs"),
(0x00, b"\x23\xbf", "ittte hs"),
(0x00, b"\x2b\xbf", "itete hs"),
(0x00, b"\x27\xbf", "ittee hs"),
(0x00, b"\x2f\xbf", "iteee hs"),
(0x00, b"\x28\xbf", "it cs"),
(0x00, b"\x24\xbf", "itt cs"),
(0x00, b"\x2c\xbf", "ite cs"),
(0x00, b"\x22\xbf", "ittt cs"),
(0x00, b"\x2a\xbf", "itet cs"),
(0x00, b"\x26\xbf", "itte cs"),
(0x00, b"\x2e\xbf", "itee cs"),
(0x00, b"\x21\xbf", "itttt cs"),
(0x00, b"\x29\xbf", "itett cs"),
(0x00, b"\x25\xbf", "ittet cs"),
(0x00, b"\x2d\xbf", "iteet cs"),
(0x00, b"\x23\xbf", "ittte cs"),
(0x00, b"\x2b\xbf", "itete cs"),
(0x00, b"\x27\xbf", "ittee cs"),
(0x00, b"\x2f\xbf", "iteee cs"),
# ITxyz LO | CC ---------------------------------------------------------- #
(0x00, b"\x38\xbf", "it lo"),
(0x00, b"\x3c\xbf", "itt lo"),
(0x00, b"\x34\xbf", "ite lo"),
(0x00, b"\x3e\xbf", "ittt lo"),
(0x00, b"\x36\xbf", "itet lo"),
(0x00, b"\x3a\xbf", "itte lo"),
(0x00, b"\x32\xbf", "itee lo"),
(0x00, b"\x3f\xbf", "itttt lo"),
(0x00, b"\x37\xbf", "itett lo"),
(0x00, b"\x3b\xbf", "ittet lo"),
(0x00, b"\x33\xbf", "iteet lo"),
(0x00, b"\x3d\xbf", "ittte lo"),
(0x00, b"\x35\xbf", "itete lo"),
(0x00, b"\x39\xbf", "ittee lo"),
(0x00, b"\x31\xbf", "iteee lo"),
(0x00, b"\x38\xbf", "it cc"),
(0x00, b"\x3c\xbf", "itt cc"),
(0x00, b"\x34\xbf", "ite cc"),
(0x00, b"\x3e\xbf", "ittt cc"),
(0x00, b"\x36\xbf", "itet cc"),
(0x00, b"\x3a\xbf", "itte cc"),
(0x00, b"\x32\xbf", "itee cc"),
(0x00, b"\x3f\xbf", "itttt cc"),
(0x00, b"\x37\xbf", "itett cc"),
(0x00, b"\x3b\xbf", "ittet cc"),
(0x00, b"\x33\xbf", "iteet cc"),
(0x00, b"\x3d\xbf", "ittte cc"),
(0x00, b"\x35\xbf", "itete cc"),
(0x00, b"\x39\xbf", "ittee cc"),
(0x00, b"\x31\xbf", "iteee cc"),
# ITxyz MI -------------------------------------------------------------- #
(0x00, b"\x48\xbf", "it mi"),
(0x00, b"\x44\xbf", "itt mi"),
(0x00, b"\x4c\xbf", "ite mi"),
(0x00, b"\x42\xbf", "ittt mi"),
(0x00, b"\x4a\xbf", "itet mi"),
(0x00, b"\x46\xbf", "itte mi"),
(0x00, b"\x4e\xbf", "itee mi"),
(0x00, b"\x41\xbf", "itttt mi"),
(0x00, b"\x49\xbf", "itett mi"),
(0x00, b"\x45\xbf", "ittet mi"),
(0x00, b"\x4d\xbf", "iteet mi"),
(0x00, b"\x43\xbf", "ittte mi"),
(0x00, b"\x4b\xbf", "itete mi"),
(0x00, b"\x47\xbf", "ittee mi"),
(0x00, b"\x4f\xbf", "iteee mi"),
# ITxyz PL -------------------------------------------------------------- #
(0x00, b"\x58\xbf", "it pl"),
(0x00, b"\x5c\xbf", "itt pl"),
(0x00, b"\x54\xbf", "ite pl"),
(0x00, b"\x5e\xbf", "ittt pl"),
(0x00, b"\x56\xbf", "itet pl"),
(0x00, b"\x5a\xbf", "itte pl"),
(0x00, b"\x52\xbf", "itee pl"),
(0x00, b"\x5f\xbf", "itttt pl"),
(0x00, b"\x57\xbf", "itett pl"),
(0x00, b"\x5b\xbf", "ittet pl"),
(0x00, b"\x53\xbf", "iteet pl"),
(0x00, b"\x5d\xbf", "ittte pl"),
(0x00, b"\x55\xbf", "itete pl"),
(0x00, b"\x59\xbf", "ittee pl"),
(0x00, b"\x51\xbf", "iteee pl"),
# ITxyz VS -------------------------------------------------------------- #
(0x00, b"\x68\xbf", "it vs"),
(0x00, b"\x64\xbf", "itt vs"),
(0x00, b"\x6c\xbf", "ite vs"),
(0x00, b"\x62\xbf", "ittt vs"),
(0x00, b"\x6a\xbf", "itet vs"),
(0x00, b"\x66\xbf", "itte vs"),
(0x00, b"\x6e\xbf", "itee vs"),
(0x00, b"\x61\xbf", "itttt vs"),
(0x00, b"\x69\xbf", "itett vs"),
(0x00, b"\x65\xbf", "ittet vs"),
(0x00, b"\x6d\xbf", "iteet vs"),
(0x00, b"\x63\xbf", "ittte vs"),
(0x00, b"\x6b\xbf", "itete vs"),
(0x00, b"\x67\xbf", "ittee vs"),
(0x00, b"\x6f\xbf", "iteee vs"),
# ITxyz VC -------------------------------------------------------------- #
(0x00, b"\x78\xbf", "it vc"),
(0x00, b"\x7c\xbf", "itt vc"),
(0x00, b"\x74\xbf", "ite vc"),
(0x00, b"\x7e\xbf", "ittt vc"),
(0x00, b"\x76\xbf", "itet vc"),
(0x00, b"\x7a\xbf", "itte vc"),
(0x00, b"\x72\xbf", "itee vc"),
(0x00, b"\x7f\xbf", "itttt vc"),
(0x00, b"\x77\xbf", "itett vc"),
(0x00, b"\x7b\xbf", "ittet vc"),
(0x00, b"\x73\xbf", "iteet vc"),
(0x00, b"\x7d\xbf", "ittte vc"),
(0x00, b"\x75\xbf", "itete vc"),
(0x00, b"\x79\xbf", "ittee vc"),
(0x00, b"\x71\xbf", "iteee vc"),
# ITxyz HI -------------------------------------------------------------- #
(0x00, b"\x88\xbf", "it hi"),
(0x00, b"\x84\xbf", "itt hi"),
(0x00, b"\x8c\xbf", "ite hi"),
(0x00, b"\x82\xbf", "ittt hi"),
(0x00, b"\x8a\xbf", "itet hi"),
(0x00, b"\x86\xbf", "itte hi"),
(0x00, b"\x8e\xbf", "itee hi"),
(0x00, b"\x81\xbf", "itttt hi"),
(0x00, b"\x89\xbf", "itett hi"),
(0x00, b"\x85\xbf", "ittet hi"),
(0x00, b"\x8d\xbf", "iteet hi"),
(0x00, b"\x83\xbf", "ittte hi"),
(0x00, b"\x8b\xbf", "itete hi"),
(0x00, b"\x87\xbf", "ittee hi"),
(0x00, b"\x8f\xbf", "iteee hi"),
# ITxyz LS -------------------------------------------------------------- #
(0x00, b"\x98\xbf", "it ls"),
(0x00, b"\x9c\xbf", "itt ls"),
(0x00, b"\x94\xbf", "ite ls"),
(0x00, b"\x9e\xbf", "ittt ls"),
(0x00, b"\x96\xbf", "itet ls"),
(0x00, b"\x9a\xbf", "itte ls"),
(0x00, b"\x92\xbf", "itee ls"),
(0x00, b"\x9f\xbf", "itttt ls"),
(0x00, b"\x97\xbf", "itett ls"),
(0x00, b"\x9b\xbf", "ittet ls"),
(0x00, b"\x93\xbf", "iteet ls"),
(0x00, b"\x9d\xbf", "ittte ls"),
(0x00, b"\x95\xbf", "itete ls"),
(0x00, b"\x99\xbf", "ittee ls"),
(0x00, b"\x91\xbf", "iteee ls"),
# ITxyz GE -------------------------------------------------------------- #
(0x00, b"\xa8\xbf", "it ge"),
(0x00, b"\xa4\xbf", "itt ge"),
(0x00, b"\xac\xbf", "ite ge"),
(0x00, b"\xa2\xbf", "ittt ge"),
(0x00, b"\xaa\xbf", "itet ge"),
(0x00, b"\xa6\xbf", "itte ge"),
(0x00, b"\xae\xbf", "itee ge"),
(0x00, b"\xa1\xbf", "itttt ge"),
(0x00, b"\xa9\xbf", "itett ge"),
(0x00, b"\xa5\xbf", "ittet ge"),
(0x00, b"\xad\xbf", "iteet ge"),
(0x00, b"\xa3\xbf", "ittte ge"),
(0x00, b"\xab\xbf", "itete ge"),
(0x00, b"\xa7\xbf", "ittee ge"),
(0x00, b"\xaf\xbf", "iteee ge"),
# ITxyz LT -------------------------------------------------------------- #
(0x00, b"\xb8\xbf", "it lt"),
(0x00, b"\xbc\xbf", "itt lt"),
(0x00, b"\xb4\xbf", "ite lt"),
(0x00, b"\xbe\xbf", "ittt lt"),
(0x00, b"\xb6\xbf", "itet lt"),
(0x00, b"\xba\xbf", "itte lt"),
(0x00, b"\xb2\xbf", "itee lt"),
(0x00, b"\xbf\xbf", "itttt lt"),
(0x00, b"\xb7\xbf", "itett lt"),
(0x00, b"\xbb\xbf", "ittet lt"),
(0x00, b"\xb3\xbf", "iteet lt"),
(0x00, b"\xbd\xbf", "ittte lt"),
(0x00, b"\xb5\xbf", "itete lt"),
(0x00, b"\xb9\xbf", "ittee lt"),
(0x00, b"\xb1\xbf", "iteee lt"),
# ITxyz GT -------------------------------------------------------------- #
(0x00, b"\xc8\xbf", "it gt"),
(0x00, b"\xc4\xbf", "itt gt"),
(0x00, b"\xcc\xbf", "ite gt"),
(0x00, b"\xc2\xbf", "ittt gt"),
(0x00, b"\xca\xbf", "itet gt"),
(0x00, b"\xc6\xbf", "itte gt"),
(0x00, b"\xce\xbf", "itee gt"),
(0x00, b"\xc1\xbf", "itttt gt"),
(0x00, b"\xc9\xbf", "itett gt"),
(0x00, b"\xc5\xbf", "ittet gt"),
(0x00, b"\xcd\xbf", "iteet gt"),
(0x00, b"\xc3\xbf", "ittte gt"),
(0x00, b"\xcb\xbf", "itete gt"),
(0x00, b"\xc7\xbf", "ittee gt"),
(0x00, b"\xcf\xbf", "iteee gt"),
# ITxyz LE -------------------------------------------------------------- #
(0x00, b"\xd8\xbf", "it le"),
(0x00, b"\xdc\xbf", "itt le"),
(0x00, b"\xd4\xbf", "ite le"),
(0x00, b"\xde\xbf", "ittt le"),
(0x00, b"\xd6\xbf", "itet le"),
(0x00, b"\xda\xbf", "itte le"),
(0x00, b"\xd2\xbf", "itee le"),
(0x00, b"\xdf\xbf", "itttt le"),
(0x00, b"\xd7\xbf", "itett le"),
(0x00, b"\xdb\xbf", "ittet le"),
(0x00, b"\xd3\xbf", "iteet le"),
(0x00, b"\xdd\xbf", "ittte le"),
(0x00, b"\xd5\xbf", "itete le"),
(0x00, b"\xd9\xbf", "ittee le"),
(0x00, b"\xd1\xbf", "iteee le"),
]
# Straight-line Thumb-2 payload executed under each IT block above:
# (offset of the instruction, opcode bytes, disassembly).
CODE = [
    (0x02, b"\x4f\xf0\x01\x00", "mov r0, 1"),
    (0x06, b"\x4f\xf0\x02\x01", "mov r1, 2"),
    (0x0a, b"\x4f\xf0\x03\x02", "mov r2, 3"),
    (0x0e, b"\x4f\xf0\x04\x03", "mov r3, 4"),
    (0x12, b"\x4f\xf0\x05\x04", "mov r4, 5"),
]
def hook_code(mu, address, size, istate):
    # Debug hook: disassemble and print every instruction Unicorn executes.
    # NOTE(review): Cs / CS_ARCH_ARM / CS_MODE_* / ARM_REG_CPSR are capstone
    # names, and capstone is never imported in this file -- enabling this
    # hook (see the commented-out hook_add call below) would raise
    # NameError unless `from capstone import *` is added.
    opcode = mu.mem_read(address, size)
    cpsr = mu.reg_read(ARM_REG_CPSR)
    thumb = (cpsr >> 5) & 0x1  # CPSR.T bit selects Thumb decoding
    md = Cs(CS_ARCH_ARM, CS_MODE_THUMB if thumb else CS_MODE_ARM)
    md.detail = True
    i = list(md.disasm(opcode, address))[0]
    disasm = "{} {}".format(i.mnemonic, i.op_str)
    opcode_str = " ".join(["%02x" % b for b in opcode])
    print("[UC] {}\t{:08x}: {}".format(opcode_str, address, disasm))
def emu_with_unicorn(test_code, start, stop, istate):
    """Run `test_code` under Unicorn from `start` to `stop`.

    `istate` supplies the initial registers, flags and memory; the final
    machine state is returned as a dict with the same shape.
    """
    emu = Uc(UC_ARCH_ARM, UC_MODE_ARM)
    emu.mem_map(ADDR, SIZE)

    # Lay the opcodes out back-to-back starting at ADDR.
    offset = 0
    for _, raw, _ in test_code:
        emu.mem_write(ADDR + offset, raw)
        offset += len(raw)

    # Read APSR before touching any register, then splice in the
    # caller-chosen NZCV flags.
    apsr = emu.reg_read(UC_ARM_REG_APSR)
    nzcv = (istate['n'] << 31) | (istate['z'] << 30) | \
           (istate['c'] << 29) | (istate['v'] << 28)

    emu.mem_write(STACK, bytes(istate['stack']))
    emu.mem_write(HEAP, bytes(istate['heap']))

    regmap = [
        ('r0', UC_ARM_REG_R0), ('r1', UC_ARM_REG_R1), ('r2', UC_ARM_REG_R2),
        ('r3', UC_ARM_REG_R3), ('r4', UC_ARM_REG_R4), ('r5', UC_ARM_REG_R5),
        ('r6', UC_ARM_REG_R6), ('r7', UC_ARM_REG_R7), ('r8', UC_ARM_REG_R8),
        ('r9', UC_ARM_REG_R9), ('r10', UC_ARM_REG_R10),
        ('r11', UC_ARM_REG_R11), ('r12', UC_ARM_REG_R12),
        ('sp', UC_ARM_REG_SP), ('r14', UC_ARM_REG_R14),
        ('pc', UC_ARM_REG_PC),
    ]
    for key, reg in regmap:
        emu.reg_write(reg, istate[key])
    emu.reg_write(UC_ARM_REG_APSR, (apsr & 0x0fffffff) | nzcv)

    # emu.hook_add(UC_HOOK_CODE, hook_code)

    try:
        emu.emu_start(start, stop)
    except UcError as e:
        print("[UC] Error: {}".format(e))

    ostate = {
        "stack": bytearray(emu.mem_read(STACK, 0x100)),
        "heap": bytearray(emu.mem_read(HEAP, 0x100)),
    }
    for key, reg in regmap:
        ostate[key] = emu.reg_read(reg)

    final_apsr = emu.reg_read(UC_ARM_REG_APSR)
    for flag, bit in (('n', 31), ('z', 30), ('c', 29), ('v', 28)):
        ostate[flag] = (final_apsr >> bit) & 1

    return ostate
def emu_with_triton(test_code, start, stop, istate):
    """Replay `test_code` with Triton from `start` to `stop`.

    Mirrors emu_with_unicorn(): seeds the context from `istate`, steps
    instructions by following pc, and returns the final state dict.
    """
    ctx = TritonContext()
    ctx.setArchitecture(ARCH.ARM32)

    ctx.setConcreteMemoryAreaValue(STACK, bytes(istate['stack']))
    ctx.setConcreteMemoryAreaValue(HEAP, bytes(istate['heap']))

    reg_names = ['r0', 'r1', 'r2', 'r3', 'r4', 'r5', 'r6', 'r7',
                 'r8', 'r9', 'r10', 'r11', 'r12', 'sp', 'r14', 'pc',
                 'n', 'z', 'c', 'v']
    for name in reg_names:
        ctx.setConcreteRegisterValue(getattr(ctx.registers, name),
                                     istate[name])

    # Index the opcodes by address so execution can follow pc.
    listing = {entry[0]: (entry[1], entry[2]) for entry in test_code}

    pc = start & ~0x1  # strip the Thumb bit from the entry address
    while pc != stop:
        opcode, _ = listing[pc]
        inst = Instruction(opcode)
        inst.setAddress(pc)
        ctx.processing(inst)
        pc = ctx.getSymbolicRegisterValue(ctx.registers.pc)

    ostate = {
        "stack": bytearray(ctx.getConcreteMemoryAreaValue(STACK, 0x100)),
        "heap": bytearray(ctx.getConcreteMemoryAreaValue(HEAP, 0x100)),
    }
    for name in reg_names:
        ostate[name] = ctx.getSymbolicRegisterValue(
            getattr(ctx.registers, name))

    return ostate
def diff_state(state1, state2):
    """Print every key whose value differs between two emulator states.

    Memory blobs ('stack'/'heap') are reported by name only; scalar
    registers and flags are printed with both values in hex. Returns None.
    """
    # Iterate items directly (the original materialized a throwaway list)
    # and test 'heap'/'stack' membership once per key.
    for key, val in state1.items():
        if key in ('heap', 'stack'):
            if val != state2[key]:
                print('\t%s: (UC) != (TT)' % (key,))
        elif val != state2[key]:
            print('\t%s: %#x (UC) != %#x (TT)' % (key, val, state2[key]))
def print_state(istate, uc_ostate, tt_ostate):
    """Dump initial / Unicorn / Triton register values side by side,
    flagging mismatching columns with '!='. Memory blobs are skipped."""
    for key in sorted(istate):
        if key in ('stack', 'heap'):
            continue
        marker = "==" if uc_ostate[key] == tt_ostate[key] else "!="
        print("{:>3s}: {:08x} | {:08x} {} {:08x}".format(
            key, istate[key], uc_ostate[key], marker, tt_ostate[key]))
if __name__ == '__main__':
    # Differential test driver: for every IT instruction variant, run the
    # same payload under Unicorn and Triton with an identical random
    # initial state and fail on the first divergence.
    start = 0x00 | 1  # Address of the first instruction (Thumb bit set).
    stop = 0x12 + 0x4  # Address of the last instruction + size.

    # Initial state.
    state = {
        "stack": bytearray([255 - i for i in range(256)]),
        "heap": bytearray([i for i in range(256)]),
        "r0": random.randint(0x0, 0xffffffff),
        "r1": random.randint(0x0, 0xffffffff),
        "r2": random.randint(0x0, 0xffffffff),
        "r3": random.randint(0x0, 0xffffffff),
        "r4": random.randint(0x0, 0xffffffff),
        "r5": random.randint(0x0, 0xffffffff),
        "r6": random.randint(0x0, 0xffffffff),
        "r7": random.randint(0x0, 0xffffffff),
        "r8": random.randint(0x0, 0xffffffff),
        "r9": random.randint(0x0, 0xffffffff),
        "r10": random.randint(0x0, 0xffffffff),
        "r11": random.randint(0x0, 0xffffffff),
        "r12": random.randint(0x0, 0xffffffff),
        "sp": STACK,
        "r14": random.randint(0x0, 0xffffffff),
        "pc": start,
        "n": random.randint(0x0, 0x1),
        "z": random.randint(0x0, 0x1),
        "c": random.randint(0x0, 0x1),
        "v": random.randint(0x0, 0x1),
    }

    for it_inst in IT_INSTRS:
        # Each test prepends one IT instruction to the fixed mov payload.
        test_block = [it_inst] + CODE
        disassembly = it_inst[2]

        try:
            uc_state = emu_with_unicorn(test_block, start, stop, state)
            tt_state = emu_with_triton(test_block, start, stop, state)
            # NOTE(review): final pc is deliberately excluded from the
            # comparison by overwriting Unicorn's value with Triton's --
            # presumably the two engines report the stop address
            # differently; confirm before tightening this.
            uc_state["pc"] = tt_state["pc"]
        except Exception as e:
            print('[KO] %s' %(disassembly))
            print('\t%s' %(e))
            sys.exit(-1)

        if uc_state != tt_state:
            print('[KO] %s' %(disassembly))
            diff_state(uc_state, tt_state)
            print_state(state, uc_state, tt_state)
            sys.exit(-1)

        print('[OK] %s' %(disassembly))

    sys.exit(0)
| |
# pylint: disable=E1101,E1103,W0232
import operator
from datetime import datetime, date, timedelta
import numpy as np
from pandas.core.base import PandasObject
import pandas.tseries.frequencies as frequencies
from pandas.tseries.frequencies import get_freq_code as _gfc
from pandas.tseries.index import DatetimeIndex, Int64Index, Index
from pandas.tseries.base import DatetimeIndexOpsMixin
from pandas.tseries.tools import parse_time_string
import pandas.tseries.offsets as offsets
from pandas._period import Period
import pandas._period as period
from pandas._period import (
get_period_field_arr,
_validate_end_alias,
_quarter_to_myear,
)
import pandas.core.common as com
from pandas.core.common import (isnull, _INT64_DTYPE, _maybe_box,
_values_from_object, ABCSeries,
is_integer, is_float)
from pandas import compat
from pandas.lib import Timestamp, Timedelta
import pandas.lib as lib
import pandas.tslib as tslib
import pandas.algos as _algos
from pandas.compat import zip, u
def _field_accessor(name, alias, docstring=None):
    """Build a read-only property extracting one period field.

    Parameters
    ----------
    name : str
        Name assigned to the generated accessor function.
    alias : int
        Numeric field code passed to get_period_field_arr.
    docstring : str, optional
        Docstring attached to the property.

    Returns
    -------
    property
    """
    def f(self):
        # Only the base frequency code is needed; the multiplier returned
        # by _gfc was previously bound and never used.
        base, _ = _gfc(self.freq)
        return get_period_field_arr(alias, self.values, base)
    f.__name__ = name
    f.__doc__ = docstring
    return property(f)
def _get_ordinals(data, freq):
    """Convert a sequence of period-like values to int64 ordinals at `freq`.

    Uses the fast bulk path when the elements are Period objects,
    otherwise coerces element-wise.
    """
    # Guard the first-element sniff: the original raised IndexError on an
    # empty sequence; an empty input now falls through to map_infer, which
    # handles it.
    if len(data) and isinstance(data[0], Period):
        return period.extract_ordinals(data, freq)
    f = lambda x: Period(x, freq=freq).ordinal
    return lib.map_infer(data, f)
def dt64arr_to_periodarr(data, freq, tz):
    """Convert a datetime64[ns] ndarray to period ordinals at `freq`.

    Parameters
    ----------
    data : ndarray[datetime64[ns]]
    freq : frequency alias understood by get_freq_code
    tz : timezone or None, forwarded to the C-level converter

    Raises
    ------
    ValueError
        If `data` does not have dtype datetime64[ns].
    """
    if data.dtype != np.dtype('M8[ns]'):
        raise ValueError('Wrong dtype: %s' % data.dtype)
    # Only the base frequency code is used; the multiplier is irrelevant.
    base, _ = _gfc(freq)
    return period.dt64arr_to_periodarr(data.view('i8'), base, tz)
# --- Period index sketch
def _period_index_cmp(opname, nat_result=False):
    """
    Wrap comparison operations to convert datetime-like to datetime64

    `nat_result` is what positions involving NaT evaluate to (False for
    most comparisons, True for __ne__).
    """
    def wrapper(self, other):
        if isinstance(other, Period):
            # Compare raw ordinals; only valid when frequencies match.
            func = getattr(self.values, opname)
            if other.freq != self.freq:
                raise AssertionError("Frequencies must be equal")

            result = func(other.ordinal)
        elif isinstance(other, PeriodIndex):
            if other.freq != self.freq:
                raise AssertionError("Frequencies must be equal")

            result = getattr(self.values, opname)(other.values)

            # NaT on either side forces the result to `nat_result`.
            mask = (com.mask_missing(self.values, tslib.iNaT) |
                    com.mask_missing(other.values, tslib.iNaT))
            if mask.any():
                result[mask] = nat_result

            return result
        else:
            # Coerce anything else to a Period at this index's frequency.
            other = Period(other, freq=self.freq)
            func = getattr(self.values, opname)
            result = func(other.ordinal)

        # Scalar paths only: fix up NaT on the right-hand side...
        if other.ordinal == tslib.iNaT:
            result.fill(nat_result)

        # ...and NaT entries in self.
        mask = self.values == tslib.iNaT
        if mask.any():
            result[mask] = nat_result

        return result
    return wrapper
class PeriodIndex(DatetimeIndexOpsMixin, Int64Index):
    """
    Immutable ndarray holding ordinal values indicating regular periods in
    time such as particular years, quarters, months, etc. A value of 1 is the
    period containing the Gregorian proleptic datetime Jan 1, 0001 00:00:00.
    This ordinal representation is from the scikits.timeseries project.

    For instance,
        # construct period for day 1/1/1 and get the first second
        i = Period(year=1,month=1,day=1,freq='D').asfreq('S', 'S')
        i.ordinal
        ===> 1

    Index keys are boxed to Period objects which carries the metadata (eg,
    frequency information).

    Parameters
    ----------
    data : array-like (1-dimensional), optional
        Optional period-like data to construct index with
    dtype : NumPy dtype (default: i8)
    copy : bool
        Make a copy of input ndarray
    freq : string or period object, optional
        One of pandas period strings or corresponding objects
    start : starting value, period-like, optional
        If data is None, used as the start point in generating regular
        period data.
    periods : int, optional, > 0
        Number of periods to generate, if generating index. Takes precedence
        over end argument
    end : end value, period-like, optional
        If periods is none, generated index will extend to first conforming
        period on or just past end argument
    year : int, array, or Series, default None
    month : int, array, or Series, default None
    quarter : int, array, or Series, default None
    day : int, array, or Series, default None
    hour : int, array, or Series, default None
    minute : int, array, or Series, default None
    second : int, array, or Series, default None
    tz : object, default None
        Timezone for converting datetime64 data to Periods

    Examples
    --------
    >>> idx = PeriodIndex(year=year_arr, quarter=q_arr)

    >>> idx2 = PeriodIndex(start='2000', end='2010', freq='A')
    """
    _box_scalars = True  # scalars taken from the index are boxed to Period
    _typ = 'periodindex'
    _attributes = ['name','freq']
    # accessor names surfaced through the datetime-like ops machinery
    _datetimelike_ops = ['year','month','day','hour','minute','second',
                         'weekofyear','week','dayofweek','weekday','dayofyear','quarter', 'qyear', 'freq', 'days_in_month', 'daysinmonth']
    _is_numeric_dtype = False
    freq = None

    # Element-wise comparisons; NaT positions compare to False except for
    # __ne__ (see _period_index_cmp).
    __eq__ = _period_index_cmp('__eq__')
    __ne__ = _period_index_cmp('__ne__', nat_result=True)
    __lt__ = _period_index_cmp('__lt__')
    __gt__ = _period_index_cmp('__gt__')
    __le__ = _period_index_cmp('__le__')
    __ge__ = _period_index_cmp('__ge__')
    def __new__(cls, data=None, ordinal=None, freq=None, start=None, end=None,
                periods=None, copy=False, name=None, tz=None, **kwargs):
        # Construct from, in order of precedence: array-like `data`,
        # explicit int64 `ordinal`s, or a generated range (start/end/periods
        # or year/month/... field arrays passed through **kwargs).
        freq = frequencies.get_standard_freq(freq)

        if periods is not None:
            if is_float(periods):
                # allow e.g. periods=10.0, but reject non-numeric
                periods = int(periods)
            elif not is_integer(periods):
                raise ValueError('Periods must be a number, got %s' %
                                 str(periods))

        if data is None:
            if ordinal is not None:
                data = np.asarray(ordinal, dtype=np.int64)
            else:
                data, freq = cls._generate_range(start, end, periods,
                                                 freq, kwargs)
        else:
            ordinal, freq = cls._from_arraylike(data, freq, tz)
            data = np.array(ordinal, dtype=np.int64, copy=False)

        return cls._simple_new(data, name=name, freq=freq)
    @classmethod
    def _generate_range(cls, start, end, periods, freq, fields):
        # Build ordinals either from endpoints (start/end/periods) or from
        # per-field arrays (year/quarter/...), but never both.
        field_count = len(fields)
        if com._count_not_none(start, end) > 0:
            if field_count > 0:
                raise ValueError('Can either instantiate from fields '
                                 'or endpoints, but not both')
            subarr, freq = _get_ordinal_range(start, end, periods, freq)
        elif field_count > 0:
            subarr, freq = _range_from_fields(freq=freq, **fields)
        else:
            raise ValueError('Not enough parameters to construct '
                             'Period range')

        return subarr, freq
    @classmethod
    def _from_arraylike(cls, data, freq, tz):
        # Coerce arbitrary array-like input to (int64 ordinals, freq).
        # When freq is None it is inferred from the data where possible.
        if not isinstance(data, (np.ndarray, PeriodIndex, DatetimeIndex, Int64Index)):
            if np.isscalar(data) or isinstance(data, Period):
                raise ValueError('PeriodIndex() must be called with a '
                                 'collection of some kind, %s was passed'
                                 % repr(data))

            # other iterable of some kind
            if not isinstance(data, (list, tuple)):
                data = list(data)

            try:
                # Fast path: the values are already integers (= ordinals).
                data = com._ensure_int64(data)
                if freq is None:
                    raise ValueError('freq not specified')
                data = np.array([Period(x, freq=freq).ordinal for x in data],
                                dtype=np.int64)
            except (TypeError, ValueError):
                # Slow path: period-like objects; infer freq from the first
                # element if the caller did not provide one.
                data = com._ensure_object(data)

                if freq is None and len(data) > 0:
                    freq = getattr(data[0], 'freq', None)

                if freq is None:
                    raise ValueError('freq not specified and cannot be '
                                     'inferred from first element')

                data = _get_ordinals(data, freq)
        else:
            if isinstance(data, PeriodIndex):
                if freq is None or freq == data.freq:
                    # same (or unspecified) frequency: reuse the ordinals
                    freq = data.freq
                    data = data.values
                else:
                    # different frequency: convert the ordinals
                    base1, _ = _gfc(data.freq)
                    base2, _ = _gfc(freq)
                    data = period.period_asfreq_arr(data.values, base1,
                                                    base2, 1)
            else:
                if freq is None and len(data) > 0:
                    freq = getattr(data[0], 'freq', None)

                if freq is None:
                    raise ValueError('freq not specified and cannot be '
                                     'inferred from first element')

                if data.dtype != np.int64:
                    if np.issubdtype(data.dtype, np.datetime64):
                        data = dt64arr_to_periodarr(data, freq, tz)
                    else:
                        try:
                            data = com._ensure_int64(data)
                        except (TypeError, ValueError):
                            data = com._ensure_object(data)
                            data = _get_ordinals(data, freq)

        return data, freq
    @classmethod
    def _simple_new(cls, values, name=None, freq=None, **kwargs):
        # Fast-path constructor: wrap pre-computed int64 ordinals without
        # any validation or coercion.
        result = object.__new__(cls)
        result._data = values
        result.name = name
        result.freq = freq
        result._reset_identity()
        return result
    @property
    def _na_value(self):
        # The missing-value sentinel: NaT boxed at this index's frequency.
        return self._box_func(tslib.iNaT)
def __contains__(self, key):
if not isinstance(key, Period) or key.freq != self.freq:
if isinstance(key, compat.string_types):
try:
self.get_loc(key)
return True
except Exception:
return False
return False
return key.ordinal in self._engine
    @property
    def _box_func(self):
        # Callable that boxes a raw ordinal into a Period at our frequency.
        return lambda x: Period._from_ordinal(ordinal=x, freq=self.freq)
    def _to_embed(self, keep_tz=False):
        """ return an array repr of this object, potentially casting to object """
        # keep_tz is accepted for interface parity with DatetimeIndex but
        # has no effect here -- periods carry no timezone.
        return self.asobject.values
    @property
    def _formatter_func(self):
        # Render each element quoted, e.g. '2000-01'.
        return lambda x: "'%s'" % x
    def asof_locs(self, where, mask):
        """
        where : array of timestamps
        mask : array of booleans where data is not NA

        Returns, for each entry of `where`, the position of the last
        non-NA value at or before it (-1 where none exists).
        """
        where_idx = where
        if isinstance(where_idx, DatetimeIndex):
            where_idx = PeriodIndex(where_idx.values, freq=self.freq)

        # searchsorted(side='right') - 1 gives "last value <= where".
        locs = self.values[mask].searchsorted(where_idx.values, side='right')

        locs = np.where(locs > 0, locs - 1, 0)
        result = np.arange(len(self))[mask].take(locs)

        # Entries before the first valid value get -1.
        first = mask.argmax()
        result[(locs == 0) & (where_idx.values < self.values[first])] = -1

        return result
    def _array_values(self):
        # Object-dtype view of the index (Periods, not raw ordinals).
        return self.asobject
def astype(self, dtype):
dtype = np.dtype(dtype)
if dtype == np.object_:
return Index(np.array(list(self), dtype), dtype)
elif dtype == _INT64_DTYPE:
return Index(self.values, dtype)
raise ValueError('Cannot cast PeriodIndex to dtype %s' % dtype)
def searchsorted(self, key, side='left'):
if isinstance(key, Period):
if key.freq != self.freq:
raise ValueError("Different period frequency: %s" % key.freq)
key = key.ordinal
elif isinstance(key, compat.string_types):
key = Period(key, freq=self.freq).ordinal
return self.values.searchsorted(key, side=side)
    @property
    def is_all_dates(self):
        # Every element of a PeriodIndex is date-like by construction.
        return True
    @property
    def is_full(self):
        """
        Returns True if the index is empty or contains no missing periods
        between its first and last values (duplicates are allowed).
        Raises ValueError if the index is not monotonic.
        """
        if len(self) == 0:
            return True
        if not self.is_monotonic:
            raise ValueError('Index is not monotonic')
        values = self.values
        # A difference of 2 or more between adjacent ordinals means a gap.
        return ((values[1:] - values[:-1]) < 2).all()
    @property
    def freqstr(self):
        # NOTE(review): returns self.freq unchanged -- presumably freq is
        # stored as a frequency string here (it is normalized via
        # frequencies.get_standard_freq in __new__); confirm.
        return self.freq
    def asfreq(self, freq=None, how='E'):
        """
        Convert the PeriodIndex to the specified frequency `freq`.

        Parameters
        ----------
        freq : str
            a frequency
        how : str {'E', 'S'}
            'E', 'END', or 'FINISH' for end,
            'S', 'START', or 'BEGIN' for start.
            Whether the elements should be aligned to the end
            or start within a period. January 31st ('END') vs.
            January 1st ('START') for example.

        Returns
        -------
        new : PeriodIndex with the new frequency

        Examples
        --------
        >>> pidx = pd.period_range('2010-01-01', '2015-01-01', freq='A')
        >>> pidx
        <class 'pandas.tseries.period.PeriodIndex'>
        [2010, ..., 2015]
        Length: 6, Freq: A-DEC

        >>> pidx.asfreq('M')
        <class 'pandas.tseries.period.PeriodIndex'>
        [2010-12, ..., 2015-12]
        Length: 6, Freq: M

        >>> pidx.asfreq('M', how='S')
        <class 'pandas.tseries.period.PeriodIndex'>
        [2010-01, ..., 2015-01]
        Length: 6, Freq: M
        """
        how = _validate_end_alias(how)

        freq = frequencies.get_standard_freq(freq)

        base1, mult1 = _gfc(self.freq)
        base2, mult2 = _gfc(freq)

        # Only unit multiples of the target frequency are supported.
        if mult2 != 1:
            raise ValueError('Only mult == 1 supported')

        end = how == 'E'
        new_data = period.period_asfreq_arr(self.values, base1, base2, end)

        return self._simple_new(new_data, self.name, freq=freq)
    def to_datetime(self, dayfirst=False):
        # NOTE(review): `dayfirst` is accepted for API compatibility but is
        # ignored -- the conversion always delegates to to_timestamp().
        return self.to_timestamp()
year = _field_accessor('year', 0, "The year of the period")
month = _field_accessor('month', 3, "The month as January=1, December=12")
day = _field_accessor('day', 4, "The days of the period")
hour = _field_accessor('hour', 5, "The hour of the period")
minute = _field_accessor('minute', 6, "The minute of the period")
second = _field_accessor('second', 7, "The second of the period")
weekofyear = _field_accessor('week', 8, "The week ordinal of the year")
week = weekofyear
dayofweek = _field_accessor('dayofweek', 10, "The day of the week with Monday=0, Sunday=6")
weekday = dayofweek
dayofyear = day_of_year = _field_accessor('dayofyear', 9, "The ordinal day of the year")
quarter = _field_accessor('quarter', 2, "The quarter of the date")
qyear = _field_accessor('qyear', 1)
days_in_month = _field_accessor('days_in_month', 11, "The number of days in the month")
daysinmonth = days_in_month
def _get_object_array(self):
freq = self.freq
return np.array([ Period._from_ordinal(ordinal=x, freq=freq) for x in self.values], copy=False)
    def _mpl_repr(self):
        # how to represent ourselves to matplotlib: an array of boxed
        # Period objects rather than raw ordinals.
        return self._get_object_array()
def equals(self, other):
"""
Determines if two Index objects contain the same elements.
"""
if self.is_(other):
return True
if (not hasattr(other, 'inferred_type') or
other.inferred_type != 'int64'):
try:
other = PeriodIndex(other)
except:
return False
return np.array_equal(self.asi8, other.asi8)
    def to_timestamp(self, freq=None, how='start'):
        """
        Cast to DatetimeIndex

        Parameters
        ----------
        freq : string or DateOffset, default 'D' for week or longer, 'S'
            otherwise
            Target frequency
        how : {'s', 'e', 'start', 'end'}

        Returns
        -------
        DatetimeIndex
        """
        how = _validate_end_alias(how)

        if freq is None:
            # default target depends on our own frequency's resolution
            base, mult = _gfc(self.freq)
            freq = frequencies.get_to_timestamp_base(base)

        base, mult = _gfc(freq)
        # align ordinals to the requested frequency, then convert to i8
        # nanosecond timestamps
        new_data = self.asfreq(freq, how)

        new_data = period.periodarr_to_dt64arr(new_data.values, base)
        return DatetimeIndex(new_data, freq='infer', name=self.name)
    def _add_delta(self, other):
        # Adding an offset/timedelta is only defined when it is an exact
        # multiple of this index's own frequency; otherwise raise.
        if isinstance(other, (timedelta, np.timedelta64, offsets.Tick, Timedelta)):
            offset = frequencies.to_offset(self.freq)
            if isinstance(offset, offsets.Tick):
                nanos = tslib._delta_to_nanoseconds(other)
                offset_nanos = tslib._delta_to_nanoseconds(offset)

                if nanos % offset_nanos == 0:
                    return self.shift(nanos // offset_nanos)
        elif isinstance(other, offsets.DateOffset):
            freqstr = frequencies.get_standard_freq(other)
            base = frequencies.get_base_alias(freqstr)

            if base == self.freq:
                return self.shift(other.n)
        raise ValueError("Input has different freq from PeriodIndex(freq={0})".format(self.freq))
    def shift(self, n):
        """
        Specialized shift which produces a PeriodIndex

        Parameters
        ----------
        n : int
            Periods to shift by

        Returns
        -------
        shifted : PeriodIndex
        """
        # NaT ordinals must not participate in the arithmetic: remember
        # their positions and restore them after shifting.
        mask = self.values == tslib.iNaT
        values = self.values + n
        values[mask] = tslib.iNaT

        return PeriodIndex(data=values, name=self.name, freq=self.freq)
    @property
    def inferred_type(self):
        # b/c data is represented as ints make sure we can't have ambiguous
        # indexing
        return 'period'
    def get_value(self, series, key):
        """
        Fast lookup of value from 1-dimensional ndarray. Only use this if you
        know what you're doing
        """
        s = _values_from_object(series)
        try:
            # fast path: the key resolves directly against the engine
            return _maybe_box(self, super(PeriodIndex, self).get_value(s, key), series, key)
        except (KeyError, IndexError):
            try:
                # try parsing the key as a datetime-like string
                asdt, parsed, reso = parse_time_string(key, self.freq)
                grp = frequencies._infer_period_group(reso)
                freqn = frequencies._period_group(self.freq)

                vals = self.values

                # if our data is higher resolution than requested key, slice
                if grp < freqn:
                    iv = Period(asdt, freq=(grp, 1))
                    ord1 = iv.asfreq(self.freq, how='S').ordinal
                    ord2 = iv.asfreq(self.freq, how='E').ordinal

                    if ord2 < vals[0] or ord1 > vals[-1]:
                        raise KeyError(key)

                    pos = np.searchsorted(self.values, [ord1, ord2])
                    key = slice(pos[0], pos[1] + 1)
                    return series[key]
                elif grp == freqn:
                    # exact-resolution match: single-ordinal lookup
                    key = Period(asdt, freq=self.freq).ordinal
                    return _maybe_box(self, self._engine.get_value(s, key), series, key)
                else:
                    raise KeyError(key)
            except TypeError:
                pass

            # last resort: coerce the key to a Period and look it up
            key = Period(key, self.freq).ordinal
            return _maybe_box(self, self._engine.get_value(s, key), series, key)
def get_indexer(self, target, method=None, limit=None):
if hasattr(target, 'freq') and target.freq != self.freq:
raise ValueError('target and index have different freq: '
'(%s, %s)' % (target.freq, self.freq))
return Index.get_indexer(self, target, method, limit)
    def get_loc(self, key, method=None):
        """
        Get integer location for requested label
        Returns
        -------
        loc : int
        """
        try:
            return self._engine.get_loc(key)
        except KeyError:
            # Integers are positions/ordinals; if the engine missed, re-raise.
            if is_integer(key):
                raise
            try:
                # Try to interpret the key as a datetime-like string first.
                asdt, parsed, reso = parse_time_string(key, self.freq)
                key = asdt
            except TypeError:
                pass
            # Coerce to a Period at this index's freq and look up its ordinal.
            key = Period(key, self.freq)
            try:
                return Index.get_loc(self, key.ordinal, method=method)
            except KeyError:
                # Report the Period (not its raw int ordinal) to the caller.
                raise KeyError(key)
    def _maybe_cast_slice_bound(self, label, side, kind):
        """
        If label is a string or a datetime, cast it to Period.ordinal according to
        resolution.
        Parameters
        ----------
        label : object
        side : {'left', 'right'}
        kind : string / None
        Returns
        -------
        bound : Period or object
        Notes
        -----
        Value of `side` parameter should be validated in caller.
        """
        if isinstance(label, datetime):
            return Period(label, freq=self.freq)
        elif isinstance(label, compat.string_types):
            try:
                _, parsed, reso = parse_time_string(label, self.freq)
                bounds = self._parsed_string_to_bounds(reso, parsed)
                # A partial string denotes a span of periods; pick the edge
                # that matches the requested slice side.
                return bounds[0 if side == 'left' else 1]
            except Exception:
                raise KeyError(label)
        elif is_integer(label) or is_float(label):
            # Numeric labels are ambiguous on an int-ordinal-backed index.
            self._invalid_indexer('slice',label)
        return label
def _parsed_string_to_bounds(self, reso, parsed):
if reso == 'year':
t1 = Period(year=parsed.year, freq='A')
elif reso == 'month':
t1 = Period(year=parsed.year, month=parsed.month, freq='M')
elif reso == 'quarter':
q = (parsed.month - 1) // 3 + 1
t1 = Period(year=parsed.year, quarter=q, freq='Q-DEC')
elif reso == 'day':
t1 = Period(year=parsed.year, month=parsed.month, day=parsed.day,
freq='D')
elif reso == 'hour':
t1 = Period(year=parsed.year, month=parsed.month, day=parsed.day,
hour=parsed.hour, freq='H')
elif reso == 'minute':
t1 = Period(year=parsed.year, month=parsed.month, day=parsed.day,
hour=parsed.hour, minute=parsed.minute, freq='T')
elif reso == 'second':
t1 = Period(year=parsed.year, month=parsed.month, day=parsed.day,
hour=parsed.hour, minute=parsed.minute, second=parsed.second,
freq='S')
else:
raise KeyError(key)
return (t1.asfreq(self.freq, how='start'),
t1.asfreq(self.freq, how='end'))
    def _get_string_slice(self, key):
        # Resolve a partial datetime string to a positional slice of the index.
        if not self.is_monotonic:
            raise ValueError('Partial indexing only valid for '
                             'ordered time series')
        key, parsed, reso = parse_time_string(key, self.freq)
        grp = frequencies._infer_period_group(reso)
        freqn = frequencies._period_group(self.freq)
        # Sub-annual partial keys only make sense when the index is stored at
        # a strictly finer resolution than the key itself.
        if reso in ['day', 'hour', 'minute', 'second'] and not grp < freqn:
            raise KeyError(key)
        t1, t2 = self._parsed_string_to_bounds(reso, parsed)
        # Binary-search the ordinal bounds; right side is exclusive.
        return slice(self.searchsorted(t1.ordinal, side='left'),
                     self.searchsorted(t2.ordinal, side='right'))
def join(self, other, how='left', level=None, return_indexers=False):
"""
See Index.join
"""
self._assert_can_do_setop(other)
result = Int64Index.join(self, other, how=how, level=level,
return_indexers=return_indexers)
if return_indexers:
result, lidx, ridx = result
return self._apply_meta(result), lidx, ridx
return self._apply_meta(result)
def _assert_can_do_setop(self, other):
if not isinstance(other, PeriodIndex):
raise ValueError('can only call with other PeriodIndex-ed objects')
if self.freq != other.freq:
raise ValueError('Only like-indexed PeriodIndexes compatible '
'for join (for now)')
def _wrap_union_result(self, other, result):
name = self.name if self.name == other.name else None
result = self._apply_meta(result)
result.name = name
return result
def _apply_meta(self, rawarr):
if not isinstance(rawarr, PeriodIndex):
rawarr = PeriodIndex(rawarr, freq=self.freq)
return rawarr
    def __getitem__(self, key):
        """Scalar keys return a Period; array-like keys a PeriodIndex."""
        getitem = self._data.__getitem__
        if np.isscalar(key):
            val = getitem(key)
            return Period(ordinal=val, freq=self.freq)
        else:
            # Boolean masks must be ndarrays for the underlying take.
            if com.is_bool_indexer(key):
                key = np.asarray(key)
            result = getitem(key)
            if result.ndim > 1:
                # MPL kludge
                # values = np.asarray(list(values), dtype=object)
                # return values.reshape(result.shape)
                return PeriodIndex(result, name=self.name, freq=self.freq)
            return PeriodIndex(result, name=self.name, freq=self.freq)
    def _format_native_types(self, na_rep=u('NaT'), **kwargs):
        # Render each Period as text, substituting `na_rep` at missing slots.
        values = np.array(list(self), dtype=object)
        mask = isnull(self.values)
        values[mask] = na_rep
        imask = ~mask
        # Stringify only the non-missing entries.
        values[imask] = np.array([u('%s') % dt for dt in values[imask]])
        return values
    def __array_finalize__(self, obj):
        # numpy calls this on every view/slice; 0-d results collapse to scalar.
        if not self.ndim:  # pragma: no cover
            return self.item()
        # Propagate metadata from the source array (may be absent on `obj`).
        self.freq = getattr(obj, 'freq', None)
        self.name = getattr(obj, 'name', None)
        self._reset_identity()
def take(self, indices, axis=None):
"""
Analogous to ndarray.take
"""
indices = com._ensure_platform_int(indices)
taken = self.values.take(indices, axis=axis)
return self._simple_new(taken, self.name, freq=self.freq)
    def append(self, other):
        """
        Append a collection of Index options together
        Parameters
        ----------
        other : Index or list/tuple of indices
        Returns
        -------
        appended : Index
        """
        name = self.name
        to_concat = [self]
        if isinstance(other, (list, tuple)):
            to_concat = to_concat + list(other)
        else:
            to_concat.append(other)
        # The result keeps a name only if every Index being appended agrees.
        for obj in to_concat:
            if isinstance(obj, Index) and obj.name != name:
                name = None
                break
        to_concat = self._ensure_compat_concat(to_concat)
        if isinstance(to_concat[0], PeriodIndex):
            if len(set([x.freq for x in to_concat])) > 1:
                # box
                # Mixed frequencies cannot share one ordinal space, so fall
                # back to object dtype holding boxed Period values.
                to_concat = [x.asobject.values for x in to_concat]
            else:
                # Same freq everywhere: concatenate the raw int ordinals.
                cat_values = np.concatenate([x.values for x in to_concat])
                return PeriodIndex(cat_values, freq=self.freq, name=name)
        to_concat = [x.values if isinstance(x, Index) else x
                     for x in to_concat]
        return Index(com._concat_compat(to_concat), name=name)
    def __setstate__(self, state):
        """Necessary for making this object picklable"""
        if isinstance(state, dict):
            super(PeriodIndex, self).__setstate__(state)
        elif isinstance(state, tuple):
            # < 0.15 compat
            if len(state) == 2:
                # Old pickles stored the raw ndarray state plus our own state.
                nd_state, own_state = state
                data = np.empty(nd_state[1], dtype=nd_state[2])
                np.ndarray.__setstate__(data, nd_state)
                try:  # backcompat
                    self.freq = own_state[1]
                except:
                    pass
            else:  # pragma: no cover
                data = np.empty(state)
                np.ndarray.__setstate__(self, state)
            self._data = data
        else:
            raise Exception("invalid pickle state")
    # Shared entry point used by pandas' pickle-compat machinery.
    _unpickle_compat = __setstate__
def tz_convert(self, tz):
"""
Convert tz-aware DatetimeIndex from one time zone to another (using pytz/dateutil)
Parameters
----------
tz : string, pytz.timezone, dateutil.tz.tzfile or None
Time zone for time. Corresponding timestamps would be converted to
time zone of the TimeSeries.
None will remove timezone holding UTC time.
Returns
-------
normalized : DatetimeIndex
Note
----
Not currently implemented for PeriodIndex
"""
raise NotImplementedError("Not yet implemented for PeriodIndex")
def tz_localize(self, tz, infer_dst=False):
"""
Localize tz-naive DatetimeIndex to given time zone (using pytz/dateutil),
or remove timezone from tz-aware DatetimeIndex
Parameters
----------
tz : string, pytz.timezone, dateutil.tz.tzfile or None
Time zone for time. Corresponding timestamps would be converted to
time zone of the TimeSeries.
None will remove timezone holding local time.
infer_dst : boolean, default False
Attempt to infer fall dst-transition hours based on order
Returns
-------
localized : DatetimeIndex
Note
----
Not currently implemented for PeriodIndex
"""
raise NotImplementedError("Not yet implemented for PeriodIndex")
# Install the shared Index method groups: numeric and logical operators are
# disabled for PeriodIndex (they make no sense on period ordinals), while the
# datetime-like helper methods are mixed in.
PeriodIndex._add_numeric_methods_disabled()
PeriodIndex._add_logical_methods_disabled()
PeriodIndex._add_datetimelike_methods()
def _get_ordinal_range(start, end, periods, freq):
    """
    Compute the int64 ordinal values for a period range described by any
    two of (start, end, periods), and return them with the resolved freq.
    """
    if com._count_not_none(start, end, periods) < 2:
        raise ValueError('Must specify 2 of start, end, periods')
    if start is not None:
        start = Period(start, freq)
    if end is not None:
        end = Period(end, freq)
    is_start_per = isinstance(start, Period)
    is_end_per = isinstance(end, Period)
    if is_start_per and is_end_per and start.freq != end.freq:
        raise ValueError('Start and end must have same freq')
    if ((is_start_per and start.ordinal == tslib.iNaT) or
        (is_end_per and end.ordinal == tslib.iNaT)):
        raise ValueError('Start and end must not be NaT')
    if freq is None:
        # Infer the freq from whichever endpoint carries one.
        if is_start_per:
            freq = start.freq
        elif is_end_per:
            freq = end.freq
        else:  # pragma: no cover
            raise ValueError('Could not infer freq from start/end')
    if periods is not None:
        if start is None:
            # Anchor on `end` and count `periods` ordinals backwards.
            data = np.arange(end.ordinal - periods + 1,
                             end.ordinal + 1,
                             dtype=np.int64)
        else:
            data = np.arange(start.ordinal, start.ordinal + periods,
                             dtype=np.int64)
    else:
        # Both endpoints given: inclusive ordinal range.
        data = np.arange(start.ordinal, end.ordinal + 1, dtype=np.int64)
    return data, freq
def _range_from_fields(year=None, month=None, quarter=None, day=None,
                       hour=None, minute=None, second=None, freq=None):
    """
    Build period ordinals from per-field scalars/arrays; missing time
    fields default to their smallest value (day defaults to 1).
    Returns (int64 ndarray of ordinals, resolved freq).
    """
    if hour is None:
        hour = 0
    if minute is None:
        minute = 0
    if second is None:
        second = 0
    if day is None:
        day = 1
    ordinals = []
    if quarter is not None:
        # Quarterly path: freq must be (or default to) a quarterly FreqGroup.
        if freq is None:
            freq = 'Q'
            base = frequencies.FreqGroup.FR_QTR
        else:
            base, mult = _gfc(freq)
            if mult != 1:
                raise ValueError('Only mult == 1 supported')
            if base != frequencies.FreqGroup.FR_QTR:
                raise AssertionError("base must equal FR_QTR")
        year, quarter = _make_field_arrays(year, quarter)
        for y, q in zip(year, quarter):
            # Convert (year, quarter) to (year, month) under the fiscal freq.
            y, m = _quarter_to_myear(y, q, freq)
            val = period.period_ordinal(y, m, 1, 1, 1, 1, 0, 0, base)
            ordinals.append(val)
    else:
        base, mult = _gfc(freq)
        if mult != 1:
            raise ValueError('Only mult == 1 supported')
        arrays = _make_field_arrays(year, month, day, hour, minute, second)
        for y, mth, d, h, mn, s in zip(*arrays):
            ordinals.append(period.period_ordinal(y, mth, d, h, mn, s, 0, 0, base))
    return np.array(ordinals, dtype=np.int64), freq
def _make_field_arrays(*fields):
    """
    Broadcast scalar fields against array-like fields.
    All array-like fields must share one length; scalars are repeated to
    that length. Raises ValueError on mismatched lengths.
    """
    length = None
    for field in fields:
        if isinstance(field, (list, np.ndarray, ABCSeries)):
            if length is not None and len(field) != length:
                raise ValueError('Mismatched Period array lengths')
            if length is None:
                length = len(field)
    return [np.asarray(field)
            if isinstance(field, (np.ndarray, list, ABCSeries))
            else np.repeat(field, length)
            for field in fields]
def pnow(freq=None):
    """Return a Period representing the current date/time at `freq`."""
    return Period(datetime.now(), freq=freq)
def period_range(start=None, end=None, periods=None, freq='D', name=None):
    """
    Return a fixed frequency datetime index, with day (calendar) as the default
    frequency
    Parameters
    ----------
    start :
        Left bound of the range
    end :
        Right bound of the range
    periods : int, default None
        Number of periods in the index
    freq : str/DateOffset, default 'D'
        Frequency alias
    name : str, default None
        Name for the resulting PeriodIndex
    Returns
    -------
    prng : PeriodIndex
    """
    return PeriodIndex(start=start, end=end, periods=periods, freq=freq,
                       name=name)
| |
from datetime import datetime
import re
import numpy as np
import pytest
from pandas import DataFrame, NaT
import pandas._testing as tm
@pytest.mark.parametrize("subset", ["a", ["a"], ["a", "B"]])
def test_drop_duplicates_with_misspelled_column_name(subset):
    # GH 19730
    # A subset naming a column that does not exist must raise KeyError
    # listing the missing label(s).
    data = {"A": [0, 0, 1], "B": [0, 0, 1], "C": [0, 0, 1]}
    frame = DataFrame(data)
    msg = re.escape("Index(['a'], dtype='object')")
    with pytest.raises(KeyError, match=msg):
        frame.drop_duplicates(subset)
def test_drop_duplicates():
    """Core drop_duplicates behavior: subsets, keep strategies, int dtypes."""
    df = DataFrame(
        {
            "AAA": ["foo", "bar", "foo", "bar", "foo", "bar", "bar", "foo"],
            "B": ["one", "one", "two", "two", "two", "two", "one", "two"],
            "C": [1, 1, 2, 2, 2, 2, 1, 2],
            "D": range(8),
        }
    )
    # single column
    result = df.drop_duplicates("AAA")
    expected = df[:2]
    tm.assert_frame_equal(result, expected)
    result = df.drop_duplicates("AAA", keep="last")
    expected = df.loc[[6, 7]]
    tm.assert_frame_equal(result, expected)
    # keep=False drops every row that has any duplicate
    result = df.drop_duplicates("AAA", keep=False)
    expected = df.loc[[]]
    tm.assert_frame_equal(result, expected)
    assert len(result) == 0
    # multi column
    expected = df.loc[[0, 1, 2, 3]]
    result = df.drop_duplicates(np.array(["AAA", "B"]))
    tm.assert_frame_equal(result, expected)
    result = df.drop_duplicates(["AAA", "B"])
    tm.assert_frame_equal(result, expected)
    result = df.drop_duplicates(("AAA", "B"), keep="last")
    expected = df.loc[[0, 5, 6, 7]]
    tm.assert_frame_equal(result, expected)
    result = df.drop_duplicates(("AAA", "B"), keep=False)
    expected = df.loc[[0]]
    tm.assert_frame_equal(result, expected)
    # consider everything
    df2 = df.loc[:, ["AAA", "B", "C"]]
    result = df2.drop_duplicates()
    # in this case only
    expected = df2.drop_duplicates(["AAA", "B"])
    tm.assert_frame_equal(result, expected)
    result = df2.drop_duplicates(keep="last")
    expected = df2.drop_duplicates(["AAA", "B"], keep="last")
    tm.assert_frame_equal(result, expected)
    result = df2.drop_duplicates(keep=False)
    expected = df2.drop_duplicates(["AAA", "B"], keep=False)
    tm.assert_frame_equal(result, expected)
    # integers
    result = df.drop_duplicates("C")
    expected = df.iloc[[0, 2]]
    tm.assert_frame_equal(result, expected)
    result = df.drop_duplicates("C", keep="last")
    expected = df.iloc[[-2, -1]]
    tm.assert_frame_equal(result, expected)
    # narrow int dtype must behave identically to int64
    df["E"] = df["C"].astype("int8")
    result = df.drop_duplicates("E")
    expected = df.iloc[[0, 2]]
    tm.assert_frame_equal(result, expected)
    result = df.drop_duplicates("E", keep="last")
    expected = df.iloc[[-2, -1]]
    tm.assert_frame_equal(result, expected)
    # GH 11376
    df = DataFrame({"x": [7, 6, 3, 3, 4, 8, 0], "y": [0, 6, 5, 5, 9, 1, 2]})
    expected = df.loc[df.index != 3]
    tm.assert_frame_equal(df.drop_duplicates(), expected)
    df = DataFrame([[1, 0], [0, 2]])
    tm.assert_frame_equal(df.drop_duplicates(), df)
    df = DataFrame([[-2, 0], [0, -4]])
    tm.assert_frame_equal(df.drop_duplicates(), df)
    # values near int64 max exercise the hashing path without overflow
    x = np.iinfo(np.int64).max / 3 * 2
    df = DataFrame([[-x, x], [0, x + 4]])
    tm.assert_frame_equal(df.drop_duplicates(), df)
    df = DataFrame([[-x, x], [x, x + 4]])
    tm.assert_frame_equal(df.drop_duplicates(), df)
    # GH 11864
    df = DataFrame([i] * 9 for i in range(16))
    # NOTE(review): DataFrame.append was removed in pandas 2.0; this line
    # needs pd.concat there -- confirm the pandas version this suite targets.
    df = df.append([[1] + [0] * 8], ignore_index=True)
    for keep in ["first", "last", False]:
        assert df.duplicated(keep=keep).sum() == 0
def test_drop_duplicates_with_duplicate_column_names():
    # GH17836
    # Two columns share the label "a"; drop_duplicates must work both with
    # no subset and with the duplicated label as the subset.
    frame = DataFrame([[1, 2, 5], [3, 4, 6], [3, 4, 7]], columns=["a", "a", "b"])
    tm.assert_frame_equal(frame.drop_duplicates(), frame)
    deduped = frame.drop_duplicates("a")
    tm.assert_frame_equal(deduped, frame[:2])
def test_drop_duplicates_for_take_all():
    """keep=False cases where some values appear once and thus survive."""
    df = DataFrame(
        {
            "AAA": ["foo", "bar", "baz", "bar", "foo", "bar", "qux", "foo"],
            "B": ["one", "one", "two", "two", "two", "two", "one", "two"],
            "C": [1, 1, 2, 2, 2, 2, 1, 2],
            "D": range(8),
        }
    )
    # single column
    result = df.drop_duplicates("AAA")
    expected = df.iloc[[0, 1, 2, 6]]
    tm.assert_frame_equal(result, expected)
    result = df.drop_duplicates("AAA", keep="last")
    expected = df.iloc[[2, 5, 6, 7]]
    tm.assert_frame_equal(result, expected)
    # 'baz' and 'qux' occur exactly once, so they survive keep=False
    result = df.drop_duplicates("AAA", keep=False)
    expected = df.iloc[[2, 6]]
    tm.assert_frame_equal(result, expected)
    # multiple columns
    result = df.drop_duplicates(["AAA", "B"])
    expected = df.iloc[[0, 1, 2, 3, 4, 6]]
    tm.assert_frame_equal(result, expected)
    result = df.drop_duplicates(["AAA", "B"], keep="last")
    expected = df.iloc[[0, 1, 2, 5, 6, 7]]
    tm.assert_frame_equal(result, expected)
    result = df.drop_duplicates(["AAA", "B"], keep=False)
    expected = df.iloc[[0, 1, 2, 6]]
    tm.assert_frame_equal(result, expected)
def test_drop_duplicates_tuple():
    """A tuple column label is a single key, not a list of column names."""
    df = DataFrame(
        {
            ("AA", "AB"): ["foo", "bar", "foo", "bar", "foo", "bar", "bar", "foo"],
            "B": ["one", "one", "two", "two", "two", "two", "one", "two"],
            "C": [1, 1, 2, 2, 2, 2, 1, 2],
            "D": range(8),
        }
    )
    # single column
    result = df.drop_duplicates(("AA", "AB"))
    expected = df[:2]
    tm.assert_frame_equal(result, expected)
    result = df.drop_duplicates(("AA", "AB"), keep="last")
    expected = df.loc[[6, 7]]
    tm.assert_frame_equal(result, expected)
    result = df.drop_duplicates(("AA", "AB"), keep=False)
    expected = df.loc[[]]  # empty df
    assert len(result) == 0
    tm.assert_frame_equal(result, expected)
    # multi column
    expected = df.loc[[0, 1, 2, 3]]
    # to pass several labels where one is a tuple, nest the tuple in a tuple
    result = df.drop_duplicates((("AA", "AB"), "B"))
    tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
    "df",
    [
        DataFrame(),
        DataFrame(columns=[]),
        DataFrame(columns=["A", "B", "C"]),
        DataFrame(index=[]),
        DataFrame(index=["A", "B", "C"]),
    ],
)
def test_drop_duplicates_empty(df):
    # GH 20516
    # Empty frames (no rows and/or no columns) must pass through unchanged,
    # both out-of-place and in-place.
    result = df.drop_duplicates()
    tm.assert_frame_equal(result, df)
    result = df.copy()
    result.drop_duplicates(inplace=True)
    tm.assert_frame_equal(result, df)
def test_drop_duplicates_NA():
    """None and NaN compare equal to themselves for deduplication."""
    # none
    df = DataFrame(
        {
            "A": [None, None, "foo", "bar", "foo", "bar", "bar", "foo"],
            "B": ["one", "one", "two", "two", "two", "two", "one", "two"],
            "C": [1.0, np.nan, np.nan, np.nan, 1.0, 1.0, 1, 1.0],
            "D": range(8),
        }
    )
    # single column
    result = df.drop_duplicates("A")
    expected = df.loc[[0, 2, 3]]
    tm.assert_frame_equal(result, expected)
    result = df.drop_duplicates("A", keep="last")
    expected = df.loc[[1, 6, 7]]
    tm.assert_frame_equal(result, expected)
    result = df.drop_duplicates("A", keep=False)
    expected = df.loc[[]]  # empty df
    tm.assert_frame_equal(result, expected)
    assert len(result) == 0
    # multi column
    result = df.drop_duplicates(["A", "B"])
    expected = df.loc[[0, 2, 3, 6]]
    tm.assert_frame_equal(result, expected)
    result = df.drop_duplicates(["A", "B"], keep="last")
    expected = df.loc[[1, 5, 6, 7]]
    tm.assert_frame_equal(result, expected)
    result = df.drop_duplicates(["A", "B"], keep=False)
    expected = df.loc[[6]]
    tm.assert_frame_equal(result, expected)
    # nan
    df = DataFrame(
        {
            "A": ["foo", "bar", "foo", "bar", "foo", "bar", "bar", "foo"],
            "B": ["one", "one", "two", "two", "two", "two", "one", "two"],
            "C": [1.0, np.nan, np.nan, np.nan, 1.0, 1.0, 1, 1.0],
            "D": range(8),
        }
    )
    # single column
    result = df.drop_duplicates("C")
    expected = df[:2]
    tm.assert_frame_equal(result, expected)
    result = df.drop_duplicates("C", keep="last")
    expected = df.loc[[3, 7]]
    tm.assert_frame_equal(result, expected)
    result = df.drop_duplicates("C", keep=False)
    expected = df.loc[[]]  # empty df
    tm.assert_frame_equal(result, expected)
    assert len(result) == 0
    # multi column
    result = df.drop_duplicates(["C", "B"])
    expected = df.loc[[0, 1, 2, 4]]
    tm.assert_frame_equal(result, expected)
    result = df.drop_duplicates(["C", "B"], keep="last")
    expected = df.loc[[1, 3, 6, 7]]
    tm.assert_frame_equal(result, expected)
    result = df.drop_duplicates(["C", "B"], keep=False)
    expected = df.loc[[1]]
    tm.assert_frame_equal(result, expected)
def test_drop_duplicates_NA_for_take_all():
    """keep=False with NA values: unique rows must still survive."""
    # none
    df = DataFrame(
        {
            "A": [None, None, "foo", "bar", "foo", "baz", "bar", "qux"],
            "C": [1.0, np.nan, np.nan, np.nan, 1.0, 2.0, 3, 1.0],
        }
    )
    # single column
    result = df.drop_duplicates("A")
    expected = df.iloc[[0, 2, 3, 5, 7]]
    tm.assert_frame_equal(result, expected)
    result = df.drop_duplicates("A", keep="last")
    expected = df.iloc[[1, 4, 5, 6, 7]]
    tm.assert_frame_equal(result, expected)
    # only 'baz' and 'qux' appear exactly once
    result = df.drop_duplicates("A", keep=False)
    expected = df.iloc[[5, 7]]
    tm.assert_frame_equal(result, expected)
    # nan
    # single column
    result = df.drop_duplicates("C")
    expected = df.iloc[[0, 1, 5, 6]]
    tm.assert_frame_equal(result, expected)
    result = df.drop_duplicates("C", keep="last")
    expected = df.iloc[[3, 5, 6, 7]]
    tm.assert_frame_equal(result, expected)
    result = df.drop_duplicates("C", keep=False)
    expected = df.iloc[[5, 6]]
    tm.assert_frame_equal(result, expected)
def test_drop_duplicates_inplace():
    """inplace=True mutates the frame and returns None in every mode."""
    orig = DataFrame(
        {
            "A": ["foo", "bar", "foo", "bar", "foo", "bar", "bar", "foo"],
            "B": ["one", "one", "two", "two", "two", "two", "one", "two"],
            "C": [1, 1, 2, 2, 2, 2, 1, 2],
            "D": range(8),
        }
    )
    # single column
    df = orig.copy()
    return_value = df.drop_duplicates("A", inplace=True)
    expected = orig[:2]
    result = df
    tm.assert_frame_equal(result, expected)
    assert return_value is None
    df = orig.copy()
    return_value = df.drop_duplicates("A", keep="last", inplace=True)
    expected = orig.loc[[6, 7]]
    result = df
    tm.assert_frame_equal(result, expected)
    assert return_value is None
    df = orig.copy()
    return_value = df.drop_duplicates("A", keep=False, inplace=True)
    expected = orig.loc[[]]
    result = df
    tm.assert_frame_equal(result, expected)
    assert len(df) == 0
    assert return_value is None
    # multi column
    df = orig.copy()
    return_value = df.drop_duplicates(["A", "B"], inplace=True)
    expected = orig.loc[[0, 1, 2, 3]]
    result = df
    tm.assert_frame_equal(result, expected)
    assert return_value is None
    df = orig.copy()
    return_value = df.drop_duplicates(["A", "B"], keep="last", inplace=True)
    expected = orig.loc[[0, 5, 6, 7]]
    result = df
    tm.assert_frame_equal(result, expected)
    assert return_value is None
    df = orig.copy()
    return_value = df.drop_duplicates(["A", "B"], keep=False, inplace=True)
    expected = orig.loc[[0]]
    result = df
    tm.assert_frame_equal(result, expected)
    assert return_value is None
    # consider everything
    orig2 = orig.loc[:, ["A", "B", "C"]].copy()
    df2 = orig2.copy()
    return_value = df2.drop_duplicates(inplace=True)
    # in this case only
    expected = orig2.drop_duplicates(["A", "B"])
    result = df2
    tm.assert_frame_equal(result, expected)
    assert return_value is None
    df2 = orig2.copy()
    return_value = df2.drop_duplicates(keep="last", inplace=True)
    expected = orig2.drop_duplicates(["A", "B"], keep="last")
    result = df2
    tm.assert_frame_equal(result, expected)
    assert return_value is None
    df2 = orig2.copy()
    return_value = df2.drop_duplicates(keep=False, inplace=True)
    expected = orig2.drop_duplicates(["A", "B"], keep=False)
    result = df2
    tm.assert_frame_equal(result, expected)
    assert return_value is None
@pytest.mark.parametrize("inplace", [True, False])
@pytest.mark.parametrize(
    "origin_dict, output_dict, ignore_index, output_index",
    [
        ({"A": [2, 2, 3]}, {"A": [2, 3]}, True, [0, 1]),
        ({"A": [2, 2, 3]}, {"A": [2, 3]}, False, [0, 2]),
        ({"A": [2, 2, 3], "B": [2, 2, 4]}, {"A": [2, 3], "B": [2, 4]}, True, [0, 1]),
        ({"A": [2, 2, 3], "B": [2, 2, 4]}, {"A": [2, 3], "B": [2, 4]}, False, [0, 2]),
    ],
)
def test_drop_duplicates_ignore_index(
    inplace, origin_dict, output_dict, ignore_index, output_index
):
    # GH 30114
    # ignore_index=True relabels the surviving rows 0..n-1; False keeps the
    # original labels. The source frame must never be mutated out-of-place.
    df = DataFrame(origin_dict)
    expected = DataFrame(output_dict, index=output_index)
    if inplace:
        result_df = df.copy()
        result_df.drop_duplicates(ignore_index=ignore_index, inplace=inplace)
    else:
        result_df = df.drop_duplicates(ignore_index=ignore_index, inplace=inplace)
    tm.assert_frame_equal(result_df, expected)
    tm.assert_frame_equal(df, DataFrame(origin_dict))
def test_drop_duplicates_null_in_object_column(nulls_fixture):
    # https://github.com/pandas-dev/pandas/issues/32992
    # No row is a duplicate, so the frame must come back unchanged even with
    # a null value inside an object-dtype column.
    frame = DataFrame([[1, nulls_fixture], [2, "a"]], dtype=object)
    tm.assert_frame_equal(frame.drop_duplicates(), frame)
@pytest.mark.parametrize("keep", ["first", "last", False])
def test_drop_duplicates_series_vs_dataframe(keep):
    # GH#14192
    # A one-column frame must deduplicate exactly like the equivalent Series,
    # including columns holding NaN/NaT and mixed object values.
    df = DataFrame(
        {
            "a": [1, 1, 1, "one", "one"],
            "b": [2, 2, np.nan, np.nan, np.nan],
            "c": [3, 3, np.nan, np.nan, "three"],
            "d": [1, 2, 3, 4, 4],
            "e": [
                datetime(2015, 1, 1),
                datetime(2015, 1, 1),
                datetime(2015, 2, 1),
                NaT,
                NaT,
            ],
        }
    )
    for column in df.columns:
        dropped_frame = df[[column]].drop_duplicates(keep=keep)
        dropped_series = df[column].drop_duplicates(keep=keep)
        tm.assert_frame_equal(dropped_frame, dropped_series.to_frame())
@pytest.mark.parametrize("arg", [[1], 1, "True", [], 0])
def test_drop_duplicates_non_boolean_ignore_index(arg):
    # GH#38274
    # ignore_index is validated strictly: truthy/falsy stand-ins are rejected.
    frame = DataFrame({"a": [1, 2, 1, 3]})
    msg = '^For argument "ignore_index" expected type bool, received type .*.$'
    with pytest.raises(ValueError, match=msg):
        frame.drop_duplicates(ignore_index=arg)
| |
import flask
import pymysql.cursors
from pymysql.constants import ER
from pymysql.err import IntegrityError
from donut.auth_utils import get_user_id
# Academic-term codes as stored in the database: fall, winter, spring.
TERMS = {'FA': 1, 'WI': 2, 'SP': 3}
# Reverse lookup: term number -> two-letter term name.
TERM_NAMES = {v: k for k, v in TERMS.items()}
def try_int(x):
    """
    Collapse a numeric value to a plain int when it is integral.
    Keeps the JSON output a little smaller; non-integral values are
    returned unchanged.
    """
    truncated = int(x)
    if truncated == x:
        return truncated
    return x
def get_terms():
    """
    Returns {'year', 'term'} structs for each year with courses,
    sorted from most to least recent.
    """
    # (term + 1) % 3 maps FA(1)->2, SP(3)->1, WI(2)->0, so within one year
    # the DESC sort yields FA, SP, WI -- presumably most-recent-first when
    # `year` is the calendar year of the term; verify against the data.
    query = """
    SELECT DISTINCT year, term FROM courses
    ORDER BY year DESC, (term + 1) % 3 DESC
    """
    with flask.g.pymysql_db.cursor() as cursor:
        cursor.execute(query)
        return cursor.fetchall()
def get_year_courses():
    """
    Returns {'ids'[], number', name', 'units'[3], 'instructor', 'terms'[]}
    structs for all courses in the most recent FA, WI, and SP terms.
    'ids' and 'terms' link the ids of different terms of the same course.
    """
    # Find most recent year for each term that has courses
    term_years = {}
    for term_year in get_terms():
        term = term_year['term']
        if term not in term_years:
            # get_terms() is sorted most recent first, so the first hit for
            # each term is that term's latest year.
            term_years[term] = term_year['year']
    query = """
    SELECT
        course_id,
        CONCAT(department, ' ', course_number) AS number,
        name,
        units_lecture, units_lab, units_homework
    FROM courses
    WHERE year = %s AND term = %s
    """
    instructor_query = """
    SELECT DISTINCT instructor
    FROM sections NATURAL JOIN instructors
    WHERE course_id = %s
    """
    courses = {}  # mapping of course numbers to course structs
    with flask.g.pymysql_db.cursor() as cursor:
        for term, year in term_years.items():
            cursor.execute(query, (year, term))
            for course in cursor.fetchall():
                number = course['number']
                cursor.execute(instructor_query, course['course_id'])
                instructors = cursor.fetchall()
                # Only report an instructor when the course has exactly one.
                instructor = instructors[0]['instructor'] \
                    if len(instructors) == 1 else None
                matching_course = courses.get(number)
                if matching_course:
                    # Same course number seen in another term: merge entries.
                    matching_course['terms'].append(term)
                    matching_course['ids'].append(course['course_id'])
                    # Different instructors across terms -> report none.
                    if instructor != matching_course['instructor']:
                        matching_course['instructor'] = None
                else:
                    units = (course['units_lecture'], course['units_lab'],
                             course['units_homework'])
                    courses[number] = {
                        # Separate course id for each term
                        'ids': [course['course_id']],
                        'number': number,
                        'name': course['name'],
                        'units': tuple(map(try_int, units)),
                        'instructor': instructor,
                        'terms': [term]
                    }
    return sorted(
        courses.values(), key=lambda course: course['number'].lower())
def add_planner_course(username, course_id, year):
    """
    Put the given course on a user's planner under the given planner year.
    Year 1 is frosh year, year 2 is smore year, etc.
    """
    params = (get_user_id(username), course_id, year)
    query = 'INSERT INTO planner_courses (user_id, course_id, planner_year) VALUES (%s, %s, %s)'
    with flask.g.pymysql_db.cursor() as cursor:
        cursor.execute(query, params)
def drop_planner_course(username, course_id, year):
    """
    Take the given course off a user's planner for the given planner year.
    Year 1 is frosh year, year 2 is smore year, etc.
    """
    params = (get_user_id(username), course_id, year)
    query = """
    DELETE FROM planner_courses
    WHERE user_id = %s AND course_id = %s AND planner_year = %s
    """
    with flask.g.pymysql_db.cursor() as cursor:
        cursor.execute(query, params)
def add_planner_placeholder(username, year, term, course, units):
    """
    Insert a placeholder course into a user's planner for a given term and
    return the generated placeholder ID.
    Year 1 is frosh year, year 2 is smore year, etc.
    Term 1 is FA, 2 is WI, and 3 is SP.
    """
    params = (get_user_id(username), year, term, course, units)
    query = """
    INSERT INTO planner_placeholders
    (user_id, planner_year, term, course_name, course_units)
    VALUES (%s, %s, %s, %s, %s)
    """
    with flask.g.pymysql_db.cursor() as cursor:
        cursor.execute(query, params)
        return cursor.lastrowid
def drop_planner_placeholder(username, placeholder_id):
    """
    Delete the placeholder with the given ID from the user's planner.
    Returns whether a row was actually deleted (i.e. the placeholder
    belonged to this user).
    """
    params = (placeholder_id, get_user_id(username))
    query = """
    DELETE FROM planner_placeholders
    WHERE placeholder_id = %s AND user_id = %s
    """
    with flask.g.pymysql_db.cursor() as cursor:
        cursor.execute(query, params)
        return cursor.rowcount > 0
def get_user_planner_courses(username):
    """
    Returns {'ids'[1], 'number', 'units', 'terms'[1], 'year'} structs
    for each course on a certain user's planner.
    Unlike in get_planner_courses(), the unit counts are already summed.
    """
    query = """
    SELECT
        course_id,
        CONCAT(department, ' ', course_number) AS number,
        term,
        units,
        planner_year
    FROM users NATURAL JOIN planner_courses NATURAL JOIN courses
    WHERE username = %s
    ORDER BY units DESC, number
    """
    with flask.g.pymysql_db.cursor() as cursor:
        cursor.execute(query, username)
        courses = cursor.fetchall()
    # One-element tuples keep the shape consistent with get_year_courses(),
    # whose 'ids'/'terms' may hold several entries per course.
    return [{
        'ids': (course['course_id'], ),
        'number': course['number'],
        'units': try_int(course['units']),
        'terms': (course['term'], ),
        'year': course['planner_year']
    } for course in courses]
def get_user_planner_placeholders(username):
    """Return the user's planner placeholders, largest unit counts first."""
    query = """
    SELECT placeholder_id, planner_year, term, course_name, course_units
    FROM planner_placeholders NATURAL JOIN users
    WHERE username = %s
    ORDER BY course_units DESC, course_name
    """
    with flask.g.pymysql_db.cursor() as cursor:
        cursor.execute(query, username)
        rows = cursor.fetchall()
    placeholders = []
    for row in rows:
        placeholders.append({
            'id': row['placeholder_id'],
            'year': row['planner_year'],
            'term': row['term'],
            'course': row['course_name'],
            'units': try_int(row['course_units'])
        })
    return placeholders
def get_scheduler_courses(year, term):
    """
    Returns {'id', 'number', 'name', 'units'[3], 'sections'[]} structs for each
    course in a certain term of a certain year.
    'sections' is a list of {'number', 'instructor', 'grades', 'times'} structs.
    """
    query = """
    SELECT
        course_id,
        CONCAT(department, ' ', course_number) AS number,
        name,
        units_lecture, units_lab, units_homework,
        section_number,
        instructor,
        grades_type,
        times,
        locations
    FROM
        courses
        NATURAL JOIN sections
        NATURAL JOIN instructors
        NATURAL JOIN grades_types
    WHERE year = %s AND term = %s
    """
    with flask.g.pymysql_db.cursor() as cursor:
        cursor.execute(query, (year, term))
        sections = cursor.fetchall()
    course_sections = {}
    for section in sections:
        course_id = section['course_id']
        course = course_sections.get(course_id)
        if course:
            # Reuse the section list already attached to this course.
            # NOTE: rebinding the name `sections` here does not disturb the
            # surrounding for-loop (it already holds its own iterator), but
            # the shadowing is easy to misread.
            sections = course['sections']
        else:
            # First time we see this course: start its section list and
            # keep a reference to it inside the course struct.
            sections = []
            units = (section['units_lecture'], section['units_lab'],
                     section['units_homework'])
            course_sections[course_id] = {
                'id': course_id,
                'number': section['number'],
                'name': section['name'],
                'units': tuple(map(try_int, units)),
                'sections': sections
            }
        sections.append({
            'number': section['section_number'],
            'instructor': section['instructor'],
            'grades': section['grades_type'],
            'times': section['times'],
            'locations': section['locations']
        })
    courses = course_sections.values()
    for course in courses:
        course['sections'].sort(key=lambda section: section['number'])
    return sorted(courses, key=lambda course: course['number'].lower())
def add_scheduler_section(username, course, section):
    """
    Put the given section number of the given course on a user's schedule
    for that course's term.
    """
    params = (get_user_id(username), course, section)
    query = """
    INSERT INTO scheduler_sections (user_id, course_id, section_number)
    VALUES (%s, %s, %s)
    """
    with flask.g.pymysql_db.cursor() as cursor:
        cursor.execute(query, params)
def drop_scheduler_section(username, course, section):
    """
    Take the given section number of the given course off a user's schedule
    for that course's term.
    """
    params = (get_user_id(username), course, section)
    query = """
    DELETE FROM scheduler_sections
    WHERE user_id = %s AND course_id = %s AND section_number = %s
    """
    with flask.g.pymysql_db.cursor() as cursor:
        cursor.execute(query, params)
def get_user_scheduler_sections(username, year, term):
    """
    Returns {'id' (course_id), 'section' (section_number)} structs for each
    section on a certain user's schedule for a certain term of a certain year.
    """
    query = """
    SELECT course_id, section_number
    FROM
        users
        NATURAL JOIN scheduler_sections
        NATURAL JOIN courses
    WHERE username = %s AND year = %s AND term = %s
    """
    with flask.g.pymysql_db.cursor() as cursor:
        cursor.execute(query, (username, year, term))
        rows = cursor.fetchall()
    return [{'id': row['course_id'], 'section': row['section_number']}
            for row in rows]
def is_duplicate_error(e):
    """
    Return True when `e` is a MySQL duplicate-entry IntegrityError, i.e. an
    INSERT violated a unique key.
    (PEP 8 E731: was a lambda assigned to a name; a def gives the callable a
    real name for tracebacks without changing the interface.)
    """
    return isinstance(e, IntegrityError) and e.args[0] == ER.DUP_ENTRY
def get_notes(username, course, section):
    """
    Return the notes a user saved on the given section of a course, or None
    if the user has no such section on their schedule.
    """
    user_id = get_user_id(username)
    query = """
    SELECT notes FROM scheduler_sections
    WHERE user_id= %s AND course_id = %s AND section_number = %s
    """
    with flask.g.pymysql_db.cursor() as cursor:
        cursor.execute(query, (user_id, course, section))
        row = cursor.fetchone()
    # fetchone() returns None when no row matched; the old code then crashed
    # with a TypeError instead of reporting "no notes".
    return row['notes'] if row else None
def edit_notes(username, course, section, notes):
    """Overwrite the notes text for one scheduled section of a user."""
    query = """
    UPDATE scheduler_sections SET notes = %s
    WHERE user_id = %s AND course_id = %s AND section_number = %s
    """
    params = (notes, get_user_id(username), course, section)
    with flask.g.pymysql_db.cursor() as cur:
        cur.execute(query, params)
def delete_notes(username, course, section):
    """Clear (NULL out) the notes for one scheduled section of a user."""
    query = """
    UPDATE scheduler_sections SET notes = NULL
    WHERE user_id = %s AND course_id = %s AND section_number = %s
    """
    params = (get_user_id(username), course, section)
    with flask.g.pymysql_db.cursor() as cur:
        cur.execute(query, params)
| |
from __future__ import absolute_import
import itertools
import operator
import os
from plumbum.lib import six
from abc import abstractmethod, abstractproperty
from functools import reduce
class FSUser(int):
    """An ``int`` subclass representing a file-system user or group id.

    Instances behave exactly like a number (``uid``/``gid``) while also
    carrying an optional ``.name`` attribute holding the string name of
    the user (``None`` when not supplied).
    """
    def __new__(cls, val, name = None):
        obj = super(FSUser, cls).__new__(cls, val)
        obj.name = name
        return obj
class Path(six.ABC):
    """An abstraction over file system paths. This class is abstract, and the two implementations
    are :class:`LocalPath <plumbum.machines.local.LocalPath>` and
    :class:`RemotePath <plumbum.path.remote.RemotePath>`.
    """
    __slots__ = []
    # Subclasses may override; a case-insensitive implementation compares
    # and hashes the lower-cased string form (see __eq__/__hash__).
    CASE_SENSITIVE = True
    def __repr__(self):
        return "<%s %s>" % (self.__class__.__name__, str(self))
    def __div__(self, other):
        """Joins two paths"""
        return self.join(other)
    # Python 3 maps the ``/`` operator to __truediv__
    __truediv__ = __div__
    def __floordiv__(self, expr):
        """Returns a (possibly empty) list of paths that matched the glob-pattern under this path"""
        return self.glob(expr)
    def __iter__(self):
        """Iterate over the files in this directory"""
        return iter(self.list())
    def __eq__(self, other):
        # Two Path objects compare by backend-specific identity info;
        # against a string the comparison is textual, honoring CASE_SENSITIVE.
        if isinstance(other, Path):
            return self._get_info() == other._get_info()
        elif isinstance(other, str):
            if self.CASE_SENSITIVE:
                return str(self) == other
            else:
                return str(self).lower() == other.lower()
        else:
            return NotImplemented
    def __ne__(self, other):
        return not (self == other)
    # Ordering is purely lexicographic on the string form.
    def __gt__(self, other):
        return str(self) > str(other)
    def __ge__(self, other):
        return str(self) >= str(other)
    def __lt__(self, other):
        return str(self) < str(other)
    def __le__(self, other):
        return str(self) <= str(other)
    def __hash__(self):
        # Must stay consistent with __eq__: fold case when insensitive.
        if self.CASE_SENSITIVE:
            return hash(str(self))
        else:
            return hash(str(self).lower())
    def __nonzero__(self):
        # A path is falsy only when its string form is empty.
        return bool(str(self))
    __bool__ = __nonzero__
    @abstractmethod
    def _form(self, *parts):
        # Construct a new path of the same concrete flavor from parts.
        pass
    def up(self, count = 1):
        """Go up in ``count`` directories (the default is 1)"""
        return self.join("../" * count)
    def walk(self, filter = lambda p: True, dir_filter = lambda p: True):  # @ReservedAssignment
        """traverse all (recursive) sub-elements under this directory, that match the given filter.
        By default, the filter accepts everything; you can provide a custom filter function that
        takes a path as an argument and returns a boolean
        :param filter: the filter (predicate function) for matching results. Only paths matching
                       this predicate are returned. Defaults to everything.
        :param dir_filter: the filter (predicate function) for matching directories. Only directories
                           matching this predicate are recursed into. Defaults to everything.
        """
        for p in self.list():
            if filter(p):
                yield p
            # Recurse into matching directories even when the entry itself
            # was not yielded by ``filter``.
            if p.isdir() and dir_filter(p):
                for p2 in p.walk(filter, dir_filter):
                    yield p2
    @abstractproperty
    def basename(self):
        """The basename component of this path"""
    @abstractproperty
    def dirname(self):
        """The dirname component of this path"""
    @abstractproperty
    def suffix(self):
        """The suffix of this file"""
    @abstractproperty
    def suffixes(self):
        """This is a list of all suffixes"""
    @abstractproperty
    def uid(self):
        """The user that owns this path. The returned value is a :class:`FSUser <plumbum.path.FSUser>`
        object which behaves like an ``int`` (as expected from ``uid``), but it also has a ``.name``
        attribute that holds the string-name of the user"""
    @abstractproperty
    def gid(self):
        """The group that owns this path. The returned value is a :class:`FSUser <plumbum.path.FSUser>`
        object which behaves like an ``int`` (as expected from ``gid``), but it also has a ``.name``
        attribute that holds the string-name of the group"""
    @abstractmethod
    def _get_info(self):
        # Backend-specific identity used by __eq__.
        pass
    @abstractmethod
    def join(self, *parts):
        """Joins this path with any number of paths"""
    @abstractmethod
    def list(self):
        """Returns the files in this directory"""
    @abstractmethod
    def isdir(self):
        """Returns ``True`` if this path is a directory, ``False`` otherwise"""
    @abstractmethod
    def isfile(self):
        """Returns ``True`` if this path is a regular file, ``False`` otherwise"""
    @abstractmethod
    def islink(self):
        """Returns ``True`` if this path is a symbolic link, ``False`` otherwise"""
    @abstractmethod
    def exists(self):
        """Returns ``True`` if this path exists, ``False`` otherwise"""
    @abstractmethod
    def stat(self):
        pass
    @abstractmethod
    def with_name(self, name):
        """Returns a path with the name replaced"""
    @abstractmethod
    def with_suffix(self, suffix, depth=1):
        """Returns a path with the suffix replaced. Up to last ``depth`` suffixes will be
        replaces. None will replace all suffixes. If there are less than ``depth`` suffixes,
        this will replace all suffixes. ``.tar.gz`` is an example where ``depth=2`` or
        ``depth=None`` is useful"""
    @abstractmethod
    def glob(self, pattern):
        """Returns a (possibly empty) list of paths that matched the glob-pattern under this path"""
    @abstractmethod
    def delete(self):
        """Deletes this path (recursively, if a directory)"""
    @abstractmethod
    def move(self, dst):
        """Moves this path to a different location"""
    def rename(self, newname):
        """Renames this path to the ``new name`` (only the basename is changed)"""
        return self.move(self.up() / newname)
    @abstractmethod
    def copy(self, dst, override = False):
        """Copies this path (recursively, if a directory) to the destination path"""
    @abstractmethod
    def mkdir(self):
        """Creates a directory at this path; if the directory already exists, silently ignore"""
    @abstractmethod
    def open(self, mode = "r"):
        """opens this path as a file"""
    @abstractmethod
    def read(self, encoding=None):
        """returns the contents of this file. By default the data is binary (``bytes``), but you can
        specify the encoding, e.g., ``'latin1'`` or ``'utf8'``"""
    @abstractmethod
    def write(self, data, encoding=None):
        """writes the given data to this file. By default the data is expected to be binary (``bytes``),
        but you can specify the encoding, e.g., ``'latin1'`` or ``'utf8'``"""
    @abstractmethod
    def chown(self, owner = None, group = None, recursive = None):
        """Change ownership of this path.
        :param owner: The owner to set (either ``uid`` or ``username``), optional
        :param group: The group to set (either ``gid`` or ``groupname``), optional
        :param recursive: whether to change ownership of all contained files and subdirectories.
                          Only meaningful when ``self`` is a directory. If ``None``, the value
                          will default to ``True`` if ``self`` is a directory, ``False`` otherwise.
        """
    @abstractmethod
    def chmod(self, mode):
        """Change the mode of path to the numeric mode.
        :param mode: file mode as for os.chmod
        """
    @staticmethod
    def _access_mode_to_flags(mode, flags = {"f" : os.F_OK, "w" : os.W_OK, "r" : os.R_OK, "x" : os.X_OK}):
        # Translate a string like "rw" into an os.*_OK bitmask; an int mode
        # passes through unchanged.
        if isinstance(mode, str):
            mode = reduce(operator.or_, [flags[m] for m in mode.lower()], 0)
        return mode
    @abstractmethod
    def access(self, mode = 0):
        """Test file existence or permission bits
        :param mode: a bitwise-or of access bits, or a string-representation thereof:
                     ``'f'``, ``'x'``, ``'r'``, ``'w'`` for ``os.F_OK``, ``os.X_OK``,
                     ``os.R_OK``, ``os.W_OK``
        """
    @abstractmethod
    def link(self, dst):
        """Creates a hard link from ``self`` to ``dst``
        :param dst: the destination path
        """
    @abstractmethod
    def symlink(self, dst):
        """Creates a symbolic link from ``self`` to ``dst``
        :param dst: the destination path
        """
    @abstractmethod
    def unlink(self):
        """Deletes a symbolic link"""
    def split(self):
        """Splits the path on directory separators, yielding a list of directories, e.g,
        ``"/var/log/messages"`` will yield ``['var', 'log', 'messages']``.
        """
        parts = []
        path = self
        # Walk up to the root (where dirname(p) == p), collecting basenames,
        # then reverse to get top-down order.
        while path != path.dirname:
            parts.append(path.basename)
            path = path.dirname
        return parts[::-1]
    def relative_to(self, source):
        """Computes the "relative path" require to get from ``source`` to ``self``. They satisfy the invariant
        ``source_path + (target_path - source_path) == target_path``. For example::
            /var/log/messages - /var/log/messages = []
            /var/log/messages - /var              = [log, messages]
            /var/log/messages - /                 = [var, log, messages]
            /var/log/messages - /var/tmp          = [.., log, messages]
            /var/log/messages - /opt              = [.., var, log, messages]
            /var/log/messages - /opt/lib          = [.., .., var, log, messages]
        """
        if isinstance(source, str):
            source = self._form(source)
        parts = self.split()
        baseparts = source.split()
        # Count the shared leading components, then climb out of the rest
        # of ``source`` with ".." before descending into ``self``.
        ancestors = len(list(itertools.takewhile(lambda p: p[0] == p[1], zip(parts, baseparts))))
        return RelativePath([".."] * (len(baseparts) - ancestors) + parts[ancestors:])
    def __sub__(self, other):
        """Same as ``self.relative_to(other)``"""
        return self.relative_to(other)
class RelativePath(object):
    """
    The "delta" between two absolute paths, produced by subtracting paths
    (``Path.relative_to``). A relative path does not point at anything, so
    it is not itself a path; it is system-agnostic and closed under
    addition, whereas ``Path`` objects are absolute and always designate a
    concrete (existent or not) location.
    """
    def __init__(self, parts):
        self.parts = parts

    def __str__(self):
        return "/".join(self.parts)

    def __repr__(self):
        return "RelativePath(%r)" % (self.parts,)

    # Container protocol: iterate, measure and index the components.
    def __iter__(self):
        return iter(self.parts)

    def __len__(self):
        return len(self.parts)

    def __getitem__(self, index):
        return self.parts[index]

    # All comparisons (and the hash) are defined on the string form.
    def __eq__(self, other):
        return str(self) == str(other)

    def __ne__(self, other):
        return not self.__eq__(other)

    def __gt__(self, other):
        return str(self) > str(other)

    def __ge__(self, other):
        return str(self) >= str(other)

    def __lt__(self, other):
        return str(self) < str(other)

    def __le__(self, other):
        return str(self) <= str(other)

    def __hash__(self):
        return hash(str(self))

    def __nonzero__(self):
        return bool(str(self))
    __bool__ = __nonzero__

    def up(self, count = 1):
        """Drop the last ``count`` components (default 1)."""
        return RelativePath(self.parts[:-count])

    def __radd__(self, path):
        # path + relpath: join every component onto the absolute path.
        return path.join(*self.parts)
| |
from .session import Session
from .utils import list_wrapper
class ValueSerializer(object):
    """Mixin interface for values that know how to serialize themselves."""

    def _check_needs_loading(self):
        """Hook: load the backing endpoint if required (no-op by default)."""

    def serialize(self):
        """Hook: return the serialized form of this value (no-op by default)."""
def _apply_validate_func(validate_func, value):
    """Run *validate_func* (if any) on *value*.

    A validate_func may raise a specific ValidationError itself, or simply
    return falsy, in which case a generic ValidationError is raised here.
    """
    if not validate_func:
        return
    if not validate_func(value):
        raise ValidationError("Validation failed for value='%s'" % (value,))
def _single_attr_get(attr_name, default=None, custom_type=None):
    """Build a property getter for a single-valued attribute.

    When *custom_type* is given, the raw value is wrapped in it and the
    wrapped instance is cached per (attr_name, type) on the owner.
    """
    def _get(self):
        self._check_needs_loading()
        value = self._content.get(attr_name, default)
        if value is None or custom_type is None:
            return value
        cache_key = '%s%s' % (attr_name, custom_type.__name__)
        wrapped = self._cached_instances.get(cache_key)
        if wrapped is None:
            wrapped = custom_type(content=value, session=self.session)
            self._cached_instances[cache_key] = wrapped
        return wrapped
    return _get
def _single_attr_set(attr_name, attr_type, validate_func=None, transform_func=None):
    """Build a property setter enforcing *attr_type* (or None) on assignment.

    A *transform_func* takes over storage entirely; otherwise serializable
    values are serialized before being written into the content dict.
    """
    def _set(self, value):
        assert value is None or isinstance(value, attr_type)
        _apply_validate_func(validate_func, value)
        if transform_func:
            transform_func(self._content, attr_name, value)
            return
        if isinstance(value, ValueSerializer):
            value = value.serialize()
        self._content[attr_name] = value
    return _set
def _attr_del(attr_name):
    """Build a property deleter that removes *attr_name* from the content dict.

    Like ``del``, a missing key raises KeyError.
    """
    def _del(self):
        self._content.pop(attr_name)
    return _del
def _multi_attr_get(attr_name, default=None, custom_type=None, readonly=False):
    """Build a property getter for a list-valued attribute.

    When *custom_type* is given, the raw list is wrapped in the type-aware
    ``list_wrapper``; a missing attribute yields *default* unwrapped-if-None.
    """
    def _get(self):
        self._check_needs_loading()
        values = self._content.get(attr_name, default)
        if values is not None and custom_type:
            return list_wrapper(content=values, custom_type=custom_type, readonly=readonly, session=self.session)
        return values
    return _get
def _multi_attr_set(attr_name, list_elem_type, validate_func=None, transform_func=None):
    """Build a property setter for a list attribute whose elements must all
    be instances of *list_elem_type*.

    A *transform_func* takes over storage entirely; otherwise serializable
    elements are serialized before the list is written to the content dict.

    Bug fix: the original accepted ``None`` in its first assert but then
    iterated the value unconditionally (in the element assert and in the
    list comprehension), so assigning None crashed with TypeError. None is
    now stored as-is, matching the single-attribute setter.
    """
    def _set(self, value):
        assert isinstance(value, list) or value is None
        if value is not None:
            assert all(isinstance(elem, list_elem_type) for elem in value)
        _apply_validate_func(validate_func, value)
        if transform_func:
            transform_func(self._content, attr_name, value)
        elif value is None:
            self._content[attr_name] = None
        else:
            self._content[attr_name] = [v.serialize() if isinstance(v, ValueSerializer) else v for v in value]
    return _set
def make_single_elem_property(attr_name, elem_type, default=None, doc_string='', validate_func=None, transform_func=None, custom_type=None, readonly=False):
    """Create a ``property`` over a single-valued content attribute.

    Read-only properties get no setter or deleter.
    """
    getter = _single_attr_get(attr_name, default, custom_type=custom_type)
    setter = deleter = None
    if not readonly:
        setter = _single_attr_set(attr_name, elem_type, validate_func=validate_func, transform_func=transform_func)
        deleter = _attr_del(attr_name)
    return property(getter, setter, deleter, doc_string)
def make_multi_elem_property(attr_name, elem_type, doc_string='', validate_func=None, transform_func=None, custom_type=None, readonly=False):
    """Create a ``property`` over a list-valued content attribute.

    Read-only properties get no setter or deleter.
    """
    getter = _multi_attr_get(attr_name, default=[], custom_type=custom_type, readonly=readonly)
    setter = deleter = None
    if not readonly:
        setter = _multi_attr_set(attr_name, elem_type, validate_func=validate_func, transform_func=transform_func)
        deleter = _attr_del(attr_name)
    return property(getter, setter, deleter, doc_string)
def _single_ref_attr_get(attr_name, elem_ref_type):
    """Build a getter that resolves a stored key into a lazily-loaded
    endpoint of type *elem_ref_type* via the session; falsy keys yield None."""
    def _get(self):
        self._check_needs_loading()
        ref_key = self._content.get(attr_name)
        if not ref_key:
            return None
        return self.session.get(elem_ref_type, ref_key, lazy_load=True)
    return _get
def _single_ref_attr_set(attr_name, elem_ref_type, validate_func=None):
    """Build a setter that stores an endpoint's key (or None) in the content."""
    def _set(self, value):
        stored = value
        if value is not None:
            assert isinstance(value, Session.enpoint_accepted_types(elem_ref_type))
            _apply_validate_func(validate_func, value)
            stored = value.key
        self._content[attr_name] = stored
    return _set
def make_single_elem_ref_property(attr_name, elem_ref_type, doc_string='', validate_func=None, readonly=False):
    """Create a ``property`` referencing a single other endpoint by stored key.

    Read-only properties get no setter or deleter.
    """
    getter = _single_ref_attr_get(attr_name, elem_ref_type)
    setter = deleter = None
    if not readonly:
        setter = _single_ref_attr_set(attr_name, elem_ref_type, validate_func=validate_func)
        deleter = _attr_del(attr_name)
    return property(getter, setter, deleter, doc_string)
def _multi_ref_attr_get(attr_name, elem_ref_type, readonly=False):
    """Build a getter returning the stored key list wrapped in a
    ``list_wrapper`` bound to *elem_ref_type* (empty list when missing)."""
    def _get(self):
        self._check_needs_loading()
        ref_keys = self._content.get(attr_name, [])
        return list_wrapper(ref_keys, endpoint_type=elem_ref_type, readonly=readonly, session=self.session)
    return _get
def _multi_ref_attr_set(attr_name, elem_ref_type):
    """Build a setter that replaces the stored key list from a list of
    endpoint objects (or stores None as-is)."""
    def _set(self, value):
        stored = value
        if value is not None:
            assert isinstance(value, list)
            accepted = Session.enpoint_accepted_types(elem_ref_type)
            assert all(isinstance(v, accepted) for v in value)
            stored = [v.key for v in value]
        self._content[attr_name] = stored
    return _set
def make_multi_elem_ref_property(attr_name, elem_ref_type, doc_string='', readonly=False):
    """Create a ``property`` referencing a list of other endpoints by key.

    Read-only properties get no setter or deleter.
    """
    getter = _multi_ref_attr_get(attr_name, elem_ref_type, readonly=readonly)
    setter = deleter = None
    if not readonly:
        setter = _multi_ref_attr_set(attr_name, elem_ref_type)
        deleter = _attr_del(attr_name)
    return property(getter, setter, deleter, doc_string)
def make_single_readonly_property(attr_name, default=None, doc_string=''):
    """Create a getter-only ``property`` over a raw content attribute."""
    getter = _single_attr_get(attr_name, default=default)
    return property(getter, None, None, doc_string)
class ValidationError(Exception):
    """Raised when an instance fails validation prior to being saved."""
class Endpoint(ValueSerializer):
    """Base class for REST endpoint objects backed by a raw content dict.

    Subclasses are expected to define ``ENDPOINT`` (the resource path used
    by ``session.make_request``) and may define ``DEFAULTS`` and
    ``LOAD_ALL`` -- none of these are declared here, so this class is not
    usable standalone.
    """
    class VISIBILITY(object):
        # Closed set of visibility states derived from content flags.
        PRIVATE = 'PRIVATE'
        EVERYONE = 'EVERYONE'
        GROUPS = 'GROUPS'
    def __init__(self, session, key=None, content=None, lazy_load=False):
        """
        Create an endpoint object.
        session - a Session instance
        key - the key (unique id) of the object. When creating a new object and before saving, this will be None
        content - when the content is provided, the instance is built from this content
        lazy_load - when True, the instance is only loaded from the server on the first property access
        """
        self.session = session
        if key is not None:
            # Keys are normalized to int when possible; non-numeric keys
            # are kept as given.
            try:
                key = int(key)
            except ValueError:
                pass
        self._key = key
        self._loaded = False
        self._cached_instances = {}
        if content is not None:
            self._content = content
        elif self.key and not lazy_load:
            self._load()
        else:
            self._content = {}
            if hasattr(self, 'DEFAULTS'):
                self._content.update(self.DEFAULTS)
    @classmethod
    def all(cls, session):
        """Fetch and instantiate every resource of this endpoint type."""
        ret = []
        content = session.make_request(cls.ENDPOINT)
        for endpoint_content in content:
            ret.append(cls(session, key=endpoint_content['id'], content=endpoint_content))
        return ret
    def _check_needs_loading(self):
        # Lazy-load support: fetch content on first access when keyed.
        if self._key and not self._loaded:
            self._load()
    def serialize(self):
        """An endpoint serializes to its key (see ValueSerializer)."""
        return self.key
    def _load(self, reload=False):
        """Fetch this resource's content from the server by key."""
        assert self._key, "Cannot load a resource without a key"
        load_all = getattr(self, 'LOAD_ALL', False)
        self._content = self.session.make_request(self.ENDPOINT, resource_id=self._key, load_all=load_all, reload=reload)
        self._loaded = True
    def reload(self):
        """Force a refresh of this instance's content from the server."""
        self._load(reload=True)
    def validate(self):
        """Validates the current instance before save.
        When it fails, validate raises a ValidationError exception."""
    def save(self):
        """Create (POST) or update (PUT) this resource after validating."""
        self.validate()
        method = 'PUT' if self._key else 'POST'
        ret = self.session.make_request(self.ENDPOINT, resource_id=self.key, method=method, payload=self._content)
        self._key = ret['id']
        self._content.update(ret)
        self._loaded = True
    def delete(self):
        """Delete this resource on the server and clear the local key."""
        self.session.make_request(self.ENDPOINT, resource_id=self._key, method='DELETE')
        self._key = None
    def is_new(self):
        """True when the object has never been saved (no key yet)."""
        return not self._key
    def get_content(self):
        # get the raw content for this endpoint - avoid using directly
        return self._content
    @property
    def key(self):
        # The unique id of the resource (None before first save).
        return self._key
    @property
    def visibility(self):
        """Derive the visibility state from the 'is_private'/'gid' flags."""
        self._check_needs_loading()
        if self._content.get('is_private', False):
            return self.VISIBILITY.PRIVATE
        if self._content.get('gid', 0):
            return self.VISIBILITY.GROUPS
        return self.VISIBILITY.EVERYONE
    private_to = make_single_elem_ref_property('puser', 'User', 'The user this item is private to (if any).')
    def get_groups(self):
        """Return the Group objects whose bits are set in this item's 'gid'
        bitmask (empty tuple when no groups apply)."""
        self._check_needs_loading()
        groups = []
        gid = self._content.get('gid', 0)
        if gid:
            from .user import Group  # avoid cyclical import
            groups = []
            for group in Group.all(self.session):
                if group.gid & gid:
                    groups.append(group)
        return tuple(groups)
    def set_groups(self, groups):
        """Store the combined gid bitmask of the given groups.

        NOTE(review): uses sum(); assumes each group's gid is a distinct
        bit and no group appears twice -- confirm against Group semantics.
        """
        assert isinstance(groups, (list, tuple))
        self._content['gid'] = sum([group.gid for group in groups])
    @property
    def added_on(self):
        # Raw 'ao' content field -- presumably the creation timestamp;
        # format not visible here, TODO confirm against the API.
        self._check_needs_loading()
        return self._content.get('ao')
    @property
    def last_modified(self):
        # Raw 'lm' content field -- presumably last-modified timestamp;
        # format not visible here, TODO confirm against the API.
        self._check_needs_loading()
        return self._content.get('lm')
    def __eq__(self, other):
        # Equality is by key only, regardless of content differences.
        return self.key == other.key
    def __ne__(self, other):
        return not self.__eq__(other)
    def __repr__(self):
        return '%s' % (self._content,)
| |
# This file is part of Viper - https://github.com/botherder/viper
# See the file 'LICENSE' for copying permission.
import os
from datetime import datetime

from sqlalchemy import *
from sqlalchemy.pool import NullPool
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship, backref, sessionmaker
from sqlalchemy.exc import SQLAlchemyError, IntegrityError

from viper.common.out import *
from viper.common.objects import File, Singleton
from viper.core.project import __project__
# Declarative base shared by all ORM models in this module.
Base = declarative_base()
# Join table linking tags and notes to malware samples (many-to-many).
association_table = Table(
    'association',
    Base.metadata,
    Column('tag_id', Integer, ForeignKey('tag.id')),
    Column('note_id', Integer, ForeignKey('note.id')),
    Column('malware_id', Integer, ForeignKey('malware.id'))
)
class Malware(Base):
    """ORM model for a stored malware sample, identified by its hashes."""
    __tablename__ = 'malware'
    id = Column(Integer(), primary_key=True)
    name = Column(String(255), nullable=True)
    size = Column(Integer(), nullable=False)
    type = Column(Text(), nullable=True)
    mime = Column(String(255), nullable=True)
    md5 = Column(String(32), nullable=False, index=True)
    crc32 = Column(String(8), nullable=False)
    sha1 = Column(String(40), nullable=False)
    sha256 = Column(String(64), nullable=False, index=True)
    sha512 = Column(String(128), nullable=False)
    ssdeep = Column(String(255), nullable=True)
    # Bug fix: the original used default=datetime.now() (note the call),
    # which evaluates ONCE at import time, stamping every row with the
    # process start time. Passing the callable makes SQLAlchemy evaluate
    # it per-INSERT.
    created_at = Column(DateTime(timezone=False), default=datetime.now, nullable=False)
    tag = relationship(
        'Tag',
        secondary=association_table,
        backref=backref('malware')
    )
    note = relationship(
        'Note',
        secondary=association_table,
        backref=backref('malware')
    )
    __table_args__ = (Index(
        'hash_index',
        'md5',
        'crc32',
        'sha1',
        'sha256',
        'sha512',
        unique=True
    ),)

    def to_dict(self):
        """Return a plain dict mapping column names to this row's values."""
        row_dict = {}
        for column in self.__table__.columns:
            value = getattr(self, column.name)
            row_dict[column.name] = value
        return row_dict

    def __repr__(self):
        return "<Malware('{0}','{1}')>".format(self.id, self.md5)

    def __init__(self,
                 md5,
                 crc32,
                 sha1,
                 sha256,
                 sha512,
                 size,
                 type=None,
                 mime=None,
                 ssdeep=None,
                 name=None):
        self.md5 = md5
        self.sha1 = sha1
        self.crc32 = crc32
        self.sha256 = sha256
        self.sha512 = sha512
        self.size = size
        self.type = type
        self.mime = mime
        self.ssdeep = ssdeep
        self.name = name
class Tag(Base):
    """ORM model for a unique, indexed tag string attachable to samples."""
    __tablename__ = 'tag'
    id = Column(Integer(), primary_key=True)
    tag = Column(String(255), nullable=False, unique=True, index=True)

    def __init__(self, tag):
        self.tag = tag

    def __repr__(self):
        return "<Tag ('{0}','{1}'>".format(self.id, self.tag)

    def to_dict(self):
        """Return a plain dict mapping column names to this row's values."""
        return {col.name: getattr(self, col.name) for col in self.__table__.columns}
class Note(Base):
    """ORM model for a free-form note (title + body) attachable to samples."""
    __tablename__ = 'note'
    id = Column(Integer(), primary_key=True)
    title = Column(String(255), nullable=True)
    body = Column(Text(), nullable=False)

    def __init__(self, title, body):
        self.title = title
        self.body = body

    def __repr__(self):
        return "<Note ('{0}','{1}'>".format(self.id, self.title)

    def to_dict(self):
        """Return a plain dict mapping column names to this row's values."""
        return {col.name: getattr(self, col.name) for col in self.__table__.columns}
class Database:
    """SQLite-backed store for malware samples, tags and notes.

    NOTE(review): several methods open a Session without closing it
    (add_tags, list_tags, get_note, add, find, get_sample_count); the
    NullPool limits the damage but the sessions still leak until GC.
    """
    #__metaclass__ = Singleton
    def __init__(self):
        # NOTE(review): relies on ``os`` being imported at module top --
        # it is not among this module's visible imports; confirm.
        db_path = os.path.join(__project__.get_path(), 'viper.db')
        self.engine = create_engine('sqlite:///{0}'.format(db_path), poolclass=NullPool)
        self.engine.echo = False
        self.engine.pool_timeout = 60
        # Create any missing tables on first use.
        Base.metadata.create_all(self.engine)
        self.Session = sessionmaker(bind=self.engine)
    def __del__(self):
        self.engine.dispose()
    def add_tags(self, sha256, tags):
        """Attach tags (comma- or whitespace-separated string) to the sample
        with the given sha256; silently does nothing if it is unknown."""
        session = self.Session()
        malware_entry = session.query(Malware).filter(Malware.sha256 == sha256).first()
        if not malware_entry:
            return
        tags = tags.strip()
        if ',' in tags:
            tags = tags.split(',')
        else:
            tags = tags.split()
        for tag in tags:
            tag = tag.strip().lower()
            if tag == '':
                continue
            try:
                malware_entry.tag.append(Tag(tag))
                session.commit()
            except IntegrityError as e:
                # Tag already exists (unique constraint): reuse the
                # existing row instead of creating a duplicate.
                session.rollback()
                try:
                    malware_entry.tag.append(session.query(Tag).filter(Tag.tag==tag).first())
                    session.commit()
                except SQLAlchemyError:
                    session.rollback()
    def list_tags(self):
        """Return all Tag rows."""
        session = self.Session()
        rows = session.query(Tag).all()
        return rows
    def delete_tag(self, tag_name):
        """Delete the tag with the given name; errors are printed, not raised."""
        session = self.Session()
        try:
            tag = session.query(Tag).filter(Tag.tag==tag_name).first()
            session.delete(tag)
            session.commit()
        except SQLAlchemyError as e:
            print_error("Unable to delete tag: {0}".format(e))
            session.rollback()
        finally:
            session.close()
    def add_note(self, sha256, title, body):
        """Attach a note to the sample with the given sha256; silently does
        nothing if the sample is unknown."""
        session = self.Session()
        malware_entry = session.query(Malware).filter(Malware.sha256 == sha256).first()
        if not malware_entry:
            return
        try:
            malware_entry.note.append(Note(title, body))
            session.commit()
        except SQLAlchemyError as e:
            print_error("Unable to add note: {0}".format(e))
            session.rollback()
        finally:
            session.close()
    def get_note(self, note_id):
        """Return the Note with the given primary key (or None)."""
        session = self.Session()
        note = session.query(Note).get(note_id)
        return note
    def edit_note(self, note_id, body):
        """Replace the body of an existing note; errors are printed, not raised."""
        session = self.Session()
        try:
            session.query(Note).get(note_id).body = body
            session.commit()
        except SQLAlchemyError as e:
            print_error("Unable to update note: {0}".format(e))
            session.rollback()
        finally:
            session.close()
    def delete_note(self, note_id):
        """Delete the note with the given primary key; errors are printed."""
        session = self.Session()
        try:
            note = session.query(Note).get(note_id)
            session.delete(note)
            session.commit()
        except SQLAlchemyError as e:
            print_error("Unable to delete note: {0}".format(e))
            session.rollback()
        finally:
            session.close()
    def add(self, obj, name=None, tags=None):
        """Store a File object (plus optional tags) in the database.

        Returns False only on a storage error; an existing sample (hash
        collision) is treated as success and re-used for tagging.
        NOTE(review): a non-File ``obj`` is silently ignored yet still
        returns True -- confirm that is intended.
        """
        session = self.Session()
        if not name:
            name = obj.name
        if isinstance(obj, File):
            try:
                malware_entry = Malware(md5=obj.md5,
                                        crc32=obj.crc32,
                                        sha1=obj.sha1,
                                        sha256=obj.sha256,
                                        sha512=obj.sha512,
                                        size=obj.size,
                                        type=obj.type,
                                        mime=obj.mime,
                                        ssdeep=obj.ssdeep,
                                        name=name)
                session.add(malware_entry)
                session.commit()
            except IntegrityError:
                # Duplicate hashes: the sample is already stored.
                session.rollback()
                malware_entry = session.query(Malware).filter(Malware.md5 == obj.md5).first()
            except SQLAlchemyError as e:
                print_error("Unable to store file: {0}".format(e))
                session.rollback()
                return False
        if tags:
            self.add_tags(sha256=obj.sha256, tags=tags)
        return True
    def delete(self, id):
        """Delete the Malware row with the given primary key.

        NOTE(review): the parameter shadows the ``id`` builtin.
        """
        session = self.Session()
        try:
            malware = session.query(Malware).get(id)
            session.delete(malware)
            session.commit()
        except SQLAlchemyError:
            session.rollback()
            return False
        finally:
            session.close()
        return True
    def find(self, key, value=None, offset=0):
        """Query samples by one criterion ('all', 'latest', 'md5', 'sha256',
        'tag', 'name', 'note', 'type', 'mime'); returns matching rows or
        None on an invalid query."""
        session = self.Session()
        offset = int(offset)
        rows = None
        if key == 'all':
            rows = session.query(Malware).all()
        elif key == 'latest':
            if value:
                try:
                    value = int(value)
                except ValueError:
                    print_error("You need to specify a valid number as a limit for your query")
                    return None
            else:
                value = 5
            rows = session.query(Malware).order_by(Malware.created_at.desc()).limit(value).offset(offset)
        elif key == 'md5':
            rows = session.query(Malware).filter(Malware.md5 == value).all()
        elif key == 'sha256':
            rows = session.query(Malware).filter(Malware.sha256 == value).all()
        elif key == 'tag':
            rows = session.query(Malware).filter(Malware.tag.any(Tag.tag == value.lower())).all()
        elif key == 'name':
            # '*' wildcards map to SQL '%'; otherwise substring match.
            if '*' in value:
                value = value.replace('*', '%')
            else:
                value = '%{0}%'.format(value)
            rows = session.query(Malware).filter(Malware.name.like(value)).all()
        elif key == 'note':
            # NOTE(review): ``unicode`` is Python 2 only.
            rows = session.query(Malware).filter(Malware.note.any(Note.body.like(u'%' + unicode(value) + u'%'))).all()
        elif key == 'type':
            rows = session.query(Malware).filter(Malware.type.like('%{0}%'.format(value))).all()
        elif key == 'mime':
            rows = session.query(Malware).filter(Malware.mime.like('%{0}%'.format(value))).all()
        else:
            print_error("No valid term specified")
        return rows
    def get_sample_count(self):
        """Return the total number of stored samples."""
        session = self.Session()
        return session.query(Malware).count()
| |
#
# Author: Travis Oliphant, March 2002
#
from __future__ import division, print_function, absolute_import
__all__ = ['expm','expm2','expm3','cosm','sinm','tanm','coshm','sinhm',
'tanhm','logm','funm','signm','sqrtm',
'expm_frechet', 'expm_cond', 'fractional_matrix_power']
from numpy import (Inf, dot, diag, exp, product, logical_not, cast, ravel,
transpose, conjugate, absolute, amax, sign, isfinite, sqrt, single)
import numpy as np
import warnings
# Local imports
from .misc import norm
from .basic import solve, inv
from .special_matrices import triu
from .decomp import eig
from .decomp_svd import svd
from .decomp_schur import schur, rsf2csf
from ._expm_frechet import expm_frechet, expm_cond
from ._matfuncs_sqrtm import sqrtm
# Machine epsilons for double and single precision; used to pick tolerances.
eps = np.finfo(float).eps
feps = np.finfo(single).eps
# Map dtype.char -> precision class: 0 means single, 1 means double.
_array_precision = {'i': 1, 'l': 1, 'f': 0, 'd': 1, 'F': 0, 'D': 1}
###############################################################################
# Utility functions.
def _asarray_square(A):
    """Convert *A* to an ndarray and require it to be a square matrix.

    The matfuncs module lifts scalar functions to square-matrix functions,
    hence the restriction.

    Parameters
    ----------
    A : array_like
        A square matrix.

    Returns
    -------
    out : ndarray
        An ndarray copy or view or other representation of A.

    Raises
    ------
    ValueError
        If the input is not 2-D with equal dimensions.
    """
    A = np.asarray(A)
    if A.ndim != 2 or A.shape[0] != A.shape[1]:
        raise ValueError('expected square array_like input')
    return A
def _maybe_real(A, B, tol=None):
    """Return *B*, stripped of its imaginary part when it is negligible.

    B has typically been computed as a complicated function of A and may
    carry spurious imaginary components. When A is real and B is complex
    with imaginary parts within *tol* of zero, those parts are assumed to
    be numerical artifacts and a real copy of B is returned.

    Parameters
    ----------
    A : ndarray
        Array whose type is checked as real vs. complex.
    B : ndarray
        Array to be returned, possibly without its imaginary part.
    tol : float
        Absolute tolerance; picked per precision class when None.

    Returns
    -------
    out : real or complex array
        Either B unchanged or only its real part.
    """
    # Booleans and integers count as real here.
    if not (np.isrealobj(A) and np.iscomplexobj(B)):
        return B
    if tol is None:
        tol = {0: feps*1e3, 1: eps*1e6}[_array_precision[B.dtype.char]]
    if np.allclose(B.imag, 0.0, atol=tol):
        B = B.real
    return B
###############################################################################
# Matrix functions.
def fractional_matrix_power(A, t):
    """
    Compute the fractional power of a matrix.

    Proceeds according to the discussion in section (6) of [1]_.

    Parameters
    ----------
    A : (N, N) array_like
        Matrix whose fractional power to evaluate.
    t : float
        Fractional power.

    Returns
    -------
    X : (N, N) array_like
        The fractional power of the matrix.

    References
    ----------
    .. [1] Nicholas J. Higham and Lijing lin (2011)
           "A Schur-Pade Algorithm for Fractional Powers of a Matrix."
           SIAM Journal on Matrix Analysis and Applications,
           32 (3). pp. 1056-1078. ISSN 0895-4798

    Examples
    --------
    >>> from scipy.linalg import fractional_matrix_power
    >>> a = np.array([[1.0, 3.0], [1.0, 4.0]])
    >>> b = fractional_matrix_power(a, 0.5)
    >>> np.allclose(np.dot(b, b), a)  # verify square root
    True

    """
    A = _asarray_square(A)
    # Deferred import: _matfuncs_inv_ssq pulls in scipy.sparse (onenormest),
    # and importing it at module load time would create an import cycle.
    from scipy.linalg import _matfuncs_inv_ssq
    return _matfuncs_inv_ssq._fractional_matrix_power(A, t)
def logm(A, disp=True):
    """
    Compute matrix logarithm.

    The matrix logarithm is the inverse of expm: expm(logm(`A`)) == `A`.

    Parameters
    ----------
    A : (N, N) array_like
        Matrix whose logarithm to evaluate
    disp : bool, optional
        Print warning if error in the result is estimated large
        instead of returning estimated error. (Default: True)

    Returns
    -------
    logm : (N, N) ndarray
        Matrix logarithm of `A`
    errest : float
        (if disp == False) 1-norm of the estimated error,
        ||err||_1 / ||A||_1

    References
    ----------
    .. [1] Awad H. Al-Mohy and Nicholas J. Higham (2012)
           "Improved Inverse Scaling and Squaring Algorithms
           for the Matrix Logarithm."
           SIAM Journal on Scientific Computing, 34 (4). C152-C169.
           ISSN 1095-7197
    .. [2] Nicholas J. Higham (2008)
           "Functions of Matrices: Theory and Computation"
           ISBN 978-0-898716-46-7
    .. [3] Nicholas J. Higham and Lijing lin (2011)
           "A Schur-Pade Algorithm for Fractional Powers of a Matrix."
           SIAM Journal on Matrix Analysis and Applications,
           32 (3). pp. 1056-1078. ISSN 0895-4798

    """
    A = _asarray_square(A)
    # Deferred import to dodge a circular import through scipy.sparse.
    from scipy.linalg import _matfuncs_inv_ssq
    F = _maybe_real(A, _matfuncs_inv_ssq._logm(A))
    errtol = 1000*eps
    #TODO use a better error approximation
    errest = norm(expm(F)-A,1) / norm(A,1)
    if not disp:
        return F, errest
    if not isfinite(errest) or errest >= errtol:
        print("logm result may be inaccurate, approximate err =", errest)
    return F
def expm(A, q=None):
    """
    Compute the matrix exponential using Pade approximation.

    Parameters
    ----------
    A : (N, N) array_like or sparse matrix
        Matrix to be exponentiated.
    q : None, optional
        Deprecated and ignored; retained only for backward compatibility.
        Passing any non-None value emits a DeprecationWarning.

    Returns
    -------
    expm : (N, N) ndarray
        Matrix exponential of `A`.

    References
    ----------
    .. [1] Awad H. Al-Mohy and Nicholas J. Higham (2009)
           "A New Scaling and Squaring Algorithm for the Matrix Exponential."
           SIAM Journal on Matrix Analysis and Applications.
           31 (3). pp. 970-989. ISSN 1095-7162

    Examples
    --------
    >>> from scipy.linalg import expm, sinm, cosm

    Matrix version of the formula exp(0) = 1:

    >>> expm(np.zeros((2,2)))
    array([[ 1.,  0.],
           [ 0.,  1.]])

    Euler's identity (exp(i*theta) = cos(theta) + i*sin(theta))
    applied to a matrix:

    >>> a = np.array([[1.0, 2.0], [-1.0, 3.0]])
    >>> expm(1j*a)
    array([[ 0.42645930+1.89217551j, -2.13721484-0.97811252j],
           [ 1.06860742+0.48905626j, -1.71075555+0.91406299j]])
    >>> cosm(a) + 1j*sinm(a)
    array([[ 0.42645930+1.89217551j, -2.13721484-0.97811252j],
           [ 1.06860742+0.48905626j, -1.71075555+0.91406299j]])
    """
    if q is not None:
        msg = "argument q=... in scipy.linalg.expm is deprecated."
        # stacklevel=2 makes the warning point at the caller's line rather
        # than at this function, which is where the fix must be made.
        warnings.warn(msg, DeprecationWarning, stacklevel=2)
    # Input checking and conversion is provided by sparse.linalg.expm().
    # Deferred import avoids a circular dependency between the dense and
    # sparse linalg subpackages.
    import scipy.sparse.linalg
    return scipy.sparse.linalg.expm(A)
# deprecated, but probably should be left there in the long term
@np.deprecate(new_name="expm")
def expm2(A):
    """
    Compute the matrix exponential using eigenvalue decomposition.

    Parameters
    ----------
    A : (N, N) array_like
        Matrix to be exponentiated

    Returns
    -------
    expm2 : (N, N) ndarray
        Matrix exponential of `A`
    """
    A = _asarray_square(A)
    kind = A.dtype.char
    if kind not in 'fFdD':
        # Promote non-float inputs (e.g. integer matrices) to double.
        A = A.astype('d')
        kind = 'd'
    # exp(A) = V * diag(exp(w)) * V^{-1}, where A = V * diag(w) * V^{-1}.
    w, v = eig(A)
    vinv = inv(v)
    r = dot(dot(v, diag(exp(w))), vinv)
    if kind in 'fd':
        # Real input: discard the spurious imaginary round-off part.
        return r.real.astype(kind)
    return r.astype(kind)
# deprecated, but probably should be left there in the long term
@np.deprecate(new_name="expm")
def expm3(A, q=20):
    """
    Compute the matrix exponential using Taylor series.

    Parameters
    ----------
    A : (N, N) array_like
        Matrix to be exponentiated
    q : int
        Order of the Taylor series used is `q-1`

    Returns
    -------
    expm3 : (N, N) ndarray
        Matrix exponential of `A`
    """
    A = _asarray_square(A)
    dim = A.shape[0]
    kind = A.dtype.char
    if kind not in 'fFdD':
        # Promote non-float inputs to double precision.
        A = A.astype('d')
        kind = 'd'
    to_scalar = cast[kind]
    # Accumulate the truncated series I + A + A^2/2! + ... + A^(q-1)/(q-1)!
    total = np.identity(dim, dtype=kind)
    term = np.identity(dim, dtype=kind)
    for k in range(1, q):
        term[:] = term.dot(A) / to_scalar(k)
        total += term
    return total
def cosm(A):
    """
    Compute the matrix cosine.

    This routine uses expm to compute the matrix exponentials, via
    cos(A) = (exp(iA) + exp(-iA)) / 2.

    Parameters
    ----------
    A : (N, N) array_like
        Input array

    Returns
    -------
    cosm : (N, N) ndarray
        Matrix cosine of A
    """
    A = _asarray_square(A)
    if not np.iscomplexobj(A):
        # Real input: cos(A) is exactly the real part of exp(iA).
        return expm(1j * A).real
    # Complex input: use the full Euler identity.
    return 0.5 * (expm(1j * A) + expm(-1j * A))
def sinm(A):
    """
    Compute the matrix sine.

    This routine uses expm to compute the matrix exponentials, via
    sin(A) = (exp(iA) - exp(-iA)) / 2i.

    Parameters
    ----------
    A : (N, N) array_like
        Input array.

    Returns
    -------
    sinm : (N, N) ndarray
        Matrix sine of `A`
    """
    A = _asarray_square(A)
    if not np.iscomplexobj(A):
        # Real input: sin(A) is exactly the imaginary part of exp(iA).
        return expm(1j * A).imag
    # Complex input: use the full Euler identity.
    return -0.5j * (expm(1j * A) - expm(-1j * A))
def tanm(A):
    """
    Compute the matrix tangent.

    This routine uses expm to compute the matrix exponentials.

    Parameters
    ----------
    A : (N, N) array_like
        Input array.

    Returns
    -------
    tanm : (N, N) ndarray
        Matrix tangent of `A`, i.e. inv(cosm(A)) @ sinm(A).
    """
    sq = _asarray_square(A)
    # tan(A) = cos(A)^{-1} sin(A); a linear solve is used instead of
    # forming the explicit inverse of cosm(A).
    tangent = solve(cosm(sq), sinm(sq))
    return _maybe_real(sq, tangent)
def coshm(A):
    """
    Compute the hyperbolic matrix cosine.

    This routine uses expm to compute the matrix exponentials, via
    cosh(A) = (exp(A) + exp(-A)) / 2.

    Parameters
    ----------
    A : (N, N) array_like
        Input array.

    Returns
    -------
    coshm : (N, N) ndarray
        Hyperbolic matrix cosine of `A`
    """
    sq = _asarray_square(A)
    value = 0.5 * (expm(sq) + expm(-sq))
    return _maybe_real(sq, value)
def sinhm(A):
    """
    Compute the hyperbolic matrix sine.

    This routine uses expm to compute the matrix exponentials, via
    sinh(A) = (exp(A) - exp(-A)) / 2.

    Parameters
    ----------
    A : (N, N) array_like
        Input array.

    Returns
    -------
    sinhm : (N, N) ndarray
        Hyperbolic matrix sine of `A`
    """
    sq = _asarray_square(A)
    value = 0.5 * (expm(sq) - expm(-sq))
    return _maybe_real(sq, value)
def tanhm(A):
    """
    Compute the hyperbolic matrix tangent.

    This routine uses expm to compute the matrix exponentials.

    Parameters
    ----------
    A : (N, N) array_like
        Input array

    Returns
    -------
    tanhm : (N, N) ndarray
        Hyperbolic matrix tangent of `A`, i.e. inv(coshm(A)) @ sinhm(A).
    """
    sq = _asarray_square(A)
    # tanh(A) = cosh(A)^{-1} sinh(A); solve instead of an explicit inverse.
    tangent = solve(coshm(sq), sinhm(sq))
    return _maybe_real(sq, tangent)
def funm(A, func, disp=True):
    """
    Evaluate a matrix function specified by a callable.

    Returns the value of matrix-valued function ``f`` at `A`. The
    function ``f`` is an extension of the scalar-valued function `func`
    to matrices.

    Parameters
    ----------
    A : (N, N) array_like
        Matrix at which to evaluate the function
    func : callable
        Callable object that evaluates a scalar function f.
        Must be vectorized (eg. using vectorize).
    disp : bool, optional
        Print warning if error in the result is estimated large
        instead of returning estimated error. (Default: True)

    Returns
    -------
    funm : (N, N) ndarray
        Value of the matrix function specified by func evaluated at `A`
    errest : float
        (if disp == False)
        1-norm of the estimated error, ||err||_1 / ||A||_1

    Examples
    --------
    >>> from scipy.linalg import funm
    >>> a = np.array([[1.0, 3.0], [1.0, 4.0]])
    >>> funm(a, lambda x: x*x)
    array([[  4.,  15.],
           [  5.,  19.]])
    >>> a.dot(a)
    array([[  4.,  15.],
           [  5.,  19.]])

    Notes
    -----
    This function implements the general algorithm based on Schur decomposition
    (Algorithm 9.1.1. in [1]_).

    If the input matrix is known to be diagonalizable, then relying on the
    eigendecomposition is likely to be faster. For example, if your matrix is
    Hermitian, you can do

    >>> from scipy.linalg import eigh
    >>> def funm_herm(a, func, check_finite=False):
    ...     w, v = eigh(a, check_finite=check_finite)
    ...     ## if you further know that your matrix is positive semidefinite,
    ...     ## you can optionally guard against precision errors by doing
    ...     # w = np.maximum(w, 0)
    ...     w = func(w)
    ...     return (v * w).dot(v.conj().T)

    References
    ----------
    .. [1] Gene H. Golub, Charles F. van Loan, Matrix Computations 4th ed.
    """
    A = _asarray_square(A)
    # Perform Schur decomposition (lapack ?gees)
    T, Z = schur(A)
    # Convert the real Schur form to complex upper-triangular form so the
    # eigenvalues sit directly on the diagonal of T.
    T, Z = rsf2csf(T, Z)
    n, n = T.shape
    F = diag(func(diag(T)))  # apply function to diagonal elements
    F = F.astype(T.dtype.char)  # e.g. when F is real but T is complex

    # Smallest denominator seen in the recurrence below; used to scale the
    # final error estimate (small denominators mean close eigenvalues and
    # a potentially inaccurate result).
    minden = abs(T[0, 0])

    # implement Algorithm 11.1.1 from Golub and Van Loan
    # "matrix Computations." — Parlett's recurrence: fill the strictly
    # upper triangle of F one superdiagonal (offset p) at a time. The
    # in-place updates of F are order-dependent: entry (i-1, j-1) uses
    # entries computed on all previous superdiagonals.
    for p in range(1, n):
        for i in range(1, n - p + 1):
            j = i + p
            s = T[i-1, j-1] * (F[j-1, j-1] - F[i-1, i-1])
            ksl = slice(i, j-1)
            val = dot(T[i-1, ksl], F[ksl, j-1]) - dot(F[i-1, ksl], T[ksl, j-1])
            s = s + val
            den = T[j-1, j-1] - T[i-1, i-1]
            # den == 0 means two equal eigenvalues; skip the division and
            # let the error estimate flag the (possibly inaccurate) result.
            if den != 0.0:
                s = s / den
            F[i-1, j-1] = s
            minden = min(minden, abs(den))

    # Transform back from Schur coordinates: f(A) = Z f(T) Z^H.
    F = dot(dot(Z, F), transpose(conjugate(Z)))
    F = _maybe_real(A, F)

    tol = {0: feps, 1: eps}[_array_precision[F.dtype.char]]
    if minden == 0.0:
        minden = tol
    # Error estimate grows with the off-diagonal mass of T and shrinking
    # minimum denominator; clamped to [tol, 1].
    err = min(1, max(tol, (tol/minden)*norm(triu(T, 1), 1)))
    # Any non-finite entry in F invalidates the result entirely.
    # NOTE(review): `product`, `ravel`, `Inf` are legacy NumPy aliases
    # (module-level imports elsewhere in this file); `product`/`Inf` were
    # removed in NumPy 2.0 — confirm the pinned NumPy version.
    if product(ravel(logical_not(isfinite(F))), axis=0):
        err = Inf
    if disp:
        if err > 1000*tol:
            print("funm result may be inaccurate, approximate err =", err)
        return F
    else:
        return F, err
def signm(A, disp=True):
    """
    Matrix sign function.

    Extension of the scalar sign(x) to matrices.

    Parameters
    ----------
    A : (N, N) array_like
        Matrix at which to evaluate the sign function
    disp : bool, optional
        Print warning if error in the result is estimated large
        instead of returning estimated error. (Default: True)

    Returns
    -------
    signm : (N, N) ndarray
        Value of the sign function at `A`
    errest : float
        (if disp == False)
        1-norm of the estimated error, ||err||_1 / ||A||_1

    Examples
    --------
    >>> from scipy.linalg import signm, eigvals
    >>> a = [[1,2,3], [1,2,1], [1,1,1]]
    >>> eigvals(a)
    array([ 4.12488542+0.j, -0.76155718+0.j,  0.63667176+0.j])
    >>> eigvals(signm(a))
    array([-1.+0.j,  1.+0.j,  1.+0.j])
    """
    A = _asarray_square(A)

    def rounded_sign(x):
        # Zero out entries whose magnitude is below the noise floor before
        # taking the sign, so round-off-sized values map to 0 rather than
        # to +/-1.
        rx = np.real(x)
        if rx.dtype.char == 'f':
            c = 1e3*feps*amax(x)
        else:
            c = 1e3*eps*amax(x)
        return sign((absolute(rx) > c) * rx)

    # First attempt: evaluate the rounded scalar sign through the general
    # Schur-based funm machinery, keeping its error estimate.
    result, errest = funm(A, rounded_sign, disp=0)
    errtol = {0: 1e3*feps, 1: 1e3*eps}[_array_precision[result.dtype.char]]
    if errest < errtol:
        return result

    # Handle signm of defective matrices:
    # See "E.D.Denman and J.Leyva-Ramos, Appl.Math.Comp.,
    # 8:237-250,1981" for how to improve the following (currently a
    # rather naive) iteration process:

    # a = result   # sometimes iteration converges faster but where??

    # Shifting to avoid zero eigenvalues. How to ensure that shifting does
    # not change the spectrum too much?
    vals = svd(A, compute_uv=0)
    max_sv = np.amax(vals)
    # min_nonzero_sv = vals[(vals>max_sv*errtol).tolist().count(1)-1]
    # c = 0.5/min_nonzero_sv
    c = 0.5/max_sv
    S0 = A + c*np.identity(A.shape[0])
    prev_errest = errest
    # Newton iteration S <- (S + S^{-1})/2, which converges to sign(S0).
    # Stops on convergence below errtol or when the estimate stalls.
    for i in range(100):
        iS0 = inv(S0)
        S0 = 0.5*(S0 + iS0)
        # P = (S^2 + S)/2 should be a projector at the fixed point;
        # ||P^2 - P||_1 measures how far we still are.
        Pp = 0.5*(dot(S0, S0)+S0)
        errest = norm(dot(Pp, Pp)-Pp, 1)
        if errest < errtol or prev_errest == errest:
            break
        prev_errest = errest
    if disp:
        if not isfinite(errest) or errest >= errtol:
            print("signm result may be inaccurate, approximate err =", errest)
        return S0
    else:
        return S0, errest
| |
#!/usr/bin/python
# Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unittests for build stages."""
from __future__ import print_function
import copy
import glob
import mox
import optparse
import os
import sys
import constants
sys.path.insert(0, constants.SOURCE_ROOT)
from chromite.cbuildbot import commands
from chromite.cbuildbot import cbuildbot_config as config
from chromite.cbuildbot import cbuildbot_run
from chromite.cbuildbot import manifest_version
from chromite.lib import cidb
from chromite.lib import cros_build_lib
from chromite.lib import cros_test_lib
from chromite.lib import osutils
from chromite.lib import parallel_unittest
from chromite.lib import partial_mock
from chromite.scripts import cbuildbot
# Chrome branch number returned via BuilderRunMock.GetVersionInfo below.
DEFAULT_CHROME_BRANCH = '27'

# pylint: disable=protected-access
class BuilderRunMock(partial_mock.PartialMock):
  """Partial mock pinning version lookups on _BuilderRunBase."""

  TARGET = 'chromite.cbuildbot.cbuildbot_run._BuilderRunBase'
  ATTRS = ('GetVersionInfo', 'DetermineChromeVersion')
  VERSION = '3333.1.0'
  CHROME_VERSION = '35.0.1234.5'

  def GetVersionInfo(self, _build_root):
    """Return a canned VersionInfo instead of reading the build root."""
    info = manifest_version.VersionInfo(version_string=self.VERSION,
                                        chrome_branch=DEFAULT_CHROME_BRANCH)
    return info

  def DetermineChromeVersion(self, _inst):
    """Return a canned Chrome version string."""
    return self.CHROME_VERSION
class TestExitedException(Exception):
  """Raised by the sys.exit() mock so tests can detect an exit attempt."""
class TestHaltedException(Exception):
  """Raised by mocks to halt execution without indicating failure."""
class TestFailedException(Exception):
  """Raised by mocks to halt execution and indicate failure."""
class RunBuildStagesTest(cros_test_lib.MoxTempDirTestCase,
                         cros_test_lib.MockTestCase):
  """Test that cbuildbot runs the appropriate stages for a given config."""

  VERSION = '1234.5.6'

  def setUp(self):
    self.buildroot = os.path.join(self.tempdir, 'buildroot')
    osutils.SafeMakedirs(self.buildroot)
    # Always stub RunCommand out as we use it in every method.
    self.bot_id = 'x86-generic-paladin'
    self.build_config = copy.deepcopy(config.config[self.bot_id])
    self.build_config['master'] = False
    self.build_config['important'] = False

    # Use the cbuildbot parser to create properties and populate default
    # values.
    self.parser = cbuildbot._CreateParser()
    argv = ['-r', self.buildroot, '--buildbot', '--debug',
            'x86-generic-paladin']
    (self.options, _) = cbuildbot._ParseCommandLine(self.parser, argv)
    # Disable every optional stage/feature so only the code under test runs.
    for attr in ('bootstrap', 'clean', 'resume', 'sync', 'build', 'uprev',
                 'tests', 'archive', 'remote_test_status', 'prebuilts'):
      setattr(self.options, attr, False)
    self.options.patches = None

    self._manager = cbuildbot.parallel.Manager()
    self._manager.__enter__()
    self.run = cbuildbot_run.BuilderRun(self.options, self.build_config,
                                        self._manager)
    self.StartPatcher(BuilderRunMock())

  def tearDown(self):
    # Mimic exiting a 'with' statement.
    self._manager.__exit__(None, None, None)

  def _ClearOfficialEnv(self):
    """Drop CHROMEOS_OFFICIAL from the environment if it is set."""
    os.environ.pop('CHROMEOS_OFFICIAL', None)

  def _MockBuildCommands(self):
    """Record the RunCommand expectations for one SimpleBuilder run."""
    self.mox.StubOutWithMock(cros_build_lib, 'RunCommand')
    cidb.CIDBConnectionFactory.SetupNoCidb()
    # cbuildbot first probes the re-exec API version of the checkout.
    api = self.mox.CreateMock(cros_build_lib.CommandResult)
    api.returncode = 0
    api.output = constants.REEXEC_API_VERSION
    cros_build_lib.RunCommand(
        [constants.PATH_TO_CBUILDBOT, '--reexec-api-version'],
        cwd=self.buildroot, capture_output=True, error_code_ok=True
        ).AndReturn(api)
    # The subsequent re-exec of cbuildbot inside the buildroot succeeds.
    result = self.mox.CreateMock(cros_build_lib.CommandResult)
    result.returncode = 0
    cros_build_lib.RunCommand(mox.IgnoreArg(), cwd=self.buildroot,
                              error_code_ok=True,
                              kill_timeout=mox.IgnoreArg()).AndReturn(result)

  def _CheckChromeosOfficial(self, official):
    """Run SimpleBuilder with chromeos_official=|official| and verify env.

    Asserts that CHROMEOS_OFFICIAL appears in os.environ iff |official|
    is True, and restores the environment afterwards even on failure.
    """
    self.build_config['chromeos_official'] = official
    # Clean up before the run.
    self._ClearOfficialEnv()
    self._MockBuildCommands()
    self.mox.ReplayAll()
    try:
      self.assertFalse('CHROMEOS_OFFICIAL' in os.environ)
      cbuildbot.SimpleBuilder(self.run).Run()
      self.assertEquals(official, 'CHROMEOS_OFFICIAL' in os.environ)
      self.mox.VerifyAll()
    finally:
      # Clean up even when an assertion fails so later tests see a pristine
      # environment (the previous version leaked the variable on failure).
      self._ClearOfficialEnv()

  def testChromeosOfficialSet(self):
    """Verify that CHROMEOS_OFFICIAL is set correctly."""
    self._CheckChromeosOfficial(official=True)

  def testChromeosOfficialNotSet(self):
    """Verify that CHROMEOS_OFFICIAL is not always set."""
    self._CheckChromeosOfficial(official=False)
class SimpleBuilderTest(cros_test_lib.MockTempDirTestCase):
  """Tests for the main code paths in cbuildbot.SimpleBuilder"""

  def setUp(self):
    self.buildroot = os.path.join(self.tempdir, 'buildroot')
    chroot_path = os.path.join(self.buildroot, constants.DEFAULT_CHROOT_DIR)
    osutils.SafeMakedirs(os.path.join(chroot_path, 'tmp'))

    # Stage bodies are stubbed out: these tests only verify which stages
    # get scheduled for each config, not what the stages actually do.
    self.PatchObject(cbuildbot.Builder, '_RunStage')
    self.PatchObject(cbuildbot.SimpleBuilder, '_RunParallelStages')
    self.PatchObject(cbuildbot_run._BuilderRunBase, 'GetVersion',
                     return_value='R32-1234.0.0')
    self.StartPatcher(parallel_unittest.ParallelMock())

    self._manager = cbuildbot.parallel.Manager()
    self._manager.__enter__()

  def tearDown(self):
    # Mimic exiting a 'with' statement.
    self._manager.__exit__(None, None, None)

  def _initConfig(self, bot_id, extra_argv=None):
    """Return normal options/build_config for |bot_id|"""
    build_config = copy.deepcopy(config.config[bot_id])
    build_config['master'] = False
    build_config['important'] = False

    # Use the cbuildbot parser to create properties and populate default
    # values.
    parser = cbuildbot._CreateParser()
    argv = (['-r', self.buildroot, '--buildbot', '--debug', '--nochromesdk'] +
            (extra_argv if extra_argv else []) + [bot_id])
    (options, _) = cbuildbot._ParseCommandLine(parser, argv)

    # Yikes.
    options.managed_chrome = build_config['sync_chrome']

    return cbuildbot_run.BuilderRun(options, build_config, self._manager)

  def testRunStagesPreCQ(self):
    """Verify RunStages for PRE_CQ_LAUNCHER_TYPE builders"""
    builder_run = self._initConfig('pre-cq-launcher')
    cbuildbot.SimpleBuilder(builder_run).RunStages()

  def testRunStagesBranchUtil(self):
    """Verify RunStages for CREATE_BRANCH_TYPE builders"""
    extra_argv = ['--branch-name', 'foo', '--version', '1234']
    builder_run = self._initConfig(constants.BRANCH_UTIL_CONFIG,
                                   extra_argv=extra_argv)
    cbuildbot.SimpleBuilder(builder_run).RunStages()

  def testRunStagesChrootBuilder(self):
    """Verify RunStages for CHROOT_BUILDER_TYPE builders"""
    builder_run = self._initConfig('chromiumos-sdk')
    cbuildbot.SimpleBuilder(builder_run).RunStages()

  def testRunStagesRefreshPackages(self):
    """Verify RunStages for REFRESH_PACKAGES_TYPE builders"""
    builder_run = self._initConfig('refresh-packages')
    cbuildbot.SimpleBuilder(builder_run).RunStages()

  def testRunStagesDefaultBuild(self):
    """Verify RunStages for standard board builders"""
    builder_run = self._initConfig('x86-generic-full')
    builder_run.attrs.chrome_version = 'TheChromeVersion'
    cbuildbot.SimpleBuilder(builder_run).RunStages()

  def testRunStagesDefaultBuildCompileCheck(self):
    """Verify RunStages for standard board builders (compile only)"""
    extra_argv = ['--compilecheck']
    builder_run = self._initConfig('x86-generic-full', extra_argv=extra_argv)
    builder_run.attrs.chrome_version = 'TheChromeVersion'
    cbuildbot.SimpleBuilder(builder_run).RunStages()

  def testRunStagesDefaultBuildHwTests(self):
    """Verify RunStages for boards w/hwtests"""
    extra_argv = ['--hwtest']
    builder_run = self._initConfig('lumpy-release', extra_argv=extra_argv)
    builder_run.attrs.chrome_version = 'TheChromeVersion'
    cbuildbot.SimpleBuilder(builder_run).RunStages()
class LogTest(cros_test_lib.TempDirTestCase):
  """Test logging functionality."""

  def _generateLogs(self, num):
    """Generates cbuildbot.log and num backups."""
    log_path = os.path.join(self.tempdir, 'cbuildbot.log')
    with open(log_path, 'w') as f:
      f.write(str(num + 1))
    for backup in range(1, num + 1):
      with open('%s.%s' % (log_path, backup), 'w') as f:
        f.write(str(backup))

  def testZeroToOneLogs(self):
    """Test beginning corner case."""
    log_path = os.path.join(self.tempdir, 'cbuildbot.log')
    self._generateLogs(0)
    cbuildbot._BackupPreviousLog(log_path, backup_limit=25)
    with open(log_path + '.1') as f:
      self.assertEquals(f.readline(), '1')

  def testNineToTenLogs(self):
    """Test handling *.log.9 to *.log.10 (correct sorting)."""
    log_path = os.path.join(self.tempdir, 'cbuildbot.log')
    self._generateLogs(9)
    cbuildbot._BackupPreviousLog(log_path, backup_limit=25)
    with open(log_path + '.10') as f:
      self.assertEquals(f.readline(), '10')

  def testOverLimit(self):
    """Test going over the limit and having to purge old logs."""
    log_path = os.path.join(self.tempdir, 'cbuildbot.log')
    self._generateLogs(25)
    cbuildbot._BackupPreviousLog(log_path, backup_limit=25)
    with open(log_path + '.26') as f:
      self.assertEquals(f.readline(), '26')
    backups = glob.glob(os.path.join(self.tempdir, 'cbuildbot*'))
    self.assertEquals(len(backups), 25)
class InterfaceTest(cros_test_lib.MoxTestCase, cros_test_lib.LoggingTestCase):
  """Test the command line interface."""

  # Config name and buildroot used across the tests below.
  _X86_PREFLIGHT = 'x86-generic-paladin'
  _BUILD_ROOT = '/b/test_build1'

  def setUp(self):
    self.parser = cbuildbot._CreateParser()

  def assertDieSysExit(self, *args, **kwargs):
    # Helper: assert the invocation Die()s (raises DieSystemExit).
    self.assertRaises(cros_build_lib.DieSystemExit, *args, **kwargs)

  def testDepotTools(self):
    """Test that the entry point used by depot_tools works."""
    path = os.path.join(constants.SOURCE_ROOT, 'chromite', 'buildbot',
                        'cbuildbot')

    # Verify the tests below actually are testing correct behaviour;
    # specifically that it doesn't always just return 0.
    self.assertRaises(cros_build_lib.RunCommandError,
                      cros_build_lib.RunCommand,
                      ['cbuildbot', '--monkeys'], cwd=constants.SOURCE_ROOT)

    # Validate depot_tools lookup.
    cros_build_lib.RunCommand(
        ['cbuildbot', '--help'], cwd=constants.SOURCE_ROOT,
        capture_output=True)

    # Validate buildbot invocation pathway.
    cros_build_lib.RunCommand(
        [path, '--help'], cwd=constants.SOURCE_ROOT, capture_output=True)

  def testDebugBuildBotSetByDefault(self):
    """Test that debug and buildbot flags are set by default."""
    args = ['--local', '-r', self._BUILD_ROOT, self._X86_PREFLIGHT]
    (options, args) = cbuildbot._ParseCommandLine(self.parser, args)
    self.assertEquals(options.debug, True)
    self.assertEquals(options.buildbot, False)

  def testBuildBotOption(self):
    """Test that --buildbot option unsets debug flag."""
    args = ['-r', self._BUILD_ROOT, '--buildbot', self._X86_PREFLIGHT]
    (options, args) = cbuildbot._ParseCommandLine(self.parser, args)
    self.assertEquals(options.debug, False)
    self.assertEquals(options.buildbot, True)

  def testBuildBotWithDebugOption(self):
    """Test that --debug option overrides --buildbot option."""
    args = ['-r', self._BUILD_ROOT, '--buildbot', '--debug',
            self._X86_PREFLIGHT]
    (options, args) = cbuildbot._ParseCommandLine(self.parser, args)
    self.assertEquals(options.debug, True)
    self.assertEquals(options.buildbot, True)

  def testLocalTrybotWithSpacesInPatches(self):
    """Test that we handle spaces in patch arguments."""
    # Note: the patch list deliberately contains spaces and a tab, which
    # the parser is expected to strip/split.
    args = ['-r', self._BUILD_ROOT, '--remote', '--local-patches',
            ' proj:br \t  proj2:b2 ',
            self._X86_PREFLIGHT]
    (options, args) = cbuildbot._ParseCommandLine(self.parser, args)
    self.assertEquals(options.local_patches, ['proj:br', 'proj2:b2'])

  def testBuildBotWithRemotePatches(self):
    """Test that --buildbot errors out with patches."""
    args = ['-r', self._BUILD_ROOT, '--buildbot', '-g', '1234',
            self._X86_PREFLIGHT]
    self.assertDieSysExit(cbuildbot._ParseCommandLine, self.parser, args)

  def testRemoteBuildBotWithRemotePatches(self):
    """Test that --buildbot and --remote errors out with patches."""
    args = ['-r', self._BUILD_ROOT, '--buildbot', '--remote', '-g', '1234',
            self._X86_PREFLIGHT]
    self.assertDieSysExit(cbuildbot._ParseCommandLine, self.parser, args)

  def testBuildbotDebugWithPatches(self):
    """Test we can test patches with --buildbot --debug."""
    args = ['--remote', '-g', '1234', '--debug', '--buildbot',
            self._X86_PREFLIGHT]
    cbuildbot._ParseCommandLine(self.parser, args)

  def testBuildBotWithoutProfileOption(self):
    """Test that no --profile option gets defaulted."""
    args = ['--buildbot', self._X86_PREFLIGHT]
    (options, args) = cbuildbot._ParseCommandLine(self.parser, args)
    self.assertEquals(options.profile, None)

  def testBuildBotWithProfileOption(self):
    """Test that --profile option gets parsed."""
    args = ['--buildbot', '--profile', 'carp', self._X86_PREFLIGHT]
    (options, args) = cbuildbot._ParseCommandLine(self.parser, args)
    self.assertEquals(options.profile, 'carp')

  def testValidateClobberUserDeclines_1(self):
    """Test case where user declines in prompt."""
    self.mox.StubOutWithMock(os.path, 'exists')
    self.mox.StubOutWithMock(cros_build_lib, 'GetInput')

    os.path.exists(self._BUILD_ROOT).AndReturn(True)
    cros_build_lib.GetInput(mox.IgnoreArg()).AndReturn('No')
    self.mox.ReplayAll()
    self.assertFalse(commands.ValidateClobber(self._BUILD_ROOT))
    self.mox.VerifyAll()

  def testValidateClobberUserDeclines_2(self):
    """Test case where user does not enter the full 'yes' pattern."""
    self.mox.StubOutWithMock(os.path, 'exists')
    self.mox.StubOutWithMock(cros_build_lib, 'GetInput')

    os.path.exists(self._BUILD_ROOT).AndReturn(True)
    # First answer is not a recognized yes/no, so the prompt repeats.
    cros_build_lib.GetInput(mox.IgnoreArg()).AndReturn('asdf')
    cros_build_lib.GetInput(mox.IgnoreArg()).AndReturn('No')
    self.mox.ReplayAll()
    self.assertFalse(commands.ValidateClobber(self._BUILD_ROOT))
    self.mox.VerifyAll()

  def testValidateClobberProtectRunningChromite(self):
    """User should not be clobbering our own source."""
    cwd = os.path.dirname(os.path.realpath(__file__))
    buildroot = os.path.dirname(cwd)
    self.assertDieSysExit(commands.ValidateClobber, buildroot)

  def testValidateClobberProtectRoot(self):
    """User should not be clobbering /"""
    self.assertDieSysExit(commands.ValidateClobber, '/')

  def testBuildBotWithBadChromeRevOption(self):
    """chrome_rev can't be passed an invalid option after chrome_root."""
    args = [
        '--local',
        '--buildroot=/tmp',
        '--chrome_root=.',
        '--chrome_rev=%s' % constants.CHROME_REV_TOT,
        self._X86_PREFLIGHT,
    ]
    self.assertDieSysExit(cbuildbot._ParseCommandLine, self.parser, args)

  def testBuildBotWithBadChromeRootOption(self):
    """chrome_root can't get passed after non-local chrome_rev."""
    args = [
        '--local',
        '--buildroot=/tmp',
        '--chrome_rev=%s' % constants.CHROME_REV_TOT,
        '--chrome_root=.',
        self._X86_PREFLIGHT,
    ]
    self.assertDieSysExit(cbuildbot._ParseCommandLine, self.parser, args)

  def testBuildBotWithBadChromeRevOptionLocal(self):
    """chrome_rev can't be local without chrome_root."""
    args = [
        '--local',
        '--buildroot=/tmp',
        '--chrome_rev=%s' % constants.CHROME_REV_LOCAL,
        self._X86_PREFLIGHT,
    ]
    self.assertDieSysExit(cbuildbot._ParseCommandLine, self.parser, args)

  def testBuildBotWithGoodChromeRootOption(self):
    """chrome_root can be set without chrome_rev."""
    args = [
        '--local',
        '--buildroot=/tmp',
        '--chrome_root=.',
        self._X86_PREFLIGHT,
    ]
    self.mox.ReplayAll()
    (options, args) = cbuildbot._ParseCommandLine(self.parser, args)
    self.mox.VerifyAll()
    self.assertEquals(options.chrome_rev, constants.CHROME_REV_LOCAL)
    self.assertNotEquals(options.chrome_root, None)

  def testBuildBotWithGoodChromeRevAndRootOption(self):
    """chrome_rev can get reset around chrome_root."""
    # Repeated --chrome_rev flags: the last one (after --chrome_root) wins.
    args = [
        '--local',
        '--buildroot=/tmp',
        '--chrome_rev=%s' % constants.CHROME_REV_LATEST,
        '--chrome_rev=%s' % constants.CHROME_REV_STICKY,
        '--chrome_rev=%s' % constants.CHROME_REV_TOT,
        '--chrome_rev=%s' % constants.CHROME_REV_TOT,
        '--chrome_rev=%s' % constants.CHROME_REV_STICKY,
        '--chrome_rev=%s' % constants.CHROME_REV_LATEST,
        '--chrome_rev=%s' % constants.CHROME_REV_LOCAL,
        '--chrome_root=.',
        '--chrome_rev=%s' % constants.CHROME_REV_TOT,
        '--chrome_rev=%s' % constants.CHROME_REV_LOCAL,
        self._X86_PREFLIGHT,
    ]
    self.mox.ReplayAll()
    (options, args) = cbuildbot._ParseCommandLine(self.parser, args)
    self.mox.VerifyAll()
    self.assertEquals(options.chrome_rev, constants.CHROME_REV_LOCAL)
    self.assertNotEquals(options.chrome_root, None)

  def testPassThroughOptions(self):
    """Test we are building up pass-through list properly."""
    args = ['--remote', '-g', '1234', self._X86_PREFLIGHT]
    (options, args) = cbuildbot._ParseCommandLine(self.parser, args)
    self.assertEquals(options.pass_through_args, ['-g', '1234'])

  def testDebugPassThrough(self):
    """Test we are passing --debug through."""
    args = ['--remote', '--debug', '--buildbot', self._X86_PREFLIGHT]
    (options, args) = cbuildbot._ParseCommandLine(self.parser, args)
    self.assertEquals(options.pass_through_args, ['--debug', '--buildbot'])

  def testCreateBranch(self):
    """Test a normal create branch run."""
    args = ['--branch-name', 'refs/heads/test', constants.BRANCH_UTIL_CONFIG]
    self.assertDieSysExit(cbuildbot._ParseCommandLine, self.parser, args)

  def testCreateBranchNoVersion(self):
    """Test we require --version with branch-util."""
    with cros_test_lib.LoggingCapturer('chromite') as logger:
      args = [constants.BRANCH_UTIL_CONFIG]
      self.assertDieSysExit(cbuildbot._ParseCommandLine, self.parser, args)
      self.AssertLogsContain(logger, '--branch-name')

  def testCreateBranchDelete(self):
    """Test we don't require --version with --delete."""
    args = ['--delete-branch', '--branch-name', 'refs/heads/test',
            constants.BRANCH_UTIL_CONFIG]
    cbuildbot._ParseCommandLine(self.parser, args)

  def testBranchOptionsWithoutBranchConfig(self):
    """Error out when branch options passed in without branch-util config."""
    for extra_args in [['--delete-branch'],
                       ['--branch-name', 'refs/heads/test'],
                       ['--rename-to', 'abc']]:
      with cros_test_lib.LoggingCapturer('chromite') as logger:
        args = [self._X86_PREFLIGHT] + extra_args
        self.assertDieSysExit(cbuildbot._ParseCommandLine, self.parser, args)
        self.AssertLogsContain(logger, 'Cannot specify')
class FullInterfaceTest(cros_test_lib.MoxTempDirTestCase):
"""Tests that run the cbuildbot.main() function directly.
Note this explicitly suppresses automatic VerifyAll() calls; thus if you want
that checked, you have to invoke it yourself.
"""
mox_suppress_verify_all = True
def MakeTestRootDir(self, relpath):
abspath = os.path.join(self.root, relpath)
os.makedirs(abspath)
return abspath
def setUp(self):
self.root = self.tempdir
self.buildroot = self.MakeTestRootDir('build_root')
self.sourceroot = self.MakeTestRootDir('source_root')
self.trybot_root = self.MakeTestRootDir('trybot')
self.trybot_internal_root = self.MakeTestRootDir('trybot-internal')
self.external_marker = os.path.join(self.trybot_root, '.trybot')
self.internal_marker = os.path.join(self.trybot_internal_root, '.trybot')
os.makedirs(os.path.join(self.sourceroot, '.repo', 'manifests'))
os.makedirs(os.path.join(self.sourceroot, '.repo', 'repo'))
# Create the parser before we stub out os.path.exists() - which the parser
# creation code actually uses.
parser = cbuildbot._CreateParser()
# Stub out all relevant methods regardless of whether they are called in the
# specific test case. We can do this because we don't run VerifyAll() at
# the end of every test.
self.mox.StubOutWithMock(optparse.OptionParser, 'error')
self.mox.StubOutWithMock(cros_build_lib, 'IsInsideChroot')
self.mox.StubOutWithMock(cbuildbot, '_CreateParser')
self.mox.StubOutWithMock(sys, 'exit')
self.mox.StubOutWithMock(cros_build_lib, 'GetInput')
self.mox.StubOutWithMock(cbuildbot, '_RunBuildStagesWrapper')
parser.error(mox.IgnoreArg()).InAnyOrder().AndRaise(TestExitedException())
cros_build_lib.IsInsideChroot().InAnyOrder().AndReturn(False)
cbuildbot._CreateParser().InAnyOrder().AndReturn(parser)
sys.exit(mox.IgnoreArg()).InAnyOrder().AndRaise(TestExitedException())
cbuildbot._RunBuildStagesWrapper(
mox.IgnoreArg(),
mox.IgnoreArg()).InAnyOrder().AndReturn(True)
def assertMain(self, args, common_options=True):
if common_options:
# Suppress cgroups code. For cbuildbot invocation, it doesn't hugely
# care about cgroups- that's a blackbox to it. As such these unittests
# should not be sensitive to it.
args.extend(['--sourceroot', self.sourceroot, '--nocgroups',
'--notee'])
return cbuildbot.main(args)
def testNullArgsStripped(self):
    """Test that null args are stripped out and don't cause error."""
    self.mox.ReplayAll()
    argv = ['--local', '-r', self.buildroot, '', '', 'x86-generic-paladin']
    self.assertMain(argv)
def testMultipleConfigsError(self):
    """Test that multiple configs cause error if --remote is not used."""
    self.mox.ReplayAll()
    argv = ['--local', '-r', self.buildroot,
            'arm-generic-paladin', 'x86-generic-paladin']
    self.assertRaises(cros_build_lib.DieSystemExit, self.assertMain, argv)
def testDontInferBuildrootForBuildBotRuns(self):
    """Test that we don't infer buildroot if run with --buildbot option."""
    self.mox.ReplayAll()
    argv = ['--buildbot', 'x86-generic-paladin']
    self.assertRaises(TestExitedException, self.assertMain, argv)
def testInferExternalBuildRoot(self):
    """Test that we default to correct buildroot for external config."""
    self.mox.StubOutWithMock(cbuildbot, '_ConfirmBuildRoot')
    # Any attempt to confirm the buildroot halts the run.
    (cbuildbot._ConfirmBuildRoot(mox.IgnoreArg())
        .InAnyOrder().AndRaise(TestHaltedException()))
    self.mox.ReplayAll()
    argv = ['--local', 'x86-generic-paladin']
    self.assertRaises(TestHaltedException, self.assertMain, argv)
def testInferInternalBuildRoot(self):
    """Test that we default to correct buildroot for internal config."""
    self.mox.StubOutWithMock(cbuildbot, '_ConfirmBuildRoot')
    # Any attempt to confirm the buildroot halts the run.
    (cbuildbot._ConfirmBuildRoot(mox.IgnoreArg())
        .InAnyOrder().AndRaise(TestHaltedException()))
    self.mox.ReplayAll()
    argv = ['--local', 'x86-mario-paladin']
    self.assertRaises(TestHaltedException, self.assertMain, argv)
def testInferBuildRootPromptNo(self):
    """Test that a 'no' answer on the prompt halts execution."""
    cros_build_lib.GetInput(mox.IgnoreArg()).InAnyOrder().AndReturn('no')
    self.mox.ReplayAll()
    argv = ['--local', 'x86-generic-paladin']
    self.assertRaises(TestExitedException, self.assertMain, argv)
def testInferBuildRootExists(self):
    """Test that we don't prompt the user if buildroot already exists."""
    cros_build_lib.RunCommand(['touch', self.external_marker],
                              capture_output=True)
    os.utime(self.external_marker, None)
    # If the code prompts the user at all, the test fails.
    cros_build_lib.GetInput(mox.IgnoreArg()).InAnyOrder().AndRaise(
        TestFailedException())
    self.mox.ReplayAll()
    argv = ['--local', 'x86-generic-paladin']
    self.assertMain(argv)
def testBuildbotDiesInChroot(self):
    """Buildbot should quit if run inside a chroot."""
    # Need to do this since a cros_build_lib.IsInsideChroot() call is already
    # queued up in setup() and we can't Reset() an individual mock.
    # pylint: disable=not-callable
    new_is_inside_chroot = self.mox.CreateMockAnything()
    new_is_inside_chroot().InAnyOrder().AndReturn(True)
    # Replaces the module attribute directly, discarding the stub installed
    # in setUp() for this one test.
    cros_build_lib.IsInsideChroot = new_is_inside_chroot
    self.mox.ReplayAll()
    self.assertRaises(cros_build_lib.DieSystemExit, self.assertMain,
                      ['--local', '-r', self.buildroot, 'x86-generic-paladin'])
# Standard test-runner entry point.
if __name__ == '__main__':
    cros_test_lib.main()
| |
# Copyright (C) 2014 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Defines base data types and models required specifically for VRF support.
"""
import abc
import logging
import six
from ryu.lib.packet.bgp import BGP_ATTR_TYPE_ORIGIN
from ryu.lib.packet.bgp import BGP_ATTR_TYPE_AS_PATH
from ryu.lib.packet.bgp import BGP_ATTR_TYPE_EXTENDED_COMMUNITIES
from ryu.lib.packet.bgp import BGP_ATTR_TYPE_MULTI_EXIT_DISC
from ryu.lib.packet.bgp import BGPPathAttributeOrigin
from ryu.lib.packet.bgp import BGPPathAttributeAsPath
from ryu.lib.packet.bgp import BGPPathAttributeExtendedCommunities
from ryu.lib.packet.bgp import BGPTwoOctetAsSpecificExtendedCommunity
from ryu.lib.packet.bgp import BGPPathAttributeMultiExitDisc
from ryu.lib.packet.bgp import RF_L2_EVPN
from ryu.lib.packet.bgp import EvpnMacIPAdvertisementNLRI
from ryu.services.protocols.bgp.base import OrderedDict
from ryu.services.protocols.bgp.constants import VPN_TABLE
from ryu.services.protocols.bgp.constants import VRF_TABLE
from ryu.services.protocols.bgp.info_base.base import Destination
from ryu.services.protocols.bgp.info_base.base import Path
from ryu.services.protocols.bgp.info_base.base import Table
from ryu.services.protocols.bgp.utils.stats import LOCAL_ROUTES
from ryu.services.protocols.bgp.utils.stats import REMOTE_ROUTES
from ryu.services.protocols.bgp.utils.stats import RESOURCE_ID
from ryu.services.protocols.bgp.utils.stats import RESOURCE_NAME
# Module-level logger for the VRF information base.
LOG = logging.getLogger('bgpspeaker.info_base.vrf')
@six.add_metaclass(abc.ABCMeta)
class VrfTable(Table):
    """Virtual Routing and Forwarding information base.

    Keeps destination imported to given vrf in represents.
    """

    # Concrete subclasses (IPv4/IPv6/EVPN VRF tables) fill these in with the
    # route families and NLRI/path/destination classes they operate on.
    ROUTE_FAMILY = None
    VPN_ROUTE_FAMILY = None
    NLRI_CLASS = None
    VRF_PATH_CLASS = None
    VRF_DEST_CLASS = None

    def __init__(self, vrf_conf, core_service, signal_bus):
        # The VRF's route distinguisher doubles as the table's scope id.
        Table.__init__(self, vrf_conf.route_dist, core_service, signal_bus)
        self._vrf_conf = vrf_conf
        self._import_maps = []
        self.init_import_maps(vrf_conf.import_maps)

    def init_import_maps(self, import_maps):
        """Replace current import maps with those named in `import_maps`.

        Raises:
            KeyError: if a name is not known to the import-map manager.
        """
        LOG.debug(
            "Initializing import maps (%s) for %r", import_maps, self
        )
        del self._import_maps[:]
        importmap_manager = self._core_service.importmap_manager
        for name in import_maps:
            import_map = importmap_manager.get_import_map_by_name(name)
            if import_map is None:
                raise KeyError('No import map with name %s' % name)
            self._import_maps.append(import_map)

    @property
    def import_rts(self):
        """Route targets this VRF imports."""
        return self._vrf_conf.import_rts

    @property
    def vrf_conf(self):
        return self._vrf_conf

    def _table_key(self, nlri):
        """Return a key that will uniquely identify this NLRI inside
        this table.
        """
        # Note: We use `prefix` representation of the NLRI, because
        # BGP route can be identified without the route distinguisher
        # value in the VRF space.
        return nlri.prefix

    def _create_dest(self, nlri):
        return self.VRF_DEST_CLASS(self, nlri)

    def append_import_map(self, import_map):
        self._import_maps.append(import_map)

    def remove_import_map(self, import_map):
        # Raises ValueError if `import_map` is not currently installed.
        self._import_maps.remove(import_map)

    def get_stats_summary_dict(self):
        """Returns count of local and remote paths."""
        remote_route_count = 0
        local_route_count = 0
        for dest in self.values():
            for path in dest.known_path_list:
                # Paths sourced from a peer (has version_num) or from the
                # VPN table count as remote; everything else is local.
                if (hasattr(path.source, 'version_num') or
                        path.source == VPN_TABLE):
                    remote_route_count += 1
                else:
                    local_route_count += 1
        return {RESOURCE_ID: self._vrf_conf.id,
                RESOURCE_NAME: self._vrf_conf.name,
                REMOTE_ROUTES: remote_route_count,
                LOCAL_ROUTES: local_route_count}

    def import_vpn_paths_from_table(self, vpn_table, import_rts=None):
        """Import best paths from `vpn_table` whose RTs intersect ours.

        :Parameters:
            - `vpn_table`: global VPN table whose destinations are scanned.
            - `import_rts`: optional iterable of route targets to match;
              defaults to this VRF's configured import RTs.
        """
        for vpn_dest in vpn_table.values():
            vpn_path = vpn_dest.best_path
            if not vpn_path:
                continue

            # NOTE(review): recomputed every iteration; result is identical
            # each time, so this is merely redundant work, not a bug.
            if import_rts is None:
                import_rts = set(self.import_rts)
            else:
                import_rts = set(import_rts)

            path_rts = vpn_path.get_rts()
            if import_rts.intersection(path_rts):
                # TODO(PH): When (re-)implementing extranet, check what should
                # be the label reported back to NC for local paths coming from
                # other VRFs.
                self.import_vpn_path(vpn_path)

    def import_vpn_path(self, vpn_path):
        """Imports `vpnv(4|6)_path` into `vrf(4|6)_table` or `evpn_path`
        into vrfevpn_table`.

        :Parameters:
            - `vpn_path`: (Path) VPN path that will be cloned and imported
            into VRF.

        Note: Does not do any checking if this import is valid.
        """
        assert vpn_path.route_family == self.VPN_ROUTE_FAMILY
        # If source of given vpnv4 path is NC we import it to given VRF
        # table because of extranet setting. Hence we identify source of
        # EXTRANET prefixes as VRF_TABLE, else VPN_TABLE.
        source = vpn_path.source
        if not source:
            source = VRF_TABLE

        if self.VPN_ROUTE_FAMILY == RF_L2_EVPN:
            # Rebuild the EVPN NLRI from its attributes, dropping the route
            # type discriminator which the constructor does not accept.
            nlri_cls = self.NLRI_CLASS._lookup_type(vpn_path.nlri.type)
            kwargs = dict(vpn_path.nlri.__dict__)
            kwargs.pop('type', None)
            vrf_nlri = nlri_cls(**kwargs)
        else:  # self.VPN_ROUTE_FAMILY in [RF_IPv4_VPN, RF_IPv6_VPN]
            # Copy NLRI instance
            ip, masklen = vpn_path.nlri.prefix.split('/')
            vrf_nlri = self.NLRI_CLASS(length=int(masklen), addr=ip)

        vrf_path = self.VRF_PATH_CLASS(
            puid=self.VRF_PATH_CLASS.create_puid(
                vpn_path.nlri.route_dist,
                vpn_path.nlri.prefix),
            source=source,
            nlri=vrf_nlri,
            src_ver_num=vpn_path.source_version_num,
            pattrs=vpn_path.pathattr_map,
            nexthop=vpn_path.nexthop,
            is_withdraw=vpn_path.is_withdraw,
            label_list=vpn_path.nlri.label_list
        )
        # Skip duplicates and paths rejected by import maps; otherwise
        # insert and notify listeners of the changed destination.
        if self._is_vrf_path_already_in_table(vrf_path):
            return None
        if self._is_vrf_path_filtered_out_by_import_maps(vrf_path):
            return None
        else:
            vrf_dest = self.insert(vrf_path)
            self._signal_bus.dest_changed(vrf_dest)

    def _is_vrf_path_filtered_out_by_import_maps(self, vrf_path):
        """True if any installed import map rejects `vrf_path`."""
        for import_map in self._import_maps:
            if import_map.match(vrf_path):
                return True
        return False

    def _is_vrf_path_already_in_table(self, vrf_path):
        """True if an equal path is already known for this destination."""
        dest = self._get_dest(vrf_path.nlri)
        if dest is None:
            return False
        return vrf_path in dest.known_path_list

    def apply_import_maps(self):
        """Withdraw known paths matched by any import map.

        Returns:
            List of destinations whose path set changed.
        """
        changed_dests = []
        for dest in self.values():
            assert isinstance(dest, VrfDest)
            for import_map in self._import_maps:
                for path in dest.known_path_list:
                    if import_map.match(path):
                        dest.withdraw_path(path)
                        changed_dests.append(dest)
        return changed_dests

    def insert_vrf_path(self, nlri, next_hop=None,
                        gen_lbl=False, is_withdraw=False):
        """Insert a locally-originated path for `nlri` into this VRF.

        :Parameters:
            - `nlri`: NLRI to advertise (or withdraw).
            - `next_hop`: optional next hop for the path.
            - `gen_lbl`: if True, allocate an MPLS label for the path.
            - `is_withdraw`: if True, insert a withdrawal instead.

        Returns:
            The list of labels allocated for the path (possibly empty).
        """
        assert nlri
        pattrs = None
        label_list = []
        vrf_conf = self.vrf_conf
        if not is_withdraw:
            # Create a dictionary for path-attrs.
            pattrs = OrderedDict()

            # MpReachNlri and/or MpUnReachNlri attribute info. is contained
            # in the path. Hence we do not add these attributes here.
            # NOTE(review): local import presumably avoids a circular
            # dependency with the core module - TODO confirm.
            from ryu.services.protocols.bgp.core import EXPECTED_ORIGIN

            pattrs[BGP_ATTR_TYPE_ORIGIN] = BGPPathAttributeOrigin(
                EXPECTED_ORIGIN)
            pattrs[BGP_ATTR_TYPE_AS_PATH] = BGPPathAttributeAsPath([])
            communities = []

            # Add export route targets (subtype 2) and site-of-origin
            # values (subtype 3) as two-octet AS-specific communities.
            for rt in vrf_conf.export_rts:
                as_num, local_admin = rt.split(':')
                subtype = 2
                communities.append(BGPTwoOctetAsSpecificExtendedCommunity(
                    as_number=int(as_num),
                    local_administrator=int(local_admin),
                    subtype=subtype))
            for soo in vrf_conf.soo_list:
                as_num, local_admin = soo.split(':')
                subtype = 3
                communities.append(BGPTwoOctetAsSpecificExtendedCommunity(
                    as_number=int(as_num),
                    local_administrator=int(local_admin),
                    subtype=subtype))
            pattrs[BGP_ATTR_TYPE_EXTENDED_COMMUNITIES] = \
                BGPPathAttributeExtendedCommunities(communities=communities)
            if vrf_conf.multi_exit_disc:
                pattrs[BGP_ATTR_TYPE_MULTI_EXIT_DISC] = \
                    BGPPathAttributeMultiExitDisc(vrf_conf.multi_exit_disc)

        table_manager = self._core_service.table_manager
        if gen_lbl and next_hop:
            # Label per next_hop demands we use a different label
            # per next_hop. Here connected interfaces are advertised per
            # VRF.
            label_key = (vrf_conf.route_dist, next_hop)
            nh_label = table_manager.get_nexthop_label(label_key)
            if not nh_label:
                nh_label = table_manager.get_next_vpnv4_label()
                table_manager.set_nexthop_label(label_key, nh_label)
            label_list.append(nh_label)
        elif gen_lbl:
            # If we do not have next_hop, get a new label.
            label_list.append(table_manager.get_next_vpnv4_label())

        # Set MPLS labels with the generated labels
        if isinstance(nlri, EvpnMacIPAdvertisementNLRI):
            # EVPN MAC/IP advertisement carries at most two labels.
            nlri.mpls_labels = label_list[:2]

        puid = self.VRF_PATH_CLASS.create_puid(
            vrf_conf.route_dist, nlri.prefix)
        path = self.VRF_PATH_CLASS(
            puid, None, nlri, 0, pattrs=pattrs,
            nexthop=next_hop, label_list=label_list,
            is_withdraw=is_withdraw
        )
        # Insert the path into VRF table, get affected destination so that we
        # can process it further.
        eff_dest = self.insert(path)
        # Enqueue the eff_dest for further processing.
        self._signal_bus.dest_changed(eff_dest)
        return label_list

    def clean_uninteresting_paths(self, interested_rts=None):
        """Drop paths whose RTs no longer intersect `interested_rts`.

        Defaults to this VRF's configured import RTs.
        """
        if interested_rts is None:
            interested_rts = set(self.vrf_conf.import_rts)
        return super(VrfTable, self).clean_uninteresting_paths(interested_rts)
@six.add_metaclass(abc.ABCMeta)
class VrfDest(Destination):
    """Base class for VRF destination."""

    def __init__(self, table, nlri):
        super(VrfDest, self).__init__(table, nlri)
        # Route distinguisher of the VRF this destination belongs to.
        self._route_dist = self._table.vrf_conf.route_dist

    @property
    def nlri_str(self):
        # Returns `prefix` without the route distinguisher value, because
        # a destination in VRF space can be identified without the route
        # distinguisher.
        return self._nlri.prefix

    def _best_path_lost(self):
        """Propagate the withdrawal of the current best path."""
        # Have to send update messages for withdraw of best-path to Network
        # controller or Global table.
        old_best_path = self._best_path
        self._best_path = None

        if old_best_path is None:
            return

        if old_best_path.source is not None:
            # Send update-withdraw msg. to Sink. Create withdraw path
            # out of old best path and queue it into flexinet sinks.
            old_best_path = old_best_path.clone(for_withdrawal=True)
            self._core_service.update_flexinet_peers(old_best_path,
                                                     self._route_dist)
        else:
            # Create withdraw-path out of old best path.
            gpath = old_best_path.clone_to_vpn(self._route_dist,
                                               for_withdrawal=True)
            # Insert withdraw into global table and enqueue the destination
            # for further processing.
            tm = self._core_service.table_manager
            tm.learn_path(gpath)

    def _new_best_path(self, best_path):
        """Record `best_path` and distribute it to peers/global table."""
        LOG.debug('New best path selected for destination %s', self)

        old_best_path = self._best_path
        assert (best_path != old_best_path)
        self._best_path = best_path
        # Distribute new best-path to flexinet-peers.
        if best_path.source is not None:
            # Since route-refresh just causes the version number to
            # go up and this changes best-path, we check if new-
            # best-path is really different than old-best-path that
            # warrants sending update to flexinet peers.

            def really_diff():
                # Only nexthop or label changes matter to flexinet peers.
                old_labels = old_best_path.label_list
                new_labels = best_path.label_list
                return old_best_path.nexthop != best_path.nexthop \
                    or set(old_labels) != set(new_labels)

            if not old_best_path or (old_best_path and really_diff()):
                # Create OutgoingRoute and queue it into NC sink.
                self._core_service.update_flexinet_peers(
                    best_path, self._route_dist
                )
        else:
            # If NC is source, we create new path and insert into global
            # table.
            gpath = best_path.clone_to_vpn(self._route_dist)
            tm = self._core_service.table_manager
            tm.learn_path(gpath)
            LOG.debug('VRF table %s has new best path: %s',
                      self._route_dist, self.best_path)

    def _remove_withdrawals(self):
        """Removes withdrawn paths.

        Note:
        We may have disproportionate number of withdraws compared to know paths
        since not all paths get installed into the table due to bgp policy and
        we can receive withdraws for such paths and withdrawals may not be
        stopped by the same policies.
        """
        LOG.debug('Removing %s withdrawals', len(self._withdraw_list))

        # If we have not withdrawals, we have nothing to do.
        if not self._withdraw_list:
            return

        # If we have some withdrawals and no know-paths, it means it is safe to
        # delete these withdraws.
        if not self._known_path_list:
            LOG.debug('Found %s withdrawals for path(s) that did not get'
                      ' installed.', len(self._withdraw_list))
            del (self._withdraw_list[:])
            return

        # If we have some known paths and some withdrawals, we find matches and
        # delete them first.
        matches = []
        w_matches = []
        # Match all withdrawals from destination paths.
        for withdraw in self._withdraw_list:
            match = None
            for path in self._known_path_list:
                # We have a match if the source are same.
                if path.puid == withdraw.puid:
                    match = path
                    matches.append(path)
                    w_matches.append(withdraw)
                    # One withdraw can remove only one path.
                    break
            # We do no have any match for this withdraw.
            if not match:
                LOG.debug('No matching path for withdraw found, may be path '
                          'was not installed into table: %s',
                          withdraw)
        # If we have partial match.
        if len(matches) != len(self._withdraw_list):
            LOG.debug('Did not find match for some withdrawals. Number of '
                      'matches(%s), number of withdrawals (%s)',
                      len(matches), len(self._withdraw_list))

        # Clear matching paths and withdrawals.
        for match in matches:
            self._known_path_list.remove(match)
        for w_match in w_matches:
            self._withdraw_list.remove(w_match)

    def _remove_old_paths(self):
        """Identifies which of known paths are old and removes them.

        Known paths will no longer have paths whose new version is present in
        new paths.
        """
        new_paths = self._new_path_list
        known_paths = self._known_path_list
        for new_path in new_paths:
            old_paths = []
            for path in known_paths:
                # Here we just check if source is same and not check if path
                # version num. as new_paths are implicit withdrawal of old
                # paths and when doing RouteRefresh (not EnhancedRouteRefresh)
                # we get same paths again.
                if new_path.puid == path.puid:
                    old_paths.append(path)
                    break
            for old_path in old_paths:
                known_paths.remove(old_path)
                LOG.debug('Implicit withdrawal of old path, since we have'
                          ' learned new path from same source: %s', old_path)

    def _validate_path(self, path):
        """Reject objects that are not label-carrying VRF paths."""
        if not path or not hasattr(path, 'label_list'):
            raise ValueError('Invalid value of path. Expected type '
                             'with attribute label_list got %s' % path)
@six.add_metaclass(abc.ABCMeta)
class VrfPath(Path):
    """Represents a way of reaching an IP destination with a VPN.
    """
    __slots__ = ('_label_list', '_puid')
    ROUTE_FAMILY = None
    VPN_PATH_CLASS = None
    VPN_NLRI_CLASS = None

    def __init__(self, puid, source, nlri, src_ver_num,
                 pattrs=None, nexthop=None,
                 is_withdraw=False, label_list=None):
        """Initializes a Vrf path.

        Parameters:
            - `puid`: (str) path ID, identifies VPN path from which this
            VRF path was imported.
            - `label_list`: (list) List of labels for this path.

        Note: other parameters are as documented in super class.
        """
        Path.__init__(self, source, nlri, src_ver_num, pattrs, nexthop,
                      is_withdraw)
        if label_list is None:
            label_list = []
        self._label_list = label_list
        self._puid = puid

    @property
    def puid(self):
        return self._puid

    @property
    def origin_rd(self):
        # The puid is "<rd>:<prefix>" and the rd itself contains one colon,
        # so the first two tokens reconstitute the route distinguisher.
        tokens = self.puid.split(':')
        return tokens[0] + ':' + tokens[1]

    @property
    def label_list(self):
        # Returns a copy so callers cannot mutate internal state.
        return self._label_list[:]

    @property
    def nlri_str(self):
        # Returns `prefix` without the route distinguisher value, because
        # a destination in VRF space can be identified without the route
        # distinguisher.
        return self._nlri.prefix

    @staticmethod
    def create_puid(route_dist, ip_prefix):
        """Build the path ID string "<route_dist>:<ip_prefix>"."""
        assert route_dist and ip_prefix
        return str(route_dist) + ':' + ip_prefix

    def clone(self, for_withdrawal=False):
        """Return a copy of this path; withdrawal clones drop path attrs."""
        pathattrs = None
        if not for_withdrawal:
            pathattrs = self.pathattr_map
        clone = self.__class__(
            self.puid,
            self._source,
            self.nlri,
            self.source_version_num,
            pattrs=pathattrs,
            nexthop=self.nexthop,
            is_withdraw=for_withdrawal,
            label_list=self.label_list
        )
        return clone

    def clone_to_vpn(self, route_dist, for_withdrawal=False):
        """Return the equivalent VPN path for `route_dist`.

        For EVPN the NLRI is rebuilt from its attributes; for IPv4/IPv6
        the prefix is re-wrapped with labels and the route distinguisher.
        """
        if self.ROUTE_FAMILY == RF_L2_EVPN:
            nlri_cls = self.VPN_NLRI_CLASS._lookup_type(self._nlri.type)
            kwargs = dict(self._nlri.__dict__)
            kwargs.pop('type', None)
            vpn_nlri = nlri_cls(**kwargs)
        else:  # self.ROUTE_FAMILY in [RF_IPv4_UC, RF_IPv6_UC]
            ip, masklen = self._nlri.prefix.split('/')
            vpn_nlri = self.VPN_NLRI_CLASS(length=int(masklen),
                                           addr=ip,
                                           labels=self.label_list,
                                           route_dist=route_dist)
        pathattrs = None
        if not for_withdrawal:
            pathattrs = self.pathattr_map
        vpnv_path = self.VPN_PATH_CLASS(
            source=self.source,
            nlri=vpn_nlri,
            src_ver_num=self.source_version_num,
            pattrs=pathattrs,
            nexthop=self.nexthop,
            is_withdraw=for_withdrawal)

        return vpnv_path

    # NOTE(review): __eq__ is defined without __hash__, which makes
    # instances unhashable under Python 3 - confirm no caller stores
    # VrfPath objects in sets or as dict keys.
    def __eq__(self, b_path):
        if not isinstance(b_path, self.__class__):
            return False
        if not self.route_family == b_path.route_family:
            return False
        if not self.puid == b_path.puid:
            return False
        if not self.label_list == b_path.label_list:
            return False
        if not self.nexthop == b_path.nexthop:
            return False
        if not self.pathattr_map == b_path.pathattr_map:
            return False
        return True
class ImportMap(object):
    """Abstract filter applied to VRF paths during import."""

    def match(self, vrf_path):
        """Return True if `vrf_path` matches this map; subclasses override."""
        raise NotImplementedError()
class VrfNlriImportMap(ImportMap):
    """Import map that matches VRF paths by exact NLRI (prefix)."""

    # Subclasses supply the concrete path and NLRI classes.
    VRF_PATH_CLASS = None
    NLRI_CLASS = None

    def __init__(self, prefix):
        assert self.VRF_PATH_CLASS is not None
        assert self.NLRI_CLASS is not None
        self._nlri = self.NLRI_CLASS(prefix)

    def match(self, vrf_path):
        """Return True if `vrf_path` carries the same NLRI as this map."""
        expected_family = self.VRF_PATH_CLASS.ROUTE_FAMILY
        if vrf_path.route_family != expected_family:
            LOG.error(
                "vrf_paths route_family doesn\'t match importmaps"
                "route_family. Applied to wrong table?")
            return False
        return vrf_path.nlri == self._nlri
class VrfRtImportMap(ImportMap):
    """Import map that matches VRF paths carrying a given route target."""

    def __init__(self, rt):
        self._rt = rt

    def match(self, vrf_path):
        """True if the path's extended-communities attribute lists our RT."""
        extcomm = vrf_path.pathattr_map.get(BGP_ATTR_TYPE_EXTENDED_COMMUNITIES)
        if extcomm is None:
            return False
        return self._rt in extcomm.rt_list
| |
import re
import urllib.request
import urllib.error
import json
import smtplib
from email.mime.text import MIMEText
import keyring
import sys
import getpass
import time
from datetime import datetime, timedelta
import os
# all possible arguments that have meaning to the program
# All arguments that have meaning to the program.
# BUG FIX: the sender short flag was listed as plain 's' instead of '-s',
# so '-s' was never recognized as a flag boundary by the argument-consuming
# loops, even though switch_argument() and the help text both use '-s'.
arguments = ['-a', '-d', '-e', '-h', '-k', '-r', '-s', '-p', '-u',
             '--add-keyword', '--delete-keyword', '--api',
             '--add-receiver', '--delete-receiver', '--sender', '--server',
             '--ssl', '--user', '--password', '--status', '--help']
# Adds keywords to the list in data.conf
# args: command line arguments
# index: the index of the current argument from args
def add_keywords(args, index):
    """Add keywords to the 'keywords' list in data.conf.

    Consumes arguments after args[index] until the next recognized flag.
    FIX: files are now opened with context managers so handles cannot leak
    if an exception occurs between open() and close().

    Args:
        args: command line arguments.
        index: index of the current argument ('-k'/'--add-keyword') in args.

    Returns:
        Index of the first argument that was not consumed.
    """
    path = os.path.dirname(os.path.realpath(__file__))
    with open(path + '/data.conf', 'r') as f:
        lines = f.readlines()
    with open(path + '/data.conf', 'w') as f:
        for line in lines:
            if line.startswith('keyword'):
                # Extract the quoted keyword values from the config line.
                keywords = re.findall(r'keywords:.*', line)[0]
                keywords = re.findall(r'\'.*?\'', keywords)
                keywords = [re.findall(r'[^\']+', kw)[0] for kw in keywords]
                index += 1
                while index < len(args):
                    if args[index] in arguments:
                        break
                    if args[index] not in keywords:
                        keywords.append(args[index])
                    index += 1
                keywords.sort(key=str.lower)
                line = 'keywords:' + str(keywords) + '\n'
            f.write(line)
    return index
# Deletes keywords from the list in data.conf
# args: command line arguments
# index: the index of the current argument from args
def delete_keywords(args, index):
    """Remove keywords from the 'keywords' list in data.conf.

    Consumes arguments after args[index] until the next recognized flag.
    FIX: files are now opened with context managers so handles cannot leak.

    Args:
        args: command line arguments.
        index: index of the current argument ('-d'/'--delete-keyword') in args.

    Returns:
        Index of the first argument that was not consumed.
    """
    path = os.path.dirname(os.path.realpath(__file__))
    with open(path + '/data.conf', 'r') as f:
        lines = f.readlines()
    with open(path + '/data.conf', 'w') as f:
        for line in lines:
            if line.startswith('keyword'):
                # Extract the quoted keyword values from the config line.
                keywords = re.findall(r'keywords:.*', line)[0]
                keywords = re.findall(r'\'.*?\'', keywords)
                keywords = [re.findall(r'[^\']+', kw)[0] for kw in keywords]
                index += 1
                while index < len(args):
                    if args[index] in arguments:
                        break
                    if args[index] in keywords:
                        keywords.remove(args[index])
                    index += 1
                line = 'keywords:' + str(keywords) + '\n'
            f.write(line)
    return index
# changes the apikey value in data.conf
# args: command line arguments
# index: the index of the current argument from args
def change_api(args, index):
    """Change the apikey value in data.conf.

    The last non-flag argument after args[index] wins.
    FIX: files are now opened with context managers so handles cannot leak.

    Args:
        args: command line arguments.
        index: index of the current argument ('-a'/'--api') in args.

    Returns:
        Index of the first argument that was not consumed.
    """
    path = os.path.dirname(os.path.realpath(__file__))
    with open(path + '/data.conf', 'r') as f:
        lines = f.readlines()
    with open(path + '/data.conf', 'w') as f:
        for line in lines:
            if line.startswith('api'):
                api = re.findall(r'api:.+', line)[0]
                api = re.findall(r'\'.+\'', api)[0]
                api = re.findall(r'[^\']+', api)[0]
                index += 1
                while index < len(args):
                    if args[index] in arguments:
                        break
                    api = args[index]
                    index += 1
                line = 'api:\'' + api + '\'\n'
            f.write(line)
    return index
# Adds receivers to the list in data.conf
# args: command line arguments
# index: the index of the current argument from args
def add_receivers(args, index):
    """Add receivers to the 'receivers' list in data.conf.

    Consumes arguments after args[index] until the next recognized flag.
    FIX: files are now opened with context managers so handles cannot leak.

    Args:
        args: command line arguments.
        index: index of the current argument ('-r'/'--add-receiver') in args.

    Returns:
        Index of the first argument that was not consumed.
    """
    path = os.path.dirname(os.path.realpath(__file__))
    with open(path + '/data.conf', 'r') as f:
        lines = f.readlines()
    with open(path + '/data.conf', 'w') as f:
        for line in lines:
            if line.startswith('receiver'):
                # Extract the quoted receiver values from the config line.
                receivers = re.findall(r'receivers:.*', line)[0]
                receivers = re.findall(r'\'.*?\'', receivers)
                receivers = [re.findall(r'[^\']+', r)[0] for r in receivers]
                index += 1
                while index < len(args):
                    if args[index] in arguments:
                        break
                    if args[index] not in receivers:
                        receivers.append(args[index])
                    index += 1
                receivers.sort(key=str.lower)
                line = 'receivers:' + str(receivers) + '\n'
            f.write(line)
    return index
# Deletes receivers to the list in data.conf
# args: command line arguments
# index: the index of the current argument from args
def delete_receivers(args, index):
    """Remove receivers from the 'receivers' list in data.conf.

    Consumes arguments after args[index] until the next recognized flag.
    FIX: files are now opened with context managers so handles cannot leak.

    Args:
        args: command line arguments.
        index: index of the current argument ('--delete-receiver') in args.

    Returns:
        Index of the first argument that was not consumed.
    """
    path = os.path.dirname(os.path.realpath(__file__))
    with open(path + '/data.conf', 'r') as f:
        lines = f.readlines()
    with open(path + '/data.conf', 'w') as f:
        for line in lines:
            if line.startswith('receiver'):
                # Extract the quoted receiver values from the config line.
                receivers = re.findall(r'receivers:.*', line)[0]
                receivers = re.findall(r'\'.*?\'', receivers)
                receivers = [re.findall(r'[^\']+', r)[0] for r in receivers]
                index += 1
                while index < len(args):
                    if args[index] in arguments:
                        break
                    if args[index] in receivers:
                        receivers.remove(args[index])
                    index += 1
                line = 'receivers:' + str(receivers) + '\n'
            f.write(line)
    return index
# Changes the sender of the emails in data.conf
# args: command line arguments
# index: the index of the current argument from args
def change_sender(args, index):
    """Change the email sender address in data.conf.

    The last non-flag argument after args[index] wins.
    FIX: files are now opened with context managers so handles cannot leak.

    Args:
        args: command line arguments.
        index: index of the current argument ('-s'/'--sender') in args.

    Returns:
        Index of the first argument that was not consumed.
    """
    path = os.path.dirname(os.path.realpath(__file__))
    with open(path + '/data.conf', 'r') as f:
        lines = f.readlines()
    with open(path + '/data.conf', 'w') as f:
        for line in lines:
            if line.startswith('sender'):
                sender = re.findall(r'sender:.+', line)[0]
                sender = re.findall(r'\'.+\'', sender)[0]
                sender = re.findall(r'[^\']+', sender)[0]
                index += 1
                while index < len(args):
                    if args[index] in arguments:
                        break
                    sender = args[index]
                    index += 1
                line = 'sender:\'' + sender + '\'\n'
            f.write(line)
    return index
# Changes the SMTP-server in data.conf
# args: command line arguments
# index: the index of the current argument from args
def change_server(args, index):
    """Change the SMTP server in data.conf, migrating stored credentials.

    For each new server argument, the keyring entries stored under the old
    server name are deleted and re-created under the new name.
    FIX: files are now opened with context managers so handles cannot leak.

    Args:
        args: command line arguments.
        index: index of the current argument ('-h'/'--server') in args.

    Returns:
        Index of the first argument that was not consumed.
    """
    path = os.path.dirname(os.path.realpath(__file__))
    with open(path + '/data.conf', 'r') as f:
        lines = f.readlines()
    with open(path + '/data.conf', 'w') as f:
        for line in lines:
            if line.startswith('server'):
                server = re.findall(r'server:.+', line)[0]
                server = re.findall(r'\'.+\'', server)[0]
                server = re.findall(r'[^\']+', server)[0]
                index += 1
                while index < len(args):
                    if args[index] in arguments:
                        break
                    # Move the stored credentials to the new server key.
                    # NOTE(review): keyring.get_password() may return None
                    # when nothing is stored yet - confirm set_password
                    # tolerates a None value on this backend.
                    pw = keyring.get_password('password', server)
                    user = keyring.get_password('username', server)
                    keyring.delete_password('password', server)
                    keyring.delete_password('username', server)
                    server = args[index]
                    keyring.set_password('password', server, pw)
                    keyring.set_password('username', server, user)
                    index += 1
                line = 'server:\'' + server + '\'\n'
            f.write(line)
    return index
# Changes the ssl variable in data.conf to yes or no
# args: command line arguments
# index: the index of the current argument from args
def toggle_ssl(args, index):
    """Toggle the ssl flag in data.conf between 'yes' and 'no'.

    FIX: files are now opened with context managers so handles cannot leak.

    Args:
        args: command line arguments (the flag consumes no extra arguments).
        index: index of the current argument ('-e'/'--ssl') in args.

    Returns:
        index + 1.
    """
    path = os.path.dirname(os.path.realpath(__file__))
    with open(path + '/data.conf', 'r') as f:
        lines = f.readlines()
    with open(path + '/data.conf', 'w') as f:
        for line in lines:
            if line.startswith('ssl'):
                ssl = re.findall(r'ssl:.+', line)[0]
                ssl = re.findall(r'\'.+\'', ssl)[0]
                ssl = re.findall(r'[^\']+', ssl)[0]
                ssl = 'no' if ssl == 'yes' else 'yes'
                line = 'ssl:\'' + ssl + '\'\n'
            f.write(line)
    return index + 1
# Changes the user used to authenticate at the SMTP-server
# args: command line arguments
# index: the index of the current argument from args
def change_user(args, index):
    """Change the username used to authenticate at the SMTP server.

    FIXES: the config file is now read via a context manager (the previous
    bare open(...).read() leaked the file handle), and an IndexError when
    '-u'/'--user' is the last argument is now guarded against.

    Args:
        args: command line arguments.
        index: index of the current argument ('-u'/'--user') in args.

    Returns:
        Index of the first argument that was not consumed.
    """
    path = os.path.dirname(os.path.realpath(__file__))
    with open(path + '/data.conf', 'r') as f:
        conf = f.read()
    server = re.findall(r'server:.+', conf)[0]
    server = re.findall(r'\'[^\']+\'', server)[0]
    server = re.findall(r'[^\']+', server)[0]
    index += 1
    # Guard against '-u' being the final argument (previously IndexError).
    if index < len(args) and args[index] not in arguments:
        keyring.set_password('username', server, args[index])
    return index + 1
# Changes the password used to authenticate at the SMTP-server
# args: command line arguments
# index: the index of the current argument from args
def change_password(args, index):
    """Prompt for and store the SMTP-server password in the keyring.

    FIX: the config file is now read via a context manager (the previous
    bare open(...).read() leaked the file handle).

    Args:
        args: command line arguments (unused; password is read via getpass).
        index: index of the current argument ('-p'/'--password') in args.

    Returns:
        index + 1.
    """
    path = os.path.dirname(os.path.realpath(__file__))
    with open(path + '/data.conf', 'r') as f:
        conf = f.read()
    server = re.findall(r'server:.+', conf)[0]
    server = re.findall(r'\'[^\']+\'', server)[0]
    server = re.findall(r'[^\']+', server)[0]
    pw = getpass.getpass()
    keyring.set_password('password', server, pw)
    return index + 1
# Prints the current configuration
# args: command line arguments
# index: the index of the current argument from args
def show_status(args, index):
    """Print the current configuration (plus username, minus password).

    FIXES: the config file is now read via a context manager (the previous
    bare open(...).read() leaked the file handle), and trailing newlines are
    stripped with str.rstrip instead of a repeated-slicing while loop.

    Args:
        args: command line arguments (unused).
        index: index of the current argument ('--status') in args.

    Returns:
        index + 1.
    """
    path = os.path.dirname(os.path.realpath(__file__))
    with open(path + '/data.conf', 'r') as f:
        options = f.read()
    server = re.findall(r'server:.+', options)[0]
    server = re.findall(r'\'[^\']+\'', server)[0]
    server = re.findall(r'[^\']+', server)[0]
    print(options.rstrip('\n'))
    print('username:\'' + keyring.get_password('username', server) + '\'')
    return index + 1
# Decides which methods are called with the current argument
# args: command line arguments
# index: the index of the current argument from args
def switch_argument(args, index):
    """Dispatch the argument at args[index] to its handler.

    BUG FIX: the ssl long option was compared against '-ssl' instead of
    '--ssl' (the form listed in `arguments` and in the help text), so
    '--ssl' fell through to the help screen. Also replaces the bitwise '|'
    on booleans with membership tests, the idiomatic equivalent.

    Args:
        args: command line arguments.
        index: the index of the current argument from args.

    Returns:
        Index of the next argument to process.
    """
    arg = args[index]
    if arg in ('-k', '--add-keyword'):
        return add_keywords(args, index)
    elif arg in ('-d', '--delete-keyword'):
        return delete_keywords(args, index)
    elif arg in ('-a', '--api'):
        return change_api(args, index)
    elif arg in ('-r', '--add-receiver'):
        return add_receivers(args, index)
    elif arg == '--delete-receiver':
        return delete_receivers(args, index)
    elif arg in ('-s', '--sender'):
        return change_sender(args, index)
    elif arg in ('-h', '--server'):
        return change_server(args, index)
    elif arg in ('-e', '--ssl'):  # was '-ssl': '--ssl' never matched
        return toggle_ssl(args, index)
    elif arg in ('-u', '--user'):
        return change_user(args, index)
    elif arg in ('-p', '--password'):
        return change_password(args, index)
    elif arg == '--status':
        return show_status(args, index)
    else:
        return show_help(args, index)
# Prints the help screen
# args: command line arguments
# index: the index of the current argument from args
def show_help(args, index):
    """Print the help screen.

    Args:
        args: command line arguments (unused).
        index: the index of the current argument from args.

    Returns:
        index + 1.
    """
    messages = [
        'Usage:\nlistener.py [(-k|--add-keyword) KEYWORDS] [(-d|--delete-keyword) KEYWORDS] [(-a|--api) APIKEY] '
        '[(-r|--add-receiver) RECEIVERS] [--delete-receiver RECEIVERS] [(-s|--sender) SENDER] [(-h|--server) SERVER] '
        '[(-e|--ssl)] [(-u|--user) USER] [(-p|--password)] [--status] [--help]',
        '(-k|--add-keyword) KEYWORDS: Adds the keywords to the keyword list. Keywords are separated by a whitespace.',
        '(-d|--delete-keyword) KEYWORDS: Removes keywords from the list. Keywords are separated by a whitespace.',
        '(-a|--api) APIKEY: Changes the api key.',
        '(-r|--add-receiver) RECEIVERS: Adds new receivers to the list. Receivers are separated by a whitespace.',
        '--delete-receiver RECEIVERS: Removes receivers from the list. Receivers are separated by a whitespace.',
        '(-s|--sender) SENDER: Changes the sender.',
        '(-h|--server) SERVER: Changes the SMTP-server.',
        '(-e|--ssl): Toggles whether to use ssl or not.',
        '(-u|--user) USER: Changes the username for the SMTP-server.',
        '(-p|--password): Changes the password for the SMTP-server.',
        '--status: Prints the configuration (except password).',
        '--help: Prints this page.',
    ]
    # Joining with a double newline reproduces the original blank line
    # printed between each message.
    print('\n\n'.join(messages))
    return index + 1
# Gets the usenet data and sends it to the receivers.
# Reads data.conf next to this script, queries the indexer API once per
# keyword, builds a digest of items from the last year, and mails it to
# every configured receiver over SMTP.
def send():
    path = os.path.realpath(__file__)
    path = path[:path.rfind('/')]
    # Fix: close the config file deterministically instead of leaking the handle.
    with open(path + '/data.conf', 'r') as config_file:
        options = config_file.read()
    # Config lines look like key:'value' (lists use several quoted values);
    # the nested findall calls peel the quotes off.
    api = re.findall(r'api:.+', options)[0]
    api = re.findall(r'\'.+\'', api)[0]
    api = re.findall(r'[^\']+', api)[0]
    receivers = re.findall(r'receivers:.*', options)[0]
    receivers = re.findall(r'\'.*?\'', receivers)
    receivers = [re.findall(r'[^\']+', receiver)[0] for receiver in receivers]
    sender = re.findall(r'sender:.+', options)[0]
    sender = re.findall(r'\'[^\']+\'', sender)[0]
    sender = re.findall(r'[^\']+', sender)[0]
    keywords = re.findall(r'keywords:.*', options)[0]
    keywords = re.findall(r'\'.*?\'', keywords)
    keywords = [re.findall(r'[^\']+', keyword)[0] for keyword in keywords]
    server = re.findall(r'server:.+', options)[0]
    server = re.findall(r'\'[^\']+\'', server)[0]
    server = re.findall(r'[^\']+', server)[0]
    port = re.findall(r'port:\d+', options)[0]
    port = re.findall(r'\d+', port)[0]
    ssl = re.findall(r'ssl:.+', options)[0].endswith('yes\'')
    emailBody = ''
    for keyword in keywords:
        emailBody += keyword + ':\n'
        query = keyword.replace(' ', '%20')
        url = ('https://api.oznzb.com/api?extended=1&o=json&t=search&q=' +
               query + '&apikey=' + api)
        # The API sporadically refuses connections; retry a bounded number of times.
        response = None
        for _ in range(20):
            try:
                response = urllib.request.urlopen(url)
                break
            except urllib.error.URLError:
                response = None
        if response is None:
            emailBody += 'An error occurred (probably connection refused, which is a server error)'
        else:
            channel = json.loads(response.read().decode('utf-8'))['channel']
            if 'item' not in channel:
                continue
            for item in channel['item']:
                if not isinstance(item, dict):
                    continue
                # pubDate looks like 'Mon, 01 Jan 2018 00:00:00 +0000';
                # strip the trailing UTC offset before parsing.
                date = time.strptime(item['pubDate'][:-6], '%a, %d %b %Y %H:%M:%S')
                date = datetime.fromtimestamp(time.mktime(date))
                if (datetime.now() - date) > timedelta(365):  # older than 1 year
                    break
                emailBody += 'Link: ' + item['link'] + '\n'
                emailBody += 'Title: ' + item['title'] + '\n'
                emailBody += 'Uploaded: ' + item['pubDate'] + '\n'
                emailBody += 'Desc: ' + item['description'] + '\n\n'
        emailBody += '\n\n'
    # Nothing found for any keyword: do not send an empty mail.
    if not emailBody.strip():
        exit(0)
    msg = MIMEText(emailBody)
    msg['Subject'] = 'UseNet News'
    msg['From'] = sender
    user = keyring.get_password('username', server)
    pw = keyring.get_password('password', server)
    if ssl:
        smtp = smtplib.SMTP_SSL(host=server, port=port)
    else:
        smtp = smtplib.SMTP(host=server, port=port)
    smtp.login(user, pw)
    for receiver in receivers:
        # Bug fix: on email.message objects, assigning to an existing header
        # *appends* another 'To:' line, so every receiver after the first used
        # to see all previous receivers. Delete the header before re-setting it.
        del msg['To']
        msg['To'] = receiver
        smtp.send_message(msg)
    smtp.quit()
# Script entry point: with no extra arguments run the fetch-and-mail cycle,
# otherwise walk the argument vector through the option handlers.
args = sys.argv
if len(args) > 1:
    # Each handler returns the index of the next argument to process.
    position = 1
    while position < len(args):
        position = switch_argument(args, position)
else:
    send()
| |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# coding: utf-8
# pylint: disable=too-many-lines
"""Weight updating functions."""
import math
import pickle
import warnings
import numpy
from .base import py_str
from .ndarray import (NDArray, zeros, clip, sqrt, cast, maximum, abs as NDabs)
from .ndarray import (sgd_update, sgd_mom_update, adam_update, rmsprop_update, rmspropalex_update,
mp_sgd_update, mp_sgd_mom_update, square, ftrl_update, ftml_update)
from .ndarray import _internal
from .ndarray import op
from .ndarray import sparse
from .random import normal
class Optimizer(object):
    """The base class inherited by all optimizers.

    Parameters
    ----------
    rescale_grad : float, optional
        Multiply the gradient with `rescale_grad` before updating. Often
        choose to be ``1.0/batch_size``.
    param_idx2name : dict from int to string, optional
        A dictionary that maps int index to string name.
    clip_gradient : float, optional
        Clip the gradient by projecting onto the box ``[-clip_gradient, clip_gradient]``.
    learning_rate : float, optional
        The initial learning rate.
    lr_scheduler : LRScheduler, optional
        The learning rate scheduler.
    wd : float, optional
        The weight decay (or L2 regularization) coefficient. Modifies objective
        by adding a penalty for having large weights.
    sym: Symbol, optional
        The Symbol this optimizer is applying to.
    begin_num_update : int, optional
        The initial number of updates.
    multi_precision : bool, optional
        Flag to control the internal precision of the optimizer.
        ``False`` results in using the same precision as the weights (default),
        ``True`` makes internal 32-bit copy of the weights and applies gradients
        in 32-bit precision even if actual weights used in the model have lower precision.
        Turning this on can improve convergence and accuracy when training with float16.

    Properties
    ----------
    learning_rate : float
        The current learning rate of the optimizer. Given an Optimizer object
        optimizer, its learning rate can be accessed as optimizer.learning_rate.
    """
    def __init__(self, rescale_grad=1., param_idx2name=None, wd=0.,
                 clip_gradient=None, learning_rate=0.01,
                 lr_scheduler=None, sym=None, begin_num_update=0,
                 multi_precision=False, param_dict=None):
        self.rescale_grad = rescale_grad
        self.lr = learning_rate
        self.lr_scheduler = lr_scheduler
        if lr_scheduler is not None:
            self.lr_scheduler.base_lr = learning_rate
        self.wd = wd
        self.lr_mult = {}
        self.wd_mult = {}
        self.begin_num_update = begin_num_update
        self.num_update = begin_num_update
        # Per-parameter update counters; num_update tracks the maximum.
        self._index_update_count = {}
        self.clip_gradient = clip_gradient
        self.multi_precision = multi_precision
        if param_idx2name is None:
            param_idx2name = {}
        assert isinstance(param_idx2name, dict), \
            'param_idx2name should be a dict of param indexes to names.'
        self.idx2name = param_idx2name.copy()
        self.sym_info = (sym.attr_dict(), sym.list_arguments()) if sym is not None else ()
        self.param_dict = param_dict if param_dict else {}
        self.set_lr_mult({})
        self.set_wd_mult({})

    # Class-level registry shared by all optimizers (name -> class).
    opt_registry = {}

    @staticmethod
    def register(klass):
        """Registers a new optimizer.

        Once an optimizer is registered, we can create an instance of this
        optimizer with `create_optimizer` later.

        Examples
        --------
        >>> @mx.optimizer.Optimizer.register
        ... class MyOptimizer(mx.optimizer.Optimizer):
        ...     pass
        >>> optim = mx.optimizer.Optimizer.create_optimizer('MyOptimizer')
        >>> print(type(optim))
        <class '__main__.MyOptimizer'>
        """
        assert(isinstance(klass, type))
        name = klass.__name__.lower()
        if name in Optimizer.opt_registry:
            # Bug fix: warnings.warn() does not support printf-style lazy
            # arguments the way logging does -- its second positional
            # parameter is `category`, so passing the format args here used
            # to raise a TypeError. Format the message eagerly instead.
            warnings.warn('WARNING: New optimizer %s.%s is overriding existing '
                          'optimizer %s.%s' %
                          (klass.__module__, klass.__name__,
                           Optimizer.opt_registry[name].__module__,
                           Optimizer.opt_registry[name].__name__))
        Optimizer.opt_registry[name] = klass
        return klass

    @staticmethod
    def create_optimizer(name, **kwargs):
        """Instantiates an optimizer with a given name and kwargs.

        .. note:: We can use the alias `create` for ``Optimizer.create_optimizer``.

        Parameters
        ----------
        name: str
            Name of the optimizer. Should be the name
            of a subclass of Optimizer. Case insensitive.
        kwargs: dict
            Parameters for the optimizer.

        Returns
        -------
        Optimizer
            An instantiated optimizer.

        Examples
        --------
        >>> sgd = mx.optimizer.Optimizer.create_optimizer('sgd')
        >>> type(sgd)
        <class 'mxnet.optimizer.SGD'>
        >>> adam = mx.optimizer.create('adam', learning_rate=.1)
        >>> type(adam)
        <class 'mxnet.optimizer.Adam'>
        """
        if name.lower() in Optimizer.opt_registry:
            return Optimizer.opt_registry[name.lower()](**kwargs)
        else:
            raise ValueError('Cannot find optimizer %s' % name)

    @property
    def learning_rate(self):
        # The scheduler, when present, wins over the static learning rate.
        if self.lr_scheduler is not None:
            return self.lr_scheduler(self.num_update)
        else:
            return self.lr

    def create_state(self, index, weight):
        """Creates auxiliary state for a given weight.

        Some optimizers require additional states, e.g. as momentum, in addition
        to gradients in order to update weights. This function creates state
        for a given weight which will be used in `update`. This function is
        called only once for each weight.

        Parameters
        ----------
        index : int
            An unique index to identify the weight.
        weight : NDArray
            The weight.

        Returns
        -------
        state : any obj
            The state associated with the weight.
        """

    def create_state_multi_precision(self, index, weight):
        """Creates auxiliary state for a given weight, including FP32 high
        precision copy if original weight is FP16.

        This method is provided to perform automatic mixed precision training
        for optimizers that do not support it themselves.

        Parameters
        ----------
        index : int
            An unique index to identify the weight.
        weight : NDArray
            The weight.

        Returns
        -------
        state : any obj
            The state associated with the weight.
        """
        weight_master_copy = None
        if self.multi_precision and weight.dtype == numpy.float16:
            # State layout consumed by update_multi_precision below:
            # (fp32 master weight, regular state built on the fp32 copy).
            weight_master_copy = weight.astype(numpy.float32)
            return (weight_master_copy,) + (self.create_state(index, weight_master_copy),)
        if weight.dtype == numpy.float16 and not self.multi_precision:
            warnings.warn("Accumulating with float16 in optimizer can lead to "
                          "poor accuracy or slow convergence. "
                          "Consider using multi_precision=True option of the "
                          "optimizer")
        return self.create_state(index, weight)

    def update(self, index, weight, grad, state):
        """Updates the given parameter using the corresponding gradient and state.

        Parameters
        ----------
        index : int
            The unique index of the parameter into the individual learning
            rates and weight decays. Learning rates and weight decay
            may be set via `set_lr_mult()` and `set_wd_mult()`, respectively.
        weight : NDArray
            The parameter to be updated.
        grad : NDArray
            The gradient of the objective with respect to this parameter.
        state : any obj
            The state returned by `create_state()`.
        """
        raise NotImplementedError()

    def update_multi_precision(self, index, weight, grad, state):
        """Updates the given parameter using the corresponding gradient and state.
        Mixed precision version.

        Parameters
        ----------
        index : int
            The unique index of the parameter into the individual learning
            rates and weight decays. Learning rates and weight decay
            may be set via `set_lr_mult()` and `set_wd_mult()`, respectively.
        weight : NDArray
            The parameter to be updated.
        grad : NDArray
            The gradient of the objective with respect to this parameter.
        state : any obj
            The state returned by `create_state()`.
        """
        if self.multi_precision and weight.dtype == numpy.float16:
            # Wrapper for mixed precision: update the fp32 master copy, then
            # cast the result back into the fp16 weight.
            weight_master_copy = state[0]
            original_state = state[1]
            grad32 = grad.astype(numpy.float32)
            self.update(index, weight_master_copy, grad32, original_state)
            cast(weight_master_copy, dtype=weight.dtype, out=weight)
        else:
            self.update(index, weight, grad, state)

    def set_learning_rate(self, lr):
        """Sets a new learning rate of the optimizer.

        Parameters
        ----------
        lr : float
            The new learning rate of the optimizer.
        """
        if self.lr_scheduler is not None:
            raise UserWarning("LRScheduler of the optimizer has already been "
                              "defined. Note that set_learning_rate can mutate "
                              "the value of the learning rate of the optimizer "
                              "only when the LRScheduler of the optimizer is "
                              "undefined.")
        else:
            self.lr = lr

    def set_lr_scale(self, args_lrscale):  # pylint: disable=unused-argument
        """[DEPRECATED] Sets lr scale. Use set_lr_mult instead."""
        raise DeprecationWarning

    def set_lr_mult(self, args_lr_mult):
        """Sets an individual learning rate multiplier for each parameter.

        If you specify a learning rate multiplier for a parameter, then
        the learning rate for the parameter will be set as the product of
        the global learning rate `self.lr` and its multiplier.

        .. note:: The default learning rate multiplier of a `Variable`
            can be set with `lr_mult` argument in the constructor.

        Parameters
        ----------
        args_lr_mult : dict of str/int to float
            For each of its key-value entries, the learning rate multipler for the
            parameter specified in the key will be set as the given value.

            You can specify the parameter with either its name or its index.
            If you use the name, you should pass `sym` in the constructor,
            and the name you specified in the key of `args_lr_mult` should match
            the name of the parameter in `sym`. If you use the index, it should
            correspond to the index of the parameter used in the `update` method.

            Specifying a parameter by its index is only supported for backward
            compatibility, and we recommend to use the name instead.
        """
        self.lr_mult = {}
        if self.sym_info:
            # Seed multipliers from __lr_mult__ attributes on the symbol.
            attr, arg_names = self.sym_info
            for name in arg_names:
                if name in attr and '__lr_mult__' in attr[name]:
                    self.lr_mult[name] = float(attr[name]['__lr_mult__'])
        self.lr_mult.update(args_lr_mult)

    def set_wd_mult(self, args_wd_mult):
        """Sets an individual weight decay multiplier for each parameter.

        By default, if `param_idx2name` was provided in the
        constructor, the weight decay multipler is set as 0 for all
        parameters whose name don't end with ``_weight`` or
        ``_gamma``.

        .. note:: The default weight decay multiplier for a `Variable`
            can be set with its `wd_mult` argument in the constructor.

        Parameters
        ----------
        args_wd_mult : dict of string/int to float
            For each of its key-value entries, the weight decay multipler for the
            parameter specified in the key will be set as the given value.

            You can specify the parameter with either its name or its index.
            If you use the name, you should pass `sym` in the constructor,
            and the name you specified in the key of `args_lr_mult` should match
            the name of the parameter in `sym`. If you use the index, it should
            correspond to the index of the parameter used in the `update` method.

            Specifying a parameter by its index is only supported for backward
            compatibility, and we recommend to use the name instead.
        """
        self.wd_mult = {}
        # Biases, gammas etc. are exempt from weight decay by default.
        for n in self.idx2name.values():
            if not (n.endswith('_weight') or n.endswith('_gamma')):
                self.wd_mult[n] = 0.0
        if self.sym_info:
            attr, arg_names = self.sym_info
            for name in arg_names:
                if name in attr and '__wd_mult__' in attr[name]:
                    self.wd_mult[name] = float(attr[name]['__wd_mult__'])
        self.wd_mult.update(args_wd_mult)

    def _update_count(self, index):
        """Updates num_update.

        Parameters
        ----------
        index : int
            The index to be updated.
        """
        if index not in self._index_update_count:
            self._index_update_count[index] = self.begin_num_update
        self._index_update_count[index] += 1
        self.num_update = max(self._index_update_count[index], self.num_update)

    def _get_lr(self, index):
        """Gets the learning rate given the index of the weight.

        Parameters
        ----------
        index : int
            The index corresponding to the weight.

        Returns
        -------
        lr : float
            Learning rate for this index.
        """
        if self.lr_scheduler is not None:
            lr = self.lr_scheduler(self.num_update)
        else:
            lr = self.lr
        # Lookup precedence: param_dict > explicit index multiplier > name multiplier.
        if index in self.param_dict:
            lr *= self.param_dict[index].lr_mult
        elif index in self.lr_mult:
            lr *= self.lr_mult[index]
        elif index in self.idx2name:
            lr *= self.lr_mult.get(self.idx2name[index], 1.0)
        return lr

    def _get_wd(self, index):
        """Gets weight decay for index.
        Returns 0 for non-weights if the name of weights are provided for `__init__`.

        Parameters
        ----------
        index : int
            The index for weight.

        Returns
        -------
        wd : float
            Weight decay for this index.
        """
        wd = self.wd
        # Same lookup precedence as _get_lr.
        if index in self.param_dict:
            wd *= self.param_dict[index].wd_mult
        elif index in self.wd_mult:
            wd *= self.wd_mult[index]
        elif index in self.idx2name:
            wd *= self.wd_mult.get(self.idx2name[index], 1.0)
        return wd
# Convenience alias so optimizer subclasses below can be registered with a
# bare ``@register`` decorator instead of ``@Optimizer.register``.
register = Optimizer.register # pylint: disable=invalid-name
# pylint: disable=line-too-long
@register
class SGD(Optimizer):
    """Stochastic gradient descent with momentum and weight decay.

    With ``lazy_update=True`` and both weight and gradient stored as
    ``row_sparse``, only the rows present in the current gradient are
    updated (a lazy update)::

        for row in grad.indices:
            rescaled_grad[row] = lr * rescale_grad * clip(grad[row], clip_gradient) + wd * weight[row]
            state[row] = momentum[row] * state[row] + rescaled_grad[row]
            weight[row] = weight[row] - state[row]

    This is much faster for sparse models but not exactly equivalent to the
    dense rule, which is otherwise applied to every element::

        rescaled_grad = lr * rescale_grad * clip(grad, clip_gradient) + wd * weight
        state = momentum * state + rescaled_grad
        weight = weight - state

    See :class:`~mxnet.ndarray.sgd_update` and
    :class:`~mxnet.ndarray.sgd_mom_update` for the kernel details.

    Parameters (in addition to :class:`.Optimizer`)
    ----------
    momentum : float, optional
        The momentum value.
    lazy_update : bool, optional
        Default is True. Enables the row-sparse lazy update described above.
    multi_precision : bool, optional
        When True, keeps an internal float32 copy of float16 weights and
        accumulates gradients in 32-bit precision, which helps convergence
        when training with float16.
    """
    def __init__(self, momentum=0.0, lazy_update=True, **kwargs):
        super(SGD, self).__init__(**kwargs)
        self.lazy_update = lazy_update
        self.momentum = momentum

    def create_state_multi_precision(self, index, weight):
        # For fp16 weights under multi_precision, the state is
        # (regular state built on the fp32 copy, fp32 master weight).
        if weight.dtype == numpy.float16:
            if self.multi_precision:
                master = weight.astype(numpy.float32)
                return (self.create_state(index, master), master)
            warnings.warn("Accumulating with float16 in optimizer can lead to "
                          "poor accuracy or slow convergence. "
                          "Consider using multi_precision=True option of the "
                          "SGD optimizer")
        return self.create_state(index, weight)

    def create_state(self, index, weight):
        # No state is needed without momentum.
        if self.momentum == 0.0:
            return None
        stype = weight.stype if self.lazy_update else 'default'
        return zeros(weight.shape, weight.context, dtype=weight.dtype, stype=stype)

    def _update_impl(self, index, weight, grad, state, multi_precision=False):
        # Shared worker for the single- and mixed-precision entry points.
        assert isinstance(weight, NDArray)
        assert isinstance(grad, NDArray)
        self._update_count(index)
        lr = self._get_lr(index)
        wd = self._get_wd(index)
        extra = {'rescale_grad': self.rescale_grad}
        if self.momentum > 0:
            extra['momentum'] = self.momentum
        if self.clip_gradient:
            extra['clip_gradient'] = self.clip_gradient
        if multi_precision:
            if state[0] is None:
                mp_sgd_update(weight, grad, state[1], out=weight,
                              lr=lr, wd=wd, **extra)
            else:
                mp_sgd_mom_update(weight, grad, state[0], state[1], out=weight,
                                  lr=lr, wd=wd, **extra)
        else:
            if state is None:
                sgd_update(weight, grad, out=weight,
                           lr=lr, wd=wd, **extra)
            else:
                sgd_mom_update(weight, grad, state, out=weight,
                               lr=lr, wd=wd, **extra)

    def update(self, index, weight, grad, state):
        self._update_impl(index, weight, grad, state, multi_precision=False)

    def update_multi_precision(self, index, weight, grad, state):
        use_mp = self.multi_precision and weight.dtype == numpy.float16
        self._update_impl(index, weight, grad, state, multi_precision=use_mp)
@register
class FTML(Optimizer):
    """Follow the Moving Leader optimizer.

    Implements the algorithm from *FTML - Follow the Moving Leader in Deep
    Learning*, http://proceedings.mlr.press/v70/zheng17a/zheng17a.pdf.

    Parameters (in addition to :class:`.Optimizer`)
    ----------
    beta1 : float, optional
        0 < beta1 < 1. Generally close to 0.5.
    beta2 : float, optional
        0 < beta2 < 1. Generally close to 1.
    epsilon : float, optional
        Small value to avoid division by 0.
    """
    def __init__(self, beta1=0.6, beta2=0.999, epsilon=1e-8, **kwargs):
        super(FTML, self).__init__(**kwargs)
        self.beta1 = beta1
        self.beta2 = beta2
        self.epsilon = epsilon

    def create_state(self, index, weight):
        def _buf():
            return zeros(weight.shape, weight.context, dtype=weight.dtype)
        # (d_0, v_0, z_0) accumulators for the FTML recursion.
        return (_buf(), _buf(), _buf())

    def update(self, index, weight, grad, state):
        assert isinstance(weight, NDArray)
        assert isinstance(grad, NDArray)
        self._update_count(index)
        lr = self._get_lr(index)
        wd = self._get_wd(index)
        step = self._index_update_count[index]
        opts = {'beta1': self.beta1, 'beta2': self.beta2,
                'epsilon': self.epsilon, 'rescale_grad': self.rescale_grad,
                't': step}
        if self.clip_gradient:
            # NB: the ftml_update kernel names this parameter 'clip_grad'.
            opts['clip_grad'] = self.clip_gradient
        d_t, v_t, z_t = state
        ftml_update(weight, grad, d_t, v_t, z_t, out=weight,
                    lr=lr, wd=wd, **opts)
# pylint: enable=line-too-long
@register
class DCASGD(Optimizer):
    """The DCASGD optimizer.

    This class implements the optimizer described in *Asynchronous Stochastic Gradient Descent
    with Delay Compensation for Distributed Deep Learning*,
    available at https://arxiv.org/abs/1609.08326.

    This optimizer accepts the following parameters in addition to those accepted
    by :class:`.Optimizer`.

    Parameters
    ----------
    momentum : float, optional
       The momentum value.
    lamda : float, optional
       Scale DC value.
    """
    def __init__(self, momentum=0.0, lamda=0.04, **kwargs):
        super(DCASGD, self).__init__(**kwargs)
        self.momentum = momentum
        self.weight_previous = {}
        self.lamda = lamda

    def create_state(self, index, weight):
        # State is (momentum buffer or None, snapshot of the previous weight).
        if self.momentum == 0.0:
            return (None,
                    weight.copy())  # previous weight
        else:
            return (zeros(weight.shape, weight.context, dtype=weight.dtype),  # momentum
                    weight.copy())  # previous weight

    def update(self, index, weight, grad, state):
        assert(isinstance(weight, NDArray))
        assert(isinstance(grad, NDArray))
        self._update_count(index)
        lr = self._get_lr(index)
        wd = self._get_wd(index)
        grad = grad * self.rescale_grad
        if self.clip_gradient is not None:
            grad = clip(grad, -self.clip_gradient, self.clip_gradient)
        mom, previous_weight = state
        # Fix: compare against None explicitly. `if mom:` truth-tested the
        # NDArray itself, which is ambiguous for multi-element arrays; the
        # buffer is either None (momentum == 0) or a full-size array.
        if mom is not None:
            mom[:] *= self.momentum
            mom[:] += -lr * (grad + wd * weight + self.lamda \
                             * grad * grad * (weight - previous_weight))
        else:
            assert(self.momentum == 0.0)
            mom = -lr * (grad + wd * weight + self.lamda \
                         * grad * grad * (weight - previous_weight))
        previous_weight[:] = weight
        weight[:] += mom
@register
class NAG(SGD):
    """Nesterov accelerated SGD.
    This optimizer updates each weight by::
        state = momentum * state + grad + wd * weight
        weight = weight - (lr * (grad + momentum * state))
    This optimizer accepts the same arguments as :class:`.SGD`.
    """
    def __init__(self, **kwargs):
        super(NAG, self).__init__(**kwargs)
    # Applies the update rule from the class docstring in place on `weight`.
    # NOTE: the in-place arithmetic below is order-sensitive; `grad` is
    # rebound to a fresh rescaled array and then deliberately reused as
    # scratch space, so the caller's gradient is never modified.
    def update(self, index, weight, grad, state):
        assert(isinstance(weight, NDArray))
        assert(isinstance(grad, NDArray))
        self._update_count(index)
        lr = self._get_lr(index)
        wd = self._get_wd(index)
        # `grad * rescale_grad` allocates a new array (rebinds the local name).
        grad = grad * self.rescale_grad
        if self.clip_gradient is not None:
            grad = clip(grad, -self.clip_gradient, self.clip_gradient)
        if state is not None:
            mom = state
            # state <- momentum * state + (grad + wd * weight)
            mom[:] *= self.momentum
            grad += wd * weight
            mom[:] += grad
            # grad now holds (grad + wd * weight) + momentum * state
            grad[:] += self.momentum * mom
            weight[:] += -lr * grad
        else:
            # Without momentum this degenerates to plain SGD with weight decay.
            assert self.momentum == 0.0
            weight[:] += -lr * (grad + wd * weight)
@register
class SGLD(Optimizer):
    """Stochastic Gradient Riemannian Langevin Dynamics.

    Implements the optimizer from *Stochastic Gradient Riemannian Langevin
    Dynamics on the Probability Simplex*,
    https://papers.nips.cc/paper/4883-stochastic-gradient-riemannian-langevin-dynamics-on-the-probability-simplex.pdf.
    Each step takes a half gradient step and injects Gaussian noise with
    standard deviation ``sqrt(lr)``.
    """
    def __init__(self, **kwargs):
        super(SGLD, self).__init__(**kwargs)

    def create_state(self, index, weight):
        # SGLD keeps no per-weight auxiliary state.
        return None

    def update(self, index, weight, grad, state):
        assert isinstance(weight, NDArray)
        assert isinstance(grad, NDArray)
        self._update_count(index)
        lr = self._get_lr(index)
        wd = self._get_wd(index)
        scaled = grad * self.rescale_grad
        if self.clip_gradient is not None:
            scaled = clip(scaled, -self.clip_gradient, self.clip_gradient)
        weight[:] += - lr / 2 * (scaled + wd * weight) + normal(0, math.sqrt(lr),
                                                               shape=weight.shape,
                                                               ctx=weight.context)
@register # pylint: disable=invalid-name
class ccSGD(SGD):
    """[DEPRECATED] Same as `SGD`. Left here for backward compatibility."""
    # Thin alias: forwards every argument unchanged to SGD.
    def __init__(self, *args, **kwargs):
        super(ccSGD, self).__init__(*args, **kwargs)
@register
class Adam(Optimizer):
    """The Adam optimizer (Kingma & Ba, http://arxiv.org/abs/1412.6980).

    Dense update rule::

        rescaled_grad = clip(grad * rescale_grad + wd * weight, clip_gradient)
        m = beta1 * m + (1 - beta1) * rescaled_grad
        v = beta2 * v + (1 - beta2) * (rescaled_grad**2)
        w = w - learning_rate * m / (sqrt(v) + epsilon)

    When weight, grad and both state arrays are all ``row_sparse``, only the
    rows whose indices appear in the current gradient are updated. That is
    much faster for sparse models but gives slightly different semantics than
    the dense rule and may change empirical results.

    See :class:`~mxnet.ndarray.adam_update` for the kernel documentation.

    Parameters (in addition to :class:`.Optimizer`)
    ----------
    beta1 : float, optional
        Exponential decay rate for the first moment estimates.
    beta2 : float, optional
        Exponential decay rate for the second moment estimates.
    epsilon : float, optional
        Small value to avoid division by 0.
    """
    def __init__(self, learning_rate=0.001, beta1=0.9, beta2=0.999, epsilon=1e-8,
                 **kwargs):
        super(Adam, self).__init__(learning_rate=learning_rate, **kwargs)
        self.beta1 = beta1
        self.beta2 = beta2
        self.epsilon = epsilon

    def create_state(self, index, weight):
        def _moment():
            # Moment buffers mirror the weight's storage type so sparse
            # weights get sparse state.
            return zeros(weight.shape, weight.context, dtype=weight.dtype,
                         stype=weight.stype)
        return (_moment(), _moment())  # (mean, variance)

    def update(self, index, weight, grad, state):
        assert isinstance(weight, NDArray)
        assert isinstance(grad, NDArray)
        self._update_count(index)
        wd = self._get_wd(index)
        step = self._index_update_count[index]
        # Fold the bias correction of both moments into the learning rate.
        correction1 = 1. - self.beta1**step
        correction2 = 1. - self.beta2**step
        lr = self._get_lr(index) * (math.sqrt(correction2) / correction1)
        opts = {'beta1': self.beta1, 'beta2': self.beta2,
                'epsilon': self.epsilon, 'rescale_grad': self.rescale_grad}
        if self.clip_gradient:
            opts['clip_gradient'] = self.clip_gradient
        mean, variance = state
        adam_update(weight, grad, mean, variance, out=weight,
                    lr=lr, wd=wd, **opts)
@register
class AdaGrad(Optimizer):
    """AdaGrad optimizer.

    This class implements the AdaGrad optimizer described in *Adaptive Subgradient
    Methods for Online Learning and Stochastic Optimization*, and available at
    http://www.jmlr.org/papers/volume12/duchi11a/duchi11a.pdf.

    This optimizer accepts the following parameters in addition to those accepted
    by :class:`.Optimizer`.

    Parameters
    ----------
    eps: float, optional
        Small value to avoid division by 0.
    """
    def __init__(self, eps=1e-7, **kwargs):
        super(AdaGrad, self).__init__(**kwargs)
        self.float_stable_eps = eps

    def create_state(self, index, weight):
        return zeros(weight.shape, weight.context, stype=weight.stype)  # history

    def update(self, index, weight, grad, state):
        assert(isinstance(weight, NDArray))
        assert(isinstance(grad, NDArray))
        self._update_count(index)
        lr = self._get_lr(index)
        wd = self._get_wd(index)
        # Idiom fix: plain boolean expression instead of `True if ... else False`,
        # and plain truthiness tests instead of `is True` below.
        is_sparse = weight.stype == 'row_sparse' and grad.stype == 'row_sparse'
        if is_sparse:
            grad_indices_count = len(grad.indices)
        grad = grad * self.rescale_grad
        if is_sparse:
            grad_indices = grad.indices
            # Make sure that the scalar multiply still has a sparse result
            assert grad_indices_count == len(grad_indices)
        if self.clip_gradient is not None:
            grad = clip(grad, -self.clip_gradient, self.clip_gradient)
        history = state
        save_history_stype = history.stype
        if is_sparse:
            # Sparse path: only touch the rows present in the gradient, and
            # assert along the way that no op densifies the operands.
            history[:] = sparse.elemwise_add(sparse.square(grad),
                                             sparse.retain(history, grad_indices))
            history_indices = history.indices
            assert len(history_indices) == grad_indices_count
            adjusted_add = _internal._scatter_plus_scalar(history, self.float_stable_eps)
            srt = op.sqrt(adjusted_add)
            div = _internal._scatter_elemwise_div(grad, srt)
            retained_weight = sparse.retain(weight, grad.indices)
            to_add = sparse.elemwise_add(div, _internal._mul_scalar(retained_weight, float(wd)))
            assert len(to_add.indices) == grad_indices_count
            weight[:] = sparse.elemwise_add(weight, _internal._mul_scalar(to_add, float(-lr)))
            state[:] = history
            assert state.stype == save_history_stype
            assert len(history_indices) == grad_indices_count
        else:
            # Dense path: accumulate squared gradients, then scale the step.
            history[:] += square(grad)
            div = grad / sqrt(history + self.float_stable_eps)
            weight[:] += (div + weight * wd) * -lr
@register
class RMSProp(Optimizer):
    """The RMSProp optimizer.

    Two variants are supported:

    * ``centered=False`` (default): Tieleman & Hinton, 2012,
      http://www.cs.toronto.edu/~tijmen/csc321/slides/lecture_slides_lec6.pdf;
      kernel: :class:`~mxnet.ndarray.rmsprop_update`.
    * ``centered=True``: Alex Graves, 2013, eqs. (38)-(45) of
      http://arxiv.org/pdf/1308.0850v5.pdf;
      kernel: :class:`~mxnet.ndarray.rmspropalex_update`.

    Parameters (in addition to :class:`.Optimizer`)
    ----------
    gamma1: float, optional
        A decay factor of moving average over past squared gradient.
    gamma2: float, optional
        A "momentum" factor. Only used if `centered`=``True``.
    epsilon : float, optional
        Small value to avoid division by 0.
    centered : bool, optional
        Selects Graves's version (True) or Tieleman & Hinton's (False).
    clip_weights : float, optional
        Clips weights into range ``[-clip_weights, clip_weights]``.
    """
    def __init__(self, learning_rate=0.001, gamma1=0.9, gamma2=0.9,
                 epsilon=1e-8, centered=False, clip_weights=None, **kwargs):
        super(RMSProp, self).__init__(learning_rate=learning_rate, **kwargs)
        self.gamma1 = gamma1
        self.gamma2 = gamma2
        self.centered = centered
        self.epsilon = epsilon
        self.clip_weights = clip_weights

    def create_state(self, index, weight):
        def _buf():
            return zeros(weight.shape, weight.context, stype=weight.stype)
        if self.centered:
            return (_buf(), _buf(), _buf())  # n, g, delta
        return (_buf(),)  # n

    def update(self, index, weight, grad, state):
        assert isinstance(weight, NDArray)
        assert isinstance(grad, NDArray)
        self._update_count(index)
        lr = self._get_lr(index)
        wd = self._get_wd(index)
        opts = {'gamma1': self.gamma1, 'epsilon': self.epsilon,
                'rescale_grad': self.rescale_grad}
        if self.centered:
            opts['gamma2'] = self.gamma2
        if self.clip_gradient:
            opts['clip_gradient'] = self.clip_gradient
        if self.clip_weights:
            opts['clip_weights'] = self.clip_weights
        if self.centered:
            n, g, delta = state
            rmspropalex_update(weight, grad, n, g, delta, out=weight,
                               lr=lr, wd=wd, **opts)
        else:
            (n,) = state
            rmsprop_update(
                weight, grad, n, out=weight, lr=lr, wd=wd, **opts)
@register
class AdaDelta(Optimizer):
    """The AdaDelta optimizer.

    This class implements AdaDelta, an optimizer described in *ADADELTA: An adaptive
    learning rate method*, available at https://arxiv.org/abs/1212.5701.

    This optimizer accepts the following parameters in addition to those accepted
    by :class:`.Optimizer`.

    Parameters
    ----------
    rho: float
        Decay rate for both squared gradients and delta.
    epsilon : float
        Small value to avoid division by 0.
    """
    def __init__(self, rho=0.90, epsilon=1e-5, **kwargs):
        super(AdaDelta, self).__init__(**kwargs)
        self.rho = rho
        self.epsilon = epsilon

    def create_state(self, index, weight):
        return (zeros(weight.shape, weight.context),  # accumulated g
                zeros(weight.shape, weight.context))  # accumulated delta

    def update(self, index, weight, grad, state):
        assert(isinstance(weight, NDArray))
        assert(isinstance(grad, NDArray))
        wd = self._get_wd(index)
        self._update_count(index)
        # Fix: rescale out-of-place. The previous `grad *= self.rescale_grad`
        # mutated the caller's gradient array in place, unlike every sibling
        # optimizer in this file, which leaves the caller's gradient intact.
        grad = grad * self.rescale_grad
        if self.clip_gradient is not None:
            grad = clip(grad, -self.clip_gradient, self.clip_gradient)
        # accumulated g and delta initialization
        acc_g, acc_delta = state
        # update the running averages of g^2 and delta^2
        acc_g[:] = self.rho * acc_g + (1. - self.rho) * grad * grad
        current_delta = sqrt(acc_delta + self.epsilon) / sqrt(acc_g + self.epsilon) * grad
        acc_delta[:] = self.rho * acc_delta + (1. - self.rho) * current_delta * current_delta
        # update weight (weight decay applied directly to the weight)
        weight[:] -= current_delta + wd * weight
#pylint: disable=invalid-name
#pylint: disable=line-too-long
@register
class Ftrl(Optimizer):
    """The Ftrl optimizer.

    Referenced from *Ad Click Prediction: a View from the Trenches*,
    available at http://dl.acm.org/citation.cfm?id=2488200.

    eta :
    .. math::
         \\eta_{t,i} = \\frac{learningrate}{\\beta+\\sqrt{\\sum_{s=1}^tg_{s,i}^2}}

    The optimizer updates the weight by::

        rescaled_grad = clip(grad * rescale_grad, clip_gradient)
        z += rescaled_grad - (sqrt(n + rescaled_grad**2) - sqrt(n)) * weight / learning_rate
        n += rescaled_grad**2
        w = (sign(z) * lamda1 - z) / ((beta + sqrt(n)) / learning_rate + wd) * (abs(z) > lamda1)

    If the storage types of weight, state and grad are all ``row_sparse``,
    **sparse updates** are applied by::

        for row in grad.indices:
            rescaled_grad[row] = clip(grad[row] * rescale_grad, clip_gradient)
            z[row] += rescaled_grad[row] - (sqrt(n[row] + rescaled_grad[row]**2) - sqrt(n[row])) * weight[row] / learning_rate
            n[row] += rescaled_grad[row]**2
            w[row] = (sign(z[row]) * lamda1 - z[row]) / ((beta + sqrt(n[row])) / learning_rate + wd) * (abs(z[row]) > lamda1)

    The sparse update only touches z and n for rows whose gradient indices
    appear in the current batch, rather than updating all indices. This can
    greatly improve training throughput for some applications, but provides
    slightly different semantics than the dense update and may lead to
    different empirical results.

    For details of the update algorithm, see :class:`~mxnet.ndarray.ftrl_update`.

    This optimizer accepts the following parameters in addition to those
    accepted by :class:`.Optimizer`.

    Parameters
    ----------
    lamda1 : float, optional
        L1 regularization coefficient.
    learning_rate : float, optional
        The initial learning rate.
    beta : float, optional
        Per-coordinate learning rate correlation parameter.
    """

    def __init__(self, lamda1=0.01, learning_rate=0.1, beta=1, **kwargs):
        super(Ftrl, self).__init__(**kwargs)
        self.lr = learning_rate
        self.beta = beta
        self.lamda1 = lamda1

    def create_state(self, index, weight):
        """Allocate the accumulators z and n, matching weight's storage type."""
        z = zeros(weight.shape, weight.context, stype=weight.stype)
        n = zeros(weight.shape, weight.context, stype=weight.stype)
        return (z, n)

    def update(self, index, weight, grad, state):
        """Apply a single FTRL step to *weight* in place."""
        assert isinstance(weight, NDArray)
        assert isinstance(grad, NDArray)
        self._update_count(index)
        wd = self._get_wd(index)
        lr = self._get_lr(index)
        z, n = state
        call_kwargs = {'lamda1': self.lamda1, 'beta': self.beta,
                       'rescale_grad': self.rescale_grad}
        if self.clip_gradient:
            call_kwargs['clip_gradient'] = self.clip_gradient
        ftrl_update(weight, grad, z, n, out=weight,
                    lr=lr, wd=wd, **call_kwargs)
# pylint: enable=line-too-long
@register
class Adamax(Optimizer):
    """The AdaMax optimizer.

    A variant of Adam based on the infinity norm, available at
    http://arxiv.org/abs/1412.6980 Section 7.

    This optimizer accepts the following parameters in addition to those
    accepted by :class:`.Optimizer`.

    Parameters
    ----------
    beta1 : float, optional
        Exponential decay rate for the first moment estimates.
    beta2 : float, optional
        Exponential decay rate for the second moment estimates.
    """

    def __init__(self, learning_rate=0.002, beta1=0.9, beta2=0.999, **kwargs):
        super(Adamax, self).__init__(learning_rate=learning_rate, **kwargs)
        self.beta2 = beta2
        self.beta1 = beta1

    def create_state(self, index, weight):
        """Allocate the first-moment (m) and infinity-norm (u) accumulators."""
        return (zeros(weight.shape, weight.context, dtype=weight.dtype),  # mean
                zeros(weight.shape, weight.context, dtype=weight.dtype))  # variance

    def update(self, index, weight, grad, state):
        """Apply one AdaMax step to *weight* in place."""
        assert isinstance(weight, NDArray)
        assert isinstance(grad, NDArray)
        self._update_count(index)
        step = self._index_update_count[index]
        wd = self._get_wd(index)
        # Fold the first-moment bias correction into the learning rate.
        lr = self._get_lr(index) / (1. - self.beta1 ** step)
        # preprocess grad: rescale and add the (coupled) weight-decay term
        grad = grad * self.rescale_grad + wd * weight
        if self.clip_gradient is not None:
            grad = clip(grad, -self.clip_gradient, self.clip_gradient)
        # update m_t (EMA of grad) and u_t (elementwise running max)
        m_t, u_t = state
        m_t[:] = self.beta1 * m_t + (1. - self.beta1) * grad
        u_t[:] = maximum(self.beta2 * u_t, NDabs(grad))
        # update weight
        weight[:] -= lr * m_t / u_t
@register
class Nadam(Optimizer):
    """The Nesterov Adam optimizer.

    Much like Adam is essentially RMSprop with momentum,
    Nadam is Adam RMSprop with Nesterov momentum available
    at http://cs229.stanford.edu/proj2015/054_report.pdf.

    This optimizer accepts the following parameters in addition to those accepted
    by :class:`.Optimizer`.

    Parameters
    ----------
    beta1 : float, optional
        Exponential decay rate for the first moment estimates.
    beta2 : float, optional
        Exponential decay rate for the second moment estimates.
    epsilon : float, optional
        Small value to avoid division by 0.
    schedule_decay : float, optional
        Exponential decay rate for the momentum schedule
    """
    def __init__(self, learning_rate=0.001, beta1=0.9, beta2=0.999, epsilon=1e-8,
                 schedule_decay=0.004, **kwargs):
        super(Nadam, self).__init__(learning_rate=learning_rate, **kwargs)
        self.beta1 = beta1
        self.beta2 = beta2
        self.epsilon = epsilon
        self.schedule_decay = schedule_decay
        # Running product of the momentum schedule (mu_1 * ... * mu_t).
        # NOTE(review): this is shared across all weight indices while the
        # step count t is tracked per index — confirm all indices are
        # updated in lockstep.
        self.m_schedule = 1.

    def create_state(self, index, weight):
        # First- and second-moment accumulators, same shape/dtype as weight.
        return (zeros(weight.shape, weight.context, dtype=weight.dtype),  # mean
                zeros(weight.shape, weight.context, dtype=weight.dtype))  # variance

    def update(self, index, weight, grad, state):
        """Apply one Nadam step to *weight* in place."""
        assert(isinstance(weight, NDArray))
        assert(isinstance(grad, NDArray))
        self._update_count(index)
        lr = self._get_lr(index)
        wd = self._get_wd(index)
        t = self._index_update_count[index]
        # preprocess grad: rescale and fold in the (coupled) weight decay
        grad = grad * self.rescale_grad + wd * weight
        if self.clip_gradient is not None:
            grad = clip(grad, -self.clip_gradient, self.clip_gradient)
        # warming momentum schedule: mu_t and mu_{t+1}; mutates m_schedule
        momentum_t = self.beta1 * (1. - 0.5 * (pow(0.96, t * self.schedule_decay)))
        momentum_t_1 = self.beta1 * (1. - 0.5 * (pow(0.96, (t + 1) * self.schedule_decay)))
        self.m_schedule = self.m_schedule * momentum_t
        m_schedule_next = self.m_schedule * momentum_t_1
        # update biased first (m_t) and second (v_t) moment estimates
        m_t, v_t = state
        m_t[:] = self.beta1 * m_t + (1. - self.beta1) * grad
        v_t[:] = self.beta2 * v_t + (1. - self.beta2) * grad * grad
        # bias-corrected estimates
        grad_prime = grad / (1. - self.m_schedule)
        m_t_prime = m_t / (1. - m_schedule_next)
        v_t_prime = v_t / (1. - pow(self.beta2, t))
        # Nesterov-style blend of current gradient and momentum
        m_t_bar = (1. - momentum_t) * grad_prime + momentum_t_1 * m_t_prime
        # update weight
        weight[:] -= lr * m_t_bar / (sqrt(v_t_prime) + self.epsilon)
@register
class Test(Optimizer):
    """A trivial optimizer used for testing."""

    def __init__(self, **kwargs):
        super(Test, self).__init__(**kwargs)

    def create_state(self, index, weight):
        """Creates a state array that mirrors the weight."""
        return zeros(weight.shape, weight.context)

    def update(self, index, weight, grad, state):
        """Performs ``w += rescale_grad * grad`` and copies w into state."""
        weight[:] += self.rescale_grad * grad
        state[:] = weight
# Backward-compatibility wrapper for Optimizer.CreateOptimizer: expose the
# factory at module level as ``create``.
create = Optimizer.create_optimizer # pylint: disable=invalid-name
class Updater(object):
    """Updater for kvstore."""

    def __init__(self, optimizer):
        self.optimizer = optimizer
        # Per-index optimizer state, created lazily on first update.
        self.states = {}
        # Whether each state is known to live on the right device context.
        self.states_synced = {}

    def __call__(self, index, grad, weight):
        """Update *weight* given *grad* using the wrapped optimizer."""
        # ctypes may hand the key over as raw bytes; normalize to str.
        if isinstance(index, bytes):
            index = py_str(index)
        if index not in self.states:
            # First sighting of this key: build fresh optimizer state.
            self.states[index] = self.optimizer.create_state_multi_precision(
                index, weight)
        elif not self.states_synced[index]:
            # State restored via set_states(): move it onto weight's device.
            self.states[index] = self.sync_state_context(self.states[index],
                                                         weight.context)
        self.states_synced[index] = True
        self.optimizer.update_multi_precision(index, weight, grad,
                                              self.states[index])

    def sync_state_context(self, state, context):
        """Recursively copy NDArray state onto *context*."""
        if isinstance(state, NDArray):
            return state.as_in_context(context)
        if isinstance(state, (tuple, list)):
            moved = [self.sync_state_context(item, context) for item in state]
            return tuple(moved) if isinstance(state, tuple) else moved
        # Non-array state is context-free; return unchanged.
        return state

    def set_states(self, states):
        """Sets updater states (optionally bundled with the optimizer)."""
        payload = pickle.loads(states)
        if isinstance(payload, tuple) and len(payload) == 2:
            self.states, self.optimizer = payload
        else:
            self.states = payload
        # Restored states have unknown device placement; sync lazily.
        self.states_synced = dict.fromkeys(self.states.keys(), False)

    def get_states(self, dump_optimizer=False):
        """Gets updater states.

        Parameters
        ----------
        dump_optimizer : bool, default False
            Whether to also save the optimizer itself. This would also save
            optimizer information such as learning rate and weight decay
            schedules.
        """
        payload = (self.states, self.optimizer) if dump_optimizer else self.states
        return pickle.dumps(payload)
def get_updater(optimizer):
    """Return the updater callable that a kvstore needs.

    Parameters
    ----------
    optimizer: Optimizer
        The optimizer to drive the updates.

    Returns
    -------
    updater: function
        A callable ``updater(index, grad, weight)``.
    """
    updater = Updater(optimizer)
    return updater
| |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2012, Nachi Ueno, NTT MCL, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import netaddr
from oslo.config import cfg
from neutron.agent import firewall
from neutron.agent.linux import iptables_manager
from neutron.common import constants
from neutron.common import ipv6_utils
from neutron.openstack.common import log as logging
LOG = logging.getLogger(__name__)
# Shared chain that every per-port chain is reached through.
SG_CHAIN = 'sg-chain'
# Traffic directions as named in security group rules.
INGRESS_DIRECTION = 'ingress'
EGRESS_DIRECTION = 'egress'
# Pseudo-direction used for the per-port anti-spoofing chain.
SPOOF_FILTER = 'spoof-filter'
# Single-letter prefixes used when deriving per-port chain names.
CHAIN_NAME_PREFIX = {INGRESS_DIRECTION: 'i',
                     EGRESS_DIRECTION: 'o',
                     SPOOF_FILTER: 's'}
# Device names are truncated to this length (see _get_device_name below).
LINUX_DEV_LEN = 14
class IptablesFirewallDriver(firewall.FirewallDriver):
    """Driver which enforces security groups through iptables rules."""
    # Maps a rule direction to the iptables physdev match option used to
    # select bridged traffic flowing in that direction.
    IPTABLES_DIRECTION = {INGRESS_DIRECTION: 'physdev-out',
                          EGRESS_DIRECTION: 'physdev-in'}

    def __init__(self):
        self.iptables = iptables_manager.IptablesManager(
            root_helper=cfg.CONF.AGENT.root_helper,
            use_ipv6=ipv6_utils.is_enabled())
        # list of port which has security group
        self.filtered_ports = {}
        self._add_fallback_chain_v4v6()
        # While True, chain rebuilds are postponed until
        # filter_defer_apply_off() runs.
        self._defer_apply = False
        # Snapshot of filtered_ports taken when deferral was switched on.
        self._pre_defer_filtered_ports = None

    @property
    def ports(self):
        # Ports currently being filtered, keyed by device name.
        return self.filtered_ports

    def prepare_port_filter(self, port):
        """Start filtering *port*: rebuild all chains including it."""
        LOG.debug(_("Preparing device (%s) filter"), port['device'])
        self._remove_chains()
        self.filtered_ports[port['device']] = port
        # each security group has it own chains
        self._setup_chains()
        self.iptables.apply()

    def update_port_filter(self, port):
        """Re-apply filtering for an already-filtered *port*."""
        LOG.debug(_("Updating device (%s) filter"), port['device'])
        if port['device'] not in self.filtered_ports:
            LOG.info(_('Attempted to update port filter which is not '
                       'filtered %s'), port['device'])
            return
        self._remove_chains()
        self.filtered_ports[port['device']] = port
        self._setup_chains()
        self.iptables.apply()

    def remove_port_filter(self, port):
        """Stop filtering *port*; rebuild chains for the remaining ports."""
        LOG.debug(_("Removing device (%s) filter"), port['device'])
        if not self.filtered_ports.get(port['device']):
            LOG.info(_('Attempted to remove port filter which is not '
                       'filtered %r'), port)
            return
        self._remove_chains()
        self.filtered_ports.pop(port['device'], None)
        self._setup_chains()
        self.iptables.apply()

    def _setup_chains(self):
        """Setup ingress and egress chain for a port."""
        if not self._defer_apply:
            self._setup_chains_apply(self.filtered_ports)

    def _setup_chains_apply(self, ports):
        # Create the shared SG_CHAIN, then the per-port chains.
        self._add_chain_by_name_v4v6(SG_CHAIN)
        for port in ports.values():
            self._setup_chain(port, INGRESS_DIRECTION)
            self._setup_chain(port, EGRESS_DIRECTION)
            # Accept packets that made it through the per-port chains.
            self.iptables.ipv4['filter'].add_rule(SG_CHAIN, '-j ACCEPT')
            self.iptables.ipv6['filter'].add_rule(SG_CHAIN, '-j ACCEPT')

    def _remove_chains(self):
        """Remove ingress and egress chain for a port."""
        if not self._defer_apply:
            self._remove_chains_apply(self.filtered_ports)

    def _remove_chains_apply(self, ports):
        # Remove every per-port chain, then the shared SG_CHAIN.
        for port in ports.values():
            self._remove_chain(port, INGRESS_DIRECTION)
            self._remove_chain(port, EGRESS_DIRECTION)
            self._remove_chain(port, SPOOF_FILTER)
        self._remove_chain_by_name_v4v6(SG_CHAIN)

    def _setup_chain(self, port, DIRECTION):
        # Create the chain and populate it with the port's SG rules.
        self._add_chain(port, DIRECTION)
        self._add_rule_by_security_group(port, DIRECTION)

    def _remove_chain(self, port, DIRECTION):
        chain_name = self._port_chain_name(port, DIRECTION)
        self._remove_chain_by_name_v4v6(chain_name)

    def _add_fallback_chain_v4v6(self):
        # Default-deny chain reached when a port's rules fail to match.
        self.iptables.ipv4['filter'].add_chain('sg-fallback')
        self.iptables.ipv4['filter'].add_rule('sg-fallback', '-j DROP')
        self.iptables.ipv6['filter'].add_chain('sg-fallback')
        self.iptables.ipv6['filter'].add_rule('sg-fallback', '-j DROP')

    def _add_chain_by_name_v4v6(self, chain_name):
        # Create the chain in both the IPv4 and IPv6 filter tables.
        self.iptables.ipv6['filter'].add_chain(chain_name)
        self.iptables.ipv4['filter'].add_chain(chain_name)

    def _remove_chain_by_name_v4v6(self, chain_name):
        self.iptables.ipv4['filter'].ensure_remove_chain(chain_name)
        self.iptables.ipv6['filter'].ensure_remove_chain(chain_name)

    def _add_rule_to_chain_v4v6(self, chain_name, ipv4_rules, ipv6_rules):
        for rule in ipv4_rules:
            self.iptables.ipv4['filter'].add_rule(chain_name, rule)
        for rule in ipv6_rules:
            self.iptables.ipv6['filter'].add_rule(chain_name, rule)

    def _get_device_name(self, port):
        return port['device']

    def _add_chain(self, port, direction):
        chain_name = self._port_chain_name(port, direction)
        self._add_chain_by_name_v4v6(chain_name)
        # Note(nati) jump to the security group chain (SG_CHAIN).
        # This is needed because a packet may match rules of two ports when
        # both ports are on the same host; the packet is accepted at the
        # end of SG_CHAIN.
        # jump to the security group chain
        device = self._get_device_name(port)
        jump_rule = ['-m physdev --%s %s --physdev-is-bridged '
                     '-j $%s' % (self.IPTABLES_DIRECTION[direction],
                                 device,
                                 SG_CHAIN)]
        self._add_rule_to_chain_v4v6('FORWARD', jump_rule, jump_rule)
        # jump to the chain based on the device
        jump_rule = ['-m physdev --%s %s --physdev-is-bridged '
                     '-j $%s' % (self.IPTABLES_DIRECTION[direction],
                                 device,
                                 chain_name)]
        self._add_rule_to_chain_v4v6(SG_CHAIN, jump_rule, jump_rule)
        if direction == EGRESS_DIRECTION:
            # Egress rules also apply to traffic destined to the host
            # itself, which traverses INPUT rather than FORWARD.
            self._add_rule_to_chain_v4v6('INPUT', jump_rule, jump_rule)

    def _split_sgr_by_ethertype(self, security_group_rules):
        """Split rules into (IPv4 list, IPv6 list) by their ethertype."""
        ipv4_sg_rules = []
        ipv6_sg_rules = []
        for rule in security_group_rules:
            if rule.get('ethertype') == constants.IPv4:
                ipv4_sg_rules.append(rule)
            elif rule.get('ethertype') == constants.IPv6:
                if rule.get('protocol') == 'icmp':
                    # ip6tables names the protocol icmpv6.
                    rule['protocol'] = 'icmpv6'
                ipv6_sg_rules.append(rule)
        return ipv4_sg_rules, ipv6_sg_rules

    def _select_sgr_by_direction(self, port, direction):
        # Only the rules matching the requested direction.
        return [rule
                for rule in port.get('security_group_rules', [])
                if rule['direction'] == direction]

    def _setup_spoof_filter_chain(self, port, table, mac_ip_pairs, rules):
        """Build the per-port anti-spoofing chain from (mac, ip) pairs."""
        if mac_ip_pairs:
            chain_name = self._port_chain_name(port, SPOOF_FILTER)
            table.add_chain(chain_name)
            for mac, ip in mac_ip_pairs:
                if ip is None:
                    # If fixed_ips is [] this rule will be added to the end
                    # of the list after the allowed_address_pair rules.
                    table.add_rule(chain_name,
                                   '-m mac --mac-source %s -j RETURN'
                                   % mac)
                else:
                    table.add_rule(chain_name,
                                   '-m mac --mac-source %s -s %s -j RETURN'
                                   % (mac, ip))
            # Anything not matching an allowed (mac, ip) pair is spoofed.
            table.add_rule(chain_name, '-j DROP')
            rules.append('-j $%s' % chain_name)

    def _build_ipv4v6_mac_ip_list(self, mac, ip_address, mac_ipv4_pairs,
                                  mac_ipv6_pairs):
        # Sort the (mac, ip) pair into the IPv4 or IPv6 bucket.
        if netaddr.IPNetwork(ip_address).version == 4:
            mac_ipv4_pairs.append((mac, ip_address))
        else:
            mac_ipv6_pairs.append((mac, ip_address))

    def _spoofing_rule(self, port, ipv4_rules, ipv6_rules):
        #Note(nati) allow dhcp or RA packet
        ipv4_rules += ['-p udp -m udp --sport 68 --dport 67 -j RETURN']
        ipv6_rules += ['-p icmpv6 -j RETURN']
        mac_ipv4_pairs = []
        mac_ipv6_pairs = []
        if isinstance(port.get('allowed_address_pairs'), list):
            for address_pair in port['allowed_address_pairs']:
                self._build_ipv4v6_mac_ip_list(address_pair['mac_address'],
                                               address_pair['ip_address'],
                                               mac_ipv4_pairs,
                                               mac_ipv6_pairs)
        for ip in port['fixed_ips']:
            self._build_ipv4v6_mac_ip_list(port['mac_address'], ip,
                                           mac_ipv4_pairs, mac_ipv6_pairs)
        if not port['fixed_ips']:
            # No fixed IPs: only pin the source MAC address.
            mac_ipv4_pairs.append((port['mac_address'], None))
            mac_ipv6_pairs.append((port['mac_address'], None))
        self._setup_spoof_filter_chain(port, self.iptables.ipv4['filter'],
                                       mac_ipv4_pairs, ipv4_rules)
        self._setup_spoof_filter_chain(port, self.iptables.ipv6['filter'],
                                       mac_ipv6_pairs, ipv6_rules)

    def _drop_dhcp_rule(self):
        #Note(nati) Drop dhcp packet from VM
        return ['-p udp -m udp --sport 67 --dport 68 -j DROP']

    def _accept_inbound_icmpv6(self):
        # Allow router advertisements, multicast listener
        # and neighbor advertisement into the instance
        icmpv6_rules = []
        for icmp6_type in constants.ICMPV6_ALLOWED_TYPES:
            icmpv6_rules += ['-p icmpv6 --icmpv6-type %s -j RETURN' %
                             icmp6_type]
        return icmpv6_rules

    def _add_rule_by_security_group(self, port, direction):
        """Translate the port's SG rules for *direction* into iptables."""
        chain_name = self._port_chain_name(port, direction)
        # select rules for current direction
        security_group_rules = self._select_sgr_by_direction(port, direction)
        # split groups by ip version
        # for ipv4, iptables command is used
        # for ipv6, iptables6 command is used
        ipv4_sg_rules, ipv6_sg_rules = self._split_sgr_by_ethertype(
            security_group_rules)
        ipv4_iptables_rule = []
        ipv6_iptables_rule = []
        if direction == EGRESS_DIRECTION:
            # Anti-spoofing and DHCP-server blocking only apply to egress.
            self._spoofing_rule(port,
                                ipv4_iptables_rule,
                                ipv6_iptables_rule)
            ipv4_iptables_rule += self._drop_dhcp_rule()
        if direction == INGRESS_DIRECTION:
            ipv6_iptables_rule += self._accept_inbound_icmpv6()
        ipv4_iptables_rule += self._convert_sgr_to_iptables_rules(
            ipv4_sg_rules)
        ipv6_iptables_rule += self._convert_sgr_to_iptables_rules(
            ipv6_sg_rules)
        self._add_rule_to_chain_v4v6(chain_name,
                                     ipv4_iptables_rule,
                                     ipv6_iptables_rule)

    def _convert_sgr_to_iptables_rules(self, security_group_rules):
        """Render SG rule dicts as iptables rule strings."""
        iptables_rules = []
        self._drop_invalid_packets(iptables_rules)
        self._allow_established(iptables_rules)
        for rule in security_group_rules:
            # These arguments MUST be in the format iptables-save will
            # display them: source/dest, protocol, sport, dport, target
            # Otherwise the iptables_manager code won't be able to find
            # them to preserve their [packet:byte] counts.
            args = self._ip_prefix_arg('s',
                                       rule.get('source_ip_prefix'))
            args += self._ip_prefix_arg('d',
                                        rule.get('dest_ip_prefix'))
            args += self._protocol_arg(rule.get('protocol'))
            args += self._port_arg('sport',
                                   rule.get('protocol'),
                                   rule.get('source_port_range_min'),
                                   rule.get('source_port_range_max'))
            args += self._port_arg('dport',
                                   rule.get('protocol'),
                                   rule.get('port_range_min'),
                                   rule.get('port_range_max'))
            args += ['-j RETURN']
            iptables_rules += [' '.join(args)]
        # Anything unmatched falls through to the default-deny chain.
        iptables_rules += ['-j $sg-fallback']
        return iptables_rules

    def _drop_invalid_packets(self, iptables_rules):
        # Always drop invalid packets
        iptables_rules += ['-m state --state ' 'INVALID -j DROP']
        return iptables_rules

    def _allow_established(self, iptables_rules):
        # Allow established connections
        iptables_rules += ['-m state --state RELATED,ESTABLISHED -j RETURN']
        return iptables_rules

    def _protocol_arg(self, protocol):
        if not protocol:
            return []
        iptables_rule = ['-p', protocol]
        # iptables always adds '-m protocol' for udp and tcp
        if protocol in ['udp', 'tcp']:
            iptables_rule += ['-m', protocol]
        return iptables_rule

    def _port_arg(self, direction, protocol, port_range_min, port_range_max):
        # Only these protocols carry a port (or icmp type/code) argument.
        if (protocol not in ['udp', 'tcp', 'icmp', 'icmpv6']
            or not port_range_min):
            return []
        if protocol in ['icmp', 'icmpv6']:
            # Note(xuhanp): port_range_min/port_range_max represent
            # icmp type/code when protocal is icmp or icmpv6
            # icmp code can be 0 so we cannot use "if port_range_max" here
            if port_range_max is not None:
                return ['--%s-type' % protocol,
                        '%s/%s' % (port_range_min, port_range_max)]
            return ['--%s-type' % protocol, '%s' % port_range_min]
        elif port_range_min == port_range_max:
            return ['--%s' % direction, '%s' % (port_range_min,)]
        else:
            return ['-m', 'multiport',
                    '--%ss' % direction,
                    '%s:%s' % (port_range_min, port_range_max)]

    def _ip_prefix_arg(self, direction, ip_prefix):
        #NOTE (nati) : source_group_id is converted to list of source_
        # ip_prefix in server side
        if ip_prefix:
            return ['-%s' % direction, ip_prefix]
        return []

    def _port_chain_name(self, port, direction):
        # NOTE(review): port['device'][3:] appears to strip a 3-character
        # device prefix (e.g. 'tap') before building the chain name —
        # confirm against the agent's device naming.
        return iptables_manager.get_chain_name(
            '%s%s' % (CHAIN_NAME_PREFIX[direction], port['device'][3:]))

    def filter_defer_apply_on(self):
        """Start deferring chain rebuilds; remember the current port set."""
        if not self._defer_apply:
            self.iptables.defer_apply_on()
            self._pre_defer_filtered_ports = dict(self.filtered_ports)
            self._defer_apply = True

    def filter_defer_apply_off(self):
        """Stop deferring: drop pre-defer chains, rebuild current ones."""
        if self._defer_apply:
            self._defer_apply = False
            self._remove_chains_apply(self._pre_defer_filtered_ports)
            self._pre_defer_filtered_ports = None
            self._setup_chains_apply(self.filtered_ports)
            self.iptables.defer_apply_off()
class OVSHybridIptablesFirewallDriver(IptablesFirewallDriver):
    """Iptables driver variant for the OVS hybrid (tap + bridge) plugging."""

    OVS_HYBRID_TAP_PREFIX = 'tap'

    def _port_chain_name(self, port, direction):
        # Unlike the base class, use the full device id (no prefix stripping).
        prefix = CHAIN_NAME_PREFIX[direction]
        return iptables_manager.get_chain_name(prefix + port['device'])

    def _get_device_name(self, port):
        # Prepend the tap prefix and clamp to the device-name length limit.
        name = self.OVS_HYBRID_TAP_PREFIX + port['device']
        return name[:LINUX_DEV_LEN]
| |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._snapshots_operations import build_create_or_update_request_initial, build_delete_request_initial, build_get_request, build_grant_access_request_initial, build_list_by_resource_group_request, build_list_request, build_revoke_access_request_initial, build_update_request_initial
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class SnapshotsOperations:
"""SnapshotsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.compute.v2021_12_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
    async def _create_or_update_initial(
        self,
        resource_group_name: str,
        snapshot_name: str,
        snapshot: "_models.Snapshot",
        **kwargs: Any
    ) -> "_models.Snapshot":
        """Send the initial PUT of the create-or-update LRO and deserialize
        the first response (HTTP 200 or 202)."""
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.Snapshot"]
        # Status codes mapped to specific azure-core exception types.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]
        _json = self._serialize.body(snapshot, 'Snapshot')
        request = build_create_or_update_request_initial(
            subscription_id=self._config.subscription_id,
            resource_group_name=resource_group_name,
            snapshot_name=snapshot_name,
            content_type=content_type,
            json=_json,
            template_url=self._create_or_update_initial.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200, 202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        # Both 200 (done) and 202 (accepted) carry a Snapshot body.
        if response.status_code == 200:
            deserialized = self._deserialize('Snapshot', pipeline_response)
        if response.status_code == 202:
            deserialized = self._deserialize('Snapshot', pipeline_response)
        if cls:
            # Caller-supplied response hook.
            return cls(pipeline_response, deserialized, {})
        return deserialized
    _create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/snapshots/{snapshotName}'}  # type: ignore
    @distributed_trace_async
    async def begin_create_or_update(
        self,
        resource_group_name: str,
        snapshot_name: str,
        snapshot: "_models.Snapshot",
        **kwargs: Any
    ) -> AsyncLROPoller["_models.Snapshot"]:
        """Creates or updates a snapshot.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param snapshot_name: The name of the snapshot that is being created. The name can't be changed
         after the snapshot is created. Supported characters for the name are a-z, A-Z, 0-9, _ and -.
         The max name length is 80 characters.
        :type snapshot_name: str
        :param snapshot: Snapshot object supplied in the body of the Put disk operation.
        :type snapshot: ~azure.mgmt.compute.v2021_12_01.models.Snapshot
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
         this operation to not poll, or pass in your own initialized polling object for a personal
         polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either Snapshot or the result of
         cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.compute.v2021_12_01.models.Snapshot]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]
        polling = kwargs.pop('polling', True)  # type: Union[bool, azure.core.polling.AsyncPollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.Snapshot"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        if cont_token is None:
            # No saved poller state: fire the initial PUT. cls=lambda keeps
            # the raw PipelineResponse for the poller to work with.
            raw_result = await self._create_or_update_initial(
                resource_group_name=resource_group_name,
                snapshot_name=snapshot_name,
                snapshot=snapshot,
                content_type=content_type,
                cls=lambda x,y,z: x,
                **kwargs
            )
        kwargs.pop('error_map', None)

        def get_long_running_output(pipeline_response):
            # Deserialize the terminal LRO response into a Snapshot.
            response = pipeline_response.http_response
            deserialized = self._deserialize('Snapshot', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized
        if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            # Resume a previously saved poller.
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/snapshots/{snapshotName}'}  # type: ignore
    async def _update_initial(
        self,
        resource_group_name: str,
        snapshot_name: str,
        snapshot: "_models.SnapshotUpdate",
        **kwargs: Any
    ) -> "_models.Snapshot":
        """Send the initial PATCH of the update LRO and deserialize the
        first response (HTTP 200 or 202)."""
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.Snapshot"]
        # Status codes mapped to specific azure-core exception types.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]
        _json = self._serialize.body(snapshot, 'SnapshotUpdate')
        request = build_update_request_initial(
            subscription_id=self._config.subscription_id,
            resource_group_name=resource_group_name,
            snapshot_name=snapshot_name,
            content_type=content_type,
            json=_json,
            template_url=self._update_initial.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200, 202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        # Both 200 (done) and 202 (accepted) carry a Snapshot body.
        if response.status_code == 200:
            deserialized = self._deserialize('Snapshot', pipeline_response)
        if response.status_code == 202:
            deserialized = self._deserialize('Snapshot', pipeline_response)
        if cls:
            # Caller-supplied response hook.
            return cls(pipeline_response, deserialized, {})
        return deserialized
    _update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/snapshots/{snapshotName}'}  # type: ignore
    @distributed_trace_async
    async def begin_update(
        self,
        resource_group_name: str,
        snapshot_name: str,
        snapshot: "_models.SnapshotUpdate",
        **kwargs: Any
    ) -> AsyncLROPoller["_models.Snapshot"]:
        """Updates (patches) a snapshot.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param snapshot_name: The name of the snapshot that is being created. The name can't be changed
         after the snapshot is created. Supported characters for the name are a-z, A-Z, 0-9, _ and -.
         The max name length is 80 characters.
        :type snapshot_name: str
        :param snapshot: Snapshot object supplied in the body of the Patch snapshot operation.
        :type snapshot: ~azure.mgmt.compute.v2021_12_01.models.SnapshotUpdate
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
         this operation to not poll, or pass in your own initialized polling object for a personal
         polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either Snapshot or the result of
         cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.compute.v2021_12_01.models.Snapshot]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        # Pop client-only keyword arguments before they reach the pipeline.
        content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]
        polling = kwargs.pop('polling', True)  # type: Union[bool, azure.core.polling.AsyncPollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.Snapshot"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        if cont_token is None:
            # No saved state: issue the initial PATCH. The lambda cls keeps the
            # raw pipeline response so the poller can drive the LRO itself.
            raw_result = await self._update_initial(
                resource_group_name=resource_group_name,
                snapshot_name=snapshot_name,
                snapshot=snapshot,
                content_type=content_type,
                cls=lambda x,y,z: x,
                **kwargs
            )
        kwargs.pop('error_map', None)

        def get_long_running_output(pipeline_response):
            # Deserialize the terminal response of the long-running operation.
            response = pipeline_response.http_response
            deserialized = self._deserialize('Snapshot', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized

        if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            # Resume a previously saved poller from its continuation token.
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/snapshots/{snapshotName}'}  # type: ignore
@distributed_trace_async
async def get(
self,
resource_group_name: str,
snapshot_name: str,
**kwargs: Any
) -> "_models.Snapshot":
"""Gets information about a snapshot.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param snapshot_name: The name of the snapshot that is being created. The name can't be changed
after the snapshot is created. Supported characters for the name are a-z, A-Z, 0-9, _ and -.
The max name length is 80 characters.
:type snapshot_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Snapshot, or the result of cls(response)
:rtype: ~azure.mgmt.compute.v2021_12_01.models.Snapshot
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.Snapshot"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
snapshot_name=snapshot_name,
template_url=self.get.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('Snapshot', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/snapshots/{snapshotName}'} # type: ignore
    async def _delete_initial(
        self,
        resource_group_name: str,
        snapshot_name: str,
        **kwargs: Any
    ) -> None:
        """Send the initial DELETE request of the snapshot-delete LRO.

        Accepts 200, 202 or 204; anything else raises HttpResponseError.
        Returns None unless a custom ``cls`` callback is supplied.
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        request = build_delete_request_initial(
            subscription_id=self._config.subscription_id,
            resource_group_name=resource_group_name,
            snapshot_name=snapshot_name,
            template_url=self._delete_initial.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)

        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200, 202, 204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        if cls:
            return cls(pipeline_response, None, {})
    _delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/snapshots/{snapshotName}'}  # type: ignore
    @distributed_trace_async
    async def begin_delete(
        self,
        resource_group_name: str,
        snapshot_name: str,
        **kwargs: Any
    ) -> AsyncLROPoller[None]:
        """Deletes a snapshot.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param snapshot_name: The name of the snapshot that is being created. The name can't be changed
         after the snapshot is created. Supported characters for the name are a-z, A-Z, 0-9, _ and -.
         The max name length is 80 characters.
        :type snapshot_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
         this operation to not poll, or pass in your own initialized polling object for a personal
         polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[None]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, azure.core.polling.AsyncPollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        if cont_token is None:
            # No saved state: start the LRO with the initial DELETE call.
            raw_result = await self._delete_initial(
                resource_group_name=resource_group_name,
                snapshot_name=snapshot_name,
                cls=lambda x,y,z: x,
                **kwargs
            )
        kwargs.pop('error_map', None)

        def get_long_running_output(pipeline_response):
            # Delete has no body: the result is None unless cls is given.
            if cls:
                return cls(pipeline_response, None, {})

        if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            # Resume a previously saved poller from its continuation token.
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/snapshots/{snapshotName}'}  # type: ignore
    @distributed_trace
    def list_by_resource_group(
        self,
        resource_group_name: str,
        **kwargs: Any
    ) -> AsyncIterable["_models.SnapshotList"]:
        """Lists snapshots under a resource group.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either SnapshotList or the result of cls(response)
        :rtype:
         ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.compute.v2021_12_01.models.SnapshotList]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.SnapshotList"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        def prepare_request(next_link=None):
            # First page uses the operation's templated URL; later pages GET
            # the service-provided next_link verbatim.
            if not next_link:
                request = build_list_by_resource_group_request(
                    subscription_id=self._config.subscription_id,
                    resource_group_name=resource_group_name,
                    template_url=self.list_by_resource_group.metadata['url'],
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
            else:
                request = build_list_by_resource_group_request(
                    subscription_id=self._config.subscription_id,
                    resource_group_name=resource_group_name,
                    template_url=next_link,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
                request.method = "GET"
            return request

        async def extract_data(pipeline_response):
            # Split one page into (link to next page, async iterable of items).
            deserialized = self._deserialize("SnapshotList", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)

        async def get_next(next_link=None):
            # Fetch one page; only 200 is a valid paging response.
            request = prepare_request(next_link)
            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
            return pipeline_response

        return AsyncItemPaged(
            get_next, extract_data
        )
    list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/snapshots'}  # type: ignore
    @distributed_trace
    def list(
        self,
        **kwargs: Any
    ) -> AsyncIterable["_models.SnapshotList"]:
        """Lists snapshots under a subscription.

        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either SnapshotList or the result of cls(response)
        :rtype:
         ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.compute.v2021_12_01.models.SnapshotList]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.SnapshotList"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        def prepare_request(next_link=None):
            # First page uses the operation's templated URL; later pages GET
            # the service-provided next_link verbatim.
            if not next_link:
                request = build_list_request(
                    subscription_id=self._config.subscription_id,
                    template_url=self.list.metadata['url'],
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
            else:
                request = build_list_request(
                    subscription_id=self._config.subscription_id,
                    template_url=next_link,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
                request.method = "GET"
            return request

        async def extract_data(pipeline_response):
            # Split one page into (link to next page, async iterable of items).
            deserialized = self._deserialize("SnapshotList", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)

        async def get_next(next_link=None):
            # Fetch one page; only 200 is a valid paging response.
            request = prepare_request(next_link)
            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
            return pipeline_response

        return AsyncItemPaged(
            get_next, extract_data
        )
    list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/snapshots'}  # type: ignore
    async def _grant_access_initial(
        self,
        resource_group_name: str,
        snapshot_name: str,
        grant_access_data: "_models.GrantAccessData",
        **kwargs: Any
    ) -> Optional["_models.AccessUri"]:
        """Send the initial POST request of the grant-access LRO.

        Returns the deserialized AccessUri on 200; returns None on 202
        (the final URI arrives only when polling completes).
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[Optional["_models.AccessUri"]]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]

        _json = self._serialize.body(grant_access_data, 'GrantAccessData')
        request = build_grant_access_request_initial(
            subscription_id=self._config.subscription_id,
            resource_group_name=resource_group_name,
            snapshot_name=snapshot_name,
            content_type=content_type,
            json=_json,
            template_url=self._grant_access_initial.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)

        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200, 202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('AccessUri', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    _grant_access_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/snapshots/{snapshotName}/beginGetAccess'}  # type: ignore
    @distributed_trace_async
    async def begin_grant_access(
        self,
        resource_group_name: str,
        snapshot_name: str,
        grant_access_data: "_models.GrantAccessData",
        **kwargs: Any
    ) -> AsyncLROPoller["_models.AccessUri"]:
        """Grants access to a snapshot.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param snapshot_name: The name of the snapshot that is being created. The name can't be changed
         after the snapshot is created. Supported characters for the name are a-z, A-Z, 0-9, _ and -.
         The max name length is 80 characters.
        :type snapshot_name: str
        :param grant_access_data: Access data object supplied in the body of the get snapshot access
         operation.
        :type grant_access_data: ~azure.mgmt.compute.v2021_12_01.models.GrantAccessData
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
         this operation to not poll, or pass in your own initialized polling object for a personal
         polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either AccessUri or the result of
         cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.compute.v2021_12_01.models.AccessUri]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]
        polling = kwargs.pop('polling', True)  # type: Union[bool, azure.core.polling.AsyncPollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.AccessUri"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        if cont_token is None:
            # No saved state: issue the initial POST; keep the raw response
            # for the poller (the lambda cls returns the pipeline response).
            raw_result = await self._grant_access_initial(
                resource_group_name=resource_group_name,
                snapshot_name=snapshot_name,
                grant_access_data=grant_access_data,
                content_type=content_type,
                cls=lambda x,y,z: x,
                **kwargs
            )
        kwargs.pop('error_map', None)

        def get_long_running_output(pipeline_response):
            # Deserialize the terminal response of the long-running operation.
            response = pipeline_response.http_response
            deserialized = self._deserialize('AccessUri', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized

        # This LRO resolves its final result via the Location header.
        if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_grant_access.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/snapshots/{snapshotName}/beginGetAccess'}  # type: ignore
    async def _revoke_access_initial(
        self,
        resource_group_name: str,
        snapshot_name: str,
        **kwargs: Any
    ) -> None:
        """Send the initial POST request of the revoke-access LRO.

        Accepts 200 or 202; anything else raises HttpResponseError.
        Returns None unless a custom ``cls`` callback is supplied.
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        request = build_revoke_access_request_initial(
            subscription_id=self._config.subscription_id,
            resource_group_name=resource_group_name,
            snapshot_name=snapshot_name,
            template_url=self._revoke_access_initial.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)

        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200, 202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        if cls:
            return cls(pipeline_response, None, {})
    _revoke_access_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/snapshots/{snapshotName}/endGetAccess'}  # type: ignore
    @distributed_trace_async
    async def begin_revoke_access(
        self,
        resource_group_name: str,
        snapshot_name: str,
        **kwargs: Any
    ) -> AsyncLROPoller[None]:
        """Revokes access to a snapshot.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param snapshot_name: The name of the snapshot that is being created. The name can't be changed
         after the snapshot is created. Supported characters for the name are a-z, A-Z, 0-9, _ and -.
         The max name length is 80 characters.
        :type snapshot_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
         this operation to not poll, or pass in your own initialized polling object for a personal
         polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[None]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, azure.core.polling.AsyncPollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        if cont_token is None:
            # No saved state: start the LRO with the initial POST call.
            raw_result = await self._revoke_access_initial(
                resource_group_name=resource_group_name,
                snapshot_name=snapshot_name,
                cls=lambda x,y,z: x,
                **kwargs
            )
        kwargs.pop('error_map', None)

        def get_long_running_output(pipeline_response):
            # Revoke has no body: the result is None unless cls is given.
            if cls:
                return cls(pipeline_response, None, {})

        # This LRO resolves its final result via the Location header.
        if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_revoke_access.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/snapshots/{snapshotName}/endGetAccess'}  # type: ignore
# ---- end of generated Azure operations; an unrelated gdata test module follows ----
#!/usr/bin/env python
#
# Copyright (C) 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This module is used for version 2 of the Google Data APIs.
__author__ = 'j.s@google.com (Jeff Scudder)'
import unittest
try:
from xml.etree import cElementTree as ElementTree
except ImportError:
try:
import cElementTree as ElementTree
except ImportError:
try:
from xml.etree import ElementTree
except ImportError:
from elementtree import ElementTree
import atom.core
import gdata.test_config as conf
# Sample XML fixtures used throughout the tests below.
SAMPLE_XML = ('<outer xmlns="http://example.com/xml/1" '
              'xmlns:two="http://example.com/xml/2">'
              '<inner x="123"/>'
              '<inner x="234" y="abc"/>'
              '<inner>'
              '<two:nested>Some Test</two:nested>'
              '<nested>Different Namespace</nested>'
              '</inner>'
              '<other two:z="true"></other>'
              '</outer>')
NO_NAMESPACE_XML = ('<foo bar="123"><baz>Baz Text!</baz></foo>')
V1_XML = ('<able xmlns="http://example.com/1" '
          'xmlns:ex="http://example.com/ex/1">'
          '<baker foo="42"/>'
          '<ex:charlie>Greetings!</ex:charlie>'
          # Self-closing fix: the <same> element was previously left unclosed,
          # making the fixture malformed XML.
          '<same xmlns="http://example.com/s" x="true"/>'
          '</able>')
V2_XML = ('<alpha xmlns="http://example.com/2" '
          'xmlns:ex="http://example.com/ex/2">'
          '<bravo bar="42"/>'
          '<ex:charlie>Greetings!</ex:charlie>'
          # Self-closing fix, as in V1_XML above.
          '<same xmlns="http://example.com/s" x="true"/>'
          '</alpha>')
class Child(atom.core.XmlElement):
  # Versioned qname: the element name differs between API versions 1 and 2.
  _qname = ('{http://example.com/1}child', '{http://example.com/2}child')
class Foo(atom.core.XmlElement):
  # Unversioned, namespace-less element.
  _qname = 'foo'
class Example(atom.core.XmlElement):
  # Root element with a single Child, repeated Foo children, a 'tag'
  # attribute, and an attribute whose XML name varies by version.
  _qname = '{http://example.com}foo'
  child = Child
  foos = [Foo]
  tag = 'tag'
  versioned_attr = ('attr', '{http://new_ns}attr')
# Example XmlElement subclass declarations.
class Inner(atom.core.XmlElement):
  # Maps the 'x' attribute of <inner> onto the my_x member.
  _qname = '{http://example.com/xml/1}inner'
  my_x = 'x'
class Outer(atom.core.XmlElement):
  # Collects repeated <inner> children into the innards list.
  _qname = '{http://example.com/xml/1}outer'
  innards = [Inner]
class XmlElementTest(unittest.TestCase):
  """Tests XmlElement construction, parse rules, and markup round-tripping.

  Uses assertTrue instead of the deprecated assert_ alias (removed in
  Python 3.12); the assertions themselves are unchanged.
  """

  def testGetQName(self):
    class Unversioned(atom.core.XmlElement):
      _qname = '{http://example.com}foo'

    class Versioned(atom.core.XmlElement):
      _qname = ('{http://example.com/1}foo', '{http://example.com/2}foo')

    # An unversioned qname applies to every version; a tuple is selected by
    # the requested version number.
    self.assertTrue(
        atom.core._get_qname(Unversioned, 1) == '{http://example.com}foo')
    self.assertTrue(
        atom.core._get_qname(Unversioned, 2) == '{http://example.com}foo')
    self.assertTrue(
        atom.core._get_qname(Versioned, 1) == '{http://example.com/1}foo')
    self.assertTrue(
        atom.core._get_qname(Versioned, 2) == '{http://example.com/2}foo')

  def testConstructor(self):
    # A freshly constructed element has no children, attributes, or text.
    e = Example()
    self.assertTrue(e.child is None)
    self.assertTrue(e.tag is None)
    self.assertTrue(e.versioned_attr is None)
    self.assertTrue(e.foos == [])
    self.assertTrue(e.text is None)

  def testGetRules(self):
    rules1 = Example._get_rules(1)
    self.assertTrue(rules1[0] == '{http://example.com}foo')
    self.assertTrue(rules1[1]['{http://example.com/1}child'] == ('child', Child,
                                                                 False))
    self.assertTrue(rules1[1]['foo'] == ('foos', Foo, True))
    self.assertTrue(rules1[2]['tag'] == 'tag')
    self.assertTrue(rules1[2]['attr'] == 'versioned_attr')
    # Check to make sure we don't recalculate the rules.
    self.assertTrue(rules1 == Example._get_rules(1))
    rules2 = Example._get_rules(2)
    self.assertTrue(rules2[0] == '{http://example.com}foo')
    self.assertTrue(rules2[1]['{http://example.com/2}child'] == ('child', Child,
                                                                 False))
    self.assertTrue(rules2[1]['foo'] == ('foos', Foo, True))
    self.assertTrue(rules2[2]['tag'] == 'tag')
    self.assertTrue(rules2[2]['{http://new_ns}attr'] == 'versioned_attr')

  def testGetElements(self):
    e = Example()
    e.child = Child()
    e.child.text = 'child text'
    e.foos.append(Foo())
    e.foos[0].text = 'foo1'
    e.foos.append(Foo())
    e.foos[1].text = 'foo2'
    e._other_elements.append(atom.core.XmlElement())
    e._other_elements[0]._qname = 'bar'
    e._other_elements[0].text = 'other1'
    e._other_elements.append(atom.core.XmlElement())
    e._other_elements[1]._qname = 'child'
    e._other_elements[1].text = 'other2'
    self.contains_expected_elements(e.get_elements(),
        ['foo1', 'foo2', 'child text', 'other1', 'other2'])
    self.contains_expected_elements(e.get_elements('child'),
        ['child text', 'other2'])
    self.contains_expected_elements(
        e.get_elements('child', 'http://example.com/1'), ['child text'])
    self.contains_expected_elements(
        e.get_elements('child', 'http://example.com/2'), [])
    self.contains_expected_elements(
        e.get_elements('child', 'http://example.com/2', 2), ['child text'])
    self.contains_expected_elements(
        e.get_elements('child', 'http://example.com/1', 2), [])
    # Versions above the highest declared fall back to the highest.
    self.contains_expected_elements(
        e.get_elements('child', 'http://example.com/2', 3), ['child text'])
    self.contains_expected_elements(e.get_elements('bar'), ['other1'])
    self.contains_expected_elements(e.get_elements('bar', version=2),
        ['other1'])
    self.contains_expected_elements(e.get_elements('bar', version=3),
        ['other1'])

  def contains_expected_elements(self, elements, expected_texts):
    # Helper: element count matches and every element's text is expected.
    self.assertTrue(len(elements) == len(expected_texts))
    for element in elements:
      self.assertTrue(element.text in expected_texts)

  def testConstructorKwargs(self):
    e = Example('hello', child=Child('world'), versioned_attr='1')
    self.assertTrue(e.text == 'hello')
    self.assertTrue(e.child.text == 'world')
    self.assertTrue(e.versioned_attr == '1')
    self.assertTrue(e.foos == [])
    self.assertTrue(e.tag is None)
    e = Example(foos=[Foo('1', ignored=1), Foo(text='2')], tag='ok')
    self.assertTrue(e.text is None)
    self.assertTrue(e.child is None)
    self.assertTrue(e.versioned_attr is None)
    self.assertTrue(len(e.foos) == 2)
    self.assertTrue(e.foos[0].text == '1')
    self.assertTrue(e.foos[1].text == '2')
    # Unrecognized keyword arguments are silently dropped.
    self.assertTrue('ignored' not in e.foos[0].__dict__)
    self.assertTrue(e.tag == 'ok')

  def testParseBasicXmlElement(self):
    element = atom.core.xml_element_from_string(SAMPLE_XML,
                                                atom.core.XmlElement)
    inners = element.get_elements('inner')
    self.assertTrue(len(inners) == 3)
    self.assertTrue(inners[0].get_attributes('x')[0].value == '123')
    self.assertTrue(inners[0].get_attributes('y') == [])
    self.assertTrue(inners[1].get_attributes('x')[0].value == '234')
    self.assertTrue(inners[1].get_attributes('y')[0].value == 'abc')
    self.assertTrue(inners[2].get_attributes('x') == [])
    inners = element.get_elements('inner', 'http://example.com/xml/1')
    self.assertTrue(len(inners) == 3)
    inners = element.get_elements(None, 'http://example.com/xml/1')
    self.assertTrue(len(inners) == 4)
    inners = element.get_elements()
    self.assertTrue(len(inners) == 4)
    inners = element.get_elements('other')
    self.assertTrue(len(inners) == 1)
    self.assertTrue(inners[0].get_attributes(
        'z', 'http://example.com/xml/2')[0].value == 'true')
    inners = element.get_elements('missing')
    self.assertTrue(len(inners) == 0)

  def testBasicXmlElementPreservesMarkup(self):
    element = atom.core.xml_element_from_string(SAMPLE_XML,
                                                atom.core.XmlElement)
    tree1 = ElementTree.fromstring(SAMPLE_XML)
    tree2 = ElementTree.fromstring(element.to_string())
    self.assert_trees_similar(tree1, tree2)

  def testSchemaParse(self):
    outer = atom.core.xml_element_from_string(SAMPLE_XML, Outer)
    self.assertTrue(isinstance(outer.innards, list))
    self.assertTrue(len(outer.innards) == 3)
    self.assertTrue(outer.innards[0].my_x == '123')

  def testSchemaParsePreservesMarkup(self):
    outer = atom.core.xml_element_from_string(SAMPLE_XML, Outer)
    tree1 = ElementTree.fromstring(SAMPLE_XML)
    tree2 = ElementTree.fromstring(outer.to_string())
    self.assert_trees_similar(tree1, tree2)
    found_x_and_y = False
    found_x_123 = False
    child = tree1.find('{http://example.com/xml/1}inner')
    matching_children = tree2.findall(child.tag)
    for match in matching_children:
      if 'y' in match.attrib and match.attrib['y'] == 'abc':
        if match.attrib['x'] == '234':
          found_x_and_y = True
        self.assertTrue(match.attrib['x'] == '234')
      if 'x' in match.attrib and match.attrib['x'] == '123':
        self.assertTrue('y' not in match.attrib)
        found_x_123 = True
    self.assertTrue(found_x_and_y)
    self.assertTrue(found_x_123)

  def assert_trees_similar(self, a, b):
    """Compares two XML trees for approximate matching."""
    for child in a:
      self.assertTrue(len(a.findall(child.tag)) == len(b.findall(child.tag)))
    for child in b:
      self.assertTrue(len(a.findall(child.tag)) == len(b.findall(child.tag)))
    self.assertTrue(len(a) == len(b))
    self.assertTrue(a.text == b.text)
    self.assertTrue(a.attrib == b.attrib)
class UtilityFunctionTest(unittest.TestCase):
  """Tests the qname-matching helper in atom.core.

  Uses assertTrue/assertFalse instead of the deprecated assert_ alias
  (removed in Python 3.12) and the noisy "== False" comparisons.
  """

  def testMatchQnames(self):
    # A None tag or namespace acts as a wildcard; '' matches no namespace.
    self.assertTrue(atom.core._qname_matches(
        'foo', 'http://example.com', '{http://example.com}foo'))
    self.assertTrue(atom.core._qname_matches(
        None, None, '{http://example.com}foo'))
    self.assertTrue(atom.core._qname_matches(
        None, None, 'foo'))
    self.assertTrue(atom.core._qname_matches(
        None, None, None))
    self.assertTrue(atom.core._qname_matches(
        None, None, '{http://example.com}'))
    self.assertTrue(atom.core._qname_matches(
        'foo', None, '{http://example.com}foo'))
    self.assertTrue(atom.core._qname_matches(
        None, 'http://example.com', '{http://example.com}foo'))
    self.assertTrue(atom.core._qname_matches(
        None, '', 'foo'))
    self.assertTrue(atom.core._qname_matches(
        'foo', '', 'foo'))
    self.assertTrue(atom.core._qname_matches(
        'foo', '', 'foo'))
    self.assertFalse(atom.core._qname_matches(
        'foo', 'http://google.com', '{http://example.com}foo'))
    self.assertFalse(atom.core._qname_matches(
        'foo', 'http://example.com', '{http://example.com}bar'))
    self.assertFalse(atom.core._qname_matches(
        'foo', 'http://example.com', '{http://google.com}foo'))
    self.assertFalse(atom.core._qname_matches(
        'bar', 'http://example.com', '{http://google.com}foo'))
    self.assertFalse(atom.core._qname_matches(
        'foo', None, '{http://example.com}bar'))
    self.assertFalse(atom.core._qname_matches(
        None, 'http://google.com', '{http://example.com}foo'))
    self.assertFalse(atom.core._qname_matches(
        None, '', '{http://example.com}foo'))
    self.assertFalse(atom.core._qname_matches(
        'foo', '', 'bar'))
class Chars(atom.core.XmlElement):
  # Element with two attributes; XML attribute 'a' is exposed as 'alpha'.
  _qname = u'{http://example.com/}chars'
  y = 'y'
  alpha = 'a'
class Strs(atom.core.XmlElement):
  # Container of repeated Chars children; XML attribute 'd' exposed as delta.
  _qname = '{http://example.com/}strs'
  chars = [Chars]
  delta = u'd'
def parse(string):
  """Parse *string* into a generic atom.core.XmlElement tree."""
  return atom.core.xml_element_from_string(string, atom.core.XmlElement)
def create(tag, string):
  """Return a bare XmlElement whose qname is *tag* and whose text is *string*."""
  node = atom.core.XmlElement(text=string)
  node._qname = tag
  return node
class CharacterEncodingTest(unittest.TestCase):
    """Round-trips unicode, UTF-8 and UTF-16 input through parse()/create().

    NOTE(review): Python 2 only - these tests rely on the ``unicode``
    builtin, byte-string ``str.decode``, and byte-string comparisons.
    """

    def testUnicodeInputString(self):
        # Test parsing the inner text.
        self.assertEqual(parse(u'<x>δ</x>').text, u'\u03b4')
        self.assertEqual(parse(u'<x>\u03b4</x>').text, u'\u03b4')
        # Test output valid XML.
        self.assertEqual(parse(u'<x>δ</x>').to_string(), '<x>δ</x>')
        self.assertEqual(parse(u'<x>\u03b4</x>').to_string(), '<x>δ</x>')
        # Test setting the inner text and output valid XML.
        e = create(u'x', u'\u03b4')
        self.assertEqual(e.to_string(), '<x>δ</x>')
        self.assertEqual(e.text, u'\u03b4')
        self.assert_(isinstance(e.text, unicode))
        self.assertEqual(create(u'x', '\xce\xb4'.decode('utf-8')).to_string(),
                         '<x>δ</x>')

    def testUnicodeTagsAndAttributes(self):
        # Begin with test to show underlying ElementTree behavior.
        t = ElementTree.fromstring(u'<del\u03b4ta>test</del\u03b4ta>'.encode('utf-8'))
        self.assertEqual(t.tag, u'del\u03b4ta')
        self.assertEqual(parse(u'<\u03b4elta>test</\u03b4elta>')._qname,
                         u'\u03b4elta')
        # Test unicode attribute names and values.
        t = ElementTree.fromstring(u'<x \u03b4a="\u03b4b" />'.encode('utf-8'))
        self.assertEqual(t.attrib, {u'\u03b4a': u'\u03b4b'})
        self.assertEqual(parse(u'<x \u03b4a="\u03b4b" />').get_attributes(
            u'\u03b4a')[0].value, u'\u03b4b')
        x = create('x', None)
        x._other_attributes[u'a'] = u'\u03b4elta'
        self.assert_(x.to_string().startswith('<x a="δelta"'))

    def testUtf8InputString(self):
        # Test parsing inner text.
        self.assertEqual(parse('<x>δ</x>').text, u'\u03b4')
        self.assertEqual(parse(u'<x>\u03b4</x>'.encode('utf-8')).text, u'\u03b4')
        self.assertEqual(parse('<x>\xce\xb4</x>').text, u'\u03b4')
        # Test output valid XML.
        self.assertEqual(parse('<x>δ</x>').to_string(), '<x>δ</x>')
        self.assertEqual(parse(u'<x>\u03b4</x>'.encode('utf-8')).to_string(),
                         '<x>δ</x>')
        self.assertEqual(parse('<x>\xce\xb4</x>').to_string(), '<x>δ</x>')
        # Test setting the inner text and output valid XML.
        e = create('x', '\xce\xb4')
        self.assertEqual(e.to_string(), '<x>δ</x>')
        # Don't change the encoding until the we convert to an XML string.
        self.assertEqual(e.text, '\xce\xb4')
        self.assert_(isinstance(e.text, str))
        self.assert_(isinstance(e.to_string(), str))
        self.assertEqual(create('x', u'\u03b4'.encode('utf-8')).to_string(),
                         '<x>δ</x>')
        # Test attributes and values with UTF-8 inputs.
        self.assertEqual(parse('<x \xce\xb4a="\xce\xb4b" />').get_attributes(
            u'\u03b4a')[0].value, u'\u03b4b')

    def testUtf8TagsAndAttributes(self):
        self.assertEqual(
            parse(u'<\u03b4elta>test</\u03b4elta>'.encode('utf-8'))._qname,
            u'\u03b4elta')
        self.assertEqual(parse('<\xce\xb4elta>test</\xce\xb4elta>')._qname,
                         u'\u03b4elta')
        # Test an element with UTF-8 in the attribute value.
        x = create('x', None)
        x._other_attributes[u'a'] = '\xce\xb4'
        self.assert_(x.to_string(encoding='UTF-8').startswith('<x a="δ"'))
        self.assert_(x.to_string().startswith('<x a="δ"'))

    def testOtherEncodingOnInputString(self):
        BIG_ENDIAN = 0
        LITTLE_ENDIAN = 1
        # Test parsing inner text.
        self.assertEqual(parse(u'<x>\u03b4</x>'.encode('utf-16')).text, u'\u03b4')
        # Test output valid XML.
        self.assertEqual(parse(u'<x>\u03b4</x>'.encode('utf-16')).to_string(),
                         '<x>δ</x>')
        # Test setting the inner text and output valid XML.
        e = create('x', u'\u03b4'.encode('utf-16'))
        self.assertEqual(e.to_string(encoding='utf-16'), '<x>δ</x>')
        # Don't change the encoding until the we convert to an XML string.
        # Allow either little-endian or big-endian byte orderings.
        self.assert_(e.text in ['\xff\xfe\xb4\x03', '\xfe\xff\x03\xb4'])
        endianness = LITTLE_ENDIAN
        if e.text == '\xfe\xff\x03\xb4':
            endianness = BIG_ENDIAN
        self.assert_(isinstance(e.text, str))
        self.assert_(isinstance(e.to_string(encoding='utf-16'), str))
        if endianness == LITTLE_ENDIAN:
            self.assertEqual(
                create('x', '\xff\xfe\xb4\x03').to_string(encoding='utf-16'),
                '<x>δ</x>')
        else:
            self.assertEqual(
                create('x', '\xfe\xff\x03\xb4').to_string(encoding='utf-16'),
                '<x>δ</x>')

    def testOtherEncodingInTagsAndAttributes(self):
        self.assertEqual(
            parse(u'<\u03b4elta>test</\u03b4elta>'.encode('utf-16'))._qname,
            u'\u03b4elta')
        # Test an element with UTF-16 in the attribute value.
        x = create('x', None)
        x._other_attributes[u'a'] = u'\u03b4'.encode('utf-16')
        self.assert_(x.to_string(encoding='UTF-16').startswith('<x a="δ"'))
def suite():
    """Return the suite of all test cases defined in this module."""
    return conf.build_suite([XmlElementTest, UtilityFunctionTest,
                             CharacterEncodingTest])
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| |
"""
A collection of hashing and encoding functions
"""
import base64
import hashlib
import hmac
import io
import salt.exceptions
import salt.utils.files
import salt.utils.hashutils
import salt.utils.stringutils
def digest(instr, checksum="md5"):
    """
    Return a checksum digest for a string

    instr
        A string

    checksum : ``md5``
        The hashing algorithm to use to generate checksums. Valid options: md5,
        sha256, sha512.

    CLI Example:

    .. code-block:: bash

        salt '*' hashutil.digest 'get salted'
    """
    # Map each supported algorithm name to its loader-provided digest
    # function; unknown names fall through to an explicit error below.
    supported = {
        "md5": __salt__["hashutil.md5_digest"],
        "sha256": __salt__["hashutil.sha256_digest"],
        "sha512": __salt__["hashutil.sha512_digest"],
    }
    selected = supported.get(checksum)
    if selected is None:
        raise salt.exceptions.CommandExecutionError(
            "Hash func '{}' is not supported.".format(checksum)
        )
    return selected(instr)
def digest_file(infile, checksum="md5"):
    """
    Return a checksum digest for a file

    infile
        A file path

    checksum : ``md5``
        The hashing algorithm to use to generate checksums. Wraps the
        :py:func:`hashutil.digest <salt.modules.hashutil.digest>` execution
        function.

    CLI Example:

    .. code-block:: bash

        salt '*' hashutil.digest_file /path/to/file
    """
    # Fail early with a clear error rather than an OSError from fopen().
    if not __salt__["file.file_exists"](infile):
        raise salt.exceptions.CommandExecutionError(
            "File path '{}' not found.".format(infile)
        )
    # Read as bytes; the digest helpers accept bytes input.
    with salt.utils.files.fopen(infile, "rb") as f:
        file_hash = __salt__["hashutil.digest"](f.read(), checksum)
    return file_hash
def base64_b64encode(instr):
    """
    Encode a string as base64 using the "modern" Python interface.

    Among other possible differences, the "modern" encoder does not include
    newline ('\\n') characters in the encoded output.

    .. versionadded:: 2016.3.0

    CLI Example:

    .. code-block:: bash

        salt '*' hashutil.base64_b64encode 'get salted'
    """
    # Thin wrapper; the shared implementation lives in salt.utils.hashutils.
    return salt.utils.hashutils.base64_b64encode(instr)
def base64_b64decode(instr):
    """
    Decode a base64-encoded string using the "modern" Python interface

    .. versionadded:: 2016.3.0

    CLI Example:

    .. code-block:: bash

        salt '*' hashutil.base64_b64decode 'Z2V0IHNhbHRlZA=='
    """
    # Thin wrapper; the shared implementation lives in salt.utils.hashutils.
    return salt.utils.hashutils.base64_b64decode(instr)
def base64_encodestring(instr):
    """
    Encode a byte-like object as base64 using the "modern" Python interface.

    Among other possible differences, the "modern" encoder includes
    a newline ('\\n') character after every 76 characters and always
    at the end of the encoded byte-like object.

    .. versionadded:: 3000

    CLI Example:

    .. code-block:: bash

        salt '*' hashutil.base64_encodestring 'get salted'
    """
    # Thin wrapper; the shared implementation lives in salt.utils.hashutils.
    return salt.utils.hashutils.base64_encodestring(instr)
def base64_encodefile(fname):
    """
    Read a file from the file system and return as a base64 encoded string

    .. versionadded:: 2016.3.0

    Pillar example:

    .. code-block:: yaml

        path:
          to:
            data: |
              {{ salt.hashutil.base64_encodefile('/path/to/binary_file') | indent(6) }}

    The :py:func:`file.decode <salt.states.file.decode>` state function can be
    used to decode this data and write it to disk.

    CLI Example:

    .. code-block:: bash

        salt '*' hashutil.base64_encodefile /path/to/binary_file
    """
    encoded_f = io.BytesIO()
    # base64.encode streams file-to-file, so the whole file is never held
    # in memory un-encoded.
    with salt.utils.files.fopen(fname, "rb") as f:
        base64.encode(f, encoded_f)
    # Rewind before reading the encoded result back out.
    encoded_f.seek(0)
    return salt.utils.stringutils.to_str(encoded_f.read())
def base64_decodestring(instr):
    """
    Decode a base64-encoded byte-like object using the "modern" Python interface

    .. versionadded:: 3000

    CLI Example:

    .. code-block:: bash

        salt '*' hashutil.base64_decodestring instr='Z2V0IHNhbHRlZAo='
    """
    # Thin wrapper; the shared implementation lives in salt.utils.hashutils.
    return salt.utils.hashutils.base64_decodestring(instr)
def base64_decodefile(instr, outfile):
    r"""
    Decode a base64-encoded string and write the result to a file

    .. versionadded:: 2016.3.0

    CLI Example:

    .. code-block:: bash

        salt '*' hashutil.base64_decodefile instr='Z2V0IHNhbHRlZAo=' outfile='/path/to/binary_file'
    """
    # base64.decode reads ASCII text from the input stream and writes the
    # decoded raw bytes to the (binary-mode) output file.
    encoded_f = io.StringIO(instr)
    with salt.utils.files.fopen(outfile, "wb") as f:
        base64.decode(encoded_f, f)
    return True
def md5_digest(instr):
    """
    Generate an md5 hash of a given string

    .. versionadded:: 2014.7.0

    CLI Example:

    .. code-block:: bash

        salt '*' hashutil.md5_digest 'get salted'
    """
    # Thin wrapper; the shared implementation lives in salt.utils.hashutils.
    return salt.utils.hashutils.md5_digest(instr)
def sha256_digest(instr):
    """
    Generate an sha256 hash of a given string

    .. versionadded:: 2014.7.0

    CLI Example:

    .. code-block:: bash

        salt '*' hashutil.sha256_digest 'get salted'
    """
    # Thin wrapper; the shared implementation lives in salt.utils.hashutils.
    return salt.utils.hashutils.sha256_digest(instr)
def sha512_digest(instr):
    """
    Generate an sha512 hash of a given string

    .. versionadded:: 2014.7.0

    CLI Example:

    .. code-block:: bash

        salt '*' hashutil.sha512_digest 'get salted'
    """
    # Thin wrapper; the shared implementation lives in salt.utils.hashutils.
    return salt.utils.hashutils.sha512_digest(instr)
def hmac_signature(string, shared_secret, challenge_hmac):
    """
    Verify a challenging hmac signature against a string / shared-secret

    .. versionadded:: 2014.7.0

    Returns a boolean if the verification succeeded or failed.

    CLI Example:

    .. code-block:: bash

        salt '*' hashutil.hmac_signature 'get salted' 'shared secret' 'eBWf9bstXg+NiP5AOwppB5HMvZiYMPzEM9W5YMm/AmQ='
    """
    # Thin wrapper; the shared implementation lives in salt.utils.hashutils.
    return salt.utils.hashutils.hmac_signature(string, shared_secret, challenge_hmac)
def hmac_compute(string, shared_secret):
    """
    .. versionadded:: 3000

    Compute a HMAC SHA256 digest using a string and secret.

    CLI Example:

    .. code-block:: bash

        salt '*' hashutil.hmac_compute 'get salted' 'shared secret'
    """
    # Thin wrapper; the shared implementation lives in salt.utils.hashutils.
    return salt.utils.hashutils.hmac_compute(string, shared_secret)
def github_signature(string, shared_secret, challenge_hmac):
    """
    Verify a challenging hmac signature against a string / shared-secret for
    github webhooks.

    .. versionadded:: 2017.7.0

    Returns a boolean if the verification succeeded or failed.

    CLI Example:

    .. code-block:: bash

        salt '*' hashutil.github_signature '{"ref":....} ' 'shared secret' 'sha1=bc6550fc290acf5b42283fa8deaf55cea0f8c206'
    """
    msg = string
    key = shared_secret
    # challenge_hmac looks like "<hashtype>=<hexdigest>"; split only on the
    # first "=" so an unexpected "=" later in the value cannot break parsing.
    hashtype, challenge = challenge_hmac.split("=", 1)
    if isinstance(msg, str):
        msg = salt.utils.stringutils.to_bytes(msg)
    if isinstance(key, str):
        key = salt.utils.stringutils.to_bytes(key)
    hmac_hash = hmac.new(key, msg, getattr(hashlib, hashtype))
    # Constant-time comparison: a plain "==" would leak how many leading
    # characters of the digest match via timing, which GitHub's webhook
    # security guidance explicitly warns against.
    return hmac.compare_digest(hmac_hash.hexdigest(), challenge)
| |
#Embedded file name: ACEStream\Core\Video\VideoStatus.pyo
import sys
from math import ceil, floor
from ACEStream.Core.simpledefs import *
from threading import currentThread, Lock
from traceback import print_exc
from ACEStream.Core.Utilities.logger import log, log_exc
# Live streams treat the piece index space as a ring buffer (indices wrap
# past the last piece back to the first) instead of a linear range.
LIVE_WRAPAROUND = True
DEBUG = False
# When True, bitrate/prebuf hints carried in the torrent metadata are ignored.
DEBUG_SKIP_METADATA = False
class VideoStatus():
    """Piece-level bookkeeping for video-on-demand / live playback.

    Tracks which pieces of the selected movie are available, the playback
    position, the high-priority ("prebuffer") piece sets and the wraparound
    window used for live streams.

    NOTE(review): decompiled Python 2 source - ``has_key``, ``xrange``,
    integer ``/`` division and tuple parameters are relied upon below.
    """

    def __init__(self, piecelen, fileinfo, videoinfo, authparams, is_extra = False):
        # piecelen: torrent piece length in bytes.
        # fileinfo: sequence of (filename, filesize) for the torrent's files.
        # videoinfo: dict with 'index', 'live', 'bitrate', 'userevents', ...
        # is_extra: True for auxiliary files that are not the selected movie.
        self.piecelen = piecelen
        self.sigsize = 0
        self.fileinfo = fileinfo
        self.videoinfo = videoinfo
        self.authparams = authparams
        self.piecelock = Lock()
        # Adaptive high-priority window: current value plus (min, max, step).
        self.high_prob_curr_time = 20
        self.high_prob_curr_time_limit = (10, 180, 10)
        self.high_prob_curr_pieces = 6
        self.high_prob_curr_pieces_limit = (4, 50, 4)
        index = self.videoinfo['index']
        if index == -1:
            index = 0
        self.fileindex = index
        # Byte offset of the selected movie within the torrent payload.
        movie_offset = sum((filesize for _, filesize in fileinfo[:index] if filesize))
        movie_name = fileinfo[index][0]
        movie_size = fileinfo[index][1]
        self.selected_movie = {'offset': movie_offset,
                               'name': movie_name,
                               'size': movie_size}
        # Map the movie's byte range onto (piece index, byte offset) pairs.
        # Python 2 integer division is intentional here.
        movie_begin = movie_offset
        movie_end = movie_offset + movie_size - 1
        self.movie_range = ((movie_begin / piecelen, movie_begin % piecelen), (movie_end / piecelen, movie_end % piecelen))
        # First/last pieces are usually partial (movie does not start or end
        # on a piece boundary).
        self.first_piecelen = piecelen - self.movie_range[0][1]
        self.last_piecelen = self.movie_range[1][1] + 1
        self.first_piece = self.movie_range[0][0]
        self.last_piece = self.movie_range[1][0]
        self.movie_numpieces = self.last_piece - self.first_piece + 1
        self.completed = 0.0
        self.can_be_downloaded = not is_extra
        self.min_download_percent = 0.0
        self.is_extra = is_extra
        self.numhave = 0
        self.have = []
        if DEBUG:
            log('VideoStatus:__init__: index', index, 'movie_offset', movie_offset, 'movie_size', movie_size, 'self.first_piece', self.first_piece, 'self.last_piece', self.last_piece, 'self.movie_numpieces', self.movie_numpieces)
        self.live_streaming = videoinfo['live']
        self.live_startpos = None
        self.live_first_piece = None
        self.live_first_piece_with_offset = None
        self.live_last_piece = None
        self.live_first_ts = None
        self.live_last_ts = None
        self.live_buffer_pieces = 0
        self.playback_pos_is_live = True
        self.playback_pos_observers = []
        # Live streams reuse the piece space as a ring buffer.
        self.wraparound = self.live_streaming and LIVE_WRAPAROUND
        self.wraparound_delta = max(4, self.movie_numpieces / 8)
        self.playback_pos = self.first_piece
        self.playback_pos_real = self.playback_pos
        self.last_read_pos = None
        if self.live_streaming:
            self.set_bitrate(videoinfo['bitrate'])
            self.live_hook_left_offset_min = self.time_to_pieces(10)
            self.live_hook_left_offset = self.live_hook_left_offset_min
            self.live_hook_left_offset_step = self.live_hook_left_offset
            self.live_hook_left_offset_max = self.wraparound_delta
        elif not DEBUG_SKIP_METADATA and videoinfo['bitrate']:
            if DEBUG:
                log('vs::__init__: got bitrate', videoinfo['bitrate'])
            self.set_bitrate(videoinfo['bitrate'])
        else:
            # No bitrate known: guess one from the file size so that the
            # time <-> piece conversions still work.
            if movie_size < 52428800:
                fake_bitrate = 64
            elif movie_size < 104857600:
                fake_bitrate = 128
            elif movie_size < 1073741824:
                fake_bitrate = 256
            else:
                fake_bitrate = 512
            self.set_bitrate(fake_bitrate * 1024, True)
        mimetype = None
        if 'mimetype' in self.videoinfo:
            mimetype = self.videoinfo['mimetype']
        self.prebuf_extra_pieces = None
        self.got_prebuf_pieces = False
        self.prebuf_high_priority_pieces = []
        self.prebuf_high_priority_length = 0
        self.prebuf_needed_pieces = []
        if self.live_streaming:
            self.prebuf_missing_pieces = []
        else:
            # VOD: prebuffer the start of the movie plus any "extra" pieces
            # (container metadata usually lives near the end of the file).
            high_range_len = self.get_high_range_length()
            self.prebuf_pieces = min(self.movie_numpieces, 2 * high_range_len)
            self.prebuf_needed_pieces.extend(self.generate_range((self.first_piece, self.first_piece + self.prebuf_pieces)))
            if DEBUG:
                log('vs::__init__: set needed pieces: total_pieces', self.movie_numpieces, 'high_range_len', high_range_len, 'prebuf_pieces', self.prebuf_pieces, 'prebuf_needed_pieces', self.prebuf_needed_pieces)
            if not DEBUG_SKIP_METADATA and videoinfo.has_key('prebuf_pieces') and videoinfo['prebuf_pieces']:
                try:
                    self.prebuf_extra_pieces = [ int(x) for x in videoinfo['prebuf_pieces'].split(',') ]
                    if len(self.prebuf_extra_pieces) == 1 and self.prebuf_extra_pieces[0] == 0:
                        # A single "0" entry means "no extra pieces".
                        self.prebuf_extra_pieces = []
                    self.got_prebuf_pieces = True
                    if DEBUG:
                        log('vs::__init__: got prebuf pieces', videoinfo['prebuf_pieces'], 'extra', self.prebuf_extra_pieces)
                except:
                    log_exc()
            if not self.got_prebuf_pieces:
                # No explicit hint: derive tail pieces from the mimetype.
                self.prebuf_extra_pieces = []
                if mimetype == 'video/mpeg' or mimetype == 'video/mp4':
                    p = int(floor(self.last_piece * 0.997))
                    self.prebuf_extra_pieces.extend(self.generate_range((p, self.last_piece + 1)))
                elif not mimetype.startswith('audio'):
                    # NOTE(review): raises AttributeError when mimetype is
                    # None - presumably callers always supply one; verify.
                    tail = 0
                    if movie_size > 1073741824:
                        tail = int(ceil(8388608 / self.piecelen))
                    elif movie_size > 524288000:
                        tail = int(ceil(7340032 / self.piecelen))
                    elif movie_size > 157286400:
                        tail = int(ceil(4194304 / self.piecelen))
                    else:
                        tail = int(ceil(2097152 / self.piecelen))
                    if tail > 0:
                        self.prebuf_extra_pieces.extend(self.generate_range((self.last_piece - tail + 1, self.last_piece + 1)))
                    if DEBUG:
                        log('vs::__init__: set extra pieces: movie_size', movie_size, 'mimetype', mimetype, 'tail', tail, 'prebuf_extra_pieces', self.prebuf_extra_pieces)
            self.prebuf_needed_pieces.extend(self.prebuf_extra_pieces)
            self.prebuf_needed_pieces = list(set(self.prebuf_needed_pieces))
            self.prebuf_needed_pieces.sort()
            self.prebuf_missing_pieces = self.prebuf_needed_pieces[:]
            if DEBUG:
                log('vs::__init__: prebuf configuration: mimetype', mimetype, 'size', movie_size, 'piecelen', self.piecelen, 'first', self.first_piece, 'last', self.last_piece, 'needed', self.prebuf_needed_pieces)
        if self.live_streaming:
            self.dropping = True
        else:
            self.dropping = False
        self.playing = False
        self.paused = False
        self.autoresume = False
        self.prebuffering = True
        self.pausable = VODEVENT_PAUSE in videoinfo['userevents'] and VODEVENT_RESUME in videoinfo['userevents']
    def add_high_priority_pieces(self, pieces):
        """Append *pieces* to the high-priority set (skipping owned pieces)."""
        self.piecelock.acquire()
        try:
            if DEBUG:
                log('vs::add_high_priority_pieces:', pieces, 'thread', currentThread().getName())
            # The length counter includes already-owned pieces on purpose:
            # it counts how many pieces were ever requested at high priority.
            self.prebuf_high_priority_length += len(pieces)
            for index in pieces:
                if index in self.have:
                    continue
                if index not in self.prebuf_high_priority_pieces:
                    self.prebuf_high_priority_pieces.append(index)
        finally:
            self.piecelock.release()

    def set_high_priority_pieces(self, pieces):
        """Replace the high-priority set with *pieces* (skipping owned ones)."""
        self.piecelock.acquire()
        try:
            if DEBUG:
                log('vs::set_high_priority_pieces:', pieces, 'thread', currentThread().getName())
            self.prebuf_high_priority_length = len(pieces)
            self.prebuf_high_priority_pieces = []
            for index in pieces:
                if index in self.have:
                    continue
                self.prebuf_high_priority_pieces.append(index)
        finally:
            self.piecelock.release()

    def add_missing_piece(self, index, high_priority = False):
        """Mark a single piece as needed again; re-enters prebuffering."""
        self.piecelock.acquire()
        try:
            if DEBUG:
                log('vs::add_missing_piece:', index, 'high_priority', high_priority, 'thread', currentThread().getName())
            if index in self.have:
                return
            if high_priority and index not in self.prebuf_high_priority_pieces:
                self.prebuf_high_priority_pieces.append(index)
                self.prebuf_high_priority_length += 1
            if index not in self.prebuf_needed_pieces:
                self.prebuf_needed_pieces.append(index)
                self.prebuf_missing_pieces.append(index)
            if not self.prebuffering:
                self.prebuffering = True
        finally:
            self.piecelock.release()

    def add_missing_piece_range(self, pieces, high_priority = False):
        """Batch version of add_missing_piece for an iterable of indices."""
        self.piecelock.acquire()
        try:
            if DEBUG:
                log('vs::add_missing_piece_range:', pieces, 'high_priority', high_priority, 'thread', currentThread().getName())
            for index in pieces:
                if index in self.have:
                    continue
                if high_priority and index not in self.prebuf_high_priority_pieces:
                    self.prebuf_high_priority_pieces.append(index)
                    self.prebuf_high_priority_length += 1
                if index not in self.prebuf_needed_pieces:
                    self.prebuf_needed_pieces.append(index)
                    self.prebuf_missing_pieces.append(index)
            if not self.prebuffering:
                self.prebuffering = True
        finally:
            self.piecelock.release()

    def high_priority_pieces(self):
        """Return the number of outstanding high-priority pieces."""
        self.piecelock.acquire()
        try:
            return len(self.prebuf_high_priority_pieces)
        finally:
            self.piecelock.release()

    def high_priority_length(self):
        """Return the total count of pieces ever requested at high priority."""
        self.piecelock.acquire()
        try:
            return self.prebuf_high_priority_length
        finally:
            self.piecelock.release()
    def got_piece(self, index):
        """Record that piece *index* was received.

        Returns True when receiving this piece just pushed completion past
        min_download_percent (i.e. the file may now be started); None when
        the piece was already known.
        """
        self.piecelock.acquire()
        try:
            if index in self.have:
                return
            start_new_file = False
            if DEBUG:
                log('vs::got_piece: index', index, 'thread', currentThread().getName())
            if self.in_download_range(index):
                self.have.append(index)
                self.numhave += 1
                self.completed = self.numhave / float(self.movie_numpieces)
                if not self.can_be_downloaded and self.completed >= self.min_download_percent:
                    self.can_be_downloaded = True
                    start_new_file = True
                if index in self.prebuf_high_priority_pieces:
                    self.prebuf_high_priority_pieces.remove(index)
                if len(self.prebuf_missing_pieces):
                    try:
                        if index in self.prebuf_missing_pieces:
                            self.prebuf_missing_pieces.remove(index)
                        if len(self.prebuf_missing_pieces) == 0 and self.is_extra:
                            # Extra files finish prebuffering as soon as all
                            # needed pieces have arrived.
                            self.prebuffering = False
                    except:
                        pass
            elif DEBUG:
                log('vs::got_piece: piece not in download range: index', index)
            return start_new_file
        finally:
            self.piecelock.release()

    def live_invalidate_piece(self, index):
        """Drop *index* from the owned set (live window moved past it)."""
        self.piecelock.acquire()
        try:
            if index in self.have:
                if DEBUG:
                    log('vs::live_invalidate_piece: index', index)
                self.have.remove(index)
                self.numhave -= 1
        finally:
            self.piecelock.release()
    def add_playback_pos_observer(self, observer):
        """Register a callable(oldpos, newpos) notified on position changes."""
        self.playback_pos_observers.append(observer)

    def real_piecelen(self, x):
        """Return the effective byte length of piece *x*.

        The first and last pieces of the movie are usually partial because
        the movie does not start/end on a piece boundary.
        """
        if x == self.first_piece:
            return self.first_piecelen
        elif x == self.last_piece:
            return self.last_piecelen
        else:
            return self.piecelen

    def set_bitrate(self, bitrate, fake_bitrate = False):
        """Set the stream bitrate and derive the pieces-per-second rate.

        *fake_bitrate* marks a size-based guess rather than real metadata.
        """
        self.bitrate_set = not fake_bitrate
        self.bitrate = bitrate
        self.piece_per_sec = float(bitrate) / self.piecelen
        if DEBUG:
            log('vs::set_bitrate: bitrate', bitrate, 'fake', fake_bitrate, 'piece_per_sec', self.piece_per_sec)

    def set_duration(self, duration):
        """Derive the bitrate from the movie size and *duration* seconds."""
        try:
            self.set_bitrate(self.selected_movie['size'] / duration)
        except:
            log_exc()
    def set_prebuf_pieces(self, prebuf_extra_pieces):
        """Recompute the needed/missing prebuffer sets from fresh hints."""
        self.piecelock.acquire()
        try:
            if DEBUG:
                log('vs::set_prebuf_pieces: prebuf_extra_pieces', prebuf_extra_pieces)
            prebuf_needed_pieces = []
            prebuf_pieces = min(self.movie_numpieces, 2 * self.get_high_range_length())
            prebuf_needed_pieces.extend(self.generate_range((self.first_piece, self.first_piece + prebuf_pieces)))
            if len(prebuf_extra_pieces) == 1 and prebuf_extra_pieces[0] == 0:
                # A single "0" entry means "no extra pieces".
                prebuf_extra_pieces = []
            prebuf_needed_pieces.extend(prebuf_extra_pieces)
            prebuf_needed_pieces = list(set(prebuf_needed_pieces))
            prebuf_needed_pieces.sort()
            # Python 2 filter() returns a list here.
            prebuf_missing_pieces = filter(lambda i: i not in self.have, prebuf_needed_pieces)
            if DEBUG:
                log('vs::set_prebuf_pieces: prebuf_pieces', prebuf_pieces, 'prebuf_needed_pieces', prebuf_needed_pieces, 'prebuf_missing_pieces', prebuf_missing_pieces)
            self.prebuf_pieces = prebuf_pieces
            self.prebuf_needed_pieces = prebuf_needed_pieces
            self.prebuf_missing_pieces = prebuf_missing_pieces
        except:
            if DEBUG:
                print_exc()
        finally:
            self.piecelock.release()

    def update_player_buffer_pieces(self, player_buffer_time):
        """Mark the first *player_buffer_time* seconds as high priority."""
        count = self.time_to_pieces(player_buffer_time)
        buffer_pieces = []
        last = min(self.first_piece + count, self.last_piece + 1)
        buffer_pieces.extend(self.generate_range((self.first_piece, last)))
        self.set_high_priority_pieces(buffer_pieces)
        if DEBUG:
            log('vs::update_player_buffer_pieces: buffer_pieces', buffer_pieces, 'player_buffer_time', player_buffer_time, 'bitrate', self.bitrate)
    def set_live_startpos(self, pos):
        """Move the live hook-in point to piece *pos*.

        Returns a generator over the pieces between the old and new start
        positions that must be invalidated, or None when the move is too
        small to matter.
        """
        invalidate_range = None
        if self.live_startpos is not None:
            # Distance in both ring directions; invalidate the shorter arc.
            dist1 = self.dist_range(self.live_startpos, pos)
            dist2 = self.dist_range(pos, self.live_startpos)
            if DEBUG:
                log('vs::set_live_startpos: check range: curpos', self.live_startpos, 'newpos', pos, 'dist1', dist1, 'dist2', dist2)
            if dist1 <= dist2:
                if dist1 > 1:
                    invalidate_range = self.generate_range((self.live_startpos, pos))
            elif dist2 > 1:
                invalidate_range = self.generate_range((pos, self.live_startpos))
        self.live_startpos = pos
        self.playback_pos = pos
        self.playback_pos_real = pos
        for o in self.playback_pos_observers:
            o(None, pos)
        return invalidate_range

    def get_live_startpos(self):
        """Return the current live hook-in piece (None before hook-in)."""
        return self.live_startpos
    def generate_range(self, (f, t)):
        """Yield piece indices in [f, t), honouring live wraparound.

        Python 2 tuple-parameter syntax: called as ``generate_range((f, t))``.
        """
        if self.wraparound and f > t:
            for x in xrange(f, self.last_piece + 1):
                yield x
            for x in xrange(self.first_piece, t):
                yield x
        else:
            for x in xrange(f, t):
                yield x

    def dist_range(self, f, t):
        """Return the forward (ring) distance in pieces from f to t."""
        if f > t:
            return self.last_piece - f + t - self.first_piece
        else:
            return t - f

    def in_range(self, f, t, x):
        """Return True when x lies in [f, t), honouring wraparound."""
        if self.wraparound and f > t:
            return self.first_piece <= x < t or f <= x <= self.last_piece
        else:
            return f <= x < t
    def inc_playback_pos(self):
        """Advance the playback position one piece and notify observers."""
        oldpos = self.playback_pos
        self.playback_pos += 1
        if self.playback_pos > self.last_piece:
            if self.wraparound:
                self.playback_pos = self.first_piece
            else:
                # Clamp at one-past-the-end for non-wrapping (VOD) playback.
                self.playback_pos = self.last_piece + 1
        if self.live_streaming and self.live_startpos is not None:
            self.live_startpos = self.playback_pos
        for o in self.playback_pos_observers:
            o(oldpos, self.playback_pos)

    def in_download_range(self, x):
        """Return True when piece x is currently eligible for download."""
        if self.wraparound:
            # Live: only wraparound_delta pieces ahead of the playback
            # position may be downloaded (ring window).
            wraplen = self.playback_pos + self.wraparound_delta - self.last_piece
            if wraplen > 0:
                return self.first_piece <= x < self.first_piece + wraplen or self.playback_pos <= x <= self.last_piece
            return self.playback_pos <= x < self.playback_pos + self.wraparound_delta
        else:
            return self.first_piece <= x <= self.last_piece
    def in_valid_range(self, piece):
        """Return True when *piece* may legitimately exist right now."""
        if self.live_streaming:
            if self.live_startpos is None:
                # Not hooked in yet: accept everything.
                return True
            else:
                begin, end = self.live_get_valid_range()
                ret = self.in_range(begin, end, piece)
                if DEBUG and not ret:
                    log('vs::in_valid_range: not in valid range:', begin, '<', piece, '<', end)
                return ret
        else:
            return self.first_piece <= piece <= self.last_piece

    def live_get_valid_range(self):
        """Return (begin, end) of the live validity window around playback."""
        begin = self.normalize(self.playback_pos - self.wraparound_delta)
        end = self.normalize(self.playback_pos + self.wraparound_delta)
        return (begin, end)

    def live_get_window_range(self):
        """Return (first, last) of the live window, or None when unknown."""
        if self.live_first_piece is None or self.live_last_piece is None:
            return
        return (self.live_first_piece, self.live_last_piece)

    def live_piece_to_invalidate(self, last_piece = None):
        """Return the piece falling out of the live window behind playback."""
        if last_piece is None:
            last_piece = self.playback_pos
        return self.normalize(last_piece - self.wraparound_delta)
    def get_range_diff(self, oldrange, newrange):
        """Return the pieces in *oldrange* that are not in *newrange*.

        Returns (set_of_pieces, list_of_subranges). When the old range spans
        the whole movie the difference is expressed as subranges instead of
        materialising a full set.
        """
        rlist = []
        if oldrange[0] == 0 and oldrange[1] == self.movie_numpieces - 1:
            if newrange[0] < newrange[1]:
                a = (oldrange[0], newrange[0] - 1)
                b = (newrange[1] + 1, oldrange[1])
                rlist = [a, b]
                return (None, rlist)
            else:
                # New range wraps: its complement is a single subrange.
                a = (newrange[1] + 1, newrange[0] - 1)
                rlist = [a]
                return (None, rlist)
        oldset = range2set(oldrange, self.movie_numpieces)
        newset = range2set(newrange, self.movie_numpieces)
        return (oldset - newset, rlist)

    def normalize(self, x):
        """Map x back into [first_piece, last_piece] (modulo when wrapping)."""
        if self.first_piece <= x <= self.last_piece:
            return x
        elif self.wraparound:
            return (x - self.first_piece) % self.movie_numpieces + self.first_piece
        else:
            return max(self.first_piece, min(x, self.last_piece))
    def time_to_pieces(self, sec):
        """Convert a duration in seconds to a piece count (rounded up)."""
        return int(ceil(sec * self.piece_per_sec))

    def size_to_pieces(self, size):
        """Convert a byte size to a piece count (rounded up)."""
        return int(ceil(size / self.piecelen))

    def pieces_to_time(self, pieces):
        """Convert a piece count to a duration in seconds (rounded up)."""
        return int(ceil(pieces / self.piece_per_sec))

    def download_range(self):
        """Return (first, last) of the piece range currently downloadable."""
        first = self.playback_pos
        if self.wraparound:
            wraplen = first + self.wraparound_delta + 1 - self.last_piece
            if wraplen > 0:
                last = self.first_piece + wraplen
            else:
                last = first + self.wraparound_delta + 1
        else:
            last = self.last_piece + 1
        return (first, last)

    def get_wraparound(self):
        """Return True when the piece space is treated as a ring (live)."""
        return self.wraparound
    def increase_high_range(self, factor = 1):
        """Grow the adaptive high-priority window by *factor* steps (capped)."""
        self.high_prob_curr_time += factor * self.high_prob_curr_time_limit[2]
        if self.high_prob_curr_time > self.high_prob_curr_time_limit[1]:
            self.high_prob_curr_time = self.high_prob_curr_time_limit[1]
        self.high_prob_curr_pieces += int(factor * self.high_prob_curr_pieces_limit[2])
        if self.high_prob_curr_pieces > self.high_prob_curr_pieces_limit[1]:
            self.high_prob_curr_pieces = self.high_prob_curr_pieces_limit[1]
        if DEBUG:
            log('vs::change_high_range: increase,', self.high_prob_curr_time, 'seconds or', self.high_prob_curr_pieces, 'pieces')

    def decrease_high_range(self, factor = 1):
        """Shrink the adaptive high-priority window by *factor* steps (floored)."""
        self.high_prob_curr_time -= factor * self.high_prob_curr_time_limit[2]
        if self.high_prob_curr_time < self.high_prob_curr_time_limit[0]:
            self.high_prob_curr_time = self.high_prob_curr_time_limit[0]
        self.high_prob_curr_pieces -= int(factor * self.high_prob_curr_pieces_limit[2])
        if self.high_prob_curr_pieces < self.high_prob_curr_pieces_limit[0]:
            self.high_prob_curr_pieces = self.high_prob_curr_pieces_limit[0]
        if DEBUG:
            log('vs::change_high_range: decrease,', self.high_prob_curr_time, 'seconds or', self.high_prob_curr_pieces, 'pieces')

    def set_high_range(self, seconds = None, pieces = None):
        """Pin the high-priority window to explicit seconds and/or pieces."""
        if seconds:
            self.high_prob_curr_time = seconds
        if pieces:
            self.high_prob_curr_pieces = pieces
    def get_high_range(self, min_size = None):
        """Return (first, last) of the high-priority download range."""
        first, _ = self.download_range()
        if self.prebuf_extra_pieces is not None and first in self.prebuf_extra_pieces:
            # Pinned extra pieces (e.g. container metadata) are fetched one
            # at a time at top priority.
            return (first, first + 1)
        pieces_needed = min(self.time_to_pieces(self.high_prob_curr_time), self.high_prob_curr_pieces)
        if min_size is not None:
            pieces_needed = max(pieces_needed, min_size)
        last = min(self.last_piece, first + pieces_needed, first + self.high_prob_curr_pieces_limit[1])
        return (first, last)

    def in_high_range(self, piece):
        """Return True when *piece* falls inside the high-priority range."""
        first, last = self.get_high_range()
        return self.in_range(first, last, piece)

    def get_range_length(self, first, last):
        """Return the number of pieces from first to last (ring-aware)."""
        if self.wraparound and first > last:
            return self.last_piece - first + last - self.first_piece
        else:
            return last - first

    def get_high_range_length(self):
        """Return the length of the current high-priority range."""
        first, last = self.get_high_range()
        return self.get_range_length(first, last)

    def generate_high_range(self, min_size = None):
        """Yield the piece indices of the high-priority range."""
        first, last = self.get_high_range(min_size)
        return self.generate_range((first, last))

    def generate_download_range(self):
        """Yield the piece indices of the downloadable range."""
        first, last = self.download_range()
        return self.generate_range((first, last))

    def get_download_range_length(self):
        """Return the length of the downloadable range."""
        first, last = self.download_range()
        return self.get_range_length(first, last)
def range2set(_range, maxrange):
    """Expand an inclusive (start, end) piece range into a set of indices.

    When start > end the range wraps through *maxrange* back to 0.
    """
    if _range[0] <= _range[1]:
        _set = set(xrange(_range[0], _range[1] + 1))
    else:
        # NOTE(review): mixes xrange and range; harmless but inconsistent.
        _set = set(xrange(_range[0], maxrange)) | set(range(0, _range[1] + 1))
    return _set
| |
"""Alexa entity adapters."""
import logging
from typing import List
from homeassistant.components import (
alarm_control_panel,
alert,
automation,
binary_sensor,
camera,
cover,
fan,
group,
image_processing,
input_boolean,
input_number,
light,
lock,
media_player,
scene,
script,
sensor,
switch,
timer,
vacuum,
)
from homeassistant.components.climate import const as climate
from homeassistant.const import (
ATTR_DEVICE_CLASS,
ATTR_SUPPORTED_FEATURES,
ATTR_UNIT_OF_MEASUREMENT,
CLOUD_NEVER_EXPOSED_ENTITIES,
CONF_NAME,
TEMP_CELSIUS,
TEMP_FAHRENHEIT,
)
from homeassistant.core import callback
from homeassistant.helpers import network
from homeassistant.util.decorator import Registry
from .capabilities import (
Alexa,
AlexaBrightnessController,
AlexaCameraStreamController,
AlexaChannelController,
AlexaColorController,
AlexaColorTemperatureController,
AlexaContactSensor,
AlexaDoorbellEventSource,
AlexaEndpointHealth,
AlexaEqualizerController,
AlexaEventDetectionSensor,
AlexaInputController,
AlexaLockController,
AlexaModeController,
AlexaMotionSensor,
AlexaPercentageController,
AlexaPlaybackController,
AlexaPlaybackStateReporter,
AlexaPowerController,
AlexaPowerLevelController,
AlexaRangeController,
AlexaSceneController,
AlexaSecurityPanelController,
AlexaSeekController,
AlexaSpeaker,
AlexaStepSpeaker,
AlexaTemperatureSensor,
AlexaThermostatController,
AlexaTimeHoldController,
AlexaToggleController,
)
from .const import CONF_DESCRIPTION, CONF_DISPLAY_CATEGORIES
_LOGGER = logging.getLogger(__name__)

# Registry of entity adapters keyed by Home Assistant domain.
ENTITY_ADAPTERS = Registry()

# Characters stripped from names/descriptions before sending them to Alexa;
# mapping each ord() to None makes str.translate() delete them.
TRANSLATION_TABLE = dict.fromkeys(map(ord, r"}{\/|\"()[]+~!><*%"), None)
class DisplayCategory:
    """Possible display categories for Discovery response.

    https://developer.amazon.com/docs/device-apis/alexa-discovery.html#display-categories
    """

    # Describes a combination of devices set to a specific state, when the
    # state change must occur in a specific order. For example, a "watch
    # Netflix" scene might require the: 1. TV to be powered on & 2. Input set
    # to HDMI1. Applies to Scenes
    ACTIVITY_TRIGGER = "ACTIVITY_TRIGGER"

    # Indicates media devices with video or photo capabilities.
    CAMERA = "CAMERA"

    # Indicates a non-mobile computer, such as a desktop computer.
    COMPUTER = "COMPUTER"

    # Indicates an endpoint that detects and reports contact.
    CONTACT_SENSOR = "CONTACT_SENSOR"

    # Indicates a door.
    DOOR = "DOOR"

    # Indicates a doorbell.
    DOORBELL = "DOORBELL"

    # Indicates a window covering on the outside of a structure.
    EXTERIOR_BLIND = "EXTERIOR_BLIND"

    # Indicates a fan.
    FAN = "FAN"

    # Indicates a game console, such as Microsoft Xbox or Nintendo Switch
    GAME_CONSOLE = "GAME_CONSOLE"

    # Indicates a garage door. Garage doors must implement the ModeController interface to open and close the door.
    GARAGE_DOOR = "GARAGE_DOOR"

    # Indicates a window covering on the inside of a structure.
    INTERIOR_BLIND = "INTERIOR_BLIND"

    # Indicates a laptop or other mobile computer.
    LAPTOP = "LAPTOP"

    # Indicates light sources or fixtures.
    LIGHT = "LIGHT"

    # Indicates a microwave oven.
    MICROWAVE = "MICROWAVE"

    # Indicates a mobile phone.
    MOBILE_PHONE = "MOBILE_PHONE"

    # Indicates an endpoint that detects and reports motion.
    MOTION_SENSOR = "MOTION_SENSOR"

    # Indicates a network-connected music system.
    MUSIC_SYSTEM = "MUSIC_SYSTEM"

    # An endpoint that cannot be described in one of the other categories.
    OTHER = "OTHER"

    # Indicates a network router.
    NETWORK_HARDWARE = "NETWORK_HARDWARE"

    # Indicates an oven cooking appliance.
    OVEN = "OVEN"

    # Indicates a non-mobile phone, such as landline or an IP phone.
    PHONE = "PHONE"

    # Describes a combination of devices set to a specific state, when the
    # order of the state change is not important. For example a bedtime scene
    # might include turning off lights and lowering the thermostat, but the
    # order is unimportant. Applies to Scenes
    SCENE_TRIGGER = "SCENE_TRIGGER"

    # Indicates a projector screen.
    SCREEN = "SCREEN"

    # Indicates a security panel.
    SECURITY_PANEL = "SECURITY_PANEL"

    # Indicates an endpoint that locks.
    SMARTLOCK = "SMARTLOCK"

    # Indicates modules that are plugged into an existing electrical outlet.
    # Can control a variety of devices.
    SMARTPLUG = "SMARTPLUG"

    # Indicates the endpoint is a speaker or speaker system.
    SPEAKER = "SPEAKER"

    # Indicates a streaming device such as Apple TV, Chromecast, or Roku.
    STREAMING_DEVICE = "STREAMING_DEVICE"

    # Indicates in-wall switches wired to the electrical system. Can control a
    # variety of devices.
    SWITCH = "SWITCH"

    # Indicates a tablet computer.
    TABLET = "TABLET"

    # Indicates endpoints that report the temperature only.
    TEMPERATURE_SENSOR = "TEMPERATURE_SENSOR"

    # Indicates endpoints that control temperature, stand-alone air
    # conditioners, or heaters with direct temperature control.
    THERMOSTAT = "THERMOSTAT"

    # Indicates the endpoint is a television.
    TV = "TV"

    # Indicates a network-connected wearable device, such as an Apple Watch, Fitbit, or Samsung Gear.
    WEARABLE = "WEARABLE"
class AlexaEntity:
    """An adaptation of an entity, expressed in Alexa's terms.

    The API handlers should manipulate entities only through this interface.
    """

    def __init__(self, hass, config, entity):
        """Initialize Alexa Entity."""
        self.hass = hass
        self.config = config
        self.entity = entity
        self.entity_conf = config.entity_config.get(entity.entity_id, {})

    @property
    def entity_id(self):
        """Return the Entity ID."""
        return self.entity.entity_id

    def friendly_name(self):
        """Return the Alexa API friendly name."""
        configured_name = self.entity_conf.get(CONF_NAME, self.entity.name)
        return configured_name.translate(TRANSLATION_TABLE)

    def description(self):
        """Return the Alexa API description."""
        base = self.entity_conf.get(CONF_DESCRIPTION) or self.entity_id
        return f"{base} via Home Assistant".translate(TRANSLATION_TABLE)

    def alexa_id(self):
        """Return the Alexa API entity id."""
        # Alexa endpoint ids may not contain ".", so swap in "#".
        dotless = self.entity.entity_id.replace(".", "#")
        return dotless.translate(TRANSLATION_TABLE)

    def display_categories(self):
        """Return a list of display categories."""
        conf = self.config.entity_config.get(self.entity.entity_id, {})
        if CONF_DISPLAY_CATEGORIES not in conf:
            return self.default_display_categories()
        return [conf[CONF_DISPLAY_CATEGORIES]]

    def default_display_categories(self):
        """Return a list of default display categories.

        This can be overridden by the user in the Home Assistant configuration.
        See also DisplayCategory.
        """
        raise NotImplementedError

    def get_interface(self, capability):
        """Return the given AlexaInterface.

        Raises _UnsupportedInterface.
        """

    def interfaces(self):
        """Return a list of supported interfaces.

        Used for discovery. The list should contain AlexaInterface instances.
        If the list is empty, this entity will not be discovered.
        """
        raise NotImplementedError

    def serialize_properties(self):
        """Yield each supported property in API format."""
        for interface in self.interfaces():
            if interface.properties_proactively_reported():
                yield from interface.serialize_properties()

    def serialize_discovery(self):
        """Serialize the entity for discovery."""
        locale = self.config.locale
        capabilities = [
            interface.serialize_discovery()
            for interface in self.interfaces()
            if locale in interface.supported_locales
        ]
        return {
            "displayCategories": self.display_categories(),
            "cookie": {},
            "endpointId": self.alexa_id(),
            "friendlyName": self.friendly_name(),
            "description": self.description(),
            "manufacturerName": "Home Assistant",
            "capabilities": capabilities,
        }
@callback
def async_get_entities(hass, config) -> List[AlexaEntity]:
    """Return all entities that are supported by Alexa."""
    entities = []
    for state in hass.states.async_all():
        # Never expose entities on the cloud deny-list.
        if state.entity_id in CLOUD_NEVER_EXPOSED_ENTITIES:
            continue
        if state.domain not in ENTITY_ADAPTERS:
            continue
        adapter = ENTITY_ADAPTERS[state.domain](hass, config, state)
        # Only discover entities that expose at least one interface.
        if any(True for _ in adapter.interfaces()):
            entities.append(adapter)
    return entities
@ENTITY_ADAPTERS.register(alert.DOMAIN)
@ENTITY_ADAPTERS.register(automation.DOMAIN)
@ENTITY_ADAPTERS.register(group.DOMAIN)
@ENTITY_ADAPTERS.register(input_boolean.DOMAIN)
class GenericCapabilities(AlexaEntity):
    """A generic, on/off device.

    The choice of last resort.
    """

    def default_display_categories(self):
        """Return the display categories for this entity."""
        return [DisplayCategory.OTHER]

    def interfaces(self):
        """Return the supported interfaces."""
        supported = [AlexaPowerController(self.entity)]
        supported.append(AlexaEndpointHealth(self.hass, self.entity))
        supported.append(Alexa(self.hass))
        return supported
@ENTITY_ADAPTERS.register(switch.DOMAIN)
class SwitchCapabilities(AlexaEntity):
    """Class to represent Switch capabilities."""

    def default_display_categories(self):
        """Return the display categories for this entity."""
        is_outlet = (
            self.entity.attributes.get(ATTR_DEVICE_CLASS)
            == switch.DEVICE_CLASS_OUTLET
        )
        return [DisplayCategory.SMARTPLUG if is_outlet else DisplayCategory.SWITCH]

    def interfaces(self):
        """Return the supported interfaces."""
        return [
            AlexaPowerController(self.entity),
            AlexaEndpointHealth(self.hass, self.entity),
            Alexa(self.hass),
        ]
@ENTITY_ADAPTERS.register(climate.DOMAIN)
class ClimateCapabilities(AlexaEntity):
    """Class to represent Climate capabilities."""

    def default_display_categories(self):
        """Return the display categories for this entity."""
        return [DisplayCategory.THERMOSTAT]

    def interfaces(self):
        """Yield the supported interfaces."""
        hvac_modes = self.entity.attributes.get(climate.ATTR_HVAC_MODES, [])
        # When the device has an "off" mode we also expose power control,
        # which lets Alexa turn it back on.
        if climate.HVAC_MODE_OFF in hvac_modes:
            yield AlexaPowerController(self.entity)
        yield AlexaThermostatController(self.hass, self.entity)
        yield AlexaTemperatureSensor(self.hass, self.entity)
        yield AlexaEndpointHealth(self.hass, self.entity)
        yield Alexa(self.hass)
@ENTITY_ADAPTERS.register(cover.DOMAIN)
class CoverCapabilities(AlexaEntity):
    """Class to represent Cover capabilities."""

    def default_display_categories(self):
        """Return the display categories for this entity."""
        device_class = self.entity.attributes.get(ATTR_DEVICE_CLASS)
        if device_class in (cover.DEVICE_CLASS_GARAGE, cover.DEVICE_CLASS_GATE):
            category = DisplayCategory.GARAGE_DOOR
        elif device_class == cover.DEVICE_CLASS_DOOR:
            category = DisplayCategory.DOOR
        elif device_class in (
            cover.DEVICE_CLASS_BLIND,
            cover.DEVICE_CLASS_SHADE,
            cover.DEVICE_CLASS_CURTAIN,
        ):
            category = DisplayCategory.INTERIOR_BLIND
        elif device_class in (
            cover.DEVICE_CLASS_WINDOW,
            cover.DEVICE_CLASS_AWNING,
            cover.DEVICE_CLASS_SHUTTER,
        ):
            category = DisplayCategory.EXTERIOR_BLIND
        else:
            category = DisplayCategory.OTHER
        return [category]

    def interfaces(self):
        """Yield the supported interfaces."""
        attributes = self.entity.attributes
        device_class = attributes.get(ATTR_DEVICE_CLASS)
        # Garage doors and gates are not exposed as simple power devices.
        if device_class not in (cover.DEVICE_CLASS_GARAGE, cover.DEVICE_CLASS_GATE):
            yield AlexaPowerController(self.entity)
        features = attributes.get(ATTR_SUPPORTED_FEATURES, 0)
        position_instance = f"{cover.DOMAIN}.{cover.ATTR_POSITION}"
        if features & cover.SUPPORT_SET_POSITION:
            yield AlexaRangeController(self.entity, instance=position_instance)
        elif features & (cover.SUPPORT_CLOSE | cover.SUPPORT_OPEN):
            # No granular positioning: expose open/close as a mode instead.
            yield AlexaModeController(self.entity, instance=position_instance)
        if features & cover.SUPPORT_SET_TILT_POSITION:
            yield AlexaRangeController(self.entity, instance=f"{cover.DOMAIN}.tilt")
        yield AlexaEndpointHealth(self.hass, self.entity)
        yield Alexa(self.hass)
@ENTITY_ADAPTERS.register(light.DOMAIN)
class LightCapabilities(AlexaEntity):
    """Class to represent Light capabilities."""

    def default_display_categories(self):
        """Return the display categories for this entity."""
        return [DisplayCategory.LIGHT]

    def interfaces(self):
        """Yield the supported interfaces."""
        yield AlexaPowerController(self.entity)
        features = self.entity.attributes.get(ATTR_SUPPORTED_FEATURES, 0)
        # Map each optional light feature to its Alexa controller.
        feature_controllers = (
            (light.SUPPORT_BRIGHTNESS, AlexaBrightnessController),
            (light.SUPPORT_COLOR, AlexaColorController),
            (light.SUPPORT_COLOR_TEMP, AlexaColorTemperatureController),
        )
        for feature, controller in feature_controllers:
            if features & feature:
                yield controller(self.entity)
        yield AlexaEndpointHealth(self.hass, self.entity)
        yield Alexa(self.hass)
@ENTITY_ADAPTERS.register(fan.DOMAIN)
class FanCapabilities(AlexaEntity):
    """Class to represent Fan capabilities."""

    def default_display_categories(self):
        """Return the display categories for this entity."""
        return [DisplayCategory.FAN]

    def interfaces(self):
        """Yield the supported interfaces."""
        yield AlexaPowerController(self.entity)
        features = self.entity.attributes.get(ATTR_SUPPORTED_FEATURES, 0)
        if features & fan.SUPPORT_SET_SPEED:
            # Speed is exposed three ways so different utterances all work.
            yield AlexaPercentageController(self.entity)
            yield AlexaPowerLevelController(self.entity)
            yield AlexaRangeController(
                self.entity, instance=f"{fan.DOMAIN}.{fan.ATTR_SPEED}"
            )
        if features & fan.SUPPORT_OSCILLATE:
            yield AlexaToggleController(
                self.entity, instance=f"{fan.DOMAIN}.{fan.ATTR_OSCILLATING}"
            )
        if features & fan.SUPPORT_DIRECTION:
            yield AlexaModeController(
                self.entity, instance=f"{fan.DOMAIN}.{fan.ATTR_DIRECTION}"
            )
        yield AlexaEndpointHealth(self.hass, self.entity)
        yield Alexa(self.hass)
@ENTITY_ADAPTERS.register(lock.DOMAIN)
class LockCapabilities(AlexaEntity):
    """Class to represent Lock capabilities."""

    def default_display_categories(self):
        """Return the display categories for this entity."""
        return [DisplayCategory.SMARTLOCK]

    def interfaces(self):
        """Return the supported interfaces."""
        supported = [AlexaLockController(self.entity)]
        supported.append(AlexaEndpointHealth(self.hass, self.entity))
        supported.append(Alexa(self.hass))
        return supported
@ENTITY_ADAPTERS.register(media_player.const.DOMAIN)
class MediaPlayerCapabilities(AlexaEntity):
    """Class to represent MediaPlayer capabilities."""

    def default_display_categories(self):
        """Return the display categories for this entity."""
        device_class = self.entity.attributes.get(ATTR_DEVICE_CLASS)
        if device_class == media_player.DEVICE_CLASS_SPEAKER:
            return [DisplayCategory.SPEAKER]
        return [DisplayCategory.TV]

    def interfaces(self):
        """Yield the supported interfaces."""
        yield AlexaPowerController(self.entity)
        features = self.entity.attributes.get(ATTR_SUPPORTED_FEATURES, 0)

        # Absolute volume wins over stepped volume when both are supported.
        if features & media_player.const.SUPPORT_VOLUME_SET:
            yield AlexaSpeaker(self.entity)
        elif features & media_player.const.SUPPORT_VOLUME_STEP:
            yield AlexaStepSpeaker(self.entity)

        playback_features = (
            media_player.const.SUPPORT_PLAY
            | media_player.const.SUPPORT_PAUSE
            | media_player.const.SUPPORT_STOP
            | media_player.const.SUPPORT_NEXT_TRACK
            | media_player.const.SUPPORT_PREVIOUS_TRACK
        )
        if features & playback_features:
            yield AlexaPlaybackController(self.entity)
            yield AlexaPlaybackStateReporter(self.entity)

        if features & media_player.const.SUPPORT_SEEK:
            yield AlexaSeekController(self.entity)

        if features & media_player.SUPPORT_SELECT_SOURCE:
            source_list = self.entity.attributes.get(
                media_player.const.ATTR_INPUT_SOURCE_LIST, []
            )
            # Only expose the input controller if any source survives
            # Alexa's valid-input filtering.
            if AlexaInputController.get_valid_inputs(source_list):
                yield AlexaInputController(self.entity)

        if features & media_player.const.SUPPORT_PLAY_MEDIA:
            yield AlexaChannelController(self.entity)

        if features & media_player.const.SUPPORT_SELECT_SOUND_MODE:
            mode_list = self.entity.attributes.get(
                media_player.const.ATTR_SOUND_MODE_LIST, []
            )
            if AlexaInputController.get_valid_inputs(mode_list):
                yield AlexaEqualizerController(self.entity)

        yield AlexaEndpointHealth(self.hass, self.entity)
        yield Alexa(self.hass)
@ENTITY_ADAPTERS.register(scene.DOMAIN)
class SceneCapabilities(AlexaEntity):
    """Class to represent Scene capabilities."""

    def description(self):
        """Return the Alexa API description, marked as a scene."""
        base = AlexaEntity.description(self)
        # Only append the marker when the text does not already mention it.
        if "scene" in base.casefold():
            return base
        return f"{base} (Scene)"

    def default_display_categories(self):
        """Return the display categories for this entity."""
        return [DisplayCategory.SCENE_TRIGGER]

    def interfaces(self):
        """Return the supported interfaces."""
        return [
            AlexaSceneController(self.entity, supports_deactivation=False),
            Alexa(self.hass),
        ]
@ENTITY_ADAPTERS.register(script.DOMAIN)
class ScriptCapabilities(AlexaEntity):
    """Class to represent Script capabilities."""

    def default_display_categories(self):
        """Return the display categories for this entity."""
        return [DisplayCategory.ACTIVITY_TRIGGER]

    def interfaces(self):
        """Return the supported interfaces."""
        # Cancellable scripts can also be "deactivated" through Alexa.
        deactivatable = bool(self.entity.attributes.get("can_cancel"))
        return [
            AlexaSceneController(self.entity, supports_deactivation=deactivatable),
            Alexa(self.hass),
        ]
@ENTITY_ADAPTERS.register(sensor.DOMAIN)
class SensorCapabilities(AlexaEntity):
    """Class to represent Sensor capabilities."""

    def default_display_categories(self):
        """Return the display categories for this entity."""
        # Although there are other kinds of sensors, all but temperature
        # sensors are currently ignored.
        return [DisplayCategory.TEMPERATURE_SENSOR]

    def interfaces(self):
        """Yield the supported interfaces."""
        unit = self.entity.attributes.get(ATTR_UNIT_OF_MEASUREMENT)
        if unit in (TEMP_FAHRENHEIT, TEMP_CELSIUS):
            yield AlexaTemperatureSensor(self.hass, self.entity)
        yield AlexaEndpointHealth(self.hass, self.entity)
        yield Alexa(self.hass)
@ENTITY_ADAPTERS.register(binary_sensor.DOMAIN)
class BinarySensorCapabilities(AlexaEntity):
    """Class to represent BinarySensor capabilities."""

    # Sentinel strings returned by get_type(); compared with `is` below,
    # which is safe because get_type() returns these exact class attributes.
    TYPE_CONTACT = "contact"
    TYPE_MOTION = "motion"
    TYPE_PRESENCE = "presence"

    def default_display_categories(self):
        """Return the display categories for this entity.

        NOTE(review): implicitly returns None (not a list) when get_type()
        recognizes no device class - confirm callers tolerate that.
        """
        sensor_type = self.get_type()
        if sensor_type is self.TYPE_CONTACT:
            return [DisplayCategory.CONTACT_SENSOR]
        if sensor_type is self.TYPE_MOTION:
            return [DisplayCategory.MOTION_SENSOR]
        if sensor_type is self.TYPE_PRESENCE:
            return [DisplayCategory.CAMERA]

    def interfaces(self):
        """Yield the supported interfaces."""
        # First yield the interface implied by the device class.
        sensor_type = self.get_type()
        if sensor_type is self.TYPE_CONTACT:
            yield AlexaContactSensor(self.hass, self.entity)
        elif sensor_type is self.TYPE_MOTION:
            yield AlexaMotionSensor(self.hass, self.entity)
        elif sensor_type is self.TYPE_PRESENCE:
            yield AlexaEventDetectionSensor(self.hass, self.entity)
        # yield additional interfaces based on specified display category in config.
        # NOTE(review): when the configured category matches the detected
        # type, the same sensor interface is yielded twice - confirm that
        # duplicate capabilities are acceptable to discovery.
        entity_conf = self.config.entity_config.get(self.entity.entity_id, {})
        if CONF_DISPLAY_CATEGORIES in entity_conf:
            if entity_conf[CONF_DISPLAY_CATEGORIES] == DisplayCategory.DOORBELL:
                yield AlexaDoorbellEventSource(self.entity)
            elif entity_conf[CONF_DISPLAY_CATEGORIES] == DisplayCategory.CONTACT_SENSOR:
                yield AlexaContactSensor(self.hass, self.entity)
            elif entity_conf[CONF_DISPLAY_CATEGORIES] == DisplayCategory.MOTION_SENSOR:
                yield AlexaMotionSensor(self.hass, self.entity)
            elif entity_conf[CONF_DISPLAY_CATEGORIES] == DisplayCategory.CAMERA:
                yield AlexaEventDetectionSensor(self.hass, self.entity)
        yield AlexaEndpointHealth(self.hass, self.entity)
        yield Alexa(self.hass)

    def get_type(self):
        """Return the type of binary sensor (one of the TYPE_* constants).

        Falls through and returns None for unrecognized device classes.
        """
        attrs = self.entity.attributes
        # Door/garage/opening/window device classes all behave as contact sensors.
        if attrs.get(ATTR_DEVICE_CLASS) in (
            binary_sensor.DEVICE_CLASS_DOOR,
            binary_sensor.DEVICE_CLASS_GARAGE_DOOR,
            binary_sensor.DEVICE_CLASS_OPENING,
            binary_sensor.DEVICE_CLASS_WINDOW,
        ):
            return self.TYPE_CONTACT
        if attrs.get(ATTR_DEVICE_CLASS) == binary_sensor.DEVICE_CLASS_MOTION:
            return self.TYPE_MOTION
        if attrs.get(ATTR_DEVICE_CLASS) == binary_sensor.DEVICE_CLASS_PRESENCE:
            return self.TYPE_PRESENCE
@ENTITY_ADAPTERS.register(alarm_control_panel.DOMAIN)
class AlarmControlPanelCapabilities(AlexaEntity):
    """Class to represent Alarm capabilities."""

    def default_display_categories(self):
        """Return the display categories for this entity."""
        return [DisplayCategory.SECURITY_PANEL]

    def interfaces(self):
        """Yield the supported interfaces."""
        # Only expose panel control when arming does not require a code.
        code_required = self.entity.attributes.get("code_arm_required")
        if not code_required:
            yield AlexaSecurityPanelController(self.hass, self.entity)
        yield AlexaEndpointHealth(self.hass, self.entity)
        yield Alexa(self.hass)
@ENTITY_ADAPTERS.register(image_processing.DOMAIN)
class ImageProcessingCapabilities(AlexaEntity):
    """Class to represent image_processing capabilities."""

    def default_display_categories(self):
        """Return the display categories for this entity."""
        return [DisplayCategory.CAMERA]

    def interfaces(self):
        """Yield the supported interfaces."""
        # Image processing entities report detections as Alexa events.
        yield AlexaEventDetectionSensor(self.hass, self.entity)
        yield AlexaEndpointHealth(self.hass, self.entity)
        yield Alexa(self.hass)
@ENTITY_ADAPTERS.register(input_number.DOMAIN)
class InputNumberCapabilities(AlexaEntity):
    """Class to represent input_number capabilities."""

    def default_display_categories(self):
        """Return the display categories for this entity."""
        return [DisplayCategory.OTHER]

    def interfaces(self):
        """Yield the supported interfaces."""
        value_instance = f"{input_number.DOMAIN}.{input_number.ATTR_VALUE}"
        yield AlexaRangeController(self.entity, instance=value_instance)
        yield AlexaEndpointHealth(self.hass, self.entity)
        yield Alexa(self.hass)
@ENTITY_ADAPTERS.register(timer.DOMAIN)
class TimerCapabilities(AlexaEntity):
    """Class to represent Timer capabilities."""

    def default_display_categories(self):
        """Return the display categories for this entity."""
        return [DisplayCategory.OTHER]

    def interfaces(self):
        """Yield the supported interfaces."""
        yield AlexaTimeHoldController(self.entity, allow_remote_resume=True)
        yield AlexaPowerController(self.entity)
        # Bug fix: Alexa() takes the hass object, not the entity; every
        # other adapter in this module passes self.hass here.
        yield Alexa(self.hass)
@ENTITY_ADAPTERS.register(vacuum.DOMAIN)
class VacuumCapabilities(AlexaEntity):
    """Class to represent vacuum capabilities."""

    def default_display_categories(self):
        """Return the display categories for this entity."""
        return [DisplayCategory.OTHER]

    def interfaces(self):
        """Yield the supported interfaces."""
        features = self.entity.attributes.get(ATTR_SUPPORTED_FEATURES, 0)
        # Power control requires both a way to start and a way to stop.
        can_start = features & (vacuum.SUPPORT_TURN_ON | vacuum.SUPPORT_START)
        can_stop = features & (vacuum.SUPPORT_TURN_OFF | vacuum.SUPPORT_RETURN_HOME)
        if can_start and can_stop:
            yield AlexaPowerController(self.entity)
        if features & vacuum.SUPPORT_FAN_SPEED:
            yield AlexaRangeController(
                self.entity, instance=f"{vacuum.DOMAIN}.{vacuum.ATTR_FAN_SPEED}"
            )
        if features & vacuum.SUPPORT_PAUSE:
            yield AlexaTimeHoldController(
                self.entity,
                allow_remote_resume=bool(features & vacuum.SUPPORT_START),
            )
        yield AlexaEndpointHealth(self.hass, self.entity)
        yield Alexa(self.hass)
@ENTITY_ADAPTERS.register(camera.DOMAIN)
class CameraCapabilities(AlexaEntity):
    """Class to represent Camera capabilities."""

    def default_display_categories(self):
        """Return the display categories for this entity."""
        return [DisplayCategory.CAMERA]

    def interfaces(self):
        """Yield the supported interfaces."""
        if self._check_requirements():
            features = self.entity.attributes.get(ATTR_SUPPORTED_FEATURES, 0)
            if features & camera.SUPPORT_STREAM:
                yield AlexaCameraStreamController(self.entity)
        yield AlexaEndpointHealth(self.hass, self.entity)
        yield Alexa(self.hass)

    def _check_requirements(self):
        """Check the hass URL for HTTPS scheme."""
        # Streaming requires the stream component to be set up.
        if "stream" not in self.hass.config.components:
            _LOGGER.debug(
                "%s requires stream component for AlexaCameraStreamController",
                self.entity_id,
            )
            return False
        # Alexa only accepts public HTTPS URLs on the standard port.
        url_requirements = {
            "allow_internal": False,
            "allow_ip": False,
            "require_ssl": True,
            "require_standard_port": True,
        }
        try:
            network.get_url(self.hass, **url_requirements)
        except network.NoURLAvailableError:
            _LOGGER.debug(
                "%s requires HTTPS for AlexaCameraStreamController", self.entity_id
            )
            return False
        return True
| |
# -*- coding: utf-8 -*-
import db, wx, MySQLdb, os, csv
from strings import trans
from datetime import datetime, timedelta
from sqlalchemy import func, or_, and_
from sqlalchemy.exc import IntegrityError, OperationalError, InvalidRequestError
from db import Person, Member, Shoptime, ShopOccupant, Bike, Feedback
from ui import GetShoptimeTypeDescription, FormatTimedelta
from difflib import SequenceMatcher
_controller = None
def PrintShortStack(start = 0, limit = None, prefix = ""):
    """Print a trimmed version of the current stack for debugging.

    start: number of innermost frames to skip (beyond this function's own).
    limit: maximum number of frames to print; None prints all remaining.
    prefix: string prepended to every output line (useful for indenting).
    """
    import traceback
    trace = traceback.extract_stack()
    # Drop this function's own frame plus the `start` innermost callers.
    end = -start - 1
    begin = 0
    if limit is not None:
        begin = end - limit
    # Frame entries unpack as (filename, lineno, function, source_text).
    for filename, lineNumber, function, text in trace[begin : end]:
        # Keep only the last two path components so the output stays short;
        # renamed from `file` to avoid shadowing the builtin.
        shortName = os.path.sep.join(filename.split(os.path.sep)[-2:])
        print("{4}{0}({1}) in {2}:\n{4}\t\"{3}\"".format(
            shortName, lineNumber, function, text, prefix))
def HandleDBAPIError(error):
    """Log a DBAPI-level commit failure, then reconnect or roll back."""
    code, message = error.orig[0], error.orig[1]
    print("***Error on commit ({0}): {1}".format(code, message))
    print("\tStatement: {0}".format(error.statement))
    print("\tParams: {0}".format(error.params))
    print("\tStacktrace follows:")
    PrintShortStack(limit = 4, start = 2, prefix = "\t")
    if not error.connection_invalidated:
        # The connection survived; just discard the failed transaction.
        db.session.rollback()
    else:
        print("\tConnection to database invalidate - reconnecting")
        db.Connect()
    print("")
def HandleSQLAlchemyError(error):
    """Log a SQLAlchemy-level commit failure and roll the session back."""
    details = "; ".join(error.args)
    print("*** Error on commit: {0}".format(details))
    print("\tStacktrace follows:")
    PrintShortStack(limit = 4, start = 2, prefix = "\t")
    print("")
    db.session.rollback()
def FuzzyStringSearch(searchString,
                      sequence,
                      key = lambda x : x,
                      sanitizer = lambda x: x.lower().replace('0', 'o'),
                      resultCount = 10,
                      isJunk = None):
    """Rank items of `sequence` by similarity of key(item) to searchString.

    Returns up to resultCount (item, ratio) pairs, best match first; only
    candidates scoring at least 0.5 are kept.
    """
    matcher = SequenceMatcher(isJunk)
    # seq2 is cached by SequenceMatcher, so set the search string once.
    matcher.set_seq2(sanitizer(searchString))
    scored = []
    for candidate in sequence:
        matcher.set_seq1(sanitizer(key(candidate)))
        # quick_ratio() is a cheap upper bound; skip the expensive ratio()
        # for candidates that cannot reach the threshold.
        if matcher.quick_ratio() < 0.5:
            continue
        similarity = matcher.ratio()
        if similarity >= 0.5:
            scored.append((candidate, similarity))
    scored.sort(key = lambda pair: pair[1], reverse = True)
    return scored[:resultCount]
class Controller:
    """Mediator between the wx UI and the SQLAlchemy-backed database.

    All writes funnel through Commit(), which converts SQLAlchemy errors
    into console diagnostics plus a rollback/reconnect instead of letting
    them propagate into the UI event loop.  (Python 2 source.)
    """

    def __init__(self):
        # Remembered by CreatePerson() so SignPersonIn(None, ...) can sign
        # in the person who was just created via the new-person dialog.
        self._lastPersonCreated = None
        self._signoutTimeout = 5 #hours
        self._ui = None
        # Run one update immediately to sign out stale occupants left over
        # from a previous run.
        self.PeriodicUpdate(None)
        #results = FuzzyStringSearch(searchString = "DM039",
        #   sequence = db.session.query(Bike).all(),
        #   key = lambda bike: bike.serial)
        #results = FuzzyStringSearch(searchString = "Jimmy James",
        #   sequence = db.session.query(Person).all(),
        #   key = lambda x: "{0} {1}".format(x.firstName, x.lastName),
        #   isJunk = lambda x: x in " \t")
        #for item in results:
        #   print item

    def SetUI(self, ui):
        """Attach the UI object used for callbacks; may stay None in tests."""
        self._ui = ui

    def Commit(self):
        """Commit the session; returns True on success, False after recovery."""
        try:
            db.session.commit()
        except (IntegrityError, OperationalError), error:
            # DBAPI-level failure: may require reconnecting to the database.
            HandleDBAPIError(error)
        except (InvalidRequestError), error:
            HandleSQLAlchemyError(error)
        else:
            return True
        return False

    def Rollback(self):
        """Discard all uncommitted changes in the session."""
        db.session.rollback()

    def GetPersonByFullName(self, firstName, lastName):
        """Return the first Person matching both names exactly, or None."""
        return db.session.query(Person) \
            .filter(Person.firstName == firstName) \
            .filter(Person.lastName == lastName).first()

    def GetPersonByID(self, personID):
        """Return the Person with the given primary key, or None."""
        return db.session.query(Person) \
            .filter(Person.id == personID).first()

    def GetCurrentMembers(self):
        """Return people whose membership has no end date or has not ended yet."""
        return db.session.query(Person) \
            .join(Member) \
            .filter(or_(
                Member.endDate >= func.current_timestamp(),
                Member.endDate == None)).all()

    def WriteCurrentMemberEmails(self, filename):
        """Write name/email/membership-end rows for current members to a CSV."""
        # "wb" mode is what the csv module expects under Python 2.
        output = csv.writer(open(filename, "wb"))
        for person in self.GetCurrentMembers():
            if person.memberInfo.emailAddress:
                output.writerow((person.firstName, person.lastName,
                    person.memberInfo.emailAddress, person.memberInfo.endDate))

    def FindPeopleByPartialName(self, partialName):
        """Prefix-match people by first name, last name, or full name."""
        partialName = ' '.join(partialName.split()) #strip out extra spaces
        namelen = len(partialName)
        return db.session.query(Person).filter(
            or_(
                or_(
                    func.left(Person.firstName, namelen) == partialName,
                    func.left(Person.lastName, namelen) == partialName,
                ),
                func.left(Person.firstName + u" " + Person.lastName, namelen) == partialName
            )
        ).all()

    def GetPeopleInShop(self):
        """Return currently signed-in people, oldest sign-in first."""
        return db.session.query(Person) \
            .join(ShopOccupant) \
            .filter(ShopOccupant.personID == Person.id) \
            .order_by(ShopOccupant.start).all()

    def FindPeopleBySerialNumber(self, serial):
        """Return owners of bikes whose serial contains the given substring."""
        return db.session.query(Person) \
            .join(Bike) \
            .filter(and_( \
                Bike.personID == Person.id, \
                func.instr(Bike.serial, serial) > 0)).all()

    def AuthenticateMechanic(self, parent, activity):
        """Delegate mechanic authentication to the UI; None when no UI is set."""
        if self._ui is not None:
            return self._ui.AuthenticateMechanic(parent, activity)

    def ShowNewPersonDialog(self, parent, firstName = u"", lastName = u""):
        """Open the new-person dialog through the UI; None when no UI is set."""
        if self._ui is not None:
            return self._ui.ShowNewPersonDialog(parent, firstName, lastName)
        else:
            return None

    def SignPersonIn(self, person, type):
        """Sign a person into the shop for the given shoptime type.

        person=None signs in the most recently created person.  Returns
        the new ShopOccupant on success, None when the commit fails or
        the person is already signed in for the same activity.
        """
        if person is None:
            person = self._lastPersonCreated
        if self._ui is not None and person.occupantInfo is not None:
            if person.occupantInfo.type == type:
                # Already signed in for this activity: flash an error
                # instead of creating a duplicate occupant row.
                widget = self._ui.GetOccupantNameWidget(person)
                error = trans.sigininAlreadySignedIn
                typeDesc = GetShoptimeTypeDescription(type)
                self._ui.FlashError(
                    error.format(person.Name(), typeDesc), [widget])
                return
            else:
                # Signed in for a different activity: close that session
                # first, then fall through and open the new one.
                self.SignPersonOut(person)
        occupant = ShopOccupant()
        occupant.personID = person.id
        occupant.start = datetime.now()
        occupant.type = type
        person.occupantInfo = occupant
        if self.Commit():
            if self._ui is not None:
                self._ui.AddOccupant(person, datetime.now(), type)
            return occupant
        else:
            return None

    def SignPersonOut(self, person):
        """Convert a person's open shop occupancy into a Shoptime record."""
        if person.occupantInfo:
            shoptime = Shoptime()
            shoptime.start = person.occupantInfo.start
            # Sessions longer than the timeout were probably never signed
            # out; record them with an indefinite (None) end time.
            if datetime.now() - shoptime.start > timedelta(hours = self._signoutTimeout):
                shoptime.end = None
            else:
                shoptime.end = datetime.now()
            shoptime.type = person.occupantInfo.type
            shoptime.notes = u""
            person.shoptimes.append(shoptime)
            db.session.delete(person.occupantInfo)
            self.Commit()
        else:
            raise RuntimeError("Trying to sign {0} out, "\
                "but they aren't in the occupants table.".format(person))
        if self._ui is not None:
            self._ui.RemoveOccupant(person)

    def CreatePerson(self, person):
        """Persist a new Person; returns it on success, None on failure."""
        db.session.add(person)
        if self.Commit():
            self._lastPersonCreated = person
            return person
        else:
            return None

    def CreateBike(self, bike, person = None):
        """Persist a new Bike, optionally attached to an owner."""
        if person:
            person.bikes.append(bike)
        else:
            bike.personID = None
        db.session.add(bike)
        if self.Commit():
            return bike
        else:
            return None

    def AddFeedback(self, feedback):
        """Persist a Feedback row; returns it on success, None on failure."""
        db.session.add(feedback)
        if self.Commit():
            return feedback
        else:
            return None

    def GetFeedback(self):
        """Return every Feedback row."""
        return db.session.query(Feedback).all()

    def GetLastPersonCreated(self):
        """Return the most recently created Person, or None."""
        return self._lastPersonCreated

    def FlashError(self, *argv, **argd):
        """Forward an error flash to the UI, if one is attached."""
        if self._ui is not None:
            self._ui.FlashError(*argv, **argd)

    def StopFlashing(self):
        """Clear any error flash in the UI, if one is attached."""
        if self._ui is not None:
            self._ui.ResetError()

    def ViewPersonInfo(self, parent, person):
        """Show a person's details, gated behind mechanic authentication."""
        if self.AuthenticateMechanic(parent, trans.authenticateView.format(person.Name())):
            if self._ui is not None:
                self._ui.ShowViewPersonDialog(parent, person)

    def PeriodicUpdate(self, event):
        """Timer hook: auto-sign-out anyone in the shop past the timeout."""
        people = self.GetPeopleInShop()
        timeout = 8  # NOTE(review): unused; _signoutTimeout is used below
        for person in people:
            if datetime.now() - person.occupantInfo.start > timedelta(hours = self._signoutTimeout):
                print("{0} has been signed in for more than {1} hours and has been removed." \
                    .format(person.Name(), self._signoutTimeout))
                self.SignPersonOut(person)

    def DebugSignRandomPeopleIn(self, howmany):
        """Debug helper: empty the shop, then sign in `howmany` random people."""
        for person in self.GetPeopleInShop():
            self.SignPersonOut(person)
        people = db.session.query(Person) \
            .order_by(func.rand()) \
            .limit(howmany).all()
        for person in people:
            self.SignPersonIn(person, "shoptime")

    def FixLongShoptimes(self):
        """One-off cleanup: mark over-long shoptimes as indefinite (end=None)."""
        shoptimes = db.session.query(Shoptime).all()
        maxTime = timedelta(hours = self._signoutTimeout)
        for shoptime in shoptimes:
            if shoptime.end is not None:
                duration = shoptime.end - shoptime.start
                if duration > maxTime:
                    shoptime.end = None
                    print("{0}: {1} of {2} marked indefinite.".format(
                        shoptime.person.Name(),
                        FormatTimedelta(duration),
                        shoptime.type))
        self.Commit()
def GetController():
    """Return the process-wide Controller, creating it on first use."""
    global _controller
    if _controller is None:
        _controller = Controller()
    return _controller
def ResetController():
    """Discard any existing singleton and build a fresh Controller."""
    global _controller
    _controller = Controller()
    return _controller
| |
#import unicodedata
class Listener:
    """Captures short audio clips and computes FFT/loudness statistics.

    Uses the external `arecord` tool for capture and scipy for WAV reading
    and FFTs.  Results are stored on instance attributes rather than
    returned, so the typical call order is audioCapture() ->
    getAudioData() -> doFFT() -> calculateStats().
    """

    def __init__(self):
        import log
        self.myLog = log.Log('listener.log')
        # files and paths
        self.fileName = 'rectest.wav' # default file name
        import os
        # Create an empty capture file so later reads never hit a missing file.
        if not os.path.isfile(self.fileName):
            file = open(self.fileName,'w')
            file.write("")
            file.close()
        self.teachPath = 'recordings/'
        # check that teachPath folder exists and create it if necessary
        import os
        if not os.path.isdir(self.teachPath):
            os.makedirs(self.teachPath)
        # audio data
        self.max_freq = 11025  # presumably fs/2 for the 22050 Hz capture rate - TODO confirm
        self.fs = 0
        self.numSamples = 0
        self.bitsPerSample = 16 # number of bits per sample in the file...assumed to be 16
        self.rawData = [] # raw data from wave file
        self.normalizedData = [] # wave file data normalized on -1 to 1
        self.normalizedDataDB = [] # normalized data converted to decibels
        # FFT data
        self.fftData = []
        self.fftDataABS = []
        self.fftNumUsefulBins = 11026 # set when FFT performed or when FFT data read from file
        self.fftBinSize = 0.0
        # A-weight data
        self.fftDataAweight = []
        self.normalized_aWeight = []
        self.statsRMSdB_A = 0.0
        #stats
        self.statsMaxAmplitudeDB = 0.0
        self.statsRMSdB = 0.0
        self.statsCorrelation = 0.0

    def audioCapture(self):
        """Record one second of mono 16-bit 22050 Hz audio into self.fileName."""
        # captures audio into a file
        # from http://www.g7smy.co.uk/?p=283
        #SAVEFILE='rectest.wav'
        #DURATION='1'
        #RATE='22050'
        #FORMAT='S16_LE' # see manual for arecord
        #CHANNELS='1'
        from subprocess import call
        # this function won't work in python 3.5
        call('/usr/bin/arecord -D plughw:1 -f S16_LE -c1 -r22050 --duration=1 ' + self.fileName + ' > /dev/null 2>&1', shell=True)

    def saveFFT(self, newFile, myFFT):
        """Write the magnitude of the first fftNumUsefulBins bins, one per line."""
        # save FFT data to a file
        file = open(newFile,'w') # open the file in write mode
        # only save useful data
        for x in range(self.fftNumUsefulBins):
            file.write(str(abs(myFFT[x])) + '\n')
        file.close() # be nice and close out the file

    def getAudioData(self, audioFile=""):
        """Load a WAV file into fs/rawData and normalize it.

        Defaults to self.fileName when audioFile is empty; exits the
        process if the file does not exist.
        """
        # get audio data out of the file
        if audioFile=="":
            audioFile = self.fileName
        from scipy.io import wavfile
        import os
        if not os.path.isfile(audioFile):
            self.myLog.add("ERROR: audioFile does not exist")
            exit()
        # https://docs.scipy.org/doc/scipy-0.15.1/reference/generated/scipy.io.wavfile.read.html
        self.fs, self.rawData = wavfile.read(audioFile) # load the data
        # from http://samcarcagno.altervista.org/blog/basic-sound-processing-python/
        if self.rawData.dtype == "int16":
            self.bitsPerSample = 16
        elif self.rawData.dtype == "int32":
            self.bitsPerSample = 32
        else: # unknown....we asked for 16 so assume it...but log an error
            self.bitsPerSample = 16
            self.myLog.add("WARNING in getAudioData(): unknown number of bits per sample: " + self.rawData.dtype + " continuing with 16 bits")
        self.numSamples=len(self.rawData)
        self.normalizeData()

    def normalizeData(self): # normalize audio data
        """Fill normalizedData (-1..1) and normalizedDataDB from rawData."""
        # normalize wave data between -1 and 1
        normalizeFactor = 2**(self.bitsPerSample-1)
        self.normalizedData = []
        self.normalizedDataDB = []
        #self.normalizedData=[x/normalizeFactor for x in self.rawData]
        for x in self.rawData:
            self.normalizedData.append(x/normalizeFactor)
            self.normalizedDataDB.append(self.calculateDecibels(abs(x/normalizeFactor)))

    def getFFTData(self, filename):
        """Read one float per line from `filename` (the saveFFT format)."""
        # read data from a file
        data = []
        file = open(filename,'r')
        # TODO: file error checks
        for line in file:
            data.append(float(line))
        file.close()
        return data

    def printStats(self): #print stats about data
        """Print the max amplitude and RMS level computed by calculateStats()."""
        #print('Sample freq: ' + str(self.fs) + ' Hz')
        #print('FFT # useful bins: ' + str(self.fftNumUsefulBins))
        #print('FFT bin size: ' + str(self.fftBinSize) + ' Hz/bin')
        #print('Correlation data points: ' + str(len(self.correlationData)))
        #print('Max Amplitude: ' + str(round(self.statsMaxAmplitudeDB,1)) + ' dB\tRMS: ' + str(round(self.statsRMSdB,1)) + ' dB\tRMS: ' + str(round(self.statsRMSdB_A,1)) + ' dB(A)')
        print('Max Amplitude: ' + str(round(self.statsMaxAmplitudeDB,1)) + ' dB\tRMS: ' + str(round(self.statsRMSdB,1)) + ' dB')

    def doFFT(self):
        """Run a real FFT over normalizedData and derive bin metadata."""
        # from https://stackoverflow.com/questions/23377665/python-scipy-fft-wav-files
        from scipy.fftpack import rfft
        self.fftData = rfft(self.normalizedData) # calculate real FFT
        self.setFFTnumUsefulBins()
        self.setFFTbinSize()
        self.calculateFFTABS()

    def calculateFFTABS(self):
        """Cache the magnitude of each FFT bin in fftDataABS."""
        # use absolute value of data because only care about amplitude
        self.fftDataABS = [abs(x) for x in self.fftData]

    def setFFTnumUsefulBins(self):
        """Set fftNumUsefulBins to the count of non-redundant real-FFT bins.

        Exits the process when numSamples is zero or odd.
        """
        if self.numSamples == 0:
            self.myLog.add("ERROR in setFFTnumUsefulBins(): numSamples == 0 --> about to divide by zero")
            exit()
        # from https://docs.scipy.org/doc/scipy/reference/tutorial/fftpack.html#one-dimensional-discrete-fourier-transforms
        if self.numSamples % 2 == 0: # even number of samples
            # n/2 + 1
            # item [0] is the zero-frequency term
            # item [1]....[n/2] are the positive frequency terms
            self.fftNumUsefulBins = int((self.numSamples / 2)) + 1
        else: # odd number of samples
            #...which is odd because we should have an even number
            #...because the audio sample rates are even
            #...and the number of seconds to record using 'arecord' is an integer
            # TODO: can probaby do error checking...but not expecting to get here
            self.myLog.add("ERROR in doFFT(): odd number of audio samples")
            exit()

    def setFFTbinSize(self):
        """Set fftBinSize (Hz per bin) = (fs/2) / fftNumUsefulBins."""
        if self.fftNumUsefulBins == 0: # don't divide by zero
            self.fftBinSize = 0
        else:
            self.fftBinSize = float(self.fs/2)/self.fftNumUsefulBins # max frequency found is Fs/2 divided by number of real bins

    def calculateAweight(self): # calculate A-weight of current FFT
        """Apply an A-weighting curve in the frequency domain and set statsRMSdB_A."""
        # TODO: make sure this is even correct...probably NOT
        # lookup aWeight for each frequency bin from FFT data
        # https://stackoverflow.com/questions/4364823/how-do-i-obtain-the-frequencies-of-each-value-in-an-fft
        data = []
        for binNum in range(0,int(self.fftNumUsefulBins/2)):
            data.append(self.aWeightLookup(binNum*self.fftBinSize) * self.fftData[binNum])
        self.fftDataAweight = data
        # do inverse transform to get aWeight wave data
        from scipy.fftpack import irfft
        self.normalized_aWeight = irfft(self.fftDataAweight)
        # do RMS on data to get dB(A)
        # NOTE(review): calculateRMS returns a linear ratio, not decibels;
        # unlike calculateStats(), no calculateDecibels() call is applied
        # here - confirm whether statsRMSdB_A is really meant to be in dB.
        self.statsRMSdB_A = self.calculateRMS(self.normalized_aWeight)
        pass

    def aWeightLookup(self,frequency): # look up A-weight for a frequency and return the coefficient to multiply by
        """Return the linear A-weighting coefficient for `frequency` Hz."""
        # http://www.diracdelta.co.uk/science/source/a/w/aweighting/source.html
        coefficient = 1.0 # placeholder until we know the forumula
        if frequency > 0:
            f2 = frequency ** 2
            f4 = frequency ** 4
            from math import log10
            # a = 10*log10(1.562339*f4/((f2 + 11589.0930520225)*(f2 + 544440.6704605728)))
            # b = 10*log10(2.242881e+16*f4/((f2 + 424.31867740600904)*(f2 + 148699001.40839997)))
            # skip the log10() because we're not in dB yet
            # a = (1.562339*f4/((f2 + 11589.0930520225)*(f2 + 544440.6704605728)))
            # b = (2.242881e+16*f4/((f2 + 424.31867740600904)*(f2 + 148699001.40839997)))
            a = (1.562339*f4)/(((f2 + 11589.0930520225)*(f2 + 544440.6704605728)))
            b = (2.242881e+16*f4/((f2 + 424.31867740600904)*(f2 + 148699001.40839997)))
            # print("Freq: " + str(frequency) + '\tA-factor: ' + str(a+b))
            # print("Freq: " + str(frequency) + '\tA-factor db: ' + str(self.calculateDecibels(a)+self.calculateDecibels(b)))
            return (a + b)
        else:
            # Zero/negative frequencies get an effectively -infinite weight.
            return -1E+32

    def calculateStats(self): # calculate stats about the wave file
        """Compute statsMaxAmplitudeDB and statsRMSdB from the normalized data."""
        maxDB=-100.0
        for x in self.normalizedDataDB:
            if x>maxDB:
                maxDB = x
        self.statsMaxAmplitudeDB = maxDB
        self.statsRMSdB = self.calculateDecibels(self.calculateRMS(self.normalizedData))
        # self.calculateAweight()

    def calculateRMS(self,data):
        """Return the root-mean-square of `data` as a linear ratio."""
        # https://stackoverflow.com/questions/5613244/root-mean-square-in-numpy
        from numpy import mean, sqrt, square
        return sqrt(mean(square(data)))

    def calculateDecibels(self,ratio):
        """Convert an amplitude ratio to decibels; returns -100.0 for zero."""
        from math import log10
        if ratio==0:
            self.myLog.add("MATH ERROR: log10(zero) in calculateDecibels() ")
            return -100.0
        else:
            # TODO: age old question ....10 or 20 x log10(ratio)
            # think it's 20 times
            return 20 * log10(ratio)
9
| |
# -*- coding: utf-8 -*-
from __future__ import (absolute_import, division, print_function)
import numpy as np
from ..util import import_
import pytest
from .. import ODESys
from ..core import integrate_chained
from ..symbolic import SymbolicSys, PartiallySolvedSystem, symmetricsys
from ..util import requires, pycvodes_double
from ._robertson import run_integration, get_ode_exprs
_yref_1e11 = (0.2083340149701255e-7, 0.8333360770334713e-13, 0.9999999791665050)
@requires('sym', 'sympy', 'pyodeint')
def test_run_integration():
    """Smoke test: the Robertson problem integrates cleanly with odeint."""
    result = run_integration(integrator='odeint')
    info = result[2]
    assert info['success'] is True
@requires('sym', 'sympy', 'pycvodes')
@pycvodes_double
def test_run_integration__atol_dict():
    """Per-species absolute tolerances can be given as a name-keyed dict."""
    info = run_integration(integrator='cvode',
                           atol={'A': 1e-10, 'B': 1e-11, 'C': 1e-6},
                           nsteps=1500)[2]
    assert info['success'] is True
@requires('sym', 'sympy', 'pycvodes')
@pycvodes_double
def test_run_integration__atol_list():
    """Per-species absolute tolerances can also be a positional list."""
    info = run_integration(integrator='cvode',
                           atol=[1e-10, 1e-11, 1e-6],
                           nsteps=1500)[2]
    assert info['success'] is True
def _test_goe(symbolic=False, reduced=0, extra_forgive=1, logc=False,
              logt=False, zero_conc=0, zero_time=0, nonnegative=None,
              atol=1e-14, rtol=1e-10, integrator='cvode', nsteps=7000, **kwargs):
    """Integrate the Robertson problem and check against the t=1e11 reference.

    Parameters
    ----------
    symbolic : bool
        True: build a SymbolicSys; False: use the plain callback ODESys.
    reduced : int
        0 keeps all three species; 1..3 eliminates that species using the
        linear conservation law (the total concentration is constant).
    extra_forgive : float
        Extra multiplier on the absolute tolerance of the final comparison.
    logc, logt : bool
        Integrate in log-transformed concentration and/or time.
    zero_conc, zero_time : float
        Small positive stand-ins for exact zeros (required when taking logs).
    nonnegative : bool or None
        If true (symbolic path only), impose lower bounds of 0 on y.
    """
    sympy = import_('sympy')
    ny, nk = 3, 3
    k = (.04, 1e4, 3e7)
    y0 = (1, zero_conc, zero_conc)
    t0, tend = zero_time, 1e11
    tot0 = np.sum(y0)  # conserved total, used to reconstruct a reduced species
    kw = dict(integrator=integrator, atol=atol, rtol=rtol, nsteps=nsteps)
    kw.update(kwargs)
    # Empirical per-variant slack factors applied to atol in the final check.
    atol_forgive = {
        0: 6,
        1: 15000,
        2: 7,
        3: 4
    }
    names = 'A B C'.split()
    if symbolic:
        _s = SymbolicSys.from_callback(get_ode_exprs(logc=False, logt=False)[0], ny, nk,
                                       lower_bounds=[0]*ny if nonnegative else None, names=names)
        logexp = (sympy.log, sympy.exp)
        if reduced:
            # Replace one dependent variable analytically via mass balance.
            other1, other2 = [_ for _ in range(3) if _ != (reduced-1)]
            s = PartiallySolvedSystem(_s, lambda x0, y0, p0: {
                _s.dep[reduced-1]: y0[0] + y0[1] + y0[2] - _s.dep[other1] - _s.dep[other2]
            })
        else:
            s = _s
        if logc or logt:
            # Wrap the system in the requested log transformation(s).
            SS = symmetricsys(logexp if logc else None, logexp if logt else None)
            s = SS.from_other(s)
    else:
        f, j = get_ode_exprs(logc=logc, logt=logt, reduced=reduced)
        if reduced:
            # The reduced callbacks take the initial concentrations as extra
            # parameters and drop the eliminated species from y0/names.
            ny -= 1
            k += y0
            y0 = [y0[idx] for idx in range(3) if idx != reduced - 1]
            names.pop(reduced - 1)
        s = ODESys(f, j, autonomous_interface=not logt, names=names)
        if logc:
            y0 = np.log(y0)
        if logt:
            t0 = np.log(t0)
            tend = np.log(tend)
    x, y, i = s.integrate((t0, tend), y0, k, **kw)
    assert i['success'] is True
    # The symbolic wrappers post-process automatically; the plain ODESys path
    # needs manual back-transformation and species reconstruction.
    if logc and not symbolic:
        y = np.exp(y)
    if reduced and not symbolic:
        y = np.insert(y, reduced-1, tot0 - np.sum(y, axis=1), axis=1)
    assert np.allclose(_yref_1e11, y[-1, :],
                       atol=kw['atol']*atol_forgive[reduced]*extra_forgive,
                       rtol=kw['rtol'])
@requires('sym', 'sympy', 'pycvodes')
@pycvodes_double
def test_get_ode_exprs_symbolic():
    """Exercise _test_goe symbolically across log-transform/reduction combos."""
    # Log-concentration only (tiny positive floor replaces exact zeros).
    _test_goe(symbolic=True, logc=True, logt=False, zero_conc=1e-20,
              atol=1e-8, rtol=1e-10, extra_forgive=2, first_step=1e-14)
    # Log-concentration and log-time together.
    _test_goe(symbolic=True, logc=True, logt=True, zero_conc=1e-20, zero_time=1e-12,
              atol=1e-8, rtol=1e-12, extra_forgive=2)
    # Log-time only.
    _test_goe(symbolic=True, logc=False, logt=True, zero_conc=0, zero_time=1e-12,
              atol=1e-9, rtol=5e-13, extra_forgive=0.4)
    # reduced == 0 keeps all species; 1..3 eliminate one via mass balance.
    for reduced in range(4):
        _test_goe(symbolic=True, reduced=reduced, first_step=1e-14, extra_forgive=5)
        if reduced != 2:
            _test_goe(symbolic=True, reduced=reduced, logc=True, logt=False, zero_conc=1e-16,
                      atol=1e-8, rtol=1e-10, extra_forgive=2, first_step=1e-14)
        if reduced == 3:
            _test_goe(symbolic=True, reduced=reduced, logc=True, logt=True, zero_conc=1e-18,
                      zero_time=1e-12, atol=1e-12, rtol=1e-10, extra_forgive=2e-4)  # note extra_forgive
        if reduced != 3:
            _test_goe(symbolic=True, reduced=reduced, logc=False, logt=True, zero_time=1e-12,
                      atol=1e-12, rtol=5e-13, extra_forgive=1, first_step=1e-14)
        _test_goe(symbolic=True, reduced=reduced, logc=False, logt=True, zero_time=1e-9, atol=1e-13, rtol=1e-14,
                  first_step=1e-10, extra_forgive=2)
@requires('sym', 'sympy', 'pycvodes')
@pycvodes_double
def test_get_ode_exprs_ODESys():
    """Same matrix of cases as the symbolic test, via the plain ODESys path."""
    _test_goe(symbolic=False, logc=True, logt=False, zero_conc=1e-20,
              atol=1e-8, rtol=1e-10, extra_forgive=2, first_step=1e-14)
    _test_goe(symbolic=False, logc=True, logt=True, zero_conc=1e-20, zero_time=1e-12,
              atol=1e-8, rtol=1e-12, extra_forgive=2)
    _test_goe(symbolic=False, logc=False, logt=True, zero_conc=0, zero_time=1e-12,
              atol=1e-8, rtol=1e-12, extra_forgive=0.4)
    # reduced == 0 keeps all species; 1..3 eliminate one via mass balance.
    for reduced in range(4):
        _test_goe(symbolic=False, reduced=reduced, extra_forgive=3)
        if reduced != 2:
            _test_goe(symbolic=False, reduced=reduced, logc=True, logt=False, zero_conc=1e-18,
                      atol=1e-10, rtol=1e-10, extra_forgive=20, first_step=1e-14, nsteps=17000)
        if reduced == 3:
            _test_goe(symbolic=False, reduced=reduced, logc=True, logt=True, zero_conc=1e-18, zero_time=1e-12,
                      atol=1e-12, rtol=5e-13, extra_forgive=1e-3, first_step=1e-13)  # note extra_forgive
        _test_goe(symbolic=False, reduced=reduced, logc=False, logt=True, zero_time=1e-12,
                  atol=1e-8, rtol=1e-10, extra_forgive=1, nonnegative=True)  # tests RecoverableError
        _test_goe(symbolic=False, reduced=reduced, logc=False, logt=True, zero_time=1e-9,
                  atol=1e-13, rtol=1e-14, first_step=1e-14, extra_forgive=3)
@requires('sym', 'sympy', 'pycvodes')
@pycvodes_double
# Each parametrization: (reduced, [(nsteps for log system, nsteps for linear
# system), ...]) -- the step budgets encode whether chaining pays off.
@pytest.mark.parametrize('reduced_nsteps', [
    (0, [(1, 1705*1.01), (4988*1.01, 1), (200, 1633), (4988*0.69, 1705*0.69)]),  # pays off in steps!
    (1, [(1, 1563*1.1), (100, 1700*1.01)]),  # worse than using nothing
    (2, [(1, 1674*1.1), (100, 1597*1.1)]),  # no pay back
    (3, [(1, 1591*1.1), (5000, 1), (100, 1600), (4572*0.66, 1100)])  # no pay back
])
def test_integrate_chained_robertson(reduced_nsteps):
    """integrate_chained falls back from the log system to the linear one."""
    reduced, all_nsteps = reduced_nsteps
    rtols = {0: 0.02, 1: 0.1, 2: 0.02, 3: 0.015}
    odes = logsys, linsys = [ODESys(*get_ode_exprs(l, l, reduced=reduced)) for l in [True, False]]

    def pre(x, y, p):
        # Transform into log-space before handing off to the log system.
        return np.log(x), np.log(y), p

    def post(x, y, p):
        # Transform results back into linear space.
        return np.exp(x), np.exp(y), p

    logsys.pre_processors = [pre]
    logsys.post_processors = [post]
    zero_time, zero_conc = 1e-10, 1e-18
    init_conc = (1, zero_conc, zero_conc)
    k = (.04, 1e4, 3e7)
    for nsteps in all_nsteps:
        # Drop the eliminated species (if any) from the initial state.
        y0 = [_ for i, _ in enumerate(init_conc) if i != reduced - 1]
        # _atol = [1e-18, 1e-24, 1e-10]
        _atol = [1e-10]*3
        x, y, nfo = integrate_chained(odes, {'nsteps': nsteps, 'return_on_error': [True, False]}, (zero_time, 1e11),
                                      y0, k+init_conc, integrator='cvode', atol=[at for i, at in enumerate(_atol) if i != reduced - 1], rtol=1e-14, first_step=1e-12)
        if reduced > 0:
            # Reconstruct the eliminated species from the conserved total.
            y = np.insert(y, reduced-1, init_conc[0] - np.sum(y, axis=1), axis=1)
        assert np.allclose(_yref_1e11, y[-1, :], atol=_atol, rtol=rtols[reduced])
        assert nfo['success'] == True  # noqa
        assert nfo['nfev'] > 100
        assert nfo['njev'] > 10
        with pytest.raises(KeyError):
            nfo['asdjklda']
@requires('sym', 'sympy', 'pycvodes')
@pycvodes_double
def test_integrate_chained_multi_robertson():
    """integrate_chained also accepts multiple (xspan, y0, params) sets."""
    odes = logsys, linsys = [ODESys(*get_ode_exprs(l, l)) for l in [True, False]]

    def pre(x, y, p):
        # Transform into log-space before handing off to the log system.
        return np.log(x), np.log(y), p

    def post(x, y, p):
        # Transform results back into linear space.
        return np.exp(x), np.exp(y), p

    logsys.pre_processors = [pre]
    logsys.post_processors = [post]
    zero_time, zero_conc = 1e-10, 1e-18
    init_conc = (1, zero_conc, zero_conc)
    k = (.04, 1e4, 3e7)
    # Try both the chained (log first, then linear) and single-system setups,
    # each integrating three identical problem instances.
    for sys_iter, kw in [(odes, {'nsteps': [100, 1660], 'return_on_error': [True, False]}),
                         (odes[1:], {'nsteps': [1705*1.01]})]:
        results = integrate_chained(
            sys_iter, kw, [(zero_time, 1e11)]*3,
            [init_conc]*3, [k+init_conc]*3, integrator='cvode', atol=1e-10, rtol=1e-14, first_step=1e-14)
        assert len(results) == 3
        for res in results:
            x, y, nfo = res
            assert np.allclose(_yref_1e11, y[-1, :], atol=1e-16, rtol=0.02)
            assert nfo['success'] is True
            assert nfo['nfev'] > 100
            assert nfo['njev'] > 10
            assert nfo['nsys'] in (1, 2)
| |
# Copyright 2015 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest2
class Test_make_row(unittest2.TestCase):
    """make_row is intentionally unimplemented in the happybase shim."""

    def _callFUT(self, *args, **kwargs):
        from gcloud_bigtable.happybase import table
        return table.make_row(*args, **kwargs)

    def test_it(self):
        # Any invocation must surface NotImplementedError.
        with self.assertRaises(NotImplementedError):
            self._callFUT({}, False)
class Test_make_ordered_row(unittest2.TestCase):
    """make_ordered_row is intentionally unimplemented in the happybase shim."""

    def _callFUT(self, *args, **kwargs):
        from gcloud_bigtable.happybase import table
        return table.make_ordered_row(*args, **kwargs)

    def test_it(self):
        # Any invocation must surface NotImplementedError.
        with self.assertRaises(NotImplementedError):
            self._callFUT([], False)
class Test__gc_rule_to_dict(unittest2.TestCase):
    """Unit tests for happybase.table._gc_rule_to_dict."""

    def _callFUT(self, *args, **kwargs):
        from gcloud_bigtable.happybase.table import _gc_rule_to_dict
        return _gc_rule_to_dict(*args, **kwargs)

    def test_with_null(self):
        # No GC rule translates to an empty HappyBase dict.
        gc_rule = None
        result = self._callFUT(gc_rule)
        self.assertEqual(result, {})

    def test_with_max_versions(self):
        from gcloud_bigtable.column_family import GarbageCollectionRule
        max_versions = 2
        gc_rule = GarbageCollectionRule(max_num_versions=max_versions)
        result = self._callFUT(gc_rule)
        expected_result = {'max_versions': max_versions}
        self.assertEqual(result, expected_result)

    def test_with_max_age(self):
        # max_age timedelta maps to a 'time_to_live' in whole seconds.
        import datetime
        from gcloud_bigtable.column_family import GarbageCollectionRule
        time_to_live = 101
        max_age = datetime.timedelta(seconds=time_to_live)
        gc_rule = GarbageCollectionRule(max_age=max_age)
        result = self._callFUT(gc_rule)
        expected_result = {'time_to_live': time_to_live}
        self.assertEqual(result, expected_result)

    def test_with_non_gc_rule(self):
        # Objects the helper does not recognize are passed through as-is.
        gc_rule = object()
        result = self._callFUT(gc_rule)
        self.assertTrue(result is gc_rule)

    def test_with_gc_rule_union(self):
        # Unions cannot be expressed in HappyBase; passed through unchanged.
        from gcloud_bigtable.column_family import GarbageCollectionRuleUnion
        gc_rule = GarbageCollectionRuleUnion()
        result = self._callFUT(gc_rule)
        self.assertTrue(result is gc_rule)

    def test_with_intersection_other_than_two(self):
        # Only two-rule intersections are convertible; others pass through.
        from gcloud_bigtable.column_family import (
            GarbageCollectionRuleIntersection)
        gc_rule = GarbageCollectionRuleIntersection(rules=[])
        result = self._callFUT(gc_rule)
        self.assertTrue(result is gc_rule)

    def test_with_intersection_two_max_num_versions(self):
        # Two rules of the same kind cannot be merged into one dict entry.
        from gcloud_bigtable.column_family import GarbageCollectionRule
        from gcloud_bigtable.column_family import (
            GarbageCollectionRuleIntersection)
        rule1 = GarbageCollectionRule(max_num_versions=1)
        rule2 = GarbageCollectionRule(max_num_versions=2)
        gc_rule = GarbageCollectionRuleIntersection(rules=[rule1, rule2])
        result = self._callFUT(gc_rule)
        self.assertTrue(result is gc_rule)

    def test_with_intersection_two_rules(self):
        # A max-age + max-versions intersection maps onto both dict keys.
        import datetime
        from gcloud_bigtable.column_family import GarbageCollectionRule
        from gcloud_bigtable.column_family import (
            GarbageCollectionRuleIntersection)
        time_to_live = 101
        max_age = datetime.timedelta(seconds=time_to_live)
        rule1 = GarbageCollectionRule(max_age=max_age)
        max_versions = 2
        rule2 = GarbageCollectionRule(max_num_versions=max_versions)
        gc_rule = GarbageCollectionRuleIntersection(rules=[rule1, rule2])
        result = self._callFUT(gc_rule)
        expected_result = {
            'max_versions': max_versions,
            'time_to_live': time_to_live,
        }
        self.assertEqual(result, expected_result)

    def test_with_intersection_two_nested_rules(self):
        # Nested intersections are too complex to convert; passed through.
        from gcloud_bigtable.column_family import (
            GarbageCollectionRuleIntersection)
        rule1 = GarbageCollectionRuleIntersection(rules=[])
        rule2 = GarbageCollectionRuleIntersection(rules=[])
        gc_rule = GarbageCollectionRuleIntersection(rules=[rule1, rule2])
        result = self._callFUT(gc_rule)
        self.assertTrue(result is gc_rule)
class Test__convert_to_time_range(unittest2.TestCase):
    """Unit tests for happybase.table._convert_to_time_range."""

    def _callFUT(self, timestamp=None):
        from gcloud_bigtable.happybase import table
        return table._convert_to_time_range(timestamp=timestamp)

    def test_null(self):
        # No timestamp means no range at all.
        self.assertEqual(self._callFUT(timestamp=None), None)

    def test_invalid_type(self):
        # Anything that is not an integer timestamp must be rejected.
        with self.assertRaises(TypeError):
            self._callFUT(timestamp=object())

    def test_success(self):
        from gcloud_bigtable._helpers import _microseconds_to_timestamp
        from gcloud_bigtable.row import TimestampRange
        millis = 1441928298571
        # HappyBase timestamps are milliseconds; the range end is exclusive.
        expected_end = _microseconds_to_timestamp(1000 * millis)
        result = self._callFUT(timestamp=millis)
        self.assertTrue(isinstance(result, TimestampRange))
        self.assertEqual(result.start, None)
        self.assertEqual(result.end, expected_end)
class Test__cells_to_pairs(unittest2.TestCase):
    """Unit tests for happybase.table._cells_to_pairs."""

    def _callFUT(self, *args, **kwargs):
        from gcloud_bigtable.happybase.table import _cells_to_pairs
        return _cells_to_pairs(*args, **kwargs)

    def test_without_timestamp(self):
        # By default only the cell values are returned.
        from gcloud_bigtable.row_data import Cell
        value1 = 'foo'
        cell1 = Cell(value=value1, timestamp=None)
        value2 = 'bar'
        cell2 = Cell(value=value2, timestamp=None)
        result = self._callFUT([cell1, cell2])
        self.assertEqual(result, [value1, value2])

    def test_with_timestamp(self):
        # With include_timestamp, pairs carry millisecond timestamps.
        from gcloud_bigtable._helpers import _microseconds_to_timestamp
        from gcloud_bigtable.row_data import Cell
        value1 = 'foo'
        ts1_millis = 1221934570148
        ts1 = _microseconds_to_timestamp(ts1_millis * 1000)
        cell1 = Cell(value=value1, timestamp=ts1)
        value2 = 'bar'
        ts2_millis = 1221955575548
        ts2 = _microseconds_to_timestamp(ts2_millis * 1000)
        cell2 = Cell(value=value2, timestamp=ts2)
        result = self._callFUT([cell1, cell2], include_timestamp=True)
        self.assertEqual(result,
                         [(value1, ts1_millis), (value2, ts2_millis)])
class Test__filter_chain_helper(unittest2.TestCase):
    """Unit tests for happybase.table._filter_chain_helper."""

    def _callFUT(self, *args, **kwargs):
        from gcloud_bigtable.happybase.table import _filter_chain_helper
        return _filter_chain_helper(*args, **kwargs)

    def test_no_filters(self):
        # At least one filter source must be supplied.
        with self.assertRaises(ValueError):
            self._callFUT()

    def test_single_filter(self):
        from gcloud_bigtable.row import RowFilter
        versions = 1337
        result = self._callFUT(versions=versions)
        self.assertTrue(isinstance(result, RowFilter))
        # Relies on the fact that RowFilter instances can
        # only have one value set.
        self.assertEqual(result.cells_per_column_limit_filter, versions)

    def test_existing_filters(self):
        from gcloud_bigtable.row import RowFilter
        filters = []
        versions = 1337
        result = self._callFUT(versions=versions, filters=filters)
        # Make sure filters has grown (the helper mutates the passed list).
        self.assertEqual(filters, [result])
        self.assertTrue(isinstance(result, RowFilter))
        # Relies on the fact that RowFilter instances can
        # only have one value set.
        self.assertEqual(result.cells_per_column_limit_filter, versions)

    def _column_helper(self, num_filters, versions=None, timestamp=None):
        # Shared driver: passes a 'fam:qual' column and verifies the family
        # and qualifier filters that always lead the resulting chain.
        from gcloud_bigtable.row import RowFilter
        from gcloud_bigtable.row import RowFilterChain
        col_fam = 'cf1'
        qual = 'qual'
        column = col_fam + ':' + qual
        result = self._callFUT(column, versions=versions, timestamp=timestamp)
        self.assertTrue(isinstance(result, RowFilterChain))
        self.assertEqual(len(result.filters), num_filters)
        fam_filter = result.filters[0]
        qual_filter = result.filters[1]
        self.assertTrue(isinstance(fam_filter, RowFilter))
        self.assertTrue(isinstance(qual_filter, RowFilter))
        # Relies on the fact that RowFilter instances can
        # only have one value set.
        self.assertEqual(fam_filter.family_name_regex_filter, col_fam)
        self.assertEqual(qual_filter.column_qualifier_regex_filter, qual)
        return result

    def test_column_only(self):
        self._column_helper(num_filters=2)

    def test_with_versions(self):
        # A versions limit appends a cells-per-column filter to the chain.
        from gcloud_bigtable.row import RowFilter
        versions = 11
        result = self._column_helper(num_filters=3, versions=versions)
        version_filter = result.filters[2]
        self.assertTrue(isinstance(version_filter, RowFilter))
        # Relies on the fact that RowFilter instances can
        # only have one value set.
        self.assertEqual(version_filter.cells_per_column_limit_filter,
                         versions)

    def test_with_timestamp(self):
        # A timestamp appends an end-bounded TimestampRange filter.
        from gcloud_bigtable._helpers import _microseconds_to_timestamp
        from gcloud_bigtable.row import RowFilter
        from gcloud_bigtable.row import TimestampRange
        timestamp = 1441928298571
        result = self._column_helper(num_filters=3, timestamp=timestamp)
        range_filter = result.filters[2]
        self.assertTrue(isinstance(range_filter, RowFilter))
        # Relies on the fact that RowFilter instances can
        # only have one value set.
        time_range = range_filter.timestamp_range_filter
        self.assertTrue(isinstance(time_range, TimestampRange))
        self.assertEqual(time_range.start, None)
        ts_dt = _microseconds_to_timestamp(1000 * timestamp)
        self.assertEqual(time_range.end, ts_dt)

    def test_with_all_options(self):
        # Family + qualifier + versions + timestamp => 4 chained filters.
        versions = 11
        timestamp = 1441928298571
        self._column_helper(num_filters=4, versions=versions,
                            timestamp=timestamp)
class Test__columns_filter_helper(unittest2.TestCase):
    """Unit tests for happybase.table._columns_filter_helper."""

    def _callFUT(self, *args, **kwargs):
        from gcloud_bigtable.happybase.table import _columns_filter_helper
        return _columns_filter_helper(*args, **kwargs)

    def test_no_columns(self):
        # An empty column list cannot produce a filter.
        columns = []
        with self.assertRaises(ValueError):
            self._callFUT(columns)

    def test_single_column(self):
        # A single family collapses to one RowFilter (no union wrapper).
        from gcloud_bigtable.row import RowFilter
        col_fam = 'cf1'
        columns = [col_fam]
        result = self._callFUT(columns)
        expected_result = RowFilter(family_name_regex_filter=col_fam)
        self.assertEqual(result, expected_result)

    def test_column_and_column_familieis(self):
        # Mixed family-only and family:qualifier inputs become a union of
        # a plain family filter and a family+qualifier chain.
        from gcloud_bigtable.row import RowFilter
        from gcloud_bigtable.row import RowFilterChain
        from gcloud_bigtable.row import RowFilterUnion
        col_fam1 = 'cf1'
        col_fam2 = 'cf2'
        col_qual2 = 'qual2'
        columns = [col_fam1, col_fam2 + ':' + col_qual2]
        result = self._callFUT(columns)
        self.assertTrue(isinstance(result, RowFilterUnion))
        self.assertEqual(len(result.filters), 2)
        filter1 = result.filters[0]
        filter2 = result.filters[1]
        self.assertTrue(isinstance(filter1, RowFilter))
        self.assertEqual(filter1.family_name_regex_filter, col_fam1)
        self.assertTrue(isinstance(filter2, RowFilterChain))
        filter2a, filter2b = filter2.filters
        self.assertTrue(isinstance(filter2a, RowFilter))
        self.assertEqual(filter2a.family_name_regex_filter, col_fam2)
        self.assertTrue(isinstance(filter2b, RowFilter))
        self.assertEqual(filter2b.column_qualifier_regex_filter, col_qual2)
class Test__row_keys_filter_helper(unittest2.TestCase):
    """Unit tests for happybase.table._row_keys_filter_helper."""

    def _callFUT(self, *args, **kwargs):
        from gcloud_bigtable.happybase import table
        return table._row_keys_filter_helper(*args, **kwargs)

    def test_no_rows(self):
        # An empty key list cannot produce a filter.
        with self.assertRaises(ValueError):
            self._callFUT([])

    def test_single_row(self):
        # A single key collapses to one RowFilter (no union wrapper).
        from gcloud_bigtable.row import RowFilter
        row_key = b'row-key'
        expected = RowFilter(row_key_regex_filter=row_key)
        self.assertEqual(self._callFUT([row_key]), expected)

    def test_many_rows(self):
        # Multiple keys are OR-ed together in a RowFilterUnion.
        from gcloud_bigtable.row import RowFilter
        from gcloud_bigtable.row import RowFilterUnion
        keys = [b'row-key1', b'row-key2', b'row-key3']
        expected = RowFilterUnion(filters=[
            RowFilter(row_key_regex_filter=key) for key in keys])
        self.assertEqual(self._callFUT(keys), expected)
class Test__string_successor(unittest2.TestCase):
    """Unit tests for happybase.table._string_successor."""

    def _callFUT(self, *args, **kwargs):
        from gcloud_bigtable.happybase import table
        return table._string_successor(*args, **kwargs)

    def test_with_alphanumeric(self):
        # The final byte is simply incremented.
        self.assertEqual(self._callFUT(b'boa'), b'bob')
        self.assertEqual(self._callFUT(b'abc1'), b'abc2')

    def test_with_last_byte(self):
        # A trailing 0xFF is dropped before incrementing the previous byte.
        self.assertEqual(self._callFUT(b'boa\xff'), b'bob')

    def test_with_empty_string(self):
        # The empty string (meaning "end of table") maps to itself.
        self.assertEqual(self._callFUT(b''), b'')

    def test_with_all_last_bytes(self):
        # All-0xFF input has no successor and degrades to the empty string.
        self.assertEqual(self._callFUT(b'\xff\xff\xff'), b'')

    def test_with_unicode_input(self):
        # Text input is accepted and treated as its byte encoding.
        self.assertEqual(self._callFUT(u'boa'), b'bob')
class TestTable(unittest2.TestCase):
def _getTargetClass(self):
from gcloud_bigtable.happybase.table import Table
return Table
def _makeOne(self, *args, **kwargs):
return self._getTargetClass()(*args, **kwargs)
    def test_constructor(self):
        """Constructor wires name/connection and builds a low-level table."""
        from gcloud_bigtable._testing import _Monkey
        from gcloud_bigtable.happybase import table as MUT
        name = 'table-name'
        cluster = object()
        connection = _Connection(cluster)
        tables_constructed = []

        def make_low_level_table(*args, **kwargs):
            # Record every low-level table the constructor creates.
            result = _MockLowLevelTable(*args, **kwargs)
            tables_constructed.append(result)
            return result

        with _Monkey(MUT, _LowLevelTable=make_low_level_table):
            table = self._makeOne(name, connection)
        self.assertEqual(table.name, name)
        self.assertEqual(table.connection, connection)
        # Exactly one low-level table must have been constructed.
        table_instance, = tables_constructed
        self.assertEqual(table._low_level_table, table_instance)
        self.assertEqual(table_instance.args, (name, cluster))
        self.assertEqual(table_instance.kwargs, {})
def test_constructor_null_connection(self):
name = 'table-name'
connection = None
table = self._makeOne(name, connection)
self.assertEqual(table.name, name)
self.assertEqual(table.connection, connection)
self.assertEqual(table._low_level_table, None)
def test___repr__(self):
name = 'table-name'
connection = None
table = self._makeOne(name, connection)
self.assertEqual(repr(table), '<table.Table name=\'table-name\'>')
    def test_families(self):
        """families() maps each low-level GC rule through _gc_rule_to_dict."""
        from gcloud_bigtable._testing import _MockCalled
        from gcloud_bigtable._testing import _Monkey
        from gcloud_bigtable.happybase import table as MUT
        name = 'table-name'
        connection = None
        table = self._makeOne(name, connection)
        table._low_level_table = _MockLowLevelTable()
        # Mock the column families to be returned.
        col_fam_name = 'fam'
        gc_rule = object()
        col_fam = _MockLowLevelColumnFamily(col_fam_name, gc_rule=gc_rule)
        col_fams = {col_fam_name: col_fam}
        table._low_level_table.column_families = col_fams
        to_dict_result = object()
        mock_gc_rule_to_dict = _MockCalled(to_dict_result)
        with _Monkey(MUT, _gc_rule_to_dict=mock_gc_rule_to_dict):
            result = table.families()
        self.assertEqual(result, {col_fam_name: to_dict_result})
        # families() must refresh the column family listing exactly once.
        self.assertEqual(table._low_level_table.list_column_families_calls, 1)
        # Check the input to our mock.
        mock_gc_rule_to_dict.check_called(self, [(gc_rule,)])
def test_regions(self):
name = 'table-name'
connection = None
table = self._makeOne(name, connection)
with self.assertRaises(NotImplementedError):
table.regions()
    def test_row_empty_row(self):
        """row() returns {} when the low-level read finds nothing."""
        from gcloud_bigtable._testing import _MockCalled
        from gcloud_bigtable._testing import _Monkey
        from gcloud_bigtable.happybase import table as MUT
        name = 'table-name'
        connection = None
        table = self._makeOne(name, connection)
        table._low_level_table = _MockLowLevelTable()
        table._low_level_table.read_row_result = None
        fake_filter = object()
        mock_filter_chain_helper = _MockCalled(fake_filter)
        row_key = 'row-key'
        timestamp = object()
        with _Monkey(MUT, _filter_chain_helper=mock_filter_chain_helper):
            result = table.row(row_key, timestamp=timestamp)
        # read_row_result == None --> No results.
        self.assertEqual(result, {})
        read_row_args = (row_key,)
        read_row_kwargs = {'filter_': fake_filter}
        self.assertEqual(table._low_level_table.read_row_calls, [
            (read_row_args, read_row_kwargs),
        ])
        # The filter helper must receive the timestamp and a versions=1 cap.
        expected_kwargs = {
            'filters': [],
            'versions': 1,
            'timestamp': timestamp,
        }
        mock_filter_chain_helper.check_called(self, [()], [expected_kwargs])
    def test_row_with_columns(self):
        """row() folds a columns filter into the filter chain."""
        from gcloud_bigtable._testing import _MockCalled
        from gcloud_bigtable._testing import _Monkey
        from gcloud_bigtable.happybase import table as MUT
        name = 'table-name'
        connection = None
        table = self._makeOne(name, connection)
        table._low_level_table = _MockLowLevelTable()
        table._low_level_table.read_row_result = None
        fake_col_filter = object()
        mock_columns_filter_helper = _MockCalled(fake_col_filter)
        fake_filter = object()
        mock_filter_chain_helper = _MockCalled(fake_filter)
        row_key = 'row-key'
        columns = object()
        with _Monkey(MUT, _filter_chain_helper=mock_filter_chain_helper,
                     _columns_filter_helper=mock_columns_filter_helper):
            result = table.row(row_key, columns=columns)
        # read_row_result == None --> No results.
        self.assertEqual(result, {})
        read_row_args = (row_key,)
        read_row_kwargs = {'filter_': fake_filter}
        self.assertEqual(table._low_level_table.read_row_calls, [
            (read_row_args, read_row_kwargs),
        ])
        mock_columns_filter_helper.check_called(self, [(columns,)])
        # The column filter must be passed as the initial chain entry.
        expected_kwargs = {
            'filters': [fake_col_filter],
            'versions': 1,
            'timestamp': None,
        }
        mock_filter_chain_helper.check_called(self, [()], [expected_kwargs])
    def test_row_with_results(self):
        """row() converts low-level cells into HappyBase 'fam:qual' pairs."""
        from gcloud_bigtable._testing import _MockCalled
        from gcloud_bigtable._testing import _Monkey
        from gcloud_bigtable.happybase import table as MUT
        from gcloud_bigtable.row_data import PartialRowData
        row_key = 'row-key'
        name = 'table-name'
        connection = None
        table = self._makeOne(name, connection)
        table._low_level_table = _MockLowLevelTable()
        partial_row = PartialRowData(row_key)
        table._low_level_table.read_row_result = partial_row
        fake_filter = object()
        mock_filter_chain_helper = _MockCalled(fake_filter)
        fake_pair = object()
        mock_cells_to_pairs = _MockCalled([fake_pair])
        col_fam = u'cf1'
        qual = b'qual'
        fake_cells = object()
        partial_row._cells = {col_fam: {qual: fake_cells}}
        include_timestamp = object()
        with _Monkey(MUT, _filter_chain_helper=mock_filter_chain_helper,
                     _cells_to_pairs=mock_cells_to_pairs):
            result = table.row(row_key, include_timestamp=include_timestamp)
        # The results come from _cells_to_pairs.
        expected_result = {col_fam.encode('ascii') + b':' + qual: fake_pair}
        self.assertEqual(result, expected_result)
        read_row_args = (row_key,)
        read_row_kwargs = {'filter_': fake_filter}
        self.assertEqual(table._low_level_table.read_row_calls, [
            (read_row_args, read_row_kwargs),
        ])
        expected_kwargs = {
            'filters': [],
            'versions': 1,
            'timestamp': None,
        }
        mock_filter_chain_helper.check_called(self, [()], [expected_kwargs])
        # include_timestamp must be threaded through to _cells_to_pairs.
        to_pairs_kwargs = {'include_timestamp': include_timestamp}
        mock_cells_to_pairs.check_called(
            self, [(fake_cells,)], [to_pairs_kwargs])
def test_rows_empty_row(self):
name = 'table-name'
connection = None
table = self._makeOne(name, connection)
result = table.rows([])
self.assertEqual(result, [])
    def test_rows_with_columns(self):
        """rows() chains column and row-key filters before reading."""
        from gcloud_bigtable._testing import _MockCalled
        from gcloud_bigtable._testing import _Monkey
        from gcloud_bigtable.happybase import table as MUT
        name = 'table-name'
        connection = None
        table = self._makeOne(name, connection)
        table._low_level_table = _MockLowLevelTable()
        rr_result = _MockPartialRowsData()
        table._low_level_table.read_rows_result = rr_result
        self.assertEqual(rr_result.consume_all_calls, 0)
        fake_col_filter = object()
        mock_columns_filter_helper = _MockCalled(fake_col_filter)
        fake_rows_filter = object()
        mock_row_keys_filter_helper = _MockCalled(fake_rows_filter)
        fake_filter = object()
        mock_filter_chain_helper = _MockCalled(fake_filter)
        rows = ['row-key']
        columns = object()
        with _Monkey(MUT, _filter_chain_helper=mock_filter_chain_helper,
                     _row_keys_filter_helper=mock_row_keys_filter_helper,
                     _columns_filter_helper=mock_columns_filter_helper):
            result = table.rows(rows, columns=columns)
        # read_rows_result == Empty PartialRowsData --> No results.
        self.assertEqual(result, [])
        read_rows_args = ()
        read_rows_kwargs = {'filter_': fake_filter}
        self.assertEqual(table._low_level_table.read_rows_calls, [
            (read_rows_args, read_rows_kwargs),
        ])
        # The streamed result must be fully consumed exactly once.
        self.assertEqual(rr_result.consume_all_calls, 1)
        mock_columns_filter_helper.check_called(self, [(columns,)])
        mock_row_keys_filter_helper.check_called(self, [(rows,)])
        # Column filter precedes the row-key filter in the chain.
        expected_kwargs = {
            'filters': [fake_col_filter, fake_rows_filter],
            'versions': 1,
            'timestamp': None,
        }
        mock_filter_chain_helper.check_called(self, [()], [expected_kwargs])
    def test_rows_with_results(self):
        """rows() returns only keys that matched, as (key, dict) pairs."""
        from gcloud_bigtable._testing import _MockCalled
        from gcloud_bigtable._testing import _Monkey
        from gcloud_bigtable.happybase import table as MUT
        from gcloud_bigtable.row_data import PartialRowData
        row_key1 = 'row-key1'
        row_key2 = 'row-key2'
        rows = [row_key1, row_key2]
        name = 'table-name'
        connection = None
        table = self._makeOne(name, connection)
        table._low_level_table = _MockLowLevelTable()
        row1 = PartialRowData(row_key1)
        # Return row1 but not row2
        rr_result = _MockPartialRowsData(rows={row_key1: row1})
        table._low_level_table.read_rows_result = rr_result
        self.assertEqual(rr_result.consume_all_calls, 0)
        fake_rows_filter = object()
        mock_row_keys_filter_helper = _MockCalled(fake_rows_filter)
        fake_filter = object()
        mock_filter_chain_helper = _MockCalled(fake_filter)
        fake_pair = object()
        mock_cells_to_pairs = _MockCalled([fake_pair])
        col_fam = u'cf1'
        qual = b'qual'
        fake_cells = object()
        row1._cells = {col_fam: {qual: fake_cells}}
        include_timestamp = object()
        with _Monkey(MUT, _row_keys_filter_helper=mock_row_keys_filter_helper,
                     _filter_chain_helper=mock_filter_chain_helper,
                     _cells_to_pairs=mock_cells_to_pairs):
            result = table.rows(rows, include_timestamp=include_timestamp)
        # read_rows_result == PartialRowsData with row_key1
        expected_result = {col_fam.encode('ascii') + b':' + qual: fake_pair}
        self.assertEqual(result, [(row_key1, expected_result)])
        read_rows_args = ()
        read_rows_kwargs = {'filter_': fake_filter}
        self.assertEqual(table._low_level_table.read_rows_calls, [
            (read_rows_args, read_rows_kwargs),
        ])
        # The streamed result must be fully consumed exactly once.
        self.assertEqual(rr_result.consume_all_calls, 1)
        mock_row_keys_filter_helper.check_called(self, [(rows,)])
        expected_kwargs = {
            'filters': [fake_rows_filter],
            'versions': 1,
            'timestamp': None,
        }
        mock_filter_chain_helper.check_called(self, [()], [expected_kwargs])
        # include_timestamp must be threaded through to _cells_to_pairs.
        to_pairs_kwargs = {'include_timestamp': include_timestamp}
        mock_cells_to_pairs.check_called(
            self, [(fake_cells,)], [to_pairs_kwargs])
    def test_cells_empty_row(self):
        """cells() returns [] when the low-level read finds nothing."""
        from gcloud_bigtable._testing import _MockCalled
        from gcloud_bigtable._testing import _Monkey
        from gcloud_bigtable.happybase import table as MUT
        name = 'table-name'
        connection = None
        table = self._makeOne(name, connection)
        table._low_level_table = _MockLowLevelTable()
        table._low_level_table.read_row_result = None
        fake_filter = object()
        mock_filter_chain_helper = _MockCalled(fake_filter)
        row_key = 'row-key'
        column = 'fam:col1'
        with _Monkey(MUT, _filter_chain_helper=mock_filter_chain_helper):
            result = table.cells(row_key, column)
        # read_row_result == None --> No results.
        self.assertEqual(result, [])
        read_row_args = (row_key,)
        read_row_kwargs = {'filter_': fake_filter}
        self.assertEqual(table._low_level_table.read_row_calls, [
            (read_row_args, read_row_kwargs),
        ])
        # cells() filters on a specific column, with no version cap.
        expected_kwargs = {
            'column': column,
            'versions': None,
            'timestamp': None,
        }
        mock_filter_chain_helper.check_called(self, [()], [expected_kwargs])
    def test_cells_with_results(self):
        """cells() converts the row's cells through _cells_to_pairs."""
        from gcloud_bigtable._testing import _MockCalled
        from gcloud_bigtable._testing import _Monkey
        from gcloud_bigtable.happybase import table as MUT
        from gcloud_bigtable.row_data import PartialRowData
        row_key = 'row-key'
        name = 'table-name'
        connection = None
        table = self._makeOne(name, connection)
        table._low_level_table = _MockLowLevelTable()
        partial_row = PartialRowData(row_key)
        table._low_level_table.read_row_result = partial_row
        # These are all passed to mocks.
        versions = object()
        timestamp = object()
        include_timestamp = object()
        fake_filter = object()
        mock_filter_chain_helper = _MockCalled(fake_filter)
        fake_result = object()
        mock_cells_to_pairs = _MockCalled(fake_result)
        # Seed the partial row with a single cell under 'cf1:qual'.
        col_fam = 'cf1'
        qual = 'qual'
        fake_cells = object()
        partial_row._cells = {col_fam: {qual: fake_cells}}
        column = col_fam + ':' + qual
        with _Monkey(MUT, _filter_chain_helper=mock_filter_chain_helper,
                     _cells_to_pairs=mock_cells_to_pairs):
            result = table.cells(row_key, column, versions=versions,
                                 timestamp=timestamp,
                                 include_timestamp=include_timestamp)
        # The return value comes straight from the (mocked) _cells_to_pairs.
        self.assertEqual(result, fake_result)
        read_row_args = (row_key,)
        read_row_kwargs = {'filter_': fake_filter}
        self.assertEqual(table._low_level_table.read_row_calls, [
            (read_row_args, read_row_kwargs),
        ])
        # The chain helper got the column and the pass-through options.
        filter_kwargs = {
            'column': column,
            'versions': versions,
            'timestamp': timestamp,
        }
        mock_filter_chain_helper.check_called(self, [()], [filter_kwargs])
        to_pairs_kwargs = {'include_timestamp': include_timestamp}
        mock_cells_to_pairs.check_called(
            self, [(fake_cells,)], [to_pairs_kwargs])
def test_scan_with_batch_size(self):
name = 'table-name'
connection = None
table = self._makeOne(name, connection)
with self.assertRaises(ValueError):
list(table.scan(batch_size=object()))
def test_scan_with_scan_batching(self):
name = 'table-name'
connection = None
table = self._makeOne(name, connection)
with self.assertRaises(ValueError):
list(table.scan(scan_batching=object()))
def test_scan_with_sorted_columns(self):
name = 'table-name'
connection = None
table = self._makeOne(name, connection)
with self.assertRaises(ValueError):
list(table.scan(sorted_columns=object()))
def test_scan_with_invalid_limit(self):
name = 'table-name'
connection = None
table = self._makeOne(name, connection)
with self.assertRaises(ValueError):
list(table.scan(limit=-10))
def test_scan_with_row_prefix_and_row_start(self):
name = 'table-name'
connection = None
table = self._makeOne(name, connection)
with self.assertRaises(ValueError):
list(table.scan(row_prefix='a', row_stop='abc'))
def test_scan_with_string_filter(self):
name = 'table-name'
connection = None
table = self._makeOne(name, connection)
with self.assertRaises(TypeError):
list(table.scan(filter='some-string'))
    def _scan_test_helper(self, row_start=None, row_stop=None, row_prefix=None,
                          columns=None, filter_=None, timestamp=None,
                          include_timestamp=False, limit=None, rr_result=None,
                          expected_result=None):
        """Drive Table.scan() against mocks and verify the delegated calls.

        The keyword arguments are forwarded to scan(); ``rr_result`` is the
        canned low-level read result and ``expected_result`` the rows the
        returned generator should yield.
        """
        import types
        from gcloud_bigtable._testing import _MockCalled
        from gcloud_bigtable._testing import _Monkey
        from gcloud_bigtable.happybase import table as MUT
        name = 'table-name'
        connection = None
        table = self._makeOne(name, connection)
        table._low_level_table = _MockLowLevelTable()
        rr_result = rr_result or _MockPartialRowsData()
        table._low_level_table.read_rows_result = rr_result
        self.assertEqual(rr_result.consume_next_calls, 0)
        fake_col_filter = object()
        mock_columns_filter_helper = _MockCalled(fake_col_filter)
        fake_filter = object()
        mock_filter_chain_helper = _MockCalled(fake_filter)
        with _Monkey(MUT, _filter_chain_helper=mock_filter_chain_helper,
                     _columns_filter_helper=mock_columns_filter_helper):
            result = table.scan(row_start=row_start, row_stop=row_stop,
                                row_prefix=row_prefix, columns=columns,
                                filter=filter_, timestamp=timestamp,
                                include_timestamp=include_timestamp,
                                limit=limit)
            # scan() is lazy: it hands back a generator.
            self.assertTrue(isinstance(result, types.GeneratorType))
            # Need to consume the result while the monkey patch is applied.
            # read_rows_result == Empty PartialRowsData --> No results.
            expected_result = expected_result or []
            self.assertEqual(list(result), expected_result)
        read_rows_args = ()
        # A row prefix is translated into a [prefix, successor) key range.
        if row_prefix:
            row_start = row_prefix
            row_stop = MUT._string_successor(row_prefix)
        read_rows_kwargs = {
            'end_key': row_stop,
            'filter_': fake_filter,
            'limit': limit,
            'start_key': row_start,
        }
        self.assertEqual(table._low_level_table.read_rows_calls, [
            (read_rows_args, read_rows_kwargs),
        ])
        # One consume_next() per data iteration plus the terminating call.
        self.assertEqual(rr_result.consume_next_calls,
                         rr_result.iterations + 1)
        if columns is not None:
            mock_columns_filter_helper.check_called(self, [(columns,)])
        else:
            mock_columns_filter_helper.check_called(self, [])
        # The filter chain gets the explicit filter and/or the columns filter.
        filters = []
        if filter_ is not None:
            filters.append(filter_)
        if columns:
            filters.append(fake_col_filter)
        expected_kwargs = {
            'filters': filters,
            'versions': 1,
            'timestamp': timestamp,
        }
        mock_filter_chain_helper.check_called(self, [()], [expected_kwargs])
def test_scan_with_columns(self):
columns = object()
self._scan_test_helper(columns=columns)
def test_scan_with_row_start_and_stop(self):
row_start = 'bar'
row_stop = 'foo'
self._scan_test_helper(row_start=row_start, row_stop=row_stop)
def test_scan_with_row_prefix(self):
row_prefix = 'row-prefi'
self._scan_test_helper(row_prefix=row_prefix)
def test_scan_with_filter(self):
mock_filter = object()
self._scan_test_helper(filter_=mock_filter)
def test_scan_with_no_results(self):
limit = 1337
timestamp = object()
self._scan_test_helper(timestamp=timestamp, limit=limit)
    def test_scan_with_results(self):
        """scan() yields (row_key, columns_dict) pairs for returned rows."""
        from gcloud_bigtable.row_data import PartialRowData
        row_key1 = 'row-key1'
        row1 = PartialRowData(row_key1)
        # One iteration's worth of data before the stream is exhausted.
        rr_result = _MockPartialRowsData(rows={row_key1: row1}, iterations=1)
        include_timestamp = object()
        # The row holds no cells, so its columns dict is empty.
        expected_result = [(row_key1, {})]
        self._scan_test_helper(include_timestamp=include_timestamp,
                               rr_result=rr_result,
                               expected_result=expected_result)
    def test_put(self):
        """put() wraps a single mutation in a one-shot Batch."""
        from gcloud_bigtable._testing import _Monkey
        from gcloud_bigtable.happybase import table as MUT
        from gcloud_bigtable.happybase.table import _WAL_SENTINEL
        name = 'table-name'
        connection = None
        table = self._makeOne(name, connection)
        batches_created = []
        # Capture every Batch the implementation constructs.
        def make_batch(*args, **kwargs):
            result = _MockBatch(*args, **kwargs)
            batches_created.append(result)
            return result
        row = 'row-key'
        data = {'fam:col': 'foo'}
        timestamp = None
        with _Monkey(MUT, Batch=make_batch):
            result = table.put(row, data, timestamp=timestamp)
        # There is no return value.
        self.assertEqual(result, None)
        # Check how the batch was created and used.
        batch, = batches_created
        self.assertTrue(isinstance(batch, _MockBatch))
        self.assertEqual(batch.args, (table,))
        expected_kwargs = {
            'timestamp': timestamp,
            'batch_size': None,
            'transaction': False,
            'wal': _WAL_SENTINEL,
        }
        self.assertEqual(batch.kwargs, expected_kwargs)
        # Make sure it was a successful context manager
        self.assertEqual(batch.exit_vals, [(None, None, None)])
        self.assertEqual(batch.put_args, [(row, data)])
        self.assertEqual(batch.delete_args, [])
    def test_delete(self):
        """delete() wraps the column deletions in a one-shot Batch."""
        from gcloud_bigtable._testing import _Monkey
        from gcloud_bigtable.happybase import table as MUT
        from gcloud_bigtable.happybase.table import _WAL_SENTINEL
        name = 'table-name'
        connection = None
        table = self._makeOne(name, connection)
        batches_created = []
        # Capture every Batch the implementation constructs.
        def make_batch(*args, **kwargs):
            result = _MockBatch(*args, **kwargs)
            batches_created.append(result)
            return result
        row = 'row-key'
        columns = ['fam:col1', 'fam:col2']
        timestamp = None
        with _Monkey(MUT, Batch=make_batch):
            result = table.delete(row, columns=columns, timestamp=timestamp)
        # There is no return value.
        self.assertEqual(result, None)
        # Check how the batch was created and used.
        batch, = batches_created
        self.assertTrue(isinstance(batch, _MockBatch))
        self.assertEqual(batch.args, (table,))
        expected_kwargs = {
            'timestamp': timestamp,
            'batch_size': None,
            'transaction': False,
            'wal': _WAL_SENTINEL,
        }
        self.assertEqual(batch.kwargs, expected_kwargs)
        # Make sure it was a successful context manager
        self.assertEqual(batch.exit_vals, [(None, None, None)])
        self.assertEqual(batch.put_args, [])
        self.assertEqual(batch.delete_args, [(row, columns)])
    def test_batch(self):
        """batch() builds a Batch from the given options and returns it."""
        from gcloud_bigtable._testing import _Monkey
        from gcloud_bigtable.happybase import table as MUT
        name = 'table-name'
        connection = None
        table = self._makeOne(name, connection)
        timestamp = object()
        batch_size = 42
        transaction = False  # Must be False when batch_size is non-null
        wal = object()
        with _Monkey(MUT, Batch=_MockBatch):
            result = table.batch(timestamp=timestamp, batch_size=batch_size,
                                 transaction=transaction, wal=wal)
        self.assertTrue(isinstance(result, _MockBatch))
        # The table itself plus all options are forwarded to Batch.
        self.assertEqual(result.args, (table,))
        expected_kwargs = {
            'timestamp': timestamp,
            'batch_size': batch_size,
            'transaction': transaction,
            'wal': wal,
        }
        self.assertEqual(result.kwargs, expected_kwargs)
    def test_counter_get(self):
        """counter_get() is implemented as a zero-value counter_inc()."""
        klass = self._getTargetClass()
        counter_value = 1337
        # Subclass that records counter_inc calls instead of hitting Bigtable.
        class TableWithInc(klass):
            incremented = []
            value = counter_value
            def counter_inc(self, row, column, value=1):
                self.incremented.append((row, column, value))
                self.value += value
                return self.value
        name = 'table-name'
        connection = None
        table = TableWithInc(name, connection)
        row = 'row-key'
        column = 'fam:col1'
        self.assertEqual(TableWithInc.incremented, [])
        result = table.counter_get(row, column)
        self.assertEqual(result, counter_value)
        # A get is an increment by 0: value unchanged, call recorded.
        self.assertEqual(TableWithInc.incremented, [(row, column, 0)])
def test_counter_set(self):
name = 'table-name'
connection = None
table = self._makeOne(name, connection)
row = 'row-key'
column = 'fam:col1'
value = 42
with self.assertRaises(NotImplementedError):
table.counter_set(row, column, value=value)
    def test_counter_dec(self):
        """counter_dec() delegates to counter_inc() with a negated value."""
        klass = self._getTargetClass()
        counter_value = 42
        # Subclass that records counter_inc calls instead of hitting Bigtable.
        class TableWithInc(klass):
            incremented = []
            value = counter_value
            def counter_inc(self, row, column, value=1):
                self.incremented.append((row, column, value))
                self.value += value
                return self.value
        name = 'table-name'
        connection = None
        table = TableWithInc(name, connection)
        row = 'row-key'
        column = 'fam:col1'
        dec_value = 987
        self.assertEqual(TableWithInc.incremented, [])
        result = table.counter_dec(row, column, value=dec_value)
        self.assertEqual(result, counter_value - dec_value)
        # The decrement surfaces as an increment by -dec_value.
        self.assertEqual(TableWithInc.incremented, [(row, column, -dec_value)])
    def _counter_inc_helper(self, row, column, value, commit_result):
        """Run counter_inc() against mocks and verify the tracked counts.

        ``commit_result`` is what the low-level row returns from
        commit_modifications(); malformed shapes let callers assert on the
        resulting errors.
        """
        name = 'table-name'
        connection = None
        table = self._makeOne(name, connection)
        # Mock the return values.
        table._low_level_table = _MockLowLevelTable()
        table._low_level_table.row_values[row] = _MockLowLevelRow(
            row, commit_result=commit_result)
        result = table.counter_inc(row, column, value=value)
        incremented_value = value + _MockLowLevelRow.COUNTER_DEFAULT
        self.assertEqual(result, incremented_value)
        # Check the row values returned.
        row_obj = table._low_level_table.row_values[row]
        # The 'fam:qual' column string is split into a (family, qual) key.
        self.assertEqual(row_obj.counts,
                         {tuple(column.split(':')): incremented_value})
def test_counter_inc(self):
import struct
row = 'row-key'
col_fam = 'fam'
col_qual = 'col1'
column = col_fam + ':' + col_qual
value = 42
packed_value = struct.pack('>q', value)
fake_timestamp = None
commit_result = {
col_fam: {
col_qual: [(packed_value, fake_timestamp)],
}
}
self._counter_inc_helper(row, column, value, commit_result)
def test_counter_inc_bad_result(self):
row = 'row-key'
col_fam = 'fam'
col_qual = 'col1'
column = col_fam + ':' + col_qual
value = 42
commit_result = None
with self.assertRaises(TypeError):
self._counter_inc_helper(row, column, value, commit_result)
def test_counter_inc_result_key_error(self):
row = 'row-key'
col_fam = 'fam'
col_qual = 'col1'
column = col_fam + ':' + col_qual
value = 42
commit_result = {}
with self.assertRaises(KeyError):
self._counter_inc_helper(row, column, value, commit_result)
def test_counter_inc_result_nested_key_error(self):
row = 'row-key'
col_fam = 'fam'
col_qual = 'col1'
column = col_fam + ':' + col_qual
value = 42
commit_result = {col_fam: {}}
with self.assertRaises(KeyError):
self._counter_inc_helper(row, column, value, commit_result)
def test_counter_inc_result_non_unique_cell(self):
row = 'row-key'
col_fam = 'fam'
col_qual = 'col1'
column = col_fam + ':' + col_qual
value = 42
fake_timestamp = None
packed_value = None
commit_result = {
col_fam: {
col_qual: [
(packed_value, fake_timestamp),
(packed_value, fake_timestamp),
],
}
}
with self.assertRaises(ValueError):
self._counter_inc_helper(row, column, value, commit_result)
class _MockLowLevelTable(object):
def __init__(self, *args, **kwargs):
self.args = args
self.kwargs = kwargs
self.list_column_families_calls = 0
self.column_families = {}
self.row_values = {}
self.read_row_calls = []
self.read_row_result = None
self.read_rows_calls = []
self.read_rows_result = None
def list_column_families(self):
self.list_column_families_calls += 1
return self.column_families
def row(self, row_key):
return self.row_values[row_key]
def read_row(self, *args, **kwargs):
self.read_row_calls.append((args, kwargs))
return self.read_row_result
def read_rows(self, *args, **kwargs):
self.read_rows_calls.append((args, kwargs))
return self.read_rows_result
class _MockLowLevelColumnFamily(object):
    """Value object standing in for a low-level column family."""
    def __init__(self, column_family_id, gc_rule=None):
        # Stored verbatim; tests only read these attributes back.
        self.column_family_id = column_family_id
        self.gc_rule = gc_rule
class _Connection(object):
    """Connection stub exposing only the ``_cluster`` attribute."""
    def __init__(self, cluster):
        self._cluster = cluster
class _MockBatch(object):
def __init__(self, *args, **kwargs):
self.args = args
self.kwargs = kwargs
self.exit_vals = []
self.put_args = []
self.delete_args = []
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.exit_vals.append((exc_type, exc_value, traceback))
def put(self, *args):
self.put_args.append(args)
def delete(self, *args):
self.delete_args.append(args)
class _MockLowLevelRow(object):
COUNTER_DEFAULT = 0
def __init__(self, row_key, commit_result=None):
self.row_key = row_key
self.counts = {}
self.commit_result = commit_result
def increment_cell_value(self, column_family_id, column, int_value):
count = self.counts.setdefault((column_family_id, column),
self.COUNTER_DEFAULT)
self.counts[(column_family_id, column)] = count + int_value
def commit_modifications(self):
return self.commit_result
class _MockPartialRowsData(object):
def __init__(self, rows=None, iterations=0):
self.rows = rows or {}
self.consume_all_calls = 0
self.consume_next_calls = 0
self.iterations = iterations
def consume_all(self):
self.consume_all_calls += 1
def consume_next(self):
self.consume_next_calls += 1
if self.consume_next_calls > self.iterations:
raise StopIteration
# ---------------------------------------------------------------------------
#!/usr/bin/env python3
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import builtins
import collections
import gc
import json
import unittest.mock as mock
import sys
import threading
import unittest
import ukip
import evdev
import pyudev
import usb
# Replace the hardware-facing libraries with MagicMocks so the tests never
# touch real devices.  NOTE(review): the real modules were already imported
# and bound above (``import evdev`` etc.); this sys.modules swap only affects
# imports performed after this point — confirm that is the intent.
sys.modules['evdev'] = mock.MagicMock()
sys.modules['pyudev'] = mock.MagicMock()
sys.modules['usb'] = mock.MagicMock()
# This is needed, because the whole library is (Magic)mocked.
# Therefore, without this an error is thrown, that usb.core.USBError is not
# inheriting from BaseException.
class USBError(IOError):
  pass
class UkipTest(unittest.TestCase):
  def setUp(self):
    """Build fresh fakes and reset ukip's global state before each test."""
    super(UkipTest, self).setUp()
    usb.core.USBError = USBError
    # Reset the module-level ring buffers so tests never share state.
    ukip._event_devices_timings = {}
    ukip._event_devices_keystrokes = {}
    # Attribute bag mimicking a pyudev device; get() mirrors pyudev's API.
    class FakePyudevDevice(object):
      product = None
      device_node = None
      action = None
      ID_VENDOR_ID = None
      ID_MODEL_ID = None
      def get(self, attribute):
        return getattr(self, attribute)
    # Attribute bag mimicking an evdev input event.
    class FakeEvent(object):
      value = None
      type = None
      sec = None
      usec = None
      scancode = None
    self.pyudev_device = FakePyudevDevice()
    self.pyudev_device.product = 'FakeProduct'
    self.pyudev_device.device_node = '/dev/input/event1337'
    self.pyudev_device.action = 'add'
    # Pyudev devices emit the PID and VID as strings (hex values, but str).
    # Also, the PID (product ID) is called model ID (ID_MODEL_ID).
    self.pyudev_device.ID_VENDOR_ID = '123'
    self.pyudev_device.ID_MODEL_ID = '456'
    self.fake_event = FakeEvent()
    self.fake_event.value = evdev.KeyEvent.key_down
    self.fake_event.type = evdev.ecodes.EV_KEY
    self.fake_event.sec = 13
    self.fake_event.usec = 477827
    self.fake_event.scancode = 45
    self.mock_inputdevice = mock.create_autospec(evdev.InputDevice)
    self.mock_pyusb_device = mock.MagicMock()
    self.mock_pyusb_device.product = 'SomeVendor Keyboard'
    # PyUSB devices emit the PID and VID as integers.
    self.mock_pyusb_device.idVendor = 123
    self.mock_pyusb_device.idProduct = 456
    self.mock_pyusb_device.is_kernel_driver_active.return_value = True
    self.mock_usb_config = mock.create_autospec(usb.core.Configuration)
    self.mock_usb_config.bNumInterfaces = 1
    self.event_device_path = '/dev/input/event1337'
    # Clear any side effect a previous test may have installed.
    evdev.InputDevice.side_effect = None
@mock.patch.object(ukip, 'enforce_monitor_mode', autospec=True)
def test_check_for_attack_trigger_monitor(self, monitor_mode_mock):
"""Tests if the monitor mode is triggered for attacking device times."""
ukip._UKIP_RUN_MODE = ukip.UKIP_AVAILABLE_MODES.MONITOR
# Need to access the global variable.
ukip._event_devices_timings[self.event_device_path] = collections.deque(
maxlen=ukip.KEYSTROKE_WINDOW)
ukip._event_devices_keystrokes[self.event_device_path] = collections.deque(
maxlen=ukip.KEYSTROKE_WINDOW)
# Push amount of KEYSTROKE_WINDOW times into the ringbuffer, that trigger
# the monitor mode.
ukip._event_devices_timings[self.event_device_path].append(1555146977759524)
ukip._event_devices_timings[self.event_device_path].append(1555146977759525)
ukip._event_devices_timings[self.event_device_path].append(1555146977759526)
ukip._event_devices_timings[self.event_device_path].append(1555146977759527)
ukip._event_devices_timings[self.event_device_path].append(1555146977759528)
ukip.check_for_attack(self.event_device_path, self.mock_pyusb_device)
# The timings trigger, so call the monitor mode.
monitor_mode_mock.assert_called_once_with(self.mock_pyusb_device,
self.event_device_path)
@mock.patch.object(ukip, 'enforce_monitor_mode', autospec=True)
def test_check_for_attack_not_trigger_monitor(self, monitor_mode_mock):
"""Tests if the monitor mode is NOT triggered for benign device times."""
ukip._UKIP_RUN_MODE = ukip.UKIP_AVAILABLE_MODES.MONITOR
# Need to access the global variable.
ukip._event_devices_timings[self.event_device_path] = collections.deque(
maxlen=ukip.KEYSTROKE_WINDOW)
# Normal typing, that doesn't trigger the monitor mode.
ukip._event_devices_timings[self.event_device_path].append(1555146977759524)
ukip._event_devices_timings[self.event_device_path].append(1555146980127487)
ukip._event_devices_timings[self.event_device_path].append(1555146982271470)
ukip._event_devices_timings[self.event_device_path].append(1555146984415453)
ukip._event_devices_timings[self.event_device_path].append(1555146986559436)
ukip.check_for_attack(self.event_device_path, self.mock_pyusb_device)
# Since normal typing, the monitor mode was not called.
self.assertFalse(monitor_mode_mock.called)
  @mock.patch.object(ukip, 'enforce_monitor_mode', autospec=True)
  def test_check_for_attack_no_times(self, monitor_mode_mock):
    """Checks if function returns early, if no times are provided."""
    ukip._UKIP_RUN_MODE = ukip.UKIP_AVAILABLE_MODES.MONITOR
    # The ring buffer exists for the device but holds no timings at all.
    ukip._event_devices_timings[self.event_device_path] = collections.deque(
        maxlen=ukip.KEYSTROKE_WINDOW)
    not_enough_timings = ukip.check_for_attack(self.event_device_path,
                                               self.mock_pyusb_device)
    # Not enough times, so bail out of the function call early (return False).
    self.assertIs(not_enough_timings, False)
    # When not enough times, the return value is False and monitor mode is
    # not called.
    self.assertFalse(monitor_mode_mock.called)
@mock.patch.object(ukip, 'enforce_hardening_mode', autospec=True)
@mock.patch.object(ukip, 'enforce_monitor_mode', autospec=True)
def test_check_for_attack_proper_run_mode(self, monitor_mode_mock,
hardening_mode_mock):
"""Tests if the proper mode is executed based on global selection."""
# Need to access the global variable.
ukip._event_devices_timings[self.event_device_path] = collections.deque(
maxlen=ukip.KEYSTROKE_WINDOW)
# Push amount of KEYSTROKE_WINDOW times into the ringbuffer, that triggers
# the chosen mode.
ukip._event_devices_timings[self.event_device_path].append(1555146977759524)
ukip._event_devices_timings[self.event_device_path].append(1555146977759525)
ukip._event_devices_timings[self.event_device_path].append(1555146977759526)
ukip._event_devices_timings[self.event_device_path].append(1555146977759527)
ukip._event_devices_timings[self.event_device_path].append(1555146977759528)
# First test with the MONITOR mode.
ukip._UKIP_RUN_MODE = ukip.UKIP_AVAILABLE_MODES.MONITOR
ukip.check_for_attack(self.event_device_path, self.mock_pyusb_device)
monitor_mode_mock.assert_called_once_with(self.mock_pyusb_device,
self.event_device_path)
# Finally, test with the HARDENING mode.
ukip._UKIP_RUN_MODE = ukip.UKIP_AVAILABLE_MODES.HARDENING
ukip.check_for_attack(self.event_device_path, self.mock_pyusb_device)
hardening_mode_mock.assert_called_once_with(self.mock_pyusb_device,
self.event_device_path)
@mock.patch.object(ukip, 'log', autospec=True)
@mock.patch.object(ukip, 'enforce_hardening_mode', autospec=True)
@mock.patch.object(ukip, 'enforce_monitor_mode', autospec=True)
def test_check_for_attack_no_run_mode(self, monitor_mode_mock,
hardening_mode_mock, logging_mock):
"""Tests when no run mode is set."""
# Need to access the global variable.
ukip._event_devices_timings[self.event_device_path] = collections.deque(
maxlen=ukip.KEYSTROKE_WINDOW)
# Push amount of KEYSTROKE_WINDOW times into the ringbuffer, that would
# trigger a chosen mode.
ukip._event_devices_timings[self.event_device_path].append(1555146977759524)
ukip._event_devices_timings[self.event_device_path].append(1555146977759525)
ukip._event_devices_timings[self.event_device_path].append(1555146977759526)
ukip._event_devices_timings[self.event_device_path].append(1555146977759527)
ukip._event_devices_timings[self.event_device_path].append(1555146977759528)
# Set the run mode to None.
ukip._UKIP_RUN_MODE = None
ukip.check_for_attack(self.event_device_path, self.mock_pyusb_device)
# No mode should trigger.
self.assertFalse(monitor_mode_mock.called)
self.assertFalse(hardening_mode_mock.called)
# But the error should be logged.
logging_mock.error.assert_called_once()
  @mock.patch.object(ukip, 'check_for_attack', autospec=True)
  def test_add_to_ring_buffer_create_key_time(self, check_for_attack_mock):
    """Tests the ringbuffer key creation on adding a time for the first time."""
    # At the beginning the global dict is empty (setUp cleared it).
    self.assertFalse(ukip._event_devices_timings)
    # The event_device_path wasn't present, but should be created now.
    ukip.add_to_ring_buffer(self.event_device_path, 1555146977759524, 'x',
                            self.mock_pyusb_device)
    # Check if the key was successfully created.
    self.assertTrue(ukip._event_devices_timings.get(self.event_device_path))
    # Check if the check_for_attack function was called on the created key.
    check_for_attack_mock.assert_called_once_with(self.event_device_path,
                                                  self.mock_pyusb_device)
  @mock.patch.object(ukip, 'check_for_attack', autospec=True)
  def test_add_to_ring_buffer_create_key_keystroke(self, check_for_attack_mock):
    """Tests the ringbuffer key creation on adding an initial keystroke."""
    # At the beginning the global dict is empty (setUp cleared it).
    self.assertFalse(ukip._event_devices_keystrokes)
    # The event_device_path wasn't present, but should be created now.
    ukip.add_to_ring_buffer(self.event_device_path, 1555146977759524, 'x',
                            self.mock_pyusb_device)
    # Check if the key was successfully created.
    self.assertTrue(ukip._event_devices_keystrokes.get(self.event_device_path))
    # Check if the check_for_attack function was called on the created key.
    check_for_attack_mock.assert_called_once_with(self.event_device_path,
                                                  self.mock_pyusb_device)
@mock.patch.object(ukip, 'check_for_attack', autospec=True)
def test_add_to_ring_buffer_multiple_values(self, check_for_attack_mock):
"""Tests if the ringbuffer is working correctly with the set window."""
ukip.add_to_ring_buffer(self.event_device_path, 1555146977759524, 'a',
self.mock_pyusb_device)
self.assertEqual(
len(ukip._event_devices_timings.get(self.event_device_path)), 1)
ukip.add_to_ring_buffer(self.event_device_path, 1555146980127487, 'b',
self.mock_pyusb_device)
self.assertEqual(
len(ukip._event_devices_timings.get(self.event_device_path)), 2)
ukip.add_to_ring_buffer(self.event_device_path, 1555146980303490, 'c',
self.mock_pyusb_device)
self.assertEqual(
len(ukip._event_devices_timings.get(self.event_device_path)), 3)
ukip.add_to_ring_buffer(self.event_device_path, 1555146982271470, 'd',
self.mock_pyusb_device)
self.assertEqual(
len(ukip._event_devices_timings.get(self.event_device_path)), 4)
ukip.add_to_ring_buffer(self.event_device_path, 1555146984271470, 'e',
self.mock_pyusb_device)
self.assertEqual(
len(ukip._event_devices_timings.get(self.event_device_path)), 5)
ukip.add_to_ring_buffer(self.event_device_path, 1555147982271470, 'f',
self.mock_pyusb_device)
# Since it's a ringbuffer, the length for both dicts is still
# KEYSTROKE_WINDOW.
self.assertEqual(
len(ukip._event_devices_timings.get(self.event_device_path)),
ukip.KEYSTROKE_WINDOW)
self.assertEqual(
len(ukip._event_devices_timings.get(self.event_device_path)),
ukip.KEYSTROKE_WINDOW)
# The check_for_attack function was called KEYSTROKE_WINDOW + 1 times.
self.assertEqual(check_for_attack_mock.call_count,
ukip.KEYSTROKE_WINDOW + 1)
  @mock.patch.object(ukip, 'log', autospec=True)
  def test_enforce_monitor_mode_with_product(self, logging_mock):
    """Tests which logging message is emitted when device has a product set."""
    # Seed the timing ring buffer (helper defined elsewhere on this class).
    self.fill_test_ringbuffer_with_data()
    ukip.enforce_monitor_mode(self.mock_pyusb_device, self.event_device_path)
    # With a product set, the device's own name appears in the warning.
    logging_mock.warning.assert_called_with(
        '[UKIP] The device %s with the vendor id %s and the product'
        ' id %s would have been blocked. The causing timings are: %s.',
        self.mock_pyusb_device.product, hex(self.mock_pyusb_device.idVendor),
        hex(self.mock_pyusb_device.idProduct),
        ukip._event_devices_timings[self.event_device_path])
  @mock.patch.object(ukip, 'log', autospec=True)
  def test_enforce_monitor_mode_no_product(self, logging_mock):
    """Tests which logging message is emitted when device has NO product set."""
    # Seed the timing ring buffer (helper defined elsewhere on this class).
    self.fill_test_ringbuffer_with_data()
    self.mock_pyusb_device.product = None
    ukip.enforce_monitor_mode(self.mock_pyusb_device, self.event_device_path)
    # Without a product, the warning falls back to the 'UNKNOWN' placeholder.
    logging_mock.warning.assert_called_with(
        '[UKIP] The device %s with the vendor id %s and the product'
        ' id %s would have been blocked. The causing timings are: %s.',
        'UNKNOWN', hex(self.mock_pyusb_device.idVendor),
        hex(self.mock_pyusb_device.idProduct),
        ukip._event_devices_timings[self.event_device_path])
  @mock.patch.object(ukip, 'load_keycodes_from_file', autospec=True)
  @mock.patch.object(evdev, 'InputDevice', autospec=True)
  @mock.patch.object(usb.core, 'find', autospec=True)
  def test_monitor_device_thread_library_calls(self, usb_core_find_mock,
                                               input_device_mock,
                                               load_keycodes_from_file_mock):
    """Tests if all the calls to the libraries are made."""
    # pyudev reports IDs as hex strings; the thread expects integers.
    vendor_id = int(self.pyudev_device.ID_VENDOR_ID, 16)
    product_id = int(self.pyudev_device.ID_MODEL_ID, 16)
    ukip.monitor_device_thread(self.pyudev_device, vendor_id, product_id)
    load_keycodes_from_file_mock.assert_called()
    # The event device is opened by node path, the USB handle by VID/PID.
    input_device_mock.assert_called_once_with(self.pyudev_device.device_node)
    usb_core_find_mock.assert_called_once_with(
        idVendor=vendor_id, idProduct=product_id)
  def test_monitor_device_thread_logging(self):
    """Tests the initial logging of the thread starting function."""
    # TODO: Implement this test (docstring-only placeholder).
@mock.patch.object(ukip, 'load_keycodes_from_file', autospec=True)
@mock.patch.object(ukip, 'log', autospec=True)
def test_monitor_device_thread_exception_inputdevice(
self, logging_mock, load_keycodes_from_file_mock):
"""Tests exception and log message for the InputDevice creation."""
log_message = ('There was an error while starting the thread for device '
'monitoring: %s')
exception_message = '[Errno 19] No such device'
exception_object = OSError(exception_message)
evdev.InputDevice.side_effect = exception_object
vendor_id = int(self.pyudev_device.ID_VENDOR_ID, 16)
product_id = int(self.pyudev_device.ID_MODEL_ID, 16)
ukip.monitor_device_thread(self.pyudev_device, vendor_id, product_id)
load_keycodes_from_file_mock.assert_called()
logging_mock.warning.assert_called()
@mock.patch.object(ukip, 'load_keycodes_from_file', autospec=True)
@mock.patch.object(ukip, 'log', autospec=True)
def test_monitor_device_thread_exception_read_loop(
self, logging_mock, load_keycodes_from_file_mock):
"""Tests exception and log message in read_loop."""
log_message = 'Events found for unbound device: %s'
exception_message = '[Errno 19] No such device'
exception_object = OSError(exception_message)
local_mock_inputdevice = mock.MagicMock()
evdev.InputDevice.return_value = local_mock_inputdevice
local_mock_inputdevice.read_loop.side_effect = exception_object
vendor_id = int(self.pyudev_device.ID_VENDOR_ID, 16)
product_id = int(self.pyudev_device.ID_MODEL_ID, 16)
ukip.monitor_device_thread(self.pyudev_device, vendor_id, product_id)
load_keycodes_from_file_mock.assert_called()
logging_mock.warning.assert_called()
  def test_monitor_device_thread_keystroke_in_ms(self):
    """Tests if add_to_ringbuffer was called with the keystroke time in ms."""
    # TODO: Implement this test (docstring-only placeholder).
  def test_monitor_device_thread_keystroke_shift(self):
    """Tests if add_to_ringbuffer was called with the upper case keystroke."""
    # TODO: Implement this test (docstring-only placeholder).
  def test_monitor_device_thread_keystroke_capslock(self):
    """Tests if add_to_ringbuffer was called with the upper case keystroke."""
    # TODO: Implement this test (docstring-only placeholder).
@mock.patch.object(pyudev, 'Context', autospec=True)
@mock.patch.object(pyudev.Monitor, 'from_netlink', autospec=True)
def test_init_device_list_library_calls(self, netlink_mock, context_mock):
"""Tests if the initial library calls are made."""
ukip.init_device_list()
self.assertEqual(context_mock.call_count, 1)
self.assertEqual(netlink_mock.call_count, 1)
  def test_init_device_list_exceptions(self):
    """Tests if exceptions were raised (ValueError and DeviceError)."""
    # TODO: Implement this test (docstring-only placeholder).
  def test_init_device_list_device_count(self):
    """Tests if the number of devices is increased when iterating."""
    # TODO: Implement this test (docstring-only placeholder).
  def test_init_device_list_invalid_pid_vid(self):
    """Tests if a ValueError is raised, when the VID/PID cannot be converted."""
    # TODO: Implement this test (docstring-only placeholder).
  def test_init_device_list_runtimeerror(self):
    """Tests if the RuntimeError is thrown, when the thread failed to start."""
    # TODO: Implement this test (docstring-only placeholder).
  def test_main_threading(self):
    """Tests if the thread was started."""
    # TODO: Implement this test (docstring-only placeholder).
  def test_main_too_many_arguments(self):
    """Tests if no arguments were provided to main."""
    # TODO: Implement this test (docstring-only placeholder).
  @mock.patch.object(pyudev.Monitor, 'from_netlink', autospec=True)
  def test_main_filter_by(self, netlink_mock):
    """Tests if the monitor filter_by was actually called."""
    monitor_mock = mock.MagicMock()
    # NOTE(review): `netlink_mock` IS the patched `from_netlink`, so this
    # line and `netlink_mock.return_value = monitor_mock` below appear to be
    # duplicates — confirm and drop one.
    pyudev.Monitor.from_netlink.return_value = monitor_mock
    # One device event, then None to end main()'s poll loop.
    monitor_mock.poll.side_effect = [self.pyudev_device, None]
    netlink_mock.return_value = monitor_mock
    ukip.main(['ukip.py'])
    # filter_by(subsystem='input') is expected to be invoked twice.
    calls = [mock.call(subsystem='input'), mock.call(subsystem='input')]
    monitor_mock.filter_by.assert_has_calls(calls)
  @mock.patch.object(builtins, 'open', autospec=True)
  def test_check_local_allowlist(self, open_mock):
    """Tests if the local allowlist check returns the allowlist on success."""
    # Wire the context manager so iteration yields the fake lines below.
    open_mock.return_value.__enter__ = open_mock
    # Prepare a fake file, that looks similar to the actual file.
    open_mock.return_value.__iter__.return_value = iter([
        '# This is the config file\n', '# for UKIP.\n',
        '0x3784:0x3472 a,b,c\n'
    ])
    # Call with a PID and VID that will be found.
    allowlist = ukip.check_local_allowlist('0x3784', '0x3472')
    # If the PID and VID are found, the function returns the allowlist.
    self.assertEqual(
        allowlist,
        ukip.AllowlistConfigReturn(
            allowlist=['a', 'b', 'c'], device_present=True))
@mock.patch.object(builtins, 'open', autospec=True)
def test_check_local_allowlist_two_devices(self, open_mock):
    """Tests if the local allowlist with two devices, where one matches."""
    # The mocked file object acts as its own context manager.
    open_mock.return_value.__enter__ = open_mock
    # Prepare a fake file, that looks similar to the actual file.
    open_mock.return_value.__iter__.return_value = iter([
        '# This is the config file\n', '# for UKIP.\n',
        '0x1337:0x1234 x,y,z\n', '0x3784:0x3472 a,b,c\n'
    ])
    # Call with a PID and VID that will be found.
    allowlist = ukip.check_local_allowlist('0x3784', '0x3472')
    # If the PID and VID are found, the function returns the allowlist.
    self.assertEqual(
        allowlist,
        ukip.AllowlistConfigReturn(
            allowlist=['a', 'b', 'c'], device_present=True))
@mock.patch.object(builtins, 'open', autospec=True)
def test_check_local_allowlist_only_comments(self, open_mock):
    """Tests if the local allowlist check returns False when only comments."""
    # The mocked file object acts as its own context manager.
    open_mock.return_value.__enter__ = open_mock
    # Prepare a fake file, with only comments.
    open_mock.return_value.__iter__.return_value = iter([
        '# This is the config file\n', '# for UKIP.\n',
        '# One more comment line.\n'
    ])
    # Lookup for a PID and VID.
    allowlist = ukip.check_local_allowlist('0x3784', '0x3472')
    # If there are only comment in the config file, return False.
    self.assertEqual(
        allowlist,
        ukip.AllowlistConfigReturn(allowlist=[], device_present=False))
@mock.patch.object(builtins, 'open', autospec=True)
def test_check_local_allowlist_no_device(self, open_mock):
    """Tests if the allowlist check returns False when device not in file."""
    # The mocked file object acts as its own context manager.
    open_mock.return_value.__enter__ = open_mock
    open_mock.return_value.__iter__.return_value = iter([
        '# This is the config file\n', '# for UKIP.\n',
        '0x3784:0x3472 a,b,c\n'
    ])
    # Lookup for a PID and VID which are not in the config file.
    allowlist = ukip.check_local_allowlist('0x1234', '0x3472')
    # If the device cannot be found in the config file, return False.
    self.assertEqual(
        allowlist,
        ukip.AllowlistConfigReturn(allowlist=[], device_present=False))
@mock.patch.object(builtins, 'open', autospec=True)
def test_check_local_allowlist_key_val_parsing(self, open_mock):
    """Tests if the config file could be parsed into keys and values."""
    open_mock.return_value.__enter__ = open_mock
    # A line without the 'vid:pid allowlist' structure cannot be parsed.
    open_mock.return_value.__iter__.return_value = iter([
        '# This is the config file\n', '# for UKIP.\n',
        'cannotparse\n'
    ])
    # Check if the exception was raised.
    self.assertRaises(ukip.AllowlistFileError, ukip.check_local_allowlist,
                      '0x1234', '0x3472')
@mock.patch.object(builtins, 'open', autospec=True)
def test_check_local_allowlist_device_parsing(self, open_mock):
    """Tests if the device in the config file can be parsed."""
    open_mock.return_value.__enter__ = open_mock
    # The device part lacks the expected 'vid:pid' separator.
    open_mock.return_value.__iter__.return_value = iter([
        '# This is the config file\n', '# for UKIP.\n',
        '37843472 a,b,c\n'
    ])
    self.assertRaises(ukip.AllowlistFileError, ukip.check_local_allowlist,
                      '0x3784', '0x3472')
@mock.patch.object(builtins, 'open', autospec=True)
def test_check_local_allowlist_parsing(self, open_mock):
    """Tests if allowlist could be parsed from the config file."""
    open_mock.return_value.__enter__ = open_mock
    open_mock.return_value.__iter__.return_value = iter([
        '# This is the config file\n', '# for UKIP.\n',
        '0x3784:0x3472 cannotparse\n'
    ])
    # The device will be found, but the allowlist cannot be parsed.
    allowlist = ukip.check_local_allowlist('0x3784', '0x3472')
    # If the allowlist is a word, that is not 'any' or 'none', return False.
    self.assertEqual(
        allowlist,
        ukip.AllowlistConfigReturn(allowlist=[], device_present=False))
@mock.patch.object(builtins, 'open', autospec=True)
def test_check_local_allowlist_file_not_found(self, open_mock):
    """Tests if the config file could be found."""
    # Opening the config file fails outright.
    open_mock.side_effect = ukip.AllowlistFileError(
        'The config file /etc/ukip/allowlist could not be found: %s')
    self.assertRaises(ukip.AllowlistFileError, ukip.check_local_allowlist,
                      '0x3784', '0x3472')
@mock.patch.object(builtins, 'open', autospec=True)
def test_check_local_allowlist_empty_lines(self, open_mock):
    """Tests if the allowlist check returns False when only empty lines."""
    open_mock.return_value.__enter__ = open_mock
    # Prepare a fake file, with only empty lines.
    open_mock.return_value.__iter__.return_value = iter(
        ['\n', ' \n', '  \n'])
    # Lookup for a PID and VID.
    allowlist = ukip.check_local_allowlist('0x3784', '0x3472')
    # If there are only empty lines in the config file, return False.
    self.assertEqual(
        allowlist,
        ukip.AllowlistConfigReturn(allowlist=[], device_present=False))
@mock.patch.object(builtins, 'open', autospec=True)
def test_check_local_allowlist_allow_all(self, open_mock):
    """Tests if the allowlist check returns True for "allow all characters"."""
    open_mock.return_value.__enter__ = open_mock
    # Prepare a fake file with a single device using the 'any' keyword.
    open_mock.return_value.__iter__.return_value = iter([
        '0x1234:0x1337 any\n',
    ])
    # Lookup for a PID and VID.
    allowlist = ukip.check_local_allowlist('0x1234', '0x1337')
    # If all possible characters are allowed for a device, return an empty list
    # and True.
    self.assertEqual(
        allowlist,
        ukip.AllowlistConfigReturn(allowlist=[], device_present=True))
@mock.patch.object(builtins, 'open', autospec=True)
def test_check_local_allowlist_deny_all(self, open_mock):
    """Tests if the allowlist is an empty list when denying all characters."""
    open_mock.return_value.__enter__ = open_mock
    # Prepare a fake file with a single device using the 'none' keyword.
    open_mock.return_value.__iter__.return_value = iter([
        '0x1234:0x1337 none\n',
    ])
    # Lookup for a PID and VID.
    allowlist = ukip.check_local_allowlist('0x1234', '0x1337')
    # If no characters are allowed for the given device, return an empty list.
    self.assertEqual(
        allowlist,
        ukip.AllowlistConfigReturn(allowlist=[], device_present=False))
def fill_test_ringbuffer_with_data(self):
    """Feeds five timed keystrokes into the ring buffer.

    Helper used by the hardening-mode tests to arm the detection logic with
    the keystrokes 'a'..'e' and their timestamps.
    """
    events = (
        (1555146977759524, 'a'),
        (1555146977859525, 'b'),
        (1555146977959526, 'c'),
        (1555146977959527, 'd'),
        (1555146977959528, 'e'),
    )
    for timestamp, keystroke in events:
        ukip.add_to_ring_buffer(self.event_device_path, timestamp, keystroke,
                                self.mock_pyusb_device)
@mock.patch.object(gc, 'collect', wraps=gc.collect)
@mock.patch.object(ukip, 'check_local_allowlist', autospec=True)
@mock.patch.object(ukip, 'log', autospec=True)
def test_enforce_hardening_mode_with_product(self, logging_mock,
                                             check_allowlist_mock, gc_mock):
    """Tests which logging message is emitted when device has a product set."""
    self.fill_test_ringbuffer_with_data()
    self.mock_pyusb_device.__iter__.return_value = iter([self.mock_usb_config])
    # Need a link, because after the function is run, the dicts are deleted.
    timings = ukip._event_devices_timings[self.event_device_path]
    # Return the allowlist from /etc/ukip/allowlist.
    check_allowlist_mock.return_value = ukip.AllowlistConfigReturn(
        allowlist=['a', 'b', 'c'], device_present=True)
    ukip.enforce_hardening_mode(self.mock_pyusb_device, self.event_device_path)
    check_allowlist_mock.assert_called_once_with(
        hex(self.mock_pyusb_device.idProduct),
        hex(self.mock_pyusb_device.idVendor))
    # Only 1 interface, so the range is 0.
    self.mock_pyusb_device.detach_kernel_driver.assert_called_once_with(0)
    # The warning message names the product because the device has one set.
    logging_mock.warning.assert_called_with(
        '[UKIP] The device %s with the vendor id %s and the product id %s '
        'was blocked. The causing timings were: %s.',
        self.mock_pyusb_device.product, hex(self.mock_pyusb_device.idVendor),
        hex(self.mock_pyusb_device.idProduct), timings)
    # The error was not logged.
    self.assertFalse(logging_mock.error.called)
    # The dicts are deleted now.
    self.assertFalse(ukip._event_devices_timings)
    self.assertFalse(ukip._event_devices_keystrokes)
    # And the garbage collector ran.
    gc_mock.assert_called_once()
@mock.patch.object(gc, 'collect', wraps=gc.collect)
@mock.patch.object(ukip, 'check_local_allowlist', autospec=True)
@mock.patch.object(ukip, 'log', autospec=True)
def test_enforce_hardening_mode_no_product(self, logging_mock,
                                           check_allowlist_mock, gc_mock):
    """Tests which logging message is emitted when device has no product set."""
    self.fill_test_ringbuffer_with_data()
    self.mock_pyusb_device.__iter__.return_value = iter([self.mock_usb_config])
    # No product name available for this device.
    self.mock_pyusb_device.product = None
    # Need a link, because after the function is run, the dicts are deleted.
    timings = ukip._event_devices_timings[self.event_device_path]
    # Return the allowlist from /etc/ukip/allowlist.
    check_allowlist_mock.return_value = ukip.AllowlistConfigReturn(
        allowlist=['a', 'b', 'c'], device_present=True)
    ukip.enforce_hardening_mode(self.mock_pyusb_device, self.event_device_path)
    check_allowlist_mock.assert_called_once_with(
        hex(self.mock_pyusb_device.idProduct),
        hex(self.mock_pyusb_device.idVendor))
    # Only 1 interface, so the range is 0.
    self.mock_pyusb_device.detach_kernel_driver.assert_called_once_with(0)
    # Without a product name, the log message omits the device name.
    logging_mock.warning.assert_called_with(
        '[UKIP] The device with the vendor id %s and the product id %s was '
        'blocked. The causing timings were: %s.',
        hex(self.mock_pyusb_device.idVendor),
        hex(self.mock_pyusb_device.idProduct), timings)
    self.assertFalse(logging_mock.error.called)
    # The dicts are deleted now.
    self.assertFalse(ukip._event_devices_timings)
    self.assertFalse(ukip._event_devices_keystrokes)
    # And the garbage collector ran.
    gc_mock.assert_called_once()
@mock.patch.object(ukip, 'check_local_allowlist', autospec=True)
@mock.patch.object(ukip, 'log', autospec=True)
def test_enforce_hardening_mode_no_active_driver(self, logging_mock,
                                                 check_allowlist_mock):
    """Tests flow through function when no interface has an active driver."""
    self.fill_test_ringbuffer_with_data()
    self.mock_pyusb_device.__iter__.return_value = iter([self.mock_usb_config])
    # No kernel driver is bound, so there is nothing to detach.
    self.mock_pyusb_device.is_kernel_driver_active.return_value = False
    # Return the allowlist from /etc/ukip/allowlist.
    check_allowlist_mock.return_value = ukip.AllowlistConfigReturn(
        allowlist=['a', 'b', 'c'], device_present=True)
    ukip.enforce_hardening_mode(self.mock_pyusb_device, self.event_device_path)
    check_allowlist_mock.assert_called_once_with(
        hex(self.mock_pyusb_device.idProduct),
        hex(self.mock_pyusb_device.idVendor))
    self.assertFalse(self.mock_pyusb_device.detach_kernel_driver.called)
    self.assertFalse(logging_mock.warning.called)
    self.assertFalse(logging_mock.error.called)
@mock.patch.object(ukip, 'check_local_allowlist', autospec=True)
@mock.patch.object(ukip, 'log', autospec=True)
def test_enforce_hardening_mode_ioerror(self, logging_mock,
                                        check_allowlist_mock):
    """Tests IOError/log message for unbinding a driver from an interface."""
    self.fill_test_ringbuffer_with_data()
    self.mock_pyusb_device.__iter__.return_value = iter([self.mock_usb_config])
    # Detaching the kernel driver fails with a busy-device IOError.
    # (The original also built log_message/pid_and_vid locals that were never
    # used; they are removed here.)
    self.mock_pyusb_device.detach_kernel_driver.side_effect = IOError(
        '[Errno 16] Device or resource busy')
    # Return the allowlist from /etc/ukip/allowlist.
    check_allowlist_mock.return_value = ukip.AllowlistConfigReturn(
        allowlist=['a', 'b', 'c'], device_present=True)
    ukip.enforce_hardening_mode(self.mock_pyusb_device, self.event_device_path)
    check_allowlist_mock.assert_called_once_with(
        hex(self.mock_pyusb_device.idProduct),
        hex(self.mock_pyusb_device.idVendor))
    logging_mock.warning.assert_called()
@mock.patch.object(gc, 'collect', wraps=gc.collect)
@mock.patch.object(ukip, 'check_local_allowlist', autospec=True)
@mock.patch.object(ukip, 'log', autospec=True)
def test_enforce_hardening_mode_multiple_interfaces_error(
        self, logging_mock, check_allowlist_mock, gc_mock):
    """Tests multiple interfaces, with one failing with an IOError.

    The blocked-device warning is still logged and the bookkeeping dicts are
    cleaned up, even though detaching the first interface raised.
    """
    self.fill_test_ringbuffer_with_data()
    self.mock_pyusb_device.__iter__.return_value = iter([self.mock_usb_config])
    self.mock_usb_config.bNumInterfaces = 2
    # The first detach raises, the second one succeeds. (Unused locals
    # log_message/pid_and_vid from the original were removed.)
    self.mock_pyusb_device.detach_kernel_driver.side_effect = [
        IOError('[Errno 16] Device or resource busy'), mock.DEFAULT
    ]
    # Need a link, because after the function is run, the dicts are deleted.
    timings = ukip._event_devices_timings[self.event_device_path]
    # Return the allowlist from /etc/ukip/allowlist.
    check_allowlist_mock.return_value = ukip.AllowlistConfigReturn(
        allowlist=['a', 'b', 'c'], device_present=True)
    ukip.enforce_hardening_mode(self.mock_pyusb_device, self.event_device_path)
    check_allowlist_mock.assert_called_once_with(
        hex(self.mock_pyusb_device.idProduct),
        hex(self.mock_pyusb_device.idVendor))
    call = [
        mock.call(
            '[UKIP] The device %s with the vendor id %s and the product id '
            '%s was blocked. The causing timings were: %s.',
            self.mock_pyusb_device.product,
            hex(self.mock_pyusb_device.idVendor),
            hex(self.mock_pyusb_device.idProduct), timings)
    ]
    logging_mock.warning.assert_has_calls(call)
    # The dicts are deleted now.
    self.assertFalse(ukip._event_devices_timings)
    self.assertFalse(ukip._event_devices_keystrokes)
    # And the garbage collector ran.
    gc_mock.assert_called_once()
@mock.patch.object(ukip, 'check_local_allowlist', autospec=True)
@mock.patch.object(ukip, 'log', autospec=True)
def test_enforce_hardening_mode_oserror(self, logging_mock,
                                        check_allowlist_mock):
    """Tests OSError/log message for unbinding a driver from an interface."""
    self.fill_test_ringbuffer_with_data()
    self.mock_pyusb_device.__iter__.return_value = iter([self.mock_usb_config])
    # Detaching the kernel driver fails with an OSError. (Unused locals
    # log_message/pid_and_vid from the original were removed.)
    self.mock_pyusb_device.detach_kernel_driver.side_effect = OSError(
        'access violation')
    # Return the allowlist from /etc/ukip/allowlist.
    check_allowlist_mock.return_value = ukip.AllowlistConfigReturn(
        allowlist=['a', 'b', 'c'], device_present=True)
    ukip.enforce_hardening_mode(self.mock_pyusb_device, self.event_device_path)
    check_allowlist_mock.assert_called_once_with(
        hex(self.mock_pyusb_device.idProduct),
        hex(self.mock_pyusb_device.idVendor))
    logging_mock.warning.assert_called()
@mock.patch.object(ukip, 'check_local_allowlist', autospec=True)
@mock.patch.object(ukip, 'log', autospec=True)
def test_enforce_hardening_mode_valueerror(self, logging_mock,
                                           check_allowlist_mock):
    """Tests ValueError/log message for unbinding a driver from an interface."""
    self.fill_test_ringbuffer_with_data()
    self.mock_pyusb_device.__iter__.return_value = iter([self.mock_usb_config])
    # Detaching the kernel driver fails with a ValueError. (Unused locals
    # log_message/pid_and_vid from the original were removed.)
    self.mock_pyusb_device.detach_kernel_driver.side_effect = ValueError(
        'Invalid configuration')
    # Return the allowlist from /etc/ukip/allowlist.
    check_allowlist_mock.return_value = ukip.AllowlistConfigReturn(
        allowlist=['a', 'b', 'c'], device_present=True)
    ukip.enforce_hardening_mode(self.mock_pyusb_device, self.event_device_path)
    check_allowlist_mock.assert_called_once_with(
        hex(self.mock_pyusb_device.idProduct),
        hex(self.mock_pyusb_device.idVendor))
    logging_mock.warning.assert_called()
@mock.patch.object(ukip, 'check_local_allowlist', autospec=True)
@mock.patch.object(ukip, 'log', autospec=True)
def test_enforce_hardening_mode_usberror(self, logging_mock,
                                         check_allowlist_mock):
    """Tests USBError/log message for unbinding a driver from an interface."""
    self.fill_test_ringbuffer_with_data()
    self.mock_pyusb_device.__iter__.return_value = iter([self.mock_usb_config])
    # Detaching the kernel driver fails with a USBError. (Unused locals
    # log_message/pid_and_vid from the original were removed.)
    self.mock_pyusb_device.detach_kernel_driver.side_effect = usb.core.USBError(
        'USBError Accessing Configurations')
    # Return the allowlist from /etc/ukip/allowlist.
    check_allowlist_mock.return_value = ukip.AllowlistConfigReturn(
        allowlist=['a', 'b', 'c'], device_present=True)
    ukip.enforce_hardening_mode(self.mock_pyusb_device, self.event_device_path)
    check_allowlist_mock.assert_called_once_with(
        hex(self.mock_pyusb_device.idProduct),
        hex(self.mock_pyusb_device.idVendor))
    logging_mock.warning.assert_called()
@mock.patch.object(gc, 'collect', wraps=gc.collect)
@mock.patch.object(ukip, 'check_local_allowlist', autospec=True)
@mock.patch.object(ukip, 'log', autospec=True)
def test_enforce_hardening_mode_any_keyword(self, logging_mock,
                                            check_allowlist_mock, gc_mock):
    """Tests an early return if the any keyword is set in the allowlist."""
    self.fill_test_ringbuffer_with_data()
    self.mock_pyusb_device.__iter__.return_value = iter([self.mock_usb_config])
    # Device present and empty allowlist -> any keyword was set.
    check_allowlist_mock.return_value = ukip.AllowlistConfigReturn(
        allowlist=[], device_present=True)
    ukip.enforce_hardening_mode(self.mock_pyusb_device, self.event_device_path)
    check_allowlist_mock.assert_called_once_with(
        hex(self.mock_pyusb_device.idProduct),
        hex(self.mock_pyusb_device.idVendor))
    # Due to the early return, none of the followup functions are called.
    self.assertFalse(self.mock_pyusb_device.detach_kernel_driver.called)
    self.assertFalse(logging_mock.called)
    self.assertFalse(gc_mock.called)
@mock.patch.object(gc, 'collect', wraps=gc.collect)
@mock.patch.object(ukip, 'check_local_allowlist', autospec=True)
@mock.patch.object(ukip, 'log', autospec=True)
def test_enforce_hardening_mode_keystrokes_allowed(self, logging_mock,
                                                   check_allowlist_mock,
                                                   gc_mock):
    """Tests an early return if the typed keys are allowed in the allowlist."""
    # This sets the typed keys to [a,b,c,d,e]
    self.fill_test_ringbuffer_with_data()
    self.mock_pyusb_device.__iter__.return_value = iter([self.mock_usb_config])
    # Device present and allowlist set to typed characters.
    check_allowlist_mock.return_value = ukip.AllowlistConfigReturn(
        allowlist=['a', 'b', 'c', 'd', 'e'], device_present=True)
    ukip.enforce_hardening_mode(self.mock_pyusb_device, self.event_device_path)
    check_allowlist_mock.assert_called_once_with(
        hex(self.mock_pyusb_device.idProduct),
        hex(self.mock_pyusb_device.idVendor))
    # Due to the early return, none of the followup functions are called.
    self.assertFalse(self.mock_pyusb_device.detach_kernel_driver.called)
    self.assertFalse(logging_mock.called)
    self.assertFalse(gc_mock.called)
@mock.patch.object(gc, 'collect', wraps=gc.collect)
@mock.patch.object(ukip, 'check_local_allowlist', autospec=True)
@mock.patch.object(ukip, 'log', autospec=True)
def test_enforce_hardening_mode_keystrokes_allowed_subset(
        self, logging_mock, check_allowlist_mock, gc_mock):
    """Tests an early return with a subset of allowed keys."""
    # Only three keystrokes are typed, forming a strict subset of the
    # allowlist configured below.
    ukip.add_to_ring_buffer(self.event_device_path, 1555146977759524, 'a',
                            self.mock_pyusb_device)
    ukip.add_to_ring_buffer(self.event_device_path, 1555146977859525, 'b',
                            self.mock_pyusb_device)
    ukip.add_to_ring_buffer(self.event_device_path, 1555146977959526, 'c',
                            self.mock_pyusb_device)
    self.mock_pyusb_device.__iter__.return_value = iter([self.mock_usb_config])
    # Device present and allowlist set to typed characters.
    check_allowlist_mock.return_value = ukip.AllowlistConfigReturn(
        allowlist=['a', 'b', 'c', 'd', 'e', 'f'], device_present=True)
    ukip.enforce_hardening_mode(self.mock_pyusb_device, self.event_device_path)
    check_allowlist_mock.assert_called_once_with(
        hex(self.mock_pyusb_device.idProduct),
        hex(self.mock_pyusb_device.idVendor))
    # Due to the early return, none of the followup functions are called.
    self.assertFalse(self.mock_pyusb_device.detach_kernel_driver.called)
    self.assertFalse(logging_mock.called)
    self.assertFalse(gc_mock.called)
@mock.patch.object(gc, 'collect', wraps=gc.collect)
@mock.patch.object(ukip, 'check_local_allowlist', autospec=True)
@mock.patch.object(ukip, 'log', autospec=True)
def test_enforce_hardening_mode_device_not_present(self, logging_mock,
                                                   check_allowlist_mock,
                                                   gc_mock):
    """Tests function flow when the device is not present in the allowlist."""
    self.fill_test_ringbuffer_with_data()
    self.mock_pyusb_device.__iter__.return_value = iter([self.mock_usb_config])
    # Need a link, because after the function is run, the dicts are deleted.
    timings = ukip._event_devices_timings[self.event_device_path]
    # A device without an allowlist entry gets blocked.
    check_allowlist_mock.return_value = ukip.AllowlistConfigReturn(
        allowlist=[], device_present=False)
    ukip.enforce_hardening_mode(self.mock_pyusb_device, self.event_device_path)
    check_allowlist_mock.assert_called_once_with(
        hex(self.mock_pyusb_device.idProduct),
        hex(self.mock_pyusb_device.idVendor))
    # Only 1 interface, so the range is 0.
    self.mock_pyusb_device.detach_kernel_driver.assert_called_once_with(0)
    logging_mock.warning.assert_called_with(
        '[UKIP] The device %s with the vendor id %s and the product id %s '
        'was blocked. The causing timings were: %s.',
        self.mock_pyusb_device.product, hex(self.mock_pyusb_device.idVendor),
        hex(self.mock_pyusb_device.idProduct), timings)
    # The error was not logged.
    self.assertFalse(logging_mock.error.called)
    # The dicts are deleted now.
    self.assertFalse(ukip._event_devices_timings)
    self.assertFalse(ukip._event_devices_keystrokes)
    # And the garbage collector ran.
    gc_mock.assert_called_once()
@mock.patch.object(gc, 'collect', wraps=gc.collect)
@mock.patch.object(ukip, 'check_local_allowlist', autospec=True)
@mock.patch.object(ukip, 'log', autospec=True)
def test_enforce_hardening_mode_one_key_off(self, logging_mock,
                                            check_allowlist_mock, gc_mock):
    """Tests the hardening mode when one typed key is not allowed."""
    # This sets the typed keys to [a,b,c,d,e]
    self.fill_test_ringbuffer_with_data()
    self.mock_pyusb_device.__iter__.return_value = iter([self.mock_usb_config])
    # Need a link, because after the function is run, the dicts are deleted.
    timings = ukip._event_devices_timings[self.event_device_path]
    # Return the allowlist from /etc/ukip/allowlist. The 'e' from the typed
    # keys is not allowed.
    check_allowlist_mock.return_value = ukip.AllowlistConfigReturn(
        allowlist=['a', 'b', 'c', 'd', 'f'], device_present=False)
    ukip.enforce_hardening_mode(self.mock_pyusb_device, self.event_device_path)
    check_allowlist_mock.assert_called_once_with(
        hex(self.mock_pyusb_device.idProduct),
        hex(self.mock_pyusb_device.idVendor))
    # Only 1 interface, so the range is 0.
    self.mock_pyusb_device.detach_kernel_driver.assert_called_once_with(0)
    logging_mock.warning.assert_called_with(
        '[UKIP] The device %s with the vendor id %s and the product id %s '
        'was blocked. The causing timings were: %s.',
        self.mock_pyusb_device.product, hex(self.mock_pyusb_device.idVendor),
        hex(self.mock_pyusb_device.idProduct), timings)
    # The error was not logged.
    self.assertFalse(logging_mock.error.called)
    # The dicts are deleted now.
    self.assertFalse(ukip._event_devices_timings)
    self.assertFalse(ukip._event_devices_keystrokes)
    # And the garbage collector ran.
    gc_mock.assert_called_once()
@mock.patch.object(ukip, 'log', autospec=True)
@mock.patch.object(builtins, 'open')
def test_load_keycodes_from_file(self, open_mock, logging_mock):
    """Tests if the keycode file returns the KeycodesReturn class."""
    handle = open_mock().__enter__.return_value
    # JSON payload mirroring the structure of the real keycodes file.
    keycode_file_content = [{
        'lowcodes': [{
            '1': 'ESC',
            '2': '1'
        }],
        'capscodes': [{
            '1': 'ESC',
            '2': '!'
        }]
    }]
    file_mock = mock.MagicMock(side_effect=keycode_file_content)
    json_mock = mock.patch('json.load', file_mock)
    with open_mock:
        with json_mock as json_load_mock:
            keycodes = ukip.load_keycodes_from_file()
            json_load_mock.assert_called_with(handle)
    # String keys from the JSON file are converted to integer keycodes.
    self.assertEqual(keycodes.lower_codes, {1: 'ESC', 2: '1'})
    self.assertEqual(keycodes.capped_codes, {1: 'ESC', 2: '!'})
    logging_mock.assert_not_called()
@mock.patch.object(ukip, 'log', autospec=True)
@mock.patch.object(builtins, 'open')
def test_load_keycodes_from_file_missing_keyword(self, open_mock,
                                                 logging_mock):
    """Tests the keycode file returns when a keyword is missing."""
    handle = open_mock().__enter__.return_value
    # 'not_low_codes' replaces the expected 'lowcodes' key.
    keycode_file_content = [{
        'not_low_codes': [{
            '1': 'ESC',
            '2': '1'
        }],
        'capscodes': [{
            '1': 'ESC',
            '2': '!'
        }]
    }]
    file_mock = mock.MagicMock(side_effect=keycode_file_content)
    json_mock = mock.patch('json.load', file_mock)
    with open_mock:
        with json_mock as json_load_mock:
            keycodes = ukip.load_keycodes_from_file()
            json_load_mock.assert_called_with(handle)
    # The lowcodes keyword is missing in the keycodes file.
    self.assertEqual(keycodes.lower_codes, {})
    self.assertEqual(keycodes.capped_codes, {})
    logging_mock.error.assert_called()
@mock.patch.object(ukip, 'log', autospec=True)
@mock.patch.object(json, 'load', autospec=True)
@mock.patch.object(builtins, 'open', autospec=True)
def test_load_keycodes_from_file_overflowerror(self, open_mock, json_mock,
                                               logging_mock):
    """Tests if KeycodesFileError is raised on an OverflowError."""
    # json.load blows up while parsing the keycodes file.
    json_mock.side_effect = OverflowError
    self.assertRaises(ukip.KeycodesFileError, ukip.load_keycodes_from_file)
    open_mock.assert_called()
    json_mock.assert_called()
    logging_mock.assert_not_called()
@mock.patch.object(ukip, 'log', autospec=True)
@mock.patch.object(json, 'load', autospec=True)
@mock.patch.object(builtins, 'open', autospec=True)
def test_load_keycodes_from_file_valueerror(self, open_mock, json_mock,
                                            logging_mock):
    """Tests if KeycodesFileError is raised on a ValueError."""
    # json.load blows up while parsing the keycodes file.
    json_mock.side_effect = ValueError
    self.assertRaises(ukip.KeycodesFileError, ukip.load_keycodes_from_file)
    open_mock.assert_called()
    json_mock.assert_called()
    logging_mock.assert_not_called()
@mock.patch.object(ukip, 'log', autospec=True)
@mock.patch.object(json, 'load', autospec=True)
@mock.patch.object(builtins, 'open', autospec=True)
def test_load_keycodes_from_file_typeerror(self, open_mock, json_mock,
                                           logging_mock):
    """Tests if KeycodesFileError is raised on a TypeError."""
    # json.load blows up while parsing the keycodes file.
    json_mock.side_effect = TypeError
    self.assertRaises(ukip.KeycodesFileError, ukip.load_keycodes_from_file)
    open_mock.assert_called()
    json_mock.assert_called()
    logging_mock.assert_not_called()
@mock.patch.object(ukip, 'log', autospec=True)
@mock.patch.object(json, 'load', autospec=True)
@mock.patch.object(builtins, 'open', autospec=True)
def test_load_keycodes_from_file_not_found(self, open_mock, json_mock,
                                           logging_mock):
    """Tests if KeycodesFileError is raised on a FileNotFoundError."""
    # json.load blows up while parsing the keycodes file.
    json_mock.side_effect = FileNotFoundError
    self.assertRaises(ukip.KeycodesFileError, ukip.load_keycodes_from_file)
    open_mock.assert_called()
    json_mock.assert_called()
    logging_mock.assert_not_called()
# Allow running this test module directly with the unittest runner.
if __name__ == '__main__':
    unittest.main()
| |
# -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2018.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Meta tests for mappers.
The test checks the output of the swapper to a ground truth DAG (one for each
test/swapper) saved in as a pickle (in `test/python/pickles/`). If they need
to be regenerated, the DAG candidate is compiled and run in a simulator and
the count is checked before being saved. This happens with (in the root
directory):
> python -m test.python.transpiler.test_mappers regenerate
To make a new swapper pass throw all the common tests, create a new class inside the file
`path/to/test_mappers.py` that:
* the class name should start with `Tests...`.
* inheriting from ``SwapperCommonTestCases, QiskitTestCase``
* overwrite the required attribute ``pass_class``
For example::
class TestsSomeSwap(SwapperCommonTestCases, QiskitTestCase):
pass_class = SomeSwap # The pass class
additional_args = {'seed_transpiler': 42} # In case SomeSwap.__init__ requires
# additional arguments
To **add a test for all the swappers**, add a new method ``test_foo`` to the
``SwapperCommonTestCases`` class:
* defining the following required ``self`` attributes: ``self.counts``,
``self.shots``, ``self.delta``. They are required for the regeneration of the
ground truth.
* use the ``self.assertResult`` assertion for comparing for regeneration of the
ground truth.
* explicitly set a unique ``name`` of the ``QuantumCircuit``, as it is used
for the name of the pickle file of the ground truth.
For example::
def test_a_common_test(self):
self.counts = {'000': 512, '110': 512} # The expected count for this circuit
self.shots = 1024 # Shots to run in the backend.
self.delta = 5 # This is delta for the AlmostEqual during
# the count check
coupling_map = [[0, 1], [0, 2]] # The coupling map for this specific test
qr = QuantumRegister(3, 'q') #
cr = ClassicalRegister(3, 'c') # Set the circuit to test
circuit = QuantumCircuit(qr, cr, # and don't forget to put a name
name='some_name') # (it will be used to save the pickle
circuit.h(qr[1]) #
circuit.cx(qr[1], qr[2]) #
circuit.measure(qr, cr) #
result = transpile(circuit, self.create_backend(), coupling_map=coupling_map,
pass_manager=self.create_passmanager(coupling_map))
self.assertResult(result, circuit)
"""
# pylint: disable=attribute-defined-outside-init
import unittest
import pickle
import sys
import os
from qiskit import execute
from qiskit import ClassicalRegister, QuantumRegister, QuantumCircuit, BasicAer
from qiskit.transpiler import PassManager
from qiskit.compiler import transpile
from qiskit.transpiler.passes import BasicSwap, LookaheadSwap, StochasticSwap, SetLayout
from qiskit.transpiler import CouplingMap, Layout
from qiskit.test import QiskitTestCase
# Directory containing the ground-truth pickle files used by assertResult.
DIRNAME = QiskitTestCase._get_resource_path('pickles')
class CommonUtilitiesMixin:
    """Utilities for meta testing.

    Subclasses should redefine the ``pass_class`` argument, with a Swap Mapper
    class.

    Note: This class assumes that the subclass is also inheriting from
    ``QiskitTestCase``, and it uses ``QiskitTestCase`` methods directly.
    """

    # When True, assertResult re-runs the circuit and rewrites the pickle.
    regenerate_expected = False
    # Fixed seeds keep the simulator and the transpiler deterministic.
    seed_simulator = 42
    seed_transpiler = 42
    # Extra keyword arguments forwarded to pass_class.__init__.
    additional_args = {}
    # The swap-mapper pass class under test; must be set by subclasses.
    pass_class = None

    def create_passmanager(self, coupling_map, initial_layout=None):
        """Returns a PassManager using self.pass_class(coupling_map, initial_layout)"""
        passmanager = PassManager()
        if initial_layout:
            passmanager.append(SetLayout(Layout(initial_layout)))
        # pylint: disable=not-callable
        passmanager.append(self.pass_class(CouplingMap(coupling_map), **self.additional_args))
        return passmanager

    def create_backend(self):
        """Returns a Backend."""
        return BasicAer.get_backend('qasm_simulator')

    def generate_ground_truth(self, transpiled_result, filename):
        """Generates the expected result into a file.

        Checks if transpiled_result matches self.counts by running in a backend
        (self.create_backend()). That's saved in a pickle in filename.

        Args:
            transpiled_result (DAGCircuit): The DAGCircuit to execute.
            filename (string): Where the pickle is saved.
        """
        sim_backend = self.create_backend()
        job = execute(transpiled_result, sim_backend, seed_simulator=self.seed_simulator,
                      seed_transpiler=self.seed_transpiler, shots=self.shots)
        self.assertDictAlmostEqual(self.counts, job.result().get_counts(), delta=self.delta)
        with open(filename, "wb") as output_file:
            pickle.dump(transpiled_result, output_file)

    def assertResult(self, result, circuit):
        """Fetches the pickle in circuit.name file and compares it with result."""
        picklename = '%s_%s.pickle' % (type(self).__name__, circuit.name)
        filename = os.path.join(DIRNAME, picklename)
        if self.regenerate_expected:
            # Run result in backend to test that is valid.
            self.generate_ground_truth(result, filename)
        # NOTE(review): pickles are trusted local test fixtures; pickle.load
        # must never be pointed at untrusted data.
        with open(filename, "rb") as input_file:
            expected = pickle.load(input_file)
        self.assertEqual(result, expected)
class SwapperCommonTestCases(CommonUtilitiesMixin):
    """Tests that are run in several mappers.

    The tests here will be run in several mappers. When adding a test, please
    ensure that the test:
    * defines ``self.counts``, ``self.shots``, ``self.delta``.
    * uses the ``self.assertResult`` assertion for comparing for regeneration of
      the ground truth.
    * explicitly sets a unique ``name`` of the ``QuantumCircuit``.

    See also ``CommonUtilitiesMixin`` and the module docstring.
    """

    def test_a_cx_to_map(self):
        """A single CX needs to be remapped.

        q0:----------m-----
                     |
        q1:-[H]-(+)--|-m---
                 |   | |
        q2:------.---|-|-m-
                     | | |
        c0:----------.-|-|-
        c1:------------.-|-
        c2:--------------.-

        CouplingMap map: [1]<-[0]->[2]

        expected count: '000': 50%
                        '110': 50%
        """
        self.counts = {'000': 512, '110': 512}
        self.shots = 1024
        self.delta = 5
        coupling_map = [[0, 1], [0, 2]]
        qr = QuantumRegister(3, 'q')
        cr = ClassicalRegister(3, 'c')
        circuit = QuantumCircuit(qr, cr, name='a_cx_to_map')
        circuit.h(qr[1])
        circuit.cx(qr[1], qr[2])
        circuit.measure(qr, cr)
        result = transpile(circuit, self.create_backend(), coupling_map=coupling_map,
                           seed_transpiler=self.seed_transpiler,
                           pass_manager=self.create_passmanager(coupling_map))
        self.assertResult(result, circuit)

    def test_initial_layout(self):
        """Using a non-trivial initial_layout.

        q3:----------------m--
        q0:----------m-----|--
                     |     |
        q1:-[H]-(+)--|-m---|--
                 |   | |   |
        q2:------.---|-|-m-|--
                     | | | |
        c0:----------.-|-|-|--
        c1:------------.-|-|--
        c2:--------------.-|--
        c3:----------------.--

        CouplingMap map: [1]<-[0]->[2]->[3]

        expected count: '000': 50%
                        '110': 50%
        """
        self.counts = {'0000': 512, '0110': 512}
        self.shots = 1024
        self.delta = 5
        coupling_map = [[0, 1], [0, 2], [2, 3]]
        qr = QuantumRegister(4, 'q')
        cr = ClassicalRegister(4, 'c')
        circuit = QuantumCircuit(qr, cr, name='initial_layout')
        circuit.h(qr[1])
        circuit.cx(qr[1], qr[2])
        circuit.measure(qr, cr)
        # Map virtual qubits onto non-trivial physical positions.
        layout = {qr[3]: 0, qr[0]: 1, qr[1]: 2, qr[2]: 3}
        result = transpile(circuit, self.create_backend(), coupling_map=coupling_map,
                           seed_transpiler=self.seed_transpiler,
                           pass_manager=self.create_passmanager(coupling_map, layout))
        self.assertResult(result, circuit)

    def test_handle_measurement(self):
        """Handle measurement correctly.

        q0:--.-----(+)-m-------
             |      |  |
        q1:-(+)-(+)-|--|-m-----
                 |  |  | |
        q2:------|--|--|-|-m---
                 |  |  | | |
        q3:-[H]--.--.--|-|-|-m-
                       | | | |
        c0:------------.-|-|-|-
        c1:--------------.-|-|-
        c2:----------------.-|-
        c3:------------------.-

        CouplingMap map: [0]->[1]->[2]->[3]

        expected count: '0000': 50%
                        '1011': 50%
        """
        self.counts = {'1011': 512, '0000': 512}
        self.shots = 1024
        self.delta = 5
        coupling_map = [[0, 1], [1, 2], [2, 3]]
        qr = QuantumRegister(4, 'q')
        cr = ClassicalRegister(4, 'c')
        circuit = QuantumCircuit(qr, cr, name='handle_measurement')
        circuit.h(qr[3])
        circuit.cx(qr[0], qr[1])
        circuit.cx(qr[3], qr[1])
        circuit.cx(qr[3], qr[0])
        circuit.measure(qr, cr)
        result = transpile(circuit, self.create_backend(), coupling_map=coupling_map,
                           seed_transpiler=self.seed_transpiler,
                           pass_manager=self.create_passmanager(coupling_map))
        self.assertResult(result, circuit)
class TestsBasicSwap(SwapperCommonTestCases, QiskitTestCase):
    """Test SwapperCommonTestCases using BasicSwap."""
    # Runs every shared swap-mapper test with the BasicSwap pass.
    pass_class = BasicSwap
class TestsLookaheadSwap(SwapperCommonTestCases, QiskitTestCase):
    """Test SwapperCommonTestCases using LookaheadSwap."""
    # Runs every shared swap-mapper test with the LookaheadSwap pass.
    pass_class = LookaheadSwap
class TestsStochasticSwap(SwapperCommonTestCases, QiskitTestCase):
    """Test SwapperCommonTestCases using StochasticSwap."""
    # Runs every shared swap-mapper test with the StochasticSwap pass;
    # the pass is randomized, so a fixed seed keeps results reproducible.
    pass_class = StochasticSwap
    additional_args = {'seed': 0}
if __name__ == '__main__':
    # `python <this file> regenerate` rebuilds the expected-result pickles:
    # delete the existing ones and let assertResult regenerate each on demand.
    if len(sys.argv) >= 2 and sys.argv[1] == 'regenerate':
        CommonUtilitiesMixin.regenerate_expected = True
        for picklefilename in os.listdir(DIRNAME):
            os.remove(os.path.join(DIRNAME, picklefilename))
        del sys.argv[1]  # hide the custom argument from unittest's CLI parsing
    unittest.main()
| |
"""
Chart demos using RawQuerySet
"""
from chartit import DataPool, Chart
from django.shortcuts import render_to_response
from .decorators import add_source_code_and_doc
from .models import MonthlyWeatherByCity, MonthlyWeatherSeattle
from .models import SalesHistory, BookStore, Book
@add_source_code_and_doc
def basicline(_, title, code, doc, sidebar_items):
    """
    A Basic Line Chart
    ------------------
    This is just a simple line chart with data from 2 different columns using
    a ``RawQuerySet`` source.

    Renders the ``chart_code.html`` template with the chart, the demo source
    code and this docstring.
    """
    # start_code
    ds = DataPool(
        series=[{
            'options': {
                'source': MonthlyWeatherByCity.objects.raw(
                    "SELECT id, month, houston_temp, boston_temp "
                    "FROM demoproject_monthlyweatherbycity")
            },
            'terms': [
                'month',
                'houston_temp',
                'boston_temp'
            ]
        }]
    )
    cht = Chart(
        datasource=ds,
        series_options=[{
            'options': {
                'type': 'line',
                'stacking': False
            },
            'terms': {
                'month': [
                    'boston_temp',
                    'houston_temp'
                ]
            }
        }],
        chart_options={
            'title': {
                'text': 'Weather Data of Boston and Houston'
            },
            'xAxis': {
                'title': {
                    'text': 'Month number'
                }
            }
        }
    )
    # end_code
    return render_to_response('chart_code.html',
                              {
                                  'chart_list': cht,
                                  'code': code,
                                  'title': title,
                                  'doc': doc,
                                  'sidebar_items': sidebar_items})
@add_source_code_and_doc
def mapf_for_x(_, title, code, doc, sidebar_items):
    """
    Mapping the x-axis
    ------------------
    This example demonstrates how to use the ``sortf_mapf_mts`` parameter to
    *map* the x-axis. The database only has month numbers (1-12) but not the
    month names. To display the month names in the graph, we create the
    ``monthname`` function and pass it to the ``Chart`` as the mapping function
    (``mapf``).

    Points to note:

    - ``mts`` is ``False`` because we want to sort by month numbers and map to
      the month names *after* they are sorted in order of month numbers.
      Setting it to ``True`` would sort after mapping giving an incorrect sort
      order like ``Apr``, ``Aug``, ``Dec``, ``...``.
    """
    # start_code
    ds = DataPool(
        series=[{
            'options': {
                'source': MonthlyWeatherByCity.objects.raw(
                    "SELECT * FROM demoproject_monthlyweatherbycity"
                )
            },
            'terms': [
                'month',
                'houston_temp',
                'boston_temp'
            ]
        }]
    )

    def monthname(month_num):
        # Translate a 1-12 month number into its abbreviated English name.
        names = {1: 'Jan', 2: 'Feb', 3: 'Mar', 4: 'Apr', 5: 'May', 6: 'Jun',
                 7: 'Jul', 8: 'Aug', 9: 'Sep', 10: 'Oct', 11: 'Nov', 12: 'Dec'}
        return names[month_num]
    cht = Chart(
        datasource=ds,
        series_options=[{
            'options': {
                'type': 'line',
                'stacking': False
            },
            'terms': {
                'month': [
                    'boston_temp',
                    'houston_temp'
                ]
            }
        }],
        chart_options={
            'title': {
                'text': 'Weather Data of Boston and Houston'
            },
            'xAxis': {
                'title': {
                    'text': 'Month'
                }
            }
        },
        x_sortf_mapf_mts=(None, monthname, False))
    # end_code
    return render_to_response('chart_code.html',
                              {
                                  'chart_list': cht,
                                  'code': code,
                                  'title': title,
                                  'doc': doc,
                                  'sidebar_items': sidebar_items})
@add_source_code_and_doc
def multi_table_same_x(_, title, code, doc, sidebar_items):
    """
    Data from multiple models on same chart
    ----------------------------------------
    This example demonstrates data from two different models
    ``MonthlyWeatherByCity`` and ``MonthlyWeatherSeattle`` on the same chart
    and on the same x-axis. Notice that we've mixed ``RawQuerySet`` and
    ``QuerySet`` sources together!

    The ``{'month_seattle': 'month'}`` term aliases the Seattle ``month``
    column so both sources can be addressed separately in the series options.
    """
    # start_code
    ds = DataPool(
        series=[{
            'options': {
                'source': MonthlyWeatherByCity.objects.raw(
                    "SELECT * FROM demoproject_monthlyweatherbycity"
                )
            },
            'terms': [
                'month',
                'houston_temp',
                'boston_temp'
            ]}, {
            'options': {
                'source': MonthlyWeatherSeattle.objects.all()
            },
            'terms': [
                {'month_seattle': 'month'},
                'seattle_temp'
            ]}
        ]
    )
    cht = Chart(
        datasource=ds,
        series_options=[{
            'options': {
                'type': 'line',
                'stacking': False
            },
            'terms': {
                'month': [
                    'boston_temp',
                    'houston_temp'],
                'month_seattle': ['seattle_temp']
            }
        }],
        chart_options={
            'title': {
                'text': 'Weather by Month (from 2 different tables)'},
            'xAxis': {
                'title': {
                    'text': 'Month number'
                }
            }
        }
    )
    # end_code
    return render_to_response('chart_code.html',
                              {
                                  'chart_list': cht,
                                  'code': code,
                                  'title': title,
                                  'doc': doc,
                                  'sidebar_items': sidebar_items})
@add_source_code_and_doc
def basicline_with_datefield(_, title, code, doc, sidebar_items):
    """
    A Basic Line Chart with DateField
    ---------------------------------
    This chart plots sales quantities per day from the first book store.

    Points to note:

    - ``sale_date`` is a DateField
    - the raw SQL is parameterized (``%s``) with the store's primary key
    """
    # start_code
    ds = DataPool(
        series=[{
            'options': {
                'source': SalesHistory.objects.raw(
                    "SELECT * FROM demoproject_saleshistory "
                    "WHERE bookstore_id=%s LIMIT 10",
                    [BookStore.objects.first().pk]
                )
            },
            'terms': [
                'sale_date',
                'sale_qty',
            ]
        }]
    )
    cht = Chart(
        datasource=ds,
        series_options=[{
            'options': {
                'type': 'line',
                'stacking': False
            },
            'terms': {
                'sale_date': [
                    'sale_qty',
                ]
            }
        }],
        chart_options={
            'title': {
                'text': 'Sales QTY per day'
            },
            'xAxis': {
                'title': {
                    'text': 'Sale date'
                }
            }
        }
    )
    # end_code
    return render_to_response('chart_code.html',
                              {
                                  'chart_list': cht,
                                  'code': code,
                                  'title': title,
                                  'doc': doc,
                                  'sidebar_items': sidebar_items})
@add_source_code_and_doc
def datetimefield_from_related_model(_, title, code, doc, sidebar_items):
    """
    A Basic Line Chart with DateTimeField from related model
    --------------------------------------------------------
    This chart plots sales quantities from the first book store based on
    when the book was published.

    The ``book__published_at`` term follows the usual Django double-underscore
    syntax to reach the related ``Book`` model's field.
    """
    # start_code
    ds = DataPool(
        series=[{
            'options': {
                'source': SalesHistory.objects.raw(
                    "SELECT * FROM demoproject_saleshistory "
                    "WHERE bookstore_id=%s LIMIT 10",
                    [BookStore.objects.first().pk]
                )
            },
            'terms': [
                'book__published_at',
                'sale_qty',
            ]
        }]
    )
    cht = Chart(
        datasource=ds,
        series_options=[{
            'options': {
                'type': 'line',
                'stacking': False
            },
            'terms': {
                'book__published_at': [
                    'sale_qty',
                ]
            }
        }],
        chart_options={
            'title': {
                'text': 'Sales QTY vs. Book publish date'
            },
            'xAxis': {
                'title': {
                    'text': 'Publish date'
                }
            }
        }
    )
    # end_code
    return render_to_response('chart_code.html',
                              {
                                  'chart_list': cht,
                                  'code': code,
                                  'title': title,
                                  'doc': doc,
                                  'sidebar_items': sidebar_items})
@add_source_code_and_doc
def extra_datefield(_, title, code, doc, sidebar_items):
    """
    A Basic Line Chart using extra DateField, not defined in the model
    ------------------------------------------------------------------
    This chart plots sales quantities per day from the first book store.
    In the ``RawQuerySet`` we select extra fields, which are not defined
    inside the model.

    Here ``sold_at`` is computed in SQL and only exists on the raw query's
    result rows, yet it can be used as a chart term like any model field.
    """
    # start_code
    ds = DataPool(
        series=[{
            'options': {
                # NOTE: strftime is SQLite function.
                # For MySQL use DATE_FORMAT
                'source': SalesHistory.objects.raw(
                    "SELECT id, sale_qty, "
                    "strftime('%%Y/%%m/%%d', sale_date) as sold_at"
                    " FROM demoproject_saleshistory "
                    "WHERE bookstore_id=%s LIMIT 10",
                    [BookStore.objects.first().pk]
                )
            },
            'terms': [
                'sold_at',
                'sale_qty',
            ]
        }]
    )
    cht = Chart(
        datasource=ds,
        series_options=[{
            'options': {
                'type': 'line',
                'stacking': False
            },
            'terms': {
                'sold_at': [
                    'sale_qty',
                ]
            }
        }],
        chart_options={
            'title': {
                'text': 'Sales QTY per day'
            },
            'xAxis': {
                'title': {
                    'text': 'Sale date'
                }
            }
        }
    )
    # end_code
    return render_to_response('chart_code.html',
                              {
                                  'chart_list': cht,
                                  'code': code,
                                  'title': title,
                                  'doc': doc,
                                  'sidebar_items': sidebar_items})
@add_source_code_and_doc
def avg_count(_, title, code, doc, sidebar_items):
    """
    A Basic Line Chart using AVG and COUNT
    --------------------------------------
    This chart plots the average book rating in each genre
    together with the number of books in each genre.

    NOTE that we use the SQL functions for average and count!

    NOTE(review): the join condition uses ``==``, which SQLite tolerates;
    standard SQL (and most other databases) expect ``=``.
    """
    # start_code
    ds = DataPool(
        series=[{
            'options': {
                'source': Book.objects.raw(
                    "SELECT "
                    "   demoproject_book.id, "
                    "   demoproject_genre.name as genre_name, "
                    "   avg(rating) as rating_avg, "
                    "   count(genre_id) as genre_count "
                    "FROM demoproject_book "
                    "JOIN demoproject_genre ON "
                    "   genre_id == demoproject_genre.id "
                    "GROUP BY genre_id "
                )
            },
            'terms': [
                'genre_name',
                'rating_avg',
                'genre_count'
            ]
        }]
    )
    cht = Chart(
        datasource=ds,
        series_options=[{
            'options': {
                'type': 'line',
                'stacking': False
            },
            'terms': {
                'genre_name': [
                    'rating_avg', 'genre_count'
                ]
            }
        }],
        chart_options={
            'title': {
                'text': 'Book rating and count per Genre'
            },
            'xAxis': {
                'title': {
                    'text': 'Genre'
                }
            }
        }
    )
    # end_code
    return render_to_response('chart_code.html',
                              {
                                  'chart_list': cht,
                                  'code': code,
                                  'title': title,
                                  'doc': doc,
                                  'sidebar_items': sidebar_items})
| |
#!/usr/bin/env
"""Common utils."""
__author__ = 'Volodymyr Varchuk'
__email__ = "vladimir.varchuk@rackspace.com"
from collections import namedtuple
# SQL statement templates, filled in via str.format().
DELETE_TMPLT = 'DELETE FROM {table} WHERE {conditions};'
UPDATE_TMPLT = 'UPDATE {table} SET {statements} WHERE {conditions};'
INSERT_TMPLT = 'INSERT INTO {table} ({columns}) VALUES({values});'
SELECT_TMPLT = 'SELECT MAX(idx) FROM {table} WHERE {conditions};'
# PostgreSQL upsert emulation inside a DO block: try the UPDATE first;
# when no row matched, attempt the INSERT and swallow a concurrent
# unique_violation raised by a racing insert.
UPSERT_TMLPT = 'do $$\
begin\
{update}\
IF FOUND THEN\
RETURN;\
END IF;\
BEGIN\
{insert}\
RETURN;\
EXCEPTION WHEN unique_violation THEN\
END;\
end\
$$'
# (database_name, schema_name) pair naming where the flat tables live.
DatabaseInfo = namedtuple('DatabaseInfo', ['database_name', 'schema_name'])
def get_cleaned_field_name(field_name):
    """Return *field_name* with leading non-alphabetic characters stripped.

    Returns None when the name contains no alphabetic character at all.
    """
    for position, character in enumerate(field_name):
        if character.isalpha():
            return field_name[position:]
    return None
def is_id_field(field_name):
    """Tell whether *field_name* is one of the recognized ID column names."""
    recognized_id_names = ('id', 'oid', '_id', '_oid', '_id_oid', 'id_oid')
    return field_name in recognized_id_names
def get_table_name_schema(str_list):
    """Join database, schema and table names into a dotted identifier.

    The last component (the table name) is double-quoted; empty/None
    components are dropped from the result.
    """
    quoted_table = '"' + str_list[-1] + '"'
    parts = [component for component in str_list[:-1] if component]
    parts.append(quoted_table)
    return '.'.join(parts)
def get_schema(schema_in):
    """Unwrap a JSON schema: a list wraps its single schema element.

    Anything that is not a list (dicts included) is returned unchanged.
    """
    return schema_in[0] if type(schema_in) is list else schema_in
def get_schema_dict(schema_in):
    """Return the plain dict for a schema.

    Accepts either an already-plain dict (returned as-is) or a
    schema_engine object exposing the dict via its ``schema`` attribute.
    """
    if type(schema_in) == dict:
        return schema_in
    return schema_in.schema
def get_cleaned_path(path):
    """Return the path nodes with purely-numeric components (array
    indexes) filtered out."""
    return [node for node in path if not node.isdigit()]
def get_postgres_type(type_name):
    """Map a JSON-schema type name (case-insensitive) to a PostgreSQL type.

    Raises KeyError for an unknown type name.
    """
    type_map = {
        'STRING': 'text',
        'INT': 'integer',
        'BOOLEAN': 'boolean',
        'LONG': 'bigint',
        'TIMESTAMP': 'timestamp',
        'DOUBLE': 'double precision',
        'BIGINT': 'bigint',
        'TINYINT': 'integer',
    }
    return type_map[type_name.upper()]
def get_table_name_from_list(spath):
    """Build a flat table name from a list of path nodes.

    Numeric nodes (array indexes) are dropped.  Intermediate nodes are
    singularized (a trailing 's' is stripped) and joined with '_'; the
    last node is cleaned of leading non-alphabetic characters.

    Fix: the previous version removed digits with ``list.remove`` while
    iterating the same list, which skipped the element following each
    removed digit, so consecutive numeric nodes were only partially
    removed.  Filtering up front removes them all.
    """
    spathl = [node for node in spath if not node.isdigit()]
    if len(spathl) > 1:
        # Singularize every intermediate node before joining.
        prefix = '_'.join((node[:-1] if node[-1] == 's' else node)
                          for node in spathl[:-1])
        return '_'.join([prefix, get_cleaned_field_name(spathl[-1])])
    else:
        return spathl[-1]
def get_idx_column_name_from_list(spath):
    """Build the idx-column name from a list of path nodes.

    Numeric nodes (array indexes) are dropped and the remaining nodes are
    joined with underscores.

    Fix: the previous version removed digits with ``list.remove`` while
    iterating the same list, which skipped the element following each
    removed digit, so consecutive numeric nodes were only partially
    removed.  Filtering up front removes them all.
    """
    spathl = [node for node in spath if not node.isdigit()]
    if len(spathl) > 1:
        return '_'.join(['_'.join(spathl[:-1]), spathl[-1]])
    else:
        return spathl[-1]
def get_root_table_from_path(path):
    """Return the first (root) component of a dotted path string."""
    components = path.split('.')
    # str.split always yields at least one element; the guard mirrors the
    # original defensive branch.
    return components[0] if components else path
def get_indexes_dictionary(path):
    """returns dictionary of indexes with values from path to object

    Walks the dotted *path* right-to-left.  Every numeric node is treated
    as an array index: the table name is derived from the nodes that are
    still present (everything to the right has already been deleted) and
    mapped to the 1-based index as a string.
    NOTE: uses ``xrange`` -- Python 2 only.
    """
    index_dict = {}
    spath = path.split('.')
    # Walk positions from the end so deletions never shift an unvisited index.
    iter_i = reversed(xrange(len(spath)))
    for i in iter_i:
        if spath[i].isdigit():
            # Derive the table name from the path prefix plus this index node.
            table_name = get_table_name_from_list(spath)
            index_dict[table_name] = str(int(spath[i]) + 1)
            # Drop the index node and the collection node just before it,
            # then advance the iterator once more since both were consumed.
            del spath[i]
            del spath[i - 1]
            next(iter_i)
        else:
            del spath[i]
    return index_dict
def get_indexes_dictionary_idx(path):
    """returns dictionary of indexes with values from path to object with
    idx column name convention

    Same right-to-left walk as ``get_indexes_dictionary`` but keys are
    built with ``get_idx_column_name_from_list`` (no singularization /
    field-name cleaning).
    NOTE: uses ``xrange`` -- Python 2 only.
    """
    index_dict = {}
    spath = path.split('.')
    # Walk positions from the end so deletions never shift an unvisited index.
    iter_i = reversed(xrange(len(spath)))
    for i in iter_i:
        if spath[i].isdigit():
            table_name = get_idx_column_name_from_list(spath)
            index_dict[table_name] = str(int(spath[i]) + 1)
            # Drop the index node and the collection node just before it,
            # then advance the iterator once more since both were consumed.
            del spath[i]
            del spath[i - 1]
            next(iter_i)
        else:
            del spath[i]
    return index_dict
def get_last_idx_from_path(path):
    """If the dotted *path* ends in a numeric node, return that index
    plus one as a string; otherwise return None."""
    last_node = path.split('.')[-1]
    if not last_node.isdigit():
        return None
    return str(int(last_node) + 1)
def get_ids_list(lst):
    """returns list of ids

    Builds a dict mapping id-column names to PostgreSQL types for one
    record schema, searching in priority order:
      1. an ``id``/``_id`` field holding a dict with an ``oid`` sub-field
         (Mongo ObjectId style) -> e.g. ``{'id_oid': <pg type>}``;
      2. a scalar ``id``/``_id`` field;
      3. otherwise a synthetic ``{'idx': 'bigint'}`` counter column.
    The dict values in the schema are expected to be type-name strings
    accepted by ``get_postgres_type``.
    """
    if type(lst) is list:
        list_it = lst[0]
    else:
        list_it = lst
    # search for _id/id :{oid, id_bscon} struct
    ids_to_add = {}
    for list_it_el in list_it:
        if get_cleaned_field_name(list_it_el) in ['id', '_id']:
            if type(list_it[list_it_el]) is dict:
                for e_el in list_it[list_it_el]:
                    if get_cleaned_field_name(e_el) in ["oid"]:
                        # Key becomes e.g. 'id_oid' (cleaned parent + child).
                        ids_to_add[get_cleaned_field_name(
                            list_it_el) + "_" + get_cleaned_field_name(
                            e_el)] = get_postgres_type(
                            list_it[list_it_el][e_el])
    if len(ids_to_add) != 0:
        return ids_to_add
    # search for _id/id fields
    for list_it_el in list_it:
        if get_cleaned_field_name(list_it_el) in ['id', '_id']:
            ids_to_add[
                get_cleaned_field_name(list_it_el)] = get_postgres_type(
                list_it[list_it_el])
    if len(ids_to_add) != 0:
        return ids_to_add
    # set index column to idx if id_oid or id not found
    ids_to_add['idx'] = 'bigint'
    return ids_to_add
def get_tables_structure(schema, table, table_mappings, parent_tables_ids,
                         root_table, parent_key):
    """returns structure of all tables in schema with following view:
    {table1_name:{
        field1_name:TYPE,
        field2_name:TYPE
    }}

    Args:
        schema: schema fragment -- a dict, or a single-element list of one.
        table: name of the table currently being built.
        table_mappings: accumulator dict; also the return value.
        parent_tables_ids: id columns inherited from the parent table.
        root_table: truthy only for the top-level collection call.
        parent_key: field name used to name the value column when the list
            holds scalars rather than objects.

    NOTE: ``iterkeys().next()`` below is Python 2 only.
    """
    if type(schema) is list:
        table_struct = schema[0]
    else:
        table_struct = schema
    table_mappings[table] = {}
    # Child tables carry their ancestors' id columns.
    for ids in parent_tables_ids:
        table_mappings[table][ids] = parent_tables_ids[ids]
    if not root_table:
        # Nested (array) table: add a synthetic idx counter column and
        # expose it to this table's own children.
        table_mappings[table][u'idx'] = u'bigint'
        parent_tables_ids[table + u'_idx'] = u'bigint'
    else:
        # Root collection: propagate its real id column instead.
        root_ids = get_ids_list(schema)
        root_id_key = root_ids.iterkeys().next()
        parent_tables_ids[table + '_' + root_id_key] = root_ids[
            root_id_key].decode('utf-8')
        root_table = 0
    if not type(table_struct) is dict:
        # List of scalars: one value column named after the parent key.
        table_mappings[table][
            get_cleaned_field_name(parent_key)] = get_postgres_type(
            table_struct)
        return table_mappings
    for element in table_struct:
        if type(table_struct[element]) is list:
            # Nested array -> recurse into its own child table; the parent
            # name is singularized (table[:-1]) before being prefixed.
            get_tables_structure(table_struct[element], table[
                :-1] + '_' +
                get_cleaned_field_name(
                    element),
                table_mappings, parent_tables_ids.copy(),
                root_table, element)
        elif type(table_struct[element]) is dict:
            # Nested object -> flattened into the current table.
            get_table_struct_from_dict(table_struct[element], table,
                                       table_mappings, parent_tables_ids.copy(),
                                       get_cleaned_field_name(
                                           element))
        else:
            # Plain scalar field.
            table_mappings[table][
                get_cleaned_field_name(element)] = get_postgres_type(
                table_struct[element])
    return table_mappings
def get_table_struct_from_dict(schema, table, table_mappings, parent_tables_ids,
                               parent_name):
    """returns tables structures for enclosed objects

    Flattens a nested object into *table*: scalar fields become columns
    prefixed with *parent_name*, nested dicts recurse with an extended
    prefix, and nested lists spawn their own child tables via
    ``get_tables_structure``.  Mutates ``table_mappings`` in place
    (returns None).
    """
    for column in schema:
        if type(schema[column]) is dict:
            # Deeper object: extend the column-name prefix and recurse.
            get_table_struct_from_dict(schema[column], table, table_mappings,
                                       parent_tables_ids,
                                       parent_name + '_' +
                                       get_cleaned_field_name(
                                           column))
        elif type(schema[column]) is list:
            # Array inside the object: becomes its own child table.
            get_tables_structure(schema[column],
                                 table[:-1] + '_' + parent_name + '_' + column,
                                 table_mappings,
                                 parent_tables_ids, 0, column)
        else:
            # Scalar: flattened column '<parent>_<cleaned column>'.
            table_mappings[table][
                parent_name + '_' + get_cleaned_field_name(
                    column)] = get_postgres_type(
                schema[column])
def get_column_type(schema, table, field_name, collection_name):
    """Return the PostgreSQL type of *field_name* in *table*.

    Rebuilds the full table structure for *collection_name* from *schema*
    and looks the column up in it.
    """
    structure = get_tables_structure(schema, collection_name, {}, {}, 1, '')
    return structure[table][field_name]
def get_quotes_using(schema, table, field_name, collection_name):
    """Return True when values of this column need quoting in SQL,
    i.e. when the column's type is not numeric."""
    unquoted_types = ('int', 'bigint', 'integer', 'double')
    column_type = get_column_type(schema, table, field_name, collection_name)
    return column_type not in unquoted_types
def get_part_schema(schema_in, path):
    """returns 'child' part of the schema related to path

    Args:
        schema_in: schema (dict, or single-element list wrapping one).
        path: list of path nodes (numeric nodes are ignored) or one node.

    Recurses down nested dicts/lists until the path is exhausted and
    returns the schema fragment found there.  Implicitly returns None
    when the current node is not present in the schema.
    """
    schema = get_schema(schema_in)
    w_path = []
    if type(path) is list:
        # Drop array-index nodes; walk the first remaining node.
        w_path = get_cleaned_path(path)
        current_path = w_path[0]
    else:
        current_path = path
    if current_path in schema.keys():
        if type(schema[current_path]) is dict:
            if len(w_path) > 1:
                return get_part_schema(schema[current_path], w_path[1:])
            else:
                return schema[current_path]
        elif type(schema[current_path]) is list:
            if type(w_path) is list:
                if len(w_path[1:]) == 0:
                    return schema[current_path]
                else:
                    # Recurse into the list; get_schema unwraps it.
                    return get_part_schema(schema[current_path], w_path[1:])
            else:
                return schema[current_path]
        else:
            # Scalar leaf.
            return schema[current_path]
| |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for api module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.autograph import utils
from tensorflow.python.autograph.core import config
from tensorflow.python.autograph.impl import api
from tensorflow.python.autograph.pyct import parser
from tensorflow.python.autograph.utils import py_func
from tensorflow.python.framework import constant_op
from tensorflow.python.platform import test
from tensorflow.python.util import tf_inspect
# 'tf' is the test double returned by autograph's utils.fake_tf(), not the
# real TensorFlow module.
tf = utils.fake_tf()
class ApiTest(test.TestCase):
    """Tests for the autograph public API: the ``convert``/``do_not_convert``
    decorators, ``converted_call``, ``to_graph`` and ``to_code``."""

    def setUp(self):
        # Every compiled function gets print_function semantics prepended.
        config.COMPILED_IMPORT_STATEMENTS = (
            'from __future__ import print_function',
        )

    def test_decorator_recurses(self):
        class TestClass(object):

            def called_member(self, a):
                if a < 0:
                    a = -a
                return a

            @api.convert(recursive=True)
            def test_method(self, x, s, a):
                while tf.reduce_sum(x) > s:
                    x //= self.called_member(a)
                return x

        tc = TestClass()
        with self.test_session() as sess:
            x = tc.test_method(
                constant_op.constant([2, 4]), constant_op.constant(1),
                constant_op.constant(-2))
            self.assertListEqual([0, 1], sess.run(x).tolist())

    def test_decorator_does_not_recurse(self):
        class TestClass(object):

            def called_member(self, a):
                return tf.negative(a)

            @api.convert(recursive=False)
            def test_method(self, x, s, a):
                while tf.reduce_sum(x) > s:
                    x //= self.called_member(a)
                return x

        tc = TestClass()
        with self.test_session() as sess:
            x = tc.test_method(
                constant_op.constant([2, 4]), constant_op.constant(1),
                constant_op.constant(-2))
            self.assertListEqual([0, 1], sess.run(x).tolist())

    def test_decorator_calls_unconverted_graph(self):
        class TestClass(object):

            @api.do_not_convert(api.RunMode.GRAPH)
            def called_member(self, a):
                return tf.negative(a)

            @api.convert(recursive=True)
            def test_method(self, x, s, a):
                while tf.reduce_sum(x) > s:
                    x //= self.called_member(a)
                return x

        tc = TestClass()
        with self.test_session() as sess:
            x = tc.test_method(
                constant_op.constant([2, 4]), constant_op.constant(1),
                constant_op.constant(-2))
            self.assertListEqual([0, 1], sess.run(x).tolist())

    def test_decorator_calls_unconverted_py_func(self):
        class TestClass(object):

            @api.do_not_convert(
                api.RunMode.PY_FUNC, return_dtypes=py_func.MatchDType(1))
            def called_member(self, a):
                return np.negative(a)

            @api.convert(recursive=True)
            def test_method(self, x, s, a):
                while tf.reduce_sum(x) > s:
                    y = self.called_member(a)
                    # set_shape works around while_loop's limitations.
                    # TODO(mdan): Allow specifying shapes (or ShapeLike) instead.
                    y.set_shape(a.shape)
                    x //= y
                return x

        tc = TestClass()
        with self.test_session() as sess:
            x = tc.test_method(
                constant_op.constant([2, 4]), constant_op.constant(1),
                constant_op.constant(-2))
            self.assertListEqual([0, 1], sess.run(x).tolist())

    def test_decorator_calls_decorated(self):
        class TestClass(object):

            @api.convert()
            def called_member(self, a):
                if a < 0:
                    a = -a
                return a

            @api.convert(recursive=True)
            def test_method(self, x, s, a):
                while tf.reduce_sum(x) > s:
                    x //= self.called_member(a)
                return x

        tc = TestClass()
        with self.test_session() as sess:
            x = tc.test_method(
                constant_op.constant([2, 4]), constant_op.constant(1),
                constant_op.constant(-2))
            self.assertListEqual([0, 1], sess.run(x).tolist())

    def test_decorator_preserves_argspec(self):
        class TestClass(object):

            def called_member(self, a):
                if a < 0:
                    a = -a
                return a

            # Converting should not alter the visible argspec.
            called_member_converted = api.convert()(called_member)

        tc = TestClass()
        self.assertListEqual(
            list(tf_inspect.getfullargspec(tc.called_member)),
            list(tf_inspect.getfullargspec(tc.called_member_converted)))

    def test_convert_call_site_decorator(self):
        class TestClass(object):

            def called_member(self, a):
                if a < 0:
                    a = -a
                return a

            @api.convert(recursive=True)
            def test_method(self, x, s, a):
                while tf.reduce_sum(x) > s:
                    x //= api.converted_call(self.called_member, False, False, False, {},
                                             self, a)
                return x

        tc = TestClass()
        with self.test_session() as sess:
            x = tc.test_method(
                constant_op.constant([2, 4]), constant_op.constant(1),
                constant_op.constant(-2))
            self.assertListEqual([0, 1], sess.run(x).tolist())

    def test_converted_call_builtin(self):
        x = api.converted_call(range, False, False, False, {}, 3)
        self.assertEqual((0, 1, 2), tuple(x))

    def test_converted_call_function(self):
        def test_fn(x):
            if x < 0:
                return -x
            return x

        with self.test_session() as sess:
            x = api.converted_call(test_fn, False, False, False, {},
                                   constant_op.constant(-1))
            self.assertEqual(1, sess.run(x))

    def test_converted_call_method(self):
        class TestClass(object):

            def __init__(self, x):
                self.x = x

            def test_method(self):
                if self.x < 0:
                    return -self.x
                return self.x

        with self.test_session() as sess:
            tc = TestClass(constant_op.constant(-1))
            x = api.converted_call(tc.test_method, False, False, False, {}, tc)
            self.assertEqual(1, sess.run(x))

    def test_converted_call_method_by_class(self):
        class TestClass(object):

            def __init__(self, x):
                self.x = x

            def test_method(self):
                if self.x < 0:
                    return -self.x
                return self.x

        with self.test_session() as sess:
            tc = TestClass(constant_op.constant(-1))
            # Unbound method: the instance is passed explicitly.
            x = api.converted_call(TestClass.test_method, False, False, False, {}, tc)
            self.assertEqual(1, sess.run(x))

    def test_converted_call_callable_object(self):
        class TestClass(object):

            def __init__(self, x):
                self.x = x

            def __call__(self):
                if self.x < 0:
                    return -self.x
                return self.x

        with self.test_session() as sess:
            tc = TestClass(constant_op.constant(-1))
            x = api.converted_call(tc, False, False, False, {})
            self.assertEqual(1, sess.run(x))

    def test_converted_call_constructor(self):
        class TestClass(object):

            def __init__(self, x):
                self.x = x

            def test_method(self):
                if self.x < 0:
                    return -self.x
                return self.x

        with self.test_session() as sess:
            tc = api.converted_call(TestClass, False, False, False, {},
                                    constant_op.constant(-1))
            # tc is now a converted object.
            x = tc.test_method()
            self.assertEqual(1, sess.run(x))

    def test_converted_call_already_converted(self):
        def f(x):
            return x == 0

        with self.test_session() as sess:
            x = api.converted_call(f, False, False, False, {},
                                   constant_op.constant(0))
            self.assertTrue(sess.run(x))
            # Converting a function that to_graph already converted must
            # not convert it a second time.
            converted_f = api.to_graph(f)
            x = api.converted_call(converted_f, False, False, False, {},
                                   constant_op.constant(0))
            self.assertTrue(sess.run(x))

    def test_to_graph_basic(self):
        def test_fn(x, s):
            while tf.reduce_sum(x) > s:
                x //= 2
            return x

        compiled_fn = api.to_graph(test_fn)
        with self.test_session() as sess:
            x = compiled_fn(constant_op.constant([4, 8]), 4)
            self.assertListEqual([1, 2], sess.run(x).tolist())

    def test_to_code_basic(self):
        def test_fn(x, s):
            while tf.reduce_sum(x) > s:
                x /= 2
            return x

        compiled_code = api.to_code(test_fn)
        # Just check that it is parseable Python code.
        self.assertIsNotNone(parser.parse_str(compiled_code))

    def test_source_map_attribute_present(self):
        def test_fn(y):
            return y**2

        self.assertTrue(hasattr(api.to_graph(test_fn), 'ag_source_map'))
if __name__ == '__main__':
    # Discover and run every test in this module.
    test.main()
| |
#!/usr/bin/env python
import sys
import os
import pandas as pd
import numpy as np
from matplotlib import pyplot as plt
from hmmlearn.hmm import *
from sklearn.externals import joblib
import ipdb
from math import (
log,
exp
)
from sklearn.preprocessing import (
scale,
normalize
)
import time
def matplot_list(list_data,
                 figure_index,
                 title,
                 label_string,
                 save=False,
                 linewidth='3.0'):
    """Plot each series in *list_data* against a 0.01 s-per-sample time axis.

    Args:
        list_data: iterable of 1-D series, all sampled every 10 ms.
        figure_index: matplotlib figure number to draw into.
        title: figure title; also used as the file name when saving.
        label_string: legend labels, parallel to list_data.
        save: when True, write the figure to "<title>.eps".
        linewidth: line width forwarded to pyplot.plot.
    """
    # if you want to save, title is necessary as a save name.
    # NOTE(review): n_state and covariance_type_string are module globals
    # assumed to be set elsewhere in this script -- confirm before reuse.
    global n_state
    global covariance_type_string
    plt.figure(figure_index, figsize=(40,30), dpi=80)
    ax = plt.subplot(111)
    # Hide the top/right spines and pin the x axis at y=0.
    ax.spines['right'].set_color('none')
    ax.spines['top'].set_color('none')
    ax.xaxis.set_ticks_position('bottom')
    ax.spines['bottom'].set_position(('data',0))
    plt.grid(True)
    i = 0
    for data in list_data:
        i = i + 1
        index = np.asarray(data).shape
        # Time axis: one point per 10 ms sample.
        O = (np.arange(index[0])*0.01).tolist()
        plt.plot(O, data, label=label_string[i-1],linewidth=linewidth)
    plt.legend(loc='best', frameon=True)
    plt.title(title)
    plt.annotate('State=4 Sub_State='+str(n_state)+' GaussianHMM_cov='+covariance_type_string,
                 xy=(0, 0), xycoords='data',
                 xytext=(+10, +30), textcoords='offset points', fontsize=16,
                 arrowprops=dict(arrowstyle="->", connectionstyle="arc3,rad=.2"))
    if save:
        plt.savefig(title+".eps", format="eps")
def scaling(X):
    """Standardize X row-wise over a sliding window.

    For each interior row, a window of 2*window+1 rows centred on it is
    standardized with sklearn's ``scale`` and the centre row of that
    standardized window is kept.  Returns the kept rows stacked into an
    ndarray (shorter than X because border rows are skipped).
    """
    window = 10
    total_rows, _ = X.shape
    centred_rows = []
    for middle in range(window, total_rows - window - 2):
        standardized = scale(X[middle - window:middle + window + 1, :])
        centred_rows.append(standardized[window, :])
    return np.asarray(centred_rows)
def load_data(path, preprocessing_scaling=False, preprocessing_normalize=False, norm='l2'):
    """Load wrench data from ``<path>/tag_multimodal.csv``.

    Returns a tuple ``(Data, X_time)`` where Data is an (n, 7) array of
    [time, force xyz, torque xyz] rows and X_time is a (5, 1) array with
    the end time of each of the four tagged phases (first entry is 0).
    The ``preprocessing_*``/``norm`` parameters are currently unused.

    df.columns = u'time', u'.endpoint_state.header.seq',
    u'.endpoint_state.header.stamp.secs',
    u'.endpoint_state.header.stamp.nsecs',
    u'.endpoint_state.header.frame_id', u'.endpoint_state.pose.position.x',
    u'.endpoint_state.pose.position.y', u'.endpoint_state.pose.position.z',
    u'.endpoint_state.pose.orientation.x',
    u'.endpoint_state.pose.orientation.y',
    u'.endpoint_state.pose.orientation.z',
    u'.endpoint_state.pose.orientation.w',
    u'.endpoint_state.twist.linear.x', u'.endpoint_state.twist.linear.y',
    u'.endpoint_state.twist.linear.z', u'.endpoint_state.twist.angular.x',
    u'.endpoint_state.twist.angular.y', u'.endpoint_state.twist.angular.z',
    u'.endpoint_state.wrench.force.x', u'.endpoint_state.wrench.force.y',
    u'.endpoint_state.wrench.force.z', u'.endpoint_state.wrench.torque.x',
    u'.endpoint_state.wrench.torque.y', u'.endpoint_state.wrench.torque.z',
    u'.joint_state.header.seq', u'.joint_state.header.stamp.secs',
    u'.joint_state.header.stamp.nsecs', u'.joint_state.header.frame_id',
    u'.joint_state.name', u'.joint_state.position',
    u'.joint_state.velocity', u'.joint_state.effort',
    u'.wrench_stamped.header.seq', u'.wrench_stamped.header.stamp.secs',
    u'.wrench_stamped.header.stamp.nsecs',
    u'.wrench_stamped.header.frame_id', u'.wrench_stamped.wrench.force.x',
    u'.wrench_stamped.wrench.force.y', u'.wrench_stamped.wrench.force.z',
    u'.wrench_stamped.wrench.torque.x', u'.wrench_stamped.wrench.torque.y',
    u'.wrench_stamped.wrench.torque.z', u'.tag']
    """
    df = pd.read_csv(path + "/tag_multimodal.csv", sep=',')
    # Parenthesized print is valid in both Python 2 and Python 3.
    print("%s" % (path))
    df = df[[u'.wrench_stamped.wrench.force.x',
             u'.wrench_stamped.wrench.force.y',
             u'.wrench_stamped.wrench.force.z',
             u'.wrench_stamped.wrench.torque.x',
             u'.wrench_stamped.wrench.torque.y',
             u'.wrench_stamped.wrench.torque.z',
             u'.tag']]
    # NOTE(review): at this point column -2 is torque.z and -1 is .tag;
    # if the intent was to drop untagged rows this should use [:, -1].
    df = df[df.values[:, -2] != 0]
    Data = df.values
    index, columns = Data.shape
    # Synthesize a 10 ms-per-sample time column.
    time_data = []
    for i in range(index):
        time_data.append((i + 1) * 0.01)
    df['time'] = pd.Series(time_data)
    # After appending 'time', column -2 is the .tag column.
    X_1 = df.values[df.values[:, -2] == 1]
    X_2 = df.values[df.values[:, -2] == 2]
    X_3 = df.values[df.values[:, -2] == 3]
    X_4 = df.values[df.values[:, -2] == 4]
    # End time (last 'time' value) of each tagged phase.
    X_1_time = X_1[:, -1][-1]
    X_2_time = X_2[:, -1][-1]
    X_3_time = X_3[:, -1][-1]
    X_4_time = X_4[:, -1][-1]
    X_time = [[0],
              [X_1_time],
              [X_2_time],
              [X_3_time],
              [X_4_time]]
    X_time = np.array(X_time)
    df = df[[u'time',
             u'.wrench_stamped.wrench.force.x',
             u'.wrench_stamped.wrench.force.y',
             u'.wrench_stamped.wrench.force.z',
             u'.wrench_stamped.wrench.torque.x',
             u'.wrench_stamped.wrench.torque.y',
             u'.wrench_stamped.wrench.torque.z']]
    Data = df.values
    return Data, X_time
def main():
    """Convert each recorded trial's CSV into R_Torques.dat / R_State.dat.

    For every trial folder listed in ``index``, load the wrench samples and
    per-state end times via ``load_data`` and save them as whitespace-separated
    text files next to the source CSV.

    Returns 0 on success so the result can be passed to ``sys.exit``.
    """
    # Trial folder names. NOTE: trials '10' and '17' are absent from the
    # recorded data set, so there are only 22 entries here.
    index = ['01',
             '02',
             '03',
             '04',
             '05',
             '06',
             '07',
             '08',
             '09',
             '11',
             '12',
             '13',
             '14',
             '15',
             '16',
             '18',
             '19',
             '20',
             '21',
             '22',
             '23',
             '24']
    path = "/home/ben//ML_data/REAL_BAXTER_PICK_N_PLACE_5_18/success/"
    # Iterate over the list itself: the previous hard-coded range(24) overran
    # the 22-entry list and raised IndexError on the last two iterations.
    # (A leftover ipdb.set_trace() debugger breakpoint was also removed.)
    for trial in index:
        Data, Time_Index = load_data(path=path + trial)
        np.savetxt(path + trial + '/R_Torques.dat', Data, fmt='%.6f')
        np.savetxt(path + trial + '/R_State.dat', Time_Index, fmt='%.6f')
    return 0
# Script entry point: the process exit status is taken from main()'s return value.
if __name__ == '__main__':
    sys.exit(main())
| |
'''
To run a Bokeh application on a Bokeh server from a single Python script,
pass the script name to ``bokeh serve`` on the command line:
.. code-block:: sh
bokeh serve app_script.py
By default, the Bokeh application will be served by the Bokeh server on a
default port ({DEFAULT_PORT}) at localhost, under the path ``/app_script``,
i.e.,
.. code-block:: none
http://localhost:{DEFAULT_PORT}/app_script
It is also possible to run the same command with Jupyter notebooks:
.. code-block:: sh
bokeh serve app_notebook.ipynb
This will generate the same results as described with a python script
and the application will be served on a default port ({DEFAULT_PORT})
at localhost, under the path ``/app_notebook``
Applications can also be created from directories. The directory should
contain a ``main.py`` (and any other helper modules that are required) as
well as any additional assets (e.g., theme files). Pass the directory name
to ``bokeh serve`` to run the application:
.. code-block:: sh
bokeh serve app_dir
It is possible to run multiple applications at once:
.. code-block:: sh
bokeh serve app_script.py app_dir
If you would like to automatically open a browser to display the HTML
page(s), you can pass the ``--show`` option on the command line:
.. code-block:: sh
bokeh serve app_script.py app_dir --show
This will open two pages, for ``/app_script`` and ``/app_dir``,
respectively.
If you would like to pass command line arguments to Bokeh applications,
you can pass the ``--args`` option as the LAST option on the command
line:
.. code-block:: sh
bokeh serve app_script.py myapp.py --args foo bar --baz
Everything that follows ``--args`` will be included in ``sys.argv`` when
the application runs. In this case, when ``myapp.py`` executes, the
contents of ``sys.argv`` will be ``['myapp.py', 'foo', 'bar', '--baz']``,
consistent with standard Python expectations for ``sys.argv``.
Note that if multiple scripts or directories are provided, they
all receive the same set of command line arguments (if any) given by
``--args``.
If you have only one application, the server root will redirect to it.
Otherwise, you can see an index of all running applications at the server root:
.. code-block:: none
http://localhost:5006/
This index can be disabled with the ``--disable-index`` option, and the redirect
behavior can be disabled with the ``--disable-index-redirect`` option.
Network Configuration
~~~~~~~~~~~~~~~~~~~~~
To control the port that the Bokeh server listens on, use the ``--port``
argument:
.. code-block:: sh
bokeh serve app_script.py --port 8080
To listen on an arbitrary port, pass ``0`` as the port number. The actual
port number will be logged at startup.
Similarly, a specific network address can be specified with the
``--address`` argument. For example:
.. code-block:: sh
bokeh serve app_script.py --address 0.0.0.0
will have the Bokeh server listen all available network addresses.
Bokeh server can fork the underlying tornado server into multiprocess. This is
useful when trying to handle multiple connections especially in the context of
apps which require high computational loads. Default behavior is one process.
using 0 will auto-detect the number of cores and spin up corresponding number of
processes
.. code-block:: sh
bokeh serve app_script.py --num-procs 2
By default, cross site connections to the Bokeh server websocket are not
allowed. You can enable websocket connections originating from additional
hosts by specifying them with the ``--allow-websocket-origin`` option:
.. code-block:: sh
bokeh serve app_script.py --allow-websocket-origin foo.com:8081
It is possible to specify multiple allowed websocket origins by adding
the ``--allow-websocket-origin`` option multiple times.
The Bokeh server can also add an optional prefix to all URL paths.
This can often be useful in conjunction with "reverse proxy" setups.
.. code-block:: sh
bokeh serve app_script.py --prefix foobar
Then the application will be served under the following URL:
.. code-block:: none
http://localhost:{DEFAULT_PORT}/foobar/app_script
If needed, Bokeh server can send keep-alive pings at a fixed interval.
To configure this feature, set the ``--keep-alive`` option:
.. code-block:: sh
bokeh serve app_script.py --keep-alive 10000
The value is specified in milliseconds. The default keep-alive interval
is 37 seconds. Give a value of 0 to disable keep-alive pings.
To control how often statistic logs are written, set the
--stats-log-frequency option:
.. code-block:: sh
bokeh serve app_script.py --stats-log-frequency 30000
The value is specified in milliseconds. The default interval for
logging stats is 15 seconds. Only positive integer values are accepted.
To have the Bokeh server override the remote IP and URI scheme/protocol for
all requests with ``X-Real-Ip``, ``X-Forwarded-For``, ``X-Scheme``,
``X-Forwarded-Proto`` headers (if they are provided), set the
``--use-xheaders`` option:
.. code-block:: sh
bokeh serve app_script.py --use-xheaders
This is typically needed when running a Bokeh server behind a reverse proxy
that is SSL-terminated.
.. warning::
It is not advised to set this option on a Bokeh server directly facing
the Internet.
Session ID Options
~~~~~~~~~~~~~~~~~~
Typically, each browser tab connected to a Bokeh server will have
its own session ID. When the server generates an ID, it will make
it cryptographically unguessable. This keeps users from accessing
one another's sessions.
To control who can use a Bokeh application, the server can sign
sessions with a secret key and reject "made up" session
names. There are three modes, controlled by the ``--session-ids``
argument:
.. code-block:: sh
bokeh serve app_script.py --session-ids signed
The available modes are: {SESSION_ID_MODES}
In ``unsigned`` mode, the server will accept any session ID
provided to it in the URL. For example,
``http://localhost/app_script?bokeh-session-id=foo`` will create a
session ``foo``. In ``unsigned`` mode, if the session ID isn't
provided with ``?bokeh-session-id=`` in the URL, the server will
still generate a cryptographically-unguessable ID. However, the
server allows clients to create guessable or deliberately-shared
sessions if they want to.
``unsigned`` mode is most useful when the server is running
locally for development, for example you can have multiple
processes access a fixed session name such as
``default``. ``unsigned`` mode is also convenient because there's
no need to generate or configure a secret key.
In ``signed`` mode, the session ID must be in a special format and
signed with a secret key. Attempts to use the application with an
invalid session ID will fail, but if no ``?bokeh-session-id=``
parameter is provided, the server will generate a fresh, signed
session ID. The result of ``signed`` mode is that only secure
session IDs are allowed but anyone can connect to the server.
In ``external-signed`` mode, the session ID must be signed but the
server itself won't generate a session ID; the
``?bokeh-session-id=`` parameter will be required. To use this
mode, you would need some sort of external process (such as
another web app) which would use the
``bokeh.util.session_id.generate_session_id()`` function to create
valid session IDs. The external process and the Bokeh server must
share the same ``BOKEH_SECRET_KEY`` environment variable.
``external-signed`` mode is useful if you want another process to
authenticate access to the Bokeh server; if someone is permitted
to use the Bokeh application, you would generate a session ID for
them, then redirect them to the Bokeh server with that valid
session ID. If you don't generate a session ID for someone, then
they can't load the app from the Bokeh server.
In both ``signed`` and ``external-signed`` mode, the secret key
must be kept secret; anyone with the key can generate a valid
session ID.
The secret key should be set in a ``BOKEH_SECRET_KEY`` environment
variable and should be a cryptographically random string with at
least 256 bits (32 bytes) of entropy. You can generate a new
secret key with the ``bokeh secret`` command.
Session Expiration Options
~~~~~~~~~~~~~~~~~~~~~~~~~~
To configure how often to check for unused sessions, set the
--check-unused-sessions option:
.. code-block:: sh
bokeh serve app_script.py --check-unused-sessions 10000
The value is specified in milliseconds. The default interval for
checking for unused sessions is 17 seconds. Only positive integer
values are accepted.
To configure how long unused sessions last, set the
--unused-session-lifetime option:
.. code-block:: sh
bokeh serve app_script.py --unused-session-lifetime 60000
The value is specified in milliseconds. The default lifetime interval
for unused sessions is 15 seconds. Only positive integer values are
accepted.
Logging Options
~~~~~~~~~~~~~~~
The logging level can be controlled by the ``--log-level`` argument:
.. code-block:: sh
bokeh serve app_script.py --log-level debug
The available log levels are: {LOGLEVELS}
The log format can be controlled by the ``--log-format`` argument:
.. code-block:: sh
bokeh serve app_script.py --log-format "%(levelname)s: %(message)s"
The default log format is ``"{DEFAULT_LOG_FORMAT}"``
'''
from __future__ import absolute_import
import logging
log = logging.getLogger(__name__)
import argparse
import warnings
from bokeh.application import Application
from bokeh.resources import DEFAULT_SERVER_PORT
from bokeh.util.logconfig import basicConfig
from bokeh.util.string import nice_join, format_docstring
from bokeh.settings import settings
from os import getpid
from ..subcommand import Subcommand
from ..util import build_single_handler_applications, die, report_server_init_errors
# Accepted values for the --log-level argument.
LOGLEVELS = ('trace', 'debug', 'info', 'warning', 'error', 'critical')
# Accepted modes for the --session-ids argument (described in the module docstring).
SESSION_ID_MODES = ('unsigned', 'signed', 'external-signed')
# Python logging format used when --log-format is not given.
DEFAULT_LOG_FORMAT = "%(asctime)s %(message)s"
# Interpolate the runtime defaults into the module docstring so the generated
# help text always matches the actual values.
__doc__ = format_docstring(__doc__,
    DEFAULT_PORT=DEFAULT_SERVER_PORT,
    LOGLEVELS=nice_join(LOGLEVELS),
    SESSION_ID_MODES=nice_join(SESSION_ID_MODES),
    DEFAULT_LOG_FORMAT=DEFAULT_LOG_FORMAT
)
def _fixup_deprecated_host_args(args):
    ''' Fold deprecated ``--host`` values into ``--allow-websocket-origin``.

    Mutates ``args`` in place: any hosts given via the deprecated flag are
    merged (de-duplicated) into ``args.allow_websocket_origin``, and a
    deprecation warning is emitted. Does nothing when ``--host`` was not used.
    '''
    if not args.host:
        return
    existing = args.allow_websocket_origin
    if existing is None:
        existing = []
    # Merge via a set so duplicate origins collapse, matching the old behavior.
    merged = set(existing)
    merged.update(args.host)
    args.allow_websocket_origin = list(merged)
    warnings.warn(
        "The --host parameter is deprecated because it is no longer needed. "
        "It will be removed and trigger an error in a future release. "
        "Values set now will be copied to --allow-websocket-origin. "
        "Depending on your use case, you may need to set current --host "
        "values for 'allow_websocket_origin' instead."
    )
# Options shared by every server-launching subcommand: network endpoint
# (--port/--address) and logging configuration (--log-level/--log-format).
# Each entry is an (argparse flag, kwargs-dict) pair consumed by Subcommand.
base_serve_args = (
    ('--port', dict(
        metavar = 'PORT',
        type = int,
        help = "Port to listen on",
        default = DEFAULT_SERVER_PORT
    )),
    ('--address', dict(
        metavar = 'ADDRESS',
        type = str,
        help = "Address to listen on",
        default = None,
    )),
    ('--log-level', dict(
        metavar = 'LOG-LEVEL',
        action = 'store',
        default = 'info',
        choices = LOGLEVELS,
        help = "One of: %s" % nice_join(LOGLEVELS),
    )),
    ('--log-format', dict(
        metavar ='LOG-FORMAT',
        action = 'store',
        default = DEFAULT_LOG_FORMAT,
        # %% escaping keeps argparse's help formatter from interpolating the default
        help = "A standard Python logging format string (default: %r)" % DEFAULT_LOG_FORMAT.replace("%", "%%"),
    )),
)
class Serve(Subcommand):
    ''' Subcommand to launch the Bokeh server.

    Wires the command-line options described in the module docstring into
    argparse, then hands the parsed values to ``bokeh.server.server.Server``.
    '''
    #: name for this subcommand
    name = "serve"
    #: help string shown for "bokeh serve" in the top-level CLI help
    help = "Run a Bokeh server hosting one or more applications"
    # Common endpoint/logging options come from base_serve_args; the tuple
    # below adds the serve-specific ones.
    args = base_serve_args + (
        ('files', dict(
            metavar='DIRECTORY-OR-SCRIPT',
            nargs='*',
            help="The app directories or scripts to serve (serve empty document if not specified)",
            default=None,
        )),
        ('--args', dict(
            metavar='COMMAND-LINE-ARGS',
            # REMAINDER: everything after --args is forwarded to the apps verbatim
            nargs=argparse.REMAINDER,
            help="Any command line arguments remaining are passed on to the application handler",
        )),
        ('--show', dict(
            action='store_true',
            help="Open server app(s) in a browser",
        )),
        ('--allow-websocket-origin', dict(
            metavar='HOST[:PORT]',
            action='append',
            type=str,
            help="Public hostnames which may connect to the Bokeh websocket",
        )),
        ('--host', dict(
            metavar='HOST[:PORT]',
            action='append',
            type=str,
            # kept only for backwards compatibility; see _fixup_deprecated_host_args
            help="*** DEPRECATED ***",
        )),
        ('--prefix', dict(
            metavar='PREFIX',
            type=str,
            help="URL prefix for Bokeh server URLs",
            default=None,
        )),
        ('--keep-alive', dict(
            metavar='MILLISECONDS',
            type=int,
            help="How often to send a keep-alive ping to clients, 0 to disable.",
            default=None,
        )),
        ('--check-unused-sessions', dict(
            metavar='MILLISECONDS',
            type=int,
            help="How often to check for unused sessions",
            default=None,
        )),
        ('--unused-session-lifetime', dict(
            metavar='MILLISECONDS',
            type=int,
            help="How long unused sessions last",
            default=None,
        )),
        ('--stats-log-frequency', dict(
            metavar='MILLISECONDS',
            type=int,
            help="How often to log stats",
            default=None,
        )),
        ('--use-xheaders', dict(
            action='store_true',
            help="Prefer X-headers for IP/protocol information",
        )),
        ('--session-ids', dict(
            metavar='MODE',
            action = 'store',
            default = None,
            choices = SESSION_ID_MODES,
            help = "One of: %s" % nice_join(SESSION_ID_MODES),
        )),
        ('--disable-index', dict(
            action = 'store_true',
            help = 'Do not use the default index on the root path',
        )),
        ('--disable-index-redirect', dict(
            action = 'store_true',
            help = 'Do not redirect to running app from root path',
        )),
        ('--num-procs', dict(
            metavar='N',
            action='store',
            help="Number of worker processes for an app. Default to one. Using "
                 "0 will autodetect number of cores",
            default=1,
            type=int,
        )),
    )
    def invoke(self, args):
        ''' Build the applications and run a blocking Bokeh server.

        Args:
            args (argparse.Namespace) : parsed "bokeh serve" options

        Does not return until the server is shut down.
        '''
        # protect this import inside a function so that "bokeh info" can work
        # even if Tornado is not installed
        from bokeh.server.server import Server
        # every file/directory gets the same post---args argv list
        argvs = { f : args.args for f in args.files}
        applications = build_single_handler_applications(args.files, argvs)
        log_level = getattr(logging, args.log_level.upper())
        basicConfig(level=log_level, format=args.log_format)
        # This should remain here until --host is removed entirely
        _fixup_deprecated_host_args(args)
        if len(applications) == 0:
            # create an empty application by default
            applications['/'] = Application()
        if args.keep_alive is not None:
            if args.keep_alive == 0:
                log.info("Keep-alive ping disabled")
            else:
                log.info("Keep-alive ping configured every %d milliseconds", args.keep_alive)
            # rename to be compatible with Server
            args.keep_alive_milliseconds = args.keep_alive
        if args.check_unused_sessions is not None:
            log.info("Check for unused sessions every %d milliseconds", args.check_unused_sessions)
            # rename to be compatible with Server
            args.check_unused_sessions_milliseconds = args.check_unused_sessions
        if args.unused_session_lifetime is not None:
            log.info("Unused sessions last for %d milliseconds", args.unused_session_lifetime)
            # rename to be compatible with Server
            args.unused_session_lifetime_milliseconds = args.unused_session_lifetime
        if args.stats_log_frequency is not None:
            log.info("Log statistics every %d milliseconds", args.stats_log_frequency)
            # rename to be compatible with Server
            args.stats_log_frequency_milliseconds = args.stats_log_frequency
        # only forward options the user actually set; Server supplies defaults
        server_kwargs = { key: getattr(args, key) for key in ['port',
                                                              'address',
                                                              'allow_websocket_origin',
                                                              'num_procs',
                                                              'prefix',
                                                              'keep_alive_milliseconds',
                                                              'check_unused_sessions_milliseconds',
                                                              'unused_session_lifetime_milliseconds',
                                                              'stats_log_frequency_milliseconds',
                                                              'use_xheaders',
                                                             ]
                          if getattr(args, key, None) is not None }
        # session-id signing: start from the environment-derived settings,
        # then let an explicit --session-ids mode override them
        server_kwargs['sign_sessions'] = settings.sign_sessions()
        server_kwargs['secret_key'] = settings.secret_key_bytes()
        server_kwargs['generate_session_ids'] = True
        if args.session_ids is None:
            # no --session-ids means use the env vars
            pass
        elif args.session_ids == 'unsigned':
            server_kwargs['sign_sessions'] = False
        elif args.session_ids == 'signed':
            server_kwargs['sign_sessions'] = True
        elif args.session_ids == 'external-signed':
            # signed, but an external process must supply the session id
            server_kwargs['sign_sessions'] = True
            server_kwargs['generate_session_ids'] = False
        else:
            raise RuntimeError("argparse should have filtered out --session-ids mode " +
                               args.session_ids)
        if server_kwargs['sign_sessions'] and not server_kwargs['secret_key']:
            die("To sign sessions, the BOKEH_SECRET_KEY environment variable must be set; " +
                "the `bokeh secret` command can be used to generate a new key.")
        server_kwargs['use_index'] = not args.disable_index
        server_kwargs['redirect_root'] = not args.disable_index_redirect
        with report_server_init_errors(**server_kwargs):
            server = Server(applications, **server_kwargs)
            if args.show:
                # we have to defer opening in browser until we start up the server
                def show_callback():
                    for route in applications.keys():
                        server.show(route)
                server.io_loop.add_callback(show_callback)
            address_string = 'localhost'
            if server.address is not None and server.address != '':
                address_string = server.address
            # log one reachable URL per application
            for route in sorted(applications.keys()):
                url = "http://%s:%d%s%s" % (address_string, server.port, server.prefix, route)
                log.info("Bokeh app running at: %s" % url)
            log.info("Starting Bokeh server with process id: %d" % getpid())
            server.run_until_shutdown()
| |
# -*- coding: utf-8 -*-
import os
try:
import unittest2 as unittest
except ImportError:
import unittest
try:
from unittest.mock import patch
except ImportError as e:
from mock import patch
from readability import xauth, ReaderClient, ParserClient
class ClientInitTest(unittest.TestCase):
    """
    Test that passing tokens to the constructor bypasses looking in ENV.
    """
    # Credential variables the readability clients may fall back to.
    _TOKEN_VARS = ('READABILITY_PARSER_TOKEN',
                   'READABILITY_CONSUMER_KEY',
                   'READABILITY_CONSUMER_SECRET')
    def setUp(self):
        # Stash and remove any credentials so an ENV fallback would be detectable.
        self.env_cache = {}
        for var in self._TOKEN_VARS:
            if var in os.environ:
                self.env_cache[var] = os.environ.pop(var)
    def tearDown(self):
        # Put back whatever credentials were present before the test ran.
        os.environ.update(self.env_cache)
    def test_reader(self):
        """
        A ReaderClient built with explicit tokens must never consult ENV.
        """
        with patch('readability.core.required_from_env') as env_lookup:
            ReaderClient(
                consumer_key='consumer_key',
                consumer_secret='consumer_secret',
                # Fake xauth since we wont be actually making calls for this test
                token_key='token_key',
                token_secret='token_secret')
            self.assertEqual(env_lookup.call_count, 0)
    def test_parser(self):
        """
        A ParserClient built with an explicit token must never consult ENV.
        """
        with patch('readability.core.required_from_env') as env_lookup:
            ParserClient(token='token')
            self.assertEqual(env_lookup.call_count, 0)
class ReaderClientNoBookmarkTest(unittest.TestCase):
    """
    Tests for the Readability ReaderClient class that need no bookmarks.

    NOTE(review): these tests call the live Readability API via ``xauth()``,
    so they need network access and valid credentials — confirm before CI use.
    """
    def setUp(self):
        """
        Need to get a token for each test.
        """
        token_key, token_secret = xauth()
        self.reader_client = ReaderClient(token_key, token_secret)
    def test_get_article(self):
        """
        Test the `get_article` method.
        """
        # id of an article assumed to already exist on the service
        article_id = 'orrspy2p'
        response = self.reader_client.get_article(article_id)
        self.assertEqual(response.status_code, 200)
        # spot check some keys
        some_expected_keys = set(['direction', 'title', 'url', 'excerpt',
            'content', 'processed', 'short_url', 'date_published'])
        keys_set = set(response.json().keys())
        self.assertTrue(some_expected_keys.issubset(keys_set))
    def test_get_article_404(self):
        """
        Try getting an article that doesn't exist.
        """
        article_id = 'antidisestablishmentarianism'
        response = self.reader_client.get_article(article_id)
        self.assertEqual(response.status_code, 404)
    def test_get_user(self):
        """
        Test getting user data
        """
        user_response = self.reader_client.get_user()
        self.assertEqual(user_response.status_code, 200)
        # spot check a few of the user payload keys
        some_expected_keys = set(['username', 'first_name', 'last_name',
            'date_joined', 'email_into_address'])
        received_keys = set(user_response.json().keys())
        self.assertTrue(some_expected_keys.issubset(received_keys))
    def test_get_empty_tags(self):
        """
        Test getting an empty set of tags. Since there are no bookmarks
        present in this test, there should be no tags.
        """
        tag_response = self.reader_client.get_tags()
        self.assertEqual(tag_response.status_code, 200)
        response_json = tag_response.json()
        self.assertTrue('tags' in response_json)
        self.assertEqual(len(response_json['tags']), 0)
class ReaderClientSingleBookmarkTest(unittest.TestCase):
    """
    Tests that only need one bookmark

    NOTE(review): talks to the live Readability API via ``xauth()``; requires
    network access and credentials.
    """
    def setUp(self):
        """
        Get a client and add a bookmark
        """
        token_key, token_secret = xauth()
        self.reader_client = ReaderClient(token_key=token_key, token_secret=token_secret)
        self.url = 'http://www.theatlantic.com/technology/archive/2013/01/the-never-before-told-story-of-the-worlds-first-computer-art-its-a-sexy-dame/267439/'
        add_response = self.reader_client.add_bookmark(self.url)
        # 201 = created, 202 = accepted/being processed
        self.assertTrue(add_response.status_code in [201, 202])
    def tearDown(self):
        """
        Remove all added bookmarks.
        """
        for bm in self.reader_client.get_bookmarks().json()['bookmarks']:
            del_response = self.reader_client.delete_bookmark(bm['id'])
            self.assertEqual(del_response.status_code, 204)
    def test_get_bookmark(self):
        """
        Test getting one bookmark by id
        """
        bookmark_id = self._get_bookmark_data()['id']
        bm_response = self.reader_client.get_bookmark(bookmark_id)
        self.assertEqual(bm_response.status_code, 200)
        # spot check a few fields of the bookmark payload
        some_expected_keys = set(['article', 'user_id', 'favorite', 'id'])
        received_keys = set(bm_response.json().keys())
        self.assertTrue(some_expected_keys.issubset(received_keys))
    def test_bookmark_tag_functionality(self):
        """
        Test adding, fetching and deleting tags on a bookmark.
        """
        bookmark_id = self._get_bookmark_data()['id']
        # test getting empty tags
        tag_response = self.reader_client.get_bookmark_tags(bookmark_id)
        self.assertEqual(tag_response.status_code, 200)
        self.assertEqual(len(tag_response.json()['tags']), 0)
        # test adding tags
        tags = ['tag', 'another tag']
        tag_string = ', '.join(tags)
        tag_add_response = \
            self.reader_client.add_tags_to_bookmark(bookmark_id, tag_string)
        self.assertEqual(tag_add_response.status_code, 202)
        # re-fetch tags. should have 2
        retag_response = self.reader_client.get_bookmark_tags(bookmark_id)
        self.assertEqual(retag_response.status_code, 200)
        self.assertEqual(len(retag_response.json()['tags']), 2)
        for tag in retag_response.json()['tags']:
            self.assertTrue(tag['text'] in tags)
        # test getting tags for user
        user_tag_resp = self.reader_client.get_tags()
        self.assertEqual(user_tag_resp.status_code, 200)
        self.assertEqual(len(user_tag_resp.json()['tags']), 2)
        for tag in user_tag_resp.json()['tags']:
            self.assertTrue(tag['text'] in tags)
        # test getting a single tag while we're here
        # NOTE(review): deliberately relies on `tag` still holding the last
        # value from the loop above (Python for-loops leak their variable).
        single_tag_resp = self.reader_client.get_tag(tag['id'])
        self.assertEqual(single_tag_resp.status_code, 200)
        self.assertTrue('applied_count' in single_tag_resp.json())
        self.assertTrue('id' in single_tag_resp.json())
        self.assertTrue('text' in single_tag_resp.json())
        # delete tags
        for tag in retag_response.json()['tags']:
            del_response = self.reader_client.delete_tag_from_bookmark(
                bookmark_id, tag['id'])
            self.assertEqual(del_response.status_code, 204)
        # check that tags are gone
        tag_response = self.reader_client.get_bookmark_tags(bookmark_id)
        self.assertEqual(tag_response.status_code, 200)
        self.assertEqual(len(tag_response.json()['tags']), 0)
    def _get_bookmark_data(self):
        """
        Convenience method to get a single bookmark's data.
        """
        bm_response = self.reader_client.get_bookmarks()
        self.assertEqual(bm_response.status_code, 200)
        bm_response_json = bm_response.json()
        self.assertTrue(len(bm_response_json['bookmarks']) > 0)
        return bm_response_json['bookmarks'][0]
class ReaderClientMultipleBookmarkTest(unittest.TestCase):
    """
    Tests for bookmark functionality
    """
    def setUp(self):
        """
        Add a few bookmarks.
        """
        token_key, token_secret = xauth()
        self.reader_client = ReaderClient(token_key=token_key, token_secret=token_secret)
        self.urls = [
            'http://www.theatlantic.com/technology/archive/2013/01/the-never-before-told-story-of-the-worlds-first-computer-art-its-a-sexy-dame/267439/',
            'http://www.theatlantic.com/business/archive/2013/01/why-smart-poor-students-dont-apply-to-selective-colleges-and-how-to-fix-it/272490/',
        ]
        self.favorite_urls = [
            'http://www.theatlantic.com/sexes/archive/2013/01/the-lonely-existence-of-mel-feit-mens-rights-advocate/267413/',
            'http://www.theatlantic.com/technology/archive/2013/01/women-in-combat-an-idea-whose-time-has-come-aided-by-technology/272483/'
        ]
        self.archive_urls = [
            'http://www.theatlantic.com/business/archive/2013/01/what-economics-can-and-cant-tell-us-about-the-legacy-of-legal-abortion/267459/',
            'http://www.theatlantic.com/business/archive/2013/01/5-ways-to-understand-just-how-absurd-spains-26-unemployment-rate-is/272502/'
        ]
        self.all_urls = self.urls + self.favorite_urls + self.archive_urls
        # Create each group of bookmarks with its matching add_bookmark() flags.
        batches = (
            (self.urls, {}),
            (self.favorite_urls, {'favorite': True}),
            (self.archive_urls, {'archive': True}),
        )
        for url_group, flags in batches:
            for url in url_group:
                response = self.reader_client.add_bookmark(url, **flags)
                # 201 = created, 202 = accepted/being processed
                self.assertIn(response.status_code, [201, 202])
    def test_get_bookmarks(self):
        """
        Test getting all bookmarks
        """
        all_response = self.reader_client.get_bookmarks()
        self.assertEqual(all_response.status_code, 200)
        self.assertEqual(
            len(all_response.json()['bookmarks']), len(self.all_urls))
        # test favorite bookmarks
        fav_response = self.reader_client.get_bookmarks(favorite=True)
        self.assertEqual(fav_response.status_code, 200)
        fav_bookmarks = fav_response.json()['bookmarks']
        self.assertEqual(len(fav_bookmarks), len(self.favorite_urls))
        for bookmark in fav_bookmarks:
            self.assertIn(bookmark['article']['url'], self.favorite_urls)
        # test archive bookmarks
        arc_response = self.reader_client.get_bookmarks(archive=True)
        self.assertEqual(arc_response.status_code, 200)
        arc_bookmarks = arc_response.json()['bookmarks']
        self.assertEqual(len(arc_bookmarks), len(self.archive_urls))
        for bookmark in arc_bookmarks:
            self.assertIn(bookmark['article']['url'], self.archive_urls)
    def tearDown(self):
        """
        Remove all added bookmarks.
        """
        for bookmark in self.reader_client.get_bookmarks().json()['bookmarks']:
            deletion = self.reader_client.delete_bookmark(bookmark['id'])
            self.assertEqual(deletion.status_code, 204)
# Allow running this module directly; warnings='ignore' suppresses Python
# warnings raised during the run to keep test output readable.
if __name__ == '__main__':
    unittest.main(warnings='ignore')
| |
# -*- coding: utf-8 -*-
# flake8: noqa
# tensorpack documentation build configuration file, created by
# sphinx-quickstart on Sun Mar 27 01:41:24 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os, re
import mock
import inspect
from sphinx.domains import Domain
class GithubURLDomain(Domain):
    """
    Resolve certain links in markdown files to github source.
    """
    name = "githuburl"
    ROOT = "https://github.com/tensorpack/tensorpack/blob/master/"
    def resolve_any_xref(self, env, fromdocname, builder, target, node, contnode):
        # Only plain relative repo paths qualify; .html targets and the
        # generated ../../modules pages are left for other resolvers.
        is_repo_path = (
            ".html" not in target
            and target.startswith("../../")
            and not target.startswith("../../modules")
        )
        if not is_repo_path:
            return []
        github_url = target.replace("../", "")
        if github_url.endswith("README"):
            # bug of recommonmark.
            # https://github.com/readthedocs/recommonmark/blob/ddd56e7717e9745f11300059e4268e204138a6b1/recommonmark/parser.py#L152-L155
            github_url += ".md"
        print("Ref {} resolved to github:{}".format(target, github_url))
        contnode["refuri"] = self.ROOT + github_url
        return [("githuburl:any", contnode)]
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../'))
# signal to the tensorpack code base that it is being imported for doc builds
os.environ['DOC_BUILDING'] = '1'
ON_RTD = (os.environ.get('READTHEDOCS') == 'True')
# Optional third-party modules that may be absent at doc-build time; each is
# replaced with a mock below so importing tensorpack cannot fail.
MOCK_MODULES = ['tabulate', 'h5py',
                'cv2', 'zmq', 'lmdb',
                'msgpack', 'msgpack_numpy', 'pyarrow',
                'sklearn', 'sklearn.datasets',
                'scipy', 'scipy.misc', 'scipy.io',
                'tornado', 'tornado.concurrent',
                'horovod', 'horovod.tensorflow',
                'subprocess32', 'functools32', 'psutil']
# it's better to have tensorflow installed (for some docs to show)
# but it's OK to mock it as well
try:
    import tensorflow
except ImportError:
    mod = sys.modules['tensorflow'] = mock.Mock(name='tensorflow')
    # some code checks tensorflow version attributes at import time
    mod.__version__ = mod.VERSION = '1.12'
    MOCK_MODULES.extend(['tensorflow.python.training.monitored_session'])
    MOCK_MODULES.extend(['tensorflow.python.training'])
    MOCK_MODULES.extend(['tensorflow.python.client'])
    MOCK_MODULES.extend(['tensorflow.python.framework'])
    MOCK_MODULES.extend(['tensorflow.python.platform'])
    MOCK_MODULES.extend(['tensorflow.python.tools'])
    MOCK_MODULES.extend(['tensorflow.contrib.graph_editor'])
# install the mocks so any "import <name>" inside tensorpack succeeds
for mod_name in MOCK_MODULES:
    sys.modules[mod_name] = mock.Mock(name=mod_name)
# version attributes are inspected at import time, so give plausible values
sys.modules['cv2'].__version__ = '3.2.1' # fake version
sys.modules['msgpack'].version = (0, 5, 2)
import tensorpack
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = '3.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'recommonmark',
'sphinx.ext.autodoc',
'sphinx.ext.todo',
'sphinx.ext.napoleon',
#'sphinx.ext.autosectionlabel',
#'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx.ext.intersphinx',
'sphinx.ext.viewcode',
]
# -- Configurations for plugins ------------
napoleon_google_docstring = True
napoleon_include_init_with_doc = True
napoleon_include_special_with_doc = True
napoleon_numpy_docstring = False
napoleon_use_rtype = False
if ON_RTD:
intersphinx_timeout = 10
else:
# skip this when building locally
intersphinx_timeout = 0.1
intersphinx_mapping = {
'python': ('https://docs.python.org/3.6', None),
'numpy': ('https://docs.scipy.org/doc/numpy/', None),
}
# -------------------------
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
source_suffix = ['.rst', '.md']
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'tensorpack'
copyright = u'2015 - 2020, Yuxin Wu, et al.'
author = u'Yuxin Wu, et al.'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = tensorpack.__version__
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
# NOTE(review): newer Sphinx (>= 5) warns on `language = None` and expects
# 'en' instead — confirm against the pinned Sphinx version.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['build', 'README.md']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = True
# 'tensorpack.' prefix was removed by js
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
modindex_common_prefix = ['tensorpack.']
# If true, keep warnings as "system message" paragraphs in the built documents.
keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
import sphinx_rtd_theme
html_theme = "sphinx_rtd_theme"
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {}
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = '_static/favicon.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
html_domain_indices = True
# If false, no index is generated.
html_use_index = True
# If true, the index is split into individual pages for each letter.
html_split_index = False
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = False
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
html_show_sphinx = False
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# avoid li fonts being larger
# TODO but li indices fonts are still larger
html_compact_lists = False
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'tensorpackdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
# `master_doc` and `author` are presumably defined earlier in this conf.py
# (not visible in this chunk).
latex_documents = [
    (master_doc, 'tensorpack.tex', u'tensorpack documentation',
     author, 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'tensorpack', u'tensorpack documentation',
     [author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (master_doc, 'tensorpack', u'tensorpack documentation',
     author, 'tensorpack', 'One line description of project.',
     'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# the docs embed remote images on purpose; silence the Sphinx warning
suppress_warnings = ['image.nonlocal_uri']
#autodoc_member_order = 'bysource'
def process_signature(app, what, name, obj, options, signature,
                      return_annotation):
    """Sphinx ``autodoc-process-signature`` hook.

    Cleans up autodoc signatures: strips ``<Mock ...>`` placeholders left by
    the import-mocking machinery, abbreviates ``tensorflow`` to ``tf``, and
    prepends the variable-scope argument to tensorpack layer signatures
    (layers expose a ``use_scope`` attribute).

    Returns the (possibly rewritten) ``(signature, return_annotation)`` pair.
    """
    if signature:
        # replace Mock function names
        # (raw strings: '\g<1>' as a plain literal is an invalid escape
        # sequence, deprecated and a future SyntaxError)
        signature = re.sub(r"<Mock name='([^']+)'.*>", r'\g<1>', signature)
        signature = re.sub('tensorflow', 'tf', signature)
        # add scope name to layer signatures:
        if hasattr(obj, 'use_scope'):
            if obj.use_scope:
                # mandatory scope: insert right after the opening parenthesis
                signature = signature[0] + 'variable_scope_name, ' + signature[1:]
            elif obj.use_scope is None:
                # optional scope
                signature = signature[0] + '[variable_scope_name,] ' + signature[1:]
    # signature: arg list
    return signature, return_annotation
_DEPRECATED_NAMES = set([
# deprecated stuff:
'QueueInputTrainer',
'dump_dataflow_to_process_queue',
'DistributedTrainerReplicated',
'DistributedTrainerParameterServer',
'Augmentor',
"get_model_loader",
# renamed items that should not appear in docs
'load_chkpt_vars',
'save_chkpt_vars',
'DumpTensor',
'DumpParamAsImage',
'get_nr_gpu',
'TrainingMonitor',
'PeakMemoryTracker',
'TowerFuncWrapper',
'PrefetchData',
'MultiProcessPrefetchData',
'PrefetchDataZMQ',
'MultiThreadPrefetchData',
# deprecated or renamed symbolic code
'Deconv2D',
# shouldn't appear in doc:
'l2_regularizer', 'l1_regularizer',
# internal only
'execute_only_once',
'humanize_time_delta',
'SessionUpdate',
'get_checkpoint_path',
'IterSpeedCounter'
])
def autodoc_skip_member(app, what, name, obj, skip, options):
    """Sphinx ``autodoc-skip-member`` hook.

    Returns True to force-skip a member, False to force-include it, or
    None to defer to autodoc's default decision.
    """
    # explicit opt-out marker set on objects by the project
    if getattr(obj, '__HIDE_SPHINX_DOC__', False):
        return True
    # include_init_with_doc doesn't work well for decorated init
    # https://github.com/sphinx-doc/sphinx/issues/4258
    if name == '__init__' and skip and obj.__doc__:
        return False
    # Hide some names that are deprecated or not intended to be used
    if name in _DEPRECATED_NAMES:
        return True
    # skip these well-known methods when they carry no docstring of their own
    if name in ('__iter__', '__len__', 'reset_state', 'get_data', 'size'):
        if inspect.isfunction(obj) and not obj.__doc__:
            # recover the defining class from the function's qualname
            # https://stackoverflow.com/questions/3589311/get-defining-class-of-unbound-method-object-in-python-3
            qualname = obj.__qualname__.split('.<locals>', 1)[0]
            owner = getattr(inspect.getmodule(obj), qualname.rsplit('.', 1)[0])
            if issubclass(owner, tensorpack.DataFlow):
                return True
    return None
def setup(app):
    """Sphinx extension entry point for this conf.py.

    Registers the project-local GithubURLDomain, the autodoc hooks defined
    above, and recommonmark's AutoStructify transform for Markdown sources.
    """
    from recommonmark.transform import AutoStructify
    app.add_domain(GithubURLDomain)
    app.connect('autodoc-process-signature', process_signature)
    app.connect('autodoc-skip-member', autodoc_skip_member)
    # third positional arg is the `rebuild` flag for this config value
    app.add_config_value(
        'recommonmark_config',
        {'auto_toc_tree_section': 'Contents',
         'enable_math': True,
         'enable_inline_math': True,
         'enable_eval_rst': True
         }, True)
    app.add_transform(AutoStructify)
| |
#!/usr/bin/env python
from __future__ import print_function
from builtins import input
from builtins import range
import sys
import pmagpy.pmag as pmag
import pmagpy.pmagplotlib as pmagplotlib
def main():
    """
    NAME
        site_edit_magic.py

    DESCRIPTION
        makes equal area projections site by site
        from pmag_specimens.txt file with
        Fisher confidence ellipse using McFadden and McElhinny (1988)
        technique for combining lines and planes
        allows testing and reject specimens for bad orientations

    SYNTAX
        site_edit_magic.py [command line options]

    OPTIONS
        -h: prints help and quits
        -f: specify pmag_specimen format file, default is pmag_specimens.txt
        -fsa: specify er_samples.txt file
        -exc: use existing pmag_criteria.txt file
        -N: reset all sample flags to good

    OUTPUT
        edited er_samples.txt file
    """
    # --- defaults ---
    dir_path='.'
    FIG={} # plot dictionary
    FIG['eqarea']=1 # eqarea is figure 1
    in_file='pmag_specimens.txt'
    sampfile='er_samples.txt'
    out_file=""
    fmt,plot='svg',1
    Crits=""
    # acceptance criteria: max MAD (M) and min number of measurements (N);
    # defaults effectively accept everything
    M,N=180.,1
    repeat=''
    renew=0
    # --- command line parsing ---
    if '-h' in sys.argv:
        print(main.__doc__)
        sys.exit()
    if '-WD' in sys.argv:
        ind=sys.argv.index('-WD')
        dir_path=sys.argv[ind+1]
    if '-f' in sys.argv:
        ind=sys.argv.index("-f")
        in_file=sys.argv[ind+1]
    if '-fsa' in sys.argv:
        ind=sys.argv.index("-fsa")
        sampfile=sys.argv[ind+1]
    if '-exc' in sys.argv:
        # pull the DE-SPEC acceptance thresholds from pmag_criteria.txt
        Crits,file_type=pmag.magic_read(dir_path+'/pmag_criteria.txt')
        for crit in Crits:
            if crit['pmag_criteria_code']=='DE-SPEC':
                M=float(crit['specimen_mad'])
                N=float(crit['specimen_n'])
    if '-fmt' in sys.argv:
        ind=sys.argv.index("-fmt")
        fmt=sys.argv[ind+1]
    if '-N' in sys.argv: renew=1
    #
    # prepend the working directory unless an absolute path was given
    if in_file[0]!="/":in_file=dir_path+'/'+in_file
    if sampfile[0]!="/":sampfile=dir_path+'/'+sampfile
    crd='s'
    Specs,file_type=pmag.magic_read(in_file)
    if file_type!='pmag_specimens':
        print(' bad pmag_specimen input file')
        sys.exit()
    Samps,file_type=pmag.magic_read(sampfile)
    if file_type!='er_samples':
        print(' bad er_samples input file')
        sys.exit()
    SO_methods=[]
    for rec in Samps:
        # make sure every sample record carries the keys edited below
        if 'sample_orientation_flag' not in list(rec.keys()): rec['sample_orientation_flag']='g'
        if 'sample_description' not in list(rec.keys()): rec['sample_description']=''
        if renew==1:
            # -N: reset flag to good and strip any old '## ... ##' comment
            rec['sample_orientation_flag']='g'
            description=rec['sample_description']
            if '#' in description:
                newdesc=""
                c=0
                while description[c]!='#' and c<len(description)-1: # look for first pound sign
                    newdesc=newdesc+description[c]
                    c+=1
                while description[c]=='#':
                    c+=1# skip first set of pound signs
                while description[c]!='#':c+=1 # find second set of pound signs
                while description[c]=='#' and c<len(description)-1:c+=1 # skip second set of pound signs
                while c<len(description)-1: # look for first pound sign
                    newdesc=newdesc+description[c]
                    c+=1
                rec['sample_description']=newdesc # edit out old comment about orientations
        # collect the sample-orientation (SO) method codes present in the file
        if "magic_method_codes" in rec:
            methlist=rec["magic_method_codes"]
            for meth in methlist.split(":"):
                if "SO" in meth.strip() and "SO-POM" not in meth.strip():
                    if meth.strip() not in SO_methods: SO_methods.append(meth.strip())
    pmag.magic_write(sampfile,Samps,'er_samples')
    SO_priorities=pmag.set_priorities(SO_methods,0)
    sitelist=[]
    for rec in Specs:
        if rec['er_site_name'] not in sitelist: sitelist.append(rec['er_site_name'])
    sitelist.sort()
    EQ={}
    EQ['eqarea']=1
    pmagplotlib.plot_init(EQ['eqarea'],5,5)
    k=0
    # --- interactive site-by-site loop (k is the current site index) ---
    while k<len(sitelist):
        site=sitelist[k]
        print(site)
        data=[]
        ThisSiteSpecs=pmag.get_dictitem(Specs,'er_site_name',site,'T')
        ThisSiteSpecs=pmag.get_dictitem(ThisSiteSpecs,'specimen_tilt_correction','-1','T') # get all the unoriented data
        for spec in ThisSiteSpecs:
            if spec['specimen_mad']!="" and spec['specimen_n']!="" and float(spec['specimen_mad'])<=M and float(spec['specimen_n'])>=N:
                # good spec, now get orientation....
                redo,p=1,0
                # NOTE(review): if SO_methods is empty, SO_methods[0] below
                # raises IndexError (condition is <=1, not ==1) — confirm
                # inputs always carry at least one SO method code
                if len(SO_methods)<=1:
                    az_type=SO_methods[0]
                    orient=pmag.find_samp_rec(spec["er_sample_name"],Samps,az_type)
                    redo=0
                while redo==1:
                    if p>=len(SO_priorities):
                        print("no orientation data for ",spec['er_sample_name'])
                        orient["sample_azimuth"]=""
                        orient["sample_dip"]=""
                        redo=0
                    else:
                        # try orientation methods in priority order
                        az_type=SO_methods[SO_methods.index(SO_priorities[p])]
                        orient=pmag.find_samp_rec(spec["er_sample_name"],Samps,az_type)
                        if orient["sample_azimuth"] !="":
                            redo=0
                        p+=1
                if orient['sample_azimuth']!="":
                    # rotate specimen coordinates to geographic coordinates
                    rec={}
                    for key in list(spec.keys()):rec[key]=spec[key]
                    rec['dec'],rec['inc']=pmag.dogeo(float(spec['specimen_dec']),float(spec['specimen_inc']),float(orient['sample_azimuth']),float(orient['sample_dip']))
                    rec["tilt_correction"]='1'
                    crd='g'
                    rec['sample_azimuth']=orient['sample_azimuth']
                    rec['sample_dip']=orient['sample_dip']
                    data.append(rec)
        if len(data)>2:
            print('specimen, dec, inc, n_meas/MAD,| method codes ')
            for i in range(len(data)):
                print('%s: %7.1f %7.1f %s / %s | %s' % (data[i]['er_specimen_name'], data[i]['dec'], data[i]['inc'], data[i]['specimen_n'], data[i]['specimen_mad'], data[i]['magic_method_codes']))
            # combined lines-and-planes site mean (McFadden & McElhinny 1988)
            fpars=pmag.dolnp(data,'specimen_direction_type')
            print("\n Site lines planes  kappa   a95   dec   inc")
            print(site, fpars["n_lines"], fpars["n_planes"], fpars["K"], fpars["alpha95"], fpars["dec"], fpars["inc"], fpars["R"])
            if out_file!="":
                # NOTE(review): dead code — out_file is always "" in this
                # script, and `out`, `acutoff`, `kcutoff` are never defined,
                # so this branch would raise NameError if ever reached
                if float(fpars["alpha95"])<=acutoff and float(fpars["K"])>=kcutoff:
                    out.write('%s %s %s\n'%(fpars["dec"],fpars['inc'],fpars['alpha95']))
            pmagplotlib.plotLNP(EQ['eqarea'],site,data,fpars,'specimen_direction_type')
            pmagplotlib.drawFIGS(EQ)
            if k!=0 and repeat!='y':
                ans=input("s[a]ve plot, [q]uit, [e]dit specimens, [p]revious site, <return> to continue:\n ")
            elif k==0 and repeat!='y':
                ans=input("s[a]ve plot, [q]uit, [e]dit specimens, <return> to continue:\n ")
            if ans=="p": k-=2
            if ans=="a":
                files={}
                files['eqarea']=site+'_'+crd+'_eqarea'+'.'+fmt
                pmagplotlib.saveP(EQ,files)
            if ans=="q": sys.exit()
            if ans=="e" and Samps==[]:
                print("can't edit samples without orientation file, sorry")
            elif ans=="e":
                # k-=1
                testspec=input("Enter name of specimen to check: ")
                for spec in data:
                    if spec['er_specimen_name']==testspec:
                        # --- overlay diagnostics for common orientation errors ---
                        # first test wrong direction of drill arrows (flip drill direction in opposite direction and re-calculate d,i
                        d,i=pmag.dogeo(float(spec['specimen_dec']),float(spec['specimen_inc']),float(spec['sample_azimuth'])-180.,-float(spec['sample_dip']))
                        XY=pmag.dimap(d,i)
                        pmagplotlib.plotXY(EQ['eqarea'],[XY[0]],[XY[1]],sym='g^')
                        # first test wrong end of compass (take az-180.)
                        d,i=pmag.dogeo(float(spec['specimen_dec']),float(spec['specimen_inc']),float(spec['sample_azimuth'])-180.,float(spec['sample_dip']))
                        XY=pmag.dimap(d,i)
                        pmagplotlib.plotXY(EQ['eqarea'],[XY[0]],[XY[1]],sym='kv')
                        # did the sample spin in the hole?
                        # now spin around specimen's z
                        X_up,Y_up,X_d,Y_d=[],[],[],[]
                        for incr in range(0,360,5):
                            d,i=pmag.dogeo(float(spec['specimen_dec'])+incr,float(spec['specimen_inc']),float(spec['sample_azimuth']),float(spec['sample_dip']))
                            XY=pmag.dimap(d,i)
                            if i>=0:
                                X_d.append(XY[0])
                                Y_d.append(XY[1])
                            else:
                                X_up.append(XY[0])
                                Y_up.append(XY[1])
                        pmagplotlib.plotXY(EQ['eqarea'],X_d,Y_d,sym='b.')
                        pmagplotlib.plotXY(EQ['eqarea'],X_up,Y_up,sym='c.')
                        pmagplotlib.drawFIGS(EQ)
                        break
                print("Triangle: wrong arrow for drill direction.")
                print("Delta: wrong end of compass.")
                print("Small circle: wrong mark on sample. [cyan upper hemisphere]")
                deleteme=input("Mark this sample as bad? y/[n] ")
                if deleteme=='y':
                    reason=input("Reason: [1] broke, [2] wrong drill direction, [3] wrong compass direction, [4] bad mark, [5] displaced block [6] other ")
                    if reason=='1':
                        description=' sample broke while drilling'
                    if reason=='2':
                        description=' wrong drill direction '
                    if reason=='3':
                        description=' wrong compass direction '
                    if reason=='4':
                        description=' bad mark in field'
                    if reason=='5':
                        description=' displaced block'
                    if reason=='6':
                        description=input('Enter brief reason for deletion: ')
                    # flag the sample bad and append the reason to its description
                    # (relies on `spec` leaking out of the for-loop above)
                    for samp in Samps:
                        if samp['er_sample_name']==spec['er_sample_name']:
                            samp['sample_orientation_flag']='b'
                            samp['sample_description']=samp['sample_description']+' ## direction deleted because: '+description+'##' # mark description
                    pmag.magic_write(sampfile,Samps,'er_samples')
                repeat=input("Mark another sample, this site? y/[n] ")
                if repeat=='y': k-=1
        else:
            print('skipping site - not enough data with specified coordinate system')
        k+=1
    print("sample flags stored in ",sampfile)
# standard CLI entry point
if __name__ == "__main__":
    main()
| |
"""Testing for K-means"""
import sys
import numpy as np
from scipy import sparse as sp
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import if_not_mac_os
from sklearn.utils.extmath import row_norms
from sklearn.metrics.cluster import v_measure_score
from sklearn.cluster import KMeans, k_means
from sklearn.cluster import MiniBatchKMeans
from sklearn.cluster.k_means_ import _labels_inertia
from sklearn.cluster.k_means_ import _mini_batch_step
from sklearn.datasets.samples_generator import make_blobs
from sklearn.externals.six.moves import cStringIO as StringIO
# Non-centered, sparse-ish cluster centers used as the shared fixture for
# the dense vs. sparse consistency checks in the tests below.
centers = np.array([
    [0.0, 5.0, 0.0, 0.0, 0.0],
    [1.0, 1.0, 4.0, 0.0, 0.0],
    [1.0, 0.0, 0.0, 5.0, 1.0],
])
n_samples = 100
n_clusters, n_features = centers.shape
# blobs drawn around the centers above; X_csr is the same data in CSR form
X, true_labels = make_blobs(n_samples=n_samples, centers=centers,
                            cluster_std=1., random_state=42)
X_csr = sp.csr_matrix(X)
def test_kmeans_dtype():
    """Fitting on uint8 data warns at predict time, yet labels agree."""
    rng = np.random.RandomState(0)
    data = (rng.normal(size=(40, 2)) * 10).astype(np.uint8)
    estimator = KMeans(n_init=1).fit(data)
    # predicting on the low-precision input must warn but match labels_
    predictions = assert_warns(RuntimeWarning, estimator.predict, data)
    assert_array_equal(estimator.labels_, predictions)
def test_labels_assignment_and_inertia():
    """Compare _labels_inertia against a plain-numpy reference.

    Both the dense and the CSR input paths must reproduce the labels and
    inertia of an easily auditable brute-force assignment.
    """
    rng = np.random.RandomState(42)
    noisy_centers = centers + rng.normal(size=centers.shape)
    # `int` / `np.inf`: the `np.int` and `np.infty` aliases were removed
    # from numpy (1.24 and 2.0 respectively); values are identical
    labels_gold = - np.ones(n_samples, dtype=int)
    mindist = np.empty(n_samples)
    mindist.fill(np.inf)
    # brute-force nearest-center assignment as the gold reference
    for center_id in range(n_clusters):
        dist = np.sum((X - noisy_centers[center_id]) ** 2, axis=1)
        labels_gold[dist < mindist] = center_id
        mindist = np.minimum(dist, mindist)
    inertia_gold = mindist.sum()
    assert_true((mindist >= 0.0).all())
    assert_true((labels_gold != -1).all())
    # perform label assignment using the dense array input
    x_squared_norms = (X ** 2).sum(axis=1)
    labels_array, inertia_array = _labels_inertia(
        X, x_squared_norms, noisy_centers)
    assert_array_almost_equal(inertia_array, inertia_gold)
    assert_array_equal(labels_array, labels_gold)
    # perform label assignment using the sparse CSR input
    x_squared_norms_from_csr = row_norms(X_csr, squared=True)
    labels_csr, inertia_csr = _labels_inertia(
        X_csr, x_squared_norms_from_csr, noisy_centers)
    assert_array_almost_equal(inertia_csr, inertia_gold)
    assert_array_equal(labels_csr, labels_gold)
def test_minibatch_update_consistency():
    """Check that dense and sparse minibatch update give the same results"""
    # NOTE: `buffer` below shadows the builtin name; kept as-is.
    rng = np.random.RandomState(42)
    old_centers = centers + rng.normal(size=centers.shape)
    new_centers = old_centers.copy()
    new_centers_csr = old_centers.copy()
    counts = np.zeros(new_centers.shape[0], dtype=np.int32)
    counts_csr = np.zeros(new_centers.shape[0], dtype=np.int32)
    x_squared_norms = (X ** 2).sum(axis=1)
    x_squared_norms_csr = row_norms(X_csr, squared=True)
    buffer = np.zeros(centers.shape[1], dtype=np.double)
    buffer_csr = np.zeros(centers.shape[1], dtype=np.double)
    # extract a small minibatch
    X_mb = X[:10]
    X_mb_csr = X_csr[:10]
    x_mb_squared_norms = x_squared_norms[:10]
    x_mb_squared_norms_csr = x_squared_norms_csr[:10]
    # step 1: compute the dense minibatch update
    old_inertia, incremental_diff = _mini_batch_step(
        X_mb, x_mb_squared_norms, new_centers, counts,
        buffer, 1, None, random_reassign=False)
    assert_greater(old_inertia, 0.0)
    # compute the new inertia on the same batch to check that it decreased
    labels, new_inertia = _labels_inertia(
        X_mb, x_mb_squared_norms, new_centers)
    assert_greater(new_inertia, 0.0)
    assert_less(new_inertia, old_inertia)
    # check that the incremental difference computation is matching the
    # final observed value
    effective_diff = np.sum((new_centers - old_centers) ** 2)
    assert_almost_equal(incremental_diff, effective_diff)
    # step 2: compute the sparse minibatch update
    old_inertia_csr, incremental_diff_csr = _mini_batch_step(
        X_mb_csr, x_mb_squared_norms_csr, new_centers_csr, counts_csr,
        buffer_csr, 1, None, random_reassign=False)
    assert_greater(old_inertia_csr, 0.0)
    # compute the new inertia on the same batch to check that it decreased
    labels_csr, new_inertia_csr = _labels_inertia(
        X_mb_csr, x_mb_squared_norms_csr, new_centers_csr)
    assert_greater(new_inertia_csr, 0.0)
    assert_less(new_inertia_csr, old_inertia_csr)
    # check that the incremental difference computation is matching the
    # final observed value
    effective_diff = np.sum((new_centers_csr - old_centers) ** 2)
    assert_almost_equal(incremental_diff_csr, effective_diff)
    # step 3: check that sparse and dense updates lead to the same results
    assert_array_equal(labels, labels_csr)
    assert_array_almost_equal(new_centers, new_centers_csr)
    assert_almost_equal(incremental_diff, incremental_diff_csr)
    assert_almost_equal(old_inertia, old_inertia_csr)
    assert_almost_equal(new_inertia, new_inertia_csr)
def _check_fitted_model(km):
    # Shared post-fit sanity checks reused by most tests below.
    # check that the number of clusters centers and distinct labels match
    # the expectation
    centers = km.cluster_centers_
    assert_equal(centers.shape, (n_clusters, n_features))
    labels = km.labels_
    assert_equal(np.unique(labels).shape[0], n_clusters)
    # check that the labels assignment are perfect (up to a permutation)
    assert_equal(v_measure_score(true_labels, labels), 1.0)
    assert_greater(km.inertia_, 0.0)
    # check error on dataset being too small
    assert_raises(ValueError, km.fit, [[0., 1.]])
def test_k_means_plus_plus_init():
    # k-means++ init on the dense blob data recovers the true clustering
    km = KMeans(init="k-means++", n_clusters=n_clusters,
                random_state=42).fit(X)
    _check_fitted_model(km)
def test_k_means_new_centers():
    # Explore the part of the code where a new center is reassigned
    X = np.array([[0, 0, 1, 1],
                  [0, 0, 0, 0],
                  [0, 1, 0, 0],
                  [0, 0, 0, 0],
                  [0, 0, 0, 0],
                  [0, 1, 0, 0]])
    labels = [0, 1, 2, 1, 1, 2]
    bad_centers = np.array([[+0, 1, 0, 0],
                            [.2, 0, .2, .2],
                            [+0, 0, 0, 0]])
    km = KMeans(n_clusters=3, init=bad_centers, n_init=1, max_iter=10,
                random_state=1)
    for this_X in (X, sp.coo_matrix(X)):
        km.fit(this_X)
        this_labels = km.labels_
        # Reorder the labels so that the first instance is in cluster 0,
        # the second in cluster 1, ...
        this_labels = np.unique(this_labels, return_index=True)[1][this_labels]
        np.testing.assert_array_equal(this_labels, labels)
def _has_blas_lib(libname):
    # True when numpy's optimized BLAS was linked against `libname`.
    # NOTE(review): numpy.distutils is deprecated/removed on recent
    # numpy + Python — confirm the supported numpy range.
    from numpy.distutils.system_info import get_info
    return libname in get_info('blas_opt').get('libraries', [])
@if_not_mac_os()
def test_k_means_plus_plus_init_2_jobs():
    # same as the k-means++ test but exercising the n_jobs=2 code path
    if _has_blas_lib('openblas'):
        raise SkipTest('Multi-process bug with OpenBLAS (see issue #636)')
    km = KMeans(init="k-means++", n_clusters=n_clusters, n_jobs=2,
                random_state=42).fit(X)
    _check_fitted_model(km)
def test_k_means_precompute_distances_flag():
    # check that a warning is raised if the precompute_distances flag is not
    # supported
    km = KMeans(precompute_distances="wrong")
    assert_raises(ValueError, km.fit, X)
def test_k_means_plus_plus_init_sparse():
    # k-means++ init accepts CSR input
    km = KMeans(init="k-means++", n_clusters=n_clusters, random_state=42)
    km.fit(X_csr)
    _check_fitted_model(km)
def test_k_means_random_init():
    # random init on dense data
    km = KMeans(init="random", n_clusters=n_clusters, random_state=42)
    km.fit(X)
    _check_fitted_model(km)
def test_k_means_random_init_sparse():
    # random init on CSR data
    km = KMeans(init="random", n_clusters=n_clusters, random_state=42)
    km.fit(X_csr)
    _check_fitted_model(km)
def test_k_means_plus_plus_init_not_precomputed():
    # k-means++ with the precomputed-distances fast path disabled
    km = KMeans(init="k-means++", n_clusters=n_clusters, random_state=42,
                precompute_distances=False).fit(X)
    _check_fitted_model(km)
def test_k_means_random_init_not_precomputed():
    # random init with the precomputed-distances fast path disabled
    km = KMeans(init="random", n_clusters=n_clusters, random_state=42,
                precompute_distances=False).fit(X)
    _check_fitted_model(km)
def test_k_means_perfect_init():
    # initializing at the true centers must yield a valid fitted model
    km = KMeans(init=centers.copy(), n_clusters=n_clusters, random_state=42,
                n_init=1)
    km.fit(X)
    _check_fitted_model(km)
def test_k_means_n_init():
    rnd = np.random.RandomState(0)
    X = rnd.normal(size=(40, 2))
    # two regression tests on bad n_init argument
    # previous bug: n_init <= 0 threw non-informative TypeError (#3858)
    assert_raises_regexp(ValueError, "n_init", KMeans(n_init=0).fit, X)
    assert_raises_regexp(ValueError, "n_init", KMeans(n_init=-1).fit, X)
def test_mb_k_means_plus_plus_init_dense_array():
    # MiniBatchKMeans, k-means++ init, dense input
    mb_k_means = MiniBatchKMeans(init="k-means++", n_clusters=n_clusters,
                                 random_state=42)
    mb_k_means.fit(X)
    _check_fitted_model(mb_k_means)
def test_mb_kmeans_verbose():
    # smoke test the verbose code path; stdout is restored afterwards
    mb_k_means = MiniBatchKMeans(init="k-means++", n_clusters=n_clusters,
                                 random_state=42, verbose=1)
    old_stdout = sys.stdout
    sys.stdout = StringIO()
    try:
        mb_k_means.fit(X)
    finally:
        sys.stdout = old_stdout
def test_mb_k_means_plus_plus_init_sparse_matrix():
    # MiniBatchKMeans, k-means++ init, CSR input
    mb_k_means = MiniBatchKMeans(init="k-means++", n_clusters=n_clusters,
                                 random_state=42)
    mb_k_means.fit(X_csr)
    _check_fitted_model(mb_k_means)
def test_minibatch_init_with_large_k():
    mb_k_means = MiniBatchKMeans(init='k-means++', init_size=10, n_clusters=20)
    # Check that a warning is raised, as the number clusters is larger
    # than the init_size
    assert_warns(RuntimeWarning, mb_k_means.fit, X)
def test_minibatch_k_means_random_init_dense_array():
    # increase n_init to make random init stable enough
    mb_k_means = MiniBatchKMeans(init="random", n_clusters=n_clusters,
                                 random_state=42, n_init=10).fit(X)
    _check_fitted_model(mb_k_means)
def test_minibatch_k_means_random_init_sparse_csr():
    # increase n_init to make random init stable enough
    mb_k_means = MiniBatchKMeans(init="random", n_clusters=n_clusters,
                                 random_state=42, n_init=10).fit(X_csr)
    _check_fitted_model(mb_k_means)
def test_minibatch_k_means_perfect_init_dense_array():
    # explicit (true) centers as init, dense input
    mb_k_means = MiniBatchKMeans(init=centers.copy(), n_clusters=n_clusters,
                                 random_state=42, n_init=1).fit(X)
    _check_fitted_model(mb_k_means)
def test_minibatch_k_means_init_multiple_runs_with_explicit_centers():
    # explicit centers combined with n_init > 1 is contradictory: must warn
    mb_k_means = MiniBatchKMeans(init=centers.copy(), n_clusters=n_clusters,
                                 random_state=42, n_init=10)
    assert_warns(RuntimeWarning, mb_k_means.fit, X)
def test_minibatch_k_means_perfect_init_sparse_csr():
    # explicit (true) centers as init, CSR input
    mb_k_means = MiniBatchKMeans(init=centers.copy(), n_clusters=n_clusters,
                                 random_state=42, n_init=1).fit(X_csr)
    _check_fitted_model(mb_k_means)
def test_minibatch_sensible_reassign_fit():
    # check if identical initial clusters are reassigned
    # also a regression test for when there are more desired reassignments than
    # samples.
    zeroed_X, true_labels = make_blobs(n_samples=100, centers=5,
                                       cluster_std=1., random_state=42)
    zeroed_X[::2, :] = 0
    mb_k_means = MiniBatchKMeans(n_clusters=20, batch_size=10, random_state=42,
                                 verbose=10, init="random")
    mb_k_means.fit(zeroed_X)
    # there should not be too many exact zero cluster centers
    assert_greater(mb_k_means.cluster_centers_.any(axis=1).sum(), 10)
    # do the same with batch-size > X.shape[0] (regression test)
    mb_k_means = MiniBatchKMeans(n_clusters=20, batch_size=201,
                                 random_state=42, verbose=10, init="random")
    mb_k_means.fit(zeroed_X)
    # there should not be too many exact zero cluster centers
    assert_greater(mb_k_means.cluster_centers_.any(axis=1).sum(), 10)
def test_minibatch_sensible_reassign_partial_fit():
    # same reassignment check as above, via repeated partial_fit calls
    zeroed_X, true_labels = make_blobs(n_samples=n_samples, centers=5,
                                       cluster_std=1., random_state=42)
    zeroed_X[::2, :] = 0
    mb_k_means = MiniBatchKMeans(n_clusters=20, random_state=42, verbose=10,
                                 init="random")
    for i in range(100):
        mb_k_means.partial_fit(zeroed_X)
    # there should not be too many exact zero cluster centers
    assert_greater(mb_k_means.cluster_centers_.any(axis=1).sum(), 10)
def test_minibatch_reassign():
    # Give a perfect initialization, but a large reassignment_ratio,
    # as a result all the centers should be reassigned and the model
    # should not longer be good
    for this_X in (X, X_csr):
        mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, batch_size=100,
                                     random_state=42)
        mb_k_means.fit(this_X)
        score_before = mb_k_means.score(this_X)
        try:
            old_stdout = sys.stdout
            sys.stdout = StringIO()
            # Turn on verbosity to smoke test the display code
            _mini_batch_step(this_X, (X ** 2).sum(axis=1),
                             mb_k_means.cluster_centers_,
                             mb_k_means.counts_,
                             np.zeros(X.shape[1], np.double),
                             False, distances=np.zeros(X.shape[0]),
                             random_reassign=True, random_state=42,
                             reassignment_ratio=1, verbose=True)
        finally:
            sys.stdout = old_stdout
        # forced reassignment must have degraded the (perfectly fitted) model
        assert_greater(score_before, mb_k_means.score(this_X))
    # Give a perfect initialization, with a small reassignment_ratio,
    # no center should be reassigned
    for this_X in (X, X_csr):
        mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, batch_size=100,
                                     init=centers.copy(),
                                     random_state=42, n_init=1)
        mb_k_means.fit(this_X)
        clusters_before = mb_k_means.cluster_centers_
        # Turn on verbosity to smoke test the display code
        _mini_batch_step(this_X, (X ** 2).sum(axis=1),
                         mb_k_means.cluster_centers_,
                         mb_k_means.counts_,
                         np.zeros(X.shape[1], np.double),
                         False, distances=np.zeros(X.shape[0]),
                         random_reassign=True, random_state=42,
                         reassignment_ratio=1e-15)
        assert_array_almost_equal(clusters_before, mb_k_means.cluster_centers_)
def test_minibatch_with_many_reassignments():
    # Test for the case that the number of clusters to reassign is bigger
    # than the batch_size
    n_samples = 550
    rnd = np.random.RandomState(42)
    X = rnd.uniform(size=(n_samples, 10))
    # Check that the fit works if n_clusters is bigger than the batch_size.
    # Run the test with 550 clusters and 550 samples, because it turned out
    # that this values ensure that the number of clusters to reassign
    # is always bigger than the batch_size
    n_clusters = 550
    MiniBatchKMeans(n_clusters=n_clusters,
                    batch_size=100,
                    init_size=n_samples,
                    random_state=42).fit(X)
def test_sparse_mb_k_means_callable_init():
    # a callable `init` must be supported on sparse input
    def test_init(X, k, random_state):
        return centers
    # Small test to check that giving the wrong number of centers
    # raises a meaningful error
    assert_raises(ValueError,
                  MiniBatchKMeans(init=test_init, random_state=42).fit, X_csr)
    # Now check that the fit actually works
    mb_k_means = MiniBatchKMeans(n_clusters=3, init=test_init,
                                 random_state=42).fit(X_csr)
    _check_fitted_model(mb_k_means)
def test_mini_batch_k_means_random_init_partial_fit():
    km = MiniBatchKMeans(n_clusters=n_clusters, init="random", random_state=42)
    # use the partial_fit API for online learning
    for X_minibatch in np.array_split(X, 10):
        km.partial_fit(X_minibatch)
    # compute the labeling on the complete dataset
    labels = km.predict(X)
    assert_equal(v_measure_score(true_labels, labels), 1.0)
def test_minibatch_default_init_size():
    # when init_size is not given it defaults to 3 * batch_size
    mb_k_means = MiniBatchKMeans(init=centers.copy(), n_clusters=n_clusters,
                                 batch_size=10, random_state=42,
                                 n_init=1).fit(X)
    assert_equal(mb_k_means.init_size_, 3 * mb_k_means.batch_size)
    _check_fitted_model(mb_k_means)
def test_minibatch_tol():
    # early stopping via tol still yields a valid fitted model
    mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, batch_size=10,
                                 random_state=42, tol=.01).fit(X)
    _check_fitted_model(mb_k_means)
def test_minibatch_set_init_size():
    # an init_size larger than n_samples is clipped to n_samples
    mb_k_means = MiniBatchKMeans(init=centers.copy(), n_clusters=n_clusters,
                                 init_size=666, random_state=42,
                                 n_init=1).fit(X)
    assert_equal(mb_k_means.init_size, 666)
    assert_equal(mb_k_means.init_size_, n_samples)
    _check_fitted_model(mb_k_means)
def test_k_means_invalid_init():
km = KMeans(init="invalid", n_init=1, n_clusters=n_clusters)
assert_raises(ValueError, km.fit, X)
def test_mini_match_k_means_invalid_init():
km = MiniBatchKMeans(init="invalid", n_init=1, n_clusters=n_clusters)
assert_raises(ValueError, km.fit, X)
def test_k_means_copyx():
    """copy_x=False de-centers X in place but must restore it afterwards."""
    X_work = X.copy()
    est = KMeans(copy_x=False, n_clusters=n_clusters, random_state=42)
    est.fit(X_work)
    _check_fitted_model(est)

    # After fitting, the working array must be (numerically) back to X.
    assert_array_almost_equal(X_work, X)
def test_k_means_non_collapsed():
    """A bad initialization must not collapse the centers onto each other.

    Centers that are quickly ignored should not be repositioned onto the
    center of mass, which would make the clustering depend on numerical
    instabilities.
    """
    data = np.array([[1.1, 1.1], [0.9, 1.1], [1.1, 0.9], [0.9, 1.1]])
    bad_init = np.array([[1.0, 1.0], [5.0, 5.0], [-5.0, -5.0]])
    est = KMeans(init=bad_init, n_clusters=3, random_state=42, n_init=1)
    est.fit(data)

    # All three clusters must remain populated...
    assert_equal(len(np.unique(est.labels_)), 3)

    # ...and the centers must stay well separated, pairwise.
    c = est.cluster_centers_
    for i, j in [(0, 1), (0, 2), (1, 2)]:
        assert_true(np.linalg.norm(c[i] - c[j]) >= 0.1)
def test_predict():
    """predict and fit_predict agree with the fitted labels_."""
    est = KMeans(n_clusters=n_clusters, random_state=42)
    est.fit(X)

    # Each centroid is its own nearest center.
    assert_array_equal(est.predict(est.cluster_centers_),
                       np.arange(n_clusters))

    # Re-predicting the training data reproduces the stored labeling.
    assert_array_equal(est.predict(X), est.labels_)

    # fit_predict is consistent with labels_ too.
    assert_array_equal(est.fit_predict(X), est.labels_)
def test_score():
    """Running more iterations must improve the (negative-inertia) score."""
    score_short = KMeans(n_clusters=n_clusters, max_iter=1,
                         random_state=42).fit(X).score(X)
    score_long = KMeans(n_clusters=n_clusters, max_iter=10,
                        random_state=42).fit(X).score(X)
    assert_greater(score_long, score_short)
def test_predict_minibatch_dense_input():
    """MiniBatchKMeans predictions are self-consistent on dense input."""
    mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, random_state=40).fit(X)

    # sanity check: predict centroid labels
    pred = mb_k_means.predict(mb_k_means.cluster_centers_)
    assert_array_equal(pred, np.arange(n_clusters))

    # sanity check: re-predict labeling for training set samples.
    # (Fixed: the original computed `pred` and then discarded it, calling
    # predict(X) a second time inside the assertion.)
    pred = mb_k_means.predict(X)
    assert_array_equal(pred, mb_k_means.labels_)
def test_predict_minibatch_kmeanspp_init_sparse_input():
    """k-means++ MiniBatchKMeans trained on sparse input predicts consistently."""
    est = MiniBatchKMeans(n_clusters=n_clusters, init='k-means++',
                          n_init=10).fit(X_csr)

    # Re-predicting the sparse training data reproduces labels_.
    assert_array_equal(est.predict(X_csr), est.labels_)

    # Each centroid is its own nearest center.
    assert_array_equal(est.predict(est.cluster_centers_),
                       np.arange(n_clusters))

    # A model trained on sparse input must also accept dense input at
    # predict time.
    assert_array_equal(est.predict(X), est.labels_)
def test_predict_minibatch_random_init_sparse_input():
    """Random-init MiniBatchKMeans trained on sparse input predicts consistently."""
    est = MiniBatchKMeans(n_clusters=n_clusters, init='random',
                          n_init=10).fit(X_csr)

    # Re-predicting the sparse training data reproduces labels_.
    assert_array_equal(est.predict(X_csr), est.labels_)

    # Each centroid is its own nearest center.
    assert_array_equal(est.predict(est.cluster_centers_),
                       np.arange(n_clusters))

    # A model trained on sparse input must also accept dense input at
    # predict time.
    assert_array_equal(est.predict(X), est.labels_)
def test_input_dtypes():
    """Fitting must give identical clusterings for list, int array and sparse input."""
    X_list = [[0, 0], [10, 10], [12, 9], [-1, 1], [2, 0], [8, 10]]
    X_int = np.array(X_list, dtype=np.int32)
    X_int_csr = sp.csr_matrix(X_int)
    # Integer-typed explicit init centers (first two samples).
    init_int = X_int[:2]

    fitted_models = [
        KMeans(n_clusters=2).fit(X_list),
        KMeans(n_clusters=2).fit(X_int),
        KMeans(n_clusters=2, init=init_int, n_init=1).fit(X_list),
        KMeans(n_clusters=2, init=init_int, n_init=1).fit(X_int),
        # mini batch kmeans is very unstable on such a small dataset hence
        # we use many inits
        MiniBatchKMeans(n_clusters=2, n_init=10, batch_size=2).fit(X_list),
        MiniBatchKMeans(n_clusters=2, n_init=10, batch_size=2).fit(X_int),
        MiniBatchKMeans(n_clusters=2, n_init=10, batch_size=2).fit(X_int_csr),
        MiniBatchKMeans(n_clusters=2, batch_size=2,
                        init=init_int, n_init=1).fit(X_list),
        MiniBatchKMeans(n_clusters=2, batch_size=2,
                        init=init_int, n_init=1).fit(X_int),
        MiniBatchKMeans(n_clusters=2, batch_size=2,
                        init=init_int, n_init=1).fit(X_int_csr),
    ]
    expected_labels = [0, 1, 1, 0, 0, 1]
    # v_measure_score is permutation-invariant; every model must score 1.0.
    scores = np.array([v_measure_score(expected_labels, km.labels_)
                       for km in fitted_models])
    assert_array_equal(scores, np.ones(scores.shape[0]))
def test_transform():
    """transform of the centroids gives zero diagonal, positive off-diagonal."""
    est = KMeans(n_clusters=n_clusters)
    est.fit(X)
    dist = est.transform(est.cluster_centers_)

    for row in range(n_clusters):
        # A centroid is at distance zero from itself...
        assert_equal(dist[row, row], 0)
        for col in range(n_clusters):
            if col != row:
                # ...and strictly positive distance from every other centroid.
                assert_greater(dist[row, col], 0)
def test_fit_transform():
    """fit(X).transform(X) and fit_transform(X) must agree exactly."""
    via_transform = KMeans(n_clusters=3, random_state=51).fit(X).transform(X)
    via_fit_transform = KMeans(n_clusters=3, random_state=51).fit_transform(X)
    assert_array_equal(via_transform, via_fit_transform)
def test_n_init():
    """Check that increasing the number of init increases the quality"""
    n_runs = 5
    n_init_range = [1, 5, 10]

    # Average the final inertia over several seeded runs for each n_init.
    inertia = np.zeros((len(n_init_range), n_runs))
    for i, n_init in enumerate(n_init_range):
        for seed in range(n_runs):
            fitted = KMeans(n_clusters=n_clusters, init="random",
                            n_init=n_init, random_state=seed).fit(X)
            inertia[i, seed] = fitted.inertia_
    inertia = inertia.mean(axis=1)

    failure_msg = ("Inertia %r should be decreasing"
                   " when n_init is increasing.") % list(inertia)
    # The averaged inertia must be non-increasing along n_init_range.
    for worse, better in zip(inertia[:-1], inertia[1:]):
        assert_true(worse >= better, failure_msg)
def test_k_means_function():
    """Call the k_means function directly and validate all of its outputs."""
    # Capture the verbose output instead of polluting the test log.
    old_stdout = sys.stdout
    sys.stdout = StringIO()
    try:
        # (Fixed: unpack directly into `centers`/`labels` — the original
        # re-assigned `centers = cluster_centers` and had a no-op
        # `labels = labels` statement.)
        centers, labels, inertia = k_means(X, n_clusters=n_clusters,
                                           verbose=True)
    finally:
        sys.stdout = old_stdout

    assert_equal(centers.shape, (n_clusters, n_features))
    assert_equal(np.unique(labels).shape[0], n_clusters)

    # check that the labels assignment are perfect (up to a permutation)
    assert_equal(v_measure_score(true_labels, labels), 1.0)
    assert_greater(inertia, 0.0)

    # check warning when centers are passed
    assert_warns(RuntimeWarning, k_means, X, n_clusters=n_clusters,
                 init=centers)

    # too many clusters desired
    assert_raises(ValueError, k_means, X, n_clusters=X.shape[0] + 1)
| |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implementation of Deep AutoEncoder from Martens & Grosse (2015).
This script demonstrates training on TPUs with TPUStrategy using the KFAC
optimizer, updating the damping parameter according to the
Levenberg-Marquardt rule, and using the quadratic model method for adapting
the learning rate and momentum parameters.
See third_party/tensorflow_kfac/google/examples/ae_tpu_xm_launcher.py
for an example Borg launch script. If you can't access this launch script,
some important things to know about running K-FAC on TPUs (at least for this
example) are that you must use high-precision matrix multiplications.
iterations_per_loop is not relevant when using TPU Strategy, but you must set
it to 1 when using TPU Estimator.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Dependency imports
from absl import flags
import kfac
import tensorflow.compat.v1 as tf
from kfac.examples import autoencoder_mnist
from kfac.examples import mnist
# TODO(znado): figure out the bug with this and update_damping_immediately=True.
# TODO(znado): Add checkpointing code to the training loop.

# Command-line flags controlling checkpointing and TPU connectivity.
flags.DEFINE_integer('save_checkpoints_steps', 500,
                     'Number of iterations between model checkpoints.')
flags.DEFINE_string('model_dir', '', 'Model dir.')
# iterations_per_loop is not used with TPU Strategy. We keep the flag so the
# Estimator launching script can be used.
flags.DEFINE_integer('iterations_per_loop', 1,
                     'Number of iterations in a TPU training loop.')
flags.DEFINE_string('master', None,
                    'GRPC URL of the master '
                    '(e.g. grpc://ip.address.of.tpu:8470).')

FLAGS = flags.FLAGS
def make_train_op(minibatch,
                  batch_loss,
                  layer_collection,
                  loss_fn):
    """Constructs the training op by delegating to the shared example helper.

    Args:
      minibatch: Tuple[Tensor, Tensor] representing the current batch of input
        images and labels.
      batch_loss: Tensor of shape (), loss with respect to the minibatch to be
        minimized.
      layer_collection: LayerCollection object. Registry for model parameters.
        Required when using a K-FAC optimizer.
      loss_fn: A function that when called constructs the graph to compute the
        model loss on the current minibatch. Returns a Tensor of the loss
        scalar.

    Returns:
      train_op: Op that can be used to update model parameters.
      optimizer: The KFAC optimizer used to produce train_op.

    Raises:
      ValueError: If layer_collection is None when K-FAC is selected as an
        optimization method.
    """
    # Do not use CrossShardOptimizer with K-FAC. K-FAC now handles its own
    # cross-replica synchronization automatically!
    images = minibatch[0]
    per_replica_batch_size = images.get_shape().as_list()[0]
    return autoencoder_mnist.make_train_op(
        minibatch=minibatch,
        batch_size=per_replica_batch_size,
        batch_loss=batch_loss,
        layer_collection=layer_collection,
        loss_fn=loss_fn,
        prev_train_batch=None,
        placement_strategy='replica_round_robin',
    )
def compute_squared_error(logits, targets):
    """Squared reconstruction error, averaged over the batch and summed over pixels."""
    per_pixel_error = tf.square(targets - tf.nn.sigmoid(logits))
    return tf.reduce_sum(tf.reduce_mean(per_pixel_error, axis=0))
def compute_loss(logits, labels, model):
    """Sigmoid cross-entropy loss plus the model's regularization losses."""
    crossentropy_matrix = tf.nn.sigmoid_cross_entropy_with_logits(
        logits=logits, labels=labels)
    # Mean over the batch dimension, sum over the output dimensions.
    crossentropy_loss = tf.reduce_sum(
        tf.reduce_mean(crossentropy_matrix, axis=0))
    regularization_loss = tf.reduce_sum(model.losses)
    return crossentropy_loss + regularization_loss
def mnist_input_fn(batch_size):
    """Builds an infinitely repeating, shuffled MNIST dataset of flattened images."""
    dataset, num_examples = mnist.load_mnist_as_dataset(flatten_images=True)
    # Shuffling before repeating is correct unless repeated examples in the
    # same batch are desired.
    dataset = dataset.shuffle(num_examples)
    dataset = dataset.repeat()
    dataset = dataset.batch(batch_size, drop_remainder=True)
    return dataset.prefetch(tf.data.experimental.AUTOTUNE)
def _train_step(batch):
    """Estimator model_fn for an autoencoder with adaptive damping."""
    features, labels = batch
    # Build the Keras autoencoder directly on the input tensor.
    model = autoencoder_mnist.get_keras_autoencoder(tensor=features)

    def loss_fn(minibatch, logits=None):
        """Compute the model loss given a batch of inputs.

        Args:
          minibatch: `Tuple[Tensor, Tensor]` for the current batch of input images
            and labels.
          logits: `Tensor` for the current batch of logits. If None then reuses the
            AutoEncoder to compute them.

        Returns:
          `Tensor` for the batch loss.
        """
        features, labels = minibatch
        del labels
        if logits is None:
            logits = model(features)
        # The autoencoder reconstructs its input, so `features` doubles as the
        # target for the loss.
        batch_loss = compute_loss(logits=logits, labels=features, model=model)
        return batch_loss

    logits = model.output
    pre_update_batch_loss = loss_fn((features, labels), logits)
    pre_update_batch_error = compute_squared_error(logits, features)
    # binary_crossentropy corresponds to sigmoid_crossentropy.
    layer_collection = kfac.keras.utils.get_layer_collection(
        model, 'binary_crossentropy', seed=FLAGS.seed + 1)
    global_step = tf.train.get_or_create_global_step()
    train_op, kfac_optimizer = make_train_op(
        (features, labels),
        pre_update_batch_loss,
        layer_collection,
        loss_fn)

    # Scalars reported back to the host after every training step.
    tensors_to_print = {
        'learning_rate': kfac_optimizer.learning_rate,
        'momentum': kfac_optimizer.momentum,
        'damping': kfac_optimizer.damping,
        'global_step': global_step,
        'loss': pre_update_batch_loss,
        'error': pre_update_batch_error,
    }
    if FLAGS.adapt_damping:
        # These quantities only exist when LM damping adaptation is enabled.
        tensors_to_print['qmodel_change'] = kfac_optimizer.qmodel_change
        tensors_to_print['rho'] = kfac_optimizer.rho

    # Make sure the parameter update runs before the reported tensors are read.
    with tf.control_dependencies([train_op]):
        return {k: tf.identity(v) for k, v in tensors_to_print.items()}
def train():
    """Trains the Autoencoder using TPU Strategy."""
    cluster_resolver = tf.distribute.cluster_resolver.TPUClusterResolver(
        tpu=FLAGS.master)
    tf.tpu.experimental.initialize_tpu_system(cluster_resolver)
    tpu_strategy = tf.distribute.experimental.TPUStrategy(cluster_resolver)

    with tpu_strategy.scope():
        data = mnist_input_fn(batch_size=FLAGS.batch_size)
        train_iterator = tpu_strategy.make_dataset_iterator(data)
        # Replicate the training step across all TPU cores.
        tensor_dict = tpu_strategy.experimental_run(_train_step, train_iterator)
        for k, v in tensor_dict.items():
            if k in ('loss', 'error'):  # Losses are NOT scaled for num replicas.
                tensor_dict[k] = tpu_strategy.reduce(tf.distribute.ReduceOp.MEAN, v)
            else:  # Other tensors (hyperparameters) are identical across replicas.
                # experimental_local_results gives you a tuple of per-replica values.
                tensor_dict[k] = tpu_strategy.experimental_local_results(v)

    config = tf.ConfigProto(allow_soft_placement=True)
    with tf.Session(cluster_resolver.master(), config=config) as session:
        session.run(tf.global_variables_initializer())
        session.run(train_iterator.initializer)
        print('Starting training.')
        for step in range(FLAGS.train_steps):
            # One session.run executes a full distributed training step.
            values_dict = session.run(tensor_dict)
            print('Training Step: {}'.format(step))
            for k, v in values_dict.items():
                print('{}: {}'.format(k, v))
    print('Done training.')
def main(argv):
    """Entry point: configures K-FAC for TPU execution, then runs training."""
    del argv  # Unused.
    tf.set_random_seed(FLAGS.seed)
    # Cholesky decomposition + triangular solve is the only matrix-inversion
    # code path currently supported on TPU.
    kfac.utils.set_global_constants(posdef_inv_method='cholesky')
    kfac.fisher_factors.set_global_constants(
        eigenvalue_decomposition_threshold=10000)
    train()
if __name__ == '__main__':
    # This script targets the TF1 graph-mode APIs.
    tf.disable_v2_behavior()
    tf.app.run(main)
| |
# *** encoding: utf-8 ***
"""
An :class:`~.InputProcessor` receives callbacks for the keystrokes parsed from
the input in the :class:`~prompt_toolkit.inputstream.InputStream` instance.
The `InputProcessor` will according to the implemented keybindings call the
correct callbacks when new key presses are feed through `feed`.
"""
from __future__ import unicode_literals
from prompt_toolkit.buffer import EditReadOnlyBuffer
from prompt_toolkit.filters.cli import ViNavigationMode
from prompt_toolkit.keys import Keys, Key
from prompt_toolkit.utils import Event
from .registry import BaseRegistry
from collections import deque
from six.moves import range
import weakref
import six
__all__ = (
'InputProcessor',
'KeyPress',
)
class KeyPress(object):
    """
    A single key stroke.

    :param key: A `Keys` instance or text (one character).
    :param data: The received string on stdin. (Often vt100 escape codes.)
    """
    def __init__(self, key, data=None):
        assert isinstance(key, (six.text_type, Key))
        assert data is None or isinstance(data, six.text_type)

        if data is None:
            # Default the raw data to the key's name (special key) or the
            # character itself.
            data = key.name if isinstance(key, Key) else key

        self.key = key
        self.data = data

    def __repr__(self):
        return '%s(key=%r, data=%r)' % (
            self.__class__.__name__, self.key, self.data)

    def __eq__(self, other):
        # Fixed: comparing against a non-KeyPress object used to raise
        # AttributeError. Returning NotImplemented lets Python fall back to
        # the other operand's comparison (or identity).
        if not isinstance(other, KeyPress):
            return NotImplemented
        return self.key == other.key and self.data == other.data
class InputProcessor(object):
    """
    Statemachine that receives :class:`KeyPress` instances and according to the
    key bindings in the given :class:`Registry`, calls the matching handlers.

    ::

        p = InputProcessor(registry)

        # Send keys into the processor.
        p.feed(KeyPress(Keys.ControlX, '\x18'))
        p.feed(KeyPress(Keys.ControlC, '\x03')

        # Process all the keys in the queue.
        p.process_keys()

        # Now the ControlX-ControlC callback will be called if this sequence is
        # registered in the registry.

    :param registry: `BaseRegistry` instance.
    :param cli_ref: weakref to `CommandLineInterface`.
    """
    def __init__(self, registry, cli_ref):
        assert isinstance(registry, BaseRegistry)

        self._registry = registry
        self._cli_ref = cli_ref

        # Events fired around every (non-CPR) key press.
        self.beforeKeyPress = Event(self)
        self.afterKeyPress = Event(self)

        # The queue of keys not yet send to our _process generator/state machine.
        self.input_queue = deque()

        # The key buffer that is matched in the generator state machine.
        # (This is at at most the amount of keys that make up for one key binding.)
        self.key_buffer = []

        # Simple macro recording. (Like readline does.)
        self.record_macro = False
        self.macro = []

        self.reset()

    def reset(self):
        """Reset the match state machine and restart the processing coroutine."""
        self._previous_key_sequence = []
        self._previous_handler = None

        # (Re)create the coroutine and advance it to its first `yield`.
        self._process_coroutine = self._process()
        self._process_coroutine.send(None)

        #: Readline argument (for repetition of commands.)
        #: https://www.gnu.org/software/bash/manual/html_node/Readline-Arguments.html
        self.arg = None

    def start_macro(self):
        " Start recording macro. "
        self.record_macro = True
        self.macro = []

    def end_macro(self):
        " End recording macro. "
        self.record_macro = False

    def call_macro(self):
        """Replay the recorded macro by feeding its key presses back in."""
        for k in self.macro:
            self.feed(k)

    def _get_matches(self, key_presses):
        """
        For a list of :class:`KeyPress` instances. Give the matching handlers
        that would handle this.
        """
        keys = tuple(k.key for k in key_presses)
        cli = self._cli_ref()

        # Try match, with mode flag
        return [b for b in self._registry.get_bindings_for_keys(keys) if b.filter(cli)]

    def _is_prefix_of_longer_match(self, key_presses):
        """
        For a list of :class:`KeyPress` instances. Return True if there is any
        handler that is bound to a suffix of this keys.
        """
        keys = tuple(k.key for k in key_presses)
        cli = self._cli_ref()

        # Get the filters for all the key bindings that have a longer match.
        # Note that we transform it into a `set`, because we don't care about
        # the actual bindings and executing it more than once doesn't make
        # sense. (Many key bindings share the same filter.)
        filters = set(b.filter for b in self._registry.get_bindings_starting_with_keys(keys))

        # When any key binding is active, return True.
        return any(f(cli) for f in filters)

    def _process(self):
        """
        Coroutine implementing the key match algorithm. Key strokes are sent
        into this generator, and it calls the appropriate handlers.
        """
        buffer = self.key_buffer
        retry = False

        while True:
            if retry:
                retry = False
            else:
                buffer.append((yield))

            # If we have some key presses, check for matches.
            if buffer:
                is_prefix_of_longer_match = self._is_prefix_of_longer_match(buffer)
                matches = self._get_matches(buffer)

                # When eager matches were found, give priority to them and also
                # ignore all the longer matches.
                eager_matches = [m for m in matches if m.eager(self._cli_ref())]

                if eager_matches:
                    matches = eager_matches
                    is_prefix_of_longer_match = False

                # Exact matches found, call handler.
                if not is_prefix_of_longer_match and matches:
                    self._call_handler(matches[-1], key_sequence=buffer[:])
                    del buffer[:]  # Keep reference.

                # No match found.
                elif not is_prefix_of_longer_match and not matches:
                    retry = True
                    found = False

                    # Loop over the input, try longest match first and shift.
                    for i in range(len(buffer), 0, -1):
                        matches = self._get_matches(buffer[:i])
                        if matches:
                            self._call_handler(matches[-1], key_sequence=buffer[:i])
                            del buffer[:i]
                            found = True
                            break

                    if not found:
                        # Drop the oldest key and retry with the remainder.
                        del buffer[:1]

    def feed(self, key_press):
        """
        Add a new :class:`KeyPress` to the input queue.
        (Don't forget to call `process_keys` in order to process the queue.)
        """
        assert isinstance(key_press, KeyPress)
        self.input_queue.append(key_press)

    def process_keys(self):
        """
        Process all the keys in the `input_queue`.
        (To be called after `feed`.)

        Note: because of the `feed`/`process_keys` separation, it is
              possible to call `feed` from inside a key binding.
              This function keeps looping until the queue is empty.
        """
        while self.input_queue:
            key_press = self.input_queue.popleft()

            # CPR (cursor position report) responses are terminal plumbing,
            # not user key strokes, so don't fire the before/after events.
            if key_press.key != Keys.CPRResponse:
                self.beforeKeyPress.fire()

            self._process_coroutine.send(key_press)

            if key_press.key != Keys.CPRResponse:
                self.afterKeyPress.fire()

        # Invalidate user interface.
        cli = self._cli_ref()
        if cli:
            cli.invalidate()

    def _call_handler(self, handler, key_sequence=None):
        """Invoke a matched key binding handler with a fresh KeyPressEvent."""
        was_recording = self.record_macro
        arg = self.arg
        self.arg = None

        event = KeyPressEvent(
            weakref.ref(self), arg=arg, key_sequence=key_sequence,
            previous_key_sequence=self._previous_key_sequence,
            is_repeat=(handler == self._previous_handler))

        # Save the state of the current buffer.
        cli = event.cli  # Can be `None` (In unit-tests only.)

        if handler.save_before(event) and cli:
            cli.current_buffer.save_to_undo_stack()

        # Call handler.
        try:
            handler.call(event)
            self._fix_vi_cursor_position(event)
        except EditReadOnlyBuffer:
            # When a key binding does an attempt to change a buffer which is
            # read-only, we can just silently ignore that.
            pass

        self._previous_key_sequence = key_sequence
        self._previous_handler = handler

        # Record the key sequence in our macro. (Only if we're in macro mode
        # before and after executing the key.)
        if self.record_macro and was_recording:
            self.macro.extend(key_sequence)

    def _fix_vi_cursor_position(self, event):
        """
        After every command, make sure that if we are in Vi navigation mode, we
        never put the cursor after the last character of a line. (Unless it's
        an empty line.)
        """
        cli = self._cli_ref()
        if cli:
            buff = cli.current_buffer
            preferred_column = buff.preferred_column

            if (ViNavigationMode()(event.cli) and
                    buff.document.is_cursor_at_the_end_of_line and
                    len(buff.document.current_line) > 0):
                buff.cursor_position -= 1

                # Set the preferred_column for arrow up/down again.
                # (This was cleared after changing the cursor position.)
                buff.preferred_column = preferred_column
class KeyPressEvent(object):
    """
    Key press event, delivered to key bindings.

    :param input_processor_ref: Weak reference to the `InputProcessor`.
    :param arg: Repetition argument.
    :param key_sequence: List of `KeyPress` instances.
    :param previous_key_sequence: Previous list of `KeyPress` instances.
    :param is_repeat: True when the previous event was delivered to the same handler.
    """
    def __init__(self, input_processor_ref, arg=None, key_sequence=None,
                 previous_key_sequence=None, is_repeat=False):
        self._input_processor_ref = input_processor_ref
        self.key_sequence = key_sequence
        self.previous_key_sequence = previous_key_sequence

        #: True when the previous key sequence was handled by the same handler.
        self.is_repeat = is_repeat

        self._arg = arg

    def __repr__(self):
        return 'KeyPressEvent(arg=%r, key_sequence=%r, is_repeat=%r)' % (
            self.arg, self.key_sequence, self.is_repeat)

    @property
    def data(self):
        """The raw input data of the last key press in the sequence."""
        return self.key_sequence[-1].data

    @property
    def input_processor(self):
        """The `InputProcessor` that delivered this event (None if collected)."""
        return self._input_processor_ref()

    @property
    def cli(self):
        """
        Command line interface.
        """
        return self.input_processor._cli_ref()

    @property
    def current_buffer(self):
        """
        The current buffer.
        """
        return self.cli.current_buffer

    @property
    def arg(self):
        """
        Repetition argument.
        """
        if self._arg == '-':
            return -1

        result = int(self._arg or 1)

        # Don't exceed a million. (Fixed: `result` is already an int; the
        # original redundantly re-converted it with int().)
        if result >= 1000000:
            result = 1

        return result

    @property
    def arg_present(self):
        """
        True if repetition argument was explicitly provided.
        """
        return self._arg is not None

    def append_to_arg_count(self, data):
        """
        Add digit to the input argument.

        :param data: the typed digit as string
        """
        assert data in '-0123456789'
        current = self._arg

        if data == '-':
            assert current is None or current == '-'
            result = data
        elif current is None:
            result = data
        else:
            result = "%s%s" % (current, data)

        self.input_processor.arg = result
| |
import numpy as np
import theano
import lasagne
## ALIASES ##
# Shorthands for the lasagne/theano namespaces used throughout this module.
L = lasagne.layers
T = theano.tensor
get_output = L.get_output
get_all_params = L.get_all_params
cross_entropy = lasagne.objectives.categorical_crossentropy
get_layers = L.get_all_layers
class Network(object):
    """
    Wrapper for neural networks for MNK that automates network compilation and
    provides some convenience functions for freezing, saving, and loading params

    Things to consider doing:
        mod save/load to use named layers
        add self.reinitialize(layers)
    """
    def __init__(self, architecture):
        # `architecture` is a callable that maps an input variable to the
        # final lasagne layer of the network.
        self.architecture = architecture
        self.input_var = T.tensor4('inputs')
        self.target_var = T.ivector('targets')
        self.update_algo = lasagne.updates.adam  # just a default
        self.build()
        self.objectives()
        self.compile_functions()

        # Fixed-size traces of train/validation error for plotting.
        self.val_trace = np.zeros(500)
        self.train_trace = np.zeros(500)
        self.trace_loc = 0

    def build(self):
        """
        Generates network graph, grabs params and output symbols
        """
        self.net = self.architecture(self.input_var)
        self.prediction = get_output(self.net)
        self.test_prediction = get_output(self.net, deterministic=True)
        self.params = get_all_params(self.net, trainable=True)
        # The "value" layer is assumed to sit 4 layers from the output.
        self.value_layer = get_layers(self.net)[-4]
        self.value_prediction = get_output(self.value_layer)
        return None

    def objectives(self):
        """
        Adds loss and accuracy nodes
        """
        self.loss = cross_entropy(self.prediction, self.target_var)
        self.loss = self.loss.mean()
        self.itemized_loss = cross_entropy(self.test_prediction, self.target_var)
        self.test_loss = self.itemized_loss.mean()
        self.test_acc = T.mean(
            T.eq(T.argmax(self.test_prediction, axis=1), self.target_var),
            dtype=theano.config.floatX
        )
        self.updates = self.update_algo(self.loss, self.params)
        return None

    def compile_functions(self):
        """
        Compiles theano functions for computing output, losses, etc
        """
        self.output_fn = theano.function([self.input_var], self.test_prediction)
        self.value_fn = theano.function([self.input_var], self.value_prediction)
        self.train_fn = theano.function(
            [self.input_var, self.target_var], self.loss,
            updates=self.updates
        )
        self.test_fn = theano.function(
            [self.input_var, self.target_var],
            [self.test_loss, self.test_acc]
        )
        self.itemized_test_fn = theano.function(
            [self.input_var, self.target_var],
            self.itemized_loss
        )
        return None

    def update_traces(self):
        """
        Saves traces for plotting
        """
        self.val_trace[self.trace_loc] = self.val_err
        self.train_trace[self.trace_loc] = self.train_err
        self.trace_loc += 1  # so hacky
        return None

    def freeze_params(self, net=None, exclude=None):
        """
        Sets params to be untrainable.

        Excludes layers in optional arg exclude (tuple or list of layer
        indices; negative indices count from the output end).
        """
        if net is None:
            net = self.net
        layers = get_layers(net)
        if exclude is not None:
            # Normalize negative indices before filtering. (Fixed: the
            # original iterated `exclude` BEFORE the None check, so calling
            # freeze_params() with the default raised TypeError.)
            num_layers = len(layers)
            exclude = [i if i >= 0 else num_layers + i for i in exclude]
            layers = [layer for l, layer in enumerate(layers) if l not in exclude]
        for layer in layers:
            for param in layer.params:
                layer.params[param].remove('trainable')
        self.params = get_all_params(net, trainable=True)  # CAUTION: needs rewritten to not throw errors as autoencoders develop
        return None

    def unfreeze_params(self):
        """
        Sets all parameters back to trainable
        """
        for layer in L.get_all_layers(self.net):
            for param in layer.params:
                layer.params[param].add('trainable')
        self.params = L.get_all_params(self.net, trainable=True)
        return None

    def save_params(self, param_file):
        """
        Save parameters for reuse later
        """
        all_params = L.get_all_param_values(self.net)
        np.savez(param_file, *all_params)
        return None

    def load_params(self, paramsfile):
        """
        Loads parameters from npz files
        """
        with np.load(paramsfile) as loaded:
            params_list = [(i[0], i[1]) for i in loaded.items()]
            # np.savez names entries "arr_0", "arr_1", ...; recover the
            # original ordering from the numeric suffix.
            params_order = np.array([i[0][4:6] for i in params_list]).astype(int)
            params_list = [params_list[i] for i in params_order.argsort()]
            L.set_all_param_values(self.net, [i[1] for i in params_list])
        return None
class Autoencoder(Network):
    """
    Wrapper for training and testing transfer learning with an autoencoder.
    Almost as cool as it sounds.

    Later, use super() to cut down bloat inside functions
    """
    def __init__(self, architecture):
        self.architecture = architecture
        self.input_var = T.tensor4('inputs')
        self.target_var = T.ivector('targets')
        # Reconstruction target for the autoencoder branch.
        self.ae_target_var = T.tensor4('ae inputs')
        self.update_algo = lasagne.updates.adam
        # NOTE(review): unlike Network, traces here are growable lists, not
        # fixed arrays — update_traces (inherited) indexes them, so it would
        # fail here; confirm intended usage.
        self.val_trace = []
        self.train_trace = []
        self.build()
        self.objectives()
        self.compile_functions()

    def build(self):
        """Generates graph, caches params, output symbols"""
        # The architecture returns three heads sharing weights:
        # reconstruction output, value layer, and classifier output.
        self.autoencoder, self.value_layer, self.net = self.architecture(self.input_var)
        self.prediction = get_output(self.net)
        self.test_prediction = get_output(self.net, deterministic=True)
        self.value_prediction = get_output(self.value_layer)
        self.image = get_output(self.autoencoder)
        self.test_image = get_output(self.autoencoder, deterministic=True)
        self.params = get_all_params(self.net)
        self.ae_params = get_all_params(self.autoencoder)
        return None

    def objectives(self):
        """Loss functions, etc"""
        self.loss = cross_entropy(self.prediction, self.target_var).mean()
        self.itemized_test_loss = cross_entropy(self.test_prediction, self.target_var)
        self.test_loss = self.itemized_test_loss.mean()
        self.test_acc = T.mean(
            T.eq(T.argmax(self.test_prediction, axis=1), self.target_var),
            dtype=theano.config.floatX
        )
        self.updates = self.update_algo(self.loss, self.params)
        # Mean squared reconstruction error for the autoencoder branch.
        self.ae_loss = T.mean((self.ae_target_var - self.image)**2, dtype=theano.config.floatX)
        self.ae_test_loss = T.mean((self.ae_target_var - self.test_image)**2, dtype=theano.config.floatX)
        self.ae_updates = self.update_algo(self.ae_loss, self.ae_params)
        return None

    def compile_functions(self):
        """Compile theano functions"""
        self.output_fn = theano.function([self.input_var], self.test_prediction)
        self.value_fn = theano.function([self.input_var], self.value_prediction)
        self.train_fn = theano.function(
            [self.input_var, self.target_var],
            self.loss,
            updates = self.updates
        )
        self.test_fn = theano.function(
            [self.input_var, self.target_var],
            [self.test_loss, self.test_acc]
        )
        self.itemized_test_fn = theano.function(
            [self.input_var, self.target_var],
            self.itemized_test_loss
        )
        # Autoencoder-branch functions (reconstruction output/train/test).
        self.ae_output_fn = theano.function([self.input_var], self.test_image)
        self.ae_train_fn = theano.function(
            [self.input_var, self.ae_target_var],
            self.ae_loss,
            updates=self.ae_updates
        )
        self.ae_test_fn = theano.function(
            [self.input_var, self.ae_target_var],
            self.ae_test_loss
        )
        return None
| |
from __future__ import absolute_import
import os
import re
from OpenSSL import SSL
from netlib import http_auth, certutils, tcp
from .. import utils, platform, version
from .primitives import RegularProxyMode, SpoofMode, SSLSpoofMode, TransparentProxyMode, UpstreamProxyMode, ReverseProxyMode, Socks5ProxyMode
# Ports on which intercepted connections are assumed to carry SSL/TLS.
TRANSPARENT_SSL_PORTS = [443, 8443]
# Base name for certificate-store files and the default CA directory.
CONF_BASENAME = "mitmproxy"
CA_DIR = "~/.mitmproxy"
class HostMatcher(object):
    """Matches "host:port" address strings against a list of regex patterns."""

    def __init__(self, patterns=None):
        # Fixed: the original used a mutable default argument (patterns=[]),
        # which is shared across all calls; use a None sentinel instead.
        self.patterns = list(patterns) if patterns is not None else []
        self.regexes = [re.compile(p, re.IGNORECASE) for p in self.patterns]

    def __call__(self, address):
        """Return True if the wrapped address matches any configured pattern."""
        address = tcp.Address.wrap(address)
        host = "%s:%s" % (address.host, address.port)
        return any(rex.search(host) for rex in self.regexes)

    def __nonzero__(self):
        # Truthy when at least one pattern is configured.
        return bool(self.patterns)

    # Python 3 spelling of the truthiness hook.
    __bool__ = __nonzero__
class ProxyConfig:
    """Aggregated configuration for one proxy server instance.

    Resolves the proxy mode, certificate store, host filters, SSL/TLS methods
    and authentication from the raw constructor arguments.
    """

    def __init__(
        self,
        host='',
        port=8080,
        server_version=version.NAMEVERSION,
        cadir=CA_DIR,
        clientcerts=None,
        no_upstream_cert=False,
        body_size_limit=None,
        mode=None,
        upstream_server=None,
        http_form_in=None,
        http_form_out=None,
        authenticator=None,
        ignore_hosts=None,
        tcp_hosts=None,
        ciphers_client=None,
        ciphers_server=None,
        certs=None,
        ssl_version_client=tcp.SSL_DEFAULT_METHOD,
        ssl_version_server=tcp.SSL_DEFAULT_METHOD,
        ssl_ports=TRANSPARENT_SSL_PORTS,
        spoofed_ssl_port=None,
        ssl_verify_upstream_cert=False,
        ssl_upstream_trusted_cadir=None,
        ssl_upstream_trusted_ca=None
    ):
        # Fixed: ignore_hosts/tcp_hosts/certs used mutable default arguments
        # ([]), which are shared across every ProxyConfig(); use None
        # sentinels and normalize here.
        ignore_hosts = ignore_hosts if ignore_hosts is not None else []
        tcp_hosts = tcp_hosts if tcp_hosts is not None else []
        certs = certs if certs is not None else []

        self.host = host
        self.port = port
        self.server_version = server_version
        self.ciphers_client = ciphers_client
        self.ciphers_server = ciphers_server
        self.clientcerts = clientcerts
        self.no_upstream_cert = no_upstream_cert
        self.body_size_limit = body_size_limit

        # Select the proxy-mode strategy object.
        if mode == "transparent":
            self.mode = TransparentProxyMode(platform.resolver(), ssl_ports)
        elif mode == "socks5":
            self.mode = Socks5ProxyMode(ssl_ports)
        elif mode == "reverse":
            self.mode = ReverseProxyMode(upstream_server)
        elif mode == "upstream":
            self.mode = UpstreamProxyMode(upstream_server)
        elif mode == "spoof":
            self.mode = SpoofMode()
        elif mode == "sslspoof":
            self.mode = SSLSpoofMode(spoofed_ssl_port)
        else:
            self.mode = RegularProxyMode()

        # Handle manual overrides of the http forms
        self.mode.http_form_in = http_form_in or self.mode.http_form_in
        self.mode.http_form_out = http_form_out or self.mode.http_form_out

        self.check_ignore = HostMatcher(ignore_hosts)
        self.check_tcp = HostMatcher(tcp_hosts)
        self.authenticator = authenticator
        self.cadir = os.path.expanduser(cadir)
        self.certstore = certutils.CertStore.from_store(
            self.cadir,
            CONF_BASENAME)
        for spec, cert in certs:
            self.certstore.add_cert_file(spec, cert)
        self.ssl_ports = ssl_ports

        # SSL version arguments may be given as OpenSSL method ints or as
        # symbolic names; normalize both sides to method ints.
        if isinstance(ssl_version_client, int):
            self.openssl_method_client = ssl_version_client
        else:
            self.openssl_method_client = tcp.SSL_VERSIONS[ssl_version_client]
        if isinstance(ssl_version_server, int):
            self.openssl_method_server = ssl_version_server
        else:
            self.openssl_method_server = tcp.SSL_VERSIONS[ssl_version_server]

        if ssl_verify_upstream_cert:
            self.openssl_verification_mode_server = SSL.VERIFY_PEER
        else:
            self.openssl_verification_mode_server = SSL.VERIFY_NONE
        self.openssl_trusted_cadir_server = ssl_upstream_trusted_cadir
        self.openssl_trusted_ca_server = ssl_upstream_trusted_ca
        self.openssl_options_client = tcp.SSL_DEFAULT_OPTIONS
        self.openssl_options_server = tcp.SSL_DEFAULT_OPTIONS
def process_proxy_options(parser, options):
    """Translate parsed command-line options into a ProxyConfig.

    Validates mutually exclusive proxy modes, authentication options and
    certificate paths.  Any invalid combination is reported via
    ``parser.error`` (which raises/exits), so a normal return always
    yields a usable ProxyConfig.
    """
    body_size_limit = utils.parse_size(options.body_size_limit)

    # Count how many mode-selecting flags were given; more than one is an
    # error.  NOTE(review): spoof/sslspoof also increment the counter but
    # the error message below only names four modes.
    c = 0
    mode, upstream_server, spoofed_ssl_port = None, None, None
    if options.transparent_proxy:
        c += 1
        if not platform.resolver:
            return parser.error(
                "Transparent mode not supported on this platform.")
        mode = "transparent"
    if options.socks_proxy:
        c += 1
        mode = "socks5"
    if options.reverse_proxy:
        c += 1
        mode = "reverse"
        upstream_server = options.reverse_proxy
    if options.upstream_proxy:
        c += 1
        mode = "upstream"
        upstream_server = options.upstream_proxy
    if options.spoof_mode:
        c += 1
        mode = "spoof"
    if options.ssl_spoof_mode:
        c += 1
        mode = "sslspoof"
        spoofed_ssl_port = options.spoofed_ssl_port
    if c > 1:
        return parser.error(
            "Transparent, SOCKS5, reverse and upstream proxy mode "
            "are mutually exclusive.")

    # Client certificate directory must exist and be a directory.
    if options.clientcerts:
        options.clientcerts = os.path.expanduser(options.clientcerts)
        if not os.path.exists(
                options.clientcerts) or not os.path.isdir(
                options.clientcerts):
            return parser.error(
                "Client certificate directory does not exist or is not a directory: %s" %
                options.clientcerts)

    # Build the proxy authenticator from whichever auth option was given.
    if (options.auth_nonanonymous or options.auth_singleuser or options.auth_htpasswd):
        if options.auth_singleuser:
            if len(options.auth_singleuser.split(':')) != 2:
                return parser.error(
                    "Invalid single-user specification. Please use the format username:password")
            username, password = options.auth_singleuser.split(':')
            password_manager = http_auth.PassManSingleUser(username, password)
        elif options.auth_nonanonymous:
            password_manager = http_auth.PassManNonAnon()
        elif options.auth_htpasswd:
            try:
                password_manager = http_auth.PassManHtpasswd(
                    options.auth_htpasswd)
            except ValueError as v:
                return parser.error(v.message)
        authenticator = http_auth.BasicProxyAuth(password_manager, "mitmproxy")
    else:
        authenticator = http_auth.NullProxyAuth(None)

    # Certificates are given as "[domain=]path"; default domain is "*".
    certs = []
    for i in options.certs:
        parts = i.split("=", 1)
        if len(parts) == 1:
            parts = ["*", parts[0]]
        parts[1] = os.path.expanduser(parts[1])
        if not os.path.exists(parts[1]):
            parser.error("Certificate file does not exist: %s" % parts[1])
        certs.append(parts)

    ssl_ports = options.ssl_ports
    if options.ssl_ports != TRANSPARENT_SSL_PORTS:
        # argparse appends to default value by default, strip that off.
        # see http://bugs.python.org/issue16399
        ssl_ports = ssl_ports[len(TRANSPARENT_SSL_PORTS):]

    return ProxyConfig(
        host=options.addr,
        port=options.port,
        cadir=options.cadir,
        clientcerts=options.clientcerts,
        no_upstream_cert=options.no_upstream_cert,
        body_size_limit=body_size_limit,
        mode=mode,
        upstream_server=upstream_server,
        http_form_in=options.http_form_in,
        http_form_out=options.http_form_out,
        ignore_hosts=options.ignore_hosts,
        tcp_hosts=options.tcp_hosts,
        authenticator=authenticator,
        ciphers_client=options.ciphers_client,
        ciphers_server=options.ciphers_server,
        certs=certs,
        ssl_version_client=options.ssl_version_client,
        ssl_version_server=options.ssl_version_server,
        ssl_ports=ssl_ports,
        spoofed_ssl_port=spoofed_ssl_port,
        ssl_verify_upstream_cert=options.ssl_verify_upstream_cert,
        ssl_upstream_trusted_cadir=options.ssl_upstream_trusted_cadir,
        ssl_upstream_trusted_ca=options.ssl_upstream_trusted_ca
    )
def ssl_option_group(parser):
    """Register all SSL-related command-line arguments on *parser*.

    FIX(review): the help strings for --ssl-version-client and
    --ssl-version-server were opened with four quotes (``\"\"\"\"``), which made
    the displayed help text start with a stray ``"`` character.
    """
    group = parser.add_argument_group("SSL")
    group.add_argument(
        "--cert",
        dest='certs',
        default=[],
        type=str,
        metavar="SPEC",
        action="append",
        help='Add an SSL certificate. SPEC is of the form "[domain=]path". '
        'The domain may include a wildcard, and is equal to "*" if not specified. '
        'The file at path is a certificate in PEM format. If a private key is included in the PEM, '
        'it is used, else the default key in the conf dir is used. '
        'The PEM file should contain the full certificate chain, with the leaf certificate as the first entry. '
        'Can be passed multiple times.')
    group.add_argument(
        "--ciphers-client", action="store",
        type=str, dest="ciphers_client", default=None,
        help="Set supported ciphers for client connections. (OpenSSL Syntax)"
    )
    group.add_argument(
        "--ciphers-server", action="store",
        type=str, dest="ciphers_server", default=None,
        help="Set supported ciphers for server connections. (OpenSSL Syntax)"
    )
    group.add_argument(
        "--client-certs", action="store",
        type=str, dest="clientcerts", default=None,
        help="Client certificate directory."
    )
    group.add_argument(
        "--no-upstream-cert", default=False,
        action="store_true", dest="no_upstream_cert",
        help="Don't connect to upstream server to look up certificate details."
    )
    group.add_argument(
        "--verify-upstream-cert", default=False,
        action="store_true", dest="ssl_verify_upstream_cert",
        help="Verify upstream server SSL/TLS certificates and fail if invalid "
             "or not present."
    )
    group.add_argument(
        "--upstream-trusted-cadir", default=None, action="store",
        dest="ssl_upstream_trusted_cadir",
        help="Path to a directory of trusted CA certificates for upstream "
             "server verification prepared using the c_rehash tool."
    )
    group.add_argument(
        "--upstream-trusted-ca", default=None, action="store",
        dest="ssl_upstream_trusted_ca",
        help="Path to a PEM formatted trusted CA certificate."
    )
    group.add_argument(
        "--ssl-port",
        action="append",
        type=int,
        dest="ssl_ports",
        default=list(TRANSPARENT_SSL_PORTS),
        metavar="PORT",
        help="Can be passed multiple times. Specify destination ports which are assumed to be SSL. "
             "Defaults to %s." %
             str(TRANSPARENT_SSL_PORTS))
    group.add_argument(
        "--ssl-version-client", dest="ssl_version_client", type=str, default=tcp.SSL_DEFAULT_VERSION,
        choices=tcp.SSL_VERSIONS.keys(),
        help="""
        Use a specified protocol for client connections:
        TLSv1.2, TLSv1.1, TLSv1, SSLv3, SSLv2, SSLv23.
        Default to SSLv23."""
    )
    group.add_argument(
        "--ssl-version-server", dest="ssl_version_server", type=str, default=tcp.SSL_DEFAULT_VERSION,
        choices=tcp.SSL_VERSIONS.keys(),
        help="""
        Use a specified protocol for server connections:
        TLSv1.2, TLSv1.1, TLSv1, SSLv3, SSLv2, SSLv23.
        Default to SSLv23."""
    )
| |
#!/usr/bin/env python
# Copyright (c) 2005-2009 Jaroslav Gresula
#
# Distributed under the MIT license (See accompanying file
# LICENSE.txt or copy at http://jagpdf.org/LICENSE.txt)
#
import glyphlist
import glob
import re
from string import Template
import sys
import md5
from collections import defaultdict
import math
import random
from StringIO import StringIO
import os
# Input: the bundled Adobe Core14 AFM metric files.
AFM_DIR = '../../external/data/Core14_AFMs/'
# Output: directory where the generated C++ sources/templates live.
TYPEMAN_DIR = '../src/resources/typeman/'
class Bunch:
    """A minimal attribute container: keyword arguments become attributes."""

    def __init__(self, **attrs):
        for name, value in attrs.items():
            setattr(self, name, value)
class Face:
    """Accumulates all metrics parsed from a single AFM file."""

    def __init__( self, ctx ):
        self.ctx = ctx              # shared generator context (kern_dict etc.)
        self.chars = []             # per-glyph Bunch records
        self.FontName = ""
        self.FullName = ""
        self.FamilyName = ""
        self.FontBBox = None #[llx lly urx ury]
        self.EncodingScheme = ""
        self.CharacterSet = ""
        self.CapHeight = 0
        self.XHeight = 0
        self.Ascender = 0
        self.Descender = 0
        self.UnderlinePosition = 0
        self.BuiltinEncoding = 0
        self.UnderlineThickness = 0
        self.ItalicAngle = 0
        self.IsFixedPitch = True
        self.Weight = 400 # 100-900
        self.StdHW = 0
        self.StdVW = 0
        self.KernGetter = "NULL"    # name of the generated C kerning getter
        self.md5 = md5.new()        # checksum over all input lines (py2 md5)

    def finalize(self):
        if self.EncodingScheme == 'FontSpecific':
            # sort by code (the original comments here were swapped)
            self.chars.sort( lambda l,r: cmp(l.code,r.code) )
        else:
            # sort by unicode
            self.chars.sort( lambda l,r: cmp(l.unicode,r.unicode) )

    def on_afm_line(self,line):
        """called for each input line"""
        self.md5.update( line )
class HandlerBase:
    """Base class for AFM section handlers; every line also feeds the
    face's running checksum."""

    def __init__(self, face):
        self.face = face

    def on_line(self, line):
        # Update the face digest first, then let the subclass parse.
        self.face.on_afm_line(line)
        self.process_line_(line)
class FontMetricsHandler(HandlerBase):
    """Parses global font attributes from the FontMetrics section."""

    def __init__( self, face, arg ):
        HandlerBase.__init__( self, face )

    def process_line_( self, s ):
        kwd, val = get_kwd_and_val(s)
        # string-valued attributes are stored verbatim
        if kwd in set( ['FontName','FullName','FamilyName', 'EncodingScheme', 'CharacterSet'] ):
            setattr( self.face, kwd, val ) #verbatim
        # integer-valued vertical metrics and stem widths
        elif kwd in set( [
            'CapHeight',
            'XHeight',
            'Ascender',
            'Descender',
            'UnderlinePosition',
            'UnderlineThickness',
            'StdHW', 'StdVW' ] ):
            setattr( self.face, kwd, int(val) )
        elif kwd in set( ['ItalicAngle'] ):
            setattr( self.face, kwd, float(val) )
        elif kwd == "FontBBox":
            # "llx lly urx ury" -> [llx, lly, urx, ury]
            self.face.FontBBox = [int(s) for s in val.split(" ") ]
            assert( len(self.face.FontBBox) == 4 )
        elif kwd == "IsFixedPitch":
            self.face.IsFixedPitch = val=='true' and True or False
        elif kwd == "Weight":
            #see: http://www.w3.org/TR/CSS21/fonts.html#font-boldness
            self.face.Weight = { 'Medium' : 400,
                                 'Normal' : 400,
                                 'Roman' : 400,
                                 'Bold' : 700,
                                 'Light' : 300 }[val]
        elif kwd in set( ['Version', 'Notice', 'Comment', 'Characters'] ):
            pass #ignore
        elif kwd in set( ['MappingScheme', 'EscChar', 'IsBaseFont', 'VVector', 'IsFixedV', 'CharWidth'] ):
            # keywords we know about but deliberately do not support
            assert not "unsupported keyword"
        else:
            print "kwd: ", kwd
            assert not "unknown keyword"
class CharMetricsHandler(HandlerBase):
    """Parses CharMetrics lines of the form
    ``C 33 ; WX 600 ; N exclam ; B 202 -15 398 572 ;``.

    FIX(review): the bare ``except:`` around the glyph-name lookup was
    narrowed to ``except KeyError`` (a dict lookup is the only thing that
    can fail there), and the list-comprehension variable no longer shadows
    the parameter ``s``.
    """

    def __init__( self, face, arg ):
        HandlerBase.__init__( self, face )

    def process_line_( self, s ):
        # Split "key value" pairs separated by ';' into a dict.
        l = [ item.strip().split( ' ', 1) for item in s.split(';')[:-1] ]
        rd = dict( l )
        bbox = [int(v) for v in rd['B'].split(" ") ]
        assert( len(bbox) == 4 )
        try:
            u = glyphlist.glyph_to_unicode_map[rd['N']]
        except KeyError:
            # unknown glyph names only occur in font-specific encodings
            assert( self.face.EncodingScheme == 'FontSpecific' )
            u = 0
        self.face.chars.append( Bunch(unicode=u,
                                      code=int(rd['C']),
                                      widthx=int(rd['WX']),
                                      bbox=bbox) )
class KernDataHandler(HandlerBase):
    """Marks the KernData section; its content lives in nested sections."""

    def __init__(self, face, arg):
        HandlerBase.__init__(self, face)

    def process_line_(self, s):
        # KernData contains only nested Start*/End* sections, never raw lines.
        assert not "should not get here"
class KernPairsHandler(HandlerBase):
    """Collects KPX kerning pairs into the shared context dictionary."""

    def __init__(self, face, arg):
        HandlerBase.__init__(self, face)
        self.getter_fun = None

    def process_line_(self, s):
        keyword, left_name, right_name, value = s.split(' ')
        assert(keyword == 'KPX')
        left = glyphlist.glyph_to_unicode_map[left_name]
        right = glyphlist.glyph_to_unicode_map[right_name]
        # Lazily derive the per-font getter name and register it on the face.
        if not self.getter_fun:
            self.getter_fun = 'kern_' + font_name_to_id(self.face.FontName)
            self.face.KernGetter = self.getter_fun
        # kern_dict maps (left, right) -> {getter name: kerning value}.
        self.face.ctx.kern_dict[(left, right)][self.getter_fun] = value
def get_kwd_and_val(line):
    """Split an AFM line into (keyword, rest); rest is None when absent."""
    parts = line.split(" ", 1)
    assert(len(parts) in (1, 2))
    if len(parts) == 2:
        return parts
    return parts[0], None
def get_handler_type(handler):
    """Map an AFM section name (e.g. 'FontMetrics') to its handler class."""
    class_name = handler + 'Handler'
    return globals()[class_name]
def font_name_to_id(fontname):
    """Turn a PostScript font name into a C identifier: every character
    that is not an ASCII letter or underscore becomes '_'."""
    return re.sub(r'[^a-zA-Z_]', '_', fontname)
def font_name_to_enum(fontname):
    """Enum constant for a font name, e.g. 'T1_TIMES_ROMAN'."""
    identifier = font_name_to_id(fontname)
    return "T1_" + identifier.upper()
def process_afm(instream, ctx):
    """processes single afm file"""
    # Stack of active section handlers; AFM sections nest
    # (e.g. KernData contains KernPairs).
    handlers = []
    face = Face(ctx)
    for line in instream:
        line = line.strip()
        key, val = get_kwd_and_val( line )
        if key.startswith( 'Start' ):
            # 'StartFontMetrics 4.1' -> FontMetricsHandler(face, '4.1')
            handlers.append( get_handler_type( key[5:] )(face, val) )
        elif key.startswith( 'End' ):
            # the closing keyword must match the innermost open section
            last=handlers.pop()
            assert( last.__class__==get_handler_type(key[3:]) )
        else:
            handlers[-1].on_line( line )
    face.finalize()
    return face;
def process_afm_dir(dirname, ctx):
    """Non-recursively process a directory of AFM files; returns Faces."""
    pattern = dirname + '/*.afm'
    return [process_afm(open(path), ctx) for path in glob.glob(pattern)]
###########################################################################
# header generator
# NOTE(review): the actual header text is loaded from
# t1adobestandardfonts.h.template in do_cpp_header(); this constant is
# an empty leftover.
cpp_header_muster="""
"""
def do_cpp_header( faces, outs ):
    """Write the generated C++ header (one enum entry per face) to outs."""
    # ENUMS is consumed by the $ENUMS placeholder via substitute(locals());
    # do not rename these locals.
    ENUMS = ",\n ".join( [ font_name_to_enum(f.FontName) for f in faces ] )
    header_templ = os.path.join(TYPEMAN_DIR, 't1adobestandardfonts.h.template')
    header_templ = open(header_templ).read()
    outs.write( Template(header_templ).substitute(locals()))
###########################################################################
# cpp generator
cpp_impl_muster="""
"""
kern_getter_templ="""
Int $getter_fun(kern_offsets_t const& krec) {
return krec.$value_holder;
}
"""
def make_kern_pair_key(left, right):
    """Pack two glyph codes into a single integer key (right in the
    upper 16 bits, left in the lower)."""
    return (right << 16) + left
def output_kern_table(templ, ctx, getter_to_index, value_to_index):
    """Build the cuckoo-hashed kerning table and substitute it into templ.

    Template variables are supplied via safe_substitute(locals()); the
    local variable names below must not be renamed.
    """
    # insertion into the hash table depends on randomizer, so make it
    # deterministic here
    random.seed(0)
    # NOTE(review): the first two prime/size configurations are kept as a
    # record of experiments; only the last assignment of each variable is
    # actually used.
    # these 3 primes in combination with table size give ~93% load factor
    hash1_p = 226783
    hash2_p = 1354601
    hash3_p = 1622471
    hash_table_size = 3491
    num_hash_functions = 3
    num_cells = 1
    h = HFunctionsDivision(hash1_p, hash2_p, hash3_p)
    # these 2 primes in combination with table size give ~62% load factor
    hash1_p = 16069
    hash2_p = 43787
    hash_table_size = 5261
    num_hash_functions = 2
    h = HFunctionsDivision(hash1_p, hash2_p)
    # 2 primes, 2 cells -> 91.7%
    hash1_p = 1984061
    hash2_p = 885931
    num_cells = 2
    h = HFunctionsDivision(hash1_p, hash2_p)
    hash_table_size = 1777
    #
    ch = CuckooNest(hash_table_size, h, num_cells)
    result = []
    # NOTE(review): min_unicode/max_unicode are computed but never used.
    min_unicode, max_unicode = sys.maxint, 0
    values = {} # offset tuple -> its index
    values[(0, 0, 0, 0, 0, 0)] = 0
    for k, v in ctx.kern_dict.iteritems():
        key = make_kern_pair_key(*k)
        min_unicode = min(min_unicode, k[0], k[1])
        max_unicode = max(max_unicode, k[0], k[1])
        # one slot per getter function; unused slots stay 0
        value = 8 * [0]
        for getter, val in v.iteritems():
            value[getter_to_index[getter]] = value_to_index[val]
        value = tuple(value)
        try:
            value_i = values[value]
        except KeyError:
            # new offset tuple -> assign the next index
            value_i = len(values)
            values[value] = value_i
        ch.insert(key, str(value_i))
    result += ch.c_output("{0xffffffff, 0}")
    kerning_table = ",\n ".join(result)
    num_kerning_offsets = len(values)
    # emit offset tuples sorted by their assigned index
    offset_list = [(v, k) for k, v in values.iteritems()]
    offset_list.sort()
    off_tuples = (os for i, os in offset_list)
    off_strings = (", ".join(str(o) for o in off) for off in off_tuples)
    offset_c_values = ("{%s}" % s for s in off_strings)
    kerning_offsets = ",\n ".join(offset_c_values)
    return Template(templ).safe_substitute(locals())
def output_kern_data(templ, ctx):
    """outputs data needed for pair kerning

    Template variables are supplied via safe_substitute(locals()); the
    local variable names must not be renamed.
    """
    getters, values = set(), set()
    for pair, d in ctx.kern_dict.iteritems():
        for g, val in d.iteritems():
            getters.add(g)
            values.add(val)
    getter_to_index = dict([(g, i) for i, g in enumerate(getters)])
    # value index 0 is reserved for "no kerning" (0)
    vlist = [(v, i + 1) for i, v in enumerate(values)]
    vlist.append((0, 0))
    vlist.sort(lambda l, r : cmp(l[1], r[1]))
    value_to_index = dict(vlist)
    kern_values = ",\n ".join((str(v) for v, i in vlist))
    templ = output_kern_table(templ, ctx, getter_to_index, value_to_index)
    # output getter functions (they access offset value for given font)
    kerning_getters = []
    for getter_fun, value_holder_i in getter_to_index.iteritems():
        value_holder = "offset_%d" % value_holder_i
        kerning_getters.append(Template(kern_getter_templ).substitute(locals()))
    kerning_getters = "\n".join(kerning_getters)
    return Template(templ).safe_substitute(locals())
def do_cpp_impl(faces, outs, ctx):
    """Write the generated C++ implementation file for all faces to outs.

    Template variables are supplied via substitute(locals()); do not
    rename the locals below.
    """
    FACE_PTRS = ",\n ".join( [ "&"+font_name_to_id(f.FontName) for f in faces ] )
    FACE_DEFS = []
    for face in faces:
        FACE_DEFS.append( do_cpp_impl_face(face) )
    FACE_DEFS = "\n".join( FACE_DEFS )
    impl_templ = os.path.join(TYPEMAN_DIR, 't1adobestandardfonts.cpp.template')
    impl_templ = open(impl_templ).read()
    # fill in the kerning tables first, then the face definitions
    impl_templ = output_kern_data(impl_templ, ctx)
    outs.write(Template(impl_templ).substitute(locals()))
cpp_impl_face_muster="""
const int ${FACEID}_num_glyphs = $NUM_GLYPHS;
const t1s_glyph ${FACEID}_glyphs[${FACEID}_num_glyphs] = {
${GLYPHS_DEF}
};
const t1s_face $FACEID = {
{
/* units */ 1000,
/* bbox_xmin */ $FontBBox_xmin,
/* bbox_ymin */ $FontBBox_ymin,
/* bbox_xmax */ $FontBBox_xmax,
/* bbox_ymax */ $FontBBox_ymax,
/* baseline_distance */ $BaselineDistance,
/* ascender */ $Ascender,
/* descender */ $Descender,
/* avg_width */ $AvgWidth,
/* max_width */ $MaxWidth,
/* missing_width */ $MissingWidth,
/* cap height */ $CapHeight,
/* xheight */ $XHeight
}, /* font metrics */
/* font name */ \"$FontName\",
/* full name */ \"$FullName\",
/* family name */ \"$FamilyName\",
/* encoding scheme */ \"$EncodingScheme\",
/* built-in encoding */ $BuiltinEncoding,
/* char set */ \"$CharacterSet\",
/* underline position */ $UnderlinePosition,
/* underline thickness */ $UnderlineThickness,
/* italic angle */ $ItalicAngle,
/* is fixed pitch */ $IsFixedPitch,
/* weight */ $Weight,
/* horizontal stem w */ $StdHW,
/* vertical stem w */ $StdVW,
/* num glyphs */ $NUM_GLYPHS,
/* glyph metrics */ ${FACEID}_glyphs,
/* kerning getter */ ${KernGetter},
/* hash */ { $HASH }
};
"""
def calc_face_width_attrs( face ):
    """Compute AvgWidth/MaxWidth/MissingWidth for a face.

    Returns locals() so the caller can merge the values into a template
    dictionary -- the local variable names must not be renamed.
    """
    AvgWidth, MaxWidth, MissingWidth = 0, -1, -1
    for c in face.chars:
        AvgWidth += c.widthx
        if c.widthx > MaxWidth:
            MaxWidth = c.widthx
        if c.unicode == 32:
            # the space glyph's width doubles as the missing width
            MissingWidth = c.widthx
    # NOTE: Python 2 integer division
    AvgWidth = AvgWidth / len( face.chars )
    return locals()
def do_cpp_impl_face(face):
    """Render cpp_impl_face_muster for one face.

    Template variables come from locals(); the names must match the
    template placeholders exactly.
    """
    FACEID = font_name_to_id( face.FontName )
    NUM_GLYPHS = len(face.chars)
    GLYPHS_DEF = []
    # emit glyph triplets {unicode, code, width}, five per source line
    for i in range( 0, NUM_GLYPHS, 5 ):
        GLYPHS_DEF.append( ", ".join( ["{%d,%d,%d}" % (c.unicode, c.code, c.widthx)
                                       for c in face.chars[i:i+5]] ) )
    GLYPHS_DEF = ",\n ".join(GLYPHS_DEF)
    # NOTE(review): mutating the dict returned by locals() is
    # CPython-specific behaviour; it happens to work here because the same
    # dict object is eventually passed to Template.substitute below.
    locals().update( face.__dict__ )
    locals()['IsFixedPitch'] = locals()['IsFixedPitch'] and "true" or "false"
    locals()['BuiltinEncoding'] = locals()['EncodingScheme'] == 'FontSpecific' and "true" or "false"
    HASH = ", ".join( [ "0x%02x"%ord(b) for b in face.md5.digest() ] )
    locals().update( calc_face_width_attrs(face) )
    FontBBox_xmin = face.FontBBox[0]
    FontBBox_ymin = face.FontBBox[1]
    FontBBox_xmax = face.FontBBox[2]
    FontBBox_ymax = face.FontBBox[3]
    # taken from FreeType, t1objs.c
    BaselineDistance = 1000*12/10
    if BaselineDistance < locals()['Ascender']-locals()['Descender']:
        BaselineDistance = locals()['Ascender']-locals()['Descender']
    return Template(cpp_impl_face_muster).substitute( locals() )
def gen_cpp_jagbase():
    """Entry point: convert the bundled AFM files into the generated
    t1adobestandardfonts.h/.cpp sources."""
    # kern_dict: (left, right) unicode pair -> {getter name: kern offset}
    ctx = Bunch(kern_dict=defaultdict(lambda : {}))
    faces = process_afm_dir(AFM_DIR, ctx)
    if faces:
        header_file = os.path.join(TYPEMAN_DIR, 't1adobestandardfonts.h')
        do_cpp_header(faces, open(header_file, 'wb' ))
        impl_file = os.path.join(TYPEMAN_DIR, 't1adobestandardfonts.cpp')
        do_cpp_impl(faces, open(impl_file, 'wb'), ctx)
# sample CharMetrics line:
#C 33 ; WX 600 ; N exclam ; B 202 -15 398 572 ;
def encoding_status():
    """Report glyphs required by common Windows codepages that are missing
    from the Core14 character set (Python 2)."""
    content = open(AFM_DIR + 'Courier-Bold.afm').read()
    names = re.compile('; N ([a-zA-Z]+) ;')
    core_names = set(names.findall(content))
    encodings = ['windows-1250', 'windows-1251', 'windows-1252', 'windows-1253']
    for enc in encodings:
        # only the high half differs between the codepages
        for i in xrange(128,256):
            try:
                c = unicode(chr(i), enc)
                assert len(c) == 1
                codepoint = ord(c[0])
                name = glyphlist.unicode_to_glyph(codepoint)
                if name not in core_names:
                    print enc, name, "0x%x" % codepoint
            except UnicodeDecodeError, err:
                # codepoint not defined in this encoding
                print enc, err
# ---------------------------------------------------------------------------
# kerning stats
#
def kern_generator():
    """Yield (afm_path, left_unicode, right_unicode, offset) for every KPX
    kerning record in the bundled Core14 AFM files."""
    from glyphlist import glyph_to_unicode_map as gmap
    for afm_path in glob.glob('../../external/data/Core14_AFMs/*.afm'):
        for raw_line in open(afm_path):
            if not raw_line.startswith('KPX'):
                continue
            _, left, right, offset = raw_line.split()
            yield afm_path, gmap[left], gmap[right], offset
def kern_stats():
    """Print assorted statistics about the kerning pairs found in the
    Core14 AFM files (Python 2)."""
    # unique lefts per font
    # avg number of rights per single left
    # % of kern pairs in all pair in lorem ipsum
    kd = defaultdict(lambda : {})
    pairs_total = 0
    pairs_unique = set()
    values_unique = set()
    pairs_per_font = defaultdict(lambda : 0)
    pairs_freq_font = defaultdict(lambda : 0)
    max_unicode = 0
    max_left = 0
    max_right = 0
    min_left = sys.maxint
    min_right = sys.maxint
    max_diff = 0
    glyphs = set()
    max_val, min_val = 0, sys.maxint
    for font, left, right, val in kern_generator():
        kd[font][(left, right)] = val
        pairs_total += 1
        pairs_unique.add((left, right))
        values_unique.add(val)
        max_val = max(max_val, int(val))
        min_val = min(min_val, int(val))
        pairs_per_font[font] += 1
        pairs_freq_font[(left, right)] += 1
        max_unicode = max(max_unicode, left, right)
        max_left = max(max_left, left)
        max_right = max(max_right, right)
        min_left = min(min_left, left)
        min_right = min(min_right, right)
        max_diff = max(max_diff, abs(left - right))
        glyphs.add(left)
        glyphs.add(right)
    # post-proc
    # distribution: in how many fonts does each pair occur?
    pairs_dist = defaultdict(lambda : 0)
    for v in pairs_freq_font.itervalues():
        pairs_dist[v] += 1
    # out
    # histogram of bits needed per glyph code
    log2_glyphs = defaultdict(lambda : 0)
    for g in glyphs:
        log2_glyphs[math.ceil(math.log(g, 2))] += 1
    print 'total:', pairs_total
    print 'unique pairs:', len(pairs_unique), ', tree depth:', math.log(len(pairs_unique), 2)
    print 'unique glyphs:', len(glyphs)
    print 'unique values:', len(values_unique)
    print 'min val:', min_val, ', max_val:', max_val, ", diff:", (max_val - min_val)
    print 'pairs per font:', ', '.join([str(v) for v in pairs_per_font.itervalues()])
    print 'pairs freq in fonts:', ', '.join(['%d: %d' % (k, v) for k, v in pairs_dist.iteritems()])
    print 'bits per glyph:', ', '.join(("%d: %d" % (k, v) for k, v in log2_glyphs.iteritems()))
    print 'max unicode:', max_unicode, ', max left:', max_left, ', max right:', max_right
    print 'min left:', min_left, ', min right:', min_right, ', max diff:', max_diff
class CuckooNest:
def __init__(self, nr_buckets, hash_funs, nr_cells=1):
self.nr_buckets = nr_buckets
self.hash_funs = hash_funs
self.nr_cells = nr_cells
self.table = nr_cells * nr_buckets * [None]
self.nr_items = 0
def cells(self, n, key):
"""Calculate hash using n-th hash function and return a list of cell
indices."""
pos = self.hash_funs(n, key) % self.nr_buckets
return [self.nr_cells * pos + n for n in range(self.nr_cells)]
def insert(self, key, value):
cells = self.cells(0, key)
item = (key, value)
for n in xrange(self.nr_items + 1):
for cell in cells:
if None == self.table[cell]:
self.table[cell] = item
self.nr_items += 1
return
p0 = random.choice(cells)
item, self.table[p0] = self.table[p0], item
all_cells = [self.cells(i, item[0]) for i in range(len(self.hash_funs))]
all_cells.remove(cells)
cells = random.choice(all_cells)
raise TableFull('cannot insert %d' % item[0])
def load_factor(self):
return float(self.nr_items) / len(self.table)
def lookup(self, key):
for i in range(len(self.hash_funs)):
pos = self.cells(i, key)
for p in pos:
if self.table[p] and self.table[p][0] == key:
return self.table[p][1]
return None
def stats(self):
print '#items:', self.nr_items
print 'load factor:', float(self.nr_items) / len(self.table)
def load_factor(self):
return float(self.nr_items) / len(self.table)
def c_output(self, empty_slot):
result = []
for i in range(len(self.table)):
item = self.table[i]
if item != None:
result.append("{0x%08x, %s}" % item)
else:
result.append(empty_slot)
return result
class TableFull(Exception):
    """Raised when a cuckoo-hash insertion cannot find a free cell."""

    def __init__(self, msg):
        super(TableFull, self).__init__(msg)
class HFunctionsDivision:
    """A family of modular hash functions, one per supplied prime.
    Calling the object with (i, key) applies the i-th function."""

    def __init__(self, *primes):
        self.primes = primes

    def __call__(self, i, key):
        return key % self.primes[i]

    def __len__(self):
        return len(self.primes)

    def __str__(self):
        primes_csv = ', '.join(str(p) for p in self.primes)
        return 'Division: ' + primes_csv
def HDivisionIter():
    """Endlessly yield (CuckooNest, hash-functions) pairs built from three
    randomly chosen primes."""
    # h = HFunctionsDivision(1984061, 885931)
    # yield CuckooNest(1777, h, 2), h
    from primes import primes
    while True:
        chosen = [random.choice(primes) for _ in range(3)]
        h = HFunctionsDivision(*chosen)
        yield CuckooNest(3491, h), h
import itertools
def eratosthenes():
    '''Yields the sequence of prime numbers via the Sieve of Eratosthenes.'''
    # Maps each discovered composite to one of its prime factors.
    first_factor = {}
    q = 2
    while True:
        p = first_factor.pop(q, None)
        if p is None:
            # q was never marked as composite, hence it is prime;
            # its square is the first composite with q as smallest factor.
            yield q
            first_factor[q * q] = q
        else:
            # Propagate factor p to the next multiple not yet marked.
            multiple = p + q
            while multiple in first_factor:
                multiple += p
            first_factor[multiple] = p
        q += 1
def gen_primes(n):
    """Print a Python module body defining all primes <= n (redirect the
    output to primes.py)."""
    print "primes = [ \\"
    for p in eratosthenes():
        if p > n:
            break
        print "%d," % p
    print "]"
def get_pairs_dict():
    """Return {(left, right): {afm_path: offset}} for all kerning pairs.

    FIX(review): removed the unused locals ``min_key``/``max_key`` (the
    only use of the Python-2-only ``sys.maxint`` in this function).
    """
    pairs_dict = {}
    for font, left, right, val in kern_generator():
        pairs_dict.setdefault((left, right), {})[font] = val
    return pairs_dict
def output_keys():
    """Print the packed hash key of every known kerning pair (Python 2)."""
    for k, v in get_pairs_dict().iteritems():
        print make_kern_pair_key(*k)
def hfuns_generator(n):
    """Endlessly yield HFunctionsDivision objects built from n random primes."""
    from primes import primes
    while True:
        chosen = [random.choice(primes) for _ in range(n)]
        yield HFunctionsDivision(*chosen)
def test_load_factor():
    """Search table sizes between 5003 and 6007 for the smallest one that
    still holds all kerning pairs (2 hash functions, 1 cell per bucket)."""
    from primes import primes
    attempts = 50
    lo = primes.index(5003)  # 3271
    hi = primes.index(6007)
    candidate_sizes = primes[lo:hi]
    pairs_dict = get_pairs_dict()
    items = [(make_kern_pair_key(*k), v) for k, v in pairs_dict.iteritems()]
    maximize_load_factor(attempts, items, candidate_sizes,
                         hfuns_generator(2), 1)
def maximize_load_factor(N, input_data, nr_buckets_lst, hfun_gen, nr_cells):
    """Binary-search nr_buckets_lst for small tables that can still hold
    input_data, doubling the number of attempts per level (Python 2).

    status bits: `found` marks a successful level, `reset` requests a fresh
    attempt budget for the next level.
    """
    found, reset = 1, 2
    low, high = 0, len(nr_buckets_lst)
    status = reset
    while high > low:
        if status & reset:
            max_factor = 0.0
            nr_tries = N
            mid = low + (high - low) / 2
        else:
            # previous level exhausted its tries: retry same size, twice
            # the budget
            nr_tries *= 2
            status = reset
        for i in xrange(nr_tries):
            hfuns = hfun_gen.next()
            htable = CuckooNest(nr_buckets_lst[mid], hfuns, nr_cells)
            try:
                for key, val in input_data:
                    htable.insert(key, val)
                print 'OK:', nr_buckets_lst[mid], htable.load_factor(), hfuns
                high = mid - 1
                status = found | reset
                break
            except TableFull:
                if htable.load_factor() > max_factor:
                    max_factor = htable.load_factor()
                status = 0
        # NOTE(review): with status set to 0 on failure above, this branch
        # looks unreachable; the original indentation was ambiguous --
        # verify against upstream before relying on the 'not found' path.
        if status == reset:
            print 'not found:', nr_buckets_lst[mid], ', load factor:', max_factor, \
                'target was:', len(input_data) / float(nr_buckets_lst[mid])
            low = mid + 1
def construct_hash_table():
    """Randomly search for hash functions that fit all kerning pairs into a
    fixed-size cuckoo table; returns the populated table on success
    (Python 2)."""
    pairs_dict = get_pairs_dict()
    found = False
    best_lf = 0.0
    hiter = HDivisionIter()
    for h, funs in hiter:
        try:
            for k, v in pairs_dict.iteritems():
                h.insert(make_kern_pair_key(*k), v)
            h.stats()
            found = True
            break
        except TableFull, exc:
            # report only improvements over the best load factor so far
            if h.load_factor() > best_lf:
                print 'Load factor: %.3f' % h.load_factor(), 'for', funs
                best_lf = h.load_factor()
    # verify
    if found:
        for k, v in pairs_dict.iteritems():
            assert v == h.lookup(make_kern_pair_key(*k))
        # a pair that is certainly absent must miss
        assert h.lookup(make_kern_pair_key(5000, 5000)) == None
        print 'OK for ' + str(funs)
        return h
    else:
        print 'FAILED'
def kern_frequency(fname):
    """Return (text_length, kerned_pairs, ratio) for the text file fname,
    using the kerning table built by construct_hash_table()."""
    table = construct_hash_table()
    text = " ".join(open(fname).read().split())
    kerned = 0
    for left, right in zip(text, text[1:]):
        if table.lookup(make_kern_pair_key(ord(left), ord(right))):
            kerned += 1
    return len(text), kerned, float(kerned) / (len(text) - 1)
if __name__ == "__main__":
#gen_cpp_jagbase()
#encoding_status()
#kern_stats()
#construct_hash_table()
test_load_factor()
#gen_primes(0x20002c) # redirect to primes.py
#print kern_frequency('/home/jarda/tmp/kant-critique-142.txt')
#test_is_prime()
#output_keys()
| |
"""
A Cython plugin for coverage.py
Requires the coverage package at least in version 4.0 (which added the plugin API).
"""
from __future__ import absolute_import
import re
import os.path
from collections import defaultdict
from coverage.plugin import CoveragePlugin, FileTracer, FileReporter # requires coverage.py 4.0+
from .Utils import find_root_package_dir, is_package_dir, open_source_file
from . import __version__
def _find_c_source(base_path):
if os.path.exists(base_path + '.c'):
c_file = base_path + '.c'
elif os.path.exists(base_path + '.cpp'):
c_file = base_path + '.cpp'
else:
c_file = None
return c_file
def _find_dep_file_path(main_file, file_path):
abs_path = os.path.abspath(file_path)
if file_path.endswith('.pxi') and not os.path.exists(abs_path):
# include files are looked up relative to the main source file
pxi_file_path = os.path.join(os.path.dirname(main_file), file_path)
if os.path.exists(pxi_file_path):
abs_path = os.path.abspath(pxi_file_path)
return abs_path
class Plugin(CoveragePlugin):
    """Coverage.py plugin that maps executed Cython module lines back to
    their .pyx/.pxd/.pxi sources via the generated C/C++ file.

    FIX(review): _find_source_files() returned a bare ``None`` for
    non-Cython C files although every caller unpacks a 2-tuple; it now
    returns ``(None, None)``.
    """
    # map from traced file paths to absolute file paths
    _file_path_map = None
    # map from traced file paths to corresponding C files
    _c_files_map = None
    # map from parsed C files to their content
    _parsed_c_files = None

    def sys_info(self):
        # shown by `coverage debug sys`
        return [('Cython version', __version__)]

    def file_tracer(self, filename):
        """
        Try to find a C source file for a file path found by the tracer.
        """
        if filename.startswith('<') or filename.startswith('memory:'):
            return None
        c_file = py_file = None
        filename = os.path.abspath(filename)
        if self._c_files_map and filename in self._c_files_map:
            c_file = self._c_files_map[filename][0]

        if c_file is None:
            c_file, py_file = self._find_source_files(filename)
            if not c_file:
                return None

            # parse all source file paths and lines from C file
            # to learn about all relevant source files right away (pyx/pxi/pxd)
            # FIXME: this might already be too late if the first executed line
            #        is not from the main .pyx file but a file with a different
            #        name than the .c file (which prevents us from finding the
            #        .c file)
            self._parse_lines(c_file, filename)

        if self._file_path_map is None:
            self._file_path_map = {}
        return CythonModuleTracer(filename, py_file, c_file, self._c_files_map, self._file_path_map)

    def file_reporter(self, filename):
        """Return a CythonModuleReporter for *filename*, or None when the
        file cannot be traced back to a Cython-generated C file."""
        # TODO: let coverage.py handle .py files itself
        #ext = os.path.splitext(filename)[1].lower()
        #if ext == '.py':
        #    from coverage.python import PythonFileReporter
        #    return PythonFileReporter(filename)

        filename = os.path.abspath(filename)
        if self._c_files_map and filename in self._c_files_map:
            c_file, rel_file_path, code = self._c_files_map[filename]
        else:
            c_file, _ = self._find_source_files(filename)
            if not c_file:
                return None  # unknown file
            rel_file_path, code = self._parse_lines(c_file, filename)
        return CythonModuleReporter(c_file, filename, rel_file_path, code)

    def _find_source_files(self, filename):
        """Return (c_file, py_file) for *filename*; either may be None."""
        basename, ext = os.path.splitext(filename)
        ext = ext.lower()
        if ext in ('.py', '.pyx', '.pxd', '.c', '.cpp'):
            pass
        elif ext in ('.so', '.pyd'):
            # strip the platform suffix of an extension module
            platform_suffix = re.search(r'[.]cpython-[0-9]+[a-z]*$', basename, re.I)
            if platform_suffix:
                basename = basename[:platform_suffix.start()]
        elif ext == '.pxi':
            # if we get here, it means that the first traced line of a Cython module was
            # not in the main module but in an include file, so try a little harder to
            # find the main source file
            self._find_c_source_files(os.path.dirname(filename), filename)
            if filename in self._c_files_map:
                return self._c_files_map[filename][0], None
        else:
            # none of our business
            return None, None

        c_file = filename if ext in ('.c', '.cpp') else _find_c_source(basename)
        if c_file is None:
            # a module "pkg/mod.so" can have a source file "pkg/pkg.mod.c"
            package_root = find_root_package_dir.uncached(filename)
            package_path = os.path.relpath(basename, package_root).split(os.path.sep)
            if len(package_path) > 1:
                test_basepath = os.path.join(os.path.dirname(filename), '.'.join(package_path))
                c_file = _find_c_source(test_basepath)

        py_source_file = None
        if c_file:
            py_source_file = os.path.splitext(c_file)[0] + '.py'
            if not os.path.exists(py_source_file):
                py_source_file = None

            try:
                with open(c_file, 'rb') as f:
                    if b'/* Generated by Cython ' not in f.read(30):
                        return None, None  # not a Cython file
            except (IOError, OSError):
                c_file = None

        return c_file, py_source_file

    def _find_c_source_files(self, dir_path, source_file):
        """
        Desperately parse all C files in the directory or its package parents
        (not re-descending) to find the (included) source file in one of them.
        """
        if not os.path.isdir(dir_path):
            return
        splitext = os.path.splitext
        for filename in os.listdir(dir_path):
            ext = splitext(filename)[1].lower()
            if ext in ('.c', '.cpp'):
                self._parse_lines(os.path.join(dir_path, filename), source_file)
                if source_file in self._c_files_map:
                    return
        # not found? then try one package up
        if is_package_dir(dir_path):
            self._find_c_source_files(os.path.dirname(dir_path), source_file)

    def _parse_lines(self, c_file, sourcefile):
        """
        Parse a Cython generated C/C++ source file and find the executable lines.
        Each executable line starts with a comment header that states source file
        and line number, as well as the surrounding range of source code lines.
        """
        if self._parsed_c_files is None:
            self._parsed_c_files = {}
        if c_file in self._parsed_c_files:
            code_lines = self._parsed_c_files[c_file]
        else:
            match_source_path_line = re.compile(r' */[*] +"(.*)":([0-9]+)$').match
            match_current_code_line = re.compile(r' *[*] (.*) # <<<<<<+$').match
            match_comment_end = re.compile(r' *[*]/$').match
            # struct/union/enum/class declarations carry a line marker but
            # are not executable
            not_executable = re.compile(
                r'\s*c(?:type)?def\s+'
                r'(?:(?:public|external)\s+)?'
                r'(?:struct|union|enum|class)'
                r'(\s+[^:]+|)\s*:'
            ).match

            code_lines = defaultdict(dict)
            filenames = set()
            with open(c_file) as lines:
                lines = iter(lines)
                for line in lines:
                    match = match_source_path_line(line)
                    if not match:
                        continue
                    filename, lineno = match.groups()
                    filenames.add(filename)
                    lineno = int(lineno)
                    for comment_line in lines:
                        match = match_current_code_line(comment_line)
                        if match:
                            code_line = match.group(1).rstrip()
                            if not_executable(code_line):
                                break
                            code_lines[filename][lineno] = code_line
                            break
                        elif match_comment_end(comment_line):
                            # unexpected comment format - false positive?
                            break

            self._parsed_c_files[c_file] = code_lines

        if self._c_files_map is None:
            self._c_files_map = {}

        for filename, code in code_lines.items():
            abs_path = _find_dep_file_path(c_file, filename)
            self._c_files_map[abs_path] = (c_file, filename, code)

        if sourcefile not in self._c_files_map:
            return (None,) * 2  # e.g. shared library file
        return self._c_files_map[sourcefile][1:]
class CythonModuleTracer(FileTracer):
    """
    Find the Python/Cython source file for a Cython module.
    """
    def __init__(self, module_file, py_file, c_file, c_files_map, file_path_map):
        super(CythonModuleTracer, self).__init__()
        self.module_file = module_file
        self.py_file = py_file
        self.c_file = c_file
        self._c_files_map = c_files_map
        self._file_path_map = file_path_map

    def has_dynamic_source_filename(self):
        # The reported source path depends on the frame being traced.
        return True

    def dynamic_source_filename(self, filename, frame):
        """
        Determine source file path. Called by the function call tracer.
        """
        source_file = frame.f_code.co_filename
        try:
            # Fast path: this code filename was already resolved earlier.
            return self._file_path_map[source_file]
        except KeyError:
            pass
        abs_path = os.path.abspath(source_file)

        if self.py_file and source_file[-3:].lower() == '.py':
            # always let coverage.py handle this case itself
            self._file_path_map[source_file] = self.py_file
            return self.py_file

        assert self._c_files_map is not None
        # Unknown source file: remember it without parsed line data for now.
        self._c_files_map.setdefault(abs_path, (self.c_file, source_file, None))
        self._file_path_map[source_file] = abs_path
        return abs_path
class CythonModuleReporter(FileReporter):
    """
    Provide detailed trace information for one source file to coverage.py.
    """
    def __init__(self, c_file, source_file, rel_file_path, code):
        super(CythonModuleReporter, self).__init__(source_file)
        self.name = rel_file_path
        self.c_file = c_file
        # _code maps line number -> source code line, parsed from the C file.
        self._code = code

    def lines(self):
        """
        Return set of line numbers that are possibly executable.
        """
        return set(self._code)

    def _iter_source_tokens(self):
        # Yield one token list per source line, in line order, padding
        # unknown lines with empty token lists so numbering stays aligned.
        current_line = 1
        for line_no, code_line in sorted(self._code.items()):
            while line_no > current_line:
                yield []
                current_line += 1
            yield [('txt', code_line)]
            current_line += 1

    def source(self):
        """
        Return the source code of the file as a string.
        """
        if os.path.exists(self.filename):
            with open_source_file(self.filename) as f:
                return f.read()
        else:
            return '\n'.join(
                (tokens[0][1] if tokens else '')
                for tokens in self._iter_source_tokens())

    def source_token_lines(self):
        """
        Iterate over the source code tokens.
        """
        if os.path.exists(self.filename):
            with open_source_file(self.filename) as f:
                for line in f:
                    yield [('txt', line.rstrip('\n'))]
        else:
            # BUG FIX: _iter_source_tokens() already yields complete token
            # lists ([] or [('txt', code)]).  The previous code wrapped each
            # of them in another [('txt', ...)] pair, producing malformed
            # nested tokens like [('txt', [('txt', 'code')])].
            for tokens in self._iter_source_tokens():
                yield tokens
def coverage_init(reg, options):
    # coverage.py plugin entry point: register the Cython file tracer plugin.
    reg.add_file_tracer(Plugin())
| |
import os
import sys
import pygame
from pygame.locals import *
import objects
from colors import *
import scramble
# set the window's position onscreen
x = 450
y = 100
os.environ['SDL_VIDEO_WINDOW_POS'] = '%d, %d' % (x, y)

pygame.init()
clock = pygame.time.Clock()
FPS = 40

times_large = pygame.font.SysFont("Times New Roman", 72)
times_small = pygame.font.SysFont("Times New Roman", 18, bold=True)

# draw constants
# a lot of the constants are related to one another to create
# proportionality
SCREEN_WIDTH, SCREEN_HEIGHT = 480, 600
# BUG FIX: use floor division so these stay integers on Python 3 -- true
# division produces floats, which breaks range(field.num_rows) and cell
# geometry.  All divisions are exact here (300, 20, 30), so the values are
# unchanged from the Python 2 behavior.
FIELD_WIDTH = SCREEN_WIDTH * 5 // 8
NUM_COLUMNS = 15
CELL_HEIGHT = CELL_WIDTH = FIELD_WIDTH // NUM_COLUMNS
NUM_ROWS = SCREEN_HEIGHT // CELL_HEIGHT
DISPLAYSURFACE = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT))
pygame.display.set_caption("TriteTris!")
def terminate():
    """Shut down pygame cleanly and exit the process."""
    pygame.quit()
    sys.exit()
# drawing functions
def draw_screen(surface, field):
    '''
    Draws the given field and its current state to the display surface.
    '''
    # Paint every occupied cell in its block's color.
    for i in range(field.num_rows):
        for j in range(field.num_columns):
            if field.cells[i][j]:
                cell_color = field.cells[i][j]['color']
                block_rect = pygame.Rect(j * CELL_WIDTH, i * CELL_HEIGHT, \
                                         CELL_WIDTH, CELL_HEIGHT)
                draw_outlined_rect(surface, cell_color, block_rect)
    # field boundaries
    pygame.draw.line(surface, BLACK, (FIELD_WIDTH, 0), \
                     (FIELD_WIDTH, SCREEN_HEIGHT))
    pygame.draw.line(surface, BLACK, (FIELD_WIDTH, (SCREEN_HEIGHT / 4) * 3), \
                     (SCREEN_WIDTH, (SCREEN_HEIGHT / 4) * 3))
    # draw the next block up
    # Anchor point for the preview box in the sidebar's lower panel.
    nb_boundaries = (((SCREEN_WIDTH + FIELD_WIDTH) / 2) - CELL_WIDTH, (SCREEN_HEIGHT / 8 * 7) - CELL_HEIGHT)
    draw_next_block(surface, field, nb_boundaries)
    next_text = times_small.render("Next Block:", True, BLACK)
    next_text_rect = next_text.get_rect()
    next_text_rect.centerx = nb_boundaries[0] - CELL_WIDTH
    next_text_rect.centery = nb_boundaries[1] - (2 * CELL_HEIGHT)
    surface.blit(next_text, next_text_rect)
def draw_outlined_rect(surface, color, rect):
    """Draw a filled rectangle with a 1px black outline."""
    pygame.draw.rect(surface, color, rect)
    pygame.draw.rect(surface, BLACK, rect, 1)
def draw_next_block(surface, field, init_loc):
    """Render a preview of the upcoming block anchored at init_loc."""
    next_block = field.block_queue[0]
    offsets = next_block.get_offsets()
    next_block.locations = [init_loc]
    for offset in offsets:
        # NOTE(review): the offset components appear swapped relative to
        # init_loc (offset[1] scaled by CELL_HEIGHT, offset[0] by
        # CELL_WIDTH) -- presumably offsets are (row, col) while init_loc
        # is pixel (x, y); confirm against objects.Block.get_offsets.
        new_location = (init_loc[0] + offset[1] * CELL_HEIGHT, \
                        init_loc[1] + offset[0] * CELL_WIDTH)
        next_block.locations.append(new_location)
    for location in next_block.locations:
        row, column = location
        rect = pygame.Rect(row, column, CELL_WIDTH, CELL_HEIGHT)
        draw_outlined_rect(surface, next_block.color, rect)
def tick(field):
    """
    Advance the game by one gravity step: settle a landed block, clear any
    completed rows, spawn a new block if none is active, then move the
    active block down one row.

    Returns False when a freshly spawned block cannot be placed (the field
    is full, i.e. game over); otherwise True.
    """
    if field.active_block_has_landed():
        field.deactivate_active_block()

    # Replace each completely filled row with a fresh empty row on top.
    # Pop+insert keeps the list length constant, so later rows stay at
    # their original indices and multiple clears work in a single pass.
    for row_index, row in enumerate(field.cells):
        if all(row):
            field.cells.pop(row_index)
            field.cells.insert(0, [None] * field.num_columns)

    if not field.active_block:
        field.get_block_from_queue()
        placed = field.place_active_block((1, 4))
        if not placed:
            return False

    field.move_active_block([1, 0])
    return True
def intro():
    """
    Show the title menu; arrow keys move the highlight, RETURN activates
    the selected option, ESC or the window close button quits.
    """
    options = {'Play': play, 'Quit': terminate}
    # BUG FIX: dict.keys() returns a view on Python 3 and has no .sort();
    # build a sorted list of the option names instead.
    option_list = sorted(options)
    curr_opt_index = 0
    option_text = []
    # Pre-render each menu entry, stacked vertically 72px apart.
    for i, key in enumerate(option_list):
        text = times_large.render(key, True, BLACK)
        rect = text.get_rect()
        rect.centerx = SCREEN_WIDTH // 2
        rect.centery = (3 * SCREEN_HEIGHT // 8) + (72 * i)
        option_text.append({'key': key, 'text': text, 'rect': rect})
    selected = option_text[0]
    while True:
        for event in pygame.event.get():
            if event.type == QUIT:
                terminate()
            if event.type == KEYUP:
                if event.key == K_ESCAPE:
                    terminate()
                if event.key == K_UP:
                    curr_opt_index = (curr_opt_index - 1) % len(option_list)
                    selected = option_text[curr_opt_index]
                if event.key == K_DOWN:
                    curr_opt_index = (curr_opt_index + 1) % len(option_list)
                    selected = option_text[curr_opt_index]
                if event.key == K_RETURN:
                    # Invoke the handler bound to the highlighted option.
                    options[selected['key']]()
        DISPLAYSURFACE.fill(LIGHT_GREY)
        for entry in option_text:
            if entry['text'] == selected['text']:
                # Bullet marker next to the highlighted option.
                pygame.draw.circle(DISPLAYSURFACE, BLACK,
                                   (entry['rect'].left - 25, entry['rect'].centery), 15)
            DISPLAYSURFACE.blit(entry['text'], entry['rect'])
        pygame.display.update()
        # Cap the menu loop at the game frame rate so it doesn't busy-spin.
        clock.tick(FPS)
def play():
    """Run the main game loop until the player quits; handles pause,
    keyboard input, gravity ticks and game-over display."""
    # load font and messages
    pause_msg = "PAUSED"
    scrambler = scramble.Scrambler(pause_msg)
    pause_text = times_large.render(pause_msg, True, BLACK)
    pause_text_rect = pause_text.get_rect()
    pause_text_rect.centerx = SCREEN_WIDTH / 2
    pause_text_rect.centery = SCREEN_HEIGHT / 2
    game_over_text = times_large.render("Game Over", True, BLACK)
    game_over_text_rect = game_over_text.get_rect()
    game_over_text_rect.center = pause_text_rect.center
    # game-specific objects
    # Maps movement keys to [row, column] deltas for the active block.
    directions = {K_LEFT:[0,-1], K_RIGHT:[0,1]}
    field = objects.Field(NUM_ROWS, NUM_COLUMNS)
    field.place_active_block((1,4))
    # time_counter accumulates FPS per frame; a gravity tick fires once it
    # reaches tick_delay.
    time_counter = 0
    tick_delay = 600
    pause = False
    game_over = False
    while True:
        # Advance game time only while actively playing.
        if not (pause or game_over):
            time_counter += FPS
            if time_counter >= tick_delay:
                if not tick(field):
                    game_over = True
                time_counter = 0
        for event in pygame.event.get():
            if event.type == QUIT:
                terminate()
            if event.type == KEYUP:
                if event.key == K_ESCAPE:
                    terminate()
                if event.key == K_p:
                    # Toggle pause; each pause shows a freshly scrambled
                    # version of the pause message.
                    if not game_over:
                        pause = not pause
                    if pause:
                        scrambled_msg = scrambler.get_scrambled_word()
                        pause_text = times_large.render(scrambled_msg, True, BLACK)
                if not (pause or game_over):
                    if event.key == K_UP:
                        field.rotate_active_block()
                    if event.key in directions:
                        field.move_active_block(directions[event.key])
                    if event.key == K_DOWN:
                        # Soft drop: force the next gravity tick immediately.
                        time_counter = tick_delay
                    if event.key == K_SPACE:
                        field.drop_active_block()
        DISPLAYSURFACE.fill(LIGHT_GREY)
        if pause:
            DISPLAYSURFACE.blit(pause_text, pause_text_rect)
        else:
            draw_screen(DISPLAYSURFACE, field)
        if game_over:
            DISPLAYSURFACE.blit(game_over_text, game_over_text_rect)
        pygame.display.update()
        clock.tick(FPS)
if __name__ == '__main__':
    # Start at the title menu when run as a script.
    intro()
| |
from datetime import timedelta, datetime
import nomad
from typing import Dict
from django.conf import settings
from django.db.models import Count, Prefetch
from django.db.models.aggregates import Avg, Sum
from django.db.models.expressions import F, Q
from django.http import Http404, HttpResponse, HttpResponseRedirect, HttpResponseBadRequest
from django.shortcuts import get_object_or_404
from django.utils import timezone
from django_filters.rest_framework import DjangoFilterBackend
import django_filters
from rest_framework import status, filters, generics
from rest_framework.exceptions import APIException
from rest_framework.exceptions import ValidationError
from rest_framework.pagination import LimitOffsetPagination
from rest_framework.response import Response
from rest_framework.reverse import reverse
from rest_framework.settings import api_settings
from rest_framework.views import APIView
from data_refinery_api.serializers import (
ComputationalResultSerializer,
DetailedExperimentSerializer,
DetailedSampleSerializer,
ExperimentSerializer,
InstitutionSerializer,
OrganismIndexSerializer,
OrganismSerializer,
PlatformSerializer,
ProcessorSerializer,
SampleSerializer,
# Job
DownloaderJobSerializer,
ProcessorJobSerializer,
SurveyJobSerializer,
# Dataset
APITokenSerializer,
CreateDatasetSerializer,
DatasetDetailsSerializer,
DatasetSerializer,
)
from data_refinery_common.job_lookup import ProcessorPipeline
from data_refinery_common.message_queue import send_job
from data_refinery_common.models import (
APIToken,
ComputationalResult,
ComputedFile,
Dataset,
DownloaderJob,
Experiment,
ExperimentSampleAssociation,
Organism,
OrganismIndex,
OriginalFile,
Processor,
ProcessorJob,
ProcessorJobDatasetAssociation,
Sample,
SurveyJob,
)
from data_refinery_common.utils import get_env_variable, get_active_volumes
##
# Custom Views
##
class PaginatedAPIView(APIView):
    """APIView base class with DRF pagination helpers mixed in."""

    pagination_class = api_settings.DEFAULT_PAGINATION_CLASS

    @property
    def paginator(self):
        """
        The paginator instance associated with the view, or `None`.
        """
        # Instantiate lazily on first access and cache on the instance.
        if not hasattr(self, '_paginator'):
            self._paginator = (
                None if self.pagination_class is None else self.pagination_class()
            )
        return self._paginator

    def paginate_queryset(self, queryset):
        """
        Return a single page of results, or `None` if pagination is disabled.
        """
        if self.paginator is None:
            return None
        return self.paginator.paginate_queryset(queryset, self.request, view=self)

    def get_paginated_response(self, data):
        """
        Return a paginated style `Response` object for the given output data.
        """
        assert self.paginator is not None
        return self.paginator.get_paginated_response(data)
##
# Search and Filter
##
class ExperimentFilter(django_filters.FilterSet):
    """Declarative filter set for Experiment search endpoints."""
    # Base queryset used to populate the model-choice filters below.
    queryset = Experiment.processed_public_objects.all()
    has_publication = django_filters.BooleanFilter(field_name="has_publication")
    submitter_institution = \
        django_filters.ModelMultipleChoiceFilter(field_name="submitter_institution",
                                                 to_field_name="submitter_institution",
                                                 queryset=queryset)
    # always_filter=False: skip this filter entirely when no value is supplied.
    submitter_institution.always_filter = False
    technology = django_filters.ModelMultipleChoiceFilter(field_name="technology",
                                                          to_field_name="technology",
                                                          queryset=queryset)
    technology.always_filter = False
    source_first_published = django_filters.DateTimeFilter(field_name="source_first_published")
    organisms__name = django_filters.ModelMultipleChoiceFilter(field_name="organisms__name",
                                                               to_field_name="name",
                                                               queryset=Organism.objects.all())
    organisms__name.always_filter = False
    samples__platform_name = \
        django_filters.ModelMultipleChoiceFilter(field_name="samples__platform_name",
                                                 to_field_name="platform_name",
                                                 queryset=Sample.objects.all())
    samples__platform_name.always_filter = False

    class Meta:
        model = Experiment
        fields = [ 'has_publication',
                   'submitter_institution',
                   'technology',
                   'source_first_published',
                   'organisms__name',
                   'samples__platform_name']

    def to_html(self, request, queryset, view):
        # Don't render the FKs in browsable view
        return ''
# Via: https://github.com/encode/django-rest-framework/issues/3905#issuecomment-294391278
class NoMarkupDjangoFilterBackend(DjangoFilterBackend):
    """DjangoFilterBackend that suppresses filter-form HTML in the browsable API."""
    def to_html(self, request, queryset, view):
        # We want this, but currently it incurs a huge performance penality on ChoiceFields with 1000+ choices
        return ''
# ListAPIView is read-only!
class SearchAndFilter(generics.ListAPIView):
    """
    Search and filter for experiments and samples.
    Ex: search/?search=human&has_publication=True
    Interactive filtering allows users to explore results more easily. It can be enabled using the parameter `filter_order`.
    The filter names should be sent separated by commas and depending on the order in which the filters are applied the
    number of samples per filter will be different.
    """
    serializer_class = ExperimentSerializer
    pagination_class = LimitOffsetPagination
    filter_backends = (NoMarkupDjangoFilterBackend, filters.SearchFilter, filters.OrderingFilter)
    filter_class = ExperimentFilter

    # Ordering
    ordering_fields = ('total_samples_count', 'id', 'created_at', 'source_first_published', 'accession_code',)
    ordering = ('-total_samples_count',)

    def filter_samples_count(self, queryset, name, value):
        # Custom filter hook: exact match on the experiment's sample count.
        return queryset.filter(total_samples_count=value)

    # via http://www.django-rest-framework.org/api-guide/filtering/#searchfilter
    # '^' Starts-with search.
    # '=' Exact matches.
    # '@' Full-text search.
    # '$' Regex search.
    search_fields = ( 'title',
                      'description',
                      'accession_code',
                      'alternate_accession_code',
                      'protocol_description',
                      'publication_title',
                      'publication_doi',
                      'publication_authors',
                      'pubmed_id',
                      'submitter_institution',
                      'experimentannotation__data',
                      # '@sample__accession_code',
                      # '@sample__platform_name',
                      # '@sample__platform_accession_code',
                      # '@sample__organism__name',
                      # '@sample__sex',
                      # '@sample__specimen_part',
                      # '@sample__disease',
                      # '@sample__compound'
                    )
    filter_fields = ('has_publication', 'platform_name')

    def get_queryset(self):
        """Return the base queryset with serializer eager loading applied."""
        # For Prod:
        queryset = Experiment.processed_public_objects.all()

        # For Dev:
        # queryset = Experiment.objects.all()

        # Set up eager loading to avoid N+1 selects
        queryset = self.get_serializer_class().setup_eager_loading(queryset)
        return queryset

    def list(self, request, *args, **kwargs):
        """ Adds counts on certain filter fields to result JSON."""
        # BUG FIX: args/kwargs must be unpacked -- the old call passed the
        # tuple and the dict as two positional arguments instead.
        response = super(SearchAndFilter, self).list(request, *args, **kwargs)

        filter_param_names = ['organisms__name', 'technology', 'has_publication', 'platform']
        # mapping between parameter names and category names
        filter_name_map = {
            'technology': 'technology',
            'has_publication': 'publication',
            'organisms__name': 'organism',
            'platform': 'platforms'
        }

        # With interactive filtering, the filters in the last group are calculated differently, since they should stay unchanged when applied.
        # ref https://github.com/AlexsLemonade/refinebio-frontend/issues/374#issuecomment-436373470
        # This is only enabled when the parameter `filter_order` is provided (eg `filter_order=technology,platform`)
        last_filter = self.get_last_filter()
        if last_filter and last_filter in filter_param_names:
            # 1. Calculate all filters except the one in the last category
            queryset = self.search_queryset(request.query_params)
            filter_names = [f for f in filter_param_names if f != last_filter]
            response.data['filters'] = self.get_filters(queryset, filter_names)

            # 2. Calculate the filters in the last category.
            # We use a queryset built with all filters except those in the last category
            params_without_last_category = request.query_params.copy()
            params_without_last_category.pop(last_filter)
            queryset_without_last_category = self.search_queryset(params_without_last_category)
            last_category_filters = self.get_filters(queryset_without_last_category, [last_filter])
            response.data['filters'][filter_name_map[last_filter]] = last_category_filters[filter_name_map[last_filter]]
        else:
            # Otherwise calculate the filters with the search term
            response.data['filters'] = self.get_filters(self.search_queryset(), filter_param_names)
        return response

    def get_last_filter(self):
        """Return the last filter name from ?filter_order=, or False if absent/invalid."""
        request = self.request
        if 'filter_order' not in request.query_params:
            return False
        filter_order = request.query_params['filter_order']
        last_filter = filter_order.split(',')[-1:][0]
        # Ensure the last filter is valid and one of the applied filters
        if not last_filter or last_filter not in request.query_params:
            return False
        return last_filter

    def get_filters(self, queryset, filters_to_calculate):
        """Count matching experiments per filter value for the requested categories."""
        result = {
            'technology': {},
            'publication': {},
            'organism': {},
            'platforms': {}
        }

        if 'technology' in filters_to_calculate:
            # Technology
            techs = queryset.values('technology').annotate(Count('technology', unique=True))
            for tech in techs:
                if not tech['technology'] or not tech['technology'].strip():
                    continue
                result['technology'][tech['technology']] = tech['technology__count']

        if 'has_publication' in filters_to_calculate:
            # Publication
            pubs = queryset.values('has_publication').annotate(Count('has_publication', unique=True))
            for pub in pubs:
                if pub['has_publication']:
                    result['publication']['has_publication'] = pub['has_publication__count']

        if 'organisms__name' in filters_to_calculate:
            # Organisms
            organisms = queryset.values('organisms__name').annotate(Count('organisms__name', unique=True))
            for organism in organisms:
                # This experiment has no ExperimentOrganism-association, which is bad.
                # This information may still live on the samples though.
                if not organism['organisms__name']:
                    continue
                result['organism'][organism['organisms__name']] = organism['organisms__name__count']

        if 'platform' in filters_to_calculate:
            # Platforms
            platforms = queryset.values('samples__platform_name').annotate(Count('samples__platform_name', unique=True))
            for plat in platforms:
                if plat['samples__platform_name']:
                    result['platforms'][plat['samples__platform_name']] = plat['samples__platform_name__count']

        return result

    def search_queryset(self, filter_params = False):
        """Apply the (optional) filter params plus the search term to the base queryset."""
        if filter_params:
            queryset = ExperimentFilter(filter_params, queryset=self.get_queryset()).qs
        else:
            queryset = self.get_queryset()
        return filters.SearchFilter().filter_queryset(self.request, queryset, view=self)
##
# Dataset
##
class CreateDatasetView(generics.CreateAPIView):
    """ Creates and returns new Dataset. """
    queryset = Dataset.objects.all()
    serializer_class = CreateDatasetSerializer
class DatasetView(generics.RetrieveUpdateAPIView):
    """ View and modify a single Dataset. Set `start` to `true` along with a valid
    activated API token (from /token/) to begin smashing and delivery.
    You must also supply `email_address` with `start`, though this will never be serialized back to you.
    """
    queryset = Dataset.objects.all()
    serializer_class = DatasetSerializer
    lookup_field = 'id'

    def get_serializer_class(self):
        # ?details=... switches to the heavier serializer with nested details.
        if 'details' in self.request.query_params:
            return DatasetDetailsSerializer
        return self.serializer_class

    def perform_update(self, serializer):
        """ If `start` is set, fire off the job. Disables dataset data updates after that. """
        # Snapshot pre-update state so critical fields can be frozen below.
        old_object = self.get_object()
        old_data = old_object.data
        old_aggregate = old_object.aggregate_by
        already_processing = old_object.is_processing
        new_data = serializer.validated_data

        if old_object.is_processed:
            raise APIException("You may not update Datasets which have already been processed")

        if new_data.get('start'):
            # Make sure we have a valid activated token.
            token_id = self.request.data.get('token_id')
            try:
                token = APIToken.objects.get(id=token_id, is_activated=True)
            except Exception: # General APIToken.DoesNotExist or django.core.exceptions.ValidationError
                raise APIException("You must provide an active API token ID")

            # We could be more aggressive with requirements checking here, but
            # there could be use cases where you don't want to supply an email.
            supplied_email_address = self.request.data.get('email_address', None)

            if not already_processing:
                # Create and dispatch the new job.
                processor_job = ProcessorJob()
                processor_job.pipeline_applied = "SMASHER"
                processor_job.ram_amount = 4096
                processor_job.save()

                pjda = ProcessorJobDatasetAssociation()
                pjda.processor_job = processor_job
                pjda.dataset = old_object
                pjda.save()

                job_sent = False

                # Save first so any supplied email address is persisted
                # before the job is dispatched.
                obj = serializer.save()
                if supplied_email_address is not None:
                    if obj.email_address != supplied_email_address:
                        obj.email_address = supplied_email_address
                        obj.save()

                try:
                    # Hidden method of non-dispatching for testing purposes.
                    if not self.request.data.get('no_send_job', False):
                        job_sent = send_job(ProcessorPipeline.SMASHER, processor_job)
                    else:
                        # We didn't actually send it, but we also didn't want to.
                        job_sent = True
                except Exception:
                    # job_sent is already false and the exception has
                    # already been logged by send_job, so nothing to
                    # do other than catch the exception.
                    pass

                if not job_sent:
                    raise APIException("Unable to queue download job. Something has gone"
                                       " wrong and we have been notified about it.")

                serializer.validated_data['is_processing'] = True
                obj = serializer.save()
                return obj

        # Don't allow critical data updates to jobs that have already been submitted,
        # but do allow email address updating.
        if already_processing:
            serializer.validated_data['data'] = old_data
            serializer.validated_data['aggregate_by'] = old_aggregate
        serializer.save()
class DatasetStatsView(APIView):
    """ Get stats for a given dataset. Ex:
    {
        "HOMO_SAPIENS": {
            "num_experiments": 5,
            "num_samples": 55 },
        "GALLUS_GALLUS": {
            "num_experiments": 5,
            "num_samples": 55 },
    }
    """

    def get(self, request, id):
        dataset = get_object_or_404(Dataset, id=id)
        stats = {}
        # dataset.data maps experiment accession codes -> lists of sample
        # accession codes.
        experiments = Experiment.objects.filter(accession_code__in=dataset.data.keys())

        # Find all the species for these experiments
        for experiment in experiments:
            species_names = experiment.organisms.values_list('name')
            for species_name in species_names:
                species = stats.get(species_name[0], {"num_experiments": 0, "num_samples": 0})
                species['num_experiments'] = species['num_experiments'] + 1
                stats[species_name[0]] = species

        # Count the samples
        # NOTE(review): value[0] takes only the FIRST sample accession of
        # each experiment's list; SampleList flattens all of them. This
        # looks like a potential undercount -- confirm intended behavior.
        all_sample_accessions = [value[0] for value in dataset.data.values()]
        empty_species = []
        for species in stats.keys():
            samples = Sample.objects.filter(accession_code__in=all_sample_accessions, organism__name=species)
            stats[species]['num_samples'] = len(samples)
            if stats[species]['num_samples'] == 0:
                empty_species.append(species)

        # Delete empty associations
        for species in empty_species:
            del stats[species]

        return Response(stats)
class APITokenView(APIView):
    """
    Return this response to this endpoint with `is_activated: true` to activate this API token.
    You must include an activated token's ID to download processed datasets.
    """

    def get(self, request, id=None):
        """ Create a new token, or fetch a token by its ID. """
        if id:
            token = get_object_or_404(APIToken, id=id)
        else:
            # No ID supplied: mint and persist a brand-new token.
            token = APIToken()
            token.save()
        return Response(APITokenSerializer(token).data)

    def post(self, request, id=None):
        """ Given a token's ID, activate it."""
        # The ID always comes from the request body, not the URL.
        token_id = request.data.get('id', None)
        activated_token = get_object_or_404(APIToken, id=token_id)
        activated_token.is_activated = request.data.get('is_activated', False)
        activated_token.save()
        return Response(APITokenSerializer(activated_token).data)
##
# Experiments
##
class ExperimentList(PaginatedAPIView):
    """
    List all Experiments.
    Append the pk to the end of this URL to see a detail view.
    """

    def get(self, request, format=None):
        # Remaining query params become ORM filters; pagination params are
        # consumed by the paginator instead.
        query_filters = request.query_params.dict()
        query_filters.pop('limit', None)
        query_filters.pop('offset', None)
        experiments = Experiment.public_objects.filter(**query_filters)

        page = self.paginate_queryset(experiments)
        if page is None:
            return Response(ExperimentSerializer(experiments, many=True).data)
        return self.get_paginated_response(ExperimentSerializer(page, many=True).data)
class ExperimentDetail(APIView):
    """
    Retrieve an Experiment instance.
    """
    def get_object(self, pk):
        """Look up an experiment by primary key, falling back to accession code.

        Raises Http404 when neither lookup succeeds.
        """
        try:
            return Experiment.public_objects.get(pk=pk)
        except Experiment.DoesNotExist:
            raise Http404
        except Exception:
            # `pk` was not usable as a primary key (e.g. it is an accession
            # code like "GSE1234"); retry the lookup by accession code.
            try:
                return Experiment.public_objects.get(accession_code=pk)
            except Experiment.DoesNotExist:
                raise Http404
        # NOTE: the previous trailing `return HttpResponseBadRequest(...)`
        # was unreachable (every path above returns or raises) and was removed.

    def get(self, request, pk, format=None):
        experiment = self.get_object(pk)
        serializer = DetailedExperimentSerializer(experiment)
        return Response(serializer.data)
##
# Samples
##
class SampleList(PaginatedAPIView):
    """
    List all Samples.
    Pass in a list of pk to an ids query parameter to filter by id.
    Also accepts:
    - `dataset_id` field instead of a list of accession codes
    - `experiment_accession_code` to return the samples associated with a given experiment
    Append the pk or accession_code to the end of this URL to see a detail view.
    """

    def get(self, request, format=None):
        filter_dict = request.query_params.dict()
        # Pop every parameter that is not a direct ORM filter.
        filter_dict.pop('limit', None)
        filter_dict.pop('offset', None)
        order_by = filter_dict.pop('order_by', None)
        ids = filter_dict.pop('ids', None)
        filter_by = filter_dict.pop('filter_by', None)

        if ids is not None:
            # ?ids=1,2,3 -> primary key filter
            ids = [ int(x) for x in ids.split(',')]
            filter_dict['pk__in'] = ids

        experiment_accession_code = filter_dict.pop('experiment_accession_code', None)
        if experiment_accession_code:
            experiment = get_object_or_404(Experiment.objects.values('id'), accession_code=experiment_accession_code)
            filter_dict['experiments__in'] = [experiment['id']]

        accession_codes = filter_dict.pop('accession_codes', None)
        if accession_codes:
            accession_codes = accession_codes.split(',')
            filter_dict['accession_code__in'] = accession_codes

        dataset_id = filter_dict.pop('dataset_id', None)
        if dataset_id:
            dataset = get_object_or_404(Dataset, id=dataset_id)
            # Python doesn't provide a prettier way of doing this that I know about.
            # Flatten all sample accessions across the dataset's experiments.
            filter_dict['accession_code__in'] = [item for sublist in dataset.data.values() for item in sublist]

        # Prefetch related rows up front to avoid N+1 selects during
        # serialization; processed samples are listed first by default.
        samples = Sample.public_objects \
            .prefetch_related('sampleannotation_set') \
            .prefetch_related('organism') \
            .prefetch_related('results') \
            .prefetch_related('results__processor') \
            .prefetch_related('results__computationalresultannotation_set') \
            .prefetch_related('results__computedfile_set') \
            .filter(**filter_dict) \
            .order_by('-is_processed')
        if order_by:
            samples = samples.order_by(order_by)

        if filter_by:
            # Free-text `filter_by` is matched by substring containment
            # against a fixed set of metadata fields.
            samples = samples.filter( Q(sex__contains=filter_by) |
                                      Q(age__contains=filter_by) |
                                      Q(specimen_part__contains=filter_by) |
                                      Q(genotype__contains=filter_by) |
                                      Q(disease__contains=filter_by) |
                                      Q(disease_stage__contains=filter_by) |
                                      Q(cell_line__contains=filter_by) |
                                      Q(treatment__contains=filter_by) |
                                      Q(race__contains=filter_by) |
                                      Q(subject__contains=filter_by) |
                                      Q(compound__contains=filter_by) |
                                      Q(time__contains=filter_by) |
                                      Q(sampleannotation__data__contains=filter_by)
                                    )

        page = self.paginate_queryset(samples)
        if page is not None:
            serializer = DetailedSampleSerializer(page, many=True)
            return self.get_paginated_response(serializer.data)
        else:
            serializer = DetailedSampleSerializer(samples, many=True)
            return Response(serializer.data)
class SampleDetail(APIView):
    """
    Retrieve a Sample instance.
    """

    def get_object(self, pk):
        # Only publicly visible samples can be fetched.
        try:
            return Sample.public_objects.get(pk=pk)
        except Sample.DoesNotExist:
            raise Http404

    def get(self, request, pk, format=None):
        sample = self.get_object(pk)
        return Response(DetailedSampleSerializer(sample).data)
##
# Processor
##
class ProcessorList(APIView):
    """List all processors."""

    def get(self, request, format=None):
        # Unpaginated: returns every processor row.
        processors = Processor.objects.all()
        serializer = ProcessorSerializer(processors, many=True)
        return Response(serializer.data)
##
# Results
##
class ResultsList(PaginatedAPIView):
    """
    List all ComputationalResults.
    Append the pk to the end of this URL to see a detail view.
    """

    def get(self, request, format=None):
        # Remaining query params become ORM filters; pagination params are
        # consumed by the paginator instead.
        query_filters = request.query_params.dict()
        query_filters.pop('limit', None)
        query_filters.pop('offset', None)
        results = ComputationalResult.public_objects.filter(**query_filters)

        page = self.paginate_queryset(results)
        if page is None:
            return Response(ComputationalResultSerializer(results, many=True).data)
        return self.get_paginated_response(
            ComputationalResultSerializer(page, many=True).data)
##
# Search Filter Models
##
class OrganismList(APIView):
    """
    Unpaginated list of all the available organisms
    """

    def get(self, request, format=None):
        organisms = Organism.objects.all()
        serializer = OrganismSerializer(organisms, many=True)
        return Response(serializer.data)
class PlatformList(APIView):
    """
    Unpaginated list of all the available "platform" information
    """

    def get(self, request, format=None):
        # Distinct (accession code, name) pairs across all public samples.
        samples = Sample.public_objects.all().values("platform_accession_code", "platform_name").distinct()
        serializer = PlatformSerializer(samples, many=True)
        return Response(serializer.data)
class InstitutionList(APIView):
    """
    Unpaginated list of all the available "institution" information
    """

    def get(self, request, format=None):
        # Distinct submitter institutions across all public experiments.
        experiments = Experiment.public_objects.all().values("submitter_institution").distinct()
        serializer = InstitutionSerializer(experiments, many=True)
        return Response(serializer.data)
##
# Jobs
##
class SurveyJobList(PaginatedAPIView):
    """
    List of all SurveyJob.
    Ex:
    - ?start_time__lte=2018-03-23T15:29:40.848381Z
    - ?start_time__lte=2018-03-23T15:29:40.848381Z&start_time__gte=2018-03-23T14:29:40.848381Z
    - ?success=True
    Works with required 'limit' and 'offset' params.
    """

    def get(self, request, format=None):
        filter_dict = request.query_params.dict()
        # NOTE(review): max() makes 100 a *floor*, not a cap -- a requested
        # limit below 100 is ignored and larger limits are unbounded. If the
        # intent was to cap page size this should be min(); confirm.
        limit = max(int(filter_dict.pop('limit', 100)), 100)
        offset = int(filter_dict.pop('offset', 0))
        jobs = SurveyJob.objects.filter(**filter_dict)[offset:(offset + limit)]
        serializer = SurveyJobSerializer(jobs, many=True)
        return Response(serializer.data)
class DownloaderJobList(PaginatedAPIView):
    """
    List of all DownloaderJob
    """

    def get(self, request, format=None):
        filter_dict = request.query_params.dict()
        # NOTE(review): max() makes 100 a floor rather than a cap on page
        # size -- likely intended min(); confirm.
        limit = max(int(filter_dict.pop('limit', 100)), 100)
        offset = int(filter_dict.pop('offset', 0))
        jobs = DownloaderJob.objects.filter(**filter_dict)[offset: offset + limit]
        serializer = DownloaderJobSerializer(jobs, many=True)
        return Response(serializer.data)
class ProcessorJobList(PaginatedAPIView):
    """
    List of all ProcessorJobs
    """

    def get(self, request, format=None):
        filter_dict = request.query_params.dict()
        # NOTE(review): max() makes 100 a floor rather than a cap on page
        # size -- likely intended min(); confirm.
        limit = max(int(filter_dict.pop('limit', 100)), 100)
        offset = int(filter_dict.pop('offset', 0))
        jobs = ProcessorJob.objects.filter(**filter_dict)[offset: offset + limit]
        serializer = ProcessorJobSerializer(jobs, many=True)
        return Response(serializer.data)
###
# Statistics
###
class Stats(APIView):
"""
Statistics about the health of the system.
?range=week includes statics for the last week
"""
    def get(self, request, format=None):
        """Assemble job, data-volume and Nomad statistics into one response."""
        # Optional ?range=... (e.g. "week") restricts the job/object stats.
        range_param = request.query_params.dict().pop('range', None)

        data = {}
        data['survey_jobs'] = self._get_job_stats(SurveyJob.objects, range_param)
        data['downloader_jobs'] = self._get_job_stats(DownloaderJob.objects, range_param)
        data['processor_jobs'] = self._get_job_stats(ProcessorJob.objects, range_param)
        data['samples'] = self._get_object_stats(Sample.objects, range_param)
        data['experiments'] = self._get_object_stats(Experiment.objects, range_param)
        data['input_data_size'] = self._get_input_data_size()
        data['output_data_size'] = self._get_output_data_size()
        data['active_volumes'] = list(get_active_volumes())

        try:
            nomad_stats = self._get_nomad_jobs_breakdown()
            data['nomad_running_jobs'] = nomad_stats["nomad_running_jobs"]
            data['nomad_pending_jobs'] = nomad_stats["nomad_pending_jobs"]
            data['nomad_running_jobs_by_type'] = nomad_stats["nomad_running_jobs_by_type"]
            data['nomad_pending_jobs_by_type'] = nomad_stats["nomad_pending_jobs_by_type"]
            data['nomad_running_jobs_by_volume'] = nomad_stats["nomad_running_jobs_by_volume"]
            data['nomad_pending_jobs_by_volume'] = nomad_stats["nomad_pending_jobs_by_volume"]
        except nomad.api.exceptions.BaseNomadException:
            # Nomad is not available right now, so exclude these.
            pass

        return Response(data)
def _aggregate_nomad_jobs_by_type(self, jobs: Dict):
"""Aggregates the pending and running job counts for each Nomad job type.
This is accomplished by using the stats that each
parameterized job has about its children jobs.
`jobs` should be a response from the Nomad API's jobs endpoint.
"""
job_types = set()
for job in jobs:
# Surveyor jobs don't have ids and RAM, so handle them specially.
if job["ID"].startswith("SURVEYOR"):
job_types.add("SURVEYOR")
elif job["ID"] == "SMASHER" or job["ID"] == "DOWNLOADER":
job_types.add(job["ID"])
else:
# Strips out the last two underscores like so:
# SALMON_1_16384 -> SALMON
job_type = "_".join(job["ID"].split("_")[0:-2])
job_types.add(job_type)
nomad_running_jobs_by_type = {}
nomad_pending_jobs_by_type = {}
for job_type in job_types:
# This will count SURVEYOR_DISPATCHER jobs as SURVEYOR
# jobs, but I think that's fine since we barely ever run
# SURVEYOR_DISPATCHER jobs and won't need to monitor them
# through the dashboard.
same_jobs = [job for job in jobs if job["ID"].startswith(job_type)]
aggregated_pending = 0
aggregated_running = 0
for job in same_jobs:
children = job["JobSummary"]["Children"]
aggregated_pending = aggregated_pending + children["Pending"]
aggregated_running = aggregated_running + children["Running"]
nomad_pending_jobs_by_type[job_type] = aggregated_pending
nomad_running_jobs_by_type[job_type] = aggregated_running
return nomad_pending_jobs_by_type, nomad_running_jobs_by_type
def _aggregate_nomad_jobs_by_volume(self, jobs: Dict):
"""Aggregates the job counts for each EBS volume.
This is accomplished by using the stats that each
parameterized job has about its children jobs.
`jobs` should be a response from the Nomad API's jobs endpoint.
"""
volume_ids = set()
for job in jobs:
# These job types don't have volume indices, so we just won't count them.
if not job["ID"].startswith("SURVEYOR") \
and job["ID"] != "SMASHER" \
and job["ID"] != "DOWNLOADER":
# Strips out the volume ID like so:
# SALMON_1_16384 -> 1
volume_id = "_".join(job["ID"].split("_")[-2])
volume_ids.add(volume_id)
nomad_running_jobs_by_volume = {}
nomad_pending_jobs_by_volume = {}
for volume_id in volume_ids:
if job["ID"].startswith("SURVEYOR") \
or job["ID"] == "SMASHER" \
or job["ID"] == "DOWNLOADER":
continue
def job_has_same_volume(job: Dict) -> bool:
"""Returns true if the job is on the same volume as this iteration of the loop.
These job types don't have volume indices, so we just
won't count them. We theoretically could try, but it
really would be more trouble than it's worth and this
endpoint is already going to have a hard time returning
a response in time.
"""
return not job["ID"].startswith("SURVEYOR") \
and job["ID"] != "SMASHER" \
and job["ID"] != "DOWNLOADER" \
and job["ID"].split("_")[-2] == volume_id
jobs_with_same_volume = [job for job in jobs if job_has_same_volume(job)]
aggregated_pending = 0
aggregated_running = 0
for job in jobs_with_same_volume:
children = job["JobSummary"]["Children"]
aggregated_pending = aggregated_pending + children["Pending"]
aggregated_running = aggregated_running + children["Running"]
nomad_pending_jobs_by_volume["volume_" + str(volume_id)] = aggregated_pending
nomad_running_jobs_by_volume["volume_" + str(volume_id)] = aggregated_running
return nomad_pending_jobs_by_volume, nomad_running_jobs_by_volume
def _get_nomad_jobs_breakdown(self):
nomad_host = get_env_variable("NOMAD_HOST")
nomad_port = get_env_variable("NOMAD_PORT", "4646")
nomad_client = nomad.Nomad(nomad_host, port=int(nomad_port), timeout=30)
jobs = nomad_client.jobs.get_jobs()
parameterized_jobs = [job for job in jobs if job['ParameterizedJob']]
nomad_pending_jobs_by_type, nomad_running_jobs_by_type = self._aggregate_nomad_jobs_by_type(parameterized_jobs)
# To get the total jobs for running and pending, the easiest
# AND the most efficient way is to sum up the stats we've
# already partially summed up.
nomad_running_jobs = 0
for job_type, num_jobs in nomad_running_jobs_by_type.items():
nomad_running_jobs = nomad_running_jobs + num_jobs
nomad_pending_jobs = 0
for job_type, num_jobs in nomad_pending_jobs_by_type.items():
nomad_pending_jobs = nomad_pending_jobs + num_jobs
nomad_pending_jobs_by_volume, nomad_running_jobs_by_volume = self._aggregate_nomad_jobs_by_volume(parameterized_jobs)
return {
"nomad_pending_jobs": nomad_pending_jobs,
"nomad_running_jobs": nomad_running_jobs,
"nomad_pending_jobs_by_type": nomad_pending_jobs_by_type,
"nomad_running_jobs_by_type": nomad_running_jobs_by_type,
"nomad_pending_jobs_by_volume": nomad_pending_jobs_by_volume,
"nomad_running_jobs_by_volume": nomad_running_jobs_by_volume
}
def _get_input_data_size(self):
total_size = OriginalFile.objects.filter(
sample__is_processed=True
).aggregate(
Sum('size_in_bytes')
)
return total_size['size_in_bytes__sum'] if total_size['size_in_bytes__sum'] else 0
def _get_output_data_size(self):
total_size = ComputedFile.public_objects.all().filter(
s3_bucket__isnull=False,
s3_key__isnull=True
).aggregate(
Sum('size_in_bytes')
)
return total_size['size_in_bytes__sum'] if total_size['size_in_bytes__sum'] else 0
def _get_job_stats(self, jobs, range_param):
result = {
'total': jobs.count(),
'pending': jobs.filter(start_time__isnull=True).count(),
'completed': jobs.filter(end_time__isnull=False).count(),
'successful': jobs.filter(success=True).count(),
'open': jobs.filter(start_time__isnull=False, end_time__isnull=True, success__isnull=True).count(),
# via https://stackoverflow.com/questions/32520655/get-average-of-difference-of-datetime-fields-in-django
'average_time': jobs.filter(start_time__isnull=False, end_time__isnull=False, success=True).aggregate(
average_time=Avg(F('end_time') - F('start_time')))['average_time']
}
if not result['average_time']:
result['average_time'] = 0
else:
result['average_time'] = result['average_time'].total_seconds()
if range_param:
result['timeline'] = self._jobs_timeline(jobs, range_param)
return result
def _get_object_stats(self, objects, range_param):
result = {
'total': objects.count()
}
if range_param:
result['timeline'] = self._created_timeline(objects, range_param)
return result
def _get_time_intervals(self, range_param):
interval_timedelta = {
'day': timedelta(days=1),
'week': timedelta(weeks=1),
'month': timedelta(weeks=4),
'year': timedelta(weeks=52)
}
interval_timestep = {
'day': timedelta(hours=1),
'week': timedelta(days=1),
'month': timedelta(days=2),
'year': timedelta(weeks=4)
}
current_date = datetime.now(tz=timezone.utc)
time_step = interval_timestep.get(range_param)
start_date = current_date - interval_timedelta.get(range_param)
intervals = [(current_date - time_step*(i+1), current_date - time_step*i)
for i in range(100) if current_date - time_step*(i+1) > start_date]
return intervals[::-1]
def _get_job_interval(self, jobs, start, end):
filtered_jobs = jobs.filter(created_at__gte=start, created_at__lte=end)
pending = filtered_jobs and jobs.filter(start_time__isnull=True)
failed = filtered_jobs and jobs.filter(success=False)
completed = filtered_jobs and jobs.filter(success=True)
open = filtered_jobs and jobs.filter(success__isnull=True)
return {
'start': start,
'end': end,
'total': filtered_jobs.count(),
'completed': completed.count(),
'pending': pending.count(),
'failed': failed.count(),
'open': open.count()
}
def _jobs_timeline(self, jobs, range_param):
return [self._get_job_interval(jobs, start, end) for (start, end) in self._get_time_intervals(range_param)]
def _created_timeline(self, objects, range_param):
results = []
for start, end in self._get_time_intervals(range_param):
total = objects.filter(created_at__gte=start, created_at__lte=end).count()
stats = {
'start': start,
'end': end,
'total': total
}
results.append(stats)
return results
###
# Transcriptome Indices
###
class TranscriptomeIndexDetail(APIView):
    """
    Retrieve the S3 URL and index metadata associated with an OrganismIndex.
    """

    def get(self, request, format=None):
        """
        Gets the S3 url associated with the organism and length, along with other metadata about
        the transcriptome index we have stored. Organism must be specified in underscore-delimited
        uppercase, i.e. "GALLUS_GALLUS". Length must either be "long" or "short"
        """
        params = request.query_params

        # Collect every missing required parameter before rejecting the
        # request, so the client sees all problems at once.
        required = (
            ("organism", "You must specify the organism of the index you want"),
            ("length", "You must specify the length of the transcriptome index"),
        )
        errors = {field: message
                  for field, message in required
                  if field not in params}
        if errors:
            raise ValidationError(errors)

        # Resolve the index for this organism/length pair and serialize it.
        index_type = "TRANSCRIPTOME_" + params["length"].upper()
        try:
            organism_index = (OrganismIndex.public_objects.exclude(s3_url__exact="")
                              .distinct("organism__name", "index_type")
                              .get(organism__name=params["organism"].upper(),
                                   index_type=index_type))
            serializer = OrganismIndexSerializer(organism_index)
            return Response(serializer.data)
        except OrganismIndex.DoesNotExist:
            raise Http404
| |
# Copyright 2014
# The Cloudscaling Group, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import logging
import re
import fixtures
from lxml import etree
import mock
import six
from ec2api.api import ec2utils
def update_dict(dict1, dict2):
    """Return a deep copy of dict1 overlaid with dict2's entries."""
    merged = copy.deepcopy(dict1)
    merged.update(dict2)
    return merged
def purge_dict(dict1, trash_keys):
    """Return a deep copy of dict1 with every key in trash_keys removed."""
    cleaned = copy.deepcopy(dict1)
    for trash_key in trash_keys:
        cleaned.pop(trash_key, None)
    return cleaned
def patch_dict(dict1, dict2, trash_iter):
    """Get a copy of union of two dicts, removed keys.

    Equivalent to update_dict followed by purge_dict, inlined.
    """
    patched = copy.deepcopy(dict1)
    patched.update(dict2)
    for key in trash_iter:
        patched.pop(key, None)
    return patched
def get_db_api_add_item(item_id_dict):
    """Generate db_api.add_item mock function.

    item_id_dict is either a mapping of kind -> id or a single id used
    for every kind.
    """
    def db_api_add_item(context, kind, data):
        item_id = (item_id_dict[kind]
                   if isinstance(item_id_dict, dict)
                   else item_id_dict)
        # Work on a deep copy so the caller's dict is never mutated.
        stored = copy.deepcopy(data)
        stored['id'] = item_id
        stored.setdefault('os_id')
        stored.setdefault('vpc_id')
        return stored
    return db_api_add_item
def get_db_api_get_items(*items):
    """Generate db_api.get_items mock function."""
    def db_api_get_items(context, kind):
        # Return deep copies so tests cannot corrupt the shared fixtures.
        matching = []
        for item in items:
            if ec2utils.get_ec2_id_kind(item['id']) == kind:
                matching.append(copy.deepcopy(item))
        return matching
    return db_api_get_items
def get_db_api_get_item_by_id(*items):
    """Generate db_api.get_item_by_id mock function."""
    def db_api_get_item_by_id(context, item_id):
        # Linear scan; fixtures are tiny. Deep copy protects the fixture.
        for item in items:
            if item['id'] == item_id:
                return copy.deepcopy(item)
        return None
    return db_api_get_item_by_id
def get_db_api_get_items_by_ids(*items):
    """Generate db_api.get_items_by_ids mock function."""
    def db_api_get_items_by_ids(context, item_ids):
        selected = [item for item in items if item['id'] in item_ids]
        # Deep-copying the whole list copies each contained item too.
        return copy.deepcopy(selected)
    return db_api_get_items_by_ids
def get_db_api_get_items_ids(*items):
    """Generate db_api.get_items_ids mock function."""
    def db_api_get_items_ids(context, kind, item_ids=None, item_os_ids=None):
        def _wanted(item):
            # Kind must match; id/os_id filters apply only when supplied.
            if ec2utils.get_ec2_id_kind(item['id']) != kind:
                return False
            if item_ids and item['id'] not in item_ids:
                return False
            return not item_os_ids or item['os_id'] in item_os_ids

        return [(item['id'], item['os_id'])
                for item in items
                if _wanted(item)]
    return db_api_get_items_ids
def get_neutron_create(kind, os_id, addon=None):
    """Generate Neutron create an object mock function.

    :param kind: top-level body key (e.g. 'port', 'network')
    :param os_id: id assigned to the created object
    :param addon: optional extra attributes merged into the object.
        (Defaults to None instead of a mutable ``{}`` default, which was
        shared between calls.)
    """
    def neutron_create(body):
        # Copy so the caller's request body is never mutated.
        body = copy.deepcopy(body)
        body[kind].update(addon or {})
        body[kind]['id'] = os_id
        return body
    return neutron_create
def get_by_1st_arg_getter(results_dict_by_id, notfound_exception=None):
    """Generate mock function for getter by 1st argument.

    Unknown ids raise notfound_exception when given, otherwise yield None.
    """
    def getter(obj_id):
        if obj_id not in results_dict_by_id:
            if notfound_exception:
                raise notfound_exception
            return None
        return copy.deepcopy(results_dict_by_id[obj_id])
    return getter
def get_by_2nd_arg_getter(results_dict_by_id):
    """Generate mock function for getter by 2nd argument.

    Missing ids yield None; hits are deep-copied.
    """
    def getter(_context, obj_id):
        result = results_dict_by_id.get(obj_id)
        return copy.deepcopy(result)
    return getter
def _safe_copy_parameters(args, kwargs):
# NOTE(ft): deepcopy fails to copy a complicated mock like
# neutron client mock or OnCrashCleaner object
def _safe_copy(obj):
try:
return copy.deepcopy(obj)
except Exception:
return obj
args = [_safe_copy(arg)
for arg in args]
kwargs = {key: _safe_copy(val)
for key, val in six.iteritems(kwargs)}
return (args, kwargs)
class CopyingMock(mock.MagicMock):
    """Mock class for calls with mutable arguments.

    See https://docs.python.org/3/library/unittest.mock-examples.html#
    coping-with-mutable-arguments
    """

    def __call__(self, *args, **kwargs):
        # Snapshot the arguments at call time so later caller-side mutation
        # does not corrupt the recorded call history.
        args, kwargs = _safe_copy_parameters(args, kwargs)
        return super(CopyingMock, self).__call__(*args, **kwargs)
def deepcopy_call_args_saver(destination):
    """Return a side_effect that appends each call (deep-copied where
    possible) to *destination* as a mock.call object."""
    def side_effect(*args, **kwargs):
        copied_args, copied_kwargs = _safe_copy_parameters(args, kwargs)
        destination.append(mock.call(*copied_args, **copied_kwargs))
    return side_effect
_xml_scheme = re.compile('\sxmlns=".*"')
def parse_xml(xml_string):
    """Parse an EC2 XML response body (bytes) into a nested dict.

    The default namespace declaration is stripped first so element tags
    come out unqualified. Repeated <item> children become lists; empty
    *Set elements become empty lists; leaf text is converted via
    ec2utils._try_convert.
    """
    xml_string = _xml_scheme.sub('', xml_string.decode("utf-8"))
    xml = etree.fromstring(xml_string)

    def convert_node(node):
        # Returns a (tag, value) pair for one element, recursing into
        # children.
        children = list(node)
        if len(children):
            if children[0].tag == 'item':
                # A sequence of <item> elements represents a list.
                val = list(convert_node(child)[1] for child in children)
            else:
                val = dict(convert_node(child) for child in children)
        elif node.tag.endswith('Set'):
            # An empty *Set element denotes an empty collection.
            val = []
        else:
            # TODO(ft): do not use private function
            val = (ec2utils._try_convert(node.text)
                   if node.text
                   else node.text)
        return node.tag, val

    return dict([convert_node(xml)])
class KeepingHandler(logging.Handler):
    """Logging handler that buffers records for later re-emission."""

    def __init__(self):
        super(KeepingHandler, self).__init__()
        self._storage = []

    def emit(self, record):
        """Store the record instead of writing it anywhere."""
        self._storage.append(record)

    def emit_records_to(self, handlers, record_filter=None):
        """Replay buffered records through *handlers*, skipping this
        handler itself and records rejected by *record_filter*."""
        for record in self._storage:
            if record_filter and not record_filter.filter(record):
                continue
            for handler in handlers:
                if self != handler:
                    handler.emit(record)
class ScreeningFilter(logging.Filter):
    """Filter that *rejects* records from one named logger.

    The sense is inverted relative to logging.Filter: records whose
    logger name equals *name* are screened out; everything else passes.
    """

    def __init__(self, name=None):
        # Initialize the base Filter (previously skipped) so base-class
        # attributes such as `name`/`nlen` exist.
        super(ScreeningFilter, self).__init__()
        self._name = name

    def filter(self, record):
        """Return False for records emitted by the screened logger."""
        if self._name is not None and record.name == self._name:
            return False
        return True
class ScreeningLogger(fixtures.Fixture):
    """Fixture that buffers log records while active.

    On exit the buffered records are replayed to the root logger's
    handlers: all of them if the block raised, otherwise only those that
    pass the screening filter (records from `log_name` are dropped).
    """

    def __init__(self, log_name=None):
        super(ScreeningLogger, self).__init__()
        self.handler = KeepingHandler()
        if log_name:
            self._filter = ScreeningFilter(name=log_name)
        else:
            # No name given: nothing is screened; success replays nothing.
            self._filter = None

    def setUp(self):
        super(ScreeningLogger, self).setUp()
        # Route all logging through the buffering handler for the
        # fixture's lifetime.
        self.useFixture(fixtures.LogHandler(self.handler))

    def __exit__(self, exc_type, exc_val, exc_tb):
        res = super(ScreeningLogger, self).__exit__(exc_type, exc_val, exc_tb)
        handlers = logging.getLogger().handlers
        if exc_type:
            # On failure replay everything to aid debugging.
            self.handler.emit_records_to(handlers)
        elif self._filter:
            # On success replay only unscreened records.
            self.handler.emit_records_to(handlers, self._filter)
        return res
return res
def screen_logs(log_name=None):
    """Decorator factory: run the wrapped function under a ScreeningLogger.

    Records logged to *log_name* are suppressed when the function
    succeeds; on failure every buffered record is replayed.
    """
    import functools

    def decorator(func):
        # functools.wraps preserves the wrapped function's metadata
        # (__name__, __doc__), which the bare wrapper previously lost.
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            with ScreeningLogger(log_name):
                return func(*args, **kwargs)
        return wrapper
    return decorator
# Pre-built decorators: suppress log records from the API layer, or from
# every logger, when the decorated test succeeds.
screen_unexpected_exception_logs = screen_logs('ec2api.api')
screen_all_logs = screen_logs()
| |
# Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
# Copyright 2011 Eldar Nugaev
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
import mock
import six
import webob
from nova.api.openstack.compute import floating_ips as fips_v21
from nova.api.openstack import extensions
from nova import compute
from nova.compute import utils as compute_utils
from nova import context
from nova import db
from nova import exception
from nova import network
from nova import objects
from nova.objects import base as obj_base
from nova import test
from nova.tests.unit.api.openstack import fakes
from nova.tests.unit import fake_network
# Shared fixtures for the tests below.
FAKE_UUID = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
TEST_INST = 1      # id of an instance the stubs treat as existing
WRONG_INST = 9999  # id used to trigger not-found paths
def network_api_get_floating_ip(self, context, id):
    """Stub: return a single, unassociated floating IP record."""
    return {'id': 1,
            'address': '10.10.10.10',
            'pool': 'nova',
            'fixed_ip_id': None}
def network_api_get_floating_ip_by_address(self, context, address):
    """Stub: return a floating IP record associated with fixed IP 10."""
    return {'id': 1,
            'address': '10.10.10.10',
            'pool': 'nova',
            'fixed_ip_id': 10}
def network_api_get_floating_ips_by_project(self, context):
    """Stub: return two floating IPs -- one associated with FAKE_UUID and
    one unassociated."""
    return [{'id': 1,
             'address': '10.10.10.10',
             'pool': 'nova',
             'fixed_ip': {'address': '10.0.0.1',
                          'instance_uuid': FAKE_UUID,
                          'instance': objects.Instance(
                              **{'uuid': FAKE_UUID})}},
            {'id': 2,
             'pool': 'nova', 'interface': 'eth0',
             'address': '10.10.10.11',
             'fixed_ip': None}]
def compute_api_get(self, context, instance_id, expected_attrs=None,
                    want_objects=False):
    """Stub for compute.api.API.get: return a minimal Instance object."""
    return objects.Instance(uuid=FAKE_UUID, id=instance_id,
                            instance_type_id=1, host='bob')
def network_api_allocate(self, context):
    """Stub: pretend a floating IP was allocated."""
    allocated_address = '10.10.10.10'
    return allocated_address
def network_api_release(self, context, address):
    """Stub: releasing a floating IP is a no-op."""
    pass
def compute_api_associate(self, context, instance_id, address):
    """Stub: associating a floating IP via compute is a no-op."""
    pass
def network_api_associate(self, context, floating_address, fixed_address):
    """Stub: associating a floating IP via network is a no-op."""
    pass
def network_api_disassociate(self, context, instance, floating_address):
    """Stub: disassociating a floating IP is a no-op."""
    pass
def fake_instance_get(context, instance_id):
    """Stub for nova.db.instance_get: return a minimal fake Instance.

    Note: the uuid is freshly generated on every call, not FAKE_UUID.
    """
    return objects.Instance(**{
        "id": 1,
        "uuid": uuid.uuid4(),
        "name": 'fake',
        "user_id": 'fakeuser',
        "project_id": '123'})
def stub_nw_info(test):
    """Return a get_nw_info_for_instance replacement bound to *test*."""
    def get_nw_info_for_instance(instance):
        # Ignores the instance; always returns the fake network info.
        return fake_network.fake_get_instance_nw_info(test)
    return get_nw_info_for_instance
def get_instance_by_floating_ip_addr(self, context, address):
    """Stub: no instance is associated with any floating IP."""
    return None
class FloatingIpTestNeutronV21(test.NoDBTestCase):
    """Floating IP API tests exercising the Neutron code path."""

    # Controller module under test (v2.1).
    floating_ips = fips_v21

    def setUp(self):
        super(FloatingIpTestNeutronV21, self).setUp()
        self.flags(use_neutron=True)
        self.controller = self.floating_ips.FloatingIPController()

    def test_floatingip_delete(self):
        """With Neutron, delete must go through
        disassociate_and_release_floating_ip only."""
        req = fakes.HTTPRequest.blank('')
        fip_val = {'address': '1.1.1.1', 'fixed_ip_id': '192.168.1.2'}
        with test.nested(
            mock.patch.object(self.controller.network_api,
                              'disassociate_floating_ip'),
            mock.patch.object(self.controller.network_api,
                              'disassociate_and_release_floating_ip'),
            mock.patch.object(self.controller.network_api,
                              'release_floating_ip'),
            mock.patch.object(self.controller.network_api,
                              'get_instance_id_by_floating_address',
                              return_value=None),
            mock.patch.object(self.controller.network_api,
                              'get_floating_ip',
                              return_value=fip_val)) as (
                disoc_fip, dis_and_del, rel_fip, _, _):
            self.controller.delete(req, 1)
            self.assertFalse(disoc_fip.called)
            self.assertFalse(rel_fip.called)
            # Only disassociate_and_release_floating_ip is
            # called if using neutron
            self.assertTrue(dis_and_del.called)

    def _test_floatingip_delete_not_found(self, ex,
                                          expect_ex=webob.exc.HTTPNotFound):
        """Assert delete maps *ex* from get_floating_ip to *expect_ex*."""
        req = fakes.HTTPRequest.blank('')
        with mock.patch.object(self.controller.network_api,
                               'get_floating_ip', side_effect=ex):
            self.assertRaises(expect_ex,
                              self.controller.delete, req, 1)

    def test_floatingip_delete_not_found_ip(self):
        ex = exception.FloatingIpNotFound(id=1)
        self._test_floatingip_delete_not_found(ex)

    def test_floatingip_delete_not_found(self):
        ex = exception.NotFound
        self._test_floatingip_delete_not_found(ex)

    def test_floatingip_delete_invalid_id(self):
        ex = exception.InvalidID(id=1)
        self._test_floatingip_delete_not_found(ex, webob.exc.HTTPBadRequest)
class FloatingIpTestV21(test.TestCase):
floating_ip = "10.10.10.10"
floating_ip_2 = "10.10.10.11"
floating_ips = fips_v21
validation_error = exception.ValidationError
    def _create_floating_ips(self, floating_ips=None):
        """Create a floating IP object."""
        # Normalize to a list: default fixture IP, or wrap a single value.
        if floating_ips is None:
            floating_ips = [self.floating_ip]
        elif not isinstance(floating_ips, (list, tuple)):
            floating_ips = [floating_ips]

        dict_ = {'pool': 'nova', 'host': 'fake_host'}
        return db.floating_ip_bulk_create(
            self.context, [dict(address=ip, **dict_) for ip in floating_ips],
        )
    def _delete_floating_ip(self):
        # Remove the fixture IP created in setUp.
        db.floating_ip_destroy(self.context, self.floating_ip)
    def setUp(self):
        """Stub out the compute/network APIs and create a fixture IP."""
        super(FloatingIpTestV21, self).setUp()
        self.stubs.Set(compute.api.API, "get",
                       compute_api_get)
        self.stubs.Set(network.api.API, "get_floating_ip",
                       network_api_get_floating_ip)
        self.stubs.Set(network.api.API, "get_floating_ip_by_address",
                       network_api_get_floating_ip_by_address)
        self.stubs.Set(network.api.API, "get_floating_ips_by_project",
                       network_api_get_floating_ips_by_project)
        self.stubs.Set(network.api.API, "release_floating_ip",
                       network_api_release)
        self.stubs.Set(network.api.API, "disassociate_floating_ip",
                       network_api_disassociate)
        self.stubs.Set(network.api.API, "get_instance_id_by_floating_address",
                       get_instance_by_floating_ip_addr)
        self.stubs.Set(compute_utils, "get_nw_info_for_instance",
                       stub_nw_info(self))

        fake_network.stub_out_nw_api_get_instance_nw_info(self)
        self.stub_out('nova.db.instance_get',
                      fake_instance_get)

        self.context = context.get_admin_context()
        self._create_floating_ips()

        self.ext_mgr = extensions.ExtensionManager()
        self.ext_mgr.extensions = {}
        self.controller = self.floating_ips.FloatingIPController()
        self.manager = self.floating_ips.\
            FloatingIPActionController(self.ext_mgr)
        self.fake_req = fakes.HTTPRequest.blank('')
    def tearDown(self):
        """Drop the fixture floating IP created in setUp."""
        self._delete_floating_ip()
        super(FloatingIpTestV21, self).tearDown()
    def test_floatingip_delete(self):
        """Nova-network delete disassociates then releases the IP."""
        fip_val = {'address': '1.1.1.1', 'fixed_ip_id': '192.168.1.2'}
        with test.nested(
            mock.patch.object(self.controller.network_api,
                              'disassociate_floating_ip'),
            mock.patch.object(self.controller.network_api,
                              'release_floating_ip'),
            mock.patch.object(self.controller.network_api,
                              'get_instance_id_by_floating_address',
                              return_value=None),
            mock.patch.object(self.controller.network_api,
                              'get_floating_ip',
                              return_value=fip_val)) as (
                disoc_fip, rel_fip, _, _):
            self.controller.delete(self.fake_req, 1)
            self.assertTrue(disoc_fip.called)
            self.assertTrue(rel_fip.called)
    def _test_floatingip_delete_not_found(self, ex,
                                          expect_ex=webob.exc.HTTPNotFound):
        """Assert delete maps *ex* from get_floating_ip to *expect_ex*."""
        with mock.patch.object(self.controller.network_api,
                               'get_floating_ip', side_effect=ex):
            self.assertRaises(expect_ex,
                              self.controller.delete, self.fake_req, 1)
    def test_floatingip_delete_not_found_ip(self):
        """FloatingIpNotFound becomes 404."""
        ex = exception.FloatingIpNotFound(id=1)
        self._test_floatingip_delete_not_found(ex)
    def test_floatingip_delete_not_found(self):
        """Generic NotFound becomes 404."""
        ex = exception.NotFound
        self._test_floatingip_delete_not_found(ex)
    def test_floatingip_delete_invalid_id(self):
        """InvalidID becomes 400 rather than 404."""
        ex = exception.InvalidID(id=1)
        self._test_floatingip_delete_not_found(ex, webob.exc.HTTPBadRequest)
    def test_translate_floating_ip_view(self):
        """A DB-backed FloatingIP object translates to the API view."""
        floating_ip_address = self.floating_ip
        floating_ip = db.floating_ip_get_by_address(self.context,
                                                    floating_ip_address)
        # NOTE(vish): network_get uses the id not the address
        floating_ip = db.floating_ip_get(self.context, floating_ip['id'])
        floating_obj = objects.FloatingIP()
        objects.FloatingIP._from_db_object(self.context, floating_obj,
                                           floating_ip)

        view = self.floating_ips._translate_floating_ip_view(floating_obj)

        self.assertIn('floating_ip', view)
        self.assertTrue(view['floating_ip']['id'])
        self.assertEqual(view['floating_ip']['ip'], floating_obj.address)
        self.assertIsNone(view['floating_ip']['fixed_ip'])
        self.assertIsNone(view['floating_ip']['instance_id'])
    def test_translate_floating_ip_view_neutronesque(self):
        """Neutron-style UUID ids survive translation unchanged."""
        uuid = 'ca469a10-fa76-11e5-86aa-5e5517507c66'
        fixed_id = 'ae900cf4-fb73-11e5-86aa-5e5517507c66'
        floating_ip = objects.floating_ip.NeutronFloatingIP(id=uuid,
            address='1.2.3.4', pool='pool', context='ctxt',
            fixed_ip_id=fixed_id)
        view = self.floating_ips._translate_floating_ip_view(floating_ip)
        self.assertEqual(uuid, view['floating_ip']['id'])
    def test_translate_floating_ip_view_dict(self):
        """Plain dicts are accepted by the view translator too."""
        floating_ip = {'id': 0, 'address': '10.0.0.10', 'pool': 'nova',
                       'fixed_ip': None}
        view = self.floating_ips._translate_floating_ip_view(floating_ip)
        self.assertIn('floating_ip', view)
    def test_translate_floating_ip_view_obj(self):
        """A fully populated fixed IP yields address and instance id."""
        fip = objects.FixedIP(address='192.168.1.2', instance_uuid=FAKE_UUID)
        floater = self._build_floating_ip('10.0.0.2', fip)

        result = self.floating_ips._translate_floating_ip_view(floater)

        expected = self._build_expected(floater, fip.address,
                                        fip.instance_uuid)
        self._test_result(expected, result)
    def test_translate_floating_ip_bad_address(self):
        """Missing fixed-IP address translates to None."""
        fip = objects.FixedIP(instance_uuid=FAKE_UUID)
        floater = self._build_floating_ip('10.0.0.2', fip)

        result = self.floating_ips._translate_floating_ip_view(floater)

        expected = self._build_expected(floater, None, fip.instance_uuid)
        self._test_result(expected, result)
    def test_translate_floating_ip_bad_instance_id(self):
        """Missing instance uuid translates to None."""
        fip = objects.FixedIP(address='192.168.1.2')
        floater = self._build_floating_ip('10.0.0.2', fip)

        result = self.floating_ips._translate_floating_ip_view(floater)

        expected = self._build_expected(floater, fip.address, None)
        self._test_result(expected, result)
    def test_translate_floating_ip_bad_instance_and_address(self):
        """Empty fixed IP object yields None for both fields."""
        fip = objects.FixedIP()
        floater = self._build_floating_ip('10.0.0.2', fip)

        result = self.floating_ips._translate_floating_ip_view(floater)

        expected = self._build_expected(floater, None, None)
        self._test_result(expected, result)
    def test_translate_floating_ip_null_fixed(self):
        """fixed_ip=None yields None for both fields."""
        floater = self._build_floating_ip('10.0.0.2', None)

        result = self.floating_ips._translate_floating_ip_view(floater)

        expected = self._build_expected(floater, None, None)
        self._test_result(expected, result)
    def test_translate_floating_ip_unset_fixed(self):
        """An object that never set fixed_ip also yields None fields."""
        floater = objects.FloatingIP(id=1, address='10.0.0.2', pool='foo')

        result = self.floating_ips._translate_floating_ip_view(floater)

        expected = self._build_expected(floater, None, None)
        self._test_result(expected, result)
    def test_translate_floating_ips_view(self):
        """The plural translator calls the singular one per IP and wraps
        the results in a 'floating_ips' list."""
        mock_trans = mock.Mock()
        mock_trans.return_value = {'floating_ip': 'foo'}
        self.floating_ips._translate_floating_ip_view = mock_trans
        fip1 = objects.FixedIP(address='192.168.1.2', instance_uuid=FAKE_UUID)
        fip2 = objects.FixedIP(address='192.168.1.3', instance_uuid=FAKE_UUID)
        floaters = [self._build_floating_ip('10.0.0.2', fip1),
                    self._build_floating_ip('10.0.0.3', fip2)]

        result = self.floating_ips._translate_floating_ips_view(floaters)

        # Compare by object primitives; identity is not guaranteed.
        called_floaters = [call[0][0] for call in mock_trans.call_args_list]
        self.assertTrue(any(obj_base.obj_equal_prims(floaters[0], f)
                            for f in called_floaters),
                        "_translate_floating_ip_view was not called with all "
                        "floating ips")
        self.assertTrue(any(obj_base.obj_equal_prims(floaters[1], f)
                            for f in called_floaters),
                        "_translate_floating_ip_view was not called with all "
                        "floating ips")

        expected_result = {'floating_ips': ['foo', 'foo']}
        self.assertEqual(expected_result, result)
    def test_floating_ips_list(self):
        """index() reflects the project stub: one associated, one free."""
        res_dict = self.controller.index(self.fake_req)

        response = {'floating_ips': [{'instance_id': FAKE_UUID,
                                      'ip': '10.10.10.10',
                                      'pool': 'nova',
                                      'fixed_ip': '10.0.0.1',
                                      'id': 1},
                                     {'instance_id': None,
                                      'ip': '10.10.10.11',
                                      'pool': 'nova',
                                      'fixed_ip': None,
                                      'id': 2}]}
        self.assertEqual(res_dict, response)
    def test_floating_ip_release_nonexisting(self):
        """Deleting an unknown floating IP id returns 404."""
        def fake_get_floating_ip(*args, **kwargs):
            # NOTE(review): `id=id` passes the *builtin* id function here;
            # harmless because the controller formats its own message from
            # the requested id, but presumably unintended -- confirm.
            raise exception.FloatingIpNotFound(id=id)

        self.stubs.Set(network.api.API, "get_floating_ip",
                       fake_get_floating_ip)

        ex = self.assertRaises(webob.exc.HTTPNotFound,
                               self.controller.delete, self.fake_req, '9876')
        self.assertIn("Floating IP not found for ID 9876", ex.explanation)
    def test_floating_ip_release_race_cond(self):
        """A concurrent disassociation (FloatingIpNotAssociated) during
        delete is tolerated and still yields 202."""
        def fake_get_floating_ip(*args, **kwargs):
            return {'fixed_ip_id': 1, 'address': self.floating_ip}

        def fake_get_instance_by_floating_ip_addr(*args, **kwargs):
            return 'test-inst'

        def fake_disassociate_floating_ip(*args, **kwargs):
            raise exception.FloatingIpNotAssociated(args[3])

        self.stubs.Set(network.api.API, "get_floating_ip",
                       fake_get_floating_ip)
        self.stubs.Set(self.floating_ips, "get_instance_by_floating_ip_addr",
                       fake_get_instance_by_floating_ip_addr)
        self.stubs.Set(self.floating_ips, "disassociate_floating_ip",
                       fake_disassociate_floating_ip)

        res = self.controller.delete(self.fake_req, '9876')
        # NOTE: on v2.1, http status code is set as wsgi_code of API
        # method instead of status_int in a response object.
        if isinstance(self.controller,
                      fips_v21.FloatingIPController):
            status_int = self.controller.delete.wsgi_code
        else:
            status_int = res.status_int
        self.assertEqual(status_int, 202)
    def test_floating_ip_show(self):
        """show() returns the unassociated floating IP from the stub."""
        res_dict = self.controller.show(self.fake_req, 1)

        self.assertEqual(res_dict['floating_ip']['id'], 1)
        self.assertEqual(res_dict['floating_ip']['ip'], '10.10.10.10')
        self.assertIsNone(res_dict['floating_ip']['instance_id'])
    def test_floating_ip_show_not_found(self):
        """show() on an unknown id returns 404 with the requested id."""
        def fake_get_floating_ip(*args, **kwargs):
            raise exception.FloatingIpNotFound(id='fake')

        self.stubs.Set(network.api.API, "get_floating_ip",
                       fake_get_floating_ip)

        ex = self.assertRaises(webob.exc.HTTPNotFound,
                               self.controller.show, self.fake_req, '9876')
        self.assertIn("Floating IP not found for ID 9876", ex.explanation)
    def test_show_associated_floating_ip(self):
        """show() surfaces fixed IP and instance uuid when associated."""
        def get_floating_ip(self, context, id):
            return {'id': 1, 'address': '10.10.10.10', 'pool': 'nova',
                    'fixed_ip': {'address': '10.0.0.1',
                                 'instance_uuid': FAKE_UUID,
                                 'instance': {'uuid': FAKE_UUID}}}

        self.stubs.Set(network.api.API, "get_floating_ip", get_floating_ip)

        res_dict = self.controller.show(self.fake_req, 1)

        self.assertEqual(res_dict['floating_ip']['id'], 1)
        self.assertEqual(res_dict['floating_ip']['ip'], '10.10.10.10')
        self.assertEqual(res_dict['floating_ip']['fixed_ip'], '10.0.0.1')
        self.assertEqual(res_dict['floating_ip']['instance_id'], FAKE_UUID)
    def test_recreation_of_floating_ip(self):
        """Deleting then recreating the fixture IP must not raise."""
        self._delete_floating_ip()
        self._create_floating_ips()
    def test_floating_ip_in_bulk_creation(self):
        """Bulk creation registers every supplied address."""
        self._delete_floating_ip()

        self._create_floating_ips([self.floating_ip, self.floating_ip_2])
        all_ips = db.floating_ip_get_all(self.context)
        ip_list = [ip['address'] for ip in all_ips]
        self.assertIn(self.floating_ip, ip_list)
        self.assertIn(self.floating_ip_2, ip_list)
    def test_fail_floating_ip_in_bulk_creation(self):
        """A duplicate in bulk creation fails and rolls back the rest."""
        self.assertRaises(exception.FloatingIpExists,
                          self._create_floating_ips,
                          [self.floating_ip, self.floating_ip_2])
        all_ips = db.floating_ip_get_all(self.context)
        ip_list = [ip['address'] for ip in all_ips]
        self.assertIn(self.floating_ip, ip_list)
        self.assertNotIn(self.floating_ip_2, ip_list)
    def test_floating_ip_allocate_no_free_ips(self):
        """NoMoreFloatingIps maps to 404 with a generic message."""
        def fake_allocate(*args, **kwargs):
            raise exception.NoMoreFloatingIps()

        self.stubs.Set(network.api.API, "allocate_floating_ip", fake_allocate)

        ex = self.assertRaises(webob.exc.HTTPNotFound,
                               self.controller.create, self.fake_req)

        self.assertIn('No more floating IPs', ex.explanation)
    def test_floating_ip_allocate_no_free_ips_pool(self):
        """NoMoreFloatingIps with a pool names the pool in the 404."""
        def fake_allocate(*args, **kwargs):
            raise exception.NoMoreFloatingIps()

        self.stubs.Set(network.api.API, "allocate_floating_ip", fake_allocate)

        ex = self.assertRaises(webob.exc.HTTPNotFound,
                               self.controller.create, self.fake_req,
                               {'pool': 'non_existent_pool'})

        self.assertIn('No more floating IPs in pool non_existent_pool',
                      ex.explanation)
    @mock.patch.object(network.api.API, 'allocate_floating_ip',
                       side_effect=exception.FloatingIpBadRequest(
                           'Bad floatingip request: Network '
                           'c8f0e88f-ae41-47cb-be6c-d8256ba80576 does not contain any '
                           'IPv4 subnet'))
    def test_floating_ip_allocate_no_ipv4_subnet(self, allocate_mock):
        """A network without an IPv4 subnet maps to 400."""
        ex = self.assertRaises(webob.exc.HTTPBadRequest,
                               self.controller.create, self.fake_req,
                               {'pool': 'non_existent_pool'})
        self.assertIn("does not contain any IPv4 subnet",
                      six.text_type(ex))
    @mock.patch('nova.network.api.API.allocate_floating_ip',
                side_effect=exception.FloatingIpLimitExceeded())
    def test_floating_ip_allocate_over_quota(self, allocate_mock):
        """Quota exhaustion maps to 403."""
        ex = self.assertRaises(webob.exc.HTTPForbidden,
                               self.controller.create, self.fake_req)

        self.assertIn('IP allocation over quota', ex.explanation)
@mock.patch('nova.network.api.API.allocate_floating_ip',
side_effect=exception.FloatingIpLimitExceeded())
def test_floating_ip_allocate_quota_exceed_in_pool(self, allocate_mock):
ex = self.assertRaises(webob.exc.HTTPForbidden,
self.controller.create, self.fake_req,
{'pool': 'non_existent_pool'})
self.assertIn('IP allocation over quota in pool non_existent_pool.',
ex.explanation)
@mock.patch('nova.network.api.API.allocate_floating_ip',
side_effect=exception.FloatingIpPoolNotFound())
def test_floating_ip_create_with_unknown_pool(self, allocate_mock):
ex = self.assertRaises(webob.exc.HTTPNotFound,
self.controller.create, self.fake_req,
{'pool': 'non_existent_pool'})
self.assertIn('Floating IP pool not found.', ex.explanation)
def test_floating_ip_allocate(self):
def fake1(*args, **kwargs):
pass
def fake2(*args, **kwargs):
return {'id': 1, 'address': '10.10.10.10', 'pool': 'nova'}
self.stubs.Set(network.api.API, "allocate_floating_ip",
fake1)
self.stubs.Set(network.api.API, "get_floating_ip_by_address",
fake2)
res_dict = self.controller.create(self.fake_req)
ip = res_dict['floating_ip']
expected = {
"id": 1,
"instance_id": None,
"ip": "10.10.10.10",
"fixed_ip": None,
"pool": 'nova'}
self.assertEqual(ip, expected)
    def test_floating_ip_release(self):
        # Deleting floating IP id 1 must complete without raising.
        # NOTE(review): release/disassociate appear to be stubbed in this
        # class's setUp (defined above this chunk) — confirm.
        self.controller.delete(self.fake_req, 1)
def _test_floating_ip_associate(self, fixed_address):
def fake_associate_floating_ip(*args, **kwargs):
self.assertEqual(fixed_address, kwargs['fixed_address'])
self.stubs.Set(network.api.API, "associate_floating_ip",
fake_associate_floating_ip)
body = dict(addFloatingIp=dict(address=self.floating_ip))
rsp = self.manager._add_floating_ip(self.fake_req, TEST_INST,
body=body)
self.assertEqual(202, rsp.status_int)
    def test_floating_ip_associate(self):
        # Plain IPv4 association; the shared helper asserts the address.
        self._test_floating_ip_associate(fixed_address='192.168.1.100')
@mock.patch.object(network.model.NetworkInfo, 'fixed_ips')
def test_associate_floating_ip_v4v6_fixed_ip(self, fixed_ips_mock):
fixed_address = '192.168.1.100'
fixed_ips_mock.return_value = [{'address': 'fc00:2001:db8::100'},
{'address': fixed_address}]
self._test_floating_ip_associate(fixed_address=fixed_address)
@mock.patch.object(network.model.NetworkInfo, 'fixed_ips',
return_value=[{'address': 'fc00:2001:db8::100'}])
def test_associate_floating_ip_v6_fixed_ip(self, fixed_ips_mock):
body = dict(addFloatingIp=dict(address=self.floating_ip))
self.assertRaises(webob.exc.HTTPBadRequest,
self.manager._add_floating_ip, self.fake_req,
TEST_INST, body=body)
def test_floating_ip_associate_invalid_instance(self):
def fake_get(self, context, id, expected_attrs=None,
want_objects=False):
raise exception.InstanceNotFound(instance_id=id)
self.stubs.Set(compute.api.API, "get", fake_get)
body = dict(addFloatingIp=dict(address=self.floating_ip))
self.assertRaises(webob.exc.HTTPNotFound,
self.manager._add_floating_ip, self.fake_req,
'test_inst', body=body)
def test_associate_not_allocated_floating_ip_to_instance(self):
def fake_associate_floating_ip(self, context, instance,
floating_address, fixed_address,
affect_auto_assigned=False):
raise exception.FloatingIpNotFoundForAddress(
address=floating_address)
self.stubs.Set(network.api.API, "associate_floating_ip",
fake_associate_floating_ip)
floating_ip = '10.10.10.11'
body = dict(addFloatingIp=dict(address=floating_ip))
ex = self.assertRaises(webob.exc.HTTPNotFound,
self.manager._add_floating_ip,
self.fake_req, TEST_INST, body=body)
self.assertIn("floating IP not found", ex.explanation)
@mock.patch.object(network.api.API, 'associate_floating_ip',
side_effect=exception.Forbidden)
def test_associate_floating_ip_forbidden(self, associate_mock):
body = dict(addFloatingIp=dict(address='10.10.10.11'))
self.assertRaises(webob.exc.HTTPForbidden,
self.manager._add_floating_ip, self.fake_req,
TEST_INST, body=body)
def test_associate_floating_ip_bad_address_key(self):
body = dict(addFloatingIp=dict(bad_address='10.10.10.11'))
req = fakes.HTTPRequest.blank('/v2/fake/servers/test_inst/action')
self.assertRaises(self.validation_error,
self.manager._add_floating_ip, req, 'test_inst',
body=body)
def test_associate_floating_ip_bad_addfloatingip_key(self):
body = dict(bad_addFloatingIp=dict(address='10.10.10.11'))
req = fakes.HTTPRequest.blank('/v2/fake/servers/test_inst/action')
self.assertRaises(self.validation_error,
self.manager._add_floating_ip, req, 'test_inst',
body=body)
def test_floating_ip_disassociate(self):
def get_instance_by_floating_ip_addr(self, context, address):
if address == '10.10.10.10':
return TEST_INST
self.stubs.Set(network.api.API, "get_instance_id_by_floating_address",
get_instance_by_floating_ip_addr)
body = dict(removeFloatingIp=dict(address='10.10.10.10'))
rsp = self.manager._remove_floating_ip(self.fake_req, TEST_INST,
body=body)
self.assertEqual(202, rsp.status_int)
def test_floating_ip_disassociate_missing(self):
body = dict(removeFloatingIp=dict(address='10.10.10.10'))
self.assertRaises(webob.exc.HTTPConflict,
self.manager._remove_floating_ip,
self.fake_req, 'test_inst', body=body)
def test_floating_ip_associate_non_existent_ip(self):
def fake_network_api_associate(self, context, instance,
floating_address=None,
fixed_address=None):
floating_ips = ["10.10.10.10", "10.10.10.11"]
if floating_address not in floating_ips:
raise exception.FloatingIpNotFoundForAddress(
address=floating_address)
self.stubs.Set(network.api.API, "associate_floating_ip",
fake_network_api_associate)
body = dict(addFloatingIp=dict(address='1.1.1.1'))
self.assertRaises(webob.exc.HTTPNotFound,
self.manager._add_floating_ip,
self.fake_req, TEST_INST, body=body)
def test_floating_ip_disassociate_non_existent_ip(self):
def network_api_get_floating_ip_by_address(self, context,
floating_address):
floating_ips = ["10.10.10.10", "10.10.10.11"]
if floating_address not in floating_ips:
raise exception.FloatingIpNotFoundForAddress(
address=floating_address)
self.stubs.Set(network.api.API, "get_floating_ip_by_address",
network_api_get_floating_ip_by_address)
body = dict(removeFloatingIp=dict(address='1.1.1.1'))
self.assertRaises(webob.exc.HTTPNotFound,
self.manager._remove_floating_ip,
self.fake_req, TEST_INST, body=body)
def test_floating_ip_disassociate_wrong_instance_uuid(self):
def get_instance_by_floating_ip_addr(self, context, address):
if address == '10.10.10.10':
return TEST_INST
self.stubs.Set(network.api.API, "get_instance_id_by_floating_address",
get_instance_by_floating_ip_addr)
wrong_uuid = 'aaaaaaaa-ffff-ffff-ffff-aaaaaaaaaaaa'
body = dict(removeFloatingIp=dict(address='10.10.10.10'))
self.assertRaises(webob.exc.HTTPConflict,
self.manager._remove_floating_ip,
self.fake_req, wrong_uuid, body=body)
def test_floating_ip_disassociate_wrong_instance_id(self):
def get_instance_by_floating_ip_addr(self, context, address):
if address == '10.10.10.10':
return WRONG_INST
self.stubs.Set(network.api.API, "get_instance_id_by_floating_address",
get_instance_by_floating_ip_addr)
body = dict(removeFloatingIp=dict(address='10.10.10.10'))
self.assertRaises(webob.exc.HTTPConflict,
self.manager._remove_floating_ip,
self.fake_req, TEST_INST, body=body)
def test_floating_ip_disassociate_auto_assigned(self):
def fake_get_floating_ip_addr_auto_assigned(self, context, address):
return {'id': 1, 'address': '10.10.10.10', 'pool': 'nova',
'fixed_ip_id': 10, 'auto_assigned': 1}
def get_instance_by_floating_ip_addr(self, context, address):
if address == '10.10.10.10':
return TEST_INST
def network_api_disassociate(self, context, instance,
floating_address):
raise exception.CannotDisassociateAutoAssignedFloatingIP()
self.stubs.Set(network.api.API, "get_floating_ip_by_address",
fake_get_floating_ip_addr_auto_assigned)
self.stubs.Set(network.api.API, "get_instance_id_by_floating_address",
get_instance_by_floating_ip_addr)
self.stubs.Set(network.api.API, "disassociate_floating_ip",
network_api_disassociate)
body = dict(removeFloatingIp=dict(address='10.10.10.10'))
self.assertRaises(webob.exc.HTTPForbidden,
self.manager._remove_floating_ip,
self.fake_req, TEST_INST, body=body)
def test_floating_ip_disassociate_map_authorization_exc(self):
def fake_get_floating_ip_addr_auto_assigned(self, context, address):
return {'id': 1, 'address': '10.10.10.10', 'pool': 'nova',
'fixed_ip_id': 10, 'auto_assigned': 1}
def get_instance_by_floating_ip_addr(self, context, address):
if address == '10.10.10.10':
return TEST_INST
def network_api_disassociate(self, context, instance, address):
raise exception.Forbidden()
self.stubs.Set(network.api.API, "get_floating_ip_by_address",
fake_get_floating_ip_addr_auto_assigned)
self.stubs.Set(network.api.API, "get_instance_id_by_floating_address",
get_instance_by_floating_ip_addr)
self.stubs.Set(network.api.API, "disassociate_floating_ip",
network_api_disassociate)
body = dict(removeFloatingIp=dict(address='10.10.10.10'))
self.assertRaises(webob.exc.HTTPForbidden,
self.manager._remove_floating_ip,
self.fake_req, TEST_INST, body=body)
    # These are a few bad-parameter (malformed request body) tests.
def test_bad_address_param_in_remove_floating_ip(self):
body = dict(removeFloatingIp=dict(badparam='11.0.0.1'))
self.assertRaises(self.validation_error,
self.manager._remove_floating_ip, self.fake_req,
TEST_INST, body=body)
def test_missing_dict_param_in_remove_floating_ip(self):
body = dict(removeFloatingIp='11.0.0.1')
self.assertRaises(self.validation_error,
self.manager._remove_floating_ip, self.fake_req,
TEST_INST, body=body)
def test_missing_dict_param_in_add_floating_ip(self):
body = dict(addFloatingIp='11.0.0.1')
self.assertRaises(self.validation_error,
self.manager._add_floating_ip, self.fake_req,
TEST_INST, body=body)
def _build_floating_ip(self, address, fixed_ip):
floating = objects.FloatingIP(id=1, address=address, pool='foo',
fixed_ip=fixed_ip)
return floating
def _build_expected(self, floating_ip, fixed_ip, instance_id):
return {'floating_ip': {'id': floating_ip.id,
'ip': floating_ip.address,
'pool': floating_ip.pool,
'fixed_ip': fixed_ip,
'instance_id': instance_id}}
def _test_result(self, expected, actual):
expected_fl = expected['floating_ip']
actual_fl = actual['floating_ip']
self.assertEqual(expected_fl, actual_fl)
class ExtendedFloatingIpTestV21(test.TestCase):
    """Tests for the os-extended-floating-ips behaviour (fixed_address
    support on the addFloatingIp action).
    """

    # Addresses used by the fixtures below.
    floating_ip = "10.10.10.10"
    floating_ip_2 = "10.10.10.11"
    floating_ips = fips_v21

    def _create_floating_ips(self, floating_ips=None):
        """Create a floating IP object."""
        if floating_ips is None:
            floating_ips = [self.floating_ip]
        elif not isinstance(floating_ips, (list, tuple)):
            floating_ips = [floating_ips]
        # All fixture IPs share the same pool/host.
        dict_ = {'pool': 'nova', 'host': 'fake_host'}
        return db.floating_ip_bulk_create(
            self.context, [dict(address=ip, **dict_) for ip in floating_ips],
        )

    def _delete_floating_ip(self):
        # Only the primary fixture address is removed here (tearDown path).
        db.floating_ip_destroy(self.context, self.floating_ip)

    def setUp(self):
        """Stub out compute/network API calls and create fixture IPs."""
        super(ExtendedFloatingIpTestV21, self).setUp()
        # Module-level fake implementations (defined earlier in this file)
        # replace the real compute/network APIs.
        self.stubs.Set(compute.api.API, "get",
                       compute_api_get)
        self.stubs.Set(network.api.API, "get_floating_ip",
                       network_api_get_floating_ip)
        self.stubs.Set(network.api.API, "get_floating_ip_by_address",
                       network_api_get_floating_ip_by_address)
        self.stubs.Set(network.api.API, "get_floating_ips_by_project",
                       network_api_get_floating_ips_by_project)
        self.stubs.Set(network.api.API, "release_floating_ip",
                       network_api_release)
        self.stubs.Set(network.api.API, "disassociate_floating_ip",
                       network_api_disassociate)
        self.stubs.Set(network.api.API, "get_instance_id_by_floating_address",
                       get_instance_by_floating_ip_addr)
        self.stubs.Set(compute_utils, "get_nw_info_for_instance",
                       stub_nw_info(self))
        fake_network.stub_out_nw_api_get_instance_nw_info(self)
        self.stub_out('nova.db.instance_get',
                      fake_instance_get)
        self.context = context.get_admin_context()
        self._create_floating_ips()
        # Both the base and the extended floating-IPs extensions are enabled
        # so the fixed_address parameter is accepted.
        self.ext_mgr = extensions.ExtensionManager()
        self.ext_mgr.extensions = {}
        self.ext_mgr.extensions['os-floating-ips'] = True
        self.ext_mgr.extensions['os-extended-floating-ips'] = True
        self.controller = self.floating_ips.FloatingIPController()
        self.manager = self.floating_ips.\
            FloatingIPActionController(self.ext_mgr)
        self.fake_req = fakes.HTTPRequest.blank('')

    def tearDown(self):
        self._delete_floating_ip()
        super(ExtendedFloatingIpTestV21, self).tearDown()

    def test_extended_floating_ip_associate_fixed(self):
        # Passing fixed_address must be forwarded to associate_floating_ip.
        fixed_address = '192.168.1.101'

        def fake_associate_floating_ip(*args, **kwargs):
            self.assertEqual(fixed_address, kwargs['fixed_address'])

        self.stubs.Set(network.api.API, "associate_floating_ip",
                       fake_associate_floating_ip)
        body = dict(addFloatingIp=dict(address=self.floating_ip,
                                       fixed_address=fixed_address))
        rsp = self.manager._add_floating_ip(self.fake_req, TEST_INST,
                                            body=body)
        self.assertEqual(202, rsp.status_int)

    def test_extended_floating_ip_associate_fixed_not_allocated(self):
        # A fixed_address the instance does not own must yield HTTP 400.
        def fake_associate_floating_ip(*args, **kwargs):
            pass

        self.stubs.Set(network.api.API, "associate_floating_ip",
                       fake_associate_floating_ip)
        body = dict(addFloatingIp=dict(address=self.floating_ip,
                                       fixed_address='11.11.11.11'))
        ex = self.assertRaises(webob.exc.HTTPBadRequest,
                               self.manager._add_floating_ip,
                               self.fake_req, TEST_INST, body=body)
        self.assertIn("Specified fixed address not assigned to instance",
                      ex.explanation)
class FloatingIPPolicyEnforcementV21(test.NoDBTestCase):
    """Verify the os-floating-ips controller enforces its policy rule."""

    def setUp(self):
        super(FloatingIPPolicyEnforcementV21, self).setUp()
        self.controller = fips_v21.FloatingIPController()
        self.req = fakes.HTTPRequest.blank('')

    def _common_policy_check(self, func, *arg, **kwarg):
        """Deny the rule for this project and expect PolicyNotAuthorized."""
        rule_name = "os_compute_api:os-floating-ips"
        self.policy.set_rules({rule_name: "project:non_fake"})
        exc = self.assertRaises(
            exception.PolicyNotAuthorized, func, *arg, **kwarg)
        self.assertEqual(
            "Policy doesn't allow %s to be performed." % rule_name,
            exc.format_message())

    def test_index_policy_failed(self):
        self._common_policy_check(self.controller.index, self.req)

    def test_show_policy_failed(self):
        self._common_policy_check(self.controller.show, self.req, FAKE_UUID)

    def test_create_policy_failed(self):
        self._common_policy_check(self.controller.create, self.req)

    def test_delete_policy_failed(self):
        self._common_policy_check(self.controller.delete, self.req, FAKE_UUID)
class FloatingIPActionPolicyEnforcementV21(test.NoDBTestCase):
    """Verify the add/remove floating IP actions enforce their policy."""

    def setUp(self):
        super(FloatingIPActionPolicyEnforcementV21, self).setUp()
        self.controller = fips_v21.FloatingIPActionController()
        self.req = fakes.HTTPRequest.blank('')

    def _common_policy_check(self, func, *arg, **kwarg):
        """Deny the rule for this project and expect PolicyNotAuthorized."""
        rule_name = "os_compute_api:os-floating-ips"
        self.policy.set_rules({rule_name: "project:non_fake"})
        exc = self.assertRaises(
            exception.PolicyNotAuthorized, func, *arg, **kwarg)
        self.assertEqual(
            "Policy doesn't allow %s to be performed." % rule_name,
            exc.format_message())

    def test_add_policy_failed(self):
        body = {'addFloatingIp': {'address': '10.10.10.11'}}
        self._common_policy_check(
            self.controller._add_floating_ip, self.req, FAKE_UUID, body=body)

    def test_remove_policy_failed(self):
        body = {'removeFloatingIp': {'address': '10.10.10.10'}}
        self._common_policy_check(
            self.controller._remove_floating_ip, self.req,
            FAKE_UUID, body=body)
| |
# coding: utf-8
# Text classification of 20-newsgroups with an LSTM over GloVe embeddings.
from __future__ import print_function
import os
import numpy as np
import time
# Fixed seed so tokenization-independent runs are reproducible.
np.random.seed(1337)
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.utils.np_utils import to_categorical
from keras.layers import Dense, Flatten, Activation
from keras.layers import Convolution1D, MaxPooling1D, Embedding, LSTM
from keras.models import Model
from keras.layers import Input, Dropout
from keras.optimizers import SGD, Adadelta
from keras.models import Sequential
from sklearn.model_selection import train_test_split, KFold
import csv
import sys
# --- Hyper-parameters (module globals; mutated by reset_parameter()/main()) ---
MAX_SEQUENCE_LENGTH = 1000  # pad/truncate each document to this many tokens
MAX_NB_WORDS = 20000        # vocabulary cap for the tokenizer
EMBEDDING_DIM = 100         # GloVe vector dimension
VALIDATION_SPLIT = 0.1      # fraction held out (both test and val splits)
DROP_OUT = 0.3              # LSTM dropout rate
Nb_EPOCH = 25               # training epochs
BATCH_SIZE = 10
Classes = 2                 # number of newsgroup classes

# Parameter sweep: each key names a hyper-parameter, each value is the list
# of settings to try. Only 'classes' is currently enabled.
parameters = {
    "classes" : [11],
    #"batches" : [10, 20, 50, 100],
    #"epochs": [1, 10, 25, 50, 100],
    #"dropout_rate" : [0.0, 0.1, 0.2, 0.3, 0.4],
    #"embedding_dimension" : [25, 50, 100, 200]
}
def add_csv_result(train_accuracy, valid_accuracy, test_accuracy, time):
    """Record one run's hyper-parameters and accuracies into the global
    ``header`` buffer (a header row followed by one data row).

    Note: ``header`` is re-initialised on every call, so only the most
    recent run is buffered until write_csv_result() flushes it.
    """
    global header, items
    global MAX_SEQUENCE_LENGTH, MAX_NB_WORDS , EMBEDDING_DIM, VALIDATION_SPLIT, DROP_OUT, Nb_EPOCH, BATCH_SIZE, Classes
    column_names = ['Classes', 'Dropout', 'Iterations', 'Batch Size','Embedding Dimension',
                    'Training Accuracy', 'Validation Accuracy', 'Test Accuracy', 'Time']
    items = ([Classes, DROP_OUT, Nb_EPOCH, BATCH_SIZE, EMBEDDING_DIM]
             + [train_accuracy, valid_accuracy, test_accuracy, time])
    header = [column_names, items]
def write_csv_result(fname):
    """Flush the buffered rows in the global ``header`` to *fname* as CSV,
    then reset the buffer.

    Bug fix: the original assigned ``header = []`` without declaring
    ``global header``, which made ``header`` a local name for the whole
    function and raised UnboundLocalError at ``writer.writerows(header)``.
    """
    global header
    # csv needs binary mode on Python 2 but text mode with newline='' on
    # Python 3 (the original 'wb' raises TypeError under Python 3).
    if sys.version_info[0] >= 3:
        f = open(fname, 'w', newline='')
    else:
        f = open(fname, 'wb')
    try:
        writer = csv.writer(f)
        writer.writerows(header)
    finally:
        f.close()
    header = []  # reset header after each loop
def reset_parameter():
    """Restore all sweep hyper-parameters to their baseline values.

    NOTE(review): these baselines differ from the module-level initial
    values (EMBEDDING_DIM 200 vs 100, DROP_OUT 0.2 vs 0.3, Nb_EPOCH 30 vs
    25) — confirm which set is intended as the baseline.
    """
    global MAX_SEQUENCE_LENGTH, MAX_NB_WORDS , EMBEDDING_DIM, VALIDATION_SPLIT, DROP_OUT, Nb_EPOCH, BATCH_SIZE, Classes
    MAX_SEQUENCE_LENGTH = 1000
    MAX_NB_WORDS = 20000
    EMBEDDING_DIM = 200
    VALIDATION_SPLIT = 0.1
    DROP_OUT = 0.2
    Nb_EPOCH = 30
    BATCH_SIZE = 10
    Classes = 2
def embedding_index(GLOVE_DIR, FILENAME):
    """Load GloVe vectors from GLOVE_DIR/FILENAME into the module global
    ``embeddings_index`` (word -> float32 numpy vector) and return it.

    Each input line is "<word> <c1> <c2> ...". Returning the dict is new
    but backward-compatible (the original returned None and callers use
    the global).
    """
    global embeddings_index
    embeddings_index = {}
    fname = os.path.join(GLOVE_DIR, FILENAME)
    # 'with' guarantees the handle is closed even if a line fails to parse
    # (the original leaked the file object on error).
    with open(fname) as f:
        for line in f:
            values = line.split()
            word = values[0]
            coefs = np.asarray(values[1:], dtype='float32')
            embeddings_index[word] = coefs
    print('Found %s word vectors.' % len(embeddings_index))
    return embeddings_index
def load_data(TEXT_DATA_DIR):
    """Read the 20-newsgroups corpus from TEXT_DATA_DIR and tokenize it.

    Expects one subdirectory per class; files whose names are all digits
    are treated as documents. Sets the module globals ``word_index`` and
    ``tokenizer`` as a side effect.

    Returns (data, labels, labels_index): padded int sequences, one-hot
    label matrix, and the class-name -> id mapping.
    """
    print('Processing text dataset')
    texts = []  # list of text samples
    labels_index = {}  # dictionary mapping label name to numeric id
    labels = []  # list of label ids
    for name in sorted(os.listdir(TEXT_DATA_DIR)):
        path = os.path.join(TEXT_DATA_DIR, name)
        if os.path.isdir(path):
            # Class id is assigned in sorted directory order.
            label_id = len(labels_index)
            labels_index[name] = label_id
            for fname in sorted(os.listdir(path)):
                # Only the numeric file names are corpus documents.
                if fname.isdigit():
                    fpath = os.path.join(path, fname)
                    # Python 3 needs an explicit latin-1 decode for this corpus.
                    if sys.version_info < (3,):
                        f = open(fpath)
                    else:
                        f = open(fpath, encoding='latin-1')
                    texts.append(f.read())
                    f.close()
                    labels.append(label_id)
    print('Found %s texts.' % len(texts))
    global word_index, tokenizer
    # Keras 1.x API: 'nb_words' caps the vocabulary size.
    tokenizer = Tokenizer(nb_words=MAX_NB_WORDS)
    tokenizer.fit_on_texts(texts)
    sequences = tokenizer.texts_to_sequences(texts)
    word_index = tokenizer.word_index
    print('Found %s unique tokens.' % len(word_index))
    data = pad_sequences(sequences, maxlen=MAX_SEQUENCE_LENGTH)
    labels = to_categorical(np.asarray(labels))
    print('Shape of data tensor:', data.shape)
    print('Shape of label tensor:', labels.shape)
    return (data, labels, labels_index)
def train_Test_Split(data, labels):
    """Hold out VALIDATION_SPLIT of the corpus as the final test set."""
    split = train_test_split(data, labels, test_size=VALIDATION_SPLIT)
    train_X, test_X, train_Y, test_Y = split
    return (train_X, train_Y, test_X, test_Y)
# split the data into a training set and a validation set
def train_Val_Split(train_X, train_Y):
    """Carve a validation set out of the training data."""
    split = train_test_split(train_X, train_Y, test_size=VALIDATION_SPLIT)
    trainX, valX, trainY, valY = split
    return (trainX, trainY, valX, valY)
# prepare embedding matrix
def embeddingMatrix():
    """Build the (nb_words + 1, EMBEDDING_DIM) embedding weight matrix.

    Row i holds the GloVe vector for the word with tokenizer index i
    (index 0 and out-of-vocabulary words stay all-zero). Also sets the
    module globals ``nb_words`` and ``embedding_matrix``.
    """
    global nb_words, embedding_matrix
    print('Preparing embedding matrix.')
    nb_words = min(MAX_NB_WORDS, len(word_index))
    embedding_matrix = np.zeros((nb_words + 1, EMBEDDING_DIM))
    for word, idx in word_index.items():
        if idx <= MAX_NB_WORDS:
            vector = embeddings_index.get(word)
            if vector is not None:
                embedding_matrix[idx] = vector
    return embedding_matrix
def create_model():
    """Build the LSTM classifier: frozen GloVe embedding -> LSTM(128) ->
    Dense(Classes) with sigmoid output, compiled with Adam.

    Relies on the module globals ``nb_words``, ``embedding_matrix``,
    ``Classes``, ``DROP_OUT`` and ``MAX_SEQUENCE_LENGTH``.

    NOTE(review): sigmoid + categorical_crossentropy is unusual — softmax
    is the conventional pairing for one-hot multi-class targets; confirm
    this is intentional.
    """
    print('Number of class: ||%d||' % (Classes))
    model = Sequential()
    model.add(Embedding(  # Layer 0, Start
        input_dim=nb_words + 1,  # Size to dictionary, has to be input + 1
        output_dim= EMBEDDING_DIM,  # Dimensions to generate
        weights=[embedding_matrix],  # Initialize word weights
        input_length=MAX_SEQUENCE_LENGTH,
        trainable=False))  # Define length to input sequences in the first layer
    # Keras 1.x args: dropout_W drops inputs, dropout_U drops recurrent
    # connections.
    model.add(LSTM(128, dropout_W=DROP_OUT, dropout_U=DROP_OUT))
    model.add(Dense(Classes))
    model.add(Activation('sigmoid'))
    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
    return model
def train_and_evaluate_model(model, train_X, train_Y, test_X, test_Y):
    """Fit *model* on the training set and evaluate on the test set.

    Uses the module globals VALIDATION_SPLIT, Nb_EPOCH and BATCH_SIZE.
    Returns (history dict, final train loss, final train accuracy,
    test loss, test accuracy, training wall-clock seconds).
    """
    start = time.time()
    # Keras 1.x spelling: 'nb_epoch' (renamed 'epochs' in Keras 2).
    history = model.fit(train_X, train_Y, validation_split=VALIDATION_SPLIT, nb_epoch=Nb_EPOCH, batch_size=BATCH_SIZE)
    trainTime = time.time() - start
    print ("Training Time : ", trainTime)
    # Metrics of the last epoch only (index Nb_EPOCH-1).
    last_epoch_training_accuracy = history.history['acc'][Nb_EPOCH-1]
    last_epoch_training_loss = history.history['loss'][Nb_EPOCH-1]
    print ("Training Loss: ", last_epoch_training_loss)
    print ("Training Accuracy: ", last_epoch_training_accuracy)
    last_epoch_validation_accuracy = history.history['val_acc'][Nb_EPOCH-1]
    last_epoch_validation_loss = history.history['val_loss'][Nb_EPOCH-1]
    print ("validation Loss: ", last_epoch_validation_loss)
    print ("Validation Accuracy: ", last_epoch_validation_accuracy)
    eval_loss, eval_accuracy = model.evaluate(test_X, test_Y, verbose=0)
    print ("Testing Loss: ", eval_loss)
    print ("Testing Accuracy: ", eval_accuracy)
    model_history = history.history
    return (model_history, last_epoch_training_loss, last_epoch_training_accuracy, eval_loss, eval_accuracy, trainTime)
def main():
    """Grid-search driver: for each hyper-parameter setting, reload the
    data/embeddings, train one model and append the results to a CSV.

    Bug fix: BATCH_SIZE was missing from the ``global`` statement, so the
    (currently commented-out) 'batches' sweep would have rebound a local
    name and the module-level BATCH_SIZE used by train_and_evaluate_model
    would never change.
    """
    global Classes, DROP_OUT, EMBEDDING_DIM, Nb_EPOCH, BATCH_SIZE
    global FILENAME, TEXT_DATA_DIR
    for key in parameters:
        reset_parameter()  # restore baseline before each sweep
        for indx, val in enumerate(parameters[key]):
            # Apply the swept value to the matching module global.
            if key == 'classes':
                Classes = val
            elif key == 'dropout_rate':
                DROP_OUT = val
            elif key == 'embedding_dimension':
                EMBEDDING_DIM = val
            elif key == 'epochs':
                Nb_EPOCH = val
            elif key == 'batches':
                BATCH_SIZE = val
            GLOVE_DIR = './glove.twitter.27B/'
            FILENAME = 'glove.twitter.27B.' + str(EMBEDDING_DIM) + 'd.txt'
            TEXT_DATA_DIR = './20_newsgroups_' + str(Classes)
            print ("******************Loop Test******************")
            print ("*****Parameters: ", key, ", Value: ", val, "*****")
            embedding_index(GLOVE_DIR, FILENAME)  # build GloVe index
            data, labels, labels_index = load_data(TEXT_DATA_DIR)  # load datasets
            embedding_matrix = embeddingMatrix()  # embedding matrix used as initial weights
            train_X, train_Y, test_X, test_Y = train_Test_Split(data, labels)  # hold out the test set
            model = create_model()
            global_time = time.time()
            model_history, last_epoch_training_loss, last_epoch_training_accuracy, eval_loss, eval_accuracy, trainTime = train_and_evaluate_model(model, train_X, train_Y, test_X, test_Y)
            total_time = time.time() - global_time
            print ("Total Training Time : ", total_time)
            # Validation accuracy is not aggregated here; log 0 for it.
            add_csv_result(last_epoch_training_accuracy, 0, eval_accuracy, total_time)
        # One CSV per swept parameter; the buffer holds the latest run.
        write_csv_result(key + "_20news_LSTM_CV.csv")
# Run the sweep when the module is executed.
# NOTE(review): consider guarding with `if __name__ == '__main__':` so the
# sweep does not run on import.
main()
#write_csv_result("20news_LSTM_CV_2classes.csv")
# import SQLdb as db
# db.updateLSTM(classes = Classes, dropouts = DROP_OUT, iterations = Nb_EPOCH, accuracy = eval_accuracy, remark = total_time)
| |
# Generator for the Java REST bindings of a running H2O node.
import sys, pprint, argparse, errno, re, string

# TODO: ugh:
# Make the in-tree 'h2o' client package importable without installation.
sys.path.insert(1, '../../py')
import h2o
import os

# print "ARGV is:", sys.argv

# Directory containing this script; used to locate the default output dir.
here=os.path.dirname(os.path.realpath(__file__))

parser = argparse.ArgumentParser(
    description='Attach to an H2O instance and call its REST API to generate the Java REST API bindings docs and write them to the filesystem.',
)
parser.add_argument('--verbose', '-v', help='verbose output', action='store_true')
# --usecloud ip:port overrides --host/--port (see MAIN below).
parser.add_argument('--usecloud', help='ip:port to attach to', default='')
parser.add_argument('--host', help='hostname to attach to', default='localhost')
parser.add_argument('--port', help='port to attach to', type=int, default=54321)
parser.add_argument('--dest', help='destination directory', default=(here + '/../build/src-gen/main/java'))
args = parser.parse_args()

h2o.H2O.verbose = True if args.verbose else False
pp = pprint.PrettyPrinter(indent=4)  # pretty printer for debugging
def cons_java_type(pojo_name, name, h2o_type, schema_name):
    """Map an H2O REST field type to the Java type used in the bindings.

    pojo_name and name are used only for the error message. Returns a Java
    type string such as 'String[]', 'FrameKeyV3' or 'int'. Raises
    Exception for an unrecognized type.
    """
    # Any '[]' suffix is carried over to the Java type verbatim.
    # Bug fix: was `idx is -1` — identity comparison against an int
    # literal, which only works via CPython small-int caching and is a
    # SyntaxWarning on Python 3.8+. Also hoisted out of both branches.
    idx = h2o_type.find('[]')
    brackets = '' if idx == -1 else h2o_type[idx:]
    if schema_name is None or h2o_type.startswith('enum'):
        simple_type = h2o_type.replace('[]', '')
    else:
        simple_type = schema_name

    if h2o_type.startswith('Map'):
        # e.g. Map<string,string> -> Map<String,String>
        java_type = h2o_type
        java_type = java_type.replace('<string,', '<String,')
        java_type = java_type.replace(',string>', ',String>')
        return java_type

    if simple_type == 'string':
        return simple_type.capitalize() + brackets

    # TODO: for input keys are String; for output they are KeyV3 and children
    if h2o_type.startswith('Key<'):  # Key<Frame> is a schema of FrameKeyVx
        # return 'String' + brackets
        return schema_name + brackets

    if simple_type in ['int', 'float', 'double', 'long', 'boolean', 'byte', 'short']:
        return simple_type + brackets

    if simple_type == 'enum':
        return schema_name + brackets

    if schema_name is not None:
        return simple_type + brackets

    # Polymorphic fields can either be a scalar, a Schema, or an array of either of these:
    if simple_type == 'Polymorphic':
        return 'Object'  # TODO: Polymorphic class?

    # IcedWrapper fields can either be a scalar or an array of either of scalars:
    if simple_type == 'IcedWrapper':
        return 'Object'  # TODO: Polymorphic class?

    raise Exception('Unexpectedly found a ' + simple_type + ' field: ' + name + ' in pojo: ' + pojo_name)
# generate a Schema POJO and find any Enums it uses
def generate_pojo(schema, pojo_name):
    """Render *schema* as the lines of a Java POJO class *pojo_name*.

    Enum fields encountered along the way are recorded in the global
    ``enums`` map (name -> values) so they can be emitted later.
    Returns the file content as a list of lines.
    """
    global args
    global enums

    if args.verbose: print('Generating POJO: ', pojo_name)

    pojo = []
    pojo.append("package water.bindings.pojos;")
    pojo.append("")

    # Only emit the java.util.Map import if some field needs it.
    has_map = False
    for field in schema['fields']:
        if field['type'].startswith('Map'):
            has_map = True

    pojo.append("import com.google.gson.Gson;")
    if has_map:
        pojo.append("import java.util.Map;")
    pojo.append("")

    superclass = schema['superclass']
    if 'Iced' == superclass:
        # top of the schema class hierarchy
        superclass = 'Object'

    pojo.append("public class " + pojo_name + " extends {superclass} ".format(superclass=superclass) + '{')

    first = True
    for field in schema['fields']:
        help = field['help']
        type = field['type']
        name = field['name']
        schema_name = field['schema_name']

        # Metadata and raw Iced fields are not exposed in the bindings.
        if name == '__meta': continue
        if type == 'Iced': continue  # TODO

        java_type = cons_java_type(pojo_name, name, type, schema_name)

        if type.startswith('enum'):
            enum_name = field['schema_name']
            if enum_name not in enums:
                # save it for later
                enums[enum_name] = field['values']

        # Blank line between consecutive field declarations.
        if not first:
            pojo.append("")

        # Inherited fields are emitted commented-out for reference only.
        if field['is_inherited']:
            pojo.append("    /* INHERITED: {help} ".format(help=help))
            pojo.append("     * public {type} {name};".format(type=java_type, name=name))
            pojo.append("     */")
        else:
            pojo.append("    /** {help} */".format(help=help))
            pojo.append("    public {type} {name};".format(type=java_type, name=name))

        first = False

    pojo.append("")
    pojo.append("    /** Return the contents of this object as a JSON String. */")
    pojo.append("    @Override")
    pojo.append("    public String toString() {")
    pojo.append("        return new Gson().toJson(this);")
    pojo.append("    }")
    pojo.append("}")
    return pojo
def generate_enum(name, values):
    """Render a Java enum named *name* with the given constant *values*.

    Returns the file content as a list of lines.
    """
    if args.verbose: print('Generating enum: ', name)

    lines = ["package water.bindings.pojos;", ""]
    lines.append("public enum " + name + " {")
    for value in values:
        lines.append("    {value},".format(value=value))
    lines.append("}")
    return lines
# NOTE: not complete yet
def generate_retrofit_proxies(endpoints_meta, all_schemas_map):
    '''
    Walk across all the endpoint metadata returning a map of classnames to
    interface definitions.

    endpoints_meta: list of route metadata dicts from the /Metadata endpoints.
    all_schemas_map: schema name -> schema dict, used to type the parameters.

    Retrofit interfaces look like this:

        public interface GitHubService {
            @GET("/users/{user}/repos")
            Call<List<Repo>> listRepos(@Path("user") String user);
        }
    '''
    pojos = {}
    java_type_map = { 'string': 'String' }

    endpoints_by_entity = {}  # entity (e.g., Frames) maps to an array of endpoints

    # For each endpoint grab the endpoint prefix (the entity), e.g. ModelBuilders, for use as the classname:
    entity_pattern_str = r"/[0-9]+?/([^/]+)(/.*)?"  # Python raw string
    entity_pattern = re.compile(entity_pattern_str)

    # Collect the endpoints for each REST entity
    for meta in endpoints_meta:
        h2o.H2O.verboseprint('finding entity for url_pattern: ' + meta['url_pattern'])
        m = entity_pattern.match(meta['url_pattern'])
        entity = m.group(1)

        # If the route contains a suffix like .bin strip it off.
        if '.' in entity:
            entity = entity.split('.')[0]

        h2o.H2O.verboseprint('found entity: ' + entity)

        if entity not in endpoints_by_entity:
            endpoints_by_entity[entity] = []
        endpoints_by_entity[entity].append(meta)

    # replace path vars like (?<schemaname>.*) with {schemaname} for Retrofit's annotation
    # TODO: fails for /3/Metadata/endpoints/(?<num>[0-9]+)
    var_pattern_str = r"\(\?<(.+?)>\.\*\)"  # Python raw string
    var_pattern = re.compile(var_pattern_str)

    # Walk across all the entities and generate a class with methods for all its endpoints:
    for entity in endpoints_by_entity:
        pojo = []
        signatures = {}  # used only to detect duplicate method signatures

        pojo.append("package water.bindings.proxies.retrofit;")
        pojo.append("")
        pojo.append("import water.bindings.pojos.*;")
        pojo.append("import retrofit2.*;")
        pojo.append("import retrofit2.http.*;")
        pojo.append("import java.util.Map;")
        pojo.append("")
        pojo.append("public interface " + entity + " {")

        first = True
        for meta in endpoints_by_entity[entity]:
            path = meta['url_pattern']

            # These redundant paths cause conflicts:
            if path == "/3/ModelMetrics/frames/(?<frame>.*)/models/(?<model>.*)" or \
               path == "/3/ModelMetrics/frames/(?<frame>.*)":
                continue

            path_parm_names = meta['path_params']

            # replace all the vars in the path with the actual field names from path_params
            # (one substitution per loop iteration, in path_params order).
            retrofit_path = path
            idx = 0
            while re.search(var_pattern, retrofit_path):
                retrofit_path = var_pattern.sub(r'{' + path_parm_names[idx] + '}', retrofit_path, 1)
                idx += 1

            # Escape backslashes for embedding in a Java string literal.
            retrofit_path = retrofit_path.replace('\\', '\\\\')

            summary = meta['summary']
            http_method = meta['http_method']
            input_schema_name = meta['input_schema']
            output_schema_name = meta['output_schema']

            handler_method = meta['handler_method']
            method = handler_method

            # NOTE: hackery due to the way the paths are formed: POST to /99/Grid/glm and to /3/Grid/deeplearning both call methods called train
            if (entity == 'Grid' or entity == 'ModelBuilders') and (method == 'train'):
                # /99/Grid/glm or /3/ModelBuilders/glm
                pieces = path.split('/')
                if len(pieces) != 4:
                    raise Exception("Expected 3 parts to this path (something like /99/Grid/glm): " + path)
                algo = pieces[3]
                method = method + '_' + algo  # train_glm()
            elif (entity == 'ModelBuilders') and (method == 'validate_parameters'):
                # /3/ModelBuilders/glm/parameters
                pieces = path.split('/')
                if len(pieces) != 5:
                    raise Exception("Expected 3 parts to this path (something like /3/ModelBuilders/glm/parameters): " + path)
                algo = pieces[3]
                method = method + '_' + algo  # validate_parameters_glm()

            # TODO: handle query parameters from RequestSchema
            parms = ""

            # POST parameters are sent as form fields; GET only has path parms.
            if http_method == 'POST':
                is_post = True
            else:
                is_post = False

            input_schema = all_schemas_map[input_schema_name]

            # calculate indent
            indent = ' ' * len('  Call<{output_schema_name}> {method}('.format(output_schema_name = output_schema_name, method = method))

            # include path parms first, and then POST body parms
            first_parm = True
            for parm in path_parm_names:
                # find the metadata for the field from the input schema:
                fields = [field for field in input_schema['fields'] if field['name'] == parm]
                if len(fields) != 1:
                    print('Failed to find parameter: ' + parm + ' for endpoint: ' + repr(meta))

                field = fields[0]
                if field['direction'] == 'OUTPUT': continue

                # cons up the proper Java type:
                parm_type = cons_java_type(entity, field['name'], field['type'], field['schema_name'])

                # Send keys as Strings
                # TODO: brackets
                if parm_type.endswith('KeyV3'):
                    parm_type = 'String'

                if not first_parm: parms += ',\n'; parms += indent
                parms += '@Path("{parm}") '.format(parm = parm)
                parms += parm_type
                parms += ' '
                parms += parm
                first_parm = False

            if is_post:
                for field in input_schema['fields']:
                    if field['direction'] == 'OUTPUT': continue
                    # Path parms were already emitted above.
                    if field['name'] in path_parm_names: continue

                    # cons up the proper Java type:
                    parm_type = cons_java_type(entity, field['name'], field['type'], field['schema_name'])
                    parm = field['name']

                    # Send keys as Strings
                    # TODO: brackets
                    if parm_type.endswith('KeyV3'):
                        parm_type = 'String'

                    if not first_parm: parms += ',\n'; parms += indent
                    parms += '@Field("{parm}") '.format(parm = parm)
                    parms += parm_type
                    parms += ' '
                    parms += parm
                    first_parm = False

            # check for conflicts:
            signature = '{method}({parms});'.format(method = method, parms = parms)
            if signature in signatures:
                print('ERROR: found a duplicate method signature in entity ' + entity + ': ' + signature)
            else:
                signatures[signature] = signature

            if not first: pojo.append('')
            pojo.append('  /** {summary} */'.format(summary = summary))
            if http_method == 'POST':
                pojo.append('  @FormUrlEncoded')
            pojo.append('  @{http_method}("{path}")'.format(http_method = http_method, path = retrofit_path))
            pojo.append('  Call<{output_schema_name}> {method}({parms});'.format(output_schema_name = output_schema_name, method = method, parms = parms))

            first = False

        pojo.append("}")
        pojos[entity] = pojo

    return pojos
######
# MAIN:
######
# --usecloud ip:port takes precedence over --host/--port.
if (len(args.usecloud) > 0):
    arr = args.usecloud.split(":")
    args.host = arr[0]
    args.port = int(arr[1])

h2o.H2O.verboseprint("connecting to: ", args.host, ":", args.port)
a_node = h2o.H2O(args.host, args.port)

print('creating the Java bindings in {}. . .'.format(args.dest))

#################################################################
# Get all the schemas and generate POJOs or Enums as appropriate.
# Note the medium ugliness that the enums list is global. . .
#################################################################
enums = {}

# write the schemas' POJOs, discovering enums on the way
all_schemas = a_node.schemas()['schemas']
all_schemas_map = {}  # save for later use

for schema in all_schemas:
    # The 'void' pseudo-schema has no POJO representation.
    if 'void' == schema['name']:
        continue;

    schema_name = schema['name']
    pojo_name = schema_name;

    all_schemas_map[schema_name] = schema

    save_full = args.dest + os.sep + 'water/bindings/pojos/' + pojo_name + '.java'
    save_dir = os.path.dirname(save_full)

    # create dirs without race:
    try:
        os.makedirs(save_dir)
    except OSError as exception:
        if exception.errno != errno.EEXIST:
            raise

    with open(save_full, 'w') as the_file:
        for line in generate_pojo(schema, pojo_name):
            the_file.write("%s\n" % line)
########################
# Generate Enum classes.
########################
# 'enums' was populated as a side effect of generate_pojo() above.
for name, values in enums.items():
    pojo_name = name;

    save_full = args.dest + os.sep + 'water/bindings/pojos/' + pojo_name + '.java'
    save_dir = os.path.dirname(save_full)

    # create dirs without race:
    try:
        os.makedirs(save_dir)
    except OSError as exception:
        if exception.errno != errno.EEXIST:
            raise

    with open(save_full, 'w') as the_file:
        for line in generate_enum(name, values):
            the_file.write("%s\n" % line)
#########################################################################
# Get the list of endpoints and generate Retrofit proxy methods for them.
#########################################################################
endpoints_result = a_node.endpoints()
endpoints = endpoints_result['routes']

if h2o.H2O.verbose:
    print('Endpoints: ')
    pp.pprint(endpoints)

# Collect all the endpoints:
# (fetched one by one; endpoint_by_number returns full route metadata)
endpoints_meta = []
for num in range(len(endpoints)):
    meta = a_node.endpoint_by_number(num)['routes'][0]
    endpoints_meta.append(meta)

## Generate source code for a class for each entity (e.g., ModelBuilders):
retrofitProxies = generate_retrofit_proxies(endpoints_meta, all_schemas_map)

# TODO: makedirs only once!

# Write them out:
for entity, proxy in retrofitProxies.items():
    save_full = args.dest + os.sep + 'water/bindings/proxies/retrofit/' + entity + '.java'
    save_dir = os.path.dirname(save_full)

    # create dirs without race:
    try:
        os.makedirs(save_dir)
    except OSError as exception:
        if exception.errno != errno.EEXIST:
            raise

    with open(save_full, 'w') as the_file:
        for line in proxy:
            the_file.write("%s\n" % line)
#####################################################
# Write out an example program that uses the proxies.
#####################################################
# NOTE: the Java source below is emitted verbatim into Example.java.
retrofit_example = '''package water.bindings.proxies.retrofit;
import water.bindings.pojos.*;
import com.google.gson.*;
import retrofit2.*;
import retrofit2.http.*;
import retrofit2.converter.gson.GsonConverterFactory;
import retrofit2.Call;
import java.io.IOException;
import java.lang.reflect.Type;
public class Example {
/**
* Keys get sent as Strings and returned as objects also containing the type and URL,
* so they need a custom GSON serializer.
*/
private static class KeySerializer implements JsonSerializer<KeyV3> {
public JsonElement serialize(KeyV3 key, Type typeOfKey, JsonSerializationContext context) {
return new JsonPrimitive(key.name);
}
}
public static JobV3 poll(Retrofit retrofit, String job_id) {
Jobs jobsService = retrofit.create(Jobs.class);
Response<JobsV3> jobs_response = null;
int retries = 3;
JobsV3 jobs = null;
do {
try {
jobs_response = jobsService.fetch(job_id).execute();
}
catch (IOException e) {
System.err.println("Caught exception: " + e);
}
if (! jobs_response.isSuccessful())
if (retries-- > 0)
continue;
else
throw new RuntimeException("/3/Jobs/{job_id} failed 3 times.");
jobs = jobs_response.body();
if (null == jobs.jobs || jobs.jobs.length != 1)
throw new RuntimeException("Failed to find Job: " + job_id);
if (! "RUNNING".equals(jobs.jobs[0].status)) try { Thread.sleep(100); } catch (InterruptedException e) {} // wait 100mS
} while ("RUNNING".equals(jobs.jobs[0].status));
return jobs.jobs[0];
}
public static void main (String[] args) {
Gson gson = new GsonBuilder().registerTypeAdapter(KeyV3.class, new KeySerializer()).create();
Retrofit retrofit = new Retrofit.Builder()
.baseUrl("http://localhost:54321/") // note trailing slash for Retrofit 2
.addConverterFactory(GsonConverterFactory.create(gson))
.build();
CreateFrame createFrameService = retrofit.create(CreateFrame.class);
Frames framesService = retrofit.create(Frames.class);
Models modelsService = retrofit.create(Models.class);
try {
// NOTE: the Call objects returned by the service can't be reused, but they can be cloned.
Response<FramesV3> all_frames_response = framesService.list().execute();
Response<ModelsV3> all_models_response = modelsService.list().execute();
if (all_frames_response.isSuccessful()) {
FramesV3 all_frames = all_frames_response.body();
System.out.println("All Frames: ");
System.out.println(all_frames);
} else {
System.err.println("framesService.list() failed");
}
if (all_models_response.isSuccessful()) {
ModelsV3 all_models = all_models_response.body();
System.out.println("All Models: ");
System.out.println(all_models);
} else {
System.err.println("modelsService.list() failed");
}
Response<JobV3> create_frame_response = createFrameService.run(null, 1000, 100, 42, 42, true, 0, 100000, 0.2, 100, 0.2, 32767, 0.2, 0.5, 0.2, 0, 0.2, 2, true, null).execute();
if (create_frame_response.isSuccessful()) {
JobV3 job = create_frame_response.body();
if (null == job || null == job.key)
throw new RuntimeException("CreateFrame returned a bad Job: " + job);
job = poll(retrofit, job.key.name);
KeyV3 new_frame = job.dest;
System.out.println("Created frame: " + new_frame);
all_frames_response = framesService.list().execute();
if (all_frames_response.isSuccessful()) {
FramesV3 all_frames = all_frames_response.body();
System.out.println("All Frames (after createFrame): ");
System.out.println(all_frames);
} else {
System.err.println("framesService.list() failed");
}
Response<FramesV3> one_frame_response = framesService.fetch(new_frame.name).execute();
if (one_frame_response.isSuccessful()) {
FramesV3 one_frames = one_frame_response.body();
System.out.println("One Frame (after createFrame): ");
System.out.println(one_frames);
} else {
System.err.println("framesService.fetch() failed");
}
} else {
System.err.println("createFrameService.run() failed");
}
}
catch (IOException e) {
System.err.println("Caught exception: " + e);
}
}
}
'''
# Build the output path portably (was os.sep + hard-coded slashes plus a
# pointless "+ 'Example' +" concatenation).
save_full = os.path.join(args.dest, 'water', 'bindings', 'proxies', 'retrofit', 'Example.java')
save_dir = os.path.dirname(save_full)
# create dirs without race (tolerate EEXIST, re-raise anything else):
try:
    os.makedirs(save_dir)
except OSError as exception:
    if exception.errno != errno.EEXIST:
        raise
with open(save_full, 'w') as the_file:
    the_file.write("%s\n" % retrofit_example)
| |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """Add an optional ``directory`` column to Dataset_File and Dataset.

    South schema migration. The ``models`` dict below is the frozen ORM
    snapshot (auto-generated by South) used to build the ``orm`` argument
    passed to :meth:`forwards` / :meth:`backwards`; do not edit it by hand.
    """

    def forwards(self, orm):
        """Apply the migration: add the two nullable ``directory`` columns."""
        # Adding field 'Dataset_File.directory'
        db.add_column(u'tardis_portal_dataset_file', 'directory',
                      self.gf('tardis.tardis_portal.models.fields.DirectoryField')(null=True, blank=True),
                      keep_default=False)

        # Adding field 'Dataset.directory'
        db.add_column(u'tardis_portal_dataset', 'directory',
                      self.gf('tardis.tardis_portal.models.fields.DirectoryField')(null=True, blank=True),
                      keep_default=False)

    def backwards(self, orm):
        """Revert the migration: drop the two ``directory`` columns."""
        # Deleting field 'Dataset_File.directory'
        db.delete_column(u'tardis_portal_dataset_file', 'directory')

        # Deleting field 'Dataset.directory'
        db.delete_column(u'tardis_portal_dataset', 'directory')

    # Frozen ORM state captured by South at the time this migration was
    # created (auto-generated; keep in sync only via South tooling).
    models = {
        u'auth.group': {
            'Meta': {'object_name': 'Group'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        u'auth.permission': {
            'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        u'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        u'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'tardis_portal.author_experiment': {
            'Meta': {'ordering': "['order']", 'unique_together': "(('experiment', 'author'),)", 'object_name': 'Author_Experiment'},
            'author': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'experiment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['tardis_portal.Experiment']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'order': ('django.db.models.fields.PositiveIntegerField', [], {}),
            'url': ('django.db.models.fields.URLField', [], {'max_length': '2000', 'blank': 'True'})
        },
        'tardis_portal.datafileparameter': {
            'Meta': {'ordering': "['name']", 'object_name': 'DatafileParameter'},
            'datetime_value': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['tardis_portal.ParameterName']"}),
            'numerical_value': ('django.db.models.fields.FloatField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
            'parameterset': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['tardis_portal.DatafileParameterSet']"}),
            'string_value': ('django.db.models.fields.TextField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'})
        },
        'tardis_portal.datafileparameterset': {
            'Meta': {'ordering': "['id']", 'object_name': 'DatafileParameterSet'},
            'dataset_file': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['tardis_portal.Dataset_File']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'schema': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['tardis_portal.Schema']"})
        },
        'tardis_portal.dataset': {
            'Meta': {'ordering': "['-id']", 'object_name': 'Dataset'},
            'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'directory': ('tardis.tardis_portal.models.fields.DirectoryField', [], {'null': 'True', 'blank': 'True'}),
            'experiments': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'datasets'", 'symmetrical': 'False', 'to': "orm['tardis_portal.Experiment']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'immutable': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
        },
        'tardis_portal.dataset_file': {
            'Meta': {'ordering': "['filename']", 'object_name': 'Dataset_File'},
            'created_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'dataset': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['tardis_portal.Dataset']"}),
            'directory': ('tardis.tardis_portal.models.fields.DirectoryField', [], {'null': 'True', 'blank': 'True'}),
            'filename': ('django.db.models.fields.CharField', [], {'max_length': '400'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'md5sum': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
            'mimetype': ('django.db.models.fields.CharField', [], {'max_length': '80', 'blank': 'True'}),
            'modification_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'sha512sum': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
            'size': ('django.db.models.fields.CharField', [], {'max_length': '400', 'blank': 'True'})
        },
        'tardis_portal.datasetparameter': {
            'Meta': {'ordering': "['name']", 'object_name': 'DatasetParameter'},
            'datetime_value': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['tardis_portal.ParameterName']"}),
            'numerical_value': ('django.db.models.fields.FloatField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
            'parameterset': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['tardis_portal.DatasetParameterSet']"}),
            'string_value': ('django.db.models.fields.TextField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'})
        },
        'tardis_portal.datasetparameterset': {
            'Meta': {'ordering': "['id']", 'object_name': 'DatasetParameterSet'},
            'dataset': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['tardis_portal.Dataset']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'schema': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['tardis_portal.Schema']"})
        },
        'tardis_portal.experiment': {
            'Meta': {'object_name': 'Experiment'},
            'approved': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'created_by': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
            'created_time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'end_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'handle': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'institution_name': ('django.db.models.fields.CharField', [], {'default': "'Monash University'", 'max_length': '400'}),
            'license': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['tardis_portal.License']", 'null': 'True', 'blank': 'True'}),
            'locked': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'public_access': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '1'}),
            'start_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '400'}),
            'update_time': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'url': ('django.db.models.fields.URLField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
        },
        'tardis_portal.experimentparameter': {
            'Meta': {'ordering': "['name']", 'object_name': 'ExperimentParameter'},
            'datetime_value': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['tardis_portal.ParameterName']"}),
            'numerical_value': ('django.db.models.fields.FloatField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
            'parameterset': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['tardis_portal.ExperimentParameterSet']"}),
            'string_value': ('django.db.models.fields.TextField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'})
        },
        'tardis_portal.experimentparameterset': {
            'Meta': {'ordering': "['id']", 'object_name': 'ExperimentParameterSet'},
            'experiment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['tardis_portal.Experiment']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'schema': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['tardis_portal.Schema']"})
        },
        'tardis_portal.freetextsearchfield': {
            'Meta': {'object_name': 'FreeTextSearchField'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'parameter_name': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['tardis_portal.ParameterName']"})
        },
        'tardis_portal.groupadmin': {
            'Meta': {'object_name': 'GroupAdmin'},
            'group': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.Group']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
        },
        'tardis_portal.license': {
            'Meta': {'object_name': 'License'},
            'allows_distribution': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'image_url': ('django.db.models.fields.URLField', [], {'max_length': '2000', 'blank': 'True'}),
            'internal_description': ('django.db.models.fields.TextField', [], {}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '400'}),
            'url': ('django.db.models.fields.URLField', [], {'unique': 'True', 'max_length': '2000'})
        },
        'tardis_portal.location': {
            'Meta': {'object_name': 'Location'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_available': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '10'}),
            'priority': ('django.db.models.fields.IntegerField', [], {}),
            'transfer_provider': ('django.db.models.fields.CharField', [], {'default': "'local'", 'max_length': '10'}),
            'type': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
            'url': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '400'})
        },
        'tardis_portal.objectacl': {
            'Meta': {'ordering': "['content_type', 'object_id']", 'object_name': 'ObjectACL'},
            'aclOwnershipType': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
            'canDelete': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'canRead': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'canWrite': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
            'effectiveDate': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
            'entityId': ('django.db.models.fields.CharField', [], {'max_length': '320'}),
            'expiryDate': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'isOwner': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
            'pluginId': ('django.db.models.fields.CharField', [], {'max_length': '30'})
        },
        'tardis_portal.parametername': {
            'Meta': {'ordering': "('order', 'name')", 'unique_together': "(('schema', 'name'),)", 'object_name': 'ParameterName'},
            'choices': ('django.db.models.fields.CharField', [], {'max_length': '500', 'blank': 'True'}),
            'comparison_type': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
            'data_type': ('django.db.models.fields.IntegerField', [], {'default': '2'}),
            'full_name': ('django.db.models.fields.CharField', [], {'max_length': '60'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'immutable': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_searchable': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '60'}),
            'order': ('django.db.models.fields.PositiveIntegerField', [], {'default': '9999', 'null': 'True', 'blank': 'True'}),
            'schema': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['tardis_portal.Schema']"}),
            'units': ('django.db.models.fields.CharField', [], {'max_length': '60', 'blank': 'True'})
        },
        'tardis_portal.providerparameter': {
            'Meta': {'unique_together': "(('location', 'name'),)", 'object_name': 'ProviderParameter'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'location': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['tardis_portal.Location']"}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
            'value': ('django.db.models.fields.CharField', [], {'max_length': '80', 'blank': 'True'})
        },
        'tardis_portal.replica': {
            'Meta': {'unique_together': "(('datafile', 'location'),)", 'object_name': 'Replica'},
            'datafile': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['tardis_portal.Dataset_File']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'location': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['tardis_portal.Location']"}),
            'protocol': ('django.db.models.fields.CharField', [], {'max_length': '10', 'blank': 'True'}),
            'stay_remote': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'url': ('django.db.models.fields.CharField', [], {'max_length': '400'}),
            'verified': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
        },
        'tardis_portal.schema': {
            'Meta': {'object_name': 'Schema'},
            'hidden': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'immutable': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
            'namespace': ('django.db.models.fields.URLField', [], {'unique': 'True', 'max_length': '255'}),
            'subtype': ('django.db.models.fields.CharField', [], {'max_length': '30', 'null': 'True', 'blank': 'True'}),
            'type': ('django.db.models.fields.IntegerField', [], {'default': '1'})
        },
        'tardis_portal.token': {
            'Meta': {'object_name': 'Token'},
            'experiment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['tardis_portal.Experiment']"}),
            'expiry_date': ('django.db.models.fields.DateField', [], {'default': 'datetime.datetime(2013, 8, 31, 0, 0)'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'token': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
        },
        'tardis_portal.userauthentication': {
            'Meta': {'object_name': 'UserAuthentication'},
            'authenticationMethod': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'userProfile': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['tardis_portal.UserProfile']"}),
            'username': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'tardis_portal.userprofile': {
            'Meta': {'object_name': 'UserProfile'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'isDjangoAccount': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'unique': 'True'})
        }
    }

    complete_apps = ['tardis_portal']
| |
"""
Generate Netkit configuration files for a network
"""
from mako.lookup import TemplateLookup
from pkg_resources import resource_filename
import pkg_resources
import os
#import network as network
import logging
LOG = logging.getLogger("ANK")
#TODO: replace other if node in servers with node.is_server
import shutil
import glob
import time
import tarfile
import AutoNetkit as ank
from AutoNetkit import config
# Module-level handle on the AutoNetkit configuration.
settings = config.settings

import pprint

# Check can write to template cache directory
#TODO: make function to provide cache directory
template_cache_dir = config.template_cache_dir
# Mako templates shipped inside the AutoNetkit package.
template_dir = resource_filename("AutoNetkit","lib/templates")
# Shared template lookup; compiled templates are cached on disk in
# template_cache_dir to speed up repeated renders.
lookup = TemplateLookup(directories=[ template_dir ],
                        module_directory= template_cache_dir,
                        #cache_type='memory',
                        #cache_enabled=True,
                        )
#TODO: make a check_dir function that tries to create directory, if unable then recursively try/except to create parent directories
#TODO: add more detailed exception handling to catch writing errors
# eg for each subdir of templates
#TODO: make this a module
#TODO: make this a netkit compiler plugin
#TODO: clear up label vs node id
#TODO: Move these into a netkit helper function*****
def lab_dir():
    """Return the base output directory for the Netkit lab (from config)."""
    return config.lab_dir
def netkit_dir(network, rtr):
    """Returns Netkit path.

    Currently every host shares the single lab directory, so *network*
    and *rtr* are unused until multi-machine support is reinstated.
    """
    #TODO: reinstate this for multi-machine ANK
    #nk_dir = ank.netkit_hostname(network, rtr)
    #return os.path.join(lab_dir(), nk_dir)
    return lab_dir()
def shared_dir():
    """Return the lab-wide shared directory.

    Refer http://wiki.netkit.org/man/man1/lstart.1.html#lbAE
    """
    return os.path.join(lab_dir(), "shared")
def shared_etc_dir():
    """Return the etc/ folder inside the lab-wide shared directory."""
    base = shared_dir()
    return os.path.join(base, "etc")
def router_dir(network, rtr):
    """Return the per-router folder for router *rtr* in the Netkit lab."""
    folder = ank.rtr_folder_name(network, rtr)
    return os.path.join(netkit_dir(network, rtr), folder)
def root_dir(network, rtr):
    """Return the root user's home directory path for router *rtr*."""
    base = router_dir(network, rtr)
    return os.path.join(base, "root")
def dot_ssh_dir(network, rtr):
    """Return the root user's .ssh directory path for router *rtr*."""
    base = root_dir(network, rtr)
    return os.path.join(base, ".ssh")
def etc_dir(network, rtr):
    """Return the etc/ directory path for router *rtr*."""
    base = router_dir(network, rtr)
    return os.path.join(base, "etc")
def sshd_dir(network, rtr):
    """Return the sshd configuration directory (etc/ssh) for *rtr*."""
    base = etc_dir(network, rtr)
    return os.path.join(base, "ssh")
def zebra_dir(network, rtr):
    """Return the Zebra/Quagga configuration directory for *rtr*."""
    base = etc_dir(network, rtr)
    return os.path.join(base, "zebra")
def bind_dir(network, rtr):
    """Return the BIND (DNS) configuration directory for *rtr*."""
    base = etc_dir(network, rtr)
    return os.path.join(base, "bind")
def lo_interface(int_id=0):
    """Return the Linux loopback alias name (e.g. "lo0") for *int_id*."""
    #TODO: use this throughout the module
    return "lo" + str(int_id)
class NetkitCompiler:
"""Compiler main"""
def __init__(self, network, services, zebra_password="1234"):
    """Store the network model and the helpers used during compilation."""
    self.network = network
    self.services = services
    self.zebra_password = zebra_password
    self.default_weight = 1
    # Platform-specific interface-naming helpers.
    self.interface_id = ank.interface_id('netkit')
    self.tap_interface_id = ank.tap_interface_id
    self.lo_interface = lo_interface
    # Speed improvement: grab eBGP and iBGP graphs
    #TODO: fetch eBGP and iBGP graphs and cache them
def initialise(self):
    """Creates lab folder structure.

    Either creates the lab directory or, if it already exists, empties
    it, then creates the shared folders and the per-device folder tree.
    Directory creation order matters: the router directory must exist
    before its etc/, ssh/, root/ and zebra/ subdirectories.
    """
    LOG.debug("Initialising Netkit lab")
    # TODO: clean out netkitdir
    # Don't just remove the whole folder
    # Note is ok to leave lab.conf as this will be over ridden
    #TODO: make this go into one dir for each netkithost
    if not os.path.isdir(lab_dir()):
        os.mkdir(lab_dir())
    else:
        # network dir exists, clean out all (based on glob of ASxry)
        #TODO: see if need * wildcard for standard glob
        for item in glob.iglob(os.path.join(lab_dir(), "*")):
            if os.path.isdir(item):
                shutil.rmtree(item)
            else:
                os.unlink(item)
    # Create folder for netkit hosts
    #TODO: reinstate for multi-machine ANK
    if not os.path.isdir(shared_dir()):
        os.mkdir(shared_dir())
    if not os.path.isdir(shared_etc_dir()):
        os.mkdir(shared_etc_dir())
    dns_servers = set(self.network.dns_servers())
    for device in self.network.devices():
        # Make folders - note order counts:
        # need to make router dir before zebra, etc dirs
        #TODO: only append root_dir and sshd_dir if netkit ssh key set
        for test_dir in [router_dir(self.network, device),
                         etc_dir(self.network, device),
                         sshd_dir(self.network, device),
                         root_dir(self.network, device),
                         dot_ssh_dir(self.network, device),
                         zebra_dir(self.network, device)]:
            if not os.path.isdir(test_dir):
                os.mkdir(test_dir)
        # DNS servers additionally get a bind/ configuration folder.
        if device in dns_servers:
            b_dir = bind_dir(self.network, device)
            if not os.path.isdir(b_dir):
                os.mkdir(b_dir)
    return
def configure_netkit(self):
    """Generates Netkit and Zebra/Quagga specific configuration files.

    Writes the lab-wide shared.startup and lab.conf, plus per-device
    sshd, zebra daemon list, motd, zebra.conf and <host>.startup files.

    Fixes over the previous version: ``f_zmotd`` and ``f_lab`` were
    opened but never closed (resource leak / potentially unflushed
    lab.conf); both are now closed explicitly.
    """
    # Sets up netkit related files
    tap_host = ank.get_tap_host(self.network)
    ank_version = pkg_resources.get_distribution("AutoNetkit").version
    date = time.strftime("%Y-%m-%d %H:%M", time.localtime())
    lab_template = lookup.get_template("netkit/lab.mako")
    startup_template = lookup.get_template("netkit/startup.mako")
    zebra_daemons_template = lookup.get_template(
        "quagga/zebra_daemons.mako")
    zebra_template = lookup.get_template("quagga/zebra.mako")
    sshd_template = lookup.get_template("linux/sshd.mako")
    motd_template = lookup.get_template("quagga/motd.mako")
    # Shared (common) configuration
    startup_daemon_list = []
    # Setup ssh
    shutil.copy(resource_filename("AutoNetkit", "lib/shadow"), shared_etc_dir())
    startup_daemon_list.append("ssh")
    # Need to chown root dir for ssh keys
    # refer http://list.dia.uniroma3.it/pipermail/netkit.users/2010-February/000552.html
    use_ssh_key = False
    if config.settings['Netkit']['ssh key']:
        # chown root:root /root
        use_ssh_key = True
    f_startup = open(os.path.join(lab_dir(), "shared.startup"), 'wb')
    f_startup.write(startup_template.render(
        interfaces=[],
        add_localhost=True,
        # don't send out the tap interface
        del_default_route=True,
        daemons=startup_daemon_list,
        use_ssh_key=use_ssh_key,
    ))
    f_startup.close()
    # Files for individual node configuration
    # TODO: this needs to be created for each netkit host machine
    f_lab = open(os.path.join(lab_dir(), "lab.conf"), 'wb')
    lab_conf = {}
    tap_list_strings = {}
    ibgp_routers = ank.ibgp_routers(self.network)
    ebgp_routers = ank.ebgp_routers(self.network)
    igp_graph = ank.igp_graph(self.network)
    dns_servers = set(self.network.dns_servers())
    for node in self.network.devices():
        # TODO: see if rtr label is still needed, if so replace with
        # appropriate naming module function
        rtr_folder_name = ank.rtr_folder_name(self.network, node)
        # sshd options
        f_sshd = open(os.path.join(sshd_dir(self.network, node), "sshd_config"), 'wb')
        f_sshd.write(sshd_template.render())
        f_sshd.close()
        lab_conf[rtr_folder_name] = []
        startup_daemon_list = ["zebra"]
        startup_int_list = []
        # convert tap list from ips into strings
        # tap_int_id cannot conflict with already allocated interfaces
        # assume edges number sequentially, so next free int id is number of
        # edges
        node_tap_id = self.tap_interface_id(self.network, node)
        tap_list_strings[rtr_folder_name] = (node_tap_id,
                                             self.network[node].get('tap_ip'))
        if node in dns_servers:
            startup_daemon_list.append("bind")
            dns_memory = 64  # Allocate more memory to DNS server
            # TODO: remove key, val and make it just key: val
            lab_conf[rtr_folder_name].append(('mem', dns_memory))
        if config.settings['Netkit']['ssh key']:
            f_auth_keys = open(os.path.join(dot_ssh_dir(self.network, node), "authorized_keys"), "wb")
            f_auth_keys.write(config.settings['Netkit']['ssh key'])
            f_auth_keys.close()
        # Zebra Daemons
        zebra_daemon_list = []
        f_zdaemons = open(os.path.join(zebra_dir(self.network, node),
                                       "daemons"), 'wb')
        # Always start Zebra
        zebra_daemon_list.append("zebra")
        if igp_graph.degree(node) > 0:
            zebra_daemon_list.append("ospfd")  # Only start IGP process if IGP links
        if (node in ibgp_routers) or (node in ebgp_routers):
            zebra_daemon_list.append("bgpd")
        f_zdaemons.write(zebra_daemons_template.render(
            entryList=zebra_daemon_list,
        ))
        f_zdaemons.close()
        # MOTD
        f_zmotd = open(os.path.join(zebra_dir(self.network, node),
                                    "motd.txt"), 'wb')
        f_zmotd.write(motd_template.render(
            date=date,
            version=ank_version,
            password=self.zebra_password,
        ))
        f_zmotd.close()  # bug fix: was previously left open (leak)
        # Main Zebra config
        f_z = open(os.path.join(zebra_dir(self.network, node),
                                "zebra.conf"), 'wb')
        f_z.write(zebra_template.render(
            hostname=node.device_hostname,
            password=self.zebra_password,
            enable_password=self.zebra_password,
            use_snmp=True,
            use_debug=True,
        ))
        f_z.close()
        # Loopback interface
        lo_ip = self.network.lo_ip(node)
        startup_int_list.append({
            'int': 'lo:1',
            'ip': str(lo_ip.ip),
            'netmask': str(lo_ip.netmask),
        })
        # Ethernet interfaces
        for link in self.network.links(node):
            int_id = self.interface_id(link.id)
            subnet = link.subnet
            # replace the / from subnet label
            collision_domain = "%s.%s" % (subnet.ip, subnet.prefixlen)
            # lab.conf has id in form host[0]=... for eth0 of host
            lab_conf[rtr_folder_name].append((link.id, collision_domain))
            startup_int_list.append({
                'int': int_id,
                'ip': str(link.ip),
                'netmask': str(subnet.netmask),
                'broadcast': str(subnet.broadcast),
            })
        default_route = None
        if node.is_server:
            # add default_route for server to router
            default_route = ank.default_route(node)
        # DNS servers need their bind directory chowned at boot.
        chown_bind = False
        if node in ank.dns_servers(self.network):
            chown_bind = True
        # Write startup file for this router
        f_startup = open(os.path.join(netkit_dir(self.network, node),
                                      "{0}.startup".format(rtr_folder_name)), 'wb')
        f_startup.write(startup_template.render(
            interfaces=startup_int_list,
            add_localhost=True,
            # don't send out the tap interface
            del_default_route=True,
            default_route=default_route,
            daemons=startup_daemon_list,
            chown_bind=chown_bind,
        ))
        f_startup.close()
    # Write lab file for whole lab
    f_lab.write(lab_template.render(
        conf=lab_conf,
        tapHost=tap_host,
        tapList=tap_list_strings,
        lab_description="AutoNetkit generated lab",
        lab_version=date,
        # TODO: get this from config file
        lab_email="autonetkit@googlegroups.com",
        lab_author="AutoNetkit %s" % ank_version,
        # TODO: get this from config file
        lab_web="www.autonetkit.org",
    ))
    f_lab.close()  # bug fix: lab.conf handle was never closed
def configure_igp(self):
    """Generates IGP specific configuration files (eg ospfd).

    Renders quagga/ospf.mako once per router of every AS that has at
    least one internal link, writing the result to ospfd.conf in that
    router's zebra directory.
    """
    LOG.debug("Configuring IGP")
    template = lookup.get_template("quagga/ospf.mako")
    default_weight = 1
    # configures IGP for each AS
    as_graphs = ank.get_as_graphs(self.network)
    for my_as in as_graphs:
        asn = my_as.asn
        LOG.debug("Configuring IGP for AS %s " % asn)
        if my_as.number_of_edges() == 0:
            # No edges, nothing to configure
            LOG.debug("Skipping IGP for AS%s as no internal links" % asn)
            continue
        for router in self.network.routers(asn):
            #TODO: can probably through most of these straight into the template and use properties there!
            interface_list = []
            network_list = []
            # Add loopback info
            lo_ip = router.lo_ip
            interface_list.append({'id': "lo", 'weight': 1,
                                   'remote_router': "NA (loopback)",
                                   'remote_int': "Loopback"})
            network_list.append({'cidr': lo_ip.cidr, 'ip': lo_ip.ip,
                                 'netmask': lo_ip.netmask,
                                 'area': 0, 'remote_ip': "Loopback"})
            for link in self.network.links(router, my_as):
                int_id = self.interface_id(link.id)
                # Fall back to the default metric when the link has no weight set.
                weight = link.weight or default_weight
                interface_list.append({'id': int_id,
                                       'weight': weight,
                                       'remote_router': link.remote_host})
                # fetch and format the ip details
                subnet = link.subnet
                local_ip = link.local_ip
                remote_ip = link.remote_ip
                network_list.append({'cidr': subnet.cidr, 'ip': local_ip,
                                     'netmask': subnet.netmask,
                                     'remote_ip': remote_ip, 'area': 0})
            #TODO: see if need to use router-id for ospfd in quagga
            # Context manager ensures the config file is closed
            # (the previous code leaked the file handle).
            with open(os.path.join(zebra_dir(self.network, router),
                                   "ospfd.conf"), 'wb') as f_handle:
                f_handle.write(template.render(
                    hostname = router.device_hostname,
                    password = self.zebra_password,
                    enable_password = self.zebra_password,
                    interface_list = interface_list,
                    network_list = network_list,
                    routerID = router,
                    use_igp = True,
                    logfile = "/var/log/zebra/ospfd.log",
                    use_debug = False,
                ))
def configure_interfaces(self, device):
    """Interface configuration.

    Builds the list of interface description dicts (loopback first, then
    one entry per physical link of *device*) consumed by the config
    templates.  Fix: the docstring was previously placed after the
    LOG.debug call, making it a bare string statement, not a docstring.
    """
    LOG.debug("Configuring interfaces for %s" % self.network.fqdn(device))
    lo_ip = self.network.lo_ip(device)
    interfaces = []
    # Loopback entry
    interfaces.append({
        'id': 'lo0',
        'ip': lo_ip.ip,
        'netmask': lo_ip.netmask,
        'wildcard': lo_ip.hostmask,
        'prefixlen': lo_ip.prefixlen,
        'network': lo_ip.network,
        'description': 'Loopback',
    })
    for src, dst, data in self.network.graph.edges(device, data=True):
        subnet = data['sn']
        int_id = self.interface_id(data['id'])
        description = 'Interface %s -> %s' % (
            ank.fqdn(self.network, src),
            ank.fqdn(self.network, dst))
        # Interface information for router config
        interfaces.append({
            'id': int_id,
            'ip': data['ip'],
            'network': subnet.network,
            'prefixlen': subnet.prefixlen,
            'netmask': subnet.netmask,
            'wildcard': subnet.hostmask,
            'broadcast': subnet.broadcast,
            'description': description,
            # fall back to the instance-wide default metric
            'weight': data.get('weight', self.default_weight),
        })
    return interfaces
def configure_bgp(self):
    """Generates BGP specific configuration files (bgpd.conf).

    For every router in every AS, collects the iBGP neighbor and
    route-reflector-client lists and the eBGP peer list (with the
    per-session ingress/egress route-map groups from g_session), then
    renders quagga/bgp.mako into the router's zebra directory.
    """
    ip_as_allocs = ank.get_ip_as_allocs(self.network)
    LOG.debug("Configuring BGP")
    template = lookup.get_template("quagga/bgp.mako")
    route_maps = {}
    ibgp_graph = ank.get_ibgp_graph(self.network)
    ebgp_graph = ank.get_ebgp_graph(self.network)
    physical_graph = self.network.graph
    for my_as in ank.get_as_graphs(self.network):
        asn = my_as.asn
        # Fix: this previously logged "Configuring IGP" (copy/paste slip).
        LOG.debug("Configuring BGP for AS %s " % asn)
        # get nodes ie intersection
        #H = nx.intersection(my_as, ibgp_graph)
        # get ibgp graph that contains only nodes from this AS
        for router in self.network.routers(asn):
            bgp_groups = {}
            route_maps = []
            ibgp_neighbor_list = []
            ibgp_rr_client_list = []
            route_map_groups = {}
            if router in ibgp_graph:
                for src, neigh, data in ibgp_graph.edges(router, data=True):
                    # Ingress maps live on the session neigh -> router.
                    route_maps_in = self.network.g_session[neigh][router]['ingress']
                    rm_group_name_in = None
                    if len(route_maps_in):
                        rm_group_name_in = "rm_%s_in" % neigh.folder_name
                        route_map_groups[rm_group_name_in] = [match_tuple
                            for route_map in route_maps_in
                            for match_tuple in route_map.match_tuples]
                    route_maps_out = self.network.g_session[router][neigh]['egress']
                    rm_group_name_out = None
                    if len(route_maps_out):
                        # BUG FIX: this previously assigned rm_group_name_in,
                        # leaving rm_group_name_out as None so the egress group
                        # was registered under the key None and never attached
                        # to the neighbor.
                        rm_group_name_out = "rm_%s_out" % neigh.folder_name
                        route_map_groups[rm_group_name_out] = [match_tuple
                            for route_map in route_maps_out
                            for match_tuple in route_map.match_tuples]
                    # str() guards against a missing rr_dir: the branch below
                    # explicitly allows rr_dir None, but concatenating None
                    # here used to raise TypeError first.
                    description = str(data.get("rr_dir")) + " to " + ank.fqdn(self.network, neigh)
                    if data.get('rr_dir') == 'down':
                        # This neighbor is our route-reflector client.
                        ibgp_rr_client_list.append(
                            {
                                'id': self.network.lo_ip(neigh).ip,
                                'description': description,
                                'route_maps_in': rm_group_name_in,
                                'route_maps_out': rm_group_name_out,
                            })
                    elif (data.get('rr_dir') in set(['up', 'over', 'peer'])
                          or data.get('rr_dir') is None):
                        ibgp_neighbor_list.append(
                            {
                                'id': self.network.lo_ip(neigh).ip,
                                'description': description,
                                'route_maps_in': rm_group_name_in,
                                'route_maps_out': rm_group_name_out,
                            })
            bgp_groups['internal_peers'] = {
                'type': 'internal',
                'neighbors': ibgp_neighbor_list
            }
            if len(ibgp_rr_client_list):
                bgp_groups['internal_rr'] = {
                    'type': 'internal',
                    'neighbors': ibgp_rr_client_list,
                    'cluster': self.network.lo_ip(router).ip,
                }
            if router in ebgp_graph:
                external_peers = []
                for peer in ebgp_graph.neighbors(router):
                    route_maps_in = self.network.g_session[peer][router]['ingress']
                    rm_group_name_in = None
                    if len(route_maps_in):
                        rm_group_name_in = "rm_%s_in" % peer.folder_name
                        route_map_groups[rm_group_name_in] = [match_tuple
                            for route_map in route_maps_in
                            for match_tuple in route_map.match_tuples]
                    # Now need to update the sequence numbers for the flattened route maps
                    route_maps_out = self.network.g_session[router][peer]['egress']
                    rm_group_name_out = None
                    if len(route_maps_out):
                        rm_group_name_out = "rm_%s_out" % peer.folder_name
                        route_map_groups[rm_group_name_out] = [match_tuple
                            for route_map in route_maps_out
                            for match_tuple in route_map.match_tuples]
                    peer_ip = physical_graph[peer][router]['ip']
                    external_peers.append({
                        'id': peer_ip,
                        'route_maps_in': rm_group_name_in,
                        'route_maps_out': rm_group_name_out,
                        'peer_as': self.network.asn(peer)})
                bgp_groups['external_peers'] = {
                    'type': 'external',
                    'neighbors': external_peers}
            # Ensure only one copy of each route map, can't use set due to list inside tuples (which won't hash)
            # Use dict indexed by name, and then extract the dict items, dict hashing ensures only one route map per name
            community_lists = {}
            prefix_lists = {}
            node_bgp_data = self.network.g_session.node.get(router)
            if node_bgp_data:
                community_lists = node_bgp_data.get('tags')
                prefix_lists = node_bgp_data.get('prefixes')
            policy_options = {
                'community_lists': community_lists,
                'prefix_lists': prefix_lists,
                'route_maps': route_map_groups,
            }
            #TODO: remove community_lists and prefix_lists as they are put into policy_options
            # Context manager fixes the previously-leaked file handle.
            with open(os.path.join(zebra_dir(self.network, router),
                                   "bgpd.conf"), 'wb') as f_handle:
                f_handle.write(template.render(
                    hostname = router.device_hostname,
                    asn = self.network.asn(router),
                    password = self.zebra_password,
                    enable_password = self.zebra_password,
                    router_id = self.network.lo_ip(router).ip,
                    community_lists = community_lists,
                    policy_options = policy_options,
                    prefix_lists = prefix_lists,
                    #TODO: see how this differs to router_id
                    identifying_loopback = self.network.lo_ip(router),
                    bgp_groups = bgp_groups,
                    ibgp_neighbor_list = ibgp_neighbor_list,
                    ibgp_rr_client_list = ibgp_rr_client_list,
                    route_maps = route_maps,
                    logfile = "/var/log/zebra/bgpd.log",
                    debug=True,
                    use_debug=True,
                    dump=False,
                    snmp=False,
                    interfaces = self.configure_interfaces(router)
                ))
def configure_dns(self):
    """Generates BIND configuration files for DNS.

    Writes db.root/named.conf for root, caching and authoritative
    servers, forward and reverse zone files for authoritative servers,
    and resolv.conf for every DNS server and client.

    Can check configs eg:

    Forward::

        bash-3.2$ named-checkzone -d AS3 ank_lab/netkit_lab/AS3_l3_3_dns_1/etc/bind/db.AS3

    Reverse::

        bash-3.2$ named-checkzone -d 0.10.in-addr.arpa ank_lab/netkit_lab/AS3_l3_3_dns_1/etc/bind/db.0.10.in-addr.arpa.

    named::

        bash-3.2$ named-checkconf ank_lab/netkit_lab/AS3_l3_3_dns_1/etc/bind/named.conf
    """
    # Note: removed an unused `import netaddr` / ip_localhost / routers
    # trio of locals that were never read in this method.
    linux_bind_dir = "/etc/bind"
    resolve_template = lookup.get_template("linux/resolv.mako")
    forward_template = lookup.get_template("bind/forward.mako")
    named_template = lookup.get_template("bind/named.mako")
    reverse_template = lookup.get_template("bind/reverse.mako")
    root_template = lookup.get_template("bind/root.mako")
    root_dns_template = lookup.get_template("bind/root_dns.mako")
    root_dns_named_template = lookup.get_template("bind/root_dns_named.mako")
    ip_as_allocs = ank.get_ip_as_allocs(self.network)
    dns_servers = ank.dns_servers(self.network)
    root_servers = list(ank.root_dns_servers(self.network))
    auth_servers = ank.dns.dns_auth_servers(self.network)
    caching_servers = ank.dns.dns_cache_servers(self.network)
    clients = ank.dns.dns_clients(self.network)
    # Root servers: delegation records for each child domain.
    # All opens now use `with` (this addresses the old TODO: handles
    # were previously never closed).
    for server in root_servers:
        children = ank.dns.dns_hiearchy_children(server)
        child_servers = []
        for child in children:
            advertise_block = ip_as_allocs[child.asn]
            reverse_identifier = ank.rev_dns_identifier(advertise_block)
            child_servers.append((child.domain, reverse_identifier, ank.server_ip(child)))
        with open(os.path.join(bind_dir(self.network, server), "db.root"), 'wb') as f_root_db:
            f_root_db.write(root_dns_template.render(
                dns_servers = child_servers,
                server = server,
            ))
        with open(os.path.join(bind_dir(self.network, server), "named.conf"), 'wb') as f_named:
            f_named.write(root_dns_named_template.render(
                logging = False,
            ))
    # Caching servers: hint file pointing at the global root servers.
    for server in caching_servers:
        #root_db_hint = ( ("ns.AS%s" % n.asn, ank.server_ip(n)) for n in ank.dns_hiearchy_parents(server))
        root_db_hint = [("ROOT-SERVER", ank.server_ip(n)) for n in root_servers]
        #TODO: make caching use parent rather than global root
        with open(os.path.join(bind_dir(self.network, server), "db.root"), 'wb') as f_root:
            f_root.write(root_template.render(root_servers = root_db_hint))
        with open(os.path.join(bind_dir(self.network, server), "named.conf"), 'wb') as f_named:
            f_named.write(named_template.render(
                entry_list = [],
                bind_dir = linux_bind_dir,
                logging = False,
            ))
    # Authoritative servers: named.conf plus forward/reverse zone files.
    for server in auth_servers:
        named_list = []
        advertise_links = list(ank.advertise_links(server))
        advertise_hosts = list(ank.dns_auth_children(server))
        LOG.debug("DNS server %s advertises %s" % (server, advertise_links))
        #TODO: make reverse dns handle domains other than /8 /16 /24
        advertise_block = ip_as_allocs[server.asn]
        # remove trailing fullstop
        reverse_identifier = ank.rev_dns_identifier(advertise_block).rstrip(".")
        #TODO: look at using advertise_block.network.reverse_dns - check what Bind needs
        named_list.append(reverse_identifier)
        with open(os.path.join(bind_dir(self.network, server), "named.conf"), 'wb') as f_named:
            f_named.write(named_template.render(
                domain = server.domain,
                entry_list = named_list,
                bind_dir = linux_bind_dir,
                logging = False,
            ))
        for_entry_list = list((self.interface_id(link.id), link.local_host.dns_host_portion_only, link.ip)
                              for link in advertise_links)
        # Add loopbacks for routers
        for_entry_list += ((self.lo_interface(0), host.dns_host_portion_only, host.lo_ip.ip)
                           #TODO: make this check l3 group rather than asn (generalise)
                           for host in advertise_hosts if host.is_router and host.asn == server.asn)
        rev_entry_list = list(
            (ank.reverse_subnet(link.ip, advertise_block.prefixlen), self.interface_id(link.id), link.local_host.dns_hostname)
            for link in advertise_links)
        # Add loopbacks for routers
        rev_entry_list += ((ank.reverse_subnet(host.lo_ip.ip, advertise_block.prefixlen), self.lo_interface(0), host.dns_host_portion_only)
                           #TODO: make this check l3 group rather than asn (generalise)
                           for host in advertise_hosts if host.is_router and host.asn == server.asn)
        #TODO: provide better way to get eg eth0.host than string concat inside the template
        host_cname_list = []
        for host in advertise_hosts:
            if host.asn != server.asn:
                # host is from another asn, skip.
                #TODO: extend this to make sure matches same asn, l3group and l2group
                continue
            if host.is_router:
                # has lo_ip
                cname = "%s.%s" % (self.lo_interface(), host.dns_host_portion_only)
            else:
                # choose an interface - arbitrary choice, choose first host link
                interface = self.interface_id(ank.server_interface_id(host))
                cname = "%s.%s" % (interface, host.dns_host_portion_only)
            host_cname_list.append((host.dns_host_portion_only, cname))
        # Sort to make format nicer
        host_cname_list = sorted(host_cname_list, key = lambda x: x[1])
        for_entry_list = sorted(for_entry_list)
        for_entry_list = sorted(for_entry_list, key = lambda x: x[1])
        with open(os.path.join(bind_dir(self.network, server), "db.%s" % server.domain), 'wb') as f_forward:
            f_forward.write(forward_template.render(
                domain = server.domain,
                entry_list = for_entry_list,
                host_cname_list = host_cname_list,
                dns_server = server.dns_hostname,
                dns_server_ip = ank.server_ip(server),
            ))
        with open(os.path.join(bind_dir(self.network, server), "db.%s" % reverse_identifier), 'wb') as f_reverse:
            f_reverse.write(reverse_template.render(
                domain = server.domain,
                identifier = reverse_identifier,
                entry_list = rev_entry_list,
                dns_server= server.dns_hostname,
            ))
        #TODO: make l2 use l3 for caching
        #TODO: ROOT-SERVER can't be part of a domain... - need to correctly handle case of multiple root servers
        # and also need to handle this for case of single root server (ie no hiearchy) probably ok as /etc/resolv.conf points to server itself, not through dns hints
        root_db_hint = (("ROOT-SERVER", ank.server_ip(n)) for n in ank.dns_hiearchy_parents(server))
        with open(os.path.join(bind_dir(self.network, server), "db.root"), 'wb') as f_root:
            f_root.write(root_template.render(root_servers = root_db_hint))
    # Every DNS server resolves against itself.
    for server in dns_servers:
        with open(os.path.join(etc_dir(self.network, server), "resolv.conf"), 'wb') as f_resolv:
            f_resolv.write(resolve_template.render(
                nameservers = [ank.server_ip(server)],
                domain = server.domain))
    # Configure clients to resolve against their parent DNS servers.
    for client in clients:
        server_ips = list(ank.server_ip(server) for server in ank.dns_hiearchy_parents(client))
        with open(os.path.join(etc_dir(self.network, client), "resolv.conf"), 'wb') as f_resolv:
            f_resolv.write(resolve_template.render(
                nameservers = server_ips,
                domain = client.domain))
    return
def configure(self):
    """Run the full Netkit configuration pass and archive the lab."""
    LOG.info("Configuring Netkit")
    # Generate all device configuration files.
    for step in (self.configure_netkit, self.configure_igp,
                 self.configure_bgp, self.configure_dns):
        step()
    # Bundle the generated lab into a timestamped .tgz
    timestamp = time.strftime("%Y%m%d_%H%M", time.localtime())
    tar_filename = "netkit_%s.tar.gz" % timestamp
    archive = tarfile.open(os.path.join(config.ank_main_dir, tar_filename), "w:gz")
    # Keep the directory structure, eg ank_lab/netkit_lab/
    # (Junos, by contrast, flattens the file structure)
    archive.add(lab_dir())
    archive.close()
    self.network.compiled_labs['netkit'] = tar_filename
| |
#!/usr/bin/python
# Test tool to compare Capstone output with llvm-mc. By Nguyen Anh Quynh, 2014
import array, os.path, sys
from subprocess import Popen, PIPE, STDOUT
from capstone import *
# convert all hex numbers to decimal numbers in a text
# convert all hex numbers to decimal numbers in a text
def normalize_hex(a):
    """Return *a* with every hex literal ('0x...') rewritten in decimal.

    Uses a single regex pass so each literal is converted in place.
    The previous find/str.replace loop substituted *substrings*: for
    'add 0xa, 0xab' the replace of '0xa' also mangled '0xab' into '10b',
    and a bare '0x' with no digits raised ValueError in int().
    """
    import re  # local import keeps the file's top-level import line unchanged
    return re.sub(r'0x[0-9a-fA-F]+',
                  lambda m: str(int(m.group(0), 16)), a)
def run_mc(arch, hexcode, option, syntax=None):
    """Disassemble *hexcode* with llvm-mc and return its normalized output.

    option: extra argv entries (eg -triple=...); syntax: optional extra flag
    inserted before option.  Returns 'FAILED to disassemble (MC)' when
    llvm-mc reports an invalid encoding.
    """
    def canonical(text):
        # collapse tabs/runs of whitespace and lower-case everything
        text = ' '.join(text.lower().split())
        # strip trailing assembler comments (# for x86, // for arm64)
        marker = None
        if arch == CS_ARCH_X86:
            marker = '# '
        elif arch == CS_ARCH_ARM64:
            marker = '// '
        if marker is not None:
            pos = text.find(marker)
            if pos != -1:
                return text[:pos].strip()
        # drop redundant spaces inside brace lists
        return text.replace('{ ', '{').replace(' }', '}').strip()

    # Build the llvm-mc command line once instead of four Popen variants.
    cmd = ['llvm-mc', '-disassemble', '-print-imm-hex']
    if arch == CS_ARCH_MIPS:
        cmd.append('-mattr=+msa')
    if syntax:
        cmd.append(syntax)
    cmd.extend(option)
    proc = Popen(cmd, stdout=PIPE, stdin=PIPE, stderr=STDOUT)
    output = proc.communicate(input=hexcode)[0]
    lines = output.split('\n')
    if 'invalid' in lines[0]:
        return 'FAILED to disassemble (MC)'
    return canonical(lines[1].strip())
def test_file(fname):
    """Run one Capstone regression file against llvm-mc.

    The first line of *fname* encodes '# <arch>, <mode>, <option>'; each
    following non-comment line is '<hexcode> = <expected asm>'.  Every
    hexcode is disassembled with both Capstone and llvm-mc, the two
    outputs are normalized, and a mismatch is printed only when Capstone
    also disagrees with the expected asm recorded in the file.
    """
    print("Test %s" %fname);
    f = open(fname)
    lines = f.readlines()
    f.close()
    if not lines[0].startswith('# '):
        print("ERROR: decoding information is missing")
        return
    # skip '# ' at the front, then split line to get out hexcode
    # Note: option can be '', or 'None'
    #print lines[0]
    #print lines[0][2:].split(', ')
    (arch, mode, option) = lines[0][2:].split(', ')
    mode = mode.replace(' ', '')
    option = option.strip()
    # Translate the textual constants from the header into Capstone values.
    archs = {
        "CS_ARCH_ARM": CS_ARCH_ARM,
        "CS_ARCH_ARM64": CS_ARCH_ARM64,
        "CS_ARCH_MIPS": CS_ARCH_MIPS,
        "CS_ARCH_PPC": CS_ARCH_PPC,
        "CS_ARCH_SPARC": CS_ARCH_SPARC,
        "CS_ARCH_SYSZ": CS_ARCH_SYSZ,
        "CS_ARCH_X86": CS_ARCH_X86,
        "CS_ARCH_XCORE": CS_ARCH_XCORE,
    }
    modes = {
        "CS_MODE_16": CS_MODE_16,
        "CS_MODE_32": CS_MODE_32,
        "CS_MODE_64": CS_MODE_64,
        "CS_MODE_MIPS32": CS_MODE_MIPS32,
        "CS_MODE_MIPS64": CS_MODE_MIPS64,
        "0": CS_MODE_ARM,
        "CS_MODE_ARM": CS_MODE_ARM,
        "CS_MODE_THUMB": CS_MODE_THUMB,
        "CS_MODE_ARM+CS_MODE_V8": CS_MODE_ARM+CS_MODE_V8,
        "CS_MODE_THUMB+CS_MODE_V8": CS_MODE_THUMB+CS_MODE_V8,
        "CS_MODE_THUMB+CS_MODE_MCLASS": CS_MODE_THUMB+CS_MODE_MCLASS,
        "CS_MODE_LITTLE_ENDIAN": CS_MODE_LITTLE_ENDIAN,
        "CS_MODE_BIG_ENDIAN": CS_MODE_BIG_ENDIAN,
        "CS_MODE_64+CS_MODE_LITTLE_ENDIAN": CS_MODE_64+CS_MODE_LITTLE_ENDIAN,
        "CS_MODE_64+CS_MODE_BIG_ENDIAN": CS_MODE_64+CS_MODE_BIG_ENDIAN,
        "CS_MODE_MIPS32+CS_MODE_MICRO": CS_MODE_MIPS32+CS_MODE_MICRO,
        "CS_MODE_MIPS32+CS_MODE_MICRO+CS_MODE_BIG_ENDIAN": CS_MODE_MIPS32+CS_MODE_MICRO+CS_MODE_BIG_ENDIAN,
        "CS_MODE_MIPS32+CS_MODE_BIG_ENDIAN+CS_MODE_MICRO": CS_MODE_MIPS32+CS_MODE_MICRO+CS_MODE_BIG_ENDIAN,
        "CS_MODE_BIG_ENDIAN+CS_MODE_V9": CS_MODE_BIG_ENDIAN + CS_MODE_V9,
        "CS_MODE_MIPS32+CS_MODE_BIG_ENDIAN": CS_MODE_MIPS32+CS_MODE_BIG_ENDIAN,
        "CS_MODE_MIPS32+CS_MODE_LITTLE_ENDIAN": CS_MODE_MIPS32+CS_MODE_LITTLE_ENDIAN,
        "CS_MODE_MIPS64+CS_MODE_LITTLE_ENDIAN": CS_MODE_MIPS64+CS_MODE_LITTLE_ENDIAN,
        "CS_MODE_MIPS64+CS_MODE_BIG_ENDIAN": CS_MODE_MIPS64+CS_MODE_BIG_ENDIAN,
    }
    # NOTE(review): only referenced from the commented-out debug print below.
    options = {
        "CS_OPT_SYNTAX_ATT": CS_OPT_SYNTAX_ATT,
        "CS_OPT_SYNTAX_NOREGNAME": CS_OPT_SYNTAX_NOREGNAME,
    }
    # llvm-mc command-line arguments matching each (arch, mode) pair.
    mc_modes = {
        ("CS_ARCH_X86", "CS_MODE_32"): ['-triple=i386'],
        ("CS_ARCH_X86", "CS_MODE_64"): ['-triple=x86_64'],
        ("CS_ARCH_ARM", "CS_MODE_ARM"): ['-triple=armv7'],
        ("CS_ARCH_ARM", "CS_MODE_THUMB"): ['-triple=thumbv7'],
        ("CS_ARCH_ARM", "CS_MODE_ARM+CS_MODE_V8"): ['-triple=armv8'],
        ("CS_ARCH_ARM", "CS_MODE_THUMB+CS_MODE_V8"): ['-triple=thumbv8'],
        ("CS_ARCH_ARM", "CS_MODE_THUMB+CS_MODE_MCLASS"): ['-triple=thumbv7m'],
        ("CS_ARCH_ARM64", "0"): ['-triple=aarch64'],
        ("CS_ARCH_MIPS", "CS_MODE_MIPS32+CS_MODE_BIG_ENDIAN"): ['-triple=mips'],
        ("CS_ARCH_MIPS", "CS_MODE_MIPS32+CS_MODE_MICRO"): ['-triple=mipsel', '-mattr=+micromips'],
        ("CS_ARCH_MIPS", "CS_MODE_MIPS64"): ['-triple=mips64el'],
        ("CS_ARCH_MIPS", "CS_MODE_MIPS32"): ['-triple=mipsel'],
        ("CS_ARCH_MIPS", "CS_MODE_MIPS64+CS_MODE_BIG_ENDIAN"): ['-triple=mips64'],
        ("CS_ARCH_MIPS", "CS_MODE_MIPS32+CS_MODE_MICRO+CS_MODE_BIG_ENDIAN"): ['-triple=mips', '-mattr=+micromips'],
        ("CS_ARCH_MIPS", "CS_MODE_MIPS32+CS_MODE_BIG_ENDIAN+CS_MODE_MICRO"): ['-triple=mips', '-mattr=+micromips'],
        ("CS_ARCH_PPC", "CS_MODE_BIG_ENDIAN"): ['-triple=powerpc64'],
        ('CS_ARCH_SPARC', 'CS_MODE_BIG_ENDIAN'): ['-triple=sparc'],
        ('CS_ARCH_SPARC', 'CS_MODE_BIG_ENDIAN+CS_MODE_V9'): ['-triple=sparcv9'],
        ('CS_ARCH_SYSZ', '0'): ['-triple=s390x', '-mcpu=z196'],
    }
    #if not option in ('', 'None'):
    #    print archs[arch], modes[mode], options[option]
    #print(arch, mode, option)
    md = Cs(archs[arch], modes[mode])
    mc_option = None
    if arch == 'CS_ARCH_X86':
        # tell llvm-mc to use Intel syntax
        mc_option = '-output-asm-variant=1'
    if arch == 'CS_ARCH_ARM' or arch == 'CS_ARCH_PPC' :
        md.syntax = CS_OPT_SYNTAX_NOREGNAME
    if fname.endswith('3DNow.s.cs'):
        # 3DNow vectors are recorded in AT&T syntax.
        md.syntax = CS_OPT_SYNTAX_ATT
    for line in lines[1:]:
        # ignore all the input lines having # in front.
        if line.startswith('#'):
            continue
        #print("Check %s" %line)
        code = line.split(' = ')[0]
        asm = ''.join(line.split(' = ')[1:])
        # Strip '0x' prefixes and commas, then decode the raw bytes
        # (Python 2 str.decode('hex')).
        hex_code = code.replace('0x', '')
        hex_code = hex_code.replace(',', '')
        hex_data = hex_code.decode('hex')
        #hex_bytes = array.array('B', hex_data)
        x = list(md.disasm(hex_data, 0))
        if len(x) > 0:
            if x[0].op_str != '':
                cs_output = "%s %s" %(x[0].mnemonic, x[0].op_str)
            else:
                cs_output = x[0].mnemonic
        else:
            cs_output = 'FAILED to disassemble'
        cs_output2 = normalize_hex(cs_output)
        cs_output2 = cs_output2.replace(' ', '')
        if arch == 'CS_ARCH_MIPS':
            # normalize register alias names
            cs_output2 = cs_output2.replace('$at', '$1')
            cs_output2 = cs_output2.replace('$v0', '$2')
            cs_output2 = cs_output2.replace('$v1', '$3')
            cs_output2 = cs_output2.replace('$a0', '$4')
            cs_output2 = cs_output2.replace('$a1', '$5')
            cs_output2 = cs_output2.replace('$a2', '$6')
            cs_output2 = cs_output2.replace('$a3', '$7')
            cs_output2 = cs_output2.replace('$t0', '$8')
            cs_output2 = cs_output2.replace('$t1', '$9')
            cs_output2 = cs_output2.replace('$t2', '$10')
            cs_output2 = cs_output2.replace('$t3', '$11')
            cs_output2 = cs_output2.replace('$t4', '$12')
            cs_output2 = cs_output2.replace('$t5', '$13')
            cs_output2 = cs_output2.replace('$t6', '$14')
            cs_output2 = cs_output2.replace('$t7', '$15')
            cs_output2 = cs_output2.replace('$t8', '$24')
            cs_output2 = cs_output2.replace('$t9', '$25')
            cs_output2 = cs_output2.replace('$s0', '$16')
            cs_output2 = cs_output2.replace('$s1', '$17')
            cs_output2 = cs_output2.replace('$s2', '$18')
            cs_output2 = cs_output2.replace('$s3', '$19')
            cs_output2 = cs_output2.replace('$s4', '$20')
            cs_output2 = cs_output2.replace('$s5', '$21')
            cs_output2 = cs_output2.replace('$s6', '$22')
            cs_output2 = cs_output2.replace('$s7', '$23')
            cs_output2 = cs_output2.replace('$k0', '$26')
            cs_output2 = cs_output2.replace('$k1', '$27')
        #print("Running MC ...")
        # A couple of test files need non-default llvm-mc options.
        if fname.endswith('thumb-fp-armv8.s.cs'):
            mc_output = run_mc(archs[arch], code, ['-triple=thumbv8'], mc_option)
        elif fname.endswith('mips64-alu-instructions.s.cs'):
            mc_output = run_mc(archs[arch], code, ['-triple=mips64el', '-mcpu=mips64r2'], mc_option)
        else:
            mc_output = run_mc(archs[arch], code, mc_modes[(arch, mode)], mc_option)
        mc_output2 = normalize_hex(mc_output)
        # Per-arch cleanups of llvm-mc's formatting before comparison.
        if arch == 'CS_ARCH_MIPS':
            mc_output2 = mc_output2.replace(' 0(', '(')
        if arch == 'CS_ARCH_PPC':
            mc_output2 = mc_output2.replace('.+', '')
            mc_output2 = mc_output2.replace('.', '')
            mc_output2 = mc_output2.replace(' 0(', '(')
        mc_output2 = mc_output2.replace(' ', '')
        mc_output2 = mc_output2.replace('opaque', '')
        if (cs_output2 != mc_output2):
            # Only report if Capstone also disagrees with the recorded asm.
            asm = asm.replace(' ', '').strip().lower()
            if asm != cs_output2:
                print("Mismatch: %s" %line.strip())
                print("\tMC = %s" %mc_output)
                print("\tCS = %s" %cs_output)
if __name__ == '__main__':
    if len(sys.argv) == 1:
        # No file argument: read one test-file name per line from stdin.
        for entry in sys.stdin.readlines():
            test_file(entry.strip())
    else:
        # Single test file given on the command line.
        test_file(sys.argv[1])
| |
# Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This test currently expects some preconfiguration to set up a GCS bucket,
# a Google pub/sub topic and subscription, and notifications enabled on the
# GCS bucket pushing to the pub/sub topic.
#
# The full setup and config instructions are listed in the following sections:
# * https://www.spinnaker.io/guides/tutorials/codelabs/pubsub-to-appengine/#create-a-gcs-bucket-to-store-artifacts
# * https://www.spinnaker.io/guides/tutorials/codelabs/pubsub-to-appengine/#set-up-google-cloud-pubsub-to-listen-to-bucket-object-changes
#
# Summarized here:
# * Create bucket: gsutil mb -p <project> <bucket_name>
# * Create topic and notification channel: gsutil notification create -t <topic> -f json <bucket_name>
# * Create subscription: gcloud beta pubsub subscriptions create <subscription> --topic <topic>
#
# In addition to the external config, this test expects a pub/sub subscription and artifacts configured
# in Spinnaker.
#
# Invoke this test by executing this python command:
#
# PYTHONPATH=$CITEST_ROOT \
# python spinnaker/testing/citest/tests/gcs_pubsub_gae_test.py
# --native_hostname
# --native_port
# --git_repo_url <app to deploy>
# --app_directory_root <root of app>
# --test_storage_account_name <configured artifact storage account>
# --appengine_primary_managed_project_id <managed gce project>
# --appengine_credentials_path <path to service account credentials>
# --test_subscription_name <configured subscription name>
# --test_gcs_bucket <bucket notifying pub/sub topic>
# --spinnaker_appengine_account
# --test_app
# --test_stack
#
# pylint: disable=bad-continuation
# pylint: disable=invalid-name
# pylint: disable=missing-docstring
# Standard python modules.
import json
import logging
import os
import subprocess
import shutil
import sys
import tempfile
import time
# citest modules.
import citest.base
import citest.gcp_testing as gcp
import citest.json_contract as jc
import citest.json_predicate as jp
import citest.service_testing as st
# Spinnaker modules.
import spinnaker_testing as sk
import spinnaker_testing.gate as gate
import spinnaker_testing.frigga as frigga
ov_factory = jc.ObservationPredicateFactory()
class AppengineGcsPubsubTestScenario(sk.SpinnakerTestScenario):
"""
Scenario for testing GAE deploys of GCS artifacts via pub/sub triggers.
"""
@classmethod
def new_agent(cls, bindings):
    """Construct this scenario's agent by delegating to gate.new_agent."""
    return gate.new_agent(bindings)
@classmethod
def initArgumentParser(cls, parser, defaults=None):
    """Initialize command line argument parser.

    Args:
      parser: argparse.ArgumentParser
      defaults: optional dict of defaults forwarded to the base class.
    """
    super(AppengineGcsPubsubTestScenario, cls).initArgumentParser(
        parser, defaults=defaults)
    # (Removed a dead `defaults = defaults or {}` assignment: the local
    # was never read after the super() call above.)
    parser.add_argument(
        '--app_directory_root', default=None,
        help='Path from the root of source code repository to the application directory.')
    parser.add_argument(
        '--branch', default='master',
        help='Git branch to be used when deploying from source code repository.')
    parser.add_argument(
        '--git_repo_url', default=None,
        help='URL of a git source code repository used by Spinnaker to deploy to App Engine.')
    parser.add_argument(
        '--test_gcs_bucket', default=None,
        help='GCS bucket to upload GAE app source code to.')
    parser.add_argument(
        '--test_storage_account_name', default=None,
        help='Storage account when testing GCS buckets.'
        ' If not specified, use the application default credentials.')
    parser.add_argument(
        '--test_subscription_name', default=None,
        help='Google pub/sub subscription name configured in Echo.')
def __init__(self, bindings, agent=None):
    """Construct the scenario.

    Args:
      bindings: dict of configuration bindings for the scenario.
      agent: optional agent forwarded to the base scenario.

    Raises:
      ValueError: if --git_repo_url or --app_directory_root was not supplied.
    """
    super(AppengineGcsPubsubTestScenario, self).__init__(bindings, agent)
    self.logger = logging.getLogger(__name__)
    # Use the bindings as resolved by the base class.
    bindings = self.bindings
    if not bindings['GIT_REPO_URL']:
        raise ValueError('Must supply value for --git_repo_url')
    if not bindings['APP_DIRECTORY_ROOT']:
        raise ValueError('Must supply value for --app_directory_root')
    # We'll call out the app name because it is widely used
    # because it scopes the context of our activities.
    self.TEST_APP = bindings['TEST_APP']
    self.TEST_STACK = bindings['TEST_STACK']
    # Artifact id shared between the pipeline trigger and the deploy stage.
    self.__EXPECTED_ARTIFACT_ID = 'deployable-gae-app-artifact'
    self.__gcp_project = bindings['APPENGINE_PRIMARY_MANAGED_PROJECT_ID']
    self.__cluster_name = frigga.Naming.cluster(self.TEST_APP, self.TEST_STACK)
    self.__server_group_name = frigga.Naming.server_group(self.TEST_APP, self.TEST_STACK)
    # The load balancer shares the cluster's name.
    self.__lb_name = self.__cluster_name
    self.__subscription_name = bindings['TEST_SUBSCRIPTION_NAME']
    self.__gcs_pubsub_agent = sk.GcsFileUploadAgent(bindings['APPENGINE_CREDENTIALS_PATH'])
    # Python is clearly hard-coded as the runtime here, but we're just asking App Engine to be a static file server.
    self.__app_yaml = ('\n'.join(['runtime: python27',
        'api_version: 1',
        'threadsafe: true',
        'service: {service}',
        'handlers:',
        ' - url: /.*',
        ' static_dir: .']).format(service=self.__lb_name))
    self.__app_directory_root = bindings['APP_DIRECTORY_ROOT']
    self.__branch = bindings['BRANCH']
    self.pipeline_id = None
    self.bucket = bindings['TEST_GCS_BUCKET']
    self.__test_repository_url = 'gs://' + self.bucket
    # Set later by create_deploy_pipeline's status extractor.
    self.__pipeline_id = None
def create_app(self):
    """Return an operation contract that creates the test application.

    The create_app operation itself is well tested elsewhere, so the
    contract is left empty; the operation is retryable to handle
    platform flakiness.
    """
    operation = self.agent.make_create_app_operation(
        bindings=self.bindings,
        application=self.TEST_APP,
        account_name=self.bindings['SPINNAKER_APPENGINE_ACCOUNT'])
    return st.OperationContract(operation, contract=jc.Contract())
def delete_app(self):
    """Return an operation contract that deletes the test application.

    The delete_app operation itself is well tested elsewhere, so the
    contract is left empty; the operation is retryable to handle
    platform flakiness.
    """
    operation = self.agent.make_delete_app_operation(
        application=self.TEST_APP,
        account_name=self.bindings['SPINNAKER_APPENGINE_ACCOUNT'])
    return st.OperationContract(operation, contract=jc.Contract())
def make_deploy_stage(self):
    """Build the App Engine deploy stage dict for the pipeline config."""
    cluster = {
        'account': self.bindings['SPINNAKER_APPENGINE_ACCOUNT'],
        'application': self.TEST_APP,
        'cloudProvider': 'appengine',
        'configFilepaths': [],
        'configFiles': [self.__app_yaml],
        'expectedArtifactId': self.__EXPECTED_ARTIFACT_ID,
        'fromArtifact': True,
        'gitCredentialType': 'NONE',
        'interestingHealthProviderNames': ['App Engine Service'],
        'provider': 'appengine',
        'region': self.bindings['TEST_GCE_REGION'],
        'sourceType': 'gcs',
        'stack': self.TEST_STACK,
        'storageAccountName': self.bindings.get('TEST_STORAGE_ACCOUNT_NAME')
    }
    return {
        'clusters': [cluster],
        'name': 'Deploy',
        'refId': 'DEPLOY',
        'requisiteStageRefIds': [],
        'type': 'deploy'
    }
def make_pubsub_trigger(self):
    """Build the Google pub/sub trigger entry for the pipeline config.

    The trigger fires on OBJECT_FINALIZE events arriving through the
    configured Echo subscription and binds the expected artifact so the
    deploy stage can consume it.
    """
    return {
        'attributeConstraints': {'eventType': 'OBJECT_FINALIZE'},
        'constraints': {},
        'enabled': True,
        'expectedArtifactIds': [
            self.__EXPECTED_ARTIFACT_ID
        ],
        'payloadConstraints': {},
        'pubsubSystem': 'google',
        'subscriptionName': self.__subscription_name, # Logical name assigned in Echo.
        'type': 'pubsub'
    }
def make_expected_artifact(self):
    """Build the expected-artifact declaration for the pipeline config.

    Matches the GCS object gs://<bucket>/app.tar uploaded by the test.
    """
    return {
        'defaultArtifact': {
            'kind': 'custom'
        },
        'id': self.__EXPECTED_ARTIFACT_ID,
        'matchArtifact': {
            'kind': 'gcs',
            'name': 'gs://{}/app.tar'.format(self.bucket),
            'type': 'gcs/object'
        },
        'useDefaultArtifact': False,
        'usePriorExecution': False
    }
def make_pipeline_spec(self, name):
    """Assemble the complete pipeline configuration dict named *name*."""
    return {
        'name': name,
        'stages': [self.make_deploy_stage()],
        'triggers': [self.make_pubsub_trigger()],
        'expectedArtifacts': [self.make_expected_artifact()],
        'application': self.TEST_APP,
        'stageCounter': 1,
        'parallel': True,
        'limitConcurrent': True,
        'appConfig': {},
        'index': 0
    }
def make_dict_matcher(self, want):
    """Recursively translate *want* into a jp.DICT_MATCHES predicate.

    Nested dicts recurse; list values become LIST_MATCHES over per-element
    predicates; every other value becomes a CONTAINS predicate.
    """
    spec = {}
    for key, value in want.items():
        if isinstance(value, dict):
            predicate = self.make_dict_matcher(value)
        elif isinstance(value, list):
            predicate = jp.LIST_MATCHES(
                [self.make_dict_matcher(item) if isinstance(item, dict)
                 else jp.CONTAINS(item)
                 for item in value])
        else:
            predicate = jp.CONTAINS(value)
        spec[key] = predicate
    return jp.DICT_MATCHES(spec)
def create_deploy_pipeline(self):
    """Creates the GCS-triggered deploy pipeline via Gate.

    Returns an OperationContract whose contract verifies that Gate now
    reports a pipeline config matching the spec we posted, and whose
    status extractor records the server-assigned pipeline id.
    """
    name = 'GcsToGaePubsubDeploy'
    self.pipeline_id = name
    pipeline_spec = self.make_pipeline_spec(name)
    payload = self.agent.make_json_payload_from_kwargs(**pipeline_spec)
    pipeline_config_path = 'applications/{app}/pipelineConfigs'.format(app=self.TEST_APP)
    builder = st.HttpContractBuilder(self.agent)
    (builder.new_clause_builder('Has Pipeline', retryable_for_secs=15)
        .get_url_path(pipeline_config_path)
        .EXPECT(
            jp.LIST_MATCHES([self.make_dict_matcher(pipeline_spec)])))
    # Need to query Gate for the id of the pipeline we just created...
    def create_pipeline_id_extractor(_ignored, context):
        # Looks up the pipeline we just posted by name and captures its id.
        pipeline_config_resp = self.agent.get(pipeline_config_path)
        pipeline_config_list = json.JSONDecoder().decode(pipeline_config_resp.output)
        found = next((x for x in pipeline_config_list if x['name'] == self.pipeline_id), None)
        if (found is not None):
            context['pipelineId'] = found['id']  # recorded for the log line below
            self.__pipeline_id = found['id']  # also saved on self so later test steps can reference it
            logging.info('Created pipeline config with id: %s', context['pipelineId'])
    return st.OperationContract(
        self.new_post_operation(
            title='create_gcs_gae_pubsub_pipeline', data=payload, path='pipelines',
            status_class=st.SynchronousHttpOperationStatus),
        contract=builder.build(),
        status_extractor=create_pipeline_id_extractor)
def trigger_deploy_pipeline(self):
    """Tars the cloned app and uploads it to GCS to fire the pub/sub trigger.

    The contract waits (up to 300s) for the triggered pipeline to create
    the expected server group, and the operation polls Gate for a
    SUCCEEDED execution of the pipeline created earlier.
    """
    name = 'app.tar'
    # NOTE(review): shell=True with an unquoted glob; paths come from test
    # bindings, so this assumes they contain no shell metacharacters.
    command = 'tar -cvf {tar} {git_dir}/{app_root}/*'.format(tar=name,
                                                             git_dir=self.temp,
                                                             app_root=self.bindings['APP_DIRECTORY_ROOT'])
    logging.info('Tar-ing %s/%s for GCS upload', self.temp, self.bindings['APP_DIRECTORY_ROOT'])
    subprocess.Popen(command, stderr=sys.stderr, shell=True).wait()
    group_name = frigga.Naming.server_group(
        app=self.TEST_APP,
        stack=self.bindings['TEST_STACK'],
        version='v000')
    # Triggered pipeline does a deploy, check for that server group.
    server_group_path = 'applications/{app}/serverGroups'.format(app=self.TEST_APP)
    builder = st.HttpContractBuilder(self.agent)
    (builder.new_clause_builder('GAE Deploy Pipeline Succeeded',
                                retryable_for_secs=300)
        .get_url_path(server_group_path)
        .EXPECT(
            ov_factory.value_list_matches(
                [jp.DICT_MATCHES({'name': jp.STR_EQ(group_name)})]
            )))
    # Status polling checks for a successful execution of the pipeline id
    # recorded by create_deploy_pipeline's status extractor.
    executions_path = 'executions?pipelineConfigIds={}&limit=1&statuses=SUCCEEDED'.format(self.__pipeline_id)
    return st.OperationContract(
        self.__gcs_pubsub_agent.new_gcs_pubsub_trigger_operation(
            gate_agent=self.agent,
            title='monitor_gcs_pubsub_pipeline',
            bucket_name=self.bucket,
            upload_path='{}'.format(name),
            local_filename=os.path.abspath(name),
            status_class=None,
            status_path=executions_path
        ),
        contract=builder.build())
def delete_load_balancer(self):
    """Deletes the App Engine load balancer (service) created by the deploy.

    The contract verifies directly against GCP that the service is gone
    (inspecting it yields an HTTP 404).
    """
    bindings = self.bindings
    payload = self.agent.make_json_payload_from_kwargs(
        job=[{
            'type': 'deleteLoadBalancer',
            'cloudProvider': 'appengine',
            'loadBalancerName': self.__lb_name,
            'account': bindings['SPINNAKER_APPENGINE_ACCOUNT'],
            'credentials': bindings['SPINNAKER_APPENGINE_ACCOUNT'],
            'user': '[anonymous]'
        }],
        description='Delete Load Balancer: {0} in {1}'.format(
            self.__lb_name,
            bindings['SPINNAKER_APPENGINE_ACCOUNT']),
        application=self.TEST_APP)
    builder = gcp.GcpContractBuilder(self.appengine_observer)
    (builder.new_clause_builder('Service Deleted', retryable_for_secs=30)
        .inspect_resource('apps.services',
                          self.__lb_name,
                          appsId=self.__gcp_project)
        .EXPECT(
            ov_factory.error_list_contains(gcp.HttpErrorPredicate(http_code=404))))
    return st.OperationContract(
        self.new_post_operation(
            title='delete_load_balancer', data=payload, path='tasks'),
        contract=builder.build())
def delete_deploy_pipeline(self, pipeline_id):
    """Deletes the named pipeline config via Gate.

    :param pipeline_id: the pipeline's name (used as its config id here).

    The contract verifies the config listing no longer contains an entry
    whose 'name' matches pipeline_id.
    """
    payload = self.agent.make_json_payload_from_kwargs(id=pipeline_id)
    path = os.path.join('pipelines', self.TEST_APP, pipeline_id)
    builder = st.HttpContractBuilder(self.agent)
    (builder.new_clause_builder('Has Pipeline',
                                retryable_for_secs=5)
        .get_url_path(
            'applications/{app}/pipelineConfigs'.format(app=self.TEST_APP))
        .excludes_path_value('name', pipeline_id))
    return st.OperationContract(
        self.new_delete_operation(
            title='delete_deploy_pipeline', data=payload, path=path,
            status_class=st.SynchronousHttpOperationStatus),
        contract=builder.build())
class AppengineGcsPubsubTest(st.AgentTestCase):
    """Citest test case for the GCS pub/sub AppEngine scenario.

    Test methods execute in alphabetical order by name (test_a..., test_b...,
    etc.), which defines the create/run/teardown sequence.
    """

    @classmethod
    def setUpClass(cls):
        """Clones the test app's git repo into a shared scratch directory."""
        runner = citest.base.TestRunner.global_runner()
        scenario = runner.get_shared_data(AppengineGcsPubsubTestScenario)
        bindings = scenario.bindings
        branch = bindings['BRANCH']
        git_repo = bindings['GIT_REPO_URL']
        scenario.temp = tempfile.mkdtemp()
        # App to tar and upload to GCS.
        # (Unused locals gcs_path/topic/subscription removed; they were
        # computed but never referenced.)
        command = 'git clone {repo} -b {branch} {dir}'.format(
            repo=git_repo, branch=branch, dir=scenario.temp)
        logging.info('Fetching %s', git_repo)
        subprocess.Popen(command, stderr=sys.stderr, shell=True).wait()

    @classmethod
    def tearDownClass(cls):
        """Removes the scratch directory created in setUpClass."""
        runner = citest.base.TestRunner.global_runner()
        scenario = runner.get_shared_data(AppengineGcsPubsubTestScenario)
        shutil.rmtree(scenario.temp)

    @property
    def scenario(self):
        # Shared scenario object owned by the global test runner.
        return citest.base.TestRunner.global_runner().get_shared_data(
            AppengineGcsPubsubTestScenario)

    @property
    def testing_agent(self):
        return self.scenario.agent

    def test_a_create_app(self):
        self.run_test_case(self.scenario.create_app(),
                           retry_interval_secs=8, max_retries=8)

    def test_b_create_pipeline(self):
        self.run_test_case(self.scenario.create_deploy_pipeline(),
                           retry_interval_secs=8, max_retries=8)

    def test_d_run_pipeline(self):
        # Wait for Echo's cache to pick up the deploy pipeline.
        # This is generally a bad strategy for synchronizing cache timing,
        # so we'll investigate performing this check a different way in the
        # future. One option is querying the Spinnaker services' metrics
        # endpoint and inspecting metrics related to caching once they are
        # instrumented.
        time.sleep(60)
        self.run_test_case(self.scenario.trigger_deploy_pipeline(),
                           poll_every_secs=5)

    def test_x_delete_load_balancer(self):
        self.run_test_case(self.scenario.delete_load_balancer(),
                           retry_interval_secs=8, max_retries=8)

    def test_y_delete_pipeline(self):
        self.run_test_case(
            self.scenario.delete_deploy_pipeline(self.scenario.pipeline_id))

    def test_z_delete_app(self):
        # Give a total of a minute because it might also need
        # an internal cache update
        self.run_test_case(self.scenario.delete_app(),
                           retry_interval_secs=8, max_retries=8)
def main():
    """Runs the citest suite with test-specific binding defaults."""
    test_id = AppengineGcsPubsubTestScenario.DEFAULT_TEST_ID
    defaults = {
        'TEST_STACK': 'appenginegcspubsubtest' + test_id,
        'TEST_APP': 'appenginegcspubsubgaetest' + test_id,
    }
    return citest.base.TestRunner.main(
        parser_inits=[AppengineGcsPubsubTestScenario.initArgumentParser],
        default_binding_overrides=defaults,
        test_case_list=[AppengineGcsPubsubTest])
if __name__ == '__main__':
    # Delegate to citest's runner and propagate its exit code.
    sys.exit(main())
| |
import re
import subprocess
import gzip
import glob
import os
import logging
import flowcell_parser.classes as cl
from taca.utils.config import CONFIG
logger=logging.getLogger(__name__)
dmux_folder='Demultiplexing'
def check_undetermined_status(run, und_tresh=10, q30_tresh=75, freq_tresh=40, pooled_tresh=5, dex_status='COMPLETED'):
    """Will check for undetermined fastq files, and perform the linking to the
    sample folder if the quality thresholds are met.

    :param run: path of the flowcell
    :type run: str
    :param und_tresh: max percentage of undetermined indexed in a lane allowed
    :type und_tresh: float
    :param q30_tresh: lowest percentage of q30 bases allowed
    :type q30_tresh: float
    :param freq_tresh: highest allowed percentage of the most common undetermined index
    :type freq_tresh: float
    :param pooled_tresh: max percentage of undetermined indexes allowed in a pooled lane
    :type pooled_tresh: float
    :param dex_status: demultiplexing status ('COMPLETED' or 'IN_PROGRESS')
    :type dex_status: str
    :returns boolean: True if the flowcell passes the checks, False otherwise
    """
    # The demultiplexing output folder name is configurable; fall back to
    # the default when the config key is absent.
    global dmux_folder
    try:
        dmux_folder = CONFIG['analysis']['bcl2fastq']['options'][0]['output_dir']
    except KeyError:
        dmux_folder = 'Demultiplexing'
    status = True
    if os.path.exists(os.path.join(run, dmux_folder)):
        xtp = cl.XTenParser(run)
        ss = xtp.samplesheet
        lb = xtp.lanebarcodes  # falsy until the HTML report is available
        path_per_lane = get_path_per_lane(run, ss)
        samples_per_lane = get_samples_per_lane(ss)
        workable_lanes = get_workable_lanes(run, dex_status)
        for lane in workable_lanes:
            if is_unpooled_lane(ss, lane):
                rename_undet(run, lane, samples_per_lane)
                # Link the undetermined reads only if the most common
                # undetermined barcode, the Q30 level and the undetermined
                # fraction are all within their thresholds.
                if check_index_freq(run, lane, freq_tresh):
                    if lb:
                        if first_qc_check(lane, lb, und_tresh, q30_tresh):
                            link_undet_to_sample(run, lane, path_per_lane)
                            status = status and True
                        else:
                            logger.warn("lane {} did not pass the qc checks, the Undetermined will not be added.".format(lane))
                            status = status and False
                    else:
                        logger.info("The HTML report is not available yet, will wait.")
                        status = status and False
                else:
                    logger.warn("lane {} did not pass the qc checks, the Undetermined will not be added.".format(lane))
                    status = status and False
            else:
                # NOTE(review): returning True here stops after the first
                # pooled lane that passes QC, skipping any remaining lanes
                # in the loop -- confirm this early return is intended.
                if lb and qc_for_pooled_lane(lane, lb, pooled_tresh):
                    return True
                logger.warn("The lane {} has been multiplexed, according to the samplesheet and will be skipped.".format(lane))
    else:
        logger.warn("No demultiplexing folder found, aborting")
    return status
def qc_for_pooled_lane(lane, lb, und_thresh):
    """Checks that a pooled lane's undetermined fraction is within threshold.

    :param lane: lane identifier
    :type lane: int
    :param lb: reader of laneBarcodes.html
    :type lb: flowcell_parser.classes.XTenLaneBarcodes
    :param und_thresh: maximal allowed percentage of undetermined indexes
    :type und_thresh: float
    :rtype: boolean
    :returns: True if the lane passes the check, False otherwise
    """
    d = {}
    for entry in lb.sample_data:
        if lane == int(entry['Lane']):
            if entry.get('Sample') != 'unknown':
                d['det'] = int(entry['Clusters'].replace(',', ''))
            else:
                d['undet'] = int(entry['Clusters'].replace(',', ''))
    if d['undet'] > (d['det'] + d['undet']) * und_thresh / 100:
        # float() mirrors first_qc_check and avoids Python 2 integer
        # division always logging 0% here.
        logger.warn("Lane {} has more than {}% undetermined indexes ({:.2f}%)".format(
            lane, und_thresh, float(d['undet']) / (d['det'] + d['undet']) * 100))
        return False
    return True
def rename_undet(run, lane, samples_per_lane):
    """Renames the Undetermined fastq file by prepending the sample name in front of it

    :param run: the path to the run folder
    :type run: str
    :param lane: lane identifier
    :type lane: int
    :param samples_per_lane: lane:sample dict
    :type samples_per_lane: dict
    """
    for file in glob.glob(os.path.join(run, dmux_folder, "Undetermined*L0?{}*".format(lane))):
        old_name = os.path.basename(file)
        old_name_comps = old_name.split("_")
        old_name_comps[1] = old_name_comps[0]  # replace S0 with Undetermined
        old_name_comps[0] = samples_per_lane[lane]  # replace Undetermined with samplename
        for index, comp in enumerate(old_name_comps):
            if comp.startswith('L00'):
                # Use 'L01x' instead of 'L00x' so piper can tell undetermined
                # reads apart from normally demultiplexed ones.
                old_name_comps[index] = comp.replace('L00', 'L01')
        new_name = "_".join(old_name_comps)
        logger.info("Renaming {} to {}".format(file, os.path.join(os.path.dirname(file), new_name)))
        os.rename(file, os.path.join(os.path.dirname(file), new_name))
def get_workable_lanes(run, status):
    """Lists the lanes that have an undetermined fastq file.

    :param run: the path to the run folder
    :type run: str
    :param status: the demultiplexing status
    :type status: str
    :rtype: list of ints
    :returns: list of lanes having an undetermined fastq file
    """
    lane_re = re.compile('L0[0,1]([0-9])')
    found = set()
    for undet_path in glob.glob(os.path.join(run, dmux_folder, '*Undetermined_*')):
        base = os.path.basename(undet_path)
        found.add(int(lane_re.search(base).group(1)))
    lanes = list(found)
    if status == 'IN_PROGRESS':
        # the last lane is the one that is currently being worked on by
        # bcl2fastq, don't work on it.
        lanes = lanes[:-1]
    logger.info("post_demux processing will happen with lanes {}".format(lanes))
    return lanes
def link_undet_to_sample(run, lane, path_per_lane):
    """Symlinks the undetermined file to the right sample folder with a
    RELATIVE path so it's carried over by rsync.

    :param run: path of the flowcell
    :type run: str
    :param lane: lane identifier
    :type lane: int
    :param path_per_lane: {lane:path/to/the/sample}
    :type path_per_lane: dict
    """
    for fastqfile in glob.glob(os.path.join(run, dmux_folder, '*Undetermined*_L0?{}_*'.format(lane))):
        # Skip files already linked on a previous invocation.
        if not os.path.exists(os.path.join(path_per_lane[lane], os.path.basename(fastqfile))):
            fqbname = os.path.basename(fastqfile)
            logger.info("linking file {} to {}".format(fastqfile, path_per_lane[lane]))
            # '../..' because the sample folder sits two levels below the
            # demultiplexing folder that holds the fastq file.
            os.symlink(os.path.join('..', '..', fqbname), os.path.join(path_per_lane[lane], os.path.basename(fastqfile)))
def save_index_count(barcodes, run, lane):
    """Writes the barcode counts to index_count_L<lane>.tsv, most frequent first.

    :param barcodes: {barcode:count}
    :type barcodes: dict
    :param run: path to the flowcell
    :type run: str
    :param lane: lane identifier
    """
    out_path = os.path.join(run, dmux_folder, 'index_count_L{}.tsv'.format(lane))
    ranked = sorted(barcodes.items(), key=lambda item: item[1], reverse=True)
    with open(out_path, 'w') as out:
        for barcode, count in ranked:
            out.write("{}\t{}\n".format(barcode, count))
def check_index_freq(run, lane, freq_tresh):
    """uses subprocess to perform zcat <file> | sed -n '1~4 p' | awk -F ':' '{print $NF}',
    counts the barcodes and returns true if the most represented index accounts
    for less than freq_tresh% of the total

    :param run: path to the flowcell
    :type run: str
    :param lane: lane identifier
    :type lane: int
    :param freq_tresh: maximal allowed frequency of the most frequent undetermined index
    :type freq_tresh: float
    :rtype: boolean
    :returns: True if the check passes, False otherwise
    """
    barcodes = {}
    if os.path.exists(os.path.join(run, dmux_folder, 'index_count_L{}.tsv'.format(lane))):
        # A previous invocation already counted this lane; reuse the cache.
        logger.info("Found index count for lane {}.".format(lane))
        with open(os.path.join(run, dmux_folder, 'index_count_L{}.tsv'.format(lane))) as idxf:
            for line in idxf:
                barcodes[line.split('\t')[0]] = int(line.split('\t')[1])
    else:
        # Touch the cache file, then count barcodes from the fastq headers.
        open(os.path.join(run, dmux_folder, 'index_count_L{}.tsv'.format(lane)), 'a').close()
        for fastqfile in glob.glob(os.path.join(run, dmux_folder, '*Undetermined*_L0?{}_R1*'.format(lane))):
            logger.info("working on {}".format(fastqfile))
            # Pipeline: every 4th line starting at 1 is a fastq header; awk
            # keeps its last ':'-separated field, the index sequence.
            # (sed's '1~4' step addressing is GNU sed only.)
            zcat = subprocess.Popen(['zcat', fastqfile], stdout=subprocess.PIPE)
            sed = subprocess.Popen(['sed', '-n', "1~4p"], stdout=subprocess.PIPE, stdin=zcat.stdout)
            awk = subprocess.Popen(['awk', '-F', ":", '{print $NF}'], stdout=subprocess.PIPE, stdin=sed.stdout)
            # Close our copies of the upstream pipe ends so producers get
            # SIGPIPE if awk exits early.
            zcat.stdout.close()
            sed.stdout.close()
            output = awk.communicate()[0]
            zcat.wait()
            sed.wait()
            # NOTE(review): output is a str on Python 2; on Python 3 it
            # would be bytes and this split would raise -- confirm the
            # target interpreter.
            for barcode in output.split('\n')[:-1]:
                try:
                    barcodes[barcode] = barcodes[barcode] + 1
                except KeyError:
                    barcodes[barcode] = 1
        save_index_count(barcodes, run, lane)
    total = sum(barcodes.values())
    # Most frequent barcode; ties broken by barcode string ordering.
    count, bar = max((v, k) for k, v in barcodes.items())
    if total * freq_tresh / 100 < count:
        logger.warn("The most frequent barcode of lane {} ({}) represents {}%, "
                    "which is over the threshold of {}%".format(lane, bar, count * 100 / total, freq_tresh))
        return False
    else:
        logger.info("Most frequent undetermined index represents less than {}% of the total, lane {} looks fine.".format(freq_tresh, lane))
        return True
def first_qc_check(lane, lb, und_tresh, q30_tresh):
    """Checks whether every sample's percentage of bases over Q30 is above
    the threshold and the undetermined fraction is below the threshold.

    :param lane: lane identifier
    :type lane: int
    :param lb: reader of laneBarcodes.html
    :type lb: flowcell_parser.classes.XTenLaneBarcodes
    :param und_tresh: maximal allowed percentage of undetermined indexes
    :type und_tresh: float
    :param q30_tresh: minimal allowed percentage of bases over q30
    :type q30_tresh: float
    :rtype: boolean
    :returns: True if the qc checks pass, False otherwise
    """
    clusters = {}
    for entry in lb.sample_data:
        if int(entry['Lane']) != lane:
            continue
        if entry.get('Sample') == 'unknown':
            clusters['undet'] = int(entry['Clusters'].replace(',', ''))
            continue
        q30 = float(entry['% >= Q30bases'])
        if q30 < q30_tresh:
            logger.warn("Sample {} of lane {} has a percentage of bases over q30 of {}%, "
                        "which is below the cutoff of {}% ".format(entry['Sample'], lane, q30, q30_tresh))
            return False
        clusters['det'] = int(entry['Clusters'].replace(',', ''))
    total = clusters['det'] + clusters['undet']
    if clusters['undet'] > total * und_tresh / 100:
        logger.warn("Lane {} has more than {}% undetermined indexes ({:.2f}%)".format(
            lane, und_tresh, float(clusters['undet']) / total * 100))
        return False
    return True
def get_path_per_lane(run, ss):
    """Maps each lane to the directory holding its sample's fastq files.

    :param run: the path to the flowcell
    :type run: str
    :param ss: SampleSheet reader
    :type ss: flowcell_parser.XTenSampleSheet
    """
    paths = {}
    for row in ss.data:
        try:
            paths[int(row['Lane'])] = os.path.join(
                run, dmux_folder, row['Project'], row['SampleID'])
        except KeyError:
            # Without a Project column, fall back to the demux folder itself.
            logger.error("Can't find the path to the sample, is 'Project' in the samplesheet ?")
            paths[int(row['Lane'])] = os.path.join(run, dmux_folder)
    return paths
def get_samples_per_lane(ss):
    """
    :param ss: SampleSheet reader
    :type ss: flowcell_parser.XTenSampleSheet
    :rtype: dict
    :returns: dictionary of lane:samplename
    """
    d = {}
    for l in ss.data:
        # NOTE(review): a cleaned-up name (stripping "Sample_" and mapping
        # "-" to "_") used to be computed here but was never used -- the raw
        # SampleName is what gets returned. Dead computation removed;
        # confirm the raw name is the intended value.
        d[int(l['Lane'])] = l['SampleName']
    return d
def get_barcode_per_lane(ss):
    """
    :param ss: SampleSheet reader
    :type ss: flowcell_parser.XTenSampleSheet
    :rtype: dict
    :returns: dictionary of lane:barcode
    """
    # Later rows for the same lane overwrite earlier ones, as in a plain loop.
    return {int(row['Lane']): row['index'] for row in ss.data}
def is_unpooled_lane(ss, lane):
    """
    :param ss: SampleSheet reader
    :type ss: flowcell_parser.XTenSampleSheet
    :param lane: lane identifier
    :type lane: int
    :rtype: boolean
    :returns: True if the samplesheet has one entry for that lane, False otherwise
    """
    matches = sum(1 for row in ss.data if int(row['Lane']) == lane)
    return matches == 1
def is_unpooled_run(ss):
    """
    :param ss: SampleSheet reader
    :type ss: flowcell_parser.XTenSampleSheet
    :rtype: boolean
    :returns: True if the samplesheet has one entry per lane, False otherwise
    """
    lanes = [row['Lane'] for row in ss.data]
    # Any duplicate lane value means at least one lane is pooled.
    return len(lanes) == len(set(lanes))
if __name__ == "__main__":
    # Ad-hoc manual invocation: log INFO to stderr and run the check
    # against a hard-coded HiSeq X flowcell path.
    import sys
    mainlog = logging.getLogger(__name__)
    mainlog.setLevel(level=logging.INFO)
    mfh = logging.StreamHandler(sys.stderr)
    mft = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    mfh.setFormatter(mft)
    mainlog.addHandler(mfh)
    check_undetermined_status("/srv/illumina/HiSeq_X_data/150424_ST-E00214_0031_BH2WY7CCXX")
| |
#
# ovirt-engine-setup -- ovirt engine setup
# Copyright (C) 2015 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""vmconsole proxy configuration plugin."""
import gettext
from otopi import constants as otopicons
from otopi import filetransaction, plugin, util
from ovirt_engine_setup import constants as osetupcons
from ovirt_engine_setup import hostname as osetuphostname
from ovirt_engine_setup import dialog
from ovirt_engine_setup.engine_common import constants as oengcommcons
from ovirt_engine_setup.vmconsole_proxy_helper import constants as ovmpcons
def _(m):
return gettext.dgettext(message=m, domain='ovirt-engine-setup')
def _base_url_from_env(env):
    """Builds the engine base URL from the setup environment.

    Picks http/https and the matching port depending on whether the
    application server is reached directly or behind the AJP proxy.
    """
    # NOTE(review): the "use https" decision reads JBOSS_DIRECT_HTTP_PORT
    # (not an SSL-specific key); preserved as-is, but worth confirming.
    use_ssl = env[
        oengcommcons.ConfigEnv.JBOSS_DIRECT_HTTP_PORT
    ]
    behind_proxy = env[
        oengcommcons.ConfigEnv.JBOSS_AJP_PORT
    ]
    if use_ssl:
        proto = 'https'
        port_key = (
            oengcommcons.ConfigEnv.HTTPS_PORT if behind_proxy
            else oengcommcons.ConfigEnv.JBOSS_DIRECT_HTTPS_PORT
        )
    else:
        proto = 'http'
        port_key = (
            oengcommcons.ConfigEnv.HTTP_PORT if behind_proxy
            else oengcommcons.ConfigEnv.JBOSS_DIRECT_HTTP_PORT
        )
    return "{proto}://{fqdn}:{port}/eayunos/".format(
        proto=proto,
        fqdn=env[osetupcons.ConfigEnv.FQDN],
        port=env[port_key],
    )
@util.export
class Plugin(plugin.PluginBase):
    """vmconsole proxy configuration plugin."""

    def __init__(self, context):
        super(Plugin, self).__init__(context=context)

    @plugin.event(
        stage=plugin.Stages.STAGE_INIT,
    )
    def _init(self):
        """Seeds the environment keys this plugin relies on."""
        self.environment.setdefault(
            ovmpcons.ConfigEnv.VMCONSOLE_PROXY_CONFIG,
            None
        )
        self.environment.setdefault(
            ovmpcons.EngineConfigEnv.ENGINE_FQDN,
            None
        )
        self.environment.setdefault(
            ovmpcons.ConfigEnv.VMCONSOLE_PROXY_PORT,
            ovmpcons.Defaults.DEFAULT_VMCONSOLE_PROXY_PORT
        )

    @plugin.event(
        stage=plugin.Stages.STAGE_CUSTOMIZATION,
        before=(
            osetupcons.Stages.DIALOG_TITLES_E_PRODUCT_OPTIONS,
        ),
        after=(
            ovmpcons.Stages.ENGINE_CORE_ENABLE,
        ),
    )
    def _customization(self):
        """Asks whether to configure the proxy; only offered on the engine host."""
        if self.environment[oengcommcons.EngineConst.ENGINE_ENABLE]:
            # Only prompt when no answer was supplied (e.g. via answer file).
            if self.environment[
                ovmpcons.ConfigEnv.VMCONSOLE_PROXY_CONFIG
            ] is None:
                self.environment[
                    ovmpcons.ConfigEnv.VMCONSOLE_PROXY_CONFIG
                ] = dialog.queryBoolean(
                    dialog=self.dialog,
                    name='OVESETUP_CONFIG_VMCONSOLE_PROXY',
                    note=_(
                        'Configure VM Console Proxy on this host '
                        '(@VALUES@) [@DEFAULT@]: '
                    ),
                    prompt=True,
                    default=True,
                )
        else:
            self.logger.info(_(
                'Deploying VM Console Proxy on a separate '
                'host is not supported'
            ))
            self.environment[ovmpcons.ConfigEnv.VMCONSOLE_PROXY_CONFIG] = False

    @plugin.event(
        stage=plugin.Stages.STAGE_CUSTOMIZATION,
        after=(
            osetupcons.Stages.DIALOG_TITLES_S_NETWORK,
            oengcommcons.Stages.NETWORK_OWNERS_CONFIG_CUSTOMIZED,
        ),
        before=(
            osetupcons.Stages.DIALOG_TITLES_E_NETWORK,
        ),
        condition=lambda self: self.environment[
            ovmpcons.ConfigEnv.VMCONSOLE_PROXY_CONFIG
        ],
    )
    def _customizationNetwork(self):
        """Collects the engine FQDN and registers the proxy's firewalld service."""
        osetuphostname.Hostname(
            plugin=self,
        ).getHostname(
            envkey=ovmpcons.EngineConfigEnv.ENGINE_FQDN,
            whichhost=_('the engine'),
            supply_default=True,
        )
        self.environment[osetupcons.NetEnv.FIREWALLD_SERVICES].extend([
            {
                'name': 'ovirt-vmconsole-proxy',
                'directory': 'vmconsole-proxy'
            },
        ])
        # Substituted into the firewalld service template.
        self.environment[
            osetupcons.NetEnv.FIREWALLD_SUBST
        ].update({
            '@VMCONSOLE_PROXY_PORT@': self.environment[
                ovmpcons.ConfigEnv.VMCONSOLE_PROXY_PORT
            ],
        })

    @plugin.event(
        stage=plugin.Stages.STAGE_MISC,
        condition=lambda self: self.environment[
            ovmpcons.ConfigEnv.VMCONSOLE_PROXY_CONFIG
        ],
    )
    def _miscConfigVMConsoleHelper(self):
        """Queues a transaction writing the vmconsole helper vars file."""
        content = (
            'ENGINE_CA={engine_apache_ca_cert}\n'
            'ENGINE_VERIFY_HOST={engine_verify_host}\n'
            'ENGINE_BASE_URL={engine_base_url}\n'
            'TOKEN_CERTIFICATE={certificate}\n'
            'TOKEN_KEY={key}\n'
        ).format(
            engine_apache_ca_cert=(
                oengcommcons.FileLocations.OVIRT_ENGINE_PKI_APACHE_CA_CERT
            ),
            # we skip host verification only if it is localhost
            engine_verify_host=self.environment[
                osetupcons.ConfigEnv.FQDN_NON_LOOPBACK_VALIDATION
            ],
            engine_base_url=_base_url_from_env(self.environment),
            certificate=(
                ovmpcons.FileLocations.
                OVIRT_ENGINE_PKI_VMCONSOLE_PROXY_HELPER_CERT
            ),
            key=(
                ovmpcons.FileLocations.
                OVIRT_ENGINE_PKI_VMCONSOLE_PROXY_HELPER_KEY
            ),
        )
        self.environment[otopicons.CoreEnv.MAIN_TRANSACTION].append(
            filetransaction.FileTransaction(
                name=(
                    ovmpcons.FileLocations.
                    VMCONSOLE_PROXY_HELPER_VARS_SETUP
                ),
                content=content,
                modifiedList=self.environment[
                    otopicons.CoreEnv.MODIFIED_FILES
                ],
            )
        )

    @plugin.event(
        stage=plugin.Stages.STAGE_MISC,
        condition=lambda self: (
            self.environment[
                ovmpcons.ConfigEnv.VMCONSOLE_PROXY_CONFIG
            ]
        ),
    )
    def _miscConfigVMConsoleProxy(self):
        """Queues a transaction copying the shipped proxy config into place."""
        with open(ovmpcons.FileLocations.OVIRT_VMCONSOLE_PROXY_CONFIG) as f:
            self.environment[otopicons.CoreEnv.MAIN_TRANSACTION].append(
                filetransaction.FileTransaction(
                    name=ovmpcons.FileLocations.VMCONSOLE_CONFIG,
                    content=f.read(),
                    modifiedList=self.environment[
                        otopicons.CoreEnv.MODIFIED_FILES
                    ],
                )
            )
# vim: expandtab tabstop=4 shiftwidth=4
| |
# Copyright 2019 The ROBEL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Hardware reset functions for the D'Kitty."""
import numpy as np
from robel.components.builder import ComponentBuilder
from robel.components.robot import RobotComponentBuilder, RobotState
from robel.components.robot.dynamixel_robot import DynamixelRobotComponent
from robel.components.tracking import TrackerComponentBuilder
from robel.components.tracking.tracker import TrackerComponent
from robel.utils.reset_procedure import ResetProcedure
# Maximum values for each joint.
BASEMAX = .8
MIDMAX = 2.4
FOOTMAX = 2.5

# Common parameters for all `set_state` commands.
SET_PARAMS = dict(
    error_tol=5 * np.pi / 180,  # 5 degrees
    last_diff_tol=.1 * np.pi / 180,  # 0.1 degrees (previous comment said 5)
)

# Convenience constants.
PI = np.pi
PI2 = np.pi / 2
PI4 = np.pi / 4
PI6 = np.pi / 6

# 6-element joint targets used with the 6-motor side groups defined in
# configure_reset_groups below (two legs of three joints each).
OUTWARD_TUCK_POSE = np.array([0, -MIDMAX, FOOTMAX, 0, MIDMAX, -FOOTMAX])
INWARD_TUCK_POSE = np.array([0, MIDMAX, -FOOTMAX, 0, -MIDMAX, FOOTMAX])
class ScriptedDKittyResetProcedure(ResetProcedure):
    """Scripted reset procedure for D'Kitty.

    This resets the D'Kitty to a standing position.
    """

    def __init__(self,
                 upright_threshold: float = 0.9,
                 standing_height: float = 0.35,
                 height_tolerance: float = 0.05,
                 max_attempts: int = 5):
        # upright_threshold: minimum torso rot[2, 2] to count as upright.
        # standing_height: expected torso height when standing (tracker units).
        # height_tolerance: allowed deviation from standing_height.
        # max_attempts: number of stand-up attempts before giving up.
        super().__init__()
        self._upright_threshold = upright_threshold
        self._standing_height = standing_height
        self._height_tolerance = height_tolerance
        self._max_attempts = max_attempts
        self._robot = None
        self._tracker = None

    def configure_reset_groups(self, builder: ComponentBuilder):
        """Configures the component groups needed for reset."""
        if isinstance(builder, RobotComponentBuilder):
            # Side/end groupings over the 12 leg motors (3 joints per leg).
            builder.add_group('left', motor_ids=[20, 21, 22, 30, 31, 32])
            builder.add_group('right', motor_ids=[10, 11, 12, 40, 41, 42])
            builder.add_group('front', motor_ids=[10, 11, 12, 20, 21, 22])
            builder.add_group('back', motor_ids=[30, 31, 32, 40, 41, 42])
        elif isinstance(builder, TrackerComponentBuilder):
            assert 'torso' in builder.group_configs

    def reset(self, robot: DynamixelRobotComponent, tracker: TrackerComponent):
        """Performs the reset procedure."""
        self._robot = robot
        self._tracker = tracker
        attempts = 0
        # Retry the flip/tuck/stand sequence until standing or out of attempts.
        while not self._is_standing():
            attempts += 1
            if attempts > self._max_attempts:
                break
            if self._is_upside_down():
                self._perform_flip_over()
            self._perform_tuck_under()
            self._perform_stand_up()

    def _is_standing(self) -> bool:
        """Returns True if the D'Kitty is fully standing."""
        state = self._tracker.get_state('torso', raw_states=True)
        height = state.pos[2]
        upright = state.rot[2, 2]
        # NOTE(review): '{:2f}' is likely meant to be '{:.2f}'; output string
        # left unchanged here.
        print('Upright: {:2f}, height: {:2f}'.format(upright, height))
        if upright < self._upright_threshold:
            return False
        if (np.abs(height - self._standing_height) > self._height_tolerance):
            return False
        return True

    def _get_uprightedness(self) -> float:
        """Returns the uprightedness of the D'Kitty."""
        # z-component of the torso's z-axis: 1 upright, -1 upside-down.
        return self._tracker.get_state('torso', raw_states=True).rot[2, 2]

    def _is_upside_down(self) -> bool:
        """Returns whether the D'Kitty is upside-down."""
        return self._get_uprightedness() < 0

    def _perform_flip_over(self):
        """Attempts to flip the D'Kitty over."""
        while self._is_upside_down():
            print('Is upside down {}; attempting to flip over...'.format(
                self._get_uprightedness()))
            # Spread flat and extended.
            self._perform_flatten()
            # If we somehow flipped over from that, we're done.
            if not self._is_upside_down():
                return
            # Tuck in one side while pushing down on the other side.
            self._robot.set_state(
                {
                    'left':
                        RobotState(qpos=np.array([-PI4, -MIDMAX, FOOTMAX] * 2)),
                    'right': RobotState(qpos=np.array([-PI - PI4, 0, 0] * 2)),
                },
                timeout=4,
                **SET_PARAMS,
            )
            # Straighten out the legs that were pushing down.
            self._robot.set_state(
                {
                    'left': RobotState(qpos=np.array([PI2, 0, 0] * 2)),
                    'right': RobotState(qpos=np.array([-PI2, 0, 0] * 2)),
                },
                timeout=4,
                **SET_PARAMS,
            )

    def _perform_tuck_under(self):
        """Tucks the D'Kitty's legs so that they're under itself."""
        # Bring in both sides of the D'Kitty while remaining flat.
        self._perform_flatten()
        # Tuck one side at a time.
        for side in ('left', 'right'):
            self._robot.set_state(
                {side: RobotState(qpos=np.array(INWARD_TUCK_POSE))},
                timeout=4,
                **SET_PARAMS,
            )

    def _perform_flatten(self):
        """Makes the D'Kitty go into a flat pose."""
        # Same tuck pose but with the base joints rotated outward on each side.
        left_pose = INWARD_TUCK_POSE.copy()
        left_pose[[0, 3]] = PI2
        right_pose = INWARD_TUCK_POSE.copy()
        right_pose[[0, 3]] = -PI2
        self._robot.set_state(
            {
                'left': RobotState(qpos=left_pose),
                'right': RobotState(qpos=right_pose),
            },
            timeout=4,
            **SET_PARAMS,
        )

    def _perform_stand_up(self):
        """Makes the D'Kitty stand up."""
        # Flip the back and front.
        self._robot.set_state(
            {
                'back': RobotState(
                    qpos=np.array(OUTWARD_TUCK_POSE[3:].tolist() * 2)),
            },
            timeout=4,
            **SET_PARAMS,
        )
        self._robot.set_state(
            {
                'front': RobotState(
                    qpos=np.array(OUTWARD_TUCK_POSE[:3].tolist() * 2)),
            },
            timeout=4,
            **SET_PARAMS,
        )
        # Stand straight up.
        self._robot.set_state(
            {
                'dkitty': RobotState(qpos=np.zeros(12)),
            },
            timeout=3,
            **SET_PARAMS,
        )
        # Tuck a bit.
        self._robot.set_state(
            {
                'dkitty': RobotState(qpos=np.array([0, PI6, -PI6] * 4)),
            },
            timeout=1,
            **SET_PARAMS,
        )
        # Stand straight up.
        self._robot.set_state(
            {
                'dkitty': RobotState(qpos=np.zeros(12)),
            },
            timeout=3,
            **SET_PARAMS,
        )
| |
"""Constants and functions pertaining to classification datasets."""
import functools
import math
import numpy as np
import tensorflow as tf
import vesper.util.signal_utils as signal_utils
import vesper.util.time_frequency_analysis_utils as tfa_utils
# dataset parts
DATASET_PART_TRAINING = 'Training'
DATASET_PART_VALIDATION = 'Validation'
DATASET_PART_TEST = 'Test'

# dataset modes, determining the preprocessing performed by the dataset
DATASET_MODE_TRAINING = 'Training'
DATASET_MODE_EVALUATION = 'Evaluation'
DATASET_MODE_INFERENCE = 'Inference'

# Feature schema for serialized waveform examples: raw sample bytes
# (decoded elsewhere as little-endian int16) plus an integer class label.
_WAVEFORM_DATASET_FEATURES = {
    'waveform': tf.FixedLenFeature((), tf.string, default_value=''),
    'label': tf.FixedLenFeature((), tf.int64, default_value=0)
}
def create_spectrogram_dataset_from_waveforms_array(
        waveforms, mode, settings, num_repeats=1, shuffle=False, batch_size=1,
        feature_name='spectrogram'):
    """Creates a spectrogram dataset from an in-memory waveforms array."""
    waveform_dataset = tf.data.Dataset.from_tensor_slices(waveforms)
    return _create_spectrogram_dataset(
        waveform_dataset, mode, settings, num_repeats=num_repeats,
        shuffle=shuffle, batch_size=batch_size, feature_name=feature_name)
def create_spectrogram_dataset_from_waveform_files(
        dir_path, mode, settings, num_repeats=1, shuffle=False, batch_size=1,
        feature_name='spectrogram'):
    """Creates a spectrogram dataset from .tfrecords files in *dir_path*."""
    waveform_dataset = create_waveform_dataset_from_waveform_files(dir_path)
    return _create_spectrogram_dataset(
        waveform_dataset, mode, settings, num_repeats=num_repeats,
        shuffle=shuffle, batch_size=batch_size, feature_name=feature_name)
def create_waveform_dataset_from_waveform_files(dir_path):
    """Creates a (waveform, label) dataset from .tfrecords files in *dir_path*."""
    pattern = str(dir_path / '*.tfrecords')
    # Sort the matched file paths so dataset order is reproducible.
    paths = sorted(tf.gfile.Glob(pattern))
    records = tf.data.TFRecordDataset(paths)
    return records.map(_parse_example)
def _parse_example(example_proto):
    """Parses one serialized example into (int16 waveform tensor, label)."""
    features = tf.parse_single_example(
        example_proto, _WAVEFORM_DATASET_FEATURES)
    waveform = tf.decode_raw(
        features['waveform'], out_type=tf.int16, little_endian=True)
    return waveform, features['label']
def _create_spectrogram_dataset(
        waveform_dataset, mode, settings, num_repeats=1, shuffle=False,
        batch_size=1, feature_name='spectrogram'):
    """Preprocesses waveforms and computes spectrograms as a tf.data pipeline."""
    preprocessor = _Preprocessor(mode, settings, feature_name)
    parallelism = settings.num_dataset_parallel_calls
    dataset = waveform_dataset
    # A num_repeats of None means repeat indefinitely.
    if num_repeats is None:
        dataset = dataset.repeat()
    elif num_repeats != 1:
        dataset = dataset.repeat(num_repeats)
    dataset = dataset.map(
        preprocessor.preprocess_waveform, num_parallel_calls=parallelism)
    if shuffle:
        dataset = dataset.shuffle(10 * batch_size)
    if batch_size != 1:
        dataset = dataset.batch(batch_size)
    # Spectrogram computation happens after batching.
    return dataset.map(
        preprocessor.compute_spectrograms, num_parallel_calls=parallelism)
class _Preprocessor:

    """
    Dataset example preprocessor.

    A dataset example preprocessor prepares dataset examples for input to
    a classifier's neural network during training, evaluation, and inference.
    It performs data augmentation, waveform slicing, and spectrogram
    computation in a TensorFlow graph.
    """

    def __init__(self, mode, settings, output_feature_name='spectrogram'):

        # `mode` can be `DATASET_MODE_TRAINING`, `DATASET_MODE_EVALUATION`,
        # or `DATASET_MODE_INFERENCE`.
        #
        # When `mode` is `DATASET_MODE_TRAINING`, dataset examples are
        # preprocessed according to certain settings that control waveform
        # slicing and data augmentation.
        #
        # When `mode` is `DATASET_MODE_EVALUATION`, dataset examples are
        # processed as when it is `DATASET_MODE_TRAINING`, except that
        # data augmentation can be turned on or off via the
        # `evaluation_data_augmentation_enabled` setting.
        #
        # When `mode` is `DATASET_MODE_INFERENCE`, dataset waveforms are
        # not sliced as they are when it is `DATASET_MODE_TRAINING` or
        # `DATASET_MODE_EVALUATION`. Instead, the slicing start index is
        # always zero. Data augmentation is also disabled.

        self.settings = settings
        self.output_feature_name = output_feature_name

        s = settings

        # Precompute the slicing and spectrogram parameters once so the
        # per-example graph ops below only use constants.
        (self.time_start_index, self.time_end_index,
         self.window_size, self.hop_size, self.dft_size,
         self.freq_start_index, self.freq_end_index) = \
            _get_low_level_preprocessing_settings(mode, s)

        self.waveform_length = self.time_end_index - self.time_start_index

        # Periodic Hann analysis window for the STFT.
        self.window_fn = functools.partial(
            tf.contrib.signal.hann_window, periodic=True)

        augmentation_enabled = _is_data_augmentation_enabled(mode, s)

        self.random_waveform_time_shifting_enabled = \
            augmentation_enabled and s.random_waveform_time_shifting_enabled

        if self.random_waveform_time_shifting_enabled:
            # Maximum shift magnitude in samples, converted from seconds.
            self.max_waveform_time_shift = signal_utils.seconds_to_frames(
                s.max_waveform_time_shift, s.waveform_sample_rate)

    def preprocess_waveform(self, waveform, label=None):

        """
        Preprocesses one input waveform.

        Applies any data augmentation indicated by this preprocessor's
        settings, and extracts the appropriate slice from the resulting
        waveform.
        """

        # Get time shifting offset. With integer bounds,
        # `tf.random.uniform` samples from the half-open range [-n, n).
        if self.random_waveform_time_shifting_enabled:
            n = self.max_waveform_time_shift
            offset = tf.random.uniform((), -n, n, dtype=tf.int32)
        else:
            offset = 0

        # Slice waveform, shifting the slice window by the random offset.
        start_index = self.time_start_index + offset
        end_index = self.time_end_index + offset
        waveform = waveform[start_index:end_index]

        if label is None:
            return waveform
        else:
            return waveform, label

    def compute_spectrograms(self, waveforms, labels=None):

        """Computes spectrograms for a batch of waveforms."""

        s = self.settings

        # Set final dimension of waveforms, which comes to us as `None`.
        self._set_waveforms_shape(waveforms)

        # Compute STFTs.
        waveforms = tf.cast(waveforms, tf.float32)
        stfts = tf.contrib.signal.stft(
            waveforms, self.window_size, self.hop_size,
            fft_length=self.dft_size, window_fn=self.window_fn)

        # Slice STFTs along frequency axis.
        stfts = stfts[..., self.freq_start_index:self.freq_end_index]

        # Get STFT magnitudes squared, i.e. squared spectrograms.
        # z * conj(z) is real-valued; `tf.real` just drops the zero
        # imaginary part.
        grams = tf.real(stfts * tf.conj(stfts))

        # Take natural log of squared spectrograms. Adding an epsilon
        # avoids log-of-zero errors.
        grams = tf.log(grams + s.spectrogram_log_epsilon)

        # Clip spectrograms if indicated.
        if s.spectrogram_clipping_enabled:
            grams = tf.clip_by_value(
                grams, s.spectrogram_clipping_min, s.spectrogram_clipping_max)

        # Normalize spectrograms if indicated (affine rescaling).
        if s.spectrogram_normalization_enabled:
            grams = \
                s.spectrogram_normalization_scale_factor * grams + \
                s.spectrogram_normalization_offset

        # Reshape spectrograms for input into Keras neural network.
        grams = self._reshape_grams(grams)

        # Create features dictionary.
        features = {self.output_feature_name: grams}

        if labels is None:
            return features
        else:
            # Reshape labels into a single 2D column.
            labels = tf.reshape(labels, (-1, 1))
            return features, labels

    def _set_waveforms_shape(self, waveforms):

        """
        Sets the final dimension of a batch of waveforms.

        When we receive a batch of waveforms its final dimension is
        `None`, even though we know that the dimension is the sliced
        waveform length. We set the dimension since if we don't and
        the model includes at least one convolutional layer, then
        the `Classifier.train` method raises an exception.
        """

        dims = list(waveforms.shape.dims)
        dims[-1] = tf.Dimension(self.waveform_length)
        shape = tf.TensorShape(dims)
        waveforms.set_shape(shape)

    def _reshape_grams(self, grams):

        """
        Reshapes a batch of spectrograms for input to a Keras neural network.

        The batch must be reshaped differently depending on whether the
        network's input layer is convolutional or dense.
        """

        s = self.settings

        if len(s.convolutional_layer_sizes) != 0:
            # model is CNN

            # Add channel dimension for Keras `Conv2D` layer compatibility.
            return tf.expand_dims(grams, 3)

        else:
            # model is DNN

            # Flatten spectrograms for Keras `Dense` layer compatibility.
            size = get_sliced_spectrogram_size(s)
            return tf.reshape(grams, (-1, size))
def _get_low_level_preprocessing_settings(mode, settings):
    """
    Compute low-level waveform slicing and spectrogram parameters.

    Returns the tuple (time_start_index, time_end_index, window_size,
    hop_size, dft_size, freq_start_index, freq_end_index).
    """
    s = settings
    sample_rate = s.waveform_sample_rate
    s2f = signal_utils.seconds_to_frames

    # Waveform time slicing. For inference the slice always starts at zero.
    if mode == DATASET_MODE_INFERENCE:
        time_start_index = 0
    else:
        time_start_index = s2f(s.waveform_start_time, sample_rate)
    time_end_index = time_start_index + s2f(s.waveform_duration, sample_rate)

    # Spectrogram parameters. The hop size setting is a percentage of
    # the window size.
    window_size = s2f(s.spectrogram_window_size, sample_rate)
    fraction = s.spectrogram_hop_size / 100
    hop_size = s2f(s.spectrogram_window_size * fraction, sample_rate)
    dft_size = tfa_utils.get_dft_size(window_size)

    # Spectrogram frequency slicing. The end index is made exclusive of
    # the end-frequency bin by the `+ 1`.
    f2i = tfa_utils.get_dft_bin_num
    freq_start_index = f2i(s.spectrogram_start_freq, sample_rate, dft_size)
    freq_end_index = f2i(s.spectrogram_end_freq, sample_rate, dft_size) + 1

    return (
        time_start_index, time_end_index, window_size, hop_size, dft_size,
        freq_start_index, freq_end_index)
def _is_data_augmentation_enabled(mode, settings):
    """
    Indicate whether data augmentation is enabled for `mode`.

    Augmentation is always enabled for training, and enabled for
    evaluation only when the `evaluation_data_augmentation_enabled`
    setting is true. It is disabled for all other modes.
    """
    if mode == DATASET_MODE_TRAINING:
        return True
    return (mode == DATASET_MODE_EVALUATION and
            settings.evaluation_data_augmentation_enabled)
def get_sliced_spectrogram_size(settings):
    """Return the total number of elements in one sliced spectrogram."""
    shape = get_sliced_spectrogram_shape(settings)
    return shape[0] * shape[1]
def get_sliced_spectrogram_shape(settings):
    """
    Return the (num_spectra, num_bins) shape of one sliced spectrogram.

    The shape is computed from the training-mode preprocessing settings.
    """
    (time_start_index, time_end_index, window_size, hop_size, _,
     freq_start_index, freq_end_index) = \
        _get_low_level_preprocessing_settings(
            DATASET_MODE_TRAINING, settings)
    waveform_length = time_end_index - time_start_index
    spectrum_count = tfa_utils.get_num_analysis_records(
        waveform_length, window_size, hop_size)
    bin_count = freq_end_index - freq_start_index
    return (spectrum_count, bin_count)
def show_dataset(dataset, num_batches):

    """
    Print summary statistics for up to `num_batches` batches of `dataset`.

    For each batch, prints the feature name, the value and label classes
    and shapes, and the running mean and standard deviation of all
    feature values seen so far.
    """

    print('output_types', dataset.output_types)
    print('output_shapes', dataset.output_shapes)

    iterator = dataset.make_one_shot_iterator()
    next_batch = iterator.get_next()

    with tf.Session() as session:

        # Running accumulators for the mean and standard deviation.
        num_values = 0
        values_sum = 0
        squares_sum = 0

        for i in range(num_batches):

            features, labels = session.run(next_batch)

            # The features dictionary holds a single (name, values) entry.
            feature_name, values = list(features.items())[0]

            values_class = values.__class__.__name__
            labels_class = labels.__class__.__name__

            num_values += values.size
            values_sum += values.sum()
            squares_sum += (values ** 2).sum()

            mean = values_sum / num_values
            # Std dev via E[x^2] - E[x]^2.
            # NOTE(review): assumes `math` is imported at module level —
            # confirm against the file's import block.
            std_dev = math.sqrt(squares_sum / num_values - mean ** 2)

            print(
                'Batch {} of {}: {} {} {} {} {}, labels {} {}'.format(
                    i + 1, num_batches, feature_name, values_class,
                    values.shape, mean, std_dev, labels_class, labels.shape))
def _main():
    # Script entry point: currently just exercises random time shifting.
    _test_random_time_shifting()
def _test_random_time_shifting():

    """
    Tests random time shifting for data augmentation.

    Random time shifting is used by the `WaveformPreprocessor` class to
    distribute NFC onset times more evenly during classifier training.
    """

    class ShiftingSlicer:

        # Slices a fixed-length window out of each example, with the
        # window start moved by a random offset.

        def __init__(self):
            self.max_shift = 2
            self.length = 3

        def __call__(self, x):
            n = self.max_shift
            # NOTE(review): with integer bounds `tf.random.uniform`
            # samples from the half-open range [-n, n), so the largest
            # positive shift is n - 1.
            i = tf.random.uniform((), -n, n, dtype=tf.int32)
            return x[n + i:n + self.length + i]

    # Create dataset as NumPy array. Each row is recognizable by its
    # hundreds digit, each column by its ones digit.
    m = 10
    n = 6
    x = 100 * np.arange(m).reshape((m, 1)) + np.arange(n).reshape((1, n))

    # Create TensorFlow dataset.
    slicer = ShiftingSlicer()
    dataset = tf.data.Dataset.from_tensor_slices(x).repeat(2).map(slicer)

    # Show dataset, iterating until the one-shot iterator is exhausted.
    iterator = dataset.make_one_shot_iterator()
    x = iterator.get_next()
    with tf.Session() as session:
        while True:
            try:
                x_ = session.run(x)
                print(x_)
            except tf.errors.OutOfRangeError:
                break
# Run the module's ad hoc tests when executed as a script.
if __name__ == '__main__':
    _main()
| |
"""Test script for ftplib module."""
# Modified by Giampaolo Rodola' to test FTP class, IPv6 and TLS
# environment
import ftplib
import asyncore
import asynchat
import socket
import StringIO
import errno
import os
try:
import ssl
except ImportError:
ssl = None
from unittest import TestCase
from test import test_support
from test.test_support import HOST, HOSTv6
threading = test_support.import_module('threading')
# the dummy data returned by server over the data channel when
# RETR, LIST and NLST commands are issued
RETR_DATA = 'abcde12345\r\n' * 1000
LIST_DATA = 'foo\r\nbar\r\n'
NLST_DATA = 'foo\r\nbar\r\n'
class DummyDTPHandler(asynchat.async_chat):
    """Dummy FTP data-channel (DTP) handler used by the test server."""

    # True once the final '226' reply has been pushed and the channel closed.
    dtp_conn_closed = False

    def __init__(self, conn, baseclass):
        asynchat.async_chat.__init__(self, conn)
        # The control-connection handler this data channel belongs to.
        self.baseclass = baseclass
        self.baseclass.last_received_data = ''

    def handle_read(self):
        # Accumulate everything received on the data channel so tests can
        # inspect it via the control handler's `last_received_data`.
        self.baseclass.last_received_data += self.recv(1024)

    def handle_close(self):
        # XXX: this method can be called many times in a row for a single
        # connection, including in clear-text (non-TLS) mode.
        # (behaviour witnessed with test_data_connection)
        if not self.dtp_conn_closed:
            self.baseclass.push('226 transfer complete')
            self.close()
            self.dtp_conn_closed = True

    def handle_error(self):
        # Re-raise so errors surface in the test run instead of being
        # swallowed by asyncore's default handling.
        raise
class DummyFTPHandler(asynchat.async_chat):
    """
    Control-connection handler implementing a minimal dummy FTP server.

    Commands are dispatched to `cmd_<name>` methods; replies are canned
    strings that exercise the ftplib client under test.
    """

    # Class used for data (DTP) connections; overridden by the TLS subclass.
    dtp_handler = DummyDTPHandler

    def __init__(self, conn):
        asynchat.async_chat.__init__(self, conn)
        self.set_terminator("\r\n")
        self.in_buffer = []
        self.dtp = None
        self.last_received_cmd = None
        self.last_received_data = ''
        # If non-empty, pushed before handling the next received command.
        self.next_response = ''
        # Offset argument of the last REST command, or None.
        self.rest = None
        self.next_retr_data = RETR_DATA
        self.push('220 welcome')

    def collect_incoming_data(self, data):
        self.in_buffer.append(data)

    def found_terminator(self):
        # One complete command line has arrived; dispatch it.
        line = ''.join(self.in_buffer)
        self.in_buffer = []
        if self.next_response:
            self.push(self.next_response)
            self.next_response = ''
        cmd = line.split(' ')[0].lower()
        self.last_received_cmd = cmd
        space = line.find(' ')
        if space != -1:
            arg = line[space + 1:]
        else:
            arg = ""
        if hasattr(self, 'cmd_' + cmd):
            method = getattr(self, 'cmd_' + cmd)
            method(arg)
        else:
            self.push('550 command "%s" not understood.' %cmd)

    def handle_error(self):
        # Re-raise so errors surface in the test run.
        raise

    def push(self, data):
        # All FTP replies are CRLF-terminated.
        asynchat.async_chat.push(self, data + '\r\n')

    def cmd_port(self, arg):
        # Active mode: connect back to the host/port the client sent as
        # six comma-separated byte values.
        addr = map(int, arg.split(','))
        ip = '%d.%d.%d.%d' %tuple(addr[:4])
        port = (addr[4] * 256) + addr[5]
        s = socket.create_connection((ip, port), timeout=10)
        self.dtp = self.dtp_handler(s, baseclass=self)
        self.push('200 active data connection established')

    def cmd_pasv(self, arg):
        # Passive mode: listen on an ephemeral port and report it to the
        # client in the 227 reply.
        sock = socket.socket()
        sock.bind((self.socket.getsockname()[0], 0))
        sock.listen(5)
        sock.settimeout(10)
        ip, port = sock.getsockname()[:2]
        ip = ip.replace('.', ',')
        p1, p2 = divmod(port, 256)
        self.push('227 entering passive mode (%s,%d,%d)' %(ip, p1, p2))
        conn, addr = sock.accept()
        self.dtp = self.dtp_handler(conn, baseclass=self)

    def cmd_eprt(self, arg):
        # Extended active mode (RFC 2428), used for IPv6. The argument is
        # delimiter-separated: |af|ip|port|.
        af, ip, port = arg.split(arg[0])[1:-1]
        port = int(port)
        s = socket.create_connection((ip, port), timeout=10)
        self.dtp = self.dtp_handler(s, baseclass=self)
        self.push('200 active data connection established')

    def cmd_epsv(self, arg):
        # Extended passive mode (RFC 2428), used for IPv6.
        sock = socket.socket(socket.AF_INET6)
        sock.bind((self.socket.getsockname()[0], 0))
        sock.listen(5)
        sock.settimeout(10)
        port = sock.getsockname()[1]
        self.push('229 entering extended passive mode (|||%d|)' %port)
        conn, addr = sock.accept()
        self.dtp = self.dtp_handler(conn, baseclass=self)

    def cmd_echo(self, arg):
        # sends back the received string (used by the test suite)
        self.push(arg)

    def cmd_user(self, arg):
        self.push('331 username ok')

    def cmd_pass(self, arg):
        self.push('230 password ok')

    def cmd_acct(self, arg):
        self.push('230 acct ok')

    def cmd_rnfr(self, arg):
        self.push('350 rnfr ok')

    def cmd_rnto(self, arg):
        self.push('250 rnto ok')

    def cmd_dele(self, arg):
        self.push('250 dele ok')

    def cmd_cwd(self, arg):
        self.push('250 cwd ok')

    def cmd_size(self, arg):
        self.push('250 1000')

    def cmd_mkd(self, arg):
        self.push('257 "%s"' %arg)

    def cmd_rmd(self, arg):
        self.push('250 rmd ok')

    def cmd_pwd(self, arg):
        self.push('257 "pwd ok"')

    def cmd_type(self, arg):
        self.push('200 type ok')

    def cmd_quit(self, arg):
        self.push('221 quit ok')
        self.close()

    def cmd_stor(self, arg):
        self.push('125 stor ok')

    def cmd_rest(self, arg):
        # Remember the restart offset for the next RETR.
        self.rest = arg
        self.push('350 rest ok')

    def cmd_retr(self, arg):
        self.push('125 retr ok')
        # Honor a preceding REST command by starting at its offset.
        if self.rest is not None:
            offset = int(self.rest)
        else:
            offset = 0
        self.dtp.push(self.next_retr_data[offset:])
        self.dtp.close_when_done()
        self.rest = None

    def cmd_list(self, arg):
        self.push('125 list ok')
        self.dtp.push(LIST_DATA)
        self.dtp.close_when_done()

    def cmd_nlst(self, arg):
        self.push('125 nlst ok')
        self.dtp.push(NLST_DATA)
        self.dtp.close_when_done()

    def cmd_setlongretr(self, arg):
        # For testing. Next RETR will return long line.
        self.next_retr_data = 'x' * int(arg)
        self.push('125 setlongretr ok')
class DummyFTPServer(asyncore.dispatcher, threading.Thread):
    """Asyncore-based dummy FTP server that runs in its own thread."""

    handler = DummyFTPHandler

    def __init__(self, address, af=socket.AF_INET):
        threading.Thread.__init__(self)
        asyncore.dispatcher.__init__(self)
        self.create_socket(af, socket.SOCK_STREAM)
        self.bind(address)
        self.listen(5)
        self.active = False
        self.active_lock = threading.Lock()
        # Record the actual (host, port) the kernel assigned.
        self.host, self.port = self.socket.getsockname()[:2]

    def start(self):
        assert not self.active
        self.__flag = threading.Event()
        threading.Thread.start(self)
        # Block until the polling loop has actually started.
        self.__flag.wait()

    def run(self):
        self.active = True
        self.__flag.set()
        while self.active and asyncore.socket_map:
            # The lock lets other threads interleave with the poll loop.
            self.active_lock.acquire()
            asyncore.loop(timeout=0.1, count=1)
            self.active_lock.release()
        asyncore.close_all(ignore_all=True)

    def stop(self):
        assert self.active
        self.active = False
        # Wait for run() to drain and close all channels.
        self.join()

    def handle_accept(self):
        conn, addr = self.accept()
        # Replace the handler class attribute with the live handler
        # instance so tests can reach it as `server.handler`.
        self.handler = self.handler(conn)
        self.close()

    def handle_connect(self):
        self.close()
    handle_read = handle_connect

    def writable(self):
        # Listening socket: never interested in write events.
        return 0

    def handle_error(self):
        raise
if ssl is not None:
CERTFILE = os.path.join(os.path.dirname(__file__), "keycert.pem")
    class SSLConnection(object, asyncore.dispatcher):
        """An asyncore.dispatcher subclass supporting TLS/SSL."""

        # Handshake in progress / shutdown in progress flags.
        _ssl_accepting = False
        _ssl_closing = False

        def secure_connection(self):
            # Wrap the existing socket; the handshake is driven
            # incrementally from the read/write event handlers below.
            self.socket = ssl.wrap_socket(self.socket, suppress_ragged_eofs=False,
                                          certfile=CERTFILE, server_side=True,
                                          do_handshake_on_connect=False,
                                          ssl_version=ssl.PROTOCOL_SSLv23)
            self._ssl_accepting = True

        def _do_ssl_handshake(self):
            try:
                self.socket.do_handshake()
            except ssl.SSLError, err:
                # WANT_READ/WANT_WRITE: retry on the next event.
                if err.args[0] in (ssl.SSL_ERROR_WANT_READ,
                                   ssl.SSL_ERROR_WANT_WRITE):
                    return
                elif err.args[0] == ssl.SSL_ERROR_EOF:
                    return self.handle_close()
                raise
            except socket.error, err:
                if err.args[0] == errno.ECONNABORTED:
                    return self.handle_close()
            else:
                # Handshake completed successfully.
                self._ssl_accepting = False

        def _do_ssl_shutdown(self):
            self._ssl_closing = True
            try:
                # unwrap() returns the underlying plain socket.
                self.socket = self.socket.unwrap()
            except ssl.SSLError, err:
                if err.args[0] in (ssl.SSL_ERROR_WANT_READ,
                                   ssl.SSL_ERROR_WANT_WRITE):
                    return
            except socket.error, err:
                # Any "socket error" corresponds to a SSL_ERROR_SYSCALL return
                # from OpenSSL's SSL_shutdown(), corresponding to a
                # closed socket condition. See also:
                # http://www.mail-archive.com/openssl-users@openssl.org/msg60710.html
                pass
            self._ssl_closing = False
            super(SSLConnection, self).close()

        def handle_read_event(self):
            # Route events into the handshake/shutdown state machines
            # until the channel is fully established or torn down.
            if self._ssl_accepting:
                self._do_ssl_handshake()
            elif self._ssl_closing:
                self._do_ssl_shutdown()
            else:
                super(SSLConnection, self).handle_read_event()

        def handle_write_event(self):
            if self._ssl_accepting:
                self._do_ssl_handshake()
            elif self._ssl_closing:
                self._do_ssl_shutdown()
            else:
                super(SSLConnection, self).handle_write_event()

        def send(self, data):
            try:
                return super(SSLConnection, self).send(data)
            except ssl.SSLError, err:
                # Treat transient/terminal TLS conditions as "0 bytes sent".
                if err.args[0] in (ssl.SSL_ERROR_EOF, ssl.SSL_ERROR_ZERO_RETURN,
                                   ssl.SSL_ERROR_WANT_READ,
                                   ssl.SSL_ERROR_WANT_WRITE):
                    return 0
                raise

        def recv(self, buffer_size):
            try:
                return super(SSLConnection, self).recv(buffer_size)
            except ssl.SSLError, err:
                if err.args[0] in (ssl.SSL_ERROR_WANT_READ,
                                   ssl.SSL_ERROR_WANT_WRITE):
                    return ''
                if err.args[0] in (ssl.SSL_ERROR_EOF, ssl.SSL_ERROR_ZERO_RETURN):
                    self.handle_close()
                    return ''
                raise

        def handle_error(self):
            raise

        def close(self):
            # Perform an orderly TLS shutdown before closing the socket.
            if (isinstance(self.socket, ssl.SSLSocket) and
                    self.socket._sslobj is not None):
                self._do_ssl_shutdown()
    class DummyTLS_DTPHandler(SSLConnection, DummyDTPHandler):
        """A DummyDTPHandler subclass supporting TLS/SSL."""

        def __init__(self, conn, baseclass):
            DummyDTPHandler.__init__(self, conn, baseclass)
            # Secure the data channel only when PROT P is in effect on
            # the control connection.
            if self.baseclass.secure_data_channel:
                self.secure_connection()
    class DummyTLS_FTPHandler(SSLConnection, DummyFTPHandler):
        """A DummyFTPHandler subclass supporting TLS/SSL."""

        dtp_handler = DummyTLS_DTPHandler

        def __init__(self, conn):
            DummyFTPHandler.__init__(self, conn)
            # Whether future data connections should be TLS-wrapped.
            self.secure_data_channel = False

        def cmd_auth(self, line):
            """Set up secure control channel."""
            self.push('234 AUTH TLS successful')
            self.secure_connection()

        def cmd_pbsz(self, line):
            """Negotiate size of buffer for secure data transfer.

            For TLS/SSL the only valid value for the parameter is '0'.
            Any other value is accepted but ignored.
            """
            self.push('200 PBSZ=0 successful.')

        def cmd_prot(self, line):
            """Setup un/secure data channel."""
            arg = line.upper()
            if arg == 'C':
                self.push('200 Protection set to Clear')
                self.secure_data_channel = False
            elif arg == 'P':
                self.push('200 Protection set to Private')
                self.secure_data_channel = True
            else:
                self.push("502 Unrecognized PROT type (use C or P).")
    class DummyTLS_FTPServer(DummyFTPServer):
        # Same dummy server, but with the TLS-capable control handler.
        handler = DummyTLS_FTPHandler
class TestFTPClass(TestCase):
    """
    Tests for ftplib.FTP against the dummy asyncore-based server.

    Each test gets a fresh server/client pair from setUp/tearDown.
    Note: the original file defined `test_mkd` twice with identical
    bodies; the duplicate (which silently shadowed the first) has been
    removed.
    """

    def setUp(self):
        self.server = DummyFTPServer((HOST, 0))
        self.server.start()
        self.client = ftplib.FTP(timeout=10)
        self.client.connect(self.server.host, self.server.port)

    def tearDown(self):
        self.client.close()
        self.server.stop()

    def test_getwelcome(self):
        self.assertEqual(self.client.getwelcome(), '220 welcome')

    def test_sanitize(self):
        self.assertEqual(self.client.sanitize('foo'), repr('foo'))
        self.assertEqual(self.client.sanitize('pass 12345'), repr('pass *****'))
        self.assertEqual(self.client.sanitize('PASS 12345'), repr('PASS *****'))

    def test_exceptions(self):
        # Reply-code ranges map to specific ftplib exception classes.
        self.assertRaises(ftplib.error_temp, self.client.sendcmd, 'echo 400')
        self.assertRaises(ftplib.error_temp, self.client.sendcmd, 'echo 499')
        self.assertRaises(ftplib.error_perm, self.client.sendcmd, 'echo 500')
        self.assertRaises(ftplib.error_perm, self.client.sendcmd, 'echo 599')
        self.assertRaises(ftplib.error_proto, self.client.sendcmd, 'echo 999')

    def test_all_errors(self):
        exceptions = (ftplib.error_reply, ftplib.error_temp, ftplib.error_perm,
                      ftplib.error_proto, ftplib.Error, IOError, EOFError)
        for x in exceptions:
            try:
                raise x('exception not included in all_errors set')
            except ftplib.all_errors:
                pass

    def test_set_pasv(self):
        # passive mode is supposed to be enabled by default
        self.assertTrue(self.client.passiveserver)
        self.client.set_pasv(True)
        self.assertTrue(self.client.passiveserver)
        self.client.set_pasv(False)
        self.assertFalse(self.client.passiveserver)

    def test_voidcmd(self):
        self.client.voidcmd('echo 200')
        self.client.voidcmd('echo 299')
        self.assertRaises(ftplib.error_reply, self.client.voidcmd, 'echo 199')
        self.assertRaises(ftplib.error_reply, self.client.voidcmd, 'echo 300')

    def test_login(self):
        self.client.login()

    def test_acct(self):
        self.client.acct('passwd')

    def test_rename(self):
        self.client.rename('a', 'b')
        # An unexpected reply code to RNFR must raise error_reply.
        self.server.handler.next_response = '200'
        self.assertRaises(ftplib.error_reply, self.client.rename, 'a', 'b')

    def test_delete(self):
        self.client.delete('foo')
        self.server.handler.next_response = '199'
        self.assertRaises(ftplib.error_reply, self.client.delete, 'foo')

    def test_size(self):
        self.client.size('foo')

    def test_rmd(self):
        self.client.rmd('foo')

    def test_cwd(self):
        dir = self.client.cwd('/foo')
        self.assertEqual(dir, '250 cwd ok')

    def test_mkd(self):
        dir = self.client.mkd('/foo')
        self.assertEqual(dir, '/foo')

    def test_pwd(self):
        dir = self.client.pwd()
        self.assertEqual(dir, 'pwd ok')

    def test_quit(self):
        self.assertEqual(self.client.quit(), '221 quit ok')
        # Ensure the connection gets closed; sock attribute should be None
        self.assertEqual(self.client.sock, None)

    def test_retrbinary(self):
        received = []
        self.client.retrbinary('retr', received.append)
        self.assertEqual(''.join(received), RETR_DATA)

    def test_retrbinary_rest(self):
        # REST must make the transfer resume at the given offset.
        for rest in (0, 10, 20):
            received = []
            self.client.retrbinary('retr', received.append, rest=rest)
            self.assertEqual(''.join(received), RETR_DATA[rest:],
                             msg='rest test case %d %d %d' % (rest,
                                                              len(''.join(received)),
                                                              len(RETR_DATA[rest:])))

    def test_retrlines(self):
        received = []
        self.client.retrlines('retr', received.append)
        self.assertEqual(''.join(received), RETR_DATA.replace('\r\n', ''))

    def test_storbinary(self):
        f = StringIO.StringIO(RETR_DATA)
        self.client.storbinary('stor', f)
        self.assertEqual(self.server.handler.last_received_data, RETR_DATA)
        # test new callback arg
        flag = []
        f.seek(0)
        self.client.storbinary('stor', f, callback=lambda x: flag.append(None))
        self.assertTrue(flag)

    def test_storbinary_rest(self):
        # rest may be given as an int or a string.
        f = StringIO.StringIO(RETR_DATA)
        for r in (30, '30'):
            f.seek(0)
            self.client.storbinary('stor', f, rest=r)
            self.assertEqual(self.server.handler.rest, str(r))

    def test_storlines(self):
        f = StringIO.StringIO(RETR_DATA.replace('\r\n', '\n'))
        self.client.storlines('stor', f)
        self.assertEqual(self.server.handler.last_received_data, RETR_DATA)
        # test new callback arg
        flag = []
        f.seek(0)
        self.client.storlines('stor foo', f, callback=lambda x: flag.append(None))
        self.assertTrue(flag)

    def test_nlst(self):
        self.client.nlst()
        self.assertEqual(self.client.nlst(), NLST_DATA.split('\r\n')[:-1])

    def test_dir(self):
        l = []
        self.client.dir(lambda x: l.append(x))
        self.assertEqual(''.join(l), LIST_DATA.replace('\r\n', ''))

    def test_makeport(self):
        self.client.makeport()
        # IPv4 is in use, just make sure send_eprt has not been used
        self.assertEqual(self.server.handler.last_received_cmd, 'port')

    def test_makepasv(self):
        host, port = self.client.makepasv()
        conn = socket.create_connection((host, port), 10)
        conn.close()
        # IPv4 is in use, just make sure send_epsv has not been used
        self.assertEqual(self.server.handler.last_received_cmd, 'pasv')

    def test_line_too_long(self):
        self.assertRaises(ftplib.Error, self.client.sendcmd,
                          'x' * self.client.maxline * 2)

    def test_retrlines_too_long(self):
        self.client.sendcmd('SETLONGRETR %d' % (self.client.maxline * 2))
        received = []
        self.assertRaises(ftplib.Error,
                          self.client.retrlines, 'retr', received.append)

    def test_storlines_too_long(self):
        f = StringIO.StringIO('x' * self.client.maxline * 2)
        self.assertRaises(ftplib.Error, self.client.storlines, 'stor', f)
class TestIPv6Environment(TestCase):
    """Repeat the address/transfer tests over an IPv6 connection."""

    def setUp(self):
        self.server = DummyFTPServer((HOSTv6, 0), af=socket.AF_INET6)
        self.server.start()
        self.client = ftplib.FTP()
        self.client.connect(self.server.host, self.server.port)

    def tearDown(self):
        self.client.close()
        self.server.stop()

    def test_af(self):
        self.assertEqual(self.client.af, socket.AF_INET6)

    def test_makeport(self):
        # Over IPv6 the client must use EPRT instead of PORT.
        self.client.makeport()
        self.assertEqual(self.server.handler.last_received_cmd, 'eprt')

    def test_makepasv(self):
        # Over IPv6 the client must use EPSV instead of PASV.
        host, port = self.client.makepasv()
        conn = socket.create_connection((host, port), 10)
        conn.close()
        self.assertEqual(self.server.handler.last_received_cmd, 'epsv')

    def test_transfer(self):
        def retr():
            received = []
            self.client.retrbinary('retr', received.append)
            self.assertEqual(''.join(received), RETR_DATA)
        # Exercise both passive and active transfer modes.
        self.client.set_pasv(True)
        retr()
        self.client.set_pasv(False)
        retr()
class TestTLS_FTPClassMixin(TestFTPClass):
    """Repeat TestFTPClass tests starting the TLS layer for both control
    and data connections first.
    """

    def setUp(self):
        self.server = DummyTLS_FTPServer((HOST, 0))
        self.server.start()
        self.client = ftplib.FTP_TLS(timeout=10)
        self.client.connect(self.server.host, self.server.port)
        # enable TLS on the control (AUTH) and data (PROT P) channels
        self.client.auth()
        self.client.prot_p()
class TestTLS_FTPClass(TestCase):
    """Specific TLS_FTP class tests."""

    def setUp(self):
        self.server = DummyTLS_FTPServer((HOST, 0))
        self.server.start()
        self.client = ftplib.FTP_TLS(timeout=10)
        self.client.connect(self.server.host, self.server.port)

    def tearDown(self):
        self.client.close()
        self.server.stop()

    def test_control_connection(self):
        # auth() must upgrade the control socket to TLS.
        self.assertNotIsInstance(self.client.sock, ssl.SSLSocket)
        self.client.auth()
        self.assertIsInstance(self.client.sock, ssl.SSLSocket)

    def test_data_connection(self):
        # clear text
        sock = self.client.transfercmd('list')
        self.assertNotIsInstance(sock, ssl.SSLSocket)
        sock.close()
        self.assertEqual(self.client.voidresp(), "226 transfer complete")

        # secured, after PROT P
        self.client.prot_p()
        sock = self.client.transfercmd('list')
        self.assertIsInstance(sock, ssl.SSLSocket)
        sock.close()
        self.assertEqual(self.client.voidresp(), "226 transfer complete")

        # PROT C is issued, the connection must be in cleartext again
        self.client.prot_c()
        sock = self.client.transfercmd('list')
        self.assertNotIsInstance(sock, ssl.SSLSocket)
        sock.close()
        self.assertEqual(self.client.voidresp(), "226 transfer complete")

    def test_login(self):
        # login() is supposed to implicitly secure the control connection
        self.assertNotIsInstance(self.client.sock, ssl.SSLSocket)
        self.client.login()
        self.assertIsInstance(self.client.sock, ssl.SSLSocket)
        # make sure that AUTH TLS doesn't get issued again
        self.client.login()

    def test_auth_issued_twice(self):
        self.client.auth()
        self.assertRaises(ValueError, self.client.auth)

    def test_auth_ssl(self):
        # auth() must also work with an SSLv3 protocol setting.
        try:
            self.client.ssl_version = ssl.PROTOCOL_SSLv3
            self.client.auth()
            self.assertRaises(ValueError, self.client.auth)
        finally:
            self.client.ssl_version = ssl.PROTOCOL_TLSv1
class TestTimeouts(TestCase):
    """Tests for the timeout parameter of FTP() and FTP.connect()."""

    def setUp(self):
        self.evt = threading.Event()
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.sock.settimeout(10)
        self.port = test_support.bind_port(self.sock)
        threading.Thread(target=self.server, args=(self.evt,self.sock)).start()
        # Wait for the server to be ready.
        self.evt.wait()
        self.evt.clear()
        # Point the FTP class at the throwaway server's port.
        ftplib.FTP.port = self.port

    def tearDown(self):
        self.evt.wait()

    def server(self, evt, serv):
        # This method sets the evt 3 times:
        # 1) when the connection is ready to be accepted.
        # 2) when it is safe for the caller to close the connection
        # 3) when we have closed the socket
        serv.listen(5)
        # (1) Signal the caller that we are ready to accept the connection.
        evt.set()
        try:
            conn, addr = serv.accept()
        except socket.timeout:
            pass
        else:
            conn.send("1 Hola mundo\n")
            # (2) Signal the caller that it is safe to close the socket.
            evt.set()
            conn.close()
        finally:
            serv.close()
            # (3) Signal the caller that we are done.
            evt.set()

    def testTimeoutDefault(self):
        # default -- use global socket timeout
        self.assertTrue(socket.getdefaulttimeout() is None)
        socket.setdefaulttimeout(30)
        try:
            ftp = ftplib.FTP(HOST)
        finally:
            # Always restore the global default for other tests.
            socket.setdefaulttimeout(None)
        self.assertEqual(ftp.sock.gettimeout(), 30)
        self.evt.wait()
        ftp.close()

    def testTimeoutNone(self):
        # no timeout -- do not use global socket timeout
        self.assertTrue(socket.getdefaulttimeout() is None)
        socket.setdefaulttimeout(30)
        try:
            ftp = ftplib.FTP(HOST, timeout=None)
        finally:
            socket.setdefaulttimeout(None)
        self.assertTrue(ftp.sock.gettimeout() is None)
        self.evt.wait()
        ftp.close()

    def testTimeoutValue(self):
        # a value
        ftp = ftplib.FTP(HOST, timeout=30)
        self.assertEqual(ftp.sock.gettimeout(), 30)
        self.evt.wait()
        ftp.close()

    def testTimeoutConnect(self):
        # timeout passed to connect() rather than the constructor
        ftp = ftplib.FTP()
        ftp.connect(HOST, timeout=30)
        self.assertEqual(ftp.sock.gettimeout(), 30)
        self.evt.wait()
        ftp.close()

    def testTimeoutDifferentOrder(self):
        ftp = ftplib.FTP(timeout=30)
        ftp.connect(HOST)
        self.assertEqual(ftp.sock.gettimeout(), 30)
        self.evt.wait()
        ftp.close()

    def testTimeoutDirectAccess(self):
        # timeout set directly on the instance attribute
        ftp = ftplib.FTP()
        ftp.timeout = 30
        ftp.connect(HOST)
        self.assertEqual(ftp.sock.gettimeout(), 30)
        self.evt.wait()
        ftp.close()
def test_main():
    # Build the test case list, adding IPv6 and TLS variants only when
    # the platform supports them.
    tests = [TestFTPClass, TestTimeouts]
    if socket.has_ipv6:
        try:
            # Probe: can we actually create an IPv6 server socket here?
            DummyFTPServer((HOST, 0), af=socket.AF_INET6)
        except socket.error:
            pass
        else:
            tests.append(TestIPv6Environment)

    if ssl is not None:
        tests.extend([TestTLS_FTPClassMixin, TestTLS_FTPClass])

    thread_info = test_support.threading_setup()
    try:
        test_support.run_unittest(*tests)
    finally:
        # Ensure no dangling threads remain regardless of test outcome.
        test_support.threading_cleanup(*thread_info)
# Run the ftplib test suite when executed as a script.
if __name__ == '__main__':
    test_main()
| |
#!/usr/bin/env python
#/******************************************************************************
# * $Id$
# *
# * Project: GDAL/OGR Utilities
# * Purpose: GDAL raster to partial FDGC metadata XML file.
# *
# * Author: Trent Hare, <thare at usgs dot gov>
# * Date: Oct 10, 2011
# * version: 0.2 (proof of concept - still not well tested!)
# *
# * Port from gdalinfo.py whose author is Even Rouault and Frank Warmerdam
# *
# ******************************************************************************
# * Copyright (c) 2010, Even Rouault
# * Copyright (c) 1998, Frank Warmerdam
# *
# * Permission is hereby granted, free of charge, to any person obtaining a
# * copy of this software and associated documentation files (the "Software"),
# * to deal in the Software without restriction, including without limitation
# * the rights to use, copy, modify, merge, publish, distribute, sublicense,
# * and/or sell copies of the Software, and to permit persons to whom the
# * Software is furnished to do so, subject to the following conditions:
# *
# * The above copyright notice and this permission notice shall be included
# * in all copies or substantial portions of the Software.
# *
# * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# * DEALINGS IN THE SOFTWARE.
# ****************************************************************************/
import sys
import math
from time import strftime
try:
from osgeo import gdal
from osgeo import osr
except:
import gdal
import osr
try:
from lxml import etree
#print("running with lxml.etree")
except ImportError:
try:
# Python 2.5
import xml.etree.cElementTree as etree
print("running with cElementTree on Python 2.5+")
except ImportError:
try:
# Python 2.5
import xml.etree.ElementTree as etree
print("running with ElementTree on Python 2.5+")
except ImportError:
try:
# normal cElementTree install
import cElementTree as etree
print("running with cElementTree")
except ImportError:
try:
# normal ElementTree install
import elementtree.ElementTree as etree
print("running with ElementTree")
except ImportError:
print("Failed to import ElementTree from any known place")
#/************************************************************************/
#/* Usage() */
#/************************************************************************/
def Usage(theApp):
    """Print the gdal2metadata usage text and exit with status 1."""
    for line in (
            '\nUsage: gdal2metadata in_Geo.tif in_FGDCtemplate.xml output.xml',
            ' Optional: to print out image information also send -debug',
            'Usage: gdal2metadata -debug in_Geo.tif in_FGDCtemplate.xml output.xml\n',
            'Note: Currently this routine only supports FGDC version CSDGM - FGDC-STD-001-1998\n'):
        print(line)
    sys.exit(1)
def EQUAL(a, b):
    """Return True when strings `a` and `b` match case-insensitively."""
    lowered_a = a.lower()
    lowered_b = b.lower()
    return lowered_a == lowered_b
#/************************************************************************/
#/* main() */
#/************************************************************************/
def main( argv = None ):
    """Build FGDC CSDGM (FGDC-STD-001-1998) metadata for a georeferenced raster.

    Opens a GDAL dataset, harvests its spatial reference, resolution, extent
    and raster dimensions, fills the corresponding elements of an FGDC
    template XML, and writes the merged (sparse) XML to the output path.

    Command line:
        gdal2metadata [-debug] in_Geo.tif in_FGDCtemplate.xml output.xml

    Returns 0 on success (also for --utility_version); exits via
    ``sys.exit(1)`` on unusable input.  The -debug flag additionally prints
    gdalinfo-style diagnostics.

    NOTE(review): the etree usage (``remove_blank_text`` parser option and
    ``pretty_print`` on write) is lxml-specific, so despite the import
    fallbacks this script effectively requires lxml.
    """
    # ---------------------------------------------------------------- #
    # Option defaults (gdalinfo heritage; most only matter with -debug).
    # ---------------------------------------------------------------- #
    bComputeMinMax = False
    bSample = False
    bShowGCPs = True
    bShowMetadata = True
    bShowRAT = False
    debug = False
    bStats = False
    bApproxStats = True
    bShowColorTable = True
    bComputeChecksum = False
    bReportHistograms = False
    pszFilename = None
    papszExtraMDDomains = []
    pszProjection = None
    hTransform = None
    bShowFileList = True
    dst_xml = None
    template_xml = None
    bands = 1
    iOverview = None

    if argv is None:
        argv = sys.argv
    argv = gdal.GeneralCmdLineProcessor(argv)
    if argv is None:
        return 1
    nArgc = len(argv)

    # ---------------------------------------------------------------- #
    # Parse arguments.                                                 #
    # ---------------------------------------------------------------- #
    i = 1
    while i < nArgc:
        if EQUAL(argv[i], "--utility_version"):
            print("%s is running against GDAL %s" %
                  (argv[0], gdal.VersionInfo("RELEASE_NAME")))
            return 0
        elif EQUAL(argv[i], "-debug"):
            debug = True
        elif EQUAL(argv[i], "-mm"):
            bComputeMinMax = True
        elif EQUAL(argv[i], "-hist"):
            bReportHistograms = True
        elif EQUAL(argv[i], "-stats"):
            bStats = True
            bApproxStats = False
        elif EQUAL(argv[i], "-approx_stats"):
            bStats = True
            bApproxStats = True
        elif EQUAL(argv[i], "-sample"):
            bSample = True
        elif EQUAL(argv[i], "-checksum"):
            bComputeChecksum = True
        elif EQUAL(argv[i], "-nogcp"):
            bShowGCPs = False
        elif EQUAL(argv[i], "-nomd"):
            bShowMetadata = False
        elif EQUAL(argv[i], "-norat"):
            bShowRAT = False
        elif EQUAL(argv[i], "-noct"):
            bShowColorTable = False
        elif EQUAL(argv[i], "-mdd") and i < nArgc - 1:
            i = i + 1
            papszExtraMDDomains.append(argv[i])
        elif EQUAL(argv[i], "-nofl"):
            bShowFileList = False
        elif argv[i][0] == '-':
            return Usage(argv[0])
        elif pszFilename is None:
            pszFilename = argv[i]
        elif template_xml is None:
            template_xml = argv[i]
        elif dst_xml is None:
            dst_xml = argv[i]
        else:
            return Usage(argv[0])
        i = i + 1

    # All three positional arguments are mandatory.
    if pszFilename is None:
        return Usage(argv[0])
    if template_xml is None:
        return Usage(argv[0])
    if dst_xml is None:
        return Usage(argv[0])

    # ---------------------------------------------------------------- #
    # Open GDAL dataset.                                               #
    # ---------------------------------------------------------------- #
    hDataset = gdal.Open(pszFilename, gdal.GA_ReadOnly)
    if hDataset is None:
        print("gdalinfo failed - unable to open '%s'." % pszFilename)
        sys.exit(1)

    # ---------------------------------------------------------------- #
    # Load XML template file (generally fgdc-template.xml).            #
    # ---------------------------------------------------------------- #
    parser = etree.XMLParser(remove_blank_text=True)  # lxml-only option
    tree = etree.parse(template_xml, parser)

    # Ensure the larger-work citation carries a title; default to file name.
    # (iter() replaces the getiterator() API removed in Python 3.9.)
    for lworkcit in tree.iter('lworkcit'):
        for citeinfo in lworkcit.iter('citeinfo'):
            title = citeinfo.find('title')
            if title is None:
                title = etree.SubElement(citeinfo, 'title')
            title.text = pszFilename

    # ---------------------------------------------------------------- #
    # Report general info (debug only).                                #
    # ---------------------------------------------------------------- #
    hDriver = hDataset.GetDriver()
    if debug:
        print("Driver: %s/%s" % (hDriver.ShortName, hDriver.LongName))

    papszFileList = hDataset.GetFileList()
    if papszFileList is None or len(papszFileList) == 0:
        print("Files: none associated")
    else:
        if debug:
            print("Files: %s" % papszFileList[0])
            if bShowFileList:
                for i in range(1, len(papszFileList)):
                    print("       %s" % papszFileList[i])

    if debug:
        print("Size is %d, %d" % (hDataset.RasterXSize, hDataset.RasterYSize))

    # ---------------------------------------------------------------- #
    # Harvest the projection; abort when it cannot be parsed since the #
    # FGDC spatial reference section cannot be written without it.     #
    # ---------------------------------------------------------------- #
    pszProjection = hDataset.GetProjectionRef()
    if pszProjection is not None:
        hSRS = osr.SpatialReference()
        if hSRS.ImportFromWkt(pszProjection) == gdal.CE_None:
            pszPrettyWkt = hSRS.ExportToPrettyWkt(False)
            mapProjection = "None"

            # Datum / ellipsoid information.
            target = hSRS.GetAttrValue("DATUM", 0).replace("D_", "").replace("_2000", "")
            semiMajor = hSRS.GetSemiMajor()
            semiMinor = hSRS.GetSemiMinor()
            invFlat = hSRS.GetInvFlattening()
            # USGS MP rejects 0 inverse flattening, so represent a sphere
            # with a giant number instead.
            if invFlat < 0.1:
                invFlat = 1.0e+10

            if pszProjection[0:6] == "GEOGCS":
                mapProjection = "SIMPLE_CYLINDRICAL"
                centLat = 0
                centLon = 0

            if pszProjection[0:6] == "PROJCS":
                mapProjection = hSRS.GetAttrValue("PROJECTION", 0)
                # Rebuild <horizsys> from scratch for projected datasets.
                # NOTE(review): clear() wipes the template's children, so the
                # branches below that fill elements via tree.iter() (rather
                # than creating SubElements like the Equirectangular branch)
                # may find nothing to fill — confirm against the template.
                for horizsys in tree.iter('horizsys'):
                    horizsys.clear()
                    planar = etree.SubElement(horizsys, 'planar')
                    mapproj = etree.SubElement(planar, 'mapproj')
                    mapprojn = etree.SubElement(mapproj, 'mapprojn')

                    if EQUAL(mapProjection, "Equirectangular"):
                        mapprojn.text = "Equirectangular"
                        # GetProjParm defaults to 0.0 when absent; the None
                        # fallback is kept from the original for safety.
                        centLat = hSRS.GetProjParm('standard_parallel_1')
                        if centLat is None:
                            centLat = hSRS.GetProjParm('latitude_of_origin')
                        centLon = hSRS.GetProjParm('central_meridian')
                        equirect = etree.SubElement(mapproj, 'equirect')
                        stdparll = etree.SubElement(equirect, 'stdparll')
                        stdparll.text = str(centLat)
                        longcm = etree.SubElement(equirect, 'longcm')
                        longcm.text = str(centLon)
                        feast = etree.SubElement(equirect, 'feast')
                        feast.text = str(hSRS.GetProjParm('false_easting'))
                        fnorth = etree.SubElement(equirect, 'fnorth')
                        fnorth.text = str(hSRS.GetProjParm('false_northing'))

                    if EQUAL(mapProjection, "Mercator"):
                        for mapprojn in tree.iter('mapprojn'):
                            mapprojn.text = "Mercator"
                        centLat = hSRS.GetProjParm('latitude_of_origin')
                        if centLat is None:
                            centLat = hSRS.GetProjParm('standard_parallel_1')
                        centLon = hSRS.GetProjParm('central_meridian')
                        scale = hSRS.GetProjParm('scale_factor')
                        # BUG FIX: originally iterated 'transmer' (the FGDC
                        # Transverse Mercator element); Mercator parameters
                        # belong in <mercator>.
                        for merc in tree.iter('mercator'):
                            for stdparll in merc.iter('stdparll'):
                                stdparll.text = str(centLat)
                            for longcm in merc.iter('longcm'):
                                longcm.text = str(centLon)
                            for sfequat in merc.iter('sfequat'):
                                sfequat.text = str(scale)
                            for feast in merc.iter('feast'):
                                feast.text = str(hSRS.GetProjParm('false_easting'))
                            for fnorth in merc.iter('fnorth'):
                                fnorth.text = str(hSRS.GetProjParm('false_northing'))

                    # BUG FIX: trailing spaces in "Orthographic " and
                    # 'latitude_of_origin ' made this branch unreachable /
                    # always return the parameter default.
                    if EQUAL(mapProjection, "Orthographic"):
                        for mapprojn in tree.iter('mapprojn'):
                            mapprojn.text = "Orthographic"
                        centLat = hSRS.GetProjParm('latitude_of_origin')
                        centLon = hSRS.GetProjParm('central_meridian')
                        for orthogr in tree.iter('orthogr'):
                            for stdparll in orthogr.iter('stdparll'):
                                stdparll.text = str(centLat)
                            for longcm in orthogr.iter('longcm'):
                                longcm.text = str(centLon)
                            for feast in orthogr.iter('feast'):
                                feast.text = str(hSRS.GetProjParm('false_easting'))
                            for fnorth in orthogr.iter('fnorth'):
                                fnorth.text = str(hSRS.GetProjParm('false_northing'))

                    if EQUAL(mapProjection, "Stereographic"):
                        for mapprojn in tree.iter('mapprojn'):
                            mapprojn.text = "Stereographic"
                        centLat = hSRS.GetProjParm('latitude_of_origin')
                        centLon = hSRS.GetProjParm('central_meridian')
                        for stereo in tree.iter('stereo'):
                            for latprjc in stereo.iter('latprjc'):
                                latprjc.text = str(centLat)
                            for longpc in stereo.iter('longpc'):
                                longpc.text = str(centLon)
                            for feast in stereo.iter('feast'):
                                feast.text = str(hSRS.GetProjParm('false_easting'))
                            for fnorth in stereo.iter('fnorth'):
                                fnorth.text = str(hSRS.GetProjParm('false_northing'))

                    if EQUAL(mapProjection, "Sinusoidal"):
                        for mapprojn in tree.iter('mapprojn'):
                            mapprojn.text = "Sinusoidal"
                        centLon = hSRS.GetProjParm('longitude_of_center')
                        if centLon is None:
                            centLon = hSRS.GetProjParm('central_meridian')
                        for sinusoid in tree.iter('sinusoid'):
                            for longcm in sinusoid.iter('longcm'):
                                longcm.text = str(centLon)
                            for feast in sinusoid.iter('feast'):
                                feast.text = str(hSRS.GetProjParm('false_easting'))
                            for fnorth in sinusoid.iter('fnorth'):
                                fnorth.text = str(hSRS.GetProjParm('false_northing'))

                    if EQUAL(mapProjection, "Robinson"):
                        for mapprojn in tree.iter('mapprojn'):
                            mapprojn.text = "Robinson"
                        centLon = hSRS.GetProjParm('longitude_of_center')
                        if centLon is None:
                            centLon = hSRS.GetProjParm('central_meridian')
                        for robinson in tree.iter('robinson'):
                            for longpc in robinson.iter('longpc'):
                                longpc.text = str(centLon)
                            for feast in robinson.iter('feast'):
                                feast.text = str(hSRS.GetProjParm('false_easting'))
                            for fnorth in robinson.iter('fnorth'):
                                fnorth.text = str(hSRS.GetProjParm('false_northing'))

                    if (EQUAL(mapProjection, "Polar_Stereographic") or
                            EQUAL(mapProjection, "Stereographic_North_Pole") or
                            EQUAL(mapProjection, "Stereographic_South_Pole")):
                        for mapprojn in tree.iter('mapprojn'):
                            mapprojn.text = "Polar Stereographic"
                        centLat = hSRS.GetProjParm('latitude_of_origin')
                        centLon = hSRS.GetProjParm('central_meridian')
                        scale = hSRS.GetProjParm('scale_factor')
                        for polarst in tree.iter('polarst'):
                            for stdparll in polarst.iter('stdparll'):
                                stdparll.text = str(centLat)
                            for svlong in polarst.iter('svlong'):
                                svlong.text = str(centLon)
                            for sfprjorg in polarst.iter('sfprjorg'):
                                sfprjorg.text = str(scale)
                            for feast in polarst.iter('feast'):
                                feast.text = str(hSRS.GetProjParm('false_easting'))
                            for fnorth in polarst.iter('fnorth'):
                                fnorth.text = str(hSRS.GetProjParm('false_northing'))

                    if EQUAL(mapProjection, "Transverse_Mercator"):
                        for mapprojn in tree.iter('mapprojn'):
                            mapprojn.text = "Transverse Mercator"
                        centLat = hSRS.GetProjParm('latitude_of_origin')
                        centLon = hSRS.GetProjParm('central_meridian')
                        scale = hSRS.GetProjParm('scale_factor')
                        for transmer in tree.iter('transmer'):
                            for latprjo in transmer.iter('latprjo'):
                                latprjo.text = str(centLat)
                            for longcm in transmer.iter('longcm'):
                                longcm.text = str(centLon)
                            for sfctrmer in transmer.iter('sfctrmer'):
                                sfctrmer.text = str(scale)
                            for feast in transmer.iter('feast'):
                                feast.text = str(hSRS.GetProjParm('false_easting'))
                            for fnorth in transmer.iter('fnorth'):
                                fnorth.text = str(hSRS.GetProjParm('false_northing'))

                    # Cell-size block, common to all projections; absres /
                    # ordres / plandu are filled in further below.
                    planci = etree.SubElement(planar, 'planci')
                    plance = etree.SubElement(planci, 'plance')
                    plance.text = 'row and column'
                    coordrep = etree.SubElement(planci, 'coordrep')
                    absres = etree.SubElement(coordrep, 'absres')
                    ordres = etree.SubElement(coordrep, 'ordres')
                    plandu = etree.SubElement(planci, 'plandu')

            if debug:
                print("Coordinate System is:\n%s" % pszPrettyWkt)
        else:
            print("Warning - Can't parse this type of projection\n")
            print("Coordinate System is `%s'" % pszProjection)
            sys.exit(1)
    else:
        print("Warning - No Coordinate System defined:\n")
        sys.exit(1)

    # ---------------------------------------------------------------- #
    # Geotransform -> resolutions used in the FGDC output.              #
    # ---------------------------------------------------------------- #
    adfGeoTransform = hDataset.GetGeoTransform(can_return_null=True)
    if adfGeoTransform is not None:
        if adfGeoTransform[2] == 0.0 and adfGeoTransform[4] == 0.0:
            if debug:
                print("Origin = (%.15f,%.15f)" %
                      (adfGeoTransform[0], adfGeoTransform[3]))
                print("Pixel Size = (%.15f,%.15f)" %
                      (adfGeoTransform[1], adfGeoTransform[5]))
        else:
            if debug:
                print("GeoTransform =\n"
                      "  %.16g, %.16g, %.16g\n"
                      "  %.16g, %.16g, %.16g" % (
                          adfGeoTransform[0], adfGeoTransform[1],
                          adfGeoTransform[2], adfGeoTransform[3],
                          adfGeoTransform[4], adfGeoTransform[5]))
        if pszProjection[0:6] == "GEOGCS":
            # Convert degrees/pixel to pixels/degree and km/pixel.
            mapres = 1 / adfGeoTransform[1]
            lonres = adfGeoTransform[1]
            latres = adfGeoTransform[5]
            kmres = adfGeoTransform[1] * (semiMajor * math.pi / 180.0)
        else:
            # Convert meters/pixel to pixels/degree.
            # NOTE(review): GetSemiMajor() already returns meters, so the
            # extra *1000.0 looks suspect — kept from the original; confirm.
            mapres = 1 / (adfGeoTransform[1] / (semiMajor * 1000.0 * math.pi / 180.0))
            lonres = adfGeoTransform[1] / (semiMajor * 1000.0 * math.pi / 180.0)
            latres = adfGeoTransform[5] / (semiMajor * 1000.0 * math.pi / 180.0)
            xres = adfGeoTransform[1]
            yres = adfGeoTransform[5]
            kmres = adfGeoTransform[1] / 1000.0

    # ---------------------------------------------------------------- #
    # Report GCPs (debug only).                                        #
    # ---------------------------------------------------------------- #
    if bShowGCPs and hDataset.GetGCPCount() > 0:
        pszProjection = hDataset.GetGCPProjection()
        if pszProjection is not None:
            hSRS = osr.SpatialReference()
            if hSRS.ImportFromWkt(pszProjection) == gdal.CE_None:
                pszPrettyWkt = hSRS.ExportToPrettyWkt(False)
                if debug:
                    print("GCP Projection = \n%s" % pszPrettyWkt)
            else:
                if debug:
                    print("GCP Projection = %s" % pszProjection)
        gcps = hDataset.GetGCPs()
        i = 0
        for gcp in gcps:
            if debug:
                print("GCP[%3d]: Id=%s, Info=%s\n"
                      "          (%.15g,%.15g) -> (%.15g,%.15g,%.15g)" % (
                          i, gcp.Id, gcp.Info,
                          gcp.GCPPixel, gcp.GCPLine,
                          gcp.GCPX, gcp.GCPY, gcp.GCPZ))
            i = i + 1

    # ---------------------------------------------------------------- #
    # Report metadata domains (debug only).                            #
    # ---------------------------------------------------------------- #
    if debug:
        if bShowMetadata:
            papszMetadata = hDataset.GetMetadata_List()
        else:
            papszMetadata = None
        if bShowMetadata and papszMetadata is not None and len(papszMetadata) > 0:
            print("Metadata:")
            for metadata in papszMetadata:
                print("  %s" % metadata)
        if bShowMetadata:
            for extra_domain in papszExtraMDDomains:
                papszMetadata = hDataset.GetMetadata_List(extra_domain)
                if papszMetadata is not None and len(papszMetadata) > 0:
                    print("Metadata (%s):" % extra_domain)
                    for metadata in papszMetadata:
                        print("  %s" % metadata)
        if bShowMetadata:
            papszMetadata = hDataset.GetMetadata_List("IMAGE_STRUCTURE")
        else:
            papszMetadata = None
        if bShowMetadata and papszMetadata is not None and len(papszMetadata) > 0:
            print("Image Structure Metadata:")
            for metadata in papszMetadata:
                print("  %s" % metadata)
        papszMetadata = hDataset.GetMetadata_List("SUBDATASETS")
        if papszMetadata is not None and len(papszMetadata) > 0:
            print("Subdatasets:")
            for metadata in papszMetadata:
                print("  %s" % metadata)
        if bShowMetadata:
            papszMetadata = hDataset.GetMetadata_List("GEOLOCATION")
        else:
            papszMetadata = None
        if bShowMetadata and papszMetadata is not None and len(papszMetadata) > 0:
            print("Geolocation:")
            for metadata in papszMetadata:
                print("  %s" % metadata)
        if bShowMetadata:
            papszMetadata = hDataset.GetMetadata_List("RPC")
        else:
            papszMetadata = None
        if bShowMetadata and papszMetadata is not None and len(papszMetadata) > 0:
            print("RPC Metadata:")
            for metadata in papszMetadata:
                print("  %s" % metadata)

    # ---------------------------------------------------------------- #
    # Setup projected-to-lat/long transform if appropriate.            #
    # ---------------------------------------------------------------- #
    if pszProjection is not None and len(pszProjection) > 0:
        hProj = osr.SpatialReference(pszProjection)
        if hProj is not None:
            hLatLong = hProj.CloneGeogCS()
        if hLatLong is not None:
            # Silence errors from missing PROJ support while probing.
            gdal.PushErrorHandler('CPLQuietErrorHandler')
            hTransform = osr.CoordinateTransformation(hProj, hLatLong)
            gdal.PopErrorHandler()
            if gdal.GetLastErrorMsg().find('Unable to load PROJ.4 library') != -1:
                hTransform = None

    # ---------------------------------------------------------------- #
    # Report corners (debug only) and harvest bounding coordinates.    #
    # ---------------------------------------------------------------- #
    if debug:
        print("Corner Coordinates:")
        GDALInfoReportCorner(hDataset, hTransform, "Upper Left", 0.0, 0.0)
        GDALInfoReportCorner(hDataset, hTransform, "Lower Left",
                             0.0, hDataset.RasterYSize)
        GDALInfoReportCorner(hDataset, hTransform, "Upper Right",
                             hDataset.RasterXSize, 0.0)
        GDALInfoReportCorner(hDataset, hTransform, "Lower Right",
                             hDataset.RasterXSize, hDataset.RasterYSize)
        GDALInfoReportCorner(hDataset, hTransform, "Center",
                             hDataset.RasterXSize / 2.0,
                             hDataset.RasterYSize / 2.0)

    # Bounding coordinates (always needed for the FGDC spatial domain).
    ulx = GDALGetLon(hDataset, hTransform, 0.0, 0.0)
    uly = GDALGetLat(hDataset, hTransform, 0.0, 0.0)
    lrx = GDALGetLon(hDataset, hTransform,
                     hDataset.RasterXSize, hDataset.RasterYSize)
    lry = GDALGetLat(hDataset, hTransform,
                     hDataset.RasterXSize, hDataset.RasterYSize)
    # FGDC validation wants longitudes in -180..180, not 0..360.
    if lrx > 179.99:
        lrx = lrx - 360
    if ulx > 179.99:
        ulx = ulx - 360

    # ================================================================ #
    # Loop over bands (debug reporting only).                          #
    # ================================================================ #
    if debug:
        bands = hDataset.RasterCount
        for iBand in range(hDataset.RasterCount):
            hBand = hDataset.GetRasterBand(iBand + 1)
            (nBlockXSize, nBlockYSize) = hBand.GetBlockSize()
            print("Band %d Block=%dx%d Type=%s, ColorInterp=%s" % (
                iBand + 1, nBlockXSize, nBlockYSize,
                gdal.GetDataTypeName(hBand.DataType),
                gdal.GetColorInterpretationName(
                    hBand.GetRasterColorInterpretation())))

            if hBand.GetDescription() is not None \
                    and len(hBand.GetDescription()) > 0:
                print("  Description = %s" % hBand.GetDescription())

            dfMin = hBand.GetMinimum()
            dfMax = hBand.GetMaximum()
            if dfMin is not None or dfMax is not None or bComputeMinMax:
                line = "  "
                if dfMin is not None:
                    line = line + ("Min=%.3f " % dfMin)
                if dfMax is not None:
                    line = line + ("Max=%.3f " % dfMax)
                if bComputeMinMax:
                    gdal.ErrorReset()
                    adfCMinMax = hBand.ComputeRasterMinMax(False)
                    if gdal.GetLastErrorType() == gdal.CE_None:
                        line = line + ("  Computed Min/Max=%.3f,%.3f" % (
                            adfCMinMax[0], adfCMinMax[1]))
                print(line)

            stats = hBand.GetStatistics(bApproxStats, bStats)
            # Dirty hack to recognize if stats are valid.  If invalid, the
            # returned stddev is negative.
            if stats[3] >= 0.0:
                print("  Minimum=%.3f, Maximum=%.3f, Mean=%.3f, StdDev=%.3f" % (
                    stats[0], stats[1], stats[2], stats[3]))

            if bReportHistograms:
                hist = hBand.GetDefaultHistogram(force=True,
                                                 callback=gdal.TermProgress)
                if hist is not None:
                    dfMin = hist[0]
                    dfMax = hist[1]
                    nBucketCount = hist[2]
                    panHistogram = hist[3]
                    print("  %d buckets from %g to %g:" % (
                        nBucketCount, dfMin, dfMax))
                    line = '  '
                    for bucket in panHistogram:
                        line = line + ("%d " % bucket)
                    print(line)

            if bComputeChecksum:
                print("  Checksum=%d" % hBand.Checksum())

            dfNoData = hBand.GetNoDataValue()
            if dfNoData is not None:
                if dfNoData != dfNoData:  # NaN compares unequal to itself
                    print("  NoData Value=nan")
                else:
                    print("  NoData Value=%.18g" % dfNoData)

            if hBand.GetOverviewCount() > 0:
                line = "  Overviews: "
                for iOverview in range(hBand.GetOverviewCount()):
                    if iOverview != 0:
                        line = line + ", "
                    hOverview = hBand.GetOverview(iOverview)
                    if hOverview is not None:
                        line = line + ("%dx%d" % (hOverview.XSize,
                                                  hOverview.YSize))
                        pszResampling = hOverview.GetMetadataItem(
                            "RESAMPLING", "")
                        if pszResampling is not None \
                                and len(pszResampling) >= 12 \
                                and EQUAL(pszResampling[0:12], "AVERAGE_BIT2"):
                            line = line + "*"
                    else:
                        line = line + "(null)"
                print(line)
                if bComputeChecksum:
                    line = "  Overviews checksum: "
                    for iOverview in range(hBand.GetOverviewCount()):
                        if iOverview != 0:
                            line = line + ", "
                        hOverview = hBand.GetOverview(iOverview)
                        if hOverview is not None:
                            line = line + ("%d" % hOverview.Checksum())
                        else:
                            line = line + "(null)"
                    print(line)

            if hBand.HasArbitraryOverviews():
                print("  Overviews: arbitrary")

            nMaskFlags = hBand.GetMaskFlags()
            if (nMaskFlags & (gdal.GMF_NODATA | gdal.GMF_ALL_VALID)) == 0:
                hMaskBand = hBand.GetMaskBand()
                line = "  Mask Flags: "
                if (nMaskFlags & gdal.GMF_PER_DATASET) != 0:
                    line = line + "PER_DATASET "
                if (nMaskFlags & gdal.GMF_ALPHA) != 0:
                    line = line + "ALPHA "
                if (nMaskFlags & gdal.GMF_NODATA) != 0:
                    line = line + "NODATA "
                if (nMaskFlags & gdal.GMF_ALL_VALID) != 0:
                    line = line + "ALL_VALID "
                print(line)
                if hMaskBand is not None and \
                        hMaskBand.GetOverviewCount() > 0:
                    line = "  Overviews of mask band: "
                    for iOverview in range(hMaskBand.GetOverviewCount()):
                        if iOverview != 0:
                            line = line + ", "
                        hOverview = hMaskBand.GetOverview(iOverview)
                        if hOverview is not None:
                            line = line + ("%d" % hOverview.Checksum())
                        else:
                            line = line + "(null)"
                    # BUG FIX: the assembled mask-band overview line was
                    # never printed in the original.
                    print(line)

            if len(hBand.GetUnitType()) > 0:
                print("  Unit Type: %s" % hBand.GetUnitType())

            papszCategories = hBand.GetRasterCategoryNames()
            if papszCategories is not None:
                print("  Categories:")
                i = 0
                for category in papszCategories:
                    print("    %3d: %s" % (i, category))
                    i = i + 1

            if hBand.GetScale() != 1.0 or hBand.GetOffset() != 0.0:
                print("  Offset: %.15g, Scale:%.15g" % (
                    hBand.GetOffset(), hBand.GetScale()))

            if bShowMetadata:
                papszMetadata = hBand.GetMetadata_List()
            else:
                papszMetadata = None
            if bShowMetadata and papszMetadata is not None \
                    and len(papszMetadata) > 0:
                print("  Metadata:")
                for metadata in papszMetadata:
                    print("    %s" % metadata)

            if bShowMetadata:
                papszMetadata = hBand.GetMetadata_List("IMAGE_STRUCTURE")
            else:
                papszMetadata = None
            if bShowMetadata and papszMetadata is not None \
                    and len(papszMetadata) > 0:
                print("  Image Structure Metadata:")
                for metadata in papszMetadata:
                    print("    %s" % metadata)

            hTable = hBand.GetRasterColorTable()
            if hBand.GetRasterColorInterpretation() == gdal.GCI_PaletteIndex \
                    and hTable is not None:
                print("  Color Table (%s with %d entries)" % (
                    gdal.GetPaletteInterpretationName(
                        hTable.GetPaletteInterpretation()),
                    hTable.GetCount()))
                if bShowColorTable:
                    for i in range(hTable.GetCount()):
                        sEntry = hTable.GetColorEntry(i)
                        print("  %3d: %d,%d,%d,%d" % (
                            i, sEntry[0], sEntry[1], sEntry[2], sEntry[3]))

            if bShowRAT:
                hRAT = hBand.GetDefaultRAT()
            # BUG FIX: a stray duplicated copy of the overview / mask /
            # categories / metadata / color-table reporting followed here in
            # the original (keyed off a leftover iOverview).  It could hit
            # `line` / `iOverview` before assignment (NameError) and only
            # repeated output already printed above, so it was removed.

    # ================================================================ #
    # Write harvested values into the FGDC template.                   #
    # (A large commented-out PDS3 label writer from the original was   #
    # removed here.)                                                   #
    # ================================================================ #
    for rasttype in tree.iter('rasttype'):
        rasttype.text = "Pixel"
    hBand = hDataset.GetRasterBand(1)
    for ellips in tree.iter('ellips'):
        ellips.text = target
    for semiaxis in tree.iter('semiaxis'):
        semiaxis.text = str(semiMajor)
    for denflat in tree.iter('denflat'):
        denflat.text = str(invFlat)

    if pszProjection[0:6] == "GEOGCS":
        for latSize in tree.iter('latres'):
            latSize.text = str(latres)
            if debug:
                print('Lat resolution: %s' % (latSize.text))
        for lonSize in tree.iter('lonres'):
            lonSize.text = str(lonres)
        for geogunit in tree.iter('geogunit'):
            geogunit.text = "Decimal degrees"
    else:
        for absres in tree.iter('absres'):  # in meters
            absres.text = str(xres)
            if debug:
                print('X resolution: %s' % (absres.text))
        for ordres in tree.iter('ordres'):
            ordres.text = str(abs(yres))
        for plandu in tree.iter('plandu'):
            plandu.text = "meters"

    for southbc in tree.iter('southbc'):
        southbc.text = str(lry)
    for northbc in tree.iter('northbc'):
        northbc.text = str(uly)
    for westbc in tree.iter('westbc'):
        westbc.text = str(ulx)
    for eastbc in tree.iter('eastbc'):
        eastbc.text = str(lrx)
    for rowcount in tree.iter('rowcount'):
        rowcount.text = str(hDataset.RasterYSize)
    for colcount in tree.iter('colcount'):
        # BUG FIX: originally wrote RasterYSize; column count is the X size.
        colcount.text = str(hDataset.RasterXSize)
    for vrtcount in tree.iter('vrtcount'):
        vrtcount.text = str(hDataset.RasterCount)
    for metstdn in tree.iter('metstdn'):
        metstdn.text = "FGDC Content Standards for Digital Geospatial Metadata"
    for metstdv in tree.iter('metstdv'):
        metstdv.text = "FGDC-STD-001-1998"

    # ================================================================ #
    # Write out sparse XML for merging.                                #
    # ================================================================ #
    try:
        # The USGS 'mp' tool rejects an XML declaration, so omit it.
        tree.write(dst_xml, pretty_print=True)  # pretty_print is lxml-only
    except Exception:
        # BUG FIX: was `except ImportError`, which could never catch an
        # I/O failure raised by tree.write().
        print("Failed to write out XML document")
    return 0
#/************************************************************************/
#/* GDALInfoReportCorner() */
#/************************************************************************/
def GDALInfoReportCorner( hDataset, hTransform, corner_name, x, y ):
    """Print one dataset corner, georeferenced (and as lat/long if possible).

    Returns False when the dataset has no geotransform (raw pixel/line is
    printed instead), True otherwise.
    """
    line = "%-11s " % corner_name
    gt = hDataset.GetGeoTransform(can_return_null=True)
    if gt is None:
        # No georeferencing available: echo the raw pixel/line location.
        print(line + "(%7.1f,%7.1f)" % (x, y))
        return False
    # Apply the affine geotransform to the pixel/line coordinate.
    geo_x = gt[0] + gt[1] * x + gt[2] * y
    geo_y = gt[3] + gt[4] * x + gt[5] * y
    # Values that look like degrees get more decimals than projected metres.
    if abs(geo_x) < 181 and abs(geo_y) < 91:
        line += "(%12.7f,%12.7f) " % (geo_x, geo_y)
    else:
        line += "(%12.3f,%12.3f) " % (geo_x, geo_y)
    # Optionally reproject to lat/long and append a DMS rendering.
    if hTransform is not None:
        pnt = hTransform.TransformPoint(geo_x, geo_y, 0)
        if pnt is not None:
            line += "(%s," % gdal.DecToDMS(pnt[0], "Long", 2)
            line += "%s)" % gdal.DecToDMS(pnt[1], "Lat", 2)
    print(line)
    return True
#/************************************************************************/
#/* GDALGetLon() */
#/************************************************************************/
def GDALGetLon( hDataset, hTransform, x, y ):
    """Return the longitude (or projected X) of pixel/line (x, y).

    Falls back to 0.0 when the dataset carries no geotransform, and to the
    untransformed georeferenced X when no coordinate transform is supplied.
    """
    gt = hDataset.GetGeoTransform(can_return_null=True)
    if gt is None:
        return 0.0
    # Affine pixel/line -> georeferenced coordinates.
    geo_x = gt[0] + gt[1] * x + gt[2] * y
    geo_y = gt[3] + gt[4] * x + gt[5] * y
    if hTransform is not None:
        pnt = hTransform.TransformPoint(geo_x, geo_y, 0)
        if pnt is not None:
            return pnt[0]
    return geo_x
#/************************************************************************/
#/* GDALGetLat() */
#/************************************************************************/
def GDALGetLat( hDataset, hTransform, x, y ):
    """Return the latitude (or projected Y) of pixel/line (x, y).

    Mirrors GDALGetLon: 0.0 without a geotransform, the raw georeferenced Y
    when no coordinate transform is supplied.
    """
    gt = hDataset.GetGeoTransform(can_return_null=True)
    if gt is None:
        return 0.0
    # Affine pixel/line -> georeferenced coordinates.
    geo_x = gt[0] + gt[1] * x + gt[2] * y
    geo_y = gt[3] + gt[4] * x + gt[5] * y
    if hTransform is not None:
        pnt = hTransform.TransformPoint(geo_x, geo_y, 0)
        if pnt is not None:
            return pnt[1]
    return geo_y
# Not currently used.
def parse_XML(element):
    """Return a list with one entry per child of `element`: the child's text,
    or its attribute dict when the child has no text."""
    return [child.attrib if child.text is None else child.text
            for child in element]
if __name__ == '__main__':
    # The GetGeoTransform(can_return_null=...) keyword used above only exists
    # in GDAL >= 1.8.0, so refuse to run against older bindings.
    version_num = int(gdal.VersionInfo('VERSION_NUM'))
    if version_num < 1800: # because of GetGeoTransform(can_return_null)
        print('ERROR: Python bindings of GDAL 1.8.0 or later required')
        sys.exit(1)
    sys.exit(main(sys.argv))
| |
""" Base variables to build probabilistic graph. """
__author__ = "arnaud.rachez@gmail.com"
from abc import ABCMeta, abstractmethod
from copy import copy
from inspect import getargspec
import numpy as np
from scipy.stats import beta, invgamma, laplace, norm
from scipy.stats import binom, uniform
from .distributions import sep_rvs, sep_logpdf
class Error(Exception):
    """Root of this module's exception hierarchy."""
    pass
class SamplingObservedVariableError(Error):
    """Raised when code attempts to (re)sample an observed variable."""
    pass
class BaseVariable(object):
    """Abstract node of a directed probabilistic graph.

    A variable knows its `parents` (parameter name -> parent variable),
    registers itself as a child of each parent, and caches its current value
    so a rejected Metropolis-style move can be rolled back with `reject()`.
    Concrete distributions implement the `_sample` / `_logp` hooks.
    """

    # Python-2 style metaclass declaration; inert under Python 3 (kept
    # verbatim because the module elsewhere targets Python 2 as well).
    __metaclass__ = ABCMeta

    def __init__(self, parents, value, observed, name, size):
        """Wire the node into the graph.

        Parameters
        ----------
        parents : dict
            Mapping of parameter name -> parent variable.
        value : optional
            Initial value; if it exposes a `.shape` (e.g. a numpy array),
            that shape overrides `size`.
        observed : bool
            Observed variables may never be re-sampled.
        name : str
            Mandatory identifier used by `__repr__`.
        size : tuple or int or None
            Sampling size forwarded to `_sample`.

        Raises
        ------
        ValueError
            If `name` is not a `str`.
        """
        self.parents = parents
        self.children = set()
        for parent in parents.values():
            parent.children |= set([self])

        self._value = copy(value)
        self._size = size
        if value is not None:
            try:
                self._size = value.shape
            except AttributeError:
                # Scalars and plain sequences carry no `.shape`.
                # (Was a bare `except:`, which also hid genuine errors.)
                self._size = None
        self._observed = observed
        self._deterministic = False

        self.name = name
        if type(name) is not str:
            raise ValueError("You must provide a `name` for your variable")

    def __str__(self):
        return self.__repr__()

    def __repr__(self):
        return self.name or super(BaseVariable, self).__repr__()

    @property
    def value(self):
        # Latent (non-observed) variables are lazily initialised by sampling.
        if self._value is None and not self._observed:
            self.sample()
        return self._value

    @value.setter
    def value(self, value):
        # Remember the previous value so `reject()` can restore it.
        self._last_value = copy(self._value)
        self._value = copy(value)

    def logp(self):
        """Log-probability of the current value given the parents' values."""
        # `items()` instead of py2-only `iteritems()`: identical result,
        # works on both Python 2 and 3.
        kwargs = {key: parent.value for key, parent in self.parents.items()}
        kwargs['value'] = self.value
        return self._logp(**kwargs)

    def sample(self):
        """Draw a new value from the prior; forbidden for observed variables."""
        if self._observed:
            raise SamplingObservedVariableError()

        kwargs = {key: parent.value for key, parent in self.parents.items()}
        kwargs['size'] = self._size
        self._last_value = self._value
        self._value = self._sample(**kwargs)
        return self.value

    def reject(self):
        """Roll back to the value held before the last sample/assignment."""
        self._value = self._last_value

    @abstractmethod
    def _logp(self):
        pass

    @abstractmethod
    def _sample(self):
        pass
class Value(BaseVariable):
    """A constant, always-observed leaf node wrapping a fixed value."""

    def __init__(self, value):
        super(Value, self).__init__(
            parents={}, value=value, observed=True, name='value', size=None)
        self._deterministic = True
        # NOTE(review): `parent` (singular) looks like a typo for `parents`,
        # which the base class already set; preserved verbatim.
        self.parent = {}
        self.children = set()

    def __str__(self):
        return repr(self)

    def __repr__(self):
        return str(self.value)

    def _sample(self):
        raise SamplingObservedVariableError()

    def _logp(self):
        # A constant contributes nothing to the joint log-probability.
        return 0.
class Function(BaseVariable):
    """Deterministic node computed from its parents by a plain function.

    The function's keyword defaults define its parents: each default must
    itself be a variable, whose current value is plugged in on access.
    """

    def __init__(self, function):
        arg_names, _, _, defaults = getargspec(function)
        super(Function, self).__init__(
            parents=dict(zip(arg_names, defaults)), value=None,
            observed=False, name=str(function), size=None)
        self.function = function
        self._deterministic = True

    @property
    def value(self):
        # Recomputed on every access from the parents' current values.
        inputs = {key: parent.value for key, parent in self.parents.iteritems()}
        return self.function(**inputs)

    def sample(self):
        inputs = {key: parent.value for key, parent in self.parents.iteritems()}
        self._last_value = self._value
        self._value = self.function(**inputs)
        return self.value

    def _sample(self):
        raise NotImplementedError()

    def _logp(self):
        raise NotImplementedError()
class Beta(BaseVariable):
    """Beta-distributed variable on [0, 1] with shape parents `a` and `b`."""

    def __init__(self, a, b, value=None, observed=False, name=None, size=None):
        super(Beta, self).__init__(
            parents={'a': a, 'b': b}, value=value, observed=observed,
            name=name, size=size)

    def _sample(self, a, b, size):
        return beta.rvs(a, b, size=size)

    def _logp(self, value, a, b):
        # The Beta distribution's support is the unit interval.
        if value > 1 or value < 0:
            raise ValueError("Domain Error.")
        return np.sum(beta.logpdf(value, a, b))
class Binomial(BaseVariable):
    """Binomial variable: successes in `k` trials with success probability `p`."""

    def __init__(self, p, k, value=None, observed=False, name=None, size=None):
        super(Binomial, self).__init__(
            parents={'p': p, 'k': k}, value=value, observed=observed,
            name=name, size=size)

    def _sample(self, p, k, size):
        return binom.rvs(k, p, size=size)

    def _logp(self, value, p, k):
        return np.sum(binom.logpmf(value, k, p, loc=0))
class InvGamma(BaseVariable):
    """Inverse-gamma variable parameterized by `shape` and `scale`."""

    def __init__(self, shape, scale, value=None, observed=False, name=None,
                 size=None):
        super(InvGamma, self).__init__(
            parents={'shape': shape, 'scale': scale}, value=value,
            observed=observed, name=name, size=size)

    def _sample(self, shape, scale, size):
        return invgamma.rvs(shape, scale=scale, size=size)

    def _logp(self, value, shape, scale):
        return np.sum(invgamma.logpdf(value, shape, scale=scale))
class Normal(BaseVariable):
    """Gaussian variable with mean `mu` and standard deviation `sigma`."""

    def __init__(self, mu, sigma, value=None, observed=False, name=None,
                 size=None):
        super(Normal, self).__init__(
            parents={'mu': mu, 'sigma': sigma}, value=value,
            observed=observed, name=name, size=size)

    def _sample(self, mu, sigma, size):
        return norm.rvs(loc=mu, scale=sigma, size=size)

    def _logp(self, value, mu, sigma):
        return np.sum(norm.logpdf(value, loc=mu, scale=sigma))
class Laplace(BaseVariable):
    """Laplace (double-exponential) variable with parents `loc` and `scale`."""

    def __init__(self, loc, scale, value=None, observed=False, name=None,
                 size=None):
        super(Laplace, self).__init__(
            parents={'loc': loc, 'scale': scale}, value=value,
            observed=observed, name=name, size=size)

    def _sample(self, loc, scale, size):
        return laplace.rvs(loc=loc, scale=scale, size=size)

    def _logp(self, value, loc, scale):
        return np.sum(laplace.logpdf(value, loc=loc, scale=scale))
class SEP(BaseVariable):
    """Skew exponential power variable (delegates to sep_rvs / sep_logpdf)."""

    def __init__(self, mu, sigma, nu, tau, value=None, observed=False,
                 name=None, size=None):
        super(SEP, self).__init__(
            parents={'mu': mu, 'sigma': sigma, 'nu': nu, 'tau': tau},
            value=value, observed=observed, name=name, size=size)

    def _sample(self, mu, sigma, nu, tau, size):
        return sep_rvs(mu=mu, sigma=sigma, nu=nu, tau=tau, size=size)

    def _logp(self, value, mu, sigma, nu, tau):
        return np.sum(sep_logpdf(value, mu=mu, sigma=sigma, nu=nu, tau=tau))
class Uniform(BaseVariable):
    """Uniform variable on the interval [`lower`, `upper`]."""

    def __init__(self, lower, upper, value=None, observed=False, name=None,
                 size=None):
        super(Uniform, self).__init__(
            parents={'lower': lower, 'upper': upper}, value=value,
            observed=observed, name=name, size=size)

    def _sample(self, lower, upper, size):
        # scipy's uniform is loc/scale based, i.e. [loc, loc + scale].
        return uniform.rvs(loc=lower, scale=upper - lower, size=size)

    def _logp(self, value, lower, upper):
        if value > upper or value < lower:
            raise ValueError("Domain Error.")
        return np.sum(uniform.logpdf(value, loc=lower, scale=upper - lower))
| |
#
# This file is part of pySMT.
#
# Copyright 2014 Andrea Micheli and Marco Gario
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from fractions import Fraction
from six.moves import xrange
import pysmt
from pysmt.typing import BOOL, REAL, INT, FunctionType
from pysmt.shortcuts import Symbol, is_sat, Not, Implies, GT, Plus, Int, Real
from pysmt.shortcuts import Minus, Times, Xor, And, Or, TRUE
from pysmt.shortcuts import get_env
from pysmt.environment import Environment
from pysmt.test import TestCase, skipIfNoSolverForLogic
from pysmt.logics import QF_BOOL
from pysmt.exceptions import NonLinearError
from pysmt.formula import FormulaManager
class TestFormulaManager(TestCase):
    """Unit tests for FormulaManager: node constructors, typing rules,
    simplification helpers and cross-environment normalization."""
    def setUp(self):
        # Shared fixture: boolean, integer and real symbols plus constants
        # and a binary real function symbol, all from the global environment.
        super(TestFormulaManager, self).setUp()
        self.env = get_env()
        self.mgr = self.env.formula_manager
        self.x = self.mgr.Symbol("x")
        self.y = self.mgr.Symbol("y")
        self.p = self.mgr.Symbol("p", INT)
        self.q = self.mgr.Symbol("q", INT)
        self.r = self.mgr.Symbol("r", REAL)
        self.s = self.mgr.Symbol("s", REAL)
        self.rconst = self.mgr.Real(10)
        self.iconst = self.mgr.Int(10)
        self.ftype = FunctionType(REAL, [REAL, REAL])
        self.f = self.mgr.Symbol("f", self.ftype)
        self.real_expr = self.mgr.Plus(self.s, self.r)
    def test_new_fresh_symbol(self):
        fv1 = self.mgr.new_fresh_symbol(BOOL)
        self.assertIsNotNone(fv1, "New symbol was not created.")
        fv2 = self.mgr.new_fresh_symbol(BOOL)
        self.assertNotEqual(fv1, fv2, "Fresh symbol is not new.")
        fv3 = self.mgr.new_fresh_symbol(BOOL, "abc_%d")
        self.assertEqual(fv3.symbol_name()[:3], "abc",
                         "Fresh variable doesn't have the desired prefix")
    def test_get_symbol(self):
        a = self.mgr.get_symbol("a")
        self.assertIsNone(a,
                          "Symbol returned from an empty symboltable")
        self.mgr.get_or_create_symbol("a", BOOL)
        a = self.mgr.get_symbol("a")
        self.assertIsNotNone(a, "Symbol was not found in symbol table")
    def test_get_or_create_symbol(self):
        a = self.mgr.get_or_create_symbol("a", REAL)
        self.assertIsNotNone(a, "Symbol was not created")
        a2 = self.mgr.get_or_create_symbol("a", REAL)
        self.assertEqual(a, a2, "Symbol was not memoized")
        # Re-declaring an existing name with a different type must fail.
        with self.assertRaises(TypeError):
            self.mgr.get_or_create_symbol("a", BOOL)
    def test_symbol(self):
        a1 = self.mgr.Symbol("a", BOOL)
        self.assertIsNotNone(a1, "Symbol was not created.")
        a2 = self.mgr.Symbol("a", BOOL)
        self.assertEqual(a1, a2, "Symbol is not memoized")
        c = self.mgr.Symbol("c")
        self.assertEqual(c.symbol_type(), BOOL, "Default Symbol Type is not BOOL")
    def test_and_node(self):
        n = self.mgr.And(self.x, self.y)
        self.assertIsNotNone(n)
        self.assertTrue(n.is_and())
        self.assertEqual(n.get_free_variables(), set([self.x, self.y]))
        m = self.mgr.And([self.x, self.y])
        self.assertEqual(m, n, "And(1,2) != And([1,2]")
        args = m.args()
        self.assertTrue(self.x in args and self.y in args)
        self.assertTrue(len(args) == 2)
        # Nullary/unary And: identity element and pass-through.
        zero = self.mgr.And()
        self.assertEqual(zero, self.mgr.TRUE())
        one = self.mgr.And(self.x)
        self.assertEqual(one, self.x)
    def test_or_node(self):
        n = self.mgr.Or(self.x, self.y)
        self.assertIsNotNone(n)
        self.assertTrue(n.is_or())
        self.assertEqual(n.get_free_variables(), set([self.x, self.y]))
        m = self.mgr.Or([self.x, self.y])
        self.assertEqual(m, n, "Or(1,2) != Or([1,2]")
        args = m.args()
        self.assertIn(self.x, args)
        self.assertIn(self.y, args)
        self.assertEqual(len(args), 2)
        # Nullary/unary Or: identity element and pass-through.
        zero = self.mgr.Or()
        self.assertEqual(zero, self.mgr.FALSE())
        one = self.mgr.Or(self.x)
        self.assertEqual(one, self.x)
    def test_not_node(self):
        n = self.mgr.Not(self.x)
        self.assertIsNotNone(n)
        self.assertTrue(n.is_not())
        self.assertEqual(n.get_free_variables(), set([self.x]))
        args = n.args()
        self.assertIn(self.x, args)
        self.assertEqual(len(args), 1)
        # Double negation is collapsed by the manager.
        self.assertEqual(self.mgr.Not(n), self.x)
    def test_implies_node(self):
        n = self.mgr.Implies(self.x, self.y)
        self.assertIsNotNone(n)
        self.assertTrue(n.is_implies())
        self.assertEqual(n.get_free_variables(), set([self.x, self.y]))
        args = n.args()
        self.assertEqual(self.x, args[0])
        self.assertEqual(self.y, args[1])
        self.assertEqual(len(args), 2)
    def test_iff_node(self):
        n = self.mgr.Iff(self.x, self.y)
        self.assertIsNotNone(n)
        self.assertTrue(n.is_iff())
        self.assertEqual(n.get_free_variables(), set([self.x, self.y]))
        args = n.args()
        self.assertIn(self.x, args)
        self.assertIn(self.y, args)
        self.assertEqual(len(args), 2)
    def test_ge_node_type(self):
        # Mixed bool/real and int/real operands must be rejected.
        with self.assertRaises(TypeError):
            self.mgr.GE(self.x, self.r)
        with self.assertRaises(TypeError):
            self.mgr.GE(self.r, self.x)
        with self.assertRaises(TypeError):
            self.mgr.GE(self.p, self.r)
    def test_ge_node(self):
        n = self.mgr.GE(self.real_expr, self.real_expr)
        self.assertIsNotNone(n)
        n = self.mgr.GE(self.r, self.rconst)
        self.assertIsNotNone(n)
        n = self.mgr.GE(self.rconst, self.s)
        self.assertIsNotNone(n)
        n = self.mgr.GE(self.rconst, self.rconst)
        self.assertIsNotNone(n)
        n = self.mgr.GE(self.r, self.s)
        self.assertIsNotNone(n)
        args = n.args()
        self.assertIn(self.r, args)
        self.assertIn(self.s, args)
        self.assertEqual(len(args), 2)
        n = self.mgr.GE(self.p, self.q)
        self.assertIsNotNone(n)
    def test_minus_node(self):
        n = self.mgr.Minus(self.real_expr, self.real_expr)
        self.assertIsNotNone(n)
        n = self.mgr.Minus(self.r, self.rconst)
        self.assertIsNotNone(n)
        n = self.mgr.Minus(self.rconst, self.s)
        self.assertIsNotNone(n)
        n = self.mgr.Minus(self.rconst, self.rconst)
        self.assertIsNotNone(n)
        n = self.mgr.Minus(self.r, self.s)
        self.assertIsNotNone(n)
        args = n.args()
        self.assertIn(self.r, args)
        self.assertIn(self.s, args)
        self.assertEqual(len(args), 2)
        n = self.mgr.Minus(self.p, self.q)
        self.assertIsNotNone(n)
        self.assertTrue(n.is_minus())
        self.assertEqual(n.get_free_variables(), set([self.p, self.q]))
        with self.assertRaises(TypeError):
            n = self.mgr.Minus(self.r, self.q)
    def test_times_node(self):
        n = self.mgr.Times(self.real_expr, self.rconst)
        self.assertIsNotNone(n)
        n = self.mgr.Times(self.r, self.rconst)
        self.assertIsNotNone(n)
        n = self.mgr.Times(self.rconst, self.rconst)
        self.assertIsNotNone(n)
        n = self.mgr.Times(self.rconst, self.s)
        self.assertIsNotNone(n)
        args = n.args()
        self.assertIn(self.rconst, args)
        self.assertIn(self.s, args)
        self.assertEqual(len(args), 2)
        n = self.mgr.Times(self.r, self.s)
        self.assertIsNotNone(n)
        self.assertTrue(n.is_times())
        self.assertEqual(n.get_free_variables(), set([self.r, self.s]))
        n = self.mgr.Times(self.iconst, self.q)
        self.assertIsNotNone(n)
    def test_div_non_linear(self):
        # Division by a non-constant is non-linear and must be rejected.
        with self.assertRaises(NonLinearError):
            self.mgr.Div(self.r, self.s)
        with self.assertRaises(NonLinearError):
            self.mgr.Div(self.rconst, self.s)
    def test_div_node(self):
        n = self.mgr.Div(self.real_expr, self.rconst)
        self.assertIsNotNone(n)
        n = self.mgr.Div(self.r, self.rconst)
        self.assertIsNotNone(n)
        n = self.mgr.Div(self.rconst, self.rconst)
        self.assertIsNotNone(n)
        n = self.mgr.Div(self.s, self.rconst)
        self.assertIsNotNone(n)
        # Division by a constant is rewritten as multiplication by its inverse.
        inv = self.mgr.Real((1, self.rconst.constant_value()))
        self.assertEqual(n, self.mgr.Times(self.s, inv))
    def test_equals(self):
        n = self.mgr.Equals(self.real_expr, self.real_expr)
        self.assertIsNotNone(n)
        n = self.mgr.Equals(self.r, self.s)
        self.assertIsNotNone(n)
        args = n.args()
        self.assertIn(self.r, args)
        self.assertIn(self.s, args)
        self.assertEqual(len(args), 2)
        n = self.mgr.Equals(self.p, self.q)
        self.assertIsNotNone(n)
        self.assertTrue(n.is_equals())
        self.assertEqual(n.get_free_variables(), set([self.p, self.q]))
        with self.assertRaises(TypeError):
            n = self.mgr.Equals(self.p, self.r)
    def test_gt_node_type(self):
        with self.assertRaises(TypeError):
            self.mgr.GT(self.x, self.r)
        with self.assertRaises(TypeError):
            self.mgr.GT(self.r, self.x)
        with self.assertRaises(TypeError):
            self.mgr.GT(self.r, self.p)
    def test_gt_node(self):
        n = self.mgr.GT(self.real_expr, self.real_expr)
        self.assertIsNotNone(n)
        n = self.mgr.GT(self.r, self.rconst)
        self.assertIsNotNone(n)
        n = self.mgr.GT(self.rconst, self.s)
        self.assertIsNotNone(n)
        n = self.mgr.GT(self.rconst, self.rconst)
        self.assertIsNotNone(n)
        n = self.mgr.GT(self.r, self.s)
        self.assertIsNotNone(n)
        args = n.args()
        self.assertIn(self.r, args)
        self.assertIn(self.s, args)
        self.assertEqual(len(args), 2)
        n = self.mgr.GT(self.p, self.q)
        self.assertIsNotNone(n)
    def test_le_node_type(self):
        with self.assertRaises(TypeError):
            self.mgr.LE(self.x, self.r)
        with self.assertRaises(TypeError):
            self.mgr.LE(self.r, self.x)
    def test_le_node(self):
        n = self.mgr.LE(self.real_expr, self.real_expr)
        self.assertIsNotNone(n)
        n = self.mgr.LE(self.r, self.rconst)
        self.assertIsNotNone(n)
        n = self.mgr.LE(self.rconst, self.s)
        self.assertIsNotNone(n)
        n = self.mgr.LE(self.rconst, self.rconst)
        self.assertIsNotNone(n)
        n = self.mgr.LE(self.r, self.s)
        self.assertIsNotNone(n)
        self.assertTrue(n.is_le())
        self.assertEqual(n.get_free_variables(), set([self.r, self.s]))
        args = n.args()
        self.assertIn(self.r, args)
        self.assertIn(self.s, args)
        self.assertEqual(len(args), 2)
    def test_lt_node_type(self):
        with self.assertRaises(TypeError):
            self.mgr.LT(self.x, self.r)
        with self.assertRaises(TypeError):
            self.mgr.LT(self.r, self.x)
    def test_lt_node(self):
        n = self.mgr.LT(self.real_expr, self.real_expr)
        self.assertIsNotNone(n)
        n = self.mgr.LT(self.r, self.rconst)
        self.assertIsNotNone(n)
        n = self.mgr.LT(self.rconst, self.s)
        self.assertIsNotNone(n)
        n = self.mgr.LT(self.rconst, self.rconst)
        self.assertIsNotNone(n)
        n = self.mgr.LT(self.r, self.s)
        self.assertIsNotNone(n)
        self.assertTrue(n.is_lt())
        self.assertEqual(n.get_free_variables(), set([self.r, self.s]))
        args = n.args()
        self.assertIn(self.r, args)
        self.assertIn(self.s, args)
        self.assertEqual(len(args), 2)
    def test_ite(self):
        n = self.mgr.Ite(self.x, self.y, self.x)
        self.assertIsNotNone(n)
        args = n.args()
        self.assertIn(self.x, args)
        self.assertIn(self.y, args)
        self.assertEqual(len(args), 3)
        n = self.mgr.Ite(self.x, self.s, self.r)
        self.assertIsNotNone(n)
        n = self.mgr.Ite(self.x, self.p, self.q)
        self.assertIsNotNone(n)
        self.assertTrue(n.is_ite())
        self.assertEqual(n.get_free_variables(), set([self.x, self.p, self.q]))
        # Branches of mismatched type must be rejected.
        with self.assertRaises(TypeError):
            self.mgr.Ite(self.x, self.p, self.r)
    def test_function(self):
        n = self.mgr.Function(self.f, [self.r, self.s])
        self.assertIsNotNone(n)
        args = n.args()
        self.assertIn(self.r, args)
        self.assertIn(self.s, args)
        self.assertEqual(len(args), 2)
        self.assertTrue(n.is_function_application())
        self.assertEqual(n.get_free_variables(), set([self.f, self.r, self.s]))
    def test_constant(self):
        # The same rational must be produced from Fraction, pair, int, float.
        n1 = self.mgr.Real(Fraction(100, 10))
        self.assertIsNotNone(n1)
        self.assertTrue(n1.is_constant())
        self.assertTrue(n1.is_real_constant())
        n2 = self.mgr.Real((100, 10))
        self.assertEqual(n1, n2,
                         "Generation of constant does not provide a consistent result.")
        n3 = self.mgr.Real(10)
        self.assertEqual(n1, n3,
                         "Generation of constant does not provide a consistent result.")
        n4 = self.mgr.Real(10.0)
        self.assertEqual(n1, n4,
                         "Generation of constant does not provide a consistent result.")
        nd = self.mgr.Real(Fraction(100,1))
        self.assertNotEqual(nd, n1)
        with self.assertRaises(TypeError):
            self.mgr.Real(True)
        nd = self.mgr.Int(10)
        self.assertIsNotNone(nd)
        self.assertTrue(nd.is_constant())
        self.assertTrue(nd.is_int_constant())
    def test_bconstant(self):
        n = self.mgr.Bool(True)
        m = self.mgr.Bool(False)
        self.assertIsNotNone(n)
        self.assertIsNotNone(m)
        self.assertNotEqual(n, m)
        self.assertTrue(n.is_constant())
        self.assertTrue(n.is_bool_constant())
        with self.assertRaises(TypeError):
            self.mgr.Bool(42)
    def test_plus_node(self):
        with self.assertRaises(TypeError):
            self.mgr.Plus([self.x, self.r])
        with self.assertRaises(TypeError):
            self.mgr.Plus([self.p, self.r])
        with self.assertRaises(TypeError):
            self.mgr.Plus()
        n1 = self.mgr.Plus([self.r, self.s])
        n2 = self.mgr.Plus(self.r, self.s)
        self.assertIsNotNone(n1)
        self.assertIsNotNone(n2)
        self.assertEqual(n1, n2, "Constructed Plus expression do not match")
        self.assertTrue(n1.is_plus())
        self.assertEqual(set([self.r, self.s]), n1.get_free_variables())
        one = self.mgr.Plus([self.p])
        self.assertEqual(one, self.p)
    def test_exactly_one(self):
        symbols = [ self.mgr.Symbol("s%d"%i, BOOL) for i in range(5) ]
        c = self.mgr.ExactlyOne(symbols)
        self.assertTrue(len(c.args()) > 1)
        t = self.mgr.Bool(True)
        c = c.substitute({symbols[0]: t,
                          symbols[1]: t}).simplify()
        self.assertEqual(c, self.mgr.Bool(False),
                         "ExactlyOne should not allow 2 symbols to be True")
    @skipIfNoSolverForLogic(QF_BOOL)
    def test_exactly_one_is_sat(self):
        symbols = [ self.mgr.Symbol("s%d"%i, BOOL) for i in range(5) ]
        c = self.mgr.ExactlyOne(symbols)
        all_zero = self.mgr.And([self.mgr.Iff(s, self.mgr.Bool(False))
                                 for s in symbols])
        test_zero = self.mgr.And(c, all_zero)
        self.assertFalse(is_sat(test_zero, logic=QF_BOOL),
                         "ExactlyOne should not allow all symbols to be False")
    def test_at_most_one(self):
        symbols = [ self.mgr.Symbol("s%d"%i, BOOL) for i in range(5) ]
        c = self.mgr.AtMostOne(symbols)
        self.assertTrue(len(c.args()) > 1)
        t = self.mgr.Bool(True)
        c = c.substitute({symbols[0]: t,
                          symbols[1]: t}).simplify()
        self.assertEqual(c, self.mgr.Bool(False),
                         "AtMostOne should not allow two symbols to be True")
    def test_xor(self):
        xor1 = self.mgr.Xor(self.x, self.y)
        self.assertIsNotNone(xor1)
        with self.assertRaises(TypeError):
            self.mgr.Xor(self.p, self.q)
        xor_false = self.mgr.Xor(self.mgr.TRUE(), self.mgr.TRUE()).simplify()
        self.assertEqual(xor_false, self.mgr.FALSE(),
                         "Xor should be False if both arguments are True")
        xor_true = self.mgr.Xor(self.mgr.TRUE(), self.mgr.FALSE()).simplify()
        self.assertEqual(xor_true, self.mgr.TRUE(),
                         "Xor should be True if both arguments are False")
    def test_all_different(self):
        many = 5
        symbols = [self.mgr.Symbol("s%d"%i, INT) for i in range(many) ]
        f = self.mgr.AllDifferent(symbols)
        one = self.mgr.Int(1)
        # Forcing any two symbols to the same value must falsify the formula.
        for i in xrange(many):
            for j in xrange(many):
                if i != j:
                    c = f.substitute({symbols[i]: one,
                                      symbols[j]: one}).simplify()
                    self.assertEqual(c, self.mgr.Bool(False),
                                     "AllDifferent should not allow 2 symbols "\
                                     "to be 1")
        c = f.substitute({symbols[i]: self.mgr.Int(i) for i in xrange(many)})
        self.assertEqual(c.simplify(), self.mgr.Bool(True),
                         "AllDifferent should be tautological for a set " \
                         "of different values")
    def test_min(self):
        min1 = self.mgr.Min(self.p, Plus(self.q, self.mgr.Int(1)))
        self.assertIsNotNone(min1)
        with self.assertRaises(TypeError):
            self.mgr.Min(self.p, self.r)
        min_int = self.mgr.Min(self.mgr.Int(1), self.mgr.Int(2), self.mgr.Int(3))
        self.assertEqual(min_int.simplify(), self.mgr.Int(1),
                         "The minimum of 1, 2 and 3 should be 1")
        min_real = self.mgr.Min(self.mgr.Real(1), self.mgr.Real(2), self.mgr.Real(3))
        self.assertEqual(min_real.simplify(), self.mgr.Real(1),
                         "The minimum of 1.0, 2.0 and 3.0 should be 1.0")
    def test_max(self):
        max1 = self.mgr.Max(self.p, Plus(self.q, self.mgr.Int(1)))
        self.assertIsNotNone(max1)
        with self.assertRaises(TypeError):
            self.mgr.Max(self.p, self.r)
        max_int = self.mgr.Max(self.mgr.Int(1), self.mgr.Int(2), self.mgr.Int(3))
        self.assertEqual(max_int.simplify(), self.mgr.Int(3),
                         "The maximum of 1, 2 and 3 should be 3")
        max_real = self.mgr.Max(self.mgr.Real(1), self.mgr.Real(2), self.mgr.Real(3))
        self.assertEqual(max_real.simplify(), self.mgr.Real(3),
                         "The maximum of 1.0, 2.0 and 3.0 should be 3.0")
    def test_pickling(self):
        import pickle
        src_env = Environment()
        dst_env = Environment()
        src_mgr = src_env.formula_manager
        dst_mgr = dst_env.formula_manager
        a = src_mgr.Symbol("A")
        b = src_mgr.Symbol("B")
        f = src_mgr.And(a, src_mgr.Not(b))
        self.assertEqual(str(f), "(A & (! B))", str(f))
        serialized = pickle.dumps(f)
        f_new = pickle.loads(serialized)
        # NOTE(review): the next line overwrites the unpickled `f_new`, so the
        # pickle round-trip is only exercised up to loads(); this looks like
        # it was meant to be `dst_mgr.normalize(f_new)` -- confirm intent.
        f_new = dst_mgr.normalize(f)
        args = f_new.args()
        self.assertEqual(str(args[0]), "A",
                         "Expecting symbol A, " +
                         "symbol %s found instead" % str(args[0]))
        a = dst_mgr.Symbol("A")
        b = dst_mgr.Symbol("B")
        g = dst_mgr.And(a, dst_mgr.Not(b))
        # Contextualized formula is memoized
        self.assertEqual(f_new, g, "%s != %s" % (id(f_new), id(g)))
        # But it differs from the one in the other formula manager
        self.assertNotEqual(f_new, f)
        # Normalizing a formula in the same manager should not
        # be a problem
        f_new = src_mgr.normalize(f)
        self.assertEqual(f_new, f, "%s != %s" %(id(a),id(b)))
    def test_infix(self):
        x, y, p = self.x, self.y, self.p
        # Infix notation is opt-in; disabled it must raise.
        with self.assertRaises(Exception):
            x.Implies(y)
        get_env().enable_infix_notation = True
        self.assertEqual(Implies(x,y), x.Implies(y))
        self.assertEqual(p + p, Plus(p,p))
        self.assertEqual(p > p, GT(p,p))
        get_env().enable_infix_notation = False
    def test_infix_extended(self):
        p, r, x, y = self.p, self.r, self.x, self.y
        get_env().enable_infix_notation = True
        # Python literals on either side are auto-promoted to constants.
        self.assertEqual(Plus(p, Int(1)), p + 1)
        self.assertEqual(Plus(r, Real(1)), r + 1)
        self.assertEqual(Times(r, Real(1)), r * 1)
        self.assertEqual(Minus(p, Int(1)), p - 1)
        self.assertEqual(Minus(r, Real(1)), r - 1)
        self.assertEqual(Times(r, Real(1)), r * 1)
        self.assertEqual(Plus(r, Real(1.5)), r + 1.5)
        self.assertEqual(Minus(r, Real(1.5)), r - 1.5)
        self.assertEqual(Times(r, Real(1.5)), r * 1.5)
        self.assertEqual(Plus(r, Real(1.5)), 1.5 + r)
        self.assertEqual(Times(r, Real(1.5)), 1.5 * r)
        with self.assertRaises(TypeError):
            foo = p + 1.5
        self.assertEqual(Not(x), ~x)
        self.assertEqual(Times(r, Real(-1)), -r)
        self.assertEqual(Times(p, Int(-1)), -p)
        self.assertEqual(Xor(x, y), x ^ y)
        self.assertEqual(And(x, y), x & y)
        self.assertEqual(Or(x, y), x | y)
        self.assertEqual(Or(x, TRUE()), x | True)
        self.assertEqual(Or(x, TRUE()), True | x)
        self.assertEqual(And(x, TRUE()), x & True)
        self.assertEqual(And(x, TRUE()), True & x)
        get_env().enable_infix_notation = False
    def test_toReal(self):
        f = self.mgr.Equals(self.rconst, self.mgr.ToReal(self.p))
        self.assertIsNotNone(f)
        with self.assertRaises(TypeError):
            self.mgr.ToReal(self.x)
        # ToReal is idempotent.
        f1 = self.mgr.ToReal(self.p)
        f2 = self.mgr.ToReal(f1)
        self.assertEqual(f1, f2)
        self.assertTrue(f1.is_toreal())
        self.assertEqual(set([self.p]), f1.get_free_variables())
        f3 = self.mgr.Equals(self.iconst, self.p)
        with self.assertRaises(TypeError):
            self.mgr.ToReal(f3)
        # ToReal of an already-real expression is a no-op.
        f4 = self.mgr.Plus(self.rconst, self.r)
        f5 = self.mgr.ToReal(f4)
        self.assertEqual(f5, f4)
    def test_is_term(self):
        and_x_x = self.mgr.And(self.x, self.x)
        apply_f = self.mgr.Function(self.f, [self.r, self.s])
        self.assertTrue(self.x.is_term())
        self.assertTrue(and_x_x.is_term())
        # A function symbol itself is not a term; its application is.
        self.assertFalse(self.f.is_term())
        self.assertTrue(apply_f.is_term())
    def test_formula_in_formula_manager(self):
        x = self.mgr.FreshSymbol()
        and_x_x = self.mgr.And(x, x)
        new_mgr = FormulaManager(get_env())
        y = new_mgr.FreshSymbol()
        and_y_y = new_mgr.And(y, y)
        self.assertTrue(x in self.mgr)
        self.assertFalse(y in self.mgr)
        self.assertTrue(and_x_x in self.mgr)
        self.assertFalse(and_y_y in self.mgr)
    def test_typing(self):
        self.assertTrue(BOOL.is_bool_type())
        self.assertFalse(BOOL.is_function_type())
        self.assertTrue(REAL.is_real_type())
        self.assertFalse(REAL.is_bool_type())
        self.assertTrue(INT.is_int_type())
        self.assertFalse(INT.is_real_type())
        self.assertTrue(self.ftype.is_function_type())
        self.assertFalse(self.ftype.is_int_type())
class TestShortcuts(TestCase):
    """Sanity checks for the module-level shortcut functions."""

    def test_shortcut_is_using_global_env(self):
        """Symbol() must resolve through the global environment's manager."""
        env_mgr = get_env().formula_manager
        via_shortcut = Symbol("z", BOOL)
        direct = env_mgr.Symbol("z", BOOL)
        self.assertEqual(via_shortcut, direct,
                         "Symbols generated by env and Symbol are not the same")
if __name__ == '__main__':
    # Allow running this test module directly, outside a test runner.
    import unittest
    unittest.main()
| |
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.template import RequestContext
from django.shortcuts import render_to_response, redirect
from django.core.urlresolvers import reverse
from django.contrib.auth import logout
from web.campaign.models import Campaign, Scan, Click
from django.db.models.aggregates import Count
from datetime import timedelta
from datetime import datetime, time, date
from django.http import HttpResponse
import csv
from web.member.forms import ChangePasswordForm
from web.member.models import MemberPayment
from web.campaign.forms import NewStdCampaignForm
from dateutil.relativedelta import relativedelta
from web.campaign.models import PCAMPAIGN_LINK_TEXT_LOOKUP, Note
@login_required
def dash(request):
    """Member dashboard: the five most recently updated campaigns."""
    recent = Campaign.objects.filter(
        user=request.user).order_by('-updated_at')[0:5]
    return render_to_response('member/dash.html',
                              {'cs': recent},
                              context_instance=RequestContext(request))
@login_required
def campaign(request, campaign_type = None):
    """List the user's campaigns (optionally filtered by type), annotating
    each with today's scan count (`d_day_count`)."""
    if campaign_type in ('S', 'P'):
        cs = Campaign.objects.filter(
            user=request.user, campaign_type=campaign_type).order_by('name')
    elif not campaign_type:
        cs = Campaign.objects.filter(user=request.user).order_by('name')
    else:
        # Unknown type token: bounce back to the public home page.
        return redirect(reverse('web.general.views.home'))
    count_list = [c.id for c in cs]
    # Today's window starts at local midnight.
    now = datetime.today()
    midnight = datetime.combine(date(now.year, now.month, now.day), time(0, 0))
    ss = Scan.objects.extra(select={'campaign': 'campaign_id'}).values('campaign').filter(campaign__in=count_list, created_at__gte=midnight).annotate(c=Count('campaign'))
    ss_dict = {s['campaign']: s['c'] for s in ss}
    for c in cs:
        c.d_day_count = ss_dict.get(c.id, 0)
    return render_to_response('member/campaign.html',
                              {'cs': cs,
                               },
                              context_instance=RequestContext(request))
@login_required
def campaign_details(request, campaign_id):
    """Render the detail page of one campaign owned by the current user.

    Builds a 7-day scan histogram as JavaScript array fragments
    (`chart_data_x`/`chart_data_y`); for 'P' campaigns also aggregates
    click-throughs per link type (with percentages) and attaches notes.
    """
    ccs = None
    notes = None
    try:
        c = Campaign.objects.get(user=request.user, id=campaign_id)
    except Exception:
        # Unknown/foreign campaign id: bounce home rather than 404.
        # (Was a bare `except:`; `Exception` keeps the same fallback without
        # also trapping SystemExit/KeyboardInterrupt.)
        return redirect(reverse('web.general.views.home'))
    # Scans grouped per calendar day, newest day first.
    ss = Scan.objects.extra(select={'day': 'date( created_at )'}).values('day').filter(campaign=c).annotate(c=Count('created_at')).order_by('-day')
    current_day = datetime.now()
    # Seed the last 7 days (today included) with zero counts, keyed 'MM-DD'.
    days = {}
    for n in range(7):
        cd = current_day - timedelta(days=n)
        days[cd.strftime('%m-%d')] = 0
    chart_data_x = ''
    chart_data_y = ''
    for s in ss:
        key = s['day'].strftime("%m-%d")
        if key in days:
            days[key] = s['c']
    # Emit x labels and y values as JS array fragments; counter runs from
    # len(days) down to 1 over keys sorted descending, so the oldest day
    # lands at position 1.
    # NOTE(review): 'MM-DD' keys sort lexicographically, so the ordering is
    # wrong across a year boundary (e.g. '12-30' > '01-02') -- preserved
    # behavior, worth confirming with the chart's owner.
    counter = len(days)
    for k in sorted(days, reverse=True):  # was iterkeys(); works on py2 and py3
        chart_data_x = chart_data_x + '[' + str(counter) + ',"' + str(k) + '"], '
        chart_data_y = chart_data_y + '[' + str(counter) + ',' + str(days[k]) + '], '
        counter -= 1
    if c.campaign_type == 'P':
        ccs = Click.objects.extra().values('click_type').filter(campaign=c).annotate(c=Count('created_at'))
        notes = Note.objects.filter(campaign=c).order_by('-id')
        total = 0
        for cc in ccs:
            # Human-readable label for each link type.
            cc['d_click_type'] = PCAMPAIGN_LINK_TEXT_LOOKUP[cc['click_type']]
            total = total + cc['c']
        for cc in ccs:
            if total == 0:
                cc['d_click_percent'] = 0
            else:
                cc['d_click_percent'] = int((float(cc['c']) / float(total)) * 100)
    return render_to_response('member/campaign_details.html',
                              {'c': c,
                               'ss': ss,
                               'ccs': ccs,
                               'notes': notes,
                               'chart_data_x': chart_data_x,
                               'chart_data_y': chart_data_y},
                              context_instance=RequestContext(request))
@login_required
def campaign_new_edit(request, campaign_id=None):
    """Create a standard ('S') campaign, or edit an existing one.

    GET renders the (pre-filled) form; a valid POST saves and redirects to
    the campaign's detail page.
    """
    # Current (unexpired) payment, if any; the template uses it to gate features.
    try:
        mp = MemberPayment.objects.filter(user=request.user, end_at__gt=datetime.now()).order_by('-id')[0:1].get()
    except MemberPayment.DoesNotExist:  # narrowed from a bare except
        mp = None
    c = None
    if campaign_id:
        try:
            c = Campaign.objects.get(user=request.user, id=campaign_id)
            form = NewStdCampaignForm(instance=c)
        except Campaign.DoesNotExist:  # narrowed from a bare except
            return redirect(reverse('web.general.views.home'))
    else:
        form = NewStdCampaignForm()
    if request.method == 'POST':
        # Force ownership and campaign type server-side; never trust the client.
        posted = request.POST.copy()
        posted['user'] = request.user.id
        posted['campaign_type'] = 'S'
        if campaign_id:
            form = NewStdCampaignForm(posted, instance=c)
        else:
            form = NewStdCampaignForm(posted)
        if form.is_valid():
            if c:
                messages.success(request, 'Your campaign was edited.')
            else:
                messages.success(request, 'A new campaign was created.')
            c = form.save()
            return redirect(reverse('web.member.views.campaign_details', args=[c.id]))
    return render_to_response('member/campaign_new_edit.html',
                              {'form': form,
                               'c': c,
                               'mp': mp},
                              context_instance=RequestContext(request))
@login_required
def campaign_export(request, campaign_id):
    """Download every scan of one of the user's campaigns as a CSV attachment."""
    try:
        c = Campaign.objects.get(user=request.user, id=campaign_id)
    except:
        return redirect(reverse('web.general.views.home'))
    # Build the HttpResponse with a CSV content type and download filename.
    response = HttpResponse(content_type='text/csv')
    response['Content-Disposition'] = 'attachment; filename=qrtrace_' + str(c.id) + '_' + datetime.now().strftime('%Y%m%d') + '.csv'
    writer = csv.writer(response)
    writer.writerow(['ID', 'IP Address', 'Refer URL', 'User Agent', 'Created at'])
    for scan in Scan.objects.filter(campaign=c):
        writer.writerow([scan.id, scan.ip, scan.refer, scan.user_agent, scan.created_at])
    return response
@login_required
def account(request):
    """Account page: change-password form plus current and past payment info."""
    # Number of already-expired payments (identifies returning subscribers).
    mp_old = MemberPayment.objects.filter(user=request.user, end_at__lte=datetime.now()).count()
    # Latest still-active payment, if any.
    try:
        mp = MemberPayment.objects.filter(user=request.user, end_at__gt=datetime.now()).order_by('-id')[0:1].get()
    except MemberPayment.DoesNotExist:  # narrowed from a bare except
        mp = None
    cp_form = ChangePasswordForm(instance=request.user)
    if request.method == 'POST':
        if 'password' in request.POST:
            cp_form = ChangePasswordForm(request.POST, instance=request.user)
            if cp_form.is_valid():
                request.user.set_password(cp_form.cleaned_data['password'])
                request.user.save()
                messages.success(request, 'Yay. You have changed your password!')
                return redirect(reverse('web.member.views.account'))
    return render_to_response('member/account.html',
                              {'cp_form': cp_form,
                               'mp': mp,
                               'mp_old': mp_old},
                              context_instance=RequestContext(request))
@login_required
def thanks(request):
    """Post-payment landing page: records a 1-year MemberPayment.

    The payment is only recorded when a referer header is present and does
    not mention paypal; the new period starts where the latest existing
    payment ends (or now, for first-time payers).
    """
    mp = None
    # BUG FIX: the original checked `'HTTP_REFERER' not in request.META` and
    # then read request.META['HTTP_REFERER'], so every request without a
    # referer raised KeyError. The guard must be membership, not absence.
    if 'HTTP_REFERER' in request.META:
        refer = request.META['HTTP_REFERER']
        if 'paypal' not in refer:
            try:
                # Extend from the end of the latest payment period.
                mp_check = MemberPayment.objects.filter(user=request.user).order_by('-end_at')[0:1].get()
                date_start = mp_check.end_at
                date_end = date_start + relativedelta(years=1)
            except MemberPayment.DoesNotExist:  # first payment: start now
                date_start = datetime.now()
                date_end = datetime.now() + relativedelta(years=1)
            mp = MemberPayment(user=request.user, memo='1 year payment', amount=str(40.00), start_at=date_start, end_at=date_end)
            mp.save()
    return render_to_response('member/thanks.html',
                              {'mp': mp},
                              context_instance=RequestContext(request))
@login_required
def log_out(request):
    """End the session and send the user back to the public home page."""
    logout(request)
    home_url = reverse('web.general.views.home')
    return redirect(home_url)
| |
# Copyright (c) 2014 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Helpful routines for regression testing
#
# Add python-bitcoinrpc to module search path:
import os
import sys
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), "python-bitcoinrpc"))
from decimal import Decimal, ROUND_DOWN
import json
import random
import shutil
import subprocess
import time
import re
from bitcoinrpc.authproxy import AuthServiceProxy, JSONRPCException
from util import *
def p2p_port(n):
    """P2P listen port for node n, offset by PID so parallel runs don't collide."""
    pid_offset = os.getpid() % 999
    return 11000 + n + pid_offset

def rpc_port(n):
    """RPC port for node n, offset by PID so parallel runs don't collide."""
    pid_offset = os.getpid() % 999
    return 12000 + n + pid_offset
def check_json_precision():
    """Make sure json library being used does not lose precision converting BTC values"""
    amount = Decimal("20000000.00000003")
    encoded = json.dumps(float(amount))
    satoshis = int(json.loads(encoded) * 1.0e8)
    if satoshis != 2000000000000003:
        raise RuntimeError("JSON encode/decode loses precision")
def sync_blocks(rpc_connections):
    """
    Wait until everybody has the same block count
    """
    while True:
        heights = [conn.getblockcount() for conn in rpc_connections]
        # All equal (or no connections at all): done.
        if len(set(heights)) <= 1:
            break
        time.sleep(1)
def sync_mempools(rpc_connections):
    """
    Wait until everybody has the same transactions in their memory
    pools
    """
    while True:
        reference = set(rpc_connections[0].getrawmempool())
        if all(set(conn.getrawmempool()) == reference
               for conn in rpc_connections[1:]):
            break
        time.sleep(1)
# Map of node index -> subprocess.Popen handle for each running monetaryunitd.
monetaryunitd_processes = {}
def initialize_datadir(dirname, n):
    """Create node n's datadir under dirname and write its monetaryunit.conf.

    Returns the datadir path.
    """
    datadir = os.path.join(dirname, "node" + str(n))
    if not os.path.isdir(datadir):
        os.makedirs(datadir)
    conf_lines = [
        "regtest=1",
        "rpcuser=rt",
        "rpcpassword=rt",
        "port=" + str(p2p_port(n)),
        "rpcport=" + str(rpc_port(n)),
    ]
    with open(os.path.join(datadir, "monetaryunit.conf"), 'w') as conf:
        conf.write("\n".join(conf_lines) + "\n")
    return datadir
def initialize_chain(test_dir):
    """
    Create (or copy from cache) a 200-block-long chain and
    4 wallets.
    monetaryunitd and monetaryunit-cli must be in search path.
    """
    # Build the cached chain once; later calls just copy it into test_dir.
    if not os.path.isdir(os.path.join("cache", "node0")):
        devnull = open("/dev/null", "w+")
        # Create cache directories, run monetaryunitds:
        for i in range(4):
            datadir=initialize_datadir("cache", i)
            args = [ os.getenv("MONETARYUNITD", "monetaryunitd"), "-keypool=1", "-datadir="+datadir, "-discover=0" ]
            if i > 0:
                # Later nodes connect to node 0 to form one network.
                args.append("-connect=127.0.0.1:"+str(p2p_port(0)))
            monetaryunitd_processes[i] = subprocess.Popen(args)
            # -rpcwait blocks until the daemon's RPC interface answers.
            subprocess.check_call([ os.getenv("MONETARYUNITCLI", "monetaryunit-cli"), "-datadir="+datadir,
                                    "-rpcwait", "getblockcount"], stdout=devnull)
        devnull.close()
        rpcs = []
        for i in range(4):
            try:
                url = "http://rt:rt@127.0.0.1:%d"%(rpc_port(i),)
                rpcs.append(AuthServiceProxy(url))
            except:
                sys.stderr.write("Error connecting to "+url+"\n")
                sys.exit(1)
        # Create a 200-block-long chain; each of the 4 nodes
        # gets 25 mature blocks and 25 immature.
        # blocks are created with timestamps 10 minutes apart, starting
        # at 1 Jan 2014
        block_time = 1388534400
        for i in range(2):
            for peer in range(4):
                for j in range(25):
                    set_node_times(rpcs, block_time)
                    rpcs[peer].setgenerate(True, 1)
                    block_time += 10*60
                # Must sync before next peer starts generating blocks
                sync_blocks(rpcs)
        # Shut them down, and clean up cache directories:
        stop_nodes(rpcs)
        wait_monetaryunitds()
        # Remove volatile files so the cache only holds chain/wallet state.
        for i in range(4):
            os.remove(log_filename("cache", i, "debug.log"))
            os.remove(log_filename("cache", i, "db.log"))
            os.remove(log_filename("cache", i, "peers.dat"))
            os.remove(log_filename("cache", i, "fee_estimates.dat"))
    # Copy the cached chain into the test directory and rewrite the ports.
    for i in range(4):
        from_dir = os.path.join("cache", "node"+str(i))
        to_dir = os.path.join(test_dir, "node"+str(i))
        shutil.copytree(from_dir, to_dir)
        initialize_datadir(test_dir, i) # Overwrite port/rpcport in bitcoin.conf
def initialize_chain_clean(test_dir, num_nodes):
    """
    Create an empty blockchain and num_nodes wallets.
    Useful if a test case wants complete control over initialization.
    """
    for node_index in range(num_nodes):
        datadir = initialize_datadir(test_dir, node_index)
def _rpchost_to_args(rpchost):
'''Convert optional IP:port spec to rpcconnect/rpcport args'''
if rpchost is None:
return []
match = re.match('(\[[0-9a-fA-f:]+\]|[^:]+)(?::([0-9]+))?$', rpchost)
if not match:
raise ValueError('Invalid RPC host spec ' + rpchost)
rpcconnect = match.group(1)
rpcport = match.group(2)
if rpcconnect.startswith('['): # remove IPv6 [...] wrapping
rpcconnect = rpcconnect[1:-1]
rv = ['-rpcconnect=' + rpcconnect]
if rpcport:
rv += ['-rpcport=' + rpcport]
return rv
def start_node(i, dirname, extra_args=None, rpchost=None):
    """
    Start a monetaryunitd and return RPC connection to it

    i: node index (selects datadir and ports); dirname: test directory;
    extra_args: optional extra daemon command-line flags;
    rpchost: optional "host[:port]" for the RPC interface.
    """
    datadir = os.path.join(dirname, "node"+str(i))
    args = [ os.getenv("MONETARYUNITD", "monetaryunitd"), "-datadir="+datadir, "-keypool=1", "-discover=0", "-rest" ]
    if extra_args is not None: args.extend(extra_args)
    monetaryunitd_processes[i] = subprocess.Popen(args)
    # -rpcwait blocks until the daemon's RPC interface is reachable.
    devnull = open("/dev/null", "w+")
    subprocess.check_call([ os.getenv("MONETARYUNITCLI", "monetaryunit-cli"), "-datadir="+datadir] +
                          _rpchost_to_args(rpchost)  +
                          ["-rpcwait", "getblockcount"], stdout=devnull)
    devnull.close()
    url = "http://rt:rt@%s:%d" % (rpchost or '127.0.0.1', rpc_port(i))
    proxy = AuthServiceProxy(url)
    proxy.url = url # store URL on proxy for info
    return proxy
def start_nodes(num_nodes, dirname, extra_args=None, rpchost=None):
    """
    Start multiple monetaryunitds, return RPC connections to them
    """
    if extra_args is None:
        extra_args = [None] * num_nodes
    return [start_node(node_index, dirname, extra_args[node_index], rpchost)
            for node_index in range(num_nodes)]
def log_filename(dirname, n_node, logname):
    """Path of a log file inside node n_node's regtest directory."""
    node_dir = "node" + str(n_node)
    return os.path.join(dirname, node_dir, "regtest", logname)
def stop_node(node, i):
    """Stop node i via RPC and wait for its daemon process to exit."""
    node.stop()
    # Reap the child process, then forget its handle.
    monetaryunitd_processes[i].wait()
    del monetaryunitd_processes[i]
def stop_nodes(nodes):
    """Send a stop RPC to every node, then clear the caller's list in place."""
    for conn in nodes:
        conn.stop()
    # Emptying array closes connections as a side effect
    nodes[:] = []
def set_node_times(nodes, t):
    """Set the mock clock of every node to timestamp t."""
    for conn in nodes:
        conn.setmocktime(t)
def wait_monetaryunitds():
    # Wait for all monetaryunitds to cleanly exit
    for monetaryunitd in monetaryunitd_processes.values():
        monetaryunitd.wait()
    # All children reaped; drop the stale Popen handles.
    monetaryunitd_processes.clear()
def connect_nodes(from_connection, node_num):
    """One-shot connect from_connection to local node node_num, then wait
    for the version handshake to complete."""
    target = "127.0.0.1:" + str(p2p_port(node_num))
    from_connection.addnode(target, "onetry")
    # poll until version handshake complete to avoid race conditions
    # with transaction relaying
    while any(peer['version'] == 0 for peer in from_connection.getpeerinfo()):
        time.sleep(0.1)
def connect_nodes_bi(nodes, a, b):
    """Connect nodes[a] and nodes[b] to each other (both directions)."""
    for src, dst in ((a, b), (b, a)):
        connect_nodes(nodes[src], dst)
def find_output(node, txid, amount):
    """
    Return index to output of txid with value amount
    Raises exception if there is none.
    """
    txdata = node.getrawtransaction(txid, 1)
    for index, vout in enumerate(txdata["vout"]):
        if vout["value"] == amount:
            return index
    raise RuntimeError("find_output txid %s : %s not found"%(txid,str(amount)))
def gather_inputs(from_node, amount_needed, confirmations_required=1):
    """
    Return a random set of unspent txouts that are enough to pay amount_needed
    """
    assert(confirmations_required >= 0)
    utxo = from_node.listunspent(confirmations_required)
    random.shuffle(utxo)
    inputs = []
    total_in = Decimal("0.00000000")
    # Pop shuffled utxos until we have enough value (or run out).
    while total_in < amount_needed and utxo:
        txout = utxo.pop()
        total_in += txout["amount"]
        inputs.append({"txid": txout["txid"], "vout": txout["vout"],
                       "address": txout["address"]})
    if total_in < amount_needed:
        raise RuntimeError("Insufficient funds: need %d, have %d"%(amount_needed, total_in))
    return (total_in, inputs)
def make_change(from_node, amount_in, amount_out, fee):
    """
    Create change output(s), return them
    """
    outputs = {}
    spent = amount_out + fee
    change = amount_in - spent
    if change > spent * 2:
        # Create an extra change output to break up big inputs
        extra_address = from_node.getnewaddress()
        # Split change in two, being careful of rounding:
        half = Decimal(change / 2).quantize(Decimal('0.00000001'),
                                            rounding=ROUND_DOWN)
        outputs[extra_address] = half
        change = amount_in - spent - half
    if change > 0:
        outputs[from_node.getnewaddress()] = change
    return outputs
def send_zeropri_transaction(from_node, to_node, amount, fee):
    """
    Create&broadcast a zero-priority transaction.
    Returns (txid, hex-encoded-txdata)
    Ensures transaction is zero-priority by first creating a send-to-self,
    then using its output
    """
    # Create a send-to-self with confirmed inputs:
    self_address = from_node.getnewaddress()
    # Gather enough to cover the payment plus two fees (self-send + real send).
    (total_in, inputs) = gather_inputs(from_node, amount+fee*2)
    outputs = make_change(from_node, total_in, amount+fee, fee)
    outputs[self_address] = float(amount+fee)
    self_rawtx = from_node.createrawtransaction(inputs, outputs)
    self_signresult = from_node.signrawtransaction(self_rawtx)
    self_txid = from_node.sendrawtransaction(self_signresult["hex"], True)
    # Locate the self-send output we just created.
    vout = find_output(from_node, self_txid, amount+fee)
    # Now immediately spend the output to create a 1-input, 1-output
    # zero-priority transaction:
    inputs = [ { "txid" : self_txid, "vout" : vout } ]
    outputs = { to_node.getnewaddress() : float(amount) }
    rawtx = from_node.createrawtransaction(inputs, outputs)
    signresult = from_node.signrawtransaction(rawtx)
    txid = from_node.sendrawtransaction(signresult["hex"], True)
    return (txid, signresult["hex"])
def random_zeropri_transaction(nodes, amount, min_fee, fee_increment, fee_variants):
    """
    Create a random zero-priority transaction.
    Returns (txid, hex-encoded-transaction-data, fee)
    """
    sender = random.choice(nodes)
    receiver = random.choice(nodes)
    fee = min_fee + fee_increment * random.randint(0, fee_variants)
    txid, txhex = send_zeropri_transaction(sender, receiver, amount, fee)
    return (txid, txhex, fee)
def random_transaction(nodes, amount, min_fee, fee_increment, fee_variants):
    """
    Create a random transaction.
    Returns (txid, hex-encoded-transaction-data, fee)
    """
    sender = random.choice(nodes)
    receiver = random.choice(nodes)
    fee = min_fee + fee_increment * random.randint(0, fee_variants)
    total_in, inputs = gather_inputs(sender, amount + fee)
    outputs = make_change(sender, total_in, amount, fee)
    outputs[receiver.getnewaddress()] = float(amount)
    raw_tx = sender.createrawtransaction(inputs, outputs)
    signed = sender.signrawtransaction(raw_tx)
    txid = sender.sendrawtransaction(signed["hex"], True)
    return (txid, signed["hex"], fee)
def assert_equal(thing1, thing2):
    """Raise AssertionError unless the two values compare equal."""
    if thing1 == thing2:
        return
    raise AssertionError("%s != %s"%(str(thing1),str(thing2)))

def assert_greater_than(thing1, thing2):
    """Raise AssertionError unless thing1 > thing2."""
    if thing1 > thing2:
        return
    raise AssertionError("%s <= %s"%(str(thing1),str(thing2)))

def assert_raises(exc, fun, *args, **kwds):
    """Assert that fun(*args, **kwds) raises an exception of class exc."""
    try:
        fun(*args, **kwds)
    except exc:
        pass
    except Exception as err:
        raise AssertionError("Unexpected exception raised: "+type(err).__name__)
    else:
        raise AssertionError("No exception raised")
| |
# -*- coding: utf-8 -*-
"""
Created on Tue Feb 16 08:04:13 2016
@author: GDM
"""
import re
import os
import datetime
import time
class Upperlevel:
    """Base class whose repr lists the instance's public attribute names."""

    def __repr__(self):
        public_names = [n for n in dir(self) if not n.startswith('__')]
        return self.__class__.__name__ + ': ' + '(' + ', '.join(public_names) + ')'
class Type_analysis(Upperlevel):
    """Flags (sheet row 0) selecting which analysis stages to run."""

    def __init__(self, dictionary):
        flags = dictionary[0]
        self.Trim = flags['A']
        self.AllignPhix = flags['B']
        self.AllignGenome = flags['C']
        self.IIDefinition = flags['D']
        self.GeneDefinition = flags['E']
        self.GroupAnalysis = flags['F']
        self.Tables = flags['G']
        self.Design = flags['H']
class General(Upperlevel):
    """General run settings (sheet row 1) plus the derived pair-end flag and date."""

    def __init__(self, dictionary):
        row = dictionary[1]
        self.operator = row['A']
        self.storing_loc = row['B']
        # Pair-end mode when any stage declares pair-end inputs.
        pair_end_count = (dictionary[2]['B'] + dictionary[3]['D'] +
                          dictionary[4]['E'] + dictionary[5]['E'] +
                          dictionary[6]['G'])
        self.pair_ends = pair_end_count >= 1
        self.date = datetime.date.today().isoformat()
class QS(Upperlevel):
    """Quality-score trimming parameters."""

    def __init__(self, first_base, sequence_3end):
        # first_base: threshold applied at the first base;
        # sequence_3end: limit used at the 3' end of the sequence.
        self.first_base = first_base
        self.sequence_3end = sequence_3end
class Trim(Upperlevel):
    """Read-trimming stage settings (sheet row 2)."""

    def __init__(self, dictionary, pair_ends):
        row = dictionary[2]
        self.lib_numb = row['A']
        self.lib_names = row['C']
        self.input_files_I = row['D']
        self.p7adaptor = row['F']
        if pair_ends:
            # Second-read inputs and adaptor only exist for pair-end runs.
            self.input_files_II = row['E']
            self.p5adaptor = row['G']
        self.QS = QS(row['H'], row['J'])
        self.store = row['K']
class AllignPhix(Upperlevel):
    """PhiX-alignment stage settings (sheet row 3)."""

    def __init__(self, dictionary, pair_ends, starting):
        row = dictionary[3]
        self.reference = row['A']
        self.store = row['B']
        if starting:
            # This stage is the pipeline entry point: inputs come from the sheet.
            self.lib_numb = row['C']
            self.lib_names = row['E']
            self.input_files_I = row['F']
            if pair_ends:
                self.input_files_II = row['G']
        else:
            # Inputs are filled in later from the previous stage's outputs.
            self.lib_numb = int()
            self.lib_names = []
            self.input_files_I = []
            if pair_ends:
                self.input_files_II = []
class AllignGenome(Upperlevel):
    """Genome-alignment stage settings (sheet row 4)."""

    def __init__(self, dictionary, pair_ends, starting):
        row = dictionary[4]
        self.program_type = row['A']
        self.reference = row['B']
        self.store = row['C']
        if starting:
            # Entry point: read inputs straight from the sheet.
            self.lib_numb = row['D']
            self.lib_names = row['F']
            self.input_files_I = row['G']
            if pair_ends:
                self.input_files_II = row['H']
        else:
            # Inputs are filled in later from the previous stage's outputs.
            self.lib_numb = int()
            self.lib_names = []
            self.input_files_I = []
            if pair_ends:
                self.input_files_II = []
class IIDefinition(Upperlevel):
    """Insertion-interval definition stage settings (sheet row 5)."""

    def __init__(self, dictionary, pair_ends, starting):
        row = dictionary[5]
        if starting:
            self.lib_numb = row['E']
            self.lib_names = row['G']
            self.input_files = row['H']
        else:
            self.lib_numb = int()
            self.lib_names = []
            self.input_files = []
        self.ins_iv = row['A']
        # Duplicate-read removal is only meaningful with pair-end data.
        self.reads_duplicate = row['B'] if pair_ends else False
        self.fidelity = row['C']
        self.fidelity_limit = row['D'] if self.fidelity else 0
class Parameters(Upperlevel):
    """Bundle of the four per-gene metrics: II, KI, Bias and Reads."""

    def __init__(self, II, KI, Bias, Reads):
        self.II = II
        self.KI = KI
        self.Bias = Bias
        self.Reads = Reads
class GeneDefinition(Upperlevel):
    """Gene-definition stage settings (sheet row 6)."""

    def __init__(self, dictionary, starting):
        row = dictionary[6]
        self.reference = row['A']
        self.Parameters = Parameters(II=row['B'], KI=row['C'],
                                     Bias=row['D'], Reads=row['E'])
        if starting:
            self.lib_numb = row['F']
            self.lib_names = row['H']
            self.input_files = row['I']
        else:
            self.lib_numb = int()
            # NOTE(review): reads_norm is only set on this branch in the
            # original — confirm whether the starting case should set it too.
            self.reads_norm = False
            self.lib_names = []
            self.input_files = []
class Outlier(Upperlevel):
    """Outlier-analysis settings used within the group analysis (sheet row 7)."""

    def __init__(self, dictionary):
        row = dictionary[7]
        self.perform = row['J']
        if self.perform:
            # Which metrics to screen, and the cut-off value for each.
            self.Parameters = Parameters(II=row['K'], KI=row['L'],
                                         Bias=row['M'], Reads=row['N'])
            self.Limits = Parameters(II=row['O'], KI=row['P'],
                                     Bias=row['Q'], Reads=row['R'])
class Experiments(Upperlevel):
    """A named group of experiments used by the group analysis."""

    def __init__(self, name, experiments):
        self.name = name
        self.experiments = experiments
class GroupAnalysis(Upperlevel):
    """Group-analysis stage settings (sheet row 7)."""

    def __init__(self, dictionary, starting):
        row = dictionary[7]
        self.group_numb = row['A']
        self.storage_loc = ''  # filled in later (see Info.make_folders)
        self.Reference = Experiments(row['B'], row['C'])
        self.Others = Experiments(row['D'], row['E'])
        self.Parameters = Parameters(II=row['F'], KI=row['G'],
                                     Bias=row['H'], Reads=row['I'])
        self.Outlier = Outlier(dictionary)
        if starting:
            self.lib_numb = row['S']
            self.lib_names = row['T']
            self.input_files = row['U']
        else:
            self.lib_numb = int()
            self.lib_names = []
            self.input_files = []
class Tables(Upperlevel):
    """Table-export stage settings (sheet row 8)."""

    def __init__(self, dictionary, starting):
        row = dictionary[8]
        self.names = row['A']
        self.keys = row['B']
        self.filters = row['C']
        # When not the entry point, the input path is resolved later.
        self.input_files = row['D'] if starting else ''
class Design(Upperlevel):
    """Gene-representation (design) stage settings (sheet row 9)."""

    def __init__(self, dictionary, starting):
        row = dictionary[9]
        self.reference = row['A']
        self.genes = row['B']
        # When not the entry point, the input path is resolved later.
        self.input_files = row['C'] if starting else ''
class Info(Upperlevel):
def __init__(self, dictionary):
    """Parse the stage-selection flags and general settings from the sheet dict."""
    self.Type_analysis = Type_analysis(dictionary)
    self.General = General(dictionary)
def upload_informations(self, dictionary):
    """Instantiate a settings object for every enabled stage.

    Each stage gets starting=True when it is the first enabled stage, so it
    reads its inputs from the sheet instead of a previous stage's output.
    """
    if self.Type_analysis.Trim:
        self.Trim = Trim(dictionary,self.General.pair_ends)
    if self.Type_analysis.AllignPhix:
        self.AllignPhix = AllignPhix(dictionary,self.General.pair_ends,starting = not(self.Type_analysis.Trim))
    if self.Type_analysis.AllignGenome:
        # NOTE(review): starting is True when Trim+AllignPhix sums to 0 OR 2 —
        # confirm the ==1 test is intended rather than "any earlier stage enabled".
        self.AllignGenome = AllignGenome (dictionary,self.General.pair_ends,starting = False if (self.Type_analysis.Trim + self.Type_analysis.AllignPhix) == 1 else True)
    if self.Type_analysis.IIDefinition:
        self.IIDefinition = IIDefinition (dictionary,self.General.pair_ends, starting = not(self.Type_analysis.AllignGenome))
    if self.Type_analysis.GeneDefinition:
        self.GeneDefinition = GeneDefinition(dictionary, starting = not(self.Type_analysis.IIDefinition))
    if self.Type_analysis.GroupAnalysis:
        self.GroupAnalysis = GroupAnalysis(dictionary, starting = not(self.Type_analysis.GeneDefinition))
    if self.Type_analysis.Tables:
        self.Tables = Tables (dictionary, starting = not(self.Type_analysis.GroupAnalysis))
    if self.Type_analysis.Design:
        # NOTE(review): Design's starting flag checks GroupAnalysis, not Tables,
        # breaking the "previous stage" pattern used above — confirm intended.
        self.Design = Design (dictionary, starting = not(self.Type_analysis.GroupAnalysis))
def starting(self):
    """Record the pipeline entry point: the first enabled stage, in order."""
    stage_order = ('Trim', 'AllignPhix', 'AllignGenome', 'IIDefinition',
                   'GeneDefinition', 'GroupAnalysis', 'Tables', 'Design')
    for stage_name in stage_order:
        if getattr(self.Type_analysis, stage_name):
            self.Type_analysis.starting = getattr(self, stage_name)
            break
def to_do(self):
    """Collect every enabled stage other than the entry point, in pipeline order."""
    stage_order = ('Trim', 'AllignPhix', 'AllignGenome', 'IIDefinition',
                   'GeneDefinition', 'GroupAnalysis', 'Tables', 'Design')
    pending = []
    for stage_name in stage_order:
        if getattr(self.Type_analysis, stage_name):
            stage = getattr(self, stage_name)
            if self.Type_analysis.starting != stage:
                pending.append(stage)
    self.Type_analysis.to_do = pending
def make_folders(self):
    """Function to create folders were to store subsequent data:
    After checking if the storing_location does not exist (and eventually asking for a new name), creates:
    the main folder: self.General.storing_loc + '/' exp_name + '_' + self.General.date
    the graph folder: main folder +'/graph/'
    the row folder: main folder +'/row/' """
    # Per-experiment folders are only needed when the run starts before the
    # group-analysis/table/design stages (those read already-stored data).
    do_folder_experiments = True
    if self.Type_analysis.GroupAnalysis:
        if self.Type_analysis.starting == self.GroupAnalysis:
            do_folder_experiments = False
    if self.Type_analysis.Tables:
        if self.Type_analysis.starting == self.Tables:
            do_folder_experiments = False
    if self.Type_analysis.Design:
        if self.Type_analysis.starting == self.Design:
            do_folder_experiments = False
    if do_folder_experiments:
        for exp in self.Type_analysis.starting.lib_names:
            location = os.path.join(self.General.storing_loc,exp + '_' +self.General.date)
            # Keep prompting until a non-existing directory name is chosen.
            while os.path.isdir(location):
                print 'The directory %s already exists: '% location
                new_name = raw_input('Change the experiment name (the previous one was %s): ' % exp )
                new_loc = os.path.join(self.General.storing_loc,new_name + '_' +self.General.date)
                while os.path.isdir(new_loc):
                    new_name = raw_input('The directory %s already exists. Change the experiment name (the previous one was %s): '%(new_loc,new_name))
                    new_loc = os.path.join(self.General.storing_loc,new_name + '_' +self.General.date)
                location = new_loc
                # Propagate the rename into the starting stage's library list.
                self.Type_analysis.starting.lib_names = [new_name if x == exp else x for x in self.Type_analysis.starting.lib_names]
            os.mkdir(location)
            graph_loc = os.path.join(location, "graph")
            os.mkdir(graph_loc)
            row = os.path.join(location, "row")
            os.mkdir(row)
    if self.Type_analysis.GroupAnalysis:
        # Group analysis stores results in a dated folder under <storing_loc>/Analysis.
        analysis_main = os.path.join(self.General.storing_loc,'Analysis')
        if not os.path.isdir(analysis_main):
            os.mkdir(analysis_main)
        analysis_loc = os.path.join(analysis_main, self.General.date)
        while os.path.isdir(analysis_loc):
            print 'The directory %s already exists: '% analysis_loc
            new_name = raw_input('Change the experiment name (the previous one was %s): ' % self.General.date)
            analysis_loc = os.path.join(analysis_main,new_name)
            while os.path.isdir(analysis_loc):
                new_name = raw_input('The directory %s already exists. Change the experiment name (the previous one was %s): '%(analysis_loc,new_name))
                analysis_loc = os.path.join(analysis_main,new_name)
        self.GroupAnalysis.storage_loc = analysis_loc
        os.mkdir(analysis_loc)
        row_loc = os.path.join(analysis_loc, "row")
        os.mkdir(row_loc)
        graph_loc = os.path.join(analysis_loc, "graph")
        os.mkdir(graph_loc)
def print_info (self):
    """Pretty-print every configured setting, section by section, to stdout."""
    print "\n\n***\tInformations on experiments' analysis\t***"
    # General settings.
    print 'Generals'
    print '\t{:20s}:\t'.format('Operator') + '%s'%self.General.operator
    print '\t{:20s}:\t'.format('Date') + '%s'%self.General.date
    print '\t{:20s}:\t'.format('Storing location') + '%s'%self.General.storing_loc
    print '\t{:20s}:\t'.format('Pair ends') + '%s'% self.General.pair_ends
    # Which stages are enabled.
    print 'Type of analysis'
    print '\t{:20s}:\t'.format('Trim') + '%s' % self.Type_analysis.Trim
    print '\t{:20s}:\t'.format('AllignPhix') + '%s'%self.Type_analysis.AllignPhix
    print '\t{:20s}:\t'.format('AllignGenome') + '%s'%self.Type_analysis.AllignGenome
    print '\t{:20s}:\t'.format('IIDefinition') + '%s'%self.Type_analysis.IIDefinition
    print '\t{:20s}:\t'.format('GeneDefinition') + '%s'%self.Type_analysis.GeneDefinition
    print '\t{:20s}:\t'.format('GroupAnalysis') + '%s'%self.Type_analysis.GroupAnalysis
    print '\t{:20s}:\t'.format('Tables') + '%s'%self.Type_analysis.Tables
    # Trim stage.
    if self.Type_analysis.Trim:
        print 'Trim'
        print '\t{:20s}:\t'.format('Libraries number') + '%s' % self.Trim.lib_numb
        print '\t{:20s}:\t'.format('Libraries name') + '%s' % '\n\t{:20s}\t'.format('').join(self.Trim.lib_names)
        print '\t{:20s}:\t'.format('Input') + '%s' % '\n\t{:20s}\t'.format('').join(self.Trim.input_files_I)
        if self.General.pair_ends:
            print '\t{:20s}:\t'.format('Input_pairends') + '%s' % '\n\t{:20s}\t'.format('').join(self.Trim.input_files_II)
        print '\t{:20s}:\t'.format('P7 adaptor') + '%s' % self.Trim.p7adaptor
        if self.General.pair_ends:
            print '\t{:20s}:\t'.format('P5 adaptor') + '%s' % self.Trim.p5adaptor
        print '\tQS parmeters'
        print '\t\t{:20s}:\t'.format('First base Q') + '%s' % self.Trim.QS.first_base
        print '\t\t{:20s}:\t'.format('Extension limit') + '%s' % self.Trim.QS.sequence_3end
        print '\t{:20s}:\t'.format('Permanently store') + '%s' % self.Trim.store
    # PhiX-alignment stage.
    if self.Type_analysis.AllignPhix:
        print 'AllignPhix'
        print '\t{:20s}:\t'.format('Phix reference') + '%s' % self.AllignPhix.reference
        print '\t{:20s}:\t'.format('Permanently store') + '%s' % self.AllignPhix.store
        print '\t{:20s}:\t'.format('Libraries number') + '%s' % self.AllignPhix.lib_numb
        print '\t{:20s}:\t'.format('Libraries name') + '%s' % '\n\t{:20s}\t'.format('').join(self.AllignPhix.lib_names)
        print '\t{:20s}:\t'.format('Lib input') + '%s' % '\n\t{:20s}\t'.format('').join(self.AllignPhix.input_files_I)
        if self.General.pair_ends:
            print '\t{:20s}:\t'.format('Lib input pair_end') + '%s' % '\n\t{:20s}\t'.format('').join(self.AllignPhix.input_files_II)
    # Genome-alignment stage.
    if self.Type_analysis.AllignGenome:
        print 'AllignGenome'
        print '\t{:20s}:\t'.format('Allignment Program') + '%s' % self.AllignGenome.program_type
        print '\t{:20s}:\t'.format('Reference') + '%s' % self.AllignGenome.reference
        print '\t{:20s}:\t'.format('Permanently store') + '%s' % self.AllignGenome.store
        print '\t{:20s}:\t'.format('Libraries number') + '%s' % self.AllignGenome.lib_numb
        print '\t{:20s}:\t'.format('Libraries name') + '%s' % '\n\t{:20s}\t'.format('').join(self.AllignGenome.lib_names)
        print '\t{:20s}:\t'.format('Lib input') + '%s' % '\n\t{:20s}\t'.format('').join(self.AllignGenome.input_files_I)
        if self.General.pair_ends:
            print '\t{:20s}:\t'.format('Lib input pair_end') + '%s' % '\n\t{:20s}\t'.format('').join(self.AllignGenome.input_files_II)
    # Insertion-interval definition stage.
    if self.Type_analysis.IIDefinition:
        print 'I.I. Definition'
        print '\t{:20s}:\t'.format('Libraries number') + '%s' % self.IIDefinition.lib_numb
        print '\t{:20s}:\t'.format('Libraries name') + '%s' % '\n\t{:20s}\t'.format('').join(self.IIDefinition.lib_names)
        print '\t{:20s}:\t'.format('Lib input') + '%s' % '\n\t{:20s}\t'.format('').join(self.IIDefinition.input_files)
        print '\t{:20s}:\t'.format('Bases to define I.I.') + '%s' % self.IIDefinition.ins_iv
        print '\t{:20s}:\t'.format('Alignment fidelity limit') + '%s' % self.IIDefinition.fidelity
        if self.IIDefinition.fidelity:
            print '\t{:20s}:\t'.format('Fidelity limit value') + '%s' % self.IIDefinition.fidelity_limit
        if self.General.pair_ends:
            print '\t{:20s}:\t'.format('Remove duplicates') + '%s' % self.IIDefinition.reads_duplicate
    # Gene-definition stage.
    if self.Type_analysis.GeneDefinition:
        print 'Genes Definition'
        print '\t{:20s}:\t'.format('Genes annotation') + '%s' % self.GeneDefinition.reference
        print '\t{:20s}'.format('Parameters')
        print '\t\t{:8s}:\t'.format('II') + '%s' % self.GeneDefinition.Parameters.II
        print '\t\t{:8s}:\t'.format('KI') + '%s' % self.GeneDefinition.Parameters.KI
        print '\t\t{:8s}:\t'.format('Bias') + '%s' % self.GeneDefinition.Parameters.Bias
        print '\t\t{:8s}:\t'.format('Reads') + '%s' % self.GeneDefinition.Parameters.Reads
        print '\t{:20s}:\t'.format('Libraries number') + '%s' % self.GeneDefinition.lib_numb
        print '\t{:20s}:\t'.format('Libraries name') + '%s' % '\n\t{:20s}\t'.format('').join(self.GeneDefinition.lib_names)
        print '\t{:20s}:\t'.format('Lib input') + '%s' % '\n\t{:20s}\t'.format('').join(self.GeneDefinition.input_files)
    # Group-analysis stage (including the optional outlier screening).
    if self.Type_analysis.GroupAnalysis:
        print 'Group analysis'
        print '\t{:20s}:\t'.format('Storage location') + '%s' % self.GroupAnalysis.storage_loc
        print '\t{:20s}:\t'.format('Libraries number') + '%s' % self.GroupAnalysis.lib_numb
        print '\t{:20s}:\t'.format('Libraries name') + '%s' % '\n\t{:20s}\t'.format('').join(self.GroupAnalysis.lib_names)
        print '\t{:20s}:\t'.format('Lib input') + '%s' % '\n\t{:20s}\t'.format('').join(self.GroupAnalysis.input_files)
        print '\t{:20s}:\t'.format('Groups number') + '%s' % self.GroupAnalysis.group_numb
        print '\tReference exp.'
        print '\t\t{:20s}:\t'.format('Name') + '%s' % self.GroupAnalysis.Reference.name
        print '\t\t{:20s}:\t'.format('Experiments') + '%s' % '\n\t\t{:20s}\t'.format('').join(self.GroupAnalysis.Reference.experiments)
        print '\tOther groups'
        for name in self.GroupAnalysis.Others.name:
            print '\t\t{:20s}:\t'.format('Name') + '%s' % name
            print '\t\t{:20s}:\t'.format('Experiments') + '%s' % '\n\t\t{:20s}\t'.format('').join(self.GroupAnalysis.Others.experiments[self.GroupAnalysis.Others.name.index(name)])
        print '\t{:20s}'.format('Parameters')
        print '\t\t{:8s}:\t'.format('II') + '%s' % self.GroupAnalysis.Parameters.II
        print '\t\t{:8s}:\t'.format('KI') + '%s' % self.GroupAnalysis.Parameters.KI
        print '\t\t{:8s}:\t'.format('Bias') + '%s' % self.GroupAnalysis.Parameters.Bias
        print '\t\t{:8s}:\t'.format('Reads') + '%s' % self.GroupAnalysis.Parameters.Reads
        print '\t{:20s}:\t'.format('Outlier analysis') + '%s' % self.GroupAnalysis.Outlier.perform
        if self.GroupAnalysis.Outlier.perform:
            # Trailing commas keep the limit on the same output line (py2 print).
            print '\t\t{:20s}'.format('Parameters')
            print '\t\t\t{:8s}:\t'.format('II') + '%s' % self.GroupAnalysis.Outlier.Parameters.II,
            if self.GroupAnalysis.Outlier.Parameters.II:
                print '=> Value limit: %s' % self.GroupAnalysis.Outlier.Limits.II
            else:
                print ''
            print '\t\t\t{:8s}:\t'.format('KI') + '%s' % self.GroupAnalysis.Outlier.Parameters.KI,
            if self.GroupAnalysis.Outlier.Parameters.KI:
                print '=> Value limit: %s' % self.GroupAnalysis.Outlier.Limits.KI
            else:
                print ''
            print '\t\t\t{:8s}:\t'.format('Bias') + '%s' % self.GroupAnalysis.Outlier.Parameters.Bias,
            if self.GroupAnalysis.Outlier.Parameters.Bias:
                print '=> Value limit: %s' % self.GroupAnalysis.Outlier.Limits.Bias
            else:
                print ''
            print '\t\t\t{:8s}:\t'.format('Reads') + '%s' % self.GroupAnalysis.Outlier.Parameters.Reads,
            if self.GroupAnalysis.Outlier.Parameters.Reads:
                print '=> Value limit: %s' % self.GroupAnalysis.Outlier.Limits.Reads
            else:
                print ''
    # Table-export stage.
    if self.Type_analysis.Tables:
        print 'Tables'
        print '\t{:20s}:\t'.format('Data_reference') + self.Tables.input_files
        for table in self.Tables.names:
            position = self.Tables.names.index(table)
            print '\t{:20s}:\t'.format('Name') + table
            print '\t\t{:20s}:\t\t'.format('Keys') + str(self.Tables.keys[position]).strip('[]').replace("'","").replace(" ","")
            # Render each filter as "<parameter><operation>[<number>]".
            filter_list = []
            if position < len(self.Tables.filters):
                for n in self.Tables.filters[position]:
                    parameter = str(n['parameter']).strip('[]').replace("'","").replace(" ","")
                    if n.has_key('number'):
                        filter_list.append('%s%s%i' % (parameter,n['operation'],n['number']))
                    else:
                        filter_list.append('%s%s' % (parameter,n['operation']))
            print '\t\t{:20s}:\t\t'.format('Filters') + ", ".join(filter_list)
    # Gene-representation (design) stage.
    if self.Type_analysis.Design:
        print 'Gene rappresentation'
        print '\t{:20s}:\t'.format('Genes annotation') + self.Design.reference
        print '\t{:20s}:\t'.format('Genes to analyse') + '%s' % '\n\t{:20s}\t'.format('').join(self.Design.genes)
        print '\t{:20s}:\t'.format('Data_reference') + self.Design.input_files
    def fill_up(self):
        # Resolve the concrete input-file paths for every analysis step queued
        # in Type_analysis.to_do.  Each step consumes the files produced by the
        # previous pipeline stage, identified by a file-name suffix
        # (terminal_string).  Mutates the step objects in place; returns None.
        def fill_up_categories(self,category,terminal_string,pair_end = True):
            # Copy the library bookkeeping onto the step, then build one path
            # per library: <storing_loc>/<exp>_<date>/row/<exp><suffix>.
            category.lib_numb = self.Type_analysis.starting.lib_numb
            category.lib_names = self.Type_analysis.starting.lib_names
            if pair_end:
                input_ =[]
                for exp in self.Type_analysis.starting.lib_names:
                    file_input = os.path.join(self.General.storing_loc,exp + '_' +self.General.date,'row',exp + terminal_string)
                    input_.append(file_input)
                category.input_files_I = input_
                if self.General.pair_ends:
                    # Second read of the pair lives in '<exp>_pairend<suffix>'.
                    input_ = []
                    for exp in self.Type_analysis.starting.lib_names:
                        file_input = os.path.join(self.General.storing_loc,exp + '_' +self.General.date,'row',exp + '_pairend' + terminal_string)
                        input_.append(file_input)
                    category.input_files_II = input_
            else:
                # Single-ended steps get a single 'input_files' list.
                input_ =[]
                for exp in self.Type_analysis.starting.lib_names:
                    file_input = os.path.join(self.General.storing_loc,exp + '_' +self.General.date,'row',exp + terminal_string)
                    input_.append(file_input)
                category.input_files = input_
        # Dispatch on which pipeline stage each queued category object is; the
        # suffix passed names the file type produced by the preceding stage.
        for category in self.Type_analysis.to_do:
            if self.Type_analysis.AllignPhix:
                if category == self.AllignPhix:
                    fill_up_categories(self,category,'_QS.fastq',pair_end = True)
            if self.Type_analysis.AllignGenome:
                if category == self.AllignGenome:
                    # If the Phix-alignment step runs first, consume its
                    # cleaned output; otherwise start from the QS files.
                    if self.Type_analysis.AllignPhix:
                        fill_up_categories(self,category,'_PhixCleaned.fastq',pair_end = True)
                    else:
                        fill_up_categories(self,category,'_QS.fastq',pair_end = True)
            if self.Type_analysis.IIDefinition:
                if category == self.IIDefinition:
                    fill_up_categories(self,category,'_Alligned.sam',pair_end = False)
            if self.Type_analysis.GeneDefinition:
                if category == self.GeneDefinition:
                    fill_up_categories(self,category,'_IIRowdata.pkl',pair_end = False)
            if self.Type_analysis.GroupAnalysis:
                if category == self.GroupAnalysis:
                    fill_up_categories(self,category,'_GenesData.pkl',pair_end = False)
            if self.Type_analysis.Tables:
                if category == self.Tables:
                    # Tables/Design read a single pickled GroupAnalysis result
                    # rather than per-library files.
                    file_input = os.path.join(self.GroupAnalysis.storage_loc,'row','GroupAnalysis.pkl')
                    self.Tables.input_files = file_input
            if self.Type_analysis.Design:
                if category == self.Design:
                    file_input = os.path.join(self.GroupAnalysis.storage_loc,'row','GroupAnalysis.pkl')
                    self.Design.input_files = file_input
    def print_save(self,exp,string):
        """Print the selected string on the screen and store the information in txt file in the main folder"""
        # Echo to stdout, then append the same line to <exp>_info.txt inside
        # the experiment folder so a persistent log of everything printed is
        # kept alongside the data.
        print string
        with open (os.path.join(self.General.storing_loc,exp + '_' + self.General.date,exp +'_info.txt'), 'a' ) as write:
            # Python 2 "print chevron" form: writes 'string' (plus newline)
            # to the opened file object.
            print >> write, string
def read_txt(informations,text):
def get_TRUE(section,task,line,informations):
if not informations.has_key(section):
informations[section] = {}
if re.search('^\s*@\d[A-Z]\).*([YyNn]).*',line):
value = re.findall ('^\s*@\d[A-Z]\).*([YyNn]).*',line)[0]
if value.lower() == 'y':
informations[section][task] = True
else:
informations[section][task] = False
else:
informations[section][task] = False
return informations
def get_NUMBER(section,task,line,informations):
if not informations.has_key(section):
informations[section] = {}
if re.search ('^\s*@\d[A-Z]\).*([\d.]+).*',line):
value = re.findall ('^\s*@\d[A-Z]\).*([\d.]+).*',line)[0]
if value:
informations[section][task] = float(value)
else:
informations[section][task] = 'n.d'
else:
informations[section][task] = 'n.d'
return informations
def get_STRING(section,task,line,informations):
if not informations.has_key(section):
informations[section] = {}
string = re.findall ('^\s*@\d[A-Z]\)\s*(.*)',line)[0]
if string:
informations[section][task] = string
else:
informations[section][task] = 'n.d.'
return informations
def get_LIST(section,task,line,informations):
if not informations.has_key(section):
informations[section] = {}
if not informations[section].has_key(task):
informations[section][task] = []
string = re.findall ('^\s*@\d[A-Z]\)\s*(.*)',line)[0]
if string:
informations[section][task].append(string)
return informations
def extract_line (section,task,line,informations,true=[],number=[],list_=[],string=[]):
if task in number:
informations = get_NUMBER(section,task,line,informations)
elif task in true:
informations = get_TRUE(section,task,line,informations)
elif task in list_:
informations = get_LIST(section,task,line,informations)
elif task in string:
informations = get_STRING(section,task,line,informations)
return informations
hand = open(text,'rb')
line_count = 0
for line in hand:
line_count +=1
line = line.rstrip()
find = re.findall('^\s*@(\d)([A-Z])\)',line)
if find:
section, task = find[0]
section = int(section)
if section == 0:
informations = get_TRUE(section,task,line,informations)
elif section == 1:
informations = get_STRING(section,task,line,informations)
elif section == 2:
informations = extract_line (section,task,line,informations,true =['B','K'],number =['A','H','J'],list_=['C','D','E'],string=['F','G'])
elif section == 3:
informations = extract_line (section,task,line,informations,true =['B','D'],number =['C'],list_=['E','F','G'],string=['A'])
elif section == 4:
informations = extract_line (section,task,line,informations,true =['C','E'],number =['D'],list_=['F','G','H'],string=['A','B'])
elif section == 5:
informations = extract_line (section,task,line,informations,true =['B','C','E','F'],number =['A','D'],list_=['G','H'],string=[])
elif section == 6:
informations = extract_line (section,task,line,informations,true =['B','C','D','E','G'],number =['F'],list_=['H','I'],string=['A'])
elif section == 7:
informations = extract_line (section,task,line,informations,true =['F','G','H','I','J','K','L','M','N'],number =['A','P','O','P','Q','R','S'],list_=['D','T','U'],string =['B'])
if task in ['C']:
if re.search('^\s*@\d[A-Z]\)\s*(\S+.*\S*)', line):
string = [n.lstrip().rstrip() for n in re.findall('^\s*@\d[A-Z]\)\s*(\S+.*\S*)', line)[0].split(',')]
informations[section][task] = string
if task in ['E']:
if re.search('^\s*@\d[A-Z]\)\s*(\S+.*\S*)', line):
if not informations[section].has_key(task):
informations[section][task] =[]
string = [n.lstrip().rstrip() for n in re.findall('^\s*@\d[A-Z]\)\s*(\S+.*\S*)', line)[0].split(',')]
informations[section][task].append(string)
elif section == 8:
informations = extract_line (section,task,line,informations,list_=['A'],string =['D'])
if task == 'B':
if not informations[section].has_key(task):
informations[section][task] =[]
key = []
string = re.findall('\((.*?)\)',line)
for param in string:
variable = tuple(n.lstrip().rstrip() for n in param.split(',') if n != '')
if len(variable) == 3:
key.append(variable)
elif variable[1] == 'Score':
key.append(variable)
else:
print "ERROR! Line %d Section %s Task %d: The table parameter '%s' dosen't contain the correct structure" %(line_count,section,task,param)
informations[section][task].append(key)
elif task == 'C':
if not informations[section].has_key(task):
informations[section][task] =[]
filters = []
items = re.findall('[fF]ilter\s*?\[(.*?)\]',line)
for q in items:
filter_ = {}
if re.search('\((.*?)\)\s*?,\s*?(ascending|descending)',q):
parameters = re.findall('\((.*?)\)\s*?,\s*?(ascending|descending)',q)[0]
filter_['operation'] = parameters[1]
variable = tuple(n.lstrip().rstrip() for n in parameters[0].split(',') if n != '')
if len(variable) == 3:
filter_['parameter']= variable
elif variable[1] == 'Score':
filter_['parameter']= variable
else:
print "ERROR! Line %d Section %d Task %s: The table parameter '%s' dosen't contain the correct structure" %(line_count,section,task,q)
continue
elif re.search('\((.*?)\)\s*,\s*(\S+)\s*,\s*(\S+)',q):
parameters = re.findall('\((.*?)\)\s*,\s*(\S+)\s*,\s*(\S+)',q)[0]
filter_['operation'] = parameters[1]
filter_['number'] = float(parameters[2])
variable = tuple(n.lstrip().rstrip() for n in parameters[0].split(',') if n != '')
if len(variable) == 3:
filter_['parameter']= variable
else:
print "ERROR! Line %d Section %d Task %s: The table parameter '%s' dosen't contain the correct structure" %(line_count,section,task,q)
continue
else:
print "ERROR! Line %d Section %d Task %s: The table parameter '%s' dosen't contain the correct structure" %(line_count,section,task,q)
continue
filters.append(filter_)
informations[section][task].append(filters)
elif section == 9:
informations = extract_line (section,task,line,informations,list_=['B'],string =['A','C'])
return informations
| |
# orm/descriptor_props.py
# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Descriptor properties are more "auxiliary" properties
that exist as configurational elements, but don't participate
as actively in the load/persist ORM loop.
"""
from .interfaces import MapperProperty, PropComparator
from .util import _none_set
from . import attributes
from .. import util, sql, exc as sa_exc, event, schema
from ..sql import expression
from . import properties
from . import query
class DescriptorProperty(MapperProperty):
    """:class:`.MapperProperty` which proxies access to a
    user-defined descriptor."""

    doc = None

    def instrument_class(self, mapper):
        # Install this property's descriptor on the mapped class, wrapped in
        # a proxied attribute so class-level access yields SQL comparators.
        prop = self

        class _ProxyImpl(object):
            # Minimal stand-in for a real AttributeImpl: just enough surface
            # for the instrumentation machinery to treat the proxied
            # descriptor as an attribute.
            accepts_scalar_loader = False
            expire_missing = True
            collection = False

            def __init__(self, key):
                self.key = key

            # Only expose get_history if the owning property implements it
            # (decided at class-creation time via the closure over 'prop').
            if hasattr(prop, 'get_history'):
                def get_history(self, state, dict_,
                                passive=attributes.PASSIVE_OFF):
                    return prop.get_history(state, dict_, passive)

        if self.descriptor is None:
            # No explicit descriptor configured: adopt a userland descriptor
            # already present on the mapped class, if there is one.
            desc = getattr(mapper.class_, self.key, None)
            if mapper._is_userland_descriptor(desc):
                self.descriptor = desc

        if self.descriptor is None:
            # Still nothing: synthesize a plain property that forwards
            # get/set/delete to the attribute named by self.name.
            def fset(obj, value):
                setattr(obj, self.name, value)

            def fdel(obj):
                delattr(obj, self.name)

            def fget(obj):
                return getattr(obj, self.name)

            self.descriptor = property(
                fget=fget,
                fset=fset,
                fdel=fdel,
            )

        proxy_attr = attributes.create_proxied_attribute(
            self.descriptor)(
            self.parent.class_,
            self.key,
            self.descriptor,
            lambda: self._comparator_factory(mapper),
            doc=self.doc,
            original_property=self
        )
        proxy_attr.impl = _ProxyImpl(self.key)
        mapper.class_manager.instrument_attribute(self.key, proxy_attr)
@util.langhelpers.dependency_for("sqlalchemy.orm.properties")
class CompositeProperty(DescriptorProperty):
    """Defines a "composite" mapped attribute, representing a collection
    of columns as one attribute.

    :class:`.CompositeProperty` is constructed using the :func:`.composite`
    function.

    .. seealso::

        :ref:`mapper_composite`

    """

    def __init__(self, class_, *attrs, **kwargs):
        """Return a composite column-based property for use with a Mapper.

        See the mapping documentation section :ref:`mapper_composite` for a
        full usage example.

        The :class:`.MapperProperty` returned by :func:`.composite`
        is the :class:`.CompositeProperty`.

        :param class\_:
          The "composite type" class.

        :param \*cols:
          List of Column objects to be mapped.

        :param active_history=False:
          When ``True``, indicates that the "previous" value for a
          scalar attribute should be loaded when replaced, if not
          already loaded. See the same flag on :func:`.column_property`.

          .. versionchanged:: 0.7
              This flag specifically becomes meaningful
              - previously it was a placeholder.

        :param group:
          A group name for this property when marked as deferred.

        :param deferred:
          When True, the column property is "deferred", meaning that it does
          not load immediately, and is instead loaded when the attribute is
          first accessed on an instance. See also
          :func:`~sqlalchemy.orm.deferred`.

        :param comparator_factory: a class which extends
          :class:`.CompositeProperty.Comparator` which provides custom SQL
          clause generation for comparison operations.

        :param doc:
          optional string that will be applied as the doc on the
          class-bound descriptor.

        :param info: Optional data dictionary which will be populated into the
            :attr:`.MapperProperty.info` attribute of this object.

            .. versionadded:: 0.8

        :param extension:
          an :class:`.AttributeExtension` instance,
          or list of extensions, which will be prepended to the list of
          attribute listeners for the resulting descriptor placed on the
          class. **Deprecated.** Please see :class:`.AttributeEvents`.

        """
        self.attrs = attrs
        self.composite_class = class_
        self.active_history = kwargs.get('active_history', False)
        self.deferred = kwargs.get('deferred', False)
        self.group = kwargs.get('group', None)
        self.comparator_factory = kwargs.pop('comparator_factory',
                                             self.__class__.Comparator)
        if 'info' in kwargs:
            self.info = kwargs.pop('info')

        util.set_creation_order(self)
        self._create_descriptor()

    def instrument_class(self, mapper):
        super(CompositeProperty, self).instrument_class(mapper)
        self._setup_event_handlers()

    def do_init(self):
        """Initialization which occurs after the :class:`.CompositeProperty`
        has been associated with its parent mapper.

        """
        self._setup_arguments_on_columns()

    def _create_descriptor(self):
        """Create the Python descriptor that will serve as
        the access point on instances of the mapped class.

        """

        def fget(instance):
            dict_ = attributes.instance_dict(instance)
            state = attributes.instance_state(instance)

            if self.key not in dict_:
                # key not present. Iterate through related
                # attributes, retrieve their values. This
                # ensures they all load.
                values = [
                    getattr(instance, key)
                    for key in self._attribute_keys
                ]

                # current expected behavior here is that the composite is
                # created on access if the object is persistent or if
                # col attributes have non-None. This would be better
                # if the composite were created unconditionally,
                # but that would be a behavioral change.
                if self.key not in dict_ and (
                    state.key is not None or
                    not _none_set.issuperset(values)
                ):
                    dict_[self.key] = self.composite_class(*values)
                    state.manager.dispatch.refresh(state, None, [self.key])

            return dict_.get(self.key, None)

        def fset(instance, value):
            dict_ = attributes.instance_dict(instance)
            state = attributes.instance_state(instance)
            attr = state.manager[self.key]
            previous = dict_.get(self.key, attributes.NO_VALUE)
            # run attribute-level 'set' listeners before storing the value.
            for fn in attr.dispatch.set:
                value = fn(state, value, previous, attr.impl)
            dict_[self.key] = value
            if value is None:
                # setting the composite to None nulls out every member column.
                for key in self._attribute_keys:
                    setattr(instance, key, None)
            else:
                # distribute the composite's values onto the member columns.
                for key, value in zip(
                        self._attribute_keys,
                        value.__composite_values__()):
                    setattr(instance, key, value)

        def fdel(instance):
            state = attributes.instance_state(instance)
            dict_ = attributes.instance_dict(instance)
            previous = dict_.pop(self.key, attributes.NO_VALUE)
            attr = state.manager[self.key]
            attr.dispatch.remove(state, previous, attr.impl)
            for key in self._attribute_keys:
                setattr(instance, key, None)

        self.descriptor = property(fget, fset, fdel)

    @util.memoized_property
    def _comparable_elements(self):
        # class-bound attributes corresponding to each member property.
        return [
            getattr(self.parent.class_, prop.key)
            for prop in self.props
        ]

    @util.memoized_property
    def props(self):
        # Resolve the constructor's attrs (names, Columns or instrumented
        # attributes) into their MapperProperty objects.
        props = []
        for attr in self.attrs:
            if isinstance(attr, str):
                prop = self.parent.get_property(
                    attr, _configure_mappers=False)
            elif isinstance(attr, schema.Column):
                prop = self.parent._columntoproperty[attr]
            elif isinstance(attr, attributes.InstrumentedAttribute):
                prop = attr.property
            else:
                raise sa_exc.ArgumentError(
                    "Composite expects Column objects or mapped "
                    "attributes/attribute names as arguments, got: %r"
                    % (attr,))
            props.append(prop)
        return props

    @property
    def columns(self):
        # only the raw Column objects among the configured attrs.
        return [a for a in self.attrs if isinstance(a, schema.Column)]

    def _setup_arguments_on_columns(self):
        """Propagate configuration arguments made on this composite
        to the target columns, for those that apply.

        """
        for prop in self.props:
            prop.active_history = self.active_history
            if self.deferred:
                prop.deferred = self.deferred
                prop.strategy_class = prop._strategy_lookup(
                    ("deferred", True),
                    ("instrument", True))
            prop.group = self.group

    def _setup_event_handlers(self):
        """Establish events that populate/expire the composite attribute."""

        def load_handler(state, *args):
            dict_ = state.dict

            if self.key in dict_:
                return

            # if column elements aren't loaded, skip.
            # __get__() will initiate a load for those
            # columns
            for k in self._attribute_keys:
                if k not in dict_:
                    return

            # assert self.key not in dict_
            dict_[self.key] = self.composite_class(
                *[state.dict[key] for key in
                  self._attribute_keys]
            )

        def expire_handler(state, keys):
            # expiring any member column expires the composite as a whole.
            if keys is None or set(self._attribute_keys).intersection(keys):
                state.dict.pop(self.key, None)

        def insert_update_handler(mapper, connection, state):
            """After an insert or update, some columns may be expired due
            to server side defaults, or re-populated due to client side
            defaults. Pop out the composite value here so that it
            recreates.

            """
            state.dict.pop(self.key, None)

        event.listen(self.parent, 'after_insert',
                     insert_update_handler, raw=True)
        event.listen(self.parent, 'after_update',
                     insert_update_handler, raw=True)
        event.listen(self.parent, 'load',
                     load_handler, raw=True, propagate=True)
        event.listen(self.parent, 'refresh',
                     load_handler, raw=True, propagate=True)
        event.listen(self.parent, 'expire',
                     expire_handler, raw=True, propagate=True)

        # TODO: need a deserialize hook here

    @util.memoized_property
    def _attribute_keys(self):
        return [
            prop.key for prop in self.props
        ]

    def get_history(self, state, dict_, passive=attributes.PASSIVE_OFF):
        """Provided for userland code that uses attributes.get_history()."""
        # Coalesce per-column histories into composite-object histories:
        # one added composite and one deleted composite when anything changed.
        added = []
        deleted = []

        has_history = False
        for prop in self.props:
            key = prop.key
            hist = state.manager[key].impl.get_history(state, dict_)
            if hist.has_changes():
                has_history = True

            non_deleted = hist.non_deleted()
            if non_deleted:
                added.extend(non_deleted)
            else:
                added.append(None)
            if hist.deleted:
                deleted.extend(hist.deleted)
            else:
                deleted.append(None)

        if has_history:
            return attributes.History(
                [self.composite_class(*added)],
                (),
                [self.composite_class(*deleted)]
            )
        else:
            return attributes.History(
                (), [self.composite_class(*added)], ()
            )

    def _comparator_factory(self, mapper):
        return self.comparator_factory(self, mapper)

    class CompositeBundle(query.Bundle):
        # Bundle subclass so a composite in a Query select list is returned
        # as a single composite object rather than separate columns.
        def __init__(self, property, expr):
            self.property = property
            super(CompositeProperty.CompositeBundle, self).__init__(
                property.key, *expr)

        def create_row_processor(self, query, procs, labels):
            def proc(row, result):
                return self.property.composite_class(
                    *[proc(row, result) for proc in procs])
            return proc

    class Comparator(PropComparator):
        """Produce boolean, comparison, and other operators for
        :class:`.CompositeProperty` attributes.

        See the example in :ref:`composite_operations` for an overview
        of usage , as well as the documentation for :class:`.PropComparator`.

        See also:

        :class:`.PropComparator`

        :class:`.ColumnOperators`

        :ref:`types_operators`

        :attr:`.TypeEngine.comparator_factory`

        """

        # composites are not hashable at the expression level.
        __hash__ = None

        @property
        def clauses(self):
            return self.__clause_element__()

        def __clause_element__(self):
            return expression.ClauseList(
                group=False, *self._comparable_elements)

        def _query_clause_element(self):
            return CompositeProperty.CompositeBundle(
                self.prop, self.__clause_element__())

        @util.memoized_property
        def _comparable_elements(self):
            # re-anchor the member attributes onto an aliased entity if the
            # comparator has been adapted (e.g. aliased() usage).
            if self._adapt_to_entity:
                return [
                    getattr(
                        self._adapt_to_entity.entity,
                        prop.key
                    ) for prop in self.prop._comparable_elements
                ]
            else:
                return self.prop._comparable_elements

        def __eq__(self, other):
            # compare member-by-member; None compares every column to NULL.
            if other is None:
                values = [None] * len(self.prop._comparable_elements)
            else:
                values = other.__composite_values__()
            comparisons = [
                a == b
                for a, b in zip(self.prop._comparable_elements, values)
            ]
            if self._adapt_to_entity:
                comparisons = [self.adapter(x) for x in comparisons]
            return sql.and_(*comparisons)

        def __ne__(self, other):
            return sql.not_(self.__eq__(other))

    def __str__(self):
        return str(self.parent.class_.__name__) + "." + self.key
@util.langhelpers.dependency_for("sqlalchemy.orm.properties")
class ConcreteInheritedProperty(DescriptorProperty):
    """A 'do nothing' :class:`.MapperProperty` that disables
    an attribute on a concrete subclass that is only present
    on the inherited mapper, not the concrete classes' mapper.

    Cases where this occurs include:

    * When the superclass mapper is mapped against a
      "polymorphic union", which includes all attributes from
      all subclasses.
    * When a relationship() is configured on an inherited mapper,
      but not on the subclass mapper. Concrete mappers require
      that relationship() is configured explicitly on each
      subclass.

    """

    def _comparator_factory(self, mapper):
        # Walk up the inheritance chain until a mapper that actually defines
        # this property (i.e. not another placeholder) is found, and use its
        # comparator factory for class-level expression access.
        comparator_callable = None

        for m in self.parent.iterate_to_root():
            p = m._props[self.key]
            if not isinstance(p, ConcreteInheritedProperty):
                comparator_callable = p.comparator_factory
                break
        return comparator_callable

    def __init__(self):
        def warn():
            raise AttributeError("Concrete %s does not implement "
                                 "attribute %r at the instance level. Add "
                                 "this property explicitly to %s." %
                                 (self.parent, self.key, self.parent))

        class NoninheritedConcreteProp(object):
            # Descriptor that raises on any instance-level access; class-level
            # get (obj is None) still returns the descriptor so expression
            # access keeps working.  's' is the descriptor instance; 'self'
            # is the enclosing property, captured by closure.
            def __set__(s, obj, value):
                warn()

            def __delete__(s, obj):
                warn()

            def __get__(s, obj, owner):
                if obj is None:
                    return self.descriptor
                warn()

        self.descriptor = NoninheritedConcreteProp()
@util.langhelpers.dependency_for("sqlalchemy.orm.properties")
class SynonymProperty(DescriptorProperty):

    def __init__(self, name, map_column=None,
                 descriptor=None, comparator_factory=None,
                 doc=None):
        """Denote an attribute name as a synonym to a mapped property,
        in that the attribute will mirror the value and expression behavior
        of another attribute.

        :param name: the name of the existing mapped property. This
          can refer to the string name of any :class:`.MapperProperty`
          configured on the class, including column-bound attributes
          and relationships.

        :param descriptor: a Python :term:`descriptor` that will be used
          as a getter (and potentially a setter) when this attribute is
          accessed at the instance level.

        :param map_column: if ``True``, the :func:`.synonym` construct will
          locate the existing named :class:`.MapperProperty` based on the
          attribute name of this :func:`.synonym`, and assign it to a new
          attribute linked to the name of this :func:`.synonym`.
          That is, given a mapping like::

              class MyClass(Base):
                  __tablename__ = 'my_table'

                  id = Column(Integer, primary_key=True)
                  job_status = Column(String(50))

                  job_status = synonym("_job_status", map_column=True)

          The above class ``MyClass`` will now have the ``job_status``
          :class:`.Column` object mapped to the attribute named
          ``_job_status``, and the attribute named ``job_status`` will refer
          to the synonym itself. This feature is typically used in
          conjunction with the ``descriptor`` argument in order to link a
          user-defined descriptor as a "wrapper" for an existing column.

        :param comparator_factory: A subclass of :class:`.PropComparator`
          that will provide custom comparison behavior at the SQL expression
          level.

          .. note::

              For the use case of providing an attribute which redefines both
              Python-level and SQL-expression level behavior of an attribute,
              please refer to the Hybrid attribute introduced at
              :ref:`mapper_hybrids` for a more effective technique.

        .. seealso::

            :ref:`synonyms` - examples of functionality.

            :ref:`mapper_hybrids` - Hybrids provide a better approach for
            more complicated attribute-wrapping schemes than synonyms.

        """
        self.name = name
        self.map_column = map_column
        self.descriptor = descriptor
        self.comparator_factory = comparator_factory
        # fall back to the descriptor's own docstring when no doc given.
        self.doc = doc or (descriptor and descriptor.__doc__) or None

        util.set_creation_order(self)

    # TODO: when initialized, check _proxied_property,
    # emit a warning if its not a column-based property

    @util.memoized_property
    def _proxied_property(self):
        # the MapperProperty this synonym mirrors, resolved lazily by name.
        return getattr(self.parent.class_, self.name).property

    def _comparator_factory(self, mapper):
        prop = self._proxied_property

        if self.comparator_factory:
            comp = self.comparator_factory(prop, mapper)
        else:
            # no custom comparator: reuse the proxied property's own.
            comp = prop.comparator_factory(prop, mapper)
        return comp

    def set_parent(self, parent, init):
        if self.map_column:
            # implement the 'map_column' option.
            if self.key not in parent.mapped_table.c:
                raise sa_exc.ArgumentError(
                    "Can't compile synonym '%s': no column on table "
                    "'%s' named '%s'"
                    % (self.name, parent.mapped_table.description, self.key))
            elif parent.mapped_table.c[self.key] in \
                    parent._columntoproperty and \
                    parent._columntoproperty[
                        parent.mapped_table.c[self.key]
                    ].key == self.name:
                raise sa_exc.ArgumentError(
                    "Can't call map_column=True for synonym %r=%r, "
                    "a ColumnProperty already exists keyed to the name "
                    "%r for column %r" %
                    (self.key, self.name, self.name, self.key)
                )
            # map the like-named column under the target attribute name.
            p = properties.ColumnProperty(parent.mapped_table.c[self.key])
            parent._configure_property(
                self.name, p,
                init=init,
                setparent=True)
            p._mapped_by_synonym = self.key

        self.parent = parent
@util.langhelpers.dependency_for("sqlalchemy.orm.properties")
class ComparableProperty(DescriptorProperty):
    """Instruments a Python property for use in query expressions."""

    def __init__(self, comparator_factory, descriptor=None, doc=None):
        """Provides a method of applying a :class:`.PropComparator`
        to any Python descriptor attribute.

        .. versionchanged:: 0.7
            :func:`.comparable_property` is superseded by
            the :mod:`~sqlalchemy.ext.hybrid` extension. See the example
            at :ref:`hybrid_custom_comparators`.

        Allows any Python descriptor to behave like a SQL-enabled
        attribute when used at the class level in queries, allowing
        redefinition of expression operator behavior.

        In the example below we redefine :meth:`.PropComparator.operate`
        to wrap both sides of an expression in ``func.lower()`` to produce
        case-insensitive comparison::

            from sqlalchemy.orm import comparable_property
            from sqlalchemy.orm.interfaces import PropComparator
            from sqlalchemy.sql import func
            from sqlalchemy import Integer, String, Column
            from sqlalchemy.ext.declarative import declarative_base

            class CaseInsensitiveComparator(PropComparator):
                def __clause_element__(self):
                    return self.prop

                def operate(self, op, other):
                    return op(
                        func.lower(self.__clause_element__()),
                        func.lower(other)
                    )

            Base = declarative_base()

            class SearchWord(Base):
                __tablename__ = 'search_word'
                id = Column(Integer, primary_key=True)
                word = Column(String)
                word_insensitive = comparable_property(lambda prop, mapper:
                                        CaseInsensitiveComparator(
                                            mapper.c.word, mapper)
                                    )

        A mapping like the above allows the ``word_insensitive`` attribute
        to render an expression like::

            >>> print SearchWord.word_insensitive == "Trucks"
            lower(search_word.word) = lower(:lower_1)

        :param comparator_factory:
          A PropComparator subclass or factory that defines operator behavior
          for this property.

        :param descriptor:
          Optional when used in a ``properties={}`` declaration. The Python
          descriptor or property to layer comparison behavior on top of.

          The like-named descriptor will be automatically retrieved from the
          mapped class if left blank in a ``properties`` declaration.

        """
        self.descriptor = descriptor
        self.comparator_factory = comparator_factory
        # fall back to the descriptor's own docstring when no doc given.
        self.doc = doc or (descriptor and descriptor.__doc__) or None
        util.set_creation_order(self)

    def _comparator_factory(self, mapper):
        return self.comparator_factory(self, mapper)
| |
def init_actions_(service, args):
    """Return the action-dependency mapping for this service.

    Maps each action name to the list of actions it depends on.  Only the
    'test_all' umbrella has dependencies here: every individual API test
    plus the initial 'install'.
    """
    umbrella_deps = [
        'test_list_services',
        'test_delete_bp',
        'test_execute_bp',
        'test_get_bp',
        'test_restore_bp',
        'test_archive_bp',
        'test_update_bp',
        'test_create_bp',
        'test_list_bps',
        'test_delete_repo',
        'test_get_repo',
        'test_create_repo',
        'test_list_repos',
        'install',
    ]
    return {'test_all': umbrella_deps}
def test_all(job):
"""
Test umbrella
"""
pass
def test_list_repos(job):
    """
    Tests list repositories API

    Issues GET /ays/repository against the server configured in
    job.service.model.data.uri and records 'OK'/'FAILED'/'ERROR' in
    model.data.result; the result is always persisted via service.save().
    """
    import sys
    import requests

    RESULT_OK = 'OK : %s'
    RESULT_FAILED = 'FAILED : %s'
    RESULT_ERROR = 'ERROR : %s %%s' % job.service.name

    model = job.service.model
    # assume success until a check fails or an exception is raised.
    model.data.result = RESULT_OK % job.service.name
    failures = []
    repos = []
    server_uri = model.data.uri
    server_uri = server_uri.strip('/')
    try:
        service_uri = '/ays/repository'
        full_uri = '%s%s' % (server_uri, service_uri)
        res = requests.get(full_uri)
        if res.status_code != 200:
            failures.append('Wrong response while testing [%s] service using uri [%s]. Error [%s]' % (service_uri, server_uri, res.text))
        if failures:
            model.data.result = RESULT_FAILED % '\n'.join(failures)
    except Exception:
        # narrowed from a bare 'except:' so SystemExit/KeyboardInterrupt
        # still propagate; record the exception type/value in the result.
        model.data.result = RESULT_ERROR % str(sys.exc_info()[:2])
    finally:
        job.service.save()
        # clean up any repositories created during the test (none are created
        # here; kept for symmetry with the other API tests).
        if repos:
            for repo in repos:
                repo.destroy()
def test_create_repo(job):
    """
    Tests create new repository api

    POSTs to /ays/repository twice: once without a body (expects 400) and
    once with a unique name and fake git URL (expects 201).  Records
    'OK'/'FAILED'/'ERROR' in model.data.result, persists it, and finally
    issues a best-effort DELETE of the repository it created.
    """
    import sys
    import time
    import requests

    RESULT_OK = 'OK : %s'
    RESULT_FAILED = 'FAILED : %s'
    RESULT_ERROR = 'ERROR : %s %%s' % job.service.name

    model = job.service.model
    # assume success until a check fails or an exception is raised.
    model.data.result = RESULT_OK % job.service.name
    failures = []
    repos = []
    server_uri = model.data.uri
    server_uri = server_uri.strip('/')
    # timestamp suffix makes the test repo name effectively unique.
    repo_name = 'testrepo_%s' % time.time()
    fake_repo_url = 'https://githum.com/ahussin/%s.git' % repo_name
    try:
        service_uri = '/ays/repository'
        full_uri = '%s%s' % (server_uri, service_uri)
        # non-valid request...no data sent, should be rejected with 400.
        res = requests.post(full_uri)
        if res.status_code != 400:
            # message typo fixed: 'invlid' -> 'invalid'
            failures.append('Wrong status code while creating new repository with invalid data. Expected [%s] found [%s]' % (400, res.status_code))
        # valid request
        data = {'name': repo_name, 'git_url': fake_repo_url}
        res = requests.post(full_uri, json=data)
        if res.status_code != 201:
            failures.append('Wrong status code while creating new repository. Expected [201] found [%s]' % res.status_code)
        if failures:
            model.data.result = RESULT_FAILED % '\n'.join(failures)
    except Exception:
        # narrowed from a bare 'except:' so SystemExit/KeyboardInterrupt
        # still propagate; record the exception type/value in the result.
        model.data.result = RESULT_ERROR % str(sys.exc_info()[:2])
    finally:
        job.service.save()
        if repos:
            for repo in repos:
                repo.destroy()
        # delete the created repo (best-effort; server answers 404 if the
        # creation step never succeeded).
        full_uri = '%s/%s' % (full_uri, repo_name)
        requests.delete(full_uri)
def test_get_repo(job):
    """
    Tests get repository api

    Picks an existing repository from GET /ays/repository and verifies that
    fetching it returns 200, while fetching a derived name guaranteed not to
    exist returns 404.  Records 'OK'/'FAILED'/'ERROR' in model.data.result
    and always persists it via service.save().
    """
    import sys
    import time
    import requests

    RESULT_OK = 'OK : %s'
    RESULT_FAILED = 'FAILED : %s'
    RESULT_ERROR = 'ERROR : %s %%s' % job.service.name

    model = job.service.model
    # assume success until a check fails or an exception is raised.
    model.data.result = RESULT_OK % job.service.name
    failures = []
    repos = []
    server_uri = model.data.uri
    server_uri = server_uri.strip('/')
    try:
        service_uri = '/ays/repository'
        full_uri = '%s%s' % (server_uri, service_uri)
        res = requests.get(full_uri)
        if res.status_code == 200:
            ays_repos = res.json()
            if len(ays_repos) > 0:
                repo_info = ays_repos[0]
                ays_repos = dict(zip([item['name'] for item in ays_repos], ays_repos))
                # derive a name that is not taken by appending a timestamp
                # (fixed the duplicated 'x = x = ...' assignment in the loop).
                non_existing_repo = '%s_%s' % (repo_info['name'], time.time())
                while non_existing_repo in ays_repos:
                    non_existing_repo = '%s_%s' % (repo_info['name'], time.time())
                service_uri = '%s/%s' % (service_uri, repo_info['name'])
                full_uri = '%s%s' % (server_uri, service_uri)
                res = requests.get(full_uri)
                if res.status_code != 200:
                    failures.append('Wrong status code while getting repository [%s] using uri [%s]' % (repo_info['name'], full_uri))
                # test non-existing repo
                service_uri = '%s/%s' % (service_uri, non_existing_repo)
                full_uri = '%s%s' % (server_uri, service_uri)
                res = requests.get(full_uri)
                if res.status_code != 404:
                    # report the name that was actually requested (the
                    # original logged repo_info['name'] by mistake).
                    failures.append('Wrong status code while getting non-existing repository [%s] using uri [%s]' % (non_existing_repo, full_uri))
        if failures:
            model.data.result = RESULT_FAILED % '\n'.join(failures)
    except Exception:
        # narrowed from a bare 'except:' so SystemExit/KeyboardInterrupt
        # still propagate; record the exception type/value in the result.
        model.data.result = RESULT_ERROR % str(sys.exc_info()[:2])
    finally:
        job.service.save()
        # no repositories are created by this test; kept for symmetry.
        if repos:
            for repo in repos:
                repo.destroy()
def test_delete_repo(job):
    """
    Tests delete repository api

    Creates a uniquely-named repository via POST /ays/repository, then
    verifies DELETE on it returns 204.  Records 'OK'/'FAILED'/'ERROR' in
    model.data.result and always persists it via service.save().
    """
    import sys
    import time
    import requests

    RESULT_OK = 'OK : %s'
    RESULT_FAILED = 'FAILED : %s'
    RESULT_ERROR = 'ERROR : %s %%s' % job.service.name

    model = job.service.model
    # assume success until a check fails or an exception is raised.
    model.data.result = RESULT_OK % job.service.name
    failures = []
    repos = []
    server_uri = model.data.uri
    server_uri = server_uri.strip('/')
    # timestamp suffix makes the test repo name effectively unique.
    repo_name = 'testrepo_%s' % time.time()
    fake_repo_url = 'https://githum.com/ahussin/%s.git' % repo_name
    try:
        service_uri = '/ays/repository'
        full_uri = '%s%s' % (server_uri, service_uri)
        # list repos to make sure the generated name is not already taken.
        res = requests.get(full_uri)
        if res.status_code == 200:
            ays_repos = res.json()
            ays_repos = dict(zip([item['name'] for item in ays_repos], ays_repos))
            while repo_name in ays_repos:
                repo_name = 'testrepo_%s' % time.time()
            # now create a repo with the non-existing name
            res = requests.post(full_uri, json={'name': repo_name, 'git_url': fake_repo_url})
            if res.status_code == 201:
                full_uri = '%s/%s' % (full_uri, repo_name)
                res = requests.delete(full_uri)
                if res.status_code != 204:
                    failures.append('Failed to delete repository. Error [%s]' % res.text)
            else:
                failures.append('Cannot test delete repository API. Cannot create test repo. Error [%s]' % res.text)
        if failures:
            model.data.result = RESULT_FAILED % '\n'.join(failures)
    except Exception:
        # narrowed from a bare 'except:' so SystemExit/KeyboardInterrupt
        # still propagate; record the exception type/value in the result.
        model.data.result = RESULT_ERROR % str(sys.exc_info()[:2])
    finally:
        job.service.save()
        # no repo objects are tracked locally; kept for symmetry.
        if repos:
            for repo in repos:
                repo.destroy()
def test_list_bps(job):
    """
    Tests list blueprints API.

    Picks the first repository reported by the AYS server and verifies that
    listing its blueprints answers 200. The outcome is recorded in
    ``job.service.model.data.result`` as ``OK``/``FAILED``/``ERROR``.
    """
    import sys
    import requests
    RESULT_OK = 'OK : %s'
    RESULT_FAILED = 'FAILED : %s'
    RESULT_ERROR = 'ERROR : %s %%s' % job.service.name
    model = job.service.model
    model.data.result = RESULT_OK % job.service.name
    failures = []
    server_uri = model.data.uri.strip('/')
    try:
        service_uri = '/ays/repository'
        full_uri = '%s%s' % (server_uri, service_uri)
        res = requests.get(full_uri)
        if res.status_code == 200:
            repo_info = res.json()
            if repo_info:
                repo_info = repo_info[0]
                service_uri = '/ays/repository/%s/blueprint' % repo_info['name']
                full_uri = '%s%s' % (server_uri, service_uri)
                res = requests.get(full_uri)
                if res.status_code != 200:
                    failures.append('Failed to list blueprints using url [%s]. Error [%s]' % (full_uri, res.text))
        if failures:
            model.data.result = RESULT_FAILED % '\n'.join(failures)
    except Exception:
        # record the failure on the service model instead of crashing the job
        model.data.result = RESULT_ERROR % str(sys.exc_info()[:2])
    finally:
        job.service.save()
def test_create_bp(job):
    """
    Create test blueprint.

    Creates a uniquely named blueprint in the first available repository and
    verifies the API answers 201. The test blueprint is deleted again in the
    ``finally`` clause. The outcome is recorded in
    ``job.service.model.data.result`` as ``OK``/``FAILED``/``ERROR``.
    """
    import sys
    import requests
    import time
    RESULT_OK = 'OK : %s'
    RESULT_FAILED = 'FAILED : %s'
    RESULT_ERROR = 'ERROR : %s %%s' % job.service.name
    model = job.service.model
    model.data.result = RESULT_OK % job.service.name
    failures = []
    server_uri = model.data.uri.strip('/')
    repo_info = None
    blueprint_name = None
    try:
        service_uri = '/ays/repository'
        full_uri = '%s%s' % (server_uri, service_uri)
        res = requests.get(full_uri)
        if res.status_code == 200:
            repo_info = res.json()
            if repo_info:
                repo_info = repo_info[0]
                service_uri = '/ays/repository/%s/blueprint' % repo_info['name']
                full_uri = '%s%s' % (server_uri, service_uri)
                res = requests.get(full_uri)
                if res.status_code == 200:
                    # only parse the body once we know the request succeeded
                    existing_bps = {bp_info['name'] for bp_info in res.json()}
                    blueprint_name = 'testbp_%s' % time.time()
                    while blueprint_name in existing_bps:
                        blueprint_name = 'testbp_%s' % time.time()
                    content = "test_recurring_actions__instance:\n\nactions:\n - action: 'test'"
                    res = requests.post(full_uri, json={'name': blueprint_name, 'content': content})
                    if res.status_code != 201:
                        failures.append("Failed to create new blueprint. Error [%s]" % res.text)
        if failures:
            model.data.result = RESULT_FAILED % '\n'.join(failures)
    except Exception:
        # record the failure on the service model instead of crashing the job
        model.data.result = RESULT_ERROR % str(sys.exc_info()[:2])
    finally:
        job.service.save()
        # delete the test blueprint
        if repo_info and blueprint_name:
            full_uri = '%s/ays/repository/%s/blueprint/%s' % (server_uri, repo_info['name'], blueprint_name)
            requests.delete(full_uri)
def test_update_bp(job):
    """
    Tests update blueprint API.

    Creates a uniquely named blueprint, then PUTs new content to it and
    verifies the API answers 200. The test blueprint is deleted again in the
    ``finally`` clause. The outcome is recorded in
    ``job.service.model.data.result`` as ``OK``/``FAILED``/``ERROR``.
    """
    import sys
    import requests
    import time
    RESULT_OK = 'OK : %s'
    RESULT_FAILED = 'FAILED : %s'
    RESULT_ERROR = 'ERROR : %s %%s' % job.service.name
    model = job.service.model
    model.data.result = RESULT_OK % job.service.name
    failures = []
    server_uri = model.data.uri.strip('/')
    repo_info = None
    blueprint_name = None
    try:
        service_uri = '/ays/repository'
        full_uri = '%s%s' % (server_uri, service_uri)
        res = requests.get(full_uri)
        if res.status_code == 200:
            repo_info = res.json()
            if repo_info:
                repo_info = repo_info[0]
                service_uri = '/ays/repository/%s/blueprint' % repo_info['name']
                full_uri = '%s%s' % (server_uri, service_uri)
                res = requests.get(full_uri)
                if res.status_code == 200:
                    # only parse the body once we know the request succeeded
                    existing_bps = {bp_info['name'] for bp_info in res.json()}
                    blueprint_name = 'testbp_%s' % time.time()
                    while blueprint_name in existing_bps:
                        blueprint_name = 'testbp_%s' % time.time()
                    content = "test_recurring_actions__instance:\n\nactions:\n - action: 'test'"
                    res = requests.post(full_uri, json={'name': blueprint_name, 'content': content})
                    if res.status_code == 201:
                        updated_content = "test_recurring_actions__instance2:\n\nactions:\n - action: 'test'"
                        full_uri = '%s/ays/repository/%s/blueprint/%s' % (server_uri, repo_info['name'], blueprint_name)
                        # bug fix: send the *updated* content; the original sent
                        # the initial content again so the update was a no-op
                        res = requests.put(full_uri, json={'name': blueprint_name, 'content': updated_content})
                        if res.status_code != 200:
                            failures.append('Failed to update blueprint using uri [%s]. Error[%s]' % (full_uri, res.text))
        if failures:
            model.data.result = RESULT_FAILED % '\n'.join(failures)
    except Exception:
        # record the failure on the service model instead of crashing the job
        model.data.result = RESULT_ERROR % str(sys.exc_info()[:2])
    finally:
        job.service.save()
        # delete the test blueprint
        if repo_info and blueprint_name:
            full_uri = '%s/ays/repository/%s/blueprint/%s' % (server_uri, repo_info['name'], blueprint_name)
            requests.delete(full_uri)
def test_archive_bp(job):
    """
    Tests archive blueprint API.

    Creates a uniquely named blueprint, then PUTs to its ``/archive``
    endpoint and verifies the API answers 200. The test blueprint is deleted
    again in the ``finally`` clause. The outcome is recorded in
    ``job.service.model.data.result`` as ``OK``/``FAILED``/``ERROR``.
    """
    import sys
    import requests
    import time
    RESULT_OK = 'OK : %s'
    RESULT_FAILED = 'FAILED : %s'
    RESULT_ERROR = 'ERROR : %s %%s' % job.service.name
    model = job.service.model
    model.data.result = RESULT_OK % job.service.name
    failures = []
    server_uri = model.data.uri.strip('/')
    repo_info = None
    blueprint_name = None
    try:
        service_uri = '/ays/repository'
        full_uri = '%s%s' % (server_uri, service_uri)
        res = requests.get(full_uri)
        if res.status_code == 200:
            repo_info = res.json()
            if repo_info:
                repo_info = repo_info[0]
                service_uri = '/ays/repository/%s/blueprint' % repo_info['name']
                full_uri = '%s%s' % (server_uri, service_uri)
                res = requests.get(full_uri)
                if res.status_code == 200:
                    # only parse the body once we know the request succeeded
                    existing_bps = {bp_info['name'] for bp_info in res.json()}
                    blueprint_name = 'testbp_%s' % time.time()
                    while blueprint_name in existing_bps:
                        blueprint_name = 'testbp_%s' % time.time()
                    content = "test_recurring_actions__instance:\n\nactions:\n - action: 'test'"
                    res = requests.post(full_uri, json={'name': blueprint_name, 'content': content})
                    if res.status_code == 201:
                        full_uri = '%s/ays/repository/%s/blueprint/%s/archive' % (server_uri, repo_info['name'], blueprint_name)
                        res = requests.put(full_uri)
                        if res.status_code != 200:
                            failures.append('Failed to archive blueprint using uri [%s]. Error[%s]' % (full_uri, res.text))
        if failures:
            model.data.result = RESULT_FAILED % '\n'.join(failures)
    except Exception:
        # record the failure on the service model instead of crashing the job
        model.data.result = RESULT_ERROR % str(sys.exc_info()[:2])
    finally:
        job.service.save()
        # delete the test blueprint
        if repo_info and blueprint_name:
            full_uri = '%s/ays/repository/%s/blueprint/%s' % (server_uri, repo_info['name'], blueprint_name)
            requests.delete(full_uri)
def test_restore_bp(job):
    """
    Tests restore blueprint API.

    Creates a uniquely named blueprint, archives it, then PUTs to its
    ``/restore`` endpoint and verifies the API answers 200. The test
    blueprint is deleted again in the ``finally`` clause. The outcome is
    recorded in ``job.service.model.data.result`` as ``OK``/``FAILED``/``ERROR``.
    """
    import sys
    import requests
    import time
    RESULT_OK = 'OK : %s'
    RESULT_FAILED = 'FAILED : %s'
    RESULT_ERROR = 'ERROR : %s %%s' % job.service.name
    model = job.service.model
    model.data.result = RESULT_OK % job.service.name
    failures = []
    server_uri = model.data.uri.strip('/')
    repo_info = None
    blueprint_name = None
    try:
        service_uri = '/ays/repository'
        full_uri = '%s%s' % (server_uri, service_uri)
        res = requests.get(full_uri)
        if res.status_code == 200:
            repo_info = res.json()
            if repo_info:
                repo_info = repo_info[0]
                service_uri = '/ays/repository/%s/blueprint' % repo_info['name']
                full_uri = '%s%s' % (server_uri, service_uri)
                res = requests.get(full_uri)
                if res.status_code == 200:
                    # only parse the body once we know the request succeeded
                    existing_bps = {bp_info['name'] for bp_info in res.json()}
                    blueprint_name = 'testbp_%s' % time.time()
                    while blueprint_name in existing_bps:
                        blueprint_name = 'testbp_%s' % time.time()
                    content = "test_recurring_actions__instance:\n\nactions:\n - action: 'test'"
                    res = requests.post(full_uri, json={'name': blueprint_name, 'content': content})
                    if res.status_code == 201:
                        # archive first: restore is only meaningful afterwards
                        full_uri = '%s/ays/repository/%s/blueprint/%s/archive' % (server_uri, repo_info['name'], blueprint_name)
                        res = requests.put(full_uri)
                        if res.status_code == 200:
                            full_uri = '%s/ays/repository/%s/blueprint/%s/restore' % (server_uri, repo_info['name'], blueprint_name)
                            res = requests.put(full_uri)
                            if res.status_code != 200:
                                failures.append('Failed to restore blueprint using uri [%s]. Error[%s]' % (full_uri, res.text))
        if failures:
            model.data.result = RESULT_FAILED % '\n'.join(failures)
    except Exception:
        # record the failure on the service model instead of crashing the job
        model.data.result = RESULT_ERROR % str(sys.exc_info()[:2])
    finally:
        job.service.save()
        # delete the test blueprint
        if repo_info and blueprint_name:
            full_uri = '%s/ays/repository/%s/blueprint/%s' % (server_uri, repo_info['name'], blueprint_name)
            requests.delete(full_uri)
def test_get_bp(job):
    """
    Tests get blueprint API.

    Creates a uniquely named blueprint, then GETs it back and verifies the
    API answers 200. The test blueprint is deleted again in the ``finally``
    clause. The outcome is recorded in ``job.service.model.data.result`` as
    ``OK``/``FAILED``/``ERROR``.
    """
    import sys
    import requests
    import time
    RESULT_OK = 'OK : %s'
    RESULT_FAILED = 'FAILED : %s'
    RESULT_ERROR = 'ERROR : %s %%s' % job.service.name
    model = job.service.model
    model.data.result = RESULT_OK % job.service.name
    failures = []
    server_uri = model.data.uri.strip('/')
    repo_info = None
    blueprint_name = None
    try:
        service_uri = '/ays/repository'
        full_uri = '%s%s' % (server_uri, service_uri)
        res = requests.get(full_uri)
        if res.status_code == 200:
            repo_info = res.json()
            if repo_info:
                repo_info = repo_info[0]
                service_uri = '/ays/repository/%s/blueprint' % repo_info['name']
                full_uri = '%s%s' % (server_uri, service_uri)
                res = requests.get(full_uri)
                if res.status_code == 200:
                    # only parse the body once we know the request succeeded
                    existing_bps = {bp_info['name'] for bp_info in res.json()}
                    blueprint_name = 'testbp_%s' % time.time()
                    while blueprint_name in existing_bps:
                        blueprint_name = 'testbp_%s' % time.time()
                    content = "test_recurring_actions__instance:\n\nactions:\n - action: 'test'"
                    res = requests.post(full_uri, json={'name': blueprint_name, 'content': content})
                    if res.status_code == 201:
                        full_uri = '%s/ays/repository/%s/blueprint/%s' % (server_uri, repo_info['name'], blueprint_name)
                        res = requests.get(full_uri)
                        if res.status_code != 200:
                            failures.append('Failed to get blueprint using uri [%s]. Error[%s]' % (full_uri, res.text))
        if failures:
            model.data.result = RESULT_FAILED % '\n'.join(failures)
    except Exception:
        # record the failure on the service model instead of crashing the job
        model.data.result = RESULT_ERROR % str(sys.exc_info()[:2])
    finally:
        job.service.save()
        # delete the test blueprint
        if repo_info and blueprint_name:
            full_uri = '%s/ays/repository/%s/blueprint/%s' % (server_uri, repo_info['name'], blueprint_name)
            requests.delete(full_uri)
def test_execute_bp(job):
    """
    Tests execute blueprint API.

    Creates a uniquely named blueprint, then POSTs to it to execute it and
    verifies the API answers 200. The test blueprint is deleted again in the
    ``finally`` clause. The outcome is recorded in
    ``job.service.model.data.result`` as ``OK``/``FAILED``/``ERROR``.
    """
    import sys
    import requests
    import time
    RESULT_OK = 'OK : %s'
    RESULT_FAILED = 'FAILED : %s'
    RESULT_ERROR = 'ERROR : %s %%s' % job.service.name
    model = job.service.model
    model.data.result = RESULT_OK % job.service.name
    failures = []
    server_uri = model.data.uri.strip('/')
    repo_info = None
    blueprint_name = None
    try:
        service_uri = '/ays/repository'
        full_uri = '%s%s' % (server_uri, service_uri)
        res = requests.get(full_uri)
        if res.status_code == 200:
            repo_info = res.json()
            if repo_info:
                repo_info = repo_info[0]
                service_uri = '/ays/repository/%s/blueprint' % repo_info['name']
                full_uri = '%s%s' % (server_uri, service_uri)
                res = requests.get(full_uri)
                if res.status_code == 200:
                    # only parse the body once we know the request succeeded
                    existing_bps = {bp_info['name'] for bp_info in res.json()}
                    blueprint_name = 'testbp_%s' % time.time()
                    while blueprint_name in existing_bps:
                        blueprint_name = 'testbp_%s' % time.time()
                    content = "test_recurring_actions__instance:\n\nactions:\n - action: 'test'"
                    res = requests.post(full_uri, json={'name': blueprint_name, 'content': content})
                    if res.status_code == 201:
                        # POST on the blueprint resource triggers its execution
                        full_uri = '%s/ays/repository/%s/blueprint/%s' % (server_uri, repo_info['name'], blueprint_name)
                        res = requests.post(full_uri)
                        if res.status_code != 200:
                            failures.append('Failed to execute blueprint using uri [%s]. Error[%s]' % (full_uri, res.text))
        if failures:
            model.data.result = RESULT_FAILED % '\n'.join(failures)
    except Exception:
        # record the failure on the service model instead of crashing the job
        model.data.result = RESULT_ERROR % str(sys.exc_info()[:2])
    finally:
        job.service.save()
        # delete the test blueprint
        if repo_info and blueprint_name:
            full_uri = '%s/ays/repository/%s/blueprint/%s' % (server_uri, repo_info['name'], blueprint_name)
            requests.delete(full_uri)
def test_delete_bp(job):
    """
    Tests delete blueprint API.

    Creates a uniquely named blueprint, deletes it, and verifies the API
    answers 204. A best-effort delete also runs in the ``finally`` clause in
    case the in-test delete failed. The outcome is recorded in
    ``job.service.model.data.result`` as ``OK``/``FAILED``/``ERROR``.
    """
    import sys
    import requests
    import time
    RESULT_OK = 'OK : %s'
    RESULT_FAILED = 'FAILED : %s'
    RESULT_ERROR = 'ERROR : %s %%s' % job.service.name
    model = job.service.model
    model.data.result = RESULT_OK % job.service.name
    failures = []
    server_uri = model.data.uri.strip('/')
    repo_info = None
    blueprint_name = None
    try:
        service_uri = '/ays/repository'
        full_uri = '%s%s' % (server_uri, service_uri)
        res = requests.get(full_uri)
        if res.status_code == 200:
            repo_info = res.json()
            if repo_info:
                repo_info = repo_info[0]
                service_uri = '/ays/repository/%s/blueprint' % repo_info['name']
                full_uri = '%s%s' % (server_uri, service_uri)
                res = requests.get(full_uri)
                if res.status_code == 200:
                    # only parse the body once we know the request succeeded
                    existing_bps = {bp_info['name'] for bp_info in res.json()}
                    blueprint_name = 'testbp_%s' % time.time()
                    while blueprint_name in existing_bps:
                        blueprint_name = 'testbp_%s' % time.time()
                    content = "test_recurring_actions__instance:\n\nactions:\n - action: 'test'"
                    res = requests.post(full_uri, json={'name': blueprint_name, 'content': content})
                    if res.status_code == 201:
                        full_uri = '%s/ays/repository/%s/blueprint/%s' % (server_uri, repo_info['name'], blueprint_name)
                        res = requests.delete(full_uri)
                        if res.status_code != 204:
                            failures.append('Failed to delete blueprint using uri [%s]. Error[%s]' % (full_uri, res.text))
        if failures:
            model.data.result = RESULT_FAILED % '\n'.join(failures)
    except Exception:
        # record the failure on the service model instead of crashing the job
        model.data.result = RESULT_ERROR % str(sys.exc_info()[:2])
    finally:
        job.service.save()
        # delete the test blueprint
        if repo_info and blueprint_name:
            full_uri = '%s/ays/repository/%s/blueprint/%s' % (server_uri, repo_info['name'], blueprint_name)
            requests.delete(full_uri)
def test_list_services(job):
    """
    Test list services API.

    Picks the first repository reported by the AYS server and verifies that
    listing its services answers 200. The outcome is recorded in
    ``job.service.model.data.result`` as ``OK``/``FAILED``/``ERROR``.
    """
    import sys
    import requests
    RESULT_OK = 'OK : %s'
    RESULT_FAILED = 'FAILED : %s'
    RESULT_ERROR = 'ERROR : %s %%s' % job.service.name
    model = job.service.model
    model.data.result = RESULT_OK % job.service.name
    failures = []
    server_uri = model.data.uri.strip('/')
    try:
        service_uri = '/ays/repository'
        full_uri = '%s%s' % (server_uri, service_uri)
        res = requests.get(full_uri)
        if res.status_code == 200:
            repo_info = res.json()
            if repo_info:
                repo_info = repo_info[0]
                service_uri = '/ays/repository/%s/service' % repo_info['name']
                full_uri = '%s%s' % (server_uri, service_uri)
                res = requests.get(full_uri)
                if res.status_code != 200:
                    failures.append('Failed to list services using url [%s]. Error [%s]' % (full_uri, res.text))
        if failures:
            model.data.result = RESULT_FAILED % '\n'.join(failures)
    except Exception:
        # record the failure on the service model instead of crashing the job
        model.data.result = RESULT_ERROR % str(sys.exc_info()[:2])
    finally:
        job.service.save()
| |
#
# license-expression is a free software tool from nexB Inc. and others.
# Visit https://github.com/nexB/license-expression for support and download.
#
# Copyright (c) 2017 nexB Inc. and others. All rights reserved.
# http://nexb.com and http://aboutcode.org
#
# This software is licensed under the Apache License version 2.0.
#
# You may not use this software except in compliance with the License.
# You may obtain a copy of the License at: http://apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
"""
This module defines a mini language to parse, validate, simplify, normalize and
compare license expressions using a boolean logic engine.
This supports SPDX license expressions and also accepts other license naming
conventions and license identifiers aliases to recognize and normalize licenses.
Using boolean logic, license expressions can be tested for equality, containment,
equivalence and can be normalized or simplified.
The main entry point is the Licensing object.
"""
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
# Python 2 and 3 support
# On Python 2, rebind `str` to `unicode` so all text handling below is
# unicode-based; on Python 3, alias `unicode` to `str` for the same effect.
try:
    # Python 2
    unicode
    str = unicode
except NameError:
    # Python 3
    unicode = str
import collections
from copy import copy
from copy import deepcopy
from functools import total_ordering
import itertools
import re
import string
import boolean
from boolean import Expression as LicenseExpression
# note these may not all be used here but are imported here to avoid leaking
# boolean.py constants to callers
from boolean.boolean import PARSE_ERRORS
from boolean.boolean import PARSE_INVALID_EXPRESSION
from boolean.boolean import PARSE_INVALID_NESTING
from boolean.boolean import PARSE_INVALID_SYMBOL_SEQUENCE
from boolean.boolean import PARSE_UNBALANCED_CLOSING_PARENS
from boolean.boolean import PARSE_UNKNOWN_TOKEN
from boolean.boolean import ParseError
from boolean.boolean import TOKEN_SYMBOL
from boolean.boolean import TOKEN_AND
from boolean.boolean import TOKEN_OR
from boolean.boolean import TOKEN_LPAR
from boolean.boolean import TOKEN_RPAR
from license_expression._pyahocorasick import Trie as Scanner
from license_expression._pyahocorasick import Output
from license_expression._pyahocorasick import Result
# append new error codes to PARSE_ERRORS by monkey patching
# The `in` guards make this idempotent so re-importing the module (or a
# reload) does not clobber the shared boolean.py PARSE_ERRORS mapping.
PARSE_EXPRESSION_NOT_UNICODE = 100
if PARSE_EXPRESSION_NOT_UNICODE not in PARSE_ERRORS:
    PARSE_ERRORS[PARSE_EXPRESSION_NOT_UNICODE] = 'Expression string must be unicode.'
PARSE_INVALID_EXCEPTION = 101
if PARSE_INVALID_EXCEPTION not in PARSE_ERRORS:
    PARSE_ERRORS[PARSE_INVALID_EXCEPTION] = (
        'A license exception symbol can only be used as an exception '
        'in a "WITH exception" statement.')
PARSE_INVALID_SYMBOL_AS_EXCEPTION = 102
if PARSE_INVALID_SYMBOL_AS_EXCEPTION not in PARSE_ERRORS:
    PARSE_ERRORS[PARSE_INVALID_SYMBOL_AS_EXCEPTION] = (
        'A plain license symbol cannot be used as an exception '
        'in a "WITH symbol" statement.')
PARSE_INVALID_SYMBOL = 103
if PARSE_INVALID_SYMBOL not in PARSE_ERRORS:
    PARSE_ERRORS[PARSE_INVALID_SYMBOL] = (
        'A proper license symbol is needed.')
class ExpressionError(Exception):
    """
    Raised for invalid license expressions: non-string input, syntax errors
    wrapping the underlying parse failure, or unknown license keys when
    parsing with validate=True.
    """
    pass
# Used for tokenizing
# A Keyword pairs the literal text of an operator with its token type id.
Keyword = collections.namedtuple('Keyword', 'value type')
# id for "with" token which is not a proper boolean symbol but an expression symbol
TOKEN_WITH = 10
# actual keyword types
KW_LPAR = Keyword('(', TOKEN_LPAR)
KW_RPAR = Keyword(')', TOKEN_RPAR)
# note: the binary keywords are padded with spaces so the scanner only
# matches them as whole words, not inside a license key
_KEYWORDS = [
    Keyword(' and ', TOKEN_AND),
    Keyword(' or ', TOKEN_OR),
    KW_LPAR,
    KW_RPAR,
    Keyword(' with ', TOKEN_WITH),
]
KEYWORDS = tuple(kw.value for kw in _KEYWORDS)
KEYWORDS_STRIPPED = tuple(k.strip() for k in KEYWORDS)
class Licensing(boolean.BooleanAlgebra):
    """
    Define a mini language to parse, validate and compare license expressions.
    For example:

    >>> l = Licensing()
    >>> expr = l.parse(" GPL-2.0 or LGPL-2.1 and mit ")
    >>> expected = 'GPL-2.0 OR (LGPL-2.1 AND mit)'
    >>> assert expected == expr.render('{symbol.key}')

    >>> expected = [
    ...   LicenseSymbol('GPL-2.0'),
    ...   LicenseSymbol('LGPL-2.1'),
    ...   LicenseSymbol('mit')
    ... ]
    >>> assert expected == l.license_symbols(expr)

    >>> symbols = ['GPL-2.0+', 'Classpath', 'BSD']
    >>> l = Licensing(symbols)
    >>> expression = 'GPL-2.0+ with Classpath or (bsd)'
    >>> parsed = l.parse(expression)
    >>> expected = 'GPL-2.0+ WITH Classpath OR BSD'
    >>> assert expected == parsed.render('{symbol.key}')

    >>> expected = [
    ...   LicenseSymbol('GPL-2.0+'),
    ...   LicenseSymbol('Classpath'),
    ...   LicenseSymbol('BSD')
    ... ]
    >>> assert expected == l.license_symbols(parsed)
    >>> assert expected == l.license_symbols(expression)

    This is set at runtime during parsing:

    >>> assert l.license_symbols(parsed)[1].as_exception
    """

    def __init__(self, symbols=tuple(), quiet=True):
        """
        Initialize a Licensing with an optional `symbols` sequence of LicenseSymbol
        or LicenseSymbol-like objects or license key strings. If provided and this
        list data is invalid, raise a ValueError.
        """
        super(Licensing, self).__init__(Symbol_class=LicenseSymbol, AND_class=AND, OR_class=OR)

        # FIXME: this should be instead a super class of all symbols
        self.LicenseSymbol = self.Symbol

        symbols = symbols or tuple()
        if symbols:
            symbols = tuple(as_symbols(symbols))
            warns, errors = validate_symbols(symbols)
            if warns and not quiet:
                for w in warns:
                    print(w)
            if errors and not quiet:
                for e in errors:
                    print(e)
            if errors:
                raise ValueError('\n'.join(warns + errors))

        # mapping of known symbol used for parsing and resolution as (key, symbol)
        # TODO: inject lpar, rpar and spaces surround, before and after
        # e.g "(sym)" "(sym " "sym)" " sym "
        self.known_symbols = {symbol.key: symbol for symbol in symbols}

        # Aho-Corasick automaton-based Scanner used for expression tokenizing,
        # built lazily by get_scanner()
        self.scanner = None

    def is_equivalent(self, expression1, expression2, **kwargs):
        """
        Return True if both `expressions` LicenseExpression are equivalent.
        If a string is provided, it will be parsed and simplified.
        Extra kwargs are passed down to the parse() function.
        """
        ex1 = self._parse_and_simplify(expression1, **kwargs)
        ex2 = self._parse_and_simplify(expression2, **kwargs)
        return ex1 == ex2

    def contains(self, expression1, expression2, **kwargs):
        """
        Return True if `expression1` contains `expression2`.
        Expressions are either a string or a LicenseExpression object.
        If a string is provided, it will be parsed and simplified.
        Extra kwargs are passed down to the parse() function.
        """
        ex1 = self._parse_and_simplify(expression1, **kwargs)
        ex2 = self._parse_and_simplify(expression2, **kwargs)
        return ex2 in ex1

    def _parse_and_simplify(self, expression, **kwargs):
        # Shared helper for is_equivalent()/contains(): parse then reduce to a
        # canonical simplified form so comparisons are meaningful.
        expression = self.parse(expression, **kwargs)
        if expression is None:
            return None
        if not isinstance(expression, LicenseExpression):
            raise TypeError('expressions must be LicenseExpression objects: %(expression1)r, %(expression2)r' % locals())
        return expression.simplify()

    def license_symbols(self, expression, unique=True, decompose=True, **kwargs):
        """
        Return a list of LicenseSymbol objects used in an expression in
        the same order as they first appear in the expression tree.
        `expression` is either a string or a LicenseExpression object.
        If a string is provided, it will be parsed.
        If `unique` is True only return unique symbols.
        If `decompose` is True then composite LicenseWithExceptionSymbol instance are
        not returned directly but their underlying license and exception symbols are
        returned instead.
        Extra kwargs are passed down to the parse() function.

        For example:
        >>> l = Licensing()
        >>> expected = [
        ...   LicenseSymbol('GPL-2.0'),
        ...   LicenseSymbol('LGPL-2.1+')
        ... ]
        >>> result = l.license_symbols(l.parse('GPL-2.0 or LGPL-2.1+'))
        >>> assert expected == result
        """
        expression = self.parse(expression, **kwargs)
        if expression is None:
            return []
        symbols = (s for s in expression.get_literals() if isinstance(s, BaseSymbol))
        if decompose:
            symbols = itertools.chain.from_iterable(s.decompose() for s in symbols)
        if unique:
            symbols = ordered_unique(symbols)
        return list(symbols)

    def primary_license_symbol(self, expression, decompose=True, **kwargs):
        """
        Return the left-most license symbol of an `expression` or None.
        `expression` is either a string or a LicenseExpression object.
        If `decompose` is True, only the left-hand license symbol of a decomposed
        LicenseWithExceptionSymbol symbol will be returned if this is the left most
        member. Otherwise a composite LicenseWithExceptionSymbol is returned in this
        case.
        Extra kwargs are passed down to the parse() function.
        """
        symbols = self.license_symbols(expression, decompose=decompose, **kwargs)
        if symbols:
            return symbols[0]

    def primary_license_key(self, expression, **kwargs):
        """
        Return the left-most license key of an `expression` or None. The underlying
        symbols are decomposed.
        `expression` is either a string or a LicenseExpression object.
        Extra kwargs are passed down to the parse() function.
        """
        prim = self.primary_license_symbol(expression, decompose=True, **kwargs)
        if prim:
            return prim.key

    def license_keys(self, expression, unique=True, **kwargs):
        """
        Return a list of licenses keys used in an expression in the same order as
        they first appear in the expression.
        `expression` is either a string or a LicenseExpression object.
        Extra kwargs are passed down to the parse() function.

        For example:
        >>> l = Licensing()
        >>> expr = ' GPL-2.0 and mit+ with blabla and mit or LGPL-2.1 and mit and mit+ with GPL-2.0'
        >>> expected = ['GPL-2.0', 'mit+', 'blabla', 'mit', 'LGPL-2.1']
        >>> assert expected == l.license_keys(l.parse(expr))
        """
        symbols = self.license_symbols(expression, unique=False, decompose=True, **kwargs)
        return self._keys(symbols, unique)

    def _keys(self, symbols, unique=True):
        # Extract the bare key strings from symbols, optionally deduplicated
        # while preserving first-seen order.
        keys = [ls.key for ls in symbols]
        # note: we only apply this on bare keys strings as we can have the same
        # symbol used as symbol or exception if we are not in strict mode
        if unique:
            keys = ordered_unique(keys)
        return keys

    def unknown_license_symbols(self, expression, unique=True, **kwargs):
        """
        Return a list of unknown licenses symbols used in an `expression` in the same
        order as they first appear in the `expression`.
        `expression` is either a string or a LicenseExpression object.
        Extra kwargs are passed down to the parse() function.
        """
        return [ls for ls in self.license_symbols(expression, unique=unique, decompose=True, **kwargs)
                if not ls.key in self.known_symbols]

    def unknown_license_keys(self, expression, unique=True, **kwargs):
        """
        Return a list of unknown licenses keys used in an `expression` in the same
        order as they first appear in the `expression`.
        `expression` is either a string or a LicenseExpression object.
        If a string is provided, it will be parsed.
        If `unique` is True only return unique keys.
        Extra kwargs are passed down to the parse() function.
        """
        symbols = self.unknown_license_symbols(expression, unique=False, **kwargs)
        return self._keys(symbols, unique)

    def parse(self, expression, validate=False, strict=False, **kwargs):
        """
        Return a new license LicenseExpression object by parsing a license expression
        string. Check that the expression syntax is valid and raise an Exception,
        ExpressionError or ParseError on errors. Return None for empty expressions.
        `expression` is either a string or a LicenseExpression object. If this is a
        LicenseExpression it is returned as-is.

        Symbols are always recognized from known symbols if `symbols` were provided
        Licensing creation time: each license and exception is recognized from known
        license keys (and from aliases for a symbol if available).

        If `validate` is True and a symbol is unknown, an ExpressionError error
        is raised with a message listing the unknown license keys.

        If `validate` is False, no error is triggered.
        You can call the `unknown_license_keys` or `unknown_license_symbols` methods
        to get unknown license keys or symbols found in a parsed LicenseExpression.

        If `strict` is True, additional exceptions will be raised if in a expression
        such as "XXX with ZZZ" if the XXX symbol has `is_exception` set to True or
        the ZZZ symbol has `is_exception` set to False.

        When a symbol used as an exception its attribute `as_exception` is set to True.

        For example:
        >>> expression = 'EPL-1.0 and Apache-1.1 OR GPL-2.0 with Classpath-exception'
        >>> parsed = Licensing().parse(expression)
        >>> expected = '(EPL-1.0 AND Apache-1.1) OR GPL-2.0 WITH Classpath-exception'
        >>> assert expected == parsed.render(template='{symbol.key}')
        """
        if expression is None:
            return

        if isinstance(expression, LicenseExpression):
            return expression

        if isinstance(expression, bytes):
            try:
                expression = unicode(expression)
            except Exception:
                ext = type(expression)
                raise ExpressionError('expression must be a string and not: %(ext)r' % locals())

        if not isinstance(expression, str):
            ext = type(expression)
            raise ExpressionError('expression must be a string and not: %(ext)r' % locals())

        if not expression or not expression.strip():
            return
        try:
            # this will raise a ParseError on errors
            tokens = list(self.tokenize(expression, strict=strict))
            expression = super(Licensing, self).parse(tokens)
        except TypeError as e:
            msg = 'Invalid expression syntax: ' + repr(e)
            raise ExpressionError(msg)

        if not isinstance(expression, LicenseExpression):
            raise ExpressionError('expression must be a LicenseExpression once parsed.')

        if validate:
            unknown_keys = self.unknown_license_keys(expression, unique=True)
            if unknown_keys:
                msg = 'Unknown license key(s): {}'.format(', '.join(unknown_keys))
                raise ExpressionError(msg)

        return expression

    def tokenize(self, expression, strict=False):
        """
        Return an iterable of 3-tuple describing each token given an expression
        unicode string. See boolean.BooleanAlgebra.tokenize() for API details.

        This 3-tuple contains these items: (token, token string, position):
        - token: either a Symbol instance or one of TOKEN_* token types..
        - token string: the original token unicode string.
        - position: some simple object describing the starting position of the
          original token string in the `expr` string. It can be an int for a
          character offset, or a tuple of starting (row/line, column).

        If `strict` is True, additional exceptions will be raised in a expression
        such as "XXX with ZZZ" if the XXX symbol has `is_exception` set to True or the
        ZZZ symbol has `is_exception` set to False.
        """
        if self.known_symbols:
            # scan with an automaton, recognize whole symbols+keywords or only keywords
            scanner = self.get_scanner()
            results = scanner.scan(expression)
        else:
            # scan with a simple regex-based splitter
            results = splitter(expression)

        results = strip_and_skip_spaces(results)
        result_groups = group_results_for_with_subexpression(results)
        for group in result_groups:
            len_group = len(group)
            if not len_group:
                # This should never happen
                continue
            if len_group == 1:
                # a single token
                result = group[0]
                pos = result.start
                token_string = result.string
                output = result.output
                if output:
                    val = output.value
                    if isinstance(val, Keyword):
                        # keyword
                        token = val.type
                        # WITH is not known from the boolean parser as a proper
                        # boolean element so we handle validation ourselves: by
                        # design a single group cannot be a single 'WITH' keyword:
                        # this is an error that we catch and raise here.
                        if token == TOKEN_WITH:
                            raise ParseError(token_type=TOKEN_WITH,
                                             token_string=result.string,
                                             position=result.start,
                                             error_code=PARSE_INVALID_EXPRESSION)
                    elif isinstance(val, LicenseSymbol):
                        if strict and val.is_exception:
                            raise ParseError(token_type=TOKEN_SYMBOL,
                                             token_string=result.string,
                                             position=result.start,
                                             error_code=PARSE_INVALID_EXCEPTION)

                        # known symbol: The strict check above handled possible errors before.
                        token = val
                    else:
                        # this should not be possible by design
                        raise Exception('Licensing.tokenize is internally confused...')
                else:
                    token = LicenseSymbol(result.string)
            else:
                if len_group != 3:
                    # this should never happen
                    string = ' '.join([res.string for res in group])
                    start = group[0].start
                    raise ParseError(
                        TOKEN_SYMBOL, string, start, PARSE_INVALID_EXPRESSION)

                # this is a A with B seq of three results
                lic_res, WITH, exc_res = group
                pos = lic_res.start
                WITHs = ' ' + WITH.string.strip() + ' '
                token_string = ''.join([lic_res.string, WITHs, exc_res.string])

                # licenses
                lic_out = lic_res.output
                lic_sym = lic_out and lic_out.value

                # this should not happen
                if lic_sym and not isinstance(lic_sym, LicenseSymbol):
                    raise ParseError(TOKEN_SYMBOL, lic_res.string, lic_res.start,
                                     PARSE_INVALID_SYMBOL)

                if not lic_sym:
                    lic_sym = LicenseSymbol(lic_res.string, is_exception=False)

                if not isinstance(lic_sym, LicenseSymbol):
                    raise ParseError(TOKEN_SYMBOL, lic_res.string, lic_res.start,
                                     PARSE_INVALID_SYMBOL)

                if strict and lic_sym.is_exception:
                    raise ParseError(TOKEN_SYMBOL, lic_res.string, lic_res.start,
                                     PARSE_INVALID_EXCEPTION)

                # exception
                exc_out = exc_res.output
                exc_sym = exc_out and exc_out.value

                # this should not happen
                if exc_sym and not isinstance(exc_sym, LicenseSymbol):
                    # bug fix: report the exception-side scan result here; the
                    # original raised with lic_sym.string/lic_sym.start, but
                    # lic_sym is a symbol and has no such attributes
                    raise ParseError(TOKEN_SYMBOL, exc_res.string, exc_res.start,
                                     PARSE_INVALID_SYMBOL)

                if exc_sym:
                    exc_sym = copy(exc_sym)

                if not exc_sym:
                    exc_sym = LicenseSymbol(exc_res.string)

                if not isinstance(exc_sym, LicenseSymbol):
                    raise ParseError(TOKEN_SYMBOL, exc_res.string, exc_res.start,
                                     PARSE_INVALID_SYMBOL)

                if strict and self.known_symbols and not exc_sym.is_exception:
                    raise ParseError(TOKEN_SYMBOL, exc_res.string, exc_res.start,
                                     PARSE_INVALID_SYMBOL_AS_EXCEPTION)

                token = LicenseWithExceptionSymbol(lic_sym, exc_sym, strict)

            yield token, token_string, pos

    def get_scanner(self):
        """
        Return a scanner either cached or created as needed. If symbols were provided
        when this Licensing object was created, the scanner will recognize known
        symbols when tokenizing expressions. Otherwise, only keywords are recognized
        and a license symbol is anything in between keywords.
        """
        if self.scanner is not None:
            return self.scanner

        self.scanner = scanner = Scanner(ignore_case=True)
        for keyword in _KEYWORDS:
            scanner.add(keyword.value, keyword, priority=0)

        # self.known_symbols has been created at Licensing initialization time and is
        # already validated and trusted here
        for key, symbol in self.known_symbols.items():
            # always use the key even if there are no aliases.
            scanner.add(key, symbol, priority=1)
            aliases = getattr(symbol, 'aliases', [])
            for alias in aliases:
                # normalize spaces for each alias. The Scanner will lowercase them
                # since we created it with ignore_case=True
                if alias:
                    alias = ' '.join(alias.split())
                if alias:
                    scanner.add(alias, symbol, priority=2)

        scanner.make_automaton()
        return scanner
class Renderable(object):
    """
    An interface for renderable objects.
    """

    def render(self, template='{symbol.key}', *args, **kwargs):
        """
        Return a formatted string rendering for this expression using the
        `template` format string to render each symbol. The variables available
        are `symbol.key` and any other attribute that was attached to a license
        symbol instance and a custom template can be provided to handle custom
        HTML rendering or similar.

        For symbols that hold multiple licenses (e.g. a WITH statement) the
        template is applied to each symbol individually.

        Note that when render() is called the *args and **kwargs are propagated
        recursively to any Renderable object render() method.
        """
        # bug fix: this previously did "return NotImplementedError", handing the
        # exception *class* back to the caller instead of signaling that the
        # subclass failed to implement render().
        raise NotImplementedError
class BaseSymbol(Renderable, boolean.Symbol):
    """
    A base class for all symbols.
    """

    def decompose(self):
        """
        Yield the underlying symbols of this symbol.
        """
        # Abstract: concrete subclasses must yield their constituent
        # LicenseSymbol objects (e.g. a WITH symbol yields both of its sides).
        raise NotImplementedError
# Matcher used to validate license keys: letters, digits, underscore, dot,
# plus, hyphen and whitespace only (anchored over the whole key).
is_valid_license_key = re.compile(r'^[-\w\s\.\+]+$', re.UNICODE).match
#FIXME: we need to implement comparison!!!!
@total_ordering
class LicenseSymbol(BaseSymbol):
    """
    A LicenseSymbol represents a license as used in a license expression.

    - `key` is the canonical license identifier string; it may contain spaces.
    - `aliases` is an optional sequence of alternative names for this key.
    - `is_exception` flags license exceptions (the right side of a WITH).
    """

    def __init__(self, key, aliases=tuple(), is_exception=False, *args, **kwargs):
        if not key:
            raise ExpressionError(
                'A license key cannot be empty: %(key)r' % locals())

        if not isinstance(key, str):
            # NOTE(review): `unicode` only exists on Python 2; on Python 3 the
            # call below raises NameError, which the bare except converts into
            # the same ExpressionError -- confirm this is intended there.
            if isinstance(key, bytes):
                try:
                    key = unicode(key)
                except:
                    raise ExpressionError(
                        'A license key must be a unicode string: %(key)r' % locals())
            else:
                raise ExpressionError(
                    'A license key must be a unicode string: %(key)r' % locals())

        key = key.strip()

        if not key:
            raise ExpressionError(
                'A license key cannot be blank: "%(key)s"' % locals())

        # note: key can contain spaces
        if not is_valid_license_key(key):
            raise ExpressionError(
                'Invalid license key: the valid characters are: letters and numbers, '
                'underscore, dot or hyphen signs and spaces: "%(key)s"' % locals())

        # normalize for spaces
        key = ' '.join(key.split())

        # a bare keyword as a key would make expressions ambiguous
        if key.lower() in KEYWORDS_STRIPPED:
            raise ExpressionError(
                'Invalid license key: a key cannot be a reserved keyword: "or", "and" or "with: "%(key)s"' % locals())

        self.key = key

        if aliases and not isinstance(aliases, (list, tuple,)):
            raise TypeError('License aliases must be a sequence.')
        self.aliases = aliases and tuple(aliases) or tuple()
        self.is_exception = is_exception

        # set at runtime based on parsing when the symbol was used as an exception
        self.as_exception = False

        # super only know about a single "obj" object.
        super(LicenseSymbol, self).__init__(self.key)

    def decompose(self):
        """
        Return an iterable the underlying symbols for this symbol
        """
        # a plain license symbol decomposes to itself only
        yield self

    def __hash__(self, *args, **kwargs):
        # hash and equality both use (key, is_exception) so they stay consistent
        return hash((self.key, self.is_exception))

    def __eq__(self, other):
        # equal to another LicenseSymbol, or to any "symbol-like" object (an
        # object exposing .key and .is_exception) with the same values
        return (self is other
            or (isinstance(other, self.__class__)
                and self.key == other.key
                and self.is_exception == other.is_exception)
            or (self.symbol_like(other)
                and self.key == other.key
                and self.is_exception == other.is_exception)
        )

    __nonzero__ = __bool__ = lambda s: True

    def render(self, template='{symbol.key}', *args, **kwargs):
        # the template receives this symbol as the `symbol` variable
        return template.format(symbol=self)

    def __str__(self):
        return self.key

    def __repr__(self):
        cls = self.__class__.__name__
        key = self.key
        aliases = self.aliases and ('aliases=%(a)r, ' % {'a': self.aliases}) or ''
        is_exception = self.is_exception
        return '%(cls)s(%(key)r, %(aliases)sis_exception=%(is_exception)r)' % locals()

    def __copy__(self):
        # note: as_exception is intentionally not carried over; the copy is
        # rebuilt through __init__ which resets it to False
        return LicenseSymbol(self.key, tuple(self.aliases), self.is_exception)

    @classmethod
    def symbol_like(cls, symbol):
        """
        Return True if `symbol` is a symbol-like object with its essential attributes.
        """
        return hasattr(symbol, 'key') and hasattr(symbol, 'is_exception')
#FIXME: we need to implement comparison!!!!
@total_ordering
class LicenseSymbolLike(LicenseSymbol):
    """
    A LicenseSymbolLike object wraps a symbol-like object to expose a LicenseSymbol
    behavior.
    """

    def __init__(self, symbol_like, *args, **kwargs):
        if not self.symbol_like(symbol_like):
            raise ExpressionError(
                'Not a symbol-like object: %(symbol_like)r' % locals())

        self.wrapped = symbol_like
        # initialize the LicenseSymbol state from the wrapped object's key...
        super(LicenseSymbolLike, self).__init__(self.wrapped.key, *args, **kwargs)
        # ... then mirror the wrapped object's flags, overriding whatever the
        # super __init__ derived from the plain key alone
        self.is_exception = self.wrapped.is_exception
        self.aliases = getattr(self.wrapped, 'aliases', tuple())

        # can we delegate rendering to a render method of the wrapped object?
        # we can if we have a .render() callable on the wrapped object.
        self._render = None
        renderer = getattr(symbol_like, 'render', None)
        if callable(renderer):
            self._render = renderer

    def __copy__(self):
        return LicenseSymbolLike(symbol_like=self.wrapped)

    def render(self, template='{symbol.key}', *args, **kwargs):
        # prefer the wrapped object's own renderer when it provides one
        if self._render:
            return self._render(template, *args, **kwargs)
        return super(LicenseSymbolLike, self).render(template, *args, **kwargs)
#FIXME: we need to implement comparison!!!!
@total_ordering
class LicenseWithExceptionSymbol(BaseSymbol):
    """
    A LicenseWithExceptionSymbol represents a license "with" an exception as used in
    a license expression. It holds two LicenseSymbols objects: one for the left-hand
    license proper and one for the right-hand exception to this license and deals
    with the specifics of resolution, validation and representation.
    """

    def __init__(self, license_symbol, exception_symbol, strict=False, *args, **kwargs):
        """
        Initialize a new LicenseWithExceptionSymbol from a `license_symbol` and a
        `exception_symbol` symbol-like objects.

        Raise a ExpressionError exception if strict is True and either:
        - license_symbol.is_exception is True
        - exception_symbol.is_exception is not True
        """
        if not LicenseSymbol.symbol_like(license_symbol):
            raise ExpressionError(
                'license_symbol must be a LicenseSymbol-like object: %(license_symbol)r' % locals())

        if strict and license_symbol.is_exception:
            raise ExpressionError(
                'license_symbol cannot be an exception with "is_exception" set to True: %(license_symbol)r' % locals())

        if not LicenseSymbol.symbol_like(exception_symbol):
            raise ExpressionError(
                'exception_symbol must be a LicenseSymbol-like object: %(exception_symbol)r' % locals())

        if strict and not exception_symbol.is_exception:
            raise ExpressionError(
                'exception_symbol must be an exception with "is_exception" set to True: %(exception_symbol)r' % locals())

        self.license_symbol = license_symbol
        # record on the symbol itself that it was used as an exception
        exception_symbol.as_exception = True
        self.exception_symbol = exception_symbol
        # super only knows about a single object: use the "lic WITH exc" string
        super(LicenseWithExceptionSymbol, self).__init__(str(self))

    def __copy__(self):
        # NOTE(review): `strict` is not propagated on copy -- the copy is built
        # with the default strict=False; confirm this is intended.
        return LicenseWithExceptionSymbol(copy(self.license_symbol), copy(self.exception_symbol))

    def decompose(self):
        # yield both sides of the WITH expression
        yield self.license_symbol
        yield self.exception_symbol

    def render(self, template='{symbol.key}', *args, **kwargs):
        # apply the template to each side independently
        lic = self.license_symbol.render(template, *args, **kwargs)
        exc = self.exception_symbol.render(template, *args, **kwargs)
        return '%(lic)s WITH %(exc)s' % locals()

    def __hash__(self, *args, **kwargs):
        return hash((self.license_symbol, self.exception_symbol,))

    def __eq__(self, other):
        return self is other or (
            isinstance(other, self.__class__)
            and self.license_symbol == other.license_symbol
            and self.exception_symbol == other.exception_symbol)

    __nonzero__ = __bool__ = lambda s: True

    def __str__(self):
        lkey = self.license_symbol.key
        ekey = self.exception_symbol.key
        return '%(lkey)s WITH %(ekey)s' % locals()

    def __repr__(self):
        data = dict(cls=self.__class__.__name__)
        data.update(self.__dict__)
        return '%(cls)s(license_symbol=%(license_symbol)r, exception_symbol=%(exception_symbol)r)' % data
class RenderableFunction(Renderable):
    """
    Base rendering behavior for boolean function nodes (AND/OR) in a license
    expression tree.
    """
    # derived from the __str__ code in boolean.py

    def render(self, template='{symbol.key}', *args, **kwargs):
        """
        Render an expression as a string, recursively applying the string `template`
        to every symbols and operators.
        """
        expression_args = self.args

        if len(expression_args) == 1:
            # a single argument, e.g. a unary function wrapping a bare symbol
            sym = expression_args[0]
            if isinstance(sym, Renderable):
                sym = sym.render(template, *args, **kwargs)
            else:
                # FIXME: CAN THIS REALLY HAPPEN since we only have symbols, OR, AND?
                print('WARNING: symbol is not renderable: using plain string representation.')
                sym = str(sym)

            if self.isliteral:
                rendered = '%s%s' % (self.operator, sym)
            else:
                # NB: the operator str already has a leading and trailing space
                rendered = '%s(%s)' % (self.operator, sym)
            return rendered

        rendered_items = []
        rendered_items_append = rendered_items.append
        for arg in expression_args:
            if isinstance(arg, Renderable):
                # recurse into sub-expressions
                rendered = arg.render(template, *args, **kwargs)
            else:
                # FIXME: CAN THIS REALLY HAPPEN since we only have symbols, or and AND?
                # bug fix: the message was never interpolated (the "% locals()"
                # was missing), so it printed the literal text "%(arg)r".
                print('WARNING: object in expression is not renderable: falling back to plain string representation: %(arg)r.' % locals())
                rendered = str(arg)

            # NOTE(review): a non-Renderable arg may not expose .isliteral;
            # confirm whether this branch is actually reachable.
            if arg.isliteral:
                rendered_items_append(rendered)
            else:
                rendered_items_append('(%s)' % rendered)

        return self.operator.join(rendered_items)
class AND(RenderableFunction, boolean.AND):
    """
    Custom representation for the AND operator to uppercase.
    """

    def __init__(self, *args):
        super(AND, self).__init__(*args)
        # the surrounding spaces are relied on by RenderableFunction.render()
        self.operator = ' AND '
class OR(RenderableFunction, boolean.OR):
    """
    Custom representation for the OR operator to uppercase.
    """

    def __init__(self, *args):
        super(OR, self).__init__(*args)
        # the surrounding spaces are relied on by RenderableFunction.render()
        self.operator = ' OR '
def ordered_unique(seq):
    """
    Return a list of the unique items of ``seq``, keeping their original
    first-seen order. Return an empty list for an empty or None ``seq``.
    """
    # membership test on a list (not a set) on purpose: items are not
    # required to be hashable
    uniques = []
    if seq:
        for element in seq:
            if element not in uniques:
                uniques.append(element)
    return uniques
def strip_and_skip_spaces(results):
    """
    Yield the Result items of ``results`` whose string is not whitespace-only.
    """
    for res in results:
        if res.string.strip():
            yield res
def group_results_for_with_subexpression(results):
    """
    Yield tuples of Result given a sequence of Result such that:
    - every symbol/WITH/symbol run of three results is grouped in a three-tuple
    - any other result is yielded alone in a one-tuple.
    """
    # Slide a window of three contiguous results over the sequence: when the
    # window is a "A with B" shape, emit it whole; otherwise emit its oldest
    # member alone and keep sliding.
    results = list(results)
    triple_len = 3

    # no grouping is possible with fewer than three results
    if len(results) < triple_len:
        for single in results:
            yield (single,)
        return

    window = collections.deque()
    for current in results:
        if len(window) == triple_len:
            if is_with_subexpression(window):
                yield tuple(window)
                window.clear()
            else:
                yield (window.popleft(),)
        window.append(current)

    # flush whatever is left in the window
    if window:
        if len(window) == triple_len and is_with_subexpression(window):
            yield tuple(window)
        else:
            for single in window:
                yield (single,)
def is_symbol(result):
    """
    Return True if `result` was scanned as a symbol: either it carries a known
    LicenseSymbol output, or it has no output at all (an unknown symbol).
    """
    output = result.output
    if not output:
        return True
    return isinstance(output.value, LicenseSymbol)
def is_with_keyword(result):
    """
    Return a truthy value if `result` was scanned as the WITH keyword.
    """
    output = result.output
    return (output
        and isinstance(output.value, Keyword)
        and output.value.type == TOKEN_WITH)
def is_with_subexpression(results):
    """
    Return True if the three scan `results` form a "license WITH exception"
    subexpression: symbol, WITH keyword, symbol.
    """
    license_res, with_res, exception_res = results
    return (is_symbol(license_res)
        and is_with_keyword(with_res)
        and is_symbol(exception_res))
def as_symbols(symbols):
    """
    Return an iterable of LicenseSymbol objects from a sequence of `symbols` or
    strings. If an item is a string, then create a new LicenseSymbol for it using the
    string as key. If this is not a string it must be a LicenseSymbol-like type. It
    will raise a TypeError expection if an item is neither a string or LicenseSymbol-
    like.
    """
    if symbols:
        for symbol in symbols:
            # silently skip empty/None items
            if not symbol:
                continue

            if isinstance(symbol, bytes):
                # NOTE(review): `unicode` is Python 2-only; on Python 3 this
                # raises NameError, swallowed by the bare except below into the
                # TypeError -- confirm Python 3 support is expected here.
                try:
                    symbol = unicode(symbol)
                except:
                    raise TypeError('%(symbol)r is not a unicode string.' % locals())

            if isinstance(symbol, unicode):
                # whitespace-only strings are silently ignored
                if symbol.strip():
                    yield LicenseSymbol(symbol)

            elif isinstance(symbol, LicenseSymbol):
                yield symbol

            elif LicenseSymbol.symbol_like(symbol):
                # wrap foreign symbol-like objects to get LicenseSymbol behavior
                yield LicenseSymbolLike(symbol)

            else:
                raise TypeError('%(symbol)r is not a unicode string '
                                'or a LicenseSymbol-like instance.' % locals())
def validate_symbols(symbols, validate_keys=False, _keywords=KEYWORDS):
    """
    Return a tuple of (`warnings`, `errors`) given a sequence of `symbols`
    LicenseSymbol-like objects.

    - warnings is a list of validation warnings messages (possibly empty if there
      were no warnings).
    - errors is a list of validation error messages (possibly empty if there were no
      errors).

    Keys and aliases are cleaned and validated for uniqueness.
    """
    # collections used for checking unicity and correctness
    seen_keys = set()
    seen_aliases = {}
    seen_exceptions = set()

    # collections to accumulate invalid data and build error messages at the end
    not_symbol_classes = []
    dupe_keys = set()
    dupe_exceptions = set()
    dupe_aliases = collections.defaultdict(list)
    invalid_keys_as_kw = set()
    invalid_alias_as_kw = collections.defaultdict(list)
    # warning
    warning_dupe_aliases = set()

    for symbol in symbols:
        if not isinstance(symbol, LicenseSymbol):
            not_symbol_classes.append(symbol)
            continue

        key = symbol.key
        key = key.strip()
        keyl = key.lower()

        # ensure keys are unique
        if keyl in seen_keys:
            dupe_keys.add(key)

        # key cannot be an expression keyword
        if keyl in _keywords:
            invalid_keys_as_kw.add(key)

        # keep a set of unique seen keys
        seen_keys.add(keyl)

        # aliases is an optional attribute
        aliases = getattr(symbol, 'aliases', [])
        initial_alias_len = len(aliases)

        # always normalize aliases for spaces and case
        aliases = set([' '.join(alias.lower().strip().split()) for alias in aliases])
        # KEEP UNIQUES, remove empties
        aliases = set(a for a in aliases if a)

        # issue a warning when there are duplicated or empty aliases
        if len(aliases) != initial_alias_len:
            warning_dupe_aliases.add(key)

        # always add a lowercase key as an alias
        aliases.add(keyl)

        for alias in aliases:
            # note that we do not treat as an error the presence of a duplicated
            # alias pointing to the same key

            # ensure that a possibly duplicated alias does not point to another key
            aliased_key = seen_aliases.get(alias)
            if aliased_key and aliased_key != keyl:
                dupe_aliases[alias].append(key)

            # an alias cannot be an expression keyword
            if alias in _keywords:
                invalid_alias_as_kw[key].append(alias)

            seen_aliases[alias] = keyl

        if symbol.is_exception:
            if keyl in seen_exceptions:
                dupe_exceptions.add(keyl)
            else:
                seen_exceptions.add(keyl)

    # build error messages from the accumulated invalid data
    errors = []
    # sort by repr: the invalid items may not be comparable with each other,
    # which would raise a TypeError on Python 3
    for ind in sorted(not_symbol_classes, key=repr):
        errors.append('Invalid item: not a LicenseSymbol object: %(ind)s.' % locals())

    for dupe in sorted(dupe_keys):
        errors.append('Invalid duplicated license key: %(dupe)s.' % locals())

    for dalias, dkeys in sorted(dupe_aliases.items()):
        dkeys = ', '.join(dkeys)
        errors.append('Invalid duplicated alias pointing to multiple keys: '
                      '%(dalias)s point to keys: %(dkeys)s.' % locals())

    for ikey, ialiases in sorted(invalid_alias_as_kw.items()):
        ialiases = ', '.join(ialiases)
        errors.append('Invalid aliases: an alias cannot be an expression keyword. '
                      'key: "%(ikey)s", aliases: %(ialiases)s.' % locals())

    for dupe in sorted(dupe_exceptions):
        errors.append('Invalid duplicated license exception key: %(dupe)s.' % locals())

    for ikw in sorted(invalid_keys_as_kw):
        errors.append('Invalid key: a key cannot be an expression keyword: %(ikw)s.' % locals())

    # build warning messages
    warnings = []
    # bug fix: this loop previously iterated `dupe_aliases` and appended to
    # `errors`, leaving `warning_dupe_aliases` unused and the returned
    # warnings list always empty.
    for dupeal in sorted(warning_dupe_aliases):
        warnings.append('Duplicated or empty aliases ignored for license key: %(dupeal)r.' % locals())

    return warnings, errors
_splitter = re.compile('''
(?P<symbol>[^\s\(\)]+)
|
(?P<space>\s+)
|
(?P<lpar>\()
|
(?P<rpar>\))
''',
re.VERBOSE | re.MULTILINE | re.UNICODE
).finditer
def splitter(expression):
    """
    Return an iterable of Result describing each token given an
    expression unicode string.

    This is a simpler tokenizer used when the Licensing does not have
    known symbols. The split is done on spaces and parens. Anything else
    is either a token or a symbol.
    """
    if not expression:
        return
    if not isinstance(expression, str):
        raise ParseError(error_code=PARSE_EXPRESSION_NOT_UNICODE)

    # mapping of lowercase token strings to a token type id
    TOKENS = {
        'and': Keyword(value='and', type=TOKEN_AND),
        'or': Keyword(value='or', type=TOKEN_OR),
        'with': Keyword(value='with', type=TOKEN_WITH),
    }

    for match in _splitter(expression):
        if not match:
            continue

        # Result positions are inclusive [start, end] offsets, hence the -1
        # on the half-open span() end.
        start, end = match.span()
        end = end - 1

        mgd = match.groupdict()

        space = mgd.get('space')
        if space:
            # spaces carry no Output: they are skipped downstream
            yield Result(start, end, space, None)

        lpar = mgd.get('lpar')
        if lpar:
            yield Result(start, end, lpar, Output(lpar, KW_LPAR))

        rpar = mgd.get('rpar')
        if rpar:
            yield Result(start, end, rpar, Output(rpar, KW_RPAR))

        token_or_sym = mgd.get('symbol')
        if not token_or_sym:
            continue

        # a non-space, non-paren run is either a keyword or a license symbol
        token = TOKENS.get(token_or_sym.lower())
        if token:
            yield Result(start, end, token_or_sym, Output(token_or_sym, token))
        # elif token_or_sym.endswith('+') and token_or_sym != '+':
        #     val = token_or_sym[:-1]
        #     sym = LicenseSymbol(key=val)
        #     yield Result(start, end - 1, val, Output(val, sym))
        #     yield Result(end, end, '+', Output('+', KW_PLUS))
        else:
            sym = LicenseSymbol(key=token_or_sym)
            yield Result(start, end, token_or_sym, Output(token_or_sym, sym))
| |
import sys
import logging
import re
import maya.OpenMaya as om
from capture_gui.vendor.Qt import QtCore, QtWidgets
import capture_gui.lib
import capture_gui.plugin
log = logging.getLogger("Time Range")
def parse_frames(string):
    """Parse the resulting frames list from a frame list string.

    Examples
        >>> parse_frames("0-3;30")
        [0, 1, 2, 3, 30]
        >>> parse_frames("0,2,4,-10")
        [0, 2, 4, -10]
        >>> parse_frames("-10--5,-2")
        [-10, -9, -8, -7, -6, -5, -2]

    Args:
        string (str): The string to parse for frames.

    Returns:
        list: A list of frames

    Raises:
        ValueError: When the string is empty, contains invalid characters,
            a malformed frame description, or yields no frames at all.
    """
    result = list()
    if not string.strip():
        raise ValueError("Can't parse an empty frame string.")

    if not re.match("^[-0-9,; ]*$", string):
        raise ValueError("Invalid symbols in frame string: {}".format(string))

    for raw in re.split(";|,", string):
        # Skip empty elements
        value = raw.strip().replace(" ", "")
        if not value:
            continue

        # Check for sequences (1-20) including negatives (-10--8).
        # Bug fix: this used re.search, which silently accepted malformed
        # chunks like "1-2-3" by matching only the "1-2" prefix (yielding an
        # empty range and no error). Anchoring with match + "$" makes such
        # chunks fall through to the single-frame parse, which raises.
        sequence = re.match("(-?[0-9]+)-(-?[0-9]+)$", value)

        # Sequence
        if sequence:
            start, end = sequence.groups()
            frames = range(int(start), int(end) + 1)
            result.extend(frames)

        # Single frame
        else:
            try:
                frame = int(value)
            except ValueError:
                raise ValueError("Invalid frame description: "
                                 "'{0}'".format(value))
            result.append(frame)

    if not result:
        # This happens when only spaces are entered with a separator like `,` or `;`
        raise ValueError("Unable to parse any frames from string: {}".format(string))

    return result
class TimePlugin(capture_gui.plugin.Plugin):
    """Widget for time based options."""

    id = "Time Range"
    section = "app"
    order = 30

    # Labels for the four time range modes shown in the combo box.
    RangeTimeSlider = "Time Slider"
    RangeStartEnd = "Start/End"
    CurrentFrame = "Current Frame"
    CustomFrames = "Custom Frames"

    # Bug fix: the spin box ranges used `sys.maxint`, which is Python 2-only
    # and, on 64-bit builds, exceeds the signed 32-bit integer a QSpinBox can
    # store. Use the explicit Qt integer limit instead.
    MAX_FRAME = 2 ** 31 - 1

    def __init__(self, parent=None):
        super(TimePlugin, self).__init__(parent=parent)

        self._event_callbacks = list()

        self._layout = QtWidgets.QHBoxLayout()
        self._layout.setContentsMargins(5, 0, 5, 0)
        self.setLayout(self._layout)

        self.mode = QtWidgets.QComboBox()
        self.mode.addItems([self.RangeTimeSlider,
                            self.RangeStartEnd,
                            self.CurrentFrame,
                            self.CustomFrames])

        frame_input_height = 20
        self.start = QtWidgets.QSpinBox()
        self.start.setRange(-self.MAX_FRAME, self.MAX_FRAME)
        self.start.setFixedHeight(frame_input_height)
        self.end = QtWidgets.QSpinBox()
        self.end.setRange(-self.MAX_FRAME, self.MAX_FRAME)
        self.end.setFixedHeight(frame_input_height)

        # Free-form frames field, e.g. "1-20,25;50;75,100-150".
        self.custom_frames = QtWidgets.QLineEdit()
        self.custom_frames.setFixedHeight(frame_input_height)
        self.custom_frames.setPlaceholderText("Example: 1-20,25;50;75,100-150")
        self.custom_frames.setVisible(False)

        self._layout.addWidget(self.mode)
        self._layout.addWidget(self.start)
        self._layout.addWidget(self.end)
        self._layout.addWidget(self.custom_frames)

        # Connect callbacks to ensure start is never higher than end
        # and the end is never lower than start
        self.end.valueChanged.connect(self._ensure_start)
        self.start.valueChanged.connect(self._ensure_end)

        self.on_mode_changed()  # force enabled state refresh

        self.mode.currentIndexChanged.connect(self.on_mode_changed)
        self.start.valueChanged.connect(self.on_mode_changed)
        self.end.valueChanged.connect(self.on_mode_changed)
        self.custom_frames.textChanged.connect(self.on_mode_changed)

    def _ensure_start(self, value):
        """Clamp the start spin box so it never exceeds `value` (the end)."""
        self.start.setValue(min(self.start.value(), value))

    def _ensure_end(self, value):
        """Clamp the end spin box so it never drops below `value` (the start)."""
        self.end.setValue(max(self.end.value(), value))

    def on_mode_changed(self, emit=True):
        """Update the GUI when the user updated the time range or settings.

        Arguments:
            emit (bool): Whether to emit the options changed signal

        Returns:
            None
        """
        mode = self.mode.currentText()

        if mode == self.RangeTimeSlider:
            start, end = capture_gui.lib.get_time_slider_range()
            self.start.setEnabled(False)
            self.end.setEnabled(False)
            self.start.setVisible(True)
            self.end.setVisible(True)
            self.custom_frames.setVisible(False)
            mode_values = int(start), int(end)
        elif mode == self.RangeStartEnd:
            self.start.setEnabled(True)
            self.end.setEnabled(True)
            self.start.setVisible(True)
            self.end.setVisible(True)
            self.custom_frames.setVisible(False)
            mode_values = self.start.value(), self.end.value()
        elif mode == self.CustomFrames:
            self.start.setVisible(False)
            self.end.setVisible(False)
            self.custom_frames.setVisible(True)
            mode_values = "({})".format(self.custom_frames.text())

            # ensure validation state for custom frames
            self.validate()
        else:
            # Current Frame mode
            self.start.setEnabled(False)
            self.end.setEnabled(False)
            self.start.setVisible(True)
            self.end.setVisible(True)
            self.custom_frames.setVisible(False)
            currentframe = int(capture_gui.lib.get_current_frame())
            mode_values = "({})".format(currentframe)

        # Update label
        self.label = "Time Range {}".format(mode_values)
        self.label_changed.emit(self.label)

        if emit:
            self.options_changed.emit()

    def validate(self):
        """Return a list of error messages; highlight the field when invalid."""
        errors = []

        if self.mode.currentText() == self.CustomFrames:
            # Reset any previous error highlight first.
            self.custom_frames.setStyleSheet("")
            try:
                parse_frames(self.custom_frames.text())
            except ValueError as exc:
                errors.append("{} : Invalid frame description: "
                              "{}".format(self.id, exc))
                # `self.highlight` is the error stylesheet provided by the
                # Plugin base class.
                self.custom_frames.setStyleSheet(self.highlight)

        return errors

    def get_outputs(self, panel=""):
        """Get the plugin outputs that matches `capture.capture` arguments

        Returns:
            dict: Plugin outputs
        """
        mode = self.mode.currentText()
        frames = None

        if mode == self.RangeTimeSlider:
            start, end = capture_gui.lib.get_time_slider_range()
        elif mode == self.RangeStartEnd:
            start = self.start.value()
            end = self.end.value()
        elif mode == self.CurrentFrame:
            frame = capture_gui.lib.get_current_frame()
            start = frame
            end = frame
        elif mode == self.CustomFrames:
            frames = parse_frames(self.custom_frames.text())
            start = None
            end = None
        else:
            raise NotImplementedError("Unsupported time range mode: "
                                      "{0}".format(mode))

        return {"start_frame": start,
                "end_frame": end,
                "frame": frames}

    def get_inputs(self, as_preset):
        """Return the widget state for persisting as settings/presets."""
        return {"time": self.mode.currentText(),
                "start_frame": self.start.value(),
                "end_frame": self.end.value(),
                "frame": self.custom_frames.text()}

    def apply_inputs(self, settings):
        """Restore widget state from a `settings` dict (see get_inputs)."""
        # get values
        mode = self.mode.findText(settings.get("time", self.RangeTimeSlider))
        startframe = settings.get("start_frame", 1)
        endframe = settings.get("end_frame", 120)
        custom_frames = settings.get("frame", None)

        # set values
        self.mode.setCurrentIndex(mode)
        self.start.setValue(int(startframe))
        self.end.setValue(int(endframe))
        if custom_frames is not None:
            self.custom_frames.setText(custom_frames)

    def initialize(self):
        self._register_callbacks()

    def uninitialize(self):
        self._remove_callbacks()

    def _register_callbacks(self):
        """Register maya time and playback range change callbacks.

        Register callbacks to ensure Capture GUI reacts to changes in
        the Maya GUI in regards to time slider and current frame
        """
        callback = lambda x: self.on_mode_changed(emit=False)

        # this avoids overriding the ids on re-run
        currentframe = om.MEventMessage.addEventCallback("timeChanged",
                                                         callback)
        timerange = om.MEventMessage.addEventCallback("playbackRangeChanged",
                                                      callback)

        self._event_callbacks.append(currentframe)
        self._event_callbacks.append(timerange)

    def _remove_callbacks(self):
        """Remove callbacks when closing widget"""
        for callback in self._event_callbacks:
            try:
                om.MEventMessage.removeCallback(callback)
            # bug fix: `except RuntimeError, error` is Python 2-only syntax
            # (a SyntaxError on Python 3); the `as` form works on both.
            except RuntimeError as error:
                log.error("Encounter error : {}".format(error))
| |
#
# Copyright (c) 2016 Juniper Networks, Inc. All rights reserved.
#
"""
Database for Kubernetes objects.
"""
import json
from cfgm_common.vnc_db import DBBase
from kube_manager.sandesh.kube_introspect import ttypes as introspect
from ast import literal_eval
from utils import get_vn_fq_name_from_dict_string
class KubeDBBase(DBBase):
    """Common base class for cached Kubernetes API objects."""
    obj_type = __name__

    @classmethod
    def evaluate(cls):
        # No-op hook; subclasses may override to (re)evaluate derived state.
        # (idiom fix: the first parameter of a classmethod is the class, so it
        # is named `cls` rather than `self` -- binding is positional, callers
        # are unaffected)
        pass

    @staticmethod
    def get_uuid(obj):
        """ Get UUID of the kubernetes object."""
        if obj:
            # robustness fix: guard against a missing or None 'metadata'
            # section instead of raising AttributeError
            return (obj.get('metadata') or {}).get('uid')
        return None

    def get_vn_from_annotation(self, annotations):
        """ Get vn-fq-name if specified in annotations of a k8s object.
        """
        vn_ann = annotations.get('opencontrail.org/network', None)
        if vn_ann:
            return get_vn_fq_name_from_dict_string(vn_ann)
        return None
#
# Kubernetes POD Object DB.
#
class PodKM(KubeDBBase):
    # Cache of Pod objects keyed by uuid (managed by the DBBase machinery).
    _dict = {}
    obj_type = 'Pod'

    def __init__(self, uuid, obj = None):
        self.uuid = uuid

        # Metadata.
        self.name = None
        self.namespace = None
        self.labels = {}
        self.annotations = None
        self.pod_vn_fq_name = None

        # Spec.
        self.nodename = None
        # (host_ip is grouped here but actually filled from status.hostIP)
        self.host_ip = None

        # Status.
        self.phase = None

        # If an object is provided, update self with contents of object.
        if obj:
            self.update(obj)

    def update(self, obj=None):
        """Refresh cached fields from `obj`, reading it from the DB when None."""
        if obj is None:
            obj = self.read_obj(self.uuid)
        self._update_metadata(obj.get('metadata'))
        self._update_spec(obj.get('spec'))
        self._update_status(obj.get('status'))

    def _update_metadata(self, md):
        # Cache name/namespace/labels/annotations from the k8s metadata section.
        if md is None:
            return
        self.name = md.get('name')
        self.namespace = md.get('namespace')
        self.labels = md.get('labels')
        self.annotations = md.get('annotations', None)
        self._parse_annotations(self.annotations)

    def _update_spec(self, spec):
        # Cache the scheduled node name from the k8s spec section.
        if spec is None:
            return
        self.nodename = spec.get('nodeName')

    def _update_status(self, status):
        # Cache host IP and pod phase from the k8s status section.
        if status is None:
            return
        self.host_ip = status.get('hostIP')
        self.phase = status.get('phase')

    def get_host_ip(self):
        return self.host_ip

    @staticmethod
    def sandesh_handle_db_list_request(cls, req):
        """ Reply to Pod DB lookup/introspect request. """
        # NOTE(review): declared @staticmethod yet takes a leading `cls`
        # argument, so callers must pass the class explicitly -- confirm how
        # sandesh registers/invokes this handler.
        pod_resp = introspect.PodDatabaseListResp(pods=[])

        # Iterate through all elements of Pod DB.
        for pod in PodKM.values():

            # If the request is for a specific entry, then locate the entry.
            if req.pod_uuid and req.pod_uuid != pod.uuid:
                continue

            # Construct response for an element.
            pod_instance = introspect.PodInstance(uuid=pod.uuid, name=pod.name,
                labels=pod.labels, nodename=pod.nodename, ip=pod.host_ip,
                phase=pod.phase)

            # Append the constructed element info to the response.
            pod_resp.pods.append(pod_instance)

        # Send the reply out.
        pod_resp.response(req.context())

    def _parse_annotations(self, annotations):
        if not annotations:
            return

        # Parse pod network annotations.
        # (only parsed once: a previously cached fq-name is kept as-is)
        if not self.pod_vn_fq_name:
            try:
                self.pod_vn_fq_name = self.get_vn_from_annotation(
                    annotations)
            except Exception as e:
                err_msg = "Failed to parse annotation for pod[%s].Error[%s]"%\
                    (self.name, str(e))
                raise Exception(err_msg)

    def get_vn_fq_name(self):
        """Return virtual-network fq-name annotated on this pod."""
        return self.pod_vn_fq_name
#
# Kubernetes Namespace Object DB.
#
class NamespaceKM(KubeDBBase):
    # Cache of Namespace objects keyed by uuid (managed by DBBase machinery).
    _dict = {}
    obj_type = 'Namespace'

    def __init__(self, uuid, obj = None):
        self.uuid = uuid

        # Metadata.
        self.name = None
        self.labels = {}
        self.isolated_vn_fq_name = None
        self.annotated_vn_fq_name = None
        self.annotations = None
        self.np_annotations = None

        # Status.
        self.phase = None

        # Config cache.
        self.isolated = False
        self.service_isolated = False

        # If an object is provided, update self with contents of object.
        if obj:
            self.update(obj)

    def update(self, obj=None):
        """Refresh cached fields from `obj`, reading it from the DB when None."""
        if obj is None:
            obj = self.read_obj(self.uuid)
        self._update_metadata(obj.get('metadata'))
        self._update_status(obj.get('status'))

    def _update_metadata(self, md):
        # Cache name/labels and (re)parse annotations from the metadata section.
        if md is None:
            return
        self.name = md.get('name')
        self.labels = md.get('labels')

        # Parse annotations on this namespace.
        self.annotations = md.get('annotations')
        self._parse_annotations(self.annotations)

    def _parse_annotations(self, annotations):
        # Reset first so a removed network-policy annotation does not leave
        # stale cached config behind.
        self.np_annotations = None
        if not annotations:
            return

        # Parse virtual network annotations.
        # (only parsed once: a previously cached fq-name is kept as-is)
        if not self.annotated_vn_fq_name:
            try:
                self.annotated_vn_fq_name = self.get_vn_from_annotation(
                    annotations)
            except Exception as e:
                err_msg = "Failed to parse annotations for namespace [%s]."\
                    " Error[%s]" % (self.name, str(e))
                raise Exception(err_msg)

        # Cache namespace isolation directive.
        if 'opencontrail.org/isolation' in annotations and \
           annotations['opencontrail.org/isolation'] == "true":
            # Namespace isolation is configured
            self.isolated = True
            self.service_isolated = True

        # Cache service isolation directive.
        if 'opencontrail.org/isolation.service' in annotations and \
           annotations['opencontrail.org/isolation.service'] == "false":
            # Service isolation is disabled
            self.service_isolated = False

        # Cache k8s network-policy directive.
        if 'net.beta.kubernetes.io/network-policy' in annotations:
            self.np_annotations = json.loads(
                annotations['net.beta.kubernetes.io/network-policy'])

    def _update_status(self, status):
        # Cache the namespace phase from the k8s status section.
        if status is None:
            return
        self.phase = status.get('phase')

    def is_isolated(self):
        return self.isolated

    def is_service_isolated(self):
        return self.service_isolated

    def get_network_policy_annotations(self):
        return self.np_annotations

    def set_isolated_network_fq_name(self, fq_name):
        self.isolated_vn_fq_name = fq_name

    def get_isolated_network_fq_name(self):
        return self.isolated_vn_fq_name

    def get_annotated_network_fq_name(self):
        return self.annotated_vn_fq_name

    @staticmethod
    def sandesh_handle_db_list_request(cls, req):
        """ Reply to Namespace DB lookup/introspect request. """
        # NOTE(review): declared @staticmethod yet takes a leading `cls`
        # argument, so callers must pass the class explicitly -- confirm how
        # sandesh registers/invokes this handler.
        ns_resp = introspect.NamespaceDatabaseListResp(namespaces=[])

        # Iterate through all elements of Namespace DB.
        for ns in NamespaceKM.values():

            # If the request is for a specific entry, then locate the entry.
            if req.namespace_uuid and req.namespace_uuid != ns.uuid:
                continue

            # Construct response for a namespace element.
            ns_instance = introspect.NamespaceInstance(uuid=ns.uuid,
                labels=ns.labels, name=ns.name,
                phase=ns.phase, isolated=ns.isolated)

            # Append the constructed element info to the response.
            ns_resp.namespaces.append(ns_instance)

        # Send the reply out.
        ns_resp.response(req.context())
#
# Kubernetes Service Object DB.
#
class ServiceKM(KubeDBBase):
    """Cache of a Kubernetes Service object (metadata and spec fields)."""
    # Cache of Service objects keyed by uuid (managed by DBBase machinery).
    _dict = {}
    obj_type = 'Service'

    def __init__(self, uuid, obj=None):
        self.uuid = uuid

        # Metadata.
        self.name = None
        self.namespace = None
        self.labels = {}

        # Spec.
        self.cluster_ip = None
        self.service_type = None
        # Bug fix: `ports` was only assigned inside _update_spec(), so
        # get_service_ports() raised AttributeError on an object that never
        # saw a spec; initialize it like every other cached field.
        self.ports = None

        # If an object is provided, update self with contents of object.
        if obj:
            self.update(obj)

    def update(self, obj=None):
        """Refresh cached fields from `obj`, reading it from the DB when None."""
        if obj is None:
            obj = self.read_obj(self.uuid)
        self._update_metadata(obj.get('metadata'))
        self._update_spec(obj.get('spec'))

    def _update_metadata(self, md):
        # Cache name/namespace/labels from the k8s metadata section.
        if md is None:
            return
        self.name = md.get('name')
        self.namespace = md.get('namespace')
        self.labels = md.get('labels')

    def _update_spec(self, spec):
        # Cache service type, cluster IP and ports from the k8s spec section.
        if spec is None:
            return
        self.service_type = spec.get('type')
        self.cluster_ip = spec.get('clusterIP')
        self.ports = spec.get('ports')

    def get_service_ip(self):
        return self.cluster_ip

    def get_service_ports(self):
        return self.ports

    @staticmethod
    def sandesh_handle_db_list_request(cls, req):
        """ Reply to Service DB lookup/introspect request. """
        # NOTE(review): declared @staticmethod yet takes a leading `cls`
        # argument, so callers must pass the class explicitly -- confirm how
        # sandesh registers/invokes this handler.
        svc_resp = introspect.ServiceDatabaseListResp(services=[])

        # Iterate through all elements of the Service DB.
        for svc in ServiceKM.values():

            # If the request is for a specific entry, then locate the entry.
            if req.service_uuid and req.service_uuid != svc.uuid:
                continue

            # Construct response for an element.
            svc_instance = introspect.ServiceInstance(uuid=svc.uuid,
                name=svc.name, name_space=svc.namespace, labels=svc.labels,
                cluster_ip=svc.cluster_ip, service_type=svc.service_type)

            # Append the constructed element info to the response.
            svc_resp.services.append(svc_instance)

        # Send the reply out.
        svc_resp.response(req.context())
#
# Kubernetes Network Policy Object DB.
#
class NetworkPolicyKM(KubeDBBase):
    """In-memory cache entry for a Kubernetes NetworkPolicy object."""
    _dict = {}
    obj_type = 'NetworkPolicy'

    def __init__(self, uuid, obj=None):
        self.uuid = uuid
        # Metadata.
        self.name = None
        self.namespace = None
        # Spec.
        self.spec = {}
        # If an object is provided, update self with contents of object.
        if obj:
            self.update(obj)

    def update(self, obj=None):
        """Refresh cached fields from 'obj'; read from the DB when None."""
        if obj is None:
            obj = self.read_obj(self.uuid)
        self._update_metadata(obj.get('metadata'))
        self._update_spec(obj.get('spec'))

    def _update_metadata(self, md):
        if md is None:
            return
        self.name = md.get('name')
        self.namespace = md.get('namespace')

    def _update_spec(self, spec):
        # The raw policy spec is kept verbatim; it is parsed on demand.
        if spec is None:
            return
        self.spec = spec

    def get_pod_selector(self, pod_selector):
        """Build an introspect pod-selector from a k8s podSelector dict."""
        # 'or {}' also covers an explicit 'matchLabels: null' in the spec.
        labels = pod_selector.get('matchLabels') or {}
        return introspect.NetworkPolicyPodSelectors(matchLabels=labels)

    @staticmethod
    def sandesh_handle_db_list_request(cls, req):
        """ Reply to Network Policy DB lookup/introspect request. """
        np_resp = introspect.NetworkPolicyDatabaseListResp(network_policies=[])
        # Iterate through all elements of Network Policy DB.
        for np in NetworkPolicyKM.values():
            # If the request is for a specific entry, then locate the entry.
            if req.network_policy_uuid and req.network_policy_uuid != np.uuid:
                continue
            # Parse "ingress" attribute.
            np_ingress_list = []
            for ingress in np.spec.get('ingress') or []:
                # Parse "from" attribute.
                from_list = []
                for each_from in ingress.get('from') or []:
                    np_pod_selector = None
                    if each_from.get('podSelector'):
                        np_pod_selector = np.get_pod_selector(
                            each_from.get('podSelector'))
                    from_list.append(introspect.NetworkPolicyFromRules(
                        podSelector=np_pod_selector))
                # Parse "ports" attribute.
                np_port_list = []
                for port in ingress.get('ports') or []:
                    np_port = introspect.NetworkPolicyPort(
                        port=str(port.get('port')),
                        protocol=port.get('protocol'))
                    np_port_list.append(np_port)
                np_ingress_list.append(
                    introspect.NetworkPolicyIngressPolicy(
                        fromPolicy=from_list, ports=np_port_list))
            # Parse "pod selector" attribute.
            np_pod_selector = None
            if np.spec.get('podSelector'):
                pod_selector = np.spec.get('podSelector')
                np_pod_selector = introspect.NetworkPolicyPodSelectors(
                    matchLabels=pod_selector.get('matchLabels'))
            np_spec = introspect.NetworkPolicySpec(ingress=np_ingress_list,
                podSelector=np_pod_selector)
            np_instance = introspect.NetworkPolicyInstance(uuid=np.uuid,
                name=np.name, name_space=np.namespace,
                spec_string=str(np.spec), spec=np_spec)
            # Append the constructed element info to the response.
            np_resp.network_policies.append(np_instance)
        # Send the reply out.
        np_resp.response(req.context())
#
# Kubernetes Ingress Object DB.
#
class IngressKM(KubeDBBase):
    """In-memory cache entry for a Kubernetes Ingress object."""
    _dict = {}
    obj_type = 'Ingress'

    def __init__(self, uuid, obj=None):
        self.uuid = uuid
        # Metadata.
        self.name = None
        self.namespace = None
        self.labels = {}
        self.spec = {}
        # Spec.
        self.rules = []
        self.default_backend = {}
        # If an object is provided, update self with contents of object.
        if obj:
            self.update(obj)

    def update(self, obj=None):
        """Refresh cached fields from 'obj'; read from the DB when None."""
        if obj is None:
            obj = self.read_obj(self.uuid)
        self._update_metadata(obj.get('metadata'))
        self._update_spec(obj.get('spec'))

    def _update_metadata(self, md):
        if md is None:
            return
        self.name = md.get('name')
        self.namespace = md.get('namespace')
        self.labels = md.get('labels', {})

    def _update_spec(self, spec):
        if spec is None:
            return
        self.spec = spec
        # Default to a list: __init__ initializes rules to [] and the
        # sandesh handler iterates it, so the former {} default was
        # inconsistent.
        self.rules = spec.get('rules', [])
        self.default_backend = spec.get('backend', {})

    @staticmethod
    def sandesh_handle_db_list_request(cls, req):
        """ Reply to Ingress DB lookup/introspect request. """
        ingress_resp = introspect.IngressDatabaseListResp(ingress=[])
        # Iterate through all elements of Ingress DB.
        for ingress in IngressKM.values():
            # If the request is for a specific entry, then locate the entry.
            if req.ingress_uuid and req.ingress_uuid != ingress.uuid:
                continue
            # Get default backend info.
            def_backend = introspect.IngressBackend(
                name=ingress.default_backend.get('serviceName'),
                port=str(ingress.default_backend.get('servicePort')))
            # Get rules.
            rules = []
            for rule in ingress.rules:
                ingress_rule = introspect.IngressRule(spec=[])
                # items() instead of the Python-2-only iteritems() keeps
                # this working on both Python 2 and Python 3.
                for key, value in rule.items():
                    if key == 'host':
                        # Get host info from rule.
                        ingress_rule.host = value
                    else:
                        # Get proto spec from rule.
                        proto_spec = introspect.IngressProtoSpec(paths=[])
                        proto_spec.proto = key
                        for path in value.get('paths', []):
                            backend = path.get('backend')
                            proto_backend = None
                            if backend:
                                proto_backend = introspect.IngressBackend(
                                    name=backend.get('serviceName'),
                                    port=str(backend.get('servicePort')))
                            proto_path = introspect.IngressRuleProtoPath(
                                backend=proto_backend, path=path.get('path'))
                            proto_spec.paths.append(proto_path)
                        ingress_rule.spec.append(proto_spec)
                rules.append(ingress_rule)
            # Construct response for an element.
            ingress_instance = introspect.IngressInstance(
                uuid=ingress.uuid, name=ingress.name,
                labels=ingress.labels, name_space=ingress.namespace,
                rules=rules, default_backend=def_backend)
            # Append the constructed element info to the response.
            ingress_resp.ingress.append(ingress_instance)
        # Send the reply out.
        ingress_resp.response(req.context())
| |
import abc
import json
from django.conf import settings
from django.contrib.gis.geos import Point
from django.core.urlresolvers import reverse
from django.core.validators import validate_email
from django.forms import ValidationError
from django.http import HttpResponse, HttpResponseRedirect, Http404
from django.shortcuts import get_object_or_404
from django.views.generic import FormView, DetailView, TemplateView
from django.utils import translation
from django.utils.translation import ugettext as _
from councils.models import Council
from data_finder.models import (
LoggedPostcode
)
from pollingstations.models import (
PollingStation,
ResidentialAddress,
CustomFinder
)
from uk_geo_utils.helpers import AddressSorter, Postcode
from whitelabel.views import WhiteLabelTemplateOverrideMixin
from .forms import PostcodeLookupForm, AddressSelectForm
from .helpers import (
DirectionsHelper,
get_council,
geocode,
EveryElectionWrapper,
MultipleCouncilsException,
PostcodeError,
RateLimitError,
RoutingHelper
)
class LogLookUpMixin(object):
    """Mixin that records a postcode lookup in the LoggedPostcode table."""

    def log_postcode(self, postcode, context, view_used):
        """Persist one lookup event, tagged with brand, language and UTM data."""
        language = context['language'] if 'language' in context \
            else self.get_language()
        brand = context['brand'] if 'brand' in context \
            else self.request.brand
        log_entry = {
            'postcode': postcode.without_space,
            'had_data': bool(context['we_know_where_you_should_vote']),
            'location': context['location'],
            'council': context['council'],
            'brand': brand,
            'language': language,
            'view_used': view_used,
        }
        if 'api_user' in context:
            log_entry['api_user'] = context['api_user']
        # Attach whatever UTM tracking data was captured in the session.
        log_entry.update(self.request.session['utm_data'])
        LoggedPostcode.objects.create(**log_entry)
class LanguageMixin(object):
    """Mixin exposing the language code stored in the Django session."""

    def get_language(self):
        """Return the session's language code, or '' when none is set."""
        session = self.request.session
        if session and translation.LANGUAGE_SESSION_KEY in session and \
                session[translation.LANGUAGE_SESSION_KEY]:
            return session[translation.LANGUAGE_SESSION_KEY]
        return ''
class HomeView(WhiteLabelTemplateOverrideMixin, FormView):
    """Front-page postcode entry form."""
    form_class = PostcodeLookupForm
    template_name = "home.html"

    def form_valid(self, form):
        """Route a valid postcode to whichever view should handle it."""
        entered = Postcode(form.cleaned_data['postcode'])
        endpoint = RoutingHelper(entered).get_endpoint()
        self.success_url = reverse(endpoint.view, kwargs=endpoint.kwargs)
        return super(HomeView, self).form_valid(form)

    def form_invalid(self, form):
        """Re-render the form, echoing back whatever postcode was typed."""
        context = self.get_context_data(form=form)
        context['postcode'] = form.data.get('postcode', '')
        return self.render_to_response(context)
class PrivacyView(WhiteLabelTemplateOverrideMixin, TemplateView):
    """Static privacy-policy page."""
    template_name = "privacy.html"
class BasePollingStationView(
        TemplateView, LogLookUpMixin, LanguageMixin, metaclass=abc.ABCMeta):
    """Abstract base for views that render a polling-station result page.

    Subclasses supply the lookup strategy via get_location/get_council/
    get_station; this class assembles the template context and logs the
    lookup. Subclasses must set self.postcode before get_context_data runs.
    """
    template_name = "postcode_view.html"
    @abc.abstractmethod
    def get_location(self):
        # Return a geocoder result dict, or None where the subclass allows it.
        pass
    @abc.abstractmethod
    def get_council(self, geocode_result):
        # Return the council responsible for this lookup.
        pass
    @abc.abstractmethod
    def get_station(self):
        # Return the polling station for this lookup, or None if unknown.
        pass
    def get_directions(self):
        # Directions only make sense when both endpoints are known.
        if self.location and self.station and self.station.location:
            dh = DirectionsHelper()
            return dh.get_directions(
                start_location=self.location,
                end_location=self.station.location,
            )
        else:
            return None
    def get_context_data(self, **context):
        context['tile_layer'] = settings.TILE_LAYER
        context['mq_key'] = settings.MQ_KEY
        try:
            loc = self.get_location()
        except (PostcodeError, RateLimitError) as e:
            # Geocoding failed outright: short-circuit with just the error.
            context['error'] = str(e)
            return context
        if loc is None:
            # AddressView.get_location() may legitimately return None
            self.location = None
        else:
            self.location = Point(loc['wgs84_lon'], loc['wgs84_lat'])
        # NOTE(review): subclasses that can return None from get_location()
        # must tolerate get_council(None) — confirm against AddressView.
        self.council = self.get_council(loc)
        self.station = self.get_station()
        self.directions = self.get_directions()
        ee = EveryElectionWrapper(self.postcode)
        if settings.EVERY_ELECTION['CHECK']:
            context['has_election'] = ee.has_election()
        else:
            # Live election check disabled: fall back to the configured flag.
            context['has_election'] = settings.EVERY_ELECTION['HAS_ELECTION']
        if not context['has_election']:
            context['error'] = 'There are no upcoming elections in your area'
        context['election_explainers'] = ee.get_explanations()
        context['postcode'] = self.postcode.with_space
        context['location'] = self.location
        context['council'] = self.council
        context['station'] = self.station
        context['directions'] = self.directions
        # A truthy station doubles as the "we found it" flag in the template.
        context['we_know_where_you_should_vote'] = self.station
        context['noindex'] = True
        context['territory'] = self.postcode.territory
        if not context['we_know_where_you_should_vote']:
            if loc is None:
                context['custom'] = None
            else:
                # Offer a council-specific custom finder link as a fallback.
                context['custom'] = CustomFinder.objects.get_custom_finder(
                    loc['gss_codes'], self.postcode.without_space)
        self.log_postcode(self.postcode, context, type(self).__name__)
        return context
class PostcodeView(BasePollingStationView):
    """Polling-station result page looked up by postcode alone."""

    def get(self, request, *args, **kwargs):
        # A postcode passed as a querystring wins over the URL kwarg.
        if 'postcode' in request.GET:
            self.kwargs['postcode'] = kwargs['postcode'] = request.GET['postcode']
        if 'postcode' not in kwargs or kwargs['postcode'] == '':
            return HttpResponseRedirect(reverse('home'))
        endpoint = RoutingHelper(self.kwargs['postcode']).get_endpoint()
        if endpoint.view != 'postcode_view':
            # Some other view should be handling this postcode.
            return HttpResponseRedirect(
                reverse(endpoint.view, kwargs=endpoint.kwargs)
            )
        # we are already in postcode_view
        self.postcode = Postcode(kwargs['postcode'])
        try:
            context = self.get_context_data(**kwargs)
        except MultipleCouncilsException:
            return HttpResponseRedirect(
                reverse('multiple_councils_view',
                        kwargs={'postcode': self.postcode.without_space})
            )
        return self.render_to_response(context)

    def get_location(self):
        """Geocode the postcode; may raise PostcodeError/RateLimitError."""
        return geocode(self.postcode)

    def get_council(self, geocode_result):
        return get_council(geocode_result)

    def get_station(self):
        return PollingStation.objects.get_polling_station(
            self.council.council_id, location=self.location)
class AddressView(BasePollingStationView):
    """Polling-station result page for a specific residential address."""

    def get(self, request, *args, **kwargs):
        self.address = get_object_or_404(
            ResidentialAddress,
            slug=self.kwargs['address_slug']
        )
        self.postcode = Postcode(self.address.postcode)
        try:
            context = self.get_context_data(**kwargs)
        except MultipleCouncilsException:
            return HttpResponseRedirect(
                reverse('multiple_councils_view',
                        kwargs={'postcode': self.postcode.without_space})
            )
        return self.render_to_response(context)

    def get_location(self):
        # A failed geocode is tolerable here: the address record already
        # tells us which council is responsible.
        try:
            return geocode(self.postcode)
        except PostcodeError:
            return None

    def get_council(self, geocode_result):
        return Council.objects.defer("area").get(
            pk=self.address.council_id)

    def get_station(self):
        if not self.address.polling_station_id:
            return None
        return PollingStation.objects.get_polling_station_by_id(
            self.address.polling_station_id,
            self.address.council_id)
class ExamplePostcodeView(BasePollingStationView):
    """
    This class presents a hard-coded example of what our website does
    without having to worry about having any data imported
    or whether an election is actually happening or not
    """

    def get(self, request, *args, **kwargs):
        return self.render_to_response(self.get_context_data(**kwargs))

    def get_location(self):
        # Fixed coordinates for the demo page.
        return {
            "wgs84_lat": 51.43921783606831,
            "wgs84_lon": -2.54333651887832,
            'gss_codes': [],
        }

    def get_council(self, geocode_result):
        return Council.objects.defer("area").get(pk='E06000023')

    def get_station(self):
        # An unsaved PollingStation instance — nothing touches the DB.
        return PollingStation(
            internal_council_id="BREF",
            postcode="BS4 4NZ",
            address="St Peters Methodist Church\nAllison Road\nBrislington",
            location=Point(-2.5417780465622686, 51.440043287399604),
            council_id="E06000023"
        )

    def get_context_data(self, **kwargs):
        self.postcode = Postcode('EXAMPLE')  # put this in the logs so it is easy to exclude
        context = super().get_context_data(**kwargs)
        # Hard-coded example values shown on the page.
        context.update({
            'postcode': 'BS4 4NL',  # show this on the page
            'has_election': True,
            'election_explainers': [],
            'error': None,
            'custom': None,
        })
        return context
class WeDontKnowView(PostcodeView):
    """Result page shown when we hold no station data for a postcode."""

    def get(self, request, *args, **kwargs):
        self.postcode = Postcode(kwargs['postcode'])
        return self.render_to_response(self.get_context_data(**kwargs))

    def get_station(self):
        # By definition this view never has a station to show.
        return None
class MultipleCouncilsView(TemplateView, LogLookUpMixin, LanguageMixin):
    # because sometimes "we don't know" just isn't uncertain enough
    template_name = "multiple_councils.html"

    def get(self, request, *args, **kwargs):
        self.postcode = Postcode(self.kwargs['postcode'])
        rh = RoutingHelper(self.postcode)
        endpoint = rh.get_endpoint()
        if endpoint.view != 'multiple_councils_view':
            # This postcode has a better home elsewhere.
            return HttpResponseRedirect(
                reverse(endpoint.view, kwargs=endpoint.kwargs)
            )
        self.council_ids = rh.councils
        return self.render_to_response(self.get_context_data(**kwargs))

    def get_context_data(self, **context):
        context['councils'] = [
            Council.objects.get(pk=council_id)
            for council_id in self.council_ids
        ]
        context['territory'] = self.postcode.territory
        # Log the lookup even though we could not resolve a single council.
        self.log_postcode(self.postcode, {
            'we_know_where_you_should_vote': False,
            'location': None,
            'council': None,
        }, type(self).__name__)
        return context
class AddressFormView(FormView):
    """Lets the user pick their exact address within a split postcode."""
    form_class = AddressSelectForm
    template_name = "address_select.html"
    # Sentinel slug meaning "my address is not listed".
    NOTINLIST = '519RA5LCGuHHXQvBUVgOXiCcqWy7SZG1inRDKcx1'

    def get_context_data(self, **kwargs):
        context = super(AddressFormView, self).get_context_data(**kwargs)
        context['noindex'] = True
        return context

    def get_form(self, form_class=None):
        """Build the address-select form for this postcode.

        Django >= 1.8 calls get_form() with no argument, so default
        form_class to None and fall back to the configured form class;
        explicit callers keep working unchanged.
        """
        if form_class is None:
            form_class = self.get_form_class()
        self.postcode = Postcode(self.kwargs['postcode'])
        addresses = ResidentialAddress.objects.filter(
            postcode=self.postcode.without_space
        )
        if not addresses:
            raise Http404
        # Natural sort so "2 Foo St" precedes "10 Foo St".
        sorter = AddressSorter(addresses)
        addresses = sorter.natural_sort()
        select_addresses = [(element.slug, element.address) for element in addresses]
        select_addresses.append((self.NOTINLIST, 'My address is not in the list'))
        return form_class(select_addresses, self.postcode.without_space, **self.get_form_kwargs())

    def form_valid(self, form):
        slug = form.cleaned_data['address']
        if slug == self.NOTINLIST:
            # Address missing from our data: send them to "we don't know".
            self.success_url = reverse(
                'we_dont_know',
                kwargs={'postcode': self.postcode.without_space}
            )
        else:
            self.success_url = reverse(
                'address_view',
                kwargs={'address_slug': slug}
            )
        return super(AddressFormView, self).form_valid(form)
class CoverageView(TemplateView):
    """Summary page showing how many councils we hold data for."""
    template_name = 'coverage.html'

    def get_context_data(self, *a, **k):
        context = super(CoverageView, self).get_context_data(*a, **k)
        num_councils = Council.objects.count()
        districts, stations = 0, 0
        covered = []
        for council in Council.objects.all():
            # NOTE(review): '> 1' (not '> 0') treats a single record as
            # insufficient coverage — looks deliberate; confirm.
            if council.pollingstation_set.count() > 1:
                stations += 1
                covered.append(council)
            if council.pollingdistrict_set.count() > 1:
                districts += 1
        context['num_councils'] = num_councils
        context['num_district_councils'] = districts
        context['num_station_councils'] = stations
        if num_councils:
            context['perc_districts'] = "%d%%" % (float(districts) / num_councils * 100)
            context['perc_stations'] = "%d%%" % (float(stations) / num_councils * 100)
        else:
            # Guard against ZeroDivisionError on an empty database.
            context['perc_districts'] = "0%"
            context['perc_stations'] = "0%"
        context['covered'] = covered
        return context
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.