gt stringclasses 1 value | context stringlengths 2.49k 119k |
|---|---|
# plugs/infoitem.py
#
#
"""
information items .. keyword/description pairs
learn the bot something with: !<item> = <description>
question goes with: !<item>?
"""
## jsb imports
from jsb.lib.commands import cmnds
from jsb.lib.examples import examples
from jsb.lib.datadir import datadir
from jsb.utils.locking import lockdec
from jsb.lib.callbacks import callbacks
from jsb.lib.users import getusers
from jsb.lib.config import getmainconfig
## basic imports
import thread
import os
import time
import logging
## locks
# module lock protecting infoitem db access; ``locked`` is a decorator
# (from jsb.utils.locking.lockdec) that serializes the wrapped function
infolock = thread.allocate_lock()
locked = lockdec(infolock)
## defines
# shared database handle; stays None until init() fetches the main db,
# so every InfoItemsDb method guards against an uninitialised plugin
db = None
## InfoItemsDb class
class InfoItemsDb(object):
    """ access layer for the infoitems table: keyword/description pairs. """

    def add(self, item, description, userhost, ttime):
        """ store a new description for item, recorded for userhost at ttime. """
        if not db:
            logging.error("plugin isnt initialised yet")
            return []
        return db.execute(
            """ INSERT INTO infoitems(item, description, userhost, time) VALUES(%s, %s, %s, %s) """,
            (item.lower(), description, userhost, ttime))

    def get(self, item):
        """ return the list of descriptions stored for item. """
        global db
        if not db:
            logging.error("plugin isnt initialised yet")
            return []
        rows = db.execute(""" SELECT description FROM infoitems WHERE item = %s """, item.lower())
        if not rows:
            return []
        return [row[0] for row in rows]

    def delete(self, indexnr):
        """ remove the infoitem with the given index number. """
        global db
        if not db:
            logging.error("plugin isnt initialised yet")
            return []
        return db.execute(""" DELETE FROM infoitems WHERE indx = %s """, indexnr)

    def deltxt(self, item, txt):
        """ remove descriptions of item whose text contains txt. """
        global db
        if not db:
            logging.error("plugin isnt initialised yet")
            return []
        return db.execute(""" DELETE FROM infoitems WHERE item = %s AND description LIKE %s """, (item, '%%%s%%' % txt))

    def size(self):
        """ return the total number of stored infoitems. """
        global db
        if not db:
            logging.error("plugin isnt initialised yet")
            return []
        return db.execute(""" SELECT COUNT(*) FROM infoitems """)[0][0]

    def searchitem(self, search):
        """ return (item, description) rows whose keyword contains search. """
        global db
        if not db:
            logging.error("plugin isnt initialised yet")
            return []
        return db.execute(""" SELECT item, description FROM infoitems WHERE item LIKE %s """, '%%%s%%' % search)

    def searchdescr(self, search):
        """ return (item, description) rows whose description contains search. """
        global db
        if not db:
            logging.error("plugin isnt initialised yet")
            return []
        return db.execute(""" SELECT item, description FROM infoitems WHERE description LIKE %s """, '%%%s%%' % search)
## defines
# the single shared accessor instance used by all command handlers below
info = InfoItemsDb()
## size function
def size():
    """ return number of infoitems """
    # exported module-level hook so other plugins can query the table size
    return info.size()
## info callbacks
def infopre(bot, ievent):
    """ precondition: fire the info callback for '<cc><item>?' lines that are no real command. """
    if not ievent.iscmnd():
        return
    if not ievent.txt or not ievent.txt.endswith("?"):
        return
    if ievent.woulddispatch():
        return
    return True
def infocb(bot, ievent):
    """ answer a '<item>?' callback with the stored descriptions. """
    if not getusers().allowed(ievent.userhost, 'USER'):
        return
    descriptions = info.get(ievent.execstr)
    if descriptions:
        ievent.reply('%s is: ' % ievent.execstr, descriptions)

# hook the callback onto channel messages, gated by infopre
callbacks.add('PRIVMSG', infocb, infopre)
## info-size command
def handle_infosize(bot, ievent):
    """ info-size .. show number of information items """
    total = info.size()
    ievent.reply("we have %s infoitems" % total)

cmnds.add('info-size', handle_infosize, ['USER', 'WEB', 'ANON'])
examples.add('info-size', 'show number of infoitems', 'info-size')
## addinfoitem RE
def handle_addinfoitem(bot, ievent):
    """ <keyword> = <description> .. add information item """
    if not ievent.hascc():
        return
    try:
        (what, description) = ievent.groups
    except ValueError:
        ievent.reply('i need <item> <description>')
        return
    if len(description) < 3:
        ievent.reply('i need at least 3 chars for the description')
        return
    # drop the leading control character from the keyword
    keyword = what.strip()[1:]
    info.add(keyword, description, ievent.userhost, time.time())
    ievent.reply('item added')

cmnds.add('^(.+?)\s+=\s+(.+)$', handle_addinfoitem, ['USER', 'INFOADD'], regex=True, needcc=True)
examples.add('=', 'add description to item', 'dunk = top')
## question RE
def handle_question(bot, ievent):
    """ <keyword>? .. ask for information item description """
    if not ievent.hascc():
        return
    try:
        what = ievent.groups[0]
    except IndexError:
        ievent.reply('i need a argument')
        return
    # NOTE(review): the [1:] slice strips the control character for the
    # '<cc><item>?' form; for the '^\?(.+)$' form the group already excludes
    # the '?', so this may cut the keyword's first character -- verify how
    # the cc prefix reaches ievent.groups before changing.
    what = what.strip().lower()[1:]
    found = info.get(what)
    if found:
        ievent.reply("%s is: " % what, found)
    else:
        ievent.reply('nothing known about %s' % what)
        return

cmnds.add('^(.+)\?$', handle_question, ['USER', 'WEB', 'JCOLL', 'ANON'], regex=True, needcc=True)
cmnds.add('^\?(.+)$', handle_question, ['USER', 'WEB', 'JCOLL', 'ANON'], regex=True, needcc=True)
examples.add('?', 'show infoitems of <what>', '1) test? 2) ?test')
## info-forget command
def handle_forget(bot, ievent):
    """ forget <keyword> <txttomatch> .. remove information items whose description matches the txt given """
    if len(ievent.args) < 2:
        ievent.missing('<item> <txttomatch> (min 3 chars)')
        return
    # everything but the last argument forms the keyword
    what = ' '.join(ievent.args[:-1])
    txt = ievent.args[-1]
    if len(txt) < 3:
        ievent.reply('i need txt with at least 3 characters')
        return
    what = what.strip().lower()
    try:
        nrtimes = info.deltxt(what, txt)
    except KeyError:
        ievent.reply('no records matching %s found' % what)
        return
    if nrtimes:
        ievent.reply('item deleted')
    else:
        ievent.reply('delete %s of %s failed' % (txt, what))

cmnds.add('info-forget', handle_forget, ['FORGET', 'OPER'])
examples.add('info-forget', 'forget <item> containing <txt>', 'info-forget dunk bla')
## info-sd command
def handle_searchdescr(bot, ievent):
    """ info-sd <txttosearchfor> .. search information items descriptions """
    if not ievent.rest:
        ievent.missing('<txt>')
        return
    needle = ievent.rest.strip().lower()
    rows = info.searchdescr(needle)
    if not rows:
        ievent.reply('none found')
        return
    matches = ["[%s] %s" % (row[0], row[1]) for row in rows]
    ievent.reply("the following matches %s: " % needle, matches)

cmnds.add('info-sd', handle_searchdescr, ['USER', 'WEB', 'ANON'])
examples.add('info-sd', 'info-sd <txt> .. search description of infoitems', 'info-sd http')
## info-si command
def handle_searchitem(bot, ievent):
    """ info-si <txt> .. search information keywords """
    if not ievent.rest:
        ievent.missing('<txt>')
        return
    needle = ievent.rest.strip().lower()
    rows = info.searchitem(needle)
    if not rows:
        ievent.reply('none found')
        return
    matches = ["[%s] %s" % (row[0], row[1]) for row in rows]
    ievent.reply("the following matches %s: " % needle, matches)

cmnds.add('info-si', handle_searchitem, ['USER', 'WEB', 'ANON'])
examples.add('info-si', 'info-si <txt> .. search the infoitems keys', 'info-si test')
## plugin initialisation
def init():
    """ plugin initialisation: bind the module-level db handle to the main database. """
    global db
    # imported lazily so the plugin can be loaded before the db layer is ready
    from jsb.db import getmaindb
    db = getmaindb()
| |
#!/usr/bin/env python
#
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from mapreduce.lib import simplejson
import shutil
import tempfile
import time
import unittest
from google.appengine.api import yaml_errors
from google.appengine.ext import db
from mapreduce import errors
from mapreduce import handlers
from mapreduce import status
from testlib import testutil
from mapreduce import test_support
from google.appengine.ext.webapp import mock_webapp
class TestKind(db.Model):
    """Used for testing."""
    # single string property with a default so instances need no explicit fields
    foobar = db.StringProperty(default="meep")
def TestMap(entity):
    """Used for testing: a no-op mapper handler that ignores its entity."""
    return None
class MapreduceYamlTest(unittest.TestCase):
    """Testing mapreduce.yaml-related functionality."""

    def set_up_directory_tree(self, dir_tree_contents):
        """Create directory tree from dict of path:contents entries."""
        for full_path, contents in dir_tree_contents.iteritems():
            dir_name = os.path.dirname(full_path)
            if not os.path.isdir(dir_name):
                os.makedirs(dir_name)
            f = open(full_path, 'w')
            f.write(contents)
            f.close()

    def setUp(self):
        """Initialize temporary application variable."""
        self.tempdir = tempfile.mkdtemp()

    def tearDown(self):
        """Remove temporary application directory."""
        if self.tempdir:
            shutil.rmtree(self.tempdir)

    def testFindYamlFile(self):
        """Test if mapreduce.yaml can be found with different app/library trees."""
        # library lives outside the application root here
        test_status = os.path.join(self.tempdir, "library_root", "google",
                                   "appengine", "ext", "mapreduce", "status.py")
        test_mapreduce_yaml = os.path.join(self.tempdir, "application_root",
                                           "mapreduce.yaml")
        test_dict = {
            test_status: "test",
            test_mapreduce_yaml: "test",
        }
        self.set_up_directory_tree(test_dict)
        os.chdir(os.path.dirname(test_mapreduce_yaml))
        yaml_loc = status.find_mapreduce_yaml(status_file=test_status)
        self.assertEqual(test_mapreduce_yaml, yaml_loc)

    def testFindYamlFileSameTree(self):
        """Test if mapreduce.yaml can be found with the same app/library tree."""
        # library vendored inside the application root
        test_status = os.path.join(self.tempdir, "application_root", "google",
                                   "appengine", "ext", "mapreduce", "status.py")
        test_mapreduce_yaml = os.path.join(self.tempdir, "application_root",
                                           "mapreduce.yaml")
        test_dict = {
            test_status: "test",
            test_mapreduce_yaml: "test",
        }
        self.set_up_directory_tree(test_dict)
        os.chdir(os.path.dirname(test_mapreduce_yaml))
        yaml_loc = status.find_mapreduce_yaml(status_file=test_status)
        self.assertEqual(test_mapreduce_yaml, yaml_loc)

    def testParseEmptyFile(self):
        """Parsing empty mapreduce.yaml file."""
        self.assertRaises(errors.BadYamlError,
                          status.parse_mapreduce_yaml,
                          "")

    def testParse(self):
        """Parsing a single document in mapreduce.yaml."""
        mr_yaml = status.parse_mapreduce_yaml(
            "mapreduce:\n"
            "- name: Mapreduce1\n"
            "  mapper:\n"
            "    handler: Handler1\n"
            "    input_reader: Reader1\n"
            "    params_validator: Validator1\n"
            "    params:\n"
            "    - name: entity_kind\n"
            "      default: Kind1\n"
            "    - name: human_supplied1\n"
            "    - name: human_supplied2\n"
            "- name: Mapreduce2\n"
            "  mapper:\n"
            "    handler: Handler2\n"
            "    input_reader: Reader2\n")
        self.assertTrue(mr_yaml)
        self.assertEquals(2, len(mr_yaml.mapreduce))
        # first job: fully specified mapper with validator and params
        self.assertEquals("Mapreduce1", mr_yaml.mapreduce[0].name)
        self.assertEquals("Handler1", mr_yaml.mapreduce[0].mapper.handler)
        self.assertEquals("Reader1", mr_yaml.mapreduce[0].mapper.input_reader)
        self.assertEquals("Validator1",
                          mr_yaml.mapreduce[0].mapper.params_validator)
        self.assertEquals(3, len(mr_yaml.mapreduce[0].mapper.params))
        self.assertEquals("entity_kind", mr_yaml.mapreduce[0].mapper.params[0].name)
        self.assertEquals("Kind1", mr_yaml.mapreduce[0].mapper.params[0].default)
        self.assertEquals("human_supplied1",
                          mr_yaml.mapreduce[0].mapper.params[1].name)
        self.assertEquals("human_supplied2",
                          mr_yaml.mapreduce[0].mapper.params[2].name)
        # second job: minimal mapper
        self.assertEquals("Mapreduce2", mr_yaml.mapreduce[1].name)
        self.assertEquals("Handler2", mr_yaml.mapreduce[1].mapper.handler)
        self.assertEquals("Reader2", mr_yaml.mapreduce[1].mapper.input_reader)

    def testParseOutputWriter(self):
        """Parsing a single document in mapreduce.yaml with output writer."""
        mr_yaml = status.parse_mapreduce_yaml(
            "mapreduce:\n"
            "- name: Mapreduce1\n"
            "  mapper:\n"
            "    handler: Handler1\n"
            "    input_reader: Reader1\n"
            "    output_writer: Writer1\n"
            )
        self.assertTrue(mr_yaml)
        self.assertEquals(1, len(mr_yaml.mapreduce))
        self.assertEquals("Mapreduce1", mr_yaml.mapreduce[0].name)
        self.assertEquals("Handler1", mr_yaml.mapreduce[0].mapper.handler)
        self.assertEquals("Reader1", mr_yaml.mapreduce[0].mapper.input_reader)
        self.assertEquals("Writer1", mr_yaml.mapreduce[0].mapper.output_writer)

    def testParseMissingRequiredAttrs(self):
        """Test parsing with missing required attributes."""
        # mapper without input_reader
        self.assertRaises(errors.BadYamlError,
                          status.parse_mapreduce_yaml,
                          "mapreduce:\n"
                          "- name: Mapreduce1\n"
                          "  mapper:\n"
                          "    handler: Handler1\n")
        # mapper without handler
        self.assertRaises(errors.BadYamlError,
                          status.parse_mapreduce_yaml,
                          "mapreduce:\n"
                          "- name: Mapreduce1\n"
                          "  mapper:\n"
                          "    input_reader: Reader1\n")

    def testBadValues(self):
        """Tests when some yaml values are of the wrong type."""
        self.assertRaises(errors.BadYamlError,
                          status.parse_mapreduce_yaml,
                          "mapreduce:\n"
                          "- name: Mapreduce1\n"
                          "  mapper:\n"
                          "    handler: Handler1\n"
                          "    input_reader: Reader1\n"
                          "    params:\n"
                          "    - name: $$Invalid$$\n")

    def testMultipleDocuments(self):
        """Tests when multiple documents are present."""
        self.assertRaises(errors.BadYamlError,
                          status.parse_mapreduce_yaml,
                          "mapreduce:\n"
                          "- name: Mapreduce1\n"
                          "  mapper:\n"
                          "    handler: Handler1\n"
                          "    input_reader: Reader1\n"
                          "---")

    def testOverlappingNames(self):
        """Tests when there are jobs with the same name."""
        self.assertRaises(errors.BadYamlError,
                          status.parse_mapreduce_yaml,
                          "mapreduce:\n"
                          "- name: Mapreduce1\n"
                          "  mapper:\n"
                          "    handler: Handler1\n"
                          "    input_reader: Reader1\n"
                          "- name: Mapreduce1\n"
                          "  mapper:\n"
                          "    handler: Handler1\n"
                          "    input_reader: Reader1\n")

    def testToDict(self):
        """Tests encoding the MR document as JSON."""
        mr_yaml = status.parse_mapreduce_yaml(
            "mapreduce:\n"
            "- name: Mapreduce1\n"
            "  mapper:\n"
            "    handler: Handler1\n"
            "    input_reader: Reader1\n"
            "    params_validator: Validator1\n"
            "    params:\n"
            "    - name: entity_kind\n"
            "      default: Kind1\n"
            "    - name: human_supplied1\n"
            "    - name: human_supplied2\n"
            "- name: Mapreduce2\n"
            "  mapper:\n"
            "    handler: Handler2\n"
            "    input_reader: Reader2\n")
        all_configs = status.MapReduceYaml.to_dict(mr_yaml)
        # params without a default are rendered as None values
        self.assertEquals(
            [
                {
                    'name': 'Mapreduce1',
                    'mapper_params_validator': 'Validator1',
                    'mapper_params': {
                        'entity_kind': 'Kind1',
                        'human_supplied2': None,
                        'human_supplied1': None},
                    'mapper_handler': 'Handler1',
                    'mapper_input_reader': 'Reader1'
                },
                {
                    'mapper_input_reader': 'Reader2',
                    'mapper_handler': 'Handler2',
                    'name': 'Mapreduce2'
                }
            ], all_configs)

    def testToDictOutputWriter(self):
        """Tests encoding the MR document with output writer as JSON."""
        mr_yaml = status.parse_mapreduce_yaml(
            "mapreduce:\n"
            "- name: Mapreduce1\n"
            "  mapper:\n"
            "    handler: Handler1\n"
            "    input_reader: Reader1\n"
            "    output_writer: Writer1\n"
            )
        all_configs = status.MapReduceYaml.to_dict(mr_yaml)
        self.assertEquals(
            [
                {
                    'name': 'Mapreduce1',
                    'mapper_handler': 'Handler1',
                    'mapper_input_reader': 'Reader1',
                    'mapper_output_writer': 'Writer1',
                },
            ], all_configs)
class ResourceTest(testutil.HandlerTestBase):
    """Tests for the resource handler."""

    def setUp(self):
        """Sets up the test harness."""
        testutil.HandlerTestBase.setUp(self)
        self.handler = status.ResourceHandler()
        self.handler.initialize(mock_webapp.MockRequest(),
                                mock_webapp.MockResponse())
        self.handler.request.path = "/mapreduce/path"

    def testPaths(self):
        """Tests that paths are accessible."""
        # the status page is served as html
        self.handler.get("status")
        self.assertTrue(self.handler.response.out.getvalue().startswith(
            "<!DOCTYPE html>"))
        self.assertEquals("text/html",
                          self.handler.response.headers["Content-Type"])
        # reset the response buffer, then fetch a javascript resource
        self.handler.response.out.truncate(0)
        self.handler.get("jquery.js")
        self.assertTrue(self.handler.response.out.getvalue().startswith(
            "/*!"))
        self.assertEquals("text/javascript",
                          self.handler.response.headers["Content-Type"])

    def testCachingHeaders(self):
        """Tests that caching headers are correct."""
        self.handler.get("status")
        self.assertEquals("public; max-age=300",
                          self.handler.response.headers["Cache-Control"])

    def testMissing(self):
        """Tests when a resource is requested that doesn't exist."""
        self.handler.get("unknown")
        self.assertEquals(404, self.handler.response.status)
class ListConfigsTest(testutil.HandlerTestBase):
    """Tests for the ListConfigsHandler."""

    def setUp(self):
        """Sets up the test harness."""
        testutil.HandlerTestBase.setUp(self)
        self.handler = status.ListConfigsHandler()
        self.handler.initialize(mock_webapp.MockRequest(),
                                mock_webapp.MockResponse())
        self.handler.request.path = "/mapreduce/command/path"
        # header required by the handler's CSRF protection
        self.handler.request.headers["X-Requested-With"] = "XMLHttpRequest"

    def testCSRF(self):
        """Test that we check the X-Requested-With header."""
        del self.handler.request.headers["X-Requested-With"]
        self.handler.get()
        self.assertEquals(403, self.handler.response.status)

    def testBasic(self):
        """Tests listing available configs."""
        # monkeypatch get_mapreduce_yaml with a fixed two-job config;
        # restored in the finally block below
        old_get_yaml = status.get_mapreduce_yaml
        status.get_mapreduce_yaml = lambda: status.parse_mapreduce_yaml(
            "mapreduce:\n"
            "- name: Mapreduce1\n"
            "  mapper:\n"
            "    handler: Handler1\n"
            "    input_reader: Reader1\n"
            "    params_validator: Validator1\n"
            "    params:\n"
            "    - name: entity_kind\n"
            "      default: Kind1\n"
            "    - name: human_supplied1\n"
            "    - name: human_supplied2\n"
            "- name: Mapreduce2\n"
            "  mapper:\n"
            "    handler: Handler2\n"
            "    input_reader: Reader2\n"
            "    params_validator: MapreduceValidator\n"
            "    params:\n"
            "    - name: foo\n"
            "      value: bar\n")
        try:
            self.handler.get()
        finally:
            status.get_mapreduce_yaml = old_get_yaml
        self.assertEquals(
            {u'configs': [
                {u'mapper_params_validator': u'Validator1',
                 u'mapper_params': {
                     u'entity_kind': u'Kind1',
                     u'human_supplied2': None,
                     u'human_supplied1': None},
                 u'mapper_input_reader': u'Reader1',
                 u'mapper_handler': u'Handler1',
                 u'name': u'Mapreduce1'},
                {u'mapper_input_reader': u'Reader2',
                 u'mapper_handler': u'Handler2',
                 u'name': u'Mapreduce2',
                 u'params': {
                     u'foo': u'bar',},
                }]},
            simplejson.loads(self.handler.response.out.getvalue()))
        self.assertEquals("text/javascript",
                          self.handler.response.headers["Content-Type"])
class ListJobsTest(testutil.HandlerTestBase):
    """Tests listing active and inactive jobs."""

    def setUp(self):
        """Sets up the test harness."""
        testutil.HandlerTestBase.setUp(self)
        # handler used to start jobs that the list handler will report on
        self.start = handlers.StartJobHandler()
        self.start.initialize(mock_webapp.MockRequest(),
                              mock_webapp.MockResponse())
        self.start.request.path = "/mapreduce/command/start"
        self.start.request.set(
            "mapper_input_reader",
            "mapreduce.input_readers.DatastoreInputReader")
        self.start.request.set("mapper_handler", "__main__.TestMap")
        self.start.request.set("mapper_params.entity_kind", "__main__.TestKind")
        self.start.request.headers["X-Requested-With"] = "XMLHttpRequest"
        # handler under test
        self.handler = status.ListJobsHandler()
        self.handler.initialize(mock_webapp.MockRequest(),
                                mock_webapp.MockResponse())
        self.handler.request.path = "/mapreduce/command/list"
        self.handler.request.headers["X-Requested-With"] = "XMLHttpRequest"

    def testCSRF(self):
        """Test that we check the X-Requested-With header."""
        TestKind().put()
        del self.start.request.headers["X-Requested-With"]
        self.start.post()
        self.assertEquals(403, self.start.response.status)
        del self.handler.request.headers["X-Requested-With"]
        self.handler.get()
        self.assertEquals(403, self.handler.response.status)

    def testBasic(self):
        """Tests when there are fewer than the max results to render."""
        TestKind().put()
        # start three jobs; sleeps keep their start timestamps distinct
        self.start.request.set("name", "my job 1")
        self.start.post()
        time.sleep(.1)
        self.start.request.set("name", "my job 2")
        self.start.post()
        time.sleep(.1)
        self.start.request.set("name", "my job 3")
        self.start.post()
        self.handler.get()
        result = simplejson.loads(self.handler.response.out.getvalue())
        expected_args = set([
            "active",
            "active_shards",
            "chart_url",
            "chart_width",
            "mapreduce_id",
            "name",
            "shards",
            "start_timestamp_ms",
            "updated_timestamp_ms",
        ])
        # newest job first
        self.assertEquals(3, len(result["jobs"]))
        self.assertEquals("my job 3", result["jobs"][0]["name"])
        self.assertEquals("my job 2", result["jobs"][1]["name"])
        self.assertEquals("my job 1", result["jobs"][2]["name"])
        self.assertEquals(expected_args, set(result["jobs"][0].keys()))
        self.assertEquals(expected_args, set(result["jobs"][1].keys()))
        self.assertEquals(expected_args, set(result["jobs"][2].keys()))

    def testCursor(self):
        """Tests when a job cursor is present."""
        TestKind().put()
        self.start.request.set("name", "my job 1")
        self.start.post()
        time.sleep(.1)  # Can not start two jobs before time advances
        self.start.request.set("name", "my job 2")
        self.start.post()
        # first page of one result should include a continuation cursor
        self.handler.request.set("count", "1")
        self.handler.get()
        result = simplejson.loads(self.handler.response.out.getvalue())
        self.assertEquals(1, len(result["jobs"]))
        self.assertTrue("cursor" in result)
        # second page fetched via the cursor should be the last one
        self.handler.response.out.truncate(0)
        self.handler.request.set("count", "1")
        self.handler.request.set("cursor", result['cursor'])
        self.handler.get()
        result2 = simplejson.loads(self.handler.response.out.getvalue())
        self.assertEquals(1, len(result2["jobs"]))
        self.assertFalse("cursor" in result2)

    def testNoJobs(self):
        """Tests when there are no jobs."""
        self.handler.get()
        result = simplejson.loads(self.handler.response.out.getvalue())
        self.assertEquals({'jobs': []}, result)
class GetJobDetailTest(testutil.HandlerTestBase):
    """Tests listing job status detail."""

    def setUp(self):
        """Sets up the test harness."""
        testutil.HandlerTestBase.setUp(self)
        # seed the datastore so the job has entities to map over
        for _ in range(100):
            TestKind().put()
        # start one job and capture its id for the detail requests
        self.start = handlers.StartJobHandler()
        self.start.initialize(mock_webapp.MockRequest(),
                              mock_webapp.MockResponse())
        self.start.request.path = "/mapreduce/command/start"
        self.start.request.set("name", "my job 1")
        self.start.request.set(
            "mapper_input_reader",
            "mapreduce.input_readers.DatastoreInputReader")
        self.start.request.set("mapper_handler", "__main__.TestMap")
        self.start.request.set("mapper_params.entity_kind", "__main__.TestKind")
        self.start.request.headers["X-Requested-With"] = "XMLHttpRequest"
        self.start.post()
        result = simplejson.loads(self.start.response.out.getvalue())
        self.mapreduce_id = result["mapreduce_id"]
        # handler under test
        self.handler = status.GetJobDetailHandler()
        self.handler.initialize(mock_webapp.MockRequest(),
                                mock_webapp.MockResponse())
        self.handler.request.path = "/mapreduce/command/list"
        self.handler.request.headers["X-Requested-With"] = "XMLHttpRequest"

    def KickOffMapreduce(self):
        """Executes pending kickoff task."""
        test_support.execute_all_tasks(self.taskqueue)

    def testCSRF(self):
        """Test that we check the X-Requested-With header."""
        del self.handler.request.headers["X-Requested-With"]
        self.handler.get()
        self.assertEquals(403, self.handler.response.status)

    def testBasic(self):
        """Tests getting the job details."""
        self.KickOffMapreduce()
        self.handler.request.set("mapreduce_id", self.mapreduce_id)
        self.handler.get()
        result = simplejson.loads(self.handler.response.out.getvalue())
        expected_keys = set([
            "active", "chart_url", "counters", "mapper_spec", "mapreduce_id",
            "name", "result_status", "shards", "start_timestamp_ms",
            "updated_timestamp_ms", "params", "hooks_class_name", "chart_width"])
        expected_shard_keys = set([
            "active", "counters", "last_work_item", "result_status",
            "shard_description", "shard_id", "shard_number",
            "updated_timestamp_ms"])
        self.assertEquals(expected_keys, set(result.keys()))
        self.assertEquals(8, len(result["shards"]))
        self.assertEquals(expected_shard_keys, set(result["shards"][0].keys()))

    def testBeforeKickOff(self):
        """Tests getting the job details."""
        # same top-level keys even before the kickoff task has run
        self.handler.request.set("mapreduce_id", self.mapreduce_id)
        self.handler.get()
        result = simplejson.loads(self.handler.response.out.getvalue())
        expected_keys = set([
            "active", "chart_url", "counters", "mapper_spec", "mapreduce_id",
            "name", "result_status", "shards", "start_timestamp_ms",
            "updated_timestamp_ms", "params", "hooks_class_name", "chart_width"])
        self.assertEquals(expected_keys, set(result.keys()))

    def testBadJobId(self):
        """Tests when an invalid job ID is supplied."""
        self.handler.request.set("mapreduce_id", "does not exist")
        self.handler.get()
        result = simplejson.loads(self.handler.response.out.getvalue())
        self.assertEquals(
            {"error_message": "\"Could not find job with ID 'does not exist'\"",
             "error_class": "KeyError"},
            result)
# TODO(user): Add tests for abort
# TODO(user): Add tests for cleanup
# run the full suite when invoked directly
if __name__ == "__main__":
    unittest.main()
| |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
import warnings
from collections import defaultdict
import sys
import re
import xml.etree.ElementTree as ET
import xml.dom.minidom
from six import u, iteritems, PY2
try:
    # Python 2: builtin unichr exists
    unichr
except NameError:  # pragma: nocover
    # Python 3: chr already covers the full unicode range
    unichr = chr
"""
Based on the understanding of what Jenkins can parse for JUnit XML files.
<?xml version="1.0" encoding="utf-8"?>
<testsuites errors="1" failures="1" tests="4" time="45">
<testsuite errors="1" failures="1" hostname="localhost" id="0" name="test1"
package="testdb" tests="4" timestamp="2012-11-15T01:02:29">
<properties>
<property name="assert-passed" value="1"/>
</properties>
<testcase classname="testdb.directory" name="1-passed-test" time="10"/>
<testcase classname="testdb.directory" name="2-failed-test" time="20">
<failure message="Assertion FAILED: failed assert" type="failure">
the output of the testcase
</failure>
</testcase>
<testcase classname="package.directory" name="3-errord-test" time="15">
<error message="Assertion ERROR: error assert" type="error">
the output of the testcase
</error>
</testcase>
<testcase classname="package.directory" name="3-skipped-test" time="0">
<skipped message="SKIPPED Test" type="skipped">
the output of the testcase
</skipped>
</testcase>
<testcase classname="testdb.directory" name="3-passed-test" time="10">
<system-out>
I am system output
</system-out>
<system-err>
I am the error output
</system-err>
</testcase>
</testsuite>
</testsuites>
"""
def decode(var, encoding):
    """
    If not already unicode, decode it.
    """
    if not PY2:
        # Python 3: everything stringifies to unicode
        return str(var)
    if isinstance(var, unicode):  # noqa: F821
        return var
    if isinstance(var, str) and encoding:
        return var.decode(encoding)
    return unicode(var)  # noqa: F821
class TestSuite(object):
    """
    Suite of test cases.
    Can handle unicode strings or binary strings if their encoding is provided.
    """

    def __init__(
        self,
        name,
        test_cases=None,
        hostname=None,
        id=None,
        package=None,
        timestamp=None,
        properties=None,
        file=None,
        log=None,
        url=None,
        stdout=None,
        stderr=None,
    ):
        """
        Create a suite.

        @param name: name of the suite (required)
        @param test_cases: iterable of test case objects; defaults to empty
        @param properties: dict rendered as <property> children
        Remaining parameters map directly onto optional <testsuite> attributes.
        """
        self.name = name
        if not test_cases:
            test_cases = []
        try:
            iter(test_cases)
        except TypeError:
            raise TypeError("test_cases must be a list of test cases")
        self.test_cases = test_cases
        self.timestamp = timestamp
        self.hostname = hostname
        self.id = id
        self.package = package
        self.file = file
        self.log = log
        self.url = url
        self.stdout = stdout
        self.stderr = stderr
        self.properties = properties

    def build_xml_doc(self, encoding=None):
        """
        Builds the XML document for the JUnit test suite.
        Produces clean unicode strings and decodes non-unicode with the help of encoding.
        @param encoding: Used to decode encoded strings.
        @return: XML document with unicode string elements
        """
        # build the test suite element: aggregate counters first
        test_suite_attributes = dict()
        if any(c.assertions for c in self.test_cases):
            test_suite_attributes["assertions"] = str(sum([int(c.assertions) for c in self.test_cases if c.assertions]))
        test_suite_attributes["disabled"] = str(len([c for c in self.test_cases if not c.is_enabled]))
        test_suite_attributes["errors"] = str(len([c for c in self.test_cases if c.is_error()]))
        test_suite_attributes["failures"] = str(len([c for c in self.test_cases if c.is_failure()]))
        test_suite_attributes["name"] = decode(self.name, encoding)
        test_suite_attributes["skipped"] = str(len([c for c in self.test_cases if c.is_skipped()]))
        test_suite_attributes["tests"] = str(len(self.test_cases))
        test_suite_attributes["time"] = str(sum(c.elapsed_sec for c in self.test_cases if c.elapsed_sec))
        # optional suite-level attributes, emitted only when set
        if self.hostname:
            test_suite_attributes["hostname"] = decode(self.hostname, encoding)
        if self.id:
            test_suite_attributes["id"] = decode(self.id, encoding)
        if self.package:
            test_suite_attributes["package"] = decode(self.package, encoding)
        if self.timestamp:
            test_suite_attributes["timestamp"] = decode(self.timestamp, encoding)
        if self.file:
            test_suite_attributes["file"] = decode(self.file, encoding)
        if self.log:
            test_suite_attributes["log"] = decode(self.log, encoding)
        if self.url:
            test_suite_attributes["url"] = decode(self.url, encoding)
        xml_element = ET.Element("testsuite", test_suite_attributes)
        # add any properties
        if self.properties:
            props_element = ET.SubElement(xml_element, "properties")
            for k, v in self.properties.items():
                attrs = {"name": decode(k, encoding), "value": decode(v, encoding)}
                ET.SubElement(props_element, "property", attrs)
        # add test suite stdout
        if self.stdout:
            stdout_element = ET.SubElement(xml_element, "system-out")
            stdout_element.text = decode(self.stdout, encoding)
        # add test suite stderr
        if self.stderr:
            stderr_element = ET.SubElement(xml_element, "system-err")
            stderr_element.text = decode(self.stderr, encoding)
        # test cases: one <testcase> child per case
        for case in self.test_cases:
            test_case_attributes = dict()
            test_case_attributes["name"] = decode(case.name, encoding)
            if case.assertions:
                # Number of assertions in the test case
                test_case_attributes["assertions"] = "%d" % case.assertions
            if case.elapsed_sec:
                test_case_attributes["time"] = "%f" % case.elapsed_sec
            if case.timestamp:
                test_case_attributes["timestamp"] = decode(case.timestamp, encoding)
            if case.classname:
                test_case_attributes["classname"] = decode(case.classname, encoding)
            if case.status:
                test_case_attributes["status"] = decode(case.status, encoding)
            if case.category:
                test_case_attributes["class"] = decode(case.category, encoding)
            if case.file:
                test_case_attributes["file"] = decode(case.file, encoding)
            if case.line:
                test_case_attributes["line"] = decode(case.line, encoding)
            if case.log:
                test_case_attributes["log"] = decode(case.log, encoding)
            if case.url:
                test_case_attributes["url"] = decode(case.url, encoding)
            test_case_element = ET.SubElement(xml_element, "testcase", test_case_attributes)
            # failures: skipped entirely when both message and output are empty
            for failure in case.failures:
                if failure["output"] or failure["message"]:
                    attrs = {"type": "failure"}
                    if failure["message"]:
                        attrs["message"] = decode(failure["message"], encoding)
                    if failure["type"]:
                        attrs["type"] = decode(failure["type"], encoding)
                    failure_element = ET.Element("failure", attrs)
                    if failure["output"]:
                        failure_element.text = decode(failure["output"], encoding)
                    test_case_element.append(failure_element)
            # errors: same shape as failures
            for error in case.errors:
                if error["message"] or error["output"]:
                    attrs = {"type": "error"}
                    if error["message"]:
                        attrs["message"] = decode(error["message"], encoding)
                    if error["type"]:
                        attrs["type"] = decode(error["type"], encoding)
                    error_element = ET.Element("error", attrs)
                    if error["output"]:
                        error_element.text = decode(error["output"], encoding)
                    test_case_element.append(error_element)
            # skippeds: always emitted, even with empty message
            for skipped in case.skipped:
                attrs = {"type": "skipped"}
                if skipped["message"]:
                    attrs["message"] = decode(skipped["message"], encoding)
                skipped_element = ET.Element("skipped", attrs)
                if skipped["output"]:
                    skipped_element.text = decode(skipped["output"], encoding)
                test_case_element.append(skipped_element)
            # test stdout
            if case.stdout:
                stdout_element = ET.Element("system-out")
                stdout_element.text = decode(case.stdout, encoding)
                test_case_element.append(stdout_element)
            # test stderr
            if case.stderr:
                stderr_element = ET.Element("system-err")
                stderr_element.text = decode(case.stderr, encoding)
                test_case_element.append(stderr_element)
        return xml_element

    @staticmethod
    def to_xml_string(test_suites, prettyprint=True, encoding=None):
        """
        Returns the string representation of the JUnit XML document.
        @param encoding: The encoding of the input.
        @return: unicode string

        Deprecated: delegates to the module-level to_xml_report_string.
        """
        warnings.warn(
            "Testsuite.to_xml_string is deprecated. It will be removed in version 2.0.0. "
            "Use function to_xml_report_string",
            DeprecationWarning,
        )
        return to_xml_report_string(test_suites, prettyprint, encoding)

    @staticmethod
    def to_file(file_descriptor, test_suites, prettyprint=True, encoding=None):
        """
        Writes the JUnit XML document to a file.

        Deprecated: delegates to the module-level to_xml_report_file.
        """
        warnings.warn(
            "Testsuite.to_file is deprecated. It will be removed in version 2.0.0. Use function to_xml_report_file",
            DeprecationWarning,
        )
        to_xml_report_file(file_descriptor, test_suites, prettyprint, encoding)
def to_xml_report_string(test_suites, prettyprint=True, encoding=None):
    """
    Returns the string representation of the JUnit XML document.
    @param encoding: The encoding of the input.
    @return: unicode string
    """
    try:
        iter(test_suites)
    except TypeError:
        raise TypeError("test_suites must be a list of test suites")
    root = ET.Element("testsuites")
    totals = defaultdict(int)
    for suite in test_suites:
        suite_xml = suite.build_xml_doc(encoding=encoding)
        # roll per-suite counters up onto the <testsuites> root
        for attr in ["disabled", "errors", "failures", "tests"]:
            totals[attr] += int(suite_xml.get(attr, 0))
        for attr in ["time"]:
            totals[attr] += float(suite_xml.get(attr, 0))
        root.append(suite_xml)
    for attr, value in iteritems(totals):
        root.set(attr, str(value))
    xml_string = ET.tostring(root, encoding=encoding)
    # decode to unicode and drop characters XML forbids
    xml_string = _clean_illegal_xml_chars(xml_string.decode(encoding or "utf-8"))
    if prettyprint:
        # minidom.parseString() works just on correctly encoded binary strings
        parsed = xml.dom.minidom.parseString(xml_string.encode(encoding or "utf-8"))
        # toprettyxml() produces unicode if no encoding is being passed or binary string with an encoding
        xml_string = parsed.toprettyxml(encoding=encoding)
        if encoding:
            xml_string = xml_string.decode(encoding)
    # unicode at this point
    return xml_string
def to_xml_report_file(file_descriptor, test_suites, prettyprint=True, encoding=None):
    """
    Serialize *test_suites* as a JUnit XML document and write the result to
    *file_descriptor*.
    """
    report = to_xml_report_string(test_suites, prettyprint=prettyprint, encoding=encoding)
    # NOTE: writing may misbehave for encoded str containing non-ASCII
    # (non-default-encoding) characters.
    file_descriptor.write(report)
def _clean_illegal_xml_chars(string_to_clean):
    """
    Strip unicode characters that are illegal in XML from *string_to_clean*.
    @see: http://stackoverflow.com/questions/1707890/fast-way-to-filter-illegal-xml-unicode-chars-in-python
    """
    illegal_unichrs = [
        (0x00, 0x08),
        (0x0B, 0x1F),
        (0x7F, 0x84),
        (0x86, 0x9F),
        (0xD800, 0xDFFF),
        (0xFDD0, 0xFDDF),
        (0xFFFE, 0xFFFF),
        (0x1FFFE, 0x1FFFF),
        (0x2FFFE, 0x2FFFF),
        (0x3FFFE, 0x3FFFF),
        (0x4FFFE, 0x4FFFF),
        (0x5FFFE, 0x5FFFF),
        (0x6FFFE, 0x6FFFF),
        (0x7FFFE, 0x7FFFF),
        (0x8FFFE, 0x8FFFF),
        (0x9FFFE, 0x9FFFF),
        (0xAFFFE, 0xAFFFF),
        (0xBFFFE, 0xBFFFF),
        (0xCFFFE, 0xCFFFF),
        (0xDFFFE, 0xDFFFF),
        (0xEFFFE, 0xEFFFF),
        (0xFFFFE, 0xFFFFF),
        (0x10FFFE, 0x10FFFF),
    ]
    # Only ranges representable on this build (narrow vs. wide unicode)
    # become part of the character class.
    ranges = []
    for low, high in illegal_unichrs:
        if low < sys.maxunicode:
            ranges.append("%s-%s" % (unichr(low), unichr(high)))
    illegal_xml_re = re.compile(u("[%s]") % u("").join(ranges))
    return illegal_xml_re.sub("", string_to_clean)
class TestCase(object):
    """A JUnit test case with a result and possibly some stdout or stderr.

    A case holds lists of error/failure/skipped entries (dicts with
    'message', 'output' and, for errors/failures, 'type' keys). By default
    each list keeps at most one entry whose fields are merged on repeated
    calls; pass allow_multiple_subelements=True to append a new entry per
    call instead.
    """

    def __init__(
        self,
        name,
        classname=None,
        elapsed_sec=None,
        stdout=None,
        stderr=None,
        assertions=None,
        timestamp=None,
        status=None,
        category=None,
        file=None,
        line=None,
        log=None,
        url=None,
        allow_multiple_subelements=False,
    ):
        self.name = name
        self.assertions = assertions
        self.elapsed_sec = elapsed_sec
        self.timestamp = timestamp
        self.classname = classname
        self.status = status
        self.category = category
        self.file = file
        self.line = line
        self.log = log
        self.url = url
        self.stdout = stdout
        self.stderr = stderr
        self.is_enabled = True
        self.errors = []
        self.failures = []
        self.skipped = []
        # Bug fix: this attribute was historically stored under the
        # misspelled name "allow_multiple_subalements"; store it under the
        # correct spelling and keep the old name as a property alias below.
        self.allow_multiple_subelements = allow_multiple_subelements

    @property
    def allow_multiple_subalements(self):
        # Backward-compatible alias for the historically misspelled attribute.
        return self.allow_multiple_subelements

    @allow_multiple_subalements.setter
    def allow_multiple_subalements(self, value):
        self.allow_multiple_subelements = value

    def _add_info(self, collection, info):
        """Append *info* to *collection*, or - when multiple sub-elements are
        not allowed - merge its truthy fields into the first entry."""
        if self.allow_multiple_subelements:
            # Only record entries that carry some content.
            if info.get("message") or info.get("output"):
                collection.append(info)
        elif not collection:
            collection.append(info)
        else:
            # Merge: only truthy values overwrite the existing entry.
            for key, value in info.items():
                if value:
                    collection[0][key] = value

    def add_error_info(self, message=None, output=None, error_type=None):
        """Adds an error message, output, or both to the test case"""
        self._add_info(self.errors, {"message": message, "output": output, "type": error_type})

    def add_failure_info(self, message=None, output=None, failure_type=None):
        """Adds a failure message, output, or both to the test case"""
        self._add_info(self.failures, {"message": message, "output": output, "type": failure_type})

    def add_skipped_info(self, message=None, output=None):
        """Adds a skipped message, output, or both to the test case"""
        self._add_info(self.skipped, {"message": message, "output": output})

    def is_failure(self):
        """returns true if this test case is a failure"""
        return any(f["message"] or f["output"] for f in self.failures)

    def is_error(self):
        """returns true if this test case is an error"""
        return any(e["message"] or e["output"] for e in self.errors)

    def is_skipped(self):
        """returns true if this test case has been skipped"""
        return len(self.skipped) > 0
| |
#!/usr/bin/env python
import pyodbc
import json
from toolbox import process_data_row
from files import (get_schema_file, loop_delimited_file)
from toolbox import _defaultencode
import logging
log = logging.getLogger(__name__)
def connect(server, database, username, password):
    """Build pyodbc connection to SQL Server.

    NOTE: despite the historical mention of "ODBC Driver 11 for SQL Server",
    the connection string actually uses the FreeTDS driver.

    Args:
        server: string, name or ip address for SQL Server
        database: string, database name
        username: string, username for the database
        password: string, password for the database
    Returns:
        pyodbc connection object
    Raises:
        ValueError: re-raised (with original traceback) if building the
            connection fails with a ValueError.
    """
    try:
        connect_string = ('DRIVER={FreeTDS};SERVER=' + server.encode('utf-8') +
                          ';PORT=1433;DATABASE=' + database.encode('utf-8') +
                          ';UID=' + username.encode('utf-8') +
                          ';PWD=' + password.encode('utf-8'))
        connection = pyodbc.connect(connect_string)
    except ValueError as e:
        # Bug fix: logging uses printf-style lazy arguments; the previous
        # call passed the exception as a stray positional argument with no
        # placeholder, which blew up at log-emit time instead of logging.
        log.error("Error creating database connection: %s", e)
        # Bare raise preserves the original traceback (raise e truncated it).
        raise
    return connection
def insert_list_to_sql(connection, lst, tableName):
    """Inserts from a list to a SQL table. List must have the same format and
    item order as the table columns.

    WARNING: values are interpolated directly into the SQL text (no
    parameterization), so this must never be fed untrusted input.

    Args:
        connection: pyodbc.connect() object used to run the inserts
        lst: list of rows (each row an iterable of column values)
        tableName: string, Fully qualified SQL table name
    Returns:
        None
    """
    # One INSERT statement per row. (The old element-by-element copy of lst
    # was redundant and has been removed.)
    for row in lst:
        parts = []
        for colval in row:
            try:
                # String values are single-quoted...
                parts.append("'" + colval + "'")
            except TypeError:
                # ...everything else (numbers, None) is stringified unquoted.
                parts.append(str(colval))
        valstring = '(' + ','.join(parts) + ')'
        query = "INSERT INTO {0} VALUES {1}".format(tableName, valstring)
        run_sql(connection, query)
    return
def insert_list_to_sql_batch(connection,lst,tableName,batchsize=1000):
    """Inserts from a list to a SQL table. List must have the same format and item order as the table columns.
    Args:
        lst: list, Values to insert to table (one inner iterable per row)
        tableName: string, Fully qualified SQL table name
        batchsize: specifies what size you'd want the batches to run as
        connection: sql server connection
    Returns:
        None
    """
    # Rows are accumulated as "SELECT v1,v2,... UNION ALL SELECT ..." text and
    # flushed with a single INSERT per batch.
    insertvals = ''
    batchcnt = 0
    lstcnt = 0
    lstsize = len(lst)
    rowstr = 'SELECT '
    for row in lst:
        # NOTE(review): lstcnt is only incremented on the non-flush branch,
        # so `lstcnt == lstsize` appears unreachable; the trailing
        # `if batchcnt > 0` flush below is what actually handles the final
        # partial batch. Confirm before relying on exact batch boundaries.
        if batchcnt == batchsize or lstcnt == lstsize:
            # Flush branch: the current row is appended and the whole
            # accumulated batch is inserted.
            for val in row:
                if type(val) == int or val == 'null':
                    rowstr += str(val) +','
                else:
                    # Non-int values are quoted; values are interpolated, not
                    # parameterized - do not feed untrusted input.
                    rowstr += "'" + str(val) + "',"
            insertvals = insertvals + rowstr[:-1] + ' UNION ALL '
            c = run_sql(connection,"INSERT INTO {0} {1}".format(tableName, insertvals[:-11]))
            insertvals = ''
            rowstr = 'SELECT '
            batchcnt = 0
        else:
            # Accumulate branch: append the row and keep counting.
            for val in row:
                if type(val) == int or val == 'null':
                    rowstr += str(val) +','
                else:
                    rowstr += "'" + str(val) + "',"
            insertvals = insertvals + rowstr[:-1] + ' UNION ALL '
            rowstr = 'SELECT '
            batchcnt += 1
            lstcnt += 1
    if batchcnt > 0:
        # Insert whatever is left over from the last partial batch.
        c = run_sql(connection,"INSERT INTO {0} {1}".format(tableName, insertvals[:-11]))
    return
def run_sql(connection, query):
    """Execute *query* on *connection* and commit the transaction.

    Args:
        connection: pyodbc.connect() object, Connection to use when running Sql
        query: string, Valid query string
    Returns:
        cursor object, the cursor that executed the statement
    """
    cur = connection.cursor()
    cur.execute(query.encode('utf-8'))
    connection.commit()
    return cur
def truncate_sql_table(connection, table_name):
    """Issue TRUNCATE TABLE for *table_name* and commit the change.

    Args:
        connection: pyodbc.connect() object, Connection to use for truncate
        table_name: string, Fully qualified SQL table name (make sure this is
            the table you want to clear!)
    Returns:
        None
    """
    statement = "truncate table " + table_name
    cur = connection.cursor()
    cur.execute(statement.encode('utf-8'))
    connection.commit()
    return
def create_table(connection, table_name, schema_file):
    """Create table if it does not already exist.

    Args:
        connection: pyodbc.connect() object, Connection to use when running Sql
        table_name: string, Table name including db schema (ex: my_schema.my_table)
        schema_file: string, Path to csv schema file with each row as col_name, data_type
    Returns:
        cursor object, Results of the call to pyodb.connection().cursor().execute(query)
    """
    cursor = connection.cursor()
    schema_list = get_schema_file(schema_file)
    table_split = table_name.split('.')
    table = table_split[-1]
    use_db = ""
    if len(table_split) > 1:
        use_db = "USE {0}; ".format(table_split[0])
    # Bug fix: sys.tables [name] holds the unqualified table name, so the
    # existence check must compare against `table`, not the qualified
    # `table_name` (which never matched, so CREATE ran even when the table
    # already existed).
    ddl = use_db + """IF NOT EXISTS ( SELECT [name] FROM sys.tables WHERE [name] = '{0}' )
    CREATE TABLE {1} (""".format(table, table_name)
    # All columns are created NULLable.
    for col, dt in schema_list:
        ddl = ddl + col + ' ' + dt + ' NULL, '
    ddl = ddl[:-2] + ');'
    try:
        log.debug(ddl)
        cursor.execute(ddl.encode('utf-8'))
    except UnicodeDecodeError:
        # Fall back to the raw string if encoding fails.
        cursor.execute(ddl)
    return cursor
def create_index(connection, table_name, index):
    """Create index on *table_name* if it does not already exist.

    Args:
        connection: pyodbc.connect() object, Connection to use when running Sql
        table_name: string, Table name including db schema (ex: my_schema.my_table)
        index: string, Column name of index (can put multiple columns comma
            delimited if desired); None is a no-op
    Returns:
        cursor object, Results of the call to pyodb.connection().cursor().execute(query)
    """
    cursor = connection.cursor()
    table_split = table_name.split('.')
    table = table_split[-1]
    if len(table_split) > 1:
        # Switch to the right database before touching sys.indexes.
        use_db = "USE {0}; ".format(table_split[0])
        run_sql(connection, use_db)
    if index is not None:
        idx_name = table + '_idx'

        def _build_index():
            # Shared helper (was duplicated): issue CREATE INDEX and commit,
            # tolerating DDL that cannot be utf-8 encoded.
            ddl2 = 'CREATE INDEX {0} ON {1}({2});'.format(idx_name, table_name, index)
            try:
                cursor.execute(ddl2.encode('utf-8'))
                connection.commit()
            except UnicodeDecodeError:
                cursor.execute(ddl2)
                connection.commit()

        sql = "SELECT name FROM sys.indexes where name = '{0}' and object_id = OBJECT_ID('{1}')".format(idx_name, table)
        log.debug("SQL to run: " + sql)
        try:
            exists = sql_get_query_data(connection, sql)
            # fetchone() returns None when no row matches; subscripting it
            # raises TypeError, handled below as "index does not exist".
            val = exists.fetchone()[0]
            if val != idx_name:
                _build_index()
        except TypeError:
            log.info("Index does not exist, will attempt to create it")
            _build_index()
    return cursor
def sql_get_schema(connection, query, include_extract_date=True):
    """Reads schema from database by running the provided query. It's
    recommended to pass a query that is limited to 1 record to minimize the
    amount of rows accessed on the server.

    Args:
        connection: pyodbc.connect() object, Connection to use when running Sql
        query: string, Valid query string
        include_extract_date: boolean, defaults to True to add current
            timestamp field 'ExtractDate' to results
    Returns:
        list, each list item contains field name and data type
    """
    # (Removed an unused function-local `import json` and dead commented-out
    # code from the original.)
    cursor = connection.cursor()
    cursor.execute(query)
    # DB-API cursor.description rows are (name, type_code, ...); keep the
    # first two entries of each.
    schema_list = [i[0:2] for i in cursor.description]
    if include_extract_date:
        schema_list.append(['ExtractDate', 'datetime'])
    return schema_list
def sql_get_table_data(connection, table, schema='dbo', include_extract_date=True):
    """Select every record from *schema*.*table* (select * ... with (nolock)).

    Args:
        connection: pyodbc.connect() object, Connection to use when selecting data
        table: string, Valid table
        schema: string, schema the table lives in (default 'dbo')
        include_extract_date: boolean, append a getdate() column named
            ExtractDate when True
    Returns:
        cursor object, Results of the call to pyodb.connection().cursor().execute(query)
    """
    if include_extract_date:
        extract_date = ", getdate() as ExtractDate"
    else:
        extract_date = ""
    query = 'select * ' + extract_date + ' from ' + schema + '.[' + table + '] with (nolock)'
    log.info(query)
    cur = connection.cursor()
    cur.execute(query.encode('utf-8'))
    return cur
def sql_get_query_data(connection, query):
    """Execute *query* and return the pyodbc cursor holding its results.

    Args:
        connection: pyodbc.connect() object, Connection to use when selecting data
        query: string, Valid select statement
    Returns:
        cursor object, Results of the call to pyodb.connection().cursor().execute(query)
    """
    cur = connection.cursor()
    cur.execute(query.encode('utf-8'))
    return cur
def cursor_to_json(cursor, dest_file, dest_schema_file=None, source_schema_file=None):
    """Takes a cursor and creates JSON file with the data
    and a schema file for loading to other data systems.
    Args:
        cursor: cursor object with data to extract to file
        dest_file: string, path and file name to save data
        dest_schema_file: optional string, path for a derived csv schema file
        source_schema_file: optional string, csv schema file to use instead
            of deriving the schema from cursor.description
    Returns:
        None
    """
    if source_schema_file is None:
        # Derive (name, type-string) pairs from the DB-API cursor description.
        schema = []
        for i in cursor.description:
            schema.append([i[0],str(i[1])])
    else:
        schema = get_schema_file(source_schema_file)
    if dest_schema_file is not None:
        # NOTE(review): files are opened 'wb' but written with str - Python 2
        # semantics; confirm before running under Python 3.
        with open(dest_schema_file,'wb') as schemafile:
            for row in schema:
                col = row[0]
                # Map the source type string onto a coarse destination type.
                # Branch order matters: 'bigint' must be tested before 'int'.
                if 'date' in row[1]:
                    datatype = 'timestamp'
                elif 'list' in row[1]:
                    datatype = 'list'
                elif 'bigint' in row[1]:
                    datatype = 'bigint'
                elif 'int' in row[1] or 'long' in row[1]:
                    datatype = 'integer'
                elif 'float' in row[1]:
                    datatype = 'float'
                elif 'bool' in row[1]:
                    datatype = 'boolean'
                elif 'str' in row[1]:
                    datatype = 'string'
                else:
                    # Unknown types default to string.
                    datatype = 'string'
                schemafile.write("%s\n" % (col + ',' + datatype))
    with open(dest_file,'wb') as outfile:
        # One JSON object per line (jsonlines-style output).
        for row in cursor:
            result_dct = process_data_row(row,schema)
            outfile.write("%s\n" % json.dumps(result_dct, default=_defaultencode))
def load_csv_to_table(table, schema_file, csv_file, server, database, config, cred_file='config/dblogin.config', skipfirstrow=1):
    """Takes csv file, schema file, with sql server connection params and
    inserts data to a specified table.

    Args:
        table: table name where csv data will be written
        schema_file: schema file that has all column names and data type names
        csv_file: data being loaded
        server: sql server host name
        database: database name on the server
        config: which configuration name to pull username and password credentials
        cred_file: location of db login config file
        skipfirstrow(optional): if 1 then skip the first row of data (exclude headers)
    Returns:
        None
    """
    # (loop_delimited_file was imported here but never used; removed.)
    from files import loop_csv_file
    from files import get_schema_file
    with open(cred_file, 'rb') as cred:
        db_info = json.loads(cred.read())
    username = db_info[config]['username']
    password = db_info[config]['password']
    data_list = loop_csv_file(csv_file)
    # Bug fix: this module defines connect(), not mssql_connect(); the old
    # call raised NameError at runtime.
    connection = connect(server, database, username, password)
    schema_list = get_schema_file(schema_file)
    # skips the first value of data_list which is the header
    data_list = iter(data_list)
    if skipfirstrow == 1:
        next(data_list)
    insert_datarows_to_table(data_list, schema_list, connection, table)
def load_delimited_file_to_table(connection, table , source_file, schema_file, skipfirstrow=1, delimiter=','):
    """Takes a delimited file, schema file, and an existing db connection and
    inserts the file's data into the specified table.

    Args:
        connection: pyodbc.connect() object used for the inserts
        table: table name where the file's data will be written
        source_file: path to the delimited data file
        schema_file: schema file that has all column names and data type names
        skipfirstrow(optional): if 1 then skip the first row of data (exclude headers)
        delimiter: field delimiter used by the source file (default ',')
    Returns:
        None
    """
    data_list = loop_delimited_file(source_file,delimiter=delimiter)
    schema_list = get_schema_file(schema_file)
    #skips the first value of data_list which is the header
    data_list = iter(data_list)
    if skipfirstrow == 1:
        next(data_list)
    insert_datarows_to_table(data_list,schema_list,connection,table)
def insert_datarows_to_table(data_list, schema_list, connection, table):
    """Coerce each row to the column types described by *schema_list*, then
    batch-insert the converted rows into *table*.

    Args:
        data_list: a list of lists which contain data row Values
        schema_list: a list of lists which contains all the column names with
            their respective data type
        connection: pyodbc.connect() object used for the inserts
        table: destination table name
    Returns:
        None
    """
    insert_list = []
    for row in data_list:
        converted = []
        for idx, val in enumerate(row):
            dtype = schema_list[idx][1]
            if 'int' in dtype:
                # Empty/placeholder values become SQL NULL.
                if val in ('null', ''):
                    converted.append('null')
                else:
                    converted.append(int(val))
            elif 'date' in dtype:
                # Truncate to 'YYYY-MM-DD HH:MM:SS' (19 chars).
                converted.append(str(val)[:19])
            else:
                # Escape single quotes for the string-built INSERT.
                converted.append(str(val).replace("'", "''"))
        insert_list.append(converted)
    insert_list_to_sql_batch(connection, insert_list, table, 100)
| |
# Copyright 2012 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_log import log as logging
from oslo_log import versionutils
from oslo_utils import timeutils
import webob.exc
from jacket.api.storage import extensions
from jacket.api.storage.openstack import wsgi
from jacket.api.storage import xmlutil
from jacket.storage import exception
from jacket.storage.i18n import _
from jacket.objects import storage
from jacket import objects
from jacket.storage import utils
from jacket.storage import volume
# Global oslo.config entry point, module logger, and the policy check used by
# every action in this extension.
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
authorize = extensions.extension_authorizer('volume', 'services')
class ServicesIndexTemplate(xmlutil.TemplateBuilder):
    """XML serialization template for the services index response."""

    def construct(self):
        root = xmlutil.TemplateElement('services')
        elem = xmlutil.SubTemplateElement(root, 'service', selector='services')
        elem.set('binary')
        elem.set('host')
        elem.set('zone')
        elem.set('status')
        elem.set('state')
        # Bug fix: ServiceController.index() populates 'updated_at'; the
        # template previously declared 'update_at', so the timestamp was
        # always dropped from the XML serialization.
        elem.set('updated_at')
        elem.set('disabled_reason')
        elem.set('replication_status')
        elem.set('active_backend_id')
        elem.set('frozen')
        return xmlutil.MasterTemplate(root, 1)
class ServicesUpdateTemplate(xmlutil.TemplateBuilder):
    """XML serialization template for the service update response."""

    def construct(self):
        # TODO(uni): template elements of 'host', 'service' and 'disabled'
        # should be deprecated to make ServicesUpdateTemplate consistent
        # with ServicesIndexTemplate. Still keeping it here for API
        # compatibility sake.
        root = xmlutil.TemplateElement('host')
        for attr in ('host', 'service', 'disabled', 'binary', 'status',
                     'disabled_reason', 'replication_status',
                     'active_backend_id', 'frozen'):
            root.set(attr)
        return xmlutil.MasterTemplate(root, 1)
class ServiceController(wsgi.Controller):
    # REST controller for the os-services extension: lists running services
    # and enables/disables/freezes/thaws/fails-over scheduling for them.
    def __init__(self, ext_mgr=None):
        # ext_mgr is used to detect whether the optional
        # 'os-extended-services' extension is loaded at request time.
        self.ext_mgr = ext_mgr
        super(ServiceController, self).__init__()
        self.volume_api = volume.API()
    @wsgi.serializers(xml=ServicesIndexTemplate)
    def index(self, req):
        """Return a list of all running services.
        Filter by host & service name.
        """
        context = req.environ['storage.context']
        authorize(context, action='index')
        detailed = self.ext_mgr.is_loaded('os-extended-services')
        now = timeutils.utcnow(with_timezone=True)
        # Collect optional query-string filters; 'service' is a deprecated
        # alias for 'binary'.
        filters = {}
        if 'host' in req.GET:
            filters['host'] = req.GET['host']
        if 'binary' in req.GET:
            filters['binary'] = req.GET['binary']
        elif 'service' in req.GET:
            filters['binary'] = req.GET['service']
            versionutils.report_deprecated_feature(LOG, _(
                "Query by service parameter is deprecated. "
                "Please use binary parameter instead."))
        # NOTE: all services are fetched and filtered in Python rather than
        # pushing `filters` into the DB query.
        services = objects.ServiceList.get_all(context)
        svcs = []
        for svc in services:
            # filters.get(key, <current value>) makes a missing filter match.
            if svc.host != filters.get("host", svc.host):
                continue
            if svc.binary != filters.get("binary", svc.binary):
                continue
            updated_at = svc.updated_at
            delta = now - (svc.updated_at or svc.created_at)
            delta_sec = delta.total_seconds()
            # Prefer modified_at when it is more recent than updated_at.
            if hasattr(svc, 'modified_at') and svc.modified_at:
                delta_mod = now - svc.modified_at
                if abs(delta_sec) >= abs(delta_mod.total_seconds()):
                    updated_at = svc.modified_at
            # A service is "up" while its last heartbeat is within
            # CONF.service_down_time seconds of now.
            alive = abs(delta_sec) <= CONF.service_down_time
            art = (alive and "up") or "down"
            active = 'enabled'
            if svc.disabled:
                active = 'disabled'
            if updated_at:
                updated_at = timeutils.normalize_time(updated_at)
            ret_fields = {'binary': svc.binary, 'host': svc.host,
                          'zone': svc.availability_zone,
                          'status': active, 'state': art,
                          'updated_at': updated_at}
            # Extra fields only when os-extended-services is loaded.
            if detailed:
                ret_fields['disabled_reason'] = svc.disabled_reason
                if svc.binary == "storage-volume":
                    ret_fields['replication_status'] = svc.replication_status
                    ret_fields['active_backend_id'] = svc.active_backend_id
                    ret_fields['frozen'] = svc.frozen
            svcs.append(ret_fields)
        return {'services': svcs}
    def _is_valid_as_reason(self, reason):
        # A disable reason must be a non-empty string of at most 255 chars
        # after stripping whitespace.
        if not reason:
            return False
        try:
            utils.check_string_length(reason.strip(), 'Disabled reason',
                                      min_length=1, max_length=255)
        except exception.InvalidInput:
            return False
        return True
    def _freeze(self, context, host):
        # Delegate freeze to the volume API (reaches volume.manager).
        return self.volume_api.freeze_host(context, host)
    def _thaw(self, context, host):
        # Delegate thaw to the volume API.
        return self.volume_api.thaw_host(context, host)
    def _failover(self, context, host, backend_id=None):
        # Delegate replication failover to the volume API.
        return self.volume_api.failover_host(context, host, backend_id)
    @wsgi.serializers(xml=ServicesUpdateTemplate)
    def update(self, req, id, body):
        """Enable/Disable scheduling for a service.
        Includes Freeze/Thaw which sends call down to drivers
        and allows volume.manager for the specified host to
        disable the service rather than accessing the service
        directly in this API layer.
        """
        context = req.environ['storage.context']
        authorize(context, action='update')
        ext_loaded = self.ext_mgr.is_loaded('os-extended-services')
        ret_val = {}
        # `id` is the action name from the URL.
        if id == "enable":
            disabled = False
            status = "enabled"
            if ext_loaded:
                ret_val['disabled_reason'] = None
        elif (id == "disable" or
                (id == "disable-log-reason" and ext_loaded)):
            disabled = True
            status = "disabled"
        elif id == "freeze":
            return self._freeze(context, body['host'])
        elif id == "thaw":
            return self._thaw(context, body['host'])
        elif id == "failover_host":
            self._failover(
                context,
                body['host'],
                body.get('backend_id', None)
            )
            # Failover is asynchronous: respond 202 Accepted.
            return webob.Response(status_int=202)
        else:
            raise webob.exc.HTTPNotFound(explanation=_("Unknown action"))
        try:
            host = body['host']
        except (TypeError, KeyError):
            msg = _("Missing required element 'host' in request body.")
            raise webob.exc.HTTPBadRequest(explanation=msg)
        ret_val['disabled'] = disabled
        if id == "disable-log-reason" and ext_loaded:
            reason = body.get('disabled_reason')
            if not self._is_valid_as_reason(reason):
                msg = _('Disabled reason contains invalid characters '
                        'or is too long')
                raise webob.exc.HTTPBadRequest(explanation=msg)
            ret_val['disabled_reason'] = reason
        # NOTE(uni): deprecating service request key, binary takes precedence
        # Still keeping service key here for API compatibility sake.
        service = body.get('service', '')
        binary = body.get('binary', '')
        binary_key = binary or service
        if not binary_key:
            raise webob.exc.HTTPBadRequest()
        try:
            svc = objects.Service.get_by_args(context, host, binary_key)
            if not svc:
                raise webob.exc.HTTPNotFound(explanation=_('Unknown service'))
            svc.disabled = ret_val['disabled']
            if 'disabled_reason' in ret_val:
                svc.disabled_reason = ret_val['disabled_reason']
            svc.save()
        except exception.ServiceNotFound:
            raise webob.exc.HTTPNotFound(explanation=_("service not found"))
        ret_val.update({'host': host, 'service': service,
                        'binary': binary, 'status': status})
        return ret_val
class Services(extensions.ExtensionDescriptor):
    """Services support."""
    name = "Services"
    alias = "os-services"
    namespace = "http://docs.openstack.org/volume/ext/services/api/v2"
    updated = "2012-10-28T00:00:00-00:00"

    def get_resources(self):
        # Expose the 'os-services' resource backed by ServiceController.
        controller = ServiceController(self.ext_mgr)
        return [extensions.ResourceExtension('os-services', controller)]
| |
# Copyright (C) 2010 Jim Washington
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
from __pyjamas__ import wnd, doc
from pyjamas import DOM
from pyjamas import Window
from pyjamas.ui import GlassWidget
from pyjamas.ui.RootPanel import RootPanel
from pyjamas.ui import Event
from pyjamas.Timer import Timer
from pyjamas.dnd.utils import DraggingWidget, isCanceled, \
findDraggable, eventCoordinates, \
getElementUnderMouse
from pyjamas.dnd.DataTransfer import DataTransfer, DragDataStore
from pyjamas.dnd.DragEvent import DragEvent
from pyjamas.dnd import READ_ONLY, READ_WRITE, PROTECTED
# Drag state-machine values used by DNDHelper.dragging.
ACTIVELY_DRAGGING = 3
DRAGGING_NO_MOVEMENT_YET = 2
NOT_DRAGGING = 1
class DNDHelper(object):
"""
DNDHelper is a singleton drag and drop agent.
It acts as dragging/dropping agent for platforms that do not support html5
drag and drop.
"""
    def __init__(self):
        # Widgets registered as potential drop targets (see registerTarget).
        self.dropTargets = []
        # One of NOT_DRAGGING / DRAGGING_NO_MOVEMENT_YET / ACTIVELY_DRAGGING.
        self.dragging = NOT_DRAGGING
        # Re-entrancy guard: True while a drag iteration is being processed.
        self.dragBusy = False
        # Element currently hovered as drop target (see the property below).
        self._currentTargetElement = None
        self.previousDropTarget = None
        # DraggingWidget that follows the mouse as visual feedback.
        self.draggingImage = None
        # Mouse position recorded when the drag started.
        self.origMouseX = 0
        self.origMouseY = 0
        # 'none' / 'copy' / 'link' / 'move' per the HTML5 dnd model.
        self.currentDragOperation = 'none'
        self.data = None
        self.returnTimer = Timer(notify=self.onReturningWidget)
        # Last mouse event seen; used when synthesizing drag events.
        self.mouseEvent = None
        self.dragDataStore = None
    def setCurrentTargetElement(self, element):
        # Property setter: when the hovered drop-target element changes,
        # fire 'dragleave' on the widget being left before recording the
        # new element.
        if self._currentTargetElement is not None:
            if not DOM.compare(self._currentTargetElement, element):
                # leave_event = self.makeDragEvent(self.mouseEvent, 'dragleave',
                #                                  self.currentTargetElement)
                self.fireDNDEvent('dragleave', self.currentTargetElement,
                                  self.currentDropWidget)
                # self.currentDropWidget.onDragLeave(leave_event)
                # self.finalize(leave_event)
        self._currentTargetElement = element
    def getCurrentTargetElement(self):
        # Property getter for the element currently hovered as drop target.
        return self._currentTargetElement
    # Exposing the pair as a property ensures the dragleave bookkeeping in
    # the setter always runs on assignment.
    currentTargetElement = property(getCurrentTargetElement,
                                    setCurrentTargetElement)
    def getElement(self):
        """
        Return the dragged widget's DOM element.
        ie6 GlassWidget impl needs this
        """
        return self.dragWidget.getElement()
def updateDropEffect(self, dataTransfer, event_type):
"""
http://dev.w3.org/html5/spec/dnd.html#dragevent
"""
# default for dragstart, drag, dragleave
dropEffect='none'
if event_type in ['dragover', 'dragenter']:
ea = dataTransfer.getEffectAllowed()
if ea == 'none':
dropEffect = 'none'
elif ea.startswith('copy') or ea == 'all':
dropEffect = 'copy'
elif ea.startswith('link'):
dropEffect = 'link'
elif ea == 'move':
dropEffect = 'move'
else:
dropEffect = 'copy'
elif event_type in ['drop', 'dragend']:
dropEffect = self.currentDragOperation
dataTransfer.dropEffect = dropEffect
def updateDragOperation(self, event):
"""
http://dev.w3.org/html5/spec/dnd.html
"""
dataTransfer = event.dataTransfer
ea = dataTransfer.effectAllowed
de = dataTransfer.dropEffect
if (de == 'copy' and ea in
['uninitialized', 'copy','copyLink', 'copyMove', 'all']):
self.currentDragOperation = 'copy'
elif (de == 'link' and ea in
['uninitialized', 'link', 'copyLink', 'linkMove', 'all']):
self.currentDragOperation = 'link'
elif (de == 'move' and ea in
['uninitialized', 'move', 'copyMove', 'linkMove', 'all']):
self.currentDragOperation = 'move'
else:
self.currentDragOperation = 'none'
    def updateAllowedEffects(self, drag_event):
        # Persist the event's effectAllowed back into the drag data store so
        # it carries over to subsequently synthesized drag events.
        dt = drag_event.dataTransfer
        self.dragDataStore.allowed_effects_state = dt.effectAllowed
def registerTarget(self, target):
"""
Rather than searching the entire document for drop target widgets and
maybe drop targets within widgets, this implementation holds a list of
widgets and searches only within this list for potential drop targets.
"""
if not target in self.dropTargets:
self.dropTargets.append(target)
def unregisterTarget(self, target):
"""
I dont know why, but a widget may no longer want to be registered
as a drop target.
"""
while target in self.dropTargets:
self.dropTargets.remove(target)
    def setDragImage(self, element, x, y):
        # Use *element* as the drag feedback image; (x, y) is the hotspot
        # offset from the element's top-left corner.
        position_absolute = DOM.getStyleAttribute(element,
                            'position') == 'absolute'
        if position_absolute:
            # Absolutely positioned elements: translate the hotspot into
            # page coordinates via the offset parent.
            self.dragLeftOffset = x + DOM.getAbsoluteLeft(
                    element.offsetParent)
            self.dragTopOffset = y + DOM.getAbsoluteTop(
                    element.offsetParent)
        else:
            self.dragLeftOffset = x
            self.dragTopOffset = y
        if element.tagName.lower().endswith('img'):
            # Clone <img> elements so the original is not reparented into
            # the dragging widget.
            src = DOM.getAttribute(element,'src')
            element = DOM.createElement('img')
            DOM.setAttribute(element, 'src', src)
        if not self.draggingImage:
            self.createDraggingImage(element)
        else:
            self.draggingImage.setImage(element)
    def addFeedbackElement(self, element):
        """
        Add *element* to the drag feedback image, creating the dragging
        widget on first use.
        This is called from DataTransfer
        """
        if self.draggingImage:
            self.draggingImage.addElement(element)
        else:
            self.createDraggingImage(element)
    def createDraggingImage(self, element):
        # Wrap *element* in the DraggingWidget that follows the mouse.
        self.draggingImage = DraggingWidget(element)
        return self.draggingImage
    def setDragImageLocation(self, x, y):
        """
        Move the dragging image around, keeping the drag hotspot under the
        mouse position (x, y).
        """
        elt_top = y - self.dragTopOffset
        elt_left = x - self.dragLeftOffset
        # if self.absParent:
        #     ap = self.absParent
        #     elt_top -= int(self.absTop)
        #     elt_left -= int(self.absLeft)
        self.draggingImage.setStyleAttribute('top', elt_top )
        self.draggingImage.setStyleAttribute('left', elt_left)
    def getAbsoluteLeft(self):
        """
        Delegate to the dragged widget's absolute left position.
        GlassWidget wants this
        """
        # return 0
        # if self.absParent:
        #     return self.absParent.getAbsoluteLeft()
        return self.dragWidget.getAbsoluteLeft()
        #return self.origLeft
    def getAbsoluteTop(self):
        """
        Delegate to the dragged widget's absolute top position.
        GlassWidget wants this
        """
        # return 0
        # if self.absParent:
        #     return self.absParent.getAbsoluteTop()
        return self.dragWidget.getAbsoluteTop()
        #return self.origTop
    def makeDragEvent(self, event, type, target=None):
        # Build a synthetic DragEvent of *type*, wrapping the shared drag
        # data store in a fresh DataTransfer with the appropriate dropEffect.
        dt = DataTransfer(self.dragDataStore)
        self.updateDropEffect(dt, type)
        drag_event = DragEvent(event, type, dt, target)
        return drag_event
    def finalize(self, event):
        # Persist the (possibly script-modified) effectAllowed back into the
        # drag data store, then lock the store down after dragstart/drop as
        # the HTML5 dnd model requires.
        self.dragDataStore.allowed_effects_state = \
            event.dataTransfer.effectAllowed
        if event.type in ['dragstart', 'drop']:
            self.dragDataStore.setMode(PROTECTED)
            event.dataTransfer.dataStore = None
    def fireDNDEvent(self, name, target, widget):
        # Set the data store mode for the event type (writable only during
        # dragstart, read-only during drop), dispatch the synthesized event
        # to *widget*, then finalize the store. Returns the event so callers
        # can check cancellation.
        if name == 'dragstart':
            self.dragDataStore.setMode(READ_WRITE)
        elif name == 'drop':
            self.dragDataStore.setMode(READ_ONLY)
        event = self.makeDragEvent(self.mouseEvent, name, target)
        widget.onBrowserEvent(event)
        self.finalize(event)
        return event
def initFeedbackImage(self):
ds = self.dragDataStore
x = 0
y = 0
if ds.bitmap is not None:
if ds.hotspot_coordinate is not None:
offset = ds.hotspot_coordinate
x = offset[0]
y = offset[1]
self.setDragImage(ds.bitmap, x, y)
return
if self.dragDataStore.elements:
for element in self.dragDataStore.elements:
self.addFeedbackElement(element)
    def onMouseMove(self, sender, x, y):
        # Core drag loop: on the first movement after mouse-down, locate a
        # draggable element and fire 'dragstart'; on subsequent movements,
        # move the feedback image and (throttled) run a drag iteration.
        event = DOM.eventGetCurrentEvent()
        self.mouseEvent = event
        button = DOM.eventGetButton(event)
        # Only the left button drags.
        if not button == Event.BUTTON_LEFT:
            return
        ## The following commented code lets the native dnd happen in IE. sucks.
        ## But it may enable dragging our widgets out of IE into other apps.
        # else:
        #     try:
        #         self.dragWidget.getElement().dragDrop()
        #         return
        #     except:
        #         pass
        # Adjust x and y to absolute coordinates.
        x, y = eventCoordinates(event)
        if self.dragging == DRAGGING_NO_MOVEMENT_YET:
            self.origMouseX = x
            self.origMouseY = y
            self.currentDragOperation = 'none'
            fromElement = self.dragWidget.getElement()
            # Is the widget itself draggable?
            try:
                draggable = fromElement.draggable
            except:
                draggable = False
            # if not, find the draggable element at (x, y) in the widget
            if not draggable:
                fromElement = findDraggable(sender.getElement(),
                                self.origMouseX, self.origMouseY)
                # Nothing draggable found. return.
                if fromElement is None:
                    self.dragging = NOT_DRAGGING
                    return
            # Get the location for the dragging widget
            #self.absParent = None
            #self.absParent = self.dragWidget.getParent()
            #self.absLeft = DOM.getStyleAttribute(fromElement, 'left')
            #print self.absLeft
            #self.absTop = DOM.getStyleAttribute(fromElement, 'top')
            #print self.absTop
            #self.origTop = DOM.getAbsoluteTop(fromElement) + parent.getAbsoluteTop()
            #self.origLeft = DOM.getAbsoluteLeft(fromElement) + parent.getAbsoluteLeft()
            self.origTop = DOM.getAbsoluteTop(fromElement)
            self.origLeft = DOM.getAbsoluteLeft(fromElement)
            #self.glassTop = DOM.getAbsoluteTop(fromElement.offsetParent)
            #self.glassLeft = DOM.getAbsoluteTop(fromElement.offsetParent)
            # Compute the hotspot offsets so the feedback image tracks the
            # mouse from where the user grabbed the element.
            position_absolute = DOM.getStyleAttribute(fromElement,
                                    'position') == 'absolute'
            if position_absolute:
                self.dragLeftOffset = (self.origMouseX -
                        DOM.getAbsoluteLeft(fromElement.offsetParent))
                self.dragTopOffset = (self.origMouseY -
                        DOM.getAbsoluteTop(fromElement.offsetParent))
            else:
                self.dragLeftOffset = self.origMouseX - self.origLeft
                self.dragTopOffset = self.origMouseY - self.origTop
            # self.setDragImage(fromElement,
            #                   self.origMouseX - self.origLeft,
            #                   self.origMouseY - self.origTop)
            self.dragDataStore.elements = [fromElement]
            dragStartEvent = self.fireDNDEvent('dragstart', None,
                                               self.dragWidget)
            # A canceled dragstart aborts the drag before any feedback shows.
            if not isCanceled(dragStartEvent):
                self.initFeedbackImage()
                RootPanel().add(self.draggingImage)
                self.setDragImageLocation(x, y)
                self.dragging = ACTIVELY_DRAGGING
                GlassWidget.show(self)
        elif self.dragging == ACTIVELY_DRAGGING:
            # Clear any text selection created while dragging (IE vs. others).
            try:
                doc().selection.empty()
            except:
                wnd().getSelection().removeAllRanges()
            self.setDragImageLocation(x, y)
            # If we are still working on the previous iteration, or if we have
            # done this recently, we'll wait for the next event.
            if self.dragBusy or time.time() - self.drag_time < 0.25:
                return
            self.doDrag(event, x, y)
            self.drag_time = time.time()
def doDrag(self, event, x, y):
    """One throttled iteration of the active drag: fire 'drag', locate a
    drop target under the cursor, and fire 'dragenter'/'dragover' on it.

    Per the emulated DND model used here, a *canceled* dragenter/dragover
    means the target accepts the drop; an uncanceled one disables it
    (currentDragOperation becomes 'none').  dragBusy guards against
    re-entry from onMouseMove while this pass runs.
    """
    self.dragBusy = True
    #self.dragDataStore.dropEffect = 'none'
    drag_event = self.fireDNDEvent('drag', None, self.dragWidget)
    # drag event was not canceled
    if not isCanceled(drag_event):
        target = None
        widget = None
        # Find the most specific element under the cursor and the widget
        # with the drop listener for it.
        for widget in self.dropTargets:
            target = getElementUnderMouse(widget, x, y)
            if target is not None:
                break
        if target:
            drop_widget = widget
            drop_element = target
            # Entered a new element? -> fire 'dragenter' on it.
            if (not self.currentTargetElement or
                    not DOM.compare(drop_element, self.currentTargetElement)):
                # enter_event = self.makeDragEvent(event,'dragenter',
                #                                  drop_element)
                enter_event = self.fireDNDEvent('dragenter', drop_element,
                                                drop_widget)
                # drop_widget.onDragEnter(enter_event)
                # self.finalize(enter_event)
                # canceled dragenter == target accepts; remember it
                if isCanceled(enter_event):
                    self.currentTargetElement = drop_element
                    self.currentDropWidget = drop_widget
            if self.currentTargetElement is not None:
                # disable dropping if over event is not canceled
                # over_event = self.makeDragEvent(event, 'dragover',
                #                                 drop_element)
                over_event = self.fireDNDEvent('dragover', drop_element,
                                               self.currentDropWidget)
                # self.currentDropWidget.onDragOver(over_event)
                # self.finalize(over_event)
                if isCanceled(over_event):
                    self.updateDragOperation(over_event)
                else:
                    self.currentDragOperation = 'none'
                # reflect the resulting operation in the cursor
                self.draggingImage.updateCursor(self.currentDragOperation)
            else:
                self.currentTargetElement = None
        else:
            # nothing under the cursor accepts drops
            self.currentDragOperation = 'none'
    self.dragBusy = False
def onMouseDown(self, sender, x, y):
    """Record a potential drag source on a left-button press.

    Dragging is not started yet: the state becomes
    DRAGGING_NO_MOVEMENT_YET and onMouseMove promotes it once the
    pointer actually moves.  A fresh DragDataStore is prepared.
    """
    self.dragWidget = sender
    current_event = DOM.eventGetCurrentEvent()
    self.mouseEvent = current_event
    if DOM.eventGetButton(current_event) != Event.BUTTON_LEFT:
        return
    self.dragging = DRAGGING_NO_MOVEMENT_YET
    self.drag_time = time.time()
    self.dragDataStore = DragDataStore()
def onMouseUp(self, sender, x, y):
    """Finish the drag on button release.

    If no operation was accepted (or there is no current target), fire
    'dragleave' where applicable and animate the drag image back to its
    origin; otherwise fire 'drop' on the current target and, if the drop
    was canceled (accepted), adopt its dropEffect.  Always ends with a
    'dragend' event on the source widget.
    """
    # event = DOM.eventGetCurrentEvent()
    self.dragging = NOT_DRAGGING
    if self.draggingImage:
        GlassWidget.hide()
        if (self.currentDragOperation == 'none'
                or not self.currentTargetElement):
            # no effective drop: notify the hovered target, then snap back
            if self.currentTargetElement:
                # leave_event = self.makeDragEvent(event, 'dragleave',
                #                                  self.currentTargetElement)
                self.fireDNDEvent('dragleave', self.currentTargetElement,
                                  self.currentDropWidget)
                # self.currentDropWidget.onDragLeave(leave_event)
                # self.finalize(leave_event)
            else:
                self.currentDragOperation = 'none'
            self.returnDrag()
        else:
            # self.dragDataStore.mode = READ_ONLY
            # drop_event = self.makeDragEvent(event, 'drop',
            #                                 self.currentTargetElement)
            drop_event = self.fireDNDEvent('drop', self.currentTargetElement,
                                           self.currentDropWidget)
            #self.dropEffect = self.currentDragOperation
            # self.currentDropWidget.onDrop(drop_event)
            # self.finalize(drop_event)
            # canceled drop == target handled it; keep its dropEffect
            if isCanceled(drop_event):
                self.currentDragOperation = drop_event.dataTransfer.dropEffect
            else:
                self.currentDragOperation = 'none'
            self.zapDragImage()
        #self.dropEffect = self.currentDragOperation
        self.fireDNDEvent('dragend', None, self.dragWidget)
        # dragEnd_event = self.makeDragEvent(event, 'dragend')
        # self.dragWidget.onDragEnd(dragEnd_event)
        # self.finalize(dragEnd_event)
def zapDragImage(self):
    """Detach the floating drag-feedback image and clear the reference."""
    image = self.draggingImage
    RootPanel().remove(image)
    self.draggingImage = None
def returnDrag(self):
    """Animate the drag image back to where the drag started."""
    origin_x, origin_y = self.origLeft, self.origTop
    self.moveItemTo(self.draggingImage, origin_x, origin_y)
def returnXY(self, start, destination, count):
    """Yield intermediate (x, y) positions stepping from *start*
    towards *destination* in *count* equal increments.

    Positions are produced until both coordinates are within 10 pixels
    of the destination; used for the snap-back animation of a drag
    that ended without a drop.

    Fix: the generator previously ended with ``raise StopIteration``,
    which PEP 479 (Python 3.7+) converts into a RuntimeError; a
    generator signals exhaustion simply by returning.
    """
    start_x, start_y = start
    destination_x, destination_y = destination
    diff_x = (start_x - destination_x) / count
    diff_y = (start_y - destination_y) / count
    while (abs(start_x - destination_x) > 10
           or abs(start_y - destination_y) > 10):
        start_x -= diff_x
        start_y -= diff_y
        yield start_x, start_y
def onReturningWidget(self, timer):
    """Timer callback animating the drag image back to its origin.

    Advances the precomputed return path by one step per tick and
    re-schedules itself; when the path is exhausted the image is
    removed.

    Fix: use the builtin ``next()`` instead of the Python-2-only
    ``iterator.next()`` method, which does not exist on Python 3
    generators (``next()`` works on Python 2.6+ as well).
    """
    try:
        next_loc = next(self.return_iterator)
    except StopIteration:
        self.zapDragImage()
        return
    x, y = next_loc
    self.draggingImage.setStyleAttribute('top', str(y))
    self.draggingImage.setStyleAttribute('left', str(x))
    self.returnTimer.schedule(50)
def moveItemTo(self, widget, x, y):
    """Start the timed animation gliding *widget* to (x, y)."""
    self.returnWidget = widget
    destination = (x, y)
    origin = (widget.getAbsoluteLeft(), widget.getAbsoluteTop())
    self.return_iterator = self.returnXY(origin, destination, 10)
    self.returnTimer.schedule(50)
def onMouseEnter(self, sender):
    # Mouse-listener hook; enter events carry no drag semantics here.
    pass
def onMouseLeave(self, sender):
    # Abandon a pending (not yet moved) drag when the pointer leaves
    # the source widget; an active drag is handled by the glass panel.
    if self.dragging == DRAGGING_NO_MOVEMENT_YET:
        self.dragging = NOT_DRAGGING
def onMouseGlassEnter(self, sender):
    # GlassWidget listener hook; nothing to do on enter.
    pass
def onMouseGlassLeave(self, sender):
    # GlassWidget listener hook; nothing to do on leave.
    pass
# Module-level singleton: one DNDHelper shared by every drag source.
dndHelper = None

def initDNDHelper():
    """Create the shared DNDHelper instance on first call (idempotent)."""
    global dndHelper
    if dndHelper is not None:
        return
    dndHelper = DNDHelper()

initDNDHelper()
| |
#!/usr/bin/env python
""" Read data from mongo collection and save it's relational model to csvs """
__author__ = "Yaroslav Litvinov"
__copyright__ = "Copyright 2016, Rackspace Inc."
__email__ = "yaroslav.litvinov@rackspace.com"
import pprint
import os
import sys
import json
import argparse
import logging
from logging import getLogger
import configparser
from collections import namedtuple
# profiling
from pstats import Stats
from cProfile import Profile
# for data input
from mongo_reader.reader import MongoReader
from mongo_reader.reader import mongo_reader_from_settings
# modules mostly used by data output functions
from mongo_schema.schema_engine import SchemaEngine
from mongo_schema.schema_engine import create_tables_load_bson_data
from mongo_schema.schema_engine import log_table_errors
from gizer.opcsv import CsvWriteManager
from gizer.opcsv import NULLVAL
from gizer.opcreate import generate_drop_table_statement
from gizer.opcreate import generate_create_table_statement
from gizer.opcreate import generate_create_index_statement
from gizer.opcreate import INDEX_ID_IDXS
from gizer.opinsert import table_rows_list
from gizer.opinsert import ENCODE_ONLY
from gizer.opmultiprocessing import FastQueueProcessor
from gizer.opconfig import mongo_settings_from_config
from gizer.etl_mongo_reader import EtlMongoReader
CSV_CHUNK_SIZE = 1024 * 1024 * 100 # 100MB
ETL_PROCESS_NUMBER = 8
ETL_QUEUE_SIZE = ETL_PROCESS_NUMBER*2
TablesToSave = namedtuple('TablesToSave', ['rows', 'errors'])
def create_table(sqltable, psql_schema_name, table_prefix):
    """ get drop / create ddl statements """
    # one statement each: DROP TABLE, CREATE TABLE, CREATE INDEX
    statements = [
        generate_drop_table_statement(sqltable, psql_schema_name,
                                      table_prefix),
        generate_create_table_statement(sqltable, psql_schema_name,
                                        table_prefix),
        generate_create_index_statement(sqltable, psql_schema_name,
                                        table_prefix, INDEX_ID_IDXS),
    ]
    return '\n'.join(statements) + '\n'
def merge_dicts(store, append):
    """Merge *append* into *store*, summing the values of shared keys.

    store  -- destination dict, mutated in place and returned
    append -- dict whose values are added to *store* (values must
              support ``+`` with 0, i.e. be numeric counters)

    Fix: use ``dict.items()`` instead of the Python-2-only
    ``iteritems()``; the rest of this script already requires
    Python 3 (configparser mapping access in main()).
    """
    for index_key, index_val in append.items():
        store[index_key] = index_val + store.get(index_key, 0)
    return store
def save_ddl_create_statements(create_statements_file,
                               schema_engine,
                               psql_schema_name,
                               table_prefix):
    """Write DROP/CREATE TABLE + CREATE INDEX ddl for every table of
    *schema_engine* to the already opened *create_statements_file*.

    Falsy schema name / table prefix arguments are normalized to ''.

    Fix: iterate with ``items()`` -- ``iteritems()`` is Python-2-only
    while the script otherwise requires Python 3.
    """
    if not psql_schema_name:
        psql_schema_name = ''
    if not table_prefix:
        table_prefix = ''
    # empty bson data: we only need the table layout, not row contents
    sqltables = create_tables_load_bson_data(schema_engine, None).tables
    ddls = {}
    for tablename, sqltable in sqltables.items():
        ddls[tablename] = create_table(sqltable, psql_schema_name,
                                       table_prefix)
    for table_name in ddls:
        create_statements_file.write(ddls[table_name])
def save_csvs(csm, tables_rows):
    """ write relational tables to csv files.
    tables_rows -- dict {table_name: [rows]} of tables of rows to save"""
    # map each table name to the count reported by the csv writer
    return dict((name, csm.write_csv(name, tables_rows[name]))
                for name in tables_rows)
def async_worker_handle_mongo_rec(schema_engines, rec_collection):
    """Convert one mongo/bson record into relational rows.

    Intended to be called by FastQueueProcessor in a separate process.
    schema_engines -- dict {collection_name: SchemaEngine}
    rec_collection -- tuple (bson record, collection name)
    Returns TablesToSave(rows={table_name: [rows]}, errors=...).

    Fix: iterate tables with ``items()`` (``iteritems()`` is
    Python-2-only while the script otherwise requires Python 3).
    """
    rec = rec_collection[0]
    collection = rec_collection[1]
    schema_engine = schema_engines[collection]
    tables_obj = create_tables_load_bson_data(schema_engine, [rec])
    rows_as_dict = {}
    for table_name, table in tables_obj.tables.items():
        rows_as_dict[table_name] = table_rows_list(table, ENCODE_ONLY,
                                                   null_value=NULLVAL)
    return TablesToSave(rows=rows_as_dict, errors=tables_obj.errors)
# Fast queue helpers
def getargs():
    """Parse the exporter's command line arguments.

    Returns the argparse namespace; a missing -js-request falls back
    to the empty query '{}'.

    Fix: ``type=file`` relies on the Python-2 ``file`` builtin, which
    does not exist on Python 3; use ``argparse.FileType('r')`` like the
    output-file options already do.
    """
    default_request = '{}'
    parser = argparse.ArgumentParser()
    parser.add_argument("--config-file", action="store",
                        help="Config file with settings",
                        type=argparse.FileType('r'), required=True)
    parser.add_argument("-cn", "--collection-name",
                        help="Mongo collection name ", type=str, required=True)
    parser.add_argument("-js-request",
                        help='Mongo db search request in json format. \
default=%s' % (default_request), type=str)
    parser.add_argument("-psql-table-prefix", help="", type=str)
    parser.add_argument("--ddl-statements-file",
                        help="File to save create table statements",
                        type=argparse.FileType('w'), required=False)
    parser.add_argument("-stats-file",
                        help="File to write written record counts",
                        type=argparse.FileType('w'))
    parser.add_argument("--csv-path",
                        help="base path for results",
                        type=str, required=True)
    args = parser.parse_args()
    if args.js_request is None:
        args.js_request = default_request
    return args
def print_profiler_stats(profiler):
    """ profiling results """
    # stop collection, then dump cumulative-time-sorted stats to stderr
    profiler.disable()
    Stats(profiler, stream=sys.stderr).sort_stats('cumulative').print_stats()
def print_etl_stats(errors, all_written, etl_recs_count):
    """ etl summary """
    pretty = pprint.PrettyPrinter(indent=4)
    log_table_errors("etl errors:", errors)
    if not all_written:
        getLogger(__name__).warning("Nothing written!")
    else:
        getLogger(__name__).info("written: " + pretty.pformat(all_written))
    getLogger(__name__).info("Expected Etl records count = %d" % etl_recs_count)
def save_etl_stats(out_file, all_written):
    """Write per-table written-record counts, one "name count" line each.

    out_file    -- writable file object, or None/falsy to skip writing
    all_written -- dict {table_name: written_count}

    Fix: use ``items()`` instead of the Python-2-only ``iteritems()``.
    """
    if out_file:
        for name, value in all_written.items():
            out_file.write(name + " " + str(value) + "\n")
def main():
    """Run the mongo-collection -> relational-csv export.

    Reads settings and the collection's json schema, streams mongo
    records through a pool of worker processes that flatten them into
    relational rows, writes the rows as csv files, optionally saves
    DDL statements and per-table stats, and exits non-zero on reader
    or worker-queue failure.
    """
    #for debugging purposes
    #profiler = Profile() # profiling
    #profiler.enable()
    args = getargs()
    # settings: target psql schema name and the directory of json schemas
    config = configparser.ConfigParser()
    config.read_file(args.config_file)
    schema_name = config['psql']['psql-schema-name']
    schemas_dir = config['misc']['schemas-dir']
    schema_path = os.path.join(schemas_dir, args.collection_name + '.json')
    schema_file = open(schema_path, 'r')
    mongo_settings = mongo_settings_from_config(config, 'mongo')
    mongo_reader = mongo_reader_from_settings(mongo_settings,
                                              args.collection_name,
                                              json.loads(args.js_request))
    schema_engine = SchemaEngine(args.collection_name, [json.load(schema_file)])
    # empty load: only used to discover the set of table names
    table_names = create_tables_load_bson_data(schema_engine, None).tables.keys()
    csm = CsvWriteManager(table_names, args.csv_path, CSV_CHUNK_SIZE)
    # fan record conversion out to ETL_PROCESS_NUMBER worker processes
    etl_mongo_reader = EtlMongoReader(ETL_PROCESS_NUMBER,
                                      ETL_QUEUE_SIZE,
                                      async_worker_handle_mongo_rec,
                                      #1st worker param
                                      {args.collection_name: schema_engine},
                                      {args.collection_name: mongo_reader})
    etl_mongo_reader.execute_query(args.collection_name, json.loads(args.js_request))
    getLogger(__name__).info("Connecting to mongo server " + mongo_settings.host)
    errors = {}
    all_written = {}
    # drain processed batches: write csvs, accumulate counts and errors
    tables_list = etl_mongo_reader.next_processed()
    while tables_list is not None:
        for tables in tables_list:
            all_written = merge_dicts(all_written,
                                      save_csvs(csm, tables.rows))
            errors = merge_dicts(errors, tables.errors)
        tables_list = etl_mongo_reader.next_processed()
    if args.ddl_statements_file:
        save_ddl_create_statements(args.ddl_statements_file,
                                   schema_engine,
                                   schema_name,
                                   args.psql_table_prefix)
    # save csv files
    csm.finalize()
    #for debugging purposes
    #print_profiler_stats(profiler)
    print_etl_stats(errors, all_written, etl_mongo_reader.etl_recs_count)
    save_etl_stats(args.stats_file, all_written)
    # non-zero exit if the reader or the worker queue reported a failure
    exit_code = 0
    if etl_mongo_reader.current_mongo_reader.failed or \
       etl_mongo_reader.fast_queue.error:
        exit_code = 1
    del etl_mongo_reader
    exit(exit_code)
if __name__ == "__main__":
    # Configure root logging to stdout before running the ETL.
    logging.basicConfig(level=logging.INFO,
                        stream=sys.stdout,
                        format='%(asctime)s %(levelname)-8s %(message)s')
    main()
| |
'''
Copyright (c) 2015, Baidu.com, Inc. All Rights Reserved
Use of this source code is governed by a BSD-style license that can be
found in the LICENSE file.
'''
import nose
import time
import unittest
import common
from conf import const
def setup():
    # nose module-level setup hook; nothing to prepare globally.
    pass
def teardown():
    # nose module-level teardown hook; cleanup happens per-test in setUp.
    pass
class Snapshot(unittest.TestCase):
    """Functional tests for tera table snapshots.

    Covers snapshot isolation for writes, deletes and multi-version
    tables, plus snapshot survival across a cluster kill/relaunch, for
    both kv and regular tables.  The bare triple-quoted strings before
    each test method are the original authors' scenario descriptions
    and are kept verbatim.
    """
    def setUp(self):
        # require live tablet servers, no core dumps, and a clean slate
        out = common.runcmd_output("cd %s; ./teracli showts|grep kReady" % (const.teracli_dir), ignore_status=True)
        #assert( len(out.split('\n')) == len(const.tabletnode_list) )
        common.check_core()
        common.cleanup()
        #common.createbyfile('testcase/data/table_schema')

    def tearDown(self):
        # no per-test cleanup; the next test's setUp wipes state
        pass

    '''
    table write w/snapshot
    1. write data set 1
    2. create snapshot
    3. write data set 2
    4. scan w/snapshot, scan w/o snapshot & compare
    :return: None
    '''
    def test_table_write_snapshot(self):
        common.create_singleversion_table()
        table_name = 'test'
        dump_file1 = 'dump1.out'
        dump_file2 = 'dump2.out'
        scan_file1 = 'scan1.out'
        scan_file2 = 'scan2.out'
        common.run_tera_mark([(dump_file1, False)], op='w', table_name=table_name, cf='cf0:q,cf1:q', random='random',
                             key_seed=1, value_seed=10, value_size=100, num=10000, key_size=20)
        snapshot = common.snapshot_op(table_name)
        common.run_tera_mark([(dump_file2, False)], op='w', table_name=table_name, cf='cf0:q,cf1:q', random='random',
                             key_seed=1, value_seed=11, value_size=100, num=10000, key_size=20)
        common.compact_tablets(common.get_tablet_list(table_name))
        # snapshot scan must match data set 1; live scan matches set 2
        common.scan_table(table_name=table_name, file_path=scan_file1, allversion=False, snapshot=snapshot)
        common.scan_table(table_name=table_name, file_path=scan_file2, allversion=False, snapshot=0)
        nose.tools.assert_true(common.compare_files(dump_file1, scan_file1, need_sort=True))
        nose.tools.assert_true(common.compare_files(dump_file2, scan_file2, need_sort=True))

    '''
    table write deletion w/snapshot
    1. write data set 1
    2. create snapshot
    3. delete data set 1
    4. scan w/snapshot, scan w/o snapshot & compare
    :return: None
    '''
    def test_table_write_del_snapshot(self):
        common.create_singleversion_table()
        table_name = 'test'
        dump_file = 'dump.out'
        scan_file1 = 'scan1.out'
        scan_file2 = 'scan2.out'
        common.run_tera_mark([(dump_file, False)], op='w', table_name=table_name, cf='cf0:q,cf1:q', random='random',
                             key_seed=1, value_seed=10, value_size=100, num=10000, key_size=20)
        snapshot = common.snapshot_op(table_name)
        common.run_tera_mark([], op='d', table_name=table_name, cf='cf0:q,cf1:q', random='random',
                             key_seed=1, value_seed=11, value_size=100, num=10000, key_size=20)
        common.compact_tablets(common.get_tablet_list(table_name))
        # snapshot still sees the deleted data; live scan must be empty
        common.scan_table(table_name=table_name, file_path=scan_file1, allversion=False, snapshot=snapshot)
        common.scan_table(table_name=table_name, file_path=scan_file2, allversion=False, snapshot=0)
        nose.tools.assert_true(common.compare_files(dump_file, scan_file1, need_sort=True))
        nose.tools.assert_true(common.file_is_empty(scan_file2))

    '''
    table write w/version w/snapshot
    1. write data set 1, 2
    2. create snapshot
    3. write data set 3, 4
    4. scan w/snapshot, scan w/o snapshot & compare
    :return: None
    '''
    def test_table_write_multiversion_snapshot(self):
        common.create_multiversion_table()
        table_name = 'test'
        dump_file1 = 'dump1.out'
        dump_file2 = 'dump2.out'
        scan_file1 = 'scan1.out'
        scan_file2 = 'scan2.out'
        common.run_tera_mark([(dump_file1, False)], op='w', table_name=table_name, cf='cf0:q,cf1:q', random='random',
                             key_seed=1, value_seed=10, value_size=100, num=10000, key_size=20)
        common.run_tera_mark([(dump_file1, True)], op='w', table_name=table_name, cf='cf0:q,cf1:q', random='random',
                             key_seed=1, value_seed=11, value_size=100, num=10000, key_size=20)
        snapshot = common.snapshot_op(table_name)
        common.run_tera_mark([(dump_file2, False)], op='w', table_name=table_name, cf='cf0:q,cf1:q', random='random',
                             key_seed=1, value_seed=10, value_size=100, num=10000, key_size=20)
        common.run_tera_mark([(dump_file2, True)], op='w', table_name=table_name, cf='cf0:q,cf1:q', random='random',
                             key_seed=1, value_seed=11, value_size=100, num=10000, key_size=20)
        common.compact_tablets(common.get_tablet_list(table_name))
        # allversion scans: snapshot sees sets 1-2, live sees sets 3-4
        common.scan_table(table_name=table_name, file_path=scan_file1, allversion=True, snapshot=snapshot)
        common.scan_table(table_name=table_name, file_path=scan_file2, allversion=True, snapshot=0)
        nose.tools.assert_true(common.compare_files(dump_file1, scan_file1, need_sort=True))
        nose.tools.assert_true(common.compare_files(dump_file2, scan_file2, need_sort=True))

    '''
    kv cluster relaunch
    1. write data set 1
    2. create snapshot
    3. write data set 2
    4. scan w/snapshot, scan w/o snapshot & compare
    5. kill & launch cluster
    6. repeat 4
    :return: None
    '''
    def kv_snapshot_relaunch(self):
        # shared body: callers create the table first (kv or multi-tablet)
        table_name = 'test'
        dump_file1 = 'dump1.out'
        dump_file2 = 'dump2.out'
        scan_file1 = 'scan1.out'
        scan_file2 = 'scan2.out'
        common.run_tera_mark([(dump_file1, False)], op='w', table_name=table_name, random='random',
                             key_seed=1, value_seed=10, value_size=100, num=10000, key_size=20)
        snapshot = common.snapshot_op(table_name)
        common.run_tera_mark([(dump_file2, False)], op='w', table_name=table_name, random='random',
                             key_seed=1, value_seed=11, value_size=100, num=10000, key_size=20)
        common.compact_tablets(common.get_tablet_list(table_name))
        common.scan_table(table_name=table_name, file_path=scan_file1, allversion=True, snapshot=snapshot)
        common.scan_table(table_name=table_name, file_path=scan_file2, allversion=True, snapshot=0)
        nose.tools.assert_true(common.compare_files(dump_file1, scan_file1, need_sort=True))
        nose.tools.assert_true(common.compare_files(dump_file2, scan_file2, need_sort=True))
        # snapshot must survive a full cluster restart
        common.cluster_op('kill')
        common.cluster_op('launch')
        time.sleep(2)
        common.scan_table(table_name=table_name, file_path=scan_file1, allversion=False, snapshot=snapshot)
        common.scan_table(table_name=table_name, file_path=scan_file2, allversion=False, snapshot=0)
        nose.tools.assert_true(common.compare_files(dump_file1, scan_file1, need_sort=True))
        nose.tools.assert_true(common.compare_files(dump_file2, scan_file2, need_sort=True))

    '''
    kv cluster relaunch
    1. write data set 1
    2. create snapshot
    3. write data set 2
    4. scan w/snapshot, scan w/o snapshot & compare
    5. kill & launch cluster
    6. repeat 4
    :return: None
    '''
    def test_kv_snapshot_relaunch(self):
        common.create_kv_table()
        self.kv_snapshot_relaunch()

    '''
    table cluster relaunch
    1. write data set 1
    2. create snapshot
    3. write data set 2
    4. scan w/snapshot, scan w/o snapshot & compare
    5. kill & launch cluster
    6. repeat 4
    :return: None
    '''
    def table_snapshot_relaunch(self):
        # shared body: callers create the table first (single/multi tablet)
        table_name = 'test'
        dump_file1 = 'dump1.out'
        dump_file2 = 'dump2.out'
        scan_file1 = 'scan1.out'
        scan_file2 = 'scan2.out'
        common.run_tera_mark([(dump_file1, False)], op='w', table_name=table_name, cf='cf0:q,cf1:q', random='random',
                             key_seed=1, value_seed=10, value_size=100, num=10000, key_size=20)
        snapshot = common.snapshot_op(table_name)
        common.run_tera_mark([(dump_file2, False)], op='w', table_name=table_name, cf='cf0:q,cf1:q', random='random',
                             key_seed=1, value_seed=11, value_size=100, num=10000, key_size=20)
        common.compact_tablets(common.get_tablet_list(table_name))
        common.scan_table(table_name=table_name, file_path=scan_file1, allversion=True, snapshot=snapshot)
        common.scan_table(table_name=table_name, file_path=scan_file2, allversion=True, snapshot=0)
        nose.tools.assert_true(common.compare_files(dump_file1, scan_file1, need_sort=True))
        nose.tools.assert_true(common.compare_files(dump_file2, scan_file2, need_sort=True))
        # snapshot must survive a full cluster restart
        common.cluster_op('kill')
        common.cluster_op('launch')
        time.sleep(2)
        common.scan_table(table_name=table_name, file_path=scan_file1, allversion=True, snapshot=snapshot)
        common.scan_table(table_name=table_name, file_path=scan_file2, allversion=True, snapshot=0)
        nose.tools.assert_true(common.compare_files(dump_file1, scan_file1, need_sort=True))
        nose.tools.assert_true(common.compare_files(dump_file2, scan_file2, need_sort=True))

    '''
    table cluster relaunch
    1. write data set 1
    2. create snapshot
    3. write data set 2
    4. scan w/snapshot, scan w/o snapshot & compare
    5. kill & launch cluster
    6. repeat 4
    :return: None
    '''
    def test_table_snapshot_relaunch(self):
        common.create_singleversion_table()
        self.table_snapshot_relaunch()

    '''
    kv snapshot w/multi tablets
    1. test_kv_snapshot_relaunch()
    :return:
    '''
    def test_kv_snapshot_multitablets(self):
        common.createbyfile(schema=const.data_path + 'kv.schema', deli=const.data_path + 'deli.10')
        self.kv_snapshot_relaunch()

    '''
    table snapshot w/multi tablets
    1. test_tablev_snapshot_relaunch()
    :return:
    '''
    def test_table_snapshot_multitablets(self):
        common.createbyfile(schema=const.data_path + 'table.schema', deli=const.data_path + 'deli.10')
        self.table_snapshot_relaunch()
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# Copyright (c) 2011, Vadim Shlyakhov
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
###############################################################################
from __future__ import with_statement
import os
import logging
import locale
import csv
from tiler_functions import *
try:
from osgeo import gdal
from osgeo import osr
from osgeo.gdalconst import *
gdal.TermProgress = gdal.TermProgress_nocb
except ImportError:
import osr
import gdal
from gdalconst import *
def dms2dec(degs='0', mins='0', ne='E', sec='0'):
    """Convert degrees/minutes/seconds strings to decimal degrees.

    *ne* is the hemisphere letter: 'W' or 'S' makes the result negative.
    """
    magnitude = float(degs) + float(mins) / 60 + float(sec) / 3600
    if ne in ('W', 'S'):
        return -magnitude
    return magnitude
def dst_path(src, dst_dir, ext='', template='%s'):
    """Build a destination path for *src*.

    Applies *template* to the bare file name, appends *ext*, and places
    the result in *dst_dir* -- or next to the source when *dst_dir* is
    empty.
    """
    src_dir, src_file = os.path.split(src)
    base, _src_ext = os.path.splitext(src_file)
    dest = (template % base) + ext
    target_dir = dst_dir or src_dir
    if target_dir:
        dest = '%s/%s' % (target_dir, dest)
    ld('base', base, 'dest', dest, 'src', src)
    return dest
class Opt(object):
    """Lightweight options bag: ``Opt(a=1).a == 1``; unknown names
    resolve to None (and are cached as None in the backing dict)."""

    def __init__(self, **kwargs):
        self.dict = kwargs

    def __getattr__(self, name):
        # only reached for names missing on the instance; setdefault
        # both returns and records None for unknown options
        return self.dict.setdefault(name, None)
###############################################################################
class RefPoints(object):
    'source geo-reference points and polygons'
###############################################################################
    @staticmethod
    def transpose(ref_lst): # helper function for children classes
        # rows -> columns: [(id, pix, ll), ...] -> [[ids], [pixs], [lls]]
        return [list(i) for i in zip(*ref_lst)]

    def __init__(self,owner,
            ids=None,pixels=None,latlong=None,cartesian=None,zone=None,hemisphere=None):
        # owner -- the layer/map this point set belongs to (supplies srs/dtm);
        # points come either as lat/long or as cartesian grid coordinates.
        self.owner=owner
        self.ids=ids
        self.pixels=pixels
        self.latlong=latlong
        self.cartesian=cartesian
        self.zone=zone
        self.hemisphere=hemisphere
        ld('RefPoints',self.__dict__)
        # point count from the first non-empty coordinate list
        # (Python 2: filter() returns a list here)
        nrefs=len(filter(None,(self.pixels,self.latlong,self.cartesian))[0])
        if not self.ids:
            self.ids=map(str,range(1,nrefs+1))
        if nrefs == 2:
            # with only two reference points, synthesize two more by
            # swapping x/y between them so GCP fitting has four corners
            logging.warning(' Only 2 reference points: assuming the chart is north alligned')
            self.ids += ['Extra03','Extra04']
            for i in filter(None,(self.pixels,self.latlong,self.cartesian,self.zone,self.hemisphere)):
                try: # list of coordinates? -- swap x and y between them
                    i.append((i[0][0],i[1][1]))
                    i.append((i[1][0],i[0][1]))
                except IndexError: # just copy them
                    i.append(i[0])
                    i.append(i[1])
            ld('RefPoints extra',self.__dict__)
        self.ids=[s.encode('utf-8') for s in self.ids]

    def srs(self):
        # the SRS string is owned by the parent map/layer
        return self.owner.srs

    def __iter__(self):
        # iterate as (id, pixel_xy, projected_xy) triples
        for i in zip(self.ids,self.pix_coords(),self.proj_coords()):
            yield i

    def pix_coords(self,dataset=None):
        if self.pixels:
            return self.pixels
        # no pixel locations given: map the projected points back into
        # pixel space with an inverse GCP_TPS transform over *dataset*
        p_dst=self.proj_coords()
        ld(p_dst)
        pix_tr=MyTransformer(dataset,METHOD='GCP_TPS')
        p_pix=pix_tr.transform(p_dst,inv=True)
        ld(p_pix)
        return [(p[0],p[1]) for p in p_pix]

    def grid2coord(self): # to re-implemented by children if applicable
        return self.cartesian

    def proj_coords(self):
        if self.cartesian:
            return self.grid2coord()
        # lat/long points: apply optional datum (dtm) shift, then
        # reproject from the geographic CS into the owner's SRS
        dtm=self.owner.dtm
        if not dtm:
            dtm=[0,0]
        latlong=[(lon+dtm[0],lat+dtm[1]) for lon,lat in self.latlong]
        srs_tr=MyTransformer(SRC_SRS=proj_cs2geog_cs(self.owner.srs),DST_SRS=self.owner.srs)
        coords=srs_tr.transform(latlong)
        return coords

    def over_180(self):
        # detect charts crossing meridian 180: when the pixel-leftmost
        # point has a larger longitude than the pixel-rightmost one,
        # return that longitude (used to pick an artificial +lon_0)
        if not self.cartesian: # refs are lat/long
            leftmost=min(zip(self.pixels,self.latlong),key=lambda r: r[0][0])
            rightmost=max(zip(self.pixels,self.latlong),key=lambda r: r[0][0])
            ld('leftmost',leftmost,'rightmost',rightmost)
            if leftmost[1][0] > rightmost[1][0]:
                return leftmost[1][0]
        return None
###############################################################################
class LatLonRefPoints(RefPoints):
    'geo-reference points with geodetic coordinates initialised from a single list'
###############################################################################
    def __init__(self, owner, ref_lst):
        # ref_lst rows are (id, pixel_xy, lat_long[, ...]); transpose to
        # per-column lists and pass them to RefPoints by keyword
        columns = self.transpose(ref_lst)[:3]
        keywords = dict(zip(['ids', 'pixels', 'latlong'], columns))
        super(LatLonRefPoints, self).__init__(owner, **keywords)
###############################################################################
class SrcMap(object):
###############################################################################
    """Base class for a source map file.

    On construction it loads datum/ellipsoid/projection definitions and
    reads the map header.  load_data() and get_header() are subclass
    hooks -- they are not defined in this base class.
    """
    def __init__(self,src_file,options=None):
        # options -- an Opt-style bag of command line switches
        self.options=options
        gdal.UseExceptions()
        self.load_data() # load datum definitions, ellipses, projections
        self.file=src_file.decode(locale.getpreferredencoding(),'ignore')
        self.header=self.get_header() # Read map header

    def load_csv(self,csv_file,csv_map):
        'load datum definitions, ellipses, projections from a file'
        # csv_map maps a row tag (first column) to (dest_dict, unpack_fn);
        # rows with unknown tags or too few columns are ignored
        csv.register_dialect('strip', skipinitialspace=True)
        with open(os.path.join(data_dir(),csv_file),'rb') as data_f:
            data_csv=csv.reader(data_f,'strip')
            for row in data_csv:
                row=[s.decode('utf-8') for s in row]
                #ld(row)
                try:
                    dct,unpack=csv_map[row[0]]
                    unpack(dct,row)
                except IndexError:
                    pass
                except KeyError:
                    pass
        for dct,func in csv_map.values():
            ld(dct)

    def ini_lst(self,dct,row):
        # unpack helper: store the raw remainder of the row as a list
        dct[row[1]]=row[2:]

    def ini_map(self,dct,row):
        # unpack helper: store 'key:value' items of the row as a dict
        dct[row[1]]=dict((i.split(':',1) for i in row[2:] if ':' in i))

    # def get_layers(self):
    #     pass
###############################################################################
class SrcLayer(object):
###############################################################################
    """One layer of a source map.

    Opens the layer's raster with GDAL, collects its reference points,
    estimates the spatial reference system, and can convert the result
    into a georeferenced VRT with GCPs and an optional cutline.
    get_name()/get_raster()/get_refs()/get_datum*()/get_proj*()/
    get_plys() are subclass hooks -- not defined in this base class.
    NOTE: Python 2 code (print statement, str.decode on paths).
    """
    def __init__(self,src_map,data):
        self.map=src_map
        self.data=data
        self.name=self.get_name()
        self.img_file=self.get_raster()
        logging.info(' %s : %s (%s)' % (self.map.file,self.name,self.img_file))
        print "gdal opening: " + self.img_file
        self.raster_ds = gdal.Open(self.img_file.encode(locale.getpreferredencoding()),GA_ReadOnly)
        self.dtm=None
        self.refs=self.get_refs() # fetch reference points
        self.srs,self.dtm=self.get_srs() # estimate SRS

    def __del__(self):
        # drop the GDAL dataset reference so the underlying file closes
        self.raster_ds = None
        del self.raster_ds

    def get_srs(self): # redefined in reader_kml.py
        'returns srs for the map, and DTM shifts if any'
        # assembles a proj4 definition from (in priority order) command
        # line options, then the chart's own projection/datum metadata
        options=self.map.options
        if options.srs:
            return(options.srs,None)
        dtm=None
        proj4=[]
        logging.info(' %s, %s' % (self.get_datum_id(),self.get_proj_id()))
        # compute chart's projection
        if options.proj:
            proj4.append(options.proj)
        else:
            proj4=self.get_proj()
        # setup a central meridian artificialy to allow charts crossing meridian 180
        leftmost=self.refs.over_180()
        if leftmost and '+lon_0=' not in proj4[0]:
            proj4.append(' +lon_0=%i' % int(leftmost))
        # compute chart's datum
        if options.datum:
            proj4.append(options.datum)
        elif options.force_dtm or options.dtm_shift:
            dtm=self.get_dtm() # get northing, easting to WGS84 if any
            proj4.append('+datum=WGS84')
        elif not '+proj=' in proj4[0]:
            pass # assume datum is defined already
        else:
            datum=self.get_datum()
            proj4.extend(datum)
        proj4.extend(['+nodefs']) # '+wktext',
        ld('proj4',proj4)
        return ' '.join(proj4).encode('utf-8'),dtm

    def convert(self,dest=None):
        """Write a georeferenced VRT copy of the raster (plus optional
        .gmt cut polygon) next to the chosen base name."""
        options=self.map.options
        # pick the output base name: explicit dest, or derived from the
        # layer/map/raster name depending on options
        if dest:
            base=os.path.split(dest)[0]
        else:
            if options.after_name:
                name_patt=self.name
            elif options.after_map:
                name_patt=self.map.file
            else:
                name_patt=self.img_file
            base=dst_path(name_patt,options.dst_dir)
            if options.long_name:
                # append the layer name, keeping only filesystem-safe chars
                base+=' - ' + "".join([c for c in self.name
                    if c .isalpha() or c.isdigit() or c in '-_.() '])
        dst_dir=os.path.split(base)[0]
        out_format='VRT'
        ext='.'+out_format.lower()
        dst_file= os.path.basename(base+ext) # output file
        # chdir into the destination so the VRT's relative references work;
        # always restore the original cwd
        try:
            start_dir=os.getcwd()
            if dst_dir:
                os.chdir(dst_dir)
            dst_drv = gdal.GetDriverByName(out_format)
            dst_ds = dst_drv.CreateCopy(dst_file.encode(locale.getpreferredencoding()),
                self.raster_ds,0)
            dst_ds.SetProjection(self.srs)
            #double x = 0.0, double y = 0.0, double z = 0.0, double pixel = 0.0,
            #double line = 0.0, char info = "", char id = ""
            gcps=[gdal.GCP(c[0],c[1],0,p[0],p[1],'',i) for i,p,c in self.refs]
            dst_ds.SetGCPs(gcps,self.refs.srs())
            dst_geotr=gdal.GCPsToGeoTransform(gcps) # if len(gcps) < 5 else (0.0, 1.0, 0.0, 0.0, 0.0, 1.0)
            dst_ds.SetGeoTransform(dst_geotr)
            poly,gmt_data=self.cut_poly(dst_ds)
            if poly:
                dst_ds.SetMetadataItem('CUTLINE',poly)
            if self.name:
                dst_ds.SetMetadataItem('DESCRIPTION',self.name.encode('utf-8'))
            del dst_ds # close dataset
            # re_sub_file(dst_file, [
            #     ('^.*<GeoTransform>.*\n',''),
            #     ('^.*<SRS>.*\n','')
            #     ])
        finally:
            os.chdir(start_dir)
        if options.get_cutline: # print cutline then return
            print poly
            return
        if gmt_data and options.cut_file: # create shapefile with a cut polygon
            with open(base+'.gmt','w+') as f:
                f.write(gmt_data)

    # OGR GMT template used for the cut polygon (%s: srs, %s: points)
    gmt_templ='''# @VGMT1.0 @GPOLYGON
# @Jp"%s"
# FEATURE_DATA
>
# @P
%s
'''

    def cut_poly(self,dst_ds):
        """Return (wkt_multipolygon, gmt_text) for the layer's cut
        polygon, or ('','') when there is nothing to cut."""
        plys=self.get_plys()
        if not plys:
            return '',''
        pix_lst=plys.pix_coords(dst_ds)
        # check if the raster really needs cutting
        width=dst_ds.RasterXSize
        height=dst_ds.RasterYSize
        inside=[i for i in pix_lst # check if the polygon is inside the image border
            if (i[0] > 0 or i[0] < width) or (i[1] > 0 or i[1] < height)]
        if not inside:
            return '',''
        # Create cutline
        poly_shape=self.gmt_templ % (self.refs.srs(),'\n'.join(
            ['%r %r' % (i[0],i[1]) for i in plys.proj_coords()]))
        poly_wkt='MULTIPOLYGON(((%s)))' % ','.join(['%r %r' % tuple(i) for i in pix_lst]) # Create cutline
        return poly_wkt,poly_shape

# SrcLayer
###############################################################################
| |
# Imports
## requests - to make HTTP requests
import requests
## string - primarily for string constants
import string
## decimal - more precise mathematical operations
from decimal import Decimal
## datetime - for handling date/time information
import datetime
# Classless functions
## getPageData
def getPageData(func):
    """
    Decorator for parser methods: fetches the page data (via
    self.getData()) on first use so the wrapped method can rely on
    self.pageData being present.
    """
    from functools import wraps

    @wraps(func)  # preserve the wrapped method's name and docstring
    def func_wrapper(self, *args, **kwargs):
        if not hasattr(self, 'pageData'):
            self.getData()
        return func(self, *args, **kwargs)
    return func_wrapper
## checkNonexistent
def checkNonexistent(func):
    """
    Decorator: runs the wrapped method only when the object exists
    (self.exists is absent or truthy); otherwise returns None.
    """
    from functools import wraps

    @wraps(func)  # preserve the wrapped method's name and docstring
    def func_wrapper(self, *args, **kwargs):
        if not hasattr(self, 'exists') or self.exists:
            return func(self, *args, **kwargs)
        return None  # object is marked nonexistent
    return func_wrapper
# Error classes
## ContentError
class ContentError(Exception):
    """
    Raised when page content does not contain the markers a calling
    routine expects to find.
    """
# Main parser class
class WLParser(object):
    """
    Builds a query URL against a base URL (warlight.net by default)
    and offers helpers for pulling typed values out of the fetched
    page text.
    """

    def __init__(self, baseURL=None, **kwargs):
        if baseURL is None:
            baseURL = "https://www.warlight.net/?"
        self.URL = self.makeURL(baseURL, **kwargs)

    ## makeURL
    @staticmethod
    def makeURL(baseURL, **kwargs):
        """
        Helper for the constructor: append the given keyword arguments
        to baseURL as &key=value pairs (adding a trailing '?' first if
        the base does not already end with one).
        """
        url = baseURL
        if url[-1] != "?":
            url += "?"
        pairs = [str(key) + "=" + str(val) for key, val in kwargs.items()]
        return url + "&".join(pairs)

    ## getData
    def getData(self, loop=True):
        """
        Retrieve the page body via an HTTP GET and store it in
        self.pageData.
        @PARAMS
        'loop' (bool): retry until the request succeeds (default: True);
                       when False a failed request raises immediately
        """
        while True:
            try:
                response = requests.get(self.URL)
                break
            except requests.exceptions.RequestException as err:
                if not loop:
                    raise err
        self.pageData = response.text

    ## getValueFromBetween
    @staticmethod
    def getValueFromBetween(text, before, after):
        """
        Return the substring of 'text' between the markers 'before'
        and 'after'.  A None marker is treated as the empty string;
        a marker absent from 'text' raises ContentError.
        """
        before = "" if before is None else before
        after = "" if after is None else after
        if (before not in text):
            raise ContentError("Missing 'before' marker: " + before +
                               " in " + text)
        if (after not in text):
            raise ContentError("Missing 'after' marker! " + after +
                               " in " + text)
        value = text[text.find(before) + len(before):]
        if after == "":
            return value
        # NOTE: when 'after' only occurs before the 'before' marker,
        # find() yields -1 and the last character is dropped -- this
        # mirrors the historical behaviour.
        return value[:value.find(after)]

    ## getTypedValue
    @staticmethod
    def getTypedValue(text, marker, typeRange, check=True):
        """
        Return the run of characters drawn from 'typeRange' that
        directly follows 'marker' in 'text', stopping at the first
        character outside the range.
        @PARAMS
        'text' (string): text field to extract data from
        'marker' (string): known marker occurring before the data
        'typeRange' (string): every character allowed in the result
        'check' (bool): default True; raise ContentError when no
        characters of the desired type follow the marker
        """
        if marker not in text:
            raise ContentError("Missing marker: " + marker + " in " +
                               text)
        tail = text[text.find(marker) + len(marker):]
        collected = []
        for ch in tail:
            if ch not in typeRange:
                break
            collected.append(ch)
        if check and not collected:
            raise ContentError("No content in specified range!")
        return "".join(collected)

    ## getNumericValue
    @classmethod
    def getNumericValue(cls, text, marker):
        """getTypedValue wrapper returning the float after 'marker'."""
        allowed = string.digits + ".+-"
        return float(cls.getTypedValue(text, marker, allowed))

    ## getIntegerValue
    @classmethod
    def getIntegerValue(cls, text, marker):
        """getTypedValue wrapper returning the integer after 'marker'."""
        allowed = string.digits + "-+"
        return int(cls.getTypedValue(text, marker, allowed))

    ## getLetterValue
    @classmethod
    def getLetterValue(cls, text, marker):
        """getTypedValue wrapper returning the alphabetic run after 'marker'."""
        allowed = string.ascii_lowercase + string.ascii_uppercase
        return cls.getTypedValue(text, marker, allowed)

    ## timeConvert
    @staticmethod
    def timeConvert(timeString):
        """
        Convert a duration such as '2 days, 3 hours' into a number of
        hours (float).
        """
        words = timeString.replace(",", "").split(" ")
        # Hours per unit; 2 * 365.2425 == 24 * 365.2425 / 12, i.e. the
        # average Gregorian month expressed in hours.
        hoursPer = {}
        for unit, hours in (("year", Decimal(24) * Decimal(365.2425)),
                            ("month", Decimal(2) * Decimal(365.2425)),
                            ("day", Decimal(24)),
                            ("hour", Decimal(1)),
                            ("minute", Decimal(1) / Decimal(60)),
                            ("second", Decimal(1) / Decimal(3600))):
            hoursPer[unit] = hours
            hoursPer[unit + "s"] = hours
        total = Decimal(0)
        for unit in hoursPer:
            if unit not in words:
                continue
            pos = words.index(unit)
            if pos == 0:
                continue  # unit with no preceding amount
            amount = int(words[pos - 1])
            total += Decimal(amount) * Decimal(hoursPer[unit])
        return float(total)

    ## getDate
    @staticmethod
    def getDate(dateString):
        """
        Build a datetime.date from an American-format (mm/dd/yyyy)
        date string.
        """
        parts = dateString.split('/')
        return datetime.date(year=int(parts[2]), month=int(parts[0]),
                             day=int(parts[1]))

    ## getDateTime
    @staticmethod
    def getDateTime(dateTimeString):
        """
        Build a datetime.datetime from an American-format date followed
        by an hh:mm:ss time ('mm/dd/yyyy hh:mm:ss').
        """
        dateString, timeString = dateTimeString.split(" ")
        d = dateString.split('/')
        t = timeString.split(':')
        return datetime.datetime(year=int(d[2]), month=int(d[0]),
                                 day=int(d[1]), hour=int(t[0]),
                                 minute=int(t[1]), second=int(t[2]))

    ## trimString
    @staticmethod
    def trimString(data):
        """Remove leading and trailing spaces and newlines."""
        return data.strip(" \n")
| |
# -*- coding: utf-8 -*-
import os.path
import re
import xbmcgui
import urllib
import common
from string import lower
from entities.CListItem import CListItem
from xml.dom.minidom import parse as parseXml
from utils import fileUtils as fu
from utils.xbmcUtils import getKeyboard, getImage
from utils import regexUtils
class FavouritesManager:
def __init__(self, favouritesFolder):
self.cfgBuilder = CfgBuilder()
self._favouritesFolder = favouritesFolder
if not os.path.exists(self._favouritesFolder):
os.makedirs(self._favouritesFolder, 0777)
self._favouritesFile = os.path.join(self._favouritesFolder, 'favourites.cfg')
if not os.path.exists(self._favouritesFile):
self._createVirtualFolder('Favourites', self._favouritesFile)
self._favouritesFoldersFolder = os.path.join(self._favouritesFolder, 'favfolders')
if not os.path.exists(self._favouritesFoldersFolder):
os.mkdir(self._favouritesFoldersFolder)
# ----------------------------------------------------------
# Helper functions
# ----------------------------------------------------------
def _getFullPath(self, path):
if path.startswith('favfolders'):
path = os.path.normpath(os.path.join(self._favouritesFolder, path))
return path
def _getShortPath(self, path):
if not path.startswith('favfolders'):
path = os.path.normpath(path).replace(self._favouritesFolder, '').strip(os.path.sep)
return path
def _parseXbmcFavourites(self):
favItems = None
xbmcFavsFile = common.Paths.xbmcFavouritesFile
if os.path.exists(xbmcFavsFile):
doc = parseXml(xbmcFavsFile)
xbmcFavs = doc.documentElement.getElementsByTagName('favourite')
favItems = []
for node in xbmcFavs:
favItem = XbmcFavouriteItem.fromXmlNode(node)
favItems.append(favItem)
return favItems
def _createItem(self, title, m_type, icon, fanart, cfg, url, catcher):
data = self.cfgBuilder.buildItem(title, m_type, url, icon, fanart, cfg, catcher)
return data
def _createFavourite(self, item):
title = item.getInfo('title')
m_type = item.getInfo('type')
icon = item.getInfo('icon')
fanart = item.getInfo('fanart')
cfg = item.getInfo('cfg')
url = item.getInfo('url')
catcher = item.getInfo('catcher')
if m_type == None:
m_type = 'rss'
print(">>>>", title,m_type ,icon ,fanart ,cfg ,url ,catcher )
return self._createItem(title, m_type, icon, fanart, cfg, url, catcher)
# ----------------------------------------------------------
# Virtual folders
# ----------------------------------------------------------
def _virtualFolderSelection(self, name=None, path=None):
print(">>NAme",name,path)
if not name:
name = 'Favourites'
if not path:
path = self._favouritesFile
print(">>NAme",name,path)
fullpath = self._getFullPath(path)
items = self._parseVirtualFolder(fullpath)
virtualFolders = filter(lambda x: self._isVirtualFolder(x), items)
if len(virtualFolders) > 0:
menuItems = ['root(' + name + ')']
menuItems += map(lambda x: x['title'], virtualFolders)
select = xbmcgui.Dialog().select('Select destination', menuItems)
if select == -1:
return None
elif select == 0:
return fullpath
else:
selItem = virtualFolders[select-1]
return self._virtualFolderSelection(selItem['title'], selItem['url'])
else:
return fullpath
def _isVirtualFolder(self, item):
url = item.getInfo('url')
return url and (url.startswith("favfolders/") or url.startswith("favfolders\\"))
def _getVirtualFoldersList(self):
virtualFolders = os.listdir(self._favouritesFoldersFolder)
return virtualFolders
def _createVirtualFolder(self, name, path):
fullPath = self._getFullPath(path)
data = self.cfgBuilder.buildHeader(name)
fu.setFileContent(fullPath, data)
def _removeVirtualFolder(self, path, removeSubfolders=False):
fullPath = self._getFullPath(path)
if removeSubfolders:
items = self._parseVirtualFolder(fullPath)
subFolders = filter(lambda x: self._isVirtualFolder(x), items)
for s in subFolders:
self._removeVirtualFolder(s['url'], True)
if os.path.exists(fullPath) and os.path.isfile(fullPath):
os.remove(fullPath)
def _parseVirtualFolder(self, path):
fullpath = self._getFullPath(path)
data = fu.getFileContent(fullpath)
data = data.replace('\r\n', '\n').split('\n')
items = []
for m in data:
if m and m[0] != '#':
index = m.find('=')
if index != -1:
key = lower(m[:index]).strip()
value = m[index+1:]
index = value.find('|')
if value[:index] == 'sports.devil.locale':
value = common.translate(int(value[index+1:]))
elif value[:index] == 'sports.devil.image':
value = os.path.join(common.Paths.imgDir, value[index+1:])
if key == 'title':
tmp = CListItem()
tmp['title'] = value
elif key == 'url':
tmp['url'] = value
items.append(tmp)
tmp = None
elif tmp != None:
tmp[key] = value
return items
# ----------------------------------------------------------
# Add item
# ----------------------------------------------------------
def add(self, rootFolder=None):
menuItems = ["Add folder", "Add Filmkodi item", "Add xbmc favourite"]
select = xbmcgui.Dialog().select('Choose', menuItems)
if select == 0:
name = getKeyboard(default = '', heading = 'Set name')
if name and len(name) > 0:
return self._addFolder(name, rootFolder)
elif select == 1:
common.showInfo('Please browse through Filmkodi and use \ncontext menu entry "Add to Filmkodi favourites"')
elif select == 2:
return self._addXbmcFavourite(rootFolder)
return False
def _addXbmcFavourite(self, root):
xbmcFavs = self._parseXbmcFavourites()
if xbmcFavs is None:
common.showInfo('Favourites file not found')
elif len(xbmcFavs) == 0:
common.showInfo('No favourites found')
else:
select = xbmcgui.Dialog().select('Choose' , map(lambda x: x.title, xbmcFavs))
if select == -1:
return False
else:
item = xbmcFavs[select].convertToCListItem()
self.addItem(item, root)
return True
return False
def _addFolder(self, name, rootFolder=None):
# create cfg
filename = urllib.quote_plus(fu.cleanFilename(name))
virtualFolderFile = filename + '.cfg'
physicalFolder = os.path.normpath(self._favouritesFoldersFolder)
virtualFolderPath = os.path.join(physicalFolder, virtualFolderFile)
if os.path.exists(virtualFolderPath):
prefix = filename + '-'
suffix = '.cfg'
virtualFolderFile = fu.randomFilename(directory=physicalFolder, prefix=prefix, suffix=suffix)
virtualFolderPath = os.path.join(physicalFolder, virtualFolderFile)
self._createVirtualFolder(name, virtualFolderPath)
# create link
linkToFolder = self._createItem(name, 'rss', '', '', None, 'favfolders/' + virtualFolderFile)
if not rootFolder or os.path.normpath(rootFolder) == self._favouritesFile:
rootFolder = self._favouritesFile
fu.appendFileContent(rootFolder, linkToFolder)
return True
def addItem(self, item, root=None):
print(">>>>>>>>>>>>>>>>>",item,root)
target = root
if not target:
# if virtual folders exist
virtualFolder = self._virtualFolderSelection()
if virtualFolder:
target = virtualFolder
print(">>>>>>>>>>>>>>>>>",target)
if target and os.path.exists(target):
fav = self._createFavourite(item)
fu.appendFileContent(target, fav)
# ----------------------------------------------------------
# Change or remove item
# ----------------------------------------------------------
def editItem(self, item):
menuItems = ["Change label", "Change icon", "Change fanart"]
virtualFolders = self._getVirtualFoldersList()
if len(virtualFolders) > 0 and not item.getInfo('url').startswith('favfolders/'):
menuItems.append("Move to folder")
select = xbmcgui.Dialog().select('Choose' , menuItems)
if select == -1:
return False
cfgFile = self._favouritesFile
definedIn = item.getInfo('definedIn')
if definedIn and definedIn.startswith('favfolders/'):
cfgFile = os.path.join(self._favouritesFoldersFolder, definedIn.split('/')[1])
if select == 0:
newLabel = getKeyboard(default = item.getInfo('title'), heading = 'Change label')
if not newLabel or newLabel == "":
return False
self.changeLabel(item, newLabel)
elif select == 1:
newIcon = getImage('Change icon')
if not newIcon:
return False
self.changeIcon(item, newIcon)
elif select == 2:
newFanart = getImage('Change fanart')
if not newFanart:
return False
self.changeFanart(item, newFanart)
elif select == 3:
newCfgFile = self._virtualFolderSelection()
if not newCfgFile or cfgFile == newCfgFile:
return False
self.moveToFolder(cfgFile, item, newCfgFile)
return True
def _findItem(self, item):
cfgFile = self._favouritesFile
definedIn = item.getInfo('definedIn')
if definedIn and definedIn.startswith('favfolders/'):
cfgFile = os.path.join(self._favouritesFolder, definedIn)
if os.path.exists(cfgFile):
data = fu.getFileContent(cfgFile)
regex = self.cfgBuilder.buildItem(re.escape(item.getInfo('title')), "[^#]*", re.escape(item.getInfo('url')))
matches = regexUtils.findall(data, regex)
if matches:
return (cfgFile, data, matches[0])
return None
def changeLabel(self, item, newLabel):
found = self._findItem(item)
if found:
item['title'] = newLabel
[cfgFile, data, fav] = found
# if it's a virtual folder, rename file, rename header, update link
if self._isVirtualFolder(item):
url = item.getInfo('url')
oldFile = self._getFullPath(url)
newFilename = urllib.quote_plus(fu.cleanFilename(newLabel))
virtualFolderFile = newFilename + '.cfg'
physicalFolder = os.path.normpath(self._favouritesFoldersFolder)
virtualFolderPath = os.path.join(physicalFolder, virtualFolderFile)
# check if new target is valid
if os.path.exists(virtualFolderPath):
prefix = newFilename + '-'
suffix = '.cfg'
virtualFolderFile = fu.randomFilename(directory=physicalFolder, prefix=prefix, suffix=suffix)
virtualFolderPath = os.path.join(physicalFolder, virtualFolderFile)
# update header
content = fu.getFileContent(oldFile)
oldHeader = self.cfgBuilder.buildHeader(item['title'])
newHeader = self.cfgBuilder.buildHeader(newLabel)
content = content.replace(oldHeader, newHeader)
# rename file
self._removeVirtualFolder(oldFile, False)
fu.setFileContent(virtualFolderPath, content)
# update link
item['url'] = self._getShortPath(virtualFolderPath)
newfav = self._createFavourite(item)
new = data.replace(fav, newfav.encode('utf-8'))
fu.setFileContent(cfgFile, new)
def changeIcon(self, item, newIcon):
found = self._findItem(item)
if found:
[cfgFile, data, fav] = found
newfav = self._createFavourite(item, icon=newIcon)
new = data.replace(fav, newfav.encode('utf-8'))
fu.setFileContent(cfgFile, new)
def changeFanart(self, item, newFanart):
found = self._findItem(item)
if found:
[cfgFile, data, fav] = found
newfav = self._createFavourite(item, fanart=newFanart)
new = data.replace(fav, newfav.encode('utf-8'))
fu.setFileContent(cfgFile, new)
def moveToFolder(self, cfgFile, item, newCfgFile):
found = self._findItem(item)
if found:
[cfgFile, data, fav] = found
if os.path.exists(newCfgFile):
new = data.replace(fav,'')
fu.setFileContent(cfgFile, new)
fu.appendFileContent(newCfgFile, fav)
def removeItem(self, item):
found = self._findItem(item)
if found:
try:
# delete virtual folder
if self._isVirtualFolder(item):
self._removeVirtualFolder(item['url'], True)
# delete link
[cfgFile, data, fav] = found
new = data.replace(fav,'')
fu.setFileContent(cfgFile, new)
return True
except:
return False
return False
class XbmcFavouriteItem:
    """One entry from xbmc's favourites XML (name / thumb / target URL)."""

    def __init__(self, title, icon, url):
        self.title = title
        self.icon = icon
        self.url = url

    @classmethod
    def fromXmlNode(cls, node):
        """Build an item from a <favourite> DOM node; any missing piece
        defaults to the empty string."""
        def safe(extract):
            try:
                return extract()
            except:
                return ''
        return cls(safe(lambda: node.attributes['name'].nodeValue),
                   safe(lambda: node.attributes['thumb'].nodeValue),
                   safe(lambda: node.childNodes[0].nodeValue))

    def convertToCListItem(self):
        """Map this entry onto a CListItem of type 'command'."""
        item = CListItem()
        item.setInfo('title', self.title)
        item.setInfo('type', 'command')
        item.setInfo('icon', self.icon)
        item.setInfo('url', self.url)
        return item
class CfgBuilder:
    """Serialises favourite entries into the plain-text cfg format."""

    def __init__(self):
        # Minimum width (in '#' characters, before padding) of the
        # separator lines drawn around headers.
        self.minWidth = 52

    def buildSeperator(self, title):
        """Return a '#' ruler wide enough for 'title' (plus '# ' / ' #')."""
        return '#' * (max(len(title), self.minWidth) + 4)

    def buildHeader(self, title):
        """Return a three-line boxed header with the upper-cased title."""
        sep = self.buildSeperator(title)
        padding = ' ' * (len(sep) - len(title) - 4)
        return '\n'.join([sep, '# ' + title.upper() + padding + ' #', sep])

    def buildItem(self, title, m_type, url, icon=None, fanart=None, cfg=None, catcher=None):
        """Return one cfg entry; optional keys are emitted only when set,
        and 'url' always closes the entry."""
        lines = [
            '\n' + self.buildSeperator(title),
            'title=' + title,
            'type=' + m_type
        ]
        for key, value in (('icon', icon), ('fanart', fanart),
                           ('cfg', cfg), ('catcher', catcher)):
            if value:
                lines.append(key + '=' + value)
        lines.append('url=' + url)
        return '\n'.join(lines)
| |
import numpy as np
import pytest
from scipy import sparse
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_equal
from sklearn.utils import check_random_state
from sklearn.utils._testing import assert_raises_regexp
from sklearn.utils._testing import assert_allclose
from sklearn.datasets import make_regression
from sklearn.linear_model import LinearRegression, RANSACRegressor
from sklearn.linear_model import OrthogonalMatchingPursuit
from sklearn.linear_model._ransac import _dynamic_max_trials
from sklearn.exceptions import ConvergenceWarning
# Generate coordinates of the line y = 0.2 * x + 20 over x in [-200, 200)
X = np.arange(-200, 200)
y = 0.2 * X + 20
data = np.column_stack([X, y])
# Add some faulty data: up to 200 randomly chosen rows (duplicates removed
# by np.unique) are shifted well off the line so they become outliers.
rng = np.random.RandomState(1000)
outliers = np.unique(rng.randint(len(X), size=200))
data[outliers, :] += 50 + rng.rand(len(outliers), 2) * 10
# Module-level fixtures shared by the tests below: X is (n, 1), y is (n,).
X = data[:, 0][:, np.newaxis]
y = data[:, 1]
def test_ransac_inliers_outliers():
    """RANSAC on the corrupted line must flag exactly the planted outliers."""
    ransac = RANSACRegressor(LinearRegression(), min_samples=2,
                             residual_threshold=5, random_state=0)
    ransac.fit(X, y)
    expected = np.ones_like(ransac.inlier_mask_).astype(np.bool_)
    expected[outliers] = False
    assert_array_equal(ransac.inlier_mask_, expected)
def test_ransac_is_data_valid():
    """A data-validation callback that always rejects leads to ValueError."""
    def is_data_valid(X, y):
        # The callback sees exactly the min_samples subset.
        assert X.shape[0] == 2
        assert y.shape[0] == 2
        return False

    rng = np.random.RandomState(0)
    X_local = rng.rand(10, 2)
    y_local = rng.rand(10, 1)
    ransac = RANSACRegressor(LinearRegression(), min_samples=2,
                             residual_threshold=5,
                             is_data_valid=is_data_valid,
                             random_state=0)
    with pytest.raises(ValueError):
        ransac.fit(X_local, y_local)
def test_ransac_is_model_valid():
    """A model-validation callback that always rejects leads to ValueError."""
    def is_model_valid(estimator, X, y):
        # The callback sees exactly the min_samples subset.
        assert X.shape[0] == 2
        assert y.shape[0] == 2
        return False

    ransac = RANSACRegressor(LinearRegression(), min_samples=2,
                             residual_threshold=5,
                             is_model_valid=is_model_valid,
                             random_state=0)
    with pytest.raises(ValueError):
        ransac.fit(X, y)
def test_ransac_max_trials():
    """max_trials=0 is invalid; otherwise n_trials_ must stay within the
    dynamically computed bound."""
    estimator = LinearRegression()
    with pytest.raises(ValueError):
        RANSACRegressor(estimator, min_samples=2, residual_threshold=5,
                        max_trials=0, random_state=0).fit(X, y)
    # there is a 1e-9 chance it will take these many trials. No good reason
    # 1e-2 isn't enough, can still happen
    # 2 is the what ransac defines as min_samples = X.shape[1] + 1
    max_trials = _dynamic_max_trials(
        len(X) - len(outliers), X.shape[0], 2, 1 - 1e-9)
    ransac = RANSACRegressor(estimator, min_samples=2)
    for seed in range(50):
        ransac.set_params(min_samples=2, random_state=seed)
        ransac.fit(X, y)
        assert ransac.n_trials_ < max_trials + 1
def test_ransac_stop_n_inliers():
    """With stop_n_inliers=2 the very first trial already suffices."""
    ransac = RANSACRegressor(LinearRegression(), min_samples=2,
                             residual_threshold=5, stop_n_inliers=2,
                             random_state=0)
    ransac.fit(X, y)
    assert ransac.n_trials_ == 1
def test_ransac_stop_score():
    """With stop_score=0 the very first trial already suffices."""
    ransac = RANSACRegressor(LinearRegression(), min_samples=2,
                             residual_threshold=5, stop_score=0,
                             random_state=0)
    ransac.fit(X, y)
    assert ransac.n_trials_ == 1
def test_ransac_score():
    """score() reflects the inlier model: perfect away from the two
    planted outliers, imperfect on them."""
    X_local = np.arange(100)[:, None]
    y_local = np.zeros((100, ))
    y_local[0] = 1
    y_local[1] = 100
    ransac = RANSACRegressor(LinearRegression(), min_samples=2,
                             residual_threshold=0.5, random_state=0)
    ransac.fit(X_local, y_local)
    assert ransac.score(X_local[2:], y_local[2:]) == 1
    assert ransac.score(X_local[:2], y_local[:2]) < 1
def test_ransac_predict():
    """predict() uses the model fitted on inliers (the all-zeros line)."""
    X_local = np.arange(100)[:, None]
    y_local = np.zeros((100, ))
    y_local[0] = 1
    y_local[1] = 100
    ransac = RANSACRegressor(LinearRegression(), min_samples=2,
                             residual_threshold=0.5, random_state=0)
    ransac.fit(X_local, y_local)
    assert_array_equal(ransac.predict(X_local), np.zeros(100))
def test_ransac_resid_thresh_no_inliers():
    """With residual_threshold=0.0 no consensus set exists: fit must raise
    a ValueError and every skipped trial is accounted as 'no inliers'."""
    ransac = RANSACRegressor(LinearRegression(), min_samples=2,
                             residual_threshold=0.0, random_state=0,
                             max_trials=5)
    msg = "RANSAC could not find a valid consensus set"
    # pytest.raises(match=...) replaces the deprecated assert_raises_regexp.
    with pytest.raises(ValueError, match=msg):
        ransac.fit(X, y)
    assert ransac.n_skips_no_inliers_ == 5
    assert ransac.n_skips_invalid_data_ == 0
    assert ransac.n_skips_invalid_model_ == 0
def test_ransac_no_valid_data():
    """Every subsample rejected by is_data_valid: fit raises and the skips
    are accounted as 'invalid data'."""
    def is_data_valid(X, y):
        return False

    ransac = RANSACRegressor(LinearRegression(),
                             is_data_valid=is_data_valid,
                             max_trials=5)
    msg = "RANSAC could not find a valid consensus set"
    # pytest.raises(match=...) replaces the deprecated assert_raises_regexp.
    with pytest.raises(ValueError, match=msg):
        ransac.fit(X, y)
    assert ransac.n_skips_no_inliers_ == 0
    assert ransac.n_skips_invalid_data_ == 5
    assert ransac.n_skips_invalid_model_ == 0
def test_ransac_no_valid_model():
    """Every model rejected by is_model_valid: fit raises and the skips
    are accounted as 'invalid model'."""
    def is_model_valid(estimator, X, y):
        return False

    ransac = RANSACRegressor(LinearRegression(),
                             is_model_valid=is_model_valid,
                             max_trials=5)
    msg = "RANSAC could not find a valid consensus set"
    # pytest.raises(match=...) replaces the deprecated assert_raises_regexp.
    with pytest.raises(ValueError, match=msg):
        ransac.fit(X, y)
    assert ransac.n_skips_no_inliers_ == 0
    assert ransac.n_skips_invalid_data_ == 0
    assert ransac.n_skips_invalid_model_ == 5
def test_ransac_exceed_max_skips():
    """max_skips caps the number of rejected iterations before fit aborts."""
    def is_data_valid(X, y):
        return False

    ransac = RANSACRegressor(LinearRegression(),
                             is_data_valid=is_data_valid,
                             max_trials=5,
                             max_skips=3)
    msg = "RANSAC skipped more iterations than `max_skips`"
    # pytest.raises(match=...) replaces the deprecated assert_raises_regexp.
    with pytest.raises(ValueError, match=msg):
        ransac.fit(X, y)
    assert ransac.n_skips_no_inliers_ == 0
    assert ransac.n_skips_invalid_data_ == 4
    assert ransac.n_skips_invalid_model_ == 0
def test_ransac_warn_exceed_max_skips():
    """A consensus set found before `max_skips` is exhausted is kept, but a
    ConvergenceWarning reports the early exit."""
    # Closure state instead of the module-level global the original used,
    # which leaked between test runs.
    state = {"accepted_once": False}

    def is_data_valid(X, y):
        # Accept only the very first subsample, reject all later ones.
        if not state["accepted_once"]:
            state["accepted_once"] = True
            return True
        return False

    ransac = RANSACRegressor(LinearRegression(),
                             is_data_valid=is_data_valid,
                             max_skips=3,
                             max_trials=5)
    warning_message = (
        "RANSAC found a valid consensus set but exited "
        "early due to skipping more iterations than "
        "`max_skips`. See estimator attributes for "
        "diagnostics."
    )
    with pytest.warns(ConvergenceWarning, match=warning_message):
        ransac.fit(X, y)
    assert ransac.n_skips_no_inliers_ == 0
    assert ransac.n_skips_invalid_data_ == 4
    assert ransac.n_skips_invalid_model_ == 0
def _check_ransac_sparse(to_sparse):
    """Fit RANSAC on a sparse view of X and verify the inlier mask.

    Shared body for the coo/csr/csc tests below, which were three
    verbatim copies of the same code.
    """
    ransac = RANSACRegressor(LinearRegression(), min_samples=2,
                             residual_threshold=5, random_state=0)
    ransac.fit(to_sparse(X), y)
    expected = np.ones_like(ransac.inlier_mask_).astype(np.bool_)
    expected[outliers] = False
    assert_array_equal(ransac.inlier_mask_, expected)


def test_ransac_sparse_coo():
    _check_ransac_sparse(sparse.coo_matrix)


def test_ransac_sparse_csr():
    _check_ransac_sparse(sparse.csr_matrix)


def test_ransac_sparse_csc():
    _check_ransac_sparse(sparse.csc_matrix)
def test_ransac_none_estimator():
    """Passing None as the estimator must behave like an explicit
    LinearRegression."""
    explicit = RANSACRegressor(LinearRegression(), min_samples=2,
                               residual_threshold=5, random_state=0)
    fallback = RANSACRegressor(None, min_samples=2, residual_threshold=5,
                               random_state=0)
    explicit.fit(X, y)
    fallback.fit(X, y)
    assert_array_almost_equal(explicit.predict(X), fallback.predict(X))
def test_ransac_min_n_samples():
    """min_samples may be given as an absolute count, a fraction of the
    data, a whole float, or omitted -- all must agree; invalid values
    must raise."""
    estimator = LinearRegression()
    shared = dict(residual_threshold=5, random_state=0)
    ransac_abs = RANSACRegressor(estimator, min_samples=2, **shared)
    ransac_frac = RANSACRegressor(estimator, min_samples=2. / X.shape[0],
                                  **shared)
    ransac_float = RANSACRegressor(estimator, min_samples=2.0, **shared)
    ransac_default = RANSACRegressor(estimator, **shared)
    ransac_abs.fit(X, y)
    ransac_frac.fit(X, y)
    ransac_float.fit(X, y)
    ransac_default.fit(X, y)
    reference = ransac_abs.predict(X)
    for other in (ransac_frac, ransac_float, ransac_default):
        assert_array_almost_equal(reference, other.predict(X))
    # Invalid settings: negative, non-integral > 1, more than n_samples.
    for bad in (-1, 5.2, X.shape[0] + 1):
        with pytest.raises(ValueError):
            RANSACRegressor(estimator, min_samples=bad, **shared).fit(X, y)
def test_ransac_multi_dimensional_targets():
    """RANSAC handles 2-D target matrices as well as 1-D vectors."""
    ransac = RANSACRegressor(LinearRegression(), min_samples=2,
                             residual_threshold=5, random_state=0)
    # Stack the target three times to form a multi-column y.
    ransac.fit(X, np.column_stack([y, y, y]))
    expected = np.ones_like(ransac.inlier_mask_).astype(np.bool_)
    expected[outliers] = False
    assert_array_equal(ransac.inlier_mask_, expected)
def test_ransac_residual_loss():
    """Custom loss callables and the named loss must all select the same
    consensus set as the default loss."""
    def loss_multi1(y_true, y_pred):
        return np.sum(np.abs(y_true - y_pred), axis=1)

    def loss_multi2(y_true, y_pred):
        return np.sum((y_true - y_pred) ** 2, axis=1)

    def loss_mono(y_true, y_pred):
        return np.abs(y_true - y_pred)

    yyy = np.column_stack([y, y, y])
    base_estimator = LinearRegression()
    ransac_estimator0 = RANSACRegressor(base_estimator, min_samples=2,
                                        residual_threshold=5, random_state=0)
    ransac_estimator1 = RANSACRegressor(base_estimator, min_samples=2,
                                        residual_threshold=5, random_state=0,
                                        loss=loss_multi1)
    ransac_estimator2 = RANSACRegressor(base_estimator, min_samples=2,
                                        residual_threshold=5, random_state=0,
                                        loss=loss_multi2)
    # multi-dimensional targets
    ransac_estimator0.fit(X, yyy)
    ransac_estimator1.fit(X, yyy)
    ransac_estimator2.fit(X, yyy)
    assert_array_almost_equal(ransac_estimator0.predict(X),
                              ransac_estimator1.predict(X))
    assert_array_almost_equal(ransac_estimator0.predict(X),
                              ransac_estimator2.predict(X))
    # one-dimensional targets
    ransac_estimator0.fit(X, y)
    ransac_estimator2.loss = loss_mono
    ransac_estimator2.fit(X, y)
    assert_array_almost_equal(ransac_estimator0.predict(X),
                              ransac_estimator2.predict(X))
    ransac_estimator3 = RANSACRegressor(base_estimator, min_samples=2,
                                        residual_threshold=5, random_state=0,
                                        loss="squared_error")
    ransac_estimator3.fit(X, y)
    # fix: the original re-checked estimator2 here, leaving the
    # "squared_error" estimator's predictions untested.
    assert_array_almost_equal(ransac_estimator0.predict(X),
                              ransac_estimator3.predict(X))
def test_ransac_default_residual_threshold():
    """Without an explicit residual_threshold the default still recovers
    the planted inlier set."""
    ransac = RANSACRegressor(LinearRegression(), min_samples=2,
                             random_state=0)
    ransac.fit(X, y)
    expected = np.ones_like(ransac.inlier_mask_).astype(np.bool_)
    expected[outliers] = False
    assert_array_equal(ransac.inlier_mask_, expected)
def test_ransac_dynamic_max_trials():
    """Check _dynamic_max_trials against hand-computed reference values.

    Numbers hand-calculated and confirmed on page 119 (Table 4.3) in
    Hartley, R.~I. and Zisserman, A., 2004, Multiple View Geometry in
    Computer Vision, Second Edition, Cambridge University Press,
    ISBN: 0521540518
    """
    # (n_inliers, n_samples, min_samples, expected trials) at prob = 0.99
    reference = [
        (100, 100, 2, 1),    # e = 0%, min_samples = X
        (95, 100, 2, 2),     # e = 5%, min_samples = 2
        (90, 100, 2, 3),     # e = 10%, min_samples = 2
        (70, 100, 2, 7),     # e = 30%, min_samples = 2
        (50, 100, 2, 17),    # e = 50%, min_samples = 2
        (95, 100, 8, 5),     # e = 5%, min_samples = 8
        (90, 100, 8, 9),     # e = 10%, min_samples = 8
        (70, 100, 8, 78),    # e = 30%, min_samples = 8
        (50, 100, 8, 1177),  # e = 50%, min_samples = 8
    ]
    for n_inliers, n_samples, min_samples, expected in reference:
        assert _dynamic_max_trials(n_inliers, n_samples, min_samples,
                                   0.99) == expected

    # Degenerate stop probabilities (e = 0%, min_samples = 10).
    assert _dynamic_max_trials(1, 100, 10, 0) == 0
    assert _dynamic_max_trials(1, 100, 10, 1) == float('inf')

    # stop_probability outside [0, 1] must raise at fit time.
    for bad_probability in (-0.1, 1.1):
        estimator = RANSACRegressor(LinearRegression(), min_samples=2,
                                    stop_probability=bad_probability)
        with pytest.raises(ValueError):
            estimator.fit(X, y)
def test_ransac_fit_sample_weight():
    """Check the ``sample_weight`` contract of RANSACRegressor.fit.

    An integer weight n on a sample must be equivalent to repeating that
    sample n times, and a base estimator without sample_weight support in
    ``fit`` must raise a ValueError.
    """
    ransac_estimator = RANSACRegressor(random_state=0)
    n_samples = y.shape[0]
    weights = np.ones(n_samples)
    ransac_estimator.fit(X, y, weights)
    # sanity check: one inlier flag per input sample
    assert ransac_estimator.inlier_mask_.shape[0] == n_samples

    ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_
                                   ).astype(np.bool_)
    ref_inlier_mask[outliers] = False
    # check that mask is correct
    assert_array_equal(ransac_estimator.inlier_mask_, ref_inlier_mask)

    # check that fit(X) = fit([X1, X2, X3],sample_weight = [n1, n2, n3]) where
    # X = X1 repeated n1 times, X2 repeated n2 times and so forth
    random_state = check_random_state(0)
    X_ = random_state.randint(0, 200, [10, 1])
    y_ = np.ndarray.flatten(0.2 * X_ + 2)
    sample_weight = random_state.randint(0, 10, 10)
    # add one deliberately corrupted sample with its own weight
    outlier_X = random_state.randint(0, 1000, [1, 1])
    outlier_weight = random_state.randint(0, 10, 1)
    outlier_y = random_state.randint(-1000, 0, 1)

    # materialize the weights as literal row repetitions
    X_flat = np.append(np.repeat(X_, sample_weight, axis=0),
                       np.repeat(outlier_X, outlier_weight, axis=0), axis=0)
    y_flat = np.ndarray.flatten(np.append(np.repeat(y_, sample_weight, axis=0),
                                np.repeat(outlier_y, outlier_weight, axis=0),
                                axis=0))
    ransac_estimator.fit(X_flat, y_flat)
    ref_coef_ = ransac_estimator.estimator_.coef_

    # now fit the unrepeated data with explicit sample weights instead
    sample_weight = np.append(sample_weight, outlier_weight)
    X_ = np.append(X_, outlier_X, axis=0)
    y_ = np.append(y_, outlier_y)
    ransac_estimator.fit(X_, y_, sample_weight)
    assert_allclose(ransac_estimator.estimator_.coef_, ref_coef_)

    # check that if base_estimator.fit doesn't support
    # sample_weight, raises error
    base_estimator = OrthogonalMatchingPursuit()
    ransac_estimator = RANSACRegressor(base_estimator)
    with pytest.raises(ValueError):
        ransac_estimator.fit(X, y, weights)
def test_ransac_final_model_fit_sample_weight():
    """The final RANSAC model equals a weighted refit on the inliers alone."""
    X, y = make_regression(n_samples=1000, random_state=10)
    rng = check_random_state(42)
    sample_weight = rng.randint(1, 4, size=y.shape[0])
    sample_weight = sample_weight / sample_weight.sum()
    ransac = RANSACRegressor(base_estimator=LinearRegression(), random_state=0)
    ransac.fit(X, y, sample_weight=sample_weight)

    # Refit a plain LinearRegression on the detected inliers with the same
    # weights; its coefficients must match RANSAC's internal final estimator.
    inliers = ransac.inlier_mask_
    reference_model = LinearRegression()
    reference_model.fit(
        X[inliers], y[inliers],
        sample_weight=sample_weight[inliers]
    )

    assert_allclose(ransac.estimator_.coef_, reference_model.coef_, atol=1e-12)
# TODO: Remove in v1.2
def test_loss_squared_loss_deprecated():
    """Legacy loss name 'squared_loss' warns but behaves like 'squared_error'."""
    est1 = RANSACRegressor(loss="squared_loss", random_state=0)

    # The deprecated spelling must emit a FutureWarning when fitting.
    with pytest.warns(FutureWarning,
                      match="The loss 'squared_loss' was deprecated"):
        est1.fit(X, y)

    est2 = RANSACRegressor(loss="squared_error", random_state=0)
    est2.fit(X, y)

    # Both spellings must yield identical predictions.
    assert_allclose(est1.predict(X), est2.predict(X))
| |
"""
The :mod:`sklearn.utils` module includes various utilities.
"""
from collections import Sequence
import numpy as np
from scipy.sparse import issparse
import warnings
from .murmurhash import murmurhash3_32
from .validation import (as_float_array,
assert_all_finite,
check_random_state, column_or_1d, check_array,
check_consistent_length, check_X_y, indexable,
check_symmetric)
from .deprecation import deprecated
from .class_weight import compute_class_weight, compute_sample_weight
from ..externals.joblib import cpu_count
from ..exceptions import ConvergenceWarning as ConvergenceWarning_
from ..exceptions import DataConversionWarning as DataConversionWarning_
class ConvergenceWarning(ConvergenceWarning_):
    """Deprecated alias; the canonical class lives in sklearn.exceptions."""
    pass

# Wrap the alias so importing it from this module emits a deprecation message.
ConvergenceWarning = deprecated("ConvergenceWarning has been moved "
                                "into the sklearn.exceptions module. "
                                "It will not be available here from "
                                "version 0.19")(ConvergenceWarning)
# Names re-exported as the public API of this utils package.
__all__ = ["murmurhash3_32", "as_float_array",
           "assert_all_finite", "check_array",
           "check_random_state",
           "compute_class_weight", "compute_sample_weight",
           "column_or_1d", "safe_indexing",
           "check_consistent_length", "check_X_y", 'indexable',
           "check_symmetric"]
def safe_mask(X, mask):
    """Return a mask which is safe to use on X.

    Boolean masks are converted to integer indices when X is sparse, since
    sparse matrices (anything exposing ``toarray``) do not support
    boolean-mask row indexing.

    Parameters
    ----------
    X : {array-like, sparse matrix}
        Data on which to apply mask.

    mask : array
        Mask to be used on X; either boolean or already integer indices.

    Returns
    -------
    mask : array
        The input mask, converted to integer indices if required.
    """
    mask = np.asarray(mask)
    # ``np.int`` (a deprecated alias of the builtin int) was removed in
    # NumPy 1.24; test against the abstract signed-integer type instead,
    # which is what ``np.issubdtype(dtype, int)`` resolved to.
    if np.issubdtype(mask.dtype, np.signedinteger):
        # Already integer indices: nothing to convert.
        return mask

    if hasattr(X, "toarray"):
        # Sparse input: translate the boolean mask into row indices.
        ind = np.arange(mask.shape[0])
        mask = ind[mask]
    return mask
def safe_indexing(X, indices):
    """Return items or rows from X using indices.

    Allows simple indexing of lists or arrays.

    Parameters
    ----------
    X : array-like, sparse-matrix, list.
        Data from which to sample rows or items.

    indices : array-like, list
        Indices according to which X will be subsampled.
    """
    if hasattr(X, "iloc"):
        # Pandas Dataframes and Series
        try:
            return X.iloc[indices]
        except ValueError:
            # Cython typed memoryviews internally used in pandas do not support
            # readonly buffers.
            warnings.warn("Copying input dataframe for slicing.",
                          DataConversionWarning_)
            return X.copy().iloc[indices]

    if not hasattr(X, "shape"):
        # Plain Python sequence: pick elements one by one.
        return [X[idx] for idx in indices]

    # Array-like input: ``take`` is often substantially faster than fancy
    # indexing when the indices come as a typed integer array.
    has_int_dtype = hasattr(indices, 'dtype') and indices.dtype.kind == 'i'
    if hasattr(X, 'take') and has_int_dtype:
        return X.take(indices, axis=0)
    return X[indices]
def resample(*arrays, **options):
    """Resample arrays or sparse matrices in a consistent way.

    The default strategy implements one step of the bootstrapping
    procedure.

    Parameters
    ----------
    *arrays : sequence of indexable data-structures
        Indexable data-structures can be arrays, lists, dataframes or scipy
        sparse matrices with consistent first dimension.

    replace : boolean, True by default
        Implements resampling with replacement. If False, this will implement
        (sliced) random permutations.

    n_samples : int, None by default
        Number of samples to generate. If left to None this is
        automatically set to the first dimension of the arrays.

    random_state : int or RandomState instance
        Control the shuffling for reproducible behavior.

    Returns
    -------
    resampled_arrays : sequence of indexable data-structures
        Sequence of resampled views of the collections. The original arrays
        are not impacted.

    Examples
    --------
    It is possible to mix sparse and dense arrays in the same run::

        >>> X = np.array([[1., 0.], [2., 1.], [0., 0.]])
        >>> y = np.array([0, 1, 2])
        >>> from scipy.sparse import coo_matrix
        >>> X_sparse = coo_matrix(X)
        >>> from sklearn.utils import resample
        >>> X, X_sparse, y = resample(X, X_sparse, y, random_state=0)
        >>> X
        array([[ 1.,  0.],
               [ 2.,  1.],
               [ 1.,  0.]])
        >>> X_sparse                   # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
        <3x2 sparse matrix of type '<... 'numpy.float64'>'
            with 4 stored elements in Compressed Sparse Row format>
        >>> X_sparse.toarray()
        array([[ 1.,  0.],
               [ 2.,  1.],
               [ 1.,  0.]])
        >>> y
        array([0, 1, 0])
        >>> resample(y, n_samples=2, random_state=0)
        array([0, 1])

    See also
    --------
    :func:`sklearn.utils.shuffle`
    """
    random_state = check_random_state(options.pop('random_state', None))
    replace = options.pop('replace', True)
    max_n_samples = options.pop('n_samples', None)
    if options:
        # anything left over in ``options`` was an unsupported keyword
        raise ValueError("Unexpected kw arguments: %r" % options.keys())

    if len(arrays) == 0:
        return None

    # length of the first collection defines the common sample count
    first = arrays[0]
    n_samples = first.shape[0] if hasattr(first, 'shape') else len(first)

    if max_n_samples is None:
        max_n_samples = n_samples

    if max_n_samples > n_samples:
        raise ValueError("Cannot sample %d out of arrays with dim %d" % (
            max_n_samples, n_samples))

    check_consistent_length(*arrays)

    if replace:
        # bootstrap: draw indices with replacement
        indices = random_state.randint(0, n_samples, size=(max_n_samples,))
    else:
        # permutation: shuffle all indices, then keep a prefix
        indices = np.arange(n_samples)
        random_state.shuffle(indices)
        indices = indices[:max_n_samples]

    # convert sparse matrices to CSR for row-based indexing
    arrays = [a.tocsr() if issparse(a) else a for a in arrays]

    resampled_arrays = [safe_indexing(a, indices) for a in arrays]
    if len(resampled_arrays) == 1:
        # syntactic sugar for the unit argument case
        return resampled_arrays[0]
    else:
        return resampled_arrays
def shuffle(*arrays, **options):
    """Shuffle arrays or sparse matrices in a consistent way.

    This is a convenience alias to ``resample(*arrays, replace=False)`` to do
    random permutations of the collections.

    Parameters
    ----------
    *arrays : sequence of indexable data-structures
        Indexable data-structures can be arrays, lists, dataframes or scipy
        sparse matrices with consistent first dimension.

    random_state : int or RandomState instance
        Control the shuffling for reproducible behavior.

    n_samples : int, None by default
        Number of samples to generate. If left to None this is
        automatically set to the first dimension of the arrays.

    Returns
    -------
    shuffled_arrays : sequence of indexable data-structures
        Sequence of shuffled views of the collections. The original arrays
        are not impacted.

    Examples
    --------
    It is possible to mix sparse and dense arrays in the same run::

        >>> X = np.array([[1., 0.], [2., 1.], [0., 0.]])
        >>> y = np.array([0, 1, 2])
        >>> from scipy.sparse import coo_matrix
        >>> X_sparse = coo_matrix(X)
        >>> from sklearn.utils import shuffle
        >>> X, X_sparse, y = shuffle(X, X_sparse, y, random_state=0)
        >>> X
        array([[ 0.,  0.],
               [ 2.,  1.],
               [ 1.,  0.]])
        >>> X_sparse                   # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
        <3x2 sparse matrix of type '<... 'numpy.float64'>'
            with 3 stored elements in Compressed Sparse Row format>
        >>> X_sparse.toarray()
        array([[ 0.,  0.],
               [ 2.,  1.],
               [ 1.,  0.]])
        >>> y
        array([2, 1, 0])
        >>> shuffle(y, n_samples=2, random_state=0)
        array([0, 1])

    See also
    --------
    :func:`sklearn.utils.resample`
    """
    # Delegate to resample with replacement forced off.
    kwargs = dict(options, replace=False)
    return resample(*arrays, **kwargs)
def safe_sqr(X, copy=True):
    """Element wise squaring of array-likes and sparse matrices.

    Parameters
    ----------
    X : array like, matrix, sparse matrix
        Input to square element-wise.

    copy : boolean, optional, default True
        If True (the default), operate on a copy of X; if False, square X
        in place where the container supports it.

    Returns
    -------
    X ** 2 : element wise square
    """
    X = check_array(X, accept_sparse=['csr', 'csc', 'coo'], ensure_2d=False)
    if issparse(X):
        if copy:
            X = X.copy()
        # square only the explicitly stored (non-zero) entries
        X.data **= 2
    else:
        if copy:
            X = X ** 2
        else:
            X **= 2
    return X
def gen_batches(n, batch_size):
    """Generator to create slices containing batch_size elements, from 0 to n.

    The last slice may contain less than batch_size elements, when batch_size
    does not divide n.

    Examples
    --------
    >>> from sklearn.utils import gen_batches
    >>> list(gen_batches(7, 3))
    [slice(0, 3, None), slice(3, 6, None), slice(6, 7, None)]
    >>> list(gen_batches(6, 3))
    [slice(0, 3, None), slice(3, 6, None)]
    >>> list(gen_batches(2, 3))
    [slice(0, 2, None)]
    """
    # Emit every full batch, then a trailing short slice if anything remains.
    lo = 0
    for _ in range(int(n // batch_size)):
        hi = lo + batch_size
        yield slice(lo, hi)
        lo = hi
    if lo < n:
        yield slice(lo, n)
def gen_even_slices(n, n_packs, n_samples=None):
    """Generator to create n_packs slices going up to n.

    Pass n_samples when the slices are to be used for sparse matrix indexing;
    slicing off-the-end raises an exception, while it works for NumPy arrays.

    Examples
    --------
    >>> from sklearn.utils import gen_even_slices
    >>> list(gen_even_slices(10, 1))
    [slice(0, 10, None)]
    >>> list(gen_even_slices(10, 10))                       #doctest: +ELLIPSIS
    [slice(0, 1, None), slice(1, 2, None), ..., slice(9, 10, None)]
    >>> list(gen_even_slices(10, 5))                        #doctest: +ELLIPSIS
    [slice(0, 2, None), slice(2, 4, None), ..., slice(8, 10, None)]
    >>> list(gen_even_slices(10, 3))
    [slice(0, 4, None), slice(4, 7, None), slice(7, 10, None)]
    """
    if n_packs < 1:
        raise ValueError("gen_even_slices got n_packs=%s, must be >=1"
                         % n_packs)
    # The first (n % n_packs) packs get one extra element each.
    base, remainder = divmod(n, n_packs)
    start = 0
    for pack_num in range(n_packs):
        length = base + 1 if pack_num < remainder else base
        if length > 0:
            end = start + length
            if n_samples is not None:
                end = min(n_samples, end)
            yield slice(start, end, None)
            start = end
def _get_n_jobs(n_jobs):
"""Get number of jobs for the computation.
This function reimplements the logic of joblib to determine the actual
number of jobs depending on the cpu count. If -1 all CPUs are used.
If 1 is given, no parallel computing code is used at all, which is useful
for debugging. For n_jobs below -1, (n_cpus + 1 + n_jobs) are used.
Thus for n_jobs = -2, all CPUs but one are used.
Parameters
----------
n_jobs : int
Number of jobs stated in joblib convention.
Returns
-------
n_jobs : int
The actual number of jobs as positive integer.
Examples
--------
>>> from sklearn.utils import _get_n_jobs
>>> _get_n_jobs(4)
4
>>> jobs = _get_n_jobs(-2)
>>> assert jobs == max(cpu_count() - 1, 1)
>>> _get_n_jobs(0)
Traceback (most recent call last):
...
ValueError: Parameter n_jobs == 0 has no meaning.
"""
if n_jobs < 0:
return max(cpu_count() + 1 + n_jobs, 1)
elif n_jobs == 0:
raise ValueError('Parameter n_jobs == 0 has no meaning.')
else:
return n_jobs
def tosequence(x):
    """Cast iterable x to a Sequence, avoiding a copy if possible.

    Parameters
    ----------
    x : iterable
        Arbitrary iterable.

    Returns
    -------
    x itself when it is already an ndarray or a Sequence, otherwise list(x).
    """
    # ``collections.Sequence`` was removed in Python 3.10; resolve the ABC
    # from ``collections.abc`` (available since 3.3), falling back to the
    # legacy location on Python 2.
    try:
        from collections.abc import Sequence as _Sequence
    except ImportError:
        from collections import Sequence as _Sequence
    if isinstance(x, np.ndarray):
        return np.asarray(x)
    if isinstance(x, _Sequence):
        return x
    return list(x)
| |
from sqlalchemy.orm import create_session, relationship, mapper, \
contains_eager, joinedload, subqueryload, subqueryload_all,\
Session, aliased
from sqlalchemy import Integer, String, ForeignKey
from sqlalchemy.engine import default
from test.lib import AssertsCompiledSQL, fixtures, testing
from test.lib.schema import Table, Column
from test.lib.testing import assert_raises, eq_
class Company(fixtures.ComparableEntity):
    """Test entity; mapped per-test by the fixtures below."""
    pass

class Person(fixtures.ComparableEntity):
    """Polymorphic base of the employee hierarchy used in these tests."""
    pass

class Engineer(Person):
    """Subclass of Person; mapped to the ``engineers`` table in fixtures."""
    pass

class Manager(Person):
    """Subclass of Person; mapped to the ``managers`` table in fixtures."""
    pass

class Boss(Manager):
    """Second-level subclass (inherits from Manager)."""
    pass

class Machine(fixtures.ComparableEntity):
    """Test entity; mapped per-test (not used in the fixtures shown here)."""
    pass

class Paperwork(fixtures.ComparableEntity):
    """Test entity; mapped per-test (not used in the fixtures shown here)."""
    pass
class SelfReferentialTestJoinedToBase(fixtures.MappedTest):
    """Self-referential relationship where Engineer.reports_to targets the
    base Person class (joined to the base ``people`` table)."""

    run_setup_mappers = 'once'

    @classmethod
    def define_tables(cls, metadata):
        # ``people`` is the polymorphic base table; ``engineers`` extends it
        # and also carries the self-referential reports_to_id FK back to it.
        Table('people', metadata,
              Column('person_id', Integer,
                     primary_key=True,
                     test_needs_autoincrement=True),
              Column('name', String(50)),
              Column('type', String(30)))

        Table('engineers', metadata,
              Column('person_id', Integer,
                     ForeignKey('people.person_id'),
                     primary_key=True),
              Column('primary_language', String(50)),
              Column('reports_to_id', Integer,
                     ForeignKey('people.person_id')))

    @classmethod
    def setup_mappers(cls):
        engineers, people = cls.tables.engineers, cls.tables.people

        mapper(Person, people,
               polymorphic_on=people.c.type,
               polymorphic_identity='person')

        # ``engineers`` has two FKs to ``people`` (person_id and
        # reports_to_id), so the inherit_condition must be given explicitly.
        mapper(Engineer, engineers,
               inherits=Person,
               inherit_condition=engineers.c.person_id == people.c.person_id,
               polymorphic_identity='engineer',
               properties={
                   'reports_to':relationship(
                       Person,
                       primaryjoin=
                       people.c.person_id == engineers.c.reports_to_id)})

    def test_has(self):
        # EXISTS-style criterion against the relationship to the base class.
        p1 = Person(name='dogbert')
        e1 = Engineer(name='dilbert', primary_language='java', reports_to=p1)
        sess = create_session()
        sess.add(p1)
        sess.add(e1)
        sess.flush()
        sess.expunge_all()
        eq_(sess.query(Engineer)
            .filter(Engineer.reports_to.has(Person.name == 'dogbert'))
            .first(),
            Engineer(name='dilbert'))

    def test_oftype_aliases_in_exists(self):
        # of_type() narrows the EXISTS target to the Engineer subclass.
        e1 = Engineer(name='dilbert', primary_language='java')
        e2 = Engineer(name='wally', primary_language='c++', reports_to=e1)
        sess = create_session()
        sess.add_all([e1, e2])
        sess.flush()
        eq_(sess.query(Engineer)
            .filter(Engineer.reports_to
                    .of_type(Engineer)
                    .has(Engineer.name == 'dilbert'))
            .first(),
            e2)

    def test_join(self):
        # Aliased join along the self-referential relationship.
        p1 = Person(name='dogbert')
        e1 = Engineer(name='dilbert', primary_language='java', reports_to=p1)
        sess = create_session()
        sess.add(p1)
        sess.add(e1)
        sess.flush()
        sess.expunge_all()
        eq_(sess.query(Engineer)
            .join('reports_to', aliased=True)
            .filter(Person.name == 'dogbert').first(),
            Engineer(name='dilbert'))
class SelfReferentialJ2JTest(fixtures.MappedTest):
    """Joined-to-joined self-reference: Engineer.reports_to targets the
    Manager subclass rather than the base Person."""

    run_setup_mappers = 'once'

    @classmethod
    def define_tables(cls, metadata):
        people = Table('people', metadata,
                       Column('person_id', Integer,
                              primary_key=True,
                              test_needs_autoincrement=True),
                       Column('name', String(50)),
                       Column('type', String(30)))

        # reports_to_id points at the managers subtable, not at people.
        engineers = Table('engineers', metadata,
                          Column('person_id', Integer,
                                 ForeignKey('people.person_id'),
                                 primary_key=True),
                          Column('primary_language', String(50)),
                          Column('reports_to_id', Integer,
                                 ForeignKey('managers.person_id'))
                          )

        managers = Table('managers', metadata,
                         Column('person_id', Integer,
                                ForeignKey('people.person_id'),
                                primary_key=True),
                         )

    @classmethod
    def setup_mappers(cls):
        engineers = cls.tables.engineers
        managers = cls.tables.managers
        people = cls.tables.people

        mapper(Person, people,
               polymorphic_on=people.c.type,
               polymorphic_identity='person')

        mapper(Manager, managers,
               inherits=Person,
               polymorphic_identity='manager')

        # reports_to joins engineer rows directly to manager rows, with a
        # backref collection Manager.engineers on the other side.
        mapper(Engineer, engineers,
               inherits=Person,
               polymorphic_identity='engineer',
               properties={
                   'reports_to':relationship(
                       Manager,
                       primaryjoin=
                       managers.c.person_id == engineers.c.reports_to_id,
                       backref='engineers')})

    def test_has(self):
        m1 = Manager(name='dogbert')
        e1 = Engineer(name='dilbert', primary_language='java', reports_to=m1)
        sess = create_session()
        sess.add(m1)
        sess.add(e1)
        sess.flush()
        sess.expunge_all()
        eq_(sess.query(Engineer)
            .filter(Engineer.reports_to.has(Manager.name == 'dogbert'))
            .first(),
            Engineer(name='dilbert'))

    def test_join(self):
        m1 = Manager(name='dogbert')
        e1 = Engineer(name='dilbert', primary_language='java', reports_to=m1)
        sess = create_session()
        sess.add(m1)
        sess.add(e1)
        sess.flush()
        sess.expunge_all()
        eq_(sess.query(Engineer)
            .join('reports_to', aliased=True)
            .filter(Manager.name == 'dogbert').first(),
            Engineer(name='dilbert'))

    def test_filter_aliasing(self):
        m1 = Manager(name='dogbert')
        m2 = Manager(name='foo')
        e1 = Engineer(name='wally', primary_language='java', reports_to=m1)
        e2 = Engineer(name='dilbert', primary_language='c++', reports_to=m2)
        e3 = Engineer(name='etc', primary_language='c++')
        sess = create_session()
        sess.add_all([m1, m2, e1, e2, e3])
        sess.flush()
        sess.expunge_all()

        # filter aliasing applied to Engineer doesn't whack Manager
        eq_(sess.query(Manager)
            .join(Manager.engineers)
            .filter(Manager.name == 'dogbert').all(),
            [m1])

        eq_(sess.query(Manager)
            .join(Manager.engineers)
            .filter(Engineer.name == 'dilbert').all(),
            [m2])

        eq_(sess.query(Manager, Engineer)
            .join(Manager.engineers)
            .order_by(Manager.name.desc()).all(),
            [(m2, e2), (m1, e1)])

    def test_relationship_compare(self):
        m1 = Manager(name='dogbert')
        m2 = Manager(name='foo')
        e1 = Engineer(name='dilbert', primary_language='java', reports_to=m1)
        e2 = Engineer(name='wally', primary_language='c++', reports_to=m2)
        e3 = Engineer(name='etc', primary_language='c++')
        sess = create_session()
        sess.add(m1)
        sess.add(m2)
        sess.add(e1)
        sess.add(e2)
        sess.add(e3)
        sess.flush()
        sess.expunge_all()

        # comparing the many-to-one against None / an instance should produce
        # the appropriate criterion against the aliased join target
        eq_(sess.query(Manager)
            .join(Manager.engineers)
            .filter(Engineer.reports_to == None).all(),
            [])

        eq_(sess.query(Manager)
            .join(Manager.engineers)
            .filter(Engineer.reports_to == m1).all(),
            [m1])
class SelfReferentialJ2JSelfTest(fixtures.MappedTest):
    """Engineer.reports_to references Engineer itself: a joined-table
    self-reference contained within a single subclass."""

    run_setup_mappers = 'once'

    @classmethod
    def define_tables(cls, metadata):
        people = Table('people', metadata,
                       Column('person_id', Integer,
                              primary_key=True,
                              test_needs_autoincrement=True),
                       Column('name', String(50)),
                       Column('type', String(30)))

        # reports_to_id loops back onto the engineers table itself.
        engineers = Table('engineers', metadata,
                          Column('person_id', Integer,
                                 ForeignKey('people.person_id'),
                                 primary_key=True),
                          Column('reports_to_id', Integer,
                                 ForeignKey('engineers.person_id')))

    @classmethod
    def setup_mappers(cls):
        engineers = cls.tables.engineers
        people = cls.tables.people

        mapper(Person, people,
               polymorphic_on=people.c.type,
               polymorphic_identity='person')

        # remote_side disambiguates the direction of the self-referential
        # join (person_id is the "remote" column of the many-to-one).
        mapper(Engineer, engineers,
               inherits=Person,
               polymorphic_identity='engineer',
               properties={
                   'reports_to':relationship(
                       Engineer,
                       primaryjoin=
                       engineers.c.person_id == engineers.c.reports_to_id,
                       backref='engineers',
                       remote_side=engineers.c.person_id)})

    def _two_obj_fixture(self):
        # dilbert reports to wally
        e1 = Engineer(name='wally')
        e2 = Engineer(name='dilbert', reports_to=e1)
        sess = Session()
        sess.add_all([e1, e2])
        sess.commit()
        return sess

    def _five_obj_fixture(self):
        # e3 -> e1, e4 -> e2; e5 reports to nobody
        sess = Session()
        e1, e2, e3, e4, e5 = [
            Engineer(name='e%d' % (i + 1)) for i in xrange(5)
        ]
        e3.reports_to = e1
        e4.reports_to = e2
        sess.add_all([e1, e2, e3, e4, e5])
        sess.commit()
        return sess

    def test_has(self):
        sess = self._two_obj_fixture()
        eq_(sess.query(Engineer)
            .filter(Engineer.reports_to.has(Engineer.name == 'wally'))
            .first(),
            Engineer(name='dilbert'))

    def test_join_explicit_alias(self):
        sess = self._five_obj_fixture()
        ea = aliased(Engineer)
        eq_(sess.query(Engineer)
            .join(ea, Engineer.engineers)
            .filter(Engineer.name == 'e1').all(),
            [Engineer(name='e1')])

    def test_join_aliased_flag_one(self):
        sess = self._two_obj_fixture()
        eq_(sess.query(Engineer)
            .join('reports_to', aliased=True)
            .filter(Engineer.name == 'wally').first(),
            Engineer(name='dilbert'))

    def test_join_aliased_flag_two(self):
        sess = self._five_obj_fixture()
        eq_(sess.query(Engineer)
            .join(Engineer.engineers, aliased=True)
            .filter(Engineer.name == 'e4').all(),
            [Engineer(name='e2')])

    def test_relationship_compare(self):
        sess = self._five_obj_fixture()
        e1 = sess.query(Engineer).filter_by(name='e1').one()

        eq_(sess.query(Engineer)
            .join(Engineer.engineers, aliased=True)
            .filter(Engineer.reports_to == None).all(),
            [])

        eq_(sess.query(Engineer)
            .join(Engineer.engineers, aliased=True)
            .filter(Engineer.reports_to == e1).all(),
            [e1])
class M2MFilterTest(fixtures.MappedTest):
    """Filtering (any/contains, with and without of_type) across a
    many-to-many relationship whose target is a joined-table subclass."""

    run_setup_mappers = 'once'
    run_inserts = 'once'
    run_deletes = None

    @classmethod
    def define_tables(cls, metadata):
        organizations = Table('organizations', metadata,
                              Column('id', Integer,
                                     primary_key=True,
                                     test_needs_autoincrement=True),
                              Column('name', String(50)))

        # association table between organizations and engineers
        engineers_to_org = Table('engineers_to_org', metadata,
                                 Column('org_id', Integer,
                                        ForeignKey('organizations.id')),
                                 Column('engineer_id', Integer,
                                        ForeignKey('engineers.person_id')))

        people = Table('people', metadata,
                       Column('person_id', Integer,
                              primary_key=True,
                              test_needs_autoincrement=True),
                       Column('name', String(50)),
                       Column('type', String(30)))

        engineers = Table('engineers', metadata,
                          Column('person_id', Integer,
                                 ForeignKey('people.person_id'),
                                 primary_key=True),
                          Column('primary_language', String(50)))

    @classmethod
    def setup_mappers(cls):
        organizations = cls.tables.organizations
        people = cls.tables.people
        engineers = cls.tables.engineers
        engineers_to_org = cls.tables.engineers_to_org

        class Organization(cls.Comparable):
            pass

        # Organization <-> Engineer through the association table; the
        # backref also exposes Engineer.organizations.
        mapper(Organization, organizations,
               properties={
                   'engineers':relationship(
                       Engineer,
                       secondary=engineers_to_org,
                       backref='organizations')})

        mapper(Person, people,
               polymorphic_on=people.c.type,
               polymorphic_identity='person')

        mapper(Engineer, engineers,
               inherits=Person,
               polymorphic_identity='engineer')

    @classmethod
    def insert_data(cls):
        Organization = cls.classes.Organization
        e1 = Engineer(name='e1')
        e2 = Engineer(name='e2')
        e3 = Engineer(name='e3')
        e4 = Engineer(name='e4')
        org1 = Organization(name='org1', engineers=[e1, e2])
        org2 = Organization(name='org2', engineers=[e3, e4])
        sess = create_session()
        sess.add(org1)
        sess.add(org2)
        sess.flush()

    def test_not_contains(self):
        Organization = self.classes.Organization
        sess = create_session()
        e1 = sess.query(Person).filter(Engineer.name == 'e1').one()

        # this works
        eq_(sess.query(Organization)
            .filter(~Organization.engineers
                    .of_type(Engineer)
                    .contains(e1))
            .all(),
            [Organization(name='org2')])

        # this had a bug
        eq_(sess.query(Organization)
            .filter(~Organization.engineers
                    .contains(e1))
            .all(),
            [Organization(name='org2')])

    def test_any(self):
        sess = create_session()
        Organization = self.classes.Organization

        # EXISTS criterion, both with and without explicit of_type()
        eq_(sess.query(Organization)
            .filter(Organization.engineers
                    .of_type(Engineer)
                    .any(Engineer.name == 'e1'))
            .all(),
            [Organization(name='org1')])

        eq_(sess.query(Organization)
            .filter(Organization.engineers
                    .any(Engineer.name == 'e1'))
            .all(),
            [Organization(name='org1')])
class SelfReferentialM2MTest(fixtures.MappedTest, AssertsCompiledSQL):
    """Self-referential many-to-many between two subclasses (Child1/Child2)
    of a shared joined-table base, linked through the ``secondary`` table."""

    @classmethod
    def define_tables(cls, metadata):
        # both sides of the association point back into the base table
        Table('secondary', metadata,
              Column('left_id', Integer,
                     ForeignKey('parent.id'),
                     nullable=False),
              Column('right_id', Integer,
                     ForeignKey('parent.id'),
                     nullable=False))

        Table('parent', metadata,
              Column('id', Integer,
                     primary_key=True,
                     test_needs_autoincrement=True),
              Column('cls', String(50)))

        Table('child1', metadata,
              Column('id', Integer,
                     ForeignKey('parent.id'),
                     primary_key=True))

        Table('child2', metadata,
              Column('id', Integer,
                     ForeignKey('parent.id'),
                     primary_key=True))

    @classmethod
    def setup_classes(cls):
        class Parent(cls.Basic):
            pass

        class Child1(Parent):
            pass

        class Child2(Parent):
            pass

    @classmethod
    def setup_mappers(cls):
        child1 = cls.tables.child1
        child2 = cls.tables.child2
        Parent = cls.classes.Parent
        parent = cls.tables.parent
        Child1 = cls.classes.Child1
        Child2 = cls.classes.Child2
        secondary = cls.tables.secondary

        mapper(Parent, parent,
               polymorphic_on=parent.c.cls)

        # Child1 rows appear as secondary.right_id, Child2 rows as
        # secondary.left_id; uselist=False makes left_child2 scalar.
        mapper(Child1, child1,
               inherits=Parent,
               polymorphic_identity='child1',
               properties={
                   'left_child2':relationship(
                       Child2,
                       secondary=secondary,
                       primaryjoin=parent.c.id == secondary.c.right_id,
                       secondaryjoin=parent.c.id == secondary.c.left_id,
                       uselist=False,
                       backref="right_children")})

        mapper(Child2, child2,
               inherits=Parent,
               polymorphic_identity='child2')

    def test_query_crit(self):
        Child1, Child2 = self.classes.Child1, self.classes.Child2
        sess = create_session()
        c11, c12, c13 = Child1(), Child1(), Child1()
        c21, c22, c23 = Child2(), Child2(), Child2()
        c11.left_child2 = c22
        c12.left_child2 = c22
        c13.left_child2 = c23
        sess.add_all([c11, c12, c13, c21, c22, c23])
        sess.flush()

        # test that the join to Child2 doesn't alias Child1 in the select
        eq_(set(sess.query(Child1).join(Child1.left_child2)),
            set([c11, c12, c13]))

        eq_(set(sess.query(Child1, Child2).join(Child1.left_child2)),
            set([(c11, c22), (c12, c22), (c13, c23)]))

        # test __eq__() on property is annotating correctly
        eq_(set(sess.query(Child2)
                .join(Child2.right_children)
                .filter(Child1.left_child2 == c22)),
            set([c22]))

        # test the same again
        self.assert_compile(
            sess.query(Child2)
            .join(Child2.right_children)
            .filter(Child1.left_child2 == c22)
            .with_labels().statement,
            "SELECT child2.id AS child2_id, parent.id AS parent_id, "
            "parent.cls AS parent_cls FROM secondary AS secondary_1, "
            "parent JOIN child2 ON parent.id = child2.id JOIN secondary AS "
            "secondary_2 ON parent.id = secondary_2.left_id JOIN (SELECT "
            "parent.id AS parent_id, parent.cls AS parent_cls, child1.id AS "
            "child1_id FROM parent JOIN child1 ON parent.id = child1.id) AS "
            "anon_1 ON anon_1.parent_id = secondary_2.right_id WHERE "
            "anon_1.parent_id = secondary_1.right_id AND :param_1 = "
            "secondary_1.left_id",
            dialect=default.DefaultDialect()
        )

    def test_eager_join(self):
        Child1, Child2 = self.classes.Child1, self.classes.Child2
        sess = create_session()
        c1 = Child1()
        c1.left_child2 = Child2()
        sess.add(c1)
        sess.flush()

        # test that the splicing of the join works here, doesn't break in
        # the middle of "parent join child1"
        q = sess.query(Child1).options(joinedload('left_child2'))
        self.assert_compile(q.limit(1).with_labels().statement,
            "SELECT anon_1.child1_id AS anon_1_child1_id, anon_1.parent_id "
            "AS anon_1_parent_id, anon_1.parent_cls AS anon_1_parent_cls, "
            "anon_2.child2_id AS anon_2_child2_id, anon_2.parent_id AS "
            "anon_2_parent_id, anon_2.parent_cls AS anon_2_parent_cls FROM "
            "(SELECT child1.id AS child1_id, parent.id AS parent_id, "
            "parent.cls AS parent_cls FROM parent JOIN child1 ON parent.id = "
            "child1.id LIMIT :param_1) AS anon_1 LEFT OUTER JOIN secondary "
            "AS secondary_1 ON anon_1.parent_id = secondary_1.right_id LEFT "
            "OUTER JOIN (SELECT parent.id AS parent_id, parent.cls AS "
            "parent_cls, child2.id AS child2_id FROM parent JOIN child2 ON "
            "parent.id = child2.id) AS anon_2 ON anon_2.parent_id = "
            "secondary_1.left_id",
            {'param_1':1},
            dialect=default.DefaultDialect())

        # another way to check
        assert q.limit(1).with_labels().subquery().count().scalar() == 1

        assert q.first() is c1

    def test_subquery_load(self):
        Child1, Child2 = self.classes.Child1, self.classes.Child2
        sess = create_session()
        c1 = Child1()
        c1.left_child2 = Child2()
        sess.add(c1)
        sess.flush()
        sess.expunge_all()

        # subquery eager load must populate left_child2 without lazy loads
        query_ = sess.query(Child1).options(subqueryload('left_child2'))
        for row in query_.all():
            assert row.left_child2
class EagerToSubclassTest(fixtures.MappedTest):
    """Test eager loads to subclass mappers"""

    run_setup_classes = 'once'
    run_setup_mappers = 'once'
    run_inserts = 'once'
    run_deletes = None

    @classmethod
    def define_tables(cls, metadata):
        Table('parent', metadata,
              Column('id', Integer,
                     primary_key=True,
                     test_needs_autoincrement=True),
              Column('data', String(10)))

        # ``base``/``sub`` form the joined-table target of Parent.children;
        # the base also links to ``related``.
        Table('base', metadata,
              Column('id', Integer,
                     primary_key=True,
                     test_needs_autoincrement=True),
              Column('type', String(10)),
              Column('related_id', Integer,
                     ForeignKey('related.id')))

        Table('sub', metadata,
              Column('id', Integer,
                     ForeignKey('base.id'),
                     primary_key=True),
              Column('data', String(10)),
              Column('parent_id', Integer,
                     ForeignKey('parent.id'),
                     nullable=False))

        Table('related', metadata,
              Column('id', Integer,
                     primary_key=True,
                     test_needs_autoincrement=True),
              Column('data', String(10)))

    @classmethod
    def setup_classes(cls):
        class Parent(cls.Comparable):
            pass

        class Base(cls.Comparable):
            pass

        class Sub(Base):
            pass

        class Related(cls.Comparable):
            pass

    @classmethod
    def setup_mappers(cls):
        sub = cls.tables.sub
        Sub = cls.classes.Sub
        base = cls.tables.base
        Base = cls.classes.Base
        parent = cls.tables.parent
        Parent = cls.classes.Parent
        related = cls.tables.related
        Related = cls.classes.Related

        # Parent.children points straight at the Sub subclass.
        mapper(Parent, parent,
               properties={'children':relationship(Sub, order_by=sub.c.data)})

        mapper(Base, base,
               polymorphic_on=base.c.type,
               polymorphic_identity='b',
               properties={'related':relationship(Related)})

        mapper(Sub, sub,
               inherits=Base,
               polymorphic_identity='s')

        mapper(Related, related)

    @classmethod
    def insert_data(cls):
        # p1 and p2 are shared module-level fixtures used by the tests below.
        global p1, p2
        Parent = cls.classes.Parent
        Sub = cls.classes.Sub
        Related = cls.classes.Related
        sess = Session()
        r1, r2 = Related(data='r1'), Related(data='r2')
        s1 = Sub(data='s1', related=r1)
        s2 = Sub(data='s2', related=r2)
        s3 = Sub(data='s3')
        s4 = Sub(data='s4', related=r2)
        s5 = Sub(data='s5')
        p1 = Parent(data='p1', children=[s1, s2, s3])
        p2 = Parent(data='p2', children=[s4, s5])
        sess.add(p1)
        sess.add(p2)
        sess.commit()

    def test_joinedload(self):
        Parent = self.classes.Parent
        sess = Session()

        def go():
            eq_(sess.query(Parent)
                .options(joinedload(Parent.children)).all(),
                [p1, p2])
        # everything should come back in a single SELECT
        self.assert_sql_count(testing.db, go, 1)

    def test_contains_eager(self):
        Parent = self.classes.Parent
        Sub = self.classes.Sub
        sess = Session()

        def go():
            eq_(sess.query(Parent)
                .join(Parent.children)
                .options(contains_eager(Parent.children))
                .order_by(Parent.data, Sub.data).all(),
                [p1, p2])
        # the explicit join feeds the collection, so one SELECT total
        self.assert_sql_count(testing.db, go, 1)

    def test_subq_through_related(self):
        Parent = self.classes.Parent
        Sub = self.classes.Sub
        sess = Session()

        def go():
            eq_(sess.query(Parent)
                .options(subqueryload_all(Parent.children, Sub.related))
                .order_by(Parent.data).all(),
                [p1, p2])
        # one SELECT per level: parents, children, related
        self.assert_sql_count(testing.db, go, 3)
class SubClassEagerToSubClassTest(fixtures.MappedTest):
    """Test joinedloads from subclass to subclass mappers"""

    # Fixtures are built once for the whole class and never deleted
    # between tests, so the module-level p1/p2 globals stay valid.
    run_setup_classes = 'once'
    run_setup_mappers = 'once'
    run_inserts = 'once'
    run_deletes = None

    @classmethod
    def define_tables(cls, metadata):
        # Two joined-table inheritance hierarchies:
        #   parent <- subparent  and  base <- sub,
        # with sub carrying a required FK to subparent.
        Table('parent', metadata,
            Column('id', Integer,
                primary_key=True,
                test_needs_autoincrement=True),
            Column('type', String(10)),
        )
        Table('subparent', metadata,
            Column('id', Integer,
                ForeignKey('parent.id'),
                primary_key=True),
            Column('data', String(10)),
        )
        Table('base', metadata,
            Column('id', Integer,
                primary_key=True,
                test_needs_autoincrement=True),
            Column('type', String(10)),
        )
        Table('sub', metadata,
            Column('id', Integer,
                ForeignKey('base.id'),
                primary_key=True),
            Column('data', String(10)),
            Column('subparent_id', Integer,
                ForeignKey('subparent.id'),
                nullable=False)
        )

    @classmethod
    def setup_classes(cls):
        # Plain comparable fixture classes for both hierarchies.
        class Parent(cls.Comparable):
            pass

        class Subparent(Parent):
            pass

        class Base(cls.Comparable):
            pass

        class Sub(Base):
            pass

    @classmethod
    def setup_mappers(cls):
        sub = cls.tables.sub
        Sub = cls.classes.Sub
        base = cls.tables.base
        Base = cls.classes.Base
        parent = cls.tables.parent
        Parent = cls.classes.Parent
        subparent = cls.tables.subparent
        Subparent = cls.classes.Subparent

        # parent/subparent hierarchy, discriminated on parent.type.
        mapper(Parent, parent,
            polymorphic_on=parent.c.type,
            polymorphic_identity='b')
        # Subparent.children targets the Sub subclass of the other hierarchy.
        mapper(Subparent, subparent,
            inherits=Parent,
            polymorphic_identity='s',
            properties={
                'children':relationship(Sub, order_by=base.c.id)})

        # base/sub hierarchy, discriminated on base.type.
        mapper(Base, base,
            polymorphic_on=base.c.type,
            polymorphic_identity='b')
        mapper(Sub, sub,
            inherits=Base,
            polymorphic_identity='s')

    @classmethod
    def insert_data(cls):
        # p1/p2 are module-level fixtures shared by the test methods.
        global p1, p2
        Sub, Subparent = cls.classes.Sub, cls.classes.Subparent

        sess = create_session()
        p1 = Subparent(
            data='p1',
            children=[Sub(data='s1'), Sub(data='s2'), Sub(data='s3')])
        p2 = Subparent(
            data='p2',
            children=[Sub(data='s4'), Sub(data='s5')])
        sess.add(p1)
        sess.add(p2)
        sess.flush()

    def test_joinedload(self):
        # joinedload fetches parents and children in one SELECT, whether
        # the option is given by attribute or by string name.
        Subparent = self.classes.Subparent

        sess = create_session()
        def go():
            eq_(sess.query(Subparent)
                .options(joinedload(Subparent.children)).all(),
                [p1, p2])
        self.assert_sql_count(testing.db, go, 1)

        sess.expunge_all()
        def go():
            eq_(sess.query(Subparent)
                .options(joinedload("children")).all(),
                [p1, p2])
        self.assert_sql_count(testing.db, go, 1)

    def test_contains_eager(self):
        # contains_eager over an explicit join also costs one SELECT.
        Subparent = self.classes.Subparent

        sess = create_session()
        def go():
            eq_(sess.query(Subparent)
                .join(Subparent.children)
                .options(contains_eager(Subparent.children)).all(),
                [p1, p2])
        self.assert_sql_count(testing.db, go, 1)

        sess.expunge_all()
        def go():
            eq_(sess.query(Subparent)
                .join(Subparent.children)
                .options(contains_eager("children")).all(),
                [p1, p2])
        self.assert_sql_count(testing.db, go, 1)

    def test_subqueryload(self):
        # subqueryload issues the parent SELECT plus one subquery: 2 total.
        Subparent = self.classes.Subparent

        sess = create_session()
        def go():
            eq_(sess.query(Subparent)
                .options(subqueryload(Subparent.children)).all(),
                [p1, p2])
        self.assert_sql_count(testing.db, go, 2)

        sess.expunge_all()
        def go():
            eq_(sess.query(Subparent)
                .options(subqueryload("children")).all(),
                [p1, p2])
        self.assert_sql_count(testing.db, go, 2)
| |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 Cloudscaling Group, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
The MatchMaker classes should except a Topic or Fanout exchange key and
return keys for direct exchanges, per (approximate) AMQP parlance.
"""
import contextlib
import eventlet
from oslo.config import cfg
from savanna.openstack.common.gettextutils import _ # noqa
from savanna.openstack.common import log as logging
matchmaker_opts = [
cfg.IntOpt('matchmaker_heartbeat_freq',
default=300,
help='Heartbeat frequency'),
cfg.IntOpt('matchmaker_heartbeat_ttl',
default=600,
help='Heartbeat time-to-live.'),
]
CONF = cfg.CONF
CONF.register_opts(matchmaker_opts)
LOG = logging.getLogger(__name__)
contextmanager = contextlib.contextmanager
class MatchMakerException(Exception):
    """Signifies that a match could not be found by the MatchMaker."""
    # Default, translatable message body.
    message = _("Match not found by MatchMaker.")
class Exchange(object):
    """Implements lookups.

    Subclass this to support hashtables, dns, etc.
    """
    def __init__(self):
        pass

    def run(self, key):
        """Resolve *key* to a list of (key, host) pairs; subclasses override."""
        raise NotImplementedError()
class Binding(object):
    """A binding on which to perform a lookup."""
    def __init__(self):
        pass

    def test(self, key):
        """Return True if *key* matches this binding; subclasses override."""
        raise NotImplementedError()
class MatchMakerBase(object):
    """Match Maker Base Class.

    Build off HeartbeatMatchMakerBase if building a heartbeat-capable
    MatchMaker.
    """
    def __init__(self):
        # Array of tuples. Index [2] toggles negation, [3] is last-if-true
        self.bindings = []

        self.no_heartbeat_msg = _('Matchmaker does not implement '
                                  'registration or heartbeat.')

    def register(self, key, host):
        """Register a host on a backend.

        Heartbeats, if applicable, may keepalive registration.
        """
        pass

    def ack_alive(self, key, host):
        """Acknowledge that a key.host is alive.

        Used internally for updating heartbeats, but may also be used
        publically to acknowledge a system is alive (i.e. rpc message
        successfully sent to host)
        """
        pass

    def is_alive(self, topic, host):
        """Checks if a host is alive."""
        pass

    def expire(self, topic, host):
        """Explicitly expire a host's registration."""
        pass

    def send_heartbeats(self):
        """Send all heartbeats.

        Use start_heartbeat to spawn a heartbeat greenthread,
        which loops this method.
        """
        pass

    def unregister(self, key, host):
        """Unregister a topic."""
        pass

    def start_heartbeat(self):
        """Spawn heartbeat greenthread."""
        pass

    def stop_heartbeat(self):
        """Destroys the heartbeat greenthread."""
        pass

    def add_binding(self, binding, rule, last=True):
        # Negation flag (index 2) is always False; see NOTE below.
        self.bindings.append((binding, rule, False, last))

    #NOTE(ewindisch): kept the following method in case we implement the
    # underlying support.
    #def add_negate_binding(self, binding, rule, last=True):
    #    self.bindings.append((binding, rule, True, last))

    def queues(self, key):
        """Return the worker queues for *key* by running matching bindings."""
        workers = []

        # bit is for negate bindings - if we choose to implement it.
        # last stops processing rules if this matches.
        for (binding, exchange, bit, last) in self.bindings:
            if binding.test(key):
                workers.extend(exchange.run(key))

                # Support last.
                if last:
                    return workers
        return workers
class HeartbeatMatchMakerBase(MatchMakerBase):
    """Base for a heart-beat capable MatchMaker.

    Provides common methods for registering, unregistering, and maintaining
    heartbeats.
    """
    def __init__(self):
        # Hosts with at least one registration; gates start_heartbeat().
        self.hosts = set()
        # Greenthread handle for the heartbeat loop (None until started).
        self._heart = None
        # Maps (key, host) tuples -> host for each live registration.
        self.host_topic = {}

        super(HeartbeatMatchMakerBase, self).__init__()

    def send_heartbeats(self):
        """Send all heartbeats.

        Use start_heartbeat to spawn a heartbeat greenthread,
        which loops this method.
        """
        # host_topic keys are (key, host) tuples, so iterating the dict
        # unpacks both values directly.
        for key, host in self.host_topic:
            self.ack_alive(key, host)

    def ack_alive(self, key, host):
        """Acknowledge that a host.topic is alive.

        Used internally for updating heartbeats, but may also be used
        publically to acknowledge a system is alive (i.e. rpc message
        successfully sent to host)
        """
        raise NotImplementedError("Must implement ack_alive")

    def backend_register(self, key, host):
        """Implements registration logic.

        Called by register(self,key,host)
        """
        raise NotImplementedError("Must implement backend_register")

    def backend_unregister(self, key, key_host):
        """Implements de-registration logic.

        Called by unregister(self,key,host)
        """
        raise NotImplementedError("Must implement backend_unregister")

    def register(self, key, host):
        """Register a host on a backend.

        Heartbeats, if applicable, may keepalive registration.
        """
        self.hosts.add(host)
        self.host_topic[(key, host)] = host
        # Backends store the fully-qualified "key.host" form.
        key_host = '.'.join((key, host))

        self.backend_register(key, key_host)

        self.ack_alive(key, host)

    def unregister(self, key, host):
        """Unregister a topic."""
        if (key, host) in self.host_topic:
            del self.host_topic[(key, host)]

        self.hosts.discard(host)
        self.backend_unregister(key, '.'.join((key, host)))

        LOG.info(_("Matchmaker unregistered: %(key)s, %(host)s"),
                 {'key': key, 'host': host})

    def start_heartbeat(self):
        """Implementation of MatchMakerBase.start_heartbeat.

        Launches greenthread looping send_heartbeats(),
        yielding for CONF.matchmaker_heartbeat_freq seconds
        between iterations.
        """
        if not self.hosts:
            raise MatchMakerException(
                _("Register before starting heartbeat."))

        def do_heartbeat():
            while True:
                self.send_heartbeats()
                eventlet.sleep(CONF.matchmaker_heartbeat_freq)

        self._heart = eventlet.spawn(do_heartbeat)

    def stop_heartbeat(self):
        """Destroys the heartbeat greenthread."""
        if self._heart:
            self._heart.kill()
class DirectBinding(Binding):
    """Matches keys that embed a host via a '.' character.

    Although dots are used in the key, the behavior here is that it maps
    directly to a host, thus direct.
    """
    def test(self, key):
        # A dotted key addresses a specific host.
        return '.' in key
class TopicBinding(Binding):
    """Matches 'bare' keys that contain no dots.

    AMQP generally considers topic exchanges to be those *with* dots, but
    we deviate here in terminology, as the behavior here matches that of a
    topic exchange (whereas where there are dots, behavior matches that of
    a direct exchange).
    """
    def test(self, key):
        # Bare (undotted) keys are topics.
        return '.' not in key
class FanoutBinding(Binding):
    """Match on fanout keys, where the key starts with the 'fanout~' prefix."""
    def test(self, key):
        return key.startswith('fanout~')
class StubExchange(Exchange):
    """Exchange that does nothing."""
    def run(self, key):
        # Pass the key through unchanged with no host resolution.
        return [(key, None)]
class LocalhostExchange(Exchange):
    """Exchange where all direct topics are local."""
    def __init__(self, host='localhost'):
        self.host = host
        # Bug fix: previously called super(Exchange, self).__init__(),
        # which names the wrong class and skips Exchange.__init__ in the
        # MRO; cooperative init must name this class.
        super(LocalhostExchange, self).__init__()

    def run(self, key):
        """Resolve *key* to '<topic>.<host>' on the configured local host."""
        return [('.'.join((key.split('.')[0], self.host)), self.host)]
class DirectExchange(Exchange):
    """Exchange where all topic keys are split, sending to second half.

    i.e. "compute.host" sends a message to "compute.host" running on "host"
    """
    def __init__(self):
        # Bug fix: previously called super(Exchange, self).__init__(),
        # which names the wrong class and skips Exchange.__init__ in the
        # MRO; cooperative init must name this class.
        super(DirectExchange, self).__init__()

    def run(self, key):
        """Return [(key, host)] where host is everything after the first '.'."""
        e = key.split('.', 1)[1]
        return [(key, e)]
class MatchMakerLocalhost(MatchMakerBase):
    """Match Maker where all bare topics resolve to localhost.

    Useful for testing.
    """
    def __init__(self, host='localhost'):
        super(MatchMakerLocalhost, self).__init__()
        # Binding order matters: fanout and direct keys are claimed first;
        # bare topic keys fall through to the localhost exchange.
        ordered_bindings = (
            (FanoutBinding(), LocalhostExchange(host)),
            (DirectBinding(), DirectExchange()),
            (TopicBinding(), LocalhostExchange(host)),
        )
        for binding, exchange in ordered_bindings:
            self.add_binding(binding, exchange)
class MatchMakerStub(MatchMakerBase):
    """Match Maker where topics are untouched.

    Useful for testing, or for AMQP/brokered queues.
    Will not work where knowledge of hosts is known (i.e. zeromq)
    """
    def __init__(self):
        super(MatchMakerStub, self).__init__()
        # Every binding kind routes through a no-op exchange; order matches
        # the other matchmakers for consistency.
        for binding_cls in (FanoutBinding, DirectBinding, TopicBinding):
            self.add_binding(binding_cls(), StubExchange())
| |
from __future__ import absolute_import, print_function

import warnings

from ._version import get_versions
from . import utils

# Resolve the package version: conda builds ship a __conda_version__
# module whose value takes precedence; otherwise fall back to the
# versioneer-generated version string.
try:
    from .__conda_version__ import conda_version
    __version__ = conda_version.replace("'","")
    del conda_version
except ImportError:
    __version__ = get_versions()['version']
    del get_versions

# Resources object from the most recent load_notebook() call in this
# kernel, or None if BokehJS has not been loaded yet.
_notebook_loaded = None
def load_notebook(resources=None, verbose=False, force=False, skip=False):
    ''' Prepare the IPython notebook for displaying Bokeh plots.

    Args:
        resources (Resource, optional) :
            how and where to load BokehJS from
        verbose (bool, optional) :
            whether to report detailed settings (default: False)
        force (bool, optional) :
            whether to skip IPython notebook check (default: False)
        skip (bool, optional) :
            passed through to the load template; whether to skip loading
            BokehJS resources (default: False)

    Returns:
        None
    '''
    global _notebook_loaded

    # It's possible the IPython folks will change things in the future, `force`
    # parameter provides an escape hatch as long as `displaypub` works
    if not force:
        notebook = False
        try:
            notebook = 'notebook' in get_ipython().config.IPKernelApp.parent_appname
        except Exception:
            pass
        if not notebook:
            raise RuntimeError('load_notebook only works inside an '
                               'IPython notebook, try using force=True.')

    from .resources import INLINE
    from .templates import NOTEBOOK_LOAD, RESOURCES

    if resources is None:
        resources = INLINE

    plot_resources = RESOURCES.render(
        js_raw = resources.js_raw,
        css_raw = resources.css_raw,
        js_files = resources.js_files,
        css_files = resources.css_files,
    )

    if resources.mode == 'inline':
        js_info = 'inline'
        css_info = 'inline'
    else:
        js_info = resources.js_files[0] if len(resources.js_files) == 1 else resources.js_files
        css_info = resources.css_files[0] if len(resources.css_files) == 1 else resources.css_files

    # Bug fix: this local was previously named `warnings`, shadowing the
    # stdlib `warnings` module imported at the top of this file.
    warning_msgs = ["Warning: " + msg['text'] for msg in resources.messages
                    if msg['type'] == 'warn']

    if _notebook_loaded:
        warning_msgs.append('Warning: BokehJS previously loaded')

    # Remember the resources used so repeat loads can be warned about.
    _notebook_loaded = resources

    html = NOTEBOOK_LOAD.render(
        plot_resources = plot_resources,
        logo_url = resources.logo_url,
        verbose = verbose,
        js_info = js_info,
        css_info = css_info,
        bokeh_version = __version__,
        warnings = warning_msgs,
        skip = skip,
    )
    utils.publish_display_data({'text/html': html})
from .settings import settings
from . import sampledata
def _print_versions():
    """Return a human-readable summary of the Bokeh, Python and platform
    versions (used by print_versions and report_issue)."""
    import platform as pt
    message = """
    Bokeh version: %s
    Python version: %s-%s
    Platform: %s
    """ % (__version__, pt.python_version(),
           pt.python_implementation(), pt.platform())
    return(message)
def print_versions():
    """ Print the versions for Bokeh and the current Python and OS.

    Returns:
        None
    """
    version_summary = _print_versions()
    print(version_summary)
def report_issue(number=None, owner="ContinuumIO", repo="bokeh",
                 versions=True, browser=True):
    """ Open or add to a Github issue programmatically.

    This interactive function will ask you for some minimal content
    and submit a new Github issue, adding information about your
    current environment.

    You can also call this function with a specific issue number to
    add a comment to an already open issue.

    Args:
        number (int, optional) :
            Omit to create a new issue, otherwise supply to comment on an
            already created issue. (default: None)
        owner (str, optional) : owner username (default: "ContinuumIO")
        repo (str, optional) : repository name (default: "bokeh")
        versions (bool, optional) :
            Whether to print system information. If True, add the current
            system info to the end of the issue description. (default: True)
        browser (bool, optional) :
            Whether to open a browser automatically. If True, open a browser
            to the GitHub issue page (default: True)

    Notes:
        Setting the environment variables GHUSER (Github username) and
        GHPASS (Github password) will supply those values automatically
        and streamline the dialog. Additionally, this function can report
        on any GitHub project by changing the default parameters.

    Returns:
        None
    """
    import requests
    import json
    import os
    import webbrowser

    from six.moves import input
    from six.moves.urllib.parse import urljoin

    print("This is the Bokeh reporting engine.\n\n"
          "You will be guided to build a GitHub issue.\n")

    if number is None:
        title = input('Issue title: ')
        body = input('Description: ')
    else:
        body = input('Write your comment here: ')

    ghuser, ghpass = (os.environ.get(x) for x in ["GHUSER", "GHPASS"])
    if ghuser is None:
        ghuser = input('GitHub username: ')
    else:
        print("Found GHUSER, using for GitHub username")
    if ghpass is None:
        # NOTE(review): input() echoes the password to the terminal;
        # getpass.getpass would be safer but changes interactive behavior.
        ghpass = input('GitHub password: ')
    else:
        print("Found GHPASS, using for GitHub password")

    base = "https://api.github.com"
    if number is None:
        url = "/".join(["repos", owner, repo, "issues"])
        if versions:
            data = {"title": title, "body": body + "\nSystem information:" + _print_versions()}
        else:
            data = {"title": title, "body": body}
    else:
        url = "/".join(["repos", owner, repo, "issues", str(number), "comments"])
        if versions:
            data = {"body": body + "\nSystem information:" + _print_versions()}
        else:
            data = {"body": body}
    issues_url = urljoin(base, url)

    print("\nPreview:\n")
    # Bug fix: comment payloads (when `number` is given) have no "title"
    # key, so the previous unconditional data["title"] lookup raised
    # KeyError before the comment could ever be submitted.
    if "title" in data:
        print("Title: ", data["title"])
    print("Description:\n\n")
    print(data["body"])

    value = input('Submit (y/n)? ')
    if value.lower() in ["true", "yes", "y", "1"]:
        r = requests.post(issues_url,
                          auth=(ghuser, ghpass),
                          headers={'Content-Type': 'application/json'},
                          data=json.dumps(data))
        if r.status_code == 201:
            g = requests.get(issues_url)
            if number is None:
                print("Issue successfully submitted.")
                if browser:
                    webbrowser.open_new(g.json()[0].get("html_url"))
            else:
                print("Comment successfully submitted.")
                g = requests.get(issues_url)
                if browser:
                    webbrowser.open_new(g.json()[-1].get("html_url"))
        else:
            print("Something failed, please check your username and password.")
    else:
        print("Issue not submitted.")
def test(verbosity=1, xunitfile=None, exit=False):
    """ Run the full Bokeh test suite, and output the results of the tests
    to sys.stdout.

    This function uses nosetests to discover which tests to run, and will
    run tests in any 'tests' subdirectory within the Bokeh module.

    Args:
        verbosity (int, optional) :
            Acceptable values are 0 (less verbose) to 2 (most verbose)
        xunitfile (str, optional) :
            Write xunit-style XML test results to a given filename. This
            is useful for running tests on a CI server. (default: None)
        exit (bool, optional) :
            Whether to return or exit. If True, call sys.exit with an
            error code after the tests are finished. (default: False)

    Returns:
        int : nose return code
    """
    import os
    import sys

    import nose

    argv = ['nosetests', '--verbosity=%d' % verbosity]
    if xunitfile:
        # Emit xunit-style XML results when requested (useful on CI).
        argv.extend(['--with-xunit', '--xunit-file=%s' % xunitfile])
    # Keep nose's own logging quiet.
    argv.append('--logging-level=WARN')

    # Discover every 'tests' subdirectory under the package root.
    package_root = os.path.dirname(__file__)
    for dirpath, dirnames, _filenames in os.walk(package_root):
        if 'tests' in dirnames:
            tests_path = os.path.join(dirpath, 'tests')
            argv.append(tests_path)
            print('Test dir: %s' % tests_path[len(package_root) + 1:])

    # Print versions up front (handy when reporting problems).
    print_versions()
    sys.stdout.flush()

    return nose.main(argv=argv, exit=exit)
| |
# -*- coding: utf-8 -*-
#
# Copyright 2014-2015 BigML
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Options for BigMLer main subcommand processing
"""
def get_main_options(defaults=None, constants=None):
"""Main subcommand-related options
"""
if defaults is None:
defaults = {}
if constants is None:
constants = {}
max_models = constants.get('MAX_MODELS')
plurality = constants.get('PLURALITY')
last = constants.get('LAST_PREDICTION')
options = {
# If a BigML model is provided, the script will use it to generate
# predictions.
'--model': {
'action': 'store',
'dest': 'model',
'default': defaults.get('model', None),
'help': "BigML model Id."},
# Use it to compute predictions remotely.
'--remote': {
'action': 'store_true',
'dest': 'remote',
'default': defaults.get('remote', False),
'help': "Compute predictions remotely."},
# The path to a file containing model ids.
'--models': {
'action': 'store',
'dest': 'models',
'default': defaults.get('models', None),
'help': ("Path to a file containing model/ids. One model"
" per line (e.g., model/50a206a8035d0706dc000376"
").")},
# If a BigML json file containing a model structure is provided,
# the script will use it.
'--model-file': {
'action': 'store',
'dest': 'model_file',
'default': defaults.get('model_file', None),
'help': "BigML model JSON structure file."},
# Sets pruning.
'--pruning': {
'action': 'store',
'default': defaults.get('pruning', "smart"),
'choices': ["smart", "statistical", "no-pruning"],
'help': ("Set pruning type: smart, statistical,"
" no-pruning.")},
# Number of models to create when using ensembles.
'--number-of-models': {
'action': 'store',
'dest': 'number_of_models',
'default': defaults.get('number_of_models', 1),
'type': int,
'help': ("Number of models to create when using"
" ensembles.")},
# Replacement to use when using bagging.
'--replacement': {
'action': 'store_true',
'default': defaults.get('replacement', False),
'help': "Use replacement when sampling."},
# Max number of models to predict from in parallel.
'--max-batch-models': {
'action': 'store',
'dest': 'max_batch_models',
'default': defaults.get('max_batch_models', max_models),
'type': int,
'help': ("Max number of models to predict from"
" in parallel.")},
# Randomize feature selection at each split.
'--randomize': {
'action': 'store_true',
'dest': 'randomize',
'default': defaults.get('randomize', False),
'help': "Randomize feature selection at each split."},
# Make model a public black-box model.
'--black-box': {
'action': 'store_true',
'dest': 'black_box',
'default': defaults.get('black_box', False),
'help': "Make generated model black-box."},
# Make model a public white-box model.
'--white-box': {
'action': 'store_true',
'dest': 'white_box',
'default': defaults.get('white_box', False),
'help': "Make generated model white-box."},
# Set a price tag to your white-box model.
'--model-price': {
'action': 'store',
'dest': 'model_price',
'type': float,
'default': defaults.get('model_price', 0.0),
'help': ("The price other users must pay to clone your"
" model.")},
# Set credits per prediction to your white box or black box models.
'--cpp': {
'action': 'store',
'type': float,
'default': defaults.get('cpp', 0.0),
'help': ("The number of credits that other users will"
" consume to make a prediction with your"
" model.")},
# Does not create a model just a dataset.
'--no-model': {
'action': 'store_true',
'dest': 'no_model',
'default': defaults.get('no_model', False),
'help': "Do not create a model."},
# Prediction directories to be combined.
'--combine-votes': {
'action': 'store',
'dest': 'votes_dirs',
'default': defaults.get('combine_votes', None),
'help': ("Comma separated list of"
" directories that contain models' votes"
" for the same test set.")},
# Method to combine votes in multiple models predictions
'--method': {
'action': 'store',
'dest': 'method',
'default': defaults.get('method', plurality),
'choices': ["plurality", "confidence weighted",
"probability weighted", "threshold",
"combined"],
'help': ("Method to combine votes from ensemble"
" predictions. Allowed methods: plurality"
", \"confidence weighted\", "
" \"probability weighted\", threshold. Also"
" \"combined\" for datasets with subsets of"
" categories")},
# Evaluate a model
'--evaluate': {
'action': 'store_true',
'help': "Evaluate command."},
# Max number of models to create in parallel.
'--max-parallel-models': {
"action": 'store',
"dest": 'max_parallel_models',
"default": defaults.get('max_parallel_models', 1),
"type": int,
"help": "Max number of models to create in parallel."},
# Max number of evaluations to create in parallel.
'--max-parallel-evaluations': {
"action": 'store',
"dest": 'max_parallel_evaluations',
"default": defaults.get('max_parallel_evaluations', 1),
"type": int,
"help": ("Max number of evaluations to create in"
" parallel.")},
# The name of the field that represents the objective field (i.e.,
# class or label) or its column number.
'--objective': {
"action": 'store',
"dest": 'objective_field',
"default": defaults.get('objective', None),
"help": ("The column number of the Objective Field"
" or its name, if headers are given.")},
# The path to a file containing the mapping of fields' ids from
# the test dataset fields to the model fields.
'--fields-map': {
'action': 'store',
'dest': 'fields_map',
'default': defaults.get('fields_map', None),
'help': ("Path to a csv file describing fields mapping. "
"One definition per line (e.g., 00000,"
"00000a).")},
# Set the part of training data to be held out for cross-validation
'--cross-validation-rate': {
'action': 'store',
'dest': 'cross_validation_rate',
'type': float,
'default': defaults.get('cross_validation_rate', 0.0),
'help': ("Part of training data to be held out for "
"cross-validation.")},
# Number of evaluations used in cross-validation
'--number-of-evaluations': {
'action': 'store',
'dest': 'number_of_evaluations',
'type': int,
'default': defaults.get('number_of_evaluations', 0),
'help': ("Number of evaluations used for"
" cross-validation.")},
# If a BigML ensemble is provided, the script will use it to generate
# predictions.
'--ensemble': {
'action': 'store',
'dest': 'ensemble',
'default': defaults.get('ensemble', None),
'help': "BigML ensemble Id."},
# If a BigML ensemble is created, creation will use this task-level
# parallelism
'--tlp': {
'action': 'store',
'dest': 'tlp',
'default': defaults.get('tlp', 1),
'type': int,
'help': ("BigML ensemble's creation task-level"
" parallelism.")},
# Prediction log format: `short` will only log predictions, `long` will
# log also confidence information
'--prediction-info': {
'action': 'store',
'dest': 'prediction_info',
'default': defaults.get('prediction_info', 'normal'),
'choices': ["brief", "normal", "full", "full data"],
'help': ("Prediction log format: 'brief' will only "
"log predictions, 'normal' will write confidence"
" too, 'full' will write in a row the"
" input data that generates the prediction"
" followed by the latter.")},
# Multi-label. The objective field has multiple labels.
'--multi-label': {
'action': 'store_true',
'dest': 'multi_label',
'default': defaults.get('multi_label', False),
'help': ("The objective field has multiple labels that"
" should be treated independently.")},
# Prediction header. If set, headers are added to the prediction file.
'--prediction-header': {
'action': 'store_true',
'dest': 'prediction_header',
'default': defaults.get('prediction_header', False),
'help': "Headers are added to the prediction file."},
# Prediction fields. A comma-separated list of the fields that should
# be included in the prediction file.
'--prediction-fields': {
'action': 'store',
'dest': 'prediction_fields',
'default': defaults.get('prediction_fields', None),
'help': "Fields added to the prediction file."},
# Max number of ensembles to create in parallel.
'--max-parallel-ensembles': {
'action': 'store',
'dest': 'max_parallel_ensembles',
'default': defaults.get('max_parallel_ensembles', 1),
'type': int,
'help': "Max number of ensembles to create in parallel."},
# The path to a file containing ensemble ids.
'--ensembles': {
'action': 'store',
'dest': 'ensembles',
'default': defaults.get('ensembles', None),
'help': ("Path to a file containing ensemble/ids. One "
"ensemble per line (e.g., "
"ensemble/50a206a8035d0706dc000376).")},
# If a BigML json file containing a model structure is provided,
# the script will use it.
'--ensemble-file': {
'action': 'store',
'dest': 'ensemble_file',
'default': defaults.get('ensemble_file', None),
'help': "BigML ensemble JSON structure file."},
# Threshold. Minimum necessary number of votes to issue a prediction.
'--threshold': {
'action': 'store',
'dest': 'threshold',
'default': defaults.get('threshold', 1),
'type': int,
'help': ("Minimum number of votes to issue a prediction"
" for the threshold combiner.")},
# Class. Label for the category used in threshold voting predictions.
'--class': {
'action': 'store',
'dest': 'threshold_class',
'default': defaults.get('threshold_class', None),
'help': "Category used in threshold combiner method."},
# Max number of categories to be included in a model
'--max-categories': {
'action': 'store',
'dest': 'max_categories',
'default': defaults.get('max_categories', 0),
'type': int,
'help': ("Max number of categories to be included in"
" a model.")},
# No batch predictions. Remote predictions are created individually.
'--no-batch': {
'action': 'store_true',
'dest': 'no_batch',
'default': defaults.get('no_batch', False),
'help': "Create remote predictions individually."},
# Evaluations flag: excluding one dataset from the datasets list to
# test
'--dataset-off': {
'action': 'store_true',
'dest': 'dataset_off',
'default': defaults.get('dataset_off', False),
'help': ("Excluding one dataset at a time from the"
" datasets list to test.")},
# The path to a file containing model attributes.
'--model-attributes': {
'action': 'store',
'dest': 'model_attributes',
'default': defaults.get('model_attributes', None),
'help': ("Path to a json file describing model"
" attributes.")},
# Input fields to include in the model.
'--model-fields': {
"action": 'store',
"dest": 'model_fields',
"default": defaults.get('model_fields', None),
"help": ("Comma-separated list of input fields"
" (predictors) to create the model.")},
# Balance. Automatically balance all the classes evenly.
'--balance': {
"action": 'store_true',
"dest": 'balance',
"default": defaults.get('balance', False),
"help": ("Automatically balance all objective classes"
" evenly.")},
# Balance. Do not automatically balance all the classes evenly.
# (opposed to balance)
'--no-balance': {
"action": 'store_false',
"dest": 'balance',
"default": defaults.get('balance', False),
"help": ("Do not automatically balance all objective"
" classes evenly.")},
# Node threshold. Maximum number of nodes in the tree.
'--node-threshold': {
'action': 'store',
'dest': 'node_threshold',
'default': defaults.get('node_threshold', 0),
'type': int,
'help': "Maximum number of nodes in the model."},
# The path to a file containing ensemble attributes.
'--ensemble-attributes': {
'action': 'store',
'dest': 'ensemble_attributes',
'default': defaults.get('ensemble_attributes', None),
'help': ("Path to a json file describing ensemble"
" attributes.")},
# The path to a file containing evaluation attributes.
'--evaluation-attributes': {
'action': 'store',
'dest': 'evaluation_attributes',
'default': defaults.get('evaluation_attributes', None),
'help': ("Path to a json file describing evaluation"
" attributes.")},
# The path to a file containing batch prediction attributes.
'--batch-prediction-attributes': {
'action': 'store',
'dest': 'batch_prediction_attributes',
'default': defaults.get('batch_prediction_attributes', None),
'help': ("Path to a json file describing batch prediction"
" attributes.")},
# Weight-field. Use the contents of the given field as weights.
'--weight-field': {
'action': 'store',
'dest': 'weight_field',
'default': defaults.get('weight_field', None),
'help': ("Sets the name (or column) of the field"
" that contains the weights for the instances.")},
# Objective-weights. Path a to a CSV file of class, weight pairs.
'--objective-weights': {
'action': 'store',
'dest': 'objective_weights',
'default': defaults.get('objective_weights', None),
'help': "Path to a CSV file of class, weight pairs."},
# Strategy used in predictions when a missing value is found for the
# field used to split the node.
'--missing-strategy': {
'action': 'store',
'dest': 'missing_strategy',
'default': defaults.get('missing_strategy', last),
'choices': ["last", "proportional"],
'help': ("Strategy used when the field used in the split"
" to next nodes is missing in the input data."
" Allowed values: last or proportional")},
# Report. Additional output report formats
'--reports': {
'action': 'store',
'dest': 'reports',
'nargs': '*',
'default': defaults.get('reports', []),
'choices': ["gazibit"],
'help': "Output report formats."},
# Set it to use the missing splits operators: including missing values
# in tree branches.
'--missing-splits': {
'action': 'store_true',
'dest': 'missing_splits',
'default': defaults.get('missing_splits', False),
'help': ("Accept missing values as valid in some branches of the"
"tree.")},
# Disables reports upload.
'--no-upload': {
'action': 'store_false',
'dest': 'upload',
'default': defaults.get('upload', True),
'help': "Disables upload for reports"},
# Use it to compute predictions locally.
'--local': {
'action': 'store_false',
'dest': 'remote',
'default': defaults.get('remote', False),
'help': "Compute predictions locally"},
# Deactivate replacement to use when using bagging.
'--no-replacement': {
'action': 'store_false',
'dest': 'replacement',
'default': defaults.get('replacement', False),
'help': "Don't use replacement when sampling."},
# Doesn't randomize feature selection at each split.
'--no-randomize': {
'action': 'store_false',
'dest': 'randomize',
'default': defaults.get('randomize', False),
'help': ("Doesn't randomize feature selection at each"
" split.")},
# Doesn't make model a public black-box model.
'--no-black-box': {
'action': 'store_false',
'dest': 'black_box',
'default': defaults.get('black_box', False),
'help': "Doesn't make generated model black-box."},
# Doesn't make model a public white-box model.
'--no-white-box': {
'action': 'store_false',
'dest': 'white_box',
'default': defaults.get('white_box', False),
'help': "Doesn't make generated model white-box."},
# Create a model just a dataset.
'--no-no-model': {
'action': 'store_false',
'dest': 'no_model',
'default': defaults.get('no_model', False),
'help': "Create a model."},
# Don't clear global bigmler log files
'--no-clear-logs': {
'action': 'store_false',
'dest': 'clear_logs',
'default': defaults.get('clear_logs', False),
'help': "Don't clear global bigmler log files."},
# Don't store the retrieved resources in the output directory
'--no-store': {
'action': 'store_false',
'dest': 'store',
'default': defaults.get('store', False),
'help': ("Don't store the retrieved resources in the"
" output directory.")},
# Multi-label. The objective field hasn't multiple labels.
'--no-multi-label': {
'action': 'store_false',
'dest': 'multi_label',
'default': defaults.get('multi_label', False),
'help': "The objective field has not multiple labels."},
# Prediction-header.
'--no-prediction-header': {
'action': 'store_false',
'dest': 'prediction_header',
'default': defaults.get('prediction_header', False),
'help': "Headers are not added to the prediction file."},
# Batch predictions. Remote predictions are created in batch mode.
'--batch': {
'action': 'store_false',
'dest': 'no_batch',
'default': defaults.get('no_batch', False),
'help': "Create remote predictions in batch."},
# Multi-dataset. Generating a new dataset from a list of existing
# datasets.
'--no-multi-dataset': {
'action': 'store_false',
'dest': 'multi_dataset',
'default': defaults.get('multi_dataset', False),
'help': "Do not generate a new dataset."},
# Shared. Shares all shareable resources and uses its shared links in
# reports
'--unshared': {
'action': 'store_false',
'dest': 'shared',
'default': defaults.get('shared', False),
'help': ("Share resources and use its shared urls "
" in reports.")},
# Enables reports upload.
'--upload': {
'action': 'store_true',
'dest': 'upload',
'default': defaults.get('upload', True),
'help': "Enables upload for reports"},
# Dataset-off. Turning off the dataset-off flag.
'--no-dataset-off': {
'action': 'store_false',
'dest': 'dataset_off',
'default': defaults.get('dataset_off', False),
'help': "Turning off the dataset-off flag."},
# No missing_splits used: Don't include missing values in branches
# of the tree.
'--no-missing-splits': {
'action': 'store_false',
'dest': 'missing_splits',
'default': defaults.get('missing_splits', False),
'help': ("Turning off the --missing-splits flag: don't include"
" missing values in branches of the tree.")},
# Used in models combinations, ensembles predictions. Keeps prediction
# in memory to be combined and no partial results are stored in files.
'--fast': {
'action': 'store_true',
'dest': 'fast',
'default': defaults.get('fast', True),
'help': ("Enables fast ensemble's predictions with no partial"
" results files.")},
# Used in models combinations, ensembles predictions. Stores
# predictions for each model in files that can be used and combined
# later
'--no-fast': {
'action': 'store_false',
'dest': 'fast',
'default': defaults.get('fast', True),
'help': ("Enables fast ensemble's predictions with partial"
" results files.")},
# Does not create a csv as output of a batch prediction.
'--no-csv': {
'action': 'store_true',
'dest': 'no_csv',
'default': defaults.get('no_csv', False),
'help': ("Do not create a csv file as output of a batch"
" prediction.")},
# Create a csv as output (as opposed to --no-csv).
'--no-no-csv': {
'action': 'store_false',
'dest': 'no_csv',
'default': defaults.get('no_csv', False),
'help': ("Create a csv file as output of a batch"
" prediction (as opposed to --no-csv)")},
# Create a dataset as ouput of a batch prediction
'--to-dataset': {
'action': 'store_true',
'dest': 'to_dataset',
'default': defaults.get('to_dataset', False),
'help': ("Create a dataset as ouput of a batch"
" prediction.")},
# Use median as predicted value in local models predictions
'--median': {
'action': 'store_true',
'dest': 'median',
'default': defaults.get('median', False),
'help': ("Use medtan instead on mean as node"
" prediction.")},
# Use mean as predicted value in local models predictions
'--no-median': {
'action': 'store_false',
'dest': 'median',
'default': defaults.get('median', False),
'help': ("Use mean instead on median as node"
" prediction.")}}
return options
| |
import hail as hl
from hail.typecheck import typecheck, sequenceof
from hail.expr.expressions import expr_str, expr_call, expr_locus, expr_array
from typing import List
@typecheck(locus=expr_locus(),
           alleles=expr_array(expr_str),
           proband_call=expr_call,
           father_call=expr_call,
           mother_call=expr_call)
def phase_by_transmission(
        locus: hl.expr.LocusExpression,
        alleles: hl.expr.ArrayExpression,
        proband_call: hl.expr.CallExpression,
        father_call: hl.expr.CallExpression,
        mother_call: hl.expr.CallExpression
) -> hl.expr.ArrayExpression:
    """Phases genotype calls in a trio based on allele transmission.

    Notes
    -----
    In the phased calls returned, the order is as follows:

    - Proband: father_allele | mother_allele
    - Parents: transmitted_allele | untransmitted_allele

    Phasing of sex chromosomes:

    - Sex chromosomes of male individuals should be haploid to be phased correctly.
    - If `proband_call` is diploid on non-par regions of the sex chromosomes, it is assumed to be female.

    Returns `NA` when genotype calls cannot be phased.
    The following genotype calls combinations cannot be phased by transmission:

    1. One of the calls in the trio is missing
    2. The proband genotype cannot be obtained from the parents alleles (Mendelian violation)
    3. All individuals of the trio are heterozygous for the same two alleles
    4. Father is diploid on non-PAR region of X or Y
    5. Proband is diploid on non-PAR region of Y

    In addition, individual phased genotype calls are returned as missing in the following situations:

    1. All mother genotype calls non-PAR region of Y
    2. Diploid father genotype calls on non-PAR region of X for a male proband (proband and mother are still phased as father doesn't participate in allele transmission)

    Note
    ----
    :func:`~.phase_trio_matrix_by_transmission` provides a convenience wrapper for phasing a trio matrix.

    Parameters
    ----------
    locus : :class:`.LocusExpression`
        Expression for the locus in the trio matrix
    alleles : :class:`.ArrayExpression`
        Expression for the alleles in the trio matrix
    proband_call : :class:`.CallExpression`
        Expression for the proband call in the trio matrix
    father_call : :class:`.CallExpression`
        Expression for the father call in the trio matrix
    mother_call : :class:`.CallExpression`
        Expression for the mother call in the trio matrix

    Returns
    -------
    :class:`.ArrayExpression`
        Array containing: [phased proband call, phased father call, phased mother call]"""
    def call_to_one_hot_alleles_array(call: hl.expr.CallExpression, alleles: hl.expr.ArrayExpression) -> hl.expr.ArrayExpression:
        """
        Get the set of all different one-hot-encoded allele-vectors in a genotype call.
        It is returned as an ordered array where the first vector corresponds to the first allele,
        and the second vector (only present if het) the second allele.

        :param CallExpression call: genotype
        :param ArrayExpression alleles: Alleles at the site
        :return: Array of one-hot-encoded alleles
        :rtype: ArrayExpression
        """
        # A het call contributes two distinct candidate allele vectors; a
        # non-het (hom or haploid) call contributes a single vector.
        return hl.if_else(
            call.is_het(),
            hl.array([
                hl.call(call[0]).one_hot_alleles(alleles),
                hl.call(call[1]).one_hot_alleles(alleles),
            ]),
            hl.array([hl.call(call[0]).one_hot_alleles(alleles)])
        )
    def phase_parent_call(call: hl.expr.CallExpression, transmitted_allele_index: int):
        """
        Given a genotype and which allele was transmitted to the offspring, returns the parent phased genotype.

        :param CallExpression call: Parent genotype
        :param int transmitted_allele_index: index of transmitted allele (0 or 1)
        :return: Phased parent genotype
        :rtype: CallExpression
        """
        # Phased order is transmitted | untransmitted: the other allele index
        # (1 if 0 was transmitted, 0 otherwise) becomes the second allele.
        return hl.call(
            call[transmitted_allele_index],
            call[hl.int(transmitted_allele_index == 0)],
            phased=True
        )
    def phase_diploid_proband(
            locus: hl.expr.LocusExpression,
            alleles: hl.expr.ArrayExpression,
            proband_call: hl.expr.CallExpression,
            father_call: hl.expr.CallExpression,
            mother_call: hl.expr.CallExpression
    ) -> hl.expr.ArrayExpression:
        """
        Returns phased genotype calls in the case of a diploid proband
        (autosomes, PAR regions of sex chromosomes or non-PAR regions of a female proband)

        :param LocusExpression locus: Locus in the trio MatrixTable
        :param ArrayExpression alleles: Alleles in the trio MatrixTable
        :param CallExpression proband_call: Input proband genotype call
        :param CallExpression father_call: Input father genotype call
        :param CallExpression mother_call: Input mother genotype call
        :return: Array containing: phased proband call, phased father call, phased mother call
        :rtype: ArrayExpression
        """
        proband_v = proband_call.one_hot_alleles(alleles)
        # On non-PAR X/Y the father must be haploid; otherwise (case 4 in the
        # outer docstring) his candidate vectors are missing and no phase is
        # returned.
        father_v = hl.if_else(
            locus.in_x_nonpar() | locus.in_y_nonpar(),
            hl.or_missing(father_call.is_haploid(), hl.array([father_call.one_hot_alleles(alleles)])),
            call_to_one_hot_alleles_array(father_call, alleles)
        )
        mother_v = call_to_one_hot_alleles_array(mother_call, alleles)
        # All (father allele index, mother allele index) pairs whose one-hot
        # sum reproduces the proband's allele counts.
        combinations = hl.flatmap(
            lambda f:
            hl.enumerate(mother_v)
            .filter(lambda m: m[1] + f[1] == proband_v)
            .map(lambda m: hl.struct(m=m[0], f=f[0])),
            hl.enumerate(father_v)
        )
        # Phasing is unambiguous only when exactly one combination matches
        # (rules out Mendelian violations and triple-het sites).
        return (
            hl.or_missing(
                hl.is_defined(combinations) & (hl.len(combinations) == 1),
                hl.array([
                    hl.call(father_call[combinations[0].f], mother_call[combinations[0].m], phased=True),
                    hl.if_else(father_call.is_haploid(), hl.call(father_call[0], phased=True), phase_parent_call(father_call, combinations[0].f)),
                    phase_parent_call(mother_call, combinations[0].m)
                ])
            )
        )
    def phase_haploid_proband_x_nonpar(
            proband_call: hl.expr.CallExpression,
            father_call: hl.expr.CallExpression,
            mother_call: hl.expr.CallExpression
    ) -> hl.expr.ArrayExpression:
        """
        Returns phased genotype calls in the case of a haploid proband in the non-PAR region of X

        :param CallExpression proband_call: Input proband genotype call
        :param CallExpression father_call: Input father genotype call
        :param CallExpression mother_call: Input mother genotype call
        :return: Array containing: phased proband call, phased father call, phased mother call
        :rtype: ArrayExpression
        """
        # The proband's single X allele must come from the mother; find which
        # of her alleles was transmitted.
        transmitted_allele = hl.enumerate(hl.array([mother_call[0], mother_call[1]])).find(lambda m: m[1] == proband_call[0])
        return hl.or_missing(
            hl.is_defined(transmitted_allele),
            hl.array([
                hl.call(proband_call[0], phased=True),
                hl.or_missing(father_call.is_haploid(), hl.call(father_call[0], phased=True)),
                phase_parent_call(mother_call, transmitted_allele[0])
            ])
        )
    def phase_y_nonpar(
            proband_call: hl.expr.CallExpression,
            father_call: hl.expr.CallExpression,
    ) -> hl.expr.ArrayExpression:
        """
        Returns phased genotype calls in the non-PAR region of Y (requires both father and proband to be haploid to return phase)

        :param CallExpression proband_call: Input proband genotype call
        :param CallExpression father_call: Input father genotype call
        :return: Array containing: phased proband call, phased father call, phased mother call
        :rtype: ArrayExpression
        """
        return hl.or_missing(
            proband_call.is_haploid() & father_call.is_haploid() & (father_call[0] == proband_call[0]),
            hl.array([
                hl.call(proband_call[0], phased=True),
                hl.call(father_call[0], phased=True),
                # Mother's phased call is undefined on non-PAR Y.
                hl.null(hl.tcall)
            ])
        )
    # Dispatch on locus/ploidy: haploid X, non-PAR Y, diploid proband;
    # anything else (e.g. diploid proband on non-PAR Y) is unphaseable.
    return (
        hl.case()
        .when(locus.in_x_nonpar() & proband_call.is_haploid(), phase_haploid_proband_x_nonpar(proband_call, father_call, mother_call))
        .when(locus.in_y_nonpar(), phase_y_nonpar(proband_call, father_call))
        .when(proband_call.is_diploid(), phase_diploid_proband(locus, alleles, proband_call, father_call, mother_call))
        .or_missing()
    )
@typecheck(tm=hl.MatrixTable,
           call_field=str,
           phased_call_field=str)
def phase_trio_matrix_by_transmission(tm: hl.MatrixTable, call_field: str = 'GT', phased_call_field: str = 'PBT_GT') -> hl.MatrixTable:
    """Adds a phased genotype entry to a trio MatrixTable based on allele transmission in the trio.

    Example
    -------
    >>> # Create a trio matrix
    >>> pedigree = hl.Pedigree.read('data/case_control_study.fam')
    >>> trio_dataset = hl.trio_matrix(dataset, pedigree, complete_trios=True)

    >>> # Phase trios by transmission
    >>> phased_trio_dataset = phase_trio_matrix_by_transmission(trio_dataset)

    Notes
    -----
    Only the `Call` field named by `call_field` is used for phasing, and phasing
    is attempted only when all 3 members of the trio are present and have a call.

    Allele order in the phased genotypes:

    - Proband: father_allele | mother_allele
    - Parents: transmitted_allele | untransmitted_allele

    Phasing of sex chromosomes:

    - Sex chromosomes of male individuals should be haploid to be phased correctly.
    - If a proband is diploid on non-par regions of the sex chromosomes, it is assumed to be female.

    Genotypes that cannot be phased are set to `NA`. The phased calls for all
    trio members are missing when:

    1. One of the calls in the trio is missing
    2. The proband genotype cannot be obtained from the parents alleles (Mendelian violation)
    3. All individuals of the trio are heterozygous for the same two alleles
    4. Father is diploid on non-PAR region of X or Y
    5. Proband is diploid on non-PAR region of Y

    Individual phased calls are additionally missing when:

    1. All mother genotype calls non-PAR region of Y
    2. Diploid father genotype calls on non-PAR region of X for a male proband (proband and mother are still phased as father doesn't participate in allele transmission)

    Parameters
    ----------
    tm : :class:`.MatrixTable`
        Trio MatrixTable (entries have to be a Struct with `proband_entry`, `mother_entry` and `father_entry` present)
    call_field : str
        genotype field name in the matrix entries to use for phasing
    phased_call_field : str
        name for the phased genotype field in the matrix entries

    Returns
    -------
    :class:`.MatrixTable`
        Trio MatrixTable entry with additional phased genotype field for each individual"""
    # One expression phases the whole trio; it evaluates to the array
    # [phased proband call, phased father call, phased mother call].
    phased_calls = phase_by_transmission(
        tm.locus,
        tm.alleles,
        tm.proband_entry[call_field],
        tm.father_entry[call_field],
        tm.mother_entry[call_field]
    )
    tm = tm.annotate_entries(__phased_GT=phased_calls)
    # Extend each member's entry struct with its slot of the phased array.
    trio_members = ['proband_entry', 'father_entry', 'mother_entry']
    return tm.select_entries(**{
        member: hl.struct(
            **tm[member],
            **{phased_call_field: tm.__phased_GT[idx]}
        )
        for idx, member in enumerate(trio_members)
    })
@typecheck(tm=hl.MatrixTable,
           col_keys=sequenceof(str),
           keep_trio_cols=bool,
           keep_trio_entries=bool)
def explode_trio_matrix(tm: hl.MatrixTable, col_keys: List[str] = ['s'], keep_trio_cols: bool = True, keep_trio_entries: bool = False) -> hl.MatrixTable:
    """Splits a trio MatrixTable back into a sample MatrixTable.

    Example
    -------
    >>> # Create a trio matrix from a sample matrix
    >>> pedigree = hl.Pedigree.read('data/case_control_study.fam')
    >>> trio_dataset = hl.trio_matrix(dataset, pedigree, complete_trios=True)

    >>> # Explode trio matrix back into a sample matrix
    >>> exploded_trio_dataset = explode_trio_matrix(trio_dataset)

    Notes
    -----
    The resulting MatrixTable column schema is the same as the proband/father/mother schema,
    and the resulting entry schema is the same as the proband_entry/father_entry/mother_entry schema.
    If the `keep_trio_cols` option is set, then an additional `source_trio` column is added with the trio column data.
    If the `keep_trio_entries` option is set, then an additional `source_trio_entry` column is added with the trio entry data.

    Note
    ----
    This assumes that the input MatrixTable is a trio MatrixTable (similar to
    the result of :func:`~.trio_matrix`) Its entry schema has to contain
    'proband_entry`, `father_entry` and `mother_entry` all with the same type.
    Its column schema has to contain 'proband`, `father` and `mother` all with
    the same type.

    Parameters
    ----------
    tm : :class:`.MatrixTable`
        Trio MatrixTable (entries have to be a Struct with `proband_entry`, `mother_entry` and `father_entry` present)
    col_keys : :obj:`list` of str
        Column key(s) for the resulting sample MatrixTable
    keep_trio_cols: bool
        Whether to add a `source_trio` column with the trio column data (default `True`)
    keep_trio_entries: bool
        Whether to add a `source_trio_entries` column with the trio entry data (default `False`)

    Returns
    -------
    :class:`.MatrixTable`
        Sample MatrixTable
    """
    # Pack the three per-member entry structs into one array so each exploded
    # column can later pick out its own entry by index.
    entry_exprs = {'__trio_entries': hl.array([tm.proband_entry, tm.father_entry, tm.mother_entry])}
    if keep_trio_entries:
        entry_exprs['source_trio_entry'] = hl.struct(**tm.entry)
    tm = tm.select_entries(**entry_exprs)

    # Drop the column key, then pair every member's column struct with its
    # index (0 = proband, 1 = father, 2 = mother).
    tm = tm.key_cols_by()
    col_exprs = {'__trio_members': hl.enumerate(hl.array([tm.proband, tm.father, tm.mother]))}
    if keep_trio_cols:
        col_exprs['source_trio'] = hl.struct(**tm.col)
    tm = tm.select_cols(**col_exprs)

    # One column per trio member; each carries its (index, member-struct) pair.
    exploded = tm.explode_cols(tm.__trio_members)
    # Replace the packed entry array with the member's own entry fields.
    exploded = exploded.transmute_entries(
        **exploded.__trio_entries[exploded.__trio_members[0]]
    )
    # Flatten the member struct into the column schema and restore keying.
    exploded = exploded.key_cols_by()
    exploded = exploded.transmute_cols(**exploded.__trio_members[1])
    if col_keys:
        exploded = exploded.key_cols_by(*col_keys)
    return exploded
| |
import unittest
from pyramid.tests.test_scripts import dummy
class TestPViewsCommand(unittest.TestCase):
    """Unit tests for ``pyramid.scripts.pviews.PViewsCommand``.

    The expensive parts of the command are replaced with dummies: the
    application bootstrap (``dummy.DummyBootstrap``), view resolution
    (``_find_view`` is stubbed per test where needed) and output
    (``command.out`` is replaced with ``L.append`` so report lines can be
    asserted by index against the captured list ``L``).
    """
    # --- helpers ---------------------------------------------------------
    def _getTargetClass(self):
        # Imported lazily so an import failure surfaces in the test itself.
        from pyramid.scripts.pviews import PViewsCommand
        return PViewsCommand
    def _makeOne(self, registry=None):
        # Build a command wired to a dummy bootstrap and a fake ini spec.
        cmd = self._getTargetClass()([])
        cmd.bootstrap = (dummy.DummyBootstrap(registry=registry),)
        cmd.args = ('/foo/bar/myapp.ini#myapp',)
        return cmd
    def _register_mapper(self, registry, routes):
        # Install a dummy IRoutesMapper utility holding the given routes.
        from pyramid.interfaces import IRoutesMapper
        mapper = dummy.DummyMapper(*routes)
        registry.registerUtility(mapper, IRoutesMapper)
    # --- _find_view: traversal and route-based lookup --------------------
    def test__find_view_no_match(self):
        from pyramid.registry import Registry
        registry = Registry()
        self._register_mapper(registry, [])
        command = self._makeOne(registry)
        result = command._find_view('/a', registry)
        self.assertEqual(result, None)
    def test__find_view_no_match_multiview_registered(self):
        from zope.interface import implementer
        from zope.interface import providedBy
        from pyramid.interfaces import IRequest
        from pyramid.interfaces import IViewClassifier
        from pyramid.interfaces import IMultiView
        from pyramid.traversal import DefaultRootFactory
        from pyramid.registry import Registry
        registry = Registry()
        @implementer(IMultiView)
        class View1(object):
            pass
        request = dummy.DummyRequest({'PATH_INFO':'/a'})
        root = DefaultRootFactory(request)
        root_iface = providedBy(root)
        registry.registerAdapter(View1(),
                                 (IViewClassifier, IRequest, root_iface),
                                 IMultiView)
        self._register_mapper(registry, [])
        command = self._makeOne(registry=registry)
        # The multiview is registered for the default (empty) view name;
        # '/x' resolves to view name 'x', so nothing matches.
        result = command._find_view('/x', registry)
        self.assertEqual(result, None)
    def test__find_view_traversal(self):
        from zope.interface import providedBy
        from pyramid.interfaces import IRequest
        from pyramid.interfaces import IViewClassifier
        from pyramid.interfaces import IView
        from pyramid.traversal import DefaultRootFactory
        from pyramid.registry import Registry
        registry = Registry()
        def view1(): pass
        request = dummy.DummyRequest({'PATH_INFO':'/a'})
        root = DefaultRootFactory(request)
        root_iface = providedBy(root)
        registry.registerAdapter(view1,
                                 (IViewClassifier, IRequest, root_iface),
                                 IView, name='a')
        self._register_mapper(registry, [])
        command = self._makeOne(registry=registry)
        result = command._find_view('/a', registry)
        self.assertEqual(result, view1)
    def test__find_view_traversal_multiview(self):
        from zope.interface import implementer
        from zope.interface import providedBy
        from pyramid.interfaces import IRequest
        from pyramid.interfaces import IViewClassifier
        from pyramid.interfaces import IMultiView
        from pyramid.traversal import DefaultRootFactory
        from pyramid.registry import Registry
        registry = Registry()
        @implementer(IMultiView)
        class View1(object):
            pass
        request = dummy.DummyRequest({'PATH_INFO':'/a'})
        root = DefaultRootFactory(request)
        root_iface = providedBy(root)
        view = View1()
        registry.registerAdapter(view,
                                 (IViewClassifier, IRequest, root_iface),
                                 IMultiView, name='a')
        self._register_mapper(registry, [])
        command = self._makeOne(registry=registry)
        result = command._find_view('/a', registry)
        self.assertEqual(result, view)
    def test__find_view_route_no_multiview(self):
        from zope.interface import Interface
        from zope.interface import implementer
        from pyramid.interfaces import IRouteRequest
        from pyramid.interfaces import IViewClassifier
        from pyramid.interfaces import IView
        from pyramid.registry import Registry
        registry = Registry()
        def view():pass
        class IMyRoot(Interface):
            pass
        class IMyRoute(Interface):
            pass
        registry.registerAdapter(view,
                                 (IViewClassifier, IMyRoute, IMyRoot),
                                 IView, '')
        registry.registerUtility(IMyRoute, IRouteRequest, name='a')
        @implementer(IMyRoot)
        class Factory(object):
            def __init__(self, request):
                pass
        routes = [dummy.DummyRoute('a', '/a', factory=Factory, matchdict={}),
                  dummy.DummyRoute('b', '/b', factory=Factory)]
        self._register_mapper(registry, routes)
        command = self._makeOne(registry=registry)
        result = command._find_view('/a', registry)
        self.assertEqual(result, view)
    def test__find_view_route_multiview_no_view_registered(self):
        from zope.interface import Interface
        from zope.interface import implementer
        from pyramid.interfaces import IRouteRequest
        from pyramid.interfaces import IMultiView
        from pyramid.interfaces import IRootFactory
        from pyramid.registry import Registry
        registry = Registry()
        def view1():pass
        def view2():pass
        class IMyRoot(Interface):
            pass
        class IMyRoute1(Interface):
            pass
        class IMyRoute2(Interface):
            pass
        registry.registerUtility(IMyRoute1, IRouteRequest, name='a')
        registry.registerUtility(IMyRoute2, IRouteRequest, name='b')
        @implementer(IMyRoot)
        class Factory(object):
            def __init__(self, request):
                pass
        registry.registerUtility(Factory, IRootFactory)
        # Two routes match '/a': the command synthesizes a multiview even
        # though no concrete views are registered for them.
        routes = [dummy.DummyRoute('a', '/a', matchdict={}),
                  dummy.DummyRoute('b', '/a', matchdict={})]
        self._register_mapper(registry, routes)
        command = self._makeOne(registry=registry)
        result = command._find_view('/a', registry)
        self.assertTrue(IMultiView.providedBy(result))
    def test__find_view_route_multiview(self):
        from zope.interface import Interface
        from zope.interface import implementer
        from pyramid.interfaces import IRouteRequest
        from pyramid.interfaces import IViewClassifier
        from pyramid.interfaces import IView
        from pyramid.interfaces import IMultiView
        from pyramid.interfaces import IRootFactory
        from pyramid.registry import Registry
        registry = Registry()
        def view1():pass
        def view2():pass
        class IMyRoot(Interface):
            pass
        class IMyRoute1(Interface):
            pass
        class IMyRoute2(Interface):
            pass
        registry.registerAdapter(view1,
                                 (IViewClassifier, IMyRoute1, IMyRoot),
                                 IView, '')
        registry.registerAdapter(view2,
                                 (IViewClassifier, IMyRoute2, IMyRoot),
                                 IView, '')
        registry.registerUtility(IMyRoute1, IRouteRequest, name='a')
        registry.registerUtility(IMyRoute2, IRouteRequest, name='b')
        @implementer(IMyRoot)
        class Factory(object):
            def __init__(self, request):
                pass
        registry.registerUtility(Factory, IRootFactory)
        routes = [dummy.DummyRoute('a', '/a', matchdict={}),
                  dummy.DummyRoute('b', '/a', matchdict={})]
        self._register_mapper(registry, routes)
        command = self._makeOne(registry=registry)
        result = command._find_view('/a', registry)
        # Both routes' views are folded into one synthetic multiview.
        self.assertTrue(IMultiView.providedBy(result))
        self.assertEqual(len(result.views), 2)
        self.assertTrue((None, view1, None) in result.views)
        self.assertTrue((None, view2, None) in result.views)
    # --- _find_multi_routes ----------------------------------------------
    def test__find_multi_routes_all_match(self):
        command = self._makeOne()
        def factory(request): pass
        routes = [dummy.DummyRoute('a', '/a', factory=factory, matchdict={}),
                  dummy.DummyRoute('b', '/a', factory=factory, matchdict={})]
        mapper = dummy.DummyMapper(*routes)
        request = dummy.DummyRequest({'PATH_INFO':'/a'})
        result = command._find_multi_routes(mapper, request)
        self.assertEqual(result, [{'match':{}, 'route':routes[0]},
                                  {'match':{}, 'route':routes[1]}])
    def test__find_multi_routes_some_match(self):
        command = self._makeOne()
        def factory(request): pass
        # Routes without a matchdict do not match and are filtered out.
        routes = [dummy.DummyRoute('a', '/a', factory=factory),
                  dummy.DummyRoute('b', '/a', factory=factory, matchdict={})]
        mapper = dummy.DummyMapper(*routes)
        request = dummy.DummyRequest({'PATH_INFO':'/a'})
        result = command._find_multi_routes(mapper, request)
        self.assertEqual(result, [{'match':{}, 'route':routes[1]}])
    def test__find_multi_routes_none_match(self):
        command = self._makeOne()
        def factory(request): pass
        routes = [dummy.DummyRoute('a', '/a', factory=factory),
                  dummy.DummyRoute('b', '/a', factory=factory)]
        mapper = dummy.DummyMapper(*routes)
        request = dummy.DummyRequest({'PATH_INFO':'/a'})
        result = command._find_multi_routes(mapper, request)
        self.assertEqual(result, [])
    # --- run(): report formatting (asserted line-by-line against L) ------
    def test_views_command_not_found(self):
        from pyramid.registry import Registry
        registry = Registry()
        command = self._makeOne(registry=registry)
        L = []
        command.out = L.append
        command._find_view = lambda arg1, arg2: None
        command.args = ('/foo/bar/myapp.ini#myapp', '/a')
        result = command.run()
        self.assertEqual(result, 0)
        self.assertEqual(L[1], 'URL = /a')
        self.assertEqual(L[3], '    Not found.')
    def test_views_command_not_found_url_starts_without_slash(self):
        from pyramid.registry import Registry
        registry = Registry()
        command = self._makeOne(registry=registry)
        L = []
        command.out = L.append
        command._find_view = lambda arg1, arg2: None
        # The command normalizes 'a' to '/a' before lookup.
        command.args = ('/foo/bar/myapp.ini#myapp', 'a')
        result = command.run()
        self.assertEqual(result, 0)
        self.assertEqual(L[1], 'URL = /a')
        self.assertEqual(L[3], '    Not found.')
    def test_views_command_single_view_traversal(self):
        from pyramid.registry import Registry
        registry = Registry()
        command = self._makeOne(registry=registry)
        L = []
        command.out = L.append
        view = dummy.DummyView(context='context', view_name='a')
        command._find_view = lambda arg1, arg2: view
        command.args = ('/foo/bar/myapp.ini#myapp', '/a')
        result = command.run()
        self.assertEqual(result, 0)
        self.assertEqual(L[1], 'URL = /a')
        self.assertEqual(L[3], '    context: context')
        self.assertEqual(L[4], '    view name: a')
        self.assertEqual(L[8],
                         '    pyramid.tests.test_scripts.dummy.DummyView')
    def test_views_command_single_view_function_traversal(self):
        from pyramid.registry import Registry
        registry = Registry()
        command = self._makeOne(registry=registry)
        L = []
        command.out = L.append
        def view(): pass
        view.__request_attrs__ = {'context': 'context', 'view_name': 'a'}
        command._find_view = lambda arg1, arg2: view
        command.args = ('/foo/bar/myapp.ini#myapp', '/a')
        result = command.run()
        self.assertEqual(result, 0)
        self.assertEqual(L[1], 'URL = /a')
        self.assertEqual(L[3], '    context: context')
        self.assertEqual(L[4], '    view name: a')
        self.assertEqual(L[8],
                         '    pyramid.tests.test_scripts.test_pviews.view')
    def test_views_command_single_view_traversal_with_permission(self):
        from pyramid.registry import Registry
        registry = Registry()
        command = self._makeOne(registry=registry)
        L = []
        command.out = L.append
        view = dummy.DummyView(context='context', view_name='a')
        view.__permission__ = 'test'
        command._find_view = lambda arg1, arg2: view
        command.args = ('/foo/bar/myapp.ini#myapp', '/a')
        result = command.run()
        self.assertEqual(result, 0)
        self.assertEqual(L[1], 'URL = /a')
        self.assertEqual(L[3], '    context: context')
        self.assertEqual(L[4], '    view name: a')
        self.assertEqual(L[8],
                         '    pyramid.tests.test_scripts.dummy.DummyView')
        self.assertEqual(L[9], '    required permission = test')
    def test_views_command_single_view_traversal_with_predicates(self):
        from pyramid.registry import Registry
        registry = Registry()
        command = self._makeOne(registry=registry)
        L = []
        command.out = L.append
        def predicate(): pass
        predicate.text = lambda *arg: "predicate = x"
        view = dummy.DummyView(context='context', view_name='a')
        view.__predicates__ = [predicate]
        command._find_view = lambda arg1, arg2: view
        command.args = ('/foo/bar/myapp.ini#myapp', '/a')
        result = command.run()
        self.assertEqual(result, 0)
        self.assertEqual(L[1], 'URL = /a')
        self.assertEqual(L[3], '    context: context')
        self.assertEqual(L[4], '    view name: a')
        self.assertEqual(L[8],
                         '    pyramid.tests.test_scripts.dummy.DummyView')
        self.assertEqual(L[9], '    view predicates (predicate = x)')
    def test_views_command_single_view_route(self):
        from pyramid.registry import Registry
        registry = Registry()
        command = self._makeOne(registry=registry)
        L = []
        command.out = L.append
        route = dummy.DummyRoute('a', '/a', matchdict={})
        view = dummy.DummyView(context='context', view_name='a',
                               matched_route=route, subpath='')
        command._find_view = lambda arg1, arg2: view
        command.args = ('/foo/bar/myapp.ini#myapp', '/a')
        result = command.run()
        self.assertEqual(result, 0)
        self.assertEqual(L[1], 'URL = /a')
        self.assertEqual(L[3], '    context: context')
        self.assertEqual(L[4], '    view name: a')
        self.assertEqual(L[6], '    Route:')
        self.assertEqual(L[8], '    route name: a')
        self.assertEqual(L[9], '    route pattern: /a')
        self.assertEqual(L[10], '    route path: /a')
        self.assertEqual(L[11], '    subpath: ')
        self.assertEqual(L[15],
                         '    pyramid.tests.test_scripts.dummy.DummyView')
    def test_views_command_multi_view_nested(self):
        from pyramid.registry import Registry
        registry = Registry()
        command = self._makeOne(registry=registry)
        L = []
        command.out = L.append
        # A multiview nested inside another multiview: inner views are
        # reported at a deeper indent level.
        view1 = dummy.DummyView(context='context', view_name='a1')
        view1.__name__ = 'view1'
        view1.__view_attr__ = 'call'
        multiview1 = dummy.DummyMultiView(view1, context='context',
                                          view_name='a1')
        multiview2 = dummy.DummyMultiView(multiview1, context='context',
                                          view_name='a')
        command._find_view = lambda arg1, arg2: multiview2
        command.args = ('/foo/bar/myapp.ini#myapp', '/a')
        result = command.run()
        self.assertEqual(result, 0)
        self.assertEqual(L[1], 'URL = /a')
        self.assertEqual(L[3], '    context: context')
        self.assertEqual(L[4], '    view name: a')
        self.assertEqual(L[8],
                         '    pyramid.tests.test_scripts.dummy.DummyMultiView')
        self.assertEqual(L[12],
                         '        pyramid.tests.test_scripts.dummy.view1.call')
    def test_views_command_single_view_route_with_route_predicates(self):
        from pyramid.registry import Registry
        registry = Registry()
        command = self._makeOne(registry=registry)
        L = []
        command.out = L.append
        def predicate(): pass
        predicate.text = lambda *arg: "predicate = x"
        route = dummy.DummyRoute('a', '/a', matchdict={}, predicate=predicate)
        view = dummy.DummyView(context='context', view_name='a',
                               matched_route=route, subpath='')
        command._find_view = lambda arg1, arg2: view
        command.args = ('/foo/bar/myapp.ini#myapp', '/a')
        result = command.run()
        self.assertEqual(result, 0)
        self.assertEqual(L[1], 'URL = /a')
        self.assertEqual(L[3], '    context: context')
        self.assertEqual(L[4], '    view name: a')
        self.assertEqual(L[6], '    Route:')
        self.assertEqual(L[8], '    route name: a')
        self.assertEqual(L[9], '    route pattern: /a')
        self.assertEqual(L[10], '    route path: /a')
        self.assertEqual(L[11], '    subpath: ')
        self.assertEqual(L[12], '    route predicates (predicate = x)')
        self.assertEqual(L[16],
                         '    pyramid.tests.test_scripts.dummy.DummyView')
    def test_views_command_multiview(self):
        from pyramid.registry import Registry
        registry = Registry()
        command = self._makeOne(registry=registry)
        L = []
        command.out = L.append
        view = dummy.DummyView(context='context')
        view.__name__ = 'view'
        view.__view_attr__ = 'call'
        multiview = dummy.DummyMultiView(view, context='context', view_name='a')
        command._find_view = lambda arg1, arg2: multiview
        command.args = ('/foo/bar/myapp.ini#myapp', '/a')
        result = command.run()
        self.assertEqual(result, 0)
        self.assertEqual(L[1], 'URL = /a')
        self.assertEqual(L[3], '    context: context')
        self.assertEqual(L[4], '    view name: a')
        self.assertEqual(L[8],
                         '    pyramid.tests.test_scripts.dummy.view.call')
    def test_views_command_multiview_with_permission(self):
        from pyramid.registry import Registry
        registry = Registry()
        command = self._makeOne(registry=registry)
        L = []
        command.out = L.append
        view = dummy.DummyView(context='context')
        view.__name__ = 'view'
        view.__view_attr__ = 'call'
        view.__permission__ = 'test'
        multiview = dummy.DummyMultiView(view, context='context', view_name='a')
        command._find_view = lambda arg1, arg2: multiview
        command.args = ('/foo/bar/myapp.ini#myapp', '/a')
        result = command.run()
        self.assertEqual(result, 0)
        self.assertEqual(L[1], 'URL = /a')
        self.assertEqual(L[3], '    context: context')
        self.assertEqual(L[4], '    view name: a')
        self.assertEqual(L[8],
                         '    pyramid.tests.test_scripts.dummy.view.call')
        self.assertEqual(L[9], '    required permission = test')
    def test_views_command_multiview_with_predicates(self):
        from pyramid.registry import Registry
        registry = Registry()
        command = self._makeOne(registry=registry)
        L = []
        command.out = L.append
        def predicate(): pass
        predicate.text = lambda *arg: "predicate = x"
        view = dummy.DummyView(context='context')
        view.__name__ = 'view'
        view.__view_attr__ = 'call'
        view.__predicates__ = [predicate]
        multiview = dummy.DummyMultiView(view, context='context', view_name='a')
        command._find_view = lambda arg1, arg2: multiview
        command.args = ('/foo/bar/myapp.ini#myapp', '/a')
        result = command.run()
        self.assertEqual(result, 0)
        self.assertEqual(L[1], 'URL = /a')
        self.assertEqual(L[3], '    context: context')
        self.assertEqual(L[4], '    view name: a')
        self.assertEqual(L[8],
                         '    pyramid.tests.test_scripts.dummy.view.call')
        self.assertEqual(L[9], '    view predicates (predicate = x)')
class Test_main(unittest.TestCase):
    """Tests for the pviews ``main`` entry point."""

    def _callFUT(self, argv):
        # Import inside the helper so the module is loaded lazily,
        # matching the surrounding tests' convention.
        from pyramid.scripts.pviews import main
        return main(argv, quiet=True)

    def test_it(self):
        # With no URL argument the command reports usage and exits with 2.
        self.assertEqual(self._callFUT(['pviews']), 2)
| |
# -*- coding: utf-8 -*-
"""
pyvisa-sim.parser
~~~~~~~~~~~~~~~~~
Parser function
:copyright: 2014 by PyVISA-sim Authors, see AUTHORS for more details.
:license: MIT, see LICENSE for more details.
"""
import os
from io import open, StringIO
from contextlib import closing
from traceback import format_exc
import pkg_resources
import yaml
from .component import NoResponse
from .devices import Devices, Device
from .channels import Channels
def _ver_to_tuple(ver):
return tuple(map(int, (ver.split("."))))
#: Version of the specification
SPEC_VERSION = "1.1"
#: Same version as a tuple of ints, e.g. (1, 1), for ordered comparisons.
SPEC_VERSION_TUPLE = _ver_to_tuple(SPEC_VERSION)
class SimpleChainmap(object):
    """Combine multiple mappings for sequential lookup.

    The first mapping containing a key wins; a key found in none of
    them raises ``KeyError``.
    """

    def __init__(self, *maps):
        self._maps = maps

    def __getitem__(self, key):
        # Deliberately uses item access (not ``in``) so mappings with a
        # __getitem__ fallback behave the same as in a direct lookup.
        for candidate in self._maps:
            try:
                return candidate[key]
            except KeyError:
                continue
        raise KeyError(key)
def _s(s):
    """Strip surrounding spaces, letting the NoResponse sentinel through."""
    return s if s is NoResponse else s.strip(" ")
def _get_pair(dd):
    """Return a (query, response) pair from a dialogue dictionary.

    :param dd: Dialogue dictionary.
    :type dd: Dict[str, str]
    :return: (query, response)
    :rtype: (str, str)
    """
    query = _s(dd["q"])
    response = _s(dd.get("r", NoResponse))
    return query, response
def _get_triplet(dd):
    """Return a (query, response, error response) triplet from a dialogue.

    :param dd: Dialogue dictionary.
    :type dd: Dict[str, str]
    :return: (query, response, error response)
    :rtype: (str, str | NoResponse, str | NoResponse)
    """
    query = _s(dd["q"])
    response = _s(dd.get("r", NoResponse))
    error = _s(dd.get("e", NoResponse))
    return query, response, error
def _load(content_or_fp):
    """YAML-parse a string or open file and check its spec version.

    :param content_or_fp: YAML document as a string or a file object.
    :return: the parsed top-level mapping.
    :raises ValueError: if the spec version is missing, malformed, or
        newer than the version this parser supports.
    """
    try:
        # BaseLoader keeps every scalar as a string, so values such as
        # "1.1" or "False" are never coerced and are handled explicitly
        # by the callers.
        data = yaml.load(content_or_fp, Loader=yaml.loader.BaseLoader)
    except Exception as e:
        raise type(e)("Malformed yaml file:\n%r" % format_exc())
    try:
        ver = data["spec"]
    except (KeyError, TypeError):
        # KeyError: mapping without a "spec" entry.
        # TypeError: the document's top level is not a mapping at all.
        # (Previously a bare ``except:``, which also swallowed
        # SystemExit/KeyboardInterrupt.)
        raise ValueError("The file does not specify a spec version")
    try:
        ver = tuple(map(int, (ver.split("."))))
    except (AttributeError, ValueError):
        # AttributeError: "spec" value is not a string.
        # ValueError: a dotted component is not an integer.
        raise ValueError(
            "Invalid spec version format. Expect 'X.Y'"
            " (X and Y integers), found %s" % ver
        )
    if ver > SPEC_VERSION_TUPLE:
        raise ValueError(
            "The spec version of the file is "
            "%s but the parser is %s. "
            "Please update pyvisa-sim." % (ver, SPEC_VERSION)
        )
    return data
def parse_resource(name):
    """Parse a YAML file bundled with this package under *name*."""
    with closing(pkg_resources.resource_stream(__name__, name)) as fp:
        raw = fp.read()
    # Resource streams are binary; decode before handing off to YAML.
    return _load(StringIO(raw.decode("utf-8")))
def parse_file(fullpath):
    """Parse the YAML file located at *fullpath*."""
    with open(fullpath, encoding="utf-8") as stream:
        return _load(stream)
def update_component(name, comp, component_dict):
    """Configure a component (device or channel) from its definition dict.

    Adds every entry under "dialogues" and every entry under
    "properties" to *comp*.

    :param name: component name, used for error reporting only.
    :param comp: component to update.
    :param component_dict: definition dictionary of the component.
    """
    for dia in component_dict.get("dialogues", ()):
        try:
            comp.add_dialogue(*_get_pair(dia))
        except Exception as e:
            # Re-raise as the original exception type, mirroring the
            # property loop below (previously raised a bare Exception,
            # giving the two loops inconsistent failure modes).
            msg = "In device %s, malformed dialogue %s\n%r"
            raise type(e)(msg % (name, dia, e))
    for prop_name, prop_dict in component_dict.get("properties", {}).items():
        try:
            getter = _get_pair(prop_dict["getter"]) if "getter" in prop_dict else None
            setter = (
                _get_triplet(prop_dict["setter"]) if "setter" in prop_dict else None
            )
            comp.add_property(
                prop_name,
                prop_dict.get("default", ""),
                getter,
                setter,
                prop_dict.get("specs", {}),
            )
        except Exception as e:
            msg = "In device %s, malformed property %s\n%r"
            raise type(e)(msg % (name, prop_name, format_exc()))
def get_bases(definition_dict, loader):
    """Resolve a definition's "bases" into a chained lookup.

    When the definition lists bases, return a SimpleChainmap in which
    the definition itself shadows its (lazily resolved) bases;
    otherwise return the definition unchanged.
    """
    bases = definition_dict.get("bases", ())
    if not bases:
        return definition_dict
    # NOTE(review): get_comp_dict is not defined on the Loader class in
    # this file (which exposes get_device_dict) — verify it exists.
    resolved = (
        loader.get_comp_dict(required_version=SPEC_VERSION_TUPLE[0], **base)
        for base in bases
    )
    return SimpleChainmap(definition_dict, *resolved)
def get_channel(device, ch_name, channel_dict, loader, resource_dict):
    """Build a Channels component for *device* from a channel definition.

    :param device: parent device.
    :param ch_name: channel name.
    :param channel_dict: channel definition dictionary.
    :param loader: loader used to resolve base definitions.
    :param resource_dict: resource dictionary; its "channel_ids" entry,
        when present, overrides the channel's own "ids".
    :rtype: Device
    """
    channel_dict = get_bases(channel_dict, loader)
    resource_ids = resource_dict.get("channel_ids", {}).get(ch_name, [])
    ids = resource_ids or channel_dict.get("ids", {})
    # BaseLoader keeps booleans as strings, hence the "False" comparison.
    can_select = channel_dict.get("can_select") != "False"
    channels = Channels(device, ids, can_select)
    update_component(ch_name, channels, channel_dict)
    return channels
def get_device(name, device_dict, loader, resource_dict):
    """Build a Device named *name* from a device definition dictionary.

    :param loader: loader used to resolve base definitions.
    :param resource_dict: resource dictionary for the device.
    :param name: name of the device.
    :param device_dict: device definition dictionary.
    :rtype: Device
    """
    # The delimiter is read from the raw definition, before base
    # definitions are chained in.
    delimiter = device_dict.get("delimiter", ";").encode("utf-8")
    device = Device(name, delimiter)
    device_dict = get_bases(device_dict, loader)
    device.add_error_handler(device_dict.get("error", {}))
    for itype, eom_dict in device_dict.get("eom", {}).items():
        device.add_eom(itype, *_get_pair(eom_dict))
    update_component(name, device, device_dict)
    for ch_name, ch_dict in device_dict.get("channels", {}).items():
        channel = get_channel(device, ch_name, ch_dict, loader, resource_dict)
        device.add_channels(ch_name, channel)
    return device
class Loader(object):
    """Load and cache YAML setup files, resolving cross-file references.

    The file handed to the constructor is the root document.  Further
    files (bundled resources, or neighbours of their parent file) are
    pulled in through :meth:`load`, subject to a major spec-version
    check and a shared cache.
    """

    def __init__(self, filename, bundled):
        # Cache maps (absolute path / resource name, bundled flag) to the
        # parsed document dict.
        self._cache = {}
        # Parse the root document first; the attributes below are only
        # needed by subsequent load() calls, not by this initial _load().
        self.data = self._load(filename, bundled, SPEC_VERSION_TUPLE[0])
        self._filename = filename
        self._bundled = bundled
        self._basepath = os.path.dirname(filename)

    def load(self, filename, bundled, parent, required_version):
        """Load *filename* relative to *parent* (or to the root file).

        :raises ValueError: when a bundled root tries to load a
            non-bundled file, or on a spec version mismatch.
        """
        if self._bundled and not bundled:
            msg = "Only other bundled files can be loaded from bundled files."
            raise ValueError(msg)
        if parent is None:
            parent = self._filename
        base = os.path.dirname(parent)
        filename = os.path.join(base, filename)
        return self._load(filename, bundled, required_version)

    def _load(self, filename, bundled, required_version):
        """Parse a file (or return it from cache) and check its version."""
        if (filename, bundled) in self._cache:
            return self._cache[(filename, bundled)]
        if bundled:
            data = parse_resource(filename)
        else:
            data = parse_file(filename)
        # Only the major component of the spec version must match.
        ver = _ver_to_tuple(data["spec"])[0]
        if ver != required_version:
            raise ValueError(
                "Invalid version in %s (bundled = %s). "
                "Expected %s, found %s," % (filename, bundled, required_version, ver)
            )
        # Cache only after the version check succeeds.
        self._cache[(filename, bundled)] = data
        return data

    def get_device_dict(self, device, filename, bundled, required_version):
        """Return the definition dict of *device*.

        Looked up in the root document when *filename* is None,
        otherwise in the given (possibly bundled) file.
        """
        if filename is None:
            data = self.data
        else:
            data = self.load(filename, bundled, None, required_version)
        return data["devices"][device]
def get_devices(filename, bundled):
    """Get a Devices object from a file.

    :param bundled: True if *filename* names a packaged resource.
    :param filename: full path of the file to parse or name of the resource.
    :rtype: Devices
    """
    loader = Loader(filename, bundled)
    devices = Devices()
    # Build one device per resource entry; the device definition may
    # live in the root document or in another (possibly bundled) file.
    for resource_name, resource_dict in loader.data.get("resources", {}).items():
        device_name = resource_dict["device"]
        device_dict = loader.get_device_dict(
            device_name,
            resource_dict.get("filename", None),
            resource_dict.get("bundled", False),
            SPEC_VERSION_TUPLE[0],
        )
        devices.add_device(
            resource_name,
            get_device(device_name, device_dict, loader, resource_dict),
        )
    return devices
| |
#!/usr/bin/python
# -*- coding: utf8 -*-
""" module to manage argument request command """
from argparse import ArgumentParser
import sys
DEFAULT_CONFIGURATION_FILE = '/etc/compta/cli.cfg'


class ParseArgs(object):
    """Build the compta command-line parsers and parse sys.argv.

    A root parser carries the global options (-d/-p/-C) plus one
    subparser per entity (banque, compte, categorie, tag, ecriture,
    montant).  set_<entity>() registers the subparser; get_<entity>()
    parses sys.argv with argv[0] forced to the entity name.
    """

    def __init__(self, **kwargs):
        """Initialize the root parser, its global options and subparsers."""
        self.parser = ArgumentParser(**kwargs)
        self.parser.add_argument('-d', '--debug', help='Debug', action='store_true')
        self.parser.add_argument('-p', '--prompt', help='Prompt', action='store_true')
        self.parser.add_argument("-C", "--configfile", action="store",
                                 dest="configfile",
                                 default=DEFAULT_CONFIGURATION_FILE, type=str,
                                 help="configuration file used for client")
        # Fixed typo: 'Medthod' -> 'Method'.
        self.subparsers = self.parser.add_subparsers(title='database',
                                                     dest='database',
                                                     help='Method to get information'
                                                     )

    def _add_filter_arguments(self, parser, attribut_help='filter on attribut'):
        """Attach the -f/-a/-s options shared by every entity subparser."""
        parser.add_argument('-f', '--filter',
                            help='filter to apply',
                            nargs='+')
        parser.add_argument('-a', '--attribut',
                            help=attribut_help,
                            nargs='+')
        parser.add_argument('-s', '--sort',
                            help='filter on sort',
                            nargs='+')

    def get_args(self):
        """Parse and return the command-line arguments."""
        options = self.parser.parse_args()
        return options

    def set_banque(self):
        """Register the banque subparser."""
        self.parser_banque = self.subparsers.add_parser('banque', help='banque help')
        self.parser_banque.add_argument('cmd',
                                        help='command to pass [list, update, delete, create]',
                                        choices=('list', 'create', 'update', 'delete'))
        self._add_filter_arguments(self.parser_banque)

    def get_banque(self):
        """Parse sys.argv with argv[0] replaced by 'banque'."""
        # NOTE: the program name slot of argv is deliberately overwritten
        # so the full argv can be fed back through the subparsers.
        sys.argv[0] = 'banque'
        return self.parser.parse_args(sys.argv)

    def set_compte(self):
        """Register the compte subparser."""
        self.parser_compte = self.subparsers.add_parser('compte', help='compte help')
        self.parser_compte.add_argument('cmd',
                                        help='command to pass [list, update, delete, create]',
                                        choices=('list', 'create', 'update', 'delete')
                                        )
        self._add_filter_arguments(self.parser_compte)

    def get_compte(self):
        """Parse sys.argv with argv[0] replaced by 'compte'."""
        sys.argv[0] = 'compte'
        return self.parser.parse_args(sys.argv)

    def get_categorie(self):
        """Parse sys.argv with argv[0] replaced by 'categorie'."""
        sys.argv[0] = 'categorie'
        return self.parser.parse_args(sys.argv)

    def set_categorie(self):
        """Register the categorie subparser."""
        self.parser_categorie = self.subparsers.add_parser('categorie', help='categorie help')
        self.parser_categorie.add_argument('cmd',
                                           help='command to pass [list, update, delete, create]',
                                           choices=('list', 'create', 'update', 'delete')
                                           )
        self._add_filter_arguments(self.parser_categorie)

    def get_tag(self):
        """Parse sys.argv with argv[0] replaced by 'tag'."""
        sys.argv[0] = 'tag'
        return self.parser.parse_args(sys.argv)

    def set_tag(self):
        """Register the tag subparser."""
        self.parser_tag = self.subparsers.add_parser('tag', help='tag help')
        self.parser_tag.add_argument('cmd',
                                     help='command to pass [list, update, delete, create]',
                                     choices=('list', 'create', 'update', 'delete')
                                     )
        self._add_filter_arguments(self.parser_tag)

    def set_ecriture(self):
        """Register the ecriture subparser (also supports 'import')."""
        self.parser_ecriture = self.subparsers.add_parser('ecriture', help='ecriture help')
        self.parser_ecriture.add_argument('cmd',
                                          help='command to pass [list, update, delete, create, import]',
                                          choices=('list', 'create', 'update', 'delete', 'import')
                                          )
        self._add_filter_arguments(
            self.parser_ecriture,
            attribut_help="""filter on attribut [nom,
                        type [Vr, Pr, Cb, Ch, Re, Li],
                        date [YYYY/MM/DD, DD/MM],
                        valide [true, false],
                        compte_id,
                        montant,
                        description,
                        tag
                        ]""")
        self.parser_ecriture.add_argument("-i", "--import", action="store",
                                          dest="importfile",
                                          default=None, type=str,
                                          help="file to import, only ofx file is supported")

    def get_ecriture(self):
        """Parse sys.argv with argv[0] replaced by 'ecriture'."""
        sys.argv[0] = 'ecriture'
        return self.parser.parse_args(sys.argv)

    def get_montant(self):
        """Parse sys.argv with argv[0] replaced by 'montant'."""
        sys.argv[0] = 'montant'
        return self.parser.parse_args(sys.argv)

    def set_montant(self):
        """Register the montant subparser.

        Fixed copy-paste bug: this used to store the parser on
        self.parser_categorie with help 'categorie help'.
        """
        self.parser_montant = self.subparsers.add_parser('montant', help='montant help')
        self.parser_montant.add_argument('cmd',
                                         help='command to pass [list, update, delete, create]',
                                         choices=('list', 'create', 'update', 'delete')
                                         )
        self._add_filter_arguments(self.parser_montant)
        # Backward-compatible alias for any caller that relied on the
        # old (misnamed) attribute.
        self.parser_categorie = self.parser_montant

    @staticmethod
    def get_method(method):
        """Factory: build a ParseArgs configured for *method* and parse argv.

        Unknown methods register every subparser and parse normally.
        """
        parse = ParseArgs()
        dispatch = {
            "banque": (parse.set_banque, parse.get_banque),
            "compte": (parse.set_compte, parse.get_compte),
            "ecriture": (parse.set_ecriture, parse.get_ecriture),
            "montant": (parse.set_montant, parse.get_montant),
            "categorie": (parse.set_categorie, parse.get_categorie),
            "tag": (parse.set_tag, parse.get_tag),
        }
        if method in dispatch:
            setter, getter = dispatch[method]
            setter()
            return getter()
        parse.set_banque()
        parse.set_compte()
        parse.set_categorie()
        parse.set_ecriture()
        parse.set_montant()
        parse.set_tag()
        return parse.get_args()
#class ParseEcriture(ParseArgs):
# """ Class for create ecriture object """
#
# def __init__(self, **kwargs):
# """ Initialize default class """
# ParseArgs.__init__(self, **kwargs)
| |
# Copyright 2014 Cisco Systems, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
import mock
import netaddr
from oslo_config import cfg
from oslo_utils import uuidutils
from neutron.tests import base
from networking_cisco.plugins.cisco.cfg_agent.device_drivers.asr1k import (
asr1k_routing_driver as driver)
from networking_cisco.plugins.cisco.cfg_agent.device_drivers.asr1k import (
asr1k_snippets as snippets)
from networking_cisco.plugins.cisco.cfg_agent.device_drivers.iosxe import (
cisco_iosxe_snippets as iosxe_snippets)
from networking_cisco.plugins.cisco.cfg_agent.device_drivers.iosxe import (
iosxe_routing_driver as iosxe_driver)
from networking_cisco.plugins.cisco.cfg_agent.service_helpers import (
routing_svc_helper)
from networking_cisco.plugins.cisco.common import cisco_constants
from networking_cisco.plugins.cisco.extensions import ha
from networking_cisco.plugins.cisco.extensions import routerrole
from networking_cisco.tests.unit.cisco.cfg_agent import cfg_agent_test_support
# Stub out ncclient so importing the ASR1k driver never needs the real
# NETCONF library in the test environment.
sys.modules['ncclient'] = mock.MagicMock()
_uuid = uuidutils.generate_uuid
# Maximum device name length enforced by the IOS XE driver.
DEV_NAME_LEN = iosxe_driver.IosXeRoutingDriver.DEV_NAME_LEN
HA_INFO = 'ha_info'
ROUTER_ROLE_ATTR = routerrole.ROUTER_ROLE_ATTR
ROUTER_ROLE_HA_REDUNDANCY = cisco_constants.ROUTER_ROLE_HA_REDUNDANCY
class ASR1kRoutingDriver(base.BaseTestCase,
cfg_agent_test_support.CfgAgentTestSupportMixin):
    def setUp(self):
        """Create an ASR1k driver whose device connection is fully mocked."""
        super(ASR1kRoutingDriver, self).setUp()
        # Tests enable multi-region individually; start disabled.
        cfg.CONF.set_override('enable_multi_region', False, 'multi_region')
        device_params = self.prepare_hosting_device_params()
        self.driver = driver.ASR1kRoutingDriver(**device_params)
        # Mock everything that would talk to a real device.
        self.driver._ncc_connection = mock.MagicMock()
        self.driver._check_response = mock.MagicMock(return_value=True)
        self.driver._check_acl = mock.MagicMock(return_value=False)
    def tearDown(self):
        """Reset the mocked connection so recorded calls do not leak."""
        super(ASR1kRoutingDriver, self).tearDown()
        self.driver._ncc_connection.reset_mock()
    def _create_test_routers(self, is_user_visible=True):
        """Build a tenant router fixture plus the attributes tests assert on.

        Populates self.router/self.ri and convenience attributes for
        the gateway port (ext_*) and one internal port (int_*).
        """
        self.router, ports = self.prepare_router_data(
            is_user_visible=is_user_visible)
        self.ri = routing_svc_helper.RouterInfo(self.router['id'],
                                                self.router)
        self.ha_priority = self.router[ha.DETAILS][ha.PRIORITY]
        # VRF name is derived from the router id, truncated to the
        # device's maximum name length.
        self.vrf = ('nrouter-' + self.router['id'])[:DEV_NAME_LEN]
        # router port on external network, i.e., gateway port
        self.ext_gw_port = self.router['gw_port']
        self.ext_gw_port['ip_info'] = {
            'subnet_id': self.ext_gw_port['subnets'][0]['id'],
            'is_primary': True,
            'ip_cidr': self.ext_gw_port['subnets'][0]['cidr']
        }
        self.ext_phy_infc = (
            self.ext_gw_port['hosting_info']['physical_interface'])
        self.vlan_ext = self.ext_gw_port['hosting_info']['segmentation_id']
        self.ext_gw_upstream_ip = self.ext_gw_port['subnets'][0]['gateway_ip']
        self.ext_gw_ip = self.ext_gw_port['fixed_ips'][0]['ip_address']
        self.ext_gw_ip_cidr = self.ext_gw_port['subnets'][0]['cidr']
        self.ext_gw_ip_mask = str(
            netaddr.IPNetwork(self.ext_gw_ip_cidr).netmask)
        port_ha_info = self.ext_gw_port['ha_info']
        self.ext_gw_ha_group = port_ha_info['group']
        # router port on internal network
        self.int_port = ports[0]
        self.int_port['ip_info'] = {
            'subnet_id': self.int_port['subnets'][0]['id'],
            'is_primary': True,
            'ip_cidr': self.int_port['subnets'][0]['cidr']
        }
        self.int_port['change_details'] = {
            'new_ports': [self.int_port],
            'current_ports': [self.int_port],
            'old_ports': [],
            'former_ports': []
        }
        self.int_phy_infc = self.int_port['hosting_info']['physical_interface']
        self.vlan_int = self.int_port['hosting_info']['segmentation_id']
        self.int_gw_ip = self.int_port['fixed_ips'][0]['ip_address']
        self.int_gw_ip_cidr = self.int_port['subnets'][0]['cidr']
        self.int_gw_ip_mask = str(
            netaddr.IPNetwork(self.int_gw_ip_cidr).netmask)
        port_ha_info = self.int_port['ha_info']
        # HSRP virtual IP of the internal port's HA group.
        self.int_gw_ip_vip = (
            port_ha_info['ha_port']['fixed_ips'][0]['ip_address'])
        self.int_gw_ha_group = port_ha_info['group']
        # Addresses used by the floating-ip tests.
        self.floating_ip = '19.4.0.6'
        self.fixed_ip = '35.4.0.20'
    def _create_test_global_routers(self, num_ext_subnets=1, subnet_index=0):
        """Build a global-router fixture and its convenience attributes.

        :param num_ext_subnets: number of subnets on the external net.
        :param subnet_index: subnet whose addresses the gl_port_* helper
            attributes refer to.
        """
        # global router and its ports
        self.global_router, gl_ports = self.prepare_router_data(
            is_global=True, num_ext_subnets=num_ext_subnets)
        self.ha_priority = self.global_router[ha.DETAILS][ha.PRIORITY]
        self.ri_global = routing_svc_helper.RouterInfo(
            self.global_router['id'], self.global_router)
        self.gl_port = gl_ports[0]
        # ip_info always points at the first subnet; tests for secondary
        # subnets override it explicitly.
        self.gl_port['ip_info'] = {
            'subnet_id': self.gl_port['subnets'][0]['id'],
            'is_primary': True,
            'ip_cidr': self.gl_port['subnets'][0]['cidr']
        }
        self.ext_phy_infc = self.gl_port['hosting_info']['physical_interface']
        self.vlan_ext = self.gl_port['hosting_info']['segmentation_id']
        self.gl_port_ip = self.gl_port['fixed_ips'][subnet_index]['ip_address']
        self.gl_port_ip_cidr = self.gl_port['subnets'][subnet_index]['cidr']
        self.gl_port_ip_mask = str(
            netaddr.IPNetwork(self.gl_port_ip_cidr).netmask)
        port_ha_info = self.gl_port['ha_info']
        self.gl_port_vip = (
            port_ha_info['ha_port']['fixed_ips'][subnet_index]['ip_address'])
        self.gl_port_ha_group = port_ha_info['group']
def assert_edit_run_cfg(self, snippet_name, args):
if args:
confstr = snippet_name % args
else:
confstr = snippet_name
self.driver._ncc_connection.edit_config.assert_any_call(
target='running', config=confstr)
def _assert_number_of_edit_run_cfg_calls(self, num):
self.assertEqual(num,
self.driver._ncc_connection.edit_config.call_count)
def _generate_hsrp_cfg_args(self, subintfc, group, priority, vip, vlan):
return (subintfc,
group, priority,
group, vip,
group,
group, group, vlan)
def test_internal_network_added(self):
self._create_test_routers()
self.driver.internal_network_added(self.ri, self.int_port)
sub_interface = self.int_phy_infc + '.' + str(self.vlan_int)
cfg_args_sub = (sub_interface, self.vlan_int, self.vrf, self.int_gw_ip,
self.int_gw_ip_mask)
self.assert_edit_run_cfg(
snippets.CREATE_SUBINTERFACE_WITH_ID, cfg_args_sub)
cfg_args_hsrp = self._generate_hsrp_cfg_args(
sub_interface, self.int_gw_ha_group, self.ha_priority,
self.int_gw_ip_vip, self.vlan_int)
self.assert_edit_run_cfg(
snippets.SET_INTC_ASR_HSRP_EXTERNAL, cfg_args_hsrp)
def test_internal_network_added_with_multi_region(self):
cfg.CONF.set_override('enable_multi_region', True, 'multi_region')
self._create_test_routers()
is_multi_region_enabled = cfg.CONF.multi_region.enable_multi_region
self.assertEqual(True, is_multi_region_enabled)
region_id = cfg.CONF.multi_region.region_id
vrf = self.vrf + "-" + region_id
self.driver.internal_network_added(self.ri, self.int_port)
sub_interface = self.int_phy_infc + '.' + str(self.vlan_int)
cfg_args_sub = (sub_interface, region_id, self.vlan_int, vrf,
self.int_gw_ip, self.int_gw_ip_mask)
self.assert_edit_run_cfg(
snippets.CREATE_SUBINTERFACE_REGION_ID_WITH_ID, cfg_args_sub)
cfg_args_hsrp = self._generate_hsrp_cfg_args(
sub_interface, self.int_gw_ha_group, self.ha_priority,
self.int_gw_ip_vip, self.vlan_int)
self.assert_edit_run_cfg(
snippets.SET_INTC_ASR_HSRP_EXTERNAL, cfg_args_hsrp)
def test_internal_network_added_global_router(self):
self._create_test_global_routers()
self.driver.internal_network_added(self.ri_global, self.gl_port)
sub_interface = self.ext_phy_infc + '.' + str(self.vlan_ext)
cfg_args_sub = (sub_interface, self.vlan_ext,
self.gl_port_ip, self.gl_port_ip_mask)
self.assert_edit_run_cfg(
snippets.CREATE_SUBINTERFACE_EXTERNAL_WITH_ID, cfg_args_sub)
cfg_args_hsrp = self._generate_hsrp_cfg_args(
sub_interface, self.gl_port_ha_group, self.ha_priority,
self.gl_port_vip, self.vlan_ext)
self.assert_edit_run_cfg(
snippets.SET_INTC_ASR_HSRP_EXTERNAL, cfg_args_hsrp)
def test_internal_network_added_global_router_secondary_subnet(self):
self._create_test_global_routers(num_ext_subnets=2, subnet_index=1)
self.gl_port['ip_info']['subnet_id'] = self.gl_port['subnets'][1]['id']
self.gl_port['ip_info']['ip_cidr'] = self.gl_port['subnets'][1]['cidr']
self.gl_port['ip_info']['is_primary'] = False
self.driver.internal_network_added(self.ri_global, self.gl_port)
sub_interface = self.ext_phy_infc + '.' + str(self.vlan_ext)
cfg_args_sub = (sub_interface, self.gl_port_ip, self.gl_port_ip_mask)
self.assert_edit_run_cfg(
snippets.SET_INTERFACE_SECONDARY_IP, cfg_args_sub)
cfg_args_hsrp = (sub_interface, self.gl_port_ha_group,
self.gl_port_vip)
self.assert_edit_run_cfg(
snippets.SET_INTC_ASR_SECONDARY_HSRP_EXTERNAL, cfg_args_hsrp)
def test_internal_network_added_global_router_with_multi_region(self):
cfg.CONF.set_override('enable_multi_region', True, 'multi_region')
self._create_test_global_routers()
is_multi_region_enabled = cfg.CONF.multi_region.enable_multi_region
self.assertEqual(True, is_multi_region_enabled)
region_id = cfg.CONF.multi_region.region_id
self.driver.internal_network_added(self.ri_global, self.gl_port)
sub_interface = self.ext_phy_infc + '.' + str(self.vlan_ext)
cfg_args_sub = (sub_interface, region_id, self.vlan_ext,
self.gl_port_ip, self.gl_port_ip_mask)
self.assert_edit_run_cfg(
snippets.CREATE_SUBINTERFACE_EXT_REGION_ID_WITH_ID, cfg_args_sub)
cfg_args_hsrp = self._generate_hsrp_cfg_args(
sub_interface, self.gl_port_ha_group, self.ha_priority,
self.gl_port_vip, self.vlan_ext)
self.assert_edit_run_cfg(
snippets.SET_INTC_ASR_HSRP_EXTERNAL, cfg_args_hsrp)
def test_internal_network_added_global_router_with_multi_region_sec_sn(
self):
cfg.CONF.set_override('enable_multi_region', True, 'multi_region')
self._create_test_global_routers(num_ext_subnets=2, subnet_index=1)
is_multi_region_enabled = cfg.CONF.multi_region.enable_multi_region
self.assertEqual(True, is_multi_region_enabled)
self.gl_port['ip_info']['subnet_id'] = self.gl_port['subnets'][1]['id']
self.gl_port['ip_info']['ip_cidr'] = self.gl_port['subnets'][1]['cidr']
self.gl_port['ip_info']['is_primary'] = False
self.driver.internal_network_added(self.ri_global, self.gl_port)
sub_interface = self.ext_phy_infc + '.' + str(self.vlan_ext)
cfg_args_sub = (sub_interface, self.gl_port_ip, self.gl_port_ip_mask)
self.assert_edit_run_cfg(
snippets.SET_INTERFACE_SECONDARY_IP, cfg_args_sub)
cfg_args_hsrp = (sub_interface, self.gl_port_ha_group,
self.gl_port_vip)
self.assert_edit_run_cfg(
snippets.SET_INTC_ASR_SECONDARY_HSRP_EXTERNAL, cfg_args_hsrp)
    def _make_test_router_non_ha(self):
        """Create the standard routers, then strip all HA state from them."""
        self._create_test_routers()
        self.ri.router[ha.ENABLED] = False
        del self.ri.router[ha.DETAILS]
        del self.ext_gw_port[HA_INFO]
        del self.int_port[HA_INFO]
def test_external_network_added_non_ha(self):
self._make_test_router_non_ha()
self.driver.external_gateway_added(self.ri, self.ext_gw_port)
sub_interface = self.ext_phy_infc + '.' + str(self.vlan_ext)
self.assert_edit_run_cfg(iosxe_snippets.ENABLE_INTF, sub_interface)
cfg_params_nat = (self.vrf + '_nat_pool', self.ext_gw_ip,
self.ext_gw_ip, self.ext_gw_ip_mask)
self.assert_edit_run_cfg(snippets.CREATE_NAT_POOL, cfg_params_nat)
def test_external_network_added_user_visible_router(self):
self._create_test_routers()
self.driver.external_gateway_added(self.ri, self.ext_gw_port)
sub_interface = self.ext_phy_infc + '.' + str(self.vlan_ext)
self.assert_edit_run_cfg(iosxe_snippets.ENABLE_INTF, sub_interface)
cfg_params_nat = (self.vrf + '_nat_pool', self.ext_gw_ip,
self.ext_gw_ip, self.ext_gw_ip_mask)
self.assert_edit_run_cfg(snippets.CREATE_NAT_POOL, cfg_params_nat)
def test_external_network_added_redundancy_router(self):
self._create_test_routers(is_user_visible=False)
self.driver.external_gateway_added(self.ri, self.ext_gw_port)
sub_interface = self.ext_phy_infc + '.' + str(self.vlan_ext)
self.assert_edit_run_cfg(iosxe_snippets.ENABLE_INTF, sub_interface)
cfg_params_nat = (self.vrf + '_nat_pool', self.ext_gw_ip,
self.ext_gw_ip, self.ext_gw_ip_mask)
self.assert_edit_run_cfg(snippets.CREATE_NAT_POOL, cfg_params_nat)
def test_external_network_added_with_multi_region(self):
cfg.CONF.set_override('enable_multi_region', True, 'multi_region')
self._create_test_routers()
is_multi_region_enabled = cfg.CONF.multi_region.enable_multi_region
self.assertEqual(True, is_multi_region_enabled)
region_id = cfg.CONF.multi_region.region_id
vrf = self.vrf + "-" + region_id
self.driver.external_gateway_added(self.ri, self.ext_gw_port)
sub_interface = self.ext_phy_infc + '.' + str(self.vlan_ext)
self.assert_edit_run_cfg(iosxe_snippets.ENABLE_INTF, sub_interface)
cfg_params_nat = (vrf + '_nat_pool', self.ext_gw_ip,
self.ext_gw_ip, self.ext_gw_ip_mask)
self.assert_edit_run_cfg(snippets.CREATE_NAT_POOL, cfg_params_nat)
def test_external_gateway_removed_non_ha(self):
self._make_test_router_non_ha()
self.driver.external_gateway_removed(self.ri, self.ext_gw_port)
cfg_params_nat = (self.vrf + '_nat_pool', self.ext_gw_ip,
self.ext_gw_ip, self.ext_gw_ip_mask)
self.assert_edit_run_cfg(snippets.DELETE_NAT_POOL, cfg_params_nat)
sub_interface = self.ext_phy_infc + '.' + str(self.vlan_ext)
cfg_params_remove_route = (self.vrf,
sub_interface, self.ext_gw_upstream_ip)
self.assert_edit_run_cfg(snippets.REMOVE_DEFAULT_ROUTE_WITH_INTF,
cfg_params_remove_route)
def test_external_gateway_removed_user_visible_router(self):
self._create_test_routers()
self.driver.external_gateway_removed(self.ri, self.ext_gw_port)
cfg_params_nat = (self.vrf + '_nat_pool', self.ext_gw_ip,
self.ext_gw_ip, self.ext_gw_ip_mask)
self.assert_edit_run_cfg(snippets.DELETE_NAT_POOL, cfg_params_nat)
sub_interface = self.ext_phy_infc + '.' + str(self.vlan_ext)
cfg_params_remove_route = (self.vrf,
sub_interface, self.ext_gw_upstream_ip)
self.assert_edit_run_cfg(snippets.REMOVE_DEFAULT_ROUTE_WITH_INTF,
cfg_params_remove_route)
def test_external_gateway_removed_redundancy_router(self):
self._create_test_routers(is_user_visible=False)
self.driver.external_gateway_removed(self.ri, self.ext_gw_port)
cfg_params_nat = (self.vrf + '_nat_pool', self.ext_gw_ip,
self.ext_gw_ip, self.ext_gw_ip_mask)
self.assert_edit_run_cfg(snippets.DELETE_NAT_POOL, cfg_params_nat)
sub_interface = self.ext_phy_infc + '.' + str(self.vlan_ext)
cfg_params_remove_route = (self.vrf,
sub_interface, self.ext_gw_upstream_ip)
self.assert_edit_run_cfg(snippets.REMOVE_DEFAULT_ROUTE_WITH_INTF,
cfg_params_remove_route)
def test_external_gateway_removed_with_multi_region(self):
cfg.CONF.set_override('enable_multi_region', True, 'multi_region')
self._create_test_routers()
is_multi_region_enabled = cfg.CONF.multi_region.enable_multi_region
self.assertEqual(True, is_multi_region_enabled)
region_id = cfg.CONF.multi_region.region_id
vrf = self.vrf + "-" + region_id
self.driver.external_gateway_removed(self.ri, self.ext_gw_port)
cfg_params_nat = (vrf + '_nat_pool', self.ext_gw_ip,
self.ext_gw_ip, self.ext_gw_ip_mask)
self.assert_edit_run_cfg(snippets.DELETE_NAT_POOL, cfg_params_nat)
sub_interface = self.ext_phy_infc + '.' + str(self.vlan_ext)
cfg_params_remove_route = (vrf,
sub_interface, self.ext_gw_upstream_ip)
self.assert_edit_run_cfg(snippets.REMOVE_DEFAULT_ROUTE_WITH_INTF,
cfg_params_remove_route)
def test_external_gateway_removed_global_router(self):
self._create_test_global_routers()
self.driver._interface_exists = mock.MagicMock(return_value=True)
self.driver.external_gateway_removed(self.ri_global, self.gl_port)
sub_interface = self.ext_phy_infc + '.' + str(self.vlan_ext)
self.assert_edit_run_cfg(
iosxe_snippets.REMOVE_SUBINTERFACE, sub_interface)
def test_floating_ip_added(self):
self._create_test_routers()
self.driver.floating_ip_added(self.ri, self.ext_gw_port,
self.floating_ip, self.fixed_ip)
self._assert_number_of_edit_run_cfg_calls(1)
cfg_params_floating = (self.fixed_ip, self.floating_ip, self.vrf,
self.ext_gw_ha_group, self.vlan_ext)
self.assert_edit_run_cfg(snippets.SET_STATIC_SRC_TRL_NO_VRF_MATCH,
cfg_params_floating)
def test_floating_ip_added_with_multi_region(self):
cfg.CONF.set_override('enable_multi_region', True, 'multi_region')
self._create_test_routers()
is_multi_region_enabled = cfg.CONF.multi_region.enable_multi_region
self.assertEqual(True, is_multi_region_enabled)
region_id = cfg.CONF.multi_region.region_id
vrf = self.vrf + "-" + region_id
self.driver.floating_ip_added(self.ri, self.ext_gw_port,
self.floating_ip, self.fixed_ip)
self._assert_number_of_edit_run_cfg_calls(1)
cfg_params_floating = (self.fixed_ip, self.floating_ip, vrf,
self.ext_gw_ha_group, self.vlan_ext)
self.assert_edit_run_cfg(snippets.SET_STATIC_SRC_TRL_NO_VRF_MATCH,
cfg_params_floating)
def test_floating_ip_removed(self):
self._create_test_routers()
self.driver.floating_ip_removed(self.ri, self.ext_gw_port,
self.floating_ip, self.fixed_ip)
self._assert_number_of_edit_run_cfg_calls(1)
cfg_params_floating = (self.fixed_ip, self.floating_ip, self.vrf,
self.ext_gw_ha_group, self.vlan_ext)
self.assert_edit_run_cfg(snippets.REMOVE_STATIC_SRC_TRL_NO_VRF_MATCH,
cfg_params_floating)
def test_floating_ip_removed_with_multi_region(self):
cfg.CONF.set_override('enable_multi_region', True, 'multi_region')
self._create_test_routers()
is_multi_region_enabled = cfg.CONF.multi_region.enable_multi_region
self.assertEqual(True, is_multi_region_enabled)
region_id = cfg.CONF.multi_region.region_id
vrf = self.vrf + "-" + region_id
self.driver.floating_ip_removed(self.ri, self.ext_gw_port,
self.floating_ip, self.fixed_ip)
self._assert_number_of_edit_run_cfg_calls(1)
cfg_params_floating = (self.fixed_ip, self.floating_ip, vrf,
self.ext_gw_ha_group, self.vlan_ext)
self.assert_edit_run_cfg(snippets.REMOVE_STATIC_SRC_TRL_NO_VRF_MATCH,
cfg_params_floating)
def test_driver_enable_internal_network_NAT(self):
    """Enabling NAT for an internal network pushes four config edits.

    Expected edits: create the ACL for the internal subnet, bind the
    dynamic source-translation pool to the VRF, and mark the internal and
    external sub-interfaces as NAT 'inside'/'outside' respectively.
    """
    self._create_test_routers()
    self.driver.enable_internal_network_NAT(self.ri, self.int_port,
                                            self.ext_gw_port)
    self._assert_number_of_edit_run_cfg_calls(4)
    # ACL name embeds the VLAN and the first 8 chars of the port UUID.
    acl_name = '%(acl_prefix)s_%(vlan)s_%(port)s' % {
        'acl_prefix': 'neutron_acl',
        'vlan': self.vlan_int,
        'port': self.int_port['id'][:8]}
    # CREATE_ACL takes the network address plus the wildcard (host) mask.
    net = netaddr.IPNetwork(self.int_gw_ip_cidr).network
    net_mask = netaddr.IPNetwork(self.int_gw_ip_cidr).hostmask
    cfg_params_create_acl = (acl_name, net, net_mask)
    self.assert_edit_run_cfg(
        iosxe_snippets.CREATE_ACL, cfg_params_create_acl)
    pool_name = "%s_nat_pool" % self.vrf
    cfg_params_dyn_trans = (acl_name, pool_name, self.vrf)
    self.assert_edit_run_cfg(
        snippets.SET_DYN_SRC_TRL_POOL, cfg_params_dyn_trans)
    # NOTE(review): both sub-interfaces are derived from int_phy_infc —
    # presumably internal/external share the physical trunk; confirm.
    sub_interface_int = self.int_phy_infc + '.' + str(self.vlan_int)
    sub_interface_ext = self.int_phy_infc + '.' + str(self.vlan_ext)
    self.assert_edit_run_cfg(iosxe_snippets.SET_NAT,
                             (sub_interface_int, 'inside'))
    self.assert_edit_run_cfg(iosxe_snippets.SET_NAT,
                             (sub_interface_ext, 'outside'))
def test_driver_enable_internal_network_NAT_with_multi_region(self):
    """Enabling NAT with multi_region uses region-scoped ACL and pool names.

    The ACL name gains a region_id segment, and the NAT pool/VRF use the
    ``<vrf>-<region_id>`` form; four config edits are still expected.
    """
    cfg.CONF.set_override('enable_multi_region', True, 'multi_region')
    self._create_test_routers()
    # Sanity-check that the config override actually took effect.
    self.assertTrue(cfg.CONF.multi_region.enable_multi_region)
    region_id = cfg.CONF.multi_region.region_id
    vrf = self.vrf + "-" + region_id
    self.driver.enable_internal_network_NAT(self.ri, self.int_port,
                                            self.ext_gw_port)
    self._assert_number_of_edit_run_cfg_calls(4)
    # ACL name embeds the region, the VLAN and the first 8 chars of the
    # port UUID.
    acl_name = '%(acl_prefix)s_%(region_id)s_%(vlan)s_%(port)s' % {
        'acl_prefix': 'neutron_acl',
        'region_id': region_id,
        'vlan': self.vlan_int,
        'port': self.int_port['id'][:8]}
    # CREATE_ACL takes the network address plus the wildcard (host) mask.
    net = netaddr.IPNetwork(self.int_gw_ip_cidr).network
    net_mask = netaddr.IPNetwork(self.int_gw_ip_cidr).hostmask
    cfg_params_create_acl = (acl_name, net, net_mask)
    self.assert_edit_run_cfg(
        iosxe_snippets.CREATE_ACL, cfg_params_create_acl)
    pool_name = "%s_nat_pool" % vrf
    cfg_params_dyn_trans = (acl_name, pool_name, vrf)
    self.assert_edit_run_cfg(
        snippets.SET_DYN_SRC_TRL_POOL, cfg_params_dyn_trans)
    # NOTE(review): both sub-interfaces are derived from int_phy_infc —
    # presumably internal/external share the physical trunk; confirm.
    sub_interface_int = self.int_phy_infc + '.' + str(self.vlan_int)
    sub_interface_ext = self.int_phy_infc + '.' + str(self.vlan_ext)
    self.assert_edit_run_cfg(iosxe_snippets.SET_NAT,
                             (sub_interface_int, 'inside'))
    self.assert_edit_run_cfg(iosxe_snippets.SET_NAT,
                             (sub_interface_ext, 'outside'))
def test_driver_disable_internal_network_NAT(self):
    """Disabling NAT removes the dynamic translation pool and the ACL."""
    self._create_test_routers()
    self.driver.disable_internal_network_NAT(self.ri, self.int_port,
                                             self.ext_gw_port)
    self._assert_number_of_edit_run_cfg_calls(3)
    # Rebuild the names the driver is expected to have used.
    acl = '%(acl_prefix)s_%(vlan)s_%(port)s' % {
        'acl_prefix': 'neutron_acl',
        'vlan': self.vlan_int,
        'port': self.int_port['id'][:8]}
    pool = "%s_nat_pool" % self.vrf
    self.assert_edit_run_cfg(
        snippets.REMOVE_DYN_SRC_TRL_POOL, (acl, pool, self.vrf))
    self.assert_edit_run_cfg(iosxe_snippets.REMOVE_ACL, acl)
def test_driver_disable_internal_network_NAT_with_multi_region(self):
    """Disabling NAT in multi-region mode removes region-scoped names.

    The removed ACL carries the region_id segment and the pool/VRF use
    the ``<vrf>-<region_id>`` form; three config edits are expected.
    """
    cfg.CONF.set_override('enable_multi_region', True, 'multi_region')
    self._create_test_routers()
    # Sanity-check that the config override actually took effect.
    self.assertTrue(cfg.CONF.multi_region.enable_multi_region)
    region_id = cfg.CONF.multi_region.region_id
    vrf = self.vrf + "-" + region_id
    self.driver.disable_internal_network_NAT(self.ri, self.int_port,
                                             self.ext_gw_port)
    self._assert_number_of_edit_run_cfg_calls(3)
    # ACL name embeds the region, the VLAN and the first 8 chars of the
    # port UUID.
    acl_name = '%(acl_prefix)s_%(region_id)s_%(vlan)s_%(port)s' % {
        'acl_prefix': 'neutron_acl',
        'region_id': region_id,
        'vlan': self.vlan_int,
        'port': self.int_port['id'][:8]}
    pool_name = "%s_nat_pool" % vrf
    cfg_params_dyn_trans = (acl_name, pool_name, vrf)
    self.assert_edit_run_cfg(
        snippets.REMOVE_DYN_SRC_TRL_POOL, cfg_params_dyn_trans)
    self.assert_edit_run_cfg(iosxe_snippets.REMOVE_ACL, acl_name)
def test_enable_interface_user_visible_router(self):
    """Enabling the gateway interface issues ENABLE_INTF for the sub-if."""
    self._create_test_routers()
    self.driver.enable_router_interface(self.ri, self.ext_gw_port)
    expected_subif = '%s.%s' % (self.ext_phy_infc, self.vlan_ext)
    self.assert_edit_run_cfg(iosxe_snippets.ENABLE_INTF, expected_subif)
def test_enable_interface_redundancy_router(self):
    """A redundancy (non-user-visible) router also gets ENABLE_INTF."""
    self._create_test_routers(is_user_visible=False)
    self.driver.enable_router_interface(self.ri, self.ext_gw_port)
    expected_subif = '%s.%s' % (self.ext_phy_infc, self.vlan_ext)
    self.assert_edit_run_cfg(iosxe_snippets.ENABLE_INTF, expected_subif)
def test_disable_interface_user_visible_router(self):
    """Disabling the gateway interface issues DISABLE_INTF for the sub-if."""
    self._create_test_routers()
    self.driver.disable_router_interface(self.ri, self.ext_gw_port)
    expected_subif = '%s.%s' % (self.ext_phy_infc, self.vlan_ext)
    self.assert_edit_run_cfg(iosxe_snippets.DISABLE_INTF, expected_subif)
def test_disable_interface_redundancy_router(self):
    """A redundancy (non-user-visible) router also gets DISABLE_INTF."""
    self._create_test_routers(is_user_visible=False)
    self.driver.disable_router_interface(self.ri, self.ext_gw_port)
    expected_subif = '%s.%s' % (self.ext_phy_infc, self.vlan_ext)
    self.assert_edit_run_cfg(iosxe_snippets.DISABLE_INTF, expected_subif)
def test_get_configuration(self):
    """get_configuration() must delegate to _get_running_config(split=False)."""
    self._create_test_routers()
    running_cfg_mock = mock.MagicMock()
    self.driver._get_running_config = running_cfg_mock
    self.driver.get_configuration()
    running_cfg_mock.assert_called_once_with(split=False)
| |
#
# Copyright 2012 WebFilings, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import base64
import json
import os
import unittest
from nose.plugins.attrib import attr
from mock import call
from mock import Mock
from mock import patch
from furious.test_stubs.appengine.queues import _fetch_random_task_from_queue
from furious.test_stubs.appengine.queues import run_random
from furious.test_stubs.appengine.queues import _run_random_task_from_queue
from furious.test_stubs.appengine.queues import _is_furious_task
class TestExecuteTask(unittest.TestCase):
    """Ensure _execute_task runs the tasks."""

    @patch('time.ctime')
    def test_run_task(self, ctime):
        """When a task is passed to _execute_task, make sure it is run.

        Ensure the task's environment is cleaned up.
        """
        from furious.context import _local
        from furious.test_stubs.appengine.queues import _execute_task

        # Create the async_options to call the target, ctime()
        async_options = {'job': ('time.ctime', None, None)}

        # Task bodies are base64-encoded JSON, matching how App Engine
        # delivers push-queue payloads.
        body = base64.b64encode(json.dumps(async_options))

        url = '/_ah/queue/async'

        task = {'url': url, 'body': body, 'headers': {}}

        _execute_task(task)

        # Make sure our function was called
        self.assertTrue(ctime.called)

        # Make sure context cleanup worked: no leftover request id in the
        # environment and no async registry on the thread-local context.
        self.assertFalse('REQUEST_ID_HASH' in os.environ)
        self.assertFalse(hasattr(_local._local_context, 'registry'))

    @patch('time.strftime', autospec=True)
    def test_run_task_with_args_kwargs(self, strftime):
        """When a task with args and kwargs is passed to _execute_task, make
        sure it is run with those parameters.

        Ensure the task's environment is cleaned up.
        """
        from furious.context import _local
        from furious.test_stubs.appengine.queues import _execute_task

        # Create the async_options to call the mocked target, strftime().
        # To test args and kwargs, our arguments to the mocked strftime
        # won't match the real strftime's expected parameters.
        args = [1, 2]
        kwargs = {'my_kwarg': 'my_value'}
        async_options = {'job': ('time.strftime',
                                 args, kwargs)}

        body = base64.b64encode(json.dumps(async_options))

        url = '/_ah/queue/async'

        task = {'url': url, 'body': body, 'headers': {}}

        _execute_task(task)

        # Make sure our function was called with the right arguments
        strftime.assert_called_once_with(*args, **kwargs)

        # Make sure context cleanup worked
        self.assertFalse('REQUEST_ID_HASH' in os.environ)
        self.assertFalse(hasattr(_local._local_context, 'registry'))
class TestRunQueue(unittest.TestCase):
    """Ensure tasks from queues are run.

    Exercises run_queue() against a mocked queue service: tasks present
    (each executed, queue flushed) and no tasks (nothing executed).
    """

    @patch('furious.test_stubs.appengine.queues._execute_task')
    def test_run_queue(self, _execute_task):
        """When run() is called, ensure tasks are run, and
        the queue is flushed to remove run tasks. Also, ensure the number
        of processed messages is returned.
        """
        from furious.test_stubs.appengine.queues import run_queue

        queue_service = Mock()
        queue_service.GetTasks.return_value = ['task1', 'task2', 'task3']

        num_processed = run_queue(queue_service, 'default')

        # Expect _execute_task() to be called for each task
        expected_call_args_list = [call('task1', None, None),
                                   call('task2', None, None),
                                   call('task3', None, None)]

        # assertEqual, not the deprecated assertEquals alias.
        self.assertEqual(_execute_task.call_args_list,
                         expected_call_args_list)

        # Make sure FlushQueue was called once to clear the queue after
        # tasks were processed
        self.assertEqual(1, queue_service.FlushQueue.call_count)

        # We should have processed tasks, so verify the num processed.
        self.assertEqual(3, num_processed)

    @patch('furious.test_stubs.appengine.queues._execute_task')
    def test_run_queue_no_tasks(self, _execute_task):
        """When run() is called and there are no tasks in the queue,
        ensure _execute_task is not called.

        Ensure 0 is returned since no messages were processed.
        """
        from furious.test_stubs.appengine.queues import run_queue

        queue_service = Mock()
        queue_service.GetTasks.return_value = []

        num_processed = run_queue(queue_service, 'default')

        # Expect _execute_task() to not be called since there are no tasks
        self.assertFalse(_execute_task.called)

        # We should not have processed any tasks, so verify 0 processed.
        self.assertEqual(0, num_processed)
class TestRunQueues(unittest.TestCase):
    """Ensure tasks from queues are run."""

    @patch('furious.test_stubs.appengine.queues.run_queue')
    def test_run(self, run_queue):
        """Ensure all push queues are processed by run().

        Ensure pull queues are skipped.
        """
        from furious.test_stubs.appengine.queues import run

        queue_descs = [
            {'name': 'default', 'mode': 'push', 'bucket_size': 100},
            {'name': 'default-pull', 'mode': 'pull', 'bucket_size': 5},
            {'name': 'another-pull', 'mode': 'pull', 'bucket_size': 5},
            {'name': 'my_queue', 'mode': 'push', 'bucket_size': 100}]

        queue_service = Mock()
        queue_service.GetQueues.side_effect = [queue_descs]

        # Simulate that messages are processed from each push queue.
        num_in_default = 2
        num_in_my = 1
        # The two zeros are num remaining in the 2nd iteration for each queue.
        run_queue.side_effect = [num_in_default, num_in_my, 0, 0]

        run_result = run(queue_service)

        # Expected 'default' and 'my_queue' to be the only queues processed
        # since others are pull queues.
        # NOTE(review): the trailing (None, None, False) args mirror run()'s
        # pass-through defaults — confirm against run_queue's signature.
        expected_call_args_list = [call(queue_service, 'default', None, None, False),
                                   call(queue_service, 'my_queue', None, None, False),
                                   call(queue_service, 'default', None, None, False),
                                   call(queue_service, 'my_queue', None, None, False)]

        # Ensure run_queue processes the push queues.
        self.assertEqual(run_queue.call_args_list, expected_call_args_list)

        # Make sure 2 is returned as the number of messages processed.
        self.assertEqual(num_in_default + num_in_my,
                         run_result['tasks_processed'])
        self.assertEqual(2, run_result['iterations'])

    @patch('furious.test_stubs.appengine.queues.run_queue')
    def test_run_no_messages(self, run_queue):
        """Ensure the return value is False when no messages are processed from
        the queues.

        Ensure all push queues are processed by run().
        Ensure pull queues are skipped.
        """
        from furious.test_stubs.appengine.queues import run

        queue_descs = [
            {'name': 'default', 'mode': 'push', 'bucket_size': 100},
            {'name': 'default-pull', 'mode': 'pull', 'bucket_size': 5},
            {'name': 'my_queue', 'mode': 'push', 'bucket_size': 100}]

        queue_service = Mock()
        queue_service.GetQueues.side_effect = [queue_descs]

        # Simulate that there are no messages processed from any queue.
        run_queue.return_value = 0

        run_result = run(queue_service)

        # Expect 'default' and 'my_queue' to be processed since the other one
        # is a pull queue.  With nothing processed there is no second pass.
        expected_call_args_list = [call(queue_service, 'default', None, None, False),
                                   call(queue_service, 'my_queue', None, None, False)]

        # Ensure run_queue processes tries to process the push queues.
        self.assertEqual(run_queue.call_args_list,
                         expected_call_args_list)

        # Make sure that 0 is the number of messages processed.
        self.assertEqual(0, run_result['tasks_processed'])
        self.assertEqual(1, run_result['iterations'])

    @patch('furious.test_stubs.appengine.queues.run_queue')
    def test_run_some_queues_with_messages(self, run_queue):
        """Ensure that the tasks_processed in the return dict is 5 when the
        first queue processes 5 messages and the next queue processes 0.

        Ensure all push queues are processed by run().
        Ensure pull queues are skipped.
        """
        from furious.test_stubs.appengine.queues import run

        queue_descs = [
            {'name': 'default', 'mode': 'push', 'bucket_size': 100},
            {'name': 'my_queue', 'mode': 'push', 'bucket_size': 100}]

        queue_service = Mock(GetQueues=Mock(side_effect=[queue_descs]))

        # Simulate that messages were processed from the first push queue,
        # but not the second.
        run_queue.side_effect = [5, 0, 0, 0]

        run_result = run(queue_service)

        # Expected 'default' and 'my_queue' to be processed.
        # They are processed twice each since messages were processed the
        # first iteration.
        expected_call_args_list = [call(queue_service, 'default', None, None, False),
                                   call(queue_service, 'my_queue', None, None, False),
                                   call(queue_service, 'default', None, None, False),
                                   call(queue_service, 'my_queue', None, None, False)]

        # Ensure run_queue processes the push queues.
        self.assertEqual(run_queue.call_args_list,
                         expected_call_args_list)

        # Make sure that 5 was returned as the number of messages processed.
        self.assertEqual(5, run_result['tasks_processed'])
        self.assertEqual(2, run_result['iterations'])
@attr('slow')
class TestRunQueuesIntegration(unittest.TestCase):
    """Ensure tasks from queues are run.

    Integration tests against the real App Engine taskqueue stub (slow):
    normal execution, retry-until-max behavior, and per-Async retry-count
    isolation/reset.
    """

    def setUp(self):
        # Spin up a testbed with a taskqueue stub rooted at "".
        from google.appengine.ext import testbed

        self.testbed = testbed.Testbed()
        self.testbed.activate()
        self.testbed.init_taskqueue_stub(root_path="")
        self.testbed.get_stub(testbed.TASKQUEUE_SERVICE_NAME)
        self.taskqueue_service = self.testbed.get_stub(
            testbed.TASKQUEUE_SERVICE_NAME)

    def tearDown(self):
        self.testbed.deactivate()

    @patch('time.ctime')
    def test_run(self, ctime):
        """Ensure tasks are run when run_queues is called."""
        from furious.async import Async
        from furious.test_stubs.appengine.queues import run as run_queues

        # Enqueue a couple of tasks
        async = Async(target='time.ctime')
        async.start()
        async2 = Async(target='time.ctime')
        async2.start()

        # Run the tasks in the queue
        run_queues(self.taskqueue_service)

        self.assertEqual(2, ctime.call_count)

    @patch('time.ctime')
    def test_run_with_retries(self, ctime):
        """
        Ensure tasks are retried when they raise an exception.
        Ensure 10 retries are made - 11 total calls.
        """
        from furious.async import Async
        from furious.test_stubs.appengine.queues import run as run_queues

        # Count the task runs.  Module-level global so the nested
        # task_call() can mutate it.
        global call_count
        call_count = 0

        def task_call():
            """The function our task will call."""
            num_retries = int(os.environ.get('HTTP_X_APPENGINE_TASKRETRYCOUNT'))

            global call_count

            # Ensure the num_retries env var is incremented each time.
            self.assertEqual(num_retries, call_count)

            call_count += 1

            # Raise an Exception to retry until max retries are reached.
            raise Exception()

        ctime.side_effect = task_call

        # Enqueue our task that will fail.
        async = Async(target='time.ctime')
        async.start()

        # Run the tasks in the queue
        run_queues(self.taskqueue_service, enable_retries=True)

        # By default app engine will run the task 11 times: 10 retries
        # after the initial run.
        self.assertEqual(11, call_count)

    @patch('time.ctime')
    @patch('time.asctime')
    @patch('time.accept2dyear')
    def test_run_with_retries_and_retries_reset(self, accept2dyear, asctime,
                                                ctime):
        """
        Ensure tasks retry counts are separate between asyncs.
        Ensure tasks retry counts are reset once an Async is successful.
        """
        from furious.async import Async
        from furious.test_stubs.appengine.queues import run as run_queues

        # Count the task runs.  Stored on self so the nested task
        # functions can mutate them.
        self.async1_call_count = 0
        self.async2_call_count = 0
        self.async3_call_count = 0
        self.async1_retries_env = 0
        self.async2_retries_env = 0
        self.async3_retries_env = 0

        def task_call_task1():
            """The function task1 will call."""
            int(os.environ.get('HTTP_X_APPENGINE_TASKRETRYCOUNT'))
            self.async1_call_count += 1
            if self.async1_call_count < 2:
                # Fail once.
                raise Exception()
            # Record the retry count seen on the successful run.
            self.async1_retries_env = int(
                os.environ.get('HTTP_X_APPENGINE_TASKRETRYCOUNT'))

        def task_call_task3():
            """The function task3 will call."""
            self.async3_call_count += 1
            self.async3_retries_env = int(
                os.environ.get('HTTP_X_APPENGINE_TASKRETRYCOUNT'))

        def task_call_task2():
            """The function task2 will call."""
            self.async2_call_count += 1
            if self.async2_call_count < 3:
                # Fail twice.
                raise Exception()
            self.async2_retries_env = int(
                os.environ.get('HTTP_X_APPENGINE_TASKRETRYCOUNT'))

        async3 = Async(target='time.accept2dyear')
        async3.start()

        ctime.side_effect = task_call_task1
        asctime.side_effect = task_call_task2
        accept2dyear.side_effect = task_call_task3

        # Enqueue our task that will fail.
        async1 = Async(target='time.ctime')
        async1.start()
        async2 = Async(target='time.asctime')
        async2.start()

        # Run the tasks in the queue
        run_queues(self.taskqueue_service, enable_retries=True)

        # Each async's retry count is independent: task1 retried once,
        # task2 twice, task3 succeeded immediately.
        self.assertEqual(self.async1_call_count, 2)
        self.assertEqual(self.async2_call_count, 3)
        self.assertEqual(self.async3_call_count, 1)
        self.assertEqual(self.async1_retries_env, 1)
        self.assertEqual(self.async2_retries_env, 2)
        self.assertEqual(self.async3_retries_env, 0)

        # Clear
        del self.async1_call_count
        del self.async2_call_count
        del self.async3_call_count
        del self.async1_retries_env
        del self.async2_retries_env
        del self.async3_retries_env
@attr('slow')
class TestPurgeTasks(unittest.TestCase):
"""Ensure that purge_tasks() clears tasks from queues."""
def setUp(self):
from google.appengine.ext import testbed
self.testbed = testbed.Testbed()
self.testbed.activate()
self.testbed.init_taskqueue_stub(root_path="")
self.testbed.get_stub(testbed.TASKQUEUE_SERVICE_NAME)
self.taskqueue_service = self.testbed.get_stub(
testbed.TASKQUEUE_SERVICE_NAME)
def tearDown(self):
self.testbed.deactivate()
@patch('time.ctime')
def test_purge_tasks_with_no_tasks(self, ctime):
"""Ensure no errors occur when purging queues containing no tasks.
Ensure the number of tasks cleared is correct.
"""
from furious.test_stubs.appengine.queues import purge_tasks
num_cleared = purge_tasks(self.taskqueue_service)
# Ensure zero tasks were cleared.
self.assertEqual(0, num_cleared)
# Ensure no tasks were run
self.assertEqual(0, ctime.call_count)
@patch('time.ctime')
def test_purge_tasks_with_tasks(self, ctime):
"""After queues are run, ensure no tasks are left to execute.
Ensure the number of tasks cleared is correct.
"""
from furious.async import Async
from furious.batcher import Message
from furious.test_stubs.appengine.queues import run as run_queues
from furious.test_stubs.appengine.queues import purge_tasks
# Enqueue a couple of tasks
async = Async(target='time.ctime')
async.start()
async2 = Async(target='time.ctime')
async2.start()
Message(queue='default-pull').insert()
num_cleared = purge_tasks(self.taskqueue_service)
# Run the tasks to check if tasks remain
run_queues(self.taskqueue_service)
# Ensure three tasks were cleared, from 'default' and 'default-pull'.
self.assertEqual(3, num_cleared)
# Ensure no tasks were run
self.assertEqual(0, ctime.call_count)
@patch('time.ctime')
def test_purge_tasks_with_queue_names_provided(self, ctime):
"""When a list of queue_names is provided, ensure purge_tasks() clears
the tasks and none are left to execute.
Ensure the number of tasks cleared is correct.
"""
from furious.async import Async
from furious.batcher import Message
from furious.test_stubs.appengine.queues import run as run_queues
from furious.test_stubs.appengine.queues import purge_tasks
# Enqueue a couple of tasks
async = Async(target='time.ctime')
async.start()
async2 = Async(target='time.ctime')
async2.start()
Message(queue='default-pull').insert()
num_cleared = purge_tasks(self.taskqueue_service, ['default'])
# Run the tasks to check if tasks remain
run_queues(self.taskqueue_service)
# Ensure two tasks from the default queue were cleared.
self.assertEqual(2, num_cleared)
# Ensure no tasks were run
self.assertEqual(0, ctime.call_count)
@patch('time.ctime')
def test_purge_tasks_with_string_passed_to_queue_names(self, ctime):
"""If a single queue_name is passed to purge_tasks() instead of a list,
ensure that the queue specified is still cleared.
Ensure the number of tasks cleared is correct.
"""
from furious.async import Async
from furious.batcher import Message
from furious.test_stubs.appengine.queues import run as run_queues
from furious.test_stubs.appengine.queues import purge_tasks
# Enqueue a couple of tasks
async = Async(target='time.ctime')
async.start()
async2 = Async(target='time.ctime')
async2.start()
# Insert a pull task
Message(queue='default-pull').insert()
num_cleared = purge_tasks(self.taskqueue_service, 'default')
# Run the tasks to check if tasks remain
run_queues(self.taskqueue_service)
# Ensure two tasks from the default queue were cleared.
self.assertEqual(2, num_cleared)
# Ensure no tasks were run
self.assertEqual(0, ctime.call_count)
def test_purge_with_nonexistent_queue(self, ):
"""If purge is attempted on a queue that does not exist, ensure that an
Exception is raised.
"""
from furious.test_stubs.appengine.queues import purge_tasks
self.assertRaises(Exception, purge_tasks, self.taskqueue_service,
'non-existent-queue')
@attr('slow')
class TestNamesFromQueueService(unittest.TestCase):
    """Verify the queue-name helpers against a live taskqueue stub.

    Covers get_queue_names(), get_pull_queue_names() and
    get_push_queue_names().
    """

    def setUp(self):
        from google.appengine.ext import testbed

        self.testbed = testbed.Testbed()
        self.testbed.activate()
        self.testbed.init_taskqueue_stub(root_path="")
        self.testbed.get_stub(testbed.TASKQUEUE_SERVICE_NAME)
        self.taskqueue_service = self.testbed.get_stub(
            testbed.TASKQUEUE_SERVICE_NAME)

    def tearDown(self):
        self.testbed.deactivate()

    def test_get_pull_queue_names(self):
        """Only the pull queue is reported by get_pull_queue_names()."""
        from furious.test_stubs.appengine.queues import get_pull_queue_names

        pull_names = get_pull_queue_names(self.taskqueue_service)
        self.assertEqual(['default-pull'], pull_names)

    def test_get_push_queue_names(self):
        """Only the push queue is reported by get_push_queue_names()."""
        from furious.test_stubs.appengine.queues import get_push_queue_names

        push_names = get_push_queue_names(self.taskqueue_service)
        self.assertEqual(['default'], push_names)

    def test_get_queue_names(self):
        """get_queue_names() reports push and pull queues together."""
        from furious.test_stubs.appengine.queues import get_queue_names

        all_names = get_queue_names(self.taskqueue_service)
        self.assertEqual(['default', 'default-pull'], all_names)
@attr('slow')
class TestGetTasks(unittest.TestCase):
    """Ensure that get_tasks(), returns the queues' tasks."""

    def setUp(self):
        # Spin up a testbed with a taskqueue stub rooted at "".
        from google.appengine.ext import testbed

        self.testbed = testbed.Testbed()
        self.testbed.activate()
        self.testbed.init_taskqueue_stub(root_path="")
        self.testbed.get_stub(testbed.TASKQUEUE_SERVICE_NAME)
        self.queue_service = self.testbed.get_stub(
            testbed.TASKQUEUE_SERVICE_NAME)

    def tearDown(self):
        self.testbed.deactivate()

    def test_get_tasks_when_there_are_no_tasks(self):
        """Ensure that no tasks are returned from get_tasks() when no tasks
        have been added yet.
        """
        from furious.test_stubs.appengine.queues import get_tasks

        task_dict = get_tasks(self.queue_service)

        # get_tasks() returns a mapping of queue name -> list of tasks.
        num_tasks = sum([len(task_list) for task_list in task_dict.values()])
        self.assertEqual(0, num_tasks)

    def test_get_tasks_from_all_queues(self):
        """Ensure all tasks are returned from get_tasks()."""
        from furious.async import Async
        from furious.batcher import Message
        from furious.test_stubs.appengine.queues import get_tasks

        # Enqueue a couple of tasks
        async = Async(target='time.ctime')
        async.start()
        async2 = Async(target='time.ctime')
        async2.start()

        # Insert a pull task
        Message(queue='default-pull').insert()

        task_dict = get_tasks(self.queue_service)

        # Two push tasks plus one pull task across all queues.
        num_tasks = sum([len(task_list) for task_list in task_dict.values()])
        self.assertEqual(3, num_tasks)

    def test_get_tasks_when_queue_names_are_specified(self):
        """Ensure queues' tasks are returned from get_tasks() when a list of
        queue_names are passed as an argument.
        """
        from furious.async import Async
        from furious.batcher import Message
        from furious.test_stubs.appengine.queues import get_tasks

        # Enqueue a couple of tasks
        async = Async(target='time.ctime')
        async.start()
        async2 = Async(target='time.ctime')
        async2.start()

        # Insert a pull task (should be excluded by the queue filter).
        Message(queue='default-pull').insert()

        task_dict = get_tasks(self.queue_service, ['default'])

        num_tasks = sum([len(task_list) for task_list in task_dict.values()])
        self.assertEqual(2, num_tasks)

    def test_get_tasks_when_queue_name_string_is_passed(self):
        """Ensure a queue's tasks are returned from get_tasks() when a
        queue_name is passed as a string.
        """
        from furious.async import Async
        from furious.batcher import Message
        from furious.test_stubs.appengine.queues import get_tasks

        # Enqueue a couple of tasks
        async = Async(target='time.ctime')
        async.start()
        async2 = Async(target='time.ctime')
        async2.start()

        # Insert a pull task (the only one the filter should keep).
        Message(queue='default-pull').insert()

        task_dict = get_tasks(self.queue_service, 'default-pull')

        num_tasks = sum([len(task_list) for task_list in task_dict.values()])
        self.assertEqual(1, num_tasks)

    def test_get_tasks_with_nonexistent_queue(self):
        """If a non-existing queue is passed to get_tasks(), ensure that an
        Exception is raised.
        """
        from furious.test_stubs.appengine.queues import get_tasks

        self.assertRaises(Exception, get_tasks, self.queue_service,
                          'non-existent-queue')
@attr('slow')
class TestAddTasks(unittest.TestCase):
    """Ensure that add_tasks(), adds tasks to App Engine's queues."""

    def setUp(self):
        # Spin up a testbed with a taskqueue stub rooted at "".
        from google.appengine.ext import testbed

        self.testbed = testbed.Testbed()
        self.testbed.activate()
        self.testbed.init_taskqueue_stub(root_path="")
        self.testbed.get_stub(testbed.TASKQUEUE_SERVICE_NAME)
        self.queue_service = self.testbed.get_stub(
            testbed.TASKQUEUE_SERVICE_NAME)

    def tearDown(self):
        self.testbed.deactivate()

    def test_add_tasks_when_there_are_no_tasks(self):
        """Ensure that no tasks are added to add_tasks() when the
        task_dict is empty.
        """
        from furious.test_stubs.appengine.queues import add_tasks
        from furious.test_stubs.appengine.queues import purge_tasks

        task_dict = {}

        num_added = add_tasks(self.queue_service, task_dict)

        # Purge tasks to count if any tasks remained.
        num_purged = purge_tasks(self.queue_service)

        self.assertEqual(0, num_added)
        self.assertEqual(0, num_purged)

    @patch('google.appengine.api.taskqueue.Queue.add', autospec=True)
    def test_add_tasks_with_empty_queues(self, queue_add):
        """Ensure queue.add() is not called when there are no tasks to queue.

        In some cases adding an empty list causes an error in the taskqueue
        stub.
        """
        from furious.test_stubs.appengine.queues import add_tasks
        from furious.test_stubs.appengine.queues import purge_tasks

        task_dict = {'default': [], 'default-pull': []}

        # Add using empty lists of tasks.
        num_added = add_tasks(self.queue_service, task_dict)

        # Purge tasks to verify the count of tasks added.
        num_purged = purge_tasks(self.queue_service)

        # Ensure no tasks were added.
        self.assertEqual(0, queue_add.call_count)
        self.assertEqual(0, num_added)
        self.assertEqual(0, num_purged)

    def test_add_push_queue_tasks(self):
        """Ensure that push queue tasks can be added with add_tasks()."""
        from furious.async import Async
        from furious.test_stubs.appengine.queues import add_tasks
        from furious.test_stubs.appengine.queues import get_tasks
        from furious.test_stubs.appengine.queues import purge_tasks

        # Add tasks the normal way so we can get them and test readding them
        async = Async(target='time.ctime')
        async.start()
        async2 = Async(target='time.ctime')
        async2.start()

        task_dict = get_tasks(self.queue_service)

        # purge current tasks so we can verify how many we add next.
        purge_tasks(self.queue_service)

        num_added = add_tasks(self.queue_service, task_dict)

        # Purge tasks to check how many tasks are in the queues
        num_queued = purge_tasks(self.queue_service)

        self.assertEqual(2, num_added)
        self.assertEqual(2, num_queued)

    def test_add_pull_queue_tasks(self):
        """Ensure that pull tasks can be added with add_tasks()."""
        from furious.batcher import Message
        from furious.test_stubs.appengine.queues import add_tasks
        from furious.test_stubs.appengine.queues import get_tasks
        from furious.test_stubs.appengine.queues import purge_tasks

        # Add tasks the normal way so we can get them and test readding them
        Message(queue='default-pull').insert()

        task_dict = get_tasks(self.queue_service)

        # purge current tasks so we can verify how many we add next.
        purge_tasks(self.queue_service)

        num_added = add_tasks(self.queue_service, task_dict)

        # Purge tasks to check how many tasks are in the queues
        num_queued = purge_tasks(self.queue_service)

        self.assertEqual(1, num_added)
        self.assertEqual(1, num_queued)

    def test_add_pull_and_push_queue_tasks(self):
        """Ensure that push and pull tasks can be added with add_tasks()."""
        from furious.async import Async
        from furious.batcher import Message
        from furious.test_stubs.appengine.queues import add_tasks
        from furious.test_stubs.appengine.queues import get_tasks
        from furious.test_stubs.appengine.queues import purge_tasks

        # Add tasks the normal way so we can get them and test readding them
        async = Async(target='time.ctime')
        async.start()
        async2 = Async(target='time.ctime')
        async2.start()

        Message(queue='default-pull').insert()

        task_dict = get_tasks(self.queue_service)

        # purge current tasks so we can verify how many we will add next.
        purge_tasks(self.queue_service)

        num_added = add_tasks(self.queue_service, task_dict)

        # Purge tasks to check how many tasks are in the queues
        num_queued = purge_tasks(self.queue_service)

        self.assertEqual(3, num_added)
        self.assertEqual(3, num_queued)

    @patch('time.ctime')
    def test_add_async_and_message_tasks(self, ctime):
        """Ensure taskqueue.Task() instances from furious Asyncs and Messages
        can be added.
        """
        from google.appengine.api import taskqueue
        from furious.async import Async
        from furious.batcher import Message
        from furious.test_stubs.appengine.queues import add_tasks
        from furious.test_stubs.appengine.queues import run as run_queues

        # Create asyncs
        async = Async(target='time.ctime')
        async2 = Async(target='time.ctime')

        # Create a message
        options = {'task_args': {'payload': 'abcdefg'}}
        message = Message(payload='abc', **options)
        message_task = message.to_task()

        task_dict = {'default': [async.to_task(), async2.to_task()],
                     'default-pull': [message_task]}

        num_added = add_tasks(self.queue_service, task_dict)

        # Ensure three tasks were added.
        self.assertEqual(3, num_added)

        # Run the tasks to make sure they were inserted correctly.
        run_queues(self.queue_service)

        # Ensure both push queue tasks were executed.
        self.assertEqual(2, ctime.call_count)

        # Lease the pull queue task and make sure it has the correct payload.
        tasks = taskqueue.Queue('default-pull').lease_tasks(3600, 100)
        returned_task_message = tasks[0]

        # Ensure pull queue task payload is the same as the original.
        self.assertEqual(returned_task_message.payload, message_task.payload)
class TestRunRandom(unittest.TestCase):
"""Tests random processing of task queues."""
def setUp(self):
    # Queue descriptions as the taskqueue service would report them;
    # 'c' is a pull queue and must be skipped by run_random().
    self.queue_names = [
        {'name': 'a', 'mode': 'push'},
        {'name': 'b', 'mode': 'push'},
        {'name': 'c', 'mode': 'pull'},
        {'name': 'd', 'mode': 'push'}]
    # Fake per-queue task lists consumed by the _run_side_effect helper.
    self.test_queues = {
        'a': [{'name': '1'}, {'name': '2'}],
        'b': [{'name': '4'}, {'name': '5'}],
        'c': [{'name': '7'}, {'name': '9'}],
        'd': [{'name': '11'}, {'name': '12'}]}
def test_run_without_queues(self):
    """run_random() returns 0 immediately when no queue descriptions exist."""
    service = Mock()
    self.assertEqual(0, run_random(service, None))
@patch('random.seed')
@patch('random.randrange')
@patch('furious.test_stubs.appengine.queues._run_random_task_from_queue')
def test_run_with_empty_queues(self, run_task_from_queue, random_range,
random_seed):
"""Ensures that we hit all queue names when all queues are empty.
"""
queue_service = Mock()
random_range.return_value = 1
run_task_from_queue.return_value = False
test_seed = 555
tasks_ran = run_random(queue_service, self.queue_names, test_seed)
self.assertEqual(0, tasks_ran)
random_seed.assert_called_once_with(test_seed)
self.assertEqual(3, run_task_from_queue.call_count)
index = 0
for queue_name in ['b', 'd', 'a']:
call_args = run_task_from_queue.call_args_list[index]
self.assertEqual(call(queue_service, queue_name), call_args)
index += 1
@patch('furious.test_stubs.appengine.queues._run_random_task_from_queue')
def test_run_tasks_in_queues(self, run_task_from_queue):
"""Ensures that we run all tasks from popuplated queues.
"""
queue_service = Mock()
run_task_from_queue.side_effect = self._run_side_effect
tasks_ran = run_random(queue_service, self.queue_names)
self.assertEqual(6, tasks_ran)
self.assertIsNotNone(self.test_queues.get('c'))
self.assertEqual(2, len(self.test_queues.get('c')))
for queue_name in ['a', 'b', 'd']:
tasks = self.test_queues.get(queue_name)
self.assertIsNotNone(tasks)
self.assertEqual(0, len(tasks))
@patch('furious.test_stubs.appengine.queues._run_random_task_from_queue')
def test_run_tasks_in_queues_greater_than_max(self, run_task_from_queue):
"""Ensures that we only run as many tasks as the 'max_tasks'
"""
queue_service = Mock()
run_task_from_queue.side_effect = self._run_side_effect
tasks_ran = run_random(queue_service, self.queue_names, max_tasks=3)
self.assertEqual(3, tasks_ran)
remaining_tasks = 0
for queue, tasks in self.test_queues.iteritems():
remaining_tasks += len(tasks)
self.assertEqual(5, remaining_tasks)
def _run_side_effect(self, service, queue_name):
mock_tasks = self.test_queues.get(queue_name, [])
task = None
if mock_tasks:
task = mock_tasks.pop()
return task
class TestRunRandomTaskFromQueue(unittest.TestCase):
"""Tests proper processing of tasks through _run_random_task_from_queue"""
def setUp(self):
self.test_queue = 'queue-ABC'
self.test_task = 'task-ABC'
@patch('furious.test_stubs.appengine.queues._fetch_random_task_from_queue')
def test_run_without_task(self, fetch_task):
"""Ensure that we don't run a task if fetch returns None.
"""
fetch_task.return_value = None
queue_service = Mock()
result = _run_random_task_from_queue(queue_service, self.test_queue)
self.assertFalse(result)
fetch_task.assert_called_once_with(queue_service, self.test_queue)
@patch('furious.test_stubs.appengine.queues._execute_task')
@patch('furious.test_stubs.appengine.queues._fetch_random_task_from_queue')
def test_run_with_task(self, fetch_task, execute_task):
"""Ensure that we handle a task run properly.
"""
task = {'name': self.test_task}
fetch_task.return_value = task
queue_service = Mock()
queue_service.DeleteTask = Mock()
result = _run_random_task_from_queue(queue_service, self.test_queue)
self.assertTrue(result)
fetch_task.assert_called_once_with(queue_service, self.test_queue)
execute_task.assert_called_once_with(task)
queue_service.DeleteTask.assert_called_once_with(
self.test_queue, self.test_task)
class TestFetchRandomTaskFromQueue(unittest.TestCase):
"""Ensure tasks from queues are run randomly."""
def setUp(self):
self.test_queue = 'queue-ABC'
def test_fetch_with_no_tasks(self):
"""Ensure None is returned when GetTasks returns None.
"""
queue_service = Mock()
queue_service.GetTasks.return_value = None
result = _fetch_random_task_from_queue(queue_service, self.test_queue)
self.assertIsNone(result)
queue_service.GetTasks.assert_called_once_with(self.test_queue)
@patch('random.choice')
def test_fetch_with_tasks(self, choice):
"""Ensure None is returned when GetTasks returns None.
"""
queue_service = Mock()
queue_service.GetTasks.return_value = ['a', 'b', 'c']
choice.return_value = 'b'
result = _fetch_random_task_from_queue(queue_service, self.test_queue)
self.assertEqual('b', result)
queue_service.GetTasks.assert_called_once_with(self.test_queue)
class IsFuriousTaskTestCase(unittest.TestCase):
    """Exercise _is_furious_task classification of queue tasks."""

    def test_no_furious_url_prefixes(self):
        """With no prefixes configured, every task counts as furious."""
        self.assertTrue(_is_furious_task({}, None, None))

    def test_url_not_url_prefixes(self):
        """A task url matching no prefix is treated as furious."""
        task = {'url': '/_ah/queue/async'}

        self.assertTrue(
            _is_furious_task(task, ('/_ah/queue/defer',), None))

    def test_url_in_url_prefixes_with_no_handler(self):
        """A prefix match marks the task non-furious; no handler to call."""
        task = {'url': '/_ah/queue/defer'}

        self.assertFalse(
            _is_furious_task(task, ('/_ah/queue/defer',), None))

    def test_url_in_url_prefixes_with_handler(self):
        """A prefix match marks the task non-furious and calls the handler
        with the task.
        """
        task = {'url': '/_ah/queue/defer'}
        handler = Mock()

        self.assertFalse(
            _is_furious_task(task, ('/_ah/queue/defer',), handler))

        handler.assert_called_once_with(task)
| |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for spectral_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import spectral_ops_test_util
from tensorflow.python.ops.signal import spectral_ops
from tensorflow.python.ops.signal import window_ops
from tensorflow.python.platform import test
class SpectralOpsTest(test.TestCase):
  """Tests tf.signal stft/inverse_stft against NumPy reference code."""

  @staticmethod
  def _np_hann_periodic_window(length):
    # NumPy reference for a periodic Hann window of `length` samples.
    if length == 1:
      return np.ones(1)
    odd = length % 2
    if not odd:
      # Build an odd symmetric window, then drop the last sample below to
      # obtain the periodic form.
      length += 1
    window = 0.5 - 0.5 * np.cos(2.0 * np.pi * np.arange(length) / (length - 1))
    if not odd:
      window = window[:-1]
    return window

  @staticmethod
  def _np_frame(data, window_length, hop_length):
    # NumPy reference framing of a 1-D signal into overlapping windows,
    # implemented as a zero-copy strided view of `data`.
    num_frames = 1 + int(np.floor((len(data) - window_length) // hop_length))
    shape = (num_frames, window_length)
    strides = (data.strides[0] * hop_length, data.strides[0])
    return np.lib.stride_tricks.as_strided(data, shape=shape, strides=strides)

  @staticmethod
  def _np_stft(data, fft_length, hop_length, window_length):
    # NumPy reference STFT: frame, apply a periodic Hann window, then rFFT.
    frames = SpectralOpsTest._np_frame(data, window_length, hop_length)
    window = SpectralOpsTest._np_hann_periodic_window(window_length)
    return np.fft.rfft(frames * window, fft_length)

  @staticmethod
  def _np_inverse_stft(stft, fft_length, hop_length, window_length):
    # NumPy reference inverse STFT: irFFT each frame, trim/pad to the
    # window length, re-window, then overlap-add.
    frames = np.fft.irfft(stft, fft_length)
    # Pad or truncate frames's inner dimension to window_length.
    frames = frames[..., :window_length]
    frames = np.pad(frames, [[0, 0]] * (frames.ndim - 1) +
                    [[0, max(0, window_length - frames.shape[-1])]], "constant")
    window = SpectralOpsTest._np_hann_periodic_window(window_length)
    return SpectralOpsTest._np_overlap_add(frames * window, hop_length)

  @staticmethod
  def _np_overlap_add(stft, hop_length):
    # Reconstruct a 1-D signal by summing overlapping frames.
    num_frames, window_length = np.shape(stft)
    # Output length will be one complete window, plus another hop_length's
    # worth of points for each additional window.
    output_length = window_length + (num_frames - 1) * hop_length
    output = np.zeros(output_length)
    for i in range(num_frames):
      output[i * hop_length:i * hop_length + window_length] += stft[i,]
    return output

  def _compare(self, signal, frame_length, frame_step, fft_length):
    """Compare TF stft/inverse_stft on `signal` against the NumPy reference,
    both with fully-known and fully-unknown input shapes."""
    with spectral_ops_test_util.fft_kernel_label_map(), (
        self.cached_session(use_gpu=True)) as sess:
      actual_stft = spectral_ops.stft(
          signal, frame_length, frame_step, fft_length, pad_end=False)
      # Feed the same signal through a shape-unknown placeholder.
      signal_ph = array_ops.placeholder(dtype=dtypes.as_dtype(signal.dtype))
      actual_stft_from_ph = spectral_ops.stft(
          signal_ph, frame_length, frame_step, fft_length, pad_end=False)

      actual_inverse_stft = spectral_ops.inverse_stft(
          actual_stft, frame_length, frame_step, fft_length)

      actual_stft, actual_stft_from_ph, actual_inverse_stft = sess.run(
          [actual_stft, actual_stft_from_ph, actual_inverse_stft],
          feed_dict={signal_ph: signal})

      # Invert via a shape-unknown placeholder as well.
      actual_stft_ph = array_ops.placeholder(dtype=actual_stft.dtype)
      actual_inverse_stft_from_ph = sess.run(
          spectral_ops.inverse_stft(
              actual_stft_ph, frame_length, frame_step, fft_length),
          feed_dict={actual_stft_ph: actual_stft})

      # Confirm that there is no difference in output when shape/rank is fully
      # unknown or known.
      self.assertAllClose(actual_stft, actual_stft_from_ph)
      self.assertAllClose(actual_inverse_stft, actual_inverse_stft_from_ph)

      expected_stft = SpectralOpsTest._np_stft(
          signal, fft_length, frame_step, frame_length)
      self.assertAllClose(expected_stft, actual_stft, 1e-4, 1e-4)

      expected_inverse_stft = SpectralOpsTest._np_inverse_stft(
          expected_stft, fft_length, frame_step, frame_length)
      self.assertAllClose(
          expected_inverse_stft, actual_inverse_stft, 1e-4, 1e-4)

  @test_util.disable_xla("This test never passed for XLA")
  def test_shapes(self):
    """Static and evaluated output shapes of stft/inverse_stft agree."""
    with spectral_ops_test_util.fft_kernel_label_map(), (
        self.session(use_gpu=True)):
      signal = np.zeros((512,)).astype(np.float32)

      # If fft_length is not provided, the smallest enclosing power of 2 of
      # frame_length (8) is used.
      stft = spectral_ops.stft(signal, frame_length=7, frame_step=8,
                               pad_end=True)
      self.assertAllEqual([64, 5], stft.shape.as_list())
      self.assertAllEqual([64, 5], self.evaluate(stft).shape)

      stft = spectral_ops.stft(signal, frame_length=8, frame_step=8,
                               pad_end=True)
      self.assertAllEqual([64, 5], stft.shape.as_list())
      self.assertAllEqual([64, 5], self.evaluate(stft).shape)

      # Explicit fft_length larger than frame_length widens the bins.
      stft = spectral_ops.stft(signal, frame_length=8, frame_step=8,
                               fft_length=16, pad_end=True)
      self.assertAllEqual([64, 9], stft.shape.as_list())
      self.assertAllEqual([64, 9], self.evaluate(stft).shape)

      # fft_length may also be shorter than frame_length.
      stft = spectral_ops.stft(signal, frame_length=16, frame_step=8,
                               fft_length=8, pad_end=True)
      self.assertAllEqual([64, 5], stft.shape.as_list())
      self.assertAllEqual([64, 5], self.evaluate(stft).shape)

      stft = np.zeros((32, 9)).astype(np.complex64)

      inverse_stft = spectral_ops.inverse_stft(stft, frame_length=8,
                                               fft_length=16, frame_step=8)
      expected_length = (stft.shape[0] - 1) * 8 + 8
      self.assertAllEqual([256], inverse_stft.shape.as_list())
      self.assertAllEqual([expected_length], self.evaluate(inverse_stft).shape)

  @test_util.disable_xla("This test never passed for XLA")
  def test_stft_and_inverse_stft(self):
    """Test that spectral_ops.stft/inverse_stft match a NumPy implementation."""
    # Tuples of (signal_length, frame_length, frame_step, fft_length).
    test_configs = [
        (512, 64, 32, 64),
        (512, 64, 64, 64),
        (512, 72, 64, 64),
        (512, 64, 25, 64),
        (512, 25, 15, 36),
        (123, 23, 5, 42),
    ]
    for signal_length, frame_length, frame_step, fft_length in test_configs:
      signal = np.random.random(signal_length).astype(np.float32)
      self._compare(signal, frame_length, frame_step, fft_length)

  def test_stft_round_trip(self):
    """inverse_stft(stft(x)) reconstructs x (exactly with window correction,
    up to a scale factor without it)."""
    # Tuples of (signal_length, frame_length, frame_step, fft_length,
    # threshold, corrected_threshold).
    test_configs = [
        # 87.5% overlap.
        (4096, 256, 32, 256, 1e-5, 1e-6),
        # 75% overlap.
        (4096, 256, 64, 256, 1e-5, 1e-6),
        # Odd frame hop.
        (4096, 128, 25, 128, 1e-3, 1e-6),
        # Odd frame length.
        (4096, 127, 32, 128, 1e-3, 1e-6),
        # 50% overlap.
        (4096, 128, 64, 128, 0.40, 1e-6),
    ]
    for (signal_length, frame_length, frame_step, fft_length, threshold,
         corrected_threshold) in test_configs:
      # Generate a random white Gaussian signal.
      signal = random_ops.random_normal([signal_length])
      with spectral_ops_test_util.fft_kernel_label_map(), (
          self.cached_session(use_gpu=True)) as sess:
        stft = spectral_ops.stft(signal, frame_length, frame_step, fft_length,
                                 pad_end=False)
        inverse_stft = spectral_ops.inverse_stft(stft, frame_length, frame_step,
                                                 fft_length)
        inverse_stft_corrected = spectral_ops.inverse_stft(
            stft, frame_length, frame_step, fft_length,
            window_fn=spectral_ops.inverse_stft_window_fn(frame_step))
        signal, inverse_stft, inverse_stft_corrected = sess.run(
            [signal, inverse_stft, inverse_stft_corrected])

        # Truncate signal to the size of inverse stft.
        signal = signal[:inverse_stft.shape[0]]

        # Ignore the frame_length samples at either edge.
        signal = signal[frame_length:-frame_length]
        inverse_stft = inverse_stft[frame_length:-frame_length]
        inverse_stft_corrected = inverse_stft_corrected[
            frame_length:-frame_length]

        # Check that the inverse and original signal are close up to a scale
        # factor.
        inverse_stft_scaled = inverse_stft / np.mean(np.abs(inverse_stft))
        signal_scaled = signal / np.mean(np.abs(signal))
        self.assertLess(np.std(inverse_stft_scaled - signal_scaled), threshold)

        # Check that the inverse with correction and original signal are close.
        self.assertLess(np.std(inverse_stft_corrected - signal),
                        corrected_threshold)

  def test_inverse_stft_window_fn(self):
    """Test that inverse_stft_window_fn has unit gain at each window phase."""
    # Tuples of (frame_length, frame_step).
    test_configs = [
        (256, 32),
        (256, 64),
        (128, 25),
        (127, 32),
        (128, 64),
    ]
    for (frame_length, frame_step) in test_configs:
      hann_window = window_ops.hann_window(frame_length, dtype=dtypes.float32)
      inverse_window_fn = spectral_ops.inverse_stft_window_fn(frame_step)
      inverse_window = inverse_window_fn(frame_length, dtype=dtypes.float32)
      with self.cached_session(use_gpu=True) as sess:
        hann_window, inverse_window = self.evaluate(
            [hann_window, inverse_window])

      # Expect unit gain at each phase of the window.
      product_window = hann_window * inverse_window
      for i in range(frame_step):
        self.assertAllClose(1.0, np.sum(product_window[i::frame_step]))

  def test_inverse_stft_window_fn_special_case(self):
    """Test inverse_stft_window_fn in special overlap = 3/4 case."""
    # Cases in which frame_length is an integer multiple of 4 * frame_step are
    # special because they allow exact reproduction of the waveform with a
    # squared Hann window (Hann window in both forward and reverse transforms).
    # In the case where frame_length = 4 * frame_step, that combination
    # produces a constant gain of 1.5, and so the corrected window will be the
    # Hann window / 1.5.

    # Tuples of (frame_length, frame_step).
    test_configs = [
        (256, 64),
        (128, 32),
    ]
    for (frame_length, frame_step) in test_configs:
      hann_window = window_ops.hann_window(frame_length, dtype=dtypes.float32)
      inverse_window_fn = spectral_ops.inverse_stft_window_fn(frame_step)
      inverse_window = inverse_window_fn(frame_length, dtype=dtypes.float32)
      with self.cached_session(use_gpu=True) as sess:
        hann_window, inverse_window = self.evaluate(
            [hann_window, inverse_window])

      self.assertAllClose(hann_window, inverse_window * 1.5)

  @staticmethod
  def _compute_stft_gradient(signal, frame_length=32, frame_step=16,
                             fft_length=32):
    """Computes the gradient of the STFT with respect to `signal`."""
    stft = spectral_ops.stft(signal, frame_length, frame_step, fft_length)
    magnitude_stft = math_ops.abs(stft)
    loss = math_ops.reduce_sum(magnitude_stft)
    return gradients_impl.gradients([loss], [signal])[0]

  def test_gradients(self):
    """Test that spectral_ops.stft has a working gradient."""
    with spectral_ops_test_util.fft_kernel_label_map(), (
        self.session(use_gpu=True)) as sess:
      signal_length = 512

      # An all-zero signal has all zero gradients with respect to the sum of the
      # magnitude STFT.
      empty_signal = array_ops.zeros([signal_length], dtype=dtypes.float32)
      empty_signal_gradient = sess.run(
          self._compute_stft_gradient(empty_signal))
      self.assertTrue((empty_signal_gradient == 0.0).all())

      # A sinusoid will have non-zero components of its gradient with respect to
      # the sum of the magnitude STFT.
      sinusoid = math_ops.sin(
          2 * np.pi * math_ops.linspace(0.0, 1.0, signal_length))
      sinusoid_gradient = self.evaluate(self._compute_stft_gradient(sinusoid))
      self.assertFalse((sinusoid_gradient == 0.0).all())

  def test_gradients_numerical(self):
    """Numerically verify stft/inverse_stft gradients across configs."""
    with spectral_ops_test_util.fft_kernel_label_map(), (
        self.session(use_gpu=True)):
      # Tuples of (signal_length, frame_length, frame_step, fft_length,
      # stft_bound, inverse_stft_bound).
      # TODO(rjryan): Investigate why STFT gradient error is so high.
      test_configs = [
          (64, 16, 8, 16),
          (64, 16, 16, 16),
          (64, 16, 7, 16),
          (64, 7, 4, 9),
          (29, 5, 1, 10),
      ]
      for (signal_length, frame_length, frame_step, fft_length) in test_configs:
        signal_shape = [signal_length]
        signal = random_ops.random_uniform(signal_shape)
        stft_shape = [max(0, 1 + (signal_length - frame_length) // frame_step),
                      fft_length // 2 + 1]
        stft = spectral_ops.stft(signal, frame_length, frame_step, fft_length,
                                 pad_end=False)
        inverse_stft_shape = [(stft_shape[0] - 1) * frame_step + frame_length]
        inverse_stft = spectral_ops.inverse_stft(stft, frame_length, frame_step,
                                                 fft_length)
        stft_error = test.compute_gradient_error(signal, [signal_length],
                                                 stft, stft_shape)
        inverse_stft_error = test.compute_gradient_error(
            stft, stft_shape, inverse_stft, inverse_stft_shape)
        self.assertLess(stft_error, 2e-3)
        self.assertLess(inverse_stft_error, 5e-4)
# Standard TensorFlow test entry point.
if __name__ == "__main__":
  test.main()
| |
#Author : Lewis Mervin lhm30@cam.ac.uk
#Supervisor : Dr. A. Bender
#All rights reserved 2014
#Protein Target Prediction Tool trained on SARs from PubChem (Mined 08/04/14) and ChEMBL18
#Molecular Descriptors : 2048bit Morgan Binary Fingerprints (Rdkit) - ECFP4
#Dependencies : rdkit, sklearn, numpy
#libraries
import pymysql
import random
import time
import getpass
random.seed(2)
from rdkit import Chem
from rdkit.Chem import AllChem
from sklearn.naive_bayes import BernoulliNB
import cPickle
import glob
import gc
from collections import Counter
import os
import sys
import numpy as np
from multiprocessing import Pool
import multiprocessing
# Needed so multiprocessing child processes bootstrap correctly when the
# script is frozen into a Windows executable; a no-op elsewhere.
multiprocessing.freeze_support()
# Number of worker processes used for model loading and prediction.
N_cores = 10
def introMessage():
print '=============================================================================================='
print ' Author: Lewis Mervin\n Email: lhm30@cam.ac.uk\n Supervisor: Dr. A. Bender. Number of cores: ' + str(N_cores)
print ' Address: Centre For Molecular Informatics, Dept. Chemistry, Lensfield Road, Cambridge CB2 1EW'
print '==============================================================================================\n'
return
def login():
	"""Interactively collect DB credentials and the background sample count.

	Prompts for a username (defaulting to the OS login name), asks for the
	password twice until both entries match, then asks for the number of
	background samples. Returns a (user, password, samples) tuple with
	samples converted to int.
	"""
	user = raw_input(" Enter Username for PIDGIN & BIOSYSTEMS DB [%s]: " % getpass.getuser())
	if not user:
		# Empty input falls back to the current OS user.
		user = getpass.getuser()
	pprompt = lambda: (getpass.getpass(' Enter Password for DB: '), getpass.getpass(' Retype password: '))
	p1, p2 = pprompt()
	while p1 != p2:
		print(' Passwords do not match. Try again')
		p1, p2 = pprompt()
	samples = raw_input(" Enter Number of Samples: ")
	return user, p1, int(samples)
def ispwneeded():
	"""Ask whether pathway enrichment should be computed; returns 'y' or 'n'."""
	msg = " Calculate Pathway Enrichment from BioSystems? [y/n]: "
	answer = raw_input(msg)
	while answer not in ('y', 'n'):
		print(' Please type y for yes, or n for no. Try again')
		answer = raw_input(msg)
	return answer
def printprog(size, count, message):
	"""Render an in-place progress line '<message> :  NN%' ending in '\r'.

	`count` is the zero-based index of the item just processed, so the
	displayed percentage is based on count + 1 out of `size` items.
	"""
	done_fraction = float(count + 1) / float(size)
	sys.stdout.write(message + ' : %3d%%\r' % (done_fraction * 100))
	sys.stdout.flush()
#import user query
def importQuery(name):
outproblem = open('problematic_smiles.smi','w')
query = open(name).read().splitlines()
matrix = []
problem = 0
for q in query:
try:
fp = calcFingerprints(q)
gc.disable()
matrix.append(fp)
gc.enable()
except:
problem +=1
outproblem.write(q + '\n')
matrix = np.array(matrix, dtype=np.uint8)
if problem > 0:
print 'WARNING: ' + str(problem) + ' SMILES HAVE ERRORS'
outproblem.close()
else:
outproblem.close()
os.remove('problematic_smiles.smi')
return matrix
#calculate 2048bit morgan fingerprints, radius 2
def calcFingerprints(smiles):
	"""Return the 2048-bit radius-2 Morgan fingerprint of `smiles`
	as a list of '0'/'1' characters."""
	mol = Chem.MolFromSmiles(smiles)
	bitvect = AllChem.GetMorganFingerprintAsBitVect(mol, 2, nBits=2048)
	return list(bitvect.ToBitString())
def arrayFP(input):
	"""Fingerprint the first field of each record in `input`.

	Returns a numpy uint8 matrix with one fingerprint row per record.
	"""
	fingerprints = []
	for record in input:
		# GC toggling kept from the original to speed up bulk appends.
		gc.disable()
		fingerprints.append(calcFingerprints(record[0]))
		gc.enable()
	return np.array(fingerprints, dtype=np.uint8)
def getRandomCompoundPredictions(metric):
	"""Fetch up to 100000 background predictions for the given metric column.

	Uses the module-level `usr`/`pw` credentials. Because a column name
	cannot be bound as a SQL parameter, `metric` is validated against the
	known metric flags first to avoid SQL injection. The connection is
	closed after use (the original leaked it). Raises ValueError for an
	unknown metric.
	"""
	global usr, pw
	# Whitelist matches the metric flags accepted by importThresholds().
	if metric not in ('p', 'f', 'r', 'a', '0.5'):
		raise ValueError('Unknown metric column: %r' % (metric,))
	conn = pymysql.connect(db='pidgin', user=usr, passwd=pw, host='localhost', port=3306)
	try:
		cur = conn.cursor()
		cur.execute("SELECT " + metric + " FROM preds limit 100000;")
		preds = np.array(cur.fetchall())[:, 0]
	finally:
		conn.close()
	return preds
#get names of uniprots
def getUpName():
	"""Populate the global uniprot-id -> name map from classes_in_model.txt."""
	global u_name
	rows = open('classes_in_model.txt').read().splitlines()
	# Drop the header row.
	rows.pop(0)
	for row in rows:
		fields = row.split('\t')
		u_name[fields[1]] = fields[0]
	return
#import thresholds as specified by user
def importThresholds():
global thresholds
global metric
m = None
if metric == 'p':
m = 1
if metric == 'f':
m = 2
if metric == 'r':
m = 3
if metric == 'a':
m = 4
if metric == '0.5':
m = 5
if m is None:
print ' ERROR: Please enter threshold!'
quit()
t_file = open('thresholds.txt').read().splitlines()
for t in t_file:
t = t.split('\t')
thresholds[t[0]] = float(t[m])
return
#parallel train models
def trainModels():
	"""Unpickle every model under models/*.pkl in parallel.

	Returns a dict mapping target name (the pickle filename without the
	'models/' prefix and '.pkl' suffix) to the loaded classifier.
	"""
	models = dict()
	pool = Pool(processes=N_cores)  # set up worker processes
	# glob.glob already returns a list; the original wrapped it in a
	# redundant list comprehension and computed an unused task count.
	train_tasks = glob.glob('models/*.pkl')
	for name, model in pool.imap_unordered(trainer, train_tasks):
		models[name] = model
	pool.close()
	pool.join()
	return models
#trainer worker
def trainer(x):
	"""Load one pickled model file; returns [target_name, model]."""
	with open(x, 'rb') as fid:
		model = cPickle.load(fid)
	# Strip the leading 'models/' (7 chars) and trailing '.pkl' (4 chars).
	return [x[7:-4], model]
def getPW():
	"""Map each model target to the BioSystems bsids annotated for it.

	Returns a dict of target -> numpy int array of bsids (or an empty
	list when the target has no annotations). Uses the module-level DB
	credentials; the connection is closed after use.
	"""
	global models
	bsid_a = dict()
	conn = pymysql.connect(db='biosystems', user=usr, passwd=pw, host='localhost', port=3306)
	try:
		cur = conn.cursor()
		for m in models.keys():
			# Parameterized query instead of string concatenation.
			cur.execute("SELECT bsid FROM target_bsid WHERE target = %s;", (str(m),))
			bsids = np.array(cur.fetchall(), dtype=int)
			try:
				bsid_a[m] = bsids[::, 0]
			except IndexError:
				# No annotations for this target.
				bsid_a[m] = []
	finally:
		conn.close()
	return bsid_a
#predict worker
def predict(x):
	"""Count library molecules predicted active for one target.

	`x` is a (model_name, fingerprint_matrix) pair; returns
	[model_name, number_of_hits] using the per-target threshold from the
	global `thresholds` dict.
	"""
	global models
	global thresholds
	mod, input = x
	probs = models[mod].predict_proba(input)[::, 1]
	# Compare against the scalar threshold directly — numpy broadcasts, so
	# the original [threshold]*len(probs) list was wasted work. The dead
	# `hits = 0` initialisation is dropped too.
	hits = probs > thresholds[mod]
	return [mod, hits.sum()]
#calculate enriched target metrics and calculate background pw array
def calculateEnrichmentT(bgpred):
	"""Sample random background compound sets and tally enrichment counts.

	For each of the global `samples` iterations, draws a background set
	the same size as the query library from `bgpred` and counts per-target
	hits. Returns (lwin, avr, bgpw): per-target counts of samples where
	the query beat the background, summed background hits per target, and
	the list of per-sample pathway-hit dicts.
	"""
	global bsid_a
	global positives
	print
	lwin = dict((el,0) for el in positives.keys())
	avr = dict((el,0) for el in positives.keys())
	bgpw = []
	#for each comparison
	for _ in range(samples):
		try:
			chunk = random.sample(bgpred,len(querymatrix))
		except ValueError:
			# Background smaller than the query library: fall back to
			# sampling with replacement.
			chunk = [random.choice(bgpred) for r in range(len(querymatrix))]
		printprog(samples,_,' Calculating Enriched Targets vs BG ')
		chunk = np.matrix(map(list,chunk),dtype=np.uint8)
		pw = dict()
		# NOTE(review): assumes the columns of each background row line up
		# with sorted(models.keys()) — confirm against the preds table.
		for i,mod in enumerate(sorted(models.keys())):
			hits = np.sum(chunk[:,i])
			if hits >= 1:
				#update count of hits for target (for average-ratio)
				avr[mod] = avr[mod] + hits
				for b in bsid_a[mod]:
					try:
						pw[b] += hits
					except KeyError:
						pw[b] = hits
			#update times that query was larger than background (for e-ratio)
			if positives[mod] > hits:
				lwin[mod] +=1
		bgpw.append(pw)
	return lwin, avr, bgpw
def calculateEnrichmentPW():
	"""Compare query pathway hit counts against the sampled background.

	Returns (lwin, avr, aratiopw): per-bsid counts of background samples
	the query beat, summed background hits per bsid, and the
	average-ratio enrichment from calcAR.
	"""
	global positivespw
	lwin = dict()
	avr = dict()
	pool = Pool(processes=N_cores)  # set up worker processes
	tasks = [[bsid, count] for bsid, count in positivespw.items()]  # create queue
	for result in pool.imap_unordered(processPW, tasks):
		lwin[result[0]] = result[1]
		avr[result[0]] = result[2]
	# Release the worker processes — the original never closed the pool.
	pool.close()
	pool.join()
	aratiopw = calcAR(avr, positivespw)
	return lwin, avr, aratiopw
def processPW(input):
	"""Score one pathway against the background samples.

	`input` is a (bsid, query_hit_count) pair; returns [bsid, lwin, avr]
	where lwin is the number of background samples the query beat and avr
	is the summed background hits for that bsid, read from the global
	`bgpw` list of per-sample hit dicts.
	"""
	global bgpw
	lwin = 0
	avr = 0
	bsid, count = input
	for sample_hits in bgpw:
		# A sample without this bsid contributed zero hits; dict.get
		# replaces the original bare `except:` which hid real errors.
		hits = sample_hits.get(bsid, 0)
		if count > hits:
			lwin += 1
		avr = avr + hits
	return [bsid, lwin, avr]
#calculate enrichment
def calcAR(avr,positiv):
	"""Compute the average-ratio enrichment per annotation.

	`avr` maps annotation -> summed background hits over all samples;
	`positiv` maps annotation -> query hit count. Returns a dict of
	annotation -> (mean background hits per sample) / (query hits).
	"""
	global samples
	aratio = dict()
	for annotation, bhits in avr.items():
		#average background hit ratio
		normhit = float(bhits)/float(samples)
		#number of predictions
		numpreds = float(len(querymatrix))
		try:
			#normed positive hit ratio / normed background hits
			# (numpreds cancels out of this expression algebraically).
			aratio[annotation] = (float(normhit)/float(numpreds))/(float(positiv[annotation])/float(numpreds))
		except:
			# NOTE(review): bare except also hides errors other than the
			# intended ZeroDivisionError; and when BOTH counts are zero the
			# second branch overwrites the first (result 999.0) — confirm
			# that ordering is intended.
			if float(bhits) == 0.0:
				aratio[annotation] = 0.0
			if positiv[annotation] == 0.0:
				aratio[annotation] = 999.0
	return aratio
#main
# Script flow: gather credentials and arguments, load models, predict for
# the query library, compare against a random background, write results.
introMessage()
usr, pw, samples = login()
# Metric flag selecting the threshold column (p/f/r/a/0.5).
metric = sys.argv[1]
print ' Using Class Specific Cut-off Thresholds of : ' + metric
thresholds = dict()
importThresholds()
file_name = sys.argv[2]
output_name, output_name2 = [file_name + 'out_targets_enriched.txt', file_name + 'out_pathways_enriched.txt']
models = trainModels()
u_name = dict()
getUpName()
bsid_a = getPW()
t_count = len(models.keys())
print ' Total Number of Classes : ' + str(t_count)
querymatrix = importQuery(file_name)
print ' Total Number of Library Molecules : ' + str(len(querymatrix))
positives = dict()
positivespw = dict()
# Fan out one prediction task per target model.
pool = Pool(processes=N_cores) # set up resources
test_prediction_tasks = [[mod, querymatrix] for mod in models.keys()] #create queue
jobs = pool.imap_unordered(predict, test_prediction_tasks)
for i, result in enumerate(jobs):
	mod, hit = result
	printprog(len(test_prediction_tasks),i,' Calculating Targets and Pathways for ' + file_name)
	positives[mod] = hit
	#update list of hit pw
	if hit >= 1:
		for b in bsid_a[mod]:
			try:
				positivespw[b] += hit
			except KeyError:
				positivespw[b] = hit
pool.close()
pool.join()
#import background db
bgpred = getRandomCompoundPredictions(metric)
#predict for random background, calculating number of times enriched in lib
lwin, avr, bgpw = calculateEnrichmentT(bgpred)
# Drop the reference so the large background array can be collected.
bgpred = None
#calculate average ratio
aratio = calcAR(avr,positives)
numpreds = float(len(querymatrix))
#write to target file
# NOTE(review): `file` shadows the Python builtin of the same name.
file = open(output_name, 'w')
file.write('uniprot\tname\tquery_hits\te_score\taverage_ratio\n')
for uniprot, hit in positives.items():
	if hit >=1:
		# e_score = 1 - (fraction of background samples the query beat).
		file.write(uniprot + '\t' + u_name[uniprot] + '\t' + str(hit) + '\t' + str(1.0-(float(lwin[uniprot])/float(samples))) + '\t' + str(aratio[uniprot]) + '\n')
print '\n Wrote Target Results to : ' + output_name
file.close()
#run pathway analysis?
if ispwneeded() == 'n': quit()
#write to pw file
file = open(output_name2, 'w')
file.write('bsid\tname\tdatabase\texternal_id\tclass\tquery_hits\te_score\taverage_ratio\n')
lwin, avr, aratiopw = calculateEnrichmentPW()
conn = pymysql.connect(db='biosystems', user=usr, passwd=pw, host='localhost', port=3306)
cur = conn.cursor()
for bsid, count in positivespw.items():
	# NOTE(review): bsid originates from our own DB, but a parameterized
	# query would still be safer than string concatenation here.
	cur.execute("SELECT * FROM bsid_info WHERE bsid ='"+str(bsid)+"';")
	BSID_n = cur.fetchall()[0]
	file.write('\t'.join(map(str,BSID_n)) + '\t' + str(count) + '\t' + str(1.0-(float(lwin[bsid])/float(samples))) + '\t' + str(aratiopw[bsid]) + '\n')
print ' Wrote Pathway Results to : ' + output_name2
file.close()
| |
# Status: ported, except for unit tests.
# Base revision: 64488
#
# Copyright 2001, 2002, 2003 Dave Abrahams
# Copyright 2002, 2006 Rene Rivera
# Copyright 2002, 2003, 2004, 2005, 2006 Vladimir Prus
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE.txt or https://www.bfgroup.xyz/b2/LICENSE.txt)
import re
from b2.manager import get_manager
from b2.util import utility, bjam_signature, is_iterable_typed
import b2.util.set
from b2.util.utility import add_grist, get_grist, ungrist, replace_grist, to_seq
from b2.exceptions import *
# Splits '<feature:value>' strings into their feature and value parts.
__re_split_subfeatures = re.compile ('<(.*):(.*)>')
# Matches a value string containing no ':' separator.
__re_no_hyphen = re.compile ('^([^:]+)$')
# Matches either kind of path separator.
__re_slash_or_backslash = re.compile (r'[\\/]')

# The set of attribute names a feature may legally be declared with.
VALID_ATTRIBUTES = {
    'implicit',
    'composite',
    'optional',
    'symmetric',
    'free',
    'incidental',
    'path',
    'dependency',
    'propagated',
    'link-incompatible',
    'subfeature',
    'order-sensitive'
}
class Feature(object):
    """A single build feature: its allowed values, default and attributes."""

    def __init__(self, name, values, attributes):
        assert isinstance(name, basestring)
        assert is_iterable_typed(values, basestring)
        assert is_iterable_typed(attributes, basestring)
        self.name = name
        self.values = values
        self.default = None
        self.subfeatures = []
        self.parent = None
        self.attributes_string_list = []
        self._hash = hash(self.name)
        # Record each attribute string and also expose it as a boolean flag
        # on the instance (dashes become underscores so the flag name is a
        # valid identifier).
        for attribute in attributes:
            self.attributes_string_list.append(attribute)
            setattr(self, attribute.replace("-", "_"), True)

    def add_values(self, values):
        """Extend the list of allowed values."""
        assert is_iterable_typed(values, basestring)
        self.values.extend(values)

    def set_default(self, value):
        """Set the default value; 'free'/'optional' features may not have one."""
        assert isinstance(value, basestring)
        for attr in ('free', 'optional'):
            if getattr(self, attr):
                get_manager().errors()('"{}" feature "<{}>" cannot have a default value.'
                                       .format(attr, self.name))
        self.default = value

    def add_subfeature(self, name):
        """Register `name` (a Feature) as a subfeature of this feature."""
        assert isinstance(name, Feature)
        self.subfeatures.append(name)

    def set_parent(self, feature, value):
        """Mark this feature as a subfeature of `feature`'s `value`."""
        assert isinstance(feature, Feature)
        assert isinstance(value, basestring)
        self.parent = (feature, value)

    def __hash__(self):
        return self._hash

    def __str__(self):
        return self.name
def reset ():
    """ Clear the module state. This is mainly for testing purposes.
    """
    global __all_attributes, __all_features, __implicit_features, __composite_properties
    global __subfeature_from_value, __all_top_features, __free_features
    global __all_subfeatures

    # sets the default value of False for each valid attribute
    for attr in VALID_ATTRIBUTES:
        setattr(Feature, attr.replace("-", "_"), False)

    # A map containing all features. The key is the feature name.
    # The value is an instance of Feature class.
    __all_features = {}

    # All non-subfeatures.
    __all_top_features = []

    # Maps values to the corresponding implicit feature
    __implicit_features = {}

    # A map containing all composite properties. The key is a Property instance,
    # and the value is a list of Property instances
    __composite_properties = {}

    # Maps a value to the corresponding subfeature name.
    __subfeature_from_value = {}

    # All free features
    __free_features = []

    # Names of all declared subfeatures.
    __all_subfeatures = []

# Initialise the module state at import time.
reset ()
def enumerate ():
    """ Returns an iterator over (name, Feature) pairs in the features map.

        NOTE(review): this shadows the builtin `enumerate` inside this
        module; renaming would break external callers, so it is left as-is.
    """
    return __all_features.iteritems ()
def get(name):
    """Return the Feature instance for the specified name.

    Throws (KeyError) if no feature by such name exists.
    """
    assert isinstance(name, basestring)
    return __all_features[name]
# FIXME: prepare-test/finish-test?
@bjam_signature((["name"], ["values", "*"], ["attributes", "*"]))
def feature (name, values, attributes = []):
    """ Declares a new feature with the given name, values, and attributes.
        name: the feature name
        values: a sequence of the allowable values - may be extended later with feature.extend
        attributes: a sequence of the feature's attributes (e.g. implicit, free, propagated, ...)

        Returns the new Feature instance, which is also registered in the
        module-level feature map under both its plain and gristed name.
    """
    __validate_feature_attributes (name, attributes)

    feature = Feature(name, [], attributes)
    __all_features[name] = feature
    # Temporary measure while we have not fully moved from 'gristed strings'
    __all_features["<" + name + ">"] = feature

    name = add_grist(name)

    if 'subfeature' in attributes:
        __all_subfeatures.append(name)
    else:
        __all_top_features.append(feature)

    extend (name, values)

    # FIXME: why this is needed.
    if 'free' in attributes:
        __free_features.append (name)

    return feature
@bjam_signature((["feature"], ["value"]))
def set_default (feature, value):
    """ Sets the default value of the given feature, overriding any previous default.
        feature: the name of the feature
        value: the default value to assign
    """
    f = __all_features[feature]
    # 'free' and 'optional' features may not carry a default value.
    for bad_attribute in ('free', 'optional'):
        if getattr(f, bad_attribute):
            raise InvalidValue ("%s property %s cannot have a default" % (bad_attribute, f.name))

    if value not in f.values:
        raise InvalidValue ("The specified default value, '%s' is invalid.\n" % value + "allowed values are: %s" % f.values)

    f.set_default(value)
def defaults(features):
    """ Returns the default property values for the given features.

    Free and optional features, and features without a default, contribute
    nothing.
    """
    assert is_iterable_typed(features, Feature)
    # FIXME: should merge feature and property modules.
    from . import property
    return [property.Property(f, f.default)
            for f in features
            if f.default and not (f.free or f.optional)]
def valid (names):
    """ Returns true iff all elements of names are valid features.

    Accepts either a single feature name or a sequence of names.
    """
    if isinstance(names, str):
        names = [names]
    assert is_iterable_typed(names, basestring)
    for candidate in names:
        if candidate not in __all_features:
            return False
    return True
def attributes (feature):
    """ Returns the attributes of the given feature.

    Raises KeyError for an unknown feature name.
    """
    assert isinstance(feature, basestring)
    return __all_features[feature].attributes_string_list
def values (feature):
    """ Return the values of the given feature.

    Validates the name first, so an unknown feature raises InvalidFeature
    rather than KeyError.
    """
    assert isinstance(feature, basestring)
    validate_feature (feature)
    return __all_features[feature].values
def is_implicit_value (value_string):
    """ Returns true iff 'value_string' is a value_string
    of an implicit feature.

    NOTE(review): for a direct match this returns the Feature object itself
    (truthy) rather than True; callers appear to rely on truthiness only.
    """
    assert isinstance(value_string, basestring)
    if value_string in __implicit_features:
        return __implicit_features[value_string]
    # Dashed form: the first component must name an implicit feature and every
    # remaining component must be a registered implied subfeature value for it.
    v = value_string.split('-')
    if v[0] not in __implicit_features:
        return False
    feature = __implicit_features[v[0]]
    for subvalue in (v[1:]):
        if not __find_implied_subfeature(feature, subvalue, v[0]):
            return False
    return True
def implied_feature (implicit_value):
    """ Returns the implicit feature associated with the given implicit value.

    Only the part of the value before the first '-' is consulted; raises
    InvalidValue when it does not name an implicit feature.
    """
    assert isinstance(implicit_value, basestring)
    main_value = implicit_value.split('-')[0]
    if main_value not in __implicit_features:
        raise InvalidValue ("'%s' is not a value of an implicit feature" % implicit_value)
    return __implicit_features[main_value]
def __find_implied_subfeature (feature, subvalue, value_string):
    # Look up the subfeature registered for (feature, value_string, subvalue);
    # returns None when any level of the nested mapping is missing.
    assert isinstance(feature, Feature)
    assert isinstance(subvalue, basestring)
    assert isinstance(value_string, basestring)
    try:
        return __subfeature_from_value[feature][value_string][subvalue]
    except KeyError:
        return None
# Given a feature and a value of one of its subfeatures, find the name
# of the subfeature. If value-string is supplied, looks for implied
# subfeatures that are specific to that value of feature
# feature # The main feature name
# subvalue # The value of one of its subfeatures
# value-string # The value of the main feature
def implied_subfeature (feature, subvalue, value_string):
    """Like __find_implied_subfeature, but raises InvalidValue instead of
    returning None when no matching subfeature is registered."""
    assert isinstance(feature, Feature)
    assert isinstance(subvalue, basestring)
    assert isinstance(value_string, basestring)
    result = __find_implied_subfeature (feature, subvalue, value_string)
    if not result:
        raise InvalidValue ("'%s' is not a known subfeature value of '%s%s'" % (subvalue, feature, value_string))
    return result
def validate_feature (name):
    """ Checks that 'name' is a valid feature. Otherwise, raises InvalidFeature.

    Returns the Feature instance on success.
    """
    assert isinstance(name, basestring)
    if name not in __all_features:
        raise InvalidFeature ("'%s' is not a valid feature name" % name)
    else:
        return __all_features[name]
# Uses Property
def __expand_subfeatures_aux (property_, dont_validate = False):
    """ Helper for expand_subfeatures.
    Given a feature and value, or just a value corresponding to an
    implicit feature, returns a property set consisting of all component
    subfeatures and their values. For example:
    expand_subfeatures <toolset>gcc-2.95.2-linux-x86
    -> <toolset>gcc <toolset-version>2.95.2 <toolset-os>linux <toolset-cpu>x86
    equivalent to:
    expand_subfeatures gcc-2.95.2-linux-x86
    feature: The name of the feature, or empty if value corresponds to an implicit property
    value: The value of the feature.
    dont_validate: If True, no validation of value string will be done.
    """
    from . import property # no __debug__ since Property is used elsewhere
    assert isinstance(property_, property.Property)
    assert isinstance(dont_validate, int) # matches bools
    f = property_.feature
    v = property_.value
    if not dont_validate:
        validate_value_string(f, v)
    # Head component is the main feature's value; the tail are subvalues.
    components = v.split ("-")
    v = components[0]
    result = [property.Property(f, components[0])]
    subvalues = components[1:]
    while len(subvalues) > 0:
        subvalue = subvalues [0] # pop the head off of subvalues
        subvalues = subvalues [1:]
        subfeature = __find_implied_subfeature (f, subvalue, v)
        # If no subfeature was found, reconstitute the value string and use that
        if not subfeature:
            return [property.Property(f, '-'.join(components))]
        result.append(property.Property(subfeature, subvalue))
    return result
def expand_subfeatures(properties, dont_validate = False):
    """
    Make all elements of properties corresponding to implicit features
    explicit, and express all subfeature values as separate properties
    in their own right. For example, the property
    gcc-2.95.2-linux-x86
    might expand to
    <toolset>gcc <toolset-version>2.95.2 <toolset-os>linux <toolset-cpu>x86
    properties: A sequence with elements of the form
    <feature>value-string or just value-string in the
    case of implicit features.
    : dont_validate: If True, no validation of value string will be done.
    """
    if __debug__:
        from .property import Property
        assert is_iterable_typed(properties, Property)
        assert isinstance(dont_validate, int) # matches bools
    expanded = []
    for prop in properties:
        if prop.feature.subfeature:
            # Subfeature properties are never expanded further.
            expanded.append(prop)
        else:
            expanded.extend(__expand_subfeatures_aux(prop, dont_validate))
    return expanded
# rule extend was defined as below:
# Can be called three ways:
#
# 1. extend feature : values *
# 2. extend <feature> subfeature : values *
# 3. extend <feature>value-string subfeature : values *
#
# * Form 1 adds the given values to the given feature
# * Forms 2 and 3 add subfeature values to the given feature
# * Form 3 adds the subfeature values as specific to the given
# property value-string.
#
#rule extend ( feature-or-property subfeature ? : values * )
#
# Now, the specific rule must be called, depending on the desired operation:
# extend_feature
# extend_subfeature
@bjam_signature([['name'], ['values', '*']])
def extend (name, values):
    """ Adds the given values to the given feature.

    For implicit features, each new value is also registered in the global
    value -> feature map (duplicates across features are an error). The very
    first value of a non-free, non-optional feature becomes its default.
    """
    assert isinstance(name, basestring)
    assert is_iterable_typed(values, basestring)
    name = add_grist (name)
    __validate_feature (name)
    feature = __all_features [name]
    if feature.implicit:
        for v in values:
            if v in __implicit_features:
                raise BaseException ("'%s' is already associated with the feature '%s'" % (v, __implicit_features [v]))
            __implicit_features[v] = feature
    if values and not feature.values and not(feature.free or feature.optional):
        # This is the first value specified for this feature,
        # take it as default value
        feature.set_default(values[0])
    feature.add_values(values)
def validate_value_string (f, value_string):
    """ Checks that value-string is a valid value-string for the given feature.

    Free features accept anything. Otherwise the string must either be a
    known value, or (for features with subfeatures) a dashed combination
    'value-subvalue...' whose components are all known; violations raise
    InvalidValue.
    """
    assert isinstance(f, Feature)
    assert isinstance(value_string, basestring)
    if f.free or value_string in f.values:
        return
    values = [value_string]
    if f.subfeatures:
        if not value_string in f.values and \
            not value_string in f.subfeatures:
            values = value_string.split('-')
    # An empty value is allowed for optional features
    if not values[0] in f.values and \
           (values[0] or not f.optional):
        raise InvalidValue ("'%s' is not a known value of feature '%s'\nlegal values: '%s'" % (values [0], f.name, f.values))
    for v in values [1:]:
        # this will validate any subfeature values in value-string
        implied_subfeature(f, v, values[0])
""" Extends the given subfeature with the subvalues. If the optional
value-string is provided, the subvalues are only valid for the given
value of the feature. Thus, you could say that
<target-platform>mingw is specific to <toolset>gcc-2.95.2 as follows:
extend-subfeature toolset gcc-2.95.2 : target-platform : mingw ;
feature: The feature whose subfeature is being extended.
value-string: If supplied, specifies a specific value of the
main feature for which the new subfeature values
are valid.
subfeature: The name of the subfeature.
subvalues: The additional values of the subfeature being defined.
"""
def extend_subfeature (feature_name, value_string, subfeature_name, subvalues):
assert isinstance(feature_name, basestring)
assert isinstance(value_string, basestring)
assert isinstance(subfeature_name, basestring)
assert is_iterable_typed(subvalues, basestring)
feature = validate_feature(feature_name)
if value_string:
validate_value_string(feature, value_string)
subfeature_name = feature_name + '-' + __get_subfeature_name (subfeature_name, value_string)
extend(subfeature_name, subvalues) ;
subfeature = __all_features[subfeature_name]
if value_string == None: value_string = ''
if feature not in __subfeature_from_value:
__subfeature_from_value[feature] = {}
if value_string not in __subfeature_from_value[feature]:
__subfeature_from_value[feature][value_string] = {}
for subvalue in subvalues:
__subfeature_from_value [feature][value_string][subvalue] = subfeature
@bjam_signature((["feature_name", "value_string", "?"], ["subfeature"],
                 ["subvalues", "*"], ["attributes", "*"]))
def subfeature (feature_name, value_string, subfeature, subvalues, attributes = []):
    """ Declares a subfeature.
    feature_name: Root feature that is not a subfeature.
    value_string: An optional value-string specifying which feature or
                  subfeature values this subfeature is specific to,
                  if any.
    subfeature: The name of the subfeature being declared.
    subvalues: The allowed values of this subfeature.
    attributes: The attributes of the subfeature.
    """
    parent_feature = validate_feature (feature_name)
    # Add grist to the subfeature name if a value-string was supplied
    subfeature_name = __get_subfeature_name (subfeature, value_string)
    if subfeature_name in __all_features[feature_name].subfeatures:
        message = "'%s' already declared as a subfeature of '%s'" % (subfeature, feature_name)
        message += " specific to '%s'" % value_string
        raise BaseException (message)
    # First declare the subfeature as a feature in its own right
    f = feature (feature_name + '-' + subfeature_name, subvalues, attributes + ['subfeature'])
    f.set_parent(parent_feature, value_string)
    parent_feature.add_subfeature(f)
    # Now make sure the subfeature values are known.
    extend_subfeature (feature_name, value_string, subfeature, subvalues)
@bjam_signature((["composite_property_s"], ["component_properties_s", "*"]))
def compose (composite_property_s, component_properties_s):
    """ Sets the components of the given composite property.
    All parameters are <feature>value strings

    Raises if the feature is not composite, if components have already been
    set for this property, or if the property would contain itself.
    """
    from . import property
    component_properties_s = to_seq (component_properties_s)
    composite_property = property.create_from_string(composite_property_s)
    f = composite_property.feature
    if len(component_properties_s) > 0 and isinstance(component_properties_s[0], property.Property):
        component_properties = component_properties_s
    else:
        component_properties = [property.create_from_string(p) for p in component_properties_s]
    if not f.composite:
        raise BaseException ("'%s' is not a composite feature" % f)
    # BUG FIX: the original tested "property in __composite_properties" --
    # 'property' is the module imported above, so the duplicate-definition
    # check could never fire. Test the actual composite property.
    if composite_property in __composite_properties:
        raise BaseException ('components of "%s" already set: %s' % (composite_property, str (__composite_properties[composite_property])))
    if composite_property in component_properties:
        raise BaseException ('composite property "%s" cannot have itself as a component' % composite_property)
    __composite_properties[composite_property] = component_properties
def expand_composite(property_):
    """Return *property_* followed by the recursive expansion of its
    registered composite components (if any)."""
    if __debug__:
        from .property import Property
        assert isinstance(property_, Property)
    expansion = [property_]
    for component in __composite_properties.get(property_, []):
        expansion.extend(expand_composite(component))
    return expansion
@bjam_signature((['feature'], ['properties', '*']))
def get_values (feature, properties):
    """ Returns all values of the given feature specified by the given property set.

    The feature may be given with or without grist; values are returned
    with the grist stripped.
    """
    if feature[0] != '<':
        feature = '<' + feature + '>'
    return [replace_grist (p, '') for p in properties if get_grist (p) == feature]
def free_features ():
    """ Returns all free features.

    NOTE: returns the module-level list itself, not a copy; callers should
    not mutate it.
    """
    return __free_features
def expand_composites (properties):
    """ Expand all composite properties in the set so that all components
    are explicitly expressed.

    Values introduced purely by composite expansion are dropped for features
    the input already mentions explicitly; conflicting expansions raise
    FeatureConflict.
    """
    if __debug__:
        from .property import Property
        assert is_iterable_typed(properties, Property)
    explicit_features = set(p.feature for p in properties)
    result = []
    # now expand composite features
    for p in properties:
        expanded = expand_composite(p)
        for x in expanded:
            if not x in result:
                f = x.feature
                if f.free:
                    result.append (x)
                elif not x in properties: # x is the result of expansion
                    if not f in explicit_features: # not explicitly-specified
                        if any(r.feature == f for r in result):
                            raise FeatureConflict(
                                "expansions of composite features result in "
                                "conflicting values for '%s'\nvalues: '%s'\none contributing composite property was '%s'" %
                                (f.name, [r.value for r in result if r.feature == f] + [x.value], p))
                        else:
                            result.append (x)
                elif any(r.feature == f for r in result):
                    raise FeatureConflict ("explicitly-specified values of non-free feature '%s' conflict\n"
                    "existing values: '%s'\nvalue from expanding '%s': '%s'" % (f,
                        [r.value for r in result if r.feature == f], p, x.value))
                else:
                    result.append (x)
    return result
# Uses Property
def is_subfeature_of (parent_property, f):
    """ Return true iff f is an ordinary subfeature of the parent_property's
    feature, or if f is a subfeature of the parent_property's feature
    specific to the parent_property's value.
    """
    if __debug__:
        from .property import Property
        assert isinstance(parent_property, Property)
        assert isinstance(f, Feature)
    if not f.subfeature:
        return False
    parent = f.parent
    if not parent:
        return False
    if parent[0] != parent_property.feature:
        return False
    # An empty parent value means the subfeature applies to every value of
    # the parent feature; otherwise the values must match.
    return not parent[1] or parent[1] == parent_property.value
def __is_subproperty_of (parent_property, p):
    """ As is_subfeature_of, for subproperties.

    Delegates to is_subfeature_of on p's feature.
    """
    if __debug__:
        from .property import Property
        assert isinstance(parent_property, Property)
        assert isinstance(p, Property)
    return is_subfeature_of (parent_property, p.feature)
# Returns true iff the subvalue is valid for the feature. When the
# optional value-string is provided, returns true iff the subvalues
# are valid for the given value of the feature.
def is_subvalue(feature, value_string, subfeature, subvalue):
    """Return True iff *subvalue* is registered for *subfeature*, valid for
    the given *value_string* of *feature* (an empty/falsy value_string means
    'any value of the feature')."""
    assert isinstance(feature, basestring)
    assert isinstance(value_string, basestring)
    assert isinstance(subfeature, basestring)
    assert isinstance(subvalue, basestring)
    lookup_value = value_string or ''
    registered = __subfeature_from_value.get(feature, {}).get(lookup_value, {}).get(subvalue)
    return registered == subfeature
# Uses Property
def expand (properties):
    """ Fully normalize a property set: implicit features are made explicit,
    combined subfeature values are split into individual properties, and
    composite properties are replaced by their components. Non-free feature
    values given directly in the input take precedence over values arising
    from composite expansion; two conflicting directly-given values of one
    non-free feature are an error.
    """
    if __debug__:
        from .property import Property
        assert is_iterable_typed(properties, Property)
    return expand_composites(expand_subfeatures(properties))
# Accepts list of Property objects
def add_defaults (properties):
    """ Given a set of properties, add default values for features not
    represented in the set.
    Note: if there's there's ordinary feature F1 and composite feature
    F2, which includes some value for F1, and both feature have default values,
    then the default value of F1 will be added, not the value in F2. This might
    not be right idea: consider
    feature variant : debug ... ;
    <variant>debug : .... <runtime-debugging>on
    feature <runtime-debugging> : off on ;
    Here, when adding default for an empty property set, we'll get
    <variant>debug <runtime_debugging>off
    and that's kind of strange.
    """
    if __debug__:
        from .property import Property
        assert is_iterable_typed(properties, Property)
    # create a copy since properties will be modified
    result = list(properties)
    # We don't add default for conditional properties. We don't want
    # <variant>debug:<define>DEBUG to be takes as specified value for <variant>
    handled_features = set(p.feature for p in properties if not p.condition)
    # First fill in defaults for missing top-level features...
    missing_top = [f for f in __all_top_features if not f in handled_features]
    more = defaults(missing_top)
    result.extend(more)
    handled_features.update(p.feature for p in more)
    # Add defaults for subfeatures of features which are present
    # (iterate over a snapshot: result grows while we loop).
    for p in result[:]:
        subfeatures = [s for s in p.feature.subfeatures if not s in handled_features]
        more = defaults(__select_subfeatures(p, subfeatures))
        handled_features.update(h.feature for h in more)
        result.extend(more)
    return result
def minimize (properties):
    """ Given an expanded property set, eliminate all redundancy: properties
    which are elements of other (composite) properties in the set will
    be eliminated. Non-symmetric properties equal to default values will be
    eliminated, unless they override a value from some composite property.
    Implicit properties will be expressed without feature
    grist, and sub-property values will be expressed as elements joined
    to the corresponding main property.
    """
    if __debug__:
        from .property import Property
        assert is_iterable_typed(properties, Property)
    # remove properties implied by composite features
    # NOTE(review): the loop variable 'property' shadows the builtin (and the
    # property module name used elsewhere in this file).
    components = []
    component_features = set()
    for property in properties:
        if property in __composite_properties:
            cs = __composite_properties[property]
            components.extend(cs)
            component_features.update(c.feature for c in cs)
    properties = b2.util.set.difference (properties, components)
    # handle subfeatures and implicit features
    # move subfeatures to the end of the list
    properties = [p for p in properties if not p.feature.subfeature] +\
        [p for p in properties if p.feature.subfeature]
    result = []
    while properties:
        p = properties[0]
        f = p.feature
        # locate all subproperties of $(x[1]) in the property set
        subproperties = [x for x in properties if is_subfeature_of(p, x.feature)]
        if subproperties:
            # reconstitute the joined property name
            subproperties.sort ()
            joined = b2.build.property.Property(p.feature, p.value + '-' + '-'.join ([sp.value for sp in subproperties]))
            result.append(joined)
            properties = b2.util.set.difference(properties[1:], subproperties)
        else:
            # eliminate properties whose value is equal to feature's
            # default and which are not symmetric and which do not
            # contradict values implied by composite properties.
            # since all component properties of composites in the set
            # have been eliminated, any remaining property whose
            # feature is the same as a component of a composite in the
            # set must have a non-redundant value.
            if p.value != f.default or f.symmetric or f in component_features:
                result.append (p)
            properties = properties[1:]
    return result
def split (properties):
    """ Given a property-set of the form
    v1/v2/...vN-1/<fN>vN/<fN+1>vN+1/...<fM>vM
    Returns
    v1 v2 ... vN-1 <fN>vN <fN+1>vN+1 ... <fM>vM
    Note that vN...vM may contain slashes. This is resilient to the
    substitution of backslashes for slashes, since Jam, unbidden,
    sometimes swaps slash direction on NT.
    """
    assert isinstance(properties, basestring)
    def split_one (properties):
        # Split on slash/backslash, then re-join pieces that belong to the
        # value of the preceding gristed property (values may contain '/').
        pieces = re.split (__re_slash_or_backslash, properties)
        result = []
        for x in pieces:
            if not get_grist (x) and len (result) > 0 and get_grist (result [-1]):
                result = result [0:-1] + [ result [-1] + '/' + x ]
            else:
                result.append (x)
        return result
    if isinstance (properties, str):
        return split_one (properties)
    # NOTE(review): this branch is unreachable in py2 given the basestring
    # assert plus the str check above, unless 'properties' is unicode.
    result = []
    for p in properties:
        result += split_one (p)
    return result
def compress_subproperties (properties):
    """ Combine all subproperties into their parent properties
    Requires: for every subproperty, there is a parent property. All
    features are explicitly expressed.
    This rule probably shouldn't be needed, but
    build-request.expand-no-defaults is being abused for unintended
    purposes and it needs help
    """
    from .property import Property
    assert is_iterable_typed(properties, Property)
    result = []
    matched_subs = set()
    all_subs = set()
    for p in properties:
        f = p.feature
        if not f.subfeature:
            # Fold every subproperty of p into p's value, joined with '-'.
            subs = [x for x in properties if is_subfeature_of(p, x.feature)]
            if subs:
                matched_subs.update(subs)
                subvalues = '-'.join (sub.value for sub in subs)
                result.append(Property(
                    p.feature, p.value + '-' + subvalues,
                    p.condition))
            else:
                result.append(p)
        else:
            all_subs.add(p)
    # TODO: this variables are used just for debugging. What's the overhead?
    # Every subproperty must have been absorbed by some parent property.
    assert all_subs == matched_subs
    return result
######################################################################################
# Private methods
def __select_subproperties (parent_property, properties):
    # Return the subset of 'properties' that are subproperties of
    # 'parent_property' (see __is_subproperty_of).
    if __debug__:
        from .property import Property
        assert is_iterable_typed(properties, Property)
        assert isinstance(parent_property, Property)
    return [ x for x in properties if __is_subproperty_of (parent_property, x) ]
def __get_subfeature_name (subfeature, value_string):
    """Return the (possibly value-specific) registry name for *subfeature*.

    When *value_string* is supplied the name is qualified as
    '<value_string>:<subfeature>'; otherwise it is just *subfeature*.
    """
    assert isinstance(subfeature, basestring)
    assert isinstance(value_string, basestring) or value_string is None
    # Identity test with 'is None' (the original used '== None').
    if value_string is None:
        prefix = ''
    else:
        prefix = value_string + ':'
    return prefix + subfeature
def __validate_feature_attributes (name, attributes):
    """Check the attribute list of a feature declaration.

    Raises InvalidAttribute for unknown or mutually-exclusive attributes and
    AlreadyDefined when the feature name is already taken.
    """
    assert isinstance(name, basestring)
    assert is_iterable_typed(attributes, basestring)
    for attribute in attributes:
        if attribute not in VALID_ATTRIBUTES:
            # BUG FIX: the original message was built from a set difference
            # against an unrelated '__all_attributes' name while membership
            # was tested against VALID_ATTRIBUTES; report the offending
            # attribute directly instead.
            raise InvalidAttribute ("unknown attribute: '%s' in feature declaration: '%s'" % (attribute, name))
    if name in __all_features:
        raise AlreadyDefined ("feature '%s' already defined" % name)
    elif 'implicit' in attributes and 'free' in attributes:
        raise InvalidAttribute ("free features cannot also be implicit (in declaration of feature '%s')" % name)
    elif 'free' in attributes and 'propagated' in attributes:
        raise InvalidAttribute ("free features cannot also be propagated (in declaration of feature '%s')" % name)
def __validate_feature (feature):
    """ Generates an error if the feature is unknown.

    Expects the gristed feature name ('<name>'), as produced by add_grist.
    """
    assert isinstance(feature, basestring)
    if feature not in __all_features:
        raise BaseException ('unknown feature "%s"' % feature)
def __select_subfeatures (parent_property, features):
    """ Given a property, return the subset of features consisting of all
    ordinary subfeatures of the property's feature, and all specific
    subfeatures of the property's feature which are conditional on the
    property's value.
    """
    if __debug__:
        from .property import Property
        assert isinstance(parent_property, Property)
        assert is_iterable_typed(features, Feature)
    return [f for f in features if is_subfeature_of (parent_property, f)]
# FIXME: copy over tests.
| |
#!/usr/bin/python2.7
#
# Interface for the assignment
#
__author__ = 'Nitin Pasumarthy'
# Name of the scratch database the tester creates/uses.
DATABASE_NAME = 'test_dds_assgn1'
# TODO: Change these as per your code
RATINGS_TABLE = 'ratings'
RANGE_TABLE_PREFIX = 'range_part'
RROBIN_TABLE_PREFIX = 'rrobin_part'
# Column names expected in the ratings table and its partitions.
USER_ID_COLNAME = 'userid'
MOVIE_ID_COLNAME = 'movieid'
RATING_COLNAME = 'rating'
INPUT_FILE_PATH = 'test_data.dat'
ACTUAL_ROWS_IN_INPUT_FILE = 20  # Number of lines in the input file
import psycopg2
import datetime
import time
import Assignment as MyAssignment # TODO: Change the 'Assignment' to your filename
# SETUP Functions
def createdb(dbname):
    """
    We create a DB by connecting to the default user and database of Postgres
    The function first checks if an existing database exists for a given name, else creates it.
    :return:None
    """
    # Connect to the default database; CREATE DATABASE cannot run inside a
    # transaction, so switch to autocommit.
    con = getopenconnection()
    con.set_isolation_level(psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)
    cur = con.cursor()
    try:
        # Check if an existing database with the same name exists.
        # Parameterized query: the original interpolated dbname with '%',
        # which breaks on quotes and is an injection hazard.
        cur.execute('SELECT COUNT(*) FROM pg_catalog.pg_database WHERE datname=%s', (dbname,))
        count = cur.fetchone()[0]
        if count == 0:
            # Identifiers cannot be parameterized; dbname is trusted config here.
            cur.execute('CREATE DATABASE %s' % (dbname,))  # Create the database
        else:
            print('A database named "{0}" already exists'.format(dbname))
    finally:
        # Clean up even when a query fails (the original leaked on error).
        cur.close()
        con.close()
def getopenconnection(user='postgres', password='1234', dbname='postgres'):
    """Open a psycopg2 connection to *dbname* on localhost with the given credentials."""
    dsn = "dbname='{0}' user='{1}' host='localhost' password='{2}'".format(dbname, user, password)
    return psycopg2.connect(dsn)
# ##############
# Utilities
def handleerror(message):
    """Print *message* on an error line prefixed with 'E:' and the current timestamp."""
    timestamp = getformattedtime(time.time())
    print('\nE: {0} {1}'.format(timestamp, message))
def getformattedtime(srctime):
return datetime.datetime.fromtimestamp(srctime).strftime('%Y-%m-%d %H:%M:%S')
def formattedprint(message, newlineafter=False):
    """Print *message* prefixed with 'T:' and a timestamp; optionally add a trailing blank line."""
    suffix = "\n" if newlineafter else ""
    print("T: {0} {1}{2}".format(getformattedtime(time.time()), message, suffix))
# ##############
# Decorators
def timeme(func):
    """Decorator: report the wall-clock duration of each call via formattedprint.

    Uses functools.wraps (missing in the original) so the wrapper keeps the
    wrapped function's __name__/__doc__, which the timing message prints.
    """
    import functools

    @functools.wraps(func)
    def timeme_and_call(*args, **kwargs):
        tic = time.time()
        res = func(*args, **kwargs)
        toc = time.time()
        formattedprint('Took %2.5fs for "%r()"' % (toc - tic, func.__name__))
        return res
    return timeme_and_call
class LogMe(object):
    """Decorator class: print a fixed message (via formattedprint) before each call."""

    def __init__(self, *args, **kwargs):
        # First positional argument is the message to log.
        self.message = args[0]

    def __call__(self, func):
        def wrapped_func(*args, **kwargs):
            formattedprint(self.message)
            return func(*args, **kwargs)
        return wrapped_func
def testme(func):
def testme_and_call(*args, **kwargs):
try:
res = func(*args, **kwargs)
formattedprint('Test passed!', True)
except Exception as e:
formattedprint('Test failed :( Error: {0}'.format(e), True)
return False
return res
return testme_and_call
# ##########
# Helpers for Tester functions
def checkpartitioncount(cursor, expectedpartitions, prefix):
    """Raise if the number of public tables named like *prefix* differs from *expectedpartitions*."""
    cursor.execute(
        "SELECT COUNT(table_name) FROM information_schema.tables WHERE table_schema = 'public' AND table_name LIKE '{0}%';".format(
            prefix))
    found = int(cursor.fetchone()[0])
    if found != expectedpartitions:
        raise Exception(
            'Range partitioning not done properly. Excepted {0} table(s) but found {1} table(s)'.format(
                expectedpartitions,
                found))
def totalrowsinallpartitions(cur, n, rangepartitiontableprefix, partitionstartindex):
    """Count the rows across partitions prefix+i for i in [start, start+n) via one UNION ALL query."""
    selects = ['SELECT * FROM {0}{1}'.format(rangepartitiontableprefix, i)
               for i in range(partitionstartindex, n + partitionstartindex)]
    cur.execute('SELECT COUNT(*) FROM ({0}) AS T'.format(' UNION ALL '.join(selects)))
    return int(cur.fetchone()[0])
def testrangeandrobinpartitioning(n, openconnection, rangepartitiontableprefix, partitionstartindex):
    """Validate a partitioning: table count plus completeness, disjointness
    and reconstruction of the merged rows versus the input file size."""
    with openconnection.cursor() as cur:
        if not isinstance(n, int) or n < 0:
            # Test 1: Check the number of tables created, if 'n' is invalid
            checkpartitioncount(cur, 0, rangepartitiontableprefix)
        else:
            # Test 2: Check the number of tables created, if all args are correct
            checkpartitioncount(cur, n, rangepartitiontableprefix)
            # Compute the merged row count ONCE; the original ran the same
            # UNION ALL query three times for tests 3-5.
            count = totalrowsinallpartitions(cur, n, rangepartitiontableprefix, partitionstartindex)
            # Test 3: Completeness
            if count < ACTUAL_ROWS_IN_INPUT_FILE: raise Exception(
                "Completeness property of Range Partitioning failed. Excpected {0} rows after merging all tables, but found {1} rows".format(
                    ACTUAL_ROWS_IN_INPUT_FILE, count))
            # Test 4: Disjointness
            if count > ACTUAL_ROWS_IN_INPUT_FILE: raise Exception(
                "Dijointness property of Range Partitioning failed. Excpected {0} rows after merging all tables, but found {1} rows".format(
                    ACTUAL_ROWS_IN_INPUT_FILE, count))
            # Test 5: Reconstruction
            if count != ACTUAL_ROWS_IN_INPUT_FILE: raise Exception(
                "Rescontruction property of Range Partitioning failed. Excpected {0} rows after merging all tables, but found {1} rows".format(
                    ACTUAL_ROWS_IN_INPUT_FILE, count))
def testrangerobininsert(expectedtablename, itemid, openconnection, rating, userid):
    """Return True iff exactly one (userid, itemid, rating) row exists in *expectedtablename*."""
    query = 'SELECT COUNT(*) FROM {0} WHERE {4} = {1} AND {5} = {2} AND {6} = {3}'.format(
        expectedtablename, userid, itemid, rating,
        USER_ID_COLNAME, MOVIE_ID_COLNAME, RATING_COLNAME)
    with openconnection.cursor() as cur:
        cur.execute(query)
        matches = int(cur.fetchone()[0])
    return matches == 1
# ##########
# Testers
@LogMe('Testing LoadingRating()')
@testme
@timeme
def testloadratings(ratingstablename, filepath, openconnection, rowsininpfile):
    """
    Tests the load ratings function
    :param ratingstablename: Argument for function to be tested
    :param filepath: Argument for function to be tested
    :param openconnection: Argument for function to be tested
    :param rowsininpfile: Number of rows in the input file provided for assertion
    :return:Raises exception if any test fails
    """
    MyAssignment.loadratings(ratingstablename, filepath, openconnection)
    # Test 1: Count the number of rows inserted
    # NOTE(review): counts the module constant RATINGS_TABLE rather than the
    # 'ratingstablename' argument — confirm this is intentional.
    with openconnection.cursor() as cur:
        cur.execute('SELECT COUNT(*) from {0}'.format(RATINGS_TABLE))
        count = int(cur.fetchone()[0])
        if count != rowsininpfile:
            raise Exception(
                'Expected {0} rows, but {1} rows in \'{2}\' table'.format(rowsininpfile, count, RATINGS_TABLE))
@LogMe('Testing RangePartition()')
@testme
@timeme
def testrangepartition(ratingstablename, n, openconnection, rangepartitiontableprefix, partitionstartindex):
    """
    Tests the range partition function for Completness, Disjointness and Reconstruction
    :param ratingstablename: Argument for function to be tested
    :param n: Argument for function to be tested
    :param openconnection: Argument for function to be tested
    :param rangepartitiontableprefix: This function assumes that you tables are named in an order. Eg: rangepart1, rangepart2...
    :param partitionstartindex: Indicates how the table names are indexed. Do they start as rangepart1, 2 ... or rangepart0, 1, 2...
    :return:Raises exception if any test fails
    """
    try:
        MyAssignment.rangepartition(ratingstablename, n, openconnection)
    except Exception:
        # ignore any exceptions raised by function; the table checks below
        # decide pass/fail independently
        pass
    testrangeandrobinpartitioning(n, openconnection, rangepartitiontableprefix, partitionstartindex)
@LogMe('Testing RoundRobinPartition()')
@testme
@timeme
def testroundrobinpartition(ratingstablename, numberofpartitions, openconnection, robinpartitiontableprefix,
                            partitionstartindex):
    """
    Tests the round robin partitioning for Completness, Disjointness and Reconstruction
    :param ratingstablename: Argument for function to be tested
    :param numberofpartitions: Argument for function to be tested
    :param openconnection: Argument for function to be tested
    :param robinpartitiontableprefix: This function assumes that you tables are named in an order. Eg: robinpart1, robinpart2...
    :param partitionstartindex: Indicates how the table names are indexed. Do they start as robinpart1, 2 ... or robinpart0, 1, 2...
    :return:Raises exception if any test fails
    """
    try:
        MyAssignment.roundrobinpartition(ratingstablename, numberofpartitions, openconnection)
    except Exception:
        # ignore any exceptions raised by function; the table checks below
        # decide pass/fail independently
        pass
    testrangeandrobinpartitioning(numberofpartitions, openconnection, robinpartitiontableprefix, partitionstartindex)
@LogMe('Testing RoundRobinInsert()')
@testme
@timeme
def testroundrobininsert(ratingstablename, userid, itemid, rating, openconnection, expectedtablename):
    """
    Tests the roundrobin insert function by checking whether the tuple is inserted in he Expected table you provide
    :param ratingstablename: Argument for function to be tested
    :param userid: Argument for function to be tested
    :param itemid: Argument for function to be tested
    :param rating: Argument for function to be tested
    :param openconnection: Argument for function to be tested
    :param expectedtablename: The expected table to which the record has to be saved
    :return:Raises exception if any test fails
    """
    try:
        MyAssignment.roundrobininsert(ratingstablename, userid, itemid, rating, openconnection)
    except Exception:
        # ignore any exceptions raised by function; the row lookup below
        # decides pass/fail independently
        pass
    if not testrangerobininsert(expectedtablename, itemid, openconnection, rating, userid):
        raise Exception(
            'Round robin insert failed! Couldnt find ({0}, {1}, {2}) tuple in {3} table'.format(userid, itemid, rating,
                                                                                                expectedtablename))
@LogMe('Testing RangeInsert()')
@testme
@timeme
def testrangeinsert(ratingstablename, userid, itemid, rating, openconnection, expectedtablename):
    """Verify that rangeinsert() stores the tuple in the expected partition table.

    :param ratingstablename: Argument forwarded to the function under test
    :param userid: Argument forwarded to the function under test
    :param itemid: Argument forwarded to the function under test
    :param rating: Argument forwarded to the function under test
    :param openconnection: Argument forwarded to the function under test
    :param expectedtablename: Table in which the record must end up
    :return: Raises an exception if any test fails
    """
    try:
        MyAssignment.rangeinsert(ratingstablename, userid, itemid, rating, openconnection)
    except Exception:
        # Failures inside the function under test surface via the check below.
        pass
    inserted = testrangerobininsert(expectedtablename, itemid, openconnection, rating, userid)
    if not inserted:
        message = 'Range insert failed! Couldnt find ({0}, {1}, {2}) tuple in {3} table'.format(
            userid, itemid, rating, expectedtablename)
        raise Exception(message)
@LogMe('Deleting all testing tables using your own function')
def testdelete(openconnection):
    # Not testing this piece!!!
    # Delegates directly to the student's cleanup routine; no assertions here.
    MyAssignment.deleteeverythingandexit(openconnection)
# Middleware
def before_db_creation_middleware():
    """Hook invoked before the test database is created. Intentionally empty."""
    # Use it if you want to
    pass
def after_db_creation_middleware(databasename):
    """Hook invoked right after *databasename* is created. Intentionally empty."""
    # Use it if you want to
    pass
def before_test_script_starts_middleware(openconnection, databasename):
    """Hook invoked before the test sequence runs. Intentionally empty."""
    pass
def after_test_script_ends_middleware(openconnection, databasename):
    """Hook invoked after the test sequence finishes. Intentionally empty."""
    # Use it if you want to
    pass
# Test driver: creates the database, runs the partitioning tests in sequence,
# then optionally deletes all tables. Any failure is routed to handleerror().
if __name__ == '__main__':
    try:
        # Use this function to do any set up before creating the DB, if any
        before_db_creation_middleware()
        createdb(DATABASE_NAME)
        # Use this function to do any set up after creating the DB, if any
        after_db_creation_middleware(DATABASE_NAME)
        with getopenconnection(dbname=DATABASE_NAME) as conn:
            # Autocommit so each statement takes effect immediately (no explicit commits).
            conn.set_isolation_level(psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)
            before_test_script_starts_middleware(conn, DATABASE_NAME)
            testloadratings(RATINGS_TABLE, INPUT_FILE_PATH, conn, ACTUAL_ROWS_IN_INPUT_FILE)
            # ALERT:: Use only one at a time i.e. uncomment only one line at a time and run the script
            testrangepartition(RATINGS_TABLE, 5, conn, RANGE_TABLE_PREFIX, 1)
            # testrangepartition(RATINGS_TABLE, -1, conn, RANGE_TABLE_PREFIX, 1)
            # testrangepartition(RATINGS_TABLE, 5.6, conn, RANGE_TABLE_PREFIX, 1)
            # ALERT:: Use only one at a time i.e. uncomment only one line at a time and run the script
            testroundrobinpartition(RATINGS_TABLE, 5, conn, RROBIN_TABLE_PREFIX, 0)
            # testroundrobinpartition(RATINGS_TABLE, -1, conn, RROBIN_TABLE_PREFIX, 0)
            # testroundrobinpartition(RATINGS_TABLE, 5.6, conn, RROBIN_TABLE_PREFIX, 0)
            # ALERT:: Use only one at a time i.e. uncomment only one line at a time and run the script
            testroundrobininsert(RATINGS_TABLE, 100, 1, 3, conn, RROBIN_TABLE_PREFIX + '1')
            # testroundrobininsert(RATINGS_TABLE, 100, 1, -3, conn, RROBIN_TABLE_PREFIX + '1')
            # ALERT:: Use only one at a time i.e. uncomment only one line at a time and run the script
            testrangeinsert(RATINGS_TABLE, 100, 2, 3, conn, RANGE_TABLE_PREFIX + '3')
            # testrangeinsert(RATINGS_TABLE, 100, 2, -3, conn, RANGE_TABLE_PREFIX + '3')
            choice = raw_input('Press enter to Delete all tables? ')
            if choice == '':
                testdelete(conn)
            # Use this function to do any set up after I finish testing, if you want to
            after_test_script_ends_middleware(conn, DATABASE_NAME)
    except Exception as detail:
        handleerror(detail)
| |
import networkx as nx
import random
from PyGNA import NetworkFrames
import itertools
import copy
def generateSimpleGNA(timesteps, startingNodes, startingEdges):
    """Build a random undirected network, mutate it for `timesteps` steps,
    recording each frame, and write the result to simpleNetwork.graphML.

    Each step picks a random node; even-numbered nodes add/remove a node
    depending on their 'state', otherwise nodes divisible by 3 toggle a
    random node's state. An edge is then added or removed at random.
    """
    #graph = nx.DiGraph(name='Test Network')
    graph = nx.Graph(name='Test Network')
    myNetworkFrames = NetworkFrames.NetworkFrames()
    for nodes in xrange(startingNodes):
        addNodeSimple(graph,nodes)
    for edges in xrange(startingEdges):
        addEdge(graph)
    for timestep in xrange(timesteps):
        randNode = graph.nodes()[random.randint(0,len(graph.nodes())-1)]
        if randNode % 2 == 0:
            #If 'state' == 0 add node
            if graph.node[randNode]['state'] == 0:
                newNode = graph.nodes()[-1] + 1
                addNodeSimple(graph,newNode)
            elif graph.node[randNode]['state'] == 1:
                delNode(graph, randNode)
        elif randNode % 3 == 0:
            changeMe = graph.nodes()[random.randint(0,len(graph.nodes())-1)]
            changeNodeState(graph, changeMe)
        # Even random edge index -> add an edge; odd -> delete that edge.
        randEdge = 0
        if len(graph.edges()) > 0:
            randEdge = random.randint(0,len(graph.edges())-1)
        if randEdge % 2 == 0:
            addEdge(graph)
        else:
            delEdge(graph,randEdge)
        myNetworkFrames.addGraph(graph)
    myNetworkFrames.writeGraphs("simpleNetwork.graphML")
def generateBinaryStateGNA(timesteps, rule=""):
    """Evolve a binary-state network from a single node for `timesteps` steps.

    `rule` is a 4-digit string; each digit (0-9) selects the rewriting
    method applied for a (state, neighborhood-majority) tuple in the order
    (1,1),(1,0),(0,1),(0,0). An empty string picks random digits. Frames
    are written to BinaryState.graphML.
    """
    graph = nx.Graph()
    addNode(graph,0,0)
    myNetworkFrames = NetworkFrames.NetworkFrames()
    myNetworkFrames.addGraph(graph)
    replacementRules = {}
    if rule != "":
        digits = list(rule)
        replacementRules = {(1,1):int(digits[0]),(1,0):int(digits[1]),(0,1):int(digits[2]),(0,0):int(digits[3])}
    else:
        replacementRules = {(0,0):random.randint(0,9),(0,1):random.randint(0,9),(1,0):random.randint(0,9),(1,1):random.randint(0,9)}
    for time in xrange(timesteps):
        if len(graph.nodes()) > 0:
            # Rewrite a random node according to its (state, majority) tuple.
            randNode = graph.nodes()[random.randint(0, len(graph.nodes())-1)]
            replace(graph, randNode, replacementRules[getTuple(graph, randNode)])
            myNetworkFrames.addGraph(graph)
        else:
            # All nodes deleted; the simulation cannot continue.
            break
    myNetworkFrames.writeGraphs("BinaryState.graphML")
def getTuple(graph, node):
    """Return (nodeState, majority) for *node*.

    `majority` is 1 if the node plus its neighbors lean toward state 1,
    0 if they lean toward state 0, and a random 0/1 coin flip on a tie.
    """
    nodeState = graph.node[node]['state']
    # Start the tally from the node's own state, then adjust per neighbor.
    majority = nodeState
    if len(graph.edges()) > 0:
        for nodes in graph.edge[node]:
            if nodes in graph.node:
                if graph.node[nodes]['state'] == 0:
                    majority -= 1
                else:
                    majority += 1
    if majority == 0:
        # Tie: break it randomly.
        majority = random.randint(0,1)
    else:
        majority = 0 if majority < 0 else 1
    return (nodeState,majority)
def replace(graph, node, rewritingMethod):
    """Apply rewriting rule *rewritingMethod* (0-9) to *node*.

    Rule 0 deletes the node, 1 is a no-op, 2 toggles the node's binary
    'state', and rules 3-9 duplicate the node with differing copy counts
    and state-assignment schemes (see duplicateNodes).
    """
    if rewritingMethod == 0:
        graph.remove_node(node)
    elif rewritingMethod == 1:
        return
    elif rewritingMethod == 2:
        graph.node[node]['state'] = 0 if graph.node[node]['state'] == 1 else 1
    else:
        # (copies, divideType) arguments for duplicateNodes, keyed by rule number.
        duplicationArgs = {3: (2, 0), 4: (2, 1), 5: (2, 2), 6: (3, 0),
                           7: (3, 1), 8: (3, 3), 9: (3, 2)}
        if rewritingMethod in duplicationArgs:
            copies, divideType = duplicationArgs[rewritingMethod]
            duplicateNodes(graph, node, copies, divideType)
def duplicateNodes(graph, node, dup, divideType):
    """Split *node* into *dup* total nodes, redistributing its edges.

    The original node's edges are removed, dup-1 fresh nodes are created
    with states chosen by *divideType* (0: copy state, 1: invert state,
    2/3: mixed schemes), the original edges are dealt round-robin across
    the node group, and the group is connected in a ring.
    """
    # Detach the original node, remembering its neighbors for redistribution.
    originalEdges = copy.deepcopy(graph.edge[node])
    for edges in originalEdges:
        graph.remove_edge(node,edges)
    newNodes = []
    dups = 1
    unique = getUniqueNodeIdentifier(graph)
    while dups < dup:
        newNodes.append(unique)
        unique += 1
        dups += 1
    # Preserve node state
    if divideType == 0:
        for nodes in newNodes:
            addNode(graph, nodes, graph.node[node]['state'])
    # Invert node states
    elif divideType == 1:
        invert = 0 if graph.node[node]['state'] == 1 else 1
        for nodes in newNodes:
            addNode(graph, nodes, invert)
    # Alternate states, starting from the original state only when dup == 3
    elif divideType == 2:
        invert = 0 if graph.node[node]['state'] == 1 else 1
        initial = graph.node[node]['state'] if dup == 3 else invert
        for nodes in newNodes:
            addNode(graph, nodes, initial)
            initial = 0 if initial == 1 else 1
    # invert two node states
    # NOTE(review): only the first new node actually gets the flipped state
    # here (flipped is set after one inversion) -- confirm this matches the
    # intended "invert two" semantics.
    elif divideType == 3:
        initial = graph.node[node]['state']
        flipped = 0
        for nodes in newNodes:
            addNode(graph, nodes, initial)
            if flipped == 0:
                initial = 0 if initial == 1 else 1
                flipped = 1
    # Prepare list for edge connection section
    newNodes.append(node)
    # Distribute original edges round-robin over the node group.
    nodeTarget = 0
    for edges in originalEdges:
        graph.add_edge(newNodes[nodeTarget],edges)
        if nodeTarget == len(newNodes) -1:
            nodeTarget = 0
        else:
            nodeTarget += 1
    # Connect new nodes to each other in a ring.
    start = 0
    end = 1
    while start <= len(newNodes) - 1:
        graph.add_edge(newNodes[start],newNodes[end])
        start += 1
        end += 1
        if end > len(newNodes) - 1:
            end = 0
def getUniqueNodeIdentifier(graph):
    """Return a node identifier that is not yet used in *graph*.

    Starts just past the last node id (or at 0 for an empty graph) and
    advances until an unused id is found.
    """
    nodeList = graph.nodes()
    candidate = 0
    if len(nodeList) > 0:
        candidate = nodeList[-1] + 1
    while candidate in graph.node:
        candidate += 1
    return candidate
def addNodeSimple(graph, node):
    """Add *node* to *graph* with a random binary 'state' attribute."""
    nodeState = random.randint(0,1)
    graph.add_node(node,state=nodeState)
def addNode(graph, node, stateval):
    """Add *node* to *graph* with an explicit 'state' attribute."""
    graph.add_node(node,state=stateval)
def changeNodeState(graph, node):
    """Toggle the binary 'state' attribute of *node* between 0 and 1."""
    currentState = graph.node[node]['state']
    graph.node[node]['state'] = 1 if currentState == 0 else 0
def delNode(graph, node):
    """Remove *node* from *graph*, printing it first as a debug trace."""
    print node
    graph.remove_node(node)
def addEdge(graph):
    """Try to add one new random edge between two distinct nodes.

    Makes at most len(graph.nodes()) attempts; gives up silently if every
    sampled pair is already connected.
    """
    for attempts in xrange(len(graph.nodes())):
        start = graph.nodes()[random.randint(0,len(graph.nodes())-1)]
        # Re-draw the endpoint until it differs from the start node.
        end = start
        while start == end:
            end = graph.nodes()[random.randint(0,len(graph.nodes())-1)]
        if (start,end) not in graph.edges():
            graph.add_edge(start,end)
            break
def delEdge(graph, edge):
    """Remove the edge at positional index *edge* in graph.edges()."""
    endpointA, endpointB = graph.edges()[edge]
    graph.remove_edge(endpointA, endpointB)
def readNetworks():
    """Split output.graphML into one numbered graphML file per frame.

    Reads every input network frame and writes each one to its own file
    (output1.graphML, output2.graphML, ...).
    """
    inFrames = NetworkFrames.NetworkFrames()
    outFrames = NetworkFrames.NetworkFrames()
    inFrames.readGraphML("output.graphML")
    for fileIndex, network in enumerate(inFrames.getInputNetwork(), 1):
        targetName = "output" + str(fileIndex) + ".graphML"
        outFrames.addGraph(network)
        outFrames.writeGraphs(targetName)
        # Reset so the next frame is written to a fresh file on its own.
        outFrames.clearInputNetworks()
# Interactive entry point: choose between the simple GNA and the binary-state GNA.
# SECURITY NOTE: this is Python 2 input(), which eval()s what the user types;
# do not run with untrusted input.
if __name__ == "__main__":
    network = input("Enter 0 for simple network and 1 for binary state GNA: ")
    if int(network) == 0:
        numTime=input("Enter number of time steps: ")
        numNodes=input("Enter number of starting nodes: ")
        numEdges=input("Enter number of starting edges: ")
        generateSimpleGNA(int(numTime),int(numNodes),int(numEdges))
    else:
        numTime = input("Enter number of time steps: ")
        rnInput = input("Enter specific rule number >= 0 & <= 9999 or enter -1 for a random rule. ")
        ruleNumber = ""
        if rnInput != -1:
            # Normalize the rule to exactly four digits: left-pad with zeros,
            # truncate from the right if it is too long.
            ruleNumber = str(rnInput)
            while len(ruleNumber) < 4:
                ruleNumber = '0' + ruleNumber
            while len(ruleNumber) > 4:
                ruleNumber = ruleNumber[:-1]
        generateBinaryStateGNA(int(numTime),ruleNumber)
| |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the experimental input pipeline ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import numpy as np
from tensorflow.contrib.data.python.kernel_tests import dataset_serialization_test_base
from tensorflow.contrib.data.python.ops import batching
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.platform import test
class BatchDatasetTest(test.TestCase):
  """Tests for tf.contrib.data batching transformations: dense_to_sparse_batch,
  unbatch, (padded_)batch_and_drop_remainder and map_and_batch."""
  def assertSparseValuesEqual(self, a, b):
    """Assert two SparseTensorValues have equal indices, values and dense_shape."""
    self.assertAllEqual(a.indices, b.indices)
    self.assertAllEqual(a.values, b.values)
    self.assertAllEqual(a.dense_shape, b.dense_shape)
  def testDenseToSparseBatchDataset(self):
    components = np.random.randint(12, size=(100,)).astype(np.int32)
    iterator = (
        dataset_ops.Dataset.from_tensor_slices(components)
        .map(lambda x: array_ops.fill([x], x)).apply(
            batching.dense_to_sparse_batch(4, [12]))
        .make_initializable_iterator())
    init_op = iterator.initializer
    get_next = iterator.get_next()
    with self.test_session() as sess:
      sess.run(init_op)
      for start in range(0, len(components), 4):
        results = sess.run(get_next)
        self.assertAllEqual([[i, j]
                             for i, c in enumerate(components[start:start + 4])
                             for j in range(c)], results.indices)
        self.assertAllEqual(
            [c for c in components[start:start + 4] for _ in range(c)],
            results.values)
        self.assertAllEqual([min(4,
                                 len(components) - start), 12],
                            results.dense_shape)
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(get_next)
  def testDenseToSparseBatchDatasetWithUnknownShape(self):
    components = np.random.randint(5, size=(40,)).astype(np.int32)
    iterator = (
        dataset_ops.Dataset.from_tensor_slices(components)
        .map(lambda x: array_ops.fill([x, x], x)).apply(
            batching.dense_to_sparse_batch(
                4, [5, None])).make_initializable_iterator())
    init_op = iterator.initializer
    get_next = iterator.get_next()
    with self.test_session() as sess:
      sess.run(init_op)
      for start in range(0, len(components), 4):
        results = sess.run(get_next)
        self.assertAllEqual([[i, j, z]
                             for i, c in enumerate(components[start:start + 4])
                             for j in range(c)
                             for z in range(c)], results.indices)
        self.assertAllEqual([
            c
            for c in components[start:start + 4] for _ in range(c)
            for _ in range(c)
        ], results.values)
        self.assertAllEqual([
            min(4,
                len(components) - start), 5,
            np.max(components[start:start + 4])
        ], results.dense_shape)
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(get_next)
  def testDenseToSparseBatchDatasetWithInvalidShape(self):
    input_tensor = array_ops.constant([[1]])
    with self.assertRaisesRegexp(ValueError, "Dimension -2 must be >= 0"):
      dataset_ops.Dataset.from_tensors(input_tensor).apply(
          batching.dense_to_sparse_batch(4, [-2])).make_initializable_iterator()
  def testDenseToSparseBatchDatasetShapeErrors(self):
    input_tensor = array_ops.placeholder(dtypes.int32)
    iterator = (
        dataset_ops.Dataset.from_tensors(input_tensor).apply(
            batching.dense_to_sparse_batch(4, [12]))
        .make_initializable_iterator())
    init_op = iterator.initializer
    get_next = iterator.get_next()
    with self.test_session() as sess:
      # Initialize with an input tensor of incompatible rank.
      sess.run(init_op, feed_dict={input_tensor: [[1]]})
      with self.assertRaisesRegexp(errors.InvalidArgumentError,
                                   "incompatible with the row shape"):
        sess.run(get_next)
      # Initialize with an input tensor that is larger than `row_shape`.
      sess.run(init_op, feed_dict={input_tensor: range(13)})
      with self.assertRaisesRegexp(errors.DataLossError,
                                   "larger than the row shape"):
        sess.run(get_next)
  def testUnbatchScalarDataset(self):
    data = tuple([math_ops.range(10) for _ in range(3)])
    data = dataset_ops.Dataset.from_tensor_slices(data)
    expected_types = (dtypes.int32,) * 3
    data = data.batch(2)
    self.assertEqual(expected_types, data.output_types)
    data = data.apply(batching.unbatch())
    self.assertEqual(expected_types, data.output_types)
    iterator = data.make_one_shot_iterator()
    op = iterator.get_next()
    with self.test_session() as sess:
      for i in range(10):
        self.assertEqual((i,) * 3, sess.run(op))
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(op)
  def testUnbatchSingleElementTupleDataset(self):
    data = tuple([(math_ops.range(10),) for _ in range(3)])
    data = dataset_ops.Dataset.from_tensor_slices(data)
    expected_types = ((dtypes.int32,),) * 3
    data = data.batch(2)
    self.assertEqual(expected_types, data.output_types)
    data = data.apply(batching.unbatch())
    self.assertEqual(expected_types, data.output_types)
    iterator = data.make_one_shot_iterator()
    op = iterator.get_next()
    with self.test_session() as sess:
      for i in range(10):
        self.assertEqual(((i,),) * 3, sess.run(op))
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(op)
  def testUnbatchMultiElementTupleDataset(self):
    data = tuple([(math_ops.range(10 * i, 10 * i + 10),
                   array_ops.fill([10], "hi")) for i in range(3)])
    data = dataset_ops.Dataset.from_tensor_slices(data)
    expected_types = ((dtypes.int32, dtypes.string),) * 3
    data = data.batch(2)
    self.assertAllEqual(expected_types, data.output_types)
    data = data.apply(batching.unbatch())
    self.assertAllEqual(expected_types, data.output_types)
    iterator = data.make_one_shot_iterator()
    op = iterator.get_next()
    with self.test_session() as sess:
      for i in range(10):
        self.assertEqual(((i, b"hi"), (10 + i, b"hi"), (20 + i, b"hi")),
                         sess.run(op))
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(op)
  def testBatchAndDropRemainder(self):
    components = (np.arange(7),
                  np.array([[1, 2, 3]]) * np.arange(7)[:, np.newaxis],
                  np.array(37.0) * np.arange(7))
    batch_size = array_ops.placeholder(dtypes.int64, shape=[])
    iterator = (
        dataset_ops.Dataset.from_tensor_slices(components).apply(
            batching.batch_and_drop_remainder(batch_size))
        .make_initializable_iterator())
    next_element = iterator.get_next()
    with self.test_session() as sess:
      for test_batch_size in [1, 3, 7, 10]:
        sess.run(iterator.initializer, feed_dict={batch_size: test_batch_size})
        num_batches = 7 // test_batch_size
        for i in range(num_batches):
          result = sess.run(next_element)
          for component, result_component in zip(components, result):
            for j in range(test_batch_size):
              self.assertAllEqual(component[(i * test_batch_size + j)],
                                  result_component[j])
        with self.assertRaises(errors.OutOfRangeError):
          sess.run(next_element)
  def testBatchAndDropRemainderSparse(self):
    def _sparse(i):
      return sparse_tensor.SparseTensorValue(
          indices=[[0]], values=(i * [1]), dense_shape=[1])
    iterator = dataset_ops.Dataset.range(12).map(_sparse).apply(
        batching.batch_and_drop_remainder(5)).make_initializable_iterator()
    init_op = iterator.initializer
    get_next = iterator.get_next()
    with self.test_session() as sess:
      sess.run(init_op)
      for i in range(2):
        actual = sess.run(get_next)
        expected = sparse_tensor.SparseTensorValue(
            indices=[[0, 0], [1, 0], [2, 0], [3, 0], [4, 0]],
            values=[i * 5, i * 5 + 1, i * 5 + 2, i * 5 + 3, i * 5 + 4],
            dense_shape=[5, 1])
        self.assertTrue(sparse_tensor.is_sparse(actual))
        self.assertSparseValuesEqual(actual, expected)
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(get_next)
  def testPaddedBatchAndDropRemainder(self):
    els = []
    for length in [3, 6, 9, 4, 12, 10, 2]:
      els.append((np.array(length), np.arange(length) + 1,
                  np.array(length * 2)))
    dataset = dataset_ops.Dataset.from_tensors(els[0])
    for el in els[1:]:
      dataset = dataset.concatenate(dataset_ops.Dataset.from_tensors(el))
    batch_size = array_ops.placeholder(dtypes.int64, shape=[])
    iterator = (
        dataset.apply(
            batching.padded_batch_and_drop_remainder(
                batch_size, ([], [None], []))).make_initializable_iterator())
    next_element = iterator.get_next()
    with self.test_session() as sess:
      for test_batch_size in [1, 3, 7, 10]:
        sess.run(iterator.initializer, feed_dict={batch_size: test_batch_size})
        num_batches = 7 // test_batch_size
        for i in range(num_batches):
          result = sess.run(next_element)
          for component_idx, result_component in enumerate(result):
            for j in range(test_batch_size):
              data_idx = i * test_batch_size + j
              comp = result_component[j]
              unpadded = comp[comp > 0]
              if np.isscalar(comp):
                # The boolean mask indexing above adds a dim back. Rm it.
                unpadded = unpadded[0]
              self.assertAllEqual(els[data_idx][component_idx], unpadded)
        with self.assertRaises(errors.OutOfRangeError):
          sess.run(next_element)
  def testPaddedBatchAndDropRemainderSparseError(self):
    def _map_fn(i):
      return sparse_tensor.SparseTensorValue(
          indices=[[0, 0]], values=(i * [1]), dense_shape=[1, 1]), i
    with self.assertRaises(TypeError):
      _ = dataset_ops.Dataset.range(10).map(_map_fn).apply(
          batching.padded_batch_and_drop_remainder(5))
  def testBatchAndDropRemainderShapeInference(self):
    components = (array_ops.placeholder(dtypes.int32),
                  (array_ops.placeholder(dtypes.int32, shape=[None]),
                   array_ops.placeholder(dtypes.int32, shape=[20, 30])))
    # Test with a statically known batch size.
    dataset = (
        dataset_ops.Dataset.from_tensor_slices(components).apply(
            batching.batch_and_drop_remainder(128)))
    self.assertIs(None, dataset.output_shapes[0].ndims)
    self.assertEqual([128], dataset.output_shapes[1][0].as_list())
    self.assertEqual([128, 30], dataset.output_shapes[1][1].as_list())
    # Test with a dynamic batch size: the static shape will be unknown, because
    # `batch_size` is a placeholder.
    batch_size = array_ops.placeholder(dtypes.int64)
    dataset = (
        dataset_ops.Dataset.from_tensor_slices(components).apply(
            batching.batch_and_drop_remainder(batch_size)))
    self.assertIs(None, dataset.output_shapes[0].ndims)
    self.assertEqual([None], dataset.output_shapes[1][0].as_list())
    self.assertEqual([None, 30], dataset.output_shapes[1][1].as_list())
  def _testMapAndBatchDatasetHelper(self, num_parallel_batches=1):
    """Test a dataset that maps a TF function across its input elements."""
    # The pipeline is TensorSliceDataset ->
    # RepeatDataset(count) -> MapAndBatchDataset(square_3, batch_size).
    components = (np.arange(7),
                  np.array([[1, 2, 3]]) * np.arange(7)[:, np.newaxis],
                  np.array(37.0) * np.arange(7))
    count = array_ops.placeholder(dtypes.int64, shape=[])
    batch_size = array_ops.placeholder(dtypes.int64, shape=[])
    def _map_fn(x, y, z):
      return math_ops.square(x), math_ops.square(y), math_ops.square(z)
    iterator = (
        dataset_ops.Dataset.from_tensor_slices(components).repeat(count).apply(
            batching.map_and_batch(
                map_func=_map_fn,
                batch_size=batch_size,
                num_parallel_batches=num_parallel_batches))
        .make_initializable_iterator())
    init_op = iterator.initializer
    get_next = iterator.get_next()
    self.assertEqual([[None] + list(c.shape[1:]) for c in components],
                     [t.shape.as_list() for t in get_next])
    with self.test_session() as sess:
      # Batch of a finite input, where the batch_size divides the
      # total number of elements.
      sess.run(init_op, feed_dict={count: 28, batch_size: 14})
      num_batches = (28 * 7) // 14
      for i in range(num_batches):
        result = sess.run(get_next)
        for component, result_component in zip(components, result):
          for j in range(14):
            self.assertAllEqual(component[(i * 14 + j) % 7]**2,
                                result_component[j])
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(get_next)
      # Batch of a finite input, where the batch_size does not
      # divide the total number of elements.
      sess.run(init_op, feed_dict={count: 14, batch_size: 8})
      # We expect (num_batches - 1) full-sized batches.
      num_batches = int(math.ceil((14 * 7) / 8))
      for i in range(num_batches - 1):
        result = sess.run(get_next)
        for component, result_component in zip(components, result):
          for j in range(8):
            self.assertAllEqual(component[(i * 8 + j) % 7]**2,
                                result_component[j])
      result = sess.run(get_next)
      for component, result_component in zip(components, result):
        for j in range((14 * 7) % 8):
          self.assertAllEqual(component[((num_batches - 1) * 8 + j) % 7]**2,
                              result_component[j])
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(get_next)
      # Batch of an empty input should fail straight away.
      sess.run(init_op, feed_dict={count: 0, batch_size: 8})
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(get_next)
      # Empty batch should be an initialization time error.
      with self.assertRaises(errors.InvalidArgumentError):
        sess.run(init_op, feed_dict={count: 14, batch_size: 0})
  def testMapAndBatchDataset(self):
    return self._testMapAndBatchDatasetHelper()
  def testMapAndBatchDatasetWithParallelBatching(self):
    return self._testMapAndBatchDatasetHelper(num_parallel_batches=10)
  def _testMapAndBatchPartialBatchHelper(self, drop_remainder=False):
    """Check static shapes and values when the final batch is partial."""
    iterator = (
        dataset_ops.Dataset.range(10).apply(
            batching.map_and_batch(
                lambda x: array_ops.reshape(x * x, [1]),
                batch_size=4,
                drop_remainder=drop_remainder)).make_one_shot_iterator())
    if drop_remainder:
      self.assertEqual([4, 1], iterator.output_shapes.as_list())
    else:
      self.assertEqual([None, 1], iterator.output_shapes.as_list())
    next_element = iterator.get_next()
    with self.test_session() as sess:
      self.assertAllEqual([[0], [1], [4], [9]], sess.run(next_element))
      self.assertAllEqual([[16], [25], [36], [49]], sess.run(next_element))
      if not drop_remainder:
        self.assertAllEqual([[64], [81]], sess.run(next_element))
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(next_element)
  def testMapAndBatchPartialBatch(self):
    return self._testMapAndBatchPartialBatchHelper()
  def testMapAndBatchPartialBatchDropRemainder(self):
    return self._testMapAndBatchPartialBatchHelper(drop_remainder=True)
  def testMapAndBatchYieldsPartialBatch(self):
    iterator = (dataset_ops.Dataset.range(10)
                .apply(batching.map_and_batch(
                    lambda x: array_ops.reshape(x * x, [1]), 4))
                .make_one_shot_iterator())
    self.assertEqual([None, 1], iterator.output_shapes.as_list())
    next_element = iterator.get_next()
    with self.test_session() as sess:
      self.assertAllEqual([[0], [1], [4], [9]], sess.run(next_element))
      self.assertAllEqual([[16], [25], [36], [49]], sess.run(next_element))
      self.assertAllEqual([[64], [81]], sess.run(next_element))
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(next_element)
  def testMapAndBatchSparse(self):
    def _sparse(i):
      return sparse_tensor.SparseTensorValue(
          indices=[[0]], values=(i * [1]), dense_shape=[1])
    iterator = dataset_ops.Dataset.range(10).apply(
        batching.map_and_batch(_sparse, 5)).make_initializable_iterator()
    init_op = iterator.initializer
    get_next = iterator.get_next()
    with self.test_session() as sess:
      sess.run(init_op)
      for i in range(2):
        actual = sess.run(get_next)
        expected = sparse_tensor.SparseTensorValue(
            indices=[[0, 0], [1, 0], [2, 0], [3, 0], [4, 0]],
            values=[i * 5, i * 5 + 1, i * 5 + 2, i * 5 + 3, i * 5 + 4],
            dense_shape=[5, 1])
        self.assertTrue(sparse_tensor.is_sparse(actual))
        self.assertSparseValuesEqual(actual, expected)
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(get_next)
  def testMapAndBatchDatasetFails(self):
    """Test a dataset that maps a TF function across its input elements."""
    dataset = dataset_ops.Dataset.from_tensors(
        array_ops.check_numerics(
            constant_op.constant(1.0) / constant_op.constant(0.0), "oops"))
    batch_size = array_ops.placeholder(dtypes.int64, shape=[])
    iterator = (
        dataset.apply(batching.map_and_batch(lambda x: x, batch_size))
        .make_initializable_iterator())
    init_op = iterator.initializer
    with self.test_session() as sess:
      with self.assertRaisesRegexp(errors.InvalidArgumentError, "oops"):
        sess.run(init_op, feed_dict={batch_size: 14})
  def testMapAndBatchDatasetShapeMismatch(self):
    """Test a dataset that maps a TF function across its input elements."""
    def generator():
      yield [1]
      yield [2]
      yield [3]
      yield [[4, 5, 6]]
    dataset = dataset_ops.Dataset.from_generator(
        generator, output_types=dtypes.int32)
    batch_size = 4
    iterator = (
        dataset.apply(batching.map_and_batch(lambda x: x, batch_size))
        .make_initializable_iterator())
    init_op = iterator.initializer
    get_next = iterator.get_next()
    with self.test_session() as sess:
      sess.run(init_op)
      with self.assertRaisesRegexp(errors.InvalidArgumentError,
                                   "number of elements does not match"):
        sess.run(get_next)
class BatchDatasetSerializationTest(
    dataset_serialization_test_base.DatasetSerializationTestBase):
  """Checkpoint/restore (serialization) tests for Dataset.batch()."""
  def build_dataset(self, multiplier=15.0, tensor_slice_len=2, batch_size=2):
    """Build a batched three-component tensor-slice dataset for the tests."""
    components = (
        np.arange(tensor_slice_len),
        np.array([[1, 2, 3]]) * np.arange(tensor_slice_len)[:, np.newaxis],
        np.array(multiplier) * np.arange(tensor_slice_len))
    return dataset_ops.Dataset.from_tensor_slices(components).batch(batch_size)
  def testCore(self):
    tensor_slice_len = 8
    batch_size = 2
    num_outputs = tensor_slice_len // batch_size
    self.run_core_tests(
        lambda: self.build_dataset(15.0, tensor_slice_len, batch_size),
        lambda: self.build_dataset(20.0, tensor_slice_len, batch_size),
        num_outputs)
  def _build_dataset_dense_to_sparse(self, components):
    return dataset_ops.Dataset.from_tensor_slices(components).map(
        lambda x: array_ops.fill([x], x)).apply(
            batching.dense_to_sparse_batch(4, [12]))
  # TODO(b/70988345): Re-enable when sparse tensors are properly supported by
  # the DatasetSerializationTestBase.
  def _testDenseToSparseBatchDatasetCore(self):
    components = np.random.randint(5, size=(40,)).astype(np.int32)
    diff_comp = np.random.randint(2, size=(100,)).astype(np.int32)
    num_outputs = len(components) // 4
    self.run_core_tests(lambda: self._build_dataset_dense_to_sparse(components),
                        lambda: self._build_dataset_dense_to_sparse(diff_comp),
                        num_outputs)
  def _sparse(self, i):
    return sparse_tensor.SparseTensorValue(
        indices=[[0]], values=(i * [1]), dense_shape=[1])
  def _build_dataset_sparse(self, batch_size=5):
    return dataset_ops.Dataset.range(10).map(self._sparse).batch(batch_size)
  def testSparseCore(self):
    self.run_core_tests(self._build_dataset_sparse,
                        lambda: self._build_dataset_sparse(2), 2)
  def _build_dataset_nested_sparse(self):
    return dataset_ops.Dataset.range(10).map(self._sparse).batch(5).batch(2)
  def testNestedSparseCore(self):
    self.run_core_tests(self._build_dataset_nested_sparse, None, 1)
class PaddedBatchDatasetSerializationTest(
    dataset_serialization_test_base.DatasetSerializationTestBase):
  """Checkpoint/restore (serialization) tests for Dataset.padded_batch()."""
  def testPaddedBatch(self):
    def build_dataset(seq_lens):
      return dataset_ops.Dataset.from_tensor_slices(seq_lens).map(
          lambda x: array_ops.fill([x], x)).padded_batch(
              4, padded_shapes=[-1])
    seq_lens1 = np.random.randint(1, 20, size=(32,)).astype(np.int32)
    seq_lens2 = np.random.randint(21, 40, size=(32,)).astype(np.int32)
    self.run_core_tests(lambda: build_dataset(seq_lens1),
                        lambda: build_dataset(seq_lens2), 8)
  def testPaddedBatchNonDefaultPadding(self):
    def build_dataset(seq_lens):
      def fill_tuple(x):
        filled = array_ops.fill([x], x)
        return (filled, string_ops.as_string(filled))
      padded_shape = [-1]
      return dataset_ops.Dataset.from_tensor_slices(seq_lens).map(
          fill_tuple).padded_batch(
              4,
              padded_shapes=(padded_shape, padded_shape),
              padding_values=(-1, "<end>"))
    seq_lens1 = np.random.randint(1, 20, size=(32,)).astype(np.int32)
    seq_lens2 = np.random.randint(21, 40, size=(32,)).astype(np.int32)
    self.run_core_tests(lambda: build_dataset(seq_lens1),
                        lambda: build_dataset(seq_lens2), 8)
# Run every test case in this module under the TensorFlow test runner.
if __name__ == "__main__":
  test.main()
| |
# toontown.minigame.DistributedDivingGame
from panda3d.core import BitMask32, CollideMask, CollisionHandler, CollisionHandlerEvent, CollisionHandlerPusher, CollisionNode, CollisionSphere, CollisionTraverser, Lens, NodePath, Point3, Vec2, Vec3
from direct.showbase.ShowBaseGlobal import *
from toontown.toonbase.ToonBaseGlobal import *
from direct.interval.IntervalGlobal import *
from toontown.toonbase import ToontownTimer
from DistributedMinigame import *
from direct.distributed.ClockDelta import *
from direct.fsm import ClassicFSM
from direct.fsm import State
from direct.task import Task
from direct.actor import Actor
from toontown.toon import LaffMeter
from direct.distributed import DistributedSmoothNode
import ArrowKeys
import Ring
import RingTrack
import DivingGameGlobals
import RingGroup
import RingTrackGroups
import random
import DivingGameToonSD
import DivingFishSpawn
import DivingTreasure
import math
import TreasureScorePanel
from otp.distributed.TelemetryLimiter import TelemetryLimiter, TLGatherAllAvs
from toontown.toonbase import ToontownGlobals
from toontown.toonbase import TTLocalizer
class DivingGameRotationLimiter(TelemetryLimiter):
    """Telemetry limiter that pins a remote avatar's heading and pitch to
    fixed values, keeping only its own roll."""
    def __init__(self, h, p):
        # Fixed heading/pitch applied to every update of the tracked object.
        self._h = h
        self._p = p
    def __call__(self, obj):
        # Overwrite H and P, preserving the object's current roll.
        obj.setHpr(self._h, self._p, obj.getR())
class DistributedDivingGame(DistributedMinigame):
    # Task names registered with the task manager so they can be removed on cleanup.
    COLLISION_WATCH_TASK = 'DivingGameCollisionWatchTask'
    TREASURE_BOUNDS_TASK = 'DivingGameTreasureBoundsTask'
    CRAB_TASK = 'DivingGameCrabTask'
    UPDATE_LOCALTOON_TASK = 'DivingGameUpdateLocalToonTask'
    COLLISION_DETECTION_PRIORITY = 5
    # NOTE(review): MAP_DIV/MAP_OFF look like world-to-minimap scale and offset
    # factors, and LAG_COMP a lag-compensation multiplier -- confirm against the
    # code that consumes them (not visible in this chunk).
    MAP_DIV = 2.8
    MAP_OFF = 14.0
    LAG_COMP = 1.25
    def __init__(self, cr):
        """Set up the minigame FSM and per-game counters.

        cr -- the client repository passed through to DistributedMinigame.
        """
        DistributedMinigame.__init__(self, cr)
        # Game FSM: off -> swim -> cleanup; registered as a child of the minigame FSM.
        self.gameFSM = ClassicFSM.ClassicFSM('DistributedDivingGame', [State.State('off', self.enterOff, self.exitOff, ['swim']), State.State('swim', self.enterSwim, self.exitSwim, ['cleanup']), State.State('cleanup', self.enterCleanup, self.exitCleanup, [])], 'off', 'cleanup')
        self.addChildGameFSM(self.gameFSM)
        # Runtime flags; their consumers are outside this chunk (TODO confirm exact semantics).
        self.iCount = 0
        self.reachedFlag = 0
        self.grabbingTreasure = -1
        self.dead = 0
    def getTitle(self):
        """Return the localized title string for the diving minigame."""
        return TTLocalizer.DivingGameTitle
def getInstructions(self):
p = self.avIdList.index(self.localAvId)
if self.isSinglePlayer():
text = TTLocalizer.DivingInstructionsSinglePlayer
else:
text = TTLocalizer.DivingInstructionsMultiPlayer
return text
def load(self):
self.notify.debug('load')
DistributedMinigame.load(self)
loadBase = 'phase_4/models/minigames/'
loadBaseShip = 'phase_5/models/props/'
self.sndAmbience = loader.loadSfx('phase_4/audio/sfx/AV_ambient_water.ogg')
self.environModel = loader.loadModel(loadBase + 'diving_game')
self.boatModel = self.environModel.find('**/boat')
self.skyModel = self.environModel.find('**/sky')
self.waterModel = self.environModel.find('**/seawater')
self.frontMap = self.environModel.find('**/sea_front')
self.frontMap.setY(3)
self.frontMap.setBin('fixed', 0)
self.frontMap.setDepthTest(0)
self.waterModel.setY(1.0)
bubbleModel = self.environModel.find('**/bubbles1')
bubbleModel.setY(1.0)
bubbleModel = self.environModel.find('**/bubbles2')
bubbleModel.setY(1.0)
bubbleModel = self.environModel.find('**/bubbles3')
bubbleModel.setY(1.0)
bubbleModel = self.environModel.find('**/bubbles4')
bubbleModel.setY(1.0)
bubbleModel = self.environModel.find('**/bubbles5')
bubbleModel.setY(1.0)
self.mapModel = loader.loadModel(loadBase + 'diving_game')
boatMap = self.mapModel.find('**/boat')
skyMap = self.mapModel.find('**/sky')
frontMap = self.mapModel.find('**/sea_front')
skyMap.hide()
frontMap.hide()
boatMap.setZ(28.5)
self.crabs = []
self.spawners = []
self.toonSDs = {}
avId = self.localAvId
toonSD = DivingGameToonSD.DivingGameToonSD(avId, self)
self.toonSDs[avId] = toonSD
toonSD.load()
crabSoundName = 'King_Crab.ogg'
crabSoundPath = 'phase_4/audio/sfx/%s' % crabSoundName
self.crabSound = loader.loadSfx(crabSoundPath)
treasureSoundName = 'SZ_DD_treasure.ogg'
treasureSoundPath = 'phase_4/audio/sfx/%s' % treasureSoundName
self.treasureSound = loader.loadSfx(treasureSoundPath)
hitSoundName = 'diving_game_hit.ogg'
hitSoundPath = 'phase_4/audio/sfx/%s' % hitSoundName
self.hitSound = loader.loadSfx(hitSoundPath)
self.music = loader.loadMusic('phase_4/audio/bgm/MG_Target.ogg')
self.addSound('dropGold', 'diving_treasure_drop_off.ogg', 'phase_4/audio/sfx/')
self.addSound('getGold', 'diving_treasure_pick_up.ogg', 'phase_4/audio/sfx/')
self.swimSound = loader.loadSfx('phase_4/audio/sfx/diving_swim_loop.ogg')
self.swimSound.setVolume(0.0)
self.swimSound.setPlayRate(1.0)
self.swimSound.setLoop(True)
self.swimSound.play()
    def addSound(self, name, soundName, path = None):
        """Load a sound effect and register it in self.soundTable under name.

        If path is given it also becomes the default directory for later
        addSound calls (self.soundPath is sticky between calls).
        """
        if not hasattr(self, 'soundTable'):
            self.soundTable = {}
        if path:
            self.soundPath = path
        soundSource = '%s%s' % (self.soundPath, soundName)
        self.soundTable[name] = loader.loadSfx(soundSource)
def playSound(self, name, volume = 1.0):
self.soundTable[name].setVolume(1.0)
self.soundTable[name].play()
    def unload(self):
        """Release the models, sounds and per-toon state created by load()."""
        self.notify.debug('unload')
        DistributedMinigame.unload(self)
        self.mapModel.removeNode()
        del self.mapModel
        if hasattr(self, 'soundTable'):
            del self.soundTable
        del self.sndAmbience
        del self.hitSound
        del self.crabSound
        del self.treasureSound
        self.swimSound.stop()
        del self.swimSound
        self.environModel.removeNode()
        del self.environModel
        self.removeChildGameFSM(self.gameFSM)
        for avId in self.toonSDs.keys():
            toonSD = self.toonSDs[avId]
            toonSD.unload()
        del self.toonSDs
        del self.gameFSM
        del self.music
    def fishCollision(self, collEntry):
        """Collision event handler: a toon's sphere hit a fish or a crab.

        The from-node is named after the colliding toon's avId.  The
        into-node name encodes the target: 'crabby<N>' for crabs, otherwise
        a fish node whose name carries the spawner id at index 2 and the
        spawn id in the remainder (see DivingFishSpawn).
        """
        avId = int(collEntry.getFromNodePath().getName())
        toonSD = self.toonSDs[avId]
        name = collEntry.getIntoNodePath().getName()
        if len(name) >= 7:
            if name[0:6] == 'crabby':
                self.sendUpdate('handleCrabCollision', [avId, toonSD.status])
            else:
                # name[2] is the single-digit spawner id, the rest the spawn id.
                spawnerId = int(name[2])
                spawnId = int(name[3:len(name)])
                if spawnId in self.spawners[spawnerId].fishArray:
                    self.sendUpdate('handleFishCollision', [avId, spawnId, spawnerId, toonSD.status])
def fishSpawn(self, timestamp, fishcode, spawnerId, offset):
if self.dead is 1:
return
ts = globalClockDelta.localElapsedTime(timestamp)
if not hasattr(self, 'spawners'):
return
if abs(self.spawners[spawnerId].lastSpawn - timestamp) < 150:
return
fish = self.spawners[spawnerId].createFish(fishcode)
fish.offset = offset
fish.setPos(self.spawners[spawnerId].position)
func = Func(self.fishRemove, fish.code)
self.spawners[spawnerId].lastSpawn = timestamp
iName = '%s %s' % (fish.name, self.iCount)
self.iCount += 1
if fish.name == 'clown':
fish.moveLerp = Sequence(LerpPosInterval(fish, duration=8 * self.SPEEDMULT * self.LAG_COMP, startPos=self.spawners[spawnerId].position, pos=self.spawners[spawnerId].position + Point3(50 * self.spawners[spawnerId].direction, 0, (offset - 4) / 2.0), name=iName), func)
fish.specialLerp = Sequence()
elif fish.name == 'piano':
fish.moveLerp = Sequence(LerpPosInterval(fish, duration=5 * self.SPEEDMULT * self.LAG_COMP, startPos=self.spawners[spawnerId].position, pos=self.spawners[spawnerId].position + Point3(50 * self.spawners[spawnerId].direction, 0, (offset - 4) / 2.0), name=iName), func)
fish.specialLerp = Sequence()
elif fish.name == 'pbj':
fish.moveLerp = Sequence(LerpFunc(fish.setX, duration=12 * self.SPEEDMULT * self.LAG_COMP, fromData=self.spawners[spawnerId].position.getX(), toData=self.spawners[spawnerId].position.getX() + 50 * self.spawners[spawnerId].direction, name=iName), func)
fish.specialLerp = LerpFunc(self.pbjMove, duration=5 * self.SPEEDMULT * self.LAG_COMP, fromData=0, toData=6.28318, extraArgs=[fish, self.spawners[spawnerId].position.getZ()], blendType='easeInOut')
elif fish.name == 'balloon':
fish.moveLerp = Sequence(LerpPosInterval(fish, duration=10 * self.SPEEDMULT * self.LAG_COMP, startPos=self.spawners[spawnerId].position, pos=self.spawners[spawnerId].position + Point3(50 * self.spawners[spawnerId].direction, 0, (offset - 4) / 2.0), name=iName), func)
fish.specialLerp = Sequence(Wait(offset / 10.0 * 2 + 1.5), Parallel(LerpScaleInterval(fish, duration=0.3, startScale=Vec3(2, 2, 2), scale=Vec3(5, 3, 5), blendType='easeInOut')), Wait(1.0), Parallel(LerpScaleInterval(fish, duration=0.4, startScale=Vec3(5, 3, 5), scale=Vec3(2, 2, 2), blendType='easeInOut')))
elif fish.name == 'bear' or fish.name == 'nurse':
fish.moveLerp = Sequence(LerpPosInterval(fish, duration=20 * self.LAG_COMP, startPos=self.spawners[spawnerId].position, pos=self.spawners[spawnerId].position + Point3(50 * self.spawners[spawnerId].direction, 0, 0), name=iName), func)
fish.specialLerp = Sequence()
fish.moveLerp.start(ts)
fish.specialLerp.loop(ts)
def pbjMove(self, x, fish, Z):
z = math.sin(x + fish.offset * 3) * 3
fish.setZ(z + Z)
def getIntroMovie(self):
seq = Sequence()
seq.append(Wait(2.0))
seq.append(LerpFunc(camera.setZ, duration=5, fromData=36, toData=-23, blendType='easeInOut', name='intro'))
seq.append(Wait(2.0))
seq.append(LerpFunc(camera.setZ, duration=5, fromData=-23, toData=39, blendType='easeInOut', name='intro'))
return seq
    def onstage(self):
        """Place the local toon, camera, scenery and mini-map for the intro."""
        self.notify.debug('onstage')
        DistributedMinigame.onstage(self)
        base.localAvatar.collisionsOff()
        DistributedSmoothNode.activateSmoothing(1, 1)
        numToons = self.numPlayers
        # One treasure chest per participating toon.
        self.NUMTREASURES = numToons
        camera.reparentTo(render)
        camera.setZ(36)
        camera.setHpr(0, 0, 0)
        camera.setX(0)
        base.camLens.setMinFov(31 / (4.0 / 3.0))
        camera.setY(-54)
        base.camLens.setFar(1500)
        self.introMovie = self.getIntroMovie()
        self.introMovie.start()
        self.accept('FishHit', self.fishCollision)
        toonSD = self.toonSDs[self.localAvId]
        toonSD.enter()
        toonSD.fsm.request('normal')
        toon = base.localAvatar
        toon.reparentTo(render)
        toon.setPos(-9, -1, 36)
        self.__placeToon(self.localAvId)
        self.arrowKeys = ArrowKeys.ArrowKeys()
        self.xVel = 0
        self.zVel = 0
        # Two probe nodes above/below the toon used to derive swim direction.
        self.orientNode = toon.attachNewNode('orientNode')
        self.orientNode.setPos(0, 0, 1)
        self.orientNode2 = toon.attachNewNode('orientNode')
        self.orientNode2.setPos(0, 0, -1)
        self.environNode = render.attachNewNode('environNode')
        self.environModel.reparentTo(self.environNode)
        self.environModel.setScale(2.8, 2.8, 2.73)
        self.environModel.setPos(0, 0.5, -41)
        self.skyModel.setScale(1.3, 1.0, 1.0)
        boatoff = 6.75
        self.boatModel.reparentTo(self.environNode)
        self.boatModel.setPos(0, 3.0, 40 - boatoff)
        self.boatModel.setScale(2.8)
        # Intangible sphere on the boat: fires 'reach-boat' for deliveries.
        cSphere = CollisionSphere(0.0, 0.0, 2.0, 3.0)
        cSphere.setTangible(0)
        name = 'boat'
        cSphereNode = CollisionNode(name)
        cSphereNode.setIntoCollideMask(DivingGameGlobals.CollideMask)
        cSphereNode.addSolid(cSphere)
        self.boatNode = cSphereNode
        self.boatCNP = self.boatModel.attachNewNode(cSphereNode)
        self.accept('reach-boat', self.__boatReached)
        # Gentle looping roll animation for the boat.
        self.boatTilt = Sequence(LerpFunc(self.boatModel.setR, duration=5, fromData=5, toData=-5, blendType='easeInOut', name='tilt'), LerpFunc(self.boatModel.setR, duration=5, fromData=-5, toData=5, blendType='easeInOut', name='tilt'))
        self.boatTilt.loop()
        # Mini-map in the top-right corner, drawn at reduced scale and alpha;
        # hidden until the game actually starts.
        self.mapScaleRatio = 40
        self.mapModel.reparentTo(base.a2dTopRight)
        self.mapModel.setScale(1.0 / self.mapScaleRatio)
        self.mapModel.setTransparency(1)
        self.mapModel.setPos(-0.22, 0.0, -1.3)
        self.mapModel.setColorScale(1, 1, 1, 0.7)
        self.mapModel.hide()
        if self.sndAmbience:
            self.sndAmbience.setLoop(True)
            self.sndAmbience.play()
            self.sndAmbience.setVolume(0.01)
def offstage(self):
self.notify.debug('offstage')
DistributedMinigame.offstage(self)
self.introMovie.finish()
self.boatTilt.finish()
self.mapModel.hide()
DistributedSmoothNode.activateSmoothing(1, 0)
for avId in self.toonSDs.keys():
self.toonSDs[avId].exit()
base.camLens.setFar(ToontownGlobals.DefaultCameraFar)
base.camLens.setMinFov(settings['fov'] / (4.0 / 3.0))
self.arrowKeys.destroy()
del self.arrowKeys
self.environNode.removeNode()
del self.environNode
if None != self.sndAmbience:
self.sndAmbience.stop()
for avId in self.avIdList:
av = self.getAvatar(avId)
if av:
av.dropShadow.show()
av.setAnimState('neutral', 1.0)
self.dead = 1
self.__killCrabTask()
for spawner in self.spawners:
spawner.destroy()
del spawner
del self.spawners
for crab in self.crabs:
crab.moveLerp.finish()
crab.moveLerp = None
crab.cleanup()
del crab
if hasattr(self, 'treasures') and self.treasures:
for i in xrange(self.NUMTREASURES):
self.treasures[i].destroy()
del self.treasures
if hasattr(self, 'cSphereNodePath1'):
self.cSphereNodePath1.removeNode()
del self.cSphereNodePath1
if hasattr(self, 'cSphereNodePath1'):
self.cSphereNodePath2.removeNode()
del self.cSphereNodePath2
if hasattr(self, 'remoteToonCollNPs'):
for np in self.remoteToonCollNPs.values():
np.removeNode()
del self.remoteToonCollNPs
self.pusher = None
self.cTrav = None
self.cTrav2 = None
base.localAvatar.collisionsOn()
return
    def handleDisabledAvatar(self, avId):
        """A participating toon disconnected: end the game and drop its state."""
        self.dead = 1
        self.notify.debug('handleDisabledAvatar')
        self.notify.debug('avatar ' + str(avId) + ' disabled')
        self.toonSDs[avId].exit(unexpectedExit=True)
        del self.toonSDs[avId]
def __placeToon(self, avId):
toon = self.getAvatar(avId)
i = self.avIdList.index(avId)
numToons = float(self.numPlayers)
x = -10 + i * 5
toon.setPos(x, -1, 36)
toon.setHpr(180, 180, 0)
    def getTelemetryLimiter(self):
        """Clamp every avatar's reported heading/pitch to the diving pose."""
        return TLGatherAllAvs('DivingGame', Functor(DivingGameRotationLimiter, 180, 180))
def setGameReady(self):
self.notify.debug('setGameReady')
if not self.hasLocalToon:
return
if DistributedMinigame.setGameReady(self):
return
self.dead = 0
self.difficultyPatterns = {ToontownGlobals.ToontownCentral: [1,
1.5,
65,
3],
ToontownGlobals.DonaldsDock: [1,
1.3,
65,
1],
ToontownGlobals.DaisyGardens: [2,
1.2,
65,
1],
ToontownGlobals.MinniesMelodyland: [2,
1.0,
65,
1],
ToontownGlobals.TheBrrrgh: [3,
1.0,
65,
1],
ToontownGlobals.DonaldsDreamland: [3,
1.0,
65,
1],
ToontownGlobals.ForestsEnd: [3,
1.0,
65,
1]}
pattern = self.difficultyPatterns[self.getSafezoneId()]
self.NUMCRABS = pattern[0]
self.SPEEDMULT = pattern[1]
self.TIME = pattern[2]
loadBase = 'phase_4/models/char/'
for i in xrange(self.NUMCRABS):
self.crabs.append(Actor.Actor(loadBase + 'kingCrab-zero', {'anim': loadBase + 'kingCrab-swimLOOP'}))
for i in xrange(len(self.crabs)):
crab = self.crabs[i]
crab.reparentTo(render)
crab.name = 'king'
crab.crabId = i
cSphere = CollisionSphere(0.0, 0.0, 1, 1.3)
cSphereNode = CollisionNode('crabby' + str(i))
cSphereNode.addSolid(cSphere)
cSphereNode.setFromCollideMask(BitMask32.allOff())
cSphereNode.setIntoCollideMask(DivingGameGlobals.CollideMask)
cSphereNodePath = crab.attachNewNode(cSphereNode)
cSphereNodePath.setScale(1, 3, 1)
self.accept('hitby-crabby' + str(i), self.fishCollision)
if i % 2 is 0:
crab.setPos(20, 0, -40)
crab.direction = -1
else:
crab.setPos(-20, 0, -40)
crab.direction = 1
crab.loop('anim')
crab.setScale(1, 0.3, 1)
crab.moveLerp = Sequence()
self.collHandEvent = CollisionHandlerEvent()
self.cTrav = CollisionTraverser('DistributedDiverGame')
self.cTrav2 = CollisionTraverser('DistributedDiverGame')
self.collHandEvent.addInPattern('reach-%in')
self.collHandEvent.addAgainPattern('reach-%in')
self.collHandEvent.addInPattern('into-%in')
self.collHandEvent.addInPattern('hitby-%in')
loadBase = 'phase_4/models/minigames/'
self.treasures = []
self.chestIcons = {}
for i in xrange(self.NUMTREASURES):
self.chestIcons[i] = loader.loadModel(loadBase + 'treasure_chest')
self.chestIcons[i].reparentTo(self.mapModel)
self.chestIcons[i].setScale(1.5)
treasure = DivingTreasure.DivingTreasure(i)
self.accept('grab-' + str(i), self.__treasureGrabbed)
self.collHandEvent.addInPattern('grab-%in')
self.collHandEvent.addAgainPattern('grab-%in')
self.treasures.append(treasure)
self.cTrav.traverse(render)
spawnX = 24 * self.LAG_COMP
spawnY = 0.6
self.spawners.append(DivingFishSpawn.DivingFishSpawn(0, 1, Point3(-spawnX, spawnY, 25), self.collHandEvent))
self.spawners.append(DivingFishSpawn.DivingFishSpawn(1, -1, Point3(spawnX, spawnY, 16), self.collHandEvent))
self.spawners.append(DivingFishSpawn.DivingFishSpawn(2, 1, Point3(-spawnX, spawnY, 6), self.collHandEvent))
self.spawners.append(DivingFishSpawn.DivingFishSpawn(3, -1, Point3(spawnX, spawnY, -4), self.collHandEvent))
self.spawners.append(DivingFishSpawn.DivingFishSpawn(4, 1, Point3(-spawnX, spawnY, -15), self.collHandEvent))
self.spawners.append(DivingFishSpawn.DivingFishSpawn(5, -1, Point3(spawnX, spawnY, -23), self.collHandEvent))
for spawner in self.spawners:
spawner.lastSpawn = 0
cSphere = CollisionSphere(0.0, 0.0, 0.0, 1.4)
cSphereNode = CollisionNode('%s' % self.localAvId)
cSphereNode.addSolid(cSphere)
cSphereNode.setFromCollideMask(DivingGameGlobals.CollideMask)
cSphereNode.setIntoCollideMask(BitMask32.allOff())
headparts = base.localAvatar.getHeadParts()
pos = headparts[0].getPos()
self.cSphereNodePath1 = base.localAvatar.attachNewNode(cSphereNode)
self.cSphereNodePath1.setPos(pos + Point3(0, 1.5, 1))
self.cTrav.addCollider(self.cSphereNodePath1, self.collHandEvent)
cSphere = CollisionSphere(0.0, 0.0, 0.0, 1.4)
cSphereNode = CollisionNode('%s' % self.localAvId)
cSphereNode.addSolid(cSphere)
cSphereNode.setFromCollideMask(DivingGameGlobals.CollideMask)
cSphereNode.setFromCollideMask(BitMask32.allOff())
cSphereNode.setIntoCollideMask(BitMask32.allOff())
headparts = base.localAvatar.getHeadParts()
pos = headparts[0].getPos()
self.cSphereNodePath2 = base.localAvatar.attachNewNode(cSphereNode)
self.cSphereNodePath2.setPos(pos + Point3(0, 1.5, -1))
self.cTrav.addCollider(self.cSphereNodePath2, self.collHandEvent)
self.pusher = CollisionHandlerPusher()
self.pusher.addCollider(self.cSphereNodePath1, base.localAvatar)
self.pusher.addCollider(self.cSphereNodePath2, base.localAvatar)
self.pusher.setHorizontal(0)
self.cTrav2.addCollider(self.cSphereNodePath1, self.pusher)
self.cTrav2.addCollider(self.cSphereNodePath2, self.pusher)
self.remoteToonCollNPs = {}
for avId in self.remoteAvIdList:
toon = self.getAvatar(avId)
if toon:
headparts = toon.getHeadParts()
pos = headparts[0].getPos()
cSphere = CollisionSphere(0.0, 0.0, 0.0, 1.4)
cSphereNode = CollisionNode('%s' % avId)
cSphereNode.addSolid(cSphere)
cSphereNode.setCollideMask(DivingGameGlobals.CollideMask)
cSphereNP = toon.attachNewNode(cSphereNode)
cSphereNP.setPos(pos + Point3(0, 1.5, 1))
self.remoteToonCollNPs[int(str(avId) + str(1))] = cSphereNP
cSphere = CollisionSphere(0.0, 0.0, 0.0, 1.4)
cSphereNode = CollisionNode('%s' % avId)
cSphereNode.addSolid(cSphere)
cSphereNode.setCollideMask(DivingGameGlobals.CollideMask)
cSphereNP = toon.attachNewNode(cSphereNode)
cSphereNP.setPos(pos + Point3(0, 1.5, -1))
self.remoteToonCollNPs[int(str(avId) + str(1))] = cSphereNP
toonSD = DivingGameToonSD.DivingGameToonSD(avId, self)
self.toonSDs[avId] = toonSD
toonSD.load()
toonSD.enter()
toonSD.fsm.request('normal')
for avId in self.remoteAvIdList:
toon = self.getAvatar(avId)
if toon:
toon.reparentTo(render)
self.__placeToon(avId)
toon.startSmooth()
self.remoteToons = {}
for avId in self.remoteAvIdList:
toon = self.getAvatar(avId)
self.remoteToons[avId] = toon
    def setGameStart(self, timestamp):
        """Broadcast handler: the AI started the game; show UI, enter swim."""
        if not self.hasLocalToon:
            return
        DistributedMinigame.setGameStart(self, timestamp)
        self.notify.debug('setGameStart')
        # Treasure counter panel in the top-left corner.
        self.treasurePanel = TreasureScorePanel.TreasureScorePanel()
        self.treasurePanel.setPos(0.145, 0, -0.27)
        self.treasurePanel.reparentTo(base.a2dTopLeft)
        self.treasurePanel.makeTransparent(0.7)
        self.introMovie.finish()
        self.gameFSM.request('swim')
    def enterOff(self):
        """FSM state: idle state before the game starts."""
        self.notify.debug('enterOff')
    def exitOff(self):
        # Nothing to tear down for the idle state.
        pass
    def enterSwim(self):
        """FSM state: main gameplay — music, timer, mini-map meters, tasks."""
        self.notify.debug('enterSwim')
        base.playMusic(self.music, looping=1, volume=0.9)
        # Interval used to freeze/unfreeze the local toon when it is hit.
        self.localLerp = Sequence()
        self.timer = ToontownTimer.ToontownTimer()
        self.timer.posInTopRightCorner()
        self.timer.setTime(self.TIME)
        self.timer.countdown(self.TIME, self.timerExpired)
        self.mapModel.show()
        # Small laff meters on the mini-map mark each toon's position.
        self.mapAvatars = {}
        avatarScale = 0.025 * self.mapScaleRatio
        for avId in self.remoteAvIdList:
            avatar = base.cr.doId2do.get(avId, False)
            if avatar != False:
                self.mapAvatars[avId] = LaffMeter.LaffMeter(avatar.style, avatar.hp, avatar.maxHp)
                self.mapAvatars[avId].reparentTo(self.mapModel)
                self.mapAvatars[avId].setScale(avatarScale)
                self.mapAvatars[avId].start()
        avatar = base.cr.doId2do[self.localAvId]
        self.mapAvatars[self.localAvId] = LaffMeter.LaffMeter(avatar.style, avatar.hp, avatar.maxHp)
        self.mapAvatars[self.localAvId].reparentTo(self.mapModel)
        self.mapAvatars[self.localAvId].setScale(avatarScale)
        self.mapAvatars[self.localAvId].start()
        self.accept('resetClock', self.__resetClock)
        self.__spawnUpdateLocalToonTask()
        self.__spawnCrabTask()
        self.__spawnTreasureBoundsTask()
    def __resetClock(self, tOffset):
        """Shift the game clock/timer by tOffset seconds (AI resync event)."""
        self.notify.debug('resetClock')
        self.gameStartTime += tOffset
        self.timer.countdown(self.timer.currentTime + tOffset, self.timerExpired)
    def timerExpired(self):
        """The local countdown hit zero: mark the game dead and end it."""
        self.notify.debug('local timer expired')
        self.dead = 1
        self.gameOver()
    def __initPosBroadcast(self):
        """Prepare throttled position broadcasting for the local toon."""
        self.__posBroadcastPeriod = 0.2
        self.__timeSinceLastPosBroadcast = 0.0
        self.__lastPosBroadcast = self.getAvatar(self.localAvId).getPos()
        self.__storeStop = 0
        lt = self.getAvatar(self.localAvId)
        lt.d_clearSmoothing()
        lt.sendCurrentPosition()
    def __posBroadcast(self, dt):
        """Broadcast the local toon's full pos/hpr at most every 0.2 s."""
        self.__timeSinceLastPosBroadcast += dt
        if self.__timeSinceLastPosBroadcast > self.__posBroadcastPeriod:
            self.__timeSinceLastPosBroadcast -= self.__posBroadcastPeriod
            self.getAvatar(self.localAvId).cnode.broadcastPosHprFull()
    def __spawnTreasureBoundsTask(self):
        """Start (or restart) the per-frame treasure bounds/map task."""
        taskMgr.remove(self.TREASURE_BOUNDS_TASK)
        taskMgr.add(self.__treasureBoundsTask, self.TREASURE_BOUNDS_TASK)
    def __killTreasureBoundsTask(self):
        """Stop the per-frame treasure bounds/map task."""
        taskMgr.remove(self.TREASURE_BOUNDS_TASK)
def __treasureBoundsTask(self, task):
for i in xrange(self.NUMTREASURES):
self.chestIcons[i].setPos(self.treasures[i].chest.getPos(render) / self.MAP_DIV)
self.chestIcons[i].setZ(self.chestIcons[i].getZ() + self.MAP_OFF)
if self.treasures[i].treasureNode.getZ() < -36:
self.treasures[i].treasureNode.setZ(-36)
if self.treasures[i].treasureNode.getX() < -20:
self.treasures[i].treasureNode.setX(-20)
if self.treasures[i].treasureNode.getX() > 20:
self.treasures[i].treasureNode.setX(20)
return Task.cont
    def incrementScore(self, avId, newSpot, timestamp):
        """Broadcast handler: avId delivered a treasure; respawn its chest.

        newSpot is a sea-floor slot index from the AI, shifted by -15 into
        world x coordinates.  The carried chest jumps into the boat while
        shrinking, then regrows at the new floor position.
        """
        if not self.hasLocalToon:
            return
        newSpot += -15
        ts = globalClockDelta.localElapsedTime(timestamp)
        toonSD = self.toonSDs[avId]
        if avId == self.localAvId:
            # Delivery acknowledged; allow reporting the next one.
            self.reachedFlag = 0
        if toonSD.status == 'treasure' and self.treasures and self.chestIcons:
            for i in xrange(self.NUMTREASURES):
                if self.treasures[i].grabbedId == avId:
                    self.treasures[i].treasureNode.wrtReparentTo(render)
                    self.treasures[i].grabbedId = 0
                    seq = Sequence()
                    shrink = LerpScaleInterval(self.treasures[i].treasureNode, duration=1.0, startScale=self.treasures[i].treasureNode.getScale(), scale=Vec3(0.001, 0.001, 0.001), blendType='easeIn')
                    shrinkIcon = LerpScaleInterval(self.chestIcons[i], duration=1.0, startScale=self.chestIcons[i].getScale(), scale=Vec3(0.001, 0.001, 0.001), blendType='easeIn')
                    jump = ProjectileInterval(self.treasures[i].treasureNode, duration=1.0, startPos=self.treasures[i].treasureNode.getPos(), endPos=Point3(0, 0, 40), gravityMult=0.7)
                    shrinkJump = Parallel(shrink, shrinkIcon, jump)
                    toonSD.fsm.request('normal')
                    grow = LerpScaleInterval(self.treasures[i].treasureNode, duration=1.0, scale=self.treasures[i].treasureNode.getScale(), startScale=Vec3(0.001, 0.001, 0.001), blendType='easeIn')
                    growIcon = LerpScaleInterval(self.chestIcons[i], duration=1.0, scale=self.chestIcons[i].getScale(), startScale=Vec3(0.001, 0.001, 0.001), blendType='easeIn')
                    place = Parallel(Func(self.treasures[i].treasureNode.setPos, Vec3(newSpot, 0.25, -36)), Func(self.treasures[i].treasureNode.setHpr, Vec3(0, 0, 0)))
                    growItems = Parallel(grow, growIcon)
                    # Chest becomes grabbable again once it has regrown.
                    resetChest = Func(self.treasures[i].chestNode.setIntoCollideMask, DivingGameGlobals.CollideMask)
                    seq = Sequence(shrinkJump, Wait(1.5), place, growItems, resetChest)
                    self.treasures[i].moveLerp.pause()
                    self.treasures[i].moveLerp = seq
                    self.treasures[i].moveLerp.start(ts)
        # Team score: every toon's delivery bumps the shared counter.
        self.playSound('dropGold')
        self.treasurePanel.incrScore()
def __boatReached(self, collEntry):
toonSD = self.toonSDs[self.localAvId]
if toonSD.status == 'treasure' and not self.reachedFlag:
self.sendUpdate('treasureRecovered')
self.reachedFlag = 1
    def __treasureGrabbed(self, collEntry):
        """Collision handler: a toon's sphere touched a chest ('grab-<id>')."""
        avId = int(collEntry.getFromNodePath().getName())
        chestId = int(collEntry.getIntoNodePath().getName())
        toonSD = self.toonSDs[avId]
        # Only request a pickup when idle and no other pickup is pending.
        if toonSD.status == 'normal' and self.grabbingTreasure == -1:
            self.grabbingTreasure = chestId
            self.sendUpdate('pickupTreasure', [chestId])
    def setTreasureDropped(self, avId, timestamp):
        """Broadcast handler: avId lost their treasure; let it sink back down."""
        if not hasattr(self, 'treasures'):
            return
        ts = globalClockDelta.localElapsedTime(timestamp)
        for i in xrange(self.NUMTREASURES):
            if self.treasures[i].grabbedId == avId:
                self.treasures[i].grabbedId = 0
                toonSD = self.toonSDs[avId]
                # Sink duration scales with height above the floor (z = -36).
                dist = abs(36.0 + self.treasures[i].treasureNode.getZ(render))
                delta = dist / 72.0
                dur = 10 * delta
                self.treasures[i].treasureNode.wrtReparentTo(render)
                # Ungrabbable for 1 s so it is not instantly re-grabbed.
                self.treasures[i].chestNode.setIntoCollideMask(BitMask32.allOff())
                resetChest = Func(self.treasures[i].chestNode.setIntoCollideMask, DivingGameGlobals.CollideMask)
                self.treasures[i].moveLerp.pause()
                self.treasures[i].moveLerp = Parallel(Sequence(Wait(1.0), resetChest), LerpFunc(self.treasures[i].treasureNode.setZ, duration=dur, fromData=self.treasures[i].treasureNode.getZ(render), toData=-36, blendType='easeIn'))
                self.treasures[i].moveLerp.start(ts)
    def performCrabCollision(self, avId, timestamp):
        """Broadcast handler: avId touched a crab; freeze them for 3 seconds."""
        if not self.hasLocalToon:
            return
        ts = globalClockDelta.localElapsedTime(timestamp)
        toonSD = self.toonSDs[avId]
        toon = self.getAvatar(avId)
        # Attenuate the hit sound by distance from the local toon.
        distance = base.localAvatar.getDistance(toon)
        volume = 0
        soundRange = 15.0
        if distance < soundRange:
            volume = (soundRange - distance) / soundRange
        if toonSD.status == 'normal' or toonSD.status == 'treasure':
            self.localLerp.finish()
            self.localLerp = Sequence(Func(toonSD.fsm.request, 'freeze'), Wait(3.0), Func(toonSD.fsm.request, 'normal'))
            self.localLerp.start(ts)
        # NOTE(review): volume is applied after play(); order kept as-is to
        # preserve the original audible behaviour.
        self.hitSound.play()
        self.hitSound.setVolume(volume)
def performFishCollision(self, avId, spawnId, spawnerId, timestamp):
if not hasattr(self, 'spawners'):
return
toonSD = self.toonSDs[avId]
ts = globalClockDelta.localElapsedTime(timestamp)
toon = self.getAvatar(avId)
distance = base.localAvatar.getDistance(toon)
volume = 0
soundRange = 15.0
if distance < soundRange:
volume = (soundRange - distance) / soundRange
if toonSD.status == 'normal' or toonSD.status == 'treasure':
self.localLerp.finish()
self.localLerp = Sequence(Func(toonSD.fsm.request, 'freeze'), Wait(3.0), Func(toonSD.fsm.request, 'normal'))
self.localLerp.start(ts)
if spawnId in self.spawners[spawnerId].fishArray:
fish = self.spawners[spawnerId].fishArray[spawnId]
endX = self.spawners[spawnerId].position.getX()
if fish.name == 'clown':
fishSoundName = 'Clownfish.ogg'
elif fish.name == 'pbj':
fishSoundName = 'PBJ_Fish.ogg'
elif fish.name == 'balloon':
fishSoundName = 'BalloonFish.ogg'
elif fish.name == 'bear':
fishSoundName = 'Bear_Acuda.ogg'
elif fish.name == 'nurse':
fishSoundName = 'Nurse_Shark.ogg'
elif fish.name == 'piano':
fishSoundName = 'Piano_Tuna.ogg'
else:
fishSoundName = ' '
fishSoundPath = 'phase_4/audio/sfx/%s' % fishSoundName
fish.sound = loader.loadSfx(fishSoundPath)
if fish.sound:
fish.sound.play()
fish.sound.setVolume(volume)
self.hitSound.play()
self.hitSound.setVolume(volume)
if fish.name is 'bear' or fish.name is 'nurse':
return
colList = fish.findAllMatches('**/fc*')
for col in colList:
col.removeNode()
fish.moveLerp.pause()
if fish.name == 'clown' or fish.name == 'piano':
if fish.name != 'piano':
endHpr = Vec3(fish.getH() * -1, 0, 0)
elif fish.direction == -1:
endHpr = Vec3(180, 0, 0)
else:
endHpr = Vec3(0, 0, 0)
fish.moveLerp = Sequence(LerpHprInterval(fish, duration=0.4, startHpr=fish.getHpr(), hpr=endHpr), LerpFunc(fish.setX, duration=1.5, fromData=fish.getX(), toData=endX), Func(self.fishRemove, str(spawnerId) + str(spawnId)))
elif fish.name == 'pbj':
fish.moveLerp = Sequence(LerpFunc(fish.setX, duration=2, fromData=fish.getX(), toData=endX), Func(self.fishRemove, str(spawnerId) + str(spawnId)))
elif fish.name == 'balloon':
fish.specialLerp.pause()
anim = Func(fish.play, 'anim', fromFrame=110, toFrame=200)
fish.setH(180)
speed = Func(fish.setPlayRate, 3.0, 'anim')
fish.moveLerp = Sequence(Func(fish.stop, 'anim'), speed, anim, Wait(1.0), LerpScaleInterval(fish, duration=0.8, startScale=fish.getScale, scale=0.001, blendType='easeIn'), Func(self.fishRemove, str(spawnerId) + str(spawnId)))
fish.sound.setTime(11.5)
fish.moveLerp.start(ts)
    def fishRemove(self, code):
        """Destroy a fish and drop it from its spawner.

        code is '<spawnerId><spawnId>' where spawnerId is a single digit.
        """
        spawnId = int(code[1:len(code)])
        spawnerId = int(code[0])
        if spawnId in self.spawners[spawnerId].fishArray:
            fish = self.spawners[spawnerId].fishArray[spawnId]
            fish.specialLerp.finish()
            fish.moveLerp.finish()
            fish.specialLerp = None
            fish.moveLerp = None
            fish.cleanup()
            del fish
            del self.spawners[spawnerId].fishArray[spawnId]
        return
    def setTreasureGrabbed(self, avId, chestId):
        """Broadcast handler: the AI awarded chestId to avId; attach it."""
        if not self.hasLocalToon:
            return
        if self.grabbingTreasure == chestId:
            # Our own pending pickup request was the one that succeeded.
            self.grabbingTreasure = -1
        toonSD = self.toonSDs.get(avId)
        if toonSD and toonSD.status == 'normal':
            toonSD.fsm.request('treasure')
            self.treasures[chestId].moveLerp.pause()
            self.treasures[chestId].moveLerp = Sequence()
            # While carried the chest cannot be grabbed by anyone else.
            self.treasures[chestId].chestNode.setIntoCollideMask(BitMask32.allOff())
            self.treasures[chestId].treasureNode.reparentTo(self.getAvatar(avId))
            headparts = self.getAvatar(avId).getHeadParts()
            pos = headparts[0].getPos()
            self.treasures[chestId].treasureNode.setPos(pos + Point3(0, 0.2, 3))
            self.treasures[chestId].grabbedId = avId
            timestamp = globalClockDelta.getFrameNetworkTime()  # NOTE(review): unused
            self.playSound('getGold')
    def __spawnCrabTask(self):
        """Start (or restart) the per-frame crab watchdog task."""
        taskMgr.remove(self.CRAB_TASK)
        taskMgr.add(self.__crabTask, self.CRAB_TASK)
    def __killCrabTask(self):
        """Stop the per-frame crab watchdog task."""
        taskMgr.remove(self.CRAB_TASK)
    def __crabTask(self, task):
        """Per-frame task: when a crab's interval ends, ask the AI to move it.

        Only the first idle crab is reported per frame; the task continues
        every frame so remaining idle crabs are handled on later frames.
        The placeholder Wait(1.0) loop prevents re-reporting the same crab
        while the AI's reply is in flight.
        """
        dt = globalClock.getDt()  # NOTE(review): unused in this task
        for crab in self.crabs:
            if not crab.moveLerp.isPlaying():
                crab.moveLerp = Wait(1.0)
                crab.moveLerp.loop()
                self.sendUpdate('getCrabMoving', [crab.crabId, crab.getX(), crab.direction])
                return Task.cont

        return Task.cont
def setCrabMoving(self, crabId, timestamp, rand1, rand2, crabX, dir):
if self.dead == 1:
self.__killCrabTask()
return
if not hasattr(self, 'crabs'):
return
crab = self.crabs[crabId]
ts = globalClockDelta.localElapsedTime(timestamp)
x = 0
for i in xrange(self.NUMTREASURES):
x += self.treasures[i].treasureNode.getX(render)
x /= self.NUMTREASURES
goalX = int(x + dir * (rand1 / 10.0) * 12 + 4.0)
goalZ = -35 + 5 * (rand2 / 10.0)
xTime = 1 + rand1 / 10.0 * 2
zTime = 0.5 + rand2 / 10.0
wait = rand1 / 10.0 + rand2 / 10.0 + 1
crab.direction *= -1
if goalX > 20:
goalX = 20
elif goalX < -20:
goalX = 20
loc = crab.getPos(render)
distance = base.localAvatar.getDistance(crab)
crabVolume = 0
soundRange = 25.0
if distance < soundRange:
crabVolume = (soundRange - distance) / soundRange
crabSoundInterval = SoundInterval(self.crabSound, loop=0, duration=1.6, startTime=0.0)
seq = Sequence(Wait(wait), LerpPosInterval(crab, duration=xTime, startPos=Point3(crabX, 0, -40), pos=Point3(goalX, 0, -40), blendType='easeIn'), Parallel(Func(self.grabCrapVolume, crab), LerpPosInterval(crab, duration=zTime, startPos=Point3(goalX, 0, -40), pos=Point3(goalX, 0, goalZ), blendType='easeOut')), LerpPosInterval(crab, duration=zTime, startPos=Point3(goalX, 0, goalZ), pos=Point3(goalX, 0, -40), blendType='easeInOut'))
crab.moveLerp.pause()
crab.moveLerp = seq
crab.moveLerp.start(ts)
def grabCrapVolume(self, crab):
if crab:
distance = base.localAvatar.getDistance(crab)
self.crabVolume = 0
soundRange = 25.0
if distance < soundRange:
crabVolume = (soundRange - distance) / soundRange
crabSoundInterval = SoundInterval(self.crabSound, loop=0, duration=1.6, startTime=0.0, volume=crabVolume)
crabSoundInterval.start()
    def __spawnUpdateLocalToonTask(self):
        """Start the per-frame task that drives the local toon's movement."""
        self.__initPosBroadcast()
        taskMgr.remove(self.UPDATE_LOCALTOON_TASK)
        taskMgr.add(self.__updateLocalToonTask, self.UPDATE_LOCALTOON_TASK)
    def __killUpdateLocalToonTask(self):
        """Stop the local toon movement task."""
        taskMgr.remove(self.UPDATE_LOCALTOON_TASK)
    def __updateLocalToonTask(self, task):
        """Per-frame task: apply input, buoyancy, clamping, camera and map."""
        dt = globalClock.getDt()
        toonPos = base.localAvatar.getPos()
        toonHpr = base.localAvatar.getHpr()
        # Velocity decays slowly so the toon coasts when keys are released.
        self.xVel *= 0.99
        self.zVel *= 0.99
        pos = [toonPos[0], toonPos[1], toonPos[2]]
        hpr = [toonHpr[0], toonHpr[1], toonHpr[2]]
        r = 0
        toonSD = self.toonSDs[self.localAvId]
        if toonSD.status == 'normal' or toonSD.status == 'treasure':
            # Left/right keys roll the toon; thrust is applied along the
            # direction defined by the two orientation probe nodes.
            if self.arrowKeys.leftPressed():
                r -= 80
            if self.arrowKeys.rightPressed():
                r += 80
            hpr[2] += r * dt
            pos1 = self.orientNode.getPos(render)
            pos2 = self.orientNode2.getPos(render)
            upVec = Vec2(pos1[0], pos1[2])
            bkVec = Vec2(pos2[0], pos2[2])
            forVec = upVec - Vec2(pos[0], pos[2])
            bckVec = bkVec - Vec2(pos[0], pos[2])
            r = 0
            if self.arrowKeys.upPressed():
                r += 20
                self.xVel = forVec[0] * 8
                self.zVel = forVec[1] * 8
            elif self.arrowKeys.downPressed():
                r -= 20
                self.xVel = bckVec[0] * 4
                self.zVel = bckVec[1] * 4
        # Clamp velocities.
        if self.xVel > 20:
            self.xVel = 20
        elif self.xVel < -20:
            self.xVel = -20
        if self.zVel > 10:
            self.zVel = 10
        elif self.zVel < -10:
            self.zVel = -10
        # Swim loop volume follows speed.
        swimVolume = (abs(self.zVel) + abs(self.xVel)) / 15.0
        self.swimSound.setVolume(swimVolume)
        pos[0] += self.xVel * dt
        pos[1] = -2
        pos[2] += self.zVel * dt
        # A carried treasure makes the toon sink; otherwise it floats up.
        found = 0
        for i in xrange(self.NUMTREASURES):
            if self.treasures[i].grabbedId == self.localAvId:
                found = 1
                i = self.NUMTREASURES + 1
                pos[2] -= 0.8 * dt
        if found == 0:
            pos[2] += 0.8 * dt
        # Keep the toon inside the playfield.
        if pos[2] < -38:
            pos[2] = -38
        elif pos[2] > 36:
            pos[2] = 36
        if pos[0] < -20:
            pos[0] = -20
        elif pos[0] > 20:
            pos[0] = 20
        base.localAvatar.setPos(pos[0], pos[1], pos[2])
        base.localAvatar.setHpr(hpr[0], hpr[1], hpr[2])
        # Project toon positions onto the mini-map.
        posDiv = self.MAP_DIV
        self.mapAvatars[self.localAvId].setPos(pos[0] / posDiv, pos[1] / posDiv, pos[2] / posDiv + self.MAP_OFF)
        for avId in self.remoteAvIdList:
            toon = self.getAvatar(avId)
            if toon:
                pos = toon.getPos()
                self.mapAvatars[avId].setPos(pos / posDiv)
                self.mapAvatars[avId].setZ(self.mapAvatars[avId].getZ() + self.MAP_OFF)
        self.cTrav.traverse(render)
        self.cTrav2.traverse(render)
        self.__posBroadcast(dt)
        # Camera tracks the toon but never looks below the sea floor.
        z = self.getAvatar(self.localAvId).getZ() + 3
        camBottom = math.tan(base.camLens.getVfov() / 2.0 * math.pi / 180) * 54
        z = max(z, -42 + camBottom)
        camera.setZ(z)
        # Ambient water sound gets louder with depth, clamped to [0, 1].
        ambVolume = abs(z - 25.0) / 50.0 + 0.1
        if ambVolume < 0.0:
            ambVolume = 0.0
        if ambVolume > 1.0:
            ambVolume = 1.0
        ambVolume = pow(ambVolume, 0.75)
        self.sndAmbience.setVolume(ambVolume)
        return Task.cont
    def exitSwim(self):
        """Leave the gameplay state: stop tasks, timer, sounds and UI."""
        self.music.stop()
        self.ignore('resetClock')
        self.__killUpdateLocalToonTask()
        self.__killCrabTask()
        self.__killTreasureBoundsTask()
        self.timer.stop()
        self.timer.destroy()
        self.localLerp.finish()
        self.introMovie.finish()
        self.boatTilt.finish()
        self.treasurePanel.cleanup()
        self.mapAvatars[self.localAvId].destroy()
        del self.mapAvatars
        for i in xrange(self.NUMTREASURES):
            del self.chestIcons[i]
        del self.timer
def enterCleanup(self):
    """FSM hook: nothing to do when entering the cleanup state."""
    pass
def exitCleanup(self):
    """FSM hook: nothing to do when leaving the cleanup state."""
    pass
| |
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import with_statement
import socket
import threading
import time
from kombu.entity import Exchange, Queue
from kombu.messaging import Consumer, Producer
from celery import states
from celery.exceptions import TimeoutError
from celery.utils.log import get_logger
from .base import BaseDictBackend
logger = get_logger(__name__)
class BacklogLimitExceeded(Exception):
    """Raised when a result queue holds more state messages than the
    configured backlog limit allows us to fast-forward through."""
def repair_uuid(s):
    """Re-insert the dashes into a dash-stripped UUID string.

    Historically the dashes in UUIDs are removed from AMQ entity names,
    but there is no known reason to.  Hopefully we'll be able to fix
    this in v3.0.
    """
    # Standard UUID grouping is 8-4-4-4-12 hex digits.
    groups = (s[:8], s[8:12], s[12:16], s[16:20], s[20:])
    return "-".join(groups)
class AMQPBackend(BaseDictBackend):
    """Publishes results by sending messages.

    Each task gets its own reply queue (named after the task id with the
    dashes stripped).  Results are stored by publishing to that queue and
    retrieved either by consuming from it (:meth:`consume`,
    :meth:`get_many`) or by polling with ``basic_get``
    (:meth:`get_task_meta`).
    """
    Exchange = Exchange
    Queue = Queue
    Consumer = Consumer
    Producer = Producer
    BacklogLimitExceeded = BacklogLimitExceeded

    supports_native_join = True

    # Publish retries: up to 20 attempts with ~1s between retries.
    retry_policy = {
        "max_retries": 20,
        "interval_start": 0,
        "interval_step": 1,
        "interval_max": 1,
    }

    def __init__(self, connection=None, exchange=None, exchange_type=None,
                 persistent=None, serializer=None, auto_delete=True,
                 **kwargs):
        super(AMQPBackend, self).__init__(**kwargs)
        conf = self.app.conf
        self._connection = connection
        self.queue_arguments = {}
        self.persistent = (conf.CELERY_RESULT_PERSISTENT if persistent is None
                           else persistent)
        # BUGFIX: derive the delivery mode from the *effective* persistence
        # flag.  Previously the raw ``persistent`` argument was used, so a
        # persistence setting coming from CELERY_RESULT_PERSISTENT (i.e.
        # ``persistent=None``) always resulted in "transient" delivery even
        # though the queue/exchange were declared durable.
        delivery_mode = "persistent" if self.persistent else "transient"
        exchange = exchange or conf.CELERY_RESULT_EXCHANGE
        exchange_type = exchange_type or conf.CELERY_RESULT_EXCHANGE_TYPE
        self.exchange = self.Exchange(name=exchange,
                                      type=exchange_type,
                                      delivery_mode=delivery_mode,
                                      durable=self.persistent,
                                      auto_delete=False)
        self.serializer = serializer or conf.CELERY_RESULT_SERIALIZER
        self.auto_delete = auto_delete

        # AMQP_TASK_RESULT_EXPIRES setting is deprecated and will be
        # removed in version 3.0.
        dexpires = conf.CELERY_AMQP_TASK_RESULT_EXPIRES
        self.expires = None
        if "expires" in kwargs:
            if kwargs["expires"] is not None:
                self.expires = self.prepare_expires(kwargs["expires"])
        else:
            self.expires = self.prepare_expires(dexpires)
        if self.expires:
            # The broker expects the queue TTL in milliseconds.
            self.queue_arguments["x-expires"] = int(self.expires * 1000)
        self.mutex = threading.Lock()

    def _create_binding(self, task_id):
        """Return the result :class:`Queue` declaration for ``task_id``."""
        name = task_id.replace("-", "")
        return self.Queue(name=name,
                          exchange=self.exchange,
                          routing_key=name,
                          durable=self.persistent,
                          auto_delete=self.auto_delete,
                          queue_arguments=self.queue_arguments)

    def revive(self, channel):
        """Channel-revival hook; this backend keeps no channel state."""
        pass

    def _store_result(self, task_id, result, status, traceback=None):
        """Send task return value and status."""
        with self.mutex:
            with self.app.amqp.producer_pool.acquire(block=True) as pub:
                pub.publish({"task_id": task_id, "status": status,
                             "result": self.encode_result(result, status),
                             "traceback": traceback,
                             "children": self.current_task_children()},
                            exchange=self.exchange,
                            routing_key=task_id.replace("-", ""),
                            serializer=self.serializer,
                            retry=True, retry_policy=self.retry_policy,
                            declare=[self._create_binding(task_id)])
        return result

    def wait_for(self, task_id, timeout=None, cache=True, propagate=True,
                 **kwargs):
        """Block until a ready result for ``task_id`` arrives.

        :raises celery.exceptions.TimeoutError: if ``timeout`` elapses
            before a message is received.
        """
        cached_meta = self._cache.get(task_id)
        if cache and cached_meta and \
                cached_meta["status"] in states.READY_STATES:
            meta = cached_meta
        else:
            try:
                meta = self.consume(task_id, timeout=timeout)
            except socket.timeout:
                raise TimeoutError("The operation timed out.")
        state = meta["status"]
        if state == states.SUCCESS:
            return meta["result"]
        elif state in states.PROPAGATE_STATES:
            if propagate:
                raise self.exception_to_python(meta["result"])
            return meta["result"]
        else:
            # Result is not ready yet: wait for the next state message.
            # NOTE(review): the timeout is not decremented across retries,
            # so the total wait can exceed ``timeout`` -- confirm intended.
            return self.wait_for(task_id, timeout, cache)

    def get_task_meta(self, task_id, backlog_limit=1000):
        """Poll for the latest state message using ``basic_get``.

        Fast-forwards through up to ``backlog_limit`` queued state
        messages and returns the newest one (caching it), or the cached /
        PENDING state when the queue is empty.
        """
        # Polling and using basic_get
        with self.app.pool.acquire_channel(block=True) as (_, channel):
            binding = self._create_binding(task_id)(channel)
            binding.declare()
            latest, acc = None, None
            for i in xrange(backlog_limit):
                latest, acc = acc, binding.get(no_ack=True)
                if not acc:  # no more messages
                    break
            else:
                raise self.BacklogLimitExceeded(task_id)
            if latest:
                # new state to report
                payload = self._cache[task_id] = latest.payload
                return payload
            else:
                # no new state, use previous
                try:
                    return self._cache[task_id]
                except KeyError:
                    # result probably pending.
                    return {"status": states.PENDING, "result": None}
    poll = get_task_meta  # XXX compat

    def drain_events(self, connection, consumer, timeout=None, now=time.time):
        """Consume state messages until at least one task is ready.

        Returns a mapping of ``{task_id: meta}`` for every ready result
        received, and merges it into the local cache.
        """
        wait = connection.drain_events
        results = {}

        def callback(meta, message):
            # Only READY states are final; intermediate states are dropped.
            if meta["status"] in states.READY_STATES:
                uuid = repair_uuid(message.delivery_info["routing_key"])
                results[uuid] = meta

        consumer.callbacks[:] = [callback]
        time_start = now()
        while 1:
            # Total time spent may exceed a single call to wait()
            if timeout and now() - time_start >= timeout:
                raise socket.timeout()
            wait(timeout=timeout)
            if results:  # got event on the wanted channel.
                break
        self._cache.update(results)
        return results

    def consume(self, task_id, timeout=None):
        """Consume and return the ready result meta for a single task."""
        with self.app.pool.acquire_channel(block=True) as (conn, channel):
            binding = self._create_binding(task_id)
            with self.Consumer(channel, binding, no_ack=True) as consumer:
                return self.drain_events(conn, consumer, timeout).values()[0]

    def get_many(self, task_ids, timeout=None, **kwargs):
        """Iterate over ``(task_id, meta)`` pairs as results become ready.

        Cached ready results are yielded first without touching the
        broker; the rest are consumed from their reply queues.
        """
        with self.app.pool.acquire_channel(block=True) as (conn, channel):
            ids = set(task_ids)
            cached_ids = set()
            for task_id in ids:
                try:
                    cached = self._cache[task_id]
                except KeyError:
                    pass
                else:
                    if cached["status"] in states.READY_STATES:
                        yield task_id, cached
                        cached_ids.add(task_id)
            ids ^= cached_ids
            # NOTE(review): bindings are created for *all* requested ids,
            # including ones already satisfied from the cache above.
            bindings = [self._create_binding(task_id) for task_id in task_ids]
            with self.Consumer(channel, bindings, no_ack=True) as consumer:
                while ids:
                    r = self.drain_events(conn, consumer, timeout)
                    ids ^= set(r)
                    for ready_id, ready_meta in r.iteritems():
                        yield ready_id, ready_meta

    def reload_task_result(self, task_id):
        raise NotImplementedError(
            "reload_task_result is not supported by this backend.")

    def reload_taskset_result(self, task_id):
        """Reload taskset result, even if it has been previously fetched."""
        raise NotImplementedError(
            "reload_taskset_result is not supported by this backend.")

    def save_taskset(self, taskset_id, result):
        raise NotImplementedError(
            "save_taskset is not supported by this backend.")

    def restore_taskset(self, taskset_id, cache=True):
        raise NotImplementedError(
            "restore_taskset is not supported by this backend.")

    def delete_taskset(self, taskset_id):
        raise NotImplementedError(
            "delete_taskset is not supported by this backend.")

    def __reduce__(self, args=(), kwargs={}):
        """Support pickling by reconstructing from the init parameters.

        BUGFIX: build a fresh dict instead of mutating ``kwargs`` in
        place -- the ``{}`` default is a single shared object, so the old
        ``kwargs.update(...)`` polluted it across every call.
        """
        kwargs = dict(kwargs,
                      connection=self._connection,
                      exchange=self.exchange.name,
                      exchange_type=self.exchange.type,
                      persistent=self.persistent,
                      serializer=self.serializer,
                      auto_delete=self.auto_delete,
                      expires=self.expires)
        return super(AMQPBackend, self).__reduce__(args, kwargs)
| |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for SavedModel."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from tensorflow.core.framework import types_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import meta_graph_pb2
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_ops
from tensorflow.python.framework import test_util
from tensorflow.python.lib.io import file_io
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.saved_model import builder as saved_model_builder
from tensorflow.python.saved_model import constants
from tensorflow.python.saved_model import loader
from tensorflow.python.saved_model import loader_impl
from tensorflow.python.saved_model import main_op
from tensorflow.python.saved_model import signature_def_utils
from tensorflow.python.saved_model import tag_constants
from tensorflow.python.training import saver_test_utils
from tensorflow.python.util import compat
# Path (relative to the TF source tree) of a known-good SavedModel fixture.
SAVED_MODEL_PATH = ("cc/saved_model/testdata/half_plus_two/00000123")
def tearDownModule():
    """Delete everything the tests wrote under the temp directory."""
    file_io.delete_recursively(test.get_temp_dir())
@test_util.with_c_api
class SavedModelTest(test.TestCase):
def _get_export_dir(self, label):
    """Return a per-test export directory path, disambiguated for C API runs."""
    suffix = "_c_api" if ops._USE_C_API else ""
    return os.path.join(test.get_temp_dir(), label + suffix)
def _init_and_validate_variable(self, sess, variable_name, variable_value):
    """Create a named variable, run the global initializer, check its value."""
    var = variables.Variable(variable_value, name=variable_name)
    sess.run(variables.global_variables_initializer())
    self.assertEqual(variable_value, var.eval())
def _build_asset_collection(self, asset_file_name, asset_file_contents,
                            asset_file_tensor_name):
    """Write an asset file to the temp dir, register it under
    ASSET_FILEPATHS, and return the resulting collection."""
    filepath = os.path.join(
        compat.as_bytes(test.get_temp_dir()), compat.as_bytes(asset_file_name))
    file_io.write_string_to_file(filepath, asset_file_contents)
    file_tensor = constant_op.constant(filepath, name=asset_file_tensor_name)
    ops.add_to_collection(ops.GraphKeys.ASSET_FILEPATHS, file_tensor)
    return ops.get_collection(ops.GraphKeys.ASSET_FILEPATHS)
def _validate_asset_collection(self, export_dir, graph_collection_def,
                               expected_asset_file_name,
                               expected_asset_file_contents,
                               expected_asset_tensor_name):
    """Check the exported assets collection references the expected file,
    tensor name, and on-disk contents."""
    packed_assets = graph_collection_def[constants.ASSETS_KEY].any_list.value
    asset = meta_graph_pb2.AssetFileDef()
    packed_assets[0].Unpack(asset)
    asset_path = os.path.join(
        compat.as_bytes(export_dir),
        compat.as_bytes(constants.ASSETS_DIRECTORY),
        compat.as_bytes(expected_asset_file_name))
    on_disk_contents = file_io.read_file_to_string(asset_path)
    self.assertEqual(expected_asset_file_contents,
                     compat.as_text(on_disk_contents))
    self.assertEqual(expected_asset_file_name, asset.filename)
    self.assertEqual(expected_asset_tensor_name, asset.tensor_info.name)
def _validate_inputs_tensor_info(self, builder, tensor_info):
    """Assert that a signature whose *inputs* use ``tensor_info`` is rejected."""
    with self.test_session(graph=ops.Graph()) as sess:
        self._init_and_validate_variable(sess, "v", 42)
        signature = signature_def_utils.build_signature_def(
            {"foo_inputs": tensor_info}, dict(), "foo")
        self.assertRaises(
            AssertionError,
            builder.add_meta_graph_and_variables,
            sess, ["foo"],
            signature_def_map={"foo_key": signature})
def _validate_outputs_tensor_info(self, builder, tensor_info):
    """Assert that a signature whose *outputs* use ``tensor_info`` is rejected."""
    with self.test_session(graph=ops.Graph()) as sess:
        self._init_and_validate_variable(sess, "v", 42)
        signature = signature_def_utils.build_signature_def(
            dict(), {"foo_outputs": tensor_info}, "foo")
        self.assertRaises(
            AssertionError,
            builder.add_meta_graph_and_variables,
            sess, ["foo"],
            signature_def_map={"foo_key": signature})
def testMaybeSavedModelDir(self):
    """maybe_saved_model_directory is true only for a real SavedModel dir."""
    base_path = test.test_src_dir_path("/python/saved_model")
    self.assertFalse(loader.maybe_saved_model_directory(base_path))
    base_path = test.test_src_dir_path(SAVED_MODEL_PATH)
    self.assertTrue(loader.maybe_saved_model_directory(base_path))
    # A path that does not exist at all must also be rejected.
    base_path = "complete_garbage"
    self.assertFalse(loader.maybe_saved_model_directory(base_path))
def testBadSavedModelFileFormat(self):
    """Loading missing or corrupt saved_model.pb/.pbtxt raises IOError."""
    export_dir = self._get_export_dir("test_bad_saved_model_file_format")
    # Attempt to load a SavedModel from an export directory that does not exist.
    with self.test_session(graph=ops.Graph()) as sess:
        with self.assertRaisesRegexp(IOError,
                                     "SavedModel file does not exist at: %s" %
                                     export_dir):
            loader.load(sess, ["foo"], export_dir)
    os.makedirs(export_dir)
    # Write an invalid binary proto to saved_model.pb.
    path_to_pb = os.path.join(export_dir, constants.SAVED_MODEL_FILENAME_PB)
    with open(path_to_pb, "w") as f:
        f.write("invalid content")
    with self.test_session(graph=ops.Graph()) as sess:
        with self.assertRaisesRegexp(IOError, "Cannot parse file.*%s" %
                                     constants.SAVED_MODEL_FILENAME_PB):
            loader.load(sess, ["foo"], export_dir)
    # Cleanup the directory and start again.
    file_io.delete_recursively(export_dir)
    os.makedirs(export_dir)
    # Write an invalid text proto to saved_model.pbtxt
    path_to_pbtxt = os.path.join(export_dir,
                                 constants.SAVED_MODEL_FILENAME_PBTXT)
    with open(path_to_pbtxt, "w") as f:
        f.write("invalid content")
    with self.test_session(graph=ops.Graph()) as sess:
        with self.assertRaisesRegexp(IOError, "Cannot parse file.*%s" %
                                     constants.SAVED_MODEL_FILENAME_PBTXT):
            loader.load(sess, ["foo"], export_dir)
def testVerifySessionGraphUsage(self):
    """Loading into a caller-supplied session restores into its graph."""
    export_dir = self._get_export_dir("test_verify_session_graph_usage")
    builder = saved_model_builder.SavedModelBuilder(export_dir)
    with self.test_session(graph=ops.Graph()) as sess:
        self._init_and_validate_variable(sess, "v", 42)
        builder.add_meta_graph_and_variables(sess, [tag_constants.TRAINING])
    # Save the SavedModel to disk.
    builder.save()
    # Build a session and supply it to the load operation.
    sess = session.Session(graph=ops.Graph())
    loader.load(sess, [tag_constants.TRAINING], export_dir)
    # Check the variable within the scope of the session and its graph.
    with sess:
        self.assertEqual(
            42, ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)[0].eval())
def testSequence(self):
    """Builder call ordering: variables must be added first, exactly once."""
    export_dir = self._get_export_dir("test_sequence")
    builder = saved_model_builder.SavedModelBuilder(export_dir)
    # Expect an assertion error since add_meta_graph_and_variables() should be
    # invoked before any add_meta_graph() calls.
    with self.test_session(graph=ops.Graph()) as sess:
        self.assertRaises(AssertionError, builder.add_meta_graph, ["foo"])
    # Expect an assertion error for multiple calls of
    # add_meta_graph_and_variables() since weights should be saved exactly once.
    with self.test_session(graph=ops.Graph()) as sess:
        self._init_and_validate_variable(sess, "v", 42)
        builder.add_meta_graph_and_variables(sess, ["bar"])
        self.assertRaises(AssertionError, builder.add_meta_graph_and_variables,
                          sess, ["baz"])
def testTags(self):
    """Meta graphs are selected by exact ("all tags") tag-set matching."""
    export_dir = self._get_export_dir("test_tags")
    builder = saved_model_builder.SavedModelBuilder(export_dir)
    # Graph with a single variable. SavedModel invoked to:
    # - add with weights.
    # - a single tag (from predefined constants).
    with self.test_session(graph=ops.Graph()) as sess:
        self._init_and_validate_variable(sess, "v", 42)
        builder.add_meta_graph_and_variables(sess, [tag_constants.TRAINING])
    # Graph that updates the single variable. SavedModel invoked to:
    # - simply add the model (weights are not updated).
    # - a single tag (from predefined constants).
    with self.test_session(graph=ops.Graph()) as sess:
        self._init_and_validate_variable(sess, "v", 43)
        builder.add_meta_graph([tag_constants.SERVING])
    # Graph that updates the single variable. SavedModel invoked to:
    # - simply add the model (weights are not updated).
    # - multiple tags (from predefined constants).
    with self.test_session(graph=ops.Graph()) as sess:
        self._init_and_validate_variable(sess, "v", 45)
        builder.add_meta_graph([tag_constants.SERVING, tag_constants.GPU])
    # Graph that updates the single variable. SavedModel invoked to:
    # - simply add the model (weights are not updated).
    # - multiple tags (from predefined constants for serving on TPU).
    with self.test_session(graph=ops.Graph()) as sess:
        self._init_and_validate_variable(sess, "v", 45)
        builder.add_meta_graph([tag_constants.SERVING, tag_constants.TPU])
    # Graph that updates the single variable. SavedModel is invoked:
    # - to add the model (weights are not updated).
    # - multiple custom tags.
    with self.test_session(graph=ops.Graph()) as sess:
        self._init_and_validate_variable(sess, "v", 44)
        builder.add_meta_graph(["foo", "bar"])
    # Save the SavedModel to disk.
    builder.save()
    # Restore the graph with a single predefined tag whose variables were saved.
    with self.test_session(graph=ops.Graph()) as sess:
        loader.load(sess, [tag_constants.TRAINING], export_dir)
        self.assertEqual(
            42, ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)[0].eval())
    # Restore the graph with a single predefined tag whose variables were not
    # saved.
    with self.test_session(graph=ops.Graph()) as sess:
        loader.load(sess, [tag_constants.SERVING], export_dir)
        self.assertEqual(
            42, ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)[0].eval())
    # Restore the graph with multiple predefined tags whose variables were not
    # saved.
    with self.test_session(graph=ops.Graph()) as sess:
        loader.load(sess, [tag_constants.SERVING, tag_constants.GPU], export_dir)
        self.assertEqual(
            42, ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)[0].eval())
    # Restore the graph with multiple predefined tags (for serving on TPU)
    # whose variables were not saved.
    with self.test_session(graph=ops.Graph()) as sess:
        loader.load(sess, [tag_constants.SERVING, tag_constants.TPU], export_dir)
        self.assertEqual(
            42, ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)[0].eval())
    # Restore the graph with multiple tags. Provide duplicate tags to test set
    # semantics.
    with self.test_session(graph=ops.Graph()) as sess:
        loader.load(sess, ["foo", "bar", "foo"], export_dir)
        self.assertEqual(
            42, ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)[0].eval())
    # Try restoring a graph with a non-existent tag. This should yield a runtime
    # error.
    with self.test_session(graph=ops.Graph()) as sess:
        self.assertRaises(RuntimeError, loader.load, sess, ["INVALID"],
                          export_dir)
    # Try restoring a graph where a subset of the tags match. Since tag matching
    # for meta graph defs follows "all" semantics, this should yield a runtime
    # error.
    with self.test_session(graph=ops.Graph()) as sess:
        self.assertRaises(RuntimeError, loader.load, sess, ["foo", "baz"],
                          export_dir)
def testVariables(self):
    """Variable restore for subset, superset and disjoint variable sets."""
    export_dir = self._get_export_dir("test_variables")
    builder = saved_model_builder.SavedModelBuilder(export_dir)
    # Graph with two variables. SavedModel invoked to:
    # - add with weights.
    with self.test_session(graph=ops.Graph()) as sess:
        self._init_and_validate_variable(sess, "v1", 1)
        self._init_and_validate_variable(sess, "v2", 2)
        builder.add_meta_graph_and_variables(sess, ["foo"])
    # Graph with a single variable (subset of the variables from the previous
    # graph whose weights were saved). SavedModel invoked to:
    # - simply add the model (weights are not updated).
    with self.test_session(graph=ops.Graph()) as sess:
        self._init_and_validate_variable(sess, "v2", 3)
        builder.add_meta_graph(["bar"])
    # Graph with a single variable (disjoint set of variables from the previous
    # graph whose weights were saved). SavedModel invoked to:
    # - simply add the model (weights are not updated).
    with self.test_session(graph=ops.Graph()) as sess:
        self._init_and_validate_variable(sess, "v3", 4)
        builder.add_meta_graph(["baz"])
    # Save the SavedModel to disk.
    builder.save()
    # Restore the graph with tag "foo", whose variables were saved.
    with self.test_session(graph=ops.Graph()) as sess:
        loader.load(sess, ["foo"], export_dir)
        collection_vars = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
        self.assertEqual(len(collection_vars), 2)
        self.assertEqual(1, collection_vars[0].eval())
        self.assertEqual(2, collection_vars[1].eval())
    # Restore the graph with tag "bar", whose variables were not saved. Only the
    # subset of the variables added to the graph will be restored with the
    # checkpointed value.
    with self.test_session(graph=ops.Graph()) as sess:
        loader.load(sess, ["bar"], export_dir)
        collection_vars = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
        self.assertEqual(len(collection_vars), 1)
        self.assertEqual(2, collection_vars[0].eval())
    # Try restoring the graph with tag "baz", whose variables were not saved.
    # Since this graph has a disjoint set of variables from the set that was
    # saved, this should raise an error.
    with self.test_session(graph=ops.Graph()) as sess:
        self.assertRaises(errors.NotFoundError, loader.load, sess, ["baz"],
                          export_dir)
def testGraphWithoutVariables(self):
    """Save/load round-trip for graphs containing no variables at all."""
    # NOTE(review): the directory label says "has_variables" although the
    # graphs below contain none -- presumably a historical label; confirm.
    export_dir = self._get_export_dir("test_graph_has_variables")
    builder = saved_model_builder.SavedModelBuilder(export_dir)
    # Graph with no variables.
    with self.test_session(graph=ops.Graph()) as sess:
        constant_5_name = constant_op.constant(5.0).name
        builder.add_meta_graph_and_variables(sess, ["foo"])
    # Second graph with no variables
    with self.test_session(graph=ops.Graph()) as sess:
        constant_6_name = constant_op.constant(6.0).name
        builder.add_meta_graph(["bar"])
    # Save the SavedModel to disk.
    builder.save()
    # Restore the graph with tag "foo".
    with self.test_session(graph=ops.Graph()) as sess:
        loader.load(sess, ["foo"], export_dir)
        # Read the constant a from the graph.
        a = ops.get_default_graph().get_tensor_by_name(constant_5_name)
        b = constant_op.constant(6.0)
        c = a * b
        self.assertEqual(30.0, sess.run(c))
    # Restore the graph with tag "bar".
    with self.test_session(graph=ops.Graph()) as sess:
        loader.load(sess, ["bar"], export_dir)
        # Read the constant a from the graph.
        a = ops.get_default_graph().get_tensor_by_name(constant_6_name)
        b = constant_op.constant(5.0)
        c = a * b
        self.assertEqual(30.0, sess.run(c))
def testNoOverwrite(self):
    """A second builder on an existing export directory must be rejected."""
    export_dir = self._get_export_dir("test_no_overwrite")
    builder = saved_model_builder.SavedModelBuilder(export_dir)
    # Graph with a single variable. SavedModel invoked to:
    # - add with weights.
    with self.test_session(graph=ops.Graph()) as sess:
        self._init_and_validate_variable(sess, "v", 42)
        builder.add_meta_graph_and_variables(sess, ["foo"])
    # Save the SavedModel to disk in text format.
    builder.save(as_text=True)
    # Restore the graph with tag "foo", whose variables were saved.
    with self.test_session(graph=ops.Graph()) as sess:
        loader.load(sess, ["foo"], export_dir)
        self.assertEqual(
            42, ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)[0].eval())
    # An attempt to create another builder with the same export directory should
    # result in an assertion error.
    self.assertRaises(AssertionError, saved_model_builder.SavedModelBuilder,
                      export_dir)
def testSaveAsText(self):
    """Round-trip through the text (pbtxt) serialization of the SavedModel."""
    export_dir = self._get_export_dir("test_astext")
    builder = saved_model_builder.SavedModelBuilder(export_dir)
    # Graph with a single variable. SavedModel invoked to:
    # - add with weights.
    with self.test_session(graph=ops.Graph()) as sess:
        self._init_and_validate_variable(sess, "v", 42)
        builder.add_meta_graph_and_variables(sess, ["foo"])
    # Graph with the same single variable. SavedModel invoked to:
    # - simply add the model (weights are not updated).
    with self.test_session(graph=ops.Graph()) as sess:
        self._init_and_validate_variable(sess, "v", 43)
        builder.add_meta_graph(["bar"])
    # Save the SavedModel to disk in text format.
    builder.save(as_text=True)
    # Restore the graph with tag "foo", whose variables were saved.
    with self.test_session(graph=ops.Graph()) as sess:
        loader.load(sess, ["foo"], export_dir)
        self.assertEqual(
            42, ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)[0].eval())
    # Restore the graph with tag "bar", whose variables were not saved.
    with self.test_session(graph=ops.Graph()) as sess:
        loader.load(sess, ["bar"], export_dir)
        self.assertEqual(
            42, ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)[0].eval())
def testCollections(self):
    """Collection-defs are saved per meta graph and restored with it."""
    export_dir = self._get_export_dir("test_collections")
    builder = saved_model_builder.SavedModelBuilder(export_dir)
    # Graph with a single variable added to a collection. SavedModel invoked to:
    # - add with weights.
    with self.test_session(graph=ops.Graph()) as sess:
        v = variables.Variable(42, name="v")
        ops.add_to_collection("foo_vars", v)
        sess.run(variables.global_variables_initializer())
        self.assertEqual(42, v.eval())
        builder.add_meta_graph_and_variables(sess, ["foo"])
    # Graph with the same single variable added to a different collection.
    # SavedModel invoked to:
    # - simply add the model (weights are not updated).
    with self.test_session(graph=ops.Graph()) as sess:
        v = variables.Variable(43, name="v")
        ops.add_to_collection("bar_vars", v)
        sess.run(variables.global_variables_initializer())
        self.assertEqual(43, v.eval())
        builder.add_meta_graph(["bar"])
    # Save the SavedModel to disk.
    builder.save()
    # Restore the graph with tag "foo", whose variables were saved. The
    # collection 'foo_vars' should contain a single element. The collection
    # 'bar_vars' should not be found.
    with self.test_session(graph=ops.Graph()) as sess:
        loader.load(sess, ["foo"], export_dir)
        collection_foo_vars = ops.get_collection("foo_vars")
        self.assertEqual(len(collection_foo_vars), 1)
        self.assertEqual(42, collection_foo_vars[0].eval())
        self.assertEqual(len(ops.get_collection("bar_vars")), 0)
    # Restore the graph with tag "bar", whose variables were not saved. The
    # collection-def exported as part of the meta graph def is updated to
    # reflect the new collection. The value of the variable in the
    # collection-def corresponds to the saved value (from the previous graph
    # with tag "foo").
    with self.test_session(graph=ops.Graph()) as sess:
        loader.load(sess, ["bar"], export_dir)
        collection_bar_vars = ops.get_collection("bar_vars")
        self.assertEqual(len(collection_bar_vars), 1)
        self.assertEqual(42, collection_bar_vars[0].eval())
        self.assertEqual(len(ops.get_collection("foo_vars")), 0)
def testSignatureDefs(self):
    """Signature-def maps are stored per meta graph; keys may be re-used."""
    export_dir = self._get_export_dir("test_signature_defs")
    builder = saved_model_builder.SavedModelBuilder(export_dir)
    # Graph with a single variable and a single entry in the signature def map.
    # SavedModel is invoked to add with weights.
    with self.test_session(graph=ops.Graph()) as sess:
        self._init_and_validate_variable(sess, "v", 42)
        # Build and populate an empty SignatureDef for testing.
        foo_signature = signature_def_utils.build_signature_def(dict(),
                                                                dict(), "foo")
        builder.add_meta_graph_and_variables(
            sess, ["foo"], signature_def_map={"foo_key": foo_signature})
    # Graph with the same single variable and multiple entries in the signature
    # def map. No weights are saved by SavedModel.
    with self.test_session(graph=ops.Graph()) as sess:
        self._init_and_validate_variable(sess, "v", 43)
        # Build and populate a different SignatureDef for testing.
        bar_signature = signature_def_utils.build_signature_def(dict(),
                                                                dict(), "bar")
        # Also, build a different SignatureDef corresponding to "foo_key" defined
        # in the previous graph.
        foo_new_signature = signature_def_utils.build_signature_def(dict(),
                                                                    dict(),
                                                                    "foo_new")
        builder.add_meta_graph(
            ["bar"],
            signature_def_map={
                "bar_key": bar_signature,
                "foo_key": foo_new_signature
            })
    # Save the SavedModel to disk.
    builder.save()
    # Restore the graph with tag "foo". The single entry in the SignatureDef map
    # corresponding to "foo_key" should exist.
    with self.test_session(graph=ops.Graph()) as sess:
        foo_graph = loader.load(sess, ["foo"], export_dir)
        self.assertEqual(
            42, ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)[0].eval())
        foo_signature = foo_graph.signature_def
        self.assertEqual(len(foo_signature), 1)
        self.assertEqual("foo", foo_signature["foo_key"].method_name)
    # Restore the graph with tag "bar". The SignatureDef map should have two
    # entries. One corresponding to "bar_key" and another corresponding to the
    # new value of "foo_key".
    with self.test_session(graph=ops.Graph()) as sess:
        bar_graph = loader.load(sess, ["bar"], export_dir)
        self.assertEqual(
            42, ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)[0].eval())
        bar_signature = bar_graph.signature_def
        self.assertEqual(len(bar_signature), 2)
        self.assertEqual("bar", bar_signature["bar_key"].method_name)
        self.assertEqual("foo_new", bar_signature["foo_key"].method_name)
def testSignatureDefValidation(self):
    """Incomplete TensorInfos (missing name, dtype, or both) are rejected
    for both signature inputs and outputs."""
    export_dir = self._get_export_dir("test_signature_def_validation")
    builder = saved_model_builder.SavedModelBuilder(export_dir)

    missing_name = meta_graph_pb2.TensorInfo()
    missing_name.dtype = types_pb2.DT_FLOAT
    missing_dtype = meta_graph_pb2.TensorInfo()
    missing_dtype.name = "x"
    missing_both = meta_graph_pb2.TensorInfo()

    for bad_tensor_info in (missing_name, missing_dtype, missing_both):
        self._validate_inputs_tensor_info(builder, bad_tensor_info)
        self._validate_outputs_tensor_info(builder, bad_tensor_info)
def testAssets(self):
    """Only files in the assets collection are copied into the export."""
    export_dir = self._get_export_dir("test_assets")
    builder = saved_model_builder.SavedModelBuilder(export_dir)
    with self.test_session(graph=ops.Graph()) as sess:
        self._init_and_validate_variable(sess, "v", 42)
        # Build an asset collection.
        ignored_filepath = os.path.join(
            compat.as_bytes(test.get_temp_dir()), compat.as_bytes("ignored.txt"))
        file_io.write_string_to_file(ignored_filepath, "will be ignored")
        asset_collection = self._build_asset_collection("hello42.txt",
                                                        "foo bar baz",
                                                        "asset_file_tensor")
        builder.add_meta_graph_and_variables(
            sess, ["foo"], assets_collection=asset_collection)
    # Save the SavedModel to disk.
    builder.save()
    with self.test_session(graph=ops.Graph()) as sess:
        foo_graph = loader.load(sess, ["foo"], export_dir)
        self._validate_asset_collection(export_dir, foo_graph.collection_def,
                                        "hello42.txt", "foo bar baz",
                                        "asset_file_tensor:0")
        # The file written outside the collection must not be exported.
        ignored_asset_path = os.path.join(
            compat.as_bytes(export_dir),
            compat.as_bytes(constants.ASSETS_DIRECTORY),
            compat.as_bytes("ignored.txt"))
        self.assertFalse(file_io.file_exists(ignored_asset_path))
def testCustomMainOp(self):
    """A custom main_op runs on load and can mutate restored variables."""
    export_dir = self._get_export_dir("test_main_op")
    builder = saved_model_builder.SavedModelBuilder(export_dir)
    with self.test_session(graph=ops.Graph()) as sess:
        # Add `v1` and `v2` variables to the graph.
        v1 = variables.Variable(1, name="v1")
        ops.add_to_collection("v", v1)
        v2 = variables.Variable(2, name="v2")
        ops.add_to_collection("v", v2)
        # Initialize another variable `v3` to 42.
        v3 = variables.Variable(42, name="v3")
        ops.add_to_collection("v", v3)
        # Set up an assignment op to be run as part of the main_op.
        with ops.control_dependencies([main_op.main_op()]):
            add_v1_v2 = math_ops.add(v1._ref(), v2._ref())
            custom_main_op = control_flow_ops.group(state_ops.assign(v3, add_v1_v2))
        sess.run(custom_main_op)
        builder.add_meta_graph_and_variables(
            sess, ["foo"], main_op=custom_main_op)
    # Save the SavedModel to disk.
    builder.save()
    with self.test_session(graph=ops.Graph()) as sess:
        loader.load(sess, ["foo"], export_dir)
        self.assertEqual(1, ops.get_collection("v")[0].eval())
        self.assertEqual(2, ops.get_collection("v")[1].eval())
        # Evaluates to the sum of the first two variables and assigned as part of
        # the main_op, following a restore.
        self.assertEqual(3, ops.get_collection("v")[2].eval())
def testLegacyInitOp(self):
    """A legacy_init_op runs on load and can initialize extra variables."""
    export_dir = self._get_export_dir("test_legacy_init_op")
    builder = saved_model_builder.SavedModelBuilder(export_dir)
    with self.test_session(graph=ops.Graph()) as sess:
        # Add `v1` and `v2` variables to the graph.
        v1 = variables.Variable(1, name="v1")
        ops.add_to_collection("v", v1)
        v2 = variables.Variable(2, name="v2")
        ops.add_to_collection("v", v2)
        # Initialize another variable `v3` to 42.
        # v3 is kept out of GLOBAL_VARIABLES (collections=[]) so the
        # checkpoint does not cover it; the legacy_init_op fills it in.
        v3 = variables.Variable(42, name="v3", trainable=False, collections=[])
        ops.add_to_collection("v", v3)
        # Set up an assignment op to be run as part of the legacy_init_op.
        assign_v3 = state_ops.assign(v3, math_ops.add(v1, v2))
        legacy_init_op = control_flow_ops.group(assign_v3, name="legacy_init_op")
        sess.run(variables.global_variables_initializer())
        builder.add_meta_graph_and_variables(
            sess, ["foo"], legacy_init_op=legacy_init_op)
    # Save the SavedModel to disk.
    builder.save()
    with self.test_session(graph=ops.Graph()) as sess:
        loader.load(sess, ["foo"], export_dir)
        self.assertEqual(1, ops.get_collection("v")[0].eval())
        self.assertEqual(2, ops.get_collection("v")[1].eval())
        # Evaluates to the sum of the first two variables and assigned as part of
        # the legacy_init_op, following a restore.
        self.assertEqual(3, ops.get_collection("v")[2].eval())
def testLegacyInitOpWithNonEmptyCollection(self):
  """The builder rejects a legacy_init_op if the init-op collection is already populated."""
  export_dir = self._get_export_dir(
      "test_legacy_init_op_with_non_empty_collection")
  builder = saved_model_builder.SavedModelBuilder(export_dir)
  with self.test_session(graph=ops.Graph()) as sess:
    # Initialize variable `v1` to 1.
    v1 = variables.Variable(1, name="v1")
    ops.add_to_collection("v", v1)
    # Initialize another variable `v2` to 42.
    v2 = variables.Variable(42, name="v2", trainable=False, collections=[])
    ops.add_to_collection("v", v2)
    # Set up an assignment op to be run as part of the legacy_init_op.
    assign_v2 = state_ops.assign(v2, v1)
    legacy_init_op = control_flow_ops.group(assign_v2, name="legacy_init_op")
    sess.run(variables.global_variables_initializer())
    # Pre-populate the collection the builder would use for the init op.
    ops.add_to_collection(constants.LEGACY_INIT_OP_KEY,
                          control_flow_ops.no_op())
    # AssertionError should be raised since the LEGACY_INIT_OP_KEY collection
    # is not empty and we don't support multiple init ops.
    with self.assertRaises(AssertionError):
      builder.add_meta_graph_and_variables(
          sess, ["foo"], legacy_init_op=legacy_init_op)
def testMultipleAssets(self):
  """Each tagged meta graph keeps its own, independently restorable assets."""
  export_dir = self._get_export_dir("test_multiple_assets")
  builder = saved_model_builder.SavedModelBuilder(export_dir)
  with self.test_session(graph=ops.Graph()) as sess:
    self._init_and_validate_variable(sess, "v", 42)
    # Build an asset collection specific to `foo` graph.
    asset_collection = self._build_asset_collection("foo.txt", "content_foo",
                                                    "asset_file_tensor")
    # Add the asset collection as part of the graph with tag "foo".
    builder.add_meta_graph_and_variables(
        sess, ["foo"], assets_collection=asset_collection)
  with self.test_session(graph=ops.Graph()) as sess:
    self._init_and_validate_variable(sess, "v", 42)
    # Build an asset collection specific to `bar` graph.
    asset_collection = self._build_asset_collection("bar.txt", "content_bar",
                                                    "asset_file_tensor")
    # Add the asset collection as part of the graph with tag "bar".
    builder.add_meta_graph(["bar"], assets_collection=asset_collection)
  # Save the SavedModel to disk.
  builder.save()
  # Check assets restored for graph with tag "foo".
  with self.test_session(graph=ops.Graph()) as sess:
    foo_graph = loader.load(sess, ["foo"], export_dir)
    self._validate_asset_collection(export_dir, foo_graph.collection_def,
                                    "foo.txt", "content_foo",
                                    "asset_file_tensor:0")
  # Check assets restored for graph with tag "bar".
  with self.test_session(graph=ops.Graph()) as sess:
    bar_graph = loader.load(sess, ["bar"], export_dir)
    self._validate_asset_collection(export_dir, bar_graph.collection_def,
                                    "bar.txt", "content_bar",
                                    "asset_file_tensor:0")
def testDuplicateAssets(self):
  """An asset filename shared across graphs is stored once, first-writer-wins."""
  export_dir = self._get_export_dir("test_duplicate_assets")
  builder = saved_model_builder.SavedModelBuilder(export_dir)
  with self.test_session(graph=ops.Graph()) as sess:
    self._init_and_validate_variable(sess, "v", 42)
    # Build an asset collection with `foo.txt` that has `foo` specific
    # content.
    asset_collection = self._build_asset_collection("foo.txt", "content_foo",
                                                    "asset_file_tensor")
    # Add the asset collection as part of the graph with tag "foo".
    builder.add_meta_graph_and_variables(
        sess, ["foo"], assets_collection=asset_collection)
  with self.test_session(graph=ops.Graph()) as sess:
    self._init_and_validate_variable(sess, "v", 42)
    # Build an asset collection with `foo.txt` that has `bar` specific
    # content.
    asset_collection = self._build_asset_collection("foo.txt", "content_bar",
                                                    "asset_file_tensor")
    # Add the asset collection as part of the graph with tag "bar".
    builder.add_meta_graph(["bar"], assets_collection=asset_collection)
  # Save the SavedModel to disk.
  builder.save()
  # Check assets restored for graph with tag "foo".
  with self.test_session(graph=ops.Graph()) as sess:
    foo_graph = loader.load(sess, ["foo"], export_dir)
    self._validate_asset_collection(export_dir, foo_graph.collection_def,
                                    "foo.txt", "content_foo",
                                    "asset_file_tensor:0")
  # Check assets restored for graph with tag "bar".
  with self.test_session(graph=ops.Graph()) as sess:
    bar_graph = loader.load(sess, ["bar"], export_dir)
    # Validate the assets for `bar` graph. `foo.txt` should contain the
    # original contents corresponding to `foo` graph since an asset with the
    # same name across multiple graphs is only stored the first time
    self._validate_asset_collection(export_dir, bar_graph.collection_def,
                                    "foo.txt", "content_foo",
                                    "asset_file_tensor:0")
def testOp(self):
  """An op kept in a named collection can be fetched and run after loading."""
  export_dir = self._get_export_dir("test_op")
  builder = saved_model_builder.SavedModelBuilder(export_dir)
  with session.Session(
      graph=ops.Graph(),
      config=config_pb2.ConfigProto(device_count={"CPU": 2})) as sess:
    with sess.graph.device("/cpu:0"):
      v1 = variables.Variable(1, name="v1")
    with sess.graph.device("/cpu:1"):
      v2 = variables.Variable(2, name="v2")
    # v3 is an unsaved variable derived from v1 and v2. It is used to
    # exercise the ability to run an init op when restoring a graph.
    v3 = variables.Variable(1, name="v3", trainable=False, collections=[])
    assign_v3 = state_ops.assign(v3, math_ops.add(v1, v2))
    init_op = control_flow_ops.group(assign_v3, name="init_op")
    ops.add_to_collection("v", v1)
    ops.add_to_collection("v", v2)
    ops.add_to_collection("v", v3)
    ops.add_to_collection("init_op", init_op)
    sess.run(variables.global_variables_initializer())
    self.assertEqual(1, ops.get_collection("v")[0].eval())
    self.assertEqual(2, ops.get_collection("v")[1].eval())
    builder.add_meta_graph_and_variables(sess, ["foo"])
  # Save the SavedModel to disk.
  builder.save()
  with session.Session(
      graph=ops.Graph(),
      config=config_pb2.ConfigProto(device_count={"CPU": 2})) as sess:
    loader.load(sess, ["foo"], export_dir)
    # Validate variables, run the init op and verify result.
    self.assertEqual(1, ops.get_collection("v")[0].eval())
    self.assertEqual(2, ops.get_collection("v")[1].eval())
    ops.get_collection("init_op")[0].run()
    self.assertEqual(3, ops.get_collection("v")[2].eval())
def testCustomSaveable(self):
  """A custom SaveableObject (CheckpointedOp table) round-trips through a SavedModel."""
  export_dir = self._get_export_dir("custom_saveable")
  builder = saved_model_builder.SavedModelBuilder(export_dir)
  with session.Session(
      graph=ops.Graph(),
      config=config_pb2.ConfigProto(device_count={"CPU": 2})) as sess:
    # CheckpointedOp is a key-value table that can be saved across sessions.
    # The table registers itself in the SAVEABLE_OBJECTS collection.
    v1 = saver_test_utils.CheckpointedOp(name="v1")
    variables.global_variables_initializer().run()
    v1.insert("k1", 3.0).run()
    # Once the table is restored, we can access it through this reference.
    ops.add_to_collection("table_ref", v1.table_ref)
    builder.add_meta_graph_and_variables(sess, ["foo"])
  # Save the SavedModel to disk.
  builder.save()
  with session.Session(
      graph=ops.Graph(),
      config=config_pb2.ConfigProto(device_count={"CPU": 2})) as sess:
    loader.load(sess, ["foo"], export_dir)
    # Instantiate a wrapper object from the checkpointed reference.
    v1 = saver_test_utils.CheckpointedOp(
        name="v1", table_ref=ops.get_collection("table_ref")[0])
    self.assertEqual(b"k1", v1.keys().eval())
    self.assertEqual(3.0, v1.values().eval())
def testClearDevices(self):
  """clear_devices=True strips device placement from the saved meta graph."""
  export_dir = self._get_export_dir("test_clear_devices")
  builder = saved_model_builder.SavedModelBuilder(export_dir)
  # Specify a device and save a variable.
  ops.reset_default_graph()
  with session.Session(
      target="",
      config=config_pb2.ConfigProto(device_count={"CPU": 2})) as sess:
    with sess.graph.device("/cpu:0"):
      self._init_and_validate_variable(sess, "v", 42)
      builder.add_meta_graph_and_variables(
          sess, [tag_constants.TRAINING], clear_devices=True)
  # Save the SavedModel to disk.
  builder.save()
  # Restore the graph with a single predefined tag whose variables were saved
  # without any device information.
  with self.test_session(graph=ops.Graph()) as sess:
    loader.load(sess, [tag_constants.TRAINING], export_dir)
    self.assertEqual(
        42, ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)[0].eval())
def testStripDefaultAttrs(self):
  """strip_default_attrs removes default-valued attrs only from graphs saved with it.

  Saves the same graph twice ("foo" stripped, "bar" not), then inspects both
  the loader-restored graph (defaults must be re-added from the OpDef) and
  the raw on-disk proto (defaults must be absent only for "foo").
  """
  export_dir = self._get_export_dir("test_strip_default_attrs")
  builder = saved_model_builder.SavedModelBuilder(export_dir)
  # Add a graph with two float32 variables and a Complex Op composing them
  # with strip_default_attrs enabled.
  with session.Session(graph=ops.Graph()) as sess:
    real_num = variables.Variable(1.0, dtype=dtypes.float32, name="real")
    imag_num = variables.Variable(2.0, dtype=dtypes.float32, name="imag")
    math_ops.complex(real_num, imag_num, name="complex")
    sess.run(variables.global_variables_initializer())
    builder.add_meta_graph_and_variables(
        sess, ["foo"], strip_default_attrs=True)
  # Add a graph with the same float32 variables and a Complex Op composing
  # them with strip_default_attrs disabled.
  with session.Session(graph=ops.Graph()) as sess:
    real_num = variables.Variable(1.0, dtype=dtypes.float32, name="real")
    imag_num = variables.Variable(2.0, dtype=dtypes.float32, name="imag")
    math_ops.complex(real_num, imag_num, name="complex")
    sess.run(variables.global_variables_initializer())
    builder.add_meta_graph(["bar"], strip_default_attrs=False)
  # Save the SavedModel to disk in text format.
  builder.save(as_text=True)
  # Loading graph "foo" via the loader must restore the defaults for the
  # "Complex" node based on the "Complex" OpDef in the Op registry.
  sess = session.Session(graph=ops.Graph())
  meta_graph_def = loader.load(sess, ["foo"], export_dir)
  complex_node = test_util.get_node_def_from_graph("complex",
                                                   meta_graph_def.graph_def)
  self.assertIn("T", complex_node.attr)
  self.assertIn("Tout", complex_node.attr)
  # Load graph "foo" from disk as-is to verify default attrs are stripped.
  # pylint: disable=protected-access
  saved_model_pb = loader_impl._parse_saved_model(export_dir)
  self.assertIsNotNone(saved_model_pb)
  # pylint: enable=protected-access
  meta_graph_foo_def = None
  meta_graph_bar_def = None
  for meta_graph_def in saved_model_pb.meta_graphs:
    if set(meta_graph_def.meta_info_def.tags) == set(["foo"]):
      meta_graph_foo_def = meta_graph_def
    elif set(meta_graph_def.meta_info_def.tags) == set(["bar"]):
      meta_graph_bar_def = meta_graph_def
  self.assertIsNotNone(meta_graph_foo_def)
  self.assertIsNotNone(meta_graph_bar_def)
  # "Complex" Op has 2 attributes with defaults:
  #   o "T"    : float32.   (input type)
  #   o "Tout" : complex64. (output type)
  # "Complex" Op in graph "foo" shouldn't have attributes "T" and "Tout".
  # Graph "foo" was saved with strip_default_attrs set to True.
  node_def = test_util.get_node_def_from_graph("complex",
                                               meta_graph_foo_def.graph_def)
  self.assertNotIn("T", node_def.attr)
  self.assertNotIn("Tout", node_def.attr)
  # "Complex" Op in graph "bar" must have attributes "T" and "Tout".
  # Graph "bar" was saved with strip_default_attrs set to False.
  node_def = test_util.get_node_def_from_graph("complex",
                                               meta_graph_bar_def.graph_def)
  self.assertIn("T", node_def.attr)
  self.assertIn("Tout", node_def.attr)
# Tests the behavior of loading SavedModels that having missing attrs or attrs
# with incorrect types.
def testInconsistentConsumerDefaultAttrs(self):
  """Loading fails when a node is missing an attr or carries a wrongly-typed one.

  The saved pbtxt is edited on disk between loads to simulate producer /
  consumer OpDef skew.
  """
  export_dir = self._get_export_dir(
      "test_strip_default_attrs_no_consumer_defaults")
  builder = saved_model_builder.SavedModelBuilder(export_dir)
  # Add a graph with a single variable and a test op with a defaultless
  # float32 attr, "test_attr".
  with session.Session(graph=ops.Graph()) as sess:
    variables.Variable(1.0, dtype=dtypes.float64, name="var")
    test_ops.test_attr(T=dtypes.float32, name="test_attr")
    sess.run(variables.global_variables_initializer())
    builder.add_meta_graph_and_variables(sess, ["foo"])
  # Save the SavedModel to disk in text format.
  builder.save(as_text=True)
  # Rewrite the SavedModel to remove the T attr from "test_attr".
  saved_model_file = os.path.join(
      export_dir, constants.SAVED_MODEL_FILENAME_PBTXT)
  with open(saved_model_file) as f:
    original_saved_model = f.read()
  # NOTE(review): the literal below must match the pbtxt's exact indentation
  # for the replace to take effect — confirm against the written file.
  no_attr_saved_model = original_saved_model.replace("""
      attr {
        key: "T"
        value {
          type: DT_FLOAT
        }
      }""", "")
  with open(saved_model_file, "w") as f:
    f.write(no_attr_saved_model)
  # Loading the SavedModel via the loader must fail because the SavedModel
  # does not have any attr values for the "TestAttr" node, and there is no
  # default specified in the TestAttr OpDef.
  sess = session.Session(graph=ops.Graph())
  if ops._USE_C_API:
    error_message = "NodeDef missing attr 'T' from Op<name=TestAttr"
  else:
    error_message = ("Expected one attr with name .*T(out)?.* in name: "
                     "\"test_attr\".*")
  with self.assertRaisesRegexp(ValueError, error_message):
    loader.load(sess, ["foo"], export_dir)
  # Rewrite the SavedModel to change the type of the T attr in "test_attr"
  bad_type_saved_model = original_saved_model.replace("""
      attr {
        key: "T"
        value {
          type: DT_FLOAT
        }
      }""", """
      attr {
        key: "T"
        value {
          type: DT_DOUBLE
        }
      }""")
  with open(saved_model_file, "w") as f:
    f.write(bad_type_saved_model)
  # Loading the SavedModel via the loader must fail because there is no
  # OpKernel registered to handle T = double.
  sess = session.Session(graph=ops.Graph())
  with self.assertRaisesRegexp(
      errors.InvalidArgumentError,
      ".*No OpKernel was registered to support Op \'TestAttr\' with these "
      "attrs..*"):
    loader.load(sess, ["foo"], export_dir)
# Standard TensorFlow test entry point.
if __name__ == "__main__":
  test.main()
| |
import functools
import re
from typing import Dict, Sequence
import looker_sdk
from looker_sdk import methods, models, error
# Prefix applied to every Looker group created for a hackathon.
LOOKER_GROUP_PREFIX = "Looker_Hack: "
# simple caching mechanism until we have a true class for retaining these IDs
HACKATHON_ATTR_ID = None
HACKATHON_ROLE = None
def try_to(func):
    """Decorator translating Looker SDKError into RegisterError.

    The wrapped callable must be invoked with keyword arguments only.
    """
    @functools.wraps(func)
    def wrapper(**kwargs):
        try:
            return func(**kwargs)
        except error.SDKError as ex:
            raise RegisterError(f"Failed to {func.__name__}: ({ex})")
    return wrapper
@try_to
def get_hackathon_attr_id(*, sdk: methods.LookerSDK) -> int:
    """Return the id of the "hackathon" user attribute, creating it if absent.

    The id is memoized in the module-level HACKATHON_ATTR_ID so repeated
    registrations avoid extra API round trips.
    """
    global HACKATHON_ATTR_ID
    if HACKATHON_ATTR_ID is not None:
        return HACKATHON_ATTR_ID
    main_hackathon = "hackathon"
    user_attrs = sdk.all_user_attributes(fields="name,id")
    for user_attr in user_attrs:
        if user_attr.name == main_hackathon:
            assert user_attr.id
            HACKATHON_ATTR_ID = user_attr.id
            break
    else:
        # for/else: reached only when no existing attribute matched above.
        attrib = sdk.create_user_attribute(
            body=models.WriteUserAttribute(
                name=main_hackathon, label="Looker Hackathon", type="string"
            )
        )
        if not attrib:
            raise RegisterError(f"Could not find '{main_hackathon}' user attribute")
        else:
            assert attrib.id
            HACKATHON_ATTR_ID = attrib.id
    return HACKATHON_ATTR_ID
@try_to
def get_hackathon_role(*, sdk: methods.LookerSDK) -> models.Role:
    """Look up the pre-provisioned "Hackathon" role, memoized in HACKATHON_ROLE.

    Raises RegisterError if the role does not exist on the instance.
    """
    global HACKATHON_ROLE
    if HACKATHON_ROLE is None:
        match = None
        for candidate in sdk.all_roles(fields="name,id"):
            if candidate.name == "Hackathon":
                match = candidate
                break
        if match is None:
            raise RegisterError("Hackathon role needs to be created")
        HACKATHON_ROLE = match
        assert HACKATHON_ROLE.id
    return HACKATHON_ROLE
def register_user(
    *, hackathon: str, first_name: str, last_name: str, email: str
) -> str:
    """Provision a (disabled) Looker user for a hackathon.

    Finds or creates the user, ensures email and API3 credentials exist,
    attaches the hackathon group and user attribute, then disables the
    account (re-enabled later via enable_users_by_hackathons).

    Returns:
        The user's API3 client_id.
    """
    sdk = looker_sdk.init31()
    user = find_or_create_user(
        sdk=sdk, first_name=first_name, last_name=last_name, email=email
    )
    assert user.id
    if not user.credentials_email:
        create_email_credentials(sdk=sdk, user_id=user.id, email=email)
    if user.credentials_api3:
        # Reuse the first existing API3 credential rather than minting more.
        client_id = user.credentials_api3[0].client_id
    else:
        client_id = create_api3_credentials(sdk=sdk, user_id=user.id).client_id
    set_user_group(sdk=sdk, user_id=user.id, hackathon=hackathon)
    set_user_attributes(sdk=sdk, user_id=user.id, hackathon=hackathon)
    # Keep the account disabled until the event opens.
    disable_user(sdk=sdk, user_id=user.id)
    assert client_id
    return client_id
def find_or_create_user(
    *, sdk: methods.LookerSDK, first_name: str, last_name: str, email: str
) -> models.User:
    """Return the user matching *email*, updating or creating as required.

    An existing user is updated (and re-enabled) when the stored name differs
    or the account is disabled. Raises RegisterError on any SDK failure.
    """
    try:
        matches = sdk.search_users(email=email)
        if not matches:
            return sdk.create_user(
                models.WriteUser(first_name=first_name, last_name=last_name)
            )
        user = matches[0]
        needs_update = (
            user.first_name != first_name
            or user.last_name != last_name
            or user.is_disabled
        )
        if needs_update:
            assert user.id
            user = sdk.update_user(
                user_id=user.id,
                body=models.WriteUser(
                    first_name=first_name, last_name=last_name, is_disabled=False
                ),
            )
        return user
    except error.SDKError as create_ex:
        raise RegisterError(f"Failed to find or create User ({create_ex})")
def enable_users_by_hackathons(hackathons: Sequence[str]) -> Dict[str, str]:
    """Re-enable every user in each hackathon's Looker group.

    Args:
        hackathons: hackathon names whose groups should be activated.

    Returns:
        Mapping of user email to an account-setup URL derived from a
        password-reset link.

    Raises:
        RegisterError: if a hackathon has no corresponding Looker group.
    """
    # No `global` statement needed: LOOKER_GROUP_PREFIX is only read, never
    # rebound (the original declared it globally for no effect).
    sdk = looker_sdk.init31()
    # Fetch the group name -> id mapping once instead of per hackathon.
    groups = {g.name: g.id for g in sdk.all_groups(fields="id,name")}
    ret: Dict[str, str] = {}
    for hackathon in hackathons:
        try:
            group_id = groups[f"{LOOKER_GROUP_PREFIX}{hackathon}"]
        except KeyError:
            raise RegisterError(f"No group found for hackathon: '{hackathon}'")
        for user in sdk.search_users(group_id=group_id):
            assert user.id
            assert user.email
            sdk.update_user(user_id=user.id, body=models.WriteUser(is_disabled=False))
            password_reset_url = sdk.create_user_credentials_email_password_reset(
                user_id=user.id, expires=False
            ).password_reset_url
            assert password_reset_url
            # Plain literal substitution — str.replace, not a regex.
            setup = password_reset_url.replace("password/reset", "account/setup")
            ret[user.email] = setup
    return ret
@try_to
def create_email_credentials(*, sdk: methods.LookerSDK, user_id: int, email: str):
    """Attach email/password credentials to the given user."""
    body = models.WriteCredentialsEmail(email=email)
    sdk.create_user_credentials_email(user_id=user_id, body=body)
@try_to
def create_api3_credentials(
    *, sdk: methods.LookerSDK, user_id: int
) -> models.CredentialsApi3:
    """Mint a fresh API3 client id/secret pair for the user and return it."""
    new_credentials = models.CredentialsApi3()
    return sdk.create_user_credentials_api3(user_id=user_id, body=new_credentials)
@try_to
def set_user_group(*, sdk: methods.LookerSDK, user_id: int, hackathon: str):
    """Add the user to the hackathon's group, creating the group on demand.

    A newly created group is also attached to the "Hackathon" role, preserving
    the role's existing group assignments.
    """
    global LOOKER_GROUP_PREFIX
    # TODO - switch to sdk.search_groups once that method is live on
    # sandboxcl and hack instances
    groups = sdk.all_groups(fields="id,name")
    name = f"{LOOKER_GROUP_PREFIX}{hackathon}"
    for group in groups:
        if group.name == name:
            break
    else:
        # for/else: no existing group matched — create one and rewire the
        # Hackathon role's group list to include it.
        role = get_hackathon_role(sdk=sdk)
        assert role.id
        role_groups = []
        for g in sdk.role_groups(role_id=role.id, fields="id"):
            assert g.id
            role_groups.append(g.id)
        group = sdk.create_group(body=models.WriteGroup(name=name))
        assert group.id
        role_groups.append(group.id)
        sdk.set_role_groups(role_id=role.id, body=role_groups)
    assert group.id
    sdk.add_group_user(
        group_id=group.id, body=models.GroupIdForGroupUserInclusion(user_id=user_id)
    )
@try_to
def set_user_attributes(*, sdk: methods.LookerSDK, user_id: int, hackathon: str):
    """Record the hackathon name on the user via the "hackathon" user attribute."""
    hackathon_attr_id = get_hackathon_attr_id(sdk=sdk)
    assert hackathon_attr_id
    sdk.set_user_attribute_user_value(
        user_id=user_id,
        user_attribute_id=hackathon_attr_id,
        body=models.WriteUserAttributeWithValue(value=hackathon),
    )
@try_to
def disable_user(*, sdk: methods.LookerSDK, user_id: int):
    """Disable the user's account (kept off until the hackathon opens)."""
    patch = models.WriteUser(is_disabled=True)
    sdk.update_user(user_id=user_id, body=patch)
def me():
    """Return the user record for the API credentials currently configured."""
    return looker_sdk.init31().me()
class RegisterError(Exception):
    """Failed to register a user in the Looker instance.

    Raised by the registration helpers in this module, usually wrapping an
    underlying looker_sdk error.SDKError (see the try_to decorator).
    """
| |
from flask import Flask,g, render_template, redirect, url_for, request, abort, make_response
from flask_bootstrap import Bootstrap
import os
import sqlite3
import json
from datetime import datetime
from time import mktime
# Application setup: configuration lives on the Flask app object.
app = Flask(__name__)
app.config.from_object(__name__)
app.config.update(dict(
    DATABASE=os.path.join(app.root_path, 'database/data.db'),
    AUTO_DEALLOCATE_TIMEOUT=5400  # Release resource after 1 hour and a half
))
Bootstrap(app)
def connect_db():
    """Open a new SQLite connection with dict-style row access."""
    connection = sqlite3.connect(app.config['DATABASE'])
    connection.row_factory = sqlite3.Row
    return connection
def init_db():
    """(Re)build the schema from schema.sql and journal a CREATE event.

    NOTE(review): duplicates the lazy-connection logic of get_db(); the exact
    effect on existing tables depends on schema.sql's contents.
    """
    if not hasattr(g, 'sqlite_db'):
        g.sqlite_db = connect_db()
    db = g.sqlite_db
    with app.open_resource('schema.sql', mode='r') as f:
        db.cursor().executescript(f.read())
    db.execute("INSERT INTO journal (event_time, event_action, event_data) VALUES (?, ?, ?)",
               [datetime.now(),
                "CREATE",
                "Database Initialised!"])
    db.commit()
@app.cli.command('initdb')
def initdb_command():
    """CLI entry point: (re)create the database schema.

    Uses print() call syntax — the original used the Python 2-only print
    statement, which is a SyntaxError on Python 3 and inconsistent with the
    call syntax used elsewhere in this module (show_status).
    """
    init_db()
    print("Database Initialised")
@app.cli.command('dummydata')
def dummydata_command():
    """CLI entry point: insert two sample resources plus journal entries."""
    db = get_db()
    # Each INSERT's lastrowid links the journal entry that follows it.
    cur = db.execute("INSERT INTO resources (resource_name,resource_address,allocated) VALUES (?,?,?)",
                     ["resource1", "192.168.0.1",0])
    cur = db.execute("INSERT INTO journal (event_time, event_action,event_resource_id,event_data) VALUES (?, ?, ?, ?)",
                     [datetime.now(),
                      "ADD_RESOURCE",
                      cur.lastrowid,
                      "Resource added by dummydata!"])
    cur = db.execute("INSERT INTO resources (resource_name,resource_address,allocated) VALUES (?,?,?)",
                     ["resource2", "10.0.0.1",0])
    cur = db.execute(
        "INSERT INTO journal (event_time, event_action,event_resource_id,event_data) VALUES (?, ?, ?, ?)",
        [datetime.now(),
         "ADD_RESOURCE",
         cur.lastrowid,
         "Resource added by dummydata!"])
    db.commit()
def revoke_timedout_allocation(db):
    """Free every resource held longer than AUTO_DEALLOCATE_TIMEOUT seconds.

    Each released resource is cleared in the resources table and recorded in
    the journal; a single commit covers all releases.
    """
    unixtime = mktime(datetime.now().timetuple())
    cutoff = unixtime - app.config["AUTO_DEALLOCATE_TIMEOUT"]
    # Materialise the SELECT before mutating: issuing UPDATE/INSERT on the
    # same connection while the SELECT cursor is still being iterated can
    # make sqlite skip or repeat rows.
    expired = db.execute("SELECT id FROM resources WHERE allocated = 1 AND allocated_at < ?",
                         [cutoff]).fetchall()
    for resource in expired:
        db.execute("UPDATE resources SET allocated = 0, allocated_to_address = '', allocated_to_id = '', allocated_at = NULL WHERE id = ?",
                   [resource['id']])
        db.execute("INSERT INTO journal (event_time, event_action,event_resource_id,event_data) VALUES (?, ?, ?, ?)",
                   [datetime.now(),
                    "DEALLOCATE_RESOURCE",
                    resource['id'],
                    "Resource deallocated by resource manager"])
    db.commit()
def get_db():
    """Return the per-app-context SQLite connection, creating it on first use.

    Also opportunistically releases any allocation older than
    AUTO_DEALLOCATE_TIMEOUT on every access.
    """
    if not hasattr(g, 'sqlite_db'):
        g.sqlite_db = connect_db()
    revoke_timedout_allocation(g.sqlite_db)
    return g.sqlite_db
@app.teardown_appcontext
def close_db(error):
    """Close the per-context SQLite handle, if one was ever opened."""
    db = getattr(g, 'sqlite_db', None)
    if db is not None:
        db.close()
@app.errorhandler(503)
def custom503(error):
    """Serve an empty-bodied 503 when a resource request cannot be satisfied."""
    unavailable = make_response()
    unavailable.status_code = 503
    return unavailable
@app.route('/status', methods=['GET'])
def show_status():
    """Return a JSON map of resource id -> full resource record."""
    db = get_db()
    json_ret = dict()
    for resource in db.execute('SELECT * FROM resources'):
        json_ret[resource['id']] = {'id':resource['id'],
                                    'resource_name': resource['resource_name'],
                                    'resource_address': resource['resource_address'],
                                    'allocated': resource['allocated'],
                                    'allocated_to_id': resource['allocated_to_id'],
                                    'allocated_to_address': resource['allocated_to_address'],
                                    'allocated_at': resource['allocated_at'],
                                    'allocatable':resource['allocatable'],
                                    'additional_parameters':resource['additional_parameters']}
    # NOTE(review): debug print to stdout left in — consider removing in
    # production; it dumps the full status on every request.
    print(json.dumps(json_ret,sort_keys=True, indent=4))
    return json.dumps(json_ret)
@app.route('/allocate/<type>/to/<requester>', methods=["GET"])
def allocate_resource(type, requester):
    """Allocate the first free, allocatable resource of <type> to <requester>.

    Returns JSON {id, address, name, additional_parameters} on success and
    responds 503 when nothing of that type is free.  `type` shadows the
    builtin, but the name is fixed by the URL-rule parameter.
    """
    db = get_db()
    cur = db.execute('SELECT * FROM resources WHERE resource_type = ? AND allocated = 0 AND allocatable = 1;',
                     [type])
    row = cur.fetchall()
    if len(row) != 0:
        # Record the allocation time as a unix timestamp for timeout checks.
        unixtime = mktime(datetime.now().timetuple())
        # update the row in DB to reflect that we are now using the resource
        db.execute("UPDATE resources SET allocated = 1, allocated_to_id = ?, allocated_to_address = ?, allocated_at = ? WHERE id = ?",
                   [ requester,
                     request.remote_addr,
                     unixtime,
                     row[0]['id']])
        db.execute("INSERT INTO journal (event_time, event_action,event_resource_id,event_data) VALUES (?, ?, ?, ?)",
                   [datetime.now(),
                    "ALLOCATE_RESOURCE",
                    row[0]['id'],
                    "Resource allocated to %s for request %s" % (request.remote_addr,requester)])
        db.commit()
        ret = {'id':row[0]['id'],'address':row[0]['resource_address'],'name':row[0]['resource_name'],'additional_parameters':row[0]['additional_parameters']}
        return json.dumps(ret)
    else:
        abort(503)
@app.route('/ip_query/<type>', methods=["GET"])
def resource_address_query(type):
    """Look up a non-allocatable (static) resource of the given type.

    Responds 503 when no such resource exists.
    """
    db = get_db()
    rows = db.execute('SELECT * FROM resources WHERE resource_type = ? AND allocatable = 0;',
                      [type]).fetchall()
    if not rows:
        abort(503)
    first = rows[0]
    ret = {'id':first['id'],'address':first['resource_address'],'name':first['resource_name'],'additional_parameters':first['additional_parameters']}
    return json.dumps(ret)
@app.route('/deallocate/<id>',methods=["GET"])
def deallocate_resource(id):
    """Release the resource with the given id and journal the release."""
    db = get_db()
    db.execute("UPDATE resources SET allocated = 0, allocated_to_address = '', allocated_to_id = '', allocated_at = NULL WHERE id = ?",
               [id])
    db.execute("INSERT INTO journal (event_time, event_action,event_resource_id,event_data) VALUES (?, ?, ?, ?)",
               [datetime.now(),
                "DEALLOCATE_RESOURCE",
                id,
                "Resource deallocated by %s" % (request.remote_addr)])
    db.commit()
    return ""
@app.route('/deallocate_ip/<id>',methods=["GET"])
def deallocate_resource_ip(id):
    """Release every resource currently allocated to the requester id <id>."""
    db = get_db()
    db.execute("UPDATE resources SET allocated = 0, allocated_to_address = '', allocated_to_id = '', allocated_at = NULL WHERE allocated_to_id = ?",
               [id])
    journal_row = [datetime.now(),
                   "DEALLOCATE_RESOURCE (IP)",
                   id,
                   "Resource deallocated by %s" % (request.remote_addr)]
    db.execute("INSERT INTO journal (event_time, event_action,event_resource_id,event_data) VALUES (?, ?, ?, ?)",
               journal_row)
    db.commit()
    return ""
@app.route('/add', methods=["POST"])
def add_resource():
    """Create a resource from the submitted form and journal the addition."""
    db = get_db()
    # HTML checkboxes post 'on' when ticked and are absent otherwise.
    allocatable = 1 if request.form.get("allocatable") == 'on' else 0
    cur = db.execute("INSERT INTO resources (resource_name, resource_address, resource_type, allocated, allocatable, additional_parameters) VALUES (?, ?, ?, ?, ?, ?)",
                     [request.form['resource_name'].strip(),
                      request.form['resource_address'].strip(),
                      request.form['resource_type'].strip(),
                      0,
                      allocatable,
                      request.form['additional_parameters'].strip()])
    db.execute("INSERT INTO journal (event_time, event_action,event_resource_id,event_data) VALUES (?, ?, ?, ?)",
               [datetime.now(),
                "ADD_RESOURCE",
                cur.lastrowid,
                "Resource added by %s" % (request.remote_addr)])
    db.commit()
    return redirect(url_for('index'))
@app.route('/remove/<id>', methods=["POST","GET"])
def remove_resource(id):
    """Delete a resource row by id and journal the removal.

    NOTE(review): the DELETE uses lowercase SQL with a tuple parameter while
    the rest of the module uses uppercase SQL with list parameters — works,
    but worth normalising.
    """
    db = get_db()
    db.execute("delete from resources where id = ?",(id,))
    db.execute("INSERT INTO journal (event_time, event_action,event_resource_id,event_data) VALUES (?, ?, ?, ?)",
               [datetime.now(),
                "REMOVE_RESOURCE",
                id,
                "Resource removed by %s" % (request.remote_addr)])
    db.commit()
    return redirect(url_for('index'))
@app.route('/')
def index():
    """Render the dashboard: all resources plus the 20 most recent events."""
    db = get_db()
    resources = db.execute("SELECT * FROM resources").fetchall()
    recent_events = db.execute("SELECT * FROM journal ORDER BY id DESC LIMIT 20").fetchall()
    return render_template('index.html', resources=resources, events=recent_events)
| |
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command for creating instance templates."""
from googlecloudsdk.api_lib.compute import base_classes
from googlecloudsdk.api_lib.compute import image_utils
from googlecloudsdk.api_lib.compute import instance_template_utils
from googlecloudsdk.api_lib.compute import instance_utils
from googlecloudsdk.api_lib.compute import metadata_utils
from googlecloudsdk.api_lib.compute import utils
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.compute import flags
from googlecloudsdk.command_lib.compute.instance_templates import flags as instance_templates_flags
from googlecloudsdk.command_lib.compute.instances import flags as instances_flags
def _CommonArgs(parser, multiple_network_interface_cards, release_track,
                support_alias_ip_ranges):
  """Common arguments used in Alpha, Beta, and GA.

  Args:
    parser: the argparse parser to register flags on.
    multiple_network_interface_cards: bool, allow repeated --network-interface.
    release_track: base.ReleaseTrack, gates the track-only flags below.
    support_alias_ip_ranges: bool, allow alias IP ranges on interfaces.
  """
  metadata_utils.AddMetadataArgs(parser)
  instances_flags.AddDiskArgs(parser)
  # --create-disk and extended machine types are Alpha-only at this point.
  if release_track in [base.ReleaseTrack.ALPHA]:
    instances_flags.AddCreateDiskArgs(parser)
    instances_flags.AddExtendedMachineTypeArgs(parser)
  instances_flags.AddLocalSsdArgs(parser)
  instances_flags.AddCanIpForwardArgs(parser)
  instances_flags.AddAddressArgs(
      parser, instances=False,
      multiple_network_interface_cards=multiple_network_interface_cards,
      support_alias_ip_ranges=support_alias_ip_ranges)
  instances_flags.AddMachineTypeArgs(parser)
  instances_flags.AddMaintenancePolicyArgs(parser)
  instances_flags.AddNoRestartOnFailureArgs(parser)
  instances_flags.AddPreemptibleVmArgs(parser)
  instances_flags.AddServiceAccountAndScopeArgs(parser, False)
  instances_flags.AddTagsArgs(parser)
  instances_flags.AddCustomMachineTypeArgs(parser)
  instances_flags.AddImageArgs(parser)
  instances_flags.AddNetworkArgs(parser)
  flags.AddRegionFlag(
      parser,
      resource_type='subnetwork',
      operation_type='attach')
  parser.add_argument(
      '--description',
      help='Specifies a textual description for the instance template.')
  instance_templates_flags.INSTANCE_TEMPLATE_ARG.AddArgument(parser)
@base.ReleaseTracks(base.ReleaseTrack.GA)
class Create(base_classes.BaseAsyncCreator):
  """Create a Compute Engine virtual machine instance template.

  *{command}* facilitates the creation of Google Compute Engine
  virtual machine instance templates. For example, running:

    $ {command} INSTANCE-TEMPLATE

  will create one instance templates called 'INSTANCE-TEMPLATE'.

  Instance templates are global resources, and can be used to create
  instances in any zone.
  """

  @staticmethod
  def Args(parser):
    _CommonArgs(parser, multiple_network_interface_cards=False,
                release_track=base.ReleaseTrack.GA,
                support_alias_ip_ranges=False)

  @property
  def service(self):
    # API service used by BaseAsyncCreator to issue the request.
    return self.compute.instanceTemplates

  @property
  def method(self):
    return 'Insert'

  @property
  def resource_type(self):
    return 'instanceTemplates'

  def ValidateDiskFlags(self, args):
    """Validates the values of all disk-related flags."""
    instances_flags.ValidateDiskCommonFlags(args)
    instances_flags.ValidateDiskBootFlags(args)
    instances_flags.ValidateCreateDiskFlags(args)

  def CreateRequests(self, args):
    """Creates and returns an InstanceTemplates.Insert request.

    Args:
      args: the argparse arguments that this command was invoked with.

    Returns:
      request: a ComputeInstanceTemplatesInsertRequest message object
    """
    # Fail fast on inconsistent flag combinations before building messages.
    self.ValidateDiskFlags(args)
    instances_flags.ValidateLocalSsdFlags(args)
    instances_flags.ValidateNicFlags(args)
    instances_flags.ValidateServiceAccountAndScopeArgs(args)

    boot_disk_size_gb = utils.BytesToGb(args.boot_disk_size)
    utils.WarnIfDiskSizeIsTooSmall(boot_disk_size_gb, args.boot_disk_type)

    instance_template_ref = (
        instance_templates_flags.INSTANCE_TEMPLATE_ARG.ResolveAsResource(
            args, self.resources))

    metadata = metadata_utils.ConstructMetadataMessage(
        self.messages,
        metadata=args.metadata,
        metadata_from_file=args.metadata_from_file)

    # --network-interface only exists on tracks that registered it
    # (multiple_network_interface_cards=True), hence the hasattr guard.
    if hasattr(args, 'network_interface') and args.network_interface:
      network_interfaces = (
          instance_template_utils.CreateNetworkInterfaceMessages)(
              resources=self.resources,
              scope_lister=flags.GetDefaultScopeLister(
                  self.compute_client, self.project),
              messages=self.messages,
              network_interface_arg=args.network_interface,
              region=args.region)
    else:
      network_interfaces = [
          instance_template_utils.CreateNetworkInterfaceMessage(
              resources=self.resources,
              scope_lister=flags.GetDefaultScopeLister(
                  self.compute_client, self.project),
              messages=self.messages,
              network=args.network,
              region=args.region,
              subnet=args.subnet,
              # Fall back to an ephemeral external address unless the user
              # asked for none or supplied a specific one.
              address=(instance_template_utils.EPHEMERAL_ADDRESS
                       if not args.no_address and not args.address
                       else args.address))
      ]

    scheduling = instance_utils.CreateSchedulingMessage(
        messages=self.messages,
        maintenance_policy=args.maintenance_policy,
        preemptible=args.preemptible,
        restart_on_failure=args.restart_on_failure)

    if args.no_service_account:
      service_account = None
    else:
      service_account = args.service_account
    service_accounts = instance_utils.CreateServiceAccountMessages(
        messages=self.messages,
        scopes=[] if args.no_scopes else args.scopes,
        service_account=service_account)

    # Only build a boot disk when the user is not reusing an existing one.
    create_boot_disk = not instance_utils.UseExistingBootDisk(args.disk or [])
    if create_boot_disk:
      image_expander = image_utils.ImageExpander(self.compute_client,
                                                 self.resources)
      image_uri, _ = image_expander.ExpandImageFlag(
          user_project=self.project,
          image=args.image,
          image_family=args.image_family,
          image_project=args.image_project,
          return_image_resource=True)
    else:
      image_uri = None

    if args.tags:
      tags = self.messages.Tags(items=args.tags)
    else:
      tags = None

    persistent_disks = (
        instance_template_utils.CreatePersistentAttachedDiskMessages(
            self.messages, args.disk or []))
    # --create-disk is only registered on Alpha; default to [] elsewhere.
    persistent_create_disks = (
        instance_template_utils.CreatePersistentCreateDiskMessages(
            self, self.messages, getattr(args, 'create_disk', [])))
    if create_boot_disk:
      boot_disk_list = [
          instance_template_utils.CreateDefaultBootAttachedDiskMessage(
              messages=self.messages,
              disk_type=args.boot_disk_type,
              disk_device_name=args.boot_disk_device_name,
              disk_auto_delete=args.boot_disk_auto_delete,
              disk_size_gb=boot_disk_size_gb,
              image_uri=image_uri)]
    else:
      boot_disk_list = []

    local_ssds = []
    for x in args.local_ssd or []:
      local_ssd = instance_utils.CreateLocalSsdMessage(
          self.resources,
          self.messages,
          x.get('device-name'),
          x.get('interface'))
      local_ssds.append(local_ssd)

    # Boot disk must come first in the attached-disk list.
    disks = (
        boot_disk_list + persistent_disks + persistent_create_disks + local_ssds
    )

    machine_type = instance_utils.InterpretMachineType(
        machine_type=args.machine_type,
        custom_cpu=args.custom_cpu,
        custom_memory=args.custom_memory,
        # Extended machine types are Alpha-only; see _CommonArgs.
        ext=getattr(args, 'custom_extensions', None))

    request = self.messages.ComputeInstanceTemplatesInsertRequest(
        instanceTemplate=self.messages.InstanceTemplate(
            properties=self.messages.InstanceProperties(
                machineType=machine_type,
                disks=disks,
                canIpForward=args.can_ip_forward,
                metadata=metadata,
                networkInterfaces=network_interfaces,
                serviceAccounts=service_accounts,
                scheduling=scheduling,
                tags=tags,
            ),
            description=args.description,
            name=instance_template_ref.Name(),
        ),
        project=self.project)

    return [request]
@base.ReleaseTracks(base.ReleaseTrack.BETA)
class CreateBeta(Create):
  """Create a Compute Engine virtual machine instance template.

  *{command}* facilitates the creation of Google Compute Engine
  virtual machine instance templates. For example, running:

      $ {command} INSTANCE-TEMPLATE

  will create one instance template called 'INSTANCE-TEMPLATE'.

  Instance templates are global resources, and can be used to create
  instances in any zone.
  """

  @classmethod
  def Args(cls, parser):
    # BETA track: single network interface only, no alias IP ranges.
    _CommonArgs(parser, multiple_network_interface_cards=False,
                release_track=base.ReleaseTrack.BETA,
                support_alias_ip_ranges=False)
@base.ReleaseTracks(base.ReleaseTrack.ALPHA)
class CreateAlpha(Create):
  """Create a Compute Engine virtual machine instance template.

  *{command}* facilitates the creation of Google Compute Engine
  virtual machine instance templates. For example, running:

      $ {command} INSTANCE-TEMPLATE

  will create one instance template called 'INSTANCE-TEMPLATE'.

  Instance templates are global resources, and can be used to create
  instances in any zone.
  """

  @staticmethod
  def Args(parser):
    # ALPHA track: enables multiple NICs and alias IP range support.
    _CommonArgs(parser, multiple_network_interface_cards=True,
                release_track=base.ReleaseTrack.ALPHA,
                support_alias_ip_ranges=True)
| |
# utilities for dealing with (sub)processes
import subprocess
import os
import os.path
import sys
import re
import tempfile
import signal
import time
import pwd
import grp
import getpass
def get_pid_from_file(pidfile, resource_id=None, logger=None):
    """Read a process id from pidfile.

    Returns the pid as an int, or None when the file is missing or empty.
    When both logger and resource_id are supplied, the "server not up"
    cases are logged at debug level; otherwise the function is silent.
    Raises ValueError if the file content is not an integer.
    """
    if not os.path.exists(pidfile):
        if logger is not None and resource_id is not None:
            logger.debug("%s: server not up - pid file '%s' not found" %
                         (resource_id, pidfile))
        return None
    # Context manager closes the handle even if int() below raises;
    # also avoids shadowing the builtin 'file'. Text mode is equivalent
    # to the old "rb" on the POSIX platforms this module supports.
    with open(pidfile, "r") as f:
        data = f.read()
    if len(data) > 0:
        return int(data)
    if logger is not None and resource_id is not None:
        logger.debug("%s: server not up - pid file '%s' is empty" %
                     (resource_id, pidfile))
    return None
def run_and_log_program(program_and_args, env_mapping, logger, cwd=None,
                        input=None, hide_input=False,
                        hide_command=False, hide_environ=False, allow_broken_pipe=False):
    """Run the specified program as a subprocess and log its output.
    program_and_args should be a list of entries where the first is the
    executable path, and the rest are the arguments.

    env_mapping is the child environment (None inherits ours); input, when
    given, is written to the child's stdin. The hide_* flags suppress the
    corresponding debug logging (e.g. for passwords). When allow_broken_pipe
    is set, an OSError while feeding input is downgraded to a warning and
    the child's remaining output is drained instead.
    Returns the child's return code; stdout and stderr are combined and
    logged at debug level.
    """
    if not hide_command:
        logger.debug(' '.join(program_and_args))
    if cwd != None:
        logger.debug("Subprocess working directory is %s" % cwd)
    # Note: 'or' binds weaker than 'and' here, so this is
    # env_mapping==None OR (non-empty AND identical to os.environ).
    if env_mapping == None or len(env_mapping)>0 and (env_mapping==os.environ):
        logger.debug("Subprocess inheriting parent process's environment")
    elif len(env_mapping)>0:
        if not hide_environ:
            logger.debug("Subprocess environment is %s" % str(env_mapping))
    else:
        logger.debug("Subprocess passed empty environment")
    # stderr is folded into stdout so there is a single stream to log.
    subproc = subprocess.Popen(program_and_args,
                               env=env_mapping, stdin=subprocess.PIPE,
                               stdout=subprocess.PIPE,
                               stderr=subprocess.STDOUT, cwd=cwd)
    logger.debug("Started program %s, pid is %d" % (program_and_args[0],
                                                    subproc.pid))
    if input!=None:
        if not hide_input:
            logger.debug("Input is " + input)
        try:
            # communicate() writes input, closes stdin and collects output.
            (output, dummy) = subproc.communicate(input)
            for line in output.split("\n"):
                logger.debug("[%d] %s" % (subproc.pid, line.rstrip()))
        except OSError:
            # Child closed stdin early (e.g. sudo with a cached credential).
            if not allow_broken_pipe:
                raise
            else:
                logger.warn("Subprocess %d closed stdin before write of input data complete" %
                            subproc.pid)
                for line in subproc.stdout:
                    logger.debug("[%d] %s" % (subproc.pid, line))
    else:
        # No input: close stdin so the child sees EOF, then stream output.
        subproc.stdin.close()
        for line in subproc.stdout:
            logger.debug("[%d] %s" % (subproc.pid, line))
    subproc.wait()
    logger.debug("[%d] %s exited with return code %d" % (subproc.pid,
                                                         program_and_args[0],
                                                         subproc.returncode))
    return subproc.returncode
class SubprocBadRcError(Exception):
    """Raised when a subprocess exits with an unexpected return code."""
    def __init__(self, exe_path, rc):
        message = "%s exited with return code %d" % (exe_path, rc)
        Exception.__init__(self, message)
        # Keep the pieces around so callers can inspect them.
        self.exe_path = exe_path
        self.rc = rc
def run_program_and_capture_results(program_and_args, env_mapping, logger,
                                    cwd=None, input=None, expected_rcs=(0,)):
    """Run the program as a subprocess and return its stdout/stderr as one string.

    program_and_args: list whose first entry is the executable path.
    env_mapping: child environment, or None to inherit ours.
    input: optional data written to the child's stdin.
    expected_rcs: return codes considered successful (default: only 0).

    The combined stdout/stderr is also logged at debug level. Raises
    SubprocBadRcError if the return code is not in expected_rcs.
    """
    logger.debug(' '.join(program_and_args))
    if cwd != None:
        logger.debug("Subprocess working directory is %s" % cwd)
    if env_mapping == None or len(env_mapping)>0 and (env_mapping==os.environ):
        logger.debug("Subprocess inheriting parent process's environment")
    elif len(env_mapping)>0:
        logger.debug("Subprocess environment is %s" % str(env_mapping))
    else:
        logger.debug("Subprocess passed empty environment")
    try:
        subproc = subprocess.Popen(program_and_args,
                                   env=env_mapping, stdin=subprocess.PIPE,
                                   stdout=subprocess.PIPE,
                                   stderr=subprocess.STDOUT, cwd=cwd)
        logger.debug("Started program %s, pid is %d" % (program_and_args[0],
                                                        subproc.pid))
        if input!=None:
            logger.debug("Input is " + input)
        (output, dummy) = subproc.communicate(input)
        for line in output.split("\n"):
            logger.debug("[%d] %s" % (subproc.pid, line.rstrip()))
        subproc.wait()
    except Exception as e:  # 'as' form works on Python 2.6+ and 3.x
        logger.error("Call to %s failed with exception %s" % (program_and_args[0],
                                                              e))
        raise
    logger.debug("[%d] %s exited with return code %d" % (subproc.pid,
                                                         program_and_args[0],
                                                         subproc.returncode))
    if subproc.returncode not in expected_rcs:
        raise SubprocBadRcError(program_and_args[0], subproc.returncode)
    return output
def system(shell_command_string, logger, log_output_as_info=False, cwd=None):
    """Replacement for os.system(), which doesn't handle return codes correctly.
    We also do logging. Set log_output_as_info to True if you have a really
    long-running action.
    Returns the exit code of the child process.
    run_and_log_programm() is still preferred, as it is more
    robust."""
    logger.debug(shell_command_string)
    log_fn = logger.info if log_output_as_info else logger.debug
    subproc = subprocess.Popen(shell_command_string, shell=True,
                               cwd=cwd,
                               stdout=subprocess.PIPE,
                               stderr=subprocess.STDOUT)
    for output_line in subproc.stdout:
        log_fn("[%d] %s" % (subproc.pid, output_line.rstrip()))
    (_, exit_status) = os.waitpid(subproc.pid, 0)
    # The low byte of the wait status carries the terminating signal,
    # the high byte the exit code.
    rc = exit_status >> 8
    logger.debug("[%d] exited with return code %d" % (subproc.pid, rc))
    return rc
class SudoError(Exception):
    """Base class for errors raised by the sudo helper functions."""
    def get_nested_exc_info(self):
        # The base class carries no nested exception information.
        return None
class SudoBadRc(SudoError):
    """Raised when a command run via sudo exits with a non-zero return code."""
    def __init__(self, rc, program_and_args):
        self.rc = rc
        self.program_and_args = program_and_args

    def get_exc_info(self, current_exc_state):
        # No nested exception of our own; hand back the caller's state.
        return current_exc_state

    def __str__(self):
        cmdline = " ".join(self.program_and_args)
        return "Sudo execution of '%s' failed, return code was %d" % (cmdline,
                                                                      self.rc)

    def __repr__(self):
        return "SudoBadRc(%d, %s)" % (self.rc, repr(self.program_and_args))
class SudoExcept(SudoError):
    """Raised when a command run via sudo raised an exception."""
    def __init__(self, exc_info, program_and_args, rc=None):
        self.exc_info = exc_info
        (self.exc_type, self.exc_val, self.exc_tb) = exc_info
        self.program_and_args = program_and_args
        self.rc = rc

    def get_nested_exc_info(self):
        return self.exc_info

    def __str__(self):
        cmdline = " ".join(self.program_and_args)
        msg = "Sudo execution of '%s' failed, exception was '%s(%s)'" % (cmdline,
                                                                         self.exc_type, self.exc_val)
        if self.rc is not None:
            msg = msg + ", return code was %d" % self.rc
        return msg

    def __repr__(self):
        return "%s(%s, %s, rc=%s)" % (self.__class__.__name__,
                                      repr(self.exc_info),
                                      repr(self.program_and_args), self.rc)
class SudoTimestampError(SudoExcept):
    """Raised when clearing the sudo timestamp ('sudo -K') fails with
    an exception; carries the same fields as SudoExcept.
    """
    def __init__(self, exc_info, program_and_args, rc=None):
        SudoExcept.__init__(self, exc_info, program_and_args, rc)
class SudoTimestampBadRc(SudoError):
    """Raised when clearing the sudo timestamp ('sudo -K') exits with a
    non-zero return code.

    Deliberately NOT a subclass of SudoBadRc: callers that catch SudoBadRc
    around ordinary sudo commands must not swallow timestamp failures.
    """
    def __init__(self, rc, program_and_args):
        self.rc = rc
        self.program_and_args = program_and_args

    def get_exc_info(self, current_exc_state):
        # No nested exception of our own; hand back the caller's state.
        return current_exc_state

    def __str__(self):
        cmdline = " ".join(self.program_and_args)
        return "Sudo execution of '%s' failed, return code was %d" % (cmdline,
                                                                      self.rc)

    def __repr__(self):
        return "SudoTimestampBadRc(%d, %s)" % (self.rc,
                                               repr(self.program_and_args))
# setup some executable paths used by some of the following utility functions
if sys.platform=="darwin":
_cp_exe = "/bin/cp"
_chmod_exe = "/bin/chmod"
_chown_exe = "/usr/sbin/chown"
_chgrp_exe = "/usr/bin/chgrp"
_sudo_exe = "/usr/bin/sudo"
_mkdir_exe = "/bin/mkdir"
_cat_exe = "/bin/cat"
_kill_exe = "/bin/kill"
_rm_exe = "/bin/rm"
elif sys.platform=="linux2":
_cp_exe = "/bin/cp"
_chmod_exe = "/bin/chmod"
_chown_exe = "/bin/chown"
_chgrp_exe = "/bin/chgrp"
_sudo_exe = "/usr/bin/sudo"
_mkdir_exe = '/bin/mkdir'
_cat_exe = '/bin/cat'
_kill_exe = '/bin/kill'
_rm_exe = "/bin/rm"
else:
raise Exception("engage.utils.process: Undefined plaform %s" % sys.platform)
def clear_sudo_timestamp(logger=None):
    """Clear the sudo timestamp to ensure that the password is always checked.

    Runs 'sudo -K' as a subprocess, logging its output at debug level when
    a logger is supplied.

    Raises:
      SudoTimestampError: running 'sudo -K' raised an exception.
      SudoTimestampBadRc: 'sudo -K' exited with a non-zero return code.
    """
    cmd = [_sudo_exe, "-K"]
    if logger: logger.debug(' '.join(cmd))
    try:
        subproc = subprocess.Popen(cmd, env={}, stdin=None,
                                   stdout=subprocess.PIPE,
                                   stderr=subprocess.STDOUT, cwd=None)
        for line in subproc.stdout:
            if logger: logger.debug("[%d] %s" % (subproc.pid, line))
        subproc.wait()
    except:
        # Capture and clear the exception state before wrapping it.
        # NOTE(review): sys.exc_clear() exists only on Python 2.
        exc_info = sys.exc_info()
        sys.exc_clear()
        raise SudoTimestampError(exc_info, cmd)
    if subproc.returncode != 0:
        raise SudoTimestampBadRc(subproc.returncode, cmd)
def is_running_as_root():
    """Return True when the effective user id is root (0)."""
    return os.geteuid() == 0
def is_sudo_password_required(logger=None):
    """Probe whether sudo needs a password for the current (non-root) user.

    Clears the sudo timestamp, then tries a trivial command under
    'sudo -n'; a zero return code means passwordless sudo is available.
    Must not be called when already running as root.
    """
    assert os.geteuid() != 0, "check only valid when not running as root"
    clear_sudo_timestamp(logger)
    cmd = [_sudo_exe, "-n", "/bin/ls", "/"]
    try:
        probe = subprocess.Popen(cmd, env={}, stdin=None,
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.STDOUT, cwd=None)
        for line in probe.stdout:
            if logger: logger.debug("[%d] %s" % (probe.pid, line))
        probe.wait()
    except Exception as e:
        if logger: logger.exception("Problem when checking for sudo access: %s" % e)
        raise
    return probe.returncode != 0
# The SUDO_PASSWORD_REQUIRED variable has three values:
#   True  - running as a regular user and need a password to access sudo
#   False - running as a regular user but can run sudo in non-interactive mode
#   None  - running as effective user 0 (root), sudo not needed at all
# NOTE(review): this probe launches a sudo subprocess at module import time.
SUDO_PASSWORD_REQUIRED = is_sudo_password_required() if not is_running_as_root() \
                         else None
class NoPasswordError(SudoError):
    """If you attempt to call run_sudo_program without providing
    a password, and you aren't running as root, you get this error.
    Also raised by the other sudo helpers under the same condition.
    """
    pass
def run_sudo_program(program_and_args, sudo_password,
                     logger, cwd=None, env={}, user="root"):
    """Wrapper over run and log program for programs running under sudo.
    It adds sudo and the -S option to the command arguments and then passes
    the password in as the standard input. The echoing of the standard input
    to the logger is supressed.
    If you want to run as a different user from root, specify the user name for the
    user keyword argument. This causes execution with the -s option.
    Note that we don't run under sudo if we're already running as root and want to
    run as root.

    Returns 0 on success. Raises SudoBadRc on a non-zero return code,
    NoPasswordError when a password is needed but not given, and SudoExcept
    for any other failure.
    NOTE(review): env={} is a mutable default argument; it is never mutated
    here, but consider env=None with an explicit default.
    """
    if SUDO_PASSWORD_REQUIRED==None: # running as root
        if user=="root":
            # if we are already root and want to run as root, no need to sudo
            rc = run_and_log_program(program_and_args, env, logger, cwd,
                                     input=None)
            if rc != 0:
                raise SudoBadRc(rc, program_and_args)
            else:
                return 0
        else:
            # do not need to pass in a password, since already root
            # (falls through to the sudo -u invocation below)
            input_to_subproc = None
            opts = ["-n",]
    elif SUDO_PASSWORD_REQUIRED==False:
        # passwordless sudo: non-interactive mode, nothing on stdin
        input_to_subproc = None
        opts = ["-n",]
    elif sudo_password==None:
        raise NoPasswordError("Operation '%s' requires sudo access, but no password provided" % ' '.join(program_and_args))
    else:
        # -S reads the password from stdin; -p "" suppresses the prompt
        input_to_subproc = sudo_password + "\n"
        opts = ["-p", "", "-S"]
    if env==None or len(env)>0:
        # if we intend to pass in the environment, need to tell sudo to
        # propagate it for us.
        opts = ['-E',] + opts
    # we need to clear the sudo timestamp first so that sudo always expects a password and doesn't
    # give us a broken pipe error
    if SUDO_PASSWORD_REQUIRED:
        clear_sudo_timestamp(logger)
    if user=="root":
        cmd = [_sudo_exe,] + opts + program_and_args
    else:
        cmd = [_sudo_exe, "-u", user] + opts + program_and_args
    try:
        rc = run_and_log_program(cmd, env, logger, cwd,
                                 input=input_to_subproc,
                                 hide_input=True, allow_broken_pipe=True)
        if rc != 0:
            raise SudoBadRc(rc, cmd)
        return rc
    except SudoBadRc:
        raise
    except:
        # Wrap anything else, preserving the original exception state.
        exc_info = sys.exc_info()
        sys.exc_clear()
        raise SudoExcept(exc_info, cmd)
def test_sudo(sudo_password, program_and_args=["ls"], iterations=100):
    """Stress-test sudo access by running the same command repeatedly.

    Useful for flushing out intermittent sudo failures; prints progress
    to stdout and re-raises on the first failure.
    """
    import logging
    logger = logging.getLogger("test_sudo")
    logger.setLevel(logging.DEBUG)
    logger.addHandler(logging.StreamHandler(sys.stdout))
    def action(msg):
        print("[ACTION] %s" % msg)
    # Route debug output through plain prints for visibility.
    logger.debug = action
    for iteration in range(1, iterations):
        try:
            run_sudo_program(program_and_args, sudo_password, logger)
        except:
            print("failed in iteration %d" % iteration)
            raise
    print("test successful")
def sudo_copy(copy_args, sudo_password, logger):
    """Run the unix cp command as the superuser.

    copy_args is the argument list handed to cp, e.g. [src_file, dest_file].
    """
    run_sudo_program([_cp_exe] + copy_args, sudo_password, logger)
def _format_unix_mode_bits(mode):
"""
>>> _format_unix_mode_bits(0755)
'755'
>>> _format_unix_mode_bits(01000755)
'755'
>>> _format_unix_mode_bits(0100)
'100'
>>> _format_unix_mode_bits(256)
'400'
>>> _format_unix_mode_bits(01)
'001'
>>> _format_unix_mode_bits(1)
'001'
>>> _format_unix_mode_bits(9)
'011'
"""
if type(mode)==str or type(mode)==unicode:
return mode
oct_mode = oct(mode)
ln = len(oct_mode)
if ln >= 3:
return oct_mode[-3:]
elif ln==2:
return "0" + oct_mode
elif ln==1:
return "00" + oct_mode
else:
assert 0
def sudo_chmod(mode, files, sudo_password, logger, recursive=False):
    """Run the chmod command as the superuser on the given files.

    mode may be an int or an octal string; see _format_unix_mode_bits().
    """
    cmd = [_chmod_exe]
    if recursive:
        cmd.append("-R")
    cmd.append(_format_unix_mode_bits(mode))
    cmd.extend(files)
    run_sudo_program(cmd, sudo_password, logger)
def sudo_chown(user, targets, sudo_password, logger, recursive=False):
    """Run the chown command as the superuser on the given targets."""
    flags = ["-R"] if recursive else []
    run_sudo_program([_chown_exe] + flags + [user] + targets,
                     sudo_password, logger)
def sudo_chgrp(group_name, targets, sudo_password, logger, recursive=False):
    """Run the chgrp command as the superuser to change the group of targets."""
    flags = ["-R"] if recursive else []
    run_sudo_program([_chgrp_exe] + flags + [group_name] + targets,
                     sudo_password, logger)
def sudo_mkdir(dir_path, sudo_password, logger, create_intermediate_dirs=False):
    """Create a directory as the superuser; pass -p when intermediate
    directories should be created too."""
    flags = ["-p"] if create_intermediate_dirs else []
    run_sudo_program([_mkdir_exe] + flags + [dir_path], sudo_password, logger)
def sudo_rm(path, sudo_password, logger):
    """Remove the file or directory as the superuser.

    A directory is removed recursively (rm -r).
    """
    flags = ["-r"] if os.path.isdir(path) else []
    run_sudo_program([_rm_exe] + flags + [path], sudo_password, logger)
def sudo_set_file_permissions(path, user_id, group_id, mode_bits, logger, sudo_password):
    """Set owner, group and mode bits of an existing file, running as root."""
    assert os.path.exists(path), "sudo_set_file_permissions: File %s missing" % path
    # Translate numeric ids to names for the chown user:group argument.
    owner_name = pwd.getpwuid(user_id)[0]
    group_name = grp.getgrgid(group_id)[0]
    sudo_chown("%s:%s" % (owner_name, group_name), [path], sudo_password, logger)
    sudo_chmod(mode_bits, [path], sudo_password, logger)
def sudo_ensure_user_in_group(group_name, logger, sudo_password, user=None):
    """Add user (default: the current uid's user) to group_name via sudo.

    A no-op when we are already root and the target user is root.
    """
    if user is None:
        user = pwd.getpwuid(os.getuid())[0]
    root_user = pwd.getpwuid(0)[0]
    if is_running_as_root() and user == root_user:
        logger.debug("Running as root, no need to ensure user is in group '%s'" % group_name)
        return
    if sys.platform == "darwin":
        # OS X manages group membership through Directory Services.
        cmd = ["/usr/bin/dscl", "localhost", "-append",
               "/Local/Default/Groups/%s" % group_name,
               "GroupMembership", user]
    else:
        cmd = ['/usr/sbin/adduser', user, group_name]
    run_sudo_program(cmd, sudo_password, logger)
def sudo_cat_file(path, logger, sudo_password):
    """Use this to get the contents of a file that is only
    readable to root. Returns the contents of the file

    When already root the file is read directly; otherwise it is read via
    'sudo cat'. Raises NoPasswordError when a password is needed but not
    given, and SudoBadRc when the cat command fails.
    """
    if SUDO_PASSWORD_REQUIRED==None: # running as root
        with open(path, "r") as f:
            return f.read()
    elif SUDO_PASSWORD_REQUIRED==False:
        # passwordless sudo: non-interactive mode, nothing on stdin
        opts = ["-n",]
        input = None
    else:
        if sudo_password==None:
            raise NoPasswordError("sudo_cat_file requires sudo password, but not password provided")
        # -S reads the password from stdin; -p "" suppresses the prompt
        opts = ["-p", "", "-S"]
        input = sudo_password + "\n"
    # we need to clear the sudo timestamp first so that sudo always expects a password and doesn't
    # give us a broken pipe error
    clear_sudo_timestamp(logger)
    cmd = [_sudo_exe,] + opts + [_cat_exe, path]
    logger.debug(' '.join(cmd))
    # stderr is kept separate so only the file content ends up in 'output'
    subproc = subprocess.Popen(cmd, stdin=subprocess.PIPE,
                               stdout=subprocess.PIPE,
                               stderr=subprocess.PIPE)
    logger.debug("Started program %s, pid is %d" % (cmd[0],
                                                    subproc.pid))
    try:
        (output, err) = subproc.communicate(input)
        if len(err)>0:
            for line in err.split('\n'):
                logger.debug("[%d] %s" % (subproc.pid, line))
    except OSError:
        # Child closed stdin early; fall back to draining the pipes.
        # NOTE(review): after communicate() fails the pipes may already be
        # closed, so this recovery path looks fragile — verify it is reachable.
        logger.warn("Subprocess %d closed stdin before write of input data complete" %
                    subproc.pid)
        output = '\n'.join(subproc.stdout)
        for line in subproc.stderr:
            logger.debug("[%d] %s" % (subproc.pid, line))
    subproc.wait()
    if subproc.returncode != 0:
        raise SudoBadRc(subproc.returncode, cmd)
    else:
        return output
if sys.platform == "darwin":
    def is_process_alive(pid):
        """Return True if a process with the given pid exists (checked via ps -p)."""
        ps_proc = subprocess.Popen(["ps", "-p", str(pid)],
                                   stdin=subprocess.PIPE,
                                   stdout=subprocess.PIPE,
                                   stderr=subprocess.STDOUT)
        ps_proc.stdin.close()
        # ps prints a header line plus one line per matching process.
        return len(ps_proc.stdout.read().splitlines()) == 2
elif sys.platform.startswith("linux"):
    # startswith() covers both "linux2" (Python 2) and plain "linux".
    def is_process_alive(pid):
        """Return True if a process with the given pid exists (procfs check)."""
        return os.path.exists("/proc/%d" % pid)
else:
    raise Exception("installutils.process.is_process_alive() not ported to platform %s" % sys.platform)
def sudo_get_pid_from_file(pidfile, logger, sudo_password,
                           resource_id="sudo_get_pid_from_file()"):
    """Parse a process id from the specified file, reading the file as root.

    Returns the pid as an int, or None when the file is missing or contains
    no pid. Raises an Exception when reading fails or when the file contains
    more than one pid-like token.
    """
    # first run an ls to see if the file is there
    logger.debug("sudo_get_pid_from_file(): determining whether pidfile %s is present" % pidfile)
    try:
        run_sudo_program(["/bin/ls", pidfile], sudo_password, logger)
    except SudoBadRc:
        # ls failed => file not present => server not running
        logger.debug("%s: server not up - pid file '%s' not found" %
                     (resource_id, pidfile))
        return None
    # now get the actual file: cat it under sudo and scan the output
    # for runs of digits (match objects are collected via return_mos)
    re_map = {"pid":"\\d+"}
    (rc, res_map) = run_sudo_program_and_scan_results([_cat_exe, pidfile],
                                                      re_map, logger,
                                                      sudo_password,
                                                      return_mos=True,
                                                      log_output=True)
    if rc != 0:
        raise Exception("%s: %s %s failed, returned error code %d" %
                        (resource_id, _cat_exe, pidfile, rc))
    if res_map["pid"]==None or len(res_map["pid"])==0:
        logger.debug("%s: server not up - pid file '%s' does not contain a pid" %
                     (resource_id, pidfile))
        return None
    else:
        if len(res_map["pid"])>1:
            raise Exception("%s: %s %s returned multiple pid matches: %s. Perhaps it is not a pid file?" %
                            (resource_id, _cat_exe, pidfile,
                             [m.group(0) for m in res_map["pid"]]))
        pid = int((res_map["pid"][0]).group(0))
        return pid
def check_server_status(pidfile, logger=None, resource_id=None,
                        remove_pidfile_if_dead_proc=False):
    """Return the pid of a live server process recorded in pidfile, else None.

    The pid is read from pidfile and checked for liveness; a missing pidfile
    means "not running". When remove_pidfile_if_dead_proc is set, a stale
    pidfile (dead process) is deleted. When both logger and resource_id are
    supplied, findings are logged at debug level; otherwise we work silently.
    """
    pid = get_pid_from_file(pidfile, resource_id, logger)
    if pid is None:
        return None
    if is_process_alive(pid):
        if logger is not None and resource_id is not None:
            logger.debug("%s: server up (pid %d)" %
                         (resource_id, pid))
        return pid
    # Process is dead: optionally clean up the stale pidfile.
    if remove_pidfile_if_dead_proc:
        os.remove(pidfile)
    if logger is not None and resource_id is not None:
        logger.debug("%s: server not up - pid '%d' not running" %
                     (resource_id, pid))
    return None
def sudo_check_server_status(pidfile, logger, sudo_password,
                             resource_id="sudo_check_server_status()",
                             remove_if_dead_proc=False):
    """Like check_server_status(), but reads the pidfile as the superuser.

    Useful when the pidfile or a containing directory is not readable by
    the engage user. Returns the pid if the server is running, else None.
    """
    if is_running_as_root():
        pid = get_pid_from_file(pidfile, resource_id, logger)
    else:
        pid = sudo_get_pid_from_file(pidfile, logger, sudo_password,
                                     resource_id)
    if pid is None:
        return None
    if is_process_alive(pid):
        logger.debug("%s: server up (pid %d)" %
                     (resource_id, pid))
        return pid
    # Process is dead: optionally clean up the stale pidfile (as root).
    if remove_if_dead_proc:
        sudo_rm(pidfile, sudo_password, logger)
    logger.debug("%s: server not up - pid '%d' not running" %
                 (resource_id, pid))
    return None
class ServerStopTimeout(Exception):
    """Signals that a server process did not die within the timeout period
    after being sent sigterm or sigkill."""
    def __init__(self, resource_id, pid, timeout_in_secs):
        message = "%s: Unable to stop process %d after %d seconds" % \
                  (resource_id, pid, timeout_in_secs)
        Exception.__init__(self, message)
        self.resource_id = resource_id
        self.pid = pid
        self.timeout_in_secs = timeout_in_secs
def stop_server_process(pidfile, logger, resource_id,
                        timeout_tries=20, force_stop=False):
    """Stop the server process whose pid is recorded in pidfile.

    Sends SIGTERM (or SIGKILL when force_stop is True), then polls once per
    second up to timeout_tries times for the process to exit, removing the
    pidfile afterwards. Returns the pid of the stopped process, or None if
    it was not running to begin with. Raises ServerStopTimeout if the
    process has not gone away after the timeout period.
    """
    pid = check_server_status(pidfile, logger, resource_id,
                              remove_pidfile_if_dead_proc=True)
    if not pid:
        return None
    signo = signal.SIGKILL if force_stop else signal.SIGTERM
    logger.debug("%s: sending signal %d to process %d" %
                 (resource_id, signo, pid))
    os.kill(pid, signo)
    for _ in range(timeout_tries):
        if not is_process_alive(pid):
            try:
                os.remove(pidfile)
            except:
                pass # if the server removes the pidfile on its own, that's ok
            logger.debug("%s: process %d stopped sucessfully" %
                         (resource_id, pid))
            return pid
        time.sleep(1.0)
    raise ServerStopTimeout(resource_id, pid, timeout_tries)
class ServerStartupError(Exception):
    """Raised when a daemonized server process fails to start."""
    pass
def run_server(program_and_args, env_mapping, logfile, logger, pidfile_name=None,
               cwd="/"):
    """Launch program_and_args as a detached server process via daemonize.py.

    Does not wait for the server to finish. The server's output goes to
    logfile; when pidfile_name is given, the daemonizer writes the server's
    pid there. Raises ServerStartupError if the daemonizer exits non-zero.
    """
    assert isinstance(program_and_args, list), \
        "run_server must have program_and args as list, instead was passed '%s' of type %s" % \
        (program_and_args.__repr__(), type(program_and_args))
    log_dir = os.path.dirname(logfile)
    if not os.path.exists(log_dir):
        os.makedirs(log_dir)
    daemonize = os.path.join(os.path.dirname(__file__), "daemonize.py")
    # daemonize.py argument order: exe [--pid-file=...] logfile cwd args...
    cmd = [sys.executable, daemonize, program_and_args[0]]
    if pidfile_name:
        cmd.append("--pid-file=%s" % pidfile_name)
    cmd.extend([logfile, cwd])
    cmd.extend(program_and_args[1:])
    logger.debug(' '.join(cmd))
    subproc = subprocess.Popen(cmd,
                               env=env_mapping, stdin=subprocess.PIPE,
                               stdout=subprocess.PIPE,
                               stderr=subprocess.STDOUT,
                               close_fds=True, cwd=cwd)
    (stdout, stderr) = subproc.communicate(input=None)
    for line in stdout.split('\n'):
        logger.debug("[%d] %s" % (subproc.pid, line))
    if subproc.returncode != 0:
        raise ServerStartupError("Problem in starting daemon %s, rc=%d" %
                                 (program_and_args[0], subproc.returncode))
    if pidfile_name:
        logger.debug("Started server program %s, pidfile is %s, output written to %s" %
                     (program_and_args[0], pidfile_name, logfile))
    else:
        logger.debug("Started server program %s, output written to %s" %
                     (program_and_args[0], logfile))
def sudo_run_server(program_and_args, env_mapping, logfile, logger,
                    sudo_password, cwd="/"):
    """Script for running a server process as root. Unlike the vanilla
    run_server(), the program being run is responsible for creating a pidfile.
    We do this because, if we run under sudo, the child won't be the actual server
    process.

    Raises NoPasswordError when a password is needed but not given, and
    ServerStartupError when the daemonizer exits non-zero.
    """
    log_dir = os.path.dirname(logfile)
    if not os.path.exists(log_dir):
        os.makedirs(log_dir)
    daemonize = os.path.abspath(os.path.join(os.path.dirname(__file__), "daemonize.py"))
    if SUDO_PASSWORD_REQUIRED==None:
        # if we are already root, run directly
        run_server(program_and_args, env_mapping, logfile, logger,
                   pidfile_name=None, cwd=cwd)
        return
    elif SUDO_PASSWORD_REQUIRED==False:
        # passwordless sudo: non-interactive mode, nothing on stdin
        opts = ["-n",]
        input_data = None
    else: # SUDO_PASSWORD_REQUIRED==True
        if sudo_password==None:
            raise NoPasswordError("Operation '%s' requires sudo access, but no password provided" % ' '.join(program_and_args))
        # -S reads the password from stdin; -p "" suppresses the prompt
        opts = ["-p", "", "-S"]
        input_data = sudo_password + "\n"
    # we need to clear the sudo timestamp first so that sudo always expects a password and doesn't
    # give us a broken pipe error
    clear_sudo_timestamp(logger)
    cmd = [_sudo_exe,] + opts + [sys.executable, daemonize,
                                 program_and_args[0], logfile, cwd]
    cmd.extend(program_and_args[1:])
    logger.debug(' '.join(cmd))
    subproc = subprocess.Popen(cmd,
                               env=env_mapping, stdin=subprocess.PIPE,
                               stdout=subprocess.PIPE,
                               stderr=subprocess.STDOUT, cwd=cwd,
                               close_fds=True)
    (stdout, stderr) = subproc.communicate(input=input_data)
    for line in stdout.split('\n'):
        logger.debug("[%d] %s" % (subproc.pid, line))
    if subproc.returncode!=0:
        raise ServerStartupError("Problem in starting daemon %s under sudo" %
                                 program_and_args[0])
    logger.debug("Daemonized subprocess %s" % program_and_args[0])
def run_server_as_user(user, program_and_args, env_mapping, logfile, logger,
                       sudo_password, cwd="/"):
    """Launch program_and_args as a daemon running as the given user.

    Falls back to run_server() when we already are that user; otherwise the
    daemonizer is started through 'sudo -u'. The daemonized program itself
    is responsible for writing a pidfile, since under sudo our child is not
    the actual server process.
    """
    log_dir = os.path.dirname(logfile)
    if not os.path.exists(log_dir):
        os.makedirs(log_dir)
    daemonize = os.path.abspath(os.path.join(os.path.dirname(__file__), "daemonize.py"))
    if user == getpass.getuser():
        # already the desired user: no sudo needed
        run_server(program_and_args, env_mapping, logfile, logger,
                   pidfile_name=None, cwd=cwd)
        return
    if SUDO_PASSWORD_REQUIRED == False or SUDO_PASSWORD_REQUIRED == None:
        sudo_opts = ["-u", user, "-n",]
        stdin_data = None
    else: # password needed
        if sudo_password == None:
            raise NoPasswordError("Operation '%s' requires sudo access, but no password provided" % ' '.join(program_and_args))
        sudo_opts = ['-u', user, "-p", "", "-S"]
        stdin_data = sudo_password + "\n"
    # clear the timestamp so sudo always expects a password and doesn't
    # give us a broken pipe error when feeding it via stdin
    clear_sudo_timestamp(logger)
    cmd = [_sudo_exe] + sudo_opts + [sys.executable, daemonize,
                                     program_and_args[0], logfile, cwd]
    cmd.extend(program_and_args[1:])
    logger.debug(' '.join(cmd))
    daemon_proc = subprocess.Popen(cmd,
                                   env=env_mapping, stdin=subprocess.PIPE,
                                   stdout=subprocess.PIPE,
                                   stderr=subprocess.STDOUT, cwd=cwd,
                                   close_fds=True)
    (stdout, stderr) = daemon_proc.communicate(input=stdin_data)
    for line in stdout.split('\n'):
        logger.debug("[%d] %s" % (daemon_proc.pid, line))
    if daemon_proc.returncode != 0:
        raise ServerStartupError("Problem in starting daemon %s under sudo" %
                                 program_and_args[0])
    logger.debug("Daemonized subprocess %s" % program_and_args[0])
def sudo_stop_server_process(pidfile, logger, resource_id,
                             sudo_password,
                             timeout_tries=20, force_stop=False):
    """Variant of stop_server_process() for servers started as root.

    The kill and the pidfile removal are executed via sudo. Returns the
    pid of the stopped process (None if it was not running); raises
    ServerStopTimeout when the process survives the timeout.
    """
    pid = check_server_status(pidfile, logger, resource_id,
                              remove_pidfile_if_dead_proc=True)
    if not pid:
        return None
    signo = signal.SIGKILL if force_stop else signal.SIGTERM
    logger.debug("%s: sending signal %d to process %d" %
                 (resource_id, signo, pid))
    run_sudo_program([_kill_exe, "-" + str(signo), str(pid)], sudo_password,
                     logger)
    for _ in range(timeout_tries):
        if is_process_alive(pid):
            logger.debug("Process %d still alive!" % pid)
            time.sleep(1.0)
        else:
            logger.debug("Process %d has been stopped" % pid)
            run_sudo_program([_rm_exe, pidfile], sudo_password,
                             logger)
            logger.debug("%s: process %d stopped sucessfully" %
                         (resource_id, pid))
            return pid
    raise ServerStopTimeout(resource_id, pid, timeout_tries)
def run_program_and_scan_results(program_and_args, re_map, logger, env=None,
                                 cwd=None, input=None, log_output=False,
                                 allow_broken_pipe=False,
                                 return_mos=False,
                                 hide_command=False,
                                 shell=False):
    """Run the specified program as a subprocess and scan its output for the
    regular expressions specified in re_map. re_map is a map from symbolic
    names to regular expression patterns. Returns a pair: the return code
    of the program followed by a map from the keys in re_map to booleans
    which indicate whether the associated pattern was found.
    If return_mos is True, then the result map will be from the symbolic
    names to lists of match objects or None, rather than True/False.

    program_and_args may be a list or (with shell=True) a single command
    string. input, when given, is fed to the child's stdin; when
    allow_broken_pipe is set, an OSError while feeding input is downgraded
    to a warning and the remaining output is drained instead.
    """
    regexps = {}
    results = {}
    # Pre-fill every key with the "not found" value for the chosen mode.
    if return_mos:
        not_found_value = None
    else:
        not_found_value = False
    for key in re_map.keys():
        regexps[key] = re.compile(re_map[key])
        results[key] = not_found_value
    # Normalize to both a display string and an executable name.
    if isinstance(program_and_args, list):
        cmdstr = ' '.join(program_and_args)
        exe = program_and_args[0]
    else:
        cmdstr = program_and_args
        exe = program_and_args.split(" ")[0]
    if not hide_command:
        logger.debug(cmdstr)
    logger.debug("shell=%s, cwd=%s" % (shell, cwd))
    # shell=True wants a single string; otherwise pass the original form.
    if shell==True:
        cmd = cmdstr
    else:
        cmd = program_and_args
    subproc = subprocess.Popen(cmd,
                               env=env, stdin=subprocess.PIPE,
                               stdout=subprocess.PIPE,
                               stderr=subprocess.STDOUT, cwd=cwd,
                               shell=shell)
    logger.debug("Started program %s, pid is %d" % (exe,
                                                    subproc.pid))
    lines = None
    try:
        (output, dummy) = subproc.communicate(input)
        lines = output.split("\n")
    except OSError:
        if not allow_broken_pipe:
            raise
        else:
            logger.warn("Subprocess %d closed stdin before write of input data complete" %
                        subproc.pid)
    # If communicate() failed, fall back to streaming whatever is left.
    if lines==None:
        lines = subproc.stdout
    for line in lines:
        if log_output:
            logger.debug("[%d] %s" % (subproc.pid, line))
        for key in regexps.keys():
            mo = regexps[key].search(line)
            if mo!=None:
                if return_mos:
                    # Accumulate every match object for this key.
                    if results[key]!=None:
                        results[key].append(mo)
                    else:
                        results[key] = [mo,]
                else:
                    results[key] = True
    subproc.wait()
    logger.debug("[%d] %s exited with return code %d" % (subproc.pid, exe,
                                                         subproc.returncode))
    return (subproc.returncode, results)
def run_sudo_program_and_scan_results(program_and_args, re_map, logger,
                                      sudo_password, env=None, cwd=None,
                                      log_output=False, return_mos=False):
    """Run program_and_args under sudo and scan its output.

    See run_program_and_scan_results() for the meaning of re_map and of the
    (return code, result map) return value. Raises NoPasswordError when a
    password is needed but none was supplied.
    """
    if SUDO_PASSWORD_REQUIRED is None:
        # already root: run the program directly, no sudo needed
        return run_program_and_scan_results(program_and_args, re_map, logger,
                                            env=env, cwd=cwd,
                                            log_output=log_output,
                                            return_mos=return_mos)
    if SUDO_PASSWORD_REQUIRED == False:
        input_data = None
        opts = ["-n",]
    else: # password needed
        if sudo_password is None:
            raise NoPasswordError("Operation '%s' requires sudo access, but no password provided" % ' '.join(program_and_args))
        input_data = sudo_password + "\n"
        opts = ["-p", "", "-S"]
    # clear the timestamp so sudo always expects a password and doesn't
    # give us a broken pipe error when feeding it via stdin
    clear_sudo_timestamp(logger)
    sudo_cmd = [_sudo_exe,] + opts + program_and_args
    return run_program_and_scan_results(sudo_cmd, re_map, logger,
                                        env, cwd, input=input_data,
                                        log_output=log_output,
                                        return_mos=return_mos,
                                        allow_broken_pipe=True)
def find_matching_processes(process_pattern_list, exclude_pattern=None,
                            treat_patterns_as_literals=True):
    """Check the system for processes whose command names contain one
    of the specified patterns. process_pattern_list should be a list
    of regular expression patterns. If exclude_pattern is provided, we
    compare any candidate matching commands to that pattern. If the command
    contains the exclude_pattern, we drop it.

    If treat_patterns_as_literals is true, we run each pattern through
    re.escape(). Otherwise, we assume they are regular expressions

    Returns a list of (process_id, command) pairs.
    """
    # ps flags and output columns differ between Linux ('linux2' is the
    # Python 2 platform tag) and BSD-style systems.
    if sys.platform == 'linux2':
        psargs = '-ef'
        pid_field = 1
        cmd_field = 7
    else:
        psargs = '-Ax'
        pid_field = 0
        cmd_field = 3
    subproc = subprocess.Popen(["/bin/ps", psargs],
                               stdout=subprocess.PIPE,
                               stderr=subprocess.STDOUT)
    # Build one alternation regex covering all requested patterns.
    if treat_patterns_as_literals:
        pattern = '|'.join(["(%s)" % re.escape(process_name) for process_name in process_pattern_list])
    else:
        pattern = '|'.join(["(%s)" % process_name for process_name in process_pattern_list])
    regexp = re.compile(pattern)
    if exclude_pattern and treat_patterns_as_literals:
        exclude_regexp = re.compile(re.escape(exclude_pattern))
    elif exclude_pattern:
        exclude_regexp = re.compile(exclude_pattern)
    else:
        exclude_regexp = None
    result = []
    for line in subproc.stdout:
        fields = line.split()
        # everything from the command column onward is the command line
        cmd = ' '.join(fields[cmd_field:])
        if regexp.search(cmd):
            if exclude_regexp and exclude_regexp.search(cmd):
                continue
            # fix: the original re-split the same line here for no reason;
            # 'fields' is already the split of this line
            result.append((int(fields[pid_field]), cmd))
    # reap the child explicitly so we don't leave a zombie
    (pid, exit_status) = os.waitpid(subproc.pid, 0)
    return result
| |
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from libcloud.dns.base import DNSDriver, Zone, Record
from libcloud.dns.types import RecordType
from libcloud.dns.types import ZoneDoesNotExistError, ZoneAlreadyExistsError
from libcloud.dns.types import RecordDoesNotExistError
from libcloud.dns.types import RecordAlreadyExistsError
class DummyDNSDriver(DNSDriver):
    """
    Dummy DNS driver.

    >>> from libcloud.dns.drivers.dummy import DummyDNSDriver
    >>> driver = DummyDNSDriver('key', 'secret')
    >>> driver.name
    'Dummy DNS Provider'
    """

    name = "Dummy DNS Provider"
    website = "http://example.com"

    def __init__(self, api_key, api_secret):
        """
        :param api_key: API key or username to used (required)
        :type api_key: ``str``

        :param api_secret: Secret password to be used (required)
        :type api_secret: ``str``

        :rtype: ``None``
        """
        # In-memory store: zone id -> {"zone": Zone, "records": {id: Record}}
        self._zones = {}

    def list_record_types(self):
        """
        >>> driver = DummyDNSDriver('key', 'secret')
        >>> driver.list_record_types()
        ['A']

        @inherits: :class:`DNSDriver.list_record_types`
        """
        return [RecordType.A]

    def list_zones(self):
        """
        >>> driver = DummyDNSDriver('key', 'secret')
        >>> driver.list_zones()
        []

        @inherits: :class:`DNSDriver.list_zones`
        """
        return [entry["zone"] for entry in self._zones.values()]

    def list_records(self, zone):
        """
        >>> driver = DummyDNSDriver('key', 'secret')
        >>> zone = driver.create_zone(domain='apache.org', type='master',
        ...                           ttl=100)
        >>> list(zone.list_records())
        []
        >>> record = driver.create_record(name='libcloud', zone=zone,
        ...                               type=RecordType.A, data='127.0.0.1')
        >>> list(zone.list_records()) #doctest: +ELLIPSIS
        [<Record: zone=apache.org, name=libcloud, type=A...>]
        """
        records = self._zones[zone.id]["records"]
        return records.values()

    def get_zone(self, zone_id):
        """
        >>> driver = DummyDNSDriver('key', 'secret')
        >>> driver.get_zone(zone_id='foobar')
        ... #doctest: +IGNORE_EXCEPTION_DETAIL
        Traceback (most recent call last):
        ZoneDoesNotExistError:

        @inherits: :class:`DNSDriver.get_zone`
        """
        try:
            return self._zones[zone_id]["zone"]
        except KeyError:
            raise ZoneDoesNotExistError(driver=self, value=None, zone_id=zone_id)

    def get_record(self, zone_id, record_id):
        """
        >>> driver = DummyDNSDriver('key', 'secret')
        >>> driver.get_record(zone_id='doesnotexist', record_id='exists')
        ... #doctest: +IGNORE_EXCEPTION_DETAIL
        Traceback (most recent call last):
        ZoneDoesNotExistError:

        @inherits: :class:`DNSDriver.get_record`
        """
        # Validate the zone first so a missing zone raises the zone error.
        self.get_zone(zone_id=zone_id)
        records = self._zones[zone_id]["records"]
        if record_id not in records:
            raise RecordDoesNotExistError(record_id=record_id, value=None, driver=self)
        return records[record_id]

    def create_zone(self, domain, type="master", ttl=None, extra=None):
        """
        >>> driver = DummyDNSDriver('key', 'secret')
        >>> zone = driver.create_zone(domain='apache.org', type='master',
        ...                           ttl=100)
        >>> zone
        <Zone: domain=apache.org, ttl=100, provider=Dummy DNS Provider ...>
        >>> zone = driver.create_zone(domain='apache.org', type='master',
        ...                           ttl=100)
        ... #doctest: +IGNORE_EXCEPTION_DETAIL
        Traceback (most recent call last):
        ZoneAlreadyExistsError:

        @inherits: :class:`DNSDriver.create_zone`
        """
        zone_id = "id-%s" % (domain)
        if zone_id in self._zones:
            raise ZoneAlreadyExistsError(zone_id=zone_id, value=None, driver=self)
        new_zone = Zone(id=zone_id, domain=domain, type=type, ttl=ttl, extra={}, driver=self)
        self._zones[zone_id] = {"zone": new_zone, "records": {}}
        return new_zone

    def create_record(self, name, zone, type, data, extra=None):
        """
        >>> driver = DummyDNSDriver('key', 'secret')
        >>> zone = driver.create_zone(domain='apache.org', type='master',
        ...                           ttl=100)
        >>> record = driver.create_record(name='libcloud', zone=zone,
        ...                               type=RecordType.A, data='127.0.0.1')
        >>> record #doctest: +ELLIPSIS
        <Record: zone=apache.org, name=libcloud, type=A, data=127.0.0.1...>
        >>> record = driver.create_record(name='libcloud', zone=zone,
        ...                               type=RecordType.A, data='127.0.0.1')
        ... #doctest: +IGNORE_EXCEPTION_DETAIL
        Traceback (most recent call last):
        RecordAlreadyExistsError:

        @inherits: :class:`DNSDriver.create_record`
        """
        record_id = "id-%s" % (name)
        # Re-fetch so the stored Zone object is attached to the record,
        # and a stale/unknown zone raises ZoneDoesNotExistError.
        zone = self.get_zone(zone_id=zone.id)
        records = self._zones[zone.id]["records"]
        if record_id in records:
            raise RecordAlreadyExistsError(record_id=record_id, value=None, driver=self)
        record = Record(
            id=record_id, name=name, type=type, data=data, extra=extra, zone=zone, driver=self
        )
        records[record_id] = record
        return record

    def delete_zone(self, zone):
        """
        >>> driver = DummyDNSDriver('key', 'secret')
        >>> zone = driver.create_zone(domain='apache.org', type='master',
        ...                           ttl=100)
        >>> driver.delete_zone(zone)
        True
        >>> driver.delete_zone(zone) #doctest: +IGNORE_EXCEPTION_DETAIL
        Traceback (most recent call last):
        ZoneDoesNotExistError:

        @inherits: :class:`DNSDriver.delete_zone`
        """
        # Existence check raises ZoneDoesNotExistError for unknown zones.
        self.get_zone(zone_id=zone.id)
        self._zones.pop(zone.id)
        return True

    def delete_record(self, record):
        """
        >>> driver = DummyDNSDriver('key', 'secret')
        >>> zone = driver.create_zone(domain='apache.org', type='master',
        ...                           ttl=100)
        >>> record = driver.create_record(name='libcloud', zone=zone,
        ...                               type=RecordType.A, data='127.0.0.1')
        >>> driver.delete_record(record)
        True
        >>> driver.delete_record(record) #doctest: +IGNORE_EXCEPTION_DETAIL
        Traceback (most recent call last):
        RecordDoesNotExistError:

        @inherits: :class:`DNSDriver.delete_record`
        """
        # Existence check raises the appropriate error for unknown records.
        self.get_record(zone_id=record.zone.id, record_id=record.id)
        self._zones[record.zone.id]["records"].pop(record.id)
        return True
if __name__ == "__main__":
    # Run the doctests embedded in DummyDNSDriver's method docstrings.
    import doctest

    doctest.testmod()
| |
#Copyright ReportLab Europe Ltd. 2000-2004
#see license.txt for license details
#history http://www.reportlab.co.uk/cgi-bin/viewcvs.cgi/public/reportlab/trunk/reportlab/graphics/shapes.py
"""
core of the graphics library - defines Drawing and Shapes
"""
__version__=''' $Id: shapes.py 3097 2007-05-24 16:49:19Z rgbecker $ '''
import string, os, sys
from math import pi, cos, sin, tan
from types import FloatType, IntType, ListType, TupleType, StringType, InstanceType
from pprint import pprint
from reportlab.platypus import Flowable
from reportlab.rl_config import shapeChecking, verbose, defaultGraphicsFontName, _unset_
from reportlab.lib import logger
from reportlab.lib import colors
from reportlab.lib.validators import *
from reportlab.lib.attrmap import *
from reportlab.lib.utils import fp_str
from reportlab.pdfbase.pdfmetrics import stringWidth
# NOTE(review): this deliberately(?) shadows the builtin NotImplementedError
# for the rest of this module; it is kept because external code may import
# it from here.  Instances raised below are of this class, not the builtin.
class NotImplementedError(Exception):
    pass
# two constants for filling rules
NON_ZERO_WINDING = 'Non-Zero Winding'
EVEN_ODD = 'Even-Odd'

## these can be overridden at module level before you start
#creating shapes.  So, if using a special color model,
#this provides support for the rendering mechanism.
#you can change defaults globally before you start
#making shapes; one use is to substitute another
#color model cleanly throughout the drawing.

# Default graphics state used by renderers when a shape does not set a value.
STATE_DEFAULTS = {   # sensible defaults for all
    'transform': (1,0,0,1,0,0),

    # styles follow SVG naming
    'strokeColor': colors.black,
    'strokeWidth': 1,
    'strokeLineCap': 0,            # butt cap
    'strokeLineJoin': 0,           # miter join
    'strokeMiterLimit' : 'TBA',    # don't know yet so let bomb here
    'strokeDashArray': None,
    'strokeOpacity': 1.0,          #100%

    'fillColor': colors.black,     #...or text will be invisible
    #'fillRule': NON_ZERO_WINDING, - these can be done later
    #'fillOpacity': 1.0,           #100% - can be done later

    'fontSize': 10,
    'fontName': defaultGraphicsFontName,
    'textAnchor': 'start'          # can be start, middle, end, inherited
    }
####################################################################
# math utilities. These could probably be moved into lib
# somewhere.
####################################################################
# constructors for matrices:
def nullTransform():
    """Return the identity affine transform as a 6-tuple (a, b, c, d, e, f)."""
    return (1, 0, 0, 1, 0, 0)
def translate(dx, dy):
    """Return the affine transform translating by (dx, dy)."""
    return (1, 0, 0, 1, dx, dy)
def scale(sx, sy):
    """Return the affine transform scaling x by sx and y by sy."""
    return (sx, 0, 0, sy, 0, 0)
def rotate(angle):
    """Return the affine transform rotating by *angle* degrees (CCW)."""
    radians = angle * pi / 180
    c, s = cos(radians), sin(radians)
    return (c, s, -s, c, 0, 0)
def skewX(angle):
    """Return the affine transform skewing x by *angle* degrees."""
    radians = angle * pi / 180
    return (1, 0, tan(radians), 1, 0, 0)
def skewY(angle):
    """Return the affine transform skewing y by *angle* degrees."""
    radians = angle * pi / 180
    return (1, tan(radians), 0, 1, 0, 0)
def mmult(A, B):
    "A postmultiplied by B"
    # Affine 2D matrices as 6-vectors:
    # [a0 a2 a4]   [b0 b2 b4]
    # [a1 a3 a5] * [b1 b3 b5]
    # [      1 ]   [      1 ]
    a0, a1, a2, a3, a4, a5 = A
    b0, b1, b2, b3, b4, b5 = B
    return (a0 * b0 + a2 * b1,
            a1 * b0 + a3 * b1,
            a0 * b2 + a2 * b3,
            a1 * b2 + a3 * b3,
            a0 * b4 + a2 * b5 + a4,
            a1 * b4 + a3 * b5 + a5)
def inverse(A):
    "For A affine 2D represented as 6vec return 6vec version of A**(-1)"
    a, b, c, d, e, f = A
    det = float(a * d - c * b)
    # inverse of the linear (2x2) part
    ra, rb, rc, rd = d / det, -b / det, -c / det, a / det
    # translation part of the inverse is -R * (e, f)
    return (ra, rb, rc, rd,
            -ra * e - rc * f,
            -rb * e - rd * f)
def zTransformPoint(A, v):
    "Apply the homogenous part of a transformation A to vector v --> A*v"
    x, y = v[0], v[1]
    return (A[0] * x + A[2] * y, A[1] * x + A[3] * y)
def transformPoint(A, v):
    "Apply transformation A to vector v --> A*v"
    return (A[0]*v[0]+A[2]*v[1]+A[4], A[1]*v[0]+A[3]*v[1]+A[5])

def transformPoints(matrix, V):
    """Apply *matrix* to every point in sequence V, returning a list of points.

    Bug fix: the original `map(transformPoint, V)` dropped *matrix* entirely
    and called the two-argument transformPoint with a single argument, which
    raised TypeError as soon as the result was consumed.
    """
    return [transformPoint(matrix, v) for v in V]
def zTransformPoints(matrix, V):
    """Apply the homogeneous (rotation/scale/skew) part of *matrix* to each
    vector in V; translation is ignored."""
    # closure over matrix instead of the default-argument binding trick
    return map(lambda pt: zTransformPoint(matrix, pt), V)
def _textBoxLimits(text, font, fontSize, leading, textAnchor, boxAnchor):
    """Compute the bounding box of a block of text lines.

    Returns (xb, yb, w, h, xt, yt): box origin (xb, yb), box size (w, h) and
    the start point (xt, yt) of the first line, all relative to the anchor
    point implied by boxAnchor ('n'/'s' row, 'e'/'w' column) and textAnchor
    ('start'/'middle'/'end').
    """
    # widest line determines the box width; height is one leading per line
    w = 0
    for line in text:
        w = max(w, stringWidth(line, font, fontSize))
    h = len(text) * leading

    # vertical placement from the box anchor's first character
    yt = fontSize
    if boxAnchor[0] == 's':
        yb = -h
        yt = yt - h
    elif boxAnchor[0] == 'n':
        yb = 0
    else:                       # vertically centred
        yb = -h / 2.0
        yt = yt + yb

    # horizontal: pick the box's left edge from the anchor's last character,
    # then offset the text start within the box by the text anchor
    if boxAnchor[-1] == 'e':
        xb = -w
    elif boxAnchor[-1] == 'w':
        xb = 0
    else:                       # horizontally centred
        xb = -w / 2.0
    if textAnchor == 'end':
        xt = xb + w
    elif textAnchor == 'start':
        xt = xb
    else:                       # 'middle'
        xt = xb + w / 2.0

    return xb, yb, w, h, xt, yt
def _rotatedBoxLimits(x, y, w, h, angle):
    '''
    Find the corner points of the rotated w x h sized box at x,y
    return the corner points and the min max points in the original space
    '''
    corners = zTransformPoints(rotate(angle),
                               ((x, y), (x + w, y), (x + w, y + h), (x, y + h)))
    xs = [pt[0] for pt in corners]
    ys = [pt[1] for pt in corners]
    return min(xs), max(xs), min(ys), max(ys), corners
class _DrawTimeResizeable:
'''Addin class to provide the horribleness of _drawTimeResize'''
def _drawTimeResize(self,w,h):
if hasattr(self,'_canvas'):
canvas = self._canvas
drawing = canvas._drawing
drawing.width, drawing.height = w, h
if hasattr(canvas,'_drawTimeResize'):
canvas._drawTimeResize(w,h)
class _SetKeyWordArgs:
def __init__(self, keywords={}):
"""In general properties may be supplied to the constructor."""
for key, value in keywords.items():
setattr(self, key, value)
#################################################################
#
# Helper functions for working out bounds
#
#################################################################
def getRectsBounds(rectList):
    """Union a list of (x1, y1, x2, y2) rects; returns the enclosing rect,
    or None if the list holds nothing but Nones."""
    # filter out any None objects, e.g. empty groups
    # (Python 2 filter() returns a list, which is then indexed/sliced below)
    L = filter(lambda x: x is not None, rectList)
    if not L: return None
    # seed the running min/max with the first rect, then widen
    xMin, yMin, xMax, yMax = L[0]
    for (x1, y1, x2, y2) in L[1:]:
        if x1 < xMin:
            xMin = x1
        if x2 > xMax:
            xMax = x2
        if y1 < yMin:
            yMin = y1
        if y2 > yMax:
            yMax = y2
    return (xMin, yMin, xMax, yMax)
def getPathBounds(points):
    """Bounding box of a flat coordinate list [x0, y0, x1, y1, ...].

    Returns (minX, minY, maxX, maxY).  Python 2 only (uses xrange).
    """
    n = len(points)
    f = lambda i,p = points: p[i]
    # even indices are x coordinates, odd indices are y coordinates
    xs = map(f,xrange(0,n,2))
    ys = map(f,xrange(1,n,2))
    return (min(xs), min(ys), max(xs), max(ys))
def getPointsBounds(pointList):
    "Helper function for list of points"
    # Accepts either a list of (x, y) pairs or a flat [x0, y0, x1, y1, ...]
    # list; dispatch on the type of the first element (py2 types module).
    first = pointList[0]
    if type(first) in (ListType, TupleType):
        xs = map(lambda xy: xy[0],pointList)
        ys = map(lambda xy: xy[1],pointList)
        return (min(xs), min(ys), max(xs), max(ys))
    else:
        # flat list: delegate to the even/odd-index variant
        return getPathBounds(pointList)
#################################################################
#
# And now the shapes themselves....
#
#################################################################
class Shape(_SetKeyWordArgs,_DrawTimeResizeable):
    """Base class for all nodes in the tree. Nodes are simply
    packets of data to be created, stored, and ultimately
    rendered - they don't do anything active. They provide
    convenience methods for verification but do not
    check attribiute assignments or use any clever setattr
    tricks this time."""

    # Subclasses override with their validated attribute schema.
    _attrMap = AttrMap()

    def copy(self):
        """Return a clone of this shape."""
        # implement this in the descendants as they need the right init methods.
        raise NotImplementedError, "No copy method implemented for %s" % self.__class__.__name__

    def getProperties(self,recur=1):
        """Interface to make it easy to extract automatic
        documentation

        Returns a dict of all public (non-underscore) instance attributes.
        NOTE(review): the 'recur' parameter is unused here; subclasses
        presumably honour it - confirm before relying on it.
        """
        #basic nodes have no children so this is easy.
        #for more complex objects like widgets you
        #may need to override this.
        props = {}
        for key, value in self.__dict__.items():
            if key[0:1] <> '_':
                props[key] = value
        return props

    def setProperties(self, props):
        """Supports the bulk setting if properties from,
        for example, a GUI application or a config file."""
        # direct dict update - deliberately bypasses __setattr__ checking
        self.__dict__.update(props)
        #self.verify()

    def dumpProperties(self, prefix=""):
        """Convenience. Lists them on standard output. You
        may provide a prefix - mostly helps to generate code
        samples for documentation."""
        propList = self.getProperties().items()
        propList.sort()
        if prefix:
            prefix = prefix + '.'
        for (name, value) in propList:
            print '%s%s = %s' % (prefix, name, value)

    def verify(self):
        """If the programmer has provided the optional
        _attrMap attribute, this checks all expected
        attributes are present; no unwanted attributes
        are present; and (if a checking function is found)
        checks each attribute.  Either succeeds or raises
        an informative exception."""
        if self._attrMap is not None:
            # every public attribute must be declared in the map...
            for key in self.__dict__.keys():
                if key[0] <> '_':
                    assert self._attrMap.has_key(key), "Unexpected attribute %s found in %s" % (key, self)
            # ...and every declared attribute must be present and valid
            for (attr, metavalue) in self._attrMap.items():
                assert hasattr(self, attr), "Missing attribute %s from %s" % (attr, self)
                value = getattr(self, attr)
                assert metavalue.validate(value), "Invalid value %s for attribute %s in class %s" % (value, attr, self.__class__.__name__)

    if shapeChecking:
        """This adds the ability to check every attribute assignment as it is made.
        It slows down shapes but is a big help when developing. It does not
        get defined if rl_config.shapeChecking = 0"""
        def __setattr__(self, attr, value):
            """By default we verify.  This could be off
            in some parallel base classes."""
            validateSetattr(self,attr,value)    #from reportlab.lib.attrmap

    def getBounds(self):
        "Returns bounding rectangle of object as (x1,y1,x2,y2)"
        raise NotImplementedError("Shapes and widgets must implement getBounds")
class Group(Shape):
    """Groups elements together.  May apply a transform
    to its contents.  Has a publicly accessible property
    'contents' which may be used to iterate over contents.
    In addition, child nodes may be given a name in which
    case they are subsequently accessible as properties."""

    _attrMap = AttrMap(
        transform = AttrMapValue(isTransform,desc="Coordinate transformation to apply"),
        contents = AttrMapValue(isListOfShapes,desc="Contained drawable elements"),
        )

    def __init__(self, *elements, **keywords):
        """Initial lists of elements may be provided to allow
        compact definitions in literal Python code.  May or
        may not be useful."""

        # Groups need _attrMap to be an instance rather than
        # a class attribute, as it may be extended at run time.
        self._attrMap = self._attrMap.clone()
        self.contents = []
        self.transform = (1,0,0,1,0,0)
        for elt in elements:
            self.add(elt)
        # this just applies keywords; do it at the end so they
        #don;t get overwritten
        _SetKeyWordArgs.__init__(self, keywords)

    def _addNamedNode(self,name,node):
        'if name is not None add an attribute pointing to node and add to the attrMap'
        if name:
            if name not in self._attrMap.keys():
                self._attrMap[name] = AttrMapValue(isValidChild)
            setattr(self, name, node)

    def add(self, node, name=None):
        """Appends non-None child node to the 'contents' attribute. In addition,
        if a name is provided, it is subsequently accessible by name
        """
        # propagates properties down
        if node is not None:
            assert isValidChild(node), "Can only add Shape or UserNode objects to a Group"
            self.contents.append(node)
            self._addNamedNode(name,node)

    def _nn(self,node):
        # add-and-return helper used by generated code (see _renderGroupPy)
        self.add(node)
        return self.contents[-1]

    def insert(self, i, n, name=None):
        'Inserts sub-node n in contents at specified location'
        if n is not None:
            assert isValidChild(n), "Can only insert Shape or UserNode objects in a Group"
            # negative indices use slice assignment so insertion lands
            # exactly at position i (list.insert clamps differently)
            if i<0:
                self.contents[i:i] =[n]
            else:
                self.contents.insert(i,n)
            self._addNamedNode(name,n)

    def expandUserNodes(self):
        """Return a new object which only contains primitive shapes."""

        # many limitations - shared nodes become multiple ones,
        obj = isinstance(self,Drawing) and Drawing(self.width,self.height) or Group()
        obj._attrMap = self._attrMap.clone()
        if hasattr(obj,'transform'): obj.transform = self.transform[:]

        self_contents = self.contents
        a = obj.contents.append
        for child in self_contents:
            if isinstance(child, UserNode):
                newChild = child.provideNode()
            elif isinstance(child, Group):
                newChild = child.expandUserNodes()
            else:
                newChild = child.copy()
            a(newChild)

        self._copyNamedContents(obj)
        return obj

    def _explode(self):
        ''' return a fully expanded object'''
        from reportlab.graphics.widgetbase import Widget
        obj = Group()
        if hasattr(obj,'transform'): obj.transform = self.transform[:]
        P = self.contents[:]    # pending nodes
        while P:
            n = P.pop(0)
            if isinstance(n, UserNode):
                # re-queue the provided node; it may itself need expanding
                P.append(n.provideNode())
            elif isinstance(n, Group):
                n = n._explode()
                # flatten child groups with an identity transform
                if n.transform==(1,0,0,1,0,0):
                    obj.contents.extend(n.contents)
                else:
                    obj.add(n)
            else:
                obj.add(n)
        return obj

    def _copyContents(self,obj):
        # shallow copy: children are shared with the source group
        for child in self.contents:
            obj.contents.append(child)

    def _copyNamedContents(self,obj,aKeys=None,noCopy=('contents',)):
        from copy import copy
        self_contents = self.contents
        if not aKeys: aKeys = self._attrMap.keys()
        for (k, v) in self.__dict__.items():
            if v in self_contents:
                # named child: point the copy's name at the copied child
                pos = self_contents.index(v)
                setattr(obj, k, obj.contents[pos])
            elif k in aKeys and k not in noCopy:
                setattr(obj, k, copy(v))

    def _copy(self,obj):
        """copies to obj"""
        obj._attrMap = self._attrMap.clone()
        self._copyContents(obj)
        self._copyNamedContents(obj)
        return obj

    def copy(self):
        """returns a copy"""
        return self._copy(self.__class__())

    def rotate(self, theta):
        """Convenience to help you set transforms"""
        self.transform = mmult(self.transform, rotate(theta))

    def translate(self, dx, dy):
        """Convenience to help you set transforms"""
        self.transform = mmult(self.transform, translate(dx, dy))

    def scale(self, sx, sy):
        """Convenience to help you set transforms"""
        self.transform = mmult(self.transform, scale(sx, sy))

    def skew(self, kx, ky):
        """Convenience to help you set transforms"""
        self.transform = mmult(mmult(self.transform, skewX(kx)),skewY(ky))

    def shift(self, x, y):
        '''Convenience function to set the origin arbitrarily'''
        self.transform = self.transform[:-2]+(x,y)

    def asDrawing(self, width, height):
        """ Convenience function to make a drawing from a group
            After calling this the instance will be a drawing!
        """
        # NOTE(review): reassigning __class__ in place is deliberate here
        self.__class__ = Drawing
        self._attrMap.update(self._xtraAttrMap)
        self.width = width
        self.height = height

    def getContents(self):
        '''Return the list of things to be rendered
        override to get more complicated behaviour'''
        b = getattr(self,'background',None)
        C = self.contents
        if b and b not in C: C = [b]+C
        return C

    def getBounds(self):
        if self.contents:
            b = []
            for elem in self.contents:
                b.append(elem.getBounds())
            x1 = getRectsBounds(b)
            if x1 is None: return None
            x1, y1, x2, y2 = x1
            # transform all four corners and re-bound, since a rotation
            # can move any corner to an extreme
            trans = self.transform
            corners = [[x1,y1], [x1, y2], [x2, y1], [x2,y2]]
            newCorners = []
            for corner in corners:
                newCorners.append(transformPoint(trans, corner))
            return getPointsBounds(newCorners)
        else:
            #empty group needs a sane default; this
            #will happen when interactively creating a group
            #nothing has been added to yet.  The alternative is
            #to handle None as an allowed return value everywhere.
            return None
def _addObjImport(obj,I,n=None):
'''add an import of obj's class to a dictionary of imports''' #'
from inspect import getmodule
c = obj.__class__
m = getmodule(c).__name__
n = n or c.__name__
if not I.has_key(m):
I[m] = [n]
elif n not in I[m]:
I[m].append(n)
def _repr(self,I=None):
    '''return a repr style string with named fixed args first, then keywords

    Recursively renders shapes, floats, lists and tuples into Python source
    text; I, when given, is a dict of imports populated via _addObjImport.
    Python 2 only (InstanceType, im_func.func_code).
    '''
    if type(self) is InstanceType:
        # EmptyClipPath is a module-level singleton rendered by name
        if self is EmptyClipPath:
            _addObjImport(self,I,'EmptyClipPath')
            return 'EmptyClipPath'
        if I: _addObjImport(self,I)
        if isinstance(self,Shape):
            from inspect import getargs
            # positional constructor args come first, in declaration order
            args, varargs, varkw = getargs(self.__init__.im_func.func_code)
            P = self.getProperties()
            s = self.__class__.__name__+'('
            for n in args[1:]:
                v = P[n]
                del P[n]
                s = s + '%s,' % _repr(v,I)
            # remaining properties rendered as keyword arguments
            for n,v in P.items():
                v = P[n]
                s = s + '%s=%s,' % (n, _repr(v,I))
            return s[:-1]+')'
        else:
            return repr(self)
    elif type(self) is FloatType:
        # fp_str gives ReportLab's compact float formatting
        return fp_str(self)
    elif type(self) in (ListType,TupleType):
        s = ''
        for v in self:
            s = s + '%s,' % _repr(v,I)
        if type(self) is ListType:
            return '[%s]' % s[:-1]
        else:
            # keep the trailing comma for 1-tuples
            return '(%s%s)' % (s[:-1],len(self)==1 and ',' or '')
    else:
        return repr(self)
def _renderGroupPy(G, pfx, I, i=0, indent='\t\t'):
    """Emit Python source lines that rebuild group G onto the object named
    pfx; nested groups get generated names v0, v1, ... by depth."""
    out = []
    transform = getattr(G, 'transform', None)
    if transform:
        out.append('%s%s.transform = %s\n' % (indent, pfx, _repr(transform)))
    for child in G.contents:
        if isinstance(child, Group):
            childPfx = 'v%d' % i
            out.append('%s%s=%s._nn(Group())\n' % (indent, childPfx, pfx))
            # recurse one level deeper; depth counter only affects naming
            out.append(_renderGroupPy(child, childPfx, I, i + 1, indent))
        else:
            out.append('%s%s.add(%s)\n' % (indent, pfx, _repr(child, I)))
    return ''.join(out)
def _extraKW(self,pfx,**kw):
kw.update(self.__dict__)
R = {}
n = len(pfx)
for k in kw.keys():
if k.startswith(pfx):
R[k[n:]] = kw[k]
return R
class Drawing(Group, Flowable):
    """Outermost container; the thing a renderer works on.
    This has no properties except a height, width and list
    of contents."""

    # file extensions save()/asString() know how to produce
    _saveModes=(
        'pdf','ps','eps','gif','png','jpg','jpeg','pct',
        'pict','tiff','tif','py','bmp','svg','tiffp','tiffl','tiff1',
        )

    _xtraAttrMap = AttrMap(
        width = AttrMapValue(isNumber,desc="Drawing width in points."),
        height = AttrMapValue(isNumber,desc="Drawing height in points."),
        canv = AttrMapValue(None),
        background = AttrMapValue(isValidChildOrNone,desc="Background widget for the drawing"),
        hAlign = AttrMapValue(OneOf("LEFT", "RIGHT", "CENTER", "CENTRE"), desc="Horizontal alignment within parent document"),
        vAlign = AttrMapValue(OneOf("TOP", "BOTTOM", "CENTER", "CENTRE"), desc="Vertical alignment within parent document"),
        #AR temporary hack to track back up.
        #fontName = AttrMapValue(isStringOrNone),
        renderScale = AttrMapValue(isNumber,desc="Global scaling for rendering"),
        )
    # Drawing accepts everything Group does plus the extras above
    _attrMap = AttrMap(BASE=Group)
    _attrMap.update(_xtraAttrMap)

    def __init__(self, width=400, height=200, *nodes, **keywords):
        self.background = None
        apply(Group.__init__,(self,)+nodes,keywords)
        self.width = width
        self.height = height
        self.hAlign = 'LEFT'
        self.vAlign = 'BOTTOM'
        self.renderScale = 1.0

    def _renderPy(self):
        """Generate Python source that reconstructs this drawing (used by
        the guiedit tool and by save(formats=['py']))."""
        I = {'reportlab.graphics.shapes': ['_DrawingEditorMixin','Drawing','Group']}
        G = _renderGroupPy(self._explode(),'self',I)
        n = 'ExplodedDrawing_' + self.__class__.__name__
        s = '#Autogenerated by ReportLab guiedit do not edit\n'
        for m, o in I.items():
            s = s + 'from %s import %s\n' % (m,string.replace(str(o)[1:-1],"'",""))
        s = s + '\nclass %s(_DrawingEditorMixin,Drawing):\n' % n
        s = s + '\tdef __init__(self,width=%s,height=%s,*args,**kw):\n' % (self.width,self.height)
        s = s + '\t\tapply(Drawing.__init__,(self,width,height)+args,kw)\n'
        s = s + G
        s = s + '\n\nif __name__=="__main__": #NORUNTESTS\n\t%s().save(formats=[\'pdf\'],outDir=\'.\',fnRoot=None)\n' % n
        return s

    def draw(self,showBoundary=_unset_):
        """This is used by the Platypus framework to let the document
        draw itself in a story. It is specific to PDF and should not
        be used directly."""
        import renderPDF
        renderPDF.draw(self, self.canv, 0, 0, showBoundary=showBoundary)

    def wrap(self, availWidth, availHeight):
        # Flowable protocol: report the rendered size (scaled if needed).
        width = self.width
        height = self.height
        renderScale = self.renderScale
        if renderScale!=1.0:
            width *= renderScale
            height *= renderScale
        return width, height

    def expandUserNodes(self):
        """Return a new drawing which only contains primitive shapes."""
        obj = Group.expandUserNodes(self)
        obj.width = self.width
        obj.height = self.height
        return obj

    def copy(self):
        """Returns a copy"""
        return self._copy(self.__class__(self.width, self.height))

    def asGroup(self,*args,**kw):
        # copy of self as a plain Group (drops width/height semantics)
        return self._copy(apply(Group,args,kw))

    def save(self, formats=None, verbose=None, fnRoot=None, outDir=None, title='', **kw):
        """Saves copies of self in desired location and formats.
        Multiple formats can be supported in one call

        the extra keywords can be of the form
        _renderPM_dpi=96 (which passes dpi=96 to renderPM)
        """
        from reportlab import rl_config
        ext = ''
        if not fnRoot:
            # derive a file name root from instance attributes
            fnRoot = getattr(self,'fileNamePattern',(self.__class__.__name__+'%03d'))
            chartId = getattr(self,'chartId',0)
            if callable(fnRoot):
                fnRoot = fnRoot(chartId)
            else:
                try:
                    fnRoot = fnRoot % getattr(self,'chartId',0)
                except TypeError, err:
                    #the exact error message changed from 2.2 to 2.3 so we need to
                    #check a substring
                    if str(err).find('not all arguments converted') < 0: raise

        if os.path.isabs(fnRoot):
            outDir, fnRoot = os.path.split(fnRoot)
        else:
            outDir = outDir or getattr(self,'outDir','.')
        # normalise the output directory and create it on demand
        outDir = outDir.rstrip().rstrip(os.sep)
        if not outDir: outDir = '.'
        if not os.path.isabs(outDir): outDir = os.path.join(getattr(self,'_override_CWD',os.path.dirname(sys.argv[0])),outDir)
        if not os.path.isdir(outDir): os.makedirs(outDir)
        fnroot = os.path.normpath(os.path.join(outDir,fnRoot))
        # strip a recognised extension; formats are selected separately
        plotMode = os.path.splitext(fnroot)
        if string.lower(plotMode[1][1:]) in self._saveModes:
            fnroot = plotMode[0]

        plotMode = map(str.lower,formats or getattr(self,'formats',['pdf']))
        verbose = (verbose is not None and (verbose,) or (getattr(self,'verbose',verbose),))[0]
        # temporarily route once-only logger verbosity through our flag
        _saved = logger.warnOnce.enabled, logger.infoOnce.enabled
        logger.warnOnce.enabled = logger.infoOnce.enabled = verbose

        if 'pdf' in plotMode:
            from reportlab.graphics import renderPDF
            filename = fnroot+'.pdf'
            if verbose: print "generating PDF file %s" % filename
            renderPDF.drawToFile(self, filename, title, showBoundary=getattr(self,'showBorder',rl_config.showBoundary),**_extraKW(self,'_renderPDF_',**kw))
            ext = ext +  '/.pdf'
            if sys.platform=='mac':
                # classic MacOS: stamp creator/type so the file opens in Acrobat
                import macfs, macostools
                macfs.FSSpec(filename).SetCreatorType("CARO", "PDF ")
                macostools.touched(filename)

        for bmFmt in ('gif','png','tif','jpg','tiff','pct','pict', 'bmp','tiffp','tiffl','tiff1'):
            if bmFmt in plotMode:
                from reportlab.graphics import renderPM
                filename = '%s.%s' % (fnroot,bmFmt)
                if verbose: print "generating %s file %s" % (bmFmt,filename)
                renderPM.drawToFile(self, filename,fmt=bmFmt,showBoundary=getattr(self,'showBorder',rl_config.showBoundary),**_extraKW(self,'_renderPM_',**kw))
                ext = ext + '/.' + bmFmt

        if 'eps' in plotMode:
            try:
                from rlextra.graphics import renderPS_SEP as renderPS
            except ImportError:
                from reportlab.graphics import renderPS
            filename = fnroot+'.eps'
            if verbose: print "generating EPS file %s" % filename
            renderPS.drawToFile(self,
                                filename,
                                title = fnroot,
                                dept = getattr(self,'EPS_info',['Testing'])[0],
                                company = getattr(self,'EPS_info',['','ReportLab'])[1],
                                preview = getattr(self,'preview',rl_config.eps_preview),
                                showBoundary=getattr(self,'showBorder',rl_config.showBoundary),
                                ttf_embed=getattr(self,'ttf_embed',rl_config.eps_ttf_embed),
                                **_extraKW(self,'_renderPS_',**kw))
            ext = ext + '/.eps'

        if 'svg' in plotMode:
            from reportlab.graphics import renderSVG
            filename = fnroot+'.svg'
            # NOTE(review): message says "EPS" - looks like a copy-paste slip
            if verbose: print "generating EPS file %s" % filename
            renderSVG.drawToFile(self,
                                filename,
                                showBoundary=getattr(self,'showBorder',rl_config.showBoundary),**_extraKW(self,'_renderSVG_',**kw))
            ext = ext + '/.svg'

        if 'ps' in plotMode:
            from reportlab.graphics import renderPS
            filename = fnroot+'.ps'
            # NOTE(review): message says "EPS" - looks like a copy-paste slip
            if verbose: print "generating EPS file %s" % filename
            renderPS.drawToFile(self, filename, showBoundary=getattr(self,'showBorder',rl_config.showBoundary),**_extraKW(self,'_renderPS_',**kw))
            ext = ext + '/.ps'

        if 'py' in plotMode:
            filename = fnroot+'.py'
            if verbose: print "generating py file %s" % filename
            open(filename,'w').write(self._renderPy())
            ext = ext + '/.py'

        logger.warnOnce.enabled, logger.infoOnce.enabled = _saved
        if hasattr(self,'saveLogger'):
            self.saveLogger(fnroot,ext)
        # returns e.g. "<fnroot>.pdf/.png" - root plus all extensions written
        return ext and fnroot+ext[1:] or ''

    def asString(self, format, verbose=None, preview=0, **kw):
        """Converts to an 8 bit string in given format."""
        assert format in ('pdf','ps','eps','gif','png','jpg','jpeg','bmp','ppm','tiff','tif','py','pict','pct','tiffp','tiffl','tiff1'), 'Unknown file format "%s"' % format
        from reportlab import rl_config
        #verbose = verbose is not None and (verbose,) or (getattr(self,'verbose',verbose),)[0]
        if format == 'pdf':
            from reportlab.graphics import renderPDF
            return renderPDF.drawToString(self)
        elif format in ('gif','png','tif','tiff','jpg','pct','pict','bmp','ppm','tiffp','tiffl','tiff1'):
            from reportlab.graphics import renderPM
            return renderPM.drawToString(self, fmt=format,showBoundary=getattr(self,'showBorder',
                            rl_config.showBoundary),**_extraKW(self,'_renderPM_',**kw))
        elif format == 'eps':
            try:
                from rlextra.graphics import renderPS_SEP as renderPS
            except ImportError:
                from reportlab.graphics import renderPS
            return renderPS.drawToString(self,
                                preview = preview,
                                showBoundary=getattr(self,'showBorder',rl_config.showBoundary))
        elif format == 'ps':
            from reportlab.graphics import renderPS
            return renderPS.drawToString(self, showBoundary=getattr(self,'showBorder',rl_config.showBoundary))
        elif format == 'py':
            return self._renderPy()
class _DrawingEditorMixin:
    '''This is a mixin to provide functionality for edited drawings'''
    def _add(self,obj,value,name=None,validate=None,desc=None,pos=None):
        '''
        effectively setattr(obj,name,value), but takes care of things with _attrMaps etc

        If value is a valid child shape it is added to obj's contents
        (optionally at position pos); otherwise it is set as a plain named
        attribute.  A name must be supplied when value is not a child.
        '''
        ivc = isValidChild(value)
        if name and hasattr(obj,'_attrMap'):
            # clone a class-level _attrMap before mutating it on the instance
            if not obj.__dict__.has_key('_attrMap'):
                obj._attrMap = obj._attrMap.clone()
            if ivc and validate is None: validate = isValidChild
            obj._attrMap[name] = AttrMapValue(validate,desc)
        if hasattr(obj,'add') and ivc:
            if pos:
                obj.insert(pos,value,name)
            else:
                obj.add(value,name)
        elif name:
            setattr(obj,name,value)
        else:
            raise ValueError, "Can't add, need name"
class LineShape(Shape):
    """Abstract base for shapes carrying stroke (outline) attributes."""
    _attrMap = AttrMap(
        strokeColor = AttrMapValue(isColorOrNone),
        strokeWidth = AttrMapValue(isNumber),
        strokeLineCap = AttrMapValue(None),
        strokeLineJoin = AttrMapValue(None),
        strokeMiterLimit = AttrMapValue(isNumber),
        strokeDashArray = AttrMapValue(isListOfNumbersOrNone),
        )
    def __init__(self, kw):
        # Seed every stroke attribute with its default, then let the
        # caller-supplied keywords override.
        for attr, dflt in (('strokeColor', STATE_DEFAULTS['strokeColor']),
                           ('strokeWidth', 1),
                           ('strokeLineCap', 0),
                           ('strokeLineJoin', 0),
                           ('strokeMiterLimit', 0),
                           ('strokeDashArray', None)):
            setattr(self, attr, dflt)
        self.setProperties(kw)
class Line(LineShape):
    """Straight segment from (x1, y1) to (x2, y2)."""
    _attrMap = AttrMap(BASE=LineShape,
        x1 = AttrMapValue(isNumber),
        y1 = AttrMapValue(isNumber),
        x2 = AttrMapValue(isNumber),
        y2 = AttrMapValue(isNumber),
        )
    def __init__(self, x1, y1, x2, y2, **kw):
        LineShape.__init__(self, kw)
        self.x1, self.y1 = x1, y1
        self.x2, self.y2 = x2, y2
    def getBounds(self):
        """Bounding box; for a line this is simply its endpoints."""
        return (self.x1, self.y1, self.x2, self.y2)
class SolidShape(LineShape):
    """Base for shapes with both an outline and a filled interior."""
    # base for anything with outline and content
    _attrMap = AttrMap(BASE=LineShape,
        fillColor = AttrMapValue(isColorOrNone),
        )
    def __init__(self, kw):
        # Set the fill default first; LineShape.__init__ applies the
        # caller's keywords last so they win over every default.
        self.fillColor = STATE_DEFAULTS['fillColor']
        # do this at the end so keywords overwrite
        #the above settings
        LineShape.__init__(self, kw)
# path operator constants
# Each operator consumes a fixed number of coordinates from Path.points;
# the three parallel structures below must stay in the same order.
_MOVETO, _LINETO, _CURVETO, _CLOSEPATH = range(4)
_PATH_OP_ARG_COUNT = (2, 2, 6, 0) # [moveTo, lineTo, curveTo, closePath]
_PATH_OP_NAMES=['moveTo','lineTo','curveTo','closePath']
def _renderPath(path, drawFuncs):
    """Helper function for renderers.

    Walks path.operators, dispatching each operator to the matching entry
    of drawFuncs with that operator's coordinates (taken from path.points)
    as positional arguments.

    Returns true when every moveTo was balanced by a closePath -- i.e. the
    path consists solely of closed subpaths.
    """
    # this could be a method of Path...
    points = path.points
    i = 0
    hadClosePath = 0
    hadMoveTo = 0
    for op in path.operators:
        nArgs = _PATH_OP_ARG_COUNT[op]
        j = i + nArgs
        # apply() is deprecated/py2-only; * unpacking works in py2 and py3.
        drawFuncs[op](*points[i:j])
        i = j
        if op == _CLOSEPATH:
            hadClosePath += 1
        if op == _MOVETO:
            hadMoveTo += 1
    return hadMoveTo == hadClosePath
class Path(SolidShape):
    """Path, made up of straight lines and bezier curves."""
    _attrMap = AttrMap(BASE=SolidShape,
        points = AttrMapValue(isListOfNumbers),
        operators = AttrMapValue(isListOfNumbers),
        isClipPath = AttrMapValue(isBoolean),
        )
    def __init__(self, points=None, operators=None, isClipPath=0, **kw):
        SolidShape.__init__(self, kw)
        # Fresh per-instance lists; never share a default mutable.
        points = [] if points is None else points
        operators = [] if operators is None else operators
        assert len(points) % 2 == 0, 'Point list must have even number of elements!'
        self.points = points
        self.operators = operators
        self.isClipPath = isClipPath
    def copy(self):
        """Clone the path, duplicating its point and operator lists."""
        clone = self.__class__(self.points[:], self.operators[:])
        clone.setProperties(self.getProperties())
        return clone
    def moveTo(self, x, y):
        """Start a new subpath at (x, y)."""
        self.points += [x, y]
        self.operators.append(_MOVETO)
    def lineTo(self, x, y):
        """Straight segment to (x, y)."""
        self.points += [x, y]
        self.operators.append(_LINETO)
    def curveTo(self, x1, y1, x2, y2, x3, y3):
        """Cubic bezier with controls (x1,y1), (x2,y2), ending at (x3,y3)."""
        self.points += [x1, y1, x2, y2, x3, y3]
        self.operators.append(_CURVETO)
    def closePath(self):
        """Join back to the start of the current subpath."""
        self.operators.append(_CLOSEPATH)
    def getBounds(self):
        return getPathBounds(self.points)
EmptyClipPath=Path() #special path; presumably an 'empty clip region' sentinel for renderers -- confirm
def getArcPoints(centerx, centery, radius, startangledegrees, endangledegrees, yradius=None, degreedelta=None, reverse=None):
    """Return a list of (x, y) tuples approximating an elliptical arc.

    The sweep always runs anticlockwise from startangledegrees to
    endangledegrees (the end angle is normalized upward by full turns).
    yradius defaults to radius (a circular arc); reverse flips the point
    order.  A zero-length sweep yields a single point.
    """
    if yradius is None: yradius = radius
    points = []
    from math import sin, cos, pi
    degreestoradians = pi/180.0
    startangle = startangledegrees*degreestoradians
    endangle = endangledegrees*degreestoradians
    # Normalize so the sweep is non-negative.
    while endangle<startangle:
        endangle = endangle+2*pi
    angle = float(endangle - startangle)
    a = points.append
    if angle>.001:
        # NOTE(review): min() compares the sweep (radians) against
        # degreedelta (degrees) -- preserved exactly from the original.
        degreedelta = min(angle,degreedelta or 1.)
        radiansdelta = degreedelta*degreestoradians
        n = max(int(angle/radiansdelta+0.5),1)
        radiansdelta = angle/n
        n += 1
    else:
        n = 1
        radiansdelta = 0
    # py3 compatibility: range() replaces the py2-only xrange(); the loop
    # variable no longer shadows the sweep `angle` computed above.
    for i in range(n):
        theta = startangle+i*radiansdelta
        a((centerx+radius*cos(theta),centery+yradius*sin(theta)))
    if reverse: points.reverse()
    return points
class ArcPath(Path):
    '''Path with an addArc method'''
    def addArc(self, centerx, centery, radius, startangledegrees, endangledegrees, yradius=None, degreedelta=None, moveTo=None, reverse=None):
        """Append a polyline approximation of an arc to this path.

        Starts a new subpath (moveTo) when requested or when the path has
        no operators yet; otherwise connects from the current point with a
        lineTo to the arc's first point.
        """
        P = getArcPoints(centerx, centery, radius, startangledegrees, endangledegrees, yradius=yradius, degreedelta=degreedelta, reverse=reverse)
        if moveTo or not len(self.operators):
            self.moveTo(P[0][0],P[0][1])
            del P[0]
        for x, y in P: self.lineTo(x,y)
def definePath(pathSegs=[],isClipPath=0, dx=0, dy=0, **kw):
    """Build a Path from a list of segment specs.

    Each element of pathSegs is either a bare operator name ('moveTo',
    'lineTo', 'curveTo', 'closePath') or a sequence (opName, arg1, ...).
    dx/dy shift every x (respectively y) coordinate.  Extra keywords are
    forwarded to the Path constructor.

    Raises ValueError for an unknown operator name or a wrong argument
    count.  (pathSegs is only iterated, never mutated, so the shared
    default list is harmless here.)
    """
    O = []
    P = []
    for seg in pathSegs:
        # A bare string is an operator taking no arguments.
        # (list, tuple) replaces the py2-only types.ListType/TupleType.
        if type(seg) not in (list, tuple):
            opName = seg
            args = []
        else:
            opName = seg[0]
            args = seg[1:]
        if opName not in _PATH_OP_NAMES:
            # py3 compatibility: raise-as-call replaces the py2 statement form.
            raise ValueError('bad operator name %s' % opName)
        op = _PATH_OP_NAMES.index(opName)
        if len(args)!=_PATH_OP_ARG_COUNT[op]:
            raise ValueError('%s bad arguments %s' % (opName,str(args)))
        O.append(op)
        P.extend(list(args))
    # Apply dx to the even (x) slots and dy to the odd (y) slots.
    for d,o in (dx,0), (dy,1):
        for i in range(o,len(P),2):
            P[i] = P[i]+d
    # apply() is deprecated/py2-only; ** unpacking is equivalent.
    return Path(P,O,isClipPath,**kw)
class Rect(SolidShape):
    """Rectangle, possibly with rounded corners."""
    _attrMap = AttrMap(BASE=SolidShape,
        x = AttrMapValue(isNumber),
        y = AttrMapValue(isNumber),
        width = AttrMapValue(isNumber),
        height = AttrMapValue(isNumber),
        rx = AttrMapValue(isNumber),
        ry = AttrMapValue(isNumber),
        )
    def __init__(self, x, y, width, height, rx=0, ry=0, **kw):
        # rx/ry are the corner radii; 0 keeps the corners square.
        SolidShape.__init__(self, kw)
        self.x, self.y = x, y
        self.width, self.height = width, height
        self.rx, self.ry = rx, ry
    def copy(self):
        """Clone geometry, then copy all remaining properties across."""
        clone = self.__class__(self.x, self.y, self.width, self.height)
        clone.setProperties(self.getProperties())
        return clone
    def getBounds(self):
        """Axis-aligned bounding box as (x1, y1, x2, y2)."""
        x0, y0 = self.x, self.y
        return (x0, y0, x0 + self.width, y0 + self.height)
class Image(SolidShape):
    """Bitmap image."""
    _attrMap = AttrMap(BASE=SolidShape,
        x = AttrMapValue(isNumber),
        y = AttrMapValue(isNumber),
        width = AttrMapValue(isNumberOrNone),
        height = AttrMapValue(isNumberOrNone),
        path = AttrMapValue(None),
        )
    def __init__(self, x, y, width, height, path, **kw):
        # NOTE(review): width/height accept None (isNumberOrNone) --
        # presumably meaning "use the bitmap's natural size"; confirm with
        # the renderers.  path is whatever the renderer accepts.
        SolidShape.__init__(self, kw)
        self.x = x
        self.y = y
        self.width = width
        self.height = height
        self.path = path
    def copy(self):
        """Clone geometry/path, then copy remaining properties across."""
        new = self.__class__(self.x, self.y, self.width, self.height, self.path)
        new.setProperties(self.getProperties())
        return new
    def getBounds(self):
        """Axis-aligned bounding box as (x1, y1, x2, y2).

        Bug fix: the original returned (x + width, y + width) using the
        bare name ``width`` (a NameError at runtime) and never used height.
        """
        return (self.x, self.y, self.x + self.width, self.y + self.height)
class Circle(SolidShape):
    """Circle of radius r centred on (cx, cy)."""
    _attrMap = AttrMap(BASE=SolidShape,
        cx = AttrMapValue(isNumber),
        cy = AttrMapValue(isNumber),
        r = AttrMapValue(isNumber),
        )
    def __init__(self, cx, cy, r, **kw):
        SolidShape.__init__(self, kw)
        self.cx, self.cy, self.r = cx, cy, r
    def copy(self):
        """Clone geometry, then copy remaining properties across."""
        clone = self.__class__(self.cx, self.cy, self.r)
        clone.setProperties(self.getProperties())
        return clone
    def getBounds(self):
        """Axis-aligned bounding box as (x1, y1, x2, y2)."""
        x0, y0, rad = self.cx, self.cy, self.r
        return (x0 - rad, y0 - rad, x0 + rad, y0 + rad)
class Ellipse(SolidShape):
    """Axis-aligned ellipse centred on (cx, cy) with half-axes rx, ry."""
    _attrMap = AttrMap(BASE=SolidShape,
        cx = AttrMapValue(isNumber),
        cy = AttrMapValue(isNumber),
        rx = AttrMapValue(isNumber),
        ry = AttrMapValue(isNumber),
        )
    def __init__(self, cx, cy, rx, ry, **kw):
        SolidShape.__init__(self, kw)
        self.cx, self.cy = cx, cy
        self.rx, self.ry = rx, ry
    def copy(self):
        """Clone geometry, then copy remaining properties across."""
        clone = self.__class__(self.cx, self.cy, self.rx, self.ry)
        clone.setProperties(self.getProperties())
        return clone
    def getBounds(self):
        """Axis-aligned bounding box as (x1, y1, x2, y2)."""
        cx, cy, rx, ry = self.cx, self.cy, self.rx, self.ry
        return (cx - rx, cy - ry, cx + rx, cy + ry)
class Wedge(SolidShape):
    """A "slice of a pie" by default translates to a polygon moves anticlockwise
    from start angle to end angle"""
    _attrMap = AttrMap(BASE=SolidShape,
        centerx = AttrMapValue(isNumber),
        centery = AttrMapValue(isNumber),
        radius = AttrMapValue(isNumber),
        startangledegrees = AttrMapValue(isNumber),
        endangledegrees = AttrMapValue(isNumber),
        yradius = AttrMapValue(isNumberOrNone),
        radius1 = AttrMapValue(isNumberOrNone),
        yradius1 = AttrMapValue(isNumberOrNone),
        )
    degreedelta = 1 # jump every 1 degrees
    def __init__(self, centerx, centery, radius, startangledegrees, endangledegrees, yradius=None, **kw):
        SolidShape.__init__(self, kw)
        # Normalize so the sweep is anticlockwise (end angle >= start angle).
        while endangledegrees<startangledegrees:
            endangledegrees = endangledegrees+360
        self.centerx, self.centery, self.radius, self.startangledegrees, self.endangledegrees = \
            centerx, centery, radius, startangledegrees, endangledegrees
        self.yradius = yradius
    def _xtraRadii(self):
        """Resolve the optional vertical/inner radii (annular wedges)."""
        yradius = getattr(self, 'yradius', None)
        if yradius is None: yradius = self.radius
        radius1 = getattr(self,'radius1', None)
        yradius1 = getattr(self,'yradius1',radius1)
        if radius1 is None: radius1 = yradius1
        return yradius, radius1, yradius1
    def asPolygon(self):
        """Approximate the wedge as a Polygon.

        With no inner radii the polygon closes through the centre point;
        otherwise the inner arc is traced back in reverse.
        """
        centerx= self.centerx
        centery = self.centery
        radius = self.radius
        yradius, radius1, yradius1 = self._xtraRadii()
        startangledegrees = self.startangledegrees
        endangledegrees = self.endangledegrees
        from math import sin, cos, pi
        degreestoradians = pi/180.0
        startangle = startangledegrees*degreestoradians
        endangle = endangledegrees*degreestoradians
        while endangle<startangle:
            endangle = endangle+2*pi
        angle = float(endangle-startangle)
        points = []
        if angle>0.001:
            # NOTE(review): min() compares degreedelta (degrees) with the
            # sweep (radians) -- preserved exactly from the original.
            degreedelta = min(self.degreedelta or 1.,angle)
            radiansdelta = degreedelta*degreestoradians
            n = max(1,int(angle/radiansdelta+0.5))
            radiansdelta = angle/n
            n += 1
        else:
            n = 1
            radiansdelta = 0
        CA = []
        CAA = CA.append
        a = points.append
        # py3 compatibility: range() replaces the py2-only xrange(); the
        # loop variable no longer shadows the sweep `angle` above.
        for i in range(n):
            theta = startangle+i*radiansdelta
            CAA((cos(theta),sin(theta)))
        for c,s in CA:
            a(centerx+radius*c)
            a(centery+yradius*s)
        if (radius1==0 or radius1 is None) and (yradius1==0 or yradius1 is None):
            a(centerx); a(centery)
        else:
            CA.reverse()
            for c,s in CA:
                a(centerx+radius1*c)
                a(centery+yradius1*s)
        return Polygon(points)
    def copy(self):
        """Clone geometry, then copy the remaining properties across."""
        new = self.__class__(self.centerx,
                             self.centery,
                             self.radius,
                             self.startangledegrees,
                             self.endangledegrees)
        new.setProperties(self.getProperties())
        return new
    def getBounds(self):
        return self.asPolygon().getBounds()
class Polygon(SolidShape):
    """Defines a closed shape; Is implicitly
    joined back to the start for you.

    points is a flat list [x0, y0, x1, y1, ...].
    """
    _attrMap = AttrMap(BASE=SolidShape,
        points = AttrMapValue(isListOfNumbers),
        )
    def __init__(self, points=None, **kw):
        SolidShape.__init__(self, kw)
        # Bug fix: the default was a shared mutable list ([]); default to
        # None so each instance gets its own list (same pattern as Path).
        if points is None:
            points = []
        assert len(points) % 2 == 0, 'Point list must have even number of elements!'
        self.points = points
    def copy(self):
        """Clone the point list reference, then copy properties across."""
        new = self.__class__(self.points)
        new.setProperties(self.getProperties())
        return new
    def getBounds(self):
        return getPointsBounds(self.points)
class PolyLine(LineShape):
    """Series of line segments. Does not define a
    closed shape; never filled even if apparently joined.
    Put the numbers in the list, not two-tuples.

    A list of (x, y) pairs is also accepted and flattened.
    """
    _attrMap = AttrMap(BASE=LineShape,
        points = AttrMapValue(isListOfNumbers),
        )
    def __init__(self, points=None, **kw):
        LineShape.__init__(self, kw)
        # Bug fix: the default was a shared mutable list ([]); default to
        # None so each instance gets its own list.
        if points is None:
            points = []
        lenPoints = len(points)
        if lenPoints:
            # (list, tuple) replaces the py2-only types.ListType/TupleType.
            if type(points[0]) in (list, tuple):
                # Flatten a sequence of (x, y) pairs into the flat form.
                L = []
                for (x,y) in points:
                    L.append(x)
                    L.append(y)
                points = L
            else:
                assert len(points) % 2 == 0, 'Point list must have even number of elements!'
        self.points = points
    def copy(self):
        """Clone the point list reference, then copy properties across."""
        new = self.__class__(self.points)
        new.setProperties(self.getProperties())
        return new
    def getBounds(self):
        return getPointsBounds(self.points)
class String(Shape):
    """Not checked against the spec, just a way to make something work.
    Can be anchored left, middle or end."""
    # to do.
    _attrMap = AttrMap(
        x = AttrMapValue(isNumber),
        y = AttrMapValue(isNumber),
        text = AttrMapValue(isString),
        fontName = AttrMapValue(None),
        fontSize = AttrMapValue(isNumber),
        fillColor = AttrMapValue(isColorOrNone),
        textAnchor = AttrMapValue(isTextAnchor),
        encoding = AttrMapValue(isString),
        )
    encoding = 'utf8'
    def __init__(self, x, y, text, **kw):
        self.x, self.y, self.text = x, y, text
        self.textAnchor = 'start'
        # Font/fill come from the global state defaults; keywords override.
        for attr in ('fontName', 'fontSize', 'fillColor'):
            setattr(self, attr, STATE_DEFAULTS[attr])
        self.setProperties(kw)
    def getEast(self):
        """x coordinate of the right-hand edge of the rendered text."""
        return self.x + stringWidth(self.text,self.fontName,self.fontSize, self.encoding)
    def copy(self):
        """Clone position/text, then copy remaining properties across."""
        clone = self.__class__(self.x, self.y, self.text)
        clone.setProperties(self.getProperties())
        return clone
    def getBounds(self):
        # assumes constant drop of 0.2*size to baseline
        w = stringWidth(self.text,self.fontName,self.fontSize,self.encoding)
        anchor = self.textAnchor
        if anchor == 'start':
            left = self.x
        elif anchor == 'middle':
            left = self.x - 0.5*w
        elif anchor == 'end':
            left = self.x - w
        return (left, self.y - 0.2 * self.fontSize, left+w, self.y + self.fontSize)
class UserNode(_DrawTimeResizeable):
    """A simple template for creating a new node. The user (Python
    programmer) may subclasses this. provideNode() must be defined to
    provide a Shape primitive when called by a renderer. It does
    NOT inherit from Shape, as the renderer always replaces it, and
    your own classes can safely inherit from it without getting
    lots of unintended behaviour."""
    def provideNode(self):
        """Override this to create your own node. This lets widgets be
        added to drawings; they must create a shape (typically a group)
        so that the renderer can draw the custom node."""
        # py3 compatibility: raise-as-call replaces the py2 raise statement.
        raise NotImplementedError("this method must be redefined by the user/programmer")
def test():
    """Ad-hoc smoke test: dump and verify a Rect's properties, then
    delete an attribute and verify again.  Run via the __main__ guard."""
    r = Rect(10,10,200,50)
    import pprint
    pp = pprint.pprint
    print 'a Rectangle:'
    pp(r.getProperties())
    print
    print 'verifying...',
    r.verify()
    print 'OK'
    #print 'setting rect.z = "spam"'
    #r.z = 'spam'
    print 'deleting rect.width'
    del r.width
    print 'verifying...',
    r.verify()
if __name__=='__main__':
    # Allow running this module directly to exercise the smoke test.
    test()
| |
from __future__ import division #brings in Python 3.0 mixed type calculation rules
import logging
import numpy as np
import pandas as pd
class StirFunctions(object):
    """
    Function class for Stir.

    Each calc_*/return_* method implements one numbered equation or result
    of the Stir model.  Inputs are read from attributes on self (set by the
    caller); every output is stored on self as an out_* attribute and also
    returned.
    """

    def __init__(self):
        """Class representing the functions for Stir."""
        # (Fixed docstring: the original said "Sip".)
        super(StirFunctions, self).__init__()

    # Begin model methods
    def calc_sat_air_conc(self):
        """eq. 1 saturated air concentration in mg/m^3."""
        # NOTE(review): presumably molar volume of air (L/mol) at 25 C and
        # 760 mmHg with a unit-conversion factor -- confirm against the
        # model documentation.
        air_vol = 24.45
        pressure = 760.0
        conv = 1000000.0
        self.out_sat_air_conc = (self.vapor_pressure * self.molecular_weight * conv) / (pressure * air_vol)
        return self.out_sat_air_conc

    def calc_inh_rate_avian(self):
        """eq. 2 Avian inhalation rate (allometric scaling on body weight)."""
        magic1 = 284.
        magic2 = 0.77
        conversion = 60.        # minutes -> hour
        activity_factor = 3.
        self.out_inh_rate_avian = magic1 * (self.body_weight_assessed_bird ** magic2) * conversion * activity_factor
        return self.out_inh_rate_avian

    def calc_vid_avian(self):
        """eq. 3 Maximum avian vapor inhalation dose."""
        duration_hours = 1.           # 1 (hr) is duration of exposure
        conversion_factor = 1000000.  # cm3/m3
        self.out_vid_avian = (self.out_sat_air_conc * self.out_inh_rate_avian * duration_hours) / (
            conversion_factor * self.body_weight_assessed_bird)
        return self.out_vid_avian

    def calc_inh_rate_mammal(self):
        """eq. 4 Mammalian inhalation rate (allometric scaling)."""
        magic1 = 379.0
        magic2 = 0.8
        minutes_conversion = 60.
        activity_factor = 3.
        self.out_inh_rate_mammal = magic1 * (
            self.body_weight_assessed_mammal ** magic2) * minutes_conversion * activity_factor
        return self.out_inh_rate_mammal

    def calc_vid_mammal(self):
        """eq. 5 Maximum mammalian vapor inhalation dose."""
        duration_hours = 1.           # 1 hr = duration of exposure
        conversion_factor = 1000000.
        self.out_vid_mammal = (self.out_sat_air_conc * self.out_inh_rate_mammal * duration_hours) / (
            conversion_factor * self.body_weight_assessed_mammal)
        return self.out_vid_mammal

    def calc_conc_air(self):
        """eq. 6 Air column concentration after spray."""
        conversion_factor = 100.  # cm/m
        # conversion of application rate from lbs/acre to mg/cm2
        cf_g_lbs = 453.59237
        cf_mg_g = 1000.
        cf_cm2_acre = 40468564.2
        self.out_ar2 = (self.application_rate * cf_g_lbs * cf_mg_g) / cf_cm2_acre
        self.out_air_conc = self.out_ar2 / (self.column_height * conversion_factor)
        return self.out_air_conc

    def calc_sid_avian(self):
        """eq. 7 Avian spray droplet inhalation dose."""
        # direct_spray_duration / 60. converts minutes to hours.
        self.out_sid_avian = (self.out_air_conc * self.out_inh_rate_avian * (
            self.direct_spray_duration / 60.0) * self.spray_drift_fraction) / (self.body_weight_assessed_bird)
        return self.out_sid_avian

    def calc_sid_mammal(self):
        """eq. 8 Mammalian spray droplet inhalation dose."""
        self.out_sid_mammal = (self.out_air_conc * self.out_inh_rate_mammal * (
            self.direct_spray_duration / 60.0) * self.spray_drift_fraction) / (self.body_weight_assessed_mammal)
        return self.out_sid_mammal

    def calc_convert_mammal_inhalation_lc50_to_ld50(self):
        """eq. 9 Conversion of mammalian LC50 to LD50."""
        self.out_cf = ((self.out_inh_rate_mammal * 0.001) / self.body_weight_tested_mammal)
        activity_factor = 1.
        absorption = 1.
        self.out_mammal_inhalation_ld50 = self.mammal_inhalation_lc50 * absorption * self.out_cf * \
            self.duration_mammal_inhalation_study * activity_factor
        return self.out_mammal_inhalation_ld50

    def calc_adjusted_mammal_inhalation_ld50(self):
        """eq. 10 Adjusted mammalian inhalation LD50 (body-weight scaling)."""
        magic_power = 0.25
        self.out_adjusted_mammal_inhalation_ld50 = self.out_mammal_inhalation_ld50 * \
            (self.body_weight_tested_mammal / self.body_weight_assessed_mammal) ** \
            magic_power
        return self.out_adjusted_mammal_inhalation_ld50

    def calc_estimated_avian_inhalation_ld50(self):
        """eq. 11 Estimated avian inhalation LD50."""
        three_five = 3.5
        self.out_estimated_avian_inhalation_ld50 = (self.avian_oral_ld50 * self.out_mammal_inhalation_ld50) / (
            three_five * self.mammal_oral_ld50)
        return self.out_estimated_avian_inhalation_ld50

    def calc_adjusted_avian_inhalation_ld50(self):
        """eq. 12 Adjusted avian inhalation LD50 (Mineau scaling)."""
        self.out_adjusted_avian_inhalation_ld50 = self.out_estimated_avian_inhalation_ld50 * \
            (self.body_weight_assessed_bird / self.body_weight_tested_bird) ** \
            (self.mineau_scaling_factor - 1)
        return self.out_adjusted_avian_inhalation_ld50

    def _level_of_concern(self, ratios):
        """Map dose/LD50 ratios to level-of-concern messages.

        A ratio below 0.1 passes ('Exposure not Likely Significant');
        anything else fails ('Proceed to Refinements').  Shared by the four
        return_loc_* methods, which previously duplicated this logic.
        """
        msg_pass = 'Exposure not Likely Significant'
        msg_fail = 'Proceed to Refinements'
        return pd.Series([msg_pass if ratio < 0.1 else msg_fail for ratio in ratios])

    def return_ratio_vid_avian(self):
        """results #1: Ratio of avian vapor dose to adjusted inhalation LD50."""
        self.out_ratio_vid_avian = self.out_vid_avian / self.out_adjusted_avian_inhalation_ld50
        return self.out_ratio_vid_avian

    def return_loc_vid_avian(self):
        """results #2: Level of Concern for avian vapor phase risk."""
        self.out_loc_vid_avian = self._level_of_concern(self.out_ratio_vid_avian)
        return self.out_loc_vid_avian

    def return_ratio_sid_avian(self):
        """results #3: Ratio of avian droplet inhalation dose to adjusted inhalation LD50."""
        self.out_ratio_sid_avian = self.out_sid_avian / self.out_adjusted_avian_inhalation_ld50
        return self.out_ratio_sid_avian

    def return_loc_sid_avian(self):
        """results #4: Level of Concern for avian droplet inhalation risk."""
        self.out_loc_sid_avian = self._level_of_concern(self.out_ratio_sid_avian)
        return self.out_loc_sid_avian

    def return_ratio_vid_mammal(self):
        """results #5: Ratio of mammalian vapor dose to adjusted inhalation LD50."""
        self.out_ratio_vid_mammal = self.out_vid_mammal / self.out_adjusted_mammal_inhalation_ld50
        return self.out_ratio_vid_mammal

    def return_loc_vid_mammal(self):
        """results #6: Level of Concern for mammalian vapor phase risk."""
        self.out_loc_vid_mammal = self._level_of_concern(self.out_ratio_vid_mammal)
        return self.out_loc_vid_mammal

    def return_ratio_sid_mammal(self):
        """results #7: Ratio of mammalian droplet inhalation dose to adjusted inhalation LD50."""
        self.out_ratio_sid_mammal = self.out_sid_mammal / self.out_adjusted_mammal_inhalation_ld50
        return self.out_ratio_sid_mammal

    def return_loc_sid_mammal(self):
        """results #8: Level of Concern for mammalian droplet inhalation risk."""
        self.out_loc_sid_mammal = self._level_of_concern(self.out_ratio_sid_mammal)
        return self.out_loc_sid_mammal
| |
# Copyright (c) 2011 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Module containing helper class and methods for interacting with Gerrit."""
from __future__ import print_function
import logging
import operator
from chromite.cbuildbot import constants
from chromite.lib import cros_build_lib
from chromite.lib import git
from chromite.lib import gob_util
from chromite.lib import parallel
from chromite.lib import patch as cros_patch
# Route gob_util's log output through the chromite logger.
gob_util.LOGGER = cros_build_lib.logger
# Exception hierarchy: every gerrit failure derives from GerritException so
# callers can catch the whole family with a single except clause.
class GerritException(Exception):
  """Base exception, thrown for gerrit failures"""
class QueryHasNoResults(GerritException):
  """Exception thrown when a query returns no results."""
class QueryNotSpecific(GerritException):
  """Thrown when a query needs to identify one CL, but matched multiple."""
class FailedToReachGerrit(GerritException):
  """Exception thrown if we failed to contact the Gerrit server."""
class GerritHelper(object):
  """Helper class to manage interaction with the gerrit-on-borg service."""
  # Maximum number of results to return per query.
  _GERRIT_MAX_QUERY_RETURN = 500
  # Number of processes to run in parallel when fetching from Gerrit. The
  # Gerrit team recommended keeping this small to avoid putting too much
  # load on the server.
  _NUM_PROCESSES = 10
  # Fields that appear in gerrit change query results.
  # MORE_CHANGES is set by the server on the last result of a page when
  # further results remain (used for pagination in Query).
  MORE_CHANGES = '_more_changes'
  def __init__(self, host, remote, print_cmd=True):
    """Initialize.

    Args:
      host: Hostname (without protocol prefix) of the gerrit server.
      remote: The symbolic name of a known remote git host,
          taken from cbuildbot.contants.
      print_cmd: Determines whether all RunCommand invocations will be echoed.
          Set to False for quiet operation.
    """
    self.host = host
    self.remote = remote
    # Coerce to bool so any truthy value is normalized.
    self.print_cmd = bool(print_cmd)
    # Cached server version; presumably populated lazily elsewhere -- confirm.
    self._version = None
@classmethod
def FromRemote(cls, remote, **kwargs):
if remote == constants.INTERNAL_REMOTE:
host = constants.INTERNAL_GERRIT_HOST
elif remote == constants.EXTERNAL_REMOTE:
host = constants.EXTERNAL_GERRIT_HOST
else:
raise ValueError('Remote %s not supported.' % remote)
return cls(host, remote, **kwargs)
  @classmethod
  def FromGob(cls, gob, **kwargs):
    """Return a helper for a GoB instance.

    Args:
      gob: Name of the GoB instance; the review host is derived by
          expanding '<gob>-review' through constants.GOB_HOST.
    """
    host = constants.GOB_HOST % ('%s-review' % gob)
    return cls(host, gob, **kwargs)
def SetReviewers(self, change, add=(), remove=(), dryrun=False):
"""Modify the list of reviewers on a gerrit change.
Args:
change: ChangeId or change number for a gerrit review.
add: Sequence of email addresses of reviewers to add.
remove: Sequence of email addresses of reviewers to remove.
dryrun: If True, only print what would have been done.
"""
if add:
if dryrun:
cros_build_lib.Info('Would have added %s to "%s"', add, change)
else:
gob_util.AddReviewers(self.host, change, add)
if remove:
if dryrun:
cros_build_lib.Info('Would have removed %s to "%s"', remove, change)
else:
gob_util.RemoveReviewers(self.host, change, remove)
  def GetChangeDetail(self, change_num):
    """Return detailed information about a gerrit change.

    Args:
      change_num: A gerrit change number.

    Returns:
      Whatever gob_util.GetChangeDetail returns for the change (presumably
      a change-detail dict) -- the o_params ask the server to include the
      current revision and commit info.
    """
    return gob_util.GetChangeDetail(
        self.host, change_num, o_params=('CURRENT_REVISION', 'CURRENT_COMMIT'))
def GrabPatchFromGerrit(self, project, change, commit, must_match=True):
"""Return a cros_patch.GerritPatch representing a gerrit change.
Args:
project: The name of the gerrit project for the change.
change: A ChangeId or gerrit number for the change.
commit: The git commit hash for a patch associated with the change.
must_match: Raise an exception if the change is not found.
"""
query = {'project': project, 'commit': commit, 'must_match': must_match}
return self.QuerySingleRecord(change, **query)
def IsChangeCommitted(self, change, must_match=False):
"""Check whether a gerrit change has been merged.
Args:
change: A gerrit change number.
must_match: Raise an exception if the change is not found. If this is
False, then a missing change will return None.
"""
change = gob_util.GetChange(self.host, change)
if not change:
if must_match:
raise QueryHasNoResults('Could not query for change %s' % change)
return
return change.get('status') == 'MERGED'
  def GetLatestSHA1ForBranch(self, project, branch):
    """Return the git hash at the tip of a branch.

    Returns None (implicitly) when the ls-remote call fails or produces no
    result; failures are logged with a traceback rather than raised.
    """
    url = '%s://%s/%s' % (gob_util.GIT_PROTOCOL, self.host, project)
    cmd = ['ls-remote', url, 'refs/heads/%s' % branch]
    try:
      result = git.RunGit('.', cmd, print_cmd=self.print_cmd)
      if result:
        # ls-remote output is '<sha1>\t<ref>'; the first token is the hash.
        return result.output.split()[0]
    except cros_build_lib.RunCommandError:
      cros_build_lib.Error('Command "%s" failed.', cros_build_lib.CmdToStr(cmd),
                           exc_info=True)
def QuerySingleRecord(self, change=None, **kwargs):
"""Free-form query of a gerrit change that expects a single result.
Args:
change: A gerrit change number.
**kwargs:
dryrun: Don't query the gerrit server; just return None.
must_match: Raise an exception if the query comes back empty. If this
is False, an unsatisfied query will return None.
Refer to Query() docstring for remaining arguments.
Returns:
If kwargs['raw'] == True, return a python dict representing the
change; otherwise, return a cros_patch.GerritPatch object.
"""
query_kwds = kwargs
dryrun = query_kwds.get('dryrun')
must_match = query_kwds.pop('must_match', True)
results = self.Query(change, **query_kwds)
if dryrun:
return None
elif not results:
if must_match:
raise QueryHasNoResults('Query %s had no results' % (change,))
return None
elif len(results) != 1:
raise QueryNotSpecific('Query %s returned too many results: %s'
% (change, results))
return results[0]
def Query(self, change=None, sort=None, current_patch=True, options=(),
dryrun=False, raw=False, start=None, bypass_cache=True, **kwargs):
"""Free-form query for gerrit changes.
Args:
change: ChangeId, git commit hash, or gerrit number for a change.
sort: A functor to extract a sort key from a cros_patch.GerritChange
object, for sorting results.. If this is None, results will not be
sorted.
current_patch: If True, ask the gerrit server for extra information about
the latest uploaded patch.
options: Deprecated.
dryrun: If True, don't query the gerrit server; return an empty list.
raw: If True, return a list of python dict's representing the query
results. Otherwise, return a list of cros_patch.GerritPatch.
start: Offset in the result set to start at.
bypass_cache: Query each change to make sure data is up to date.
kwargs: A dict of query parameters, as described here:
https://gerrit-review.googlesource.com/Documentation/rest-api-changes.html#list-changes
Returns:
A list of python dicts or cros_patch.GerritChange.
"""
query_kwds = kwargs
if options:
raise GerritException('"options" argument unsupported on gerrit-on-borg.')
url_prefix = gob_util.GetGerritFetchUrl(self.host)
# All possible params are documented at
# pylint: disable=C0301
# https://gerrit-review.googlesource.com/Documentation/rest-api-changes.html#list-changes
o_params = ['DETAILED_ACCOUNTS', 'ALL_REVISIONS', 'DETAILED_LABELS']
if current_patch:
o_params.extend(['CURRENT_COMMIT', 'CURRENT_REVISION'])
if change and cros_patch.ParseGerritNumber(change) and not query_kwds:
if dryrun:
cros_build_lib.Info('Would have run gob_util.GetChangeDetail(%s, %s)',
self.host, change)
return []
change = self.GetChangeDetail(change)
if change is None:
return []
patch_dict = cros_patch.GerritPatch.ConvertQueryResults(change, self.host)
if raw:
return [patch_dict]
return [cros_patch.GerritPatch(patch_dict, self.remote, url_prefix)]
# TODO: We should allow querying using a cros_patch.PatchQuery
# object directly.
if change and cros_patch.ParseSHA1(change):
# Use commit:sha1 for accurate query results (crbug.com/358381).
kwargs['commit'] = change
change = None
elif change and cros_patch.ParseChangeID(change):
# Use change:change-id for accurate query results (crbug.com/357876).
kwargs['change'] = change
change = None
elif change and cros_patch.ParseFullChangeID(change):
project, branch, change_id = cros_patch.ParseFullChangeID(change)
kwargs['change'] = change_id
kwargs['project'] = project
kwargs['branch'] = branch
change = None
if change and query_kwds.get('change'):
raise GerritException('Bad query params: provided a change-id-like query,'
' and a "change" search parameter')
if dryrun:
cros_build_lib.Info(
'Would have run gob_util.QueryChanges(%s, %s, first_param=%s, '
'limit=%d)', self.host, repr(query_kwds), change,
self._GERRIT_MAX_QUERY_RETURN)
return []
start = 0
moar = gob_util.QueryChanges(
self.host, query_kwds, first_param=change, start=start,
limit=self._GERRIT_MAX_QUERY_RETURN, o_params=o_params)
result = list(moar)
while moar and self.MORE_CHANGES in moar[-1]:
start += len(moar)
moar = gob_util.QueryChanges(
self.host, query_kwds, first_param=change, start=start,
limit=self._GERRIT_MAX_QUERY_RETURN, o_params=o_params)
result.extend(moar)
# NOTE: Query results are served from the gerrit cache, which may be stale.
# To make sure the patch information is accurate, re-request each query
# result directly, circumventing the cache. For reference:
# https://code.google.com/p/chromium/issues/detail?id=302072
if bypass_cache:
result = self.GetMultipleChangeDetail([x['_number'] for x in result])
result = [cros_patch.GerritPatch.ConvertQueryResults(
x, self.host) for x in result]
if sort:
result = sorted(result, key=operator.itemgetter(sort))
if raw:
return result
return [cros_patch.GerritPatch(x, self.remote, url_prefix) for x in result]
  def GetMultipleChangeDetail(self, changes):
    """Query the gerrit server for multiple changes using GetChangeDetail.

    Fetches are fanned out across a pool of _NUM_PROCESSES worker
    processes to limit load on the server.

    Args:
      changes: A sequence of gerrit change numbers.

    Returns:
      A list of the raw output of GetChangeDetail.
    """
    # Each task gets a single-element argument list for GetChangeDetail.
    inputs = [[change] for change in changes]
    return parallel.RunTasksInProcessPool(self.GetChangeDetail, inputs,
                                          processes=self._NUM_PROCESSES)
  def QueryMultipleCurrentPatchset(self, changes):
    """Query the gerrit server for multiple changes.

    This is a generator; it yields nothing when |changes| is empty.

    Args:
      changes: A sequence of gerrit change numbers.

    Yields:
      (change, cros_patch.GerritPatch) tuples, one per requested change.

    Raises:
      GerritException: if any requested change is missing on the server.
    """
    if not changes:
      return
    url_prefix = gob_util.GetGerritFetchUrl(self.host)
    results = self.GetMultipleChangeDetail(changes)
    for change, change_detail in zip(changes, results):
      if not change_detail:
        raise GerritException('Change %s not found on server %s.'
                              % (change, self.host))
      patch_dict = cros_patch.GerritPatch.ConvertQueryResults(
          change_detail, self.host)
      yield change, cros_patch.GerritPatch(patch_dict, self.remote, url_prefix)
@staticmethod
def _to_changenum(change):
"""Unequivocally return a gerrit change number.
The argument may either be an number, which will be returned unchanged;
or an instance of GerritPatch, in which case its gerrit number will be
returned.
"""
# TODO(davidjames): Deprecate the ability to pass in strings to these
# functions -- API users should just pass in a GerritPatch instead or use
# the gob_util APIs directly.
if isinstance(change, cros_patch.GerritPatch):
return change.gerrit_number
return change
def SetReview(self, change, msg=None, labels=None, dryrun=False):
"""Update the review labels on a gerrit change.
Args:
change: A gerrit change number.
msg: A text comment to post to the review.
labels: A dict of label/value to set on the review.
dryrun: If True, don't actually update the review.
"""
if not msg and not labels:
return
if dryrun:
if msg:
cros_build_lib.Info('Would have added message "%s" to change "%s".',
msg, change)
if labels:
for key, val in labels.iteritems():
cros_build_lib.Info(
'Would have set label "%s" to "%s" for change "%s".',
key, val, change)
return
gob_util.SetReview(self.host, self._to_changenum(change),
msg=msg, labels=labels, notify='ALL')
def RemoveReady(self, change, dryrun=False):
    """Set the 'Commit-Queue' and 'Trybot-Ready' labels on a |change| to '0'."""
    if dryrun:
        cros_build_lib.Info('Would have reset Commit-Queue label for %s', change)
        return
    changenum = self._to_changenum(change)
    # Reset both labels, Commit-Queue first, notifying only the owner.
    for label in ('Commit-Queue', 'Trybot-Ready'):
        gob_util.ResetReviewLabels(self.host, changenum,
                                   label=label, notify='OWNER')
def SubmitChange(self, change, dryrun=False):
    """Land (merge) a gerrit change using the JSON API.

    Args:
      change: A cros_patch.GerritPatch; unlike the other wrappers this does
        not accept a bare change number, since both the gerrit number and
        the reviewed sha1 are required.
      dryrun: If True, only log what would have been done.
    """
    if dryrun:
        cros_build_lib.Info('Would have submitted change %s', change)
        return
    gob_util.SubmitChange(self.host, change.gerrit_number, revision=change.sha1)
def SubmitChangeUsingGit(self, change, git_repo, dryrun=False):
    """Submit |change| using 'git push'.

    This tries to submit a change that is present in |git_repo| via 'git push'.
    It rebases the change if necessary and submits it.

    Args:
      change: A cros_patch.GerritPatch present in |git_repo|.
      git_repo: Path to the local checkout to push from.
      dryrun: Forwarded to git.GitPush.

    Returns:
      True if we were able to submit the change using 'git push'. If not, we
      output a warning and return False.
    """
    remote, checkout_ref = git.GetTrackingBranch(git_repo)
    # SHA1 of the patchset the server currently has; starts as the uploaded
    # patchset and is refreshed after each re-upload below.
    uploaded_sha1 = change.sha1
    # Up to three attempts: (re-upload if needed), push to submit, and on
    # push failure rebase and try again.
    for _ in range(3):
        # Get our updated SHA1.
        local_sha1 = change.GetLocalSHA1(git_repo, checkout_ref)
        if local_sha1 is None:
            logging.warn('%s is not present in %s', change, git_repo)
            break
        if local_sha1 != uploaded_sha1:
            # The local commit changed (e.g. after a rebase), so upload the
            # new patchset to refs/for/<branch> before submitting it.
            try:
                push_to = git.RemoteRef(change.project_url,
                                        'refs/for/%s' % change.tracking_branch)
                git.GitPush(git_repo, local_sha1, push_to, dryrun=dryrun)
                uploaded_sha1 = local_sha1
            except cros_build_lib.RunCommandError:
                break
        try:
            # Pushing straight to the tracking branch is the actual submit.
            push_to = git.RemoteRef(change.project_url, change.tracking_branch)
            git.GitPush(git_repo, local_sha1, push_to, dryrun=dryrun)
            return True
        except cros_build_lib.RunCommandError:
            logging.warn('git push failed for %s; was a change chumped in the '
                         'middle of the CQ run?',
                         change, exc_info=True)
        # Rebase the branch.
        try:
            git.SyncPushBranch(git_repo, remote, checkout_ref)
        except cros_build_lib.RunCommandError:
            logging.warn('git rebase failed for %s; was a change chumped in the '
                         'middle of the CQ run?',
                         change, exc_info=True)
            break
    return False
def AbandonChange(self, change, dryrun=False):
    """Mark a gerrit change as 'Abandoned'.

    Args:
      change: A gerrit change number or cros_patch.GerritPatch.
      dryrun: If True, only log what would have been done.
    """
    if dryrun:
        cros_build_lib.Info('Would have abandoned change %s', change)
        return
    gob_util.AbandonChange(self.host, self._to_changenum(change))
def RestoreChange(self, change, dryrun=False):
    """Re-activate a previously abandoned gerrit change.

    Args:
      change: A gerrit change number or cros_patch.GerritPatch.
      dryrun: If True, only log what would have been done.
    """
    if dryrun:
        cros_build_lib.Info('Would have restored change %s', change)
        return
    gob_util.RestoreChange(self.host, self._to_changenum(change))
def DeleteDraft(self, change, dryrun=False):
    """Delete a draft patch set.

    Args:
      change: A gerrit change number or cros_patch.GerritPatch.
      dryrun: If True, only log what would have been done.
    """
    if dryrun:
        cros_build_lib.Info('Would have deleted draft patch set %s', change)
        return
    gob_util.DeleteDraft(self.host, self._to_changenum(change))
def GetAccount(self):
    """Get information about the user account.

    Returns:
      Whatever gob_util.GetAccount returns for this host's authenticated
      account.
    """
    return gob_util.GetAccount(self.host)
def GetGerritPatchInfo(patches):
    """Query Gerrit server for patch information using string queries.

    Args:
      patches: A list of patch IDs to query. Internal patches start with a '*'.

    Returns:
      A list of GerritPatch objects describing each patch. Only the first
      instance of a requested patch is returned.

    Raises:
      PatchException if a patch can't be found.
      ValueError if a query string cannot be converted to a PatchQuery object.
    """
    # Convert each string ID into a PatchQuery, then delegate.
    queries = [cros_patch.ParsePatchDep(patch_id) for patch_id in patches]
    return GetGerritPatchInfoWithPatchQueries(queries)
def GetGerritPatchInfoWithPatchQueries(patches):
    """Query Gerrit server for patch information using PatchQuery objects.

    Args:
      patches: A list of PatchQuery objects to query.

    Returns:
      A list of GerritPatch objects describing each patch. Only the first
      instance of a requested patch is returned.

    Raises:
      PatchException if a patch can't be found.
    """
    seen_ids = set()
    ordered = []
    for remote in constants.CHANGE_PREFIX:
        helper = GetGerritHelper(remote)
        query_texts = [query.ToGerritQueryText() for query in patches
                       if query.remote == remote]
        for _number, change in helper.QueryMultipleCurrentPatchset(query_texts):
            # Keep only the first occurrence of each change so the ordering
            # the caller asked for is honored -- this matters for
            # cherry-picking.
            if change.id in seen_ids:
                continue
            seen_ids.add(change.id)
            ordered.append(change)
    return ordered
def GetGerritHelper(remote=None, gob=None, **kwargs):
    """Return a GerritHelper instance for interacting with the given remote.

    Args:
      remote: Name of the gerrit remote; used when |gob| is falsy.
      gob: Optional gerrit-on-borg name; takes precedence over |remote|.
      **kwargs: Forwarded to the GerritHelper factory.
    """
    factory = GerritHelper.FromGob if gob else GerritHelper.FromRemote
    return factory(gob if gob else remote, **kwargs)
def GetGerritHelperForChange(change):
    """Return a usable GerritHelper instance for this change.

    If you need a GerritHelper for a specific change, get it via this
    function.

    Args:
      change: An object exposing a .remote attribute naming its gerrit remote.
    """
    return GetGerritHelper(change.remote)
def GetCrosInternal(**kwargs):
    """Convenience method for accessing private ChromeOS gerrit.

    Keyword args are forwarded to GetGerritHelper.
    """
    return GetGerritHelper(constants.INTERNAL_REMOTE, **kwargs)
def GetCrosExternal(**kwargs):
    """Convenience method for accessing public ChromiumOS gerrit.

    Keyword args are forwarded to GetGerritHelper.
    """
    return GetGerritHelper(constants.EXTERNAL_REMOTE, **kwargs)
def GetChangeRef(change_number, patchset=None):
    """Given a change number, return the refs/changes/* space for it.

    Args:
      change_number: The gerrit change number you want a refspec for.
      patchset: If given it must either be an integer or '*'. When given,
        the returned refspec is for that exact patchset. If '*' is given,
        it's used for pulling down all patchsets for that change.

    Returns:
      A git refspec.
    """
    number = int(change_number)
    # Gerrit shards change refs by the last two digits of the change number.
    ref = 'refs/changes/%02i/%i' % (number % 100, number)
    if patchset is None:
        return ref
    suffix = '*' if patchset == '*' else int(patchset)
    return '%s/%s' % (ref, suffix)
| |
from datetime import datetime
from django.conf import settings
from django.contrib.contenttypes.models import ContentType
from django.contrib.contenttypes import generic
from django.core.mail import mail_admins
from django.db import models
from dennis.translator import Translator
from statsd import statsd
import waffle
from .gengo_utils import (
GengoError,
FjordGengo,
GengoUnknownLanguage,
GengoUnsupportedLanguage,
GENGO_UNSUPPORTED_MACHINE_LC_SRC
)
from .utils import locale_equals_language
from fjord.base.models import ModelBase
from fjord.base.utils import instance_to_key, wrap_with_paragraphs
from fjord.journal.models import Record
from fjord.journal.utils import j_error, j_info
class SuperModel(models.Model):
    """Model used for unit tests

    It's really difficult to define a model in the test suite used
    just for testing without a lot of shenanigans with South and the
    db, so instead we define a "real" model, but only use it for
    testing.
    """
    # Locale code of the source text (max 5 chars, e.g. "es").
    locale = models.CharField(max_length=5)
    # Source field that would get translated.
    desc = models.CharField(blank=True, default=u'', max_length=100)
    # Destination field the translation would be written to.
    trans_desc = models.CharField(blank=True, default=u'', max_length=100)

    def generate_translation_jobs(self, system=None):
        """This always returns a fake translation job"""
        # (key, system-name, src-lang, src-field, dst-lang, dst-field)
        return [
            (instance_to_key(self), u'fake', self.locale, u'desc',
             u'en', u'trans_desc')
        ]
# Registry mapping translation-system name -> TranslationSystem subclass.
# Populated automatically by TranslationSystemMeta as subclasses are defined.
_translation_systems = {}


def get_translation_systems():
    """Returns translation systems map

    Maps system name to TranslationSystem subclass.
    """
    return _translation_systems
def get_translation_system_choices():
    """Return (value, display-name) tuples for a Choices field.

    A "no choice" entry, whose value is the empty string, is placed
    first.
    """
    system_choices = [(name, name) for name in _translation_systems]
    return ((u'', u'None'),) + tuple(system_choices)
class TranslationSystemMeta(type):
    """Metaclass to register TranslationSystem subclasses

    Any subclass that defines a non-empty ``name`` is added to the
    module-level ``_translation_systems`` registry at class-creation
    time.
    """
    def __new__(cls, name, bases, attrs):
        new_cls = super(TranslationSystemMeta, cls).__new__(
            cls, name, bases, attrs)
        # The abstract base leaves name == '' and so is never registered.
        if new_cls.name:
            _translation_systems[new_cls.name] = new_cls
        return new_cls
class TranslationSystem(object):
    """Translation system base class

    All translation system plugins should subclass this. They should
    additionally do the following:

    1. set the name property to something unique
    2. implement translate method

    See FakeTranslator and DennisTranslator for sample
    implementations.
    """
    __metaclass__ = TranslationSystemMeta

    # Name of this translation system; must be unique. The empty default
    # keeps this base class out of the registry.
    name = ''

    # Whether or not this system uses push and pull translations
    use_push_and_pull = False

    # Whether or not this system has daily activities
    use_daily = False

    def translate(self, instance, src_lang, src_field, dst_lang, dst_field):
        """Implement this to translate fields on an instance

        This translates in-place.

        If this is an asynchronous system, then this can either push
        the text to be translated now or queue the text to be pushed
        later in a batch of things to be translated.
        """
        raise NotImplementedError()

    def push_translations(self):
        """Implement this to do any work required to push translations

        This is for asynchronous systems that take a batch of translations,
        perform some work, and then return results some time later.

        Print any status text to stdout.
        """
        raise NotImplementedError()

    def pull_translations(self):
        """Implement this to do any work required to pull translations

        This is for asynchronous systems that take a batch of translations,
        perform some work, and then return results some time later.

        Print any status text to stdout.
        """
        raise NotImplementedError()

    def run_daily_activities(self):
        """Implement this to do any work that needs to happen once per day

        Examples:

        1. sending out daily reminders
        2. sending out a warning about low balance

        Print any status text to stdout.
        """
        raise NotImplementedError()

    def log_info(self, instance, action='translate', msg=u'', metadata=None):
        """Write an info-level entry to the translations journal."""
        metadata = metadata or {}
        j_info(
            app='translations',
            src=self.name,
            action=action,
            msg=msg,
            instance=instance,
            metadata=metadata
        )

    def log_error(self, instance, action='translate', msg=u'', metadata=None):
        """Write an error-level entry to the translations journal."""
        metadata = metadata or {}
        j_error(
            app='translations',
            src=self.name,
            action=action,
            msg=msg,
            instance=instance,
            metadata=metadata
        )
# ---------------------------------------------------------
# Fake translation system
# ---------------------------------------------------------
class FakeTranslator(TranslationSystem):
    """Fake synchronous system that "translates" by uppercasing the text."""
    name = 'fake'

    def translate(self, instance, src_lang, src_field, dst_lang, dst_field):
        """Uppercase src_field into dst_field, save, and journal it."""
        source_text = getattr(instance, src_field)
        setattr(instance, dst_field, source_text.upper())
        instance.save()
        self.log_info(instance=instance, action='translate', msg='success')
# ---------------------------------------------------------
# Dennis translation system
# ---------------------------------------------------------
class DennisTranslator(TranslationSystem):
    """Synchronous system translating via dennis' shouty+anglequote pipeline."""
    name = 'dennis'

    def translate(self, instance, src_lang, src_field, dst_lang, dst_field):
        """Run src_field through dennis and save the result to dst_field."""
        source_text = getattr(instance, src_field)
        if not source_text:
            # Nothing to translate; leave the instance untouched.
            return
        translated = Translator(
            [], ['shouty', 'anglequote']).translate_string(source_text)
        setattr(instance, dst_field, translated)
        instance.save()
# ---------------------------------------------------------
# Gengo translator system
# ---------------------------------------------------------
# Job/order lifecycle states.
STATUS_CREATED = 'created'          # row exists, not yet sent to Gengo
STATUS_IN_PROGRESS = 'in-progress'  # batched into an order and submitted
STATUS_COMPLETE = 'complete'        # translated text has been pulled back

# Django choices tuples; stored value and display value are identical.
STATUS_CHOICES = (
    (STATUS_CREATED, STATUS_CREATED),
    (STATUS_IN_PROGRESS, STATUS_IN_PROGRESS),
    (STATUS_COMPLETE, STATUS_COMPLETE)
)
class GengoJob(ModelBase):
    """Represents a job for the Gengo human translation system

    A job covers translating one field of one instance (tracked via a
    generic foreign key) from src_lang to dst_lang. Jobs start 'created',
    become 'in-progress' when batched into a GengoOrder and 'complete'
    once the translated text has been pulled back.
    """
    # Generic foreign key to the instance this record is about
    content_type = models.ForeignKey(ContentType)
    object_id = models.PositiveIntegerField()
    content_object = generic.GenericForeignKey()

    # Gengo translation tier (e.g. 'machine', 'standard')
    tier = models.CharField(max_length=10, default=u'')

    # Source and destination fields for the translation
    src_field = models.CharField(max_length=50)
    dst_field = models.CharField(max_length=50)

    # Source and destination languages
    src_lang = models.CharField(default=u'', blank=True, max_length=10)
    dst_lang = models.CharField(default=u'', blank=True, max_length=10)

    # Status of the job and the order it's tied to
    status = models.CharField(
        choices=STATUS_CHOICES, default=STATUS_CREATED, max_length=12)
    order = models.ForeignKey('translations.GengoOrder', null=True)

    # When this job instance was created
    created = models.DateTimeField(default=datetime.now)

    # When this job instance was completed
    completed = models.DateTimeField(blank=True, null=True)

    def __unicode__(self):
        return u'<GengoJob {0}>'.format(self.id)

    def save(self, *args, **kwargs):
        """Save the job, journaling a create event on first insert.

        Bug fix: newness must be captured *before* calling the parent
        save() -- Django assigns self.pk during the INSERT, so the old
        post-save ``if not self.pk`` check could never be true and the
        create event was never logged.
        """
        is_new = self.pk is None
        super(GengoJob, self).save(*args, **kwargs)
        if is_new:
            self.log('create GengoJob', {})

    @classmethod
    def unique_id_to_id(cls, unique_id):
        """Extract the GengoJob pk from a unique_id string."""
        # unique_id has the form '<SITE_URL>||GengoJob||<pk>'.
        parts = unique_id.split('||')
        return int(parts[-1])

    @property
    def unique_id(self):
        """Returns a unique id for this job for this host

        When we create a job with Gengo, we need to tie that job
        uniquely back to a GengoJob row, but that could be created on
        a variety of systems. This (attempts to) create a unique
        identifier for a specific GengoJob in a specific environment
        by (ab)using the SITE_URL.

        FIXME: It's possible we don't need to do this because jobs are
        tied to orders and order numbers are generated by Gengo and
        should be unique.
        """
        return '||'.join([
            getattr(settings, 'SITE_URL', 'localhost'),
            'GengoJob',
            str(self.pk)
        ])

    def assign_to_order(self, order):
        """Assigns the job to an order which makes the job in progress"""
        self.order = order
        self.status = STATUS_IN_PROGRESS
        self.save()

    def mark_complete(self):
        """Marks a job as complete"""
        self.status = STATUS_COMPLETE
        self.completed = datetime.now()
        self.save()
        self.log('completed', {})

    def log(self, action, metadata):
        """Write an entry to the translations journal for this job."""
        j_info(
            app='translations',
            src='gengo_human',
            action=action,
            msg='job event',
            instance=self,
            metadata=metadata
        )

    @property
    def records(self):
        """Journal records associated with this job."""
        return Record.objects.records(self)
class GengoOrder(ModelBase):
    """Represents a Gengo translation order which contains multiple jobs"""
    # Order id assigned by Gengo when the batch was submitted.
    order_id = models.CharField(max_length=100)
    status = models.CharField(
        choices=STATUS_CHOICES, default=STATUS_IN_PROGRESS, max_length=12)

    # When this instance was created which should also line up with
    # the time the order was submitted to Gengo
    created = models.DateTimeField(default=datetime.now)

    # When this order was completed
    completed = models.DateTimeField(blank=True, null=True)

    def __unicode__(self):
        return u'<GengoOrder {0}>'.format(self.id)

    def save(self, *args, **kwargs):
        """Save the order, journaling a create event on first insert.

        Bug fix: newness must be captured *before* calling the parent
        save() -- Django assigns self.pk during the INSERT, so the old
        post-save ``if not self.pk`` check could never be true and the
        create event was never logged.
        """
        is_new = self.pk is None
        super(GengoOrder, self).save(*args, **kwargs)
        if is_new:
            self.log('create GengoOrder', {})

    def mark_complete(self):
        """Marks an order as complete"""
        self.status = STATUS_COMPLETE
        self.completed = datetime.now()
        self.save()
        self.log('completed', {})

    def completed_jobs(self):
        """Return the queryset of this order's completed jobs."""
        return self.gengojob_set.filter(status=STATUS_COMPLETE)

    def outstanding_jobs(self):
        """Return the queryset of this order's not-yet-completed jobs."""
        return self.gengojob_set.exclude(status=STATUS_COMPLETE)

    def log(self, action, metadata):
        """Write an entry to the translations journal for this order."""
        j_info(
            app='translations',
            src='gengo_human',
            action=action,
            msg='order event',
            instance=self,
            metadata=metadata
        )

    @property
    def records(self):
        """Journal records associated with this order."""
        return Record.objects.records(self)
class GengoTranslationSystem(TranslationSystem):
    """Superclass for GengoHumanTranslator and GengoMachineTranslator"""
    use_push_and_pull = True

    # Which translation tier to use
    gengo_tier = None

    # Whether to watch the account balance when creating jobs
    gengo_watch_balance = False

    # Whether to check if the src/dst is a supported pair--not all Gengo
    # translation systems care about this
    gengo_check_supported_language_pair = False

    # This is a ridiculous flag for whether to check if the lc_src is
    # supported for machine translations
    # NOTE(review): despite the "_lc_dst" suffix, the code below checks
    # lc_src against GENGO_UNSUPPORTED_MACHINE_LC_SRC -- confirm intent.
    gengo_check_supported_machine_lc_dst = False

    def translate(self, instance, src_lang, src_field, dst_lang, dst_field):
        """Guess the source language and queue a GengoJob for the text.

        Rather than translating immediately, this creates a GengoJob row
        (status 'created') which push_translations() later batches into a
        Gengo order. Several cases short-circuit without creating a job;
        each one is journaled and/or counted in statsd.
        """
        # If gengosystem is disabled, we just return immediately. We
        # can backfill later.
        if not waffle.switch_is_active('gengosystem'):
            return
        text = getattr(instance, src_field)
        # Journal metadata attached to the log entries below.
        metadata = {
            'tier': self.gengo_tier,
            'locale': instance.locale,
            'length': len(text),
            'body': text[:50].encode('utf-8')
        }
        gengo_api = FjordGengo()
        # Guess the language. If we can't guess the language, then we
        # don't create a GengoJob.
        try:
            lc_src = gengo_api.guess_language(text)
            if lc_src not in gengo_api.get_languages():
                raise GengoUnsupportedLanguage(
                    'unsupported language: {0}'.format(lc_src))
        except GengoUnknownLanguage as exc:
            # FIXME: This might be an indicator that this response is
            # spam. At some point, we can write code to account for
            # that.
            self.log_error(instance, action='guess-language', msg=unicode(exc),
                           metadata=metadata)
            statsd.incr('translation.{0}.unknown'.format(self.name))
            return
        except GengoUnsupportedLanguage as exc:
            # FIXME: This is a similar boat to GengoUnknownLanguage
            # where for now, we're just going to ignore it because I'm
            # not sure what to do about it and I'd like more data.
            self.log_error(instance, action='translate', msg=unicode(exc),
                           metadata=metadata)
            statsd.incr('translation.{0}.unsupported'.format(self.name))
            return
        # If the locale doesn't equal the guessed language, then
        # that's interesting since the user is writing feedback in a
        # language other than what the ui is showing. We want to log
        # that for metrics purposes.
        if not locale_equals_language(instance.locale, lc_src):
            self.log_error(
                instance,
                action='guess-language',
                msg='locale "{0}" != guessed language "{1}"'.format(
                    instance.locale, lc_src),
                metadata=metadata)
        # If the guessed source language already equals the destination
        # language, we just copy it over and we're done.
        if locale_equals_language(dst_lang, lc_src):
            setattr(instance, dst_field, text)
            instance.save()
            self.log_info(
                instance, action='translate',
                msg=u'lc_src == dst_lang, so we copy src to dst',
                metadata=metadata)
            return
        # Silently skip source languages the machine tier can't handle.
        if ((self.gengo_check_supported_machine_lc_dst
             and lc_src in GENGO_UNSUPPORTED_MACHINE_LC_SRC)):
            return
        # If src/dst isn't a supported pair, log an issue for metrics
        # purposes and move on.
        if ((self.gengo_check_supported_language_pair
             and (lc_src, dst_lang) not in gengo_api.get_language_pairs())):
            self.log_error(
                instance, action='translate',
                msg=u'(lc_src {0}, dst_lang {1}) not supported'.format(
                    lc_src, dst_lang),
                metadata=metadata)
            return
        job = GengoJob(
            tier=self.gengo_tier,
            content_object=instance,
            src_lang=lc_src,
            src_field=src_field,
            dst_lang=dst_lang,
            dst_field=dst_field
        )
        job.save()

    def balance_good_to_continue(self, balance, threshold):
        """Checks whether balance is good to continue

        If it's not, this sends some mail and returns False.

        We check against a threshold that's high enough that we're
        pretty sure the next job we create will not exceed the credits
        in the account. Pretty sure if we exceed the credits in the
        account, it'll return a non-ok opstat and that'll throw an
        exception and everything will be ok data-consistency-wise.
        """
        # FIXME: This should email a different group than admin,
        # but I'm (ab)using the admin group for now because I know
        # they're set up right.
        if balance < threshold:
            mail_admins(
                subject='Gengo account balance {0} < {1}'.format(
                    balance, threshold),
                message=wrap_with_paragraphs(
                    'Dagnabit! Send more money or the translations get it! '
                    'Don\'t try no funny business, neither!'
                    '\n\n'
                    'Love,'
                    '\n\n'
                    'Fjord McGengo'
                )
            )
            return False
        return True

    def push_translations(self):
        """Batch all 'created' jobs for this tier into Gengo orders.

        Jobs are bucketed by source language so one Gengo translator can
        handle a whole order; one order is posted per language bucket.
        """
        # If gengosystem is disabled, we just return immediately. We
        # can backfill later.
        if not waffle.switch_is_active('gengosystem'):
            return
        gengo_api = FjordGengo()
        if not gengo_api.is_configured():
            # If Gengo isn't configured, then we drop out here rather
            # than raise a GengoConfig error.
            return
        if self.gengo_watch_balance:
            balance = gengo_api.get_balance()
            threshold = settings.GENGO_ACCOUNT_BALANCE_THRESHOLD
            # statsd the balance so we can track it with graphite.
            statsd.gauge('translation.gengo.balance', balance)
            if not self.balance_good_to_continue(balance, threshold):
                # If we don't have enough balance, stop.
                return
        # Create language buckets for the jobs for this translator.
        # We bucket by language because this makes it easier for a
        # single Gengo translator to translate all the jobs in an
        # order.
        jobs = GengoJob.objects.filter(
            tier=self.gengo_tier, status=STATUS_CREATED)
        lang_buckets = {}
        for job in jobs:
            lang_buckets.setdefault(job.src_lang, []).append(job)
        # For each bucket, assemble an order and post it.
        for lang, jobs in lang_buckets.items():
            batch = []
            for job in jobs:
                batch.append({
                    'id': job.id,
                    'lc_src': job.src_lang,
                    'lc_dst': job.dst_lang,
                    'tier': self.gengo_tier,
                    'text': getattr(job.content_object, job.src_field),
                    'unique_id': job.unique_id
                })
            try:
                resp = gengo_api.translate_bulk(batch)
            except GengoError as exc:
                # Journal the failed batch and move on to the next bucket;
                # these jobs stay 'created' so a later push can retry them.
                self.log_error(
                    instance=None, action='push-translations',
                    msg=unicode(exc),
                    metadata={
                        'batch': batch
                    })
                continue
            # We should have an `order_id` at this point, so we create a
            # GengoOrder with it.
            order = GengoOrder(order_id=resp['order_id'])
            order.save()
            order.log('created', metadata={'response': resp})
            # Update all the jobs in the order.
            for job in jobs:
                job.assign_to_order(order)
            if self.gengo_watch_balance:
                # Update the balance and see if we're below the threshold.
                balance = balance - float(resp['credits_used'])
                if not self.balance_good_to_continue(balance, threshold):
                    # If we don't have enough balance, stop.
                    return

    def pull_translations(self):
        """Pull completed Gengo jobs and save the translated text."""
        # If gengosystem is disabled, we just return immediately. We
        # can backfill later.
        if not waffle.switch_is_active('gengosystem'):
            return
        gengo_api = FjordGengo()
        if not gengo_api.is_configured():
            # If Gengo isn't configured, then we drop out here rather
            # than raise a GengoConfig error.
            return
        # Get all the orders that are in progress
        orders = GengoOrder.objects.filter(status=STATUS_IN_PROGRESS)
        for order in orders:
            # Get the list of all completed jobs
            completed = gengo_api.completed_jobs_for_order(order.order_id)
            # If there are no completed jobs, then we don't need to
            # bother doing any additional processing for this order
            if not completed:
                continue
            # For each complete job we haven't seen before, pull it
            # from the db, save the translated text and update all the
            # bookkeeping.
            for comp in completed:
                id_ = GengoJob.unique_id_to_id(comp['custom_data'])
                job = GengoJob.objects.get(pk=id_)
                if job.status == STATUS_COMPLETE:
                    continue
                instance = job.content_object
                setattr(instance, job.dst_field, comp['body_tgt'])
                instance.save()
                job.mark_complete()
            # Check to see if there are still outstanding jobs for
            # this order. If there aren't, close the order out.
            outstanding = (GengoJob.objects
                           .filter(order=order, status=STATUS_IN_PROGRESS)
                           .count())
            if outstanding == 0:
                order.mark_complete()
class GengoMachineTranslator(GengoTranslationSystem):
    """Translates using Gengo machine translation"""
    name = 'gengo_machine'
    # Machine tier; balance watching and pair checks stay disabled.
    gengo_tier = 'machine'
    # Skip source languages listed in GENGO_UNSUPPORTED_MACHINE_LC_SRC.
    gengo_check_supported_machine_lc_dst = True
class GengoHumanTranslator(GengoTranslationSystem):
    """Translates using Gengo human translation

    Note: This costs real money!
    """
    name = 'gengo_human'
    use_daily = True
    gengo_tier = 'standard'
    # Paid tier, so watch the balance and verify language pairs.
    gengo_watch_balance = True
    gengo_check_supported_language_pair = True

    def run_daily_activities(self):
        """Mail admins a warning when the Gengo balance is getting low.

        The warning fires while the balance is between one and two
        thresholds; below one threshold balance_good_to_continue() sends
        its own mail and stops job pushing instead.
        """
        # If gengosystem is disabled, we don't want to do anything.
        if not waffle.switch_is_active('gengosystem'):
            return
        gengo_api = FjordGengo()
        if not gengo_api.is_configured():
            # If Gengo isn't configured, then we drop out here rather
            # than raise a GengoConfig error.
            return
        balance = gengo_api.get_balance()
        threshold = settings.GENGO_ACCOUNT_BALANCE_THRESHOLD
        if threshold < balance < (2 * threshold):
            mail_admins(
                subject='Warning: Gengo account balance {0} < {1}'.format(
                    balance, 2 * threshold),
                message=wrap_with_paragraphs(
                    'Dear mom,'
                    '\n\n'
                    'Translations are the fab. Running low on funds. Send '
                    'more money when you get a chance.'
                    '\n\n'
                    'Love,'
                    '\n\n'
                    'Fjord McGengo'
                )
            )
| |
# Copyright 2014. Amazon Web Services, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from boto.exception import JSONResponseError
from boto.dynamodb2.exceptions import ConditionalCheckFailedException
from boto.dynamodb2.exceptions import ItemNotFound
from boto.dynamodb2.exceptions import ValidationException
from boto.dynamodb2.items import Item
from boto.dynamodb2.table import Table
from datetime import datetime
class GameController:
    """
    This GameController class basically acts as a singleton providing the
    necessary DynamoDB API calls (both the high-level Table/Item API and
    the low-level conditional-write API) for the tic-tac-toe Games table.
    """

    def __init__(self, connectionManager):
        self.cm = connectionManager
        # Full type string DynamoDB returns when the Games table is missing.
        self.ResourceNotFound = 'com.amazonaws.dynamodb.v20120810#ResourceNotFoundException'

    def createNewGame(self, gameId, creator, invitee):
        """
        Using the High-Level API, an Item is created and saved to the table.
        All the primary keys for either the schema or an index (GameId,
        HostId, StatusDate, and OpponentId) as well as extra attributes
        needed to maintain game state are given a value.
        Returns True/False depending on the success of the save.
        """
        statusDate = "PENDING_" + str(datetime.now())
        item = Item(self.cm.getGamesTable(), data={
            "GameId": gameId,
            "HostId": creator,
            "StatusDate": statusDate,
            "OUser": creator,       # the creator plays "O"
            "Turn": invitee,        # the invitee moves first
            "OpponentId": invitee
        })
        return item.save()

    def checkIfTableIsActive(self):
        """Return True when the Games table reports status ACTIVE."""
        description = self.cm.db.describe_table("Games")
        return description['Table']['TableStatus'] == "ACTIVE"

    def getGame(self, gameId):
        """
        Basic get_item call on the Games Table, where we specify the primary
        key GameId to be the parameter gameId.
        Returns None when the item (or the table) can't be found.
        """
        try:
            return self.cm.getGamesTable().get_item(GameId=gameId)
        except (ItemNotFound, JSONResponseError):
            return None

    def acceptGameInvite(self, game):
        """
        Flip a PENDING game to IN_PROGRESS with a conditional update; the
        write only succeeds while the game is still pending.
        Returns True/False depending on the success of the update.
        """
        statusDate = "IN_PROGRESS_" + str(datetime.now())
        key = {
            "GameId": {"S": game["GameId"]}
        }
        attributeUpdates = {
            "StatusDate": {
                "Action": "PUT",
                "Value": {"S": statusDate}
            }
        }
        expectations = {
            "StatusDate": {
                "AttributeValueList": [{"S": "PENDING_"}],
                "ComparisonOperator": "BEGINS_WITH"
            }
        }
        try:
            self.cm.db.update_item("Games", key=key,
                                   attribute_updates=attributeUpdates,
                                   expected=expectations)
        except ConditionalCheckFailedException:
            return False
        return True

    def rejectGameInvite(self, game):
        """
        Reject the game invite, by deleting the Item from the table.
        Conditional on the fact the game is still in the PENDING status.
        Returns True/False depending on success of delete.
        """
        key = {
            "GameId": {"S": game["GameId"]}
        }
        expectation = {
            "StatusDate": {
                "AttributeValueList": [{"S": "PENDING_"}],
                "ComparisonOperator": "BEGINS_WITH"
            }
        }
        try:
            self.cm.db.delete_item("Games", key, expected=expectation)
        except Exception:
            # Deliberately best-effort: any failure (failed condition,
            # connectivity, ...) is reported as an unsuccessful reject.
            return False
        return True

    def getGameInvites(self, user):
        """
        Performs a query on the "OpponentId-StatusDate-index" in order to get
        the 10 most recent games you were invited to.
        Returns a list of Game objects, or None when the table/index is
        missing (ResourceNotFound).
        """
        invites = []
        if user is None:
            return invites
        gameInvitesIndex = self.cm.getGamesTable().query(
            OpponentId__eq=user,
            StatusDate__beginswith="PENDING_",
            index="OpponentId-StatusDate-index",
            limit=10)
        for _ in range(10):
            try:
                gameInvite = next(gameInvitesIndex)
            except (StopIteration, ValidationException):
                break
            except JSONResponseError as jre:
                if jre.body.get(u'__type', None) == self.ResourceNotFound:
                    return None
                raise jre
            invites.append(gameInvite)
        return invites

    def updateBoardAndTurn(self, item, position, current_player):
        """
        Using the Low Level API, we execute a conditional write on the Item.
        We are able to specify the particular item by passing in the keys
        param, in this case it's just a GameId.
        In expectations, we expect
          the StatusDate to begin with IN_PROGRESS_,
          the Turn to be the player who is currently logged in,
          the "Space" to not exist as an attribute because it hasn't been
          written to yet.
        If this succeeds we update the Turn to the next player, as well.
        Returns True/False depending on the success of these operations.
        """
        player_one = item["HostId"]
        player_two = item["OpponentId"]
        gameId = item["GameId"]
        # "O" belongs to the user stored in OUser; the other player is "X".
        representation = "O" if item["OUser"] == current_player else "X"
        next_player = player_two if current_player == player_one else player_one
        key = {
            "GameId": {"S": gameId}
        }
        attributeUpdates = {
            position: {
                "Action": "PUT",
                "Value": {"S": representation}
            },
            "Turn": {
                "Action": "PUT",
                "Value": {"S": next_player}
            }
        }
        expectations = {
            "StatusDate": {"AttributeValueList": [{"S": "IN_PROGRESS_"}],
                           "ComparisonOperator": "BEGINS_WITH"},
            "Turn": {"Value": {"S": current_player}},
            position: {"Exists": False}
        }
        # LOW LEVEL API
        try:
            self.cm.db.update_item("Games", key=key,
                                   attribute_updates=attributeUpdates,
                                   expected=expectations)
        except ConditionalCheckFailedException:
            return False
        return True

    def getBoardState(self, item):
        """
        Puts the state of the board into a list, putting a blank space for
        spaces that are not occupied.
        """
        squares = ["TopLeft", "TopMiddle", "TopRight",
                   "MiddleLeft", "MiddleMiddle", "MiddleRight",
                   "BottomLeft", "BottomMiddle", "BottomRight"]
        return [" " if item[square] is None else item[square]
                for square in squares]

    def checkForGameResult(self, board, item, current_player):
        """
        Check the board to see if current_player has won, lost, tied or the
        game is still in progress.
        Returns "Win", "Lose", "Tie" or None (for in-progress).
        """
        yourMarker, theirMarker = "X", "O"
        if current_player == item["OUser"]:
            # Bug fix: this branch used to assign a misspelled variable
            # ('theirMakrer'), leaving theirMarker as "O" so an opponent's
            # three-in-a-row was never detected for the "O" player.
            yourMarker, theirMarker = "O", "X"
        winConditions = [[0, 3, 6], [0, 1, 2], [0, 4, 8],
                         [1, 4, 7], [2, 5, 8], [2, 4, 6],
                         [3, 4, 5], [6, 7, 8]]
        for winCondition in winConditions:
            line = [board[i] for i in winCondition]
            if line == [yourMarker] * 3:
                return "Win"
            if line == [theirMarker] * 3:
                return "Lose"
        if self.checkForTie(board):
            return "Tie"
        return None

    def checkForTie(self, board):
        """
        Checks the boardState to see if there are any empty spaces which
        would signify that the game hasn't come to a stalemate yet.
        """
        return " " not in board

    def changeGameToFinishedState(self, item, result, current_user):
        """
        This verifies whether a game has an outcome already and if not sets
        the StatusDate to FINISHED_<date> and fills the Result attribute
        with the name of the winning player (or "Tie").
        Returns True/False depending on the success of the operation.
        """
        # Happens if you're visiting a game that already has a winner
        if item["Result"] is not None:
            return True
        item["StatusDate"] = "FINISHED_" + str(datetime.now())
        item["Turn"] = "N/A"
        if result == "Tie":
            item["Result"] = result
        elif result == "Win":
            item["Result"] = current_user
        elif item["HostId"] == current_user:
            # current_user lost, so the opponent is the winner.
            item["Result"] = item["OpponentId"]
        else:
            item["Result"] = item["HostId"]
        return item.save()

    def mergeQueries(self, host, opp, limit=10):
        """
        Merge the two result iterators (games hosted / games joined), each
        already sorted most-recent-first, into one most-recent-first list
        of at most |limit| games.
        """
        games = []
        game_one = None
        game_two = None
        # Bug fix: this loop used 'while len(games) <= limit', which let
        # the merge emit limit + 1 games (its own drain paths below cap at
        # exactly limit); '<' enforces the cap consistently.
        while len(games) < limit:
            if game_one is None:
                try:
                    game_one = next(host)
                except StopIteration:
                    # host exhausted: flush the pending opp game, drain opp.
                    if game_two is not None:
                        games.append(game_two)
                    for rest in opp:
                        if len(games) == limit:
                            break
                        games.append(rest)
                    return games
            if game_two is None:
                try:
                    game_two = next(opp)
                except StopIteration:
                    # opp exhausted: flush the pending host game, drain host.
                    if game_one is not None:
                        games.append(game_one)
                    for rest in host:
                        if len(games) == limit:
                            break
                        games.append(rest)
                    return games
            if game_one > game_two:
                games.append(game_one)
                game_one = None
            else:
                games.append(game_two)
                game_two = None
        return games

    def getGamesWithStatus(self, user, status):
        """
        Query for all games that a user appears in and have a certain status.
        Sorts/merges the results of the two queries for top 10 most recent
        games.
        Return a list of Game objects.
        """
        if user is None:
            return []
        hostGamesInProgress = self.cm.getGamesTable().query(
            HostId__eq=user,
            StatusDate__beginswith=status,
            index="HostId-StatusDate-index",
            limit=10)
        oppGamesInProgress = self.cm.getGamesTable().query(
            OpponentId__eq=user,
            StatusDate__beginswith=status,
            index="OpponentId-StatusDate-index",
            limit=10)
        return self.mergeQueries(hostGamesInProgress, oppGamesInProgress)
| |
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import collections
import os
import re
import shutil
import time
import netaddr
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import importutils
import six
from neutron.agent.linux import external_process
from neutron.agent.linux import ip_lib
from neutron.agent.linux import iptables_manager
from neutron.agent.linux import utils
from neutron.common import constants
from neutron.common import exceptions
from neutron.common import ipv6_utils
from neutron.common import utils as commonutils
from neutron.i18n import _LE, _LI, _LW
from neutron.openstack.common import uuidutils
# Module-level logger for the DHCP agent driver code below.
LOG = logging.getLogger(__name__)
# Protocol names used in the PORTS maps of the DHCP driver classes.
UDP = 'udp'
TCP = 'tcp'
# Well-known ports a dnsmasq instance listens on: DNS plus DHCPv4/DHCPv6.
DNS_PORT = 53
DHCPV4_PORT = 67
DHCPV6_PORT = 547
# Link-local metadata service address (169.254.169.254/16) and HTTP port.
METADATA_DEFAULT_PREFIX = 16
METADATA_DEFAULT_IP = '169.254.169.254'
METADATA_DEFAULT_CIDR = '%s/%d' % (METADATA_DEFAULT_IP,
                                   METADATA_DEFAULT_PREFIX)
METADATA_PORT = 80
# DHCP option code 249: Microsoft's pre-RFC3442 classless static route
# option (despite the "DNS" in the name).
WIN2k3_STATIC_DNS = 249
# Prefix for the per-network namespaces created by this agent.
NS_PREFIX = 'qdhcp-'
# Service name under which dnsmasq is registered with the process monitor.
DNSMASQ_SERVICE_NAME = 'dnsmasq'
class DictModel(dict):
    """Convert dict into an object that provides attribute access to values."""

    def __init__(self, *args, **kwargs):
        """Convert dict values to DictModel values.

        Nested dicts — including dicts held inside list/tuple values — are
        recursively wrapped so attribute access works at any depth.
        """
        super(DictModel, self).__init__(*args, **kwargs)

        def needs_upgrade(item):
            """Check if `item` is a dict and needs to be changed to DictModel.
            """
            return isinstance(item, dict) and not isinstance(item, DictModel)

        def upgrade(item):
            """Upgrade item if it needs to be upgraded."""
            if needs_upgrade(item):
                return DictModel(item)
            else:
                return item

        # NOTE: items() instead of the Python-2-only iteritems() keeps this
        # class working on both Python 2 and Python 3 (the rest of the file
        # already uses six for compatibility). Only values are reassigned,
        # so iterating while updating is safe.
        for key, value in self.items():
            if isinstance(value, (list, tuple)):
                # Keep the same type but convert dicts to DictModels
                self[key] = type(value)(
                    (upgrade(item) for item in value)
                )
            elif needs_upgrade(value):
                # Change dict instance values to DictModel instance values
                self[key] = DictModel(value)

    def __getattr__(self, name):
        # Expose dict keys as attributes; a missing key surfaces as the
        # conventional AttributeError rather than KeyError.
        try:
            return self[name]
        except KeyError as e:
            raise AttributeError(e)

    def __setattr__(self, name, value):
        self[name] = value

    def __delattr__(self, name):
        del self[name]
class NetModel(DictModel):
    """DictModel for a network that also carries its DHCP namespace name."""

    def __init__(self, use_namespaces, d):
        super(NetModel, self).__init__(d)
        # Namespace is qdhcp-<network id> when namespaces are in use,
        # otherwise None.
        self._ns_name = ("%s%s" % (NS_PREFIX, self.id)
                         if use_namespaces else None)

    @property
    def namespace(self):
        return self._ns_name
@six.add_metaclass(abc.ABCMeta)
class DhcpBase(object):
    """Abstract base for drivers that manage one network's DHCP service."""

    def __init__(self, conf, network, process_monitor,
                 version=None, plugin=None):
        # conf: agent configuration; network: NetModel being served;
        # process_monitor: external_process monitor used to watch the server;
        # plugin: RPC proxy handed to the DeviceManager for port operations.
        self.conf = conf
        self.network = network
        self.process_monitor = process_monitor
        self.device_manager = DeviceManager(self.conf, plugin)
        self.version = version

    @abc.abstractmethod
    def enable(self):
        """Enables DHCP for this network."""

    @abc.abstractmethod
    def disable(self, retain_port=False):
        """Disable dhcp for this network."""

    def restart(self):
        """Restart the dhcp service for the network."""
        # Keep the port so the subsequent enable reuses the same device/IPs.
        self.disable(retain_port=True)
        self.enable()

    @abc.abstractproperty
    def active(self):
        """Boolean representing the running state of the DHCP server."""

    @abc.abstractmethod
    def reload_allocations(self):
        """Force the DHCP server to reload the assignment database."""

    @classmethod
    def existing_dhcp_networks(cls, conf):
        """Return a list of existing networks ids that we have configs for."""

        raise NotImplementedError()

    @classmethod
    def check_version(cls):
        """Execute version checks on DHCP server."""

        raise NotImplementedError()

    @classmethod
    def get_isolated_subnets(cls, network):
        """Returns a dict indicating whether or not a subnet is isolated"""

        raise NotImplementedError()

    @classmethod
    def should_enable_metadata(cls, conf, network):
        """True if the metadata-proxy should be enabled for the network."""

        raise NotImplementedError()
class DhcpLocalProcess(DhcpBase):
    """DhcpBase specialization that runs the DHCP server as a local process
    and keeps its state files under a per-network config directory.
    """
    PORTS = []

    def __init__(self, conf, network, process_monitor, version=None,
                 plugin=None):
        super(DhcpLocalProcess, self).__init__(conf, network, process_monitor,
                                               version, plugin)
        self.confs_dir = self.get_confs_dir(conf)
        # Per-network directory holding pid/interface/host/opts/leases files.
        self.network_conf_dir = os.path.join(self.confs_dir, network.id)
        utils.ensure_dir(self.network_conf_dir)

    @staticmethod
    def get_confs_dir(conf):
        # Normalized absolute root of all per-network config directories.
        return os.path.abspath(os.path.normpath(conf.dhcp_confs))

    def get_conf_file_name(self, kind):
        """Returns the file name for a given kind of config file."""
        return os.path.join(self.network_conf_dir, kind)

    def _remove_config_files(self):
        # Best-effort removal of the whole per-network directory.
        shutil.rmtree(self.network_conf_dir, ignore_errors=True)

    def _enable_dhcp(self):
        """check if there is a subnet within the network with dhcp enabled."""
        for subnet in self.network.subnets:
            if subnet.enable_dhcp:
                return True
        return False

    def enable(self):
        """Enables DHCP for this network by spawning a local process."""
        if self.active:
            self.restart()
        elif self._enable_dhcp():
            utils.ensure_dir(self.network_conf_dir)
            # Plug/configure the DHCP device, then persist its name so the
            # cmdline builder and later teardown can find it.
            interface_name = self.device_manager.setup(self.network)
            self.interface_name = interface_name
            self.spawn_process()

    def _get_process_manager(self, cmd_callback=None):
        # ProcessManager wraps pid-file tracking and (re)spawning of the
        # server process inside the network's namespace.
        return external_process.ProcessManager(
            conf=self.conf,
            uuid=self.network.id,
            namespace=self.network.namespace,
            default_cmd_callback=cmd_callback,
            pid_file=self.get_conf_file_name('pid'),
            run_as_root=True)

    def disable(self, retain_port=False):
        """Disable DHCP for this network by killing the local process."""
        self.process_monitor.unregister(self.network.id, DNSMASQ_SERVICE_NAME)
        self._get_process_manager().disable()
        if not retain_port:
            self._destroy_namespace_and_port()
        self._remove_config_files()

    def _destroy_namespace_and_port(self):
        # Teardown is best-effort: failures are logged, not raised, so the
        # rest of the disable path still runs.
        try:
            self.device_manager.destroy(self.network, self.interface_name)
        except RuntimeError:
            LOG.warning(_LW('Failed trying to delete interface: %s'),
                        self.interface_name)

        if self.conf.dhcp_delete_namespaces and self.network.namespace:
            ns_ip = ip_lib.IPWrapper(namespace=self.network.namespace)
            try:
                ns_ip.netns.delete(self.network.namespace)
            except RuntimeError:
                LOG.warning(_LW('Failed trying to delete namespace: %s'),
                            self.network.namespace)

    def _get_value_from_conf_file(self, kind, converter=None):
        """A helper function to read a value from one of the state files."""
        file_name = self.get_conf_file_name(kind)
        msg = _('Error while reading %s')
        try:
            with open(file_name, 'r') as f:
                try:
                    return converter(f.read()) if converter else f.read()
                except ValueError:
                    msg = _('Unable to convert value in %s')
        except IOError:
            msg = _('Unable to access %s')
        # Only reached on failure; msg records which problem occurred.
        LOG.debug(msg, file_name)
        return None

    @property
    def interface_name(self):
        # The DHCP device name is persisted in the 'interface' state file.
        return self._get_value_from_conf_file('interface')

    @interface_name.setter
    def interface_name(self, value):
        interface_file_path = self.get_conf_file_name('interface')
        utils.replace_file(interface_file_path, value)

    @property
    def active(self):
        return self._get_process_manager().active

    @abc.abstractmethod
    def spawn_process(self):
        pass
class Dnsmasq(DhcpLocalProcess):
    """DHCP driver that runs one dnsmasq process per network."""
    # The ports that need to be opened when security policies are active
    # on the Neutron port used for DHCP. These are provided as a convenience
    # for users of this class.
    PORTS = {constants.IP_VERSION_4:
             [(UDP, DNS_PORT), (TCP, DNS_PORT), (UDP, DHCPV4_PORT)],
             constants.IP_VERSION_6:
             [(UDP, DNS_PORT), (TCP, DNS_PORT), (UDP, DHCPV6_PORT)],
             }

    # Per-subnet dnsmasq tag; %d is the subnet's index in network.subnets.
    _TAG_PREFIX = 'tag%d'

    @classmethod
    def check_version(cls):
        # No version gate is needed for the dnsmasq releases this supports.
        pass

    @classmethod
    def existing_dhcp_networks(cls, conf):
        """Return a list of existing networks ids that we have configs for."""
        confs_dir = cls.get_confs_dir(conf)
        try:
            # Each per-network config dir is named after the network UUID.
            return [
                c for c in os.listdir(confs_dir)
                if uuidutils.is_uuid_like(c)
            ]
        except OSError:
            return []

    def _build_cmdline_callback(self, pid_file):
        # Build the dnsmasq command line from the network's subnets and the
        # agent configuration; used as the ProcessManager cmd callback.
        cmd = [
            'dnsmasq',
            '--no-hosts',
            '--no-resolv',
            '--strict-order',
            '--bind-interfaces',
            '--interface=%s' % self.interface_name,
            '--except-interface=lo',
            '--pid-file=%s' % pid_file,
            '--dhcp-hostsfile=%s' % self.get_conf_file_name('host'),
            '--addn-hosts=%s' % self.get_conf_file_name('addn_hosts'),
            '--dhcp-optsfile=%s' % self.get_conf_file_name('opts'),
            '--dhcp-leasefile=%s' % self.get_conf_file_name('leases'),
        ]

        possible_leases = 0
        for i, subnet in enumerate(self.network.subnets):
            mode = None
            # if a subnet is specified to have dhcp disabled
            if not subnet.enable_dhcp:
                continue
            if subnet.ip_version == 4:
                mode = 'static'
            else:
                # Note(scollins) If the IPv6 attributes are not set, set it as
                # static to preserve previous behavior
                addr_mode = getattr(subnet, 'ipv6_address_mode', None)
                ra_mode = getattr(subnet, 'ipv6_ra_mode', None)
                if (addr_mode in [constants.DHCPV6_STATEFUL,
                                  constants.DHCPV6_STATELESS] or
                        not addr_mode and not ra_mode):
                    mode = 'static'

            cidr = netaddr.IPNetwork(subnet.cidr)

            if self.conf.dhcp_lease_duration == -1:
                lease = 'infinite'
            else:
                lease = '%ss' % self.conf.dhcp_lease_duration

            # mode is optional and is not set - skip it
            if mode:
                if subnet.ip_version == 4:
                    cmd.append('--dhcp-range=%s%s,%s,%s,%s' %
                               ('set:', self._TAG_PREFIX % i,
                                cidr.network, mode, lease))
                else:
                    cmd.append('--dhcp-range=%s%s,%s,%s,%d,%s' %
                               ('set:', self._TAG_PREFIX % i,
                                cidr.network, mode,
                                cidr.prefixlen, lease))
                possible_leases += cidr.size

        if cfg.CONF.advertise_mtu:
            mtu = self.network.mtu
            # Do not advertise unknown mtu
            if mtu > 0:
                cmd.append('--dhcp-option-force=option:mtu,%d' % mtu)

        # Cap the limit because creating lots of subnets can inflate
        # this possible lease cap.
        cmd.append('--dhcp-lease-max=%d' %
                   min(possible_leases, self.conf.dnsmasq_lease_max))

        cmd.append('--conf-file=%s' % self.conf.dnsmasq_config_file)
        if self.conf.dnsmasq_dns_servers:
            cmd.extend(
                '--server=%s' % server
                for server in self.conf.dnsmasq_dns_servers)

        if self.conf.dhcp_domain:
            cmd.append('--domain=%s' % self.conf.dhcp_domain)

        if self.conf.dhcp_broadcast_reply:
            cmd.append('--dhcp-broadcast')

        return cmd

    def spawn_process(self):
        """Spawn the process, if it's not spawned already."""
        # we only need to generate the lease file the first time dnsmasq starts
        # rather than on every reload since dnsmasq will keep the file current
        self._output_init_lease_file()
        self._spawn_or_reload_process(reload_with_HUP=False)

    def _spawn_or_reload_process(self, reload_with_HUP):
        """Spawns or reloads a Dnsmasq process for the network.

        When reload_with_HUP is True, dnsmasq receives a HUP signal,
        or it's reloaded if the process is not running.
        """
        self._output_config_files()

        pm = self._get_process_manager(
            cmd_callback=self._build_cmdline_callback)

        pm.enable(reload_cfg=reload_with_HUP)

        # Keep the process monitor watching so a dead dnsmasq is respawned.
        self.process_monitor.register(uuid=self.network.id,
                                      service_name=DNSMASQ_SERVICE_NAME,
                                      monitored_process=pm)

    def _release_lease(self, mac_address, ip):
        """Release a DHCP lease."""
        # dhcp_release must run inside the network's namespace to reach
        # the dnsmasq instance.
        cmd = ['dhcp_release', self.interface_name, ip, mac_address]
        ip_wrapper = ip_lib.IPWrapper(namespace=self.network.namespace)
        ip_wrapper.netns.execute(cmd, run_as_root=True)

    def _output_config_files(self):
        # Regenerate the three dnsmasq input files: dhcp-hostsfile,
        # addn-hosts and dhcp-optsfile.
        self._output_hosts_file()
        self._output_addn_hosts_file()
        self._output_opts_file()

    def reload_allocations(self):
        """Rebuild the dnsmasq config and signal the dnsmasq to reload."""

        # If all subnets turn off dhcp, kill the process.
        if not self._enable_dhcp():
            self.disable()
            LOG.debug('Killing dnsmasq for network since all subnets have '
                      'turned off DHCP: %s', self.network.id)
            return

        self._release_unused_leases()
        self._spawn_or_reload_process(reload_with_HUP=True)
        LOG.debug('Reloading allocations for network: %s', self.network.id)
        self.device_manager.update(self.network, self.interface_name)

    def _sort_fixed_ips_for_dnsmasq(self, fixed_ips, v6_nets):
        """Sort fixed_ips so that stateless IPv6 subnets appear first.

        For example, If a port with v6 extra_dhcp_opts is on a network with
        IPv4 and IPv6 stateless subnets. Then dhcp host file will have
        below 2 entries for same MAC,

        fa:16:3e:8f:9d:65,30.0.0.5,set:aabc7d33-4874-429e-9637-436e4232d2cd
        (entry for IPv4 dhcp)
        fa:16:3e:8f:9d:65,set:aabc7d33-4874-429e-9637-436e4232d2cd
        (entry for stateless IPv6 for v6 options)

        dnsmasq internal details for processing host file entries
        1) dnsmasq reads the host file from EOF.
        2) So it first picks up stateless IPv6 entry,
           fa:16:3e:8f:9d:65,set:aabc7d33-4874-429e-9637-436e4232d2cd
        3) But dnsmasq doesn't have sufficient checks to skip this entry and
           pick next entry, to process dhcp IPv4 request.
        4) So dnsmasq uses this entry to process dhcp IPv4 request.
        5) As there is no ip in this entry, dnsmasq logs "no address available"
           and fails to send DHCPOFFER message.

        As we rely on internal details of dnsmasq to understand and fix the
        issue, Ihar sent a mail to dnsmasq-discuss mailing list
        http://lists.thekelleys.org.uk/pipermail/dnsmasq-discuss/2015q2/
        009650.html

        So If we reverse the order of writing entries in host file,
        so that entry for stateless IPv6 comes first,
        then dnsmasq can correctly fetch the IPv4 address.
        """
        return sorted(
            fixed_ips,
            key=lambda fip: ((fip.subnet_id in v6_nets) and (
                v6_nets[fip.subnet_id].ipv6_address_mode == (
                    constants.DHCPV6_STATELESS))),
            reverse=True)

    def _iter_hosts(self):
        """Iterate over hosts.

        For each host on the network we yield a tuple containing:
        (
            port,  # a DictModel instance representing the port.
            alloc,  # a DictModel instance of the allocated ip and subnet.
                    # if alloc is None, it means there is no need to allocate
                    # an IPv6 address because of stateless DHCPv6 network.
            host_name,  # Host name.
            name,  # Canonical hostname in the format 'hostname[.domain]'.
            no_dhcp,  # A flag indicating that the address doesn't need a DHCP
                      # IP address.
            no_opts,  # A flag indication that options shouldn't be written
        )
        """
        v6_nets = dict((subnet.id, subnet) for subnet in
                       self.network.subnets if subnet.ip_version == 6)

        for port in self.network.ports:
            fixed_ips = self._sort_fixed_ips_for_dnsmasq(port.fixed_ips,
                                                         v6_nets)
            for alloc in fixed_ips:
                no_dhcp = False
                no_opts = False
                if alloc.subnet_id in v6_nets:
                    addr_mode = v6_nets[alloc.subnet_id].ipv6_address_mode
                    no_dhcp = addr_mode in (constants.IPV6_SLAAC,
                                            constants.DHCPV6_STATELESS)
                    # we don't setup anything for SLAAC. It doesn't make sense
                    # to provide options for a client that won't use DHCP
                    no_opts = addr_mode == constants.IPV6_SLAAC

                # Hostname is derived from the IP with separators replaced,
                # e.g. host-10-0-0-2 or host-fd00--2.
                hostname = 'host-%s' % alloc.ip_address.replace(
                    '.', '-').replace(':', '-')
                fqdn = hostname
                if self.conf.dhcp_domain:
                    fqdn = '%s.%s' % (fqdn, self.conf.dhcp_domain)
                yield (port, alloc, hostname, fqdn, no_dhcp, no_opts)

    def _output_init_lease_file(self):
        """Write a fake lease file to bootstrap dnsmasq.

        The generated file is passed to the --dhcp-leasefile option of dnsmasq.
        This is used as a bootstrapping mechanism to avoid NAKing active leases
        when a dhcp server is scheduled to another agent. Using a leasefile
        will also prevent dnsmasq from NAKing or ignoring renewals after a
        restart.

        Format is as follows:
        epoch-timestamp mac_addr ip_addr hostname client-ID
        """
        filename = self.get_conf_file_name('leases')
        buf = six.StringIO()

        LOG.debug('Building initial lease file: %s', filename)
        # we make up a lease time for the database entry
        if self.conf.dhcp_lease_duration == -1:
            # Even with an infinite lease, a client may choose to renew a
            # previous lease on reboot or interface bounce so we should have
            # an entry for it.
            # Dnsmasq timestamp format for an infinite lease is 0.
            timestamp = 0
        else:
            timestamp = int(time.time()) + self.conf.dhcp_lease_duration
        dhcp_enabled_subnet_ids = [s.id for s in self.network.subnets
                                   if s.enable_dhcp]
        for host_tuple in self._iter_hosts():
            port, alloc, hostname, name, no_dhcp, no_opts = host_tuple
            # don't write ip address which belongs to a dhcp disabled subnet
            # or an IPv6 SLAAC/stateless subnet
            if no_dhcp or alloc.subnet_id not in dhcp_enabled_subnet_ids:
                continue

            ip_address = self._format_address_for_dnsmasq(alloc.ip_address)
            # all that matters is the mac address and IP. the hostname and
            # client ID will be overwritten on the next renewal.
            buf.write('%s %s %s * *\n' %
                      (timestamp, port.mac_address, ip_address))
        contents = buf.getvalue()
        utils.replace_file(filename, contents)
        LOG.debug('Done building initial lease file %s with contents:\n%s',
                  filename, contents)
        return filename

    @staticmethod
    def _format_address_for_dnsmasq(address):
        # (dzyu) Check if it is legal ipv6 address, if so, need wrap
        # it with '[]' to let dnsmasq to distinguish MAC address from
        # IPv6 address.
        if netaddr.valid_ipv6(address):
            return '[%s]' % address
        return address

    def _output_hosts_file(self):
        """Writes a dnsmasq compatible dhcp hosts file.

        The generated file is sent to the --dhcp-hostsfile option of dnsmasq,
        and lists the hosts on the network which should receive a dhcp lease.
        Each line in this file is in the form::

            'mac_address,FQDN,ip_address'

        IMPORTANT NOTE: a dnsmasq instance does not resolve hosts defined in
        this file if it did not give a lease to a host listed in it (e.g.:
        multiple dnsmasq instances on the same network if this network is on
        multiple network nodes). This file is only defining hosts which
        should receive a dhcp lease, the hosts resolution in itself is
        defined by the `_output_addn_hosts_file` method.
        """
        buf = six.StringIO()
        filename = self.get_conf_file_name('host')

        LOG.debug('Building host file: %s', filename)
        dhcp_enabled_subnet_ids = [s.id for s in self.network.subnets
                                   if s.enable_dhcp]
        # NOTE(ihrachyshka): the loop should not log anything inside it, to
        # avoid potential performance drop when lots of hosts are dumped
        for host_tuple in self._iter_hosts():
            port, alloc, hostname, name, no_dhcp, no_opts = host_tuple
            if no_dhcp:
                # Tag-only entry so per-port extra options still apply to
                # addresses that dnsmasq does not allocate itself.
                if not no_opts and getattr(port, 'extra_dhcp_opts', False):
                    buf.write('%s,%s%s\n' %
                              (port.mac_address, 'set:', port.id))
                continue

            # don't write ip address which belongs to a dhcp disabled subnet.
            if alloc.subnet_id not in dhcp_enabled_subnet_ids:
                continue

            ip_address = self._format_address_for_dnsmasq(alloc.ip_address)

            if getattr(port, 'extra_dhcp_opts', False):
                buf.write('%s,%s,%s,%s%s\n' %
                          (port.mac_address, name, ip_address,
                           'set:', port.id))
            else:
                buf.write('%s,%s,%s\n' %
                          (port.mac_address, name, ip_address))

        utils.replace_file(filename, buf.getvalue())
        LOG.debug('Done building host file %s with contents:\n%s', filename,
                  buf.getvalue())
        return filename

    def _read_hosts_file_leases(self, filename):
        # Parse the dhcp hostsfile back into a set of (ip, mac) tuples;
        # strip the '[]' wrapper that _format_address_for_dnsmasq adds
        # around IPv6 addresses.
        leases = set()
        if os.path.exists(filename):
            with open(filename) as f:
                for l in f.readlines():
                    host = l.strip().split(',')
                    leases.add((host[2].strip('[]'), host[0]))
        return leases

    def _release_unused_leases(self):
        # Compare the previously written hosts file with current port data
        # and ask dnsmasq to release any (ip, mac) pair that disappeared.
        filename = self.get_conf_file_name('host')
        old_leases = self._read_hosts_file_leases(filename)

        new_leases = set()
        for port in self.network.ports:
            for alloc in port.fixed_ips:
                new_leases.add((alloc.ip_address, port.mac_address))

        for ip, mac in old_leases - new_leases:
            self._release_lease(mac, ip)

    def _output_addn_hosts_file(self):
        """Writes a dnsmasq compatible additional hosts file.

        The generated file is sent to the --addn-hosts option of dnsmasq,
        and lists the hosts on the network which should be resolved even if
        the dnsmasq instance did not give a lease to the host (see the
        `_output_hosts_file` method).
        Each line in this file is in the same form as a standard /etc/hosts
        file.
        """
        buf = six.StringIO()
        for host_tuple in self._iter_hosts():
            port, alloc, hostname, fqdn, no_dhcp, no_opts = host_tuple
            # It is compulsory to write the `fqdn` before the `hostname` in
            # order to obtain it in PTR responses.
            # alloc can be falsy for stateless IPv6 entries with no address.
            if alloc:
                buf.write('%s\t%s %s\n' % (alloc.ip_address, fqdn, hostname))
        addn_hosts = self.get_conf_file_name('addn_hosts')
        utils.replace_file(addn_hosts, buf.getvalue())
        return addn_hosts

    def _output_opts_file(self):
        """Write a dnsmasq compatible options file."""
        options, subnet_index_map = self._generate_opts_per_subnet()
        options += self._generate_opts_per_port(subnet_index_map)

        name = self.get_conf_file_name('opts')
        utils.replace_file(name, '\n'.join(options))
        return name

    def _generate_opts_per_subnet(self):
        # Emit per-subnet dnsmasq options (dns-server, routes, router) tagged
        # with the subnet's tag; also build the map of subnets that need the
        # dnsmasq IP advertised as their nameserver.
        options = []
        subnet_index_map = {}
        if self.conf.enable_isolated_metadata:
            subnet_to_interface_ip = self._make_subnet_interface_ip_map()
        isolated_subnets = self.get_isolated_subnets(self.network)
        for i, subnet in enumerate(self.network.subnets):
            addr_mode = getattr(subnet, 'ipv6_address_mode', None)
            if (not subnet.enable_dhcp or
                (subnet.ip_version == 6 and
                 addr_mode == constants.IPV6_SLAAC)):
                continue
            if subnet.dns_nameservers:
                options.append(
                    self._format_option(
                        subnet.ip_version, i, 'dns-server',
                        ','.join(
                            Dnsmasq._convert_to_literal_addrs(
                                subnet.ip_version, subnet.dns_nameservers))))
            else:
                # use the dnsmasq ip as nameservers only if there is no
                # dns-server submitted by the server
                subnet_index_map[subnet.id] = i

            if self.conf.dhcp_domain and subnet.ip_version == 6:
                options.append('tag:tag%s,option6:domain-search,%s' %
                               (i, ''.join(self.conf.dhcp_domain)))

            gateway = subnet.gateway_ip
            host_routes = []
            for hr in subnet.host_routes:
                if hr.destination == constants.IPv4_ANY:
                    # A default route in host_routes overrides (or supplies)
                    # the gateway.
                    if not gateway:
                        gateway = hr.nexthop
                else:
                    host_routes.append("%s,%s" % (hr.destination, hr.nexthop))

            # Add host routes for isolated network segments
            if (isolated_subnets[subnet.id] and
                    self.conf.enable_isolated_metadata and
                    subnet.ip_version == 4):
                subnet_dhcp_ip = subnet_to_interface_ip[subnet.id]
                host_routes.append(
                    '%s/32,%s' % (METADATA_DEFAULT_IP, subnet_dhcp_ip)
                )

            if subnet.ip_version == 4:
                # Route sibling IPv4 subnets via the local interface.
                host_routes.extend(["%s,0.0.0.0" % (s.cidr) for s in
                                    self.network.subnets
                                    if (s.ip_version == 4 and
                                        s.cidr != subnet.cidr)])

                if host_routes:
                    if gateway:
                        host_routes.append("%s,%s" % (constants.IPv4_ANY,
                                                      gateway))
                    options.append(
                        self._format_option(subnet.ip_version, i,
                                            'classless-static-route',
                                            ','.join(host_routes)))
                    # Also emit option 249 for pre-RFC3442 Windows clients.
                    options.append(
                        self._format_option(subnet.ip_version, i,
                                            WIN2k3_STATIC_DNS,
                                            ','.join(host_routes)))

                if gateway:
                    options.append(self._format_option(subnet.ip_version,
                                                       i, 'router',
                                                       gateway))
                else:
                    options.append(self._format_option(subnet.ip_version,
                                                       i, 'router'))
        return options, subnet_index_map

    def _generate_opts_per_port(self, subnet_index_map):
        # Emit per-port extra_dhcp_opts (tagged by port id) and collect the
        # dnsmasq port IPs to advertise as dns-server for subnets without an
        # explicit one.
        options = []
        dhcp_ips = collections.defaultdict(list)
        for port in self.network.ports:
            if getattr(port, 'extra_dhcp_opts', False):
                port_ip_versions = set(
                    [netaddr.IPAddress(ip.ip_address).version
                     for ip in port.fixed_ips])
                for opt in port.extra_dhcp_opts:
                    opt_ip_version = opt.ip_version
                    if opt_ip_version in port_ip_versions:
                        options.append(
                            self._format_option(opt_ip_version, port.id,
                                                opt.opt_name, opt.opt_value))
                    else:
                        LOG.info(_LI("Cannot apply dhcp option %(opt)s "
                                     "because it's ip_version %(version)d "
                                     "is not in port's address IP versions"),
                                 {'opt': opt.opt_name,
                                  'version': opt_ip_version})

            # provides all dnsmasq ip as dns-server if there is more than
            # one dnsmasq for a subnet and there is no dns-server submitted
            # by the server
            if port.device_owner == constants.DEVICE_OWNER_DHCP:
                for ip in port.fixed_ips:
                    i = subnet_index_map.get(ip.subnet_id)
                    if i is None:
                        continue
                    dhcp_ips[i].append(ip.ip_address)

        for i, ips in dhcp_ips.items():
            for ip_version in (4, 6):
                vx_ips = [ip for ip in ips
                          if netaddr.IPAddress(ip).version == ip_version]
                if vx_ips:
                    options.append(
                        self._format_option(
                            ip_version, i, 'dns-server',
                            ','.join(
                                Dnsmasq._convert_to_literal_addrs(ip_version,
                                                                  vx_ips))))
        return options

    def _make_subnet_interface_ip_map(self):
        # Map each subnet id to the DHCP interface's IP on that subnet by
        # matching the device's configured CIDRs against subnet CIDRs.
        ip_dev = ip_lib.IPDevice(self.interface_name,
                                 namespace=self.network.namespace)

        subnet_lookup = dict(
            (netaddr.IPNetwork(subnet.cidr), subnet.id)
            for subnet in self.network.subnets
        )

        retval = {}

        for addr in ip_dev.addr.list():
            ip_net = netaddr.IPNetwork(addr['cidr'])

            if ip_net in subnet_lookup:
                retval[subnet_lookup[ip_net]] = addr['cidr'].split('/')[0]

        return retval

    def _format_option(self, ip_version, tag, option, *args):
        """Format DHCP option by option name or code."""
        option = str(option)
        # An option may carry its own embedded 'tag:<x>,' prefix; split it
        # out so it can be re-emitted alongside the subnet/port tag.
        pattern = "(tag:(.*),)?(.*)$"
        matches = re.match(pattern, option)
        extra_tag = matches.groups()[0]
        option = matches.groups()[2]

        if isinstance(tag, int):
            tag = self._TAG_PREFIX % tag

        if not option.isdigit():
            if ip_version == 4:
                option = 'option:%s' % option
            else:
                option = 'option6:%s' % option
        if extra_tag:
            # extra_tag[:-1] drops the trailing comma captured by the regex.
            tags = ('tag:' + tag, extra_tag[:-1], '%s' % option)
        else:
            tags = ('tag:' + tag, '%s' % option)
        return ','.join(tags + args)

    @staticmethod
    def _convert_to_literal_addrs(ip_version, ips):
        # dnsmasq requires IPv6 literals to be bracketed; IPv4 is unchanged.
        if ip_version == 4:
            return ips
        return ['[' + ip + ']' for ip in ips]

    @classmethod
    def get_isolated_subnets(cls, network):
        """Returns a dict indicating whether or not a subnet is isolated

        A subnet is considered non-isolated if there is a port connected to
        the subnet, and the port's ip address matches that of the subnet's
        gateway. The port must be owned by a neutron router.
        """
        isolated_subnets = collections.defaultdict(lambda: True)
        subnets = dict((subnet.id, subnet) for subnet in network.subnets)

        for port in network.ports:
            if port.device_owner not in constants.ROUTER_INTERFACE_OWNERS:
                continue
            for alloc in port.fixed_ips:
                if subnets[alloc.subnet_id].gateway_ip == alloc.ip_address:
                    isolated_subnets[alloc.subnet_id] = False

        return isolated_subnets

    @classmethod
    def should_enable_metadata(cls, conf, network):
        """Determine whether the metadata proxy is needed for a network

        This method returns True for truly isolated networks (ie: not attached
        to a router), when the enable_isolated_metadata flag is True.

        This method also returns True when enable_metadata_network is True,
        and the network passed as a parameter has a subnet in the link-local
        CIDR, thus characterizing it as a "metadata" network. The metadata
        network is used by solutions which do not leverage the l3 agent for
        providing access to the metadata service via logical routers built
        with 3rd party backends.
        """
        if conf.enable_metadata_network and conf.enable_isolated_metadata:
            # check if the network has a metadata subnet
            meta_cidr = netaddr.IPNetwork(METADATA_DEFAULT_CIDR)
            if any(netaddr.IPNetwork(s.cidr) in meta_cidr
                   for s in network.subnets):
                return True

        if not conf.use_namespaces or not conf.enable_isolated_metadata:
            return False

        isolated_subnets = cls.get_isolated_subnets(network)
        return any(isolated_subnets[subnet.id] for subnet in network.subnets)
class DeviceManager(object):
    """Creates, configures and tears down the agent-side DHCP port/device."""

    def __init__(self, conf, plugin):
        # conf: agent configuration; plugin: RPC proxy used for port CRUD.
        self.conf = conf
        self.plugin = plugin
        if not conf.interface_driver:
            LOG.error(_LE('An interface driver must be specified'))
            raise SystemExit(1)
        try:
            self.driver = importutils.import_object(
                conf.interface_driver, conf)
        except Exception as e:
            LOG.error(_LE("Error importing interface driver '%(driver)s': "
                          "%(inner)s"),
                      {'driver': conf.interface_driver,
                       'inner': e})
            raise SystemExit(1)

    def get_interface_name(self, network, port):
        """Return interface(device) name for use by the DHCP process."""
        return self.driver.get_device_name(port)

    def get_device_id(self, network):
        """Return a unique DHCP device ID for this host on the network."""
        # There could be more than one dhcp server per network, so create
        # a device id that combines host and network ids
        return commonutils.get_dhcp_agent_device_id(network.id, self.conf.host)

    def _set_default_route(self, network, device_name):
        """Sets the default gateway for this dhcp namespace.

        This method is idempotent and will only adjust the route if adjusting
        it would change it from what it already is.  This makes it safe to call
        and avoids unnecessary perturbation of the system.
        """
        device = ip_lib.IPDevice(device_name, namespace=network.namespace)
        gateway = device.route.get_gateway()
        if gateway:
            gateway = gateway['gateway']

        for subnet in network.subnets:
            skip_subnet = (
                subnet.ip_version != 4
                or not subnet.enable_dhcp
                or subnet.gateway_ip is None)

            if skip_subnet:
                continue

            # Use the first eligible IPv4 subnet's gateway; replace the
            # namespace default route only if it actually differs.
            if gateway != subnet.gateway_ip:
                LOG.debug('Setting gateway for dhcp netns on net %(n)s to '
                          '%(ip)s',
                          {'n': network.id, 'ip': subnet.gateway_ip})

                device.route.add_gateway(subnet.gateway_ip)

            return

        # No subnets on the network have a valid gateway.  Clean it up to
        # avoid confusion from seeing an invalid gateway here.
        if gateway is not None:
            LOG.debug('Removing gateway for dhcp netns on net %s', network.id)

            device.route.delete_gateway(gateway)

    def setup_dhcp_port(self, network):
        """Create/update DHCP port for the host if needed and return port."""
        device_id = self.get_device_id(network)
        subnets = {}
        dhcp_enabled_subnet_ids = []
        for subnet in network.subnets:
            if subnet.enable_dhcp:
                dhcp_enabled_subnet_ids.append(subnet.id)
                subnets[subnet.id] = subnet

        dhcp_port = None
        # First preference: a port already owned by this agent's device id.
        for port in network.ports:
            port_device_id = getattr(port, 'device_id', None)
            if port_device_id == device_id:
                port_fixed_ips = []
                ips_needs_removal = False
                for fixed_ip in port.fixed_ips:
                    # Keep IPs on dhcp-enabled subnets; anything else must
                    # be dropped from the port.
                    if fixed_ip.subnet_id in dhcp_enabled_subnet_ids:
                        port_fixed_ips.append(
                            {'subnet_id': fixed_ip.subnet_id,
                             'ip_address': fixed_ip.ip_address})
                        dhcp_enabled_subnet_ids.remove(fixed_ip.subnet_id)
                    else:
                        ips_needs_removal = True

                # If there are dhcp_enabled_subnet_ids here that means that
                # we need to add those to the port and call update.
                if dhcp_enabled_subnet_ids or ips_needs_removal:
                    port_fixed_ips.extend(
                        [dict(subnet_id=s) for s in dhcp_enabled_subnet_ids])
                    dhcp_port = self.plugin.update_dhcp_port(
                        port.id, {'port': {'network_id': network.id,
                                           'fixed_ips': port_fixed_ips}})
                    if not dhcp_port:
                        raise exceptions.Conflict()
                else:
                    dhcp_port = port
                # break since we found port that matches device_id
                break

        # check for a reserved DHCP port
        if dhcp_port is None:
            LOG.debug('DHCP port %(device_id)s on network %(network_id)s'
                      ' does not yet exist. Checking for a reserved port.',
                      {'device_id': device_id, 'network_id': network.id})
            for port in network.ports:
                port_device_id = getattr(port, 'device_id', None)
                # Claim a pre-created reserved port by stamping it with this
                # agent's device id.
                if port_device_id == constants.DEVICE_ID_RESERVED_DHCP_PORT:
                    dhcp_port = self.plugin.update_dhcp_port(
                        port.id, {'port': {'network_id': network.id,
                                           'device_id': device_id}})
                    if dhcp_port:
                        break

        # DHCP port has not yet been created.
        if dhcp_port is None:
            LOG.debug('DHCP port %(device_id)s on network %(network_id)s'
                      ' does not yet exist.', {'device_id': device_id,
                                               'network_id': network.id})
            port_dict = dict(
                name='',
                admin_state_up=True,
                device_id=device_id,
                network_id=network.id,
                tenant_id=network.tenant_id,
                fixed_ips=[dict(subnet_id=s) for s in dhcp_enabled_subnet_ids])
            dhcp_port = self.plugin.create_dhcp_port({'port': port_dict})

        if not dhcp_port:
            raise exceptions.Conflict()

        # Convert subnet_id to subnet dict
        fixed_ips = [dict(subnet_id=fixed_ip.subnet_id,
                          ip_address=fixed_ip.ip_address,
                          subnet=subnets[fixed_ip.subnet_id])
                     for fixed_ip in dhcp_port.fixed_ips]

        ips = [DictModel(item) if isinstance(item, dict) else item
               for item in fixed_ips]
        dhcp_port.fixed_ips = ips

        return dhcp_port

    def setup(self, network):
        """Create and initialize a device for network's DHCP on this host."""
        port = self.setup_dhcp_port(network)
        interface_name = self.get_interface_name(network, port)

        if ip_lib.ensure_device_is_ready(interface_name,
                                         namespace=network.namespace):
            LOG.debug('Reusing existing device: %s.', interface_name)
        else:
            self.driver.plug(network.id,
                             port.id,
                             interface_name,
                             port.mac_address,
                             namespace=network.namespace)
            self.fill_dhcp_udp_checksums(namespace=network.namespace)
        ip_cidrs = []
        for fixed_ip in port.fixed_ips:
            subnet = fixed_ip.subnet
            # Auto-addressed (SLAAC) subnets get their address from the RA,
            # so no static CIDR is configured for them here.
            if not ipv6_utils.is_auto_address_subnet(subnet):
                net = netaddr.IPNetwork(subnet.cidr)
                ip_cidr = '%s/%s' % (fixed_ip.ip_address, net.prefixlen)
                ip_cidrs.append(ip_cidr)

        if (self.conf.enable_isolated_metadata and
            self.conf.use_namespaces):
            ip_cidrs.append(METADATA_DEFAULT_CIDR)

        self.driver.init_l3(interface_name, ip_cidrs,
                            namespace=network.namespace)

        # ensure that the dhcp interface is first in the list
        if network.namespace is None:
            device = ip_lib.IPDevice(interface_name)
            device.route.pullup_route(interface_name)

        if self.conf.use_namespaces:
            self._set_default_route(network, interface_name)

        return interface_name

    def update(self, network, device_name):
        """Update device settings for the network's DHCP on this host."""
        if self.conf.use_namespaces:
            self._set_default_route(network, device_name)

    def destroy(self, network, device_name):
        """Destroy the device used for the network's DHCP on this host."""
        self.driver.unplug(device_name, namespace=network.namespace)

        self.plugin.release_dhcp_port(network.id,
                                      self.get_device_id(network))

    def fill_dhcp_udp_checksums(self, namespace):
        """Ensure DHCP reply packets always have correct UDP checksums."""
        iptables_mgr = iptables_manager.IptablesManager(use_ipv6=False,
                                                        namespace=namespace)
        ipv4_rule = ('-p udp --dport %d -j CHECKSUM --checksum-fill'
                     % constants.DHCP_RESPONSE_PORT)
        iptables_mgr.ipv4['mangle'].add_rule('POSTROUTING', ipv4_rule)
        iptables_mgr.apply()
| |
'''
The one parameter exponential family distributions used by GLM.
'''
# TODO: quasi, quasibinomial, quasipoisson
# see http://www.biostat.jhsph.edu/~qli/biostatistics_r_doc/library/stats/html/family.html
# for comparison to R, and McCullagh and Nelder
import numpy as np
from scipy import special
from . import links as L
from . import varfuncs as V
FLOAT_EPS = np.finfo(float).eps
class Family(object):
    """
    The parent class for one-parameter exponential families.

    Parameters
    ----------
    link : a link function instance
        Link is the linear transformation function.
        See the individual families for available links.
    variance : a variance function
        Measures the variance as a function of the mean probabilities.
        See the individual families for the default variance function.

    See Also
    --------
    :ref:`links`
    """
    # TODO: change these class attributes, use valid somewhere...
    valid = [-np.inf, np.inf]
    links = []

    def _setlink(self, link):
        """
        Helper method to set the link for a family.

        Raises a ValueError exception if the link is not available. Note that
        the error message might not be that informative because it tells you
        that the link should be in the base class for the link function.

        See glm.GLM for a list of appropriate links for each family but note
        that not all of these are currently available.
        """
        # TODO: change the links class attribute in the families to hold
        # meaningful information instead of a list of link instances such as
        # [<statsmodels.family.links.Log object at 0x9a4240c>, ...]
        if not isinstance(link, L.Link):
            raise TypeError("The input should be a valid Link object.")
        if hasattr(self, "links"):
            # A link is valid when it is an instance of one of the link
            # classes advertised by the family.  (The previous version
            # computed ``link in self.links`` and immediately overwrote the
            # result -- dead code, removed -- and used ``max`` over a list
            # of booleans where ``any`` expresses the intent directly.)
            if not any(isinstance(link, cls) for cls in self.links):
                errmsg = "Invalid link for family, should be in %s. (got %s)"
                raise ValueError(errmsg % (repr(self.links), link))
        # Record the link only after validation so a rejected link cannot
        # leave the family half-updated.
        self._link = link

    def _getlink(self):
        """
        Helper method to get the link for a family.
        """
        return self._link

    # link property for each family is a pointer to a link instance
    link = property(_getlink, _setlink, doc="Link function for family")

    def __init__(self, link, variance):
        # ``link`` is a link *class*; it is instantiated here and validated
        # by the property setter above.
        self.link = link()
        self.variance = variance

    def starting_mu(self, y):
        """
        Starting value for mu in the IRLS algorithm.

        Parameters
        ----------
        y : array
            The untransformed response variable.

        Returns
        -------
        mu_0 : array
            The first guess on the transformed response variable.

        Notes
        -----
        mu_0 = (endog + mean(endog))/2.

        Only the Binomial family takes a different initial value.
        """
        return (y + y.mean())/2.

    def weights(self, mu):
        """
        Weights for IRLS steps

        Parameters
        ----------
        mu : array-like
            The transformed mean response variable in the exponential family

        Returns
        -------
        w : array
            The weights for the IRLS steps

        Notes
        -----
        `w` = 1 / (link'(`mu`)**2 * variance(`mu`))
        """
        return 1. / (self.link.deriv(mu)**2 * self.variance(mu))

    def deviance(self, endog, mu, scale=1.):
        """
        Deviance of (endog,mu) pair.

        Deviance is usually defined as twice the loglikelihood ratio.

        Parameters
        ----------
        endog : array-like
            The endogenous response variable
        mu : array-like
            The inverse of the link function at the linear predicted values.
        scale : float, optional
            An optional scale argument

        Returns
        -------
        Deviance : array
            The value of deviance function defined below.

        Notes
        -----
        Deviance is defined

        .. math::

           \\sum_i(2 loglike(y_i, y_i) - 2 * loglike(y_i, mu_i)) / scale

        where y is the endogenous variable. The deviance functions are
        analytically defined for each family.
        """
        raise NotImplementedError

    def resid_dev(self, endog, mu, scale=1.):
        """
        The deviance residuals

        Parameters
        ----------
        endog : array
            The endogenous response variable
        mu : array
            The inverse of the link function at the linear predicted values.
        scale : float, optional
            An optional argument to divide the residuals by scale

        Returns
        -------
        Deviance residuals.

        Notes
        -----
        The deviance residuals are defined for each family.
        """
        raise NotImplementedError

    def fitted(self, lin_pred):
        """
        Fitted values based on linear predictors lin_pred.

        Parameters
        -----------
        lin_pred : array
            Values of the linear predictor of the model.
            dot(X,beta) in a classical linear model.

        Returns
        --------
        mu : array
            The mean response variables given by the inverse of the link
            function.
        """
        fits = self.link.inverse(lin_pred)
        return fits

    def predict(self, mu):
        """
        Linear predictors based on given mu values.

        Parameters
        ----------
        mu : array
            The mean response variables

        Returns
        -------
        lin_pred : array
            Linear predictors based on the mean response variables. The value
            of the link function at the given mu.
        """
        return self.link(mu)

    def loglike(self, endog, mu, scale=1.):
        """
        The log-likelihood function in terms of the fitted mean response.

        Parameters
        ----------
        `endog` : array
            Usually the endogenous response variable.
        `mu` : array
            Usually but not always the fitted mean response variable.
        scale : float
            The scale parameter

        Returns
        -------
        llf : float
            The value of the loglikelihood evaluated at (endog,mu).

        Notes
        -----
        This is defined for each family. endog and mu are not restricted to
        `endog` and `mu` respectively. For instance, the deviance function
        calls both loglike(endog,endog) and loglike(endog,mu) to get the
        likelihood ratio.
        """
        raise NotImplementedError

    def resid_anscombe(self, endog, mu):
        """
        The Anscombe residuals.

        See also
        --------
        statsmodels.families.family.Family docstring and the `resid_anscombe`
        for the individual families for more information.
        """
        raise NotImplementedError
class Poisson(Family):
    """
    Poisson exponential family.
    Parameters
    ----------
    link : a link instance, optional
        The default link for the Poisson family is the log link. Available
        links are log, identity, and sqrt. See statsmodels.family.links for
        more information.
    Attributes
    ----------
    Poisson.link : a link instance
        The link function of the Poisson instance.
    Poisson.variance : varfuncs instance
        `variance` is an instance of
        statsmodels.genmod.families.family.varfuncs.mu
    See also
    --------
    statsmodels.genmod.families.family.Family
    :ref:`links`
    """
    links = [L.log, L.identity, L.sqrt]
    variance = V.mu
    # Poisson responses live on [0, inf).
    valid = [0, np.inf]
    safe_links = [L.Log,]
    def __init__(self, link=L.log):
        self.variance = Poisson.variance
        self.link = link()
    def _clean(self, x):
        """
        Helper function to trim the data so that it is in (0,inf)
        Notes
        -----
        The need for this function was discovered through usage and it's
        possible that other families might need a check for validity of the
        domain.
        """
        return np.clip(x, FLOAT_EPS, np.inf)
    def resid_dev(self, endog, mu, scale=1.):
        """Poisson deviance residual
        Parameters
        ----------
        endog : array-like
            Endogenous response variable
        mu : array-like
            Fitted mean response variable
        scale : float, optional
            An optional argument to divide the residuals by scale
        Returns
        -------
        resid_dev : array
            Deviance residuals as defined below
        Notes
        -----
        resid_dev = sign(endog-mu)*sqrt(2*endog*log(endog/mu)-2*(endog-mu))
        """
        # endog/mu is clipped away from 0 so the log is finite.
        endog_mu = self._clean(endog/mu)
        return np.sign(endog - mu) * np.sqrt(2 * endog *
                                             np.log(endog_mu) -
                                             2 * (endog - mu))/scale
    def deviance(self, endog, mu, scale=1.):
        '''
        Poisson deviance function
        Parameters
        ----------
        endog : array-like
            Endogenous response variable
        mu : array-like
            Fitted mean response variable
        scale : float, optional
            An optional scale argument
        Returns
        -------
        deviance : float
            The deviance function at (endog,mu) as defined below.
        Notes
        -----
        If a constant term is included it is defined as
        :math:`deviance = 2*\\sum_{i}(Y*\\log(Y/\\mu))/scale`
        '''
        endog_mu = self._clean(endog/mu)
        return 2*np.sum(endog*np.log(endog_mu))/scale
    def loglike(self, endog, mu, scale=1.):
        """
        The log-likelihood function in terms of the fitted mean response.
        Parameters
        ----------
        endog : array-like
            Endogenous response variable
        mu : array-like
            Fitted mean response variable
        scale : float, optional
            The scale parameter, defaults to 1.
        Returns
        -------
        llf : float
            The value of the loglikelihood function evaluated at
            (endog,mu,scale) as defined below.
        Notes
        -----
        llf = scale * sum(-mu + endog*log(mu) - gammaln(endog+1))
        where gammaln is the log gamma function
        """
        return scale * np.sum(-mu + endog*np.log(mu)-special.gammaln(endog+1))
    def resid_anscombe(self, endog, mu):
        """
        Anscombe residuals for the Poisson exponential family distribution
        Parameters
        ----------
        endog : array-like
            Endogenous response variable
        mu : array-like
            Fitted mean response variable
        Returns
        -------
        resid_anscombe : array
            The Anscombe residuals for the Poisson family defined below
        Notes
        -----
        resid_anscombe is defined
        .. math:
           (3/2.)*(endog^{2/3.} - \\mu^{2/3.})/\\mu^{1/6.}
        """
        return (3/2.)*(endog**(2/3.)-mu**(2/3.))/mu**(1/6.)
class Gaussian(Family):
    """
    Gaussian exponential family distribution.
    Parameters
    ----------
    link : a link instance, optional
        The default link for the Gaussian family is the identity link.
        Available links are log, identity, and inverse.
        See statsmodels.family.links for more information.
    Attributes
    ----------
    Gaussian.link : a link instance
        The link function of the Gaussian instance
    Gaussian.variance : varfunc instance
        `variance` is an instance of statsmodels.family.varfuncs.constant
    See also
    --------
    statsmodels.genmod.families.family.Family
    :ref:`links`
    """
    links = [L.log, L.identity, L.inverse_power]
    variance = V.constant
    safe_links = links
    def __init__(self, link=L.identity):
        self.variance = Gaussian.variance
        self.link = link()
    def resid_dev(self, endog, mu, scale=1.):
        """
        Gaussian deviance residuals
        Parameters
        -----------
        endog : array-like
            Endogenous response variable
        mu : array-like
            Fitted mean response variable
        scale : float, optional
            An optional argument to divide the residuals by scale
        Returns
        -------
        resid_dev : array
            Deviance residuals as defined below
        Notes
        --------
        `resid_dev` = (`endog` - `mu`)/sqrt(variance(`mu`))/`scale`
        """
        return (endog - mu) / np.sqrt(self.variance(mu))/scale
    def deviance(self, endog, mu, scale=1.):
        """
        Gaussian deviance function
        Parameters
        ----------
        endog : array-like
            Endogenous response variable
        mu : array-like
            Fitted mean response variable
        scale : float, optional
            An optional scale argument
        Returns
        -------
        deviance : float
            The deviance function at (endog,mu) as defined below.
        Notes
        --------
        `deviance` = sum((endog-mu)**2)/scale
        """
        return np.sum((endog-mu)**2)/scale
    def loglike(self, endog, mu, scale=1.):
        """
        The log-likelihood in terms of the fitted mean response.
        Parameters
        ----------
        endog : array-like
            Endogenous response variable
        mu : array-like
            Fitted mean response variable
        scale : float, optional
            Scales the loglikelihood function. The default is 1.
        Returns
        -------
        llf : float
            The value of the loglikelihood function evaluated at
            (endog,mu,scale) as defined below.
        Notes
        -----
        If the link is the identity link function then the
        loglikelihood function is the same as the classical OLS model.
        llf = -(nobs/2)*(log(SSR) + (1 + log(2*pi/nobs)))
        where SSR = sum((endog-link^(-1)(mu))**2)
        If the link is not the identity link then the loglikelihood
        function is defined as
        llf = sum((`endog`*`mu`-`mu`**2/2)/`scale` - `endog`**2/(2*`scale`) - \
              (1/2.)*log(2*pi*`scale`))
        """
        if isinstance(self.link, L.Power) and self.link.power == 1:
            # This is just the loglikelihood for classical OLS.
            # Note pi/nobs2 == 2*pi/nobs since nobs2 = nobs/2.
            nobs2 = endog.shape[0]/2.
            SSR = np.sum((endog-self.fitted(mu))**2, axis=0)
            llf = -np.log(SSR) * nobs2
            llf -= (1+np.log(np.pi/nobs2))*nobs2
            return llf
        else:
            # Return the loglikelihood for Gaussian GLM
            return np.sum((endog * mu - mu**2/2)/scale - endog**2/(2 * scale)
                          - .5*np.log(2 * np.pi * scale))
    def resid_anscombe(self, endog, mu):
        """
        The Anscombe residuals for the Gaussian exponential family distribution
        Parameters
        ----------
        endog : array
            Endogenous response variable
        mu : array
            Fitted mean response variable
        Returns
        -------
        resid_anscombe : array
            The Anscombe residuals for the Gaussian family defined below
        Notes
        --------
        `resid_anscombe` = `endog` - `mu`
        """
        return endog-mu
class Gamma(Family):
    """
    Gamma exponential family distribution.

    Parameters
    ----------
    link : a link instance, optional
        The default link for the Gamma family is the inverse link.
        Available links are log, identity, and inverse.
        See statsmodels.family.links for more information.

    Attributes
    ----------
    Gamma.link : a link instance
        The link function of the Gamma instance
    Gamma.variance : varfunc instance
        `variance` is an instance of statsmodels.family.varfuncs.mu_squared

    See also
    --------
    statsmodels.genmod.families.family.Family
    :ref:`links`
    """
    links = [L.log, L.identity, L.inverse_power]
    variance = V.mu_squared
    safe_links = [L.Log,]

    def __init__(self, link=L.inverse_power):
        self.variance = Gamma.variance
        self.link = link()

    def _clean(self, x):
        """
        Helper function to trim the data so that it is in (0,inf)

        Notes
        -----
        The need for this function was discovered through usage and it's
        possible that other families might need a check for validity of the
        domain.
        """
        return np.clip(x, FLOAT_EPS, np.inf)

    def deviance(self, endog, mu, scale=1.):
        """
        Gamma deviance function

        Parameters
        -----------
        endog : array-like
            Endogenous response variable
        mu : array-like
            Fitted mean response variable
        scale : float, optional
            An optional scale argument

        Returns
        -------
        deviance : float
            Deviance function as defined below

        Notes
        -----
        `deviance` = 2*sum((endog - mu)/mu - log(endog/mu))/scale
        """
        endog_mu = self._clean(endog/mu)
        # Divide by scale as documented and as every other family does;
        # the previous implementation silently ignored ``scale``.
        return 2 * np.sum((endog - mu)/mu - np.log(endog_mu))/scale

    def resid_dev(self, endog, mu, scale=1.):
        r"""
        Gamma deviance residuals

        Parameters
        -----------
        endog : array-like
            Endogenous response variable
        mu : array-like
            Fitted mean response variable
        scale : float, optional
            An optional argument to divide the residuals by scale

        Returns
        -------
        resid_dev : array
            Deviance residuals as defined below

        Notes
        -----
        `resid_dev` is defined

        .. math:

           sign(endog - \mu) * \sqrt{-2*(-(endog-\mu)/\mu + \log(endog/\mu))}
           / scale
        """
        endog_mu = self._clean(endog/mu)
        # Apply the documented division by scale (previously ignored).
        return np.sign(endog - mu) * np.sqrt(-2 * (-(endog - mu)/mu +
                                                   np.log(endog_mu)))/scale

    def loglike(self, endog, mu, scale=1.):
        """
        The log-likelihood function in terms of the fitted mean response.

        Parameters
        ----------
        endog : array-like
            Endogenous response variable
        mu : array-like
            Fitted mean response variable
        scale : float, optional
            The default is 1.

        Returns
        -------
        llf : float
            The value of the loglikelihood function evaluated at
            (endog,mu,scale) as defined below.

        Notes
        --------
        llf = -1/scale * sum(endog/mu + log(mu) + (scale-1)*log(endog) +\
              log(scale) + scale*gammaln(1/scale))
        where gammaln is the log gamma function.
        """
        return - 1./scale * np.sum(endog/mu + np.log(mu) + (scale - 1) *
                                   np.log(endog) + np.log(scale) + scale *
                                   special.gammaln(1./scale))
        # in Stata scale is set to equal 1 for reporting llf
        # in R it's the dispersion, though there is a loss of precision vs.
        # our results due to an assumed difference in implementation

    def resid_anscombe(self, endog, mu):
        """
        The Anscombe residuals for Gamma exponential family distribution

        Parameters
        ----------
        endog : array
            Endogenous response variable
        mu : array
            Fitted mean response variable

        Returns
        -------
        resid_anscombe : array
            The Anscombe residuals for the Gamma family defined below

        Notes
        -----
        resid_anscombe = 3*(endog**(1/3.)-mu**(1/3.))/mu**(1/3.)
        """
        return 3*(endog**(1/3.)-mu**(1/3.))/mu**(1/3.)
class Binomial(Family):
    """
    Binomial exponential family distribution.
    Parameters
    ----------
    link : a link instance, optional
        The default link for the Binomial family is the logit link.
        Available links are logit, probit, cauchy, log, and cloglog.
        See statsmodels.family.links for more information.
    Attributes
    ----------
    Binomial.link : a link instance
        The link function of the Binomial instance
    Binomial.variance : varfunc instance
        `variance` is an instance of statsmodels.family.varfuncs.binary
    See also
    --------
    statsmodels.genmod.families.family.Family
    :ref:`links`
    Notes
    -----
    endog for Binomial can be specified in one of three ways.
    """
    links = [L.logit, L.probit, L.cauchy, L.log, L.cloglog, L.identity]
    variance = V.binary  # this is not used below in an effort to include n
    # Other safe links, e.g. cloglog and probit are subclasses
    safe_links = [L.Logit, L.CDFLink]
    def __init__(self, link=L.logit):  # , n=1.):
        # TODO: it *should* work for a constant n>1 actually, if data_weights
        # is equal to n
        self.n = 1
        # overwritten by initialize if needed but always used to initialize
        # variance since endog is assumed/forced to be (0,1)
        self.variance = V.Binomial(n=self.n)
        self.link = link()
    def starting_mu(self, y):
        """
        The starting values for the IRLS algorithm for the Binomial family.
        A good choice for the binomial family is
        starting_mu = (y + .5)/2
        """
        return (y + .5)/2
    def initialize(self, endog):
        '''
        Initialize the response variable.
        Parameters
        ----------
        endog : array
            Endogenous response variable
        Returns
        --------
        If `endog` is binary, returns `endog`
        If `endog` is a 2d array, then the input is assumed to be in the format
        (successes, failures) and
        successes/(success + failures) is returned. And n is set to
        successes + failures.
        '''
        if (endog.ndim > 1 and endog.shape[1] > 1):
            y = endog[:, 0]
            self.n = endog.sum(1)  # overwrite self.n for deviance below
            return y*1./self.n
        else:
            return endog
    def deviance(self, endog, mu, scale=1.):
        '''
        Deviance function for either Bernoulli or Binomial data.
        Parameters
        ----------
        endog : array-like
            Endogenous response variable (already transformed to a probability
            if appropriate).
        mu : array
            Fitted mean response variable
        scale : float, optional
            An optional scale argument
        Returns
        --------
        deviance : float
            The deviance function as defined below
        Notes
        -----
        If the endogenous variable is binary:
        `deviance` = -2*sum(I_one * log(mu) + (I_zero)*log(1-mu))
        where I_one is an indicator function that evaluates to 1 if
        endog_i == 1. and I_zero is an indicator function that evaluates to
        1 if endog_i == 0.
        If the model is binomial:
        `deviance` = 2*sum(log(endog/mu) + (n-endog)*log((n-endog)/(n-mu)))
        where endog and n are as defined in Binomial.initialize.
        '''
        # self.n is a scalar 1 for Bernoulli data; an array of trial counts
        # (set by initialize) for binomial data.  The 1e-200 fudge keeps
        # log() finite at the boundary.
        if np.shape(self.n) == () and self.n == 1:
            one = np.equal(endog, 1)
            return -2 * np.sum(one * np.log(mu + 1e-200) + (1-one) *
                               np.log(1 - mu + 1e-200))
        else:
            return 2 * np.sum(self.n * (endog * np.log(endog/mu + 1e-200) +
                              (1 - endog) * np.log((1 - endog) /
                                                   (1 - mu) +
                                                   1e-200)))
    def resid_dev(self, endog, mu, scale=1.):
        """
        Binomial deviance residuals
        Parameters
        -----------
        endog : array-like
            Endogenous response variable
        mu : array-like
            Fitted mean response variable
        scale : float, optional
            An optional argument to divide the residuals by scale
        Returns
        -------
        resid_dev : array
            Deviance residuals as defined below
        Notes
        -----
        If `endog` is binary:
        resid_dev = sign(endog-mu)*sqrt(-2*log(I_one*mu + I_zero*(1-mu)))
        where I_one is an indicator function that evaluates as 1 if endog == 1
        and I_zero is an indicator function that evaluates as 1 if endog == 0.
        If `endog` is binomial:
        resid_dev = sign(endog - mu) * sqrt(2 * n * (endog * log(endog/mu) +
                    (1 - endog) * log((1 - endog)/(1 - mu))))
        where endog and n are as defined in Binomial.initialize.
        """
        # Clip mu away from {0, 1} so the logs below stay finite.
        mu = self.link._clean(mu)
        if np.shape(self.n) == () and self.n == 1:
            one = np.equal(endog, 1)
            return np.sign(endog-mu)*np.sqrt(-2 * np.log(one * mu + (1 - one) *
                                                         (1 - mu)))/scale
        else:
            return (np.sign(endog - mu) *
                    np.sqrt(2 * self.n * (endog * np.log(endog/mu + 1e-200) +
                            (1 - endog) * np.log((1 - endog)/(1 - mu) +
                                                 1e-200)))/scale)
    def loglike(self, endog, mu, scale=1.):
        """
        The log-likelihood function in terms of the fitted mean response.
        Parameters
        ----------
        endog : array-like
            Endogenous response variable
        mu : array-like
            Fitted mean response variable
        scale : float, optional
            Not used for the Binomial GLM.
        Returns
        -------
        llf : float
            The value of the loglikelihood function evaluated at
            (endog,mu,scale) as defined below.
        Notes
        --------
        If `endog` is binary:
        `llf` = scale*sum(endog*log(mu/(1-mu))+log(1-mu))
        If `endog` is binomial:
        `llf` = scale*sum(gammaln(n+1) - gammaln(y+1) - gammaln(n-y+1) +\
                y*log(mu/(1-mu)) + n*log(1-mu)
        where gammaln is the log gamma function and y = endog*n with endog
        and n as defined in Binomial initialize. This simply makes y the
        original number of successes.
        """
        if np.shape(self.n) == () and self.n == 1:
            return scale * np.sum(endog * np.log(mu/(1 - mu) + 1e-200) +
                                  np.log(1 - mu))
        else:
            y = endog * self.n  # convert back to successes
            return scale * np.sum(special.gammaln(self.n + 1) -
                                  special.gammaln(y + 1) -
                                  special.gammaln(self.n - y + 1) + y *
                                  np.log(mu/(1 - mu)) + self.n *
                                  np.log(1 - mu))
    def resid_anscombe(self, endog, mu):
        '''
        The Anscombe residuals
        Parameters
        ----------
        endog : array-like
            Endogenous response variable
        mu : array-like
            Fitted mean response variable
        Returns
        -------
        resid_anscombe : array
            The Anscombe residuals as defined below.
        Notes
        -----
        sqrt(n)*(cox_snell(endog)-cox_snell(mu))/(mu**(1/6.)*(1-mu)**(1/6.))
        where cox_snell is defined as
        cox_snell(x) = betainc(2/3., 2/3., x)*betainc(2/3.,2/3.)
        where betainc is the incomplete beta function
        The name 'cox_snell' is idiosyncratic and is simply used for
        convenience following the approach suggested in Cox and Snell (1968).
        Further note that
        cox_snell(x) = x**(2/3.)/(2/3.)*hyp2f1(2/3.,1/3.,5/3.,x)
        where hyp2f1 is the hypergeometric 2f1 function. The Anscombe
        residuals are sometimes defined in the literature using the
        hyp2f1 formulation. Both betainc and hyp2f1 can be found in scipy.
        References
        ----------
        Anscombe, FJ. (1953) "Contribution to the discussion of H. Hotelling's
            paper." Journal of the Royal Statistical Society B. 15, 229-30.
        Cox, DR and Snell, EJ. (1968) "A General Definition of Residuals."
            Journal of the Royal Statistical Society B. 30, 248-75.
        '''
        cox_snell = lambda x: (special.betainc(2/3., 2/3., x)
                               * special.beta(2/3., 2/3.))
        return np.sqrt(self.n) * ((cox_snell(endog) - cox_snell(mu)) /
                                  (mu**(1/6.) * (1 - mu)**(1/6.)))
class InverseGaussian(Family):
    """
    InverseGaussian exponential family.
    Parameters
    ----------
    link : a link instance, optional
        The default link for the inverse Gaussian family is the
        inverse squared link.
        Available links are inverse_squared, inverse, log, and identity.
        See statsmodels.family.links for more information.
    Attributes
    ----------
    InverseGaussian.link : a link instance
        The link function of the inverse Gaussian instance
    InverseGaussian.variance : varfunc instance
        `variance` is an instance of statsmodels.family.varfuncs.mu_cubed
    See also
    --------
    statsmodels.genmod.families.family.Family
    :ref:`links`
    Notes
    -----
    The inverse Gaussian distribution is sometimes referred to in the
    literature as the Wald distribution.
    """
    links = [L.inverse_squared, L.inverse_power, L.identity, L.log]
    variance = V.mu_cubed
    safe_links = [L.inverse_squared, L.Log,]
    def __init__(self, link=L.inverse_squared):
        self.variance = InverseGaussian.variance
        self.link = link()
    def resid_dev(self, endog, mu, scale=1.):
        """
        Returns the deviance residuals for the inverse Gaussian family.
        Parameters
        -----------
        endog : array-like
            Endogenous response variable
        mu : array-like
            Fitted mean response variable
        scale : float, optional
            An optional argument to divide the residuals by scale
        Returns
        -------
        resid_dev : array
            Deviance residuals as defined below
        Notes
        -----
        `dev_resid` = sign(endog-mu)*sqrt((endog-mu)**2/(endog*mu**2))/scale
        """
        return np.sign(endog-mu) * np.sqrt((endog-mu)**2/(endog*mu**2))/scale
    def deviance(self, endog, mu, scale=1.):
        """
        Inverse Gaussian deviance function
        Parameters
        -----------
        endog : array-like
            Endogenous response variable
        mu : array-like
            Fitted mean response variable
        scale : float, optional
            An optional scale argument
        Returns
        -------
        deviance : float
            Deviance function as defined below
        Notes
        -----
        `deviance` = sum((endog-mu)**2/(endog*mu**2))/scale
        """
        return np.sum((endog-mu)**2/(endog*mu**2))/scale
    def loglike(self, endog, mu, scale=1.):
        """
        The log-likelihood function in terms of the fitted mean response.
        Parameters
        ----------
        endog : array-like
            Endogenous response variable
        mu : array-like
            Fitted mean response variable
        scale : float, optional
            The default is 1.
        Returns
        -------
        llf : float
            The value of the loglikelihood function evaluated at
            (endog,mu,scale) as defined below.
        Notes
        -----
        `llf` = -(1/2.)*sum((endog-mu)**2/(endog*mu**2*scale)
                 + log(scale*endog**3) + log(2*pi))
        """
        return -.5 * np.sum((endog - mu)**2/(endog * mu**2 * scale)
                            + np.log(scale * endog**3) + np.log(2 * np.pi))
    def resid_anscombe(self, endog, mu):
        """
        The Anscombe residuals for the inverse Gaussian distribution
        Parameters
        ----------
        endog : array
            Endogenous response variable
        mu : array
            Fitted mean response variable
        Returns
        -------
        resid_anscombe : array
            The Anscombe residuals for the inverse Gaussian distribution as
            defined below
        Notes
        -----
        `resid_anscombe` = log(endog/mu)/sqrt(mu)
        """
        return np.log(endog/mu)/np.sqrt(mu)
class NegativeBinomial(Family):
    """
    Negative Binomial exponential family.

    Parameters
    ----------
    link : a link instance, optional
        The default link for the negative binomial family is the log link.
        Available links are log, cloglog, identity, nbinom and power.
        See statsmodels.family.links for more information.
    alpha : float, optional
        The ancillary parameter for the negative binomial distribution.
        For now `alpha` is assumed to be nonstochastic. The default value
        is 1. Permissible values are usually assumed to be between .01 and 2.

    Attributes
    ----------
    NegativeBinomial.link : a link instance
        The link function of the negative binomial instance
    NegativeBinomial.variance : varfunc instance
        `variance` is an instance of statsmodels.family.varfuncs.nbinom

    See also
    --------
    statsmodels.genmod.families.family.Family
    :ref:`links`

    Notes
    -----
    Power link functions are not yet supported.
    """
    links = [L.log, L.cloglog, L.identity, L.nbinom, L.Power]
    # TODO: add the ability to use the power links with an if test
    # similar to below
    variance = V.nbinom
    safe_links = [L.Log,]

    def __init__(self, link=L.log, alpha=1.):
        self.alpha = 1. * alpha  # make it at least float
        self.variance = V.NegativeBinomial(alpha=self.alpha)
        # The nbinom link needs to know alpha; other links take no args.
        if isinstance(link, L.NegativeBinomial):
            self.link = link(alpha=self.alpha)
        else:
            self.link = link()

    def _clean(self, x):
        """
        Helper function to trim the data so that it is in (0,inf)

        Notes
        -----
        The need for this function was discovered through usage and it's
        possible that other families might need a check for validity of the
        domain.
        """
        return np.clip(x, FLOAT_EPS, np.inf)

    def deviance(self, endog, mu, scale=1.):
        r"""
        Returns the value of the deviance function.

        Parameters
        -----------
        endog : array-like
            Endogenous response variable
        mu : array-like
            Fitted mean response variable
        scale : float, optional
            An optional scale argument

        Returns
        -------
        deviance : float
            Deviance function as defined below

        Notes
        -----
        `deviance` = sum(piecewise)/scale

        where piecewise is defined as

        If :math:`Y_{i} == 0`:

        .. math::

           piecewise_i = 2\log(1+\alpha*\mu)/\alpha

        If :math:`Y_{i} > 0`:

        .. math::

           piecewise_i = 2 Y \log(Y/\mu) - (2/\alpha)(1+\alpha Y)
                         \log((1+\alpha Y)/(1+\alpha\mu))
        """
        iszero = np.equal(endog, 0)
        notzero = 1 - iszero
        endog_mu = self._clean(endog/mu)
        tmp = iszero * 2 * np.log(1 + self.alpha * mu)/self.alpha
        tmp += notzero * (2 * endog * np.log(endog_mu) - 2/self.alpha *
                          (1 + self.alpha*endog) *
                          np.log((1 + self.alpha * endog) /
                                 (1 + self.alpha * mu)))
        return np.sum(tmp)/scale

    def resid_dev(self, endog, mu, scale=1.):
        r'''
        Negative Binomial Deviance Residual

        Parameters
        ----------
        endog : array-like
            `endog` is the response variable
        mu : array-like
            `mu` is the fitted value of the model
        scale : float, optional
            An optional argument to divide the residuals by scale

        Returns
        --------
        resid_dev : array
            The array of deviance residuals

        Notes
        -----
        `resid_dev` = sign(endog-mu) * sqrt(piecewise)/scale

        where piecewise is defined as

        If :math:`Y_i = 0`:

        .. math::

           piecewise_i = 2*log(1+alpha*mu)/alpha

        If :math:`Y_i > 0`:

        .. math::

           piecewise_i = 2*Y*log(Y/\mu) - 2/\alpha * (1 + \alpha * Y) *
                         \log((1 + \alpha * Y)/(1 + \alpha * \mu))
        '''
        iszero = np.equal(endog, 0)
        notzero = 1 - iszero
        endog_mu = self._clean(endog/mu)
        tmp = iszero * 2 * np.log(1 + self.alpha * mu)/self.alpha
        tmp += notzero * (2 * endog * np.log(endog_mu) - 2/self.alpha *
                          (1 + self.alpha * endog) *
                          np.log((1 + self.alpha * endog) /
                                 (1 + self.alpha * mu)))
        return np.sign(endog - mu) * np.sqrt(tmp)/scale

    def loglike(self, endog, mu, scale=1.):
        """
        The log-likelihood function in terms of the fitted mean response.

        Parameters
        ----------
        endog : array-like
            Endogenous response variable
        mu : array-like
            The fitted mean response values
        scale : float, optional
            The scale parameter. Accepted for interface consistency with
            the other families; it is not used in this log-likelihood.
            Defaults to 1. so that callers need not pass it, matching
            every other family in this module.

        Returns
        -------
        llf : float
            The value of the loglikelihood function evaluated at
            (endog,mu,scale) as defined below.

        Notes
        -----
        sum(endog*log(alpha*exp(lin_pred)/(1+alpha*exp(lin_pred))) -
        log(1+alpha*exp(lin_pred))/alpha + constant)

        where constant is defined as::

           constant = gammaln(endog + 1/alpha) - gammaln(endog + 1) -
                      gammaln(1/alpha)
        """
        # Use the public ``link`` property rather than the private
        # ``_link`` attribute (same object, clearer intent).
        lin_pred = self.link(mu)
        constant = (special.gammaln(endog + 1/self.alpha) -
                    special.gammaln(endog + 1) -
                    special.gammaln(1/self.alpha))
        exp_lin_pred = np.exp(lin_pred)
        return (np.sum(endog * np.log(self.alpha * exp_lin_pred /
                                      (1 + self.alpha * exp_lin_pred)) -
                       np.log(1 + self.alpha * exp_lin_pred)/self.alpha +
                       constant))

    def resid_anscombe(self, endog, mu):
        """
        The Anscombe residuals for the negative binomial family

        Parameters
        ----------
        endog : array-like
            Endogenous response variable
        mu : array-like
            Fitted mean response variable

        Returns
        -------
        resid_anscombe : array
            The Anscombe residuals as defined below.

        Notes
        -----
        `resid_anscombe` = (hyp2f1(-alpha*endog)-hyp2f1(-alpha*mu)+\
                1.5*(endog**(2/3.)-mu**(2/3.)))/(mu+alpha*mu**2)**(1/6.)

        where hyp2f1 is the hypergeometric 2f1 function parameterized as
        hyp2f1(x) = hyp2f1(2/3.,1/3.,5/3.,x)
        """
        hyp2f1 = lambda x: special.hyp2f1(2/3., 1/3., 5/3., x)
        return ((hyp2f1(-self.alpha * endog) - hyp2f1(-self.alpha * mu) +
                 1.5 * (endog**(2/3.)-mu**(2/3.))) /
                (mu + self.alpha*mu**2)**(1/6.))
| |
"""
Interface to Constrained Optimization By Linear Approximation
Functions
---------
.. autosummary::
:toctree: generated/
fmin_cobyla
"""
import functools
from threading import RLock
import numpy as np
from scipy.optimize import _cobyla
from .optimize import OptimizeResult, _check_unknown_options
try:
from itertools import izip
except ImportError:
izip = zip
__all__ = ['fmin_cobyla']
# Workaround because _cobyla.minimize is not thread-safe
# due to an unknown f2py bug and can segfault;
# see gh-9658.
_module_lock = RLock()
def synchronized(func):
    """Decorator serializing calls to ``func`` under the module-wide lock."""
    @functools.wraps(func)
    def locked_call(*args, **kwargs):
        with _module_lock:
            return func(*args, **kwargs)
    return locked_call
@synchronized
def fmin_cobyla(func, x0, cons, args=(), consargs=None, rhobeg=1.0,
                rhoend=1e-4, maxfun=1000, disp=None, catol=2e-4):
    """
    Minimize a function using the Constrained Optimization By Linear
    Approximation (COBYLA) method. This function wraps a FORTRAN
    implementation of the algorithm.

    Parameters
    ----------
    func : callable
        Function to minimize, of the form ``func(x, *args)``.
    x0 : ndarray
        Initial guess.
    cons : sequence
        Constraint functions; must all be ``>=0`` (a single function
        if only 1 constraint). Each function takes the parameters `x`
        as its first argument, and it can return either a single number
        or an array or list of numbers.
    args : tuple, optional
        Extra arguments to pass to function.
    consargs : tuple, optional
        Extra arguments to pass to constraint functions (default of None
        means use the same extra arguments as those passed to func).
        Use ``()`` for no extra arguments.
    rhobeg : float, optional
        Reasonable initial changes to the variables.
    rhoend : float, optional
        Final accuracy in the optimization (not precisely guaranteed).
        This is a lower bound on the size of the trust region.
    disp : {0, 1, 2, 3}, optional
        Controls the frequency of output; 0 implies no output.
    maxfun : int, optional
        Maximum number of function evaluations.
    catol : float, optional
        Absolute tolerance for constraint violations.

    Returns
    -------
    x : ndarray
        The argument that minimises `f`.

    See also
    --------
    minimize: Interface to minimization algorithms for multivariate
        functions. See the 'COBYLA' `method` in particular.

    Notes
    -----
    COBYLA builds linear (plus constant) approximations to the objective
    and to every constraint from function values at k+1 simplex points,
    then solves the resulting linear program inside a trust region of
    radius RHO_j (constraint approximations required non-negative).  The
    radius only ever decreases, from `rhobeg` down to `rhoend`, so the
    iterations behave like a trust-region algorithm.  For how inconsistent
    linear programs and poor improvement steps are handled, refer to the
    references below.

    References
    ----------
    Powell M.J.D. (1994), "A direct search optimization method that models
    the objective and constraint functions by linear interpolation.", in
    Advances in Optimization and Numerical Analysis, eds. S. Gomez and
    J-P Hennart, Kluwer Academic (Dordrecht), pp. 51-67

    Powell M.J.D. (1998), "Direct search algorithms for optimization
    calculations", Acta Numerica 7, 287-336

    Powell M.J.D. (2007), "A view of algorithms for optimization without
    derivatives", Cambridge University Technical Report DAMTP 2007/NA03

    Examples
    --------
    Minimize f(x,y) = x*y subject to x**2 + y**2 < 1 and y > 0::

        >>> def objective(x):
        ...     return x[0]*x[1]
        ...
        >>> def constr1(x):
        ...     return 1 - (x[0]**2 + x[1]**2)
        ...
        >>> def constr2(x):
        ...     return x[1]
        ...
        >>> from scipy.optimize import fmin_cobyla
        >>> fmin_cobyla(objective, [0.0, 0.1], [constr1, constr2], rhoend=1e-7)
        array([-0.70710685,  0.70710671])

    The exact solution is (-sqrt(2)/2, sqrt(2)/2).
    """
    err = ("cons must be a sequence of callable functions or a single"
           " callable function.")
    # Accept either a sequence of constraint callables or one bare callable.
    try:
        len(cons)
    except TypeError as exc:
        if not callable(cons):
            raise TypeError(err) from exc
        cons = [cons]
    else:
        if not all(callable(con) for con in cons):
            raise TypeError(err)
    if consargs is None:
        consargs = args

    # Translate every constraint into the inequality-dict form that
    # _minimize_cobyla understands.
    constraints = tuple({'type': 'ineq', 'fun': c, 'args': consargs}
                        for c in cons)
    sol = _minimize_cobyla(func, x0, args, constraints=constraints,
                           rhobeg=rhobeg, tol=rhoend, disp=disp,
                           maxiter=maxfun, catol=catol)
    if disp and not sol['success']:
        print("COBYLA failed to find a solution: %s" % (sol.message,))
    return sol['x']
@synchronized
def _minimize_cobyla(fun, x0, args=(), constraints=(),
                     rhobeg=1.0, tol=1e-4, maxiter=1000,
                     disp=False, catol=2e-4, **unknown_options):
    """
    Minimize a scalar function of one or more variables using the
    Constrained Optimization BY Linear Approximation (COBYLA) algorithm.

    Options
    -------
    rhobeg : float
        Reasonable initial changes to the variables.
    tol : float
        Final accuracy in the optimization (not precisely guaranteed).
        This is a lower bound on the size of the trust region.
    disp : bool
        Set to True to print convergence messages. If False,
        `verbosity` is ignored as set to 0.
    maxiter : int
        Maximum number of function evaluations.
    catol : float
        Tolerance (absolute) for constraint violations

    Returns
    -------
    OptimizeResult
        With fields ``x``, ``status``, ``success``, ``message``, ``nfev``,
        ``fun`` and ``maxcv`` (largest constraint violation).
    """
    _check_unknown_options(unknown_options)
    # Map the minimize()-style option names onto the Fortran arguments.
    maxfun = maxiter
    rhoend = tol
    iprint = int(bool(disp))
    # check constraints
    if isinstance(constraints, dict):
        constraints = (constraints, )
    for ic, con in enumerate(constraints):
        # check type
        try:
            ctype = con['type'].lower()
        except KeyError as e:
            raise KeyError('Constraint %d has no type defined.' % ic) from e
        except TypeError as e:
            raise TypeError('Constraints must be defined using a '
                            'dictionary.') from e
        except AttributeError as e:
            raise TypeError("Constraint's type must be a string.") from e
        else:
            # COBYLA only supports inequality constraints.
            if ctype != 'ineq':
                raise ValueError("Constraints of type '%s' not handled by "
                                 "COBYLA." % con['type'])
        # check function
        if 'fun' not in con:
            raise KeyError('Constraint %d has no function defined.' % ic)
        # check extra arguments
        if 'args' not in con:
            con['args'] = ()
    # m is the total number of constraint values
    # it takes into account that some constraints may be vector-valued
    cons_lengths = []
    for c in constraints:
        # Evaluate once at x0 purely to discover each constraint's length.
        f = c['fun'](x0, *c['args'])
        try:
            cons_length = len(f)
        except TypeError:
            # Scalar-valued constraint.
            cons_length = 1
        cons_lengths.append(cons_length)
    m = sum(cons_lengths)
    def calcfc(x, con):
        # Callback invoked by the Fortran code: writes all constraint
        # values into the preallocated `con` array and returns the
        # objective value at x.
        f = fun(x, *args)
        i = 0
        for size, c in izip(cons_lengths, constraints):
            con[i: i + size] = c['fun'](x, *c['args'])
            i += size
        return f
    # dinfo is filled in-place by the Fortran routine:
    # info[0]=status, info[1]=nfev, info[2]=objective, info[3]=maxcv.
    info = np.zeros(4, np.float64)
    xopt, info = _cobyla.minimize(calcfc, m=m, x=np.copy(x0), rhobeg=rhobeg,
                                  rhoend=rhoend, iprint=iprint, maxfun=maxfun,
                                  dinfo=info)
    if info[3] > catol:
        # Check constraint violation
        info[0] = 4
    return OptimizeResult(x=xopt,
                          status=int(info[0]),
                          success=info[0] == 1,
                          message={1: 'Optimization terminated successfully.',
                                   2: 'Maximum number of function evaluations '
                                      'has been exceeded.',
                                   3: 'Rounding errors are becoming damaging '
                                      'in COBYLA subroutine.',
                                   4: 'Did not converge to a solution '
                                      'satisfying the constraints. See '
                                      '`maxcv` for magnitude of violation.',
                                   5: 'NaN result encountered.'
                                   }.get(info[0], 'Unknown exit status.'),
                          nfev=int(info[1]),
                          fun=info[2],
                          maxcv=info[3])
| |
#!/usr/bin/env python
#-*- coding: utf-8 -*-
#/*##########################################################################
# Copyright (C) 2016 K. Kummer, A. Tamborino, European Synchrotron Radiation
# Facility
#
# This file is part of the ID32 RIXSToolBox developed at the ESRF by the ID32
# staff and the ESRF Software group.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
#############################################################################*/
from __future__ import division

__author__ = "K. Kummer - ESRF ID32"
__contact__ = "kurt.kummer@esrf.fr"
__license__ = "MIT"
__copyright__ = "European Synchrotron Radiation Facility, Grenoble, France"
# Bug fix: the module docstring was assigned to a misspelled ``___doc__``
# (three leading underscores), so ``__doc__`` was never actually set.
__doc__ = """
...
"""
import os
import copy
import time
import numpy as np
from PyMca5.PyMcaGui import PyMcaQt as qt
from PyMca5.PyMcaGui.pymca import ScanWindow
from PyMca5.PyMcaGui.plotting import PlotWindow
from PyMca5.PyMcaCore.SpecFileDataSource import SpecFileDataSource
from PyMca5.PyMcaGui.pymca import QDispatcher
from PyMca5.PyMcaGui.pymca.SumRulesTool import MarkerSpinBox
from PyMca5.PyMcaCore import DataObject
from PyMca5.PyMcaGui import IconDict
from PyMca5.PyMcaGui.plotting import ColormapDialog
from RTB_SpecGen import ExportWidget
from RTB_Icons import RtbIcons
from RTB_Math import RTB_Math
class MainWindow(qt.QWidget):
def __init__(self, parent=None):
DEBUG = 1
qt.QWidget.__init__(self, parent)
self.setWindowTitle('RixsToolBox - Map generator')
self.setWindowIcon(qt.QIcon(qt.QPixmap(RtbIcons['Logo'])))
self.build()
self.connect_signals()
self.xvals = []
self.yvals = []
self.qvals = []
self.RTB_Math = RTB_Math()
def build(self):
self._sourceWidget = QDispatcher.QDispatcher(self)
fileTypeList = ['Spec Files (*.spec)',
'Dat Files (*.dat)',
'All Files (*.*)']
self._sourceWidget.sourceSelector.fileTypeList = fileTypeList
for tabnum in range(self._sourceWidget.tabWidget.count()):
if self._sourceWidget.tabWidget.tabText(tabnum) != 'SpecFile':
self._sourceWidget.tabWidget.removeTab(tabnum)
self._sourceWidget.selectorWidget['SpecFile']
self._sourceWidget.selectorWidget['SpecFile'].autoAddBox.setChecked(False)
self._sourceWidget.selectorWidget['SpecFile'].autoAddBox.hide()
self._sourceWidget.selectorWidget['SpecFile'].autoReplaceBox.setChecked(False)
self._sourceWidget.selectorWidget['SpecFile'].autoReplaceBox.hide()
self._sourceWidget.selectorWidget['SpecFile'].autoOffBox.setChecked(True)
self._sourceWidget.selectorWidget['SpecFile'].autoOffBox.hide()
self._sourceWidget.selectorWidget['SpecFile'].meshBox.setChecked(False)
self._sourceWidget.selectorWidget['SpecFile'].meshBox.hide()
self._sourceWidget.selectorWidget['SpecFile'].forceMcaBox.setChecked(False)
self._sourceWidget.selectorWidget['SpecFile'].forceMcaBox.hide()
if hasattr(self._sourceWidget.selectorWidget['SpecFile'], 'object3DBox'):
self._sourceWidget.selectorWidget['SpecFile'].object3DBox.setChecked(True)
self._sourceWidget.selectorWidget['SpecFile'].object3DBox.hide()
self._sourceWidget.selectorWidget['SpecFile'].addButton.setText('Add to map')
self._sourceWidget.selectorWidget['SpecFile'].replaceButton.hide()
self._sourceWidget.selectorWidget['SpecFile'].removeButton.hide()
self._exportWidget = ExportWidget()
self.mapPlotWindow = PlotWindow.PlotWindow(
parent=self, backend=None, plugins=False, newplot=False, roi=False,
control=False, position=True, info=False, fit=False, logx=False,
logy=False, save=False, togglePoints=False)
self.mapPlotWindow.enableActiveCurveHandling(False)
self.mapPlotWindow.enableOwnSave(False)
#~ self.colormapDialog = ColormapDialog.ColormapDialog(self)
self.colormapDialog = MyColormapDialog(self)
self.colormapButton = qt.QPushButton()
self.colormapButton.setMinimumSize(25, 25)
self.colormapButton.setMaximumSize(25, 25)
self.colormapButton.setIcon(qt.QIcon(qt.QPixmap(IconDict['colormap'])))
self.colormapButton.setToolTip('Colormap dialog')
self.updateMapButton = qt.QPushButton()
self.updateMapButton.setMinimumSize(25, 25)
self.updateMapButton.setMaximumSize(25, 25)
self.updateMapButton.setIcon(qt.QIcon(qt.QPixmap(IconDict['reload'])))
self.updateMapButton.setToolTip('Update map')
self.waterfallPlotWindow = ScanWindow.ScanWindow(
parent=self, backend=None, plugins=False,roi=False,
control=False, position=True, info=False, fit=False)
# ~ self.waterfallPlotWindow.enableOwnSave(False)
self.minSpinBox = MarkerSpinBox(self, self.waterfallPlotWindow, 'min')
self.minSpinBox.setMaximumWidth(100)
self.minSpinBox.setMinimumWidth(70)
self.minSpinBox.setAlignment(qt.Qt.AlignRight)
self.minSpinBox.setMinimum(-100000)
self.minSpinBox.setMaximum(100000)
self.minSpinBox.setDecimals(2)
self.minSpinBox.setSingleStep(1)
self.minSpinBox.setValue(0)
self.minSpinBox.setEnabled(False)
self.minSpinBox.hideMarker()
self.maxSpinBox = MarkerSpinBox(self, self.waterfallPlotWindow, 'max')
self.maxSpinBox.setMaximumWidth(100)
self.maxSpinBox.setMinimumWidth(70)
self.maxSpinBox.setAlignment(qt.Qt.AlignRight)
self.maxSpinBox.setMinimum(-100000)
self.maxSpinBox.setMaximum(100000)
self.maxSpinBox.setDecimals(2)
self.maxSpinBox.setSingleStep(1)
self.maxSpinBox.setValue(0)
self.maxSpinBox.setEnabled(False)
self.maxSpinBox.hideMarker()
self.normaliseCheckBox = qt.QCheckBox('Normalise to intensity between')
self.normaliseCheckBox.setChecked(False)
self.normaliseMethodComboBox = qt.QComboBox(self)
self.normaliseMethodComboBox.addItem('average')
self.normaliseMethodComboBox.addItem('maximum')
self.normaliseMethodComboBox.setMaximumWidth(70)
self.normaliseMethodComboBox.setMinimumWidth(70)
self.normaliseMethodComboBox.setEnabled(False)
normaliseLayout = qt.QHBoxLayout(self)
normaliseLayout.addWidget(self.normaliseCheckBox)
normaliseLayout.addWidget(self.minSpinBox)
normaliseLayout.addWidget(qt.QLabel('and'))
normaliseLayout.addWidget(self.maxSpinBox)
normaliseLayout.addWidget(qt.QLabel('using the'))
normaliseLayout.addWidget(self.normaliseMethodComboBox)
normaliseLayout.addWidget(qt.HorizontalSpacer())
normaliseLayout.setContentsMargins(0, 0, 0, 0)
self.normaliseWidget = qt.QWidget(self)
self.normaliseWidget.setLayout(normaliseLayout)
self.dummyPlotWindow = ScanWindow.ScanWindow(
parent=self, backend=None, plugins=True, roi=False,
control=False, position=True, info=False)
#~ self._plotSpectraWindow.graph.enablemarkermode()
#~ self._plotSpectraWindow.enableActiveCurveHandling(False)
#~ if hasattr(self._plotSpectraWindow, '_buildLegendWidget'):
#~ self._plotSpectraWindow._buildLegendWidget()
self.integralPlotWindow = ScanWindow.ScanWindow(
parent=self, backend=None, plugins=True, roi=False,
control=True, position=True, info=False, fit=True,
save=False)
self.integralPlotWindow.enableActiveCurveHandling(True)
self.saveButton = qt.QPushButton('Save')
self.saveButton.setMinimumSize(75,75)
self.saveButton.setMaximumSize(75,75)
self.saveButton.clicked.connect(self.saveButtonClicked)
self.saveButton.setDisabled(True)
self.saveButton.setToolTip('Select output file\nto enable saving')
self.xAxisBox = qt.QGroupBox('Plot against')
self.xAxisH = qt.QRadioButton('H', checked=False)
self.xAxisK = qt.QRadioButton('K', checked=False)
self.xAxisL = qt.QRadioButton('L', checked=False)
self.xAxisM = qt.QRadioButton('motor position', checked=False)
self.xAxisQ = qt.QRadioButton('custom Q', checked=True)
self.xAxisGroup = qt.QButtonGroup()
self.xAxisGroup.addButton(self.xAxisH)
self.xAxisGroup.addButton(self.xAxisK)
self.xAxisGroup.addButton(self.xAxisL)
self.xAxisGroup.addButton(self.xAxisM)
self.xAxisGroup.addButton(self.xAxisQ)
self.xAxisMotorComboBox = qt.QComboBox()
xAxisLayout = qt.QGridLayout()
xAxisLayout.addWidget(self.xAxisH, 0, 0, 1, 1)
xAxisLayout.addWidget(self.xAxisK, 1, 0, 1, 1)
xAxisLayout.addWidget(self.xAxisL, 2, 0, 1, 1)
xAxisLayout.addWidget(self.xAxisM, 3, 0, 1, 1)
xAxisLayout.addWidget(self.xAxisMotorComboBox, 3, 1, 1, 1)
xAxisLayout.addWidget(self.xAxisQ, 4, 0, 1, 1)
xAxisLayout.addWidget(qt.VerticalSpacer(), 5, 0, 1, 1)
self.xAxisBox.setLayout(xAxisLayout)
self.xAxisBox.setMaximumWidth(250)
self.offsetSpinBox = qt.QDoubleSpinBox()
self.offsetSpinBox.setMaximumWidth(100)
self.offsetSpinBox.setMinimumWidth(70)
self.offsetSpinBox.setAlignment(qt.Qt.AlignRight)
self.offsetSpinBox.setMinimum(1e-2)
self.offsetSpinBox.setMaximum(1e6)
self.offsetSpinBox.setSingleStep(1)
self.offsetSpinBox.setDecimals(5)
self.offsetSpinBox.setValue(1)
self.offsetLayout = qt.QHBoxLayout()
self.offsetLayout.addWidget(qt.QLabel('Offset between spectra'))
self.offsetLayout.addSpacing(20)
self.offsetLayout.addWidget(self.offsetSpinBox)
self.offsetLayout.addWidget(qt.HorizontalSpacer())
self.offsetWidget = qt.QWidget()
self.offsetWidget.setLayout(self.offsetLayout)
self.waterfallWidget = qt.QWidget()
self.waterfallLayout = qt.QVBoxLayout()
self.waterfallLayout.addWidget(self.waterfallPlotWindow)
self.waterfallLayout.addWidget(self.offsetWidget)
self.waterfallLayout.addWidget(self.normaliseWidget)
self.waterfallWidget.setLayout(self.waterfallLayout)
self.interpolationComboBox = qt.QComboBox()
self.interpolationComboBox.addItem('nearest')
self.interpolationComboBox.addItem('linear')
#~ self.interpolationComboBox.addItem('cubic')
self.interpolationLayout = qt.QHBoxLayout()
self.interpolationLayout.addWidget(qt.QLabel('Interpolation'))
self.interpolationLayout.addSpacing(10)
self.interpolationLayout.addWidget(self.interpolationComboBox)
self.interpolationLayout.addSpacing(25)
self.interpolationLayout.addWidget(self.colormapButton)
self.interpolationLayout.addWidget(qt.HorizontalSpacer())
self.interpolationLayout.addWidget(self.updateMapButton)
self.interpolationWidget = qt.QWidget()
self.interpolationWidget.setLayout(self.interpolationLayout)
self.integralTable = IntegralTable(self)
self.integralTable.setMaximumWidth(170)
self.integralTable.setMinimumHeight(325)
self.integralTable.setMaximumHeight(325)
self.mapWidget = qt.QWidget()
self.mapLayout = qt.QVBoxLayout()
self.mapLayout.addWidget(self.dummyPlotWindow)
self.mapLayout.addWidget(self.mapPlotWindow)
self.mapLayout.addWidget(self.interpolationWidget)
self.mapWidget.setLayout(self.mapLayout)
self.dummyPlotWindow.hide()
self.integralMethodBox = qt.QGroupBox('Plot')
self.integralMethodIntegral = qt.QRadioButton('Integral', checked=True)
self.integralMethodMean = qt.QRadioButton('Mean value', checked=False)
self.integralMethodGroup = qt.QButtonGroup()
self.integralMethodGroup.addButton(self.integralMethodIntegral)
self.integralMethodGroup.addButton(self.integralMethodMean)
self.integralMethodLayout = qt.QVBoxLayout()
self.integralMethodLayout.addWidget(self.integralMethodIntegral)
self.integralMethodLayout.addWidget(self.integralMethodMean)
self.integralMethodBox.setLayout(self.integralMethodLayout)
self.integralMethodBox.setMaximumWidth(170)
self.integralTableWidget = qt.QWidget()
self.integralTableWidgetLayout = qt.QVBoxLayout()
self.integralTableWidgetLayout.addWidget(qt.VerticalSpacer())
self.integralTableWidgetLayout.addWidget(self.integralTable)
#~ hint = '\n'.join(['If [Ref] is given integrals [1]...[9]',
#~ ' will be normalised to integral [Ref].'])
#~ hintLabel = qt.QLabel(hint, font=qt.QFont("SansSerif", 8))
#~ self.integralTableWidgetLayout.addWidget(hintLabel)
self.integralTableWidgetLayout.addSpacing(20)
self.integralTableWidgetLayout.addWidget(self.integralMethodBox)
self.integralTableWidgetLayout.addWidget(qt.VerticalSpacer())
self.integralTableWidget.setLayout(self.integralTableWidgetLayout)
self.integralWidget = qt.QWidget()
self.integralLayout = qt.QHBoxLayout()
self.integralLayout.addWidget(self.integralPlotWindow, 3)
self.integralLayout.addWidget(self.integralTableWidget, 1)
self.integralWidget.setLayout(self.integralLayout)
self.tabWidget = qt.QTabWidget()
self.tabWidget.addTab(self.waterfallWidget, 'Waterfall plot')
self.tabWidget.addTab(self.mapWidget, 'Map')
self.tabWidget.addTab(self.integralWidget, 'Integrals')
self.table = SelectionTable(self)
self.tableLayout = qt.QHBoxLayout()
self.tableLayout.addWidget(self.table,3)
self.tableLayout.addWidget(self.xAxisBox,1)
self.tableWidget = qt.QGroupBox('Source spectra')
self.tableWidget.setLayout(self.tableLayout)
rsLayout = qt.QGridLayout(self)
rsLayout.addWidget(self.tabWidget, 0, 0, 1, 1)
rsLayout.addWidget(self.tableWidget, 1, 0, 1, 1)
rsWidget = qt.QWidget()
rsWidget.setContentsMargins(0,0,0,-8)
rsWidget.setLayout(rsLayout)
self._lsLayout = qt.QVBoxLayout(self)
self._lsLayout.addWidget(self._sourceWidget)
self._lsLayout.addWidget(self._exportWidget)
self._lsWidget = qt.QWidget()
self._lsWidget.setContentsMargins(0,0,0,-8)
self._lsWidget.setSizePolicy(
qt.QSizePolicy(qt.QSizePolicy.Fixed, qt.QSizePolicy.Preferred))
self._lsWidget.setLayout(self._lsLayout)
self._lsWidget.setMaximumWidth(500)
self._exportWidget.hide()
self.splitter = qt.QSplitter(self)
self.splitter.setOrientation(qt.Qt.Horizontal)
self.splitter.setHandleWidth(5)
self.splitter.setStretchFactor(1, 2)
self.splitter.addWidget(self._lsWidget)
self.splitter.addWidget(rsWidget)
self._mainLayout = qt.QHBoxLayout()
self._mainLayout.addWidget(self.splitter)
self.setLayout(self._mainLayout)
return 0
    def connect_signals(self):
        """Wire all widget signals to their handler slots.

        Returns
        -------
        int
            Always 0.
        """
        # Source selection and export.
        self._sourceWidget.sigAddSelection.connect(self.addSpectra)
        self._exportWidget.OutputFileSelected.connect(self._enableSaveButton)
        self._sourceWidget.selectorWidget['SpecFile'].cntTable.sigSpecFileCntTableSignal.connect(self.update_yselection)
        # Everything that invalidates the waterfall plot triggers updatePlots.
        self.xAxisMotorComboBox.activated.connect(self.updatePlots)
        self.table.tableChanged.connect(self.updatePlots)
        self.integralTable.itemChanged.connect(self.integralsChanged)
        self.integralTable.tableChanged.connect(self.integralsChanged)
        self.integralMethodGroup.buttonClicked.connect(self.integralsChanged)
        self.xAxisGroup.buttonClicked.connect(self.updatePlots)
        self.offsetSpinBox.valueChanged.connect(self.updatePlots)
        self.colormapButton.clicked.connect(self.selectColormap)
        self.normaliseCheckBox.stateChanged.connect(self.setNormalisation)
        self.normaliseMethodComboBox.currentIndexChanged.connect(self.updatePlots)
        self.minSpinBox.valueChanged.connect(self.updatePlots)
        self.minSpinBox.intersectionsChangedSignal.connect(self.updatePlots)
        self.maxSpinBox.valueChanged.connect(self.updatePlots)
        self.maxSpinBox.intersectionsChangedSignal.connect(self.updatePlots)
        # Map refresh and saving.  NOTE(review): saveWaterfall is never
        # connected here -- confirm whether that is intentional.
        self.interpolationComboBox.currentIndexChanged.connect(self.updateMap)
        self.colormapDialog.sigColormapChanged.connect(self.updateMap)
        self.updateMapButton.clicked.connect(self.updateMap)
        self.mapPlotWindow.sigIconSignal.connect(self.saveMap)
        return 0
def integralsChanged(self):
self.integralLimits = []
for row in range(self.integralTable.rowCount()-1):
if self.integralTable.item(row, 0) \
and self.integralTable.item(row, 1):
try:
limit_1 = float(self.integralTable.item(row, 0).text())
limit_2 = float(self.integralTable.item(row, 1).text())
self.integralLimits.append(
[row, min([limit_1, limit_2]), max([limit_1, limit_2])])
except ValueError:
pass
if not self.integralLimits or not self.qvals:
return
self.integralPlotWindow.clearCurves()
for limit in self.integralLimits:
yint = []
xint = []
for i, qval in enumerate(self.qvals):
xint.append(qval)
if self.integralMethodIntegral.isChecked():
yint.append(self.yvals[i][
np.argwhere(self.xvals[i]>=limit[1])[0][0]: \
np.argwhere(self.xvals[i]<=limit[2])[-1][0]].sum())
else:
yint.append(self.yvals[i][
np.argwhere(self.xvals[i]>=limit[1])[0][0]: \
np.argwhere(self.xvals[i]<=limit[2])[-1][0]].mean())
int2plot = np.vstack([xint, yint]).T
int2plot = int2plot[int2plot[:,0].argsort()]
self.integralPlotWindow.addCurve(int2plot[:,0], int2plot[:,1],
legend='Region %d' % (limit[0]+1), ylabel=' ',
symbol='o')
return
def setNormalisation(self):
if self.normaliseCheckBox.isChecked():
self.minSpinBox.setEnabled(True)
self.minSpinBox.showMarker()
self.maxSpinBox.setEnabled(True)
self.maxSpinBox.showMarker()
self.normaliseMethodComboBox.setEnabled(True)
if self.minSpinBox.value() == 0 and self.maxSpinBox.value() == 0:
xlimits = self.waterfallPlotWindow.getGraphXLimits()
self.maxSpinBox.setValue(xlimits[0]+0.9*(xlimits[1]-xlimits[0]))
self.minSpinBox.setValue(xlimits[0]+0.1*(xlimits[1]-xlimits[0]))
else:
self.minSpinBox.setEnabled(False)
self.minSpinBox.hideMarker()
self.maxSpinBox.setEnabled(False)
self.maxSpinBox.hideMarker()
self.normaliseMethodComboBox.setEnabled(False)
self.updatePlots()
def update_yselection(self):
""" Make sure that only one y counter can be selected """
self._sourceWidget.selectorWidget['SpecFile'].cntTable.ySelection = self._sourceWidget.selectorWidget['SpecFile'].cntTable.ySelection[-1:]
for i in range(self._sourceWidget.selectorWidget['SpecFile'].cntTable.rowCount()):
widget = self._sourceWidget.selectorWidget['SpecFile'].cntTable.cellWidget(i, 2)
if i in self._sourceWidget.selectorWidget['SpecFile'].cntTable.ySelection:
if not widget.isChecked():
widget.setChecked(True)
else:
if widget.isChecked():
widget.setChecked(False)
return 0
    def shiftSpinBoxChanged(self):
        # Slot: re-run curve preprocessing with shifting enabled.
        # NOTE(review): ``preprocessCurves`` is not defined in this class in
        # this file -- confirm it exists elsewhere or whether this slot is
        # dead code.
        self.preprocessCurves(shift=True)
    def rescaleSpinBoxChanged(self):
        # Slot: re-run curve preprocessing with rescaling enabled.
        # NOTE(review): ``preprocessCurves`` is not defined in this class in
        # this file -- confirm it exists elsewhere or whether this slot is
        # dead code.
        self.preprocessCurves(rescale=True)
def addSpectra(self, selectionlist):
self.dummyPlotWindow._addSelection(selectionlist)
curves = self.dummyPlotWindow.getAllCurves()
self.dummyPlotWindow.clearCurves()
self.table.update(curves=curves)
def updatePlots(self):
print(self.table.motorNames)
# Update list of motor names
currentMotor = self.xAxisMotorComboBox.currentText()
self.xAxisMotorComboBox.clear()
self.xAxisMotorComboBox.addItems(self.table.motorNames)
if currentMotor:
print(currentMotor, self.table.motorNames)
self.xAxisMotorComboBox.setCurrentIndex(
self.table.motorNames.index(currentMotor))
# Update motor positions
currentMotor = self.xAxisMotorComboBox.currentText()
if currentMotor:
for ii in range(len(self.table.spectra.keys())):
spectrum = self.table.spectra[self.table.item(ii, 0).text()]
if currentMotor in spectrum['MotorNames']:
motorvalue = float(spectrum['MotorValues'][
spectrum['MotorNames'].index(currentMotor)])
else:
motorvalue = np.nan
self.table.setItem(ii, 4, qt.QTableWidgetItem(str(motorvalue)))
spectrum['hklmq'][3] = motorvalue
# PLOT WATERFALL
self.waterfallPlotWindow.clearCurves()
offset = self.offsetSpinBox.value()
self.xvals, self.yvals, self.qvals = [], [], []
if self.xAxisH.isChecked():
ind = 0
if self.xAxisK.isChecked():
ind = 1
if self.xAxisL.isChecked():
ind = 2
if self.xAxisM.isChecked():
ind = 3
if self.xAxisQ.isChecked():
ind = 4
for legend, spectrum in self.table.spectra.items():
x = 1.*spectrum['x']
y = 1.*spectrum['y']
if self.normaliseCheckBox.isChecked():
imin = np.argwhere(x>=self.minSpinBox.value())[0][0]
imax = np.argwhere(x<=self.maxSpinBox.value())[-1][0]
if self.normaliseMethodComboBox.currentText() == 'average':
y /= y[imin:imax].mean()
else:
y /= y[imin:imax].max()
self.waterfallPlotWindow.addCurve(x, y/offset+spectrum['hklmq'][ind],
legend=legend, ylabel='Q')
self.xvals.append(x)
self.qvals.append(spectrum['hklmq'][ind])
self.yvals.append(y)
self.waterfallPlotWindow.setGraphYLabel('Q')
self.integralsChanged()
    def updateMap(self):
        """Interpolate all spectra onto a regular (x, Q) grid and plot it."""
        # Scattered data points: one (x, Q) pair per sample of every
        # spectrum.  NOTE(review): `points` and `values` are no longer
        # consumed -- they fed a since-removed griddata call; the
        # interpolation below works from the raw lists instead.
        points = [np.vstack([x, self.qvals[i]*np.ones(len(x))]) for i, x in enumerate(self.xvals)]
        points = np.hstack(points).T
        values = np.hstack(self.yvals)
        # Interpolate data on grid: x sampling taken from the first
        # spectrum, Q axis oversampled 10x between the extreme Q values.
        oversamplingQ = 10
        grid_x, grid_y = np.mgrid[self.xvals[0][0]:self.xvals[0][-1]:len(self.xvals[0])*1j,
                                  min(self.qvals):max(self.qvals):oversamplingQ*len(self.qvals)*1j]
        self.grid_x = grid_x[:,0]
        self.grid_y = grid_y[0,:]
        self.grid_z = self.RTB_Math.interpolate_on_grid(self.qvals, self.xvals,
            self.yvals, (grid_x, grid_y), fill_value=None,
            method=self.interpolationComboBox.currentText())
        # Render with the colormap currently configured in the dialog.
        cm_name = self.colormapDialog.combo.currentText()
        cm_autoscale = self.colormapDialog.autoscale
        cm_min = self.colormapDialog.minValue
        cm_max = self.colormapDialog.maxValue
        colormap = {'name': cm_name, 'normalization':'linear', 'colors': 256,
            'autoscale': cm_autoscale, 'vmin': cm_min, 'vmax': cm_max}
        self.mapPlotWindow.addImage(self.grid_z, colormap=colormap, yScale=[grid_x[0,0], (grid_x[-1,0]-grid_x[0,0])/len(grid_x[:,0])],
            xScale=[min(self.qvals), (max(self.qvals)-min(self.qvals))/len(self.qvals)/oversamplingQ])
        # Feed a 10-bin histogram of the map values to the colormap dialog.
        hist = np.histogram(self.grid_z, 10)
        self.colormapDialog.plotHistogram([0.5*(hist[1][1:]+hist[1][:-1]), hist[0]])
    def saveWaterfall(self, signal):
        """Save the waterfall plot as a PNG image or an interpolated matrix.

        Parameters
        ----------
        signal : dict
            Icon-toolbar signal; only acted upon when ``signal['key']``
            equals ``'save'``.

        NOTE(review): this slot is not connected to any signal in this
        file -- confirm how it is invoked.
        """
        if not signal['key'] == 'save':
            return
        # Get output filename
        outfile = qt.QFileDialog(self)
        outfile.setWindowTitle("Output File Selection")
        outfile.setModal(1)
        filterlist = ['*.png', '*.dat']
        # Older Qt bindings expose setFilters/selectedFilter, newer ones
        # setNameFilters/selectedNameFilter -- support both.
        if hasattr(outfile, "setFilters"):
            outfile.setFilters(filterlist)
        else:
            outfile.setNameFilters(filterlist)
        outfile.setFileMode(outfile.AnyFile)
        outfile.setAcceptMode(outfile.AcceptSave)
        ret = outfile.exec_()
        if not ret:
            # Dialog cancelled.
            return None
        if hasattr(outfile, "selectedFilter"):
            outputFilter = qt.safe_str(outfile.selectedFilter())
        else:
            outputFilter = qt.safe_str(outfile.selectedNameFilter())
        outputFile = qt.safe_str(outfile.selectedFiles()[0])
        outfile.close()
        del outfile
        # Append the extension of the chosen filter when it is missing.
        extension = outputFilter[-4:]
        if len(outputFile) < 5:
            outputFile = outputFile + extension
        elif outputFile[-4:] != extension:
            outputFile = outputFile + extension
        # NOTE(review): outputFile is always a string at this point, so
        # this check can never trigger -- dead code.
        if outputFile is None:
            return
        # Save map
        if extension == '.png':
            self.waterfallPlotWindow.saveGraph(outputFile, fileFormat='png', dpi=150)
            print('Plot saved to %s' % outputFile)
        elif extension == '.dat':
            # Resample every spectrum onto the x axis of the lowest-Q
            # spectrum and export one column per Q value.
            for i, q in enumerate(sorted(self.qvals)):
                index = self.qvals.index(q)
                if i == 0:
                    x = self.xvals[index]
                    array2export = np.zeros((len(x), len(self.qvals)+1))
                    array2export[:,0] = x
                array2export[:,i+1] = np.interp(
                    x, self.xvals[index], self.yvals[index])
            # NOTE(review): dtype='string' is Python 2 era; NumPy under
            # Python 3 rejects it -- confirm the targeted Python version.
            header = '--- ' + ' '.join(np.asarray(sorted(self.qvals), dtype='string').tolist())
            np.savetxt(outputFile, array2export, fmt='%f', header=header)
            print('Data saved to %s' % outputFile)
        return
    def saveMap(self, signal):
        """Save the interpolated map as a PNG image or a text matrix.

        Parameters
        ----------
        signal : dict
            Icon-toolbar signal from the map plot window; only acted upon
            when ``signal['key']`` equals ``'save'``.
        """
        if not signal['key'] == 'save':
            return
        # Get output filename
        outfile = qt.QFileDialog(self)
        outfile.setWindowTitle("Output File Selection")
        outfile.setModal(1)
        filterlist = ['*.png', '*.dat']
        # Older Qt bindings expose setFilters/selectedFilter, newer ones
        # setNameFilters/selectedNameFilter -- support both.
        if hasattr(outfile, "setFilters"):
            outfile.setFilters(filterlist)
        else:
            outfile.setNameFilters(filterlist)
        outfile.setFileMode(outfile.AnyFile)
        outfile.setAcceptMode(outfile.AcceptSave)
        ret = outfile.exec_()
        if not ret:
            # Dialog cancelled.
            return None
        if hasattr(outfile, "selectedFilter"):
            outputFilter = qt.safe_str(outfile.selectedFilter())
        else:
            outputFilter = qt.safe_str(outfile.selectedNameFilter())
        outputFile = qt.safe_str(outfile.selectedFiles()[0])
        outfile.close()
        del outfile
        # Append the extension of the chosen filter when it is missing.
        extension = outputFilter[-4:]
        if len(outputFile) < 5:
            outputFile = outputFile + extension
        elif outputFile[-4:] != extension:
            outputFile = outputFile + extension
        # NOTE(review): outputFile is always a string at this point, so
        # this check can never trigger -- dead code.
        if outputFile is None:
            return
        # Save map
        if extension == '.png':
            self.mapPlotWindow.saveGraph(outputFile, fileFormat='png', dpi=150)
            print('Plot saved to %s' % outputFile)
        elif extension == '.dat':
            # One row per grid x value; first column is x, the remaining
            # columns are the interpolated map values per Q.
            # NOTE(review): dtype='string' is Python 2 era; NumPy under
            # Python 3 rejects it -- confirm the targeted Python version.
            header = '--- ' + ' '.join(np.asarray(self.grid_y, dtype='string').tolist())
            array2export = np.vstack([self.grid_x, self.grid_z.T]).T
            np.savetxt(outputFile, array2export, fmt='%f', header=header)
            print('Data saved to %s' % outputFile)
        return
def selectColormap(self):
print('Colormap dialog')
if self.colormapDialog.isHidden():
self.colormapDialog.show()
self.colormapDialog.raise_()
self.colormapDialog.show()
def _enableSaveButton(self):
self.saveButton.setEnabled(True)
self.saveButton.setToolTip(None)
    def saveButtonClicked(self):
        # NOTE(review): placeholder slot -- only logs the click; actual
        # saving is handled by saveMap/saveWaterfall.  Confirm whether
        # this stub is intentional.
        print('Save button clicked')
class SelectionTable(qt.QTableWidget):
tableChanged = qt.pyqtSignal()
    def __init__(self, parent):
        """Set up the 6-column source-spectra table.

        Columns: legend, H, K, L, motor position, custom Q (editable).
        """
        super(SelectionTable, self).__init__(parent)
        self.setColumnCount(6)
        self.setHorizontalHeaderLabels(['Spectrum', 'H', 'K', 'L',
                                        'motor position', 'custom Q'])
        self.setColumnWidth(0, 300)
        self.setColumnWidth(1, 60)
        self.setColumnWidth(2, 60)
        self.setColumnWidth(3, 60)
        self.setAlternatingRowColors(True)
        self.setSortingEnabled(True)
        self.setSelectionBehavior(qt.QAbstractItemView.SelectRows)
        # Editing the custom-Q column feeds back into the spectra dict.
        self.itemChanged.connect(self.updateQ)
        # legend -> {'hklmq', 'x', 'y', 'MotorNames', 'MotorValues'}
        self.spectra = {}
        # Union of all motor names over the loaded spectra, sorted.
        self.motorNames = []
def update(self, curves=[]):
legends = [self.item(i, 0).text() for i in range(self.rowCount())]
# Add curves if any
for ii, curve in enumerate(curves):
x, y, legend, info = curve[:4]
if legend in self.spectra.keys():
print('%s already in map - will be replaced' % legend)
if not info['MotorNames']:
info['MotorNames'] = []
if 'H' in info['MotorNames']:
hkl_h = info['MotorValues'][info['MotorNames'].index('H')]
hkl_k = info['MotorValues'][info['MotorNames'].index('K')]
hkl_l = info['MotorValues'][info['MotorNames'].index('L')]
else:
hkl_h = np.nan
hkl_k = np.nan
hkl_l = np.nan
if info['MotorValues']:
motor = info['MotorValues'][0]
else:
motor = np.nan
custom = 0
self.spectra[legend] = {}
self.spectra[legend]['hklmq'] = [hkl_h, hkl_k, hkl_l, motor, custom]
self.spectra[legend]['x'] = x
self.spectra[legend]['y'] = y
self.spectra[legend]['MotorNames'] = info['MotorNames']
self.spectra[legend]['MotorValues'] = info['MotorValues']
if legend in legends:
row = legends.index(legend)
else:
self.insertRow(0)
row = 0
self.setItem(row, 0, qt.QTableWidgetItem(legend))
for i in range(6):
self.item(row, i).setFlags(
qt.Qt.ItemIsEnabled | qt.Qt.ItemIsSelectable)
if i < 5:
self.setItem(row, 1+i, qt.QTableWidgetItem(
str(self.spectra[legend]['hklmq'][i])))
# Remove curves if any and update Q for the remaining spectra
legends = [self.item(i, 0).text() for i in range(self.rowCount())]
motornames = [set(self.spectra[s]['MotorNames']) for s in legends]
if motornames:
self.motorNames = motornames[0]
for ii in range(1, len(motornames)):
self.motorNames = self.motorNames.union(motornames[ii])
self.motorNames = list(self.motorNames)
indices = np.argsort(np.array([s.lower() for s in self.motorNames]))
self.motorNames = [self.motorNames[i] for i in indices]
self.spectra = dict(
[(legend, self.spectra[legend]) for legend in legends])
self.tableChanged.emit()
def updateQ(self, item):
if item.column() == 5:
legend = self.item(item.row(), 0).text()
self.spectra[legend]['hklmq'][4] = np.float(item.text())
self.tableChanged.emit()
# ~ if item.column() == 4:
# ~ legend = self.item(item.row(), 0).text()
# ~ self.spectra[legend]['hklmq'][3] = np.float(item.text())
# ~ self.tableChanged.emit()
def contextMenuEvent(self, event):
if self.selectedIndexes():
menu = qt.QMenu(self)
removeSpectrumAction = menu.addAction("Remove from list")
action = menu.exec_(self.mapToGlobal(event.pos()))
if action == removeSpectrumAction:
rows2remove = list(set([index.row() for index in self.selectedIndexes()]))
for row in sorted(rows2remove)[::-1]:
self.removeRow(row)
self.update()
class IntegralTable(qt.QTableWidget):
    """Two-column ('from'/'to') table holding up to ten integration ranges."""

    tableChanged = qt.pyqtSignal()

    def __init__(self, parent):
        qt.QTableWidget.__init__(self, parent)
        self.setColumnCount(2)
        self.setRowCount(10)
        self.setHorizontalHeaderLabels(['from', 'to'])
        for column in (0, 1):
            self.setColumnWidth(column, 72)
        self.setAlternatingRowColors(True)
        self.setSortingEnabled(False)

    def contextMenuEvent(self, event):
        """Offer a context menu to clear the selected rows.

        Each removed row is replaced by a fresh empty row at the bottom so
        the table keeps its ten rows; emits ``tableChanged`` when done.
        """
        selection = self.selectedIndexes()
        if not selection:
            return
        menu = qt.QMenu(self)
        removeAction = menu.addAction("Remove from list")
        chosen = menu.exec_(self.mapToGlobal(event.pos()))
        if chosen == removeAction:
            # Distinct rows, removed bottom-up to keep indices valid.
            for row in sorted({index.row() for index in selection}, reverse=True):
                self.removeRow(row)
                self.insertRow(9)
            self.tableChanged.emit()
class MyColormapDialog(ColormapDialog.ColormapDialog):
    """Colormap dialog restricted to a curated subset of colormaps.

    Only the entries of ``desired_colormaps`` that the plot window actually
    supports are offered; 'RdYlBu_r' is the default when available,
    otherwise 'jet'.
    """

    def __init__(self, parent):
        ColormapDialog.ColormapDialog.__init__(self, parent)
        self.autoScale90Button.hide()
        supported_colormaps = parent.mapPlotWindow.getSupportedColormaps()
        # NOTE(review): 'Spectra_r' looks like a typo for matplotlib's
        # 'Spectral_r' -- confirm against getSupportedColormaps().
        desired_colormaps = ['terrain', 'seismic', 'jet', 'hot',
            'gray', 'gnuplot', 'coolwarm', 'afmhot', 'Spectra_r', 'Reds_r',
            'RdGy_r', 'RdBu_r', 'PuBu_r', 'OrRd_r',
            'CMRmap', 'BrBG_r', 'Blues_r', 'temperature','RdYlBu_r']
        self.combo.clear()
        self.colormapList = []
        # Reuse the supported list fetched above instead of querying again.
        for cm in supported_colormaps:
            if cm in desired_colormaps:
                self.combo.addItem(cm)
                self.colormapList.append(cm)
        self.buttonGroup.button(2).hide()
        # BUG FIX: a stray second `self.colormapList.append(cm)` used to sit
        # here, duplicating the last colormap in colormapList without a
        # matching combo entry, so list indices and combo indices diverged.
        if 'RdYlBu_r' in self.colormapList:
            cmap = 'RdYlBu_r'
        else:
            cmap = 'jet'
        self.colormapIndex = self.colormapList.index(cmap)
        self.colormapString = cmap
        self.setDataMinMax(0, 1)
        self.setAutoscale(1)
        self.setColormap(self.colormapIndex)
if __name__ == "__main__":
    # Stand-alone launcher: run MainWindow inside its own Qt application.
    # NOTE(review): numpy is imported here under the same alias the classes
    # above already use -- presumably also imported at module top; confirm.
    import numpy as np
    app = qt.QApplication([])
    app.lastWindowClosed.connect(app.quit)
    w = MainWindow()
    w.show()
    app.exec_()
| |
import matplotlib
matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import seaborn as sb
sb.set_style("dark")
import os
import string
import codecs
import glob
from operator import itemgetter
from collections import namedtuple
import numpy as np
import pandas as pd
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_selection import SelectKBest, f_classif, chi2
from sklearn.decomposition import PCA
from HACluster import *
import PLM
from nltk.tokenize import wordpunct_tokenize
def identity(x):
    """No-op analyzer/tokenizer for TfidfVectorizer: return *x* unchanged."""
    return x
Oeuvre = namedtuple('Oeuvre', ['dates', 'titles', 'texts'])
def load_data(genres=['prose'], data_dir="../data",
              min_nb_tokens=1000):
    """Load and tokenize every text under data_dir/<genre>/*.txt.

    Files shorter than `min_nb_tokens` tokens are skipped.  Filenames are
    expected to follow `<genre>_<date>_<title>.txt`.  Returns an `Oeuvre`
    of (dates, titles, texts) sorted chronologically.

    NOTE: Python 2 print statements -- this module is Python 2 only.
    NOTE(review): the loop variable `genre` is rebound by the filename
    unpacking below; harmless here, but easy to trip over.
    NOTE(review): mutable default argument `genres=['prose']` -- never
    mutated here, but worth cleaning up.
    """
    items = []
    # iterate over relevant genres:
    for genre in genres:
        for filename in glob.glob(data_dir+"/"+genre+"/*.txt"):
            print "\t+ "+filename,
            with codecs.open(filename, 'r', 'utf-8') as F:
                words = wordpunct_tokenize(F.read().lower())
            if len(words) >= min_nb_tokens:
                print ">>> "+str(len(words))+" words loaded:",
                print (" ".join(words[:6])).strip()
                # the filename encodes genre, date and title:
                genre, date, title = os.path.basename(filename).replace(".txt", "").split("_")
                date = int(date)
                items.append((date, title, words))
            else:
                print ">>> file too short"
    # sort texts chronologically:
    items.sort(key=itemgetter(0))
    return Oeuvre(*zip(*items))
def sample(oeuvre, sample_size=2500):
    """Split each text into consecutive samples of `sample_size` tokens.

    Texts longer than `sample_size` yield one sample per full window, with
    the title suffixed by the 1-based sample index; shorter texts are kept
    whole as sample "_1".  Returns a new `Oeuvre`.
    """
    dates, titles, samples = [], [], []
    for date, title, text in zip(*oeuvre):
        if len(text) > sample_size:  # more than one sample
            start_idx, end_idx, cnt = 0, sample_size, 0
            while end_idx <= len(text):
                dates.append(date)
                titles.append(str(title)+"_"+str(cnt+1))
                samples.append(text[start_idx:end_idx])
                cnt += 1
                start_idx += sample_size
                end_idx += sample_size
        else:
            # BUG FIX: the date must stay an int as in the branch above; it
            # used to be stored as "<date>_1", which broke chronological
            # comparisons downstream (e.g. `date < break_date` in plm()).
            dates.append(date)
            titles.append(str(title)+"_1")
            samples.append(text)
    return Oeuvre(dates, titles, samples)
def load_stopwords(filepath="../data/stopwords.txt"):
    """Return the set of lowercased, whitespace-separated stopwords read
    from `filepath` (UTF-8).

    BUG FIX: the file handle used to be left open; a context manager now
    closes it deterministically.
    """
    with codecs.open(filepath, 'r', 'utf-8') as f:
        return set(f.read().lower().split())
# --- Corpus preparation ---------------------------------------------------
# Load the drama corpus, cut it into 1000-token samples and vectorize the
# samples over the stopword vocabulary (relative frequencies, no idf).
sample_size = 1000
genres = ['drama']
oeuvre = load_data(genres=genres, min_nb_tokens=sample_size)
oeuvre = sample(oeuvre=oeuvre, sample_size=sample_size)
stopwords = load_stopwords()
# analyzer=identity because the texts are already tokenized lists.
vectorizer = TfidfVectorizer(analyzer=identity,
                             vocabulary=stopwords,
                             #max_features=1000,
                             use_idf=False)
X = vectorizer.fit_transform(oeuvre.texts).toarray()
def vnc():
    """Variability-based Neighbour Clustering of the samples in X.

    Uses half the squared Euclidean distance and VNClusterer (from
    HACluster); the dendrogram is written to disk (show=False, save=True).
    """
    dist_matrix = DistanceMatrix(X, lambda u,v: np.sum((u-v)**2)/2)
    # initialize a clusterer with the default linkage method (Ward)
    clusterer = VNClusterer(dist_matrix)
    # start the clustering procedure
    clusterer.cluster(verbose=0)
    # plot the result as a dendrogram
    clusterer.dendrogram().draw(title="Becket's oeuvre - VNC analysis",#clusterer.linkage.__name__,
                                labels=oeuvre.titles,#oeuvre.dates,
                                show=False, save=True,
                                fontsize=3)
#vnc()
def plm(break_date=1955, nb=50):
    """Fit a Parsimonious Language Model contrasting the texts written
    before vs. after `break_date` and print the `nb` top words per period.

    NOTE(review): pairing big_docs.values() with big_docs.keys() relies on
    dict iteration order being consistent between the two calls (true
    within one CPython dict, but fragile) -- consider making it explicit.
    NOTE(review): `oeuvre.dates` must be ints for `date < break_date`; see
    sample().  Also, the local `plm` shadows this function's name.
    """
    big_docs = {"before":[], "after":[]}
    for text, date in zip(oeuvre.texts, oeuvre.dates):
        if date < break_date:
            big_docs["before"].extend(text)
        else:
            big_docs["after"].extend(text)
    plm = PLM.ParsimoniousLM(big_docs.values(), 0.1)
    plm.fit(big_docs.values(), big_docs.keys())
    for category, lm in plm.fitted_:
        print category
        words = plm.vectorizer.get_feature_names()
        scores = []
        # lm holds log-probabilities; report them as probabilities.
        for word, score in sorted(zip(words, lm), key=lambda i:i[1], reverse=True)[:nb]:
            scores.append((word, np.exp(score)))
        print scores
#plm()
def tau(nb=5):
    """Rank stopwords by Kendall's tau between their frequency series and
    chronological rank; plot the `nb` most decreasing words (top_tau.pdf)
    and the `nb` most increasing ones (bottom_tau.pdf).

    BUG FIX: a hard-coded `nb = 5` used to shadow the parameter, making it
    dead; the default is now 5 so `tau()` keeps its previous behavior
    while explicit `nb` arguments are finally honored.
    """
    from scipy.stats import kendalltau
    df = pd.DataFrame(X)
    df.columns = vectorizer.get_feature_names()
    df.index = oeuvre.titles
    scores = []
    ranks = range(1, len(df.index)+1)
    for feat in df.columns:
        # renamed from `tau` to avoid shadowing the function itself
        tau_val, p = kendalltau(ranks, df[feat].tolist())
        scores.append((feat, tau_val))
    scores.sort(key=itemgetter(1))
    top, bottom = scores[:nb], scores[-nb:]

    def _plot_series(selection, outfile):
        # One frequency curve per selected feature, written to `outfile`.
        sb.plt.figure()
        sb.set_style("darkgrid")
        for (feat, _), col in zip(selection, sb.color_palette("Set1")[:nb]):
            sb.plt.plot(ranks, df[feat].tolist(), label=feat, c=col)
        sb.plt.legend(loc="best")
        sb.plt.xlabel('Diachrony', fontsize=10)
        sb.plt.ylabel('Frequency', fontsize=10)
        sb.plt.savefig(outfile)

    _plot_series(top, "top_tau.pdf")
    _plot_series(bottom, "bottom_tau.pdf")
tau()
def ngram_viewer(items=[]):
    """Plot the frequency series of the given stopwords over the corpus.

    Items absent from the vocabulary are silently dropped.  The figure is
    written to ngram_viewer.pdf.

    NOTE(review): mutable default argument; harmless because `items` is
    immediately rebound, but worth cleaning up.
    """
    items = set(items)
    df = pd.DataFrame(X)
    df.columns = vectorizer.get_feature_names()
    df.index = oeuvre.titles
    ranks = range(1,len(df.index)+1)
    fig = sb.plt.figure()
    sb.set_style("darkgrid")
    # remove OOV items
    items = {item for item in items if item in df}
    for item, colour in zip(items, sb.color_palette("Set1")[:len(items)]):
        sb.plt.plot(ranks, df[item].tolist(), label=item, c=colour)
    sb.plt.legend(loc="best")
    sb.plt.xlabel('Diachrony', fontsize=10)
    sb.plt.ylabel('Frequency', fontsize=10)
    sb.plt.savefig("ngram_viewer.pdf")
#ngram_viewer(["no", "less", "neither"])
# un- als prefix?
# leestekens beter weglaten
def pca():
    """2-component PCA biplot of the standardized samples plus loadings.

    Sample labels are drawn in grey on the primary axes, feature loadings
    in black on twinned axes; the figure is written to pca.pdf.
    """
    # NOTE(review): `pylab as Plot` is only used by the commented-out
    # tick_params line below.
    import pylab as Plot
    # scale X:
    from sklearn.preprocessing import StandardScaler
    Xs = StandardScaler().fit_transform(X)
    P = PCA(n_components=2)
    Xr = P.fit_transform(Xs)
    loadings = P.components_.transpose()
    sb.set_style("darkgrid")
    fig, ax1 = plt.subplots()
    #Plot.tick_params(axis='both',which='both',top='off', left='off', right="off", bottom="off", labelbottom='off', labelleft="off", labelright="off")
    # first samples:
    x1, x2 = Xr[:,0], Xr[:,1]
    # invisible scatter just sets the axis limits for the text labels
    ax1.scatter(x1, x2, 100, edgecolors='none', facecolors='none');
    for x,y,l in zip(x1, x2, oeuvre.titles):
        print(l)
        ax1.text(x, y, l ,ha='center', va="center", size=10, color="darkgrey")
    # now loadings:
    sb.set_style("dark")
    ax2 = ax1.twinx().twiny()
    l1, l2 = loadings[:,0], loadings[:,1]
    ax2.scatter(l1, l2, 100, edgecolors='none', facecolors='none');
    for x,y,l in zip(l1, l2, vectorizer.get_feature_names()):
        l = l.encode('utf8')
        print(l)
        ax2.text(x, y, l ,ha='center', va="center", size=10, color="black")
    plt.savefig("pca.pdf", bbox_inches=0)
#pca()
| |
# Copyright 2020, Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""An implementation of the TrieHH Algorithm.
This is intended to be a stand-alone implementation of TrieHH, suitable for
branching as a starting point for algorithm modifications;
Based on the paper:
Federated Heavy Hitters Discovery with Differential Privacy
Wennan Zhu, Peter Kairouz, H. Brendan McMahan,
Haicheng Sun, Wei Li. AISTATS 2020.
https://arxiv.org/pdf/1902.08534.pdf
"""
import attr
import tensorflow as tf
import tensorflow_federated as tff
from analytics.heavy_hitters import heavy_hitters_utils as hh_utils
# Sentinel returned by the StaticHashTables below for unknown keys.
DEFAULT_VALUE = -1  # The value to use if a key is missing in the hash table.
DEFAULT_TERMINATOR = '$'  # The end of sequence symbol.
# NOTE(review): attr.s(cmp=...) is deprecated in newer attrs releases in
# favor of eq/order -- confirm the pinned attrs version before changing.
@attr.s(cmp=False, frozen=True)
class ClientOutput(object):
  """Structure for outputs returned from clients during federated heavy hitters.

  Fields:
    `client_votes`: A tensor containing the client's votes.
  """
  client_votes = attr.ib()
@attr.s(cmp=False, frozen=True)
class ServerState(object):
  """Structure for state on the server.

  Fields:
    `discovered_heavy_hitters`: A tf.string containing discovered heavy
      hitters.
    `heavy_hitters_counts`: A tf.int32 containing the counts of the
      heavy hitter in the round it is discovered.
    `discovered_prefixes`: A tf.string containing candidate prefixes.
    `round_num`: A tf.constant dictating the algorithm's round number.
    `accumulated_votes`: A tf.constant that holds the votes accumulated over
      sub-rounds.
  """
  discovered_heavy_hitters = attr.ib()
  heavy_hitters_counts = attr.ib()
  discovered_prefixes = attr.ib()
  round_num = attr.ib()
  accumulated_votes = attr.ib()
def make_accumulate_client_votes_fn(round_num, num_sub_rounds,
                                    discovered_prefixes_table,
                                    possible_prefix_extensions_table,
                                    default_terminator):
  """Returns a reduce function that is used to accumulate client votes.

  This function creates an accumulate_client_votes reduce function that can be
  consumed by a tf.data.Dataset.reduce method. The reduce function maps
  (old_state, example) to a new_state. It must take two arguments and return a
  new element with a structure that matches that of the initial_state.

  Args:
    round_num: A tf.constant containing the round number.
    num_sub_rounds: A tf.constant containing the number of sub rounds in a
      round.
    discovered_prefixes_table: A tf.lookup.StaticHashTable containing the
      discovered prefixes.
    possible_prefix_extensions_table: A tf.lookup.StaticHashTable containing the
      possible prefix extensions that a client can vote on.
    default_terminator: A tf.string containing the end of sequence symbol.

  Returns:
    An accumulate_client_votes reduce function for a specific round, set of
    discovered prefixes, and a set of possible prefix extensions.
  """

  @tf.function
  def accumulate_client_votes(vote_accumulator, example):
    """Accumulates client votes on prefix extensions."""
    # Votes are case-insensitive.
    example = tf.strings.lower(example)
    # Append the default terminator to the example.
    example = tf.strings.join([example, default_terminator])

    # Compute effective round number.
    effective_round_num = tf.math.floordiv(round_num, num_sub_rounds)

    # Examples shorter than the current prefix length cannot vote.
    if tf.strings.length(example) < effective_round_num:
      return vote_accumulator
    else:
      discovered_prefixes_index = discovered_prefixes_table.lookup(
          tf.strings.substr(example, 0, effective_round_num))
      possible_prefix_extensions_index = possible_prefix_extensions_table.lookup(
          tf.strings.substr(example, effective_round_num, 1))

      # If the character extension is not in the alphabet, or the prefix is not
      # already in the discovered prefixes, do not add client's vote.
      if tf.math.logical_or(
          tf.math.equal(possible_prefix_extensions_index,
                        tf.constant(DEFAULT_VALUE)),
          tf.math.equal(discovered_prefixes_index, tf.constant(DEFAULT_VALUE))):
        return vote_accumulator
      else:
        # One vote at (prefix row, extension column).
        indices = [[
            discovered_prefixes_index, possible_prefix_extensions_index
        ]]
        updates = tf.constant([1])
        return tf.tensor_scatter_nd_add(vote_accumulator, indices, updates)

  return accumulate_client_votes
@tf.function
def client_update(dataset, discovered_prefixes, possible_prefix_extensions,
                  round_num, num_sub_rounds, max_num_prefixes,
                  max_user_contribution, default_terminator):
  """Creates a ClientOutput object that holds the client's votes.

  This function takes in a 'tf.data.Dataset' containing the client's words,
  selects (up to) `max_user_contribution` words from the given `dataset`, and
  creates a `ClientOutput` object that holds the client's votes on character
  extensions to `discovered_prefixes`. The allowed character extensions are
  found in `possible_prefix_extensions`. `round_num` and `num_sub_rounds` are
  needed to compute the length of the prefix to be extended.
  `max_num_prefixes` is needed to set the shape of the tensor holding the
  client votes.

  Args:
    dataset: A 'tf.data.Dataset' containing the client's on-device words.
    discovered_prefixes: A tf.string containing candidate prefixes.
    possible_prefix_extensions: A tf.string of shape (num_discovered_prefixes, )
      containing possible prefix extensions.
    round_num: A tf.constant dictating the algorithm's round number.
    num_sub_rounds: A tf.constant containing the number of sub rounds in a
      round.
    max_num_prefixes: A tf.constant dictating the maximum number of prefixes we
      can keep in the trie.
    max_user_contribution: A tf.constant dictating the maximum number of
      examples a client can contribute.
    default_terminator: A tf.string containing the end of sequence symbol.

  Returns:
    A ClientOutput object holding the client's votes.
  """
  # Create all zero client vote tensor.
  client_votes = tf.zeros(
      dtype=tf.int32,
      shape=[max_num_prefixes,
             tf.shape(possible_prefix_extensions)[0]])

  # If discovered_prefixes is empty (training is done), skip the voting.
  if tf.math.equal(tf.size(discovered_prefixes), 0):
    return ClientOutput(client_votes)
  else:
    # Map each discovered prefix / extension to its row / column index.
    discovered_prefixes_table = tf.lookup.StaticHashTable(
        tf.lookup.KeyValueTensorInitializer(
            discovered_prefixes, tf.range(tf.shape(discovered_prefixes)[0])),
        DEFAULT_VALUE)

    possible_prefix_extensions_table = tf.lookup.StaticHashTable(
        tf.lookup.KeyValueTensorInitializer(
            possible_prefix_extensions,
            tf.range(tf.shape(possible_prefix_extensions)[0])), DEFAULT_VALUE)

    accumulate_client_votes_fn = make_accumulate_client_votes_fn(
        round_num, num_sub_rounds, discovered_prefixes_table,
        possible_prefix_extensions_table, default_terminator)

    # Bound the contribution; presumably the client's most frequent words --
    # see hh_utils.get_top_elements for the exact selection rule.
    sampled_data_list = hh_utils.get_top_elements(dataset,
                                                  max_user_contribution)
    sampled_data = tf.data.Dataset.from_tensor_slices(sampled_data_list)

    return ClientOutput(
        sampled_data.reduce(client_votes, accumulate_client_votes_fn))
@tf.function()
def accumulate_server_votes(server_state, sub_round_votes):
  """Adds `sub_round_votes` to the server's running tally.

  Args:
    server_state: The `ServerState` to update.
    sub_round_votes: Aggregated client votes for the current sub-round.

  Returns:
    A `ServerState` with the votes accumulated and the round number
    advanced by one.
  """
  return tff.structure.update_struct(
      server_state,
      accumulated_votes=server_state.accumulated_votes + sub_round_votes,
      round_num=server_state.round_num + 1)
@tf.function()
def get_extended_prefix_candidates(discovered_prefixes,
                                   extensions_wo_terminator):
  """Extend all discovered_prefixes with all possible extensions.

  Args:
    discovered_prefixes: A 1D tf.string containing discovered prefixes.
    extensions_wo_terminator: A 1D tf.string containing all possible
      extensions except `default_terminator`.

  Returns:
    A 1D tf.string tensor containing all combinations of each item in
    `discovered_prefixes` extended by each item in `extensions_wo_terminator`.
    Shape: (len(`discovered_prefixes`) * len(`extensions_wo_terminator`), ).
  """
  # One slot per (prefix, extension) pair, filled in nested-loop order.
  extended_prefixes = tf.TensorArray(
      dtype=tf.string,
      size=tf.shape(discovered_prefixes)[0] *
      tf.shape(extensions_wo_terminator)[0])
  position = tf.constant(0, dtype=tf.int32)
  for prefix in discovered_prefixes:
    for possible_extension in extensions_wo_terminator:
      # [-1] is passed to tf.reshape to flatten the extended prefix. This is
      # important to ensure consistency of shapes.
      extended_prefix = tf.reshape(
          tf.strings.reduce_join([prefix, possible_extension]), [-1])
      extended_prefixes = extended_prefixes.write(position, extended_prefix)
      position += 1
  return extended_prefixes.concat()
@tf.function()
def extend_prefixes(prefixes_votes, discovered_prefixes,
                    extensions_wo_terminator, max_num_prefixes, threshold):
  """Extends prefixes in `discovered_prefixes` by `extensions_wo_terminator`.

  For any prefix in `discovered_prefixes` with an extension in
  `extensions_wo_terminator`, we only save this extension if the number of
  votes for it is at least `threshold` and it is in the highest
  `max_num_prefixes` votes.

  Args:
    prefixes_votes: A 1D tf.int32 containing flattened votes of all candidates
      for extended prefixes.
    discovered_prefixes: A 1D tf.string containing prefixes to be extended.
    extensions_wo_terminator: A 1D tf.string containing all possible prefix
      extensions except `default_terminator`.
    max_num_prefixes: A tf.constant dictating the maximum number of prefixes we
      can keep in the trie.
    threshold: The threshold for heavy hitters and discovered prefixes. Only
      those that get at least `threshold` votes are discovered.

  Returns:
    A 1D tf.string containing all the extended prefixes.
  """
  extended_prefix_candiates = get_extended_prefix_candidates(
      discovered_prefixes, extensions_wo_terminator)
  extended_prefix_candiates_num = tf.shape(extended_prefix_candiates)[0]
  prefixes_mask = tf.math.greater_equal(prefixes_votes, threshold)
  # If the number of candidates for extended prefixes <= max_num_prefixes, we
  # only need to filter the votes by the threshold. Otherwise, the votes need
  # to be both >= threshold and in top `max_num_prefixes`.
  if tf.shape(prefixes_votes)[0] > max_num_prefixes:
    _, top_indices = tf.math.top_k(prefixes_votes, max_num_prefixes)
    # Create a 1-D tensor filled with tf.bool True of shape (max_num_prefixes,)
    top_indices = tf.cast(top_indices, dtype=tf.int64)
    top_indices = tf.sort(top_indices)
    top_indices = tf.reshape(top_indices, (tf.shape(top_indices)[0], 1))
    top_indices_mask_values = tf.cast(
        tf.ones(shape=(max_num_prefixes,)), dtype=tf.bool)
    # Create a mask tensor that only the indices of the top `max_num_prefixes`
    # candidates are set to True.
    top_indices_mask = tf.sparse.SparseTensor(
        indices=top_indices,
        values=top_indices_mask_values,
        dense_shape=[extended_prefix_candiates_num])
    top_indices_mask = tf.sparse.to_dense(top_indices_mask)
    prefixes_mask = tf.math.logical_and(prefixes_mask, top_indices_mask)

  extended_prefixes = tf.boolean_mask(extended_prefix_candiates, prefixes_mask)
  return extended_prefixes
@tf.function()
def accumulate_server_votes_and_decode(server_state, possible_prefix_extensions,
                                       sub_round_votes, max_num_prefixes,
                                       threshold):
  """Accumulates server votes and executes a decoding round.

  Args:
    server_state: A `ServerState`, the state to be updated.
    possible_prefix_extensions: A 1D tf.string containing all possible prefix
      extensions.
    sub_round_votes: A tensor of shape = (max_num_prefixes,
      len(possible_prefix_extensions)) containing aggregated client votes.
    max_num_prefixes: A tf.constant dictating the maximum number of prefixes we
      can keep in the trie.
    threshold: The threshold for heavy hitters and discovered prefixes. Only
      those that get at least `threshold` votes are discovered.

  Returns:
    An updated `ServerState`.
  """
  possible_extensions_num = tf.shape(possible_prefix_extensions)[0]
  # Get a list of possible extensions without `default_terminator` (the last
  # item in `possible_prefix_extensions`)
  extensions_wo_terminator_num = possible_extensions_num - 1
  extensions_wo_terminator = tf.slice(possible_prefix_extensions, [0],
                                      [extensions_wo_terminator_num])
  accumulated_votes = server_state.accumulated_votes + sub_round_votes

  # The last column of `accumulated_votes` are those ending with
  # 'default_terminator`, which are full length heavy hitters.
  heavy_hitters_votes = tf.slice(
      accumulated_votes, [0, extensions_wo_terminator_num],
      [tf.shape(server_state.discovered_prefixes)[0], 1])
  heavy_hitters_votes = tf.reshape(heavy_hitters_votes, [-1])
  heavy_hitters_mask = tf.math.greater_equal(heavy_hitters_votes, threshold)
  # The candidates of heavy hitters are `discovered_prefixes` ending with
  # `default_terminator`. We don't attach `default_terminator` here because it
  # is supposed to be removed after the full length heavy hitters are
  # discovered.
  heavy_hitters_candidates = server_state.discovered_prefixes
  new_heavy_hitters = tf.boolean_mask(heavy_hitters_candidates,
                                      heavy_hitters_mask)
  new_heavy_hitters_counts = tf.boolean_mask(heavy_hitters_votes,
                                             heavy_hitters_mask)

  # All but the last column of `accumulated_votes` are votes of prefixes.
  prefixes_votes = tf.slice(accumulated_votes, [0, 0], [
      tf.shape(server_state.discovered_prefixes)[0],
      extensions_wo_terminator_num
  ])
  prefixes_votes = tf.reshape(prefixes_votes, [-1])
  extended_prefixes = extend_prefixes(prefixes_votes,
                                      server_state.discovered_prefixes,
                                      extensions_wo_terminator,
                                      max_num_prefixes, threshold)

  # Append this round's discoveries to the running results.
  discovered_heavy_hitters = tf.concat(
      [server_state.discovered_heavy_hitters, new_heavy_hitters], 0)
  heavy_hitters_counts = tf.concat(
      [server_state.heavy_hitters_counts, new_heavy_hitters_counts], 0)

  # Reinitialize the server's vote tensor.
  accumulated_votes = tf.zeros(
      dtype=tf.int32, shape=[max_num_prefixes, possible_extensions_num])

  # Increment the server's round_num.
  round_num = server_state.round_num + 1

  # Return an updated server state.
  return tff.structure.update_struct(
      server_state,
      discovered_heavy_hitters=discovered_heavy_hitters,
      heavy_hitters_counts=heavy_hitters_counts,
      round_num=round_num,
      discovered_prefixes=extended_prefixes,
      accumulated_votes=accumulated_votes)
@tf.function
def server_update(server_state, possible_prefix_extensions, sub_round_votes,
                  num_sub_rounds, max_num_prefixes, threshold):
  """Updates `server_state` based on `client_votes`.

  Args:
    server_state: A `ServerState`, the state to be updated.
    possible_prefix_extensions: A 1D tf.string containing all possible prefix
      extensions.
    sub_round_votes: A tensor of shape = (max_num_prefixes,
      len(possible_prefix_extensions)) containing aggregated client votes.
    num_sub_rounds: The total number of sub rounds to be executed before
      decoding aggregated votes.
    max_num_prefixes: A tf.constant dictating the maximum number of prefixes we
      can keep in the trie.
    threshold: The threshold for heavy hitters and discovered prefixes. Only
      those that get at least `threshold` votes are discovered.

  Returns:
    An updated `ServerState`.
  """
  # If discovered_prefixes is empty (training is done), skip the voting.
  if tf.math.equal(tf.size(server_state.discovered_prefixes), 0):
    return server_state

  # Decode only on the last sub-round of each round; otherwise just
  # accumulate this sub-round's votes.
  if tf.math.equal((server_state.round_num + 1) % num_sub_rounds, 0):
    return accumulate_server_votes_and_decode(server_state,
                                              possible_prefix_extensions,
                                              sub_round_votes, max_num_prefixes,
                                              threshold)
  else:
    return accumulate_server_votes(server_state, sub_round_votes)
| |
import time
import calendar
import re
import socket
import pytz #@UnresolvedImport
from itertools import chain
import xmltodict
from cwbot.common.kmailContainer import Kmail
from unidecode import unidecode
from xml.parsers.expat import ExpatError
from urllib2 import HTTPError, URLError, urlopen
from collections import defaultdict, namedtuple, deque
from fuzzywuzzy import fuzz #@UnresolvedImport
from cwbot.modules.BaseChatModule import BaseChatModule
import kol.util.Report
#from kol.request.ClanLogRequest import ClanLogRequest, CLAN_LOG_FAX
from cwbot.kolextra.request.ClanLogPartialRequest import \
ClanLogPartialRequest, CLAN_LOG_FAX
import cwbot.util.DebugThreading as threading
# Clan-log timestamps are localized from Arizona time (no DST) to UTC below.
tz = pytz.timezone('America/Phoenix')
utc = pytz.utc
# name/id/xml: a known faxbot and the URL of its monster-list XML.
_Faxbot = namedtuple('Faxbot', ['name', 'id', 'xml'])
# monstername/forced/message: result of matching a user's fax request text.
_FaxMatch = namedtuple('FaxMatch', ['monstername', 'forced', 'message'])
# requestTime/requestId: bookkeeping for an in-flight fax request.
_FaxState = namedtuple('FaxState', ['requestTime', 'requestId'])
def utcTime():
    """ Epoch time in UTC """
    now_utc = time.gmtime()
    return calendar.timegm(now_utc)
class FaxMonsterEntry(object):
    """Container for a faxable monster: display name, fax code, originating
    faxbot, and any extra aliases added later.
    """

    def __init__(self, name, code, faxbot=None):
        # faxbot gained a default so fromDict() (and older two-argument
        # callers) keep working; it was previously required.
        self.name = name.strip()
        self.code = code.strip().lower()
        self.faxbot = faxbot
        self._otherNames = []

    @property
    def nameList(self):
        """ list of all possible names to reference this object, including
        the .name, .code, and all the ._otherNames """
        return [self.name.lower(), self.code] + self._otherNames

    def addAlias(self, name):
        """ an alias -- not the name or code, but another name for reference
        """
        self._otherNames.append(name.strip().lower())

    def contains(self, name):
        """True if `name` (case/whitespace-insensitive) refers to this entry."""
        return name.strip().lower() in self.nameList

    def __repr__(self):
        return "{{Fax-{}: {}}}".format(self.nameList[0],
                                       ', '.join(self.nameList[1:]))

    def toDict(self):
        """Serialize for persistence.

        BUG FIX: used to reference self.playerId, which this class never
        sets (it was replaced by .faxbot), so every call raised
        AttributeError.
        """
        return {'name': self.name, 'code': self.code,
                'other': self._otherNames, 'faxbot': self.faxbot}

    @classmethod
    def fromDict(cls, d):
        """Inverse of toDict().

        BUG FIX: the old version called cls(name, code) without the then-
        required faxbot argument (TypeError) and restored the defunct
        playerId attribute.  Older dicts without a 'faxbot' key simply
        yield faxbot=None.
        """
        obj = cls(d['name'], d['code'], d.get('faxbot'))
        obj._otherNames = d['other']
        return obj
class FaxModule2(BaseChatModule):
"""
A module that handles faxing, including fax lookup for unknown monster
codes, reporting on incoming faxes, and reporting what's in the fax
machine.
Configuration options:
faxbot_timeout - time to wait until giving up on a fax request [def. = 90]
url_timeout - time to try to load XML page before timing out [def. = 15]
[[[[xml]]]]
BOTNAME = URL_TO_XML
[[[[alias]]]]
ALIASNAME = ALIAS (monster alias name)
Configuration example:
[[[[xml]]]]
1 = http://hogsofdestiny.com/faxbot/faxbot.xml
2 = http://faust.kolbots.com/faustbot.xml
3 = https://sourceforge.net/p/easyfax/code/HEAD/tree/Easyfax.xml?format=raw
[[[[success]]]]
FaxBot = has copied
faustbot = has been delivered
Easyfax = fax is ready
[[[[alias]]]]
lobsterfrogman = lfm # now you can type '!fax lfm'
"""
requiredCapabilities = ['chat']
_name = "fax"
# guards all fax state shared between chat threads
__lock = threading.RLock()
# NOTE(review): _faxWait and _xmlMins are not used in the code visible
# here; presumably a wait (seconds) after a fax and the XML refresh
# interval (minutes) -- confirm against the rest of the class.
_faxWait = 60
_xmlMins = 30
# minimum seconds between clan-fax-log checks (see printLastFax)
_checkFrequency = 15
_defaultXml = {'1': "http://faust.kolbots.com/faustbot.xml",
               '2': "https://sourceforge.net/p/easyfax/"
                    "code/HEAD/tree/Easyfax.xml?format=raw"}
_defaultSuccess = {'faustbot': "has been delivered",
                   'Easyfax': "fax is ready"}
def __init__(self, manager, identity, config):
    """Initialize all fax-tracking state, then defer to the base class."""
    self._abortTime = None
    self._timeout = None
    self._xmlAddresses = None
    # set by initialize() once startup data is available
    self._finishInitialization = threading.Event()
    self._initialized = False
    # monster key -> list of FaxMonsterEntry (see getFaxMatch)
    self._monsters = defaultdict(list)
    self._requestQueue = deque()
    self._faxReply = None
    self._delayMode = threading.Event()
    self._delayStart = 0
    self._faxState = None
    self._faxCommands = []
    self._success = None
    self._lastXmlUpdate = 0
    self._lastFaxCheck = 0
    # last request the bot made to FaxBot
    self._lastRequest, self._lastRequestTime = None, None
    # last monster in the fax log
    self._lastFax, self._lastFaxTime = None, None
    # username of last faxer / last time bot got a message from faxbot
    self._lastFaxUname, self._lastFaxBotTime = None, None
    # (duplicate of the assignment above, kept from the original)
    self._lastFaxCheck = 0
    super(FaxModule2, self).__init__(manager, identity, config)
def _configure(self, config):
    """Read timeouts, XML addresses, success strings and aliases from
    `config`, writing defaults back for any missing entries."""
    try:
        self._abortTime = int(config.setdefault('faxbot_timeout', 90))
        self._timeout = int(config.setdefault('url_timeout', 15))
        self._xmlAddresses = config.setdefault('xml', self._defaultXml)
        successEntries = config.setdefault('success', self._defaultSuccess)
        # keys are bot names; stored lowercased for comparison
        self._success = dict((botName.lower(), msg)
                             for botName, msg in successEntries.items())
    except ValueError:
        raise Exception("Fax Module config error: "
                        "faxbot_timeout, url_timeout must be integral")
    self._alias = config.setdefault('alias', {'lobsterfrogman': 'lfm'})
def initialize(self, state, initData):
    # Unblock whatever waits on _finishInitialization; `state` and
    # `initData` are unused here.
    self._finishInitialization.set()
@property
def state(self):
    # this module persists no state between sessions
    return {}

@property
def initialState(self):
    # nothing to restore on first run either
    return {}
def getFaxMatch(self, args):
    '''Look up the monster in the list using fuzzy matching.

    Returns a _FaxMatch(monstername, forced, message).  Matching is
    attempted in order: explicit "force", exact name/code/alias match,
    then fuzzy (partial token set ratio) and substring matches; ambiguous
    results yield monstername=None with an explanatory message.
    '''
    splitArgs = args.split()

    # did we force?
    if any(s for s in splitArgs if s.strip().lower() == "force"):
        # make a new monster
        # NOTE(review): uses splitArgs[0] as the monster name, so
        # "force <name>" would force the literal word "force" -- confirm
        # the expected argument order is "<name> force".
        return _FaxMatch(splitArgs[0], True, 'forcing')

    # make list of all possible names/codes/aliases
    nameList = {}
    for k,v in self._monsters.items():
        names = set(chain.from_iterable(val.nameList for val in v))
        nameList[k] = names

    # collapse apostrophes and map _/- to spaces before comparing
    simplify = (lambda x: x.replace("'", "")
                .replace("_", " ")
                .replace("-", " ").lower())
    sArgs = simplify(args)

    # first, check for exact code/name/alias matches
    matches = []
    for k,names in nameList.items():
        if any(True for name in names if sArgs == simplify(name)):
            matches.append(k)
    if len(matches) == 1:
        return _FaxMatch(matches[0], False, 'exact match')

    # next, check for "close" matches
    # NOTE(review): max() below raises ValueError if _monsters is empty.
    scoreDiff = 15
    scores = {}
    for k,names in nameList.items():
        score1 = max(fuzz.partial_token_set_ratio(simplify(name), sArgs)
                     for name in names)
        scores[k] = score1
    maxScore = max(scores.values())
    # keep everything within scoreDiff of the best fuzzy score
    fuzzyMatchKeys = set(k for k,v in scores.items()
                         if v >= maxScore - scoreDiff)

    # also check for args as a subset of string or code
    detokenize = lambda x: ''.join(re.split(r"'|_|-| ", x)).lower()
    dArgs = detokenize(args)
    subsetMatchKeys = set()
    for k, names in nameList.items():
        if any(True for name in names if dArgs in detokenize(name)):
            subsetMatchKeys.add(k)

    ls = len(subsetMatchKeys)
    lf = len(fuzzyMatchKeys)
    matchKeys = subsetMatchKeys | fuzzyMatchKeys
    lm = len(matchKeys)

    if ls == 0 and lf == 1:
        m = matchKeys.pop()
        return _FaxMatch(m, False, "fuzzy match")
    elif lm == 1:
        m = matchKeys.pop()
        return _FaxMatch(m, False, "subset match")
    elif lm > 1 and lm < 6:
        # few candidates: list them for the user
        possibleMatchStr = ", ".join(
            (self._monsters[k][0].name for k in matchKeys))
        return _FaxMatch(None, False,
                         ("Did you mean one of: {}?"
                          .format(possibleMatchStr)))
    elif lm > 1:
        return _FaxMatch(None, False,
                         ("Matched {} monster names/codes; "
                          "please be more specific. Send \"!fax list\" for"
                          " monster list.".format(ls + lf)))

    return _FaxMatch(None, False, "No known monster with name/code "
                     "matching '{0}'. "
                     "Use '!fax {0} force' to force, "
                     "or send \"!fax list\" for a list."
                     .format(args))
    def checkForNewFax(self, announceInChat=True):
        """ See if a new fax has arrived, and possibly announce it if it has.

        Refreshes the cached fax-machine state via updateLastFax() and
        logs when the (monster, time, username) triple changed since the
        previous poll.

        NOTE(review): replyStr is never reassigned after initialization,
        so this method always returns None, and announceInChat is unused
        in this block -- presumably a chat announcement was intended but
        is not implemented here; confirm against callers.
        """
        with self.__lock:
            # Record the poll time so printLastFax() can rate-limit checks.
            self._lastFaxCheck = utcTime()
            replyStr = None
            # Snapshot the previous state before refreshing it.
            lastFaxTime = self._lastFaxTime
            lastFaxUname = self._lastFaxUname
            lastFaxMonster = self._lastFax
            event = self.updateLastFax()
            # A "new" fax is any change in time, sender, or monster.
            if (self._lastFax is not None and
                    (lastFaxTime != self._lastFaxTime or
                     lastFaxUname != self._lastFaxUname or
                     lastFaxMonster != self._lastFax)):
                self.log("Received new fax {}".format(event))
            return replyStr
    def updateLastFax(self):
        """ Update what's in the Fax machine

        Fetches the partial clan log, finds the newest fax event, and
        caches its monster, sender, and UTC epoch time on self. Returns
        the raw event dict, or None when no fax event is present.
        """
        with self.__lock:
            # suppress annoying output from pyKol
            kol.util.Report.removeOutputSection("*")
            try:
                r = ClanLogPartialRequest(self.session)
                log = self.tryRequest(r, numTries=5, initialDelay=0.25,
                                      scaleFactor=1.5)
            finally:
                # Always restore pyKol's reporting, even on request failure.
                kol.util.Report.addOutputSection("*")
            faxEvents = [event for event in log['entries']
                         if event['type'] == CLAN_LOG_FAX]
            # Entries are assumed newest-first -- TODO confirm against pyKol.
            lastEvent = None if len(faxEvents) == 0 else faxEvents[0]
            if lastEvent is None:
                self._lastFax = None
                self._lastFaxTime = None
                self._lastFaxUname = None
            else:
                self._lastFax = lastEvent['monster']
                # Convert the clan-log timestamp (tz-local) to a UTC epoch.
                lastFaxTimeAz = tz.localize(lastEvent['date'])
                lastFaxTimeUtc = lastFaxTimeAz.astimezone(utc)
                self._lastFaxTime = calendar.timegm(lastFaxTimeUtc.timetuple())
                self._lastFaxUname = lastEvent['userName']
            return lastEvent
def printLastFax(self):
""" Get the chat text to represent what's in the Fax machine. """
if utcTime() - self._lastFaxCheck >= self._checkFrequency:
self.checkForNewFax(False)
if self._lastFax is None:
return "I can't tell what's in the fax machine."
elapsed = utcTime() - self._lastFaxTime
timeStr = "{} minutes".format(int((elapsed+59) // 60))
return ("The fax has been holding a(n) {} for the last {}. "
"(Send \"!fax list\" for a list of monsters.)"
.format(self._lastFax, timeStr))
    def faxMonster(self, args, isPM):
        """Send a request, if not waiting on another request.

        In a PM, nothing is queued: the user is shown the whisper
        commands to contact the faxbots directly. In chat, the matched
        entries are appended to self._requestQueue for _heartbeat() to
        process, subject to the post-fax delay window.
        """
        with self.__lock:
            (monster, force, message) = self.getFaxMatch(args)
            matches = self._monsters.get(monster, [])
            if monster is None or not matches:
                # No usable match; message explains why.
                return message
            if isPM:
                # Don't queue from a PM; show the manual whisper commands.
                str1 = "Matched {} ({})\n".format(matches[0].name, message)
                return str1 + "\n".join("/w {} {}"
                                        .format(m.faxbot.name, m.code)
                                        for m in matches)
            if self._delayMode.is_set():
                # Still inside the cooldown window after the last fax.
                return ("Please wait {} more seconds to request a fax."
                        .format(int(self._faxWait
                                    - time.time() + self._delayStart)))
            if self._requestQueue:
                return "I am still waiting on my last request."
            self._requestQueue.extend(matches)
            return "Requested {} ({})...".format(matches[0].name, message)
    def _processCommand(self, message, cmd, args):
        """Handle the "!fax" chat command and capture faxbot PM replies.

        Returns reply text for "!fax" invocations ("list", a monster
        name, or empty for machine status); returns None otherwise.
        """
        if cmd == "fax":
            if args.lower() == "list":
                return self._sendMonsterList(message['userId'])
            if args != "":
                # "!fax NAME" -- request (or, in a PM, look up) a monster.
                isPM = (message['type'] == "private")
                return self.faxMonster(args, isPM)
            else:
                return self.printLastFax()
        with self.__lock:
            if self._faxState:
                # While a request is outstanding, record any PM from the
                # faxbot we whispered; _heartbeat() consumes _faxReply.
                if message.get('userId', 0) == self._faxState.requestId:
                    self.log("Received {} PM: {}".format(
                        self._faxState.requestId,
                        message['text']))
                    self._faxReply = message['text']
        return None
def _sendMonsterList(self, uid):
text = ("Available monsters:\n\n" +
"\n".join(sorted(self._monsters.keys())))
self.sendKmail(Kmail(uid, text))
return "Monster list sent."
def _refreshMonsterList(self):
genLen = lambda gen: sum(1 for _ in gen)
entryCount = genLen(chain.from_iterable(self._monsters.values()))
self.log("Updating xml... ({} entries)".format(entryCount))
for _,v in self._monsters.items():
v = [entry for entry in v
if entry.faxbot.xml in self._xmlAddresses.values()]
# clear empty entries
monsters = defaultdict(list)
monsters.update(
{k:v for k,v in self._monsters.items() if v})
self._monsters = monsters
entryCount2 = genLen(chain.from_iterable(self._monsters.values()))
if entryCount != entryCount2:
self._log("Removed {} entries due to config file mismatch."
.format(entryCount - entryCount2))
numTries = 3
for key in sorted(self._xmlAddresses.keys()):
address = self._xmlAddresses[key]
txt = None
for _ in range(numTries):
try:
txt = urlopen(address, timeout=self._timeout).read()
d = xmltodict.parse(txt)
except (HTTPError,
URLError,
socket.timeout,
socket.error,
ExpatError) as e:
self.log("Error loading webpage "
"for fax list: {}: {}"
.format(e.__class__.__name__, e.args))
else:
entryCount = genLen(chain.from_iterable(
self._monsters.values()))
d1 = d[d.keys()[0]]
try:
faxbot = _Faxbot(d1['botdata']['name'].encode('ascii'),
int(d1['botdata']['playerid']),
address)
except KeyError:
continue
monsters = d1['monsterlist']['monsterdata']
newMonsters = {}
for monster in monsters:
mname = unidecode(monster['actual_name']).lower()
code = unidecode(monster['command']).lower()
name = unidecode(monster['name'])
newMonsters[mname] = FaxMonsterEntry(name,
code,
faxbot)
for n,alias in self._alias.items():
if n.lower().strip() in [mname,
code,
name.lower().strip()]:
newMonsters[mname].addAlias(alias)
for k,v in self._monsters.items():
self._monsters[k] = [entry for entry in v
if entry.faxbot.xml != address]
for mname,monster in newMonsters.items():
self._monsters[mname].append(monster)
entryCount2 = genLen(chain.from_iterable(
self._monsters.values()))
# clear empty entries
monsters = defaultdict(list)
monsters.update(
{k:v for k,v in self._monsters.items() if v})
self._monsters = monsters
self.log("Net change of {} entries from {} xml ({} -> {})"
.format(entryCount2 - entryCount,
faxbot.name,
entryCount,
entryCount2))
break
self._lastXmlUpdate = time.time()
    def _heartbeat(self):
        """Periodic driver: initialize, watch replies, issue requests.

        One state-machine step per heartbeat:
        - finish deferred initialization (first xml download),
        - if a request is outstanding, match the faxbot's PM reply (or
          time it out) and announce the outcome in chat,
        - otherwise expire the post-fax cooldown, start the next queued
          request, or periodically refresh the xml monster list.
        """
        if self._finishInitialization.is_set():
            self._finishInitialization.clear()
            self._refreshMonsterList()
            self._initialized = True
        if self._initialized:
            with self.__lock:
                # are we waiting for a request?
                if self._faxState:
                    # check if we received a reply
                    request = self._requestQueue[0]
                    if self._faxReply:
                        # check if it matches
                        regex = self._success[
                            ''.join(request.faxbot.name.lower())]
                        if re.search(regex, self._faxReply):
                            # matched! Announce, drop the whole queue, and
                            # start the cooldown window.
                            self.chat("{} has delivered a(n) {}."
                                      .format(request.faxbot.name,
                                              request.name))
                            self._requestQueue.clear()
                            self._delayMode.set()
                            self._delayStart = time.time()
                            self._faxCommands = []
                        else:
                            # not a match. Fall through to the next queued
                            # faxbot; if none remain, give up with help text.
                            self.chat("{} reply: {}"
                                      .format(request.faxbot.name,
                                              self._faxReply))
                            self._requestQueue.popleft()
                            if not self._requestQueue:
                                self.chat("Could not receive fax. "
                                          "Try one of: {}"
                                          .format(", "
                                                  .join(self._faxCommands)))
                                self._faxCommands = []
                        # Reply consumed either way; clear the pending state.
                        self._faxReply = None
                        self._faxState = None
                    else:
                        # no fax reply yet; time the request out eventually.
                        if (time.time() - self._faxState.requestTime
                                > self._abortTime):
                            self.chat("{} did not reply.".format(
                                request.faxbot.name))
                            self._requestQueue.popleft()
                            self._faxState = None
                            self._faxReply = None
                            if not self._requestQueue:
                                self.chat("Could not receive fax. "
                                          "Try one of: {}"
                                          .format(", "
                                                  .join(self._faxCommands)))
                                self._faxCommands = []
                elif self._delayMode.is_set():
                    # Cooldown after a successful fax.
                    if time.time() - self._delayStart > self._faxWait:
                        self._delayMode.clear()
                        self._delayStart = 0
                elif self._requestQueue:
                    # Start the next queued request: whisper the faxbot.
                    request = self._requestQueue[0]
                    self.chat("Requesting {} from {}..."
                              .format(request.name,
                                      request.faxbot.name))
                    self._faxState = _FaxState(requestTime=time.time(),
                                               requestId=request.faxbot.id)
                    self.whisper(request.faxbot.id, request.code)
                    self._faxCommands.append("/w {} {}".format(
                        request.faxbot.name,
                        request.code))
                elif time.time() - self._lastXmlUpdate > 60 * self._xmlMins:
                    self._refreshMonsterList()
def _eventCallback(self, eData):
s = eData.subject
if s == "state":
if eData.to is None:
self._eventReply({
'warning': '(omitted for general state inquiry)'})
else:
self._eventReply(self.state)
def _availableCommands(self):
return {'fax': "!fax: check the contents of the fax machine. "
"'!fax MONSTERNAME' requests a fax from FaxBot."}
| |
# --
# Copyright (c) 2012-2014 Net-ng.
# All rights reserved.
#
# This software is licensed under the BSD License, as described in
# the file LICENSE.txt, which you should have received as part of
# this distribution.
# --
from .comp import Gallery, Asset, AssetCropper
from nagare import presentation, var, ajax, security
from nagare.i18n import _, _N
def render_image(self, h, comp, size, randomize=False, **kw):
    """Render an <img> tag for this asset at the requested size.

    When randomize is true, a unique query parameter is appended to the
    image URL (cache busting, e.g. after a fresh crop).
    """
    manager = self.assets_manager
    metadata = manager.get_metadata(self.filename)
    src = manager.get_image_url(self.filename, size)
    if randomize:
        src = src + '?r=' + h.generate_id()
    name = metadata['filename']
    return h.img(title=name, alt=name, src=src, **kw)
def render_file(self, h, comp, size, **kw):
kw['class'] += ' file_icon'
metadata = self.assets_manager.get_metadata(self.filename)
res = [h.img(title=metadata['filename'], alt=metadata['filename'],
src="img/file-icon.jpg", **kw)]
if size == 'medium':
res.append(h.span(metadata['filename']))
return res
# Dispatch table: MIME content-type -> renderer used by render_asset().
# Content-types not listed here fall back to render_file (generic icon).
CONTENT_TYPES = {'image/png': render_image,
                 'image/jpeg': render_image,
                 'image/pjpeg': render_image,
                 'image/gif': render_image}
@presentation.render_for(Gallery)
def render(self, h, comp, *args):
    """Default gallery view: badge counter plus the asset thumbnails.

    Editors get the overlay components (with per-asset menus); all other
    users get the plain anonymous rendering.
    """
    with h.div(class_='nbItems'):
        # Close any modal left open by a previous upload/crop action.
        h << h.script(u'YAHOO.kansha.app.closeModal();')
        h << comp.render(h, model='badge')
    with h.div(id="card-gallery"):
        if security.has_permissions('edit', self):
            for overlay in self.overlays:
                h << overlay
        else:
            for asset in self.assets:
                h << asset.render(h, model="anonymous")
    return h.root
@presentation.render_for(Gallery, "download")
def render_download(self, h, comp, *args):
    """Render the "Add file" upload control (editors only).

    The real file input and submit button are positioned off-screen; the
    visible label triggers the input, and a JS change handler submits the
    form once the client-side file-size check passes.
    """
    if security.has_permissions('edit', self):
        v_file = var.Var()
        submit_id = h.generate_id("attach_submit")
        input_id = h.generate_id("attach_input")
        # Visible button: a label bound to the hidden file input.
        h << h.label((h.i(class_='icon-file icon-grey'),
                      _("Add file")), class_='btn btn-small', for_=input_id)
        with h.form:
            h << h.script(
                u'''
        function valueChanged(e) {
            if (YAHOO.kansha.app.checkFileSize(this, %(max_size)s)) {
                YAHOO.util.Dom.get(%(submit_id)s).click();
                YAHOO.kansha.app.showModal('oip');
            } else {
                alert(%(error)s);
            }
        }
        YAHOO.util.Event.onDOMReady(function() {
            YAHOO.util.Event.on(%(input_id)s, 'change', valueChanged);
        });''' %
                {
                    'max_size': ajax.py2js(self.assets_manager.max_size),
                    'input_id': ajax.py2js(input_id),
                    'submit_id': ajax.py2js(submit_id),
                    'error': ajax.py2js(
                        _(u'Max file size exceeded')
                    ).decode('UTF-8')
                }
            )
            h << h.input(id=input_id, style="position:absolute;left:-1000px;", type="file", name="file", multiple="multiple", maxlength="100",).action(v_file)
            h << h.input(style="position:absolute;left:-1000px;", id=submit_id, type="submit").action(lambda: self.add_assets(v_file()))
    return h.root
@presentation.render_for(Gallery, model='badge')
def render_gallery_badge(self, h, *args):
    """Gallery badge for the card: file count with a pluralized tooltip.

    Renders nothing when the gallery has no assets.
    """
    if self.assets:
        # _N pluralizes: "file" vs "files" depending on the count.
        label = _N('file', 'files', len(self.assets))
        h << h.span(h.i(class_='icon-file icon-grey'), ' ', len(self.assets), class_='label', data_tooltip=label)
    return h.root
@presentation.render_for(Asset, model="anonymous")
def render_asset_anonymous(self, h, comp, model, *args):
    """Read-only asset view: a thumbnail link opening the full image."""
    url = self.assets_manager.get_image_url(self.filename)
    onclick = "window.open(%s);YAHOO.kansha.app.hideOverlay()" % ajax.py2js(url)
    thumb = comp.render(h, model="thumb")
    return h.a(thumb, onclick=onclick, target='_blank')
@presentation.render_for(Asset)
@presentation.render_for(Asset, model='medium')
@presentation.render_for(Asset, model='thumb')
@presentation.render_for(Asset, model='cover')
def render_asset(self, h, comp, model, *args):
    """Render the asset with the renderer matching its content-type.

    Unknown content-types fall back to the generic file icon. Covers get
    a cache-busting URL and an "is_cover" marker span.
    """
    metadata = self.assets_manager.get_metadata(self.filename)
    kw = {}
    if model == 'cover':
        # Cropping may change the cover image; force a re-fetch.
        kw['randomize'] = True
    kw['class'] = model
    out = []
    if self.is_cover:
        out.append(h.span(class_='is_cover'))
    renderer = CONTENT_TYPES.get(metadata['content-type'], render_file)
    out.append(renderer(self, h, comp, model, **kw))
    return out
@presentation.render_for(Asset, model='flow')
def render_asset_flow(self, h, comp, *args):
    """Activity-flow entry: "<author> added a file" plus a preview link."""
    with h.div(class_='comment'):
        with h.div(class_='left'):
            h << self.author.render(h, model='avatar')
        with h.div(class_='right'):
            h << self.author.render(h, model='fullname')
            h << _(' added a file ')
            h << comp.render(h, 'creation_date')
            with h.div(class_='contents'):
                with h.p:
                    # Synchronous renderer: a plain link, no partial refresh.
                    h << h.SyncRenderer().a(comp.render(h, model='medium'),
                                            href=self.assets_manager.get_image_url(self.filename),
                                            target='_blank')
    return h.root
@presentation.render_for(Asset, 'menu')
def render_overlay_menu(self, h, comp, *args):
    """Per-asset overlay menu: open, delete, and cover management.

    For images that are not already the cover, a cropper component is
    created so the user can select the cover region.
    """
    id_ = h.generate_id()
    # Async-root card info, needed to refresh the card after cropping.
    card_cmp, card_id, card_model = h.get_async_root().component, h.get_async_root().id, h.get_async_root().model
    with h.div(id=id_):
        with h.ul:
            h << h.li(h.a(
                _('Open'),
                target='_blank',
                onclick=(
                    "window.open(%s);"
                    "YAHOO.kansha.app.hideOverlay()" %
                    ajax.py2js(self.assets_manager.get_image_url(self.filename))
                )
            ))
            h << h.li(h.a(_('Delete')).action(lambda: comp.answer(('delete', self))))
            if self.is_image():
                if self.is_cover:
                    h << h.li(h.a(_('Remove cover')).action(
                        lambda: comp.answer(('remove_cover', self))))
                else:
                    # Open asset cropper. (The card_* values computed above
                    # were previously recomputed here redundantly.)
                    self.create_cropper_component(comp, card_cmp, card_id, card_model)
                    h << h.li(self.overlay_cropper)
    return h.root
@presentation.render_for(Asset, 'cropper_menu')
def render_asset_cropper_menu(self, h, comp, *args):
    """Label shown for the cropper entry in the asset overlay menu."""
    label = _('Make cover')
    h << h.span(label)
    return h.root
@presentation.render_for(Asset, model='crop')
def render_gallery_crop(self, h, comp, *args):
    """Embed the cropper; its answer is forwarded to self.end_crop()."""
    h << self.cropper.on_answer(lambda answ: self.end_crop(comp, answ))
    return h.root
@presentation.render_for(AssetCropper)
def render_gallery_cropper(self, h, comp, *args):
    """Render the crop dialog: hidden crop fields plus the JS crop widget.

    The YAHOO crop widget fills the hidden inputs client-side; "Done"
    answers the component with the (left, top, width, height) integers
    and refreshes the owning card component through an ajax update.
    """
    # Default crop box size when the user leaves a field empty.
    crop_width, crop_height = 425, 250
    h << h.p(_('Use the controls below to create the cover of your card.'))
    form_id, img_id = h.generate_id(), h.generate_id()
    with h.form:
        # Hidden fields updated by the crop widget.
        for crop_name in 'crop_left', 'crop_top', 'crop_width', 'crop_height':
            h << h.input(type='hidden', id=form_id + '_' + crop_name).action(getattr(self, crop_name))
        h << h.p(render_image(self.asset, h, comp, 'medium', id=img_id))
        h << h.script(
            "YAHOO.util.Event.onContentReady(%s,"
            "function(){YAHOO.kansha.app.initCrop(%s, %s, %s, %s)})" % (
                ajax.py2js(img_id),
                ajax.py2js(img_id),
                ajax.py2js(form_id),
                ajax.py2js(crop_width),
                ajax.py2js(crop_height)
            )
        )
        h << h.input(type='submit',
                     value=_('Done'),
                     class_='btn btn-primary btn-small').action(ajax.Update(render=lambda r: self.card_component.render(r, self.card_component_model),
                                                                            action=lambda: comp.answer((int(self.crop_left() or 0),
                                                                                                        int(self.crop_top() or 0),
                                                                                                        int(self.crop_width() or crop_width),
                                                                                                        int(self.crop_height() or crop_height))),
                                                                            component_to_update=self.card_component_id)
                     )
    return h.root
| |
#! /usr/bin/env python
import unittest
from redcap import Project, RedcapError
import semantic_version
# Pandas is optional: when it cannot be imported, the DataFrame-based
# tests below are skipped via @unittest.skipIf(skip_pd, ...).
skip_pd = False
try:
    import pandas as pd
except ImportError:
    skip_pd = True
class ProjectTests(unittest.TestCase):
    """Integration tests for redcap.Project.

    NOTE(review): these tests run against live REDCap test projects on
    redcap.vanderbilt.edu, so they require network access; the tokens
    below are shared test-project credentials, not real-study secrets.
    """
    url = 'https://redcap.vanderbilt.edu/api/'
    bad_url = 'https://redcap.vanderbilt.edu/api'  # missing trailing slash
    reg_token = '8E66DB6844D58E990075AFB51658A002'
    # Created once at class definition time (network calls happen here):
    # a longitudinal, a regular, a no-SSL-verify, and a survey project.
    long_proj = Project(url, '1387872621BBF1C17CC47FD8AE25FF54')
    reg_proj = Project(url, reg_token)
    ssl_proj = Project(url, reg_token, verify_ssl=False)
    survey_proj = Project(url, '37CAB1ABC2FEB3BB9D821DF13BA38A7B')

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def test_good_init(self):
        """Ensure basic instantiation """
        self.assertIsInstance(self.long_proj, Project)
        self.assertIsInstance(self.reg_proj, Project)
        self.assertIsInstance(self.ssl_proj, Project)

    def test_normal_attrs(self):
        """Ensure projects are created with all normal attrs"""
        for attr in ('metadata', 'field_names', 'field_labels', 'forms',
                     'events', 'arm_names', 'arm_nums', 'def_field'):
            self.assertTrue(hasattr(self.reg_proj, attr))

    def test_long_attrs(self):
        "proj.events/arm_names/arm_nums should not be empty in long projects"
        self.assertIsNotNone(self.long_proj.events)
        self.assertIsNotNone(self.long_proj.arm_names)
        self.assertIsNotNone(self.long_proj.arm_nums)

    def test_is_longitudinal(self):
        "Test the is_longitudinal method"
        self.assertFalse(self.reg_proj.is_longitudinal())
        self.assertTrue(self.long_proj.is_longitudinal())

    def test_regular_attrs(self):
        """proj.events/arm_names/arm_nums should be empty tuples"""
        for attr in 'events', 'arm_names', 'arm_nums':
            attr_obj = getattr(self.reg_proj, attr)
            self.assertIsNotNone(attr_obj)
            self.assertEqual(len(attr_obj), 0)

    def test_json_export(self):
        """ Make sure we get a list of dicts"""
        data = self.reg_proj.export_records()
        self.assertIsInstance(data, list)
        for record in data:
            self.assertIsInstance(record, dict)

    def test_long_export(self):
        """After determining a unique event name, make sure we get a
        list of dicts"""
        unique_event = self.long_proj.events[0]['unique_event_name']
        data = self.long_proj.export_records(events=[unique_event])
        self.assertIsInstance(data, list)
        for record in data:
            self.assertIsInstance(record, dict)

    def test_import_records(self):
        "Test record import"
        # Round-trip: re-import the project's own exported records.
        data = self.reg_proj.export_records()
        response = self.reg_proj.import_records(data)
        self.assertIn('count', response)
        self.assertNotIn('error', response)

    def test_import_exception(self):
        "Test record import throws RedcapError for bad import"
        data = self.reg_proj.export_records()
        data[0]['non_existent_key'] = 'foo'
        with self.assertRaises(RedcapError) as cm:
            self.reg_proj.import_records(data)
        exc = cm.exception
        self.assertIn('error', exc.args[0])

    def is_good_csv(self, csv_string):
        "Helper to test csv strings"
        # NOTE(review): basestring exists only on Python 2.
        return isinstance(csv_string, basestring)

    def test_csv_export(self):
        """Test valid csv export """
        csv = self.reg_proj.export_records(format='csv')
        self.assertTrue(self.is_good_csv(csv))

    def test_metadata_export(self):
        """Test valid metadata csv export"""
        csv = self.reg_proj.export_metadata(format='csv')
        self.assertTrue(self.is_good_csv(csv))

    def test_bad_creds(self):
        "Test that exceptions are raised with bad URL or tokens"
        with self.assertRaises(RedcapError):
            Project(self.bad_url, self.reg_token)
        with self.assertRaises(RedcapError):
            Project(self.url, '1')

    def test_fem_export(self):
        """ Test fem export in json format gives list of dicts"""
        fem = self.long_proj.export_fem(format='json')
        self.assertIsInstance(fem, list)
        for arm in fem:
            self.assertIsInstance(arm, dict)

    def test_file_export(self):
        """Test file export and proper content-type parsing"""
        record, field = '1', 'file'
        #Upload first to make sure file is there
        self.import_file()
        # Now export it
        content, headers = self.reg_proj.export_file(record, field)
        self.assertIsInstance(content, basestring)
        # We should at least get the filename in the headers
        for key in ['name']:
            self.assertIn(key, headers)
        # needs to raise ValueError for exporting non-file fields
        with self.assertRaises(ValueError):
            self.reg_proj.export_file(record=record, field='dob')
        # Delete and make sure we get an RedcapError with next export
        self.reg_proj.delete_file(record, field)
        with self.assertRaises(RedcapError):
            self.reg_proj.export_file(record, field)

    def import_file(self):
        # Helper (not a test): upload the fixture file to record '1'.
        upload_fname = self.upload_fname()
        with open(upload_fname, 'r') as fobj:
            response = self.reg_proj.import_file('1', 'file', upload_fname, fobj)
        return response

    def upload_fname(self):
        # Helper: path of the data.txt fixture next to this test module.
        import os
        this_dir, this_fname = os.path.split(__file__)
        return os.path.join(this_dir, 'data.txt')

    def test_file_import(self):
        "Test file import"
        # Make sure a well-formed request doesn't throw RedcapError
        try:
            response = self.import_file()
        except RedcapError:
            self.fail("Shouldn't throw RedcapError for successful imports")
        self.assertTrue('error' not in response)
        # Test importing a file to a non-file field raises a ValueError
        fname = self.upload_fname()
        with open(fname, 'r') as fobj:
            with self.assertRaises(ValueError):
                response = self.reg_proj.import_file('1', 'first_name',
                                                     fname, fobj)

    def test_file_delete(self):
        "Test file deletion"
        # upload a file
        fname = self.upload_fname()
        with open(fname, 'r') as fobj:
            self.reg_proj.import_file('1', 'file', fname, fobj)
        # make sure deleting doesn't raise
        try:
            self.reg_proj.delete_file('1', 'file')
        except RedcapError:
            self.fail("Shouldn't throw RedcapError for successful deletes")

    def test_user_export(self):
        "Test user export"
        users = self.reg_proj.export_users()
        # A project must have at least one user
        self.assertTrue(len(users) > 0)
        req_keys = ['firstname', 'lastname', 'email', 'username',
                    'expiration', 'data_access_group', 'data_export',
                    'forms']
        for user in users:
            for key in req_keys:
                self.assertIn(key, user)

    def test_verify_ssl(self):
        """Test argument making for SSL verification"""
        # Test we won't verify SSL cert for non-verified project
        post_kwargs = self.ssl_proj._kwargs()
        self.assertIn('verify', post_kwargs)
        self.assertFalse(post_kwargs['verify'])
        # Test we do verify SSL cert in normal project
        post_kwargs = self.reg_proj._kwargs()
        self.assertIn('verify', post_kwargs)
        self.assertTrue(post_kwargs['verify'])

    def test_export_data_access_groups(self):
        """Test we get 'redcap_data_access_group' in exported data"""
        records = self.reg_proj.export_records(export_data_access_groups=True)
        for record in records:
            self.assertIn('redcap_data_access_group', record)
        # When not passed, that key shouldn't be there
        records = self.reg_proj.export_records()
        for record in records:
            self.assertNotIn('redcap_data_access_group', record)

    def test_export_survey_fields(self):
        """Test that we get the appropriate survey keys in the exported
        data.

        Note that the 'demographics' form has been setup as the survey
        in the `survey_proj` project. The _timestamp field will vary for
        users as their survey form will be named differently"""
        records = self.survey_proj.export_records(export_survey_fields=True)
        for record in records:
            self.assertIn('redcap_survey_identifier', record)
            self.assertIn('demographics_timestamp', record)
        # The regular project doesn't have a survey setup. Users should
        # be able this argument as True but it winds up a no-op.
        records = self.reg_proj.export_records(export_survey_fields=True)
        for record in records:
            self.assertNotIn('redcap_survey_identifier', record)
            self.assertNotIn('demographics_timestamp', record)

    @unittest.skipIf(skip_pd, "Couldn't import pandas")
    def test_metadata_to_df(self):
        """Test metadata export --> DataFrame"""
        df = self.reg_proj.export_metadata(format='df')
        self.assertIsInstance(df, pd.DataFrame)

    @unittest.skipIf(skip_pd, "Couldn't import pandas")
    def test_export_to_df(self):
        """Test export --> DataFrame"""
        df = self.reg_proj.export_records(format='df')
        self.assertIsInstance(df, pd.DataFrame)
        # Test it's a normal index
        self.assertTrue(hasattr(df.index, 'name'))
        # Test for a MultiIndex on longitudinal df
        long_df = self.long_proj.export_records(format='df', event_name='raw')
        self.assertTrue(hasattr(long_df.index, 'names'))

    @unittest.skipIf(skip_pd, "Couldn't import pandas")
    def test_export_df_kwargs(self):
        """Test passing kwargs to export DataFrame construction"""
        df = self.reg_proj.export_records(format='df',
                                          df_kwargs={'index_col': 'first_name'})
        self.assertEqual(df.index.name, 'first_name')
        self.assertTrue('study_id' in df)

    @unittest.skipIf(skip_pd, "Couldn't import pandas")
    def test_metadata_df_kwargs(self):
        """Test passing kwargs to metadata DataFrame construction"""
        df = self.reg_proj.export_metadata(format='df',
                                           df_kwargs={'index_col': 'field_label'})
        self.assertEqual(df.index.name, 'field_label')
        self.assertTrue('field_name' in df)

    @unittest.skipIf(skip_pd, "Couldn't import pandas")
    def test_import_dataframe(self):
        """Test importing a pandas.DataFrame"""
        df = self.reg_proj.export_records(format='df')
        # grrr coerce implicilty converted floats to str(int())
        for col in ['matrix1', 'matrix2', 'matrix3', 'sex']:
            df[col] = map(lambda x: str(int(x)) if pd.notnull(x) else '', df[col])
        response = self.reg_proj.import_records(df)
        self.assertIn('count', response)
        self.assertNotIn('error', response)
        long_df = self.long_proj.export_records(event_name='raw', format='df')
        response = self.long_proj.import_records(long_df)
        self.assertIn('count', response)
        self.assertNotIn('error', response)

    def test_date_formatting(self):
        """Test date_format parameter"""

        def import_factory(date_string):
            # Minimal single-record payload carrying the dob string.
            return [{'study_id': '1',
                     'dob': date_string}]

        # Default YMD with dashes
        import_ymd = import_factory('2000-01-01')
        response = self.reg_proj.import_records(import_ymd)
        self.assertEqual(response['count'], 1)

        # DMY with /
        import_dmy = import_factory('31/01/2000')
        response = self.reg_proj.import_records(import_dmy, date_format='DMY')
        self.assertEqual(response['count'], 1)

        import_mdy = import_factory('12/31/2000')
        response = self.reg_proj.import_records(import_mdy, date_format='MDY')
        self.assertEqual(response['count'], 1)

    def test_get_version(self):
        """Testing retrieval of REDCap version associated with Project"""
        self.assertTrue(isinstance(semantic_version.Version('1.0.0'), type(self.long_proj.redcap_version)))

    def test_export_checkbox_labels(self):
        """Testing the export of checkbox labels as field values"""
        self.assertEqual(
            self.reg_proj.export_records(
                raw_or_label='label',
                export_checkbox_labels=True)[0]['matcheck1___1'],
            'Foo'
        )
| |
from __future__ import print_function, division
from sympy.core import Mul, sympify
from sympy.matrices.expressions.matexpr import (
MatrixExpr, ShapeError, OneMatrix, ZeroMatrix
)
from sympy.strategies import (
unpack, flatten, condition, exhaust, rm_id, sort
)
def hadamard_product(*matrices):
    """
    Return the elementwise (aka Hadamard) product of matrices.

    Examples
    ========

    >>> from sympy.matrices import hadamard_product, MatrixSymbol
    >>> A = MatrixSymbol('A', 2, 3)
    >>> B = MatrixSymbol('B', 2, 3)
    >>> hadamard_product(A)
    A
    >>> hadamard_product(A, B)
    HadamardProduct(A, B)
    >>> hadamard_product(A, B)[0, 1]
    A[0, 1]*B[0, 1]
    """
    if not matrices:
        raise TypeError("Empty Hadamard product is undefined")
    # All factors must be matrices of identical shape.
    validate(*matrices)
    if len(matrices) == 1:
        return matrices[0]
    else:
        # NOTE(review): identity matrices are dropped here, but the
        # identity element of the *Hadamard* product is OneMatrix, not
        # Identity; dropping every factor would also leave an empty
        # HadamardProduct. Confirm this filter is intentional.
        matrices = [i for i in matrices if not i.is_Identity]
        return HadamardProduct(*matrices).doit()
class HadamardProduct(MatrixExpr):
    """
    Elementwise product of matrix expressions

    Examples
    ========

    Hadamard product for matrix symbols:

    >>> from sympy.matrices import hadamard_product, HadamardProduct, MatrixSymbol
    >>> A = MatrixSymbol('A', 5, 5)
    >>> B = MatrixSymbol('B', 5, 5)
    >>> isinstance(hadamard_product(A, B), HadamardProduct)
    True

    Notes
    =====

    This is a symbolic object that simply stores its argument without
    evaluating it. To actually compute the product, use the function
    ``hadamard_product()`` or ``HadamardProduct.doit``
    """
    is_HadamardProduct = True

    def __new__(cls, *args, **kwargs):
        args = list(map(sympify, args))
        # check=False lets internal callers skip shape validation.
        check = kwargs.get('check', True)
        if check:
            validate(*args)
        return super(HadamardProduct, cls).__new__(cls, *args)

    @property
    def shape(self):
        # All factors share one shape (enforced by validate()).
        return self.args[0].shape

    def _entry(self, i, j, **kwargs):
        # (A .* B .* ...)[i, j] == A[i, j] * B[i, j] * ...
        return Mul(*[arg._entry(i, j, **kwargs) for arg in self.args])

    def _eval_transpose(self):
        # Transposition distributes over the elementwise product.
        from sympy.matrices.expressions.transpose import transpose
        return HadamardProduct(*list(map(transpose, self.args)))

    def doit(self, **ignored):
        expr = self.func(*[i.doit(**ignored) for i in self.args])
        # Check for explicit matrices:
        from sympy import MatrixBase
        from sympy.matrices.immutable import ImmutableMatrix
        explicit = [i for i in expr.args if isinstance(i, MatrixBase)]
        if explicit:
            # Fold all explicit matrices into one elementwise product.
            remainder = [i for i in expr.args if i not in explicit]
            expl_mat = ImmutableMatrix([
                Mul.fromiter(i) for i in zip(*explicit)
            ]).reshape(*self.shape)
            expr = HadamardProduct(*([expl_mat] + remainder))
        return canonicalize(expr)

    def _eval_derivative(self, x):
        # Product rule: differentiate one factor at a time and sum.
        from sympy import Add
        terms = []
        args = list(self.args)
        for i in range(len(args)):
            factors = args[:i] + [args[i].diff(x)] + args[i+1:]
            terms.append(hadamard_product(*factors))
        return Add.fromiter(terms)

    def _eval_derivative_matrix_lines(self, x):
        from sympy.core.expr import ExprBuilder
        from sympy.codegen.array_utils import CodegenArrayDiagonal, CodegenArrayTensorProduct
        from sympy.matrices.expressions.matexpr import _make_matrix
        with_x_ind = [i for i, arg in enumerate(self.args) if arg.has(x)]
        lines = []
        for ind in with_x_ind:
            left_args = self.args[:ind]
            right_args = self.args[ind+1:]
            d = self.args[ind]._eval_derivative_matrix_lines(x)
            # Hadamard product of the remaining (non-differentiated) factors.
            hadam = hadamard_product(*(right_args + left_args))
            diagonal = [(0, 2), (3, 4)]
            # Skip diagonalization over degenerate (size-1) dimensions.
            diagonal = [e for j, e in enumerate(diagonal) if self.shape[j] != 1]
            for i in d:
                l1 = i._lines[i._first_line_index]
                l2 = i._lines[i._second_line_index]
                subexpr = ExprBuilder(
                    CodegenArrayDiagonal,
                    [
                        ExprBuilder(
                            CodegenArrayTensorProduct,
                            [
                                ExprBuilder(_make_matrix, [l1]),
                                hadam,
                                ExprBuilder(_make_matrix, [l2]),
                            ]
                        ),
                    ] + diagonal,  # turn into *diagonal after dropping Python 2.7
                )
                i._first_pointer_parent = subexpr.args[0].args[0].args
                i._first_pointer_index = 0
                i._second_pointer_parent = subexpr.args[0].args[2].args
                i._second_pointer_index = 0
                i._lines = [subexpr]
                lines.append(i)
        return lines
def validate(*args):
    """Ensure every argument is a matrix and that all shapes agree.

    Raises TypeError when a non-matrix is mixed in, and ShapeError when
    the shapes are not all identical.
    """
    for arg in args:
        if not arg.is_Matrix:
            raise TypeError("Mix of Matrix and Scalar symbols")
    A = args[0]
    for B in args[1:]:
        if A.shape != B.shape:
            raise ShapeError("Matrices %s and %s are not aligned" % (A, B))
# TODO: Implement an algorithm for rewriting a Hadamard product as a
# diagonal matrix when an identity matrix appears as a matmul factor.
def canonicalize(x):
    """Canonicalize the Hadamard product ``x`` with mathematical properties.

    Examples
    ========

    >>> from sympy.matrices.expressions import MatrixSymbol, HadamardProduct
    >>> from sympy.matrices.expressions import OneMatrix, ZeroMatrix
    >>> from sympy.matrices.expressions.hadamard import canonicalize
    >>> from sympy import init_printing
    >>> init_printing(use_unicode=False)

    >>> A = MatrixSymbol('A', 2, 2)
    >>> B = MatrixSymbol('B', 2, 2)
    >>> C = MatrixSymbol('C', 2, 2)

    Hadamard product associativity:

    >>> X = HadamardProduct(A, HadamardProduct(B, C))
    >>> X
    A.*(B.*C)
    >>> canonicalize(X)
    A.*B.*C

    Hadamard product commutativity:

    >>> X = HadamardProduct(A, B)
    >>> Y = HadamardProduct(B, A)
    >>> X
    A.*B
    >>> Y
    B.*A
    >>> canonicalize(X)
    A.*B
    >>> canonicalize(Y)
    A.*B

    Hadamard product identity:

    >>> X = HadamardProduct(A, OneMatrix(2, 2))
    >>> X
    A.*1
    >>> canonicalize(X)
    A

    Absorbing element of Hadamard product:

    >>> X = HadamardProduct(A, ZeroMatrix(2, 2))
    >>> X
    A.*0
    >>> canonicalize(X)
    0

    Rewriting to Hadamard Power

    >>> X = HadamardProduct(A, A, A)
    >>> X
    A.*A.*A
    >>> canonicalize(X)
     .3
    A

    Notes
    =====

    As the Hadamard product is associative, nested products can be flattened.

    The Hadamard product is commutative so that factors can be sorted for
    canonical form.

    A matrix of only ones is an identity for Hadamard product,
    so every matrices of only ones can be removed.

    Any zero matrix will make the whole product a zero matrix.

    Duplicate elements can be collected and rewritten as HadamardPower

    References
    ==========

    .. [1] https://en.wikipedia.org/wiki/Hadamard_product_(matrices)
    """
    from sympy.core.compatibility import default_sort_key

    # Associativity
    # Flatten nested HadamardProducts until a fixed point is reached.
    rule = condition(
        lambda x: isinstance(x, HadamardProduct),
        flatten
    )
    fun = exhaust(rule)
    x = fun(x)

    # Identity
    # OneMatrix is the identity element of the elementwise product.
    fun = condition(
        lambda x: isinstance(x, HadamardProduct),
        rm_id(lambda x: isinstance(x, OneMatrix))
    )
    x = fun(x)

    # Absorbing by Zero Matrix
    def absorb(x):
        # Any zero factor makes the whole elementwise product zero.
        if any(isinstance(c, ZeroMatrix) for c in x.args):
            return ZeroMatrix(*x.shape)
        else:
            return x
    fun = condition(
        lambda x: isinstance(x, HadamardProduct),
        absorb
    )
    x = fun(x)

    # Rewriting with HadamardPower
    # Collect repeated factors: A.*A.*A -> A .** 3.
    if isinstance(x, HadamardProduct):
        from collections import Counter
        tally = Counter(x.args)

        new_arg = []
        for base, exp in tally.items():
            if exp == 1:
                new_arg.append(base)
            else:
                new_arg.append(HadamardPower(base, exp))

        x = HadamardProduct(*new_arg)

    # Commutativity
    fun = condition(
        lambda x: isinstance(x, HadamardProduct),
        sort(default_sort_key)
    )
    x = fun(x)

    # Unpacking
    # A single remaining factor is returned bare (no product wrapper).
    x = unpack(x)
    return x
def hadamard_power(base, exp):
    """Shorthand constructor for elementwise (Hadamard) powers.

    ``exp == 1`` returns ``base`` unchanged; a non-matrix base falls
    back to ordinary exponentiation ``base**exp``; a matrix exponent is
    rejected here (construct HadamardPower directly for that form).
    """
    base = sympify(base)
    exp = sympify(exp)
    if exp == 1:
        # x**1 == x holds elementwise as well.
        return base
    if not base.is_Matrix:
        return base**exp
    if exp.is_Matrix:
        raise ValueError("cannot raise expression to a matrix")
    return HadamardPower(base, exp)
class HadamardPower(MatrixExpr):
    r"""
    Elementwise power of matrix expressions

    Parameters
    ==========

    base : scalar or matrix

    exp : scalar or matrix

    Notes
    =====

    There are four definitions for the hadamard power which can be used.
    Let's consider `A, B` as `(m, n)` matrices, and `a, b` as scalars.

    Matrix raised to a scalar exponent:

    .. math::
        A^{\circ b} = \begin{bmatrix}
        A_{0, 0}^b & A_{0, 1}^b & \cdots & A_{0, n-1}^b \\
        A_{1, 0}^b & A_{1, 1}^b & \cdots & A_{1, n-1}^b \\
        \vdots & \vdots & \ddots & \vdots \\
        A_{m-1, 0}^b & A_{m-1, 1}^b & \cdots & A_{m-1, n-1}^b
        \end{bmatrix}

    Scalar raised to a matrix exponent:

    .. math::
        a^{\circ B} = \begin{bmatrix}
        a^{B_{0, 0}} & a^{B_{0, 1}} & \cdots & a^{B_{0, n-1}} \\
        a^{B_{1, 0}} & a^{B_{1, 1}} & \cdots & a^{B_{1, n-1}} \\
        \vdots & \vdots & \ddots & \vdots \\
        a^{B_{m-1, 0}} & a^{B_{m-1, 1}} & \cdots & a^{B_{m-1, n-1}}
        \end{bmatrix}

    Matrix raised to a matrix exponent:

    .. math::
        A^{\circ B} = \begin{bmatrix}
        A_{0, 0}^{B_{0, 0}} & A_{0, 1}^{B_{0, 1}} &
        \cdots & A_{0, n-1}^{B_{0, n-1}} \\
        A_{1, 0}^{B_{1, 0}} & A_{1, 1}^{B_{1, 1}} &
        \cdots & A_{1, n-1}^{B_{1, n-1}} \\
        \vdots & \vdots &
        \ddots & \vdots \\
        A_{m-1, 0}^{B_{m-1, 0}} & A_{m-1, 1}^{B_{m-1, 1}} &
        \cdots & A_{m-1, n-1}^{B_{m-1, n-1}}
        \end{bmatrix}

    Scalar raised to a scalar exponent:

    .. math::
        a^{\circ b} = a^b
    """

    def __new__(cls, base, exp):
        base = sympify(base)
        exp = sympify(exp)
        # scalar ** scalar degenerates to an ordinary Pow, so no
        # HadamardPower node is created in that case
        if base.is_scalar and exp.is_scalar:
            return base ** exp
        # the elementwise matrix/matrix definition requires equal shapes
        if base.is_Matrix and exp.is_Matrix and base.shape != exp.shape:
            raise ValueError(
                'The shape of the base {} and '
                'the shape of the exponent {} do not match.'
                .format(base.shape, exp.shape)
            )
        obj = super(HadamardPower, cls).__new__(cls, base, exp)
        return obj

    @property
    def base(self):
        """The base operand (scalar or matrix)."""
        return self._args[0]

    @property
    def exp(self):
        """The exponent operand (scalar or matrix)."""
        return self._args[1]

    @property
    def shape(self):
        # __new__ guarantees at least one operand is a matrix, and that
        # if both are matrices their shapes agree.
        if self.base.is_Matrix:
            return self.base.shape
        return self.exp.shape

    def _entry(self, i, j, **kwargs):
        """Entry (i, j): base[i, j] ** exp[i, j], broadcasting scalars."""
        base = self.base
        exp = self.exp
        if base.is_Matrix:
            a = base._entry(i, j, **kwargs)
        elif base.is_scalar:
            a = base
        else:
            raise ValueError(
                'The base {} must be a scalar or a matrix.'.format(base))
        if exp.is_Matrix:
            b = exp._entry(i, j, **kwargs)
        elif exp.is_scalar:
            b = exp
        else:
            raise ValueError(
                'The exponent {} must be a scalar or a matrix.'.format(exp))
        return a ** b

    def _eval_transpose(self):
        # NOTE(review): only the base is transposed; this is valid for a
        # scalar exponent, but a matrix exponent would need transposing
        # as well — confirm matrix-exponent instances never reach here.
        from sympy.matrices.expressions.transpose import transpose
        return HadamardPower(transpose(self.base), self.exp)

    def _eval_derivative(self, x):
        # Differentiate via base**exp = exp(exp * log(base)), elementwise:
        # d(base**exp) = (d(exp)*log(base) + exp*d(log(base))) .* base**exp
        from sympy import log
        dexp = self.exp.diff(x)
        logbase = self.base.applyfunc(log)
        dlbase = logbase.diff(x)
        return hadamard_product(
            dexp*logbase + self.exp*dlbase,
            self
        )

    def _eval_derivative_matrix_lines(self, x):
        """Build codegen "matrix lines" for matrix-calculus derivatives.

        Splices d(A**e)/dA = e * A**(e-1) (elementwise) into each line
        produced by the base, as a diagonalized tensor product.
        """
        from sympy.codegen.array_utils import CodegenArrayTensorProduct
        from sympy.codegen.array_utils import CodegenArrayDiagonal
        from sympy.core.expr import ExprBuilder
        from sympy.matrices.expressions.matexpr import _make_matrix
        lr = self.base._eval_derivative_matrix_lines(x)
        for i in lr:
            diagonal = [(1, 2), (3, 4)]
            # drop diagonal axes that collapse to size 1
            diagonal = [e for j, e in enumerate(diagonal) if self.base.shape[j] != 1]
            l1 = i._lines[i._first_line_index]
            l2 = i._lines[i._second_line_index]
            subexpr = ExprBuilder(
                CodegenArrayDiagonal,
                [
                    ExprBuilder(
                        CodegenArrayTensorProduct,
                        [
                            ExprBuilder(_make_matrix, [l1]),
                            self.exp*hadamard_power(self.base, self.exp-1),
                            ExprBuilder(_make_matrix, [l2]),
                        ]
                    ),
                ] + diagonal,  # turn into *diagonal after dropping Python 2.7
                validator=CodegenArrayDiagonal._validate
            )
            # re-point the line bookkeeping into the freshly built tree
            i._first_pointer_parent = subexpr.args[0].args[0].args
            i._first_pointer_index = 0
            i._first_line_index = 0
            i._second_pointer_parent = subexpr.args[0].args[2].args
            i._second_pointer_index = 0
            i._second_line_index = 0
            i._lines = [subexpr]
        return lr
| |
"""Support for OwnTracks."""
from collections import defaultdict
import json
import logging
import re
from aiohttp.web import json_response
import voluptuous as vol
from homeassistant.components import cloud, mqtt, webhook
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import (
ATTR_GPS_ACCURACY,
ATTR_LATITUDE,
ATTR_LONGITUDE,
CONF_WEBHOOK_ID,
Platform,
)
from homeassistant.core import HomeAssistant, callback
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.typing import ConfigType
from homeassistant.setup import async_when_setup
from .config_flow import CONF_SECRET
from .const import DOMAIN
from .messages import async_handle_message, encrypt_message
_LOGGER = logging.getLogger(__name__)
CONF_MAX_GPS_ACCURACY = "max_gps_accuracy"
CONF_WAYPOINT_IMPORT = "waypoints"
CONF_WAYPOINT_WHITELIST = "waypoint_whitelist"
CONF_MQTT_TOPIC = "mqtt_topic"
CONF_REGION_MAPPING = "region_mapping"
CONF_EVENTS_ONLY = "events_only"
BEACON_DEV_ID = "beacon"
PLATFORMS = [Platform.DEVICE_TRACKER]
DEFAULT_OWNTRACKS_TOPIC = "owntracks/#"
# YAML configuration schema. The webhook id can no longer be set via YAML
# (cv.removed flags it if still present); it lives in the config entry.
CONFIG_SCHEMA = vol.All(
    cv.removed(CONF_WEBHOOK_ID),
    vol.Schema(
        {
            vol.Optional(DOMAIN, default={}): {
                vol.Optional(CONF_MAX_GPS_ACCURACY): vol.Coerce(float),
                vol.Optional(CONF_WAYPOINT_IMPORT, default=True): cv.boolean,
                vol.Optional(CONF_EVENTS_ONLY, default=False): cv.boolean,
                vol.Optional(
                    CONF_MQTT_TOPIC, default=DEFAULT_OWNTRACKS_TOPIC
                ): mqtt.valid_subscribe_topic,
                vol.Optional(CONF_WAYPOINT_WHITELIST): vol.All(
                    cv.ensure_list, [cv.string]
                ),
                # The secret is either one shared key or a mapping of keys.
                vol.Optional(CONF_SECRET): vol.Any(
                    vol.Schema({vol.Optional(cv.string): cv.string}), cv.string
                ),
                vol.Optional(CONF_REGION_MAPPING, default={}): dict,
            }
        },
        extra=vol.ALLOW_EXTRA,
    ),
)
async def async_setup(hass: HomeAssistant, config: ConfigType) -> bool:
    """Initialize OwnTracks component."""
    # Seed the shared domain storage consumed by async_setup_entry.
    domain_data = {
        "config": config[DOMAIN],
        "devices": {},
        "unsub": None,
    }
    hass.data[DOMAIN] = domain_data
    return True
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
    """Set up OwnTracks entry."""
    # YAML options take precedence; secret falls back to the entry data.
    config = hass.data[DOMAIN]["config"]
    max_gps_accuracy = config.get(CONF_MAX_GPS_ACCURACY)
    waypoint_import = config.get(CONF_WAYPOINT_IMPORT)
    waypoint_whitelist = config.get(CONF_WAYPOINT_WHITELIST)
    secret = config.get(CONF_SECRET) or entry.data[CONF_SECRET]
    region_mapping = config.get(CONF_REGION_MAPPING)
    events_only = config.get(CONF_EVENTS_ONLY)
    mqtt_topic = config.get(CONF_MQTT_TOPIC)
    context = OwnTracksContext(
        hass,
        secret,
        max_gps_accuracy,
        waypoint_import,
        waypoint_whitelist,
        region_mapping,
        events_only,
        mqtt_topic,
    )
    # CONF_WEBHOOK_ID is removed from YAML (see CONFIG_SCHEMA), so this
    # normally comes from the config entry data.
    webhook_id = config.get(CONF_WEBHOOK_ID) or entry.data[CONF_WEBHOOK_ID]
    hass.data[DOMAIN]["context"] = context
    # Defer the MQTT subscription until the mqtt integration is ready.
    async_when_setup(hass, "mqtt", async_connect_mqtt)
    webhook.async_register(hass, DOMAIN, "OwnTracks", webhook_id, handle_webhook)
    hass.config_entries.async_setup_platforms(entry, PLATFORMS)
    # Route parsed OwnTracks messages to the message handler; keep the
    # unsubscribe callback so async_unload_entry can disconnect it.
    hass.data[DOMAIN]["unsub"] = hass.helpers.dispatcher.async_dispatcher_connect(
        DOMAIN, async_handle_message
    )
    return True
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
    """Unload an OwnTracks config entry."""
    # Stop accepting webhook pushes first, then tear down the platforms.
    webhook.async_unregister(hass, entry.data[CONF_WEBHOOK_ID])
    platforms_unloaded = await hass.config_entries.async_unload_platforms(
        entry, PLATFORMS
    )
    # Disconnect the dispatcher subscription created in async_setup_entry.
    hass.data[DOMAIN]["unsub"]()
    return platforms_unloaded
async def async_remove_entry(hass: HomeAssistant, entry: ConfigEntry) -> None:
    """Remove an OwnTracks config entry."""
    # Only entries backed by a Home Assistant Cloud webhook need cleanup.
    if entry.data.get("cloudhook"):
        await cloud.async_delete_cloudhook(hass, entry.data[CONF_WEBHOOK_ID])
async def async_connect_mqtt(hass, component):
    """Subscribe to MQTT topic."""
    context = hass.data[DOMAIN]["context"]

    async def async_handle_mqtt_message(msg):
        """Handle incoming OwnTracks message."""
        try:
            message = json.loads(msg.payload)
        except ValueError:
            # If invalid JSON
            _LOGGER.error("Unable to parse payload as JSON: %s", msg.payload)
            return
        # Attach the originating MQTT topic to the payload before fan-out.
        message["topic"] = msg.topic
        hass.helpers.dispatcher.async_dispatcher_send(DOMAIN, hass, context, message)

    # QoS 1: at-least-once delivery for location updates.
    await mqtt.async_subscribe(hass, context.mqtt_topic, async_handle_mqtt_message, 1)
    return True
async def handle_webhook(hass, webhook_id, request):
    """Handle webhook callback.

    iOS sets the "topic" as part of the payload.
    Android does not set a topic but adds headers to the request.
    """
    context = hass.data[DOMAIN]["context"]
    # Strip a trailing '/#' wildcard so a per-user topic can be synthesized.
    topic_base = re.sub("/#$", "", context.mqtt_topic)
    try:
        message = await request.json()
    except ValueError:
        _LOGGER.warning("Received invalid JSON from OwnTracks")
        return json_response([])
    # Android doesn't populate topic
    if "topic" not in message:
        headers = request.headers
        user = headers.get("X-Limit-U")
        device = headers.get("X-Limit-D", user)
        if user:
            message["topic"] = f"{topic_base}/{user}/{device}"
        elif message["_type"] != "encrypted":
            # NOTE(review): assumes "_type" is always present; a payload
            # lacking it raises KeyError on the line above — confirm.
            _LOGGER.warning(
                "No topic or user found in message. If on Android,"
                " set a username in Connection -> Identification"
            )
            # Keep it as a 200 response so the incorrect packet is discarded
            return json_response([])
    # Fan the message out to the registered message handler.
    hass.helpers.dispatcher.async_dispatcher_send(DOMAIN, hass, context, message)
    # Reply with the current location of every person entity that has one.
    response = []
    for person in hass.states.async_all("person"):
        if "latitude" in person.attributes and "longitude" in person.attributes:
            response.append(
                {
                    "_type": "location",
                    "lat": person.attributes["latitude"],
                    "lon": person.attributes["longitude"],
                    # Tracker id: initials of the first two name parts.
                    "tid": "".join(p[0] for p in person.name.split(" ")[:2]),
                    "tst": int(person.last_updated.timestamp()),
                }
            )
    # Mirror the client's encryption: encrypted in -> encrypted out.
    if message["_type"] == "encrypted" and context.secret:
        return json_response(
            {
                "_type": "encrypted",
                "data": encrypt_message(
                    context.secret, message["topic"], json.dumps(response)
                ),
            }
        )
    return json_response(response)
class OwnTracksContext:
    """Hold the current OwnTracks context."""

    def __init__(
        self,
        hass,
        secret,
        max_gps_accuracy,
        import_waypoints,
        waypoint_whitelist,
        region_mapping,
        events_only,
        mqtt_topic,
    ):
        """Initialize an OwnTracks context."""
        self.hass = hass
        self.secret = secret
        self.max_gps_accuracy = max_gps_accuracy
        # dev_id -> set of mobile beacon names currently tied to that device
        self.mobile_beacons_active = defaultdict(set)
        # dev_id -> regions entered; maintained by the message handlers
        # (not visible in this module)
        self.regions_entered = defaultdict(list)
        self.import_waypoints = import_waypoints
        self.waypoint_whitelist = waypoint_whitelist
        self.region_mapping = region_mapping
        self.events_only = events_only
        self.mqtt_topic = mqtt_topic
        # Messages that arrive before the device tracker platform installs
        # its callback via set_async_see are buffered here.
        self._pending_msg = []

    @callback
    def async_valid_accuracy(self, message):
        """Check if we should ignore this message."""
        # Missing or non-numeric accuracy -> ignore the message.
        if (acc := message.get("acc")) is None:
            return False
        try:
            acc = float(acc)
        except ValueError:
            return False
        # Zero accuracy is OwnTracks' "no GPS fix" marker.
        if acc == 0:
            _LOGGER.warning(
                "Ignoring %s update because GPS accuracy is zero: %s",
                message["_type"],
                message,
            )
            return False
        if self.max_gps_accuracy is not None and acc > self.max_gps_accuracy:
            _LOGGER.info(
                "Ignoring %s update because expected GPS accuracy %s is not met: %s",
                message["_type"],
                self.max_gps_accuracy,
                message,
            )
            return False
        return True

    @callback
    def set_async_see(self, func):
        """Set a new async_see function."""
        self.async_see = func
        # Replay anything buffered before the tracker became available.
        for msg in self._pending_msg:
            func(**msg)
        self._pending_msg.clear()

    # pylint: disable=method-hidden
    @callback
    def async_see(self, **data):
        """Send a see message to the device tracker."""
        # Placeholder: buffers messages until set_async_see replaces this
        # method with the real device tracker callback (hence the pylint
        # method-hidden disable above).
        self._pending_msg.append(data)

    @callback
    def async_see_beacons(self, hass, dev_id, kwargs_param):
        """Set active beacons to the current location."""
        kwargs = kwargs_param.copy()
        # Mobile beacons should always be set to the location of the
        # tracking device. I get the device state and make the necessary
        # changes to kwargs.
        device_tracker_state = hass.states.get(f"device_tracker.{dev_id}")
        if device_tracker_state is not None:
            acc = device_tracker_state.attributes.get(ATTR_GPS_ACCURACY)
            lat = device_tracker_state.attributes.get(ATTR_LATITUDE)
            lon = device_tracker_state.attributes.get(ATTR_LONGITUDE)
            if lat is not None and lon is not None:
                kwargs["gps"] = (lat, lon)
                kwargs["gps_accuracy"] = acc
            else:
                kwargs["gps"] = None
                kwargs["gps_accuracy"] = None
        # the battery state applies to the tracking device, not the beacon
        # kwargs location is the beacon's configured lat/lon
        kwargs.pop("battery", None)
        for beacon in self.mobile_beacons_active[dev_id]:
            kwargs["dev_id"] = f"{BEACON_DEV_ID}_{beacon}"
            kwargs["host_name"] = beacon
            self.async_see(**kwargs)
| |
# coding: utf-8
"""
ORCID Member
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: Latest
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from orcid_api_v3.models.citation import Citation # noqa: F401,E501
from orcid_api_v3.models.country_v20 import CountryV20 # noqa: F401,E501
from orcid_api_v3.models.created_date_v20 import CreatedDateV20 # noqa: F401,E501
from orcid_api_v3.models.external_i_ds_v20 import ExternalIDsV20 # noqa: F401,E501
from orcid_api_v3.models.last_modified_date_v20 import LastModifiedDateV20 # noqa: F401,E501
from orcid_api_v3.models.publication_date_v20 import PublicationDateV20 # noqa: F401,E501
from orcid_api_v3.models.source_v20 import SourceV20 # noqa: F401,E501
from orcid_api_v3.models.title_v20 import TitleV20 # noqa: F401,E501
from orcid_api_v3.models.url_v20 import UrlV20 # noqa: F401,E501
from orcid_api_v3.models.work_contributors_v20 import WorkContributorsV20 # noqa: F401,E501
from orcid_api_v3.models.work_title_v20 import WorkTitleV20 # noqa: F401,E501
class WorkV20(object):
    """NOTE: This class is auto generated by the swagger code generator program.

    Do not edit the class manually.
    """
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # python attribute name -> swagger model type (used by to_dict)
    swagger_types = {
        'created_date': 'CreatedDateV20',
        'last_modified_date': 'LastModifiedDateV20',
        'source': 'SourceV20',
        'put_code': 'int',
        'path': 'str',
        'title': 'WorkTitleV20',
        'journal_title': 'TitleV20',
        'short_description': 'str',
        'citation': 'Citation',
        'type': 'str',
        'publication_date': 'PublicationDateV20',
        'external_ids': 'ExternalIDsV20',
        'url': 'UrlV20',
        'contributors': 'WorkContributorsV20',
        'language_code': 'str',
        'country': 'CountryV20',
        'visibility': 'str'
    }

    # python attribute name -> JSON key in the ORCID API payload
    attribute_map = {
        'created_date': 'created-date',
        'last_modified_date': 'last-modified-date',
        'source': 'source',
        'put_code': 'put-code',
        'path': 'path',
        'title': 'title',
        'journal_title': 'journal-title',
        'short_description': 'short-description',
        'citation': 'citation',
        'type': 'type',
        'publication_date': 'publication-date',
        'external_ids': 'external-ids',
        'url': 'url',
        'contributors': 'contributors',
        'language_code': 'language-code',
        'country': 'country',
        'visibility': 'visibility'
    }

    def __init__(self, created_date=None, last_modified_date=None, source=None, put_code=None, path=None, title=None, journal_title=None, short_description=None, citation=None, type=None, publication_date=None, external_ids=None, url=None, contributors=None, language_code=None, country=None, visibility=None):  # noqa: E501
        """WorkV20 - a model defined in Swagger"""  # noqa: E501
        self._created_date = None
        self._last_modified_date = None
        self._source = None
        self._put_code = None
        self._path = None
        self._title = None
        self._journal_title = None
        self._short_description = None
        self._citation = None
        self._type = None
        self._publication_date = None
        self._external_ids = None
        self._url = None
        self._contributors = None
        self._language_code = None
        self._country = None
        self._visibility = None
        # No polymorphic discriminator for this model.
        self.discriminator = None
        # Assign through the property setters so validated fields
        # (type, visibility) are checked; None values are skipped.
        if created_date is not None:
            self.created_date = created_date
        if last_modified_date is not None:
            self.last_modified_date = last_modified_date
        if source is not None:
            self.source = source
        if put_code is not None:
            self.put_code = put_code
        if path is not None:
            self.path = path
        if title is not None:
            self.title = title
        if journal_title is not None:
            self.journal_title = journal_title
        if short_description is not None:
            self.short_description = short_description
        if citation is not None:
            self.citation = citation
        if type is not None:
            self.type = type
        if publication_date is not None:
            self.publication_date = publication_date
        if external_ids is not None:
            self.external_ids = external_ids
        if url is not None:
            self.url = url
        if contributors is not None:
            self.contributors = contributors
        if language_code is not None:
            self.language_code = language_code
        if country is not None:
            self.country = country
        if visibility is not None:
            self.visibility = visibility

    @property
    def created_date(self):
        """Gets the created_date of this WorkV20.  # noqa: E501

        :return: The created_date of this WorkV20.  # noqa: E501
        :rtype: CreatedDateV20
        """
        return self._created_date

    @created_date.setter
    def created_date(self, created_date):
        """Sets the created_date of this WorkV20.

        :param created_date: The created_date of this WorkV20.  # noqa: E501
        :type: CreatedDateV20
        """
        self._created_date = created_date

    @property
    def last_modified_date(self):
        """Gets the last_modified_date of this WorkV20.  # noqa: E501

        :return: The last_modified_date of this WorkV20.  # noqa: E501
        :rtype: LastModifiedDateV20
        """
        return self._last_modified_date

    @last_modified_date.setter
    def last_modified_date(self, last_modified_date):
        """Sets the last_modified_date of this WorkV20.

        :param last_modified_date: The last_modified_date of this WorkV20.  # noqa: E501
        :type: LastModifiedDateV20
        """
        self._last_modified_date = last_modified_date

    @property
    def source(self):
        """Gets the source of this WorkV20.  # noqa: E501

        :return: The source of this WorkV20.  # noqa: E501
        :rtype: SourceV20
        """
        return self._source

    @source.setter
    def source(self, source):
        """Sets the source of this WorkV20.

        :param source: The source of this WorkV20.  # noqa: E501
        :type: SourceV20
        """
        self._source = source

    @property
    def put_code(self):
        """Gets the put_code of this WorkV20.  # noqa: E501

        :return: The put_code of this WorkV20.  # noqa: E501
        :rtype: int
        """
        return self._put_code

    @put_code.setter
    def put_code(self, put_code):
        """Sets the put_code of this WorkV20.

        :param put_code: The put_code of this WorkV20.  # noqa: E501
        :type: int
        """
        self._put_code = put_code

    @property
    def path(self):
        """Gets the path of this WorkV20.  # noqa: E501

        :return: The path of this WorkV20.  # noqa: E501
        :rtype: str
        """
        return self._path

    @path.setter
    def path(self, path):
        """Sets the path of this WorkV20.

        :param path: The path of this WorkV20.  # noqa: E501
        :type: str
        """
        self._path = path

    @property
    def title(self):
        """Gets the title of this WorkV20.  # noqa: E501

        :return: The title of this WorkV20.  # noqa: E501
        :rtype: WorkTitleV20
        """
        return self._title

    @title.setter
    def title(self, title):
        """Sets the title of this WorkV20.

        :param title: The title of this WorkV20.  # noqa: E501
        :type: WorkTitleV20
        """
        self._title = title

    @property
    def journal_title(self):
        """Gets the journal_title of this WorkV20.  # noqa: E501

        :return: The journal_title of this WorkV20.  # noqa: E501
        :rtype: TitleV20
        """
        return self._journal_title

    @journal_title.setter
    def journal_title(self, journal_title):
        """Sets the journal_title of this WorkV20.

        :param journal_title: The journal_title of this WorkV20.  # noqa: E501
        :type: TitleV20
        """
        self._journal_title = journal_title

    @property
    def short_description(self):
        """Gets the short_description of this WorkV20.  # noqa: E501

        :return: The short_description of this WorkV20.  # noqa: E501
        :rtype: str
        """
        return self._short_description

    @short_description.setter
    def short_description(self, short_description):
        """Sets the short_description of this WorkV20.

        :param short_description: The short_description of this WorkV20.  # noqa: E501
        :type: str
        """
        self._short_description = short_description

    @property
    def citation(self):
        """Gets the citation of this WorkV20.  # noqa: E501

        :return: The citation of this WorkV20.  # noqa: E501
        :rtype: Citation
        """
        return self._citation

    @citation.setter
    def citation(self, citation):
        """Sets the citation of this WorkV20.

        :param citation: The citation of this WorkV20.  # noqa: E501
        :type: Citation
        """
        self._citation = citation

    @property
    def type(self):
        """Gets the type of this WorkV20.  # noqa: E501

        :return: The type of this WorkV20.  # noqa: E501
        :rtype: str
        """
        return self._type

    @type.setter
    def type(self, type):
        """Sets the type of this WorkV20.

        :param type: The type of this WorkV20.  # noqa: E501
        :type: str
        """
        # Closed vocabulary defined by the ORCID API; anything else is rejected.
        allowed_values = ["ARTISTIC_PERFORMANCE", "BOOK_CHAPTER", "BOOK_REVIEW", "BOOK", "CONFERENCE_ABSTRACT", "CONFERENCE_PAPER", "CONFERENCE_POSTER", "DATA_SET", "DICTIONARY_ENTRY", "DISCLOSURE", "DISSERTATION", "EDITED_BOOK", "ENCYCLOPEDIA_ENTRY", "INVENTION", "JOURNAL_ARTICLE", "JOURNAL_ISSUE", "LECTURE_SPEECH", "LICENSE", "MAGAZINE_ARTICLE", "MANUAL", "NEWSLETTER_ARTICLE", "NEWSPAPER_ARTICLE", "ONLINE_RESOURCE", "OTHER", "PATENT", "REGISTERED_COPYRIGHT", "REPORT", "RESEARCH_TECHNIQUE", "RESEARCH_TOOL", "SPIN_OFF_COMPANY", "STANDARDS_AND_POLICY", "SUPERVISED_STUDENT_PUBLICATION", "TECHNICAL_STANDARD", "TEST", "TRADEMARK", "TRANSLATION", "WEBSITE", "WORKING_PAPER", "UNDEFINED"]  # noqa: E501
        if type not in allowed_values:
            raise ValueError(
                "Invalid value for `type` ({0}), must be one of {1}"  # noqa: E501
                .format(type, allowed_values)
            )
        self._type = type

    @property
    def publication_date(self):
        """Gets the publication_date of this WorkV20.  # noqa: E501

        :return: The publication_date of this WorkV20.  # noqa: E501
        :rtype: PublicationDateV20
        """
        return self._publication_date

    @publication_date.setter
    def publication_date(self, publication_date):
        """Sets the publication_date of this WorkV20.

        :param publication_date: The publication_date of this WorkV20.  # noqa: E501
        :type: PublicationDateV20
        """
        self._publication_date = publication_date

    @property
    def external_ids(self):
        """Gets the external_ids of this WorkV20.  # noqa: E501

        :return: The external_ids of this WorkV20.  # noqa: E501
        :rtype: ExternalIDsV20
        """
        return self._external_ids

    @external_ids.setter
    def external_ids(self, external_ids):
        """Sets the external_ids of this WorkV20.

        :param external_ids: The external_ids of this WorkV20.  # noqa: E501
        :type: ExternalIDsV20
        """
        self._external_ids = external_ids

    @property
    def url(self):
        """Gets the url of this WorkV20.  # noqa: E501

        :return: The url of this WorkV20.  # noqa: E501
        :rtype: UrlV20
        """
        return self._url

    @url.setter
    def url(self, url):
        """Sets the url of this WorkV20.

        :param url: The url of this WorkV20.  # noqa: E501
        :type: UrlV20
        """
        self._url = url

    @property
    def contributors(self):
        """Gets the contributors of this WorkV20.  # noqa: E501

        :return: The contributors of this WorkV20.  # noqa: E501
        :rtype: WorkContributorsV20
        """
        return self._contributors

    @contributors.setter
    def contributors(self, contributors):
        """Sets the contributors of this WorkV20.

        :param contributors: The contributors of this WorkV20.  # noqa: E501
        :type: WorkContributorsV20
        """
        self._contributors = contributors

    @property
    def language_code(self):
        """Gets the language_code of this WorkV20.  # noqa: E501

        :return: The language_code of this WorkV20.  # noqa: E501
        :rtype: str
        """
        return self._language_code

    @language_code.setter
    def language_code(self, language_code):
        """Sets the language_code of this WorkV20.

        :param language_code: The language_code of this WorkV20.  # noqa: E501
        :type: str
        """
        self._language_code = language_code

    @property
    def country(self):
        """Gets the country of this WorkV20.  # noqa: E501

        :return: The country of this WorkV20.  # noqa: E501
        :rtype: CountryV20
        """
        return self._country

    @country.setter
    def country(self, country):
        """Sets the country of this WorkV20.

        :param country: The country of this WorkV20.  # noqa: E501
        :type: CountryV20
        """
        self._country = country

    @property
    def visibility(self):
        """Gets the visibility of this WorkV20.  # noqa: E501

        :return: The visibility of this WorkV20.  # noqa: E501
        :rtype: str
        """
        return self._visibility

    @visibility.setter
    def visibility(self, visibility):
        """Sets the visibility of this WorkV20.

        :param visibility: The visibility of this WorkV20.  # noqa: E501
        :type: str
        """
        # Closed vocabulary defined by the ORCID API; anything else is rejected.
        allowed_values = ["LIMITED", "REGISTERED_ONLY", "PUBLIC", "PRIVATE"]  # noqa: E501
        if visibility not in allowed_values:
            raise ValueError(
                "Invalid value for `visibility` ({0}), must be one of {1}"  # noqa: E501
                .format(visibility, allowed_values)
            )
        self._visibility = visibility

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        # Recursively serialize nested models, lists and dicts of models.
        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        # Generated boilerplate: only relevant if the model subclasses dict.
        if issubclass(WorkV20, dict):
            for key, value in self.items():
                result[key] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, WorkV20):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| |
import functools
import httplib as http
import itertools
from operator import itemgetter
from dateutil.parser import parse as parse_date
from django.core.exceptions import ValidationError
from django.db.models import Q
from django.utils import timezone
from flask import request, redirect
import pytz
from framework.database import get_or_http_error, autoload
from framework.exceptions import HTTPError
from framework.status import push_status_message
from osf.utils.sanitize import strip_html
from osf.utils.permissions import ADMIN
from osf.utils.functional import rapply
from osf.models import NodeLog, RegistrationSchema, DraftRegistration, Sanction
from website.exceptions import NodeStateError
from website.project.decorators import (
must_be_valid_project,
must_have_permission,
http_error_if_disk_saving_mode
)
from website import language, settings
from website.ember_osf_web.decorators import ember_flag_is_active
from website.prereg import utils as prereg_utils
from website.project import utils as project_utils
from website.project.metadata.schemas import LATEST_SCHEMA_VERSION, METASCHEMA_ORDERING
from website.project.metadata.utils import serialize_meta_schema, serialize_draft_registration
from website.project.utils import serialize_node
def get_schema_or_fail(query):
    """Return the RegistrationSchema matching ``query`` or raise an HTTP error.

    Plain ``def`` instead of a lambda assignment (PEP 8 E731); the callable
    name and signature are unchanged for existing callers.
    """
    return get_or_http_error(RegistrationSchema, query)

# Decorator that loads the DraftRegistration identified by the 'draft_id'
# URL kwarg into kwargs['draft'] for the wrapped view.
autoload_draft = functools.partial(autoload, DraftRegistration, 'draft_id', 'draft')
def must_be_branched_from_node(func):
    """Decorator: require that the loaded draft was branched from the node.

    Stacks ``autoload_draft`` and ``must_be_valid_project`` so the wrapped
    view receives ``kwargs['node']`` and ``kwargs['draft']``.  Responds
    410 GONE for deleted drafts and 400 BAD_REQUEST when the draft does
    not originate from the given node.
    """
    @autoload_draft
    @must_be_valid_project
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        node = kwargs['node']
        draft = kwargs['draft']
        if draft.deleted:
            raise HTTPError(http.GONE)
        if not draft.branched_from._id == node._id:
            raise HTTPError(
                http.BAD_REQUEST,
                data={
                    'message_short': 'Not a draft of this node',
                    'message_long': 'This draft registration is not created from the given node.'
                }
            )
        return func(*args, **kwargs)
    return wrapper
def validate_embargo_end_date(end_date_string, node):
    """Validate a requested embargo end date for a draft registration.

    Our reviewers have a window of time in which to review a draft
    registration submission.  An embargo end date that falls within that
    window risks causing validation errors down the line if the draft is
    approved and registered.  The draft registration approval window is
    always greater than the time span for disallowed embargo end dates.

    :param end_date_string: date string supplied by the client
    :param node: node the draft is branched from (provides
        ``_is_embargo_date_valid`` for the upper-bound check)
    :raises: HTTPError if end_date is less than the approval window or greater than the
    max embargo end date
    """
    # Interpret the incoming date as UTC regardless of any timezone suffix.
    end_date = parse_date(end_date_string, ignoretz=True).replace(tzinfo=pytz.utc)
    today = timezone.now()
    if (end_date - today) <= settings.DRAFT_REGISTRATION_APPROVAL_PERIOD:
        raise HTTPError(http.BAD_REQUEST, data={
            'message_short': 'Invalid embargo end date',
            # .days: the approval period is a timedelta; formatting it
            # directly would render as e.g. '30 days, 0:00:00 days'.
            'message_long': 'Embargo end date for this submission must be at least {0} days in the future.'.format(settings.DRAFT_REGISTRATION_APPROVAL_PERIOD.days)
        })
    elif not node._is_embargo_date_valid(end_date):
        # NOTE(review): max_end_date is derived from the approval window;
        # confirm it matches _is_embargo_date_valid's actual upper bound.
        max_end_date = today + settings.DRAFT_REGISTRATION_APPROVAL_PERIOD
        raise HTTPError(http.BAD_REQUEST, data={
            'message_short': 'Invalid embargo end date',
            # 'must be on or before': fixed missing verb in user-facing text.
            'message_long': 'Embargo end date must be on or before {0}.'.format(max_end_date.isoformat())
        })
def validate_registration_choice(registration_choice):
    """Raise 400 BAD_REQUEST unless the choice is 'embargo' or 'immediate'."""
    if registration_choice in ('embargo', 'immediate'):
        return
    raise HTTPError(
        http.BAD_REQUEST,
        data={
            'message_short': "Invalid 'registrationChoice'",
            'message_long': "Values for 'registrationChoice' must be either 'embargo' or 'immediate'."
        }
    )
def check_draft_state(draft):
    """Raise 403 FORBIDDEN when ``draft`` may no longer be modified.

    A draft is locked once it has been registered (unless that
    registration was deleted), while it is pending review, and once it
    has been approved.
    """
    registration = draft.registered_node
    registered_and_deleted = registration and registration.is_deleted
    if registration and not registered_and_deleted:
        raise HTTPError(http.FORBIDDEN, data={
            'message_short': 'This draft has already been registered',
            'message_long': 'This draft has already been registered and cannot be modified.'
        })
    if draft.is_pending_review:
        raise HTTPError(http.FORBIDDEN, data={
            'message_short': 'This draft is pending review',
            'message_long': 'This draft is pending review and cannot be modified.'
        })
    if draft.requires_approval and draft.is_approved and (not registered_and_deleted):
        raise HTTPError(http.FORBIDDEN, data={
            'message_short': 'This draft has already been approved',
            'message_long': 'This draft has already been approved and cannot be modified.'
        })
@must_have_permission(ADMIN)
@must_be_branched_from_node
def submit_draft_for_review(auth, node, draft, *args, **kwargs):
    """Submit for approvals and/or notifications

    :return: serialized registration
    :rtype: dict
    :raises: HTTPError if embargo end date is invalid
    """
    data = request.get_json()
    meta = {}
    registration_choice = data.get('registrationChoice', 'immediate')
    validate_registration_choice(registration_choice)
    if registration_choice == 'embargo':
        # Initiate embargo
        end_date_string = data['embargoEndDate']
        validate_embargo_end_date(end_date_string, node)
        meta['embargo_end_date'] = end_date_string
    meta['registration_choice'] = registration_choice
    # A draft with a live (non-deleted) registration cannot be resubmitted.
    if draft.registered_node and not draft.registered_node.is_deleted:
        raise HTTPError(http.BAD_REQUEST, data=dict(message_long='This draft has already been registered, if you wish to '
                                                                 'register it again or submit it for review please create '
                                                                 'a new draft.'))
    # Don't allow resubmission unless submission was rejected
    if draft.approval and draft.approval.state != Sanction.REJECTED:
        raise HTTPError(http.CONFLICT, data=dict(message_long='Cannot resubmit previously submitted draft.'))
    draft.submit_for_review(
        initiated_by=auth.user,
        meta=meta,
        save=True
    )
    # Prereg Challenge submissions additionally get a node log entry.
    if prereg_utils.get_prereg_schema() == draft.registration_schema:
        node.add_log(
            action=NodeLog.PREREG_REGISTRATION_INITIATED,
            params={'node': node._primary_key},
            auth=auth,
            save=False
        )
        node.save()
    push_status_message(language.AFTER_SUBMIT_FOR_REVIEW,
                        kind='info',
                        trust=False)
    return {
        'status': 'initiated',
        'urls': {
            'registrations': node.web_url_for('node_registrations')
        }
    }, http.ACCEPTED
@must_have_permission(ADMIN)
@must_be_branched_from_node
def draft_before_register_page(auth, node, draft, *args, **kwargs):
    """Allow the user to select an embargo period and confirm registration

    :return: serialized Node + DraftRegistration
    :rtype: dict
    """
    serialized = serialize_node(node, auth, primary=True)
    serialized['draft'] = serialize_draft_registration(draft, auth)
    return serialized
@must_have_permission(ADMIN)
@must_be_branched_from_node
@http_error_if_disk_saving_mode
def register_draft_registration(auth, node, draft, *args, **kwargs):
    """Initiate a registration from a draft registration

    :return: success message; url to registrations page
    :rtype: dict
    """
    data = request.get_json()
    registration_choice = data.get('registrationChoice', 'immediate')
    validate_registration_choice(registration_choice)
    # Don't allow resubmission unless submission was rejected
    if draft.approval and draft.approval.state != Sanction.REJECTED:
        raise HTTPError(http.CONFLICT, data=dict(message_long='Cannot resubmit previously submitted draft.'))
    # Create the registration node from the draft's current state.
    register = draft.register(auth)
    draft.save()
    if registration_choice == 'embargo':
        # Initiate embargo
        embargo_end_date = parse_date(data['embargoEndDate'], ignoretz=True).replace(tzinfo=pytz.utc)
        try:
            register.embargo_registration(auth.user, embargo_end_date)
        except ValidationError as err:
            raise HTTPError(http.BAD_REQUEST, data=dict(message_long=err.message))
    else:
        # Immediate registration still goes through an approval sanction.
        try:
            register.require_approval(auth.user)
        except NodeStateError as err:
            raise HTTPError(http.BAD_REQUEST, data=dict(message_long=err.message))
    register.save()
    push_status_message(language.AFTER_REGISTER_ARCHIVING,
                        kind='info',
                        trust=False)
    return {
        'status': 'initiated',
        'urls': {
            'registrations': node.web_url_for('node_registrations')
        }
    }, http.ACCEPTED
@must_have_permission(ADMIN)
@must_be_branched_from_node
def get_draft_registration(auth, node, draft, *args, **kwargs):
    """Return a single draft registration

    :return: serialized draft registration
    :rtype: dict
    """
    serialized = serialize_draft_registration(draft, auth)
    return serialized, http.OK
@must_have_permission(ADMIN)
@must_be_valid_project
def get_draft_registrations(auth, node, *args, **kwargs):
    """List draft registrations for a node

    :return: serialized draft registrations, most recently updated first
    :rtype: dict
    """
    # BUG FIX: query-string values arrive as strings, and itertools.islice
    # raises a TypeError for a non-integer stop value. Cast explicitly so
    # ?count=50 works; the default of 100 is preserved.
    count = int(request.args.get('count', 100))
    drafts = itertools.islice(node.draft_registrations_active, 0, count)
    serialized_drafts = [serialize_draft_registration(d, auth) for d in drafts]
    # 'updated' is an ISO-8601 string (e.g. '2016-08-03T14:24:12Z'), so a
    # lexicographic sort orders chronologically.
    sorted_serialized_drafts = sorted(serialized_drafts, key=itemgetter('updated'), reverse=True)
    return {
        'drafts': sorted_serialized_drafts
    }, http.OK
@must_have_permission(ADMIN)
@must_be_valid_project
@ember_flag_is_active('ember_create_draft_registration_page')
def new_draft_registration(auth, node, *args, **kwargs):
    """Create a new draft registration for the node

    :return: Redirect to the new draft's edit page
    :rtype: flask.redirect
    :raises: HTTPError
    """
    # Registered projects are immutable; refuse to attach drafts to them.
    if node.is_registration:
        raise HTTPError(http.FORBIDDEN, data={
            'message_short': "Can't create draft",
            'message_long': 'Creating draft registrations on registered projects is not allowed.'
        })
    params = request.values
    schema_name = params.get('schema_name')
    if not schema_name:
        raise HTTPError(
            http.BAD_REQUEST,
            data={
                'message_short': 'Must specify a schema_name',
                'message_long': 'Please specify a schema_name'
            }
        )
    # Form values are strings; normalise the version to an int up front.
    schema_version = int(params.get('schema_version', 2))
    meta_schema = get_schema_or_fail(Q(name=schema_name, schema_version=schema_version))
    draft = DraftRegistration.create_from_node(
        node,
        user=auth.user,
        schema=meta_schema,
        data={}
    )
    return redirect(node.web_url_for('edit_draft_registration_page', draft_id=draft._id))
@must_have_permission(ADMIN)
@ember_flag_is_active('ember_edit_draft_registration_page')
@must_be_branched_from_node
def edit_draft_registration_page(auth, node, draft, **kwargs):
    """Draft registration editor

    :return: serialized DraftRegistration
    :rtype: dict
    """
    # Refuse to edit drafts that are already in review / registered.
    check_draft_state(draft)
    serialized = project_utils.serialize_node(node, auth, primary=True)
    serialized['draft'] = serialize_draft_registration(draft, auth)
    return serialized
@must_have_permission(ADMIN)
@must_be_branched_from_node
def update_draft_registration(auth, node, draft, *args, **kwargs):
    """Update an existing draft registration

    :return: serialized draft registration
    :rtype: dict
    :raises: HTTPError
    """
    check_draft_state(draft)
    data = request.get_json()
    # Strip markup from user-supplied metadata before persisting it.
    schema_data = data.get('schema_data', {})
    schema_data = rapply(schema_data, strip_html)
    schema_name = data.get('schema_name')
    schema_version = data.get('schema_version', 1)
    if schema_name:
        # CONSISTENCY FIX: cast the version to int as new_draft_registration
        # does, so a client sending "2" (a string) matches the stored
        # integer schema_version instead of failing the lookup.
        meta_schema = get_schema_or_fail(Q(name=schema_name, schema_version=int(schema_version)))
        existing_schema = draft.registration_schema
        # Only swap schemas when the (name, version) pair actually changed.
        if (existing_schema.name, existing_schema.schema_version) != (meta_schema.name, meta_schema.schema_version):
            draft.registration_schema = meta_schema
    draft.update_metadata(schema_data)
    draft.save()
    return serialize_draft_registration(draft, auth), http.OK
@must_have_permission(ADMIN)
@must_be_branched_from_node
def delete_draft_registration(auth, node, draft, *args, **kwargs):
    """Permanently delete a draft registration

    :return: None
    :rtype: NoneType
    """
    # A draft whose registration still exists must be kept for provenance.
    registered = draft.registered_node
    if registered and not registered.is_deleted:
        raise HTTPError(
            http.FORBIDDEN,
            data={
                'message_short': "Can't delete draft",
                'message_long': 'This draft has already been registered and cannot be deleted.'
            }
        )
    # Soft delete: stamp the deletion time rather than removing the row.
    draft.deleted = timezone.now()
    draft.save(update_fields=['deleted'])
    return None, http.NO_CONTENT
def get_metaschemas(*args, **kwargs):
    """
    List metaschemas with which a draft registration may be created. Only fetch the newest version for each schema.

    :return: serialized metaschemas
    :rtype: dict
    """
    # BUG FIX: request.args values are strings; a str in the slice below
    # would raise TypeError. Cast to int (default 100 preserved).
    count = int(request.args.get('count', 100))
    include = request.args.get('include', 'latest')
    meta_schemas = RegistrationSchema.objects.filter(active=True)
    if include == 'latest':
        # BUG FIX: QuerySet.filter returns a *new* queryset; the original
        # discarded the result, so every schema version was returned even
        # when only the latest was requested.
        meta_schemas = meta_schemas.filter(schema_version=LATEST_SCHEMA_VERSION)
    # Order by the project-defined display ordering.
    meta_schemas = sorted(meta_schemas, key=lambda x: METASCHEMA_ORDERING.index(x.name))
    return {
        'meta_schemas': [
            serialize_meta_schema(ms) for ms in meta_schemas[:count]
        ]
    }, http.OK
| |
#! /usr/bin/python3
"""
Reverse geocoding
"""
import googlemaps
import argparse
import json
from airbnb_config import ABConfig
import sys
import logging
FORMAT_STRING = "%(asctime)-15s %(levelname)-8s%(message)s"
logging.basicConfig(level=logging.INFO, format=FORMAT_STRING)
LOGGER = logging.getLogger()
STRING_NA = "N/A"
# Suppress informational logging from requests module
logging.getLogger("requests").setLevel(logging.WARNING)
logging.getLogger("urllib3").setLevel(logging.WARNING)
class Location():
    """A reverse-geocoded point, keyed by rounded latitude/longitude.

    Address fields start out as the N/A sentinel and are filled in by
    reverse_geocode().
    """

    # Address attributes populated from Google geocoding results.
    _ADDRESS_ATTRS = ("neighborhood", "sublocality", "locality",
                      "level2", "level1", "country")

    def __init__(self, lat_round, lng_round):
        self.lat_round = lat_round
        self.lng_round = lng_round
        # Every address component defaults to the "not available" sentinel.
        for attr in self._ADDRESS_ATTRS:
            setattr(self, attr, STRING_NA)

    @classmethod
    def from_db(cls, lat_round, lng_round):
        """
        Get a location (address etc) by reading from the database
        """
        return cls(lat_round, lng_round)
class BoundingBox():
    """
    Get max and min lat and long for a search area
    """

    def __init__(self, bounding_box):
        # bounding_box is a (south, north, west, east) 4-tuple.
        (self.bb_s_lat,
         self.bb_n_lat,
         self.bb_w_lng,
         self.bb_e_lng) = bounding_box

    @classmethod
    def from_db(cls, config, search_area):
        """
        Get a bounding box from the database by reading the search_area.name
        """
        try:
            # NOTE(review): this sets a *class* attribute, shared across all
            # instances — presumably intentional; confirm before changing.
            cls.search_area = search_area
            conn = config.connect()
            cur = conn.cursor()
            sql = """
            SELECT bb_s_lat, bb_n_lat, bb_w_lng, bb_e_lng
            FROM search_area
            WHERE name = %s
            """
            cur.execute(sql, (search_area,))
            bounding_box = cur.fetchone()
            cur.close()
            return cls(bounding_box)
        # FIX: narrowed from a bare "except:", which also swallowed
        # SystemExit and KeyboardInterrupt.
        except Exception:
            LOGGER.exception("Exception in BoundingBox_from_db: exiting")
            sys.exit()

    @classmethod
    def from_google(cls, config, search_area):
        """
        Get a bounding box from Google
        """
        try:
            gmaps = googlemaps.Client(key=config.GOOGLE_API_KEY)
            results = gmaps.geocode(search_area)
            bounds = results[0]["geometry"]["bounds"]
            bounding_box = (bounds["southwest"]["lat"],
                            bounds["northeast"]["lat"],
                            bounds["southwest"]["lng"],
                            bounds["northeast"]["lng"],)
            return cls(bounding_box)
        except Exception:
            LOGGER.exception("Exception in BoundingBox_from_google: exiting")
            sys.exit()

    @classmethod
    def from_args(cls, config, args):
        """
        Get a bounding box from the command line
        """
        try:
            bounding_box = (args.bb_s_lat, args.bb_n_lat,
                            args.bb_w_lng, args.bb_e_lng)
            return cls(bounding_box)
        except Exception:
            LOGGER.exception("Exception in BoundingBox_from_args: exiting")
            sys.exit()
def select_lat_lng(config, bounding_box):
    """
    Return a Location for one (lat_round, lng_round) pair from the location
    table whose country has not yet been set, or None if there are none left
    inside the bounding box.
    """
    try:
        conn = config.connect()
        cur = conn.cursor()
        sql = """
        SELECT lat_round, lng_round
        FROM location
        WHERE country IS NULL
        AND lat_round BETWEEN %s AND %s
        AND lng_round BETWEEN %s AND %s
        LIMIT 1
        """
        args = (bounding_box.bb_s_lat,
                bounding_box.bb_n_lat,
                bounding_box.bb_w_lng,
                bounding_box.bb_e_lng)
        cur.execute(sql, args)
        # FIX: the original unpacked fetchone() inside a bare except and
        # returned early without closing the cursor, leaking it on the
        # "no more results" path and hiding real DB errors.
        row = cur.fetchone()
        cur.close()
        if row is None:
            # No more results
            return None
        (lat_round, lng_round) = row
        return Location(lat_round, lng_round)
    except Exception:
        LOGGER.exception("Exception in select_lat_lng: exiting")
        sys.exit()
def update_location(config, location):
    """
    Update the location row matching (lat_round, lng_round) with the
    address fields of *location*.

    :return: True on success, False on any database error
    :rtype: bool
    """
    try:
        conn = config.connect()
        cur = conn.cursor()
        sql = """
        UPDATE location
        SET neighborhood = %s,
        sublocality = %s,
        locality = %s,
        level2 = %s,
        level1 = %s,
        country = %s
        WHERE lat_round = %s AND lng_round = %s
        """
        update_args = (location.neighborhood,
                       location.sublocality,
                       location.locality,
                       location.level2,
                       location.level1,
                       location.country,
                       location.lat_round,
                       location.lng_round,
                       )
        LOGGER.debug(update_args)
        cur.execute(sql, update_args)
        cur.close()
        conn.commit()
        return True
    # FIX: narrowed from a bare "except:", which also swallowed
    # SystemExit and KeyboardInterrupt.
    except Exception:
        LOGGER.exception("Exception in update_location")
        return False
def reverse_geocode(config, location):
    """
    Return address information from the Google API as a Location object for a given lat lng

    Fills in each still-N/A address field of *location* from the first
    matching address component, and dumps the raw API response to
    geocode.json for debugging.
    """
    gmaps = googlemaps.Client(key=config.GOOGLE_API_KEY)
    # Look up an address with reverse geocoding
    lat = location.lat_round
    lng = location.lng_round
    results = gmaps.reverse_geocode((lat, lng))

    # Parsing the result is described at
    # https://developers.google.com/maps/documentation/geocoding/web-service-best-practices#ParsingJSON
    # FIX: use a context manager so the file is closed even on error.
    with open("geocode.json", mode="w", encoding="utf-8") as json_file:
        json_file.write(json.dumps(results, indent=4, sort_keys=True))

    # Ordered (attribute, component type) pairs; like the original
    # if/elif chain, at most one attribute is filled per component,
    # and only while the attribute is still the N/A sentinel.
    component_map = (
        ("neighborhood", "neighborhood"),
        ("sublocality", "sublocality"),
        ("locality", "locality"),
        ("level2", "administrative_area_level_2"),
        ("level1", "administrative_area_level_1"),
        ("country", "country"),
    )
    # In practice, you may wish to only return the first result (results[0])
    for result in results:
        if all(getattr(location, attr) != STRING_NA
               for attr, _ in component_map):
            break  # every field resolved; stop early
        for address_component in result['address_components']:
            for attr, component_type in component_map:
                if (getattr(location, attr) == STRING_NA
                        and component_type in address_component["types"]):
                    setattr(location, attr, address_component["long_name"])
                    break
    return location
def main():
    """Controlling routine: parse arguments, pick a bounding box, then
    reverse geocode up to *count* unresolved locations."""
    config = ABConfig()
    parser = argparse.ArgumentParser(
        description='reverse geocode')
    # These arguments should be more carefully constructed. Right now there is
    # no defining what is required, and what is optional, and what contradicts
    # what.
    parser.add_argument("--sa",
                        metavar="search_area", type=str,
                        help="""search_area""")
    parser.add_argument("--lat",
                        metavar="lat", type=float,
                        help="""lat""")
    parser.add_argument("--lng",
                        metavar="lng", type=float,
                        help="""lng""")
    parser.add_argument("--bb_n_lat",
                        metavar="bb_n_lat", type=float,
                        help="""bb_n_lat""")
    parser.add_argument("--bb_s_lat",
                        metavar="bb_s_lat", type=float,
                        help="""bb_s_lat""")
    parser.add_argument("--bb_e_lng",
                        metavar="bb_e_lng", type=float,
                        help="""bb_e_lng""")
    parser.add_argument("--bb_w_lng",
                        metavar="bb_w_lng", type=float,
                        help="""bb_w_lng""")
    parser.add_argument("--count",
                        metavar="count", type=int,
                        help="""number_of_lookups""")
    args = parser.parse_args()
    search_area = args.sa
    count = args.count if args.count else 1000
    # FIX: bounding_box was previously left undefined (NameError) when
    # neither --sa nor bounding-box arguments were supplied.
    bounding_box = None
    if search_area:
        bounding_box = BoundingBox.from_google(config, search_area)
        LOGGER.info("Bounding box for %s from Google = (%s, %s, %s, %s)",
                    search_area,
                    bounding_box.bb_s_lat, bounding_box.bb_n_lat,
                    bounding_box.bb_w_lng, bounding_box.bb_e_lng)
    if args.bb_n_lat:
        bounding_box = BoundingBox.from_args(config, args)
    if bounding_box is None:
        parser.error("supply either --sa or the bounding box arguments")
    # FIX: range(1, count) performed count-1 lookups; use count+1 so the
    # requested number of lookups is actually done.
    for lookup in range(1, count + 1):
        location = select_lat_lng(config, bounding_box)
        if location is None:
            LOGGER.info("No more locations")
            sys.exit(0)
        LOGGER.debug(location)
        location = reverse_geocode(config, location)
        # NOTE(review): country is initialised to "N/A" (truthy), so this
        # guard only fires if reverse_geocode set it to an empty value.
        if not location.country:
            location.country = "UNKNOWN"
        LOGGER.debug(
            "nbhd={}, subloc={}, loc={}, l2={}, l1={}, country={}."
            .format(
                location.neighborhood,
                location.sublocality,
                location.locality,
                location.level2,
                location.level1,
                location.country)
        )
        success = update_location(config, location)
        if success:
            LOGGER.info("Update succeeded: %s, %s in %s: %s of %s",
                        location.lat_round, location.lng_round,
                        location.country, lookup, count)
        else:
            # FIX: Logger.warn is the deprecated alias of Logger.warning.
            LOGGER.warning("Update failed: %s, %s: %s of %s",
                           location.lat_round, location.lng_round,
                           lookup, count)


if __name__ == "__main__":
    main()
| |
##########################################################################
#
# Copyright (c) 2012, John Haddon. All rights reserved.
# Copyright (c) 2012-2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import gc
import inspect
import os
import subprocess
import threading
import time
import imath
import six
import IECore
import Gaffer
import GafferTest
class ValuePlugTest( GafferTest.TestCase ) :
	def testCacheMemoryLimit( self ) :
		# Verifies that compute results are cached, that a zero memory limit
		# disables caching, and that clearCache() discards existing entries.
		n = GafferTest.CachingTestNode()
		n["in"].setValue( "d" )
		v1 = n["out"].getValue( _copy=False )
		v2 = n["out"].getValue( _copy=False )
		self.assertEqual( v1, v2 )
		self.assertEqual( v1, IECore.StringData( "d" ) )
		# the objects should be one and the same, as the second computation
		# should have shortcut and returned a cached result.
		self.assertTrue( v1.isSame( v2 ) )
		# A zero limit disables the cache entirely.
		Gaffer.ValuePlug.setCacheMemoryLimit( 0 )
		v3 = n["out"].getValue( _copy=False )
		self.assertEqual( v3, IECore.StringData( "d" ) )
		# the objects should be different, as we cleared the cache.
		self.assertFalse( v3.isSame( v2 ) )
		# Restore the limit recorded elsewhere (presumably in setUp,
		# outside this view — TODO confirm).
		Gaffer.ValuePlug.setCacheMemoryLimit( self.__originalCacheMemoryLimit )
		v1 = n["out"].getValue( _copy=False )
		v2 = n["out"].getValue( _copy=False )
		self.assertEqual( v1, v2 )
		self.assertEqual( v1, IECore.StringData( "d" ) )
		# the objects should be one and the same, as we reenabled the cache.
		self.assertTrue( v1.isSame( v2 ) )
		# Explicit clearing empties the cache and forces one recomputation,
		# after which caching resumes.
		Gaffer.ValuePlug.clearCache()
		self.assertEqual( Gaffer.ValuePlug.cacheMemoryUsage(), 0 )
		v3 = n["out"].getValue( _copy=False )
		self.assertFalse( v3.isSame( v2 ) )
		v4 = n["out"].getValue( _copy=False )
		self.assertTrue( v4.isSame( v3 ) )
def testSettable( self ) :
p1 = Gaffer.IntPlug( direction = Gaffer.Plug.Direction.In )
self.assertTrue( p1.settable() )
p2 = Gaffer.IntPlug( direction = Gaffer.Plug.Direction.Out )
self.assertFalse( p2.settable() )
p1.setInput( p2 )
self.assertFalse( p1.settable() )
def testUncacheabilityPropagates( self ) :
n = GafferTest.CachingTestNode()
n["in"].setValue( "pig" )
p1 = Gaffer.ObjectPlug( "p1", Gaffer.Plug.Direction.In, IECore.IntData( 10 ) )
p2 = Gaffer.ObjectPlug( "p2", Gaffer.Plug.Direction.In, IECore.IntData( 20 ) )
p3 = Gaffer.ObjectPlug( "p3", Gaffer.Plug.Direction.In, IECore.IntData( 30 ) )
p1.setInput( n["out"] )
p2.setInput( p1 )
p3.setInput( p2 )
o2 = p2.getValue( _copy = False )
o3 = p3.getValue( _copy = False )
self.assertEqual( o2, IECore.StringData( "pig" ) )
self.assertEqual( o3, IECore.StringData( "pig" ) )
self.assertTrue( o2.isSame( o3 ) ) # they share cache entries
n["out"].setFlags( Gaffer.Plug.Flags.Cacheable, False )
o2 = p2.getValue( _copy = False )
o3 = p3.getValue( _copy = False )
self.assertEqual( o2, IECore.StringData( "pig" ) )
self.assertEqual( o3, IECore.StringData( "pig" ) )
self.assertFalse( o2.isSame( o3 ) ) # they shouldn't share cache entries
def testSetValueSignalsDirtiness( self ) :
n = Gaffer.Node()
n["p"] = Gaffer.IntPlug()
cs = GafferTest.CapturingSlot( n.plugDirtiedSignal() )
n["p"].setValue( 10 )
self.assertEqual( len( cs ), 1 )
self.assertTrue( cs[0][0].isSame( n["p"] ) )
n["p"].setValue( 10 )
self.assertEqual( len( cs ), 1 )
	def testDirtyCountPlug( self ) :
		# The dirtyCount is relative to the current dirtyCountEpoch
		refPlug = Gaffer.ValuePlug()
		epoch = refPlug.dirtyCount()
		i = Gaffer.IntPlug()
		self.assertEqual( i.dirtyCount(), epoch + 0 )
		# Each setValue() with a new value increments the count by one.
		i.setValue( 10 )
		self.assertEqual( i.dirtyCount(), epoch + 1 )
		i.setValue( 20 )
		self.assertEqual( i.dirtyCount(), epoch + 2 )
		i2 = Gaffer.IntPlug()
		v = Gaffer.ValuePlug()
		self.assertEqual( i2.dirtyCount(), epoch + 0 )
		self.assertEqual( v.dirtyCount(), epoch + 0 )
		# Need to parent to a node before dirtying based on plug reparenting will work
		n = Gaffer.Node()
		n.addChild( v )
		self.assertEqual( v.dirtyCount(), epoch + 1 )
		self.assertEqual( i2.dirtyCount(), epoch + 0 )
		# Parenting dirties both parent and child
		v.addChild( i2 )
		self.assertEqual( v.dirtyCount(), epoch + 2 )
		self.assertEqual( i2.dirtyCount(), epoch + 1 )
		# Setting the value of the child should also dirty the parent
		i2.setValue( 10 )
		self.assertEqual( v.dirtyCount(), epoch + 3 )
		self.assertEqual( i2.dirtyCount(), epoch + 2 )
def testCopyPasteDoesntRetainComputedValues( self ) :
s = Gaffer.ScriptNode()
s["add1"] = GafferTest.AddNode()
s["add2"] = GafferTest.AddNode()
s["add1"]["op1"].setValue( 2 )
s["add1"]["op2"].setValue( 3 )
s["add2"]["op1"].setInput( s["add1"]["sum"] )
s["add2"]["op2"].setValue( 0 )
self.assertEqual( s["add2"]["sum"].getValue(), 5 )
ss = s.serialise( filter = Gaffer.StandardSet( [ s["add2"] ] ) )
s = Gaffer.ScriptNode()
s.execute( ss )
self.assertTrue( s["add2"]["op1"].getInput() is None )
self.assertEqual( s["add2"]["op1"].getValue(), 0 )
self.assertEqual( s["add2"]["sum"].getValue(), 0 )
def testSerialisationOmitsDefaultValues( self ) :
s = Gaffer.ScriptNode()
s["n"] = GafferTest.AddNode()
self.assertEqual( s["n"]["op1"].getValue(), s["n"]["op1"].defaultValue() )
self.assertEqual( s["n"]["op2"].getValue(), s["n"]["op2"].defaultValue() )
self.assertFalse( "setValue" in s.serialise() )
s["n"]["op1"].setValue( 10 )
self.assertTrue( "[\"op1\"].setValue" in s.serialise() )
def testFloatPlugOmitsDefaultValues( self ) :
s = Gaffer.ScriptNode()
s["n"] = Gaffer.Node()
s["n"]["f"] = Gaffer.FloatPlug( flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
self.assertFalse( "setValue" in s.serialise() )
s["n"]["f"].setValue( 10.1 )
self.assertTrue( "[\"f\"].setValue" in s.serialise() )
def testBoolPlugOmitsDefaultValues( self ) :
s = Gaffer.ScriptNode()
s["n"] = Gaffer.Node()
s["n"]["f"] = Gaffer.BoolPlug( flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
self.assertFalse( "setValue" in s.serialise() )
s["n"]["f"].setValue( True )
self.assertTrue( "[\"f\"].setValue" in s.serialise() )
def testBoolPlugOmitsDefaultValuesWhenDefaultIsTrue( self ) :
s = Gaffer.ScriptNode()
s["n"] = Gaffer.Node()
s["n"]["f"] = Gaffer.BoolPlug( defaultValue = True, flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
self.assertFalse( "setValue" in s.serialise() )
s["n"]["f"].setValue( False )
self.assertTrue( "[\"f\"].setValue" in s.serialise() )
def testCreateCounterpart( self ) :
p = Gaffer.ValuePlug()
p["i"] = Gaffer.IntPlug()
p["f"] = Gaffer.FloatPlug()
p2 = p.createCounterpart( "p2", Gaffer.Plug.Direction.In )
self.assertEqual( p2.keys(), [ "i", "f" ] )
self.assertTrue( isinstance( p2["i"], Gaffer.IntPlug ) )
self.assertTrue( isinstance( p2["f"], Gaffer.FloatPlug ) )
self.assertTrue( isinstance( p2, Gaffer.ValuePlug ) )
	def testPrecomputedHashOptimisation( self ) :
		# Verifies that getValue() with a _precomputedHash does not call
		# hash() again. Relies on CachingTestNode.numHashCalls bookkeeping.
		n = GafferTest.CachingTestNode()
		n["in"].setValue( "a" )
		a1 = n["out"].getValue( _copy = False )
		self.assertEqual( a1, IECore.StringData( "a" ) )
		self.assertEqual( n.numHashCalls, 1 )
		# We apply some leeway in our test for how many hash calls are
		# made - a good ValuePlug implementation will probably avoid
		# unecessary repeated calls in most cases, but it's not
		# what this unit test is about.
		a2 = n["out"].getValue( _copy = False )
		self.assertTrue( a2.isSame( a1 ) )
		self.assertTrue( n.numHashCalls == 1 or n.numHashCalls == 2 )
		h = n["out"].hash()
		self.assertTrue( n.numHashCalls >= 1 and n.numHashCalls <= 3 )
		numHashCalls = n.numHashCalls
		# What we care about is that calling getValue() with a precomputed hash
		# definitely doesn't recompute the hash again.
		a3 = n["out"].getValue( _copy = False, _precomputedHash = h )
		self.assertEqual( n.numHashCalls, numHashCalls )
		self.assertTrue( a3.isSame( a1 ) )
def testSerialisationOfChildValues( self ) :
s = Gaffer.ScriptNode()
s["n"] = Gaffer.Node()
s["n"]["user"]["v"] = Gaffer.ValuePlug( flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
s["n"]["user"]["v"]["i"] = Gaffer.IntPlug( flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
s["n"]["user"]["v"]["i"].setValue( 10 )
s2 = Gaffer.ScriptNode()
s2.execute( s.serialise() )
self.assertEqual( s2["n"]["user"]["v"]["i"].getValue(), 10 )
def testSerialisation( self ) :
s = Gaffer.ScriptNode()
s["n1"] = GafferTest.CompoundPlugNode()
s["n2"] = GafferTest.CompoundPlugNode()
s["n1"]["p"]["f"].setValue( 10 )
s["n1"]["p"]["s"].setInput( s["n2"]["p"]["s"] )
ss = s.serialise()
s = Gaffer.ScriptNode()
s.execute( ss )
self.assertEqual( s["n1"]["p"]["f"].getValue(), 10 )
self.assertTrue( s["n1"]["p"]["s"].getInput().isSame( s["n2"]["p"]["s"] ) )
def testDynamicSerialisation( self ) :
s = Gaffer.ScriptNode()
s["n1"] = Gaffer.Node()
s["n1"]["p"] = Gaffer.ValuePlug( flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
s["n1"]["p"]["f"] = Gaffer.FloatPlug( flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
s["n1"]["p"]["f"].setValue( 10 )
ss = s.serialise()
s = Gaffer.ScriptNode()
s.execute( ss )
self.assertEqual( s["n1"]["p"]["f"].getValue(), 10 )
def testMasterConnectionTracksChildConnections( self ) :
c = Gaffer.ValuePlug( "c" )
c["f1"] = Gaffer.FloatPlug()
c["f2"] = Gaffer.FloatPlug()
n = Gaffer.Node()
n["c"] = c
c2 = Gaffer.ValuePlug( "c" )
c2["f1"] = Gaffer.FloatPlug()
c2["f2"] = Gaffer.FloatPlug()
n2 = Gaffer.Node()
n2["c"] = c2
n2["c"]["f1"].setInput( n["c"]["f1"] )
n2["c"]["f2"].setInput( n["c"]["f2"] )
self.assertTrue( n2["c"].getInput().isSame( n["c"] ) )
n2["c"]["f2"].setInput( None )
self.assertIsNone( n2["c"].getInput() )
n2["c"]["f2"].setInput( n["c"]["f2"] )
self.assertTrue( n2["c"].getInput().isSame( n["c"] ) )
c["f3"] = Gaffer.FloatPlug()
c2["f3"] = Gaffer.FloatPlug()
self.assertIsNone( n2["c"].getInput() )
n2["c"]["f3"].setInput( n["c"]["f3"] )
self.assertTrue( n2["c"].getInput().isSame( n["c"] ) )
	def testInputChangedCrash( self ) :
		# Regression test : building two connected compound plugs and then
		# destroying them must not crash during garbage collection.
		ca = Gaffer.ValuePlug( "ca" )
		ca["fa1"] = Gaffer.FloatPlug()
		ca["fa2"] = Gaffer.FloatPlug()
		na = Gaffer.Node()
		na["ca"] = ca
		cb = Gaffer.ValuePlug( "cb" )
		cb["fb1"] = Gaffer.FloatPlug()
		cb["fb2"] = Gaffer.FloatPlug()
		nb = Gaffer.Node()
		nb["cb"] = cb
		nb["cb"]["fb1"].setInput( na["ca"]["fa1"] )
		# Drop all references, then collect until no more garbage remains,
		# so any crash in teardown manifests here rather than at exit.
		del ca, na, cb, nb
		while gc.collect() :
			pass
		IECore.RefCounted.collectGarbage()
def testDirtyPropagation( self ) :
n = GafferTest.CompoundPlugNode()
dirtyPlugs = GafferTest.CapturingSlot( n.plugDirtiedSignal() )
n["p"]["f"].setValue( 100 )
self.assertEqual( len( dirtyPlugs ), 4 )
self.assertTrue( dirtyPlugs[0][0].isSame( n["p"]["f"] ) )
self.assertTrue( dirtyPlugs[1][0].isSame( n["p"] ) )
self.assertTrue( dirtyPlugs[2][0].isSame( n["o"]["f"] ) )
self.assertTrue( dirtyPlugs[3][0].isSame( n["o"] ) )
def testPlugSetPropagation( self ) :
c = Gaffer.ValuePlug()
c["f1"] = Gaffer.FloatPlug()
n = Gaffer.Node()
n["c"] = c
def setCallback( plug ) :
if plug.isSame( c ) :
self.set = True
cn = n.plugSetSignal().connect( setCallback )
self.set = False
c["f1"].setValue( 10 )
self.assertTrue( self.set )
def testMultipleLevelsOfPlugSetPropagation( self ) :
c = Gaffer.ValuePlug( "c" )
c["c1"] = Gaffer.ValuePlug()
c["c1"]["f1"] = Gaffer.FloatPlug()
n = Gaffer.Node()
n["c"] = c
def setCallback( plug ) :
self.setPlugs.append( plug.getName() )
cn = n.plugSetSignal().connect( setCallback )
self.setPlugs = []
c["c1"]["f1"].setValue( 10 )
self.assertEqual( len( self.setPlugs ), 3 )
self.assertEqual( self.setPlugs, [ "f1", "c1", "c" ] )
def testMultipleLevelsOfPlugSetPropagationWithDifferentParentingOrder( self ) :
n = Gaffer.Node()
n["c"] = Gaffer.ValuePlug()
n["c"]["c1"] = Gaffer.ValuePlug()
n["c"]["c1"]["f1"] = Gaffer.FloatPlug()
def setCallback( plug ) :
self.setPlugs.append( plug.getName() )
cn = n.plugSetSignal().connect( setCallback )
self.setPlugs = []
n["c"]["c1"]["f1"].setValue( 10 )
self.assertEqual( len( self.setPlugs ), 3 )
self.assertIn( "c", self.setPlugs )
self.assertIn( "c1", self.setPlugs )
self.assertIn( "f1", self.setPlugs )
def testAcceptsInput( self ) :
i = Gaffer.ValuePlug()
o = Gaffer.ValuePlug( direction=Gaffer.Plug.Direction.Out )
s = Gaffer.StringPlug( direction=Gaffer.Plug.Direction.Out )
i.addChild( Gaffer.IntPlug() )
o.addChild( Gaffer.IntPlug( direction=Gaffer.Plug.Direction.Out ) )
self.assertTrue( i.acceptsInput( o ) )
self.assertFalse( i.acceptsInput( s ) )
def testAcceptsNoneInput( self ) :
p = Gaffer.ValuePlug( "hello" )
self.assertTrue( p.acceptsInput( None ) )
def testSerialisationOfMasterConnection( self ) :
s = Gaffer.ScriptNode()
s["n1"] = GafferTest.CompoundPlugNode()
s["n2"] = GafferTest.CompoundPlugNode()
s["n1"]["p"].setInput( s["n2"]["p"] )
self.assertTrue( s["n1"]["p"].getInput().isSame( s["n2"]["p"] ) )
self.assertTrue( s["n1"]["p"]["f"].getInput().isSame( s["n2"]["p"]["f"] ) )
self.assertTrue( s["n1"]["p"]["s"].getInput().isSame( s["n2"]["p"]["s"] ) )
ss = s.serialise()
s = Gaffer.ScriptNode()
s.execute( ss )
self.assertTrue( s["n1"]["p"].getInput().isSame( s["n2"]["p"] ) )
self.assertTrue( s["n1"]["p"]["f"].getInput().isSame( s["n2"]["p"]["f"] ) )
self.assertTrue( s["n1"]["p"]["s"].getInput().isSame( s["n2"]["p"]["s"] ) )
def testSetInputShortcut( self ) :
n1 = Gaffer.Node()
n1["c"] = Gaffer.Plug()
n2 = Gaffer.Node()
n2["c"] = Gaffer.Plug( direction = Gaffer.Plug.Direction.Out )
cs = GafferTest.CapturingSlot( n1.plugInputChangedSignal() )
self.assertEqual( len( cs ), 0 )
n1["c"].setInput( n2["c"] )
# we should get a signal the first time
self.assertEqual( len( cs ), 1 )
n1["c"].setInput( n2["c"] )
# but the second time there should be no signal,
# because it was the same.
self.assertEqual( len( cs ), 1 )
def testSetInputWithoutParent( self ) :
c1 = Gaffer.Plug( direction=Gaffer.Plug.Direction.Out )
c1["n"] = Gaffer.IntPlug( direction=Gaffer.Plug.Direction.Out )
c2 = Gaffer.Plug()
c2["n"] = Gaffer.IntPlug()
c2.setInput( c1 )
self.assertEqual( c2.getInput(), c1 )
def testCanMakeSomeConnectionsWhenSizesDontMatch( self ) :
n = Gaffer.Node()
n["c1"] = Gaffer.ValuePlug( direction = Gaffer.Plug.Direction.In )
n["c1"]["i"] = Gaffer.IntPlug()
n["c2"] = Gaffer.ValuePlug( direction = Gaffer.Plug.Direction.Out )
n["c2"]["i1"] = Gaffer.IntPlug( direction = Gaffer.Plug.Direction.Out )
n["c2"]["i2"] = Gaffer.IntPlug( direction = Gaffer.Plug.Direction.Out )
n["c1"]["i"].setInput( n["c2"]["i1"] )
self.assertTrue( n["c1"]["i"].getInput().isSame( n["c2"]["i1"] ) )
self.assertTrue( n["c1"].getInput().isSame( n["c2"] ) )
def testSerialisationOfDynamicPlugsOnNondynamicParent( self ) :
s = Gaffer.ScriptNode()
s["n"] = GafferTest.CompoundPlugNode()
s["n"]["nonDynamicParent"]["dynamicPlug"] = Gaffer.IntPlug( flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
s["n"]["nonDynamicParent"]["dynamicPlug"].setValue( 10 )
s2 = Gaffer.ScriptNode()
s2.execute( s.serialise() )
self.assertEqual( s2["n"]["nonDynamicParent"]["dynamicPlug"].getValue(), 10 )
def testChildAdditionEmitsPlugSet( self ) :
n = Gaffer.Node()
n["c"] = Gaffer.ValuePlug()
n["c"]["d"] = Gaffer.ValuePlug()
cs = GafferTest.CapturingSlot( n.plugSetSignal() )
n["c"]["d"]["e"] = Gaffer.IntPlug()
self.assertEqual( len( cs ), 2 )
self.assertEqual( cs[0][0], n["c"]["d"] )
self.assertEqual( cs[1][0], n["c"] )
def testNoNonValuePlugChildren( self ) :
v = Gaffer.ValuePlug()
p = Gaffer.Plug()
self.assertFalse( v.acceptsChild( p ) )
self.assertRaises( RuntimeError, v.addChild, p )
	def testDerivingInPython( self ) :
		# Verifies that ValuePlug can be subclassed from Python, that
		# constructor arguments are honoured, that acceptsChild() can be
		# overridden, and that wrapping preserves Python object identity.
		class TestValuePlug( Gaffer.ValuePlug ) :
			def __init__( self, name = "TestValuePlug", direction = Gaffer.Plug.Direction.In, flags = Gaffer.Plug.Flags.None_ ) :
				Gaffer.ValuePlug.__init__( self, name, direction, flags )
			def acceptsChild( self, child ) :
				# Restrict children to IntPlugs, on top of the base checks.
				if not Gaffer.ValuePlug.acceptsChild( self, child ) :
					return False
				return isinstance( child, Gaffer.IntPlug )
		IECore.registerRunTimeTyped( TestValuePlug )
		# check the constructor
		p = TestValuePlug()
		self.assertEqual( p.getName(), "TestValuePlug" )
		self.assertEqual( p.direction(), Gaffer.Plug.Direction.In )
		self.assertEqual( p.getFlags(), Gaffer.Plug.Flags.None_ )
		p = TestValuePlug( name = "p", direction = Gaffer.Plug.Direction.Out, flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
		self.assertEqual( p.getName(), "p" )
		self.assertEqual( p.direction(), Gaffer.Plug.Direction.Out )
		self.assertEqual( p.getFlags(), Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
		# check that acceptsChild can be overridden
		p = TestValuePlug()
		self.assertRaises( RuntimeError, p.addChild, Gaffer.FloatPlug() )
		p.addChild( Gaffer.IntPlug() )
		# check that the fact the plug has been wrapped solves the object identity problem
		p = TestValuePlug()
		n = Gaffer.Node()
		n["p"] = p
		self.assertTrue( n["p"] is p )
def testNullInputPropagatesToChildren( self ) :
n = Gaffer.Node()
n["user"]["c"] = Gaffer.ValuePlug()
n["user"]["c"]["o"] = Gaffer.IntPlug()
n["user"]["c"]["i"] = Gaffer.IntPlug()
n["user"]["c"]["i"].setInput( n["user"]["c"]["o"] )
self.assertTrue( n["user"]["c"]["i"].getInput().isSame( n["user"]["c"]["o"] ) )
n["user"]["c"].setInput( None )
self.assertTrue( n["user"]["c"]["i"].getInput() is None )
@GafferTest.TestRunner.PerformanceTestMethod()
def testContentionForOneItem( self ) :
m = GafferTest.MultiplyNode()
GafferTest.parallelGetValue( m["product"], 10000000 )
	def testIsSetToDefault( self ) :
		# Verifies isSetToDefault() for direct values, static input
		# connections, and computed inputs.
		n1 = GafferTest.AddNode()
		self.assertTrue( n1["op1"].isSetToDefault() )
		self.assertTrue( n1["op2"].isSetToDefault() )
		# Output plugs are computed, so never "at the default".
		self.assertFalse( n1["sum"].isSetToDefault() )
		n1["op1"].setValue( 10 )
		self.assertFalse( n1["op1"].isSetToDefault() )
		self.assertTrue( n1["op2"].isSetToDefault() )
		n1["op1"].setToDefault()
		self.assertTrue( n1["op1"].isSetToDefault() )
		self.assertTrue( n1["op2"].isSetToDefault() )
		n2 = GafferTest.AddNode()
		self.assertTrue( n2["op1"].isSetToDefault() )
		self.assertTrue( n2["op2"].isSetToDefault() )
		self.assertFalse( n2["sum"].isSetToDefault() )
		n2["op1"].setInput( n1["op1"] )
		# Receiving a static value via an input. We know
		# it can have only one value for all contexts,
		# and can be confident that it is set to the default.
		self.assertTrue( n2["op1"].isSetToDefault() )
		self.assertEqual( n2["op1"].getValue(), n2["op1"].defaultValue() )
		n1["op1"].setValue( 1 )
		# Until it provides a non-default value, that is.
		self.assertFalse( n2["op1"].isSetToDefault() )
		n1["op1"].setValue( 0 )
		n2["op2"].setInput( n1["sum"] )
		# Driven by a compute, so not considered to be
		# at the default, even if it the result happens
		# to be equal in this context.
		self.assertFalse( n2["op2"].isSetToDefault() )
		self.assertEqual( n2["op2"].getValue(), n2["op2"].defaultValue() )
def testCancellationDuringCompute( self ) :

	# Expression that honours cancellation requests before writing `op1`.
	s = Gaffer.ScriptNode()
	s["n"] = GafferTest.AddNode()
	s["e"] = Gaffer.Expression()
	s["e"].setExpression( inspect.cleandoc(
		"""
		IECore.Canceller.check( context.canceller() )
		parent['n']['op1'] = 40
		"""
	) )

	# A pre-cancelled canceller must abort the compute with `IECore.Cancelled`.
	canceller = IECore.Canceller()
	canceller.cancel()
	with Gaffer.Context( s.context(), canceller ) :
		with self.assertRaises( IECore.Cancelled ) :
			s["n"]["sum"].getValue()

	# A fresh canceller lets the same compute complete normally.
	canceller = IECore.Canceller()
	with Gaffer.Context( s.context(), canceller ) :
		self.assertEqual( s["n"]["sum"].getValue(), 40 )
def testClearHashCache( self ) :

	node = GafferTest.AddNode()
	# Prime the hash cache.
	node["sum"].getValue()

	def hashCountForGetValue() :
		with Gaffer.PerformanceMonitor() as monitor :
			node["sum"].getValue()
		return monitor.plugStatistics( node["sum"] ).hashCount

	# The hash is cached, so `getValue()` needs no hashing at all.
	self.assertEqual( hashCountForGetValue(), 0 )

	# After clearing the cache, exactly one hash must be recomputed.
	Gaffer.ValuePlug.clearHashCache()
	self.assertEqual( hashCountForGetValue(), 1 )
def testResetDefault( self ) :

	script = Gaffer.ScriptNode()
	script["node"] = Gaffer.Node()
	script["node"]["user"]["i"] = Gaffer.IntPlug( defaultValue = 1, flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
	script["node"]["user"]["v"] = Gaffer.V3iPlug( defaultValue = imath.V3i( 1, 2, 3 ), flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )

	def assertPreconditions( script ) :
		# Original defaults, values at the default.
		self.assertTrue( script["node"]["user"]["i"].isSetToDefault() )
		self.assertEqual( script["node"]["user"]["i"].defaultValue(), 1 )
		self.assertEqual( script["node"]["user"]["i"].getValue(), 1 )
		self.assertTrue( script["node"]["user"]["v"].isSetToDefault() )
		self.assertEqual( script["node"]["user"]["v"].defaultValue(), imath.V3i( 1, 2, 3 ) )
		self.assertEqual( script["node"]["user"]["v"].getValue(), imath.V3i( 1, 2, 3 ) )

	assertPreconditions( script )

	# Change the values and adopt them as new defaults, in one undoable step.
	with Gaffer.UndoScope( script ) :
		script["node"]["user"]["i"].setValue( 2 )
		script["node"]["user"]["i"].resetDefault()
		script["node"]["user"]["v"].setValue( imath.V3i( 10, 11, 12 ) )
		script["node"]["user"]["v"].resetDefault()

	def assertPostconditions( script ) :
		# New defaults, values at the (new) default.
		self.assertTrue( script["node"]["user"]["i"].isSetToDefault() )
		self.assertEqual( script["node"]["user"]["i"].defaultValue(), 2 )
		self.assertEqual( script["node"]["user"]["i"].getValue(), 2 )
		self.assertTrue( script["node"]["user"]["v"].isSetToDefault() )
		self.assertEqual( script["node"]["user"]["v"].defaultValue(), imath.V3i( 10, 11, 12 ) )
		self.assertEqual( script["node"]["user"]["v"].getValue(), imath.V3i( 10, 11, 12 ) )

	# `resetDefault()` must participate fully in undo/redo...
	script.undo()
	assertPreconditions( script )

	script.redo()
	assertPostconditions( script )

	script.undo()
	assertPreconditions( script )

	script.redo()
	assertPostconditions( script )

	# ...and the new defaults must round-trip through serialisation.
	script2 = Gaffer.ScriptNode()
	script2.execute( script.serialise() )
	assertPostconditions( script2 )
def testCacheValidation( self ) :

	# `Checked` mode detects nodes whose `affects()` implementation fails to
	# declare a dependency, raising `Gaffer.ProcessException` from `getValue()`.
	defaultHashCacheMode = Gaffer.ValuePlug.getHashCacheMode()
	Gaffer.ValuePlug.setHashCacheMode( Gaffer.ValuePlug.HashCacheMode.Checked )
	try :
		# MultiplyNode with the "broken affects" flag enabled.
		m = GafferTest.MultiplyNode( "test", True )
		m["op1"].setValue( 2 )
		m["op2"].setValue( 3 )
		self.assertEqual( m["product"].getValue(), 6 )

		# Changing the undeclared dependency must now be detected.
		# Using `assertRaises` rather than manual exception capture - it also
		# fails cleanly if no exception is raised at all.
		m["op2"].setValue( 4 )
		with self.assertRaises( Gaffer.ProcessException ) as raised :
			m["product"].getValue()
		self.assertEqual( str( raised.exception ), "test.product : Detected undeclared dependency. Fix DependencyNode::affects() implementation." )

		# Make sure the plug with the actual issue is reported, when queried from a downstream network
		m2 = GafferTest.MultiplyNode( "second" )
		m2["op1"].setInput( m["product"] )
		m2["op2"].setValue( 5 )
		with self.assertRaises( Gaffer.ProcessException ) as raised :
			m2["product"].getValue()
		self.assertEqual( str( raised.exception ), "test.product : Detected undeclared dependency. Fix DependencyNode::affects() implementation." )
	finally :
		# Always restore the global mode so other tests are unaffected.
		Gaffer.ValuePlug.setHashCacheMode( defaultHashCacheMode )
def testDefaultHash( self ) :

	# Plug with a single value : the default hash is non-trivial, stable,
	# sensitive to the default value, and matches `hash()` at the default.
	intHash = Gaffer.IntPlug().defaultHash()
	self.assertNotEqual( intHash, IECore.MurmurHash() )
	self.assertEqual( intHash, Gaffer.IntPlug().defaultHash() )
	self.assertNotEqual( intHash, Gaffer.IntPlug( defaultValue = 2 ).defaultHash() )
	self.assertEqual( intHash, Gaffer.IntPlug().hash() )

	# Compound plugs : same guarantees, plus sensitivity to the plug type.
	v2iHash = Gaffer.V2iPlug().defaultHash()
	self.assertNotEqual( v2iHash, IECore.MurmurHash() )
	self.assertEqual( v2iHash, Gaffer.V2iPlug().defaultHash() )
	self.assertNotEqual( v2iHash, Gaffer.V2iPlug( defaultValue = imath.V2i( 0, 1 ) ).defaultHash() )
	self.assertNotEqual( v2iHash, Gaffer.V3iPlug().defaultHash() )
	self.assertEqual( v2iHash, Gaffer.V2iPlug().hash() )
def testExceptionDuringParallelEval( self ) :

	# This only caused a problem when using GAFFER_PYTHONEXPRESSION_CACHEPOLICY=TaskCollaboration
	# with a TaskMutex without a properly isolated task_group so that exceptions in one thread
	# can cancel the other. We're adding a more specific test for this to TaskMutex, so we're not
	# expecting this to catch anything, but it's still a valid test
	m = GafferTest.MultiplyNode()
	m["e"] = Gaffer.Expression()
	# The expression raises for one in every ten values of `testVar`.
	m["e"].setExpression( inspect.cleandoc(
		"""
		if context["testVar"]%10 == 9:
			raise BaseException( "Foo" )
		parent['op1'] = 1
		"""
	) )

	# The exception must propagate out of the parallel evaluation intact.
	with six.assertRaisesRegex( self, BaseException, "Foo" ):
		GafferTest.parallelGetValue( m["product"], 10000, "testVar" )
def testCancellationOfSecondGetValueCall( self ) :

	## \todo Should just be checking `tbb.global_control.active_value( max_allowed_parallelism )`
	# to get the true limit set by `-threads` argument. But IECore's Python
	# binding of `global_control` doesn't expose that yet.
	if IECore.hardwareConcurrency() < 3 and "VALUEPLUGTEST_SUBPROCESS" not in os.environ :
		# This test requires at least 3 TBB threads (including the main
		# thread), because we need the second enqueued BackgroundTask to
		# start execution before the first one has completed. If we have
		# insufficient threads then we end up in deadlock, so to avoid this
		# we relaunch in a subprocess with sufficient threads.
		#
		# Note : deadlock only ensues because the first task will never
		# return without cancellation. This is an artificial situation, not
		# one that would occur in practical usage of Gaffer itself.
		print( "Running in subprocess due to insufficient TBB threads" )
		try :
			env = os.environ.copy()
			# Marker that prevents infinite re-launch recursion.
			env["VALUEPLUGTEST_SUBPROCESS"] = "1"
			subprocess.check_output(
				[ "gaffer", "test", "-threads", "3", "GafferTest.ValuePlugTest.testCancellationOfSecondGetValueCall" ],
				stderr = subprocess.STDOUT,
				env = env
			)
		except subprocess.CalledProcessError as e :
			self.fail( e.output )
		return

	# Node whose compute spins forever, exiting only via cancellation.
	class InfiniteLoop( Gaffer.ComputeNode ) :

		def __init__( self, name = "InfiniteLoop", cachePolicy = Gaffer.ValuePlug.CachePolicy.Standard ) :

			Gaffer.ComputeNode.__init__( self, name )
			# Notified when `compute()` starts, so the test can synchronise.
			self.computeStartedCondition = threading.Condition()
			self.__cachePolicy = cachePolicy
			self["out"] = Gaffer.IntPlug( direction = Gaffer.Plug.Direction.Out )

		# No need to implement `hash()` - because our result is constant (or
		# non-existent), the default hash is sufficient.

		def compute( self, output, context ) :

			with self.computeStartedCondition :
				self.computeStartedCondition.notify()

			if output == self["out"] :
				# Spin until the canceller fires.
				while True :
					IECore.Canceller.check( context.canceller() )

		def computeCachePolicy( self, output ) :

			return self.__cachePolicy

	IECore.registerRunTimeTyped( InfiniteLoop )

	for cachePolicy in (
		Gaffer.ValuePlug.CachePolicy.Legacy,
		Gaffer.ValuePlug.CachePolicy.Standard,
		Gaffer.ValuePlug.CachePolicy.TaskIsolation,
		# Omitting TaskCollaboration, because if our second compute joins as
		# a worker, there is currently no way we can recall it. See comments
		# in `LRUCachePolicy.TaskParallel.Handle.acquire`.
	) :

		node = InfiniteLoop( cachePolicy = cachePolicy )

		# Launch a compute in the background, and wait for it to start.
		with node.computeStartedCondition :
			backgroundTask1 = Gaffer.ParallelAlgo.callOnBackgroundThread( node["out"], lambda : node["out"].getValue() )
			node.computeStartedCondition.wait()

		# Launch a second compute in the background, wait for it to start, and
		# then make sure we can cancel it even though the compute is already in
		# progress on another thread.

		startedCondition = threading.Condition()

		def getValueExpectingCancellation() :

			with startedCondition :
				startedCondition.notify()

			with self.assertRaises( IECore.Cancelled ) :
				node["out"].getValue()

		with startedCondition :
			backgroundTask2 = Gaffer.ParallelAlgo.callOnBackgroundThread( node["out"], getValueExpectingCancellation )
			startedCondition.wait()

		backgroundTask2.cancelAndWait()
		backgroundTask1.cancelAndWait()
def setUp( self ) :

	GafferTest.TestCase.setUp( self )
	# Remember the cache limit so `tearDown()` can restore it - some tests
	# modify the global cache memory limit.
	self.__originalCacheMemoryLimit = Gaffer.ValuePlug.getCacheMemoryLimit()
def tearDown( self ) :

	GafferTest.TestCase.tearDown( self )
	# Restore the global cache limit captured in `setUp()`.
	Gaffer.ValuePlug.setCacheMemoryLimit( self.__originalCacheMemoryLimit )
# Allow the test file to be run directly.
if __name__ == "__main__":
	unittest.main()
| |
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions for getting molecule features from NIST sdf data.
This module includes functions to help with parsing sdf files and generating
features such as atom weight lists and adjacency matrices. Also contains a
function to parse mass spectra peaks from their string format in the NIST sdf
files.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import feature_map_constants as fmap_constants
import mass_spec_constants as ms_constants
import numpy as np
from rdkit import Chem
from rdkit import DataStructs
from rdkit.Chem import AllChem
# SMARTS substructure queries used by make_filter_by_substructure() to split
# the dataset into structural families.
FILTER_DICT = {
    'steroid':
        Chem.MolFromSmarts(
            '[#6]1~[#6]2~[#6](~[#6]~[#6]~[#6]~1)'
            '~[#6]~[#6]~[#6]1~[#6]~2~[#6]~[#6]~[#6]2~[#6]~1~[#6]~[#6]~[#6]~2'
        ),
    'diazo':
        Chem.MolFromSmarts(
            '[#7]~[#7]'
        ),
}
def get_smiles_string(mol):
  """Makes a canonical, isomeric SMILES string from an rdkit.Mol."""
  return Chem.MolToSmiles(mol, canonical=True, isomericSmiles=True)
def get_molecular_formula(mol):
  """Makes string of molecular formula from rdkit.Mol via AllChem.CalcMolFormula."""
  return AllChem.CalcMolFormula(mol)
def parse_peaks(pk_str):
  r"""Convert a NIST peak string into parallel location/intensity lists.

  Args:
    pk_str : String from NIST MS data of format
        "peak1_loc peak1_int\npeak2_loc peak2_int"

  Returns:
    A tuple of two lists: (peak_locs, peak_intensities)
    peak_locs : int list of the location of peaks
    peak_intensities : float list of the intensity of the peaks
  """
  # Each line is "<location> <intensity>"; a malformed line raises ValueError.
  peak_pairs = [line.split() for line in pk_str.split('\n')]
  peak_locs = [int(loc) for loc, _ in peak_pairs]
  peak_intensities = [float(intensity) for _, intensity in peak_pairs]
  return peak_locs, peak_intensities
def convert_spectrum_array_to_string(spectrum_array):
  """Write a spectrum array to its peak-list string form.

  Args:
    spectrum_array : 1-D np.array of peak intensities indexed by m/z.

  Returns:
    String of "<location> <intensity>" lines, one per nonzero peak.
  """
  peak_positions = np.flatnonzero(spectrum_array).tolist()
  # NOTE(review): '%d' truncates fractional intensities - presumably the
  # intensities are integral at this point; confirm against callers.
  return '\n'.join(
      '%d %d' % (position, spectrum_array[position])
      for position in peak_positions)
def get_largest_mass_spec_peak_loc(mol):
  """Returns the last ms peak location from an rdkit.Mol object.

  Takes the final entry of the parsed peak-location list - assumes the peak
  string in the SDF tag is sorted by location, so this is the largest peak
  location (TODO confirm against the NIST data format).
  """
  return parse_peaks(mol.GetProp(ms_constants.SDF_TAG_MASS_SPEC_PEAKS))[0][-1]
def make_dense_mass_spectra(peak_locs, peak_intensities, max_peak_loc):
  """Make a dense np.array of the mass spectra.

  Args:
    peak_locs : int list of the location of peaks
    peak_intensities : float list of the intensity of the peaks
    max_peak_loc : maximum number of peak bins

  Returns:
    np.array of the mass spectra data as a dense vector.
  """
  # Scatter intensities into a zero-filled vector indexed by peak location.
  spectrum = np.zeros(max_peak_loc)
  spectrum[peak_locs] = peak_intensities
  return spectrum
def get_padded_atom_weights(mol, max_atoms):
  """Make a zero-padded vector of atomic masses from an rdkit.Mol.

  Note: Returns atoms in the same order as the input rdkit.Mol.
  If you want the atoms in canonical order, you should canonicalize
  the molecule first.

  Args:
    mol : a rdkit.Mol object
    max_atoms : maximum number of atoms to consider

  Returns:
    np array of atoms by atomic mass of shape (max_atoms)

  Raises:
    ValueError : If rdkit.Mol object had more atoms than max_atoms.
  """
  num_atoms = len(mol.GetAtoms())
  if num_atoms > max_atoms:
    raise ValueError(
        'molecule contains {} atoms, more than max_atoms {}'.format(
            num_atoms, max_atoms))
  # Fill the leading entries with masses; the tail stays zero.
  padded = np.zeros(max_atoms)
  padded[:num_atoms] = [atom.GetMass() for atom in mol.GetAtoms()]
  return padded
def get_padded_atom_ids(mol, max_atoms):
  """Make a zero-padded int32 vector of atomic numbers from an rdkit.Mol.

  Note: function returns atoms in the same order as the input rdkit.Mol.
  If you want the atoms in canonical order, you should canonicalize
  the molecule first.

  Args:
    mol : a rdkit.Mol object
    max_atoms : maximum number of atoms to consider

  Returns:
    np array of atoms by atomic number of shape (max_atoms)

  Raises:
    ValueError : rdkit.Mol object is too big, had more atoms than max_atoms.
  """
  num_atoms = len(mol.GetAtoms())
  if num_atoms > max_atoms:
    raise ValueError(
        'molecule contains {} atoms, more than max_atoms {}'.format(
            num_atoms, max_atoms))
  # Leading entries are atomic numbers; the tail stays zero.
  padded = np.zeros(max_atoms, dtype='int32')
  padded[:num_atoms] = [atom.GetAtomicNum() for atom in mol.GetAtoms()]
  return padded
def get_padded_adjacency_matrix(mol, max_atoms, add_hs_to_molecule=False):
  """Make a flattened, padded adjacency matrix from an rdkit.Mol.

  Args:
    mol: a rdkit.Mol object
    max_atoms : maximum number of atoms to consider
    add_hs_to_molecule : whether or not to add hydrogens to the molecule.

  Returns:
    np.array of a flattened adjacency matrix of length
    (max_atoms * max_atoms). The nonzero values are the indices of the
    bond orders in the alphabet.

  Raises:
    ValueError : rdkit.Mol object is too big, had more atoms than max_atoms.
  """
  if add_hs_to_molecule:
    mol = Chem.rdmolops.AddHs(mol)

  num_atoms = len(mol.GetAtoms())
  if num_atoms > max_atoms:
    raise ValueError(
        'molecule contains {} atoms, more than max_atoms {}'.format(
            num_atoms, max_atoms))

  adjacency = Chem.rdmolops.GetAdjacencyMatrix(mol, useBO=True)
  # Replace each bond order by its index in the bond-order alphabet.
  # Each nonzero cell is touched exactly once, so the in-place update is safe.
  for i, j in zip(*np.nonzero(adjacency)):
    adjacency[i, j] = ms_constants.BOND_ORDER_TO_INTS_DICT[adjacency[i, j]]

  padded = np.zeros((max_atoms, max_atoms))
  padded[:num_atoms, :num_atoms] = adjacency
  return np.reshape(padded.astype('int32'), (max_atoms * max_atoms))
def make_circular_fingerprint(mol, circular_fp_key):
  """Returns the circular fingerprint of mol described by circular_fp_key.

  Args:
    mol : rdkit.Mol
    circular_fp_key : A ms_constants.CircularFingerprintKey object

  Returns:
    np.array of len circular_fp_key.fp_len
  """
  # Map fingerprint type names to the rdkit functions that compute them.
  fingerprint_fn_by_type = {
      fmap_constants.CIRCULAR_FP_BASENAME:
          AllChem.GetMorganFingerprintAsBitVect,
      fmap_constants.COUNTING_CIRCULAR_FP_BASENAME:
          AllChem.GetHashedMorganFingerprint
  }
  fingerprint_fn = fingerprint_fn_by_type[circular_fp_key.fp_type]
  bit_vect = fingerprint_fn(
      mol, circular_fp_key.radius, nBits=circular_fp_key.fp_len)
  # ConvertToNumpyArray populates the array in place.
  fingerprint_arr = np.zeros(1)
  DataStructs.ConvertToNumpyArray(bit_vect, fingerprint_arr)
  return fingerprint_arr
def all_circular_fingerprints_to_dict(mol):
  """Creates all circular fingerprints from list of lengths and radii.

  Covers every combination of fingerprint length, radius and type listed in
  mass_spec_constants / feature_map_constants.

  Args:
    mol : rdkit.Mol

  Returns:
    a dict. The keys are CircularFingerprintKey instances and the values are
    the corresponding fingerprints
  """
  all_keys = [
      ms_constants.CircularFingerprintKey(fp_type, fp_len, rad)
      for fp_len in ms_constants.NUM_CIRCULAR_FP_BITS_LIST
      for rad in ms_constants.CIRCULAR_FP_RADII_LIST
      for fp_type in fmap_constants.FP_TYPE_LIST
  ]
  return {key: make_circular_fingerprint(mol, key) for key in all_keys}
def check_mol_has_non_empty_smiles(mol):
  """Returns True if the canonical SMILES string of rdkit.Mol is non-empty."""
  return bool(get_smiles_string(mol))
def check_mol_has_non_empty_mass_spec_peak_tag(mol):
  """Returns True if the mass spec sdf tag is in the properties of rdkit.Mol."""
  return ms_constants.SDF_TAG_MASS_SPEC_PEAKS in mol.GetPropNames()
def check_mol_only_has_atoms(mol, accept_atom_list):
  """Returns True if every atom symbol in rdkit.Mol is in accept_atom_list."""
  accepted_symbols = set(accept_atom_list)
  return all(atom.GetSymbol() in accepted_symbols for atom in mol.GetAtoms())
def check_mol_does_not_have_atoms(mol, exclude_atom_list):
  """Returns True if no atom symbol from exclude_atom_list occurs in rdkit.Mol."""
  present_symbols = set(atom.GetSymbol() for atom in mol.GetAtoms())
  return not any(symbol in present_symbols for symbol in exclude_atom_list)
def check_mol_has_substructure(mol, substructure_mol):
  """Checks if rdkit.Mol has substructure.

  Args:
    mol : rdkit.Mol, representing query
    substructure_mol: rdkit.Mol, representing substructure family

  Returns:
    Boolean, True if substructure found in molecule.
  """
  return mol.HasSubstructMatch(substructure_mol)
def make_filter_by_substructure(family_name):
  """Returns a filter function according to the family_name.

  Args:
    family_name: a key of FILTER_DICT naming a substructure family.

  Returns:
    A one-argument predicate mapping an rdkit.Mol to True when it contains
    the family's substructure.

  Raises:
    ValueError: if family_name is not a supported family.
  """
  # Membership test directly on the dict - `.keys()` was redundant.
  if family_name not in FILTER_DICT:
    raise ValueError('%s is not supported for family splitting' % family_name)
  return lambda mol: check_mol_has_substructure(mol, FILTER_DICT[family_name])
def tokenize_smiles(smiles_string_arr):
  """Creates a list of tokens from a smiles string.

  Two letter atom characters are considered to be a single token.
  All two letter tokens observed in this dataset are recorded in
  ms_constants.TWO_LETTER_TOKEN_NAMES.

  Args:
    smiles_string_arr: np.array of dtype str and shape (1, )

  Returns:
    A np.array of ints corresponding with the tokens
  """
  smiles = smiles_string_arr[0]
  if isinstance(smiles, bytes):
    smiles = smiles.decode('utf-8')

  tokens = []
  position = 0
  while position < len(smiles):
    # Greedily consume a two-letter token when one matches here.
    pair = smiles[position:position + 2]
    if pair in ms_constants.TWO_LETTER_TOKEN_NAMES:
      tokens.append(ms_constants.SMILES_TOKEN_NAME_TO_INDEX[pair])
      position += 2
    else:
      tokens.append(ms_constants.SMILES_TOKEN_NAME_TO_INDEX[smiles[position]])
      position += 1
  return np.array(tokens, dtype=np.int64)
| |
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import os
import eventlet
from oslo_config import cfg
import oslo_messaging
from oslo_utils import importutils
from neutron.agent.linux import dhcp
from neutron.agent.linux import external_process
from neutron.agent.metadata import driver as metadata_driver
from neutron.agent import rpc as agent_rpc
from neutron.common import constants
from neutron.common import exceptions
from neutron.common import rpc as n_rpc
from neutron.common import topics
from neutron.common import utils
from neutron import context
from neutron.i18n import _LE, _LI, _LW
from neutron import manager
from neutron.openstack.common import log as logging
from neutron.openstack.common import loopingcall
LOG = logging.getLogger(__name__)
class DhcpAgent(manager.Manager):
    """DHCP agent service manager.

    Note that the public methods of this class are exposed as the server side
    of an rpc interface. The neutron server uses
    neutron.api.rpc.agentnotifiers.dhcp_rpc_agent_api.DhcpAgentNotifyApi as the
    client side to execute the methods here. For more information about
    changing rpc interfaces, see doc/source/devref/rpc_api.rst.
    """

    # RPC interface version exposed to the server side.
    target = oslo_messaging.Target(version='1.0')

    def __init__(self, host=None):
        super(DhcpAgent, self).__init__(host=host)
        # Maps network id (or None, meaning "all networks") to the list of
        # reasons why that network needs to be resynced.
        self.needs_resync_reasons = collections.defaultdict(list)
        self.conf = cfg.CONF
        self.cache = NetworkCache()
        # The configured DHCP driver class, loaded by dotted path.
        self.dhcp_driver_cls = importutils.import_class(self.conf.dhcp_driver)
        ctx = context.get_admin_context_without_session()
        self.plugin_rpc = DhcpPluginApi(topics.PLUGIN,
                                        ctx, self.conf.use_namespaces)
        # create dhcp dir to store dhcp info
        dhcp_dir = os.path.dirname("/%s/dhcp/" % self.conf.state_path)
        if not os.path.isdir(dhcp_dir):
            os.makedirs(dhcp_dir, 0o755)
        self.dhcp_version = self.dhcp_driver_cls.check_version()
        self._populate_networks_cache()
        self._process_monitor = external_process.ProcessMonitor(
            config=self.conf,
            resource_type='dhcp')

    def _populate_networks_cache(self):
        """Populate the networks cache when the DHCP-agent starts."""
        try:
            existing_networks = self.dhcp_driver_cls.existing_dhcp_networks(
                self.conf
            )
            for net_id in existing_networks:
                # Subnets/ports are empty here; sync_state() fills them in.
                net = dhcp.NetModel(self.conf.use_namespaces,
                                    {"id": net_id,
                                     "subnets": [],
                                     "ports": []})
                self.cache.put(net)
        except NotImplementedError:
            # just go ahead with an empty networks cache
            LOG.debug("The '%s' DHCP-driver does not support retrieving of a "
                      "list of existing networks",
                      self.conf.dhcp_driver)

    def after_start(self):
        self.run()
        LOG.info(_LI("DHCP agent started"))

    def run(self):
        """Activate the DHCP agent."""
        self.sync_state()
        self.periodic_resync()

    def call_driver(self, action, network, **action_kwargs):
        """Invoke an action on a DHCP driver instance.

        Returns True on success. On failure, logs and (except for conflicts)
        schedules a resync, returning None.
        """
        LOG.debug('Calling driver for network: %(net)s action: %(action)s',
                  {'net': network.id, 'action': action})
        try:
            # the Driver expects something that is duck typed similar to
            # the base models.
            driver = self.dhcp_driver_cls(self.conf,
                                          network,
                                          self._process_monitor,
                                          self.dhcp_version,
                                          self.plugin_rpc)
            getattr(driver, action)(**action_kwargs)
            return True
        except exceptions.Conflict:
            # No need to resync here, the agent will receive the event related
            # to a status update for the network
            LOG.warning(_LW('Unable to %(action)s dhcp for %(net_id)s: there '
                            'is a conflict with its current state; please '
                            'check that the network and/or its subnet(s) '
                            'still exist.'),
                        {'net_id': network.id, 'action': action})
        except Exception as e:
            self.schedule_resync(e, network.id)
            # A deleted network is expected; anything else is a real error.
            if (isinstance(e, oslo_messaging.RemoteError)
                and e.exc_type == 'NetworkNotFound'
                or isinstance(e, exceptions.NetworkNotFound)):
                LOG.warning(_LW("Network %s has been deleted."), network.id)
            else:
                LOG.exception(_LE('Unable to %(action)s dhcp for %(net_id)s.'),
                              {'net_id': network.id, 'action': action})

    def schedule_resync(self, reason, network=None):
        """Schedule a resync for a given network and reason. If no network is
        specified, resync all networks.
        """
        self.needs_resync_reasons[network].append(reason)

    @utils.synchronized('dhcp-agent')
    def sync_state(self, networks=None):
        """Sync the local DHCP state with Neutron. If no networks are passed,
        or 'None' is one of the networks, sync all of the networks.
        """
        only_nets = set([] if (not networks or None in networks) else networks)
        LOG.info(_LI('Synchronizing state'))
        pool = eventlet.GreenPool(cfg.CONF.num_sync_threads)
        known_network_ids = set(self.cache.get_network_ids())

        try:
            active_networks = self.plugin_rpc.get_active_networks_info()
            active_network_ids = set(network.id for network in active_networks)
            # Tear down DHCP for networks that no longer exist server-side.
            for deleted_id in known_network_ids - active_network_ids:
                try:
                    self.disable_dhcp_helper(deleted_id)
                except Exception as e:
                    self.schedule_resync(e, deleted_id)
                    LOG.exception(_LE('Unable to sync network state on '
                                      'deleted network %s'), deleted_id)

            # (Re)configure the relevant active networks concurrently.
            for network in active_networks:
                if (not only_nets or  # specifically resync all
                        network.id not in known_network_ids or  # missing net
                        network.id in only_nets):  # specific network to sync
                    pool.spawn(self.safe_configure_dhcp_for_network, network)
            pool.waitall()
            LOG.info(_LI('Synchronizing state complete'))
        except Exception as e:
            self.schedule_resync(e)
            LOG.exception(_LE('Unable to sync network state.'))

    @utils.exception_logger()
    def _periodic_resync_helper(self):
        """Resync the dhcp state at the configured interval."""
        while True:
            eventlet.sleep(self.conf.resync_interval)
            if self.needs_resync_reasons:
                # be careful to avoid a race with additions to list
                # from other threads
                reasons = self.needs_resync_reasons
                self.needs_resync_reasons = collections.defaultdict(list)
                for net, r in reasons.items():
                    if not net:
                        net = "*"
                    LOG.debug("resync (%(network)s): %(reason)s",
                              {"reason": r, "network": net})
                self.sync_state(reasons.keys())

    def periodic_resync(self):
        """Spawn a thread to periodically resync the dhcp state."""
        eventlet.spawn(self._periodic_resync_helper)

    def safe_get_network_info(self, network_id):
        # Returns the network info, or None (after scheduling a resync and
        # logging) when the RPC call fails.
        try:
            network = self.plugin_rpc.get_network_info(network_id)
            if not network:
                LOG.warn(_LW('Network %s has been deleted.'), network_id)
            return network
        except Exception as e:
            self.schedule_resync(e, network_id)
            LOG.exception(_LE('Network %s info call failed.'), network_id)

    def enable_dhcp_helper(self, network_id):
        """Enable DHCP for a network that meets enabling criteria."""
        network = self.safe_get_network_info(network_id)
        if network:
            self.configure_dhcp_for_network(network)

    @utils.exception_logger()
    def safe_configure_dhcp_for_network(self, network):
        try:
            self.configure_dhcp_for_network(network)
        except (exceptions.NetworkNotFound, RuntimeError):
            LOG.warn(_LW('Network %s may have been deleted and its resources '
                         'may have already been disposed.'), network.id)

    def configure_dhcp_for_network(self, network):
        # Start DHCP for the network (and the isolated metadata proxy, when
        # applicable). No-op for administratively down networks.
        if not network.admin_state_up:
            return

        enable_metadata = self.dhcp_driver_cls.should_enable_metadata(
            self.conf, network)
        dhcp_network_enabled = False

        # One driver 'enable' serves the whole network, so stop at the first
        # DHCP-enabled subnet.
        for subnet in network.subnets:
            if subnet.enable_dhcp:
                if self.call_driver('enable', network):
                    dhcp_network_enabled = True
                    self.cache.put(network)
                break

        if enable_metadata and dhcp_network_enabled:
            for subnet in network.subnets:
                if subnet.ip_version == 4 and subnet.enable_dhcp:
                    self.enable_isolated_metadata_proxy(network)
                    break

    def disable_dhcp_helper(self, network_id):
        """Disable DHCP for a network known to the agent."""
        network = self.cache.get_network_by_id(network_id)
        if network:
            if (self.conf.use_namespaces and
                    self.conf.enable_isolated_metadata):
                # NOTE(jschwarz): In the case where a network is deleted, all
                # the subnets and ports are deleted before this function is
                # called, so checking if 'should_enable_metadata' is True
                # for any subnet is false logic here.
                self.disable_isolated_metadata_proxy(network)
            if self.call_driver('disable', network):
                self.cache.remove(network)

    def refresh_dhcp_helper(self, network_id):
        """Refresh or disable DHCP for a network depending on the current state
        of the network.
        """
        old_network = self.cache.get_network_by_id(network_id)
        if not old_network:
            # DHCP current not running for network.
            return self.enable_dhcp_helper(network_id)

        network = self.safe_get_network_info(network_id)
        if not network:
            return

        # Compare the sets of DHCP-enabled subnet CIDRs to decide how heavy
        # an operation is required.
        old_cidrs = set(s.cidr for s in old_network.subnets if s.enable_dhcp)
        new_cidrs = set(s.cidr for s in network.subnets if s.enable_dhcp)

        if new_cidrs and old_cidrs == new_cidrs:
            # Same subnets: reloading the allocations is sufficient.
            self.call_driver('reload_allocations', network)
            self.cache.put(network)
        elif new_cidrs:
            # Subnets changed: restart the driver for this network.
            if self.call_driver('restart', network):
                self.cache.put(network)
        else:
            # No DHCP-enabled subnets remain.
            self.disable_dhcp_helper(network.id)

    @utils.synchronized('dhcp-agent')
    def network_create_end(self, context, payload):
        """Handle the network.create.end notification event."""
        network_id = payload['network']['id']
        self.enable_dhcp_helper(network_id)

    @utils.synchronized('dhcp-agent')
    def network_update_end(self, context, payload):
        """Handle the network.update.end notification event."""
        network_id = payload['network']['id']
        if payload['network']['admin_state_up']:
            self.enable_dhcp_helper(network_id)
        else:
            self.disable_dhcp_helper(network_id)

    @utils.synchronized('dhcp-agent')
    def network_delete_end(self, context, payload):
        """Handle the network.delete.end notification event."""
        self.disable_dhcp_helper(payload['network_id'])

    @utils.synchronized('dhcp-agent')
    def subnet_update_end(self, context, payload):
        """Handle the subnet.update.end notification event."""
        network_id = payload['subnet']['network_id']
        self.refresh_dhcp_helper(network_id)

    # Use the update handler for the subnet create event.
    subnet_create_end = subnet_update_end

    @utils.synchronized('dhcp-agent')
    def subnet_delete_end(self, context, payload):
        """Handle the subnet.delete.end notification event."""
        subnet_id = payload['subnet_id']
        network = self.cache.get_network_by_subnet_id(subnet_id)
        if network:
            self.refresh_dhcp_helper(network.id)

    @utils.synchronized('dhcp-agent')
    def port_update_end(self, context, payload):
        """Handle the port.update.end notification event."""
        updated_port = dhcp.DictModel(payload['port'])
        network = self.cache.get_network_by_id(updated_port.network_id)
        if network:
            self.cache.put_port(updated_port)
            self.call_driver('reload_allocations', network)

    # Use the update handler for the port create event.
    port_create_end = port_update_end

    @utils.synchronized('dhcp-agent')
    def port_delete_end(self, context, payload):
        """Handle the port.delete.end notification event."""
        port = self.cache.get_port_by_id(payload['port_id'])
        if port:
            network = self.cache.get_network_by_id(port.network_id)
            self.cache.remove_port(port)
            self.call_driver('reload_allocations', network)

    def enable_isolated_metadata_proxy(self, network):

        # The proxy might work for either a single network
        # or all the networks connected via a router
        # to the one passed as a parameter
        kwargs = {'network_id': network.id}

        # When the metadata network is enabled, the proxy might
        # be started for the router attached to the network
        if self.conf.enable_metadata_network:
            router_ports = [port for port in network.ports
                            if (port.device_owner ==
                                constants.DEVICE_OWNER_ROUTER_INTF)]
            if router_ports:
                # Multiple router ports should not be allowed
                if len(router_ports) > 1:
                    LOG.warning(_LW("%(port_num)d router ports found on the "
                                    "metadata access network. Only the port "
                                    "%(port_id)s, for router %(router_id)s "
                                    "will be considered"),
                                {'port_num': len(router_ports),
                                 'port_id': router_ports[0].id,
                                 'router_id': router_ports[0].device_id})
                kwargs = {'router_id': router_ports[0].device_id}

        metadata_driver.MetadataDriver.spawn_monitored_metadata_proxy(
            self._process_monitor, network.namespace, dhcp.METADATA_PORT,
            self.conf, **kwargs)

    def disable_isolated_metadata_proxy(self, network):
        metadata_driver.MetadataDriver.destroy_monitored_metadata_proxy(
            self._process_monitor, network.id, network.namespace, self.conf)
class DhcpPluginApi(object):
    """Agent side of the dhcp rpc API.

    This class implements the client side of an rpc interface.  The server side
    of this interface can be found in
    neutron.api.rpc.handlers.dhcp_rpc.DhcpRpcCallback.  For more information
    about changing rpc interfaces, see doc/source/devref/rpc_api.rst.

    API version history:
        1.0 - Initial version.
        1.1 - Added get_active_networks_info, create_dhcp_port,
              and update_dhcp_port methods.
    """

    def __init__(self, topic, context, use_namespaces):
        self.context = context
        self.host = cfg.CONF.host
        self.use_namespaces = use_namespaces
        target = oslo_messaging.Target(
            topic=topic,
            namespace=constants.RPC_NAMESPACE_DHCP_PLUGIN,
            version='1.0')
        self.client = n_rpc.get_client(target)

    def get_active_networks_info(self):
        """Make a remote process call to retrieve all network info."""
        # Requires server-side API version 1.1.
        cctxt = self.client.prepare(version='1.1')
        networks = cctxt.call(self.context, 'get_active_networks_info',
                              host=self.host)
        return [dhcp.NetModel(self.use_namespaces, n) for n in networks]

    def get_network_info(self, network_id):
        """Make a remote process call to retrieve network info.

        Returns None when the server reports no such network.
        """
        cctxt = self.client.prepare()
        network = cctxt.call(self.context, 'get_network_info',
                             network_id=network_id, host=self.host)
        if network:
            return dhcp.NetModel(self.use_namespaces, network)

    def get_dhcp_port(self, network_id, device_id):
        """Make a remote process call to get the dhcp port."""
        cctxt = self.client.prepare()
        port = cctxt.call(self.context, 'get_dhcp_port',
                          network_id=network_id, device_id=device_id,
                          host=self.host)
        if port:
            return dhcp.DictModel(port)

    def create_dhcp_port(self, port):
        """Make a remote process call to create the dhcp port."""
        # Requires server-side API version 1.1.
        cctxt = self.client.prepare(version='1.1')
        port = cctxt.call(self.context, 'create_dhcp_port',
                          port=port, host=self.host)
        if port:
            return dhcp.DictModel(port)

    def update_dhcp_port(self, port_id, port):
        """Make a remote process call to update the dhcp port."""
        # Requires server-side API version 1.1.
        cctxt = self.client.prepare(version='1.1')
        port = cctxt.call(self.context, 'update_dhcp_port',
                          port_id=port_id, port=port, host=self.host)
        if port:
            return dhcp.DictModel(port)

    def release_dhcp_port(self, network_id, device_id):
        """Make a remote process call to release the dhcp port."""
        cctxt = self.client.prepare()
        return cctxt.call(self.context, 'release_dhcp_port',
                          network_id=network_id, device_id=device_id,
                          host=self.host)

    def release_port_fixed_ip(self, network_id, device_id, subnet_id):
        """Make a remote process call to release a fixed_ip on the port."""
        cctxt = self.client.prepare()
        return cctxt.call(self.context, 'release_port_fixed_ip',
                          network_id=network_id, subnet_id=subnet_id,
                          device_id=device_id, host=self.host)
class NetworkCache(object):
    """Agent cache of the current network state.

    Stores network models keyed by id and keeps reverse-lookup maps from
    subnet id and port id back to the owning network id.
    """
    def __init__(self):
        # network id -> network model
        self.cache = {}
        # subnet id -> owning network id
        self.subnet_lookup = {}
        # port id -> owning network id
        self.port_lookup = {}
    def get_network_ids(self):
        """Return the ids of all cached networks."""
        return self.cache.keys()
    def get_network_by_id(self, network_id):
        """Return the cached network, or None when unknown."""
        return self.cache.get(network_id)
    def get_network_by_subnet_id(self, subnet_id):
        """Return the network owning the given subnet, or None."""
        return self.cache.get(self.subnet_lookup.get(subnet_id))
    def get_network_by_port_id(self, port_id):
        """Return the network owning the given port, or None."""
        return self.cache.get(self.port_lookup.get(port_id))
    def put(self, network):
        """Cache a network, replacing any previously cached version."""
        stale = self.cache.get(network.id)
        if stale is not None:
            # Drop stale subnet/port lookup entries before re-indexing.
            self.remove(stale)
        self.cache[network.id] = network
        for subnet in network.subnets:
            self.subnet_lookup[subnet.id] = network.id
        for port in network.ports:
            self.port_lookup[port.id] = network.id
    def remove(self, network):
        """Evict a network and all of its lookup entries."""
        del self.cache[network.id]
        for subnet in network.subnets:
            del self.subnet_lookup[subnet.id]
        for port in network.ports:
            del self.port_lookup[port.id]
    def put_port(self, port):
        """Add or replace a port on its (already cached) network."""
        network = self.get_network_by_id(port.network_id)
        replaced = False
        for index, existing in enumerate(network.ports):
            if existing.id == port.id:
                network.ports[index] = port
                replaced = True
                break
        if not replaced:
            network.ports.append(port)
        self.port_lookup[port.id] = network.id
    def remove_port(self, port):
        """Remove a port (matched by equality) from its cached network."""
        network = self.get_network_by_port_id(port.id)
        for index, candidate in enumerate(network.ports):
            if candidate == port:
                del network.ports[index]
                del self.port_lookup[port.id]
                break
    def get_port_by_id(self, port_id):
        """Return the cached port with this id, or None."""
        network = self.get_network_by_port_id(port_id)
        if not network:
            return None
        for candidate in network.ports:
            if candidate.id == port_id:
                return candidate
    def get_state(self):
        """Return counts of cached networks, subnets and ports."""
        networks = [self.get_network_by_id(net_id)
                    for net_id in self.get_network_ids()]
        return {'networks': len(networks),
                'subnets': sum(len(net.subnets) for net in networks),
                'ports': sum(len(net.ports) for net in networks)}
class DhcpAgentWithStateReport(DhcpAgent):
    """DHCP agent that periodically reports its state to the Neutron server."""
    def __init__(self, host=None):
        super(DhcpAgentWithStateReport, self).__init__(host=host)
        self.state_rpc = agent_rpc.PluginReportStateAPI(topics.PLUGIN)
        # Static agent description sent with every report; 'configurations'
        # is refreshed with live cache statistics in _report_state().
        self.agent_state = {
            'binary': 'neutron-dhcp-agent',
            'host': host,
            'topic': topics.DHCP_AGENT,
            'configurations': {
                'dhcp_driver': cfg.CONF.dhcp_driver,
                'use_namespaces': cfg.CONF.use_namespaces,
                'dhcp_lease_duration': cfg.CONF.dhcp_lease_duration},
            'start_flag': True,
            'agent_type': constants.AGENT_TYPE_DHCP}
        report_interval = cfg.CONF.AGENT.report_interval
        # True only for the first report; reset to False after the first
        # successful report_state (presumably makes later reports
        # non-blocking -- see PluginReportStateAPI).
        self.use_call = True
        if report_interval:
            # Report state at a fixed interval; no heartbeat when the
            # configured interval is falsy (0/None).
            self.heartbeat = loopingcall.FixedIntervalLoopingCall(
                self._report_state)
            self.heartbeat.start(interval=report_interval)
    def _report_state(self):
        """Send the agent state (with current cache stats) to the server."""
        try:
            self.agent_state.get('configurations').update(
                self.cache.get_state())
            ctx = context.get_admin_context_without_session()
            self.state_rpc.report_state(ctx, self.agent_state, self.use_call)
            self.use_call = False
        except AttributeError:
            # This means the server does not support report_state
            LOG.warn(_LW("Neutron server does not support state report."
                         " State report for this agent will be disabled."))
            self.heartbeat.stop()
            self.run()
            return
        except Exception:
            LOG.exception(_LE("Failed reporting state!"))
            return
        # 'start_flag' is popped after the first successful report so run()
        # is triggered exactly once.
        if self.agent_state.pop('start_flag', None):
            self.run()
    def agent_updated(self, context, payload):
        """Handle the agent_updated notification event."""
        self.schedule_resync(_("Agent updated: %(payload)s") %
                             {"payload": payload})
        LOG.info(_LI("agent_updated by server side %s!"), payload)
    def after_start(self):
        LOG.info(_LI("DHCP agent started"))
| |
"""Switch platform for UniFi Network integration.
Support for controlling power supply of clients which are powered over Ethernet (POE).
Support for controlling network access of clients selected in option flow.
Support for controlling deep packet inspection (DPI) restriction groups.
"""
from typing import Any
from aiounifi.api import SOURCE_EVENT
from aiounifi.events import (
WIRED_CLIENT_BLOCKED,
WIRED_CLIENT_UNBLOCKED,
WIRELESS_CLIENT_BLOCKED,
WIRELESS_CLIENT_UNBLOCKED,
)
from homeassistant.components.switch import DOMAIN, SwitchEntity
from homeassistant.const import ENTITY_CATEGORY_CONFIG
from homeassistant.core import callback
from homeassistant.helpers.device_registry import DeviceEntryType
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.helpers.entity import DeviceInfo
from homeassistant.helpers.entity_registry import async_entries_for_config_entry
from homeassistant.helpers.restore_state import RestoreEntity
from .const import ATTR_MANUFACTURER, DOMAIN as UNIFI_DOMAIN
from .unifi_client import UniFiClient
from .unifi_entity_base import UniFiBase
# Unique-id prefixes / entity-set keys for the three switch types this
# platform creates.
BLOCK_SWITCH = "block"
DPI_SWITCH = "dpi"
POE_SWITCH = "poe"
# Wired + wireless event pairs used to track block/unblock state changes
# pushed by the controller.
CLIENT_BLOCKED = (WIRED_CLIENT_BLOCKED, WIRELESS_CLIENT_BLOCKED)
CLIENT_UNBLOCKED = (WIRED_CLIENT_UNBLOCKED, WIRELESS_CLIENT_UNBLOCKED)
async def async_setup_entry(hass, config_entry, async_add_entities):
    """Set up switches for UniFi Network integration.

    Switches are controlling network access and switch ports with POE.
    """
    controller = hass.data[UNIFI_DOMAIN][config_entry.entry_id]
    controller.entities[DOMAIN] = {
        BLOCK_SWITCH: set(),
        POE_SWITCH: set(),
        DPI_SWITCH: set(),
    }
    # Only site admins may control switches; read-only roles get nothing.
    if controller.site_role != "admin":
        return
    # Store previously known POE control entities in case their POE are turned off.
    known_poe_clients = []
    entity_registry = await hass.helpers.entity_registry.async_get_registry()
    for entry in async_entries_for_config_entry(entity_registry, config_entry.entry_id):
        # Only registry entries whose unique id starts with the POE prefix
        # and whose client is currently known to the controller count.
        if not entry.unique_id.startswith(POE_SWITCH):
            continue
        mac = entry.unique_id.replace(f"{POE_SWITCH}-", "")
        if mac not in controller.api.clients:
            continue
        known_poe_clients.append(mac)
    # Clients selected for blocking may be inactive; inject their raw data
    # from the all-clients collection so a switch can still be created.
    for mac in controller.option_block_clients:
        if mac not in controller.api.clients and mac in controller.api.clients_all:
            client = controller.api.clients_all[mac]
            controller.api.clients.process_raw([client.raw])
    @callback
    def items_added(
        clients: set = controller.api.clients,
        devices: set = controller.api.devices,
        dpi_groups: set = controller.api.dpi_groups,
    ) -> None:
        """Update the values of the controller."""
        if controller.option_block_clients:
            add_block_entities(controller, async_add_entities, clients)
        if controller.option_poe_clients:
            add_poe_entities(controller, async_add_entities, clients, known_poe_clients)
        if controller.option_dpi_restrictions:
            add_dpi_entities(controller, async_add_entities, dpi_groups)
    for signal in (controller.signal_update, controller.signal_options_update):
        config_entry.async_on_unload(
            async_dispatcher_connect(hass, signal, items_added)
        )
    items_added()
    # Cleared after the initial pass so only first-time discovery treats
    # these as known POE clients.
    known_poe_clients.clear()
@callback
def add_block_entities(controller, async_add_entities, clients):
    """Create block switches for configured clients not yet tracked."""
    tracked = controller.entities[DOMAIN][BLOCK_SWITCH]
    new_switches = [
        UniFiBlockClientSwitch(controller.api.clients[mac], controller)
        for mac in controller.option_block_clients
        if mac not in tracked and mac in clients
    ]
    if new_switches:
        async_add_entities(new_switches)
@callback
def add_poe_entities(controller, async_add_entities, clients, known_poe_clients):
    """Add new POE switch entities from the controller.

    known_poe_clients lists MACs restored from a previous session; they
    bypass the discovery heuristics below.
    """
    switches = []
    devices = controller.api.devices
    for mac in clients:
        if mac in controller.entities[DOMAIN][POE_SWITCH]:
            continue
        client = controller.api.clients[mac]
        # Try to identify new clients powered by POE.
        # Known POE clients have been created in previous HASS sessions.
        # If port_poe is None the port does not support POE
        # If poe_enable is False we can't know if a POE client is available for control.
        # The last test skips the client entry that is the controller itself.
        if mac not in known_poe_clients and (
            mac in controller.wireless_clients
            or client.sw_mac not in devices
            or not devices[client.sw_mac].ports[client.sw_port].port_poe
            or not devices[client.sw_mac].ports[client.sw_port].poe_enable
            or controller.mac == client.mac
        ):
            continue
        # Multiple POE-devices on same port means non UniFi POE driven switch
        multi_clients_on_port = False
        for client2 in controller.api.clients.values():
            # Known clients skip the multi-client check entirely.
            if mac in known_poe_clients:
                break
            if (
                client2.is_wired
                and client.mac != client2.mac
                and client.sw_mac == client2.sw_mac
                and client.sw_port == client2.sw_port
            ):
                multi_clients_on_port = True
                break
        if multi_clients_on_port:
            continue
        switches.append(UniFiPOEClientSwitch(client, controller))
    if switches:
        async_add_entities(switches)
@callback
def add_dpi_entities(controller, async_add_entities, dpi_groups):
    """Create DPI restriction switches for untracked, non-empty groups."""
    tracked = controller.entities[DOMAIN][DPI_SWITCH]
    new_switches = [
        UniFiDPIRestrictionSwitch(dpi_groups[group_id], controller)
        for group_id in dpi_groups
        # Skip groups already tracked and groups without any DPI apps.
        if group_id not in tracked and dpi_groups[group_id].dpiapp_ids
    ]
    if new_switches:
        async_add_entities(new_switches)
class UniFiPOEClientSwitch(UniFiClient, SwitchEntity, RestoreEntity):
    """Representation of a client that uses POE."""
    DOMAIN = DOMAIN
    TYPE = POE_SWITCH
    _attr_entity_category = ENTITY_CATEGORY_CONFIG
    def __init__(self, client, controller):
        """Set up POE switch."""
        super().__init__(client, controller)
        # Last known non-"off" POE mode; left None here when unknown and
        # restored from the previous session in async_added_to_hass.
        self.poe_mode = None
        if client.sw_port and self.port.poe_mode != "off":
            self.poe_mode = self.port.poe_mode
    async def async_added_to_hass(self):
        """Call when entity about to be added to Home Assistant."""
        await super().async_added_to_hass()
        if self.poe_mode:  # POE is enabled and client in a known state
            return
        if (state := await self.async_get_last_state()) is None:
            return
        # Restore POE mode and, when missing, the switch/port association
        # from the attributes saved by extra_state_attributes last session.
        self.poe_mode = state.attributes.get("poe_mode")
        if not self.client.sw_mac:
            self.client.raw["sw_mac"] = state.attributes.get("switch")
        if not self.client.sw_port:
            self.client.raw["sw_port"] = state.attributes.get("port")
    @property
    def is_on(self):
        """Return true if POE is active."""
        return self.port.poe_mode != "off"
    @property
    def available(self):
        """Return if switch is available.

        Poe_mode None means its POE state is unknown.
        Sw_mac unavailable means restored client.
        """
        return (
            self.poe_mode is not None
            and self.controller.available
            and self.client.sw_port
            and self.client.sw_mac
            and self.client.sw_mac in self.controller.api.devices
        )
    async def async_turn_on(self, **kwargs):
        """Enable POE for client."""
        # Re-applies the remembered poe_mode rather than a hardcoded "auto".
        await self.device.async_set_port_poe_mode(self.client.sw_port, self.poe_mode)
    async def async_turn_off(self, **kwargs):
        """Disable POE for client."""
        await self.device.async_set_port_poe_mode(self.client.sw_port, "off")
    @property
    def extra_state_attributes(self):
        """Return the device state attributes.

        Also serves as the persisted state used for restore on restart.
        """
        attributes = {
            "power": self.port.poe_power,
            "switch": self.client.sw_mac,
            "port": self.client.sw_port,
            "poe_mode": self.poe_mode,
        }
        return attributes
    @property
    def device(self):
        """Shortcut to the switch that client is connected to."""
        return self.controller.api.devices[self.client.sw_mac]
    @property
    def port(self):
        """Shortcut to the switch port that client is connected to."""
        return self.device.ports[self.client.sw_port]
    async def options_updated(self) -> None:
        """Config entry options are updated, remove entity if option is disabled."""
        if not self.controller.option_poe_clients:
            await self.remove_item({self.client.mac})
class UniFiBlockClientSwitch(UniFiClient, SwitchEntity):
    """Representation of a blockable client."""
    DOMAIN = DOMAIN
    TYPE = BLOCK_SWITCH
    _attr_entity_category = ENTITY_CATEGORY_CONFIG
    def __init__(self, client, controller):
        """Set up block switch."""
        super().__init__(client, controller)
        # Cached blocked state; kept in sync via block/unblock events.
        self._is_blocked = client.blocked
    @callback
    def async_update_callback(self) -> None:
        """Update the clients state."""
        # Only a block/unblock event changes the cached state; other event
        # types are ignored here.
        if (
            self.client.last_updated == SOURCE_EVENT
            and self.client.event.event in CLIENT_BLOCKED + CLIENT_UNBLOCKED
        ):
            self._is_blocked = self.client.event.event in CLIENT_BLOCKED
        super().async_update_callback()
    @property
    def is_on(self):
        """Return true if client is allowed to connect."""
        return not self._is_blocked
    async def async_turn_on(self, **kwargs):
        """Turn on connectivity for client."""
        await self.controller.api.clients.async_unblock(self.client.mac)
    async def async_turn_off(self, **kwargs):
        """Turn off connectivity for client."""
        await self.controller.api.clients.async_block(self.client.mac)
    @property
    def icon(self):
        """Return the icon to use in the frontend."""
        if self._is_blocked:
            return "mdi:network-off"
        return "mdi:network"
    async def options_updated(self) -> None:
        """Config entry options are updated, remove entity if option is disabled."""
        if self.client.mac not in self.controller.option_block_clients:
            await self.remove_item({self.client.mac})
class UniFiDPIRestrictionSwitch(UniFiBase, SwitchEntity):
    """Representation of a DPI restriction group."""
    DOMAIN = DOMAIN
    TYPE = DPI_SWITCH
    _attr_entity_category = ENTITY_CATEGORY_CONFIG
    @property
    def key(self) -> Any:
        """Return item key."""
        return self._item.id
    @property
    def unique_id(self):
        """Return a unique identifier for this switch."""
        # The raw group id (no TYPE prefix) is used as the unique id.
        return self._item.id
    @property
    def name(self) -> str:
        """Return the name of the DPI group."""
        return self._item.name
    @property
    def icon(self):
        """Return the icon to use in the frontend."""
        if self._item.enabled:
            return "mdi:network"
        return "mdi:network-off"
    @property
    def is_on(self):
        """Return true if the DPI group is enabled."""
        return self._item.enabled
    async def async_turn_on(self, **kwargs):
        """Enable the DPI restriction group."""
        await self.controller.api.dpi_groups.async_enable(self._item)
    async def async_turn_off(self, **kwargs):
        """Disable the DPI restriction group."""
        await self.controller.api.dpi_groups.async_disable(self._item)
    async def options_updated(self) -> None:
        """Config entry options are updated, remove entity if option is disabled."""
        if not self.controller.option_dpi_restrictions:
            await self.remove_item({self.key})
    @property
    def device_info(self) -> DeviceInfo:
        """Return a service description for device registry.

        All DPI switches share one service device per controller site.
        """
        return DeviceInfo(
            entry_type=DeviceEntryType.SERVICE,
            identifiers={(DOMAIN, f"unifi_controller_{self._item.site_id}")},
            manufacturer=ATTR_MANUFACTURER,
            model="UniFi Network",
            name="UniFi Network",
        )
| |
import re
import zlib
import string
from django.db import models
from django.utils.translation import ugettext_lazy as _, pgettext_lazy
from django.core import exceptions
from oscar.core.compat import AUTH_USER_MODEL
from oscar.models.fields import UppercaseCharField, PhoneNumberField
class AbstractAddress(models.Model):
    """
    Superclass address object

    This is subclassed and extended to provide models for
    user, shipping and billing addresses.
    """
    MR, MISS, MRS, MS, DR = ('Mr', 'Miss', 'Mrs', 'Ms', 'Dr')
    TITLE_CHOICES = (
        (MR, _("Mr")),
        (MISS, _("Miss")),
        (MRS, _("Mrs")),
        (MS, _("Ms")),
        (DR, _("Dr")),
    )
    # Regex for each country. Not listed countries don't use postcodes
    # Based on http://en.wikipedia.org/wiki/List_of_postal_codes
    POSTCODES_REGEX = {
        'AC': r'^[A-Z]{4}[0-9][A-Z]$',
        'AD': r'^AD[0-9]{3}$',
        'AF': r'^[0-9]{4}$',
        'AI': r'^AI-2640$',
        'AL': r'^[0-9]{4}$',
        'AM': r'^[0-9]{4}$',
        'AR': r'^([0-9]{4}|[A-Z][0-9]{4}[A-Z]{3})$',
        'AS': r'^[0-9]{5}(-[0-9]{4}|-[0-9]{6})?$',
        'AT': r'^[0-9]{4}$',
        'AU': r'^[0-9]{4}$',
        'AX': r'^[0-9]{5}$',
        'AZ': r'^AZ[0-9]{4}$',
        'BA': r'^[0-9]{5}$',
        'BB': r'^BB[0-9]{5}$',
        'BD': r'^[0-9]{4}$',
        'BE': r'^[0-9]{4}$',
        'BG': r'^[0-9]{4}$',
        'BH': r'^[0-9]{3,4}$',
        'BL': r'^[0-9]{5}$',
        'BM': r'^[A-Z]{2}([0-9]{2}|[A-Z]{2})',
        # Fixed: previous pattern r'^[A-Z}{2}[0-9]]{4}$' had the braces
        # inside the character classes and so matched the wrong strings.
        'BN': r'^[A-Z]{2}[0-9]{4}$',
        'BO': r'^[0-9]{4}$',
        'BR': r'^[0-9]{5}(-[0-9]{3})?$',
        'BT': r'^[0-9]{3}$',
        'BY': r'^[0-9]{6}$',
        'CA': r'^[A-Z][0-9][A-Z][0-9][A-Z][0-9]$',
        'CC': r'^[0-9]{4}$',
        'CH': r'^[0-9]{4}$',
        'CL': r'^([0-9]{7}|[0-9]{3}-[0-9]{4})$',
        'CN': r'^[0-9]{6}$',
        'CO': r'^[0-9]{6}$',
        'CR': r'^[0-9]{4,5}$',
        'CU': r'^[0-9]{5}$',
        'CV': r'^[0-9]{4}$',
        'CX': r'^[0-9]{4}$',
        'CY': r'^[0-9]{4}$',
        'CZ': r'^[0-9]{5}$',
        'DE': r'^[0-9]{5}$',
        'DK': r'^[0-9]{4}$',
        'DO': r'^[0-9]{5}$',
        'DZ': r'^[0-9]{5}$',
        'EC': r'^EC[0-9]{6}$',
        'EE': r'^[0-9]{5}$',
        'EG': r'^[0-9]{5}$',
        'ES': r'^[0-9]{5}$',
        'ET': r'^[0-9]{4}$',
        'FI': r'^[0-9]{5}$',
        'FK': r'^[A-Z]{4}[0-9][A-Z]{2}$',
        'FM': r'^[0-9]{5}(-[0-9]{4})?$',
        'FO': r'^[0-9]{3}$',
        'FR': r'^[0-9]{5}$',
        'GA': r'^[0-9]{2}.*[0-9]{2}$',
        'GB': r'^[A-Z][A-Z0-9]{1,3}[0-9][A-Z]{2}$',
        'GE': r'^[0-9]{4}$',
        'GF': r'^[0-9]{5}$',
        'GG': r'^([A-Z]{2}[0-9]{2,3}[A-Z]{2})$',
        'GI': r'^GX111AA$',
        'GL': r'^[0-9]{4}$',
        'GP': r'^[0-9]{5}$',
        'GR': r'^[0-9]{5}$',
        'GS': r'^SIQQ1ZZ$',
        'GT': r'^[0-9]{5}$',
        'GU': r'^[0-9]{5}$',
        'GW': r'^[0-9]{4}$',
        'HM': r'^[0-9]{4}$',
        'HN': r'^[0-9]{5}$',
        'HR': r'^[0-9]{5}$',
        'HT': r'^[0-9]{4}$',
        'HU': r'^[0-9]{4}$',
        'ID': r'^[0-9]{5}$',
        'IL': r'^[0-9]{7}$',
        # Fixed: stray doubled '$' anchor.
        'IM': r'^IM[0-9]{2,3}[A-Z]{2}$',
        'IN': r'^[0-9]{6}$',
        'IO': r'^[A-Z]{4}[0-9][A-Z]{2}$',
        'IQ': r'^[0-9]{5}$',
        'IR': r'^[0-9]{5}-[0-9]{5}$',
        'IS': r'^[0-9]{3}$',
        'IT': r'^[0-9]{5}$',
        'JE': r'^JE[0-9]{2}[A-Z]{2}$',
        'JM': r'^JM[A-Z]{3}[0-9]{2}$',
        'JO': r'^[0-9]{5}$',
        'JP': r'^[0-9]{3}-?[0-9]{4}$',
        'KE': r'^[0-9]{5}$',
        'KG': r'^[0-9]{6}$',
        'KH': r'^[0-9]{5}$',
        'KR': r'^[0-9]{3}-?[0-9]{3}$',
        'KY': r'^KY[0-9]-[0-9]{4}$',
        'KZ': r'^[0-9]{6}$',
        'LA': r'^[0-9]{5}$',
        'LB': r'^[0-9]{8}$',
        'LI': r'^[0-9]{4}$',
        'LK': r'^[0-9]{5}$',
        'LR': r'^[0-9]{4}$',
        'LS': r'^[0-9]{3}$',
        'LT': r'^[0-9]{5}$',
        'LU': r'^[0-9]{4}$',
        'LV': r'^LV-[0-9]{4}$',
        'LY': r'^[0-9]{5}$',
        'MA': r'^[0-9]{5}$',
        'MC': r'^980[0-9]{2}$',
        'MD': r'^MD-?[0-9]{4}$',
        'ME': r'^[0-9]{5}$',
        'MF': r'^[0-9]{5}$',
        'MG': r'^[0-9]{3}$',
        'MH': r'^[0-9]{5}$',
        'MK': r'^[0-9]{4}$',
        'MM': r'^[0-9]{5}$',
        'MN': r'^[0-9]{5}$',
        'MP': r'^[0-9]{5}$',
        'MQ': r'^[0-9]{5}$',
        'MT': r'^[A-Z]{3}[0-9]{4}$',
        'MV': r'^[0-9]{4,5}$',
        'MX': r'^[0-9]{5}$',
        'MY': r'^[0-9]{5}$',
        'MZ': r'^[0-9]{4}$',
        'NA': r'^[0-9]{5}$',
        'NC': r'^[0-9]{5}$',
        'NE': r'^[0-9]{4}$',
        'NF': r'^[0-9]{4}$',
        'NG': r'^[0-9]{6}$',
        'NI': r'^[0-9]{3}-[0-9]{3}-[0-9]$',
        'NL': r'^[0-9]{4}[A-Z]{2}$',
        'NO': r'^[0-9]{4}$',
        'NP': r'^[0-9]{5}$',
        'NZ': r'^[0-9]{4}$',
        'OM': r'^[0-9]{3}$',
        'PA': r'^[0-9]{6}$',
        'PE': r'^[0-9]{5}$',
        'PF': r'^[0-9]{5}$',
        'PG': r'^[0-9]{3}$',
        'PH': r'^[0-9]{4}$',
        'PK': r'^[0-9]{5}$',
        'PL': r'^[0-9]{2}-?[0-9]{3}$',
        'PM': r'^[0-9]{5}$',
        'PN': r'^[A-Z]{4}[0-9][A-Z]{2}$',
        'PR': r'^[0-9]{5}$',
        'PT': r'^[0-9]{4}(-?[0-9]{3})?$',
        'PW': r'^[0-9]{5}$',
        'PY': r'^[0-9]{4}$',
        'RE': r'^[0-9]{5}$',
        'RO': r'^[0-9]{6}$',
        'RS': r'^[0-9]{5}$',
        'RU': r'^[0-9]{6}$',
        'SA': r'^[0-9]{5}$',
        'SD': r'^[0-9]{5}$',
        'SE': r'^[0-9]{5}$',
        'SG': r'^([0-9]{2}|[0-9]{4}|[0-9]{6})$',
        'SH': r'^(STHL1ZZ|TDCU1ZZ)$',
        'SI': r'^(SI-)?[0-9]{4}$',
        'SK': r'^[0-9]{5}$',
        'SM': r'^[0-9]{5}$',
        'SN': r'^[0-9]{5}$',
        'SV': r'^01101$',
        'SZ': r'^[A-Z][0-9]{3}$',
        'TC': r'^TKCA1ZZ$',
        'TD': r'^[0-9]{5}$',
        'TH': r'^[0-9]{5}$',
        'TJ': r'^[0-9]{6}$',
        'TM': r'^[0-9]{6}$',
        'TN': r'^[0-9]{4}$',
        'TR': r'^[0-9]{5}$',
        'TT': r'^[0-9]{6}$',
        'TW': r'^[0-9]{5}$',
        'UA': r'^[0-9]{5}$',
        'US': r'^[0-9]{5}(-[0-9]{4}|-[0-9]{6})?$',
        'UY': r'^[0-9]{5}$',
        'UZ': r'^[0-9]{6}$',
        'VA': r'^00120$',
        'VC': r'^VC[0-9]{4}',
        'VE': r'^[0-9]{4}[A-Z]?$',
        'VG': r'^VG[0-9]{4}$',
        'VI': r'^[0-9]{5}$',
        'VN': r'^[0-9]{6}$',
        'WF': r'^[0-9]{5}$',
        'XK': r'^[0-9]{5}$',
        'YT': r'^[0-9]{5}$',
        'ZA': r'^[0-9]{4}$',
        'ZM': r'^[0-9]{5}$',
    }
    title = models.CharField(
        pgettext_lazy(u"Treatment Pronouns for the customer", u"Title"),
        max_length=64, choices=TITLE_CHOICES, blank=True, null=True)
    first_name = models.CharField(
        _("First name"), max_length=255, blank=True, null=True)
    last_name = models.CharField(_("Last name"), max_length=255, blank=True)
    # We use quite a few lines of an address as they are often quite long and
    # it's easier to just hide the unnecessary ones than add extra ones.
    line1 = models.CharField(_("First line of address"), max_length=255)
    line2 = models.CharField(
        _("Second line of address"), max_length=255, blank=True, null=True)
    line3 = models.CharField(
        _("Third line of address"), max_length=255, blank=True, null=True)
    line4 = models.CharField(_("City"), max_length=255, blank=True, null=True)
    state = models.CharField(
        _("State/County"), max_length=255, blank=True, null=True)
    postcode = UppercaseCharField(
        _("Post/Zip-code"), max_length=64, blank=True, null=True)
    country = models.ForeignKey('address.Country', verbose_name=_("Country"))
    #: A field only used for searching addresses - this contains all the
    #: relevant fields. This is effectively a poor man's Solr text field.
    search_text = models.CharField(
        _("Search text - used only for searching addresses"),
        max_length=1000, editable=False)
    def __unicode__(self):
        return self.summary
    class Meta:
        abstract = True
        verbose_name = _('Address')
        verbose_name_plural = _('Addresses')
    # Saving
    def save(self, *args, **kwargs):
        # Refresh the denormalised search_text field on every save.
        self._update_search_text()
        super(AbstractAddress, self).save(*args, **kwargs)
    def clean(self):
        """
        Strip whitespace from text fields and validate the postcode.
        """
        # Strip all whitespace.  Direct __dict__ access is used so the raw
        # stored values are rewritten in place.
        for field in ['first_name', 'last_name', 'line1', 'line2', 'line3',
                      'line4', 'state', 'postcode']:
            if self.__dict__[field]:
                self.__dict__[field] = self.__dict__[field].strip()
        # Ensure postcodes are valid for country
        self.ensure_postcode_is_valid_for_country()
    def ensure_postcode_is_valid_for_country(self):
        """
        Validate postcode given the country

        Raises ValidationError when the country has a known postcode format
        and the postcode is either missing or does not match it.
        """
        if not self.postcode and self.country_id:
            country_code = self.country.iso_3166_1_a2
            regex = self.POSTCODES_REGEX.get(country_code, None)
            if regex:
                # Wrapped in _() for translation, consistent with the
                # invalid-postcode message below.
                msg = _("Addresses in %(country)s require a valid postcode") % {
                    'country': self.country}
                raise exceptions.ValidationError(msg)
        if self.postcode and self.country_id:
            # Ensure postcodes are always uppercase
            postcode = self.postcode.upper().replace(' ', '')
            country_code = self.country.iso_3166_1_a2
            regex = self.POSTCODES_REGEX.get(country_code, None)
            # Validate postcode against regex for the country if available
            if regex and not re.match(regex, postcode):
                msg = _("The postcode '%(postcode)s' is not valid "
                        "for %(country)s") % {
                    'postcode': self.postcode,
                    'country': self.country}
                raise exceptions.ValidationError(
                    {'postcode': msg})
    def _update_search_text(self):
        # Concatenate all non-empty name/address components into one
        # searchable string.
        search_fields = filter(
            bool, [self.first_name, self.last_name,
                   self.line1, self.line2, self.line3, self.line4,
                   self.state, self.postcode, self.country.name])
        self.search_text = ' '.join(search_fields)
    # Properties
    @property
    def city(self):
        # Common alias
        return self.line4
    @property
    def summary(self):
        """
        Returns a single string summary of the address,
        separating fields using commas.
        """
        return u", ".join(self.active_address_fields())
    @property
    def salutation(self):
        """
        Name (including title)
        """
        return self.join_fields(
            ('title', 'first_name', 'last_name'),
            separator=u" ")
    @property
    def name(self):
        """Name without the title."""
        return self.join_fields(('first_name', 'last_name'), separator=u" ")
    # Helpers
    def generate_hash(self):
        """
        Returns a hash of the address summary
        """
        # We use an upper-case version of the summary
        return zlib.crc32(self.summary.strip().upper().encode('UTF8'))
    def join_fields(self, fields, separator=u", "):
        """
        Join a sequence of fields using the specified separator
        """
        field_values = []
        for field in fields:
            # Title is special case as we want its display value, not the
            # stored code.
            if field == 'title':
                value = self.get_title_display()
            else:
                value = getattr(self, field)
            field_values.append(value)
        return separator.join(filter(bool, field_values))
    def populate_alternative_model(self, address_model):
        """
        For populating an address model using the matching fields
        from this one.

        This is used to convert a user address to a shipping address
        as part of the checkout process.
        """
        destination_field_names = [
            field.name for field in address_model._meta.fields]
        for field_name in [field.name for field in self._meta.fields]:
            if field_name in destination_field_names and field_name != 'id':
                setattr(address_model, field_name, getattr(self, field_name))
    def active_address_fields(self):
        """
        Return the non-empty components of the address, but merging the
        title, first_name and last_name into a single line.
        """
        fields = [self.salutation, self.line1, self.line2,
                  self.line3, self.line4, self.state, self.postcode]
        fields = map(string.strip, filter(bool, fields))
        try:
            fields.append(self.country.name)
        except exceptions.ObjectDoesNotExist:
            # No country set yet - omit it from the summary.
            pass
        return fields
class AbstractCountry(models.Model):
    """
    International Organization for Standardization (ISO) 3166-1 Country list.
    """
    iso_3166_1_a2 = models.CharField(_('ISO 3166-1 alpha-2'), max_length=2,
                                     primary_key=True)
    iso_3166_1_a3 = models.CharField(_('ISO 3166-1 alpha-3'), max_length=3,
                                     null=True, db_index=True)
    # This should have been a CharField as it needs to be padded with zeros to
    # be 3 digits. Access via the numeric_code instead.
    iso_3166_1_numeric = models.PositiveSmallIntegerField(
        _('ISO 3166-1 numeric'), null=True, db_index=True)
    name = models.CharField(_('Official name (CAPS)'), max_length=128)
    printable_name = models.CharField(_('Country name'), max_length=128)
    display_order = models.PositiveSmallIntegerField(
        _("Display order"), default=0, db_index=True,
        help_text=_('Higher the number, higher the country in the list.'))
    is_shipping_country = models.BooleanField(_("Is Shipping Country"),
                                              default=False, db_index=True)
    class Meta:
        abstract = True
        verbose_name = _('Country')
        verbose_name_plural = _('Countries')
        ordering = ('-display_order', 'name',)
    def __unicode__(self):
        return self.printable_name or self.name
    @property
    def code(self):
        """
        Shorthand for the ISO 3166 code
        """
        return self.iso_3166_1_a2
    @property
    def numeric_code(self):
        """
        Zero-padded, three-digit ISO 3166-1 numeric code as a string.

        Returns None when the (nullable) numeric field is not set;
        previously formatting None raised a TypeError.
        """
        if self.iso_3166_1_numeric is None:
            return None
        return u"%.03d" % self.iso_3166_1_numeric
class AbstractShippingAddress(AbstractAddress):
    """
    A shipping address.

    A shipping address should not be edited once the order has been placed -
    it should be read-only after that.
    """
    phone_number = PhoneNumberField(
        _("Phone number"), blank=True,
        help_text=_("In case we need to call you about your order"))
    notes = models.TextField(
        blank=True, null=True,
        verbose_name=_('Instructions'),
        help_text=_("Tell us anything we should know when delivering "
                    "your order."))
    class Meta:
        abstract = True
        verbose_name = _("Shipping address")
        verbose_name_plural = _("Shipping addresses")
    @property
    def order(self):
        """
        Return the order linked to this shipping address

        Returns the first related order, or None when there is none.
        """
        orders = self.order_set.all()
        if not orders:
            return None
        return orders[0]
class AbstractUserAddress(AbstractShippingAddress):
    """
    A user's address. A user can have many of these and together they form an
    'address book' of sorts for the user.

    We use a separate model for shipping and billing (even though there will be
    some data duplication) because we don't want shipping/billing addresses
    changed or deleted once an order has been placed. By having a separate
    model, we allow users the ability to add/edit/delete from their address
    book without affecting orders already placed.
    """
    user = models.ForeignKey(
        AUTH_USER_MODEL, related_name='addresses', verbose_name=_("User"))
    #: Whether this address is the default for shipping
    is_default_for_shipping = models.BooleanField(
        _("Default shipping address?"), default=False)
    #: Whether this address should be the default for billing.
    is_default_for_billing = models.BooleanField(
        _("Default billing address?"), default=False)
    #: We keep track of the number of times an address has been used
    #: as a shipping address so we can show the most popular ones
    #: first at the checkout.
    num_orders = models.PositiveIntegerField(_("Number of Orders"), default=0)
    #: A hash is kept to try and avoid duplicate addresses being added
    #: to the address book.
    hash = models.CharField(_("Address Hash"), max_length=255, db_index=True,
                            editable=False)
    date_created = models.DateTimeField(_("Date Created"), auto_now_add=True)
    def save(self, *args, **kwargs):
        """
        Save a hash of the address fields
        """
        # Save a hash of the address fields so we can check whether two
        # addresses are the same to avoid saving duplicates
        self.hash = self.generate_hash()
        # Ensure that each user only has one default shipping address
        # and billing address
        self._ensure_defaults_integrity()
        super(AbstractUserAddress, self).save(*args, **kwargs)
    def _ensure_defaults_integrity(self):
        # Clear the matching default flag on the user's other addresses so
        # this instance becomes the only default after saving.
        if self.is_default_for_shipping:
            self.__class__._default_manager.filter(
                user=self.user,
                is_default_for_shipping=True).update(
                is_default_for_shipping=False)
        if self.is_default_for_billing:
            self.__class__._default_manager.filter(
                user=self.user,
                is_default_for_billing=True).update(
                is_default_for_billing=False)
    class Meta:
        abstract = True
        verbose_name = _("User address")
        verbose_name_plural = _("User addresses")
        ordering = ['-num_orders']
        unique_together = ('user', 'hash')
    def validate_unique(self, exclude=None):
        # NOTE(review): super(AbstractAddress, self) skips AbstractAddress and
        # AbstractShippingAddress in the MRO.  Neither of them defines
        # validate_unique, so today this behaves the same as
        # super(AbstractUserAddress, self) -- confirm intent before changing.
        super(AbstractAddress, self).validate_unique(exclude)
        # Reject an address whose content hash duplicates another entry in
        # this user's address book (excluding itself when updating).
        qs = self.__class__.objects.filter(
            user=self.user,
            hash=self.generate_hash())
        if self.id:
            qs = qs.exclude(id=self.id)
        if qs.count() > 0:
            raise exceptions.ValidationError({
                '__all__': [_("This address is already in your address book")]})
class AbstractBillingAddress(AbstractAddress):
    """
    A billing address.
    """
    class Meta:
        abstract = True
        verbose_name = _("Billing address")
        verbose_name_plural = _("Billing addresses")
    @property
    def order(self):
        """
        Return the order linked to this billing address

        Returns the first related order, or None when there is none.
        """
        orders = self.order_set.all()
        if not orders:
            return None
        return orders[0]
class AbstractPartnerAddress(AbstractAddress):
    """
    A partner can have one or more addresses. This can be useful e.g. when
    determining US tax which depends on the origin of the shipment.
    """
    partner = models.ForeignKey('partner.Partner', related_name='addresses',
                                verbose_name=_('Partner'))
    class Meta:
        abstract = True
        verbose_name = _("Partner address")
        verbose_name_plural = _("Partner addresses")
| |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import celery
import pretend
import pytest
import redis
from pyramid.exceptions import ConfigurationError
import warehouse.legacy.api.xmlrpc.cache
from warehouse.legacy.api.xmlrpc import cache
from warehouse.legacy.api.xmlrpc.cache import (
NullXMLRPCCache,
RedisLru,
RedisXMLRPCCache,
cached_return_view,
services,
)
from warehouse.legacy.api.xmlrpc.cache.interfaces import CacheError, IXMLRPCCache
@pytest.fixture
def fakeredis():
    """Yield an in-memory fake Redis client, flushing all keys on teardown.

    The fakeredis package is imported inside the fixture body (the local
    import shadows the fixture's own name at function scope).
    """
    import fakeredis
    _fakeredis = fakeredis.FakeStrictRedis()
    yield _fakeredis
    _fakeredis.flushall()
def func_test(arg0, arg1, kwarg0=0, kwarg1=1):
    """Echo positional and keyword arguments back for cache tests."""
    positional = [arg0, arg1]
    keywords = {"kwarg0": kwarg0, "kwarg1": kwarg1}
    return [positional, keywords]
class TestXMLRPCCache:
    """Tests for the null (no-op) XMLRPC cache implementation."""
    def test_null_cache(self):
        # fetch() should simply call straight through to the function, and
        # purge() should be a no-op returning None.
        purger = pretend.call_recorder(lambda tags: None)
        service = NullXMLRPCCache("null://", purger)
        assert service.fetch(
            func_test, (1, 2), {"kwarg0": 3, "kwarg1": 4}, None, None, None
        ) == [[1, 2], {"kwarg0": 3, "kwarg1": 4}]
        assert service.purge(None) is None
class TestRedisXMLRPCCache:
    """Tests for the Redis-backed cache backend."""
    def test_redis_cache(self, monkeypatch):
        """Constructor connects via StrictRedis.from_url and builds a
        RedisLru; fetch/purge delegate to that LRU."""
        strict_redis_obj = pretend.stub()
        strict_redis_cls = pretend.stub(
            from_url=pretend.call_recorder(lambda url, db=None: strict_redis_obj)
        )
        monkeypatch.setattr(redis, "StrictRedis", strict_redis_cls)
        redis_lru_obj = pretend.stub(
            fetch=pretend.call_recorder(
                lambda func, args, kwargs, key, tag, expires: func(*args, **kwargs)
            ),
            purge=pretend.call_recorder(lambda tag: None),
        )
        redis_lru_cls = pretend.call_recorder(
            lambda redis_conn, **kwargs: redis_lru_obj
        )
        monkeypatch.setattr(
            warehouse.legacy.api.xmlrpc.cache, "RedisLru", redis_lru_cls
        )
        purger = pretend.call_recorder(lambda tags: None)
        service = RedisXMLRPCCache("redis://localhost:6379", purger)
        # Connection made against db 0 of the configured URL.
        assert strict_redis_cls.from_url.calls == [
            pretend.call("redis://localhost:6379", db=0)
        ]
        assert redis_lru_cls.calls == [
            pretend.call(
                strict_redis_obj, name="lru", expires=None, metric_reporter=None
            )
        ]
        assert service.fetch(
            func_test, (1, 2), {"kwarg0": 3, "kwarg1": 4}, None, None, None
        ) == [[1, 2], {"kwarg0": 3, "kwarg1": 4}]
        assert service.purge(None) is None
        # Both operations were forwarded to the underlying LRU.
        assert redis_lru_obj.fetch.calls == [
            pretend.call(
                func_test, (1, 2), {"kwarg0": 3, "kwarg1": 4}, None, None, None
            )
        ]
        assert redis_lru_obj.purge.calls == [pretend.call(None)]
class TestIncludeMe:
    """Tests for cache.includeme() configuration and service creation."""
    @pytest.mark.parametrize(
        ("url", "cache_class"),
        [
            ("redis://", "RedisXMLRPCCache"),
            ("rediss://", "RedisXMLRPCCache"),
            ("null://", "NullXMLRPCCache"),
        ],
    )
    def test_configuration(self, url, cache_class, monkeypatch):
        """Each supported URL scheme selects the matching backend and
        registers the caching view deriver."""
        client_obj = pretend.stub()
        client_cls = pretend.stub(
            create_service=pretend.call_recorder(lambda *a, **kw: client_obj)
        )
        monkeypatch.setattr(cache, cache_class, client_cls)
        registry = {}
        config = pretend.stub(
            add_view_deriver=pretend.call_recorder(
                lambda deriver, over=None, under=None: None
            ),
            register_service_factory=pretend.call_recorder(
                lambda service, iface=None: None
            ),
            registry=pretend.stub(
                settings={"warehouse.xmlrpc.cache.url": url},
                __setitem__=registry.__setitem__,
            ),
        )
        cache.includeme(config)
        assert config.add_view_deriver.calls == [
            pretend.call(
                cache.cached_return_view, under="rendered_view", over="mapped_view"
            )
        ]
    def test_no_url_configuration(self, monkeypatch):
        """Missing cache URL setting raises ConfigurationError."""
        registry = {}
        config = pretend.stub(
            registry=pretend.stub(settings={}, __setitem__=registry.__setitem__)
        )
        with pytest.raises(ConfigurationError):
            cache.includeme(config)
    def test_bad_url_configuration(self, monkeypatch):
        """Unsupported URL scheme raises ConfigurationError."""
        registry = {}
        config = pretend.stub(
            registry=pretend.stub(
                settings={"warehouse.xmlrpc.cache.url": "memcached://"},
                __setitem__=registry.__setitem__,
            )
        )
        with pytest.raises(ConfigurationError):
            cache.includeme(config)
    def test_bad_expires_configuration(self, monkeypatch):
        """Non-integer expires setting raises ConfigurationError."""
        client_obj = pretend.stub()
        client_cls = pretend.call_recorder(lambda *a, **kw: client_obj)
        monkeypatch.setattr(cache, "NullXMLRPCCache", client_cls)
        registry = {}
        config = pretend.stub(
            registry=pretend.stub(
                settings={
                    "warehouse.xmlrpc.cache.url": "null://",
                    "warehouse.xmlrpc.cache.expires": "Never",
                },
                __setitem__=registry.__setitem__,
            )
        )
        with pytest.raises(ConfigurationError):
            cache.includeme(config)
    def test_create_null_service(self):
        """create_service wires purge_tags through the request's task."""
        purge_tags = pretend.stub(delay=pretend.call_recorder(lambda tag: None))
        request = pretend.stub(
            registry=pretend.stub(settings={"warehouse.xmlrpc.cache.url": "null://"}),
            task=lambda f: purge_tags,
        )
        service = NullXMLRPCCache.create_service(None, request)
        service.purge_tags(["wu", "tang", "4", "evah"])
        assert isinstance(service, NullXMLRPCCache)
        assert service._purger is purge_tags.delay
        # One delayed purge task per tag, in order.
        assert purge_tags.delay.calls == [
            pretend.call("wu"),
            pretend.call("tang"),
            pretend.call("4"),
            pretend.call("evah"),
        ]
    def test_create_redis_service(self):
        """Same wiring as the null backend, for the Redis backend."""
        purge_tags = pretend.stub(delay=pretend.call_recorder(lambda tag: None))
        request = pretend.stub(
            registry=pretend.stub(settings={"warehouse.xmlrpc.cache.url": "redis://"}),
            task=lambda f: purge_tags,
        )
        service = RedisXMLRPCCache.create_service(None, request)
        service.purge_tags(["wu", "tang", "4", "evah"])
        assert isinstance(service, RedisXMLRPCCache)
        assert service._purger is purge_tags.delay
        assert purge_tags.delay.calls == [
            pretend.call("wu"),
            pretend.call("tang"),
            pretend.call("4"),
            pretend.call("evah"),
        ]
class TestRedisLru:
    """Tests for the RedisLru cache primitive itself."""
    def test_redis_lru(self, fakeredis):
        """Repeated fetches return the same value (miss then hit)."""
        redis_lru = RedisLru(fakeredis)
        expected = func_test(0, 1, kwarg0=2, kwarg1=3)
        assert expected == redis_lru.fetch(
            func_test, [0, 1], {"kwarg0": 2, "kwarg1": 3}, None, None, None
        )
        assert expected == redis_lru.fetch(
            func_test, [0, 1], {"kwarg0": 2, "kwarg1": 3}, None, None, None
        )
    def test_redis_custom_metrics(self, fakeredis):
        """Miss/hit counters are reported through the metric reporter."""
        metric_reporter = pretend.stub(
            increment=pretend.call_recorder(lambda *args: None)
        )
        redis_lru = RedisLru(fakeredis, metric_reporter=metric_reporter)
        expected = func_test(0, 1, kwarg0=2, kwarg1=3)
        assert expected == redis_lru.fetch(
            func_test, [0, 1], {"kwarg0": 2, "kwarg1": 3}, None, None, None
        )
        assert expected == redis_lru.fetch(
            func_test, [0, 1], {"kwarg0": 2, "kwarg1": 3}, None, None, None
        )
        assert metric_reporter.increment.calls == [
            pretend.call("lru.cache.miss"),
            pretend.call("lru.cache.hit"),
        ]
    def test_redis_purge(self, fakeredis):
        """Purging a tag evicts entries so the next fetch misses again."""
        metric_reporter = pretend.stub(
            increment=pretend.call_recorder(lambda *args: None)
        )
        redis_lru = RedisLru(fakeredis, metric_reporter=metric_reporter)
        expected = func_test(0, 1, kwarg0=2, kwarg1=3)
        assert expected == redis_lru.fetch(
            func_test, [0, 1], {"kwarg0": 2, "kwarg1": 3}, None, "test", None
        )
        assert expected == redis_lru.fetch(
            func_test, [0, 1], {"kwarg0": 2, "kwarg1": 3}, None, "test", None
        )
        redis_lru.purge("test")
        assert expected == redis_lru.fetch(
            func_test, [0, 1], {"kwarg0": 2, "kwarg1": 3}, None, "test", None
        )
        assert expected == redis_lru.fetch(
            func_test, [0, 1], {"kwarg0": 2, "kwarg1": 3}, None, "test", None
        )
        # miss/hit before the purge, then miss/hit again after it.
        assert metric_reporter.increment.calls == [
            pretend.call("lru.cache.miss"),
            pretend.call("lru.cache.hit"),
            pretend.call("lru.cache.purge"),
            pretend.call("lru.cache.miss"),
            pretend.call("lru.cache.hit"),
        ]
    def test_redis_down(self):
        """With Redis unreachable, fetch degrades to calling the function
        (reporting errors), while purge raises CacheError."""
        metric_reporter = pretend.stub(
            increment=pretend.call_recorder(lambda *args: None)
        )
        down_redis = pretend.stub(
            hget=pretend.raiser(redis.exceptions.RedisError),
            pipeline=pretend.raiser(redis.exceptions.RedisError),
            scan_iter=pretend.raiser(redis.exceptions.RedisError),
        )
        redis_lru = RedisLru(down_redis, metric_reporter=metric_reporter)
        expected = func_test(0, 1, kwarg0=2, kwarg1=3)
        assert expected == redis_lru.fetch(
            func_test, [0, 1], {"kwarg0": 2, "kwarg1": 3}, None, "test", None
        )
        assert expected == redis_lru.fetch(
            func_test, [0, 1], {"kwarg0": 2, "kwarg1": 3}, None, "test", None
        )
        with pytest.raises(CacheError):
            redis_lru.purge("test")
        assert metric_reporter.increment.calls == [
            pretend.call("lru.cache.error"),  # Failed get
            pretend.call("lru.cache.miss"),
            pretend.call("lru.cache.error"),  # Failed add
            pretend.call("lru.cache.error"),  # Failed get
            pretend.call("lru.cache.miss"),
            pretend.call("lru.cache.error"),  # Failed add
            pretend.call("lru.cache.error"),  # Failed purge
        ]
class TestDeriver:
    """Tests for the cached_return_view view deriver."""
    @pytest.mark.parametrize(
        ("service_available", "xmlrpc_cache"),
        [(True, True), (True, False), (False, True), (False, False)],
    )
    def test_deriver(self, service_available, xmlrpc_cache, fakeredis):
        """The derived view returns the wrapped view's response whether or
        not the cache service resolves and whether or not caching is on."""
        context = pretend.stub()
        purger = pretend.call_recorder(lambda tags: None)
        service = RedisXMLRPCCache("redis://127.0.0.2:6379/0", purger)
        # Swap the real connection for the in-memory double.
        service.redis_conn = fakeredis
        service.redis_lru.conn = fakeredis
        if service_available:
            _find_service = pretend.call_recorder(lambda *args, **kwargs: service)
        else:
            _find_service = pretend.raiser(LookupError)
        request = pretend.stub(
            find_service=_find_service, rpc_method="rpc_method", rpc_args=(0, 1)
        )
        response = {}
        @pretend.call_recorder
        def view(context, request):
            return response
        info = pretend.stub(options={}, exception_only=False)
        info.options["xmlrpc_cache"] = xmlrpc_cache
        derived_view = cached_return_view(view, info)
        assert derived_view(context, request) is response
        assert view.calls == [pretend.call(context, request)]
    @pytest.mark.parametrize(
        ("service_available", "xmlrpc_cache"),
        [(True, True), (True, False), (False, True), (False, False)],
    )
    def test_custom_tag(self, service_available, xmlrpc_cache):
        """A custom cache tag template/arg-index is honored by the deriver."""
        context = pretend.stub()
        service = pretend.stub(
            fetch=pretend.call_recorder(
                lambda func, args, kwargs, key, tag, expires: func(*args, **kwargs)
            )
        )
        if service_available:
            _find_service = pretend.call_recorder(lambda *args, **kwargs: service)
        else:
            _find_service = pretend.raiser(LookupError)
        request = pretend.stub(
            find_service=_find_service,
            rpc_method="rpc_method",
            rpc_args=("warehouse", "1.0.0"),
        )
        response = {}
        @pretend.call_recorder
        def view(context, request):
            return response
        info = pretend.stub(options={}, exception_only=False)
        info.options["xmlrpc_cache"] = xmlrpc_cache
        info.options["xmlrpc_cache_tag"] = "arg1/%s"
        info.options["xmlrpc_cache_arg_index"] = 1
        derived_view = cached_return_view(view, info)
        assert derived_view(context, request) is response
        assert view.calls == [pretend.call(context, request)]
    @pytest.mark.parametrize(
        ("service_available", "xmlrpc_cache"),
        [(True, True), (True, False), (False, True), (False, False)],
    )
    def test_down_redis(self, service_available, xmlrpc_cache):
        """A cache backend raising CacheError must not break the view."""
        context = pretend.stub()
        service = pretend.stub(
            fetch=pretend.raiser(CacheError), purge=pretend.raiser(CacheError)
        )
        if service_available:
            _find_service = pretend.call_recorder(lambda *args, **kwargs: service)
        else:
            _find_service = pretend.raiser(LookupError)
        request = pretend.stub(
            find_service=_find_service, rpc_method="rpc_method", rpc_args=(0, 1)
        )
        response = pretend.stub()
        @pretend.call_recorder
        def view(context, request):
            return response
        info = pretend.stub(options={}, exception_only=False)
        info.options["xmlrpc_cache"] = xmlrpc_cache
        # Bug fix: the original derived the view twice (commented "miss"/
        # "hit"), discarding the first result. Deriving is not a cache
        # operation, so a single derivation is sufficient.
        derived_view = cached_return_view(view, info)
        assert derived_view(context, request) is response
        assert view.calls == [pretend.call(context, request)]
class TestPurgeTask:
    """Tests for the purge task and the session purge-key plumbing."""
    def test_purges_successfully(self, monkeypatch):
        """purge_tag looks up the cache service, purges, and logs."""
        task = pretend.stub()
        service = pretend.stub(purge=pretend.call_recorder(lambda k: None))
        request = pretend.stub(
            find_service=pretend.call_recorder(lambda iface: service),
            log=pretend.stub(info=pretend.call_recorder(lambda *args, **kwargs: None)),
        )
        services.purge_tag(task, request, "foo")
        assert request.find_service.calls == [pretend.call(IXMLRPCCache)]
        assert service.purge.calls == [pretend.call("foo")]
        assert request.log.info.calls == [pretend.call("Purging %s", "foo")]
    @pytest.mark.parametrize("exception_type", [CacheError])
    def test_purges_fails(self, monkeypatch, exception_type):
        """A failing purge is logged and the task is retried."""
        exc = exception_type()
        class Cache:
            @staticmethod
            @pretend.call_recorder
            def purge(key):
                raise exc
        class Task:
            @staticmethod
            @pretend.call_recorder
            def retry(exc):
                raise celery.exceptions.Retry
        task = Task()
        service = Cache()
        request = pretend.stub(
            find_service=pretend.call_recorder(lambda iface: service),
            log=pretend.stub(
                info=pretend.call_recorder(lambda *args, **kwargs: None),
                error=pretend.call_recorder(lambda *args, **kwargs: None),
            ),
        )
        with pytest.raises(celery.exceptions.Retry):
            services.purge_tag(task, request, "foo")
        assert request.find_service.calls == [pretend.call(IXMLRPCCache)]
        assert service.purge.calls == [pretend.call("foo")]
        assert task.retry.calls == [pretend.call(exc=exc)]
        assert request.log.info.calls == [pretend.call("Purging %s", "foo")]
        assert request.log.error.calls == [
            pretend.call("Error purging %s: %s", "foo", str(exception_type()))
        ]
    def test_store_purge_keys(self):
        """Purge keys of new/dirty/deleted objects accumulate in
        session.info; unregistered types (Type4) are ignored."""
        class Type1:
            pass
        class Type2:
            pass
        class Type3:
            pass
        class Type4:
            pass
        config = pretend.stub(
            registry={
                "cache_keys": {
                    Type1: lambda o: cache.CacheKeys(cache=[], purge=["type_1"]),
                    Type2: lambda o: cache.CacheKeys(cache=[], purge=["type_2", "foo"]),
                    Type3: lambda o: cache.CacheKeys(cache=[], purge=["type_3", "foo"]),
                }
            }
        )
        session = pretend.stub(
            info={}, new={Type1()}, dirty={Type2()}, deleted={Type3(), Type4()}
        )
        cache.store_purge_keys(config, session, pretend.stub())
        assert session.info["warehouse.legacy.api.xmlrpc.cache.purges"] == {
            "type_1",
            "type_2",
            "type_3",
            "foo",
        }
    def test_execute_purge(self, app_config):
        """Accumulated purge keys are flushed to the service and cleared."""
        service = pretend.stub(purge_tags=pretend.call_recorder(lambda purges: None))
        factory = pretend.call_recorder(lambda ctx, config: service)
        app_config.register_service_factory(factory, IXMLRPCCache)
        app_config.commit()
        session = pretend.stub(
            info={
                "warehouse.legacy.api.xmlrpc.cache.purges": {
                    "type_1",
                    "type_2",
                    "foobar",
                }
            }
        )
        cache.execute_purge(app_config, session)
        assert factory.calls == [pretend.call(None, app_config)]
        assert service.purge_tags.calls == [
            pretend.call({"type_1", "type_2", "foobar"})
        ]
        assert "warehouse.legacy.api.xmlrpc.cache.purges" not in session.info
    def test_execute_unsuccessful_purge(self):
        """If no cache service is registered, keys are still cleared."""
        @pretend.call_recorder
        def find_service_factory(interface):
            raise LookupError
        config = pretend.stub(find_service_factory=find_service_factory)
        session = pretend.stub(
            info={
                "warehouse.legacy.api.xmlrpc.cache.purges": {
                    "type_1",
                    "type_2",
                    "foobar",
                }
            }
        )
        cache.execute_purge(config, session)
        assert find_service_factory.calls == [pretend.call(IXMLRPCCache)]
        assert "warehouse.legacy.api.xmlrpc.cache.purges" not in session.info
| |
#!/usr/bin/python3
#
# This file is open source software, licensed to you under the terms
# of the Apache License, Version 2.0 (the "License"). See the NOTICE file
# distributed with this work for additional information regarding copyright
# ownership. You may not use this file except in compliance with the License.
#
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import os, os.path, textwrap, argparse, sys, shlex, subprocess, tempfile, re
# Preserve the exact command line so build.ninja can re-run configure itself.
configure_args = str.join(' ', [shlex.quote(x) for x in sys.argv[1:]])
def get_flags():
    """Return the CPU feature flags from /proc/cpuinfo as a list of strings.

    Returns None implicitly when no 'flags' line is present.
    """
    with open('/proc/cpuinfo') as cpuinfo:
        for entry in cpuinfo:
            # A trailing-newline strip cannot affect a prefix test, so a
            # plain startswith() is equivalent to the long-hand form.
            if entry.startswith('flags'):
                return re.sub(r'^flags\s+: ', '', entry).split()
def add_tristate(arg_parser, name, dest, help):
    """Register a --enable-NAME / --disable-NAME option pair on arg_parser.

    Both options share *dest*; the default of None means "auto-detect".
    """
    for prefix, action, verb in (('--enable-', 'store_true', 'Enable '),
                                 ('--disable-', 'store_false', 'Disable ')):
        arg_parser.add_argument(prefix + name, dest = dest, action = action,
                                default = None, help = verb + help)
def apply_tristate(var, test, note, missing):
    """Resolve a tristate option against a feature probe.

    var is True (forced on), False (forced off) or None (auto). Returns
    True when the feature is usable; prints *missing* and exits when a
    forced-on feature fails the probe; prints *note* and returns False
    when an auto feature is unavailable.
    """
    # Explicitly disabled: nothing to probe.
    if var is not None and not var:
        return False
    if test():
        return True
    # Probe failed. A hard request is fatal; auto just downgrades.
    if var == True:
        print(missing)
        sys.exit(1)
    print(note)
    return False
#
# dpdk_cflags - fetch the DPDK specific CFLAGS
#
# Run a simple makefile that "includes" the DPDK main makefile and prints the
# MACHINE_CFLAGS value
#
def dpdk_cflags(dpdk_target):
    """Return DPDK's MACHINE_CFLAGS for *dpdk_target* as a single string.

    Writes a throwaway makefile that includes DPDK's rte.vars.mk and echoes
    $(MACHINE_CFLAGS), then runs make on it. Relies on the module-level
    ``args`` and ``dpdk_machine`` globals (set after argument parsing).
    """
    with tempfile.NamedTemporaryFile() as sfile:
        # Derive SDK path / target name / arch from the target directory.
        dpdk_target = os.path.abspath(dpdk_target)
        dpdk_target = re.sub(r'\/+$', '', dpdk_target)
        dpdk_sdk_path = os.path.dirname(dpdk_target)
        dpdk_target_name = os.path.basename(dpdk_target)
        dpdk_arch = dpdk_target_name.split('-')[0]
        if args.dpdk:
            # Bundled-dpdk build: fixed in-tree layout overrides the above.
            dpdk_sdk_path = 'dpdk'
            dpdk_target = os.getcwd() + '/build/dpdk'
            dpdk_target_name = 'x86_64-{}-linuxapp-gcc'.format(dpdk_machine)
            dpdk_arch = 'x86_64'
        sfile.file.write(bytes('include ' + dpdk_sdk_path + '/mk/rte.vars.mk' + "\n", 'utf-8'))
        sfile.file.write(bytes('all:' + "\n\t", 'utf-8'))
        sfile.file.write(bytes('@echo $(MACHINE_CFLAGS)' + "\n", 'utf-8'))
        sfile.file.flush()
        # Renamed from the original local 'dpdk_cflags', which shadowed this
        # function's own name; also dropped the unused 'dpdk_cflags_final'.
        raw_output = subprocess.check_output(['make', '--no-print-directory',
                                              '-f', sfile.name,
                                              'RTE_SDK=' + dpdk_sdk_path,
                                              'RTE_OUTPUT=' + dpdk_target,
                                              'RTE_TARGET=' + dpdk_target_name,
                                              'RTE_SDK_BIN=' + dpdk_target,
                                              'RTE_ARCH=' + dpdk_arch])
        cflags_str = raw_output.decode('utf-8')
        return re.sub(r'\n+$', '', cflags_str)
def try_compile(compiler, source = '', flags = ()):
    """Return True if *compiler* can compile *source* (as C++) with *flags*.

    Output is discarded; only the compiler's exit status matters.
    """
    # Default changed from a mutable [] to an immutable (); list(flags)
    # below accepts either, so existing callers passing lists still work.
    with tempfile.NamedTemporaryFile() as sfile:
        sfile.file.write(bytes(source, 'utf-8'))
        sfile.file.flush()
        return subprocess.call([compiler, '-x', 'c++', '-o', '/dev/null', '-c', sfile.name] + list(flags),
                               stdout = subprocess.DEVNULL,
                               stderr = subprocess.DEVNULL) == 0
def try_compile_and_run(compiler, flags, source, env = None):
    """Compile *source* (as C++) with *flags*, run the binary, and return
    True iff both steps succeed (exit status 0).

    *env* entries are overlaid on a copy of os.environ for the run step.
    """
    # Default changed from a mutable {} to the None sentinel.
    if env is None:
        env = {}
    mktemp = tempfile.NamedTemporaryFile
    with mktemp() as sfile, mktemp(mode='rb') as xfile:
        sfile.file.write(bytes(source, 'utf-8'))
        sfile.file.flush()
        xfile.file.close()
        if subprocess.call([compiler, '-x', 'c++', '-o', xfile.name, sfile.name] + flags,
                           stdout = subprocess.DEVNULL, stderr = subprocess.DEVNULL) != 0:
            # The compiler may delete the target on failure, and lead to
            # NamedTemporaryFile's destructor throwing an exception.
            open(xfile.name, 'a').close()
            return False
        run_env = os.environ.copy()
        run_env.update(env)
        return subprocess.call([xfile.name], stdout = subprocess.DEVNULL, stderr = subprocess.DEVNULL, env=run_env) == 0
def warning_supported(warning, compiler):
    """Return True if *compiler* recognizes the given warning flag."""
    # gcc silently accepts unknown -Wno-x flags, so probe the positive form.
    positive_form = re.sub('^-Wno-', '-W', warning)
    return try_compile(compiler = compiler, flags = [positive_form])
def debug_flag(compiler):
    """Return '-g' when the compiler can emit debug info for the probe
    below, otherwise warn and return '' (debug info disabled)."""
    # Older gccs choke on debug info for auto-returning template members.
    probe = textwrap.dedent('''\
        template <typename T>
        struct x { auto f() {} };
        x<int> a;
        ''')
    if not try_compile(source = probe, flags = ['-g', '-std=gnu++1y'], compiler = compiler):
        print('Note: debug information disabled; upgrade your compiler')
        return ''
    return '-g'
def sanitize_vptr_flag(compiler):
    """Return '' when -fsanitize=vptr works (or isn't supported at all),
    or '-fno-sanitize=vptr' when the flag exists but is broken."""
    # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=67258
    # The probe uses virtual inheritance, which triggers the gcc bug; if
    # the compiled probe runs cleanly under UBSan, vptr sanitizing is OK.
    if (not try_compile(compiler, flags=['-fsanitize=vptr'])
        or try_compile_and_run(compiler, flags=['-fsanitize=undefined', '-fno-sanitize-recover'],
                               env={'UBSAN_OPTIONS': 'exitcode=1'}, source=textwrap.dedent('''
            struct A
            {
                virtual ~A() {}
            };
            struct B : virtual A {};
            struct C : virtual A {};
            struct D : B, virtual C {};
            int main()
            {
                D d;
            }
            '''))):
        return ''
    else:
        print('-fsanitize=vptr is broken, disabling')
        return '-fno-sanitize=vptr'
# Per-build-mode compiler/linker settings; 'debug' enables the sanitizers
# and the debug allocator, 'release' is a plain -O2 build.
modes = {
    'debug': {
        'sanitize': '-fsanitize=address -fsanitize=leak -fsanitize=undefined',
        'sanitize_libs': '-lubsan -lasan',
        'opt': '-O0 -DDEBUG -DDEBUG_SHARED_PTR -DDEFAULT_ALLOCATOR',
        'libs': '',
    },
    'release': {
        'sanitize': '',
        'sanitize_libs': '',
        'opt': '-O2',
        'libs': '',
    },
}
tests = [
'tests/fileiotest',
'tests/directory_test',
'tests/linecount',
'tests/echotest',
'tests/l3_test',
'tests/ip_test',
'tests/timertest',
'tests/tcp_test',
'tests/futures_test',
'tests/alloc_test',
'tests/foreign_ptr_test',
'tests/smp_test',
'tests/thread_test',
'tests/thread_context_switch',
'tests/udp_server',
'tests/udp_client',
'tests/blkdiscard_test',
'tests/sstring_test',
'tests/httpd',
'tests/memcached/test_ascii_parser',
'tests/tcp_server',
'tests/tcp_client',
'tests/allocator_test',
'tests/output_stream_test',
'tests/udp_zero_copy',
'tests/shared_ptr_test',
'tests/slab_test',
'tests/fstream_test',
'tests/distributed_test',
'tests/rpc',
'tests/semaphore_test',
'tests/packet_test',
]
apps = [
'apps/httpd/httpd',
'apps/seawreck/seawreck',
'apps/seastar/seastar',
'apps/memcached/memcached',
]
all_artifacts = apps + tests + ['libseastar.a', 'seastar.pc']
arg_parser = argparse.ArgumentParser('Configure seastar')
arg_parser.add_argument('--static', dest = 'static', action = 'store_const', default = '',
const = '-static',
help = 'Static link (useful for running on hosts outside the build environment')
arg_parser.add_argument('--pie', dest = 'pie', action = 'store_true',
help = 'Build position-independent executable (PIE)')
arg_parser.add_argument('--so', dest = 'so', action = 'store_true',
help = 'Build shared object (SO) instead of executable')
arg_parser.add_argument('--mode', action='store', choices=list(modes.keys()) + ['all'], default='all')
arg_parser.add_argument('--with', dest='artifacts', action='append', choices=all_artifacts, default=[])
arg_parser.add_argument('--cflags', action = 'store', dest = 'user_cflags', default = '',
help = 'Extra flags for the C++ compiler')
arg_parser.add_argument('--ldflags', action = 'store', dest = 'user_ldflags', default = '',
help = 'Extra flags for the linker')
arg_parser.add_argument('--compiler', action = 'store', dest = 'cxx', default = 'g++',
help = 'C++ compiler path')
arg_parser.add_argument('--with-osv', action = 'store', dest = 'with_osv', default = '',
help = 'Shortcut for compile for OSv')
arg_parser.add_argument('--enable-dpdk', action = 'store_true', dest = 'dpdk', default = False,
help = 'Enable dpdk (from included dpdk sources)')
arg_parser.add_argument('--dpdk-target', action = 'store', dest = 'dpdk_target', default = '',
help = 'Path to DPDK SDK target location (e.g. <DPDK SDK dir>/x86_64-native-linuxapp-gcc)')
arg_parser.add_argument('--debuginfo', action = 'store', dest = 'debuginfo', type = int, default = 1,
help = 'Enable(1)/disable(0)compiler debug information generation')
add_tristate(arg_parser, name = 'hwloc', dest = 'hwloc', help = 'hwloc support')
add_tristate(arg_parser, name = 'xen', dest = 'xen', help = 'Xen support')
args = arg_parser.parse_args()
libnet = [
'net/proxy.cc',
'net/virtio.cc',
'net/dpdk.cc',
'net/ip.cc',
'net/ethernet.cc',
'net/arp.cc',
'net/native-stack.cc',
'net/ip_checksum.cc',
'net/udp.cc',
'net/tcp.cc',
'net/dhcp.cc',
]
core = [
'core/reactor.cc',
'core/fstream.cc',
'core/posix.cc',
'core/memory.cc',
'core/resource.cc',
'core/scollectd.cc',
'core/app-template.cc',
'core/thread.cc',
'core/dpdk_rte.cc',
'util/conversions.cc',
'net/packet.cc',
'net/posix-stack.cc',
'net/net.cc',
'rpc/rpc.cc',
]
http = ['http/transformers.cc',
'http/json_path.cc',
'http/file_handler.cc',
'http/common.cc',
'http/routes.cc',
'json/json_elements.cc',
'json/formatter.cc',
'http/matcher.cc',
'http/mime_types.cc',
'http/httpd.cc',
'http/reply.cc',
'http/request_parser.rl',
'http/api_docs.cc',
]
boost_test_lib = [
'tests/test-utils.cc',
'tests/test_runner.cc',
]
defines = []
libs = '-laio -lboost_program_options -lboost_system -lstdc++ -lm -lboost_unit_test_framework -lboost_thread -lcryptopp -lrt'
hwloc_libs = '-lhwloc -lnuma -lpciaccess -lxml2 -lz'
xen_used = False
def have_xen():
    """Return True when the Xen development headers are present."""
    headers = ['stdint.h', 'xen/xen.h', 'xen/sys/evtchn.h',
               'xen/sys/gntdev.h', 'xen/sys/gntalloc.h']
    probe = ''.join('#include <%s>\n' % header for header in headers)
    return try_compile(compiler = args.cxx, source = probe)
if apply_tristate(args.xen, test = have_xen,
note = 'Note: xen-devel not installed. No Xen support.',
missing = 'Error: required package xen-devel not installed.'):
libs += ' -lxenstore'
defines.append("HAVE_XEN")
libnet += [ 'net/xenfront.cc' ]
core += [
'core/xen/xenstore.cc',
'core/xen/gntalloc.cc',
'core/xen/evtchn.cc',
]
xen_used=True
if xen_used and args.dpdk_target:
print("Error: only xen or dpdk can be used, not both.")
sys.exit(1)
memcache_base = [
'apps/memcached/ascii.rl'
] + libnet + core
deps = {
'libseastar.a' : core + libnet + http,
'seastar.pc': [],
'apps/seastar/seastar': ['apps/seastar/main.cc'] + core,
'apps/httpd/httpd': ['apps/httpd/demo.json', 'apps/httpd/main.cc'] + http + libnet + core,
'apps/memcached/memcached': ['apps/memcached/memcache.cc'] + memcache_base,
'tests/memcached/test_ascii_parser': ['tests/memcached/test_ascii_parser.cc'] + memcache_base + boost_test_lib,
'tests/fileiotest': ['tests/fileiotest.cc'] + core + boost_test_lib,
'tests/directory_test': ['tests/directory_test.cc'] + core,
'tests/linecount': ['tests/linecount.cc'] + core,
'tests/echotest': ['tests/echotest.cc'] + core + libnet,
'tests/l3_test': ['tests/l3_test.cc'] + core + libnet,
'tests/ip_test': ['tests/ip_test.cc'] + core + libnet,
'tests/tcp_test': ['tests/tcp_test.cc'] + core + libnet,
'tests/timertest': ['tests/timertest.cc'] + core,
'tests/futures_test': ['tests/futures_test.cc'] + core + boost_test_lib,
'tests/alloc_test': ['tests/alloc_test.cc'] + core + boost_test_lib,
'tests/foreign_ptr_test': ['tests/foreign_ptr_test.cc'] + core + boost_test_lib,
'tests/semaphore_test': ['tests/semaphore_test.cc'] + core + boost_test_lib,
'tests/smp_test': ['tests/smp_test.cc'] + core,
'tests/thread_test': ['tests/thread_test.cc'] + core + boost_test_lib,
'tests/thread_context_switch': ['tests/thread_context_switch.cc'] + core,
'tests/udp_server': ['tests/udp_server.cc'] + core + libnet,
'tests/udp_client': ['tests/udp_client.cc'] + core + libnet,
'tests/tcp_server': ['tests/tcp_server.cc'] + core + libnet,
'tests/tcp_client': ['tests/tcp_client.cc'] + core + libnet,
'apps/seawreck/seawreck': ['apps/seawreck/seawreck.cc', 'http/http_response_parser.rl'] + core + libnet,
'tests/blkdiscard_test': ['tests/blkdiscard_test.cc'] + core,
'tests/sstring_test': ['tests/sstring_test.cc'] + core,
'tests/httpd': ['tests/httpd.cc'] + http + core + boost_test_lib,
'tests/allocator_test': ['tests/allocator_test.cc', 'core/memory.cc', 'core/posix.cc'],
'tests/output_stream_test': ['tests/output_stream_test.cc'] + core + libnet + boost_test_lib,
'tests/udp_zero_copy': ['tests/udp_zero_copy.cc'] + core + libnet,
'tests/shared_ptr_test': ['tests/shared_ptr_test.cc'] + core,
'tests/slab_test': ['tests/slab_test.cc'] + core,
'tests/fstream_test': ['tests/fstream_test.cc'] + core + boost_test_lib,
'tests/distributed_test': ['tests/distributed_test.cc'] + core,
'tests/rpc': ['tests/rpc.cc'] + core + libnet,
'tests/packet_test': ['tests/packet_test.cc'] + core + libnet,
}
warnings = [
'-Wno-mismatched-tags', # clang-only
]
# The "--with-osv=<path>" parameter is a shortcut for a bunch of other
# settings:
if args.with_osv:
args.so = True
args.hwloc = False
args.user_cflags = (args.user_cflags +
' -DDEFAULT_ALLOCATOR -fvisibility=default -DHAVE_OSV -I' +
args.with_osv + ' -I' + args.with_osv + '/include -I' +
args.with_osv + '/arch/x64')
dpdk_arch_xlat = {
'native': 'native',
'nehalem': 'nhm',
'westmere': 'wsm',
'sandybridge': 'snb',
'ivybridge': 'ivb',
}
dpdk_machine = 'native'
if args.dpdk:
if not os.path.exists('dpdk') or not os.listdir('dpdk'):
raise Exception('--enable-dpdk: dpdk/ is empty. Run "git submodule update --init".')
cflags = args.user_cflags.split()
dpdk_machine = ([dpdk_arch_xlat[cflag[7:]]
for cflag in cflags
if cflag.startswith('-march')] or ['native'])[0]
subprocess.check_call('make -C dpdk RTE_OUTPUT=$PWD/build/dpdk/ config T=x86_64-native-linuxapp-gcc'.format(
dpdk_machine=dpdk_machine),
shell = True)
# adjust configutation to taste
dotconfig = 'build/dpdk/.config'
lines = open(dotconfig, encoding='UTF-8').readlines()
def update(lines, vars):
ret = []
for line in lines:
for var, val in vars.items():
if line.startswith(var + '='):
line = var + '=' + val + '\n'
ret.append(line)
return ret
lines = update(lines, {'CONFIG_RTE_LIBRTE_PMD_BOND': 'n',
'CONFIG_RTE_MBUF_SCATTER_GATHER': 'n',
'CONFIG_RTE_LIBRTE_IP_FRAG': 'n',
'CONFIG_RTE_APP_TEST': 'n',
'CONFIG_RTE_TEST_PMD': 'n',
'CONFIG_RTE_MBUF_REFCNT_ATOMIC': 'n',
'CONFIG_RTE_MAX_MEMSEG': '8192',
'CONFIG_RTE_EAL_IGB_UIO': 'n',
'CONFIG_RTE_LIBRTE_KNI': 'n',
'CONFIG_RTE_KNI_KMOD': 'n',
'CONFIG_RTE_LIBRTE_JOBSTATS': 'n',
'CONFIG_RTE_LIBRTE_LPM': 'n',
'CONFIG_RTE_LIBRTE_ACL': 'n',
'CONFIG_RTE_LIBRTE_POWER': 'n',
'CONFIG_RTE_LIBRTE_IP_FRAG': 'n',
'CONFIG_RTE_LIBRTE_METER': 'n',
'CONFIG_RTE_LIBRTE_SCHED': 'n',
'CONFIG_RTE_LIBRTE_DISTRIBUTOR': 'n',
'CONFIG_RTE_LIBRTE_REORDER': 'n',
'CONFIG_RTE_LIBRTE_PORT': 'n',
'CONFIG_RTE_LIBRTE_TABLE': 'n',
'CONFIG_RTE_LIBRTE_PIPELINE': 'n',
})
lines += 'CONFIG_RTE_MACHINE={}'.format(dpdk_machine)
open(dotconfig, 'w', encoding='UTF-8').writelines(lines)
args.dpdk_target = os.getcwd() + '/build/dpdk'
if args.dpdk_target:
args.user_cflags = (args.user_cflags +
' -DHAVE_DPDK -I' + args.dpdk_target + '/include ' +
dpdk_cflags(args.dpdk_target) +
' -Wno-error=literal-suffix -Wno-literal-suffix -Wno-invalid-offsetof')
libs += (' -L' + args.dpdk_target + '/lib ')
if args.with_osv:
libs += '-lintel_dpdk -lrt -lm -ldl'
else:
libs += '-Wl,--whole-archive -lrte_pmd_vmxnet3_uio -lrte_pmd_i40e -lrte_pmd_ixgbe -lrte_pmd_e1000 -lrte_pmd_ring -Wl,--no-whole-archive -lrte_hash -lrte_kvargs -lrte_mbuf -lethdev -lrte_eal -lrte_malloc -lrte_mempool -lrte_ring -lrte_cmdline -lrte_cfgfile -lrt -lm -ldl'
warnings = [w
for w in warnings
if warning_supported(warning = w, compiler = args.cxx)]
warnings = ' '.join(warnings)
dbgflag = debug_flag(args.cxx) if args.debuginfo else ''
sanitize_flags = sanitize_vptr_flag(args.cxx)
modes['debug']['sanitize'] += ' ' + sanitize_flags
def have_hwloc():
    """Return True when the hwloc and numa development headers compile."""
    probe = '#include <hwloc.h>\n#include <numa.h>'
    return try_compile(compiler = args.cxx, source = probe)
if apply_tristate(args.hwloc, test = have_hwloc,
note = 'Note: hwloc-devel/numactl-devel not installed. No NUMA support.',
missing = 'Error: required packages hwloc-devel/numactl-devel not installed.'):
libs += ' ' + hwloc_libs
defines.append('HAVE_HWLOC')
defines.append('HAVE_NUMA')
if args.so:
args.pie = '-shared'
args.fpie = '-fpic'
elif args.pie:
args.pie = '-pie'
args.fpie = '-fpie'
else:
args.pie = ''
args.fpie = ''
defines = ' '.join(['-D' + d for d in defines])
globals().update(vars(args))
total_memory = os.sysconf('SC_PAGE_SIZE') * os.sysconf('SC_PHYS_PAGES')
link_pool_depth = max(int(total_memory / 7e9), 1)
build_modes = modes if args.mode == 'all' else [args.mode]
build_artifacts = all_artifacts if not args.artifacts else args.artifacts
dpdk_sources = []
if args.dpdk:
for root, dirs, files in os.walk('dpdk'):
dpdk_sources += [os.path.join(root, file)
for file in files
if file.endswith('.h') or file.endswith('.c')]
dpdk_sources = ' '.join(dpdk_sources)
outdir = 'build'
buildfile = 'build.ninja'
os.makedirs(outdir, exist_ok = True)
do_sanitize = True
if args.static:
do_sanitize = False
with open(buildfile, 'w') as f:
dpdk_deps = ''
if args.dpdk:
# fake dependencies on dpdk, so that it is built before anything else
dpdk_deps = ' {dpdk_target}/include/rte_eal.h {dpdk_target}/lib/librte_eal.a'.format(dpdk_target=args.dpdk_target)
f.write(textwrap.dedent('''\
configure_args = {configure_args}
builddir = {outdir}
cxx = {cxx}
# we disable _FORTIFY_SOURCE because it generates false positives with longjmp() (core/thread.cc)
cxxflags = -std=gnu++1y {dbgflag} {fpie} -Wall -Werror -fvisibility=hidden -pthread -I. -U_FORTIFY_SOURCE {user_cflags} {warnings} {defines}
ldflags = {dbgflag} -Wl,--no-as-needed {static} {pie} -fvisibility=hidden -pthread {user_ldflags}
libs = {libs}
pool link_pool
depth = {link_pool_depth}
rule ragel
command = ragel -G2 -o $out $in
description = RAGEL $out
rule gen
command = /bin/echo -e $text > $out
description = GEN $out
rule swagger
command = json/json2code.py -f $in -o $out
description = SWAGGER $out
''').format(**globals()))
if args.dpdk:
f.write(textwrap.dedent('''\
rule dpdkmake
command = make -C build/dpdk
build {dpdk_deps} : dpdkmake {dpdk_sources}
''').format(**globals()))
for mode in build_modes:
modeval = modes[mode]
if modeval['sanitize'] and not do_sanitize:
print('Note: --static disables debug mode sanitizers')
modeval['sanitize'] = ''
modeval['sanitize_libs'] = ''
f.write(textwrap.dedent('''\
cxxflags_{mode} = {sanitize} {opt} -I $builddir/{mode}/gen
libs_{mode} = {libs} {sanitize_libs}
rule cxx.{mode}
command = $cxx -MMD -MT $out -MF $out.d $cxxflags_{mode} $cxxflags -c -o $out $in
description = CXX $out
depfile = $out.d
rule link.{mode}
command = $cxx $cxxflags_{mode} $ldflags -o $out $in $libs $libs_{mode}
description = LINK $out
pool = link_pool
rule link_stripped.{mode}
command = $cxx $cxxflags_{mode} -s $ldflags -o $out $in $libs $libs_{mode}
description = LINK (stripped) $out
pool = link_pool
rule ar.{mode}
command = rm -f $out; ar cr $out $in; ranlib $out
description = AR $out
''').format(mode = mode, **modeval))
f.write('build {mode}: phony {artifacts}\n'.format(mode = mode,
artifacts = str.join(' ', ('$builddir/' + mode + '/' + x for x in build_artifacts))))
compiles = {}
ragels = {}
swaggers = {}
for binary in build_artifacts:
srcs = deps[binary]
objs = ['$builddir/' + mode + '/' + src.replace('.cc', '.o')
for src in srcs
if src.endswith('.cc')]
if binary.endswith('.pc'):
vars = modeval.copy()
vars.update(globals())
pc = textwrap.dedent('''\
Name: Seastar
URL: http://seastar-project.org/
Description: Advanced C++ framework for high-performance server applications on modern hardware.
Version: 1.0
Libs: -L{srcdir}/{builddir} -Wl,--whole-archive,-lseastar,--no-whole-archive {dbgflag} -Wl,--no-as-needed {static} {pie} -fvisibility=hidden -pthread {user_ldflags} {libs} {sanitize_libs}
Cflags: -std=gnu++1y {dbgflag} {fpie} -Wall -Werror -fvisibility=hidden -pthread -I{srcdir} -I{srcdir}/{builddir}/gen {user_cflags} {warnings} {defines} {sanitize} {opt}
''').format(builddir = 'build/' + mode, srcdir = os.getcwd(), **vars)
f.write('build $builddir/{}/{}: gen\n text = {}\n'.format(mode, binary, repr(pc)))
elif binary.endswith('.a'):
f.write('build $builddir/{}/{}: ar.{} {}\n'.format(mode, binary, mode, str.join(' ', objs)))
else:
if binary.startswith('tests/'):
# Our code's debugging information is huge, and multiplied
# by many tests yields ridiculous amounts of disk space.
# So we strip the tests by default; The user can very
# quickly re-link the test unstripped by adding a "_g"
# to the test name, e.g., "ninja build/release/testname_g"
f.write('build $builddir/{}/{}: link_stripped.{} {} | {}\n'.format(mode, binary, mode, str.join(' ', objs), dpdk_deps))
f.write('build $builddir/{}/{}_g: link.{} {} | {}\n'.format(mode, binary, mode, str.join(' ', objs), dpdk_deps))
else:
f.write('build $builddir/{}/{}: link.{} {} | {}\n'.format(mode, binary, mode, str.join(' ', objs), dpdk_deps))
for src in srcs:
if src.endswith('.cc'):
obj = '$builddir/' + mode + '/' + src.replace('.cc', '.o')
compiles[obj] = src
elif src.endswith('.rl'):
hh = '$builddir/' + mode + '/gen/' + src.replace('.rl', '.hh')
ragels[hh] = src
elif src.endswith('.json'):
hh = '$builddir/' + mode + '/gen/' + src + '.hh'
swaggers[hh] = src
else:
raise Exception('No rule for ' + src)
for obj in compiles:
src = compiles[obj]
gen_headers = list(ragels.keys()) + list(swaggers.keys())
f.write('build {}: cxx.{} {} || {} \n'.format(obj, mode, src, ' '.join(gen_headers) + dpdk_deps))
for hh in ragels:
src = ragels[hh]
f.write('build {}: ragel {}\n'.format(hh, src))
for hh in swaggers:
src = swaggers[hh]
f.write('build {}: swagger {}\n'.format(hh,src))
f.write(textwrap.dedent('''\
rule configure
command = python3 configure.py $configure_args
generator = 1
build build.ninja: configure | configure.py
rule cscope
command = find -name '*.[chS]' -o -name "*.cc" -o -name "*.hh" | cscope -bq -i-
description = CSCOPE
build cscope: cscope
rule md2html
command = pandoc --self-contained --toc -c doc/template.css -V documentclass=report --chapters --number-sections -f markdown_github+pandoc_title_block --highlight-style tango $in -o $out
description = PANDOC $out
rule md2pdf
command = pandoc -f markdown_github+pandoc_title_block --highlight-style tango --template=doc/template.tex $in -o $out
description = PANDOC $out
build doc/tutorial.html: md2html doc/tutorial.md
build doc/tutorial.pdf: md2pdf doc/tutorial.md
default {modes_list}
''').format(modes_list = ' '.join(build_modes), **globals()))
| |
from collections import Mapping
from contextlib import contextmanager
import pandas as pd
from . import formatting
from .merge import merge_dataarray_coords
from .pycompat import iteritems, basestring, OrderedDict
def _coord_merge_finalize(target, other, target_conflicts, other_conflicts,
                          promote_dims=None):
    """Apply the outcome of a coordinate-merge validation.

    Removes the conflicting keys from ``target``, then copies every
    non-conflicting variable from ``other`` into ``target``, expanding
    dimensions for the keys listed in ``promote_dims``.
    """
    if promote_dims is None:
        promote_dims = {}
    # Drop target-side conflicts first so they cannot shadow new entries.
    for name in target_conflicts:
        del target[name]
    for name, value in iteritems(other):
        if name in other_conflicts:
            continue
        variable = value.variable
        if name in promote_dims:
            variable = variable.expand_dims(promote_dims[name])
        target[name] = variable
def _common_shape(*args):
dims = OrderedDict()
for arg in args:
for dim in arg.dims:
size = arg.shape[arg.get_axis_num(dim)]
if dim in dims and size != dims[dim]:
# sometimes we may not have checked the index first
raise ValueError('index %r not aligned' % dim)
dims[dim] = size
return dims
def _dim_shape(var):
return [(dim, size) for dim, size in zip(var.dims, var.shape)]
class AbstractCoordinates(Mapping):
    """Common Mapping behaviour for Dataset and DataArray coordinates.

    Subclasses supply ``_data`` (the wrapped xarray object), ``_names``
    (the set of coordinate names) and ``_variables`` (a name -> variable
    mapping); every method here is written in terms of those three.
    """
    def __getitem__(self, key):
        # Accept either a coordinate name or a dotted 'coord.component'
        # string whose prefix is a known coordinate; the actual lookup is
        # delegated to the wrapped object.
        if (key in self._names or
            (isinstance(key, basestring) and
             key.split('.')[0] in self._names)):
            # allow indexing current coordinates or components
            return self._data[key]
        else:
            raise KeyError(key)
    def __setitem__(self, key, value):
        # Route assignment through update() so subclass merge rules apply.
        self.update({key: value})
    def __iter__(self):
        # needs to be in the same order as the dataset variables
        for k in self._variables:
            if k in self._names:
                yield k
    def __len__(self):
        return len(self._names)
    def __contains__(self, key):
        return key in self._names
    def __repr__(self):
        return formatting.coords_repr(self)
    @property
    def dims(self):
        # Dimensions of the wrapped Dataset/DataArray.
        return self._data.dims
    def to_index(self, ordered_dims=None):
        """Convert all index coordinates into a :py:class:`pandas.MultiIndex`

        Assumes every entry of ``ordered_dims`` has a matching index
        coordinate present in ``_variables``.
        """
        if ordered_dims is None:
            ordered_dims = self.dims
        indexes = [self._variables[k].to_index() for k in ordered_dims]
        return pd.MultiIndex.from_product(indexes, names=list(ordered_dims))
    def _merge_validate(self, other):
        """Determine conflicting variables to be dropped from either self or
        other (or unresolvable conflicts that should just raise)

        Returns a 3-tuple ``(self_conflicts, other_conflicts,
        promote_dims)`` consumed by ``_coord_merge_finalize``.
        """
        self_conflicts = set()
        other_conflicts = set()
        promote_dims = {}
        for k in self:
            if k in other:
                self_var = self._variables[k]
                other_var = other[k].variable
                if not self_var.broadcast_equals(other_var):
                    # Unequal values: two index coordinates cannot be
                    # reconciled; a non-index coordinate is dropped from
                    # whichever side(s) it is non-index on.
                    if k in self.dims and k in other.dims:
                        raise ValueError('index %r not aligned' % k)
                    if k not in self.dims:
                        self_conflicts.add(k)
                    if k not in other.dims:
                        other_conflicts.add(k)
                elif _dim_shape(self_var) != _dim_shape(other_var):
                    # Values agree after broadcasting but the shapes differ:
                    # record the common (broadcast) shape and rebuild this
                    # coordinate from ``other``.
                    promote_dims[k] = _common_shape(self_var, other_var)
                    self_conflicts.add(k)
        return self_conflicts, other_conflicts, promote_dims
    @contextmanager
    def _merge_inplace(self, other):
        # Validation runs before yielding so errors surface prior to any
        # mutation; the merge itself is applied after the with-block body.
        if other is None:
            yield
        else:
            # ignore conflicts in self because we don't want to remove
            # existing coords in an in-place update
            _, other_conflicts, promote_dims = self._merge_validate(other)
            # treat promoted dimensions as a conflict, also because we don't
            # want to modify existing coords
            other_conflicts.update(promote_dims)
            yield
            _coord_merge_finalize(self, other, {}, other_conflicts)
    def merge(self, other):
        """Merge two sets of coordinates to create a new Dataset

        The method implements the logic used for joining coordinates in the
        result of a binary operation performed on xarray objects:

        - If two index coordinates conflict (are not equal), an exception is
          raised.
        - If an index coordinate and a non-index coordinate conflict, the non-
          index coordinate is dropped.
        - If two non-index coordinates conflict, both are dropped.

        Parameters
        ----------
        other : DatasetCoordinates or DataArrayCoordinates
            The coordinates from another dataset or data array.

        Returns
        -------
        merged : Dataset
            A new Dataset with merged coordinates.
        """
        ds = self.to_dataset()
        if other is not None:
            conflicts = self._merge_validate(other)
            _coord_merge_finalize(ds.coords, other, *conflicts)
        return ds
class DatasetCoordinates(AbstractCoordinates):
    """Dictionary like container for Dataset coordinates.

    Essentially an immutable OrderedDict with keys given by the array's
    dimensions and the values given by the corresponding xarray.Coordinate
    objects.
    """

    def __init__(self, dataset):
        self._data = dataset

    @property
    def _names(self):
        return self._data._coord_names

    @property
    def _variables(self):
        return self._data._variables

    def to_dataset(self):
        """Convert these coordinates into a new Dataset"""
        return self._data._copy_listed(self._names)

    def update(self, other):
        # Delegate the merge to the dataset, then register the new names.
        self._data.update(other)
        self._names.update(other.keys())

    def __delitem__(self, key):
        if key not in self:
            raise KeyError(key)
        del self._data[key]
class DataArrayCoordinates(AbstractCoordinates):
    """Dictionary like container for DataArray coordinates.

    Essentially an OrderedDict with keys given by the array's
    dimensions and the values given by the corresponding xarray.Coordinate
    objects.
    """

    def __init__(self, dataarray):
        self._data = dataarray

    @property
    def _names(self):
        return set(self._data._coords)

    @property
    def _variables(self):
        return self._data._coords

    def _to_dataset(self, shallow_copy=True):
        from .dataset import Dataset
        source = self._data._coords
        if shallow_copy:
            coords = OrderedDict((name, var.copy(deep=False))
                                 for name, var in source.items())
        else:
            coords = OrderedDict(source)
        dims = dict(zip(self.dims, self._data.shape))
        return Dataset._construct_direct(coords, coord_names=set(self._names),
                                         dims=dims, attrs=None)

    def to_dataset(self):
        return self._to_dataset()

    def update(self, other):
        merged = merge_dataarray_coords(
            self._data.indexes, self._data._coords, other)
        self._data._coords = merged

    def __delitem__(self, key):
        if key in self.dims:
            raise ValueError('cannot delete a coordinate corresponding to a '
                             'DataArray dimension')
        del self._data._coords[key]
class Indexes(Mapping):
    """Read-only mapping from dimension names to their pandas indexes."""

    def __init__(self, source):
        self._source = source

    def __iter__(self):
        return iter(self._source.dims)

    def __len__(self):
        return len(self._source.dims)

    def __contains__(self, key):
        return key in self._source.dims

    def __getitem__(self, key):
        # Only dimension names are valid keys.
        if key not in self:
            raise KeyError(key)
        return self._source[key].to_index()

    def __repr__(self):
        return formatting.indexes_repr(self)
| |
# Copyright (c) 2012 Citrix Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The Aggregate admin API extension."""
import datetime
import six
from webob import exc
from nova.api.openstack import extensions
from nova.compute import api as compute_api
from nova import exception
from nova.i18n import _
from nova import utils
# Policy-enforcement hook shared by every handler in this extension;
# checks the caller against the 'compute:aggregates' rule.
authorize = extensions.extension_authorizer('compute', 'aggregates')
def _get_context(req):
return req.environ['nova.context']
def get_host_from_body(fn):
    """Makes sure that the host exists.

    Decorator for aggregate host actions. Validates that the request body
    contains exactly one key, that the key is 'host', and that its value
    is a string of 1-255 characters, then calls ``fn`` with the host value
    in place of the body.

    Raises webob.exc.HTTPBadRequest when validation fails.
    """
    # Fix: preserve the wrapped function's metadata (__name__, __doc__)
    # so introspection and debugging see the real handler, not 'wrapped'.
    import functools

    @functools.wraps(fn)
    def wrapped(self, req, id, body, *args, **kwargs):
        if len(body) != 1:
            msg = _('Only host parameter can be specified')
            raise exc.HTTPBadRequest(explanation=msg)
        elif 'host' not in body:
            msg = _('Host parameter must be specified')
            raise exc.HTTPBadRequest(explanation=msg)
        try:
            utils.check_string_length(body['host'], 'host', 1, 255)
        except exception.InvalidInput as e:
            raise exc.HTTPBadRequest(explanation=e.format_message())
        host = body['host']
        return fn(self, req, id, host, *args, **kwargs)
    return wrapped
class AggregateController(object):
    """The Host Aggregates API controller for the OpenStack API."""
    def __init__(self):
        # Thin wrapper over the compute-layer aggregate API.
        self.api = compute_api.AggregateAPI()
    def index(self, req):
        """Returns a list a host aggregate's id, name, availability_zone."""
        context = _get_context(req)
        authorize(context)
        aggregates = self.api.get_aggregate_list(context)
        return {'aggregates': [self._marshall_aggregate(a)['aggregate']
                               for a in aggregates]}
    def create(self, req, body):
        """Creates an aggregate, given its name and
        optional availability zone.
        """
        context = _get_context(req)
        authorize(context)
        # Body must contain exactly one key: 'aggregate'.
        if len(body) != 1:
            raise exc.HTTPBadRequest()
        try:
            host_aggregate = body["aggregate"]
            name = host_aggregate["name"]
        except KeyError:
            raise exc.HTTPBadRequest()
        avail_zone = host_aggregate.get("availability_zone")
        try:
            utils.check_string_length(name, "Aggregate name", 1, 255)
            if avail_zone is not None:
                utils.check_string_length(avail_zone, "Availability_zone", 1,
                                          255)
        except exception.InvalidInput as e:
            raise exc.HTTPBadRequest(explanation=e.format_message())
        try:
            aggregate = self.api.create_aggregate(context, name, avail_zone)
        except exception.AggregateNameExists as e:
            raise exc.HTTPConflict(explanation=e.format_message())
        except exception.InvalidAggregateAction as e:
            raise exc.HTTPBadRequest(explanation=e.format_message())
        agg = self._marshall_aggregate(aggregate)
        # To maintain the same API result as before the changes for returning
        # nova objects were made.
        del agg['aggregate']['hosts']
        del agg['aggregate']['metadata']
        return agg
    def show(self, req, id):
        """Shows the details of an aggregate, hosts and metadata included."""
        context = _get_context(req)
        authorize(context)
        try:
            aggregate = self.api.get_aggregate(context, id)
        except exception.AggregateNotFound as e:
            raise exc.HTTPNotFound(explanation=e.format_message())
        return self._marshall_aggregate(aggregate)
    def update(self, req, id, body):
        """Updates the name and/or availability_zone of given aggregate."""
        context = _get_context(req)
        authorize(context)
        if len(body) != 1:
            raise exc.HTTPBadRequest()
        try:
            updates = body["aggregate"]
        except KeyError:
            raise exc.HTTPBadRequest()
        # Only 'name' and 'availability_zone' may be updated, and at
        # least one of them must be present.
        if len(updates) < 1:
            raise exc.HTTPBadRequest()
        for key in updates.keys():
            if key not in ["name", "availability_zone"]:
                raise exc.HTTPBadRequest()
        try:
            if 'name' in updates:
                utils.check_string_length(updates['name'], "Aggregate name", 1,
                                          255)
            if updates.get("availability_zone") is not None:
                utils.check_string_length(updates['availability_zone'],
                                          "Availability_zone", 1, 255)
        except exception.InvalidInput as e:
            raise exc.HTTPBadRequest(explanation=e.format_message())
        try:
            aggregate = self.api.update_aggregate(context, id, updates)
        except exception.AggregateNameExists as e:
            raise exc.HTTPConflict(explanation=e.format_message())
        except exception.AggregateNotFound as e:
            raise exc.HTTPNotFound(explanation=e.format_message())
        except exception.InvalidAggregateAction as e:
            raise exc.HTTPBadRequest(explanation=e.format_message())
        return self._marshall_aggregate(aggregate)
    def delete(self, req, id):
        """Removes an aggregate by id."""
        context = _get_context(req)
        authorize(context)
        try:
            self.api.delete_aggregate(context, id)
        except exception.AggregateNotFound as e:
            raise exc.HTTPNotFound(explanation=e.format_message())
        except exception.InvalidAggregateAction as e:
            raise exc.HTTPBadRequest(explanation=e.format_message())
    def action(self, req, id, body):
        """Dispatches a member action (add_host/remove_host/set_metadata).

        Only the first key of ``body`` is examined; its value is handed to
        the matching handler. An empty body falls through to the final
        HTTPBadRequest.
        """
        _actions = {
            'add_host': self._add_host,
            'remove_host': self._remove_host,
            'set_metadata': self._set_metadata,
        }
        for action, data in six.iteritems(body):
            if action not in _actions.keys():
                msg = _('Aggregates does not have %s action') % action
                raise exc.HTTPBadRequest(explanation=msg)
            # Returns on the first (and expected only) entry in the body.
            return _actions[action](req, id, data)
        raise exc.HTTPBadRequest(explanation=_("Invalid request body"))
    @get_host_from_body
    def _add_host(self, req, id, host):
        """Adds a host to the specified aggregate."""
        context = _get_context(req)
        authorize(context)
        try:
            aggregate = self.api.add_host_to_aggregate(context, id, host)
        except (exception.AggregateNotFound, exception.ComputeHostNotFound):
            msg = _('Cannot add host %(host)s in aggregate'
                    ' %(id)s: not found') % {'host': host, 'id': id}
            raise exc.HTTPNotFound(explanation=msg)
        except (exception.AggregateHostExists,
                exception.InvalidAggregateAction):
            msg = _('Cannot add host %(host)s in aggregate'
                    ' %(id)s: host exists') % {'host': host, 'id': id}
            raise exc.HTTPConflict(explanation=msg)
        return self._marshall_aggregate(aggregate)
    @get_host_from_body
    def _remove_host(self, req, id, host):
        """Removes a host from the specified aggregate."""
        context = _get_context(req)
        authorize(context)
        try:
            aggregate = self.api.remove_host_from_aggregate(context, id, host)
        except (exception.AggregateNotFound, exception.AggregateHostNotFound,
                exception.ComputeHostNotFound):
            msg = _('Cannot remove host %(host)s in aggregate'
                    ' %(id)s: not found') % {'host': host, 'id': id}
            raise exc.HTTPNotFound(explanation=msg)
        except exception.InvalidAggregateAction:
            msg = _('Cannot remove host %(host)s in aggregate'
                    ' %(id)s: invalid') % {'host': host, 'id': id}
            raise exc.HTTPConflict(explanation=msg)
        return self._marshall_aggregate(aggregate)
    def _set_metadata(self, req, id, body):
        """Replaces the aggregate's existing metadata with new metadata."""
        context = _get_context(req)
        authorize(context)
        if len(body) != 1:
            raise exc.HTTPBadRequest()
        try:
            metadata = body["metadata"]
        except KeyError:
            raise exc.HTTPBadRequest()
        # The metadata should be a dict
        if not isinstance(metadata, dict):
            msg = _('The value of metadata must be a dict')
            raise exc.HTTPBadRequest(explanation=msg)
        try:
            # Keys must be non-empty strings; values may be None (which
            # is how existing keys are unset) or strings up to 255 chars.
            for key, value in metadata.items():
                utils.check_string_length(key, "metadata.key", 1, 255)
                if value is not None:
                    utils.check_string_length(value, "metadata.value", 0, 255)
        except exception.InvalidInput as e:
            raise exc.HTTPBadRequest(explanation=e.format_message())
        try:
            aggregate = self.api.update_aggregate_metadata(context,
                                                           id, metadata)
        except exception.AggregateNotFound:
            msg = _('Cannot set metadata %(metadata)s in aggregate'
                    ' %(id)s') % {'metadata': metadata, 'id': id}
            raise exc.HTTPNotFound(explanation=msg)
        except exception.InvalidAggregateAction as e:
            raise exc.HTTPBadRequest(explanation=e.format_message())
        return self._marshall_aggregate(aggregate)
    def _marshall_aggregate(self, aggregate):
        """Serialize an aggregate object into an {'aggregate': {...}} dict."""
        _aggregate = {}
        for key, value in aggregate.items():
            # NOTE(danms): The original API specified non-TZ-aware timestamps
            if isinstance(value, datetime.datetime):
                value = value.replace(tzinfo=None)
            _aggregate[key] = value
        return {"aggregate": _aggregate}
class Aggregates(extensions.ExtensionDescriptor):
    """Admin-only aggregate administration."""

    name = "Aggregates"
    alias = "os-aggregates"
    namespace = "http://docs.openstack.org/compute/ext/aggregates/api/v1.1"
    updated = "2012-01-12T00:00:00Z"

    def get_resources(self):
        """Expose the os-aggregates resource with a member 'action' POST."""
        resource = extensions.ResourceExtension(
            'os-aggregates',
            AggregateController(),
            member_actions={"action": "POST", })
        return [resource]
| |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.python.client.session.Session."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import threading
import time
import numpy as np
import six
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.core.lib.core import error_codes_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.framework import test_util
from tensorflow.python.framework import versions
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
from tensorflow.python.util import compat
# NOTE(mrry): Dummy shape registration for op used in the tests.
# Registering None as the shape function makes 'ConstructionFails' a
# known op type so the error-handling tests below can create it.
ops.RegisterShape('ConstructionFails')(None)
class SessionTest(test_util.TensorFlowTestCase):
  def testUseExistingGraph(self):
    """A Session constructed with graph=g evaluates tensors from g."""
    with ops.Graph().as_default() as g, ops.device('/cpu:0'):
      a = constant_op.constant(6.0, shape=[1, 1])
      b = constant_op.constant(7.0, shape=[1, 1])
      c = math_ops.matmul(a, b, name='matmul')
      with session.Session(graph=g):
        result = c.eval()
        self.assertAllEqual(result, [[42.0]])
  def testUseDefaultGraph(self):
    """A Session without an explicit graph uses the current default graph."""
    with ops.Graph().as_default(), ops.device('/cpu:0'):
      a = constant_op.constant(6.0, shape=[1, 1])
      b = constant_op.constant(7.0, shape=[1, 1])
      c = math_ops.matmul(a, b, name='matmul')
      with session.Session():
        result = c.eval()
        self.assertAllEqual(result, [[42.0]])
  def testCreate(self):
    """eval() of an identity op works both with and without a feed."""
    with session.Session():
      inp = constant_op.constant(10.0, shape=[2, 3], name='W1')
      copy = array_ops.identity(inp)
      # Test with feed.
      # TODO(mrry): Investigate why order='F' didn't work.
      arr = np.asarray([[0, 1, 2], [3, 4, 5]], dtype=np.float32, order='C')
      copy_val = copy.eval({'W1:0': arr})
      self.assertAllEqual(arr, copy_val)
      # Test without feed.
      copy_val = copy.eval()
      self.assertAllEqual(np.asarray([[10.0, 10.0, 10.0], [10.0, 10.0, 10.0]],
                                     dtype=np.float32), copy_val)
def testManyCPUs(self):
# TODO(keveman): Implement ListDevices and test for the number of
# devices returned by ListDevices.
with session.Session(
config=config_pb2.ConfigProto(device_count={'CPU': 2})):
inp = constant_op.constant(10.0, name='W1')
self.assertAllEqual(inp.eval(), 10.0)
def testPerSessionThreads(self):
# TODO(keveman): Implement ListDevices and test for the number of
# devices returned by ListDevices.
with session.Session(
config=config_pb2.ConfigProto(use_per_session_threads=True)):
inp = constant_op.constant(10.0, name='W1')
self.assertAllEqual(inp.eval(), 10.0)
  def testSessionInterOpThreadPool(self):
    """Sessions accept session_inter_op_thread_pool config entries."""
    config = config_pb2.ConfigProto()
    # First pool uses the default settings (num_threads unset).
    pool = config.session_inter_op_thread_pool.add()
    with session.Session(config=config) as s:
      inp = constant_op.constant(10.0, name='W1')
      results = s.run([inp])
      self.assertAllEqual([10.0], results)
    # Second pool restricted to a single thread.
    pool = config.session_inter_op_thread_pool.add()
    pool.num_threads = 1
    with session.Session(config=config) as s:
      inp = constant_op.constant(20.0, name='W2')
      results = s.run([inp])
      self.assertAllEqual([20.0], results)
  def testErrorsReported(self):
    """run() with an unknown tensor name raises ValueError."""
    with session.Session() as s:
      constant_op.constant(10.0, name='W1')
      with self.assertRaises(ValueError):
        s.run('foo:0')
  def testErrorPayload(self):
    """Evaluating an unfed placeholder raises an error tagged with its op."""
    with session.Session():
      a = array_ops.placeholder(dtypes.float32)
      with self.assertRaisesOpError(lambda e: e.op == a.op):
        a.eval()
  def testErrorCodeWithNoNodeDef(self):
    """A bogus partial_run handle reports INVALID_ARGUMENT with no op/node_def."""
    with session.Session() as s:
      a = array_ops.placeholder(dtypes.float32, shape=[])
      b = array_ops.placeholder(dtypes.float32, shape=[])
      r1 = math_ops.add(a, b)
      def exc_predicate(e):
        return (e.op is None and e.node_def is None and
                e.error_code == error_codes_pb2.INVALID_ARGUMENT)
      with self.assertRaisesOpError(exc_predicate):
        # Run with a bogus handle.
        s.partial_run('foo', r1, feed_dict={a: 1, b: 2})
  def testOpConstructionErrorPayload(self):
    """Running 'ConstructionFails' reports the op and INVALID_ARGUMENT."""
    with session.Session():
      failing_op = ops.get_default_graph().create_op(
          'ConstructionFails', [], [], name='f')
      def exc_predicate(e):
        return (e.op == failing_op
                and e.error_code == error_codes_pb2.INVALID_ARGUMENT)
      with self.assertRaisesOpError(exc_predicate):
        failing_op.run()
  def testErrorBasedOn(self):
    """Op errors expose the chain of _original_op ancestors."""
    with session.Session() as sess:
      a = constant_op.constant(0.0, shape=[2, 3])
      # NOTE(mrry): The original_op is nonsense, but used here to test that the
      # errors are reported correctly.
      # pylint: disable=protected-access
      with sess.graph._original_op(a.op):
        b = array_ops.identity(a, name='id')
        with sess.graph._original_op(b.op):
          c = array_ops.placeholder(dtypes.float32)
      # pylint: enable=protected-access
      def exc_predicate(e):
        return (e.op == c.op
                and e.op._original_op == b.op
                and e.op._original_op._original_op == a.op)
      with self.assertRaisesOpError(exc_predicate):
        # Evaluating the unfed placeholder fails; check ancestry on the error.
        c.eval()
def testFetchNone(self):
with session.Session() as s:
a = constant_op.constant(1.0)
with self.assertRaises(TypeError):
s.run(None)
with self.assertRaises(TypeError):
s.run([None])
with self.assertRaises(TypeError):
s.run({'b': None})
with self.assertRaises(TypeError):
s.run({'a': a, 'b': None})
def testFetchSingleton(self):
with session.Session() as sess:
a = constant_op.constant(42.0)
res = sess.run(a)
self.assertEqual(42.0, res)
res = sess.run(a.op) # An op, not a tensor.
self.assertEqual(None, res)
def testFetchSingletonByName(self):
with session.Session() as sess:
a = constant_op.constant(42.0)
res = sess.run(a.name)
self.assertEqual(42.0, res)
res = sess.run(a.op) # An op, not a tensor.
self.assertEqual(None, res)
  def testFetchList(self):
    """Fetching a list returns a list; op fetches map to None."""
    with session.Session() as sess:
      a = constant_op.constant(42.0)
      b = control_flow_ops.no_op()  # An op, not a tensor.
      c = constant_op.constant(44.0)
      v = variables.Variable([54.0])
      assign = v.assign([63.0])
      res = sess.run([a, b, c, a.name, assign.op])
      self.assertTrue(isinstance(res, list))
      self.assertEqual(42.0, res[0])
      self.assertEqual(None, res[1])
      self.assertEqual(44.0, res[2])
      self.assertEqual(42.0, res[3])
      self.assertEqual(None, res[4])
      # The assign op ran as part of the fetch above, so v was updated.
      self.assertEqual(63.0, sess.run(v))
def testFetchTuple(self):
with session.Session() as sess:
a = constant_op.constant(42.0)
b = control_flow_ops.no_op() # An op, not a tensor.
c = constant_op.constant(44.0)
res = sess.run((a, b, c, a.name))
self.assertTrue(isinstance(res, tuple))
self.assertEqual(42.0, res[0])
self.assertEqual(None, res[1])
self.assertEqual(44.0, res[2])
self.assertEqual(42.0, res[3])
  def testFetchNamedTuple(self):
    """Fetching a namedtuple returns the same namedtuple type, field by field."""
    # pylint: disable=invalid-name
    ABC = collections.namedtuple('ABC', ['a', 'b', 'c'])
    # pylint: enable=invalid-name
    with session.Session() as sess:
      a = constant_op.constant(42.0)
      b = control_flow_ops.no_op()  # An op, not a tensor.
      c = constant_op.constant(44.0)
      res = sess.run(ABC(a, b, c))
      self.assertTrue(isinstance(res, ABC))
      self.assertEqual(42.0, res.a)
      self.assertEqual(None, res.b)
      self.assertEqual(44.0, res.c)
def testFetchDict(self):
with session.Session() as sess:
a = constant_op.constant(42.0)
b = control_flow_ops.no_op() # An op, not a tensor.
c = constant_op.constant(44.0)
res = sess.run({'a': a, 'b': b, 'c': c})
self.assertTrue(isinstance(res, dict))
self.assertEqual(42.0, res['a'])
self.assertEqual(None, res['b'])
self.assertEqual(44.0, res['c'])
  def testFetchNestingOneLevel(self):
    """One level of nesting (list/tuple/namedtuple/dict of fetches) round-trips.

    Each container type is used as the outer structure in turn; tensor
    names are interchangeable with tensors, and ops evaluate to None.
    """
    with session.Session() as sess:
      # pylint: disable=invalid-name
      ABC = collections.namedtuple('ABC', ['a', 'b', 'c'])
      DEFG = collections.namedtuple('DEFG', ['d', 'e', 'f', 'g'])
      # pylint: enable=invalid-name
      a_val = 42.0
      b_val = None
      c_val = 44.0
      a = constant_op.constant(a_val)
      b = control_flow_ops.no_op()  # An op, not a tensor.
      c = constant_op.constant(c_val)
      # List of lists, tuples, namedtuple, and dict
      res = sess.run([[a, b, c], (a, b, c), ABC(a=a, b=b, c=c),
                      {'a': a.name, 'c': c, 'b': b}])
      self.assertTrue(isinstance(res, list))
      self.assertTrue(isinstance(res[0], list))
      self.assertEqual(a_val, res[0][0])
      self.assertEqual(b_val, res[0][1])
      self.assertEqual(c_val, res[0][2])
      self.assertTrue(isinstance(res[1], tuple))
      self.assertEqual(a_val, res[1][0])
      self.assertEqual(b_val, res[1][1])
      self.assertEqual(c_val, res[1][2])
      self.assertTrue(isinstance(res[2], ABC))
      self.assertEqual(a_val, res[2].a)
      self.assertEqual(b_val, res[2].b)
      self.assertEqual(c_val, res[2].c)
      self.assertTrue(isinstance(res[3], dict))
      self.assertEqual(a_val, res[3]['a'])
      self.assertEqual(b_val, res[3]['b'])
      self.assertEqual(c_val, res[3]['c'])
      # Tuple of lists, tuples, namedtuple, and dict
      res = sess.run(([a, b, c], (a.name, b, c), ABC(a=a, b=b, c=c),
                      {'a': a, 'c': c, 'b': b}))
      self.assertTrue(isinstance(res, tuple))
      self.assertTrue(isinstance(res[0], list))
      self.assertEqual(a_val, res[0][0])
      self.assertEqual(b_val, res[0][1])
      self.assertEqual(c_val, res[0][2])
      self.assertTrue(isinstance(res[1], tuple))
      self.assertEqual(a_val, res[1][0])
      self.assertEqual(b_val, res[1][1])
      self.assertEqual(c_val, res[1][2])
      self.assertTrue(isinstance(res[2], ABC))
      self.assertEqual(a_val, res[2].a)
      self.assertEqual(b_val, res[2].b)
      self.assertEqual(c_val, res[2].c)
      self.assertTrue(isinstance(res[3], dict))
      self.assertEqual(a_val, res[3]['a'])
      self.assertEqual(b_val, res[3]['b'])
      self.assertEqual(c_val, res[3]['c'])
      # Namedtuple of lists, tuples, namedtuples, and dict
      res = sess.run(DEFG(d=[a, b, c],
                          e=(a, b, c),
                          f=ABC(a=a.name, b=b, c=c),
                          g={'a': a, 'c': c, 'b': b}))
      self.assertTrue(isinstance(res, DEFG))
      self.assertTrue(isinstance(res.d, list))
      self.assertEqual(a_val, res.d[0])
      self.assertEqual(b_val, res.d[1])
      self.assertEqual(c_val, res.d[2])
      self.assertTrue(isinstance(res.e, tuple))
      self.assertEqual(a_val, res.e[0])
      self.assertEqual(b_val, res.e[1])
      self.assertEqual(c_val, res.e[2])
      self.assertTrue(isinstance(res.f, ABC))
      self.assertEqual(a_val, res.f.a)
      self.assertEqual(b_val, res.f.b)
      self.assertEqual(c_val, res.f.c)
      self.assertTrue(isinstance(res.g, dict))
      self.assertEqual(a_val, res.g['a'])
      self.assertEqual(b_val, res.g['b'])
      self.assertEqual(c_val, res.g['c'])
      # Dict of lists, tuples, namedtuples, and dict
      res = sess.run({'d': [a, b, c],
                      'e': (a, b, c),
                      'f': ABC(a=a, b=b, c=c),
                      'g': {'a': a.name, 'c': c, 'b': b}})
      self.assertTrue(isinstance(res, dict))
      self.assertTrue(isinstance(res['d'], list))
      self.assertEqual(a_val, res['d'][0])
      self.assertEqual(b_val, res['d'][1])
      self.assertEqual(c_val, res['d'][2])
      self.assertTrue(isinstance(res['e'], tuple))
      self.assertEqual(a_val, res['e'][0])
      self.assertEqual(b_val, res['e'][1])
      self.assertEqual(c_val, res['e'][2])
      self.assertTrue(isinstance(res['f'], ABC))
      self.assertEqual(a_val, res['f'].a)
      self.assertEqual(b_val, res['f'].b)
      self.assertEqual(c_val, res['f'].c)
      self.assertTrue(isinstance(res['g'], dict))
      self.assertEqual(a_val, res['g']['a'])
      self.assertEqual(b_val, res['g']['b'])
      self.assertEqual(c_val, res['g']['c'])
  def testFetchTensorObject(self):
    """All fetch spellings (list, single, eval, dict, nested) agree."""
    with session.Session() as s:
      a = constant_op.constant(1.0, shape=[1, 2])
      b = constant_op.constant(2.0, shape=[2, 3])
      c = math_ops.matmul(a, b)
      results_with_list = s.run([c])
      self.assertAllEqual([[4.0, 4.0, 4.0]], results_with_list[0])
      results_with_single = s.run(c)
      self.assertAllEqual([[4.0, 4.0, 4.0]], results_with_single)
      results_with_get = c.eval()
      self.assertAllEqual([[4.0, 4.0, 4.0]], results_with_get)
      a_val, b_val = s.run([a, b])  # Test multiple fetches.
      self.assertAllEqual([[1.0, 1.0]], a_val)
      self.assertAllEqual([[2.0, 2.0, 2.0], [2.0, 2.0, 2.0]], b_val)
      results_with_dict = s.run({'a': [a], 'b': b, 'z': [a, b]})
      self.assertAllEqual([[1.0, 1.0]], results_with_dict['a'][0])
      self.assertAllEqual([[2.0, 2.0, 2.0], [2.0, 2.0, 2.0]],
                          results_with_dict['b'])
      self.assertAllEqual(results_with_dict['a'][0], results_with_dict['z'][0])
      self.assertAllEqual(results_with_dict['b'], results_with_dict['z'][1])
      # Test nested structures
      results_with_nested_list = s.run([[[a, b], b], a, [a, b]])
      self.assertAllEqual([[1.0, 1.0]], results_with_nested_list[0][0][0])
      self.assertAllEqual([[2.0, 2.0, 2.0], [2.0, 2.0, 2.0]],
                          results_with_nested_list[0][0][1])
      self.assertAllEqual(results_with_nested_list[0][0][0],
                          results_with_nested_list[1])
      self.assertAllEqual(results_with_nested_list[1],
                          results_with_nested_list[2][0])
      self.assertAllEqual(results_with_nested_list[0][0][1],
                          results_with_nested_list[0][1])
      self.assertAllEqual(results_with_nested_list[0][1],
                          results_with_nested_list[2][1])
  def testFetchScalar(self):
    """Scalar fetches come back as the matching numpy scalar type."""
    with session.Session() as s:
      for scalar in np.int32, np.int64, np.float16, np.float32, np.float64:
        x = scalar(7)
        y = scalar(8)
        tf_x = constant_op.constant(x, shape=[])
        tf_y = constant_op.constant(y)
        tf_xy = math_ops.add(tf_x, tf_y)
        # Single fetch
        xy = s.run(tf_xy)
        self.assertEqual(scalar, type(xy))
        self.assertEqual(x + y, xy)
        # List fetch
        xy, = s.run([tf_xy])
        self.assertEqual(scalar, type(xy))
        self.assertEqual(x + y, xy)
        # Dict fetch
        xy = s.run({'xy': tf_xy})['xy']
        self.assertEqual(scalar, type(xy))
        self.assertEqual(x + y, xy)
        # Nested list fetch
        xy = s.run([[[tf_xy]], tf_xy, [tf_xy]])
        self.assertAllEqual(xy, [[[x + y]], x + y, [x + y]])
        self.assertEqual(scalar, type(xy[0][0][0]))
        self.assertEqual(scalar, type(xy[1]))
        self.assertEqual(scalar, type(xy[2][0]))
  def testFetchOperationObject(self):
    """Running a variable's initializer op, then fetching it, yields its value."""
    with session.Session() as s:
      a = constant_op.constant(1.0, shape=[1, 2])
      v = variables.Variable(a, name='testFetchOperationObject_v')
      s.run(v.initializer)
      v_val = s.run(v)
      self.assertAllEqual([[1.0, 1.0]], v_val)
  def testFetchSparseTensor(self):
    """A fetched SparseTensor unpacks both as a 3-tuple and by attribute
    (indices/values/shape), in every fetch container spelling."""
    with session.Session() as s:
      indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
      values = np.array([1.0, 2.0]).astype(np.float32)
      shape = np.array([7, 9, 2]).astype(np.int64)
      sp = ops.SparseTensor(
          constant_op.constant(indices),
          constant_op.constant(values),
          constant_op.constant(shape))
      # Single fetch, use as tuple
      sp_out = s.run(sp)
      indices_out, values_out, shape_out = sp_out
      self.assertAllEqual(indices_out, indices)
      self.assertAllEqual(values_out, values)
      self.assertAllEqual(shape_out, shape)
      # Single fetch, use as SparseTensorValue
      sp_out = s.run(sp)
      self.assertAllEqual(sp_out.indices, indices)
      self.assertAllEqual(sp_out.values, values)
      self.assertAllEqual(sp_out.shape, shape)
      # Tuple fetch, use as tuple
      indices_out, values_out, shape_out = s.run(sp)
      self.assertAllEqual(indices_out, indices)
      self.assertAllEqual(values_out, values)
      self.assertAllEqual(shape_out, shape)
      # List fetch, use as tuple
      (indices_out, values_out, shape_out), = s.run([sp])
      self.assertAllEqual(indices_out, indices)
      self.assertAllEqual(values_out, values)
      self.assertAllEqual(shape_out, shape)
      # List fetch, use as SparseTensorValue
      sp_out, = s.run([sp])
      self.assertAllEqual(sp_out.indices, indices)
      self.assertAllEqual(sp_out.values, values)
      self.assertAllEqual(sp_out.shape, shape)
      # Dict fetch (single value), use as tuple
      indices_out, values_out, shape_out = s.run({'sp': sp})['sp']
      self.assertAllEqual(indices_out, indices)
      self.assertAllEqual(values_out, values)
      self.assertAllEqual(shape_out, shape)
      # Dict fetch (list value), use as tuple
      (indices_out, values_out, shape_out), = s.run({'sp': [sp]})['sp']
      self.assertAllEqual(indices_out, indices)
      self.assertAllEqual(values_out, values)
      self.assertAllEqual(shape_out, shape)
      # Dict fetch, use as SparseTensorValue
      sp_out = s.run({'sp': sp})['sp']
      self.assertAllEqual(sp_out.indices, indices)
      self.assertAllEqual(sp_out.values, values)
      self.assertAllEqual(sp_out.shape, shape)
      # Nested list fetch use as tuple
      sp_out = s.run([[[sp]], sp])
      indices_out, values_out, shape_out = sp_out[0][0][0]
      self.assertAllEqual(indices_out, indices)
      self.assertAllEqual(values_out, values)
      self.assertAllEqual(shape_out, shape)
      indices_out, values_out, shape_out = sp_out[1]
      self.assertAllEqual(indices_out, indices)
      self.assertAllEqual(values_out, values)
      self.assertAllEqual(shape_out, shape)
      # Nested list fetch, use as SparseTensorValue
      sp_out = s.run([[[sp]], sp])
      self.assertAllEqual(sp_out[0][0][0].indices, indices)
      self.assertAllEqual(sp_out[0][0][0].values, values)
      self.assertAllEqual(sp_out[0][0][0].shape, shape)
      self.assertAllEqual(sp_out[1].indices, indices)
      self.assertAllEqual(sp_out[1].values, values)
      self.assertAllEqual(sp_out[1].shape, shape)
def testFeedSparseTensor(self):
  """A SparseTensor of placeholders can be fed as a tuple or a
  SparseTensorValue, and fetched back either way."""
  with session.Session() as s:
    indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
    values = np.array([1.0, 2.0]).astype(np.float32)
    shape = np.array([7, 9, 2]).astype(np.int64)
    # Components are individual placeholders so each can be fed.
    sp = ops.SparseTensor(
        array_ops.placeholder(dtype=np.int64, shape=(2, 3)),
        array_ops.placeholder(dtype=np.float32, shape=(2,)),
        array_ops.placeholder(dtype=np.int64, shape=(3,)),)
    sp_indices = array_ops.identity(sp.indices)
    sp_values = array_ops.identity(sp.values)
    sp_shape = array_ops.identity(sp.shape)
    sp2 = ops.SparseTensor(sp_indices, sp_values, sp_shape)
    # Feed with tuple
    indices_out, values_out, shape_out = s.run(
        [sp_indices, sp_values, sp_shape], {sp: (indices, values, shape)})
    self.assertAllEqual(indices_out, indices)
    self.assertAllEqual(values_out, values)
    self.assertAllEqual(shape_out, shape)
    # Feed with tuple, fetch sp directly
    sp_out = s.run(sp, {sp: (indices, values, shape)})
    self.assertAllEqual(sp_out.indices, indices)
    self.assertAllEqual(sp_out.values, values)
    self.assertAllEqual(sp_out.shape, shape)
    # Feed with SparseTensorValue
    indices_out, values_out, shape_out = s.run(
        [sp_indices, sp_values, sp_shape],
        {sp: ops.SparseTensorValue(indices, values, shape)})
    self.assertAllEqual(indices_out, indices)
    self.assertAllEqual(values_out, values)
    self.assertAllEqual(shape_out, shape)
    # Feed with SparseTensorValue, fetch SparseTensorValue
    sp2_out = s.run(sp2, {sp: ops.SparseTensorValue(indices, values, shape)})
    self.assertAllEqual(sp2_out.indices, indices)
    self.assertAllEqual(sp2_out.values, values)
    self.assertAllEqual(sp2_out.shape, shape)
    # Feed SparseTensorValue and fetch sp directly.
    sp_out = s.run(sp, {sp: ops.SparseTensorValue(indices, values, shape)})
    self.assertAllEqual(sp_out.indices, indices)
    self.assertAllEqual(sp_out.values, values)
    self.assertAllEqual(sp_out.shape, shape)
def testFeedSparsePlaceholder(self):
  """sparse_placeholder feeds behave like hand-built SparseTensor
  placeholders, accepting tuples and SparseTensorValues."""
  with session.Session() as s:
    indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
    values = np.array([1.0, 2.0]).astype(np.float32)
    shape = np.array([7, 9, 2]).astype(np.int64)
    sp = array_ops.sparse_placeholder(dtype=np.float32, name='placeholder1')
    sp_indices = array_ops.identity(sp.indices)
    sp_values = array_ops.identity(sp.values)
    sp_shape = array_ops.identity(sp.shape)
    sp2 = ops.SparseTensor(sp_indices, sp_values, sp_shape)
    # Feed with tuple
    indices_out, values_out, shape_out = s.run(
        [sp_indices, sp_values, sp_shape], {sp: (indices, values, shape)})
    self.assertAllEqual(indices_out, indices)
    self.assertAllEqual(values_out, values)
    self.assertAllEqual(shape_out, shape)
    # Feed with SparseTensorValue
    indices_out, values_out, shape_out = s.run(
        [sp_indices, sp_values, sp_shape],
        {sp: ops.SparseTensorValue(indices, values, shape)})
    self.assertAllEqual(indices_out, indices)
    self.assertAllEqual(values_out, values)
    self.assertAllEqual(shape_out, shape)
    # Feed with SparseTensorValue, fetch SparseTensorValue
    sp2_out = s.run(sp2, {sp: ops.SparseTensorValue(indices, values, shape)})
    self.assertAllEqual(sp2_out.indices, indices)
    self.assertAllEqual(sp2_out.values, values)
    self.assertAllEqual(sp2_out.shape, shape)
def testFeedSparePlaceholderConstantShape(self):
  """A sparse_placeholder with a fixed shape needs no shape in the feed.

  NOTE(review): "Spare" in the method name looks like a typo for
  "Sparse"; kept as-is so the test id does not change.
  """
  with session.Session() as s:
    indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
    values = np.array([1.0, 2.0]).astype(np.float32)
    shape = np.array([7, 9, 2]).astype(np.int64)
    sp = array_ops.sparse_placeholder(dtype=np.float32,
                                      shape=shape,
                                      name='placeholder1')
    # The shape component is a graph constant, known without running.
    self.assertAllEqual(sp.shape.eval(session=s), shape)
    self.assertAllEqual(tensor_util.constant_value(sp.shape), shape)
    sp_indices = array_ops.identity(sp.indices)
    sp_values = array_ops.identity(sp.values)
    sp_shape = array_ops.identity(sp.shape)
    # Feed with tuple: only indices and values are supplied.
    indices_out, values_out, shape_out = s.run(
        [sp_indices, sp_values, sp_shape], {sp: (indices, values)})
    self.assertAllEqual(indices_out, indices)
    self.assertAllEqual(values_out, values)
    self.assertAllEqual(shape_out, shape)
def testFetchIndexedSlices(self):
  """Fetching an IndexedSlices yields an IndexedSlicesValue usable both
  as a (values, indices, dense_shape) tuple and by attribute, for
  single, tuple, and list fetch structures."""
  with session.Session() as s:
    indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
    values = np.array([1.0, 2.0]).astype(np.float32)
    dense_shape = np.array([7, 9, 2]).astype(np.int64)
    ind = ops.IndexedSlices(
        constant_op.constant(values), constant_op.constant(indices),
        constant_op.constant(dense_shape))
    # Single fetch, use as tuple
    ind_out = s.run(ind)
    values_out, indices_out, dense_shape_out = ind_out
    self.assertAllEqual(values_out, values)
    self.assertAllEqual(indices_out, indices)
    self.assertAllEqual(dense_shape_out, dense_shape)
    # Single fetch, use as IndexedSlicesValue
    ind_out = s.run(ind)
    self.assertAllEqual(ind_out.values, values)
    self.assertAllEqual(ind_out.indices, indices)
    self.assertAllEqual(ind_out.dense_shape, dense_shape)
    # Tuple fetch, use as tuple
    values_out, indices_out, dense_shape_out = s.run(ind)
    self.assertAllEqual(values_out, values)
    self.assertAllEqual(indices_out, indices)
    self.assertAllEqual(dense_shape_out, dense_shape)
    # List fetch, use as tuple
    (values_out, indices_out, dense_shape_out), = s.run([ind])
    self.assertAllEqual(values_out, values)
    self.assertAllEqual(indices_out, indices)
    self.assertAllEqual(dense_shape_out, dense_shape)
    # List fetch, use as IndexedSlicesValue
    ind_out, = s.run([ind])
    self.assertAllEqual(ind_out.values, values)
    self.assertAllEqual(ind_out.indices, indices)
    self.assertAllEqual(ind_out.dense_shape, dense_shape)
def testFeedIndexedSlices(self):
  """An IndexedSlices of placeholders can be fed as a tuple or an
  IndexedSlicesValue, and fetched back as an IndexedSlicesValue."""
  with session.Session() as s:
    values = np.array([1.0, 2.0]).astype(np.float32)
    indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
    dense_shape = np.array([7, 9, 2]).astype(np.int64)
    ind = ops.IndexedSlices(
        array_ops.placeholder(dtype=np.float32,
                              shape=(2,)),
        array_ops.placeholder(dtype=np.int64,
                              shape=(2, 3)),
        array_ops.placeholder(dtype=np.int64,
                              shape=(3,)),)
    ind_values = array_ops.identity(ind.values)
    ind_indices = array_ops.identity(ind.indices)
    ind_dense_shape = array_ops.identity(ind.dense_shape)
    ind2 = ops.IndexedSlices(ind_values, ind_indices, ind_dense_shape)
    # Feed with tuple
    values_out, indices_out, dense_shape_out = s.run(
        [ind_values, ind_indices, ind_dense_shape],
        {ind: (values, indices, dense_shape)})
    self.assertAllEqual(values_out, values)
    self.assertAllEqual(indices_out, indices)
    self.assertAllEqual(dense_shape_out, dense_shape)
    # Feed with IndexedSlicesValue
    values_out, indices_out, dense_shape_out = s.run(
        [ind_values, ind_indices, ind_dense_shape],
        {ind: ops.IndexedSlicesValue(values, indices, dense_shape)})
    self.assertAllEqual(values_out, values)
    self.assertAllEqual(indices_out, indices)
    self.assertAllEqual(dense_shape_out, dense_shape)
    # Feed with IndexedSlicesValue, fetch IndexedSlicesValue
    ind2_out = s.run(ind2, {ind: ops.IndexedSlicesValue(values, indices,
                                                        dense_shape)})
    self.assertAllEqual(ind2_out.values, values)
    self.assertAllEqual(ind2_out.indices, indices)
    self.assertAllEqual(ind2_out.dense_shape, dense_shape)
def testFetchIndexedSlicesWithoutDenseShape(self):
  """An IndexedSlices built with dense_shape=None fetches a value whose
  dense_shape is also None, in every fetch structure."""
  with session.Session() as s:
    indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
    values = np.array([1.0, 2.0]).astype(np.float32)
    dense_shape = None
    ind = ops.IndexedSlices(
        constant_op.constant(values), constant_op.constant(indices), None)
    # Single fetch, use as tuple
    ind_out = s.run(ind)
    values_out, indices_out, dense_shape_out = ind_out
    self.assertAllEqual(values_out, values)
    self.assertAllEqual(indices_out, indices)
    self.assertAllEqual(dense_shape_out, dense_shape)
    # Single fetch, use as IndexedSlicesValue
    ind_out = s.run(ind)
    self.assertAllEqual(ind_out.values, values)
    self.assertAllEqual(ind_out.indices, indices)
    self.assertAllEqual(ind_out.dense_shape, dense_shape)
    # Tuple fetch, use as tuple
    values_out, indices_out, dense_shape_out = s.run(ind)
    self.assertAllEqual(values_out, values)
    self.assertAllEqual(indices_out, indices)
    self.assertAllEqual(dense_shape_out, dense_shape)
    # List fetch, use as tuple
    (values_out, indices_out, dense_shape_out), = s.run([ind])
    self.assertAllEqual(values_out, values)
    self.assertAllEqual(indices_out, indices)
    self.assertAllEqual(dense_shape_out, dense_shape)
    # List fetch, use as IndexedSlicesValue
    ind_out, = s.run([ind])
    self.assertAllEqual(ind_out.values, values)
    self.assertAllEqual(ind_out.indices, indices)
    self.assertAllEqual(ind_out.dense_shape, dense_shape)
def testFeedIndexedSlicesWithoutDenseShape(self):
  """Feeding an IndexedSlices with no dense_shape works with both tuple
  and IndexedSlicesValue feed values."""
  with session.Session() as s:
    values = np.array([1.0, 2.0]).astype(np.float32)
    indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
    dense_shape = None
    ind = ops.IndexedSlices(
        array_ops.placeholder(dtype=np.float32,
                              shape=(2,)),
        array_ops.placeholder(dtype=np.int64,
                              shape=(2, 3)),
        None)
    ind_values = array_ops.identity(ind.values)
    ind_indices = array_ops.identity(ind.indices)
    ind2 = ops.IndexedSlices(ind_values, ind_indices)
    # Feed with tuple
    values_out, indices_out = s.run(
        [ind_values, ind_indices], {ind: (values, indices)})
    self.assertAllEqual(values_out, values)
    self.assertAllEqual(indices_out, indices)
    # Feed with IndexedSlicesValue
    values_out, indices_out = s.run(
        [ind_values, ind_indices],
        {ind: ops.IndexedSlicesValue(values, indices, dense_shape)})
    self.assertAllEqual(values_out, values)
    self.assertAllEqual(indices_out, indices)
    # Feed with IndexedSlicesValue, fetch IndexedSlicesValue
    ind2_out = s.run(ind2, {ind: ops.IndexedSlicesValue(values, indices,
                                                        dense_shape)})
    self.assertAllEqual(ind2_out.values, values)
    self.assertAllEqual(ind2_out.indices, indices)
    self.assertAllEqual(ind2_out.dense_shape, dense_shape)
def testExtendWithStatelessOperations(self):
  """Nodes added after a run() are picked up by a later run()."""
  with session.Session() as sess:
    lhs = constant_op.constant(1.0, shape=[1, 2])
    rhs = constant_op.constant(2.0, shape=[2, 3])
    prod = math_ops.matmul(lhs, rhs)
    self.assertAllEqual([[4.0, 4.0, 4.0]], sess.run(prod))
    col = constant_op.constant([1.0, 2.0, 3.0], shape=[3, 1])
    prod2 = math_ops.matmul(prod, col)
    # The session graph is extended here to cover the new nodes.
    self.assertAllEqual([[24.0]], sess.run(prod2))
def testExtendWithStatefulOperations(self):
  """Graph extension works when stateful ops (variables) are present."""
  with session.Session() as s:
    a = constant_op.constant(1.0, shape=[1, 2])
    b = constant_op.constant(2.0, shape=[2, 3])
    c = math_ops.matmul(a, b)
    v = variables.Variable(c, name='testExtendWithStatefulOperations_v')
    v.initializer.run()
    v_val = v.eval()
    self.assertAllEqual([[4.0, 4.0, 4.0]], v_val)
    d = constant_op.constant(3.0, shape=[2, 3])
    e = math_ops.matmul(a, d)
    assign_e_to_v = state_ops.assign(v, e)
    # Extend will happen here.
    e_val = e.eval()
    self.assertAllEqual([[6.0, 6.0, 6.0]], e_val)
    # The assign op was built but not run, so v is unchanged.
    v_val = v.eval()
    self.assertAllEqual([[4.0, 4.0, 4.0]], v_val)
    s.run(assign_e_to_v)
    v_val = v.eval()
    self.assertAllEqual([[6.0, 6.0, 6.0]], v_val)
def testExtendWithGroupBy(self):
  """group() of initializers added after an Extend still runs both."""
  with session.Session() as s:
    a = constant_op.constant(1.0, shape=[1, 2])
    p = variables.Variable(a, name='testExtendWithGroupBy_p')
    a_val = a.eval()  # Force an Extend after this op.
    self.assertAllEqual([[1.0, 1.0]], a_val)
    b = constant_op.constant(2.0, shape=[1, 2])
    q = variables.Variable(b, name='testExtendWithGroupBy_q')
    # Extend will happen here.
    init = control_flow_ops.group(p.initializer, q.initializer)
    s.run(init)
    p_val, q_val = s.run([p, q])
    self.assertAllEqual([[1.0, 1.0]], p_val)
    self.assertAllEqual([[2.0, 2.0]], q_val)
def testTensorGetMethod(self):
  """Tensor.eval() works with and without a feed overriding an input."""
  with session.Session():
    x = constant_op.constant(1.0, shape=[1, 2])
    y = constant_op.constant(2.0, shape=[2, 3])
    prod = math_ops.matmul(x, y)
    self.assertAllEqual([[4.0, 4.0, 4.0]], prod.eval())
    # Feeding by tensor name overrides the constant's value.
    self.assertAllEqual([[16.0, 16.0, 16.0]],
                        prod.eval(feed_dict={x.name: [[4.0, 4.0]]}))
def testOperationRunMethod(self):
  """Assign ops run via eval(), including with a feed by name."""
  with session.Session():
    one = constant_op.constant(1.0, shape=[1, 2])
    two = constant_op.constant(2.0, shape=[1, 2], name='b')
    var = variables.Variable(one, one.dtype)
    state_ops.assign(var, one).eval()
    self.assertAllEqual([[1.0, 1.0]], var.eval())
    assign_two = state_ops.assign(var, two)
    assign_two.eval()
    self.assertAllEqual([[2.0, 2.0]], var.eval())
    # Feeding the source by name replaces the assigned value.
    assign_two.eval(feed_dict={'b:0': [[3.0, 3.0]]})
    self.assertAllEqual([[3.0, 3.0]], var.eval())
def testDefaultGraph(self):
  """Session() adopts the current default graph and keeps it across runs."""
  with session.Session() as s:
    self.assertEqual(ops.get_default_graph(), s.graph)
    a = constant_op.constant(1.0, shape=[1, 2])
    b = constant_op.constant(2.0, shape=[2, 3])
    self.assertEqual(ops.get_default_graph(), a.graph)
    self.assertEqual(ops.get_default_graph(), b.graph)
    c = math_ops.matmul(a, b)
    v = variables.Variable(c, name='testDefaultGraph_v')
    v.initializer.run()
    v_val = v.eval()
    self.assertAllEqual([[4.0, 4.0, 4.0]], v_val)
    d = constant_op.constant(3.0, shape=[2, 3])
    e = math_ops.matmul(a, d)
    assign_e_to_v = state_ops.assign(v, e)
    e_val = e.eval()
    self.assertAllEqual([[6.0, 6.0, 6.0]], e_val)
    # The assign op was built but not run, so v is unchanged.
    v_val = v.eval()
    self.assertAllEqual([[4.0, 4.0, 4.0]], v_val)
    s.run(assign_e_to_v)
    v_val = v.eval()
    self.assertAllEqual([[6.0, 6.0, 6.0]], v_val)
    self.assertEqual(ops.get_default_graph(), s.graph)
def _testDefaultGraphInThread(self, constructed_event, continue_event, i):
  """Worker for testDefaultGraphWithThreads.

  Builds a small graph in this thread's default graph, then waits for
  all peers before executing, exercising thread-local graph state.

  Args:
    constructed_event: threading.Event set once this thread's graph is built.
    continue_event: threading.Event all threads wait on before running.
    i: integer thread index used to make the variable name unique.
  """
  with session.Session() as s:
    self.assertEqual(ops.get_default_graph(), s.graph)
    a = constant_op.constant(1.0, shape=[1, 2])
    b = constant_op.constant(2.0, shape=[2, 3])
    c = math_ops.matmul(a, b)
    v = variables.Variable(c, name='var_%d' % i)
    # Block here until all threads have constructed their graph.
    constructed_event.set()
    continue_event.wait()
    assign_c_to_v = state_ops.assign(v, c)
    v.initializer.run()
    assign_c_to_v.eval()
    v_val = v.eval()
    self.assertAllEqual([[4.0, 4.0, 4.0]], v_val)
    d = constant_op.constant(3.0, shape=[2, 3])
    e = math_ops.matmul(a, d)
    assign_e_to_v = state_ops.assign(v, e)
    e_val = e.eval()
    self.assertAllEqual([[6.0, 6.0, 6.0]], e_val)
    v_val = v.eval()
    self.assertAllEqual([[4.0, 4.0, 4.0]], v_val)
    s.run(assign_e_to_v)
    v_val = v.eval()
    self.assertAllEqual([[6.0, 6.0, 6.0]], v_val)
    self.assertEqual(ops.get_default_graph(), s.graph)
def testDefaultGraphWithThreads(self):
  """Each of ten forked threads gets its own thread-local default graph."""
  constructed_events = [threading.Event() for _ in range(10)]
  continue_event = threading.Event()
  workers = [
      self.checkedThread(target=self._testDefaultGraphInThread,
                         args=(event, continue_event, idx))
      for idx, event in enumerate(constructed_events)
  ]
  for worker in workers:
    worker.start()
  # Wait until every thread has built its graph, then release them all.
  for event in constructed_events:
    event.wait()
  continue_event.set()
  for worker in workers:
    worker.join()
def testParallelRun(self):
  """100 threads can evaluate against the same session concurrently."""
  with session.Session() as sess:
    five = constant_op.constant(5.0)
    go = threading.Event()
    def run_step():
      go.wait()
      self.assertEqual(five.eval(session=sess), 5.0)
    workers = [self.checkedThread(target=run_step) for _ in range(100)]
    for worker in workers:
      worker.start()
    go.set()
    for worker in workers:
      worker.join()
def testRunFeedDict(self):
  """run() accepts tensor, name, list, and nested-tuple feed_dict keys."""
  with session.Session() as sess:
    x = array_ops.zeros([2])
    doubled = 2 * x
    ones = np.ones(2).astype(np.float32)
    expected = 2 * np.ones(2)
    # Feed keyed by the tensor itself.
    self.assertAllEqual(sess.run(doubled, feed_dict={x: ones}), expected)
    # Feed keyed by the tensor's name.
    self.assertAllEqual(sess.run(doubled, feed_dict={x.name: ones}), expected)
    # Feed a plain Python list value.
    assert (sess.run(doubled, feed_dict={x: [1, 1]}) == expected).all()
    # Nested tuple keys map onto matching nested tuple values.
    z = (((array_ops.zeros([2]),),), array_ops.zeros([2]),
         (array_ops.zeros([2]),))
    result = [z[0][0][0] * 2, z[1] * 2, z[2][0] * 2]
    values = (((np.array([1, 1]),),), np.array([2, 2]), (np.array([3, 3]),))
    out = sess.run(result, feed_dict={z: values})
    self.assertAllEqual(out[0], 2 * np.ones(2))
    self.assertAllEqual(out[1], 2 * np.array([2, 2]))
    self.assertAllEqual(out[2], 2 * np.array([3, 3]))
def testGraphDef(self):
  """sess.graph_def tracks nodes as they are added to the graph.

  Also replaces the deprecated ``assertEquals`` alias with
  ``assertEqual``.
  """
  with session.Session() as sess:
    # A fresh graph contains only version metadata, no nodes.
    self.assertProtoEquals(
        'versions { producer: %d min_consumer: %d }' % (
            versions.GRAPH_DEF_VERSION,
            versions.GRAPH_DEF_VERSION_MIN_CONSUMER),
        sess.graph_def)
    c = constant_op.constant(5.0, name='c')
    self.assertEqual(len(sess.graph_def.node), 1)
    d = constant_op.constant(6.0, name='d')
    self.assertEqual(len(sess.graph_def.node), 2)
    self.assertAllEqual(c.eval(), 5.0)
    self.assertAllEqual(d.eval(), 6.0)
    e = constant_op.constant(7.0, name='e')
    self.assertEqual(len(sess.graph_def.node), 3)
    self.assertAllEqual(e.eval(), 7.0)
def testUseAfterClose(self):
  """run() raises RuntimeError once the session context has exited."""
  with session.Session() as sess:
    c = constant_op.constant(5.0)
    self.assertAllEqual(sess.run(c), 5.0)
  # The with-block close makes any further run() invalid.
  expected = 'Attempted to use a closed Session.'
  with self.assertRaisesWithPredicateMatch(
      RuntimeError, lambda e: expected in str(e)):
    sess.run(c)
def testUseAfterCloseConcurrent(self):
  """Closing a session while another thread runs it raises in that thread."""
  with session.Session() as sess:
    c = constant_op.constant(5.0)
    self.assertAllEqual(sess.run(c), 5.0)
    def update_thread():
      # The loop eventually hits the closed session and raises.
      with self.assertRaisesWithPredicateMatch(
          RuntimeError,
          lambda e: 'Attempted to use a closed Session.' in str(e)):
        while True:
          sess.run(c)
    t = threading.Thread(target=update_thread)
    t.start()
    time.sleep(0.1)  # Give the loop time to start before closing.
    sess.close()
    t.join()
def testUseEmptyGraph(self):
  """Running a session whose graph has no nodes raises RuntimeError."""
  with session.Session() as sess:
    msg = 'The Session graph is empty.'
    with self.assertRaisesWithPredicateMatch(RuntimeError,
                                             lambda e: msg in str(e)):
      sess.run([])
def testNotEntered(self):
  """A session never made default does not register on the session stack."""
  # pylint: disable=protected-access
  self.assertEqual(ops._default_session_stack.get_default(), None)
  # pylint: enable=protected-access
  with ops.device('/cpu:0'):
    sess = session.Session()
    c_1 = constant_op.constant(5.0)
    with sess.graph.as_default():
      c_2 = constant_op.constant(5.0)
    self.assertEqual(c_1.graph, c_2.graph)
    self.assertEqual(sess.run(c_2), 5.0)
    # eval() with no default session registered must fail.
    with self.assertRaisesWithPredicateMatch(
        ValueError, lambda e: 'No default session is registered.' in str(e)):
      c_2.eval()
def testInteractive(self):
  """InteractiveSession installs itself as the default session."""
  with ops.device('/cpu:0'):
    sess = session.InteractiveSession()
    lhs = constant_op.constant(1.0, shape=[1, 2])
    rhs = constant_op.constant(2.0, shape=[2, 3])
    prod = math_ops.matmul(lhs, rhs)
    # eval() needs no explicit session argument here.
    self.assertAllEqual([[4.0, 4.0, 4.0]], prod.eval())
    col = constant_op.constant([1.0, 2.0, 3.0], shape=[3, 1])
    self.assertAllEqual([[24.0]], math_ops.matmul(prod, col).eval())
    sess.close()
def testInteractivePlacePrunedGraph(self):
  """InteractiveSession places only the pruned subgraph it actually runs."""
  sess = session.InteractiveSession()
  # Build a graph that has a bad op in it (no kernel).
  #
  # This test currently does not link in any GPU kernels,
  # which is why placing this is invalid. If at some point
  # GPU kernels are added to this test, some other different
  # op / device combo should be chosen.
  with ops.device('/gpu:0'):
    a = constant_op.constant(1.0, shape=[1, 2])
  b = constant_op.constant(1.0, shape=[1, 2])
  # Only run the valid op, this should work.
  b.eval()
  with self.assertRaises(errors.InvalidArgumentError):
    a.eval()
  sess.close()
def testDefaultSessionPlacePrunedGraph(self):
  """A non-interactive session places the whole graph, so a bad op fails."""
  sess = session.Session()
  # Build a graph that has a bad op in it (no kernel).
  #
  # This test currently does not link in any GPU kernels,
  # which is why placing this is invalid. If at some point
  # GPU kernels are added to this test, some other different
  # op / device combo should be chosen.
  with ops.device('/gpu:0'):
    _ = constant_op.constant(1.0, shape=[1, 2])
  b = constant_op.constant(1.0, shape=[1, 2])
  with self.assertRaises(errors.InvalidArgumentError):
    # Even though we don't run the bad op, we place the entire
    # graph, which should fail with a non-interactive session.
    sess.run(b)
  sess.close()
def testSharedGraph(self):
  """Two sessions over one graph compute identical results."""
  with ops.Graph().as_default() as g, ops.device('/cpu:0'):
    prod = math_ops.matmul(
        constant_op.constant(1.0, shape=[1, 2]),
        constant_op.constant(2.0, shape=[2, 3]))
  with session.Session(graph=g) as sess1:
    with session.Session(graph=g) as sess2:
      self.assertAllEqual(sess1.run(prod), sess2.run(prod))
def testDuplicatedInputs(self):
  """Fetching the same tensor twice in one run() returns it twice."""
  with session.Session() as sess:
    a = constant_op.constant(1.0, shape=[1, 2])
    b = constant_op.constant(2.0, shape=[1, 3])
    first, middle, repeated = sess.run([a, b, a])
    self.assertAllEqual([[1.0, 1.0]], first)
    self.assertAllEqual([[2.0, 2.0, 2.0]], middle)
    self.assertAllEqual([[1.0, 1.0]], repeated)
def testFeedAndFetch(self):
  """Feed/fetch round-trips preserve values for every supported dtype.

  Bug fix: the original dtype dispatch tested ``dtypes.complex64``
  twice, so ``complex128`` arrays fell through to the plain ``astype``
  branch instead of the complex-sqrt path.
  """
  with session.Session() as sess:
    for dtype in [dtypes.float16,
                  dtypes.float32,
                  dtypes.float64,
                  dtypes.int32,
                  dtypes.uint8,
                  dtypes.int16,
                  dtypes.int8,
                  dtypes.int64,
                  dtypes.bool,
                  dtypes.complex64,
                  dtypes.complex128]:
      for shape in [(32, 4, 128), (37,), (2, 0, 6), (0, 0, 0)]:
        np_dtype = dtype.as_numpy_dtype
        feed_t = array_ops.placeholder(dtype=dtype, shape=shape)
        out_t = array_ops.identity(feed_t)
        np_array = np.random.randint(-10, 10, shape)
        if dtype == dtypes.bool:
          np_array = np_array > 0
        elif dtype in (dtypes.complex64, dtypes.complex128):
          # sqrt of negative values produces nonzero imaginary parts.
          np_array = np.sqrt(np_array.astype(np_dtype))
        else:
          np_array = np_array.astype(np_dtype)
        self.assertAllEqual(np_array,
                            sess.run(out_t, feed_dict={feed_t: np_array}))
        # Check that we can also get the feed back.
        self.assertAllEqual(np_array,
                            sess.run(feed_t, feed_dict={feed_t: np_array}))
        # Also check that we can get both back.
        out_v, feed_v = sess.run([out_t, feed_t],
                                 feed_dict={feed_t: np_array})
        self.assertAllEqual(np_array, out_v)
        self.assertAllEqual(np_array, feed_v)
def testFeedError(self):
  """Feeding a tf.Tensor as a feed value raises TypeError on all paths."""
  with session.Session() as sess:
    feed_t = array_ops.placeholder(dtype=dtypes.float32)
    out_t = array_ops.identity(feed_t)
    feed_val = constant_op.constant(5.0)
    bad_feed = {feed_t: feed_val}
    # run(), Tensor.eval() and Operation.run() all reject tensor feeds.
    for attempt in (lambda: sess.run(out_t, feed_dict=bad_feed),
                    lambda: out_t.eval(feed_dict=bad_feed),
                    lambda: out_t.op.run(feed_dict=bad_feed)):
      with self.assertRaisesRegexp(TypeError, 'cannot be a tf.Tensor object'):
        attempt()
def testFeedPrecisionLossError(self):
  """Feeding int64-max into an int32 tensor is rejected, not truncated."""
  with session.Session() as sess:
    largest_int64 = np.iinfo(np.int64).max
    out_t = constant_op.constant(1.0)
    # Both an implicitly- and explicitly-typed int32 tensor must reject it.
    for feed_tensor in (constant_op.constant(1),
                        constant_op.constant(1, dtype=dtypes.int32)):
      with self.assertRaisesRegexp(TypeError,
                                   'is not compatible with Tensor type'):
        sess.run(out_t, feed_dict={feed_tensor: largest_int64})
def testStringFetch(self):
  """Fetching string constants preserves contents for various shapes."""
  with session.Session():
    for shape in [(32, 4, 128), (37,), (2, 0, 6), (0, 0, 0)]:
      size = 1
      for dim in shape:
        size *= dim
      if size > 0:
        expected = np.array([compat.as_bytes(str(i)) for i in xrange(size)],
                            dtype=np.object).reshape(shape)
      else:
        expected = []
      c = constant_op.constant(expected)
      self.assertAllEqual(c.eval(), expected)
def testStringFeed(self):
  """Feeding string arrays through a placeholder round-trips exactly."""
  with session.Session() as sess:
    for shape in [(32, 4, 128), (37,), (2, 0, 6), (0, 0, 0)]:
      size = 1
      for dim in shape:
        size *= dim
      strings = np.array([compat.as_bytes(str(i)) for i in xrange(size)],
                         dtype=np.object).reshape(shape)
      feed_t = array_ops.placeholder(dtype=dtypes.string, shape=shape)
      ident = array_ops.identity(feed_t)
      feeds = {feed_t: strings}
      self.assertAllEqual(sess.run(ident, feed_dict=feeds), strings)
      self.assertAllEqual(sess.run(feed_t, feed_dict=feeds), strings)
      ident_v, feed_v = sess.run([ident, feed_t], feed_dict=feeds)
      self.assertAllEqual(ident_v, strings)
      self.assertAllEqual(feed_v, strings)
def testStringFeedWithNullCharacters(self):
  """Embedded NUL bytes survive the feed/fetch round-trip."""
  with session.Session():
    inputs = [b'\n\x01\x00', b'\n\x00\x01']
    feed_t = array_ops.placeholder(dtype=dtypes.string, shape=[2])
    out = array_ops.identity(feed_t).eval(feed_dict={feed_t: inputs})
    self.assertEqual(inputs[0], out[0])
    self.assertEqual(inputs[1], out[1])
def testStringFeedWithUnicode(self):
  """Unicode feed values come back as UTF-8 encoded bytes."""
  with session.Session():
    inputs = [u'\n\x01\x00', u'\n\x00\x01']
    feed_t = array_ops.placeholder(dtype=dtypes.string, shape=[2])
    ident = array_ops.identity(feed_t)
    # Feed as a plain list, then as an object ndarray.
    for feed_val in (inputs, np.array(inputs, dtype=np.object)):
      out = ident.eval(feed_dict={feed_t: feed_val})
      self.assertEqual(inputs[0], out[0].decode('utf-8'))
      self.assertEqual(inputs[1], out[1].decode('utf-8'))
def testInvalidTargetFails(self):
  """An unknown session target name raises NotFoundError."""
  expected = 'No session factory registered for the given session options'
  with self.assertRaisesRegexp(errors.NotFoundError, expected):
    session.Session('INVALID_TARGET')
def testFetchByNameDifferentStringTypes(self):
  """Ops named with any string type are fetchable by any string type."""
  with session.Session() as sess:
    created = [constant_op.constant(42.0, name='c'),
               constant_op.constant(43.0, name=u'd'),
               constant_op.constant(44.0, name=b'e'),
               constant_op.constant(45.0, name=r'f')]
    # Regardless of the string type used at creation, .name is text.
    for tensor in created:
      self.assertTrue(isinstance(tensor.name, six.text_type))
    self.assertEqual(42.0, sess.run('c:0'))
    self.assertEqual(42.0, sess.run(u'c:0'))
    self.assertEqual(42.0, sess.run(b'c:0'))
    self.assertEqual(42.0, sess.run(r'c:0'))
    self.assertEqual(43.0, sess.run('d:0'))
    self.assertEqual(43.0, sess.run(u'd:0'))
    self.assertEqual(43.0, sess.run(b'd:0'))
    self.assertEqual(43.0, sess.run(r'd:0'))
    self.assertEqual(44.0, sess.run('e:0'))
    self.assertEqual(44.0, sess.run(u'e:0'))
    self.assertEqual(44.0, sess.run(b'e:0'))
    self.assertEqual(44.0, sess.run(r'e:0'))
    self.assertEqual(45.0, sess.run('f:0'))
    self.assertEqual(45.0, sess.run(u'f:0'))
    self.assertEqual(45.0, sess.run(b'f:0'))
    self.assertEqual(45.0, sess.run(r'f:0'))
def testIncorrectGraph(self):
  """run() rejects tensors/ops from a graph the session does not own."""
  with ops.Graph().as_default() as g_1:
    c_1 = constant_op.constant(1.0, name='c')
  with ops.Graph().as_default() as g_2:
    c_2 = constant_op.constant(2.0, name='c')
  # Same op name in both graphs; identity is per-graph.
  self.assertEqual('c', c_1.op.name)
  self.assertEqual('c', c_2.op.name)
  with session.Session(graph=g_1) as sess_1:
    self.assertEqual(1.0, sess_1.run(c_1))
    with self.assertRaises(ValueError):
      sess_1.run(c_2)
    with self.assertRaises(ValueError):
      sess_1.run(c_2.op)
  with session.Session(graph=g_2) as sess_2:
    with self.assertRaises(ValueError):
      sess_2.run(c_1)
    with self.assertRaises(ValueError):
      sess_2.run(c_1.op)
    self.assertEqual(2.0, sess_2.run(c_2))
def testPartialRun(self):
  """partial_run() feeds inputs incrementally across several calls."""
  with session.Session() as sess:
    a = array_ops.placeholder(dtypes.float32, shape=[])
    b = array_ops.placeholder(dtypes.float32, shape=[])
    c = array_ops.placeholder(dtypes.float32, shape=[])
    r1 = math_ops.add(a, b)
    r2 = math_ops.mul(r1, c)
    # Run twice with separate handles over the same graph.
    for multiplier, expected in ((17, 153), (18, 162)):
      handle = sess.partial_run_setup([r1, r2], [a, b, c])
      partial = sess.partial_run(handle, r1, feed_dict={a: 1, b: 2})
      self.assertEqual(3, partial)
      final = sess.partial_run(handle, r2,
                               feed_dict={c: partial * multiplier})
      self.assertEqual(expected, final)
def testPartialRunIncomplete(self):
  """A partial_run handle may be abandoned before fetching every target."""
  with session.Session() as sess:
    x = array_ops.placeholder(dtypes.float32, shape=[])
    y = array_ops.placeholder(dtypes.float32, shape=[])
    z = array_ops.placeholder(dtypes.float32, shape=[])
    total = math_ops.add(x, y)
    product = math_ops.mul(total, z)
    handle = sess.partial_run_setup([total, product], [x, y, z])
    # Fetch only the first target; `product` is never requested.
    self.assertEqual(3, sess.partial_run(handle, total,
                                         feed_dict={x: 1, y: 2}))
def testConcurrentPartialRun(self):
  """Two partial_run handles over the same graph can be interleaved."""
  with session.Session() as sess:
    a = array_ops.placeholder(dtypes.float32, shape=[])
    b = array_ops.placeholder(dtypes.float32, shape=[])
    c = array_ops.placeholder(dtypes.float32, shape=[])
    r1 = math_ops.add(a, b)
    r2 = math_ops.mul(r1, c)
    h1 = sess.partial_run_setup([r1], [a, b, c])
    h2 = sess.partial_run_setup([r1, r2], [a, b, c])
    res = sess.partial_run(h1, r1, feed_dict={a: 1, b: 2})
    self.assertEqual(3, res)
    temp = res * 19
    # h2's feeds are independent of h1's state.
    res = sess.partial_run(h2, r1, feed_dict={a: temp, b: 9})
    self.assertEqual(66, res)
    res = sess.partial_run(h2, r2, feed_dict={c: 7})
    self.assertEqual(462, res)
def testManyPartialRun(self):
  """A long chain of 200 partial_run steps, then one ordinary run()."""
  with session.Session() as sess:
    steps = 200
    inputs = []
    outputs = []
    acc = constant_op.constant(2.0, dtypes.float32)
    for _ in xrange(steps):
      inputs.append(array_ops.placeholder(dtypes.float32, shape=[]))
      acc = math_ops.mul(acc, inputs[-1])
      outputs.append(acc)
    handle = sess.partial_run_setup(outputs, inputs)
    for step in xrange(steps):
      res = sess.partial_run(handle, outputs[step],
                             feed_dict={inputs[step]: 1.0})
      self.assertEqual(2.0, res)
    # The same graph still supports a normal run() with all feeds at once.
    feed_dict = dict((ph, 1.0) for ph in inputs)
    res = sess.run(outputs, feed_dict)
    self.assertEqual(steps, len(res))
    self.assertEqual(2.0, res[-1])
def testRunAndPartialRun(self):
  """run() and a feed-less partial_run() agree on the same fetches."""
  with session.Session() as sess:
    base = constant_op.constant(2.0, dtypes.float32)
    doubled = base * 2
    tripled = doubled * 3
    direct = sess.run([doubled, tripled])
    handle = sess.partial_run_setup([doubled, tripled], [])
    partial = sess.partial_run(handle, [doubled, tripled])
    self.assertEqual(direct, partial)
def testFeedDictKeyException(self):
  """A bare op-name string key in feed_dict raises TypeError."""
  with session.Session() as sess:
    tensor = constant_op.constant(1.0, dtypes.float32, name='a')
    with self.assertRaisesRegexp(TypeError, 'Cannot interpret feed_dict'):
      sess.run(tensor, feed_dict={'a': [2.0]})
def testPerStepTrace(self):
  """step_stats appear in run_metadata only when tracing is requested.

  Also replaces deprecated ``assertEquals`` with ``assertEqual`` and
  ``assertTrue(not ...)`` with ``assertFalse``.
  """
  run_options = config_pb2.RunOptions(
      trace_level=config_pb2.RunOptions.FULL_TRACE)
  run_metadata = config_pb2.RunMetadata()
  with ops.device('/cpu:0'):
    with session.Session() as sess:
      # No options and no metadata proto: nothing recorded.
      sess.run(constant_op.constant(1.0))
      self.assertFalse(run_metadata.HasField('step_stats'))
      # Metadata without trace options: still nothing recorded.
      sess.run(constant_op.constant(1.0), run_metadata=run_metadata)
      self.assertFalse(run_metadata.HasField('step_stats'))
      # Full tracing fills in step_stats for the single (CPU) device.
      sess.run(constant_op.constant(1.0),
               options=run_options,
               run_metadata=run_metadata)
      self.assertTrue(run_metadata.HasField('step_stats'))
      self.assertEqual(len(run_metadata.step_stats.dev_stats), 1)
def testRunOptionsRunMetadata(self):
  """All combinations of options/run_metadata arguments are accepted.

  Stats are recorded only when both trace options and a metadata proto
  are supplied. Also replaces deprecated ``assertEquals`` with
  ``assertEqual`` and ``assertTrue(not ...)`` with ``assertFalse``.
  """
  run_options = config_pb2.RunOptions(
      trace_level=config_pb2.RunOptions.FULL_TRACE)
  run_metadata = config_pb2.RunMetadata()
  with ops.device('/cpu:0'):
    with session.Session() as sess:
      # all combinations are valid
      sess.run(constant_op.constant(1.0), options=None, run_metadata=None)
      sess.run(constant_op.constant(1.0), options=None,
               run_metadata=run_metadata)
      self.assertFalse(run_metadata.HasField('step_stats'))
      sess.run(constant_op.constant(1.0), options=run_options,
               run_metadata=None)
      self.assertFalse(run_metadata.HasField('step_stats'))
      sess.run(constant_op.constant(1.0), options=run_options,
               run_metadata=run_metadata)
      self.assertTrue(run_metadata.HasField('step_stats'))
      self.assertEqual(len(run_metadata.step_stats.dev_stats), 1)
def testFeedShapeCompatibility(self):
  """Bad-shape feeds and feeds of non-feedable tensors are rejected."""
  with session.Session() as sess:
    some_tensor = constant_op.constant([2.0, 2.0, 2.0, 2.0])
    new_shape = constant_op.constant([2, 2])
    reshaped = array_ops.reshape(some_tensor, new_shape)
    # A feed whose shape disagrees with the tensor's shape fails.
    with self.assertRaisesRegexp(ValueError, 'Cannot feed value of shape'):
      sess.run(reshaped, feed_dict={some_tensor: [1.0, 2.0, 3.0]})
    # The shape input of reshape may not be fed at all.
    with self.assertRaisesRegexp(ValueError, 'may not be fed'):
      sess.run(reshaped, feed_dict={new_shape: [3, 7]})
def testInferShapesFalse(self):
  """Without infer_shapes, nodes carry no _output_shapes attr."""
  with ops.Graph().as_default(), ops.device('/cpu:0'):
    tensor = constant_op.constant([[1, 2]])
    sess = session.Session()
    self.assertNotIn('_output_shapes', sess.graph_def.node[0].attr)
    # Reference the tensor so lint does not flag it as unused.
    self.assertTrue(tensor == tensor)
def testInferShapesTrue(self):
  """With infer_shapes enabled, nodes carry an _output_shapes attr."""
  config = config_pb2.ConfigProto(
      graph_options=config_pb2.GraphOptions(infer_shapes=True))
  with ops.Graph().as_default(), ops.device('/cpu:0'):
    tensor = constant_op.constant([[1, 2]])
    sess = session.Session(config=config)
    self.assertIn('_output_shapes', sess.graph_def.node[0].attr)
    # Reference the tensor so lint does not flag it as unused.
    self.assertTrue(tensor == tensor)
def testBuildCostModel(self):
  """With build_cost_model=100, a cost graph is reported on step 99 only.

  NOTE(review): presumably the runtime builds and reports the cost
  model once after the configured number of steps — confirm against
  the GraphOptions.build_cost_model semantics.
  """
  run_options = config_pb2.RunOptions()
  config = config_pb2.ConfigProto(
      allow_soft_placement=True,
      graph_options=config_pb2.GraphOptions(build_cost_model=100))
  with session.Session(config=config) as sess:
    with ops.device('/gpu:0'):
      a = array_ops.placeholder(dtypes.float32, shape=[])
      b = math_ops.add(a, a)
      c = array_ops.identity(b)
      d = math_ops.mul(c, c)
    for step in xrange(120):
      run_metadata = config_pb2.RunMetadata()
      sess.run(d, feed_dict={a: 1.0},
               options=run_options, run_metadata=run_metadata)
      if step == 99:
        self.assertTrue(run_metadata.HasField('cost_graph'))
      else:
        self.assertFalse(run_metadata.HasField('cost_graph'))
def testNonInteractiveSessionNesting(self):
  """Default-session contexts must be exited in LIFO order."""
  sess1 = session.Session()
  sess1_controller = sess1.as_default()
  sess1_controller.__enter__()
  sess2 = session.Session()
  sess2_controller = sess2.as_default()
  sess2_controller.__enter__()
  # Exiting the outer context while the inner is active is an error.
  with self.assertRaisesRegexp(AssertionError, 'Nesting violated'):
    sess1_controller.__exit__(None, None, None)
  # Clean up the default-session stack so later tests start fresh.
  ops._default_session_stack.reset()
def testInteractiveSessionNesting(self):
  """Interactive sessions may be created and discarded without nesting errors."""
  first = session.InteractiveSession()
  second = session.InteractiveSession()
  del first
  del second
def testInvalidArgument(self):
    """Wrongly-typed constructor arguments raise TypeError."""
    bad_calls = [
        ('target must be a string', lambda: session.Session(37)),
        ('config must be a tf.ConfigProto', lambda: session.Session(config=37)),
        ('graph must be a tf.Graph', lambda: session.Session(graph=37)),
    ]
    for pattern, invoke in bad_calls:
        with self.assertRaisesRegexp(TypeError, pattern):
            invoke()
def testTimeoutWithShortOperations(self):
    """A generous timeout must not trip on quick, non-blocking enqueues."""
    num_epochs = 5
    queue = data_flow_ops.FIFOQueue(
        capacity=50, dtypes=[dtypes.int32], shapes=[()])
    enqueue_op = queue.enqueue_many(constant_op.constant([1, 2]))
    # Use a 10-second timeout, which should be longer than any
    # non-blocking enqueue_many op.
    config = config_pb2.ConfigProto(operation_timeout_in_ms=10000)
    with session.Session(config=config) as sess:
        for _ in range(num_epochs):
            sess.run(enqueue_op)
        self.assertEqual(sess.run(queue.size()), num_epochs * 2)
# Run the TensorFlow test runner when this module is executed directly.
if __name__ == '__main__':
    googletest.main()
| |
import os
import sys
import json
import numpy as np
# For python 2
import urllib2 as urllib
# For python 3
#from urllib import request as urllib
def format_url(server, full_size, group_list):
    """Build a butterfly entity_feature URL template.

    The returned string still contains two '{}' placeholders (feature
    name and id) to be filled in later via str.format.
    """
    depth, height, width = full_size
    experiment, sample, dataset, channel = group_list
    parts = [
        'feature={}&id={}',  # intentionally left unformatted
        'depth={0}&height={1}&width={2}&x=0&y=0&z=0'.format(depth, height, width),
        'experiment={0}&sample={1}&dataset={2}&channel={3}'.format(
            experiment, sample, dataset, channel),
    ]
    return '{0}/api/entity_feature?{1}'.format(server, '&'.join(parts))
def get_all_neurons(_fmt):
    """Return the set of all neuron ids reported by the server."""
    raw = urllib.urlopen(_fmt.format('all_neurons', 0)).read()
    return set(json.loads(raw))
def get_synapse_parent(_fmt, _id):
    """Return ids of neurons linked to synapse ``_id``, excluding ``_id``."""
    raw = urllib.urlopen(_fmt.format('synapse_parent', _id)).read()
    parents = set(json.loads(raw).values())
    return parents - set([_id])
def get_neuron_children(_fmt, _id):
    """Return the set of synapse ids (as ints) linked to neuron ``_id``."""
    raw = urllib.urlopen(_fmt.format('neuron_children', _id)).read()
    return set(int(key) for key in json.loads(raw).keys())
def get_synapse_keypoint(_fmt, _id):
    """Return the decoded keypoint record for synapse ``_id``."""
    raw = urllib.urlopen(_fmt.format('synapse_keypoint', _id)).read()
    return json.loads(raw)
def get_shared_synapse(_syn_file):
    """For each neuron pair listed in ``_syn_file``, find shared synapses.

    ``_syn_file`` is a JSON list of neuron-id pairs. For every pair, the
    synapses linked to both neurons are intersected and their keypoints
    fetched; results (pair + {synapse: keypoint}) go to 'out.json'.
    """
    # Declare all parameters
    full_size= [3394, 26624, 26624]
    real_server = 'https://butterfly.rc.fas.harvard.edu'
    group_list = [
        'team1_waypoint_201610_full_May6',
        'team1_waypoint_201610_full_May6',
        '25k_201610_dataset',
        'em',
    ]
    fmt = format_url(real_server, full_size, group_list)
    # Output array
    all_shared = []
    with open(_syn_file, 'r') as jf:
        pairs = json.load(jf)
    for p in pairs:
        synapses = [get_neuron_children(fmt, i) for i in p]
        # Take the set intersection of the two synapse sets
        # (synapses touching BOTH neurons of the pair).
        shared_set = synapses[0] & synapses[1]
        print ("""
        {} and {} share synapse {}
        """.format(p[0],p[1], shared_set))
        # Find the synapse keypoint for each of the shared synapses
        shared_keypoints = [get_synapse_keypoint(fmt, i) for i in shared_set]
        # Store to output
        coordinates = dict(zip(shared_set, shared_keypoints))
        all_shared.append(p + [coordinates])
    with open('out.json','w') as jf:
        json.dump(all_shared, jf)
def size_filter(count_file, count_min, banned_set=None):
    """ Filter neurons by size (box count)
    Arguments
    ----------
    count_file : str
        Path to file with id box-size on each line
    count_min : int
        The minimum number of boxes a neuron must cover
    banned_set : set
        The neurons never to use ever (optional; defaults to none)
    Returns
    --------
    list
        All neuron ids (line indexes) meeting the size threshold
    """
    # A None sentinel avoids the shared-mutable-default pitfall of
    # the previous `banned_set=set()` signature.
    if banned_set is None:
        banned_set = set()
    # Output
    out_list = []
    # Missing input file means no matching neurons.
    if not os.path.exists(count_file):
        return out_list
    # Load counts for all neurons; a neuron's id is its line index.
    all_counts = np.loadtxt(count_file, dtype=np.uint32)
    # Get indexes above threshold
    all_big = np.where(all_counts >= count_min)[0]
    return list(set(all_big) - banned_set)
def size_synapse_filter(count_file, count_min, banned_set=set(), two_way=False):
    """ Filter by size and number of synapses
    Arguments
    ----------
    count_file : str
        Path to file with id box-size on each line
    count_min : int
        The minimum number of boxes a neuron must cover
    banned_set : set
        The neurons never to use ever
        (NOTE(review): mutable default; only read here, never mutated)
    two_way : bool
        Whether linked neurons must also be >= min_size
    Returns
    --------
    dict
        Big neurons as keys with linked neurons as values
    """
    # Declare all parameters
    full_size= [1664, 14336, 14336]
    real_server = 'http://localhost:8487'
    group_list = [
        'R0',
        '2017_07_12',
        '50_50_50',
        'final_segmentation'
    ]
    # Get the URL to the butterfly instance
    fmt = format_url(real_server, full_size, group_list)
    # Output
    out_dict = {}
    # If no file
    if not os.path.exists(count_file):
        return out_dict
    # Load counts for all neurons (id == line index)
    all_counts = np.loadtxt(count_file, dtype=np.uint32)
    # Get indexes above threshold
    all_big = np.where(all_counts >= count_min)[0]
    all_big = list(set(all_big) - banned_set)
    # Check if each index has synapse
    for big_id in all_big:
        # Make some temporary sets
        this_id = set([big_id])
        these_ids = set()
        # Get all the synapses (HTTP request per neuron)
        id_syns = get_neuron_children(fmt, big_id)
        # If there are synapses
        for syn_id in id_syns:
            # Get the neurons for the synapse, minus the neuron itself
            syn_neurons = get_synapse_parent(fmt, syn_id) - this_id
            # Add the neuron to the final output
            if len(syn_neurons):
                # At most one partner expected; take the first.
                new_id = list(syn_neurons)[0]
                # Check if meets size criteria
                if not two_way or new_id in all_big:
                    # Add to temp id set
                    these_ids.add(new_id)
        # Add set to dictionary if not empty
        if len(these_ids):
            out_dict[big_id] = list(these_ids)
    return out_dict
if __name__ == "__main__":
CASE = 0
if CASE == 0:
set_path = "/n/coxfs01/thejohnhoffer/R0/ids-2017-06-26_stitched/meshes/top_spread.txt"
count_path = "/n/coxfs01/thejohnhoffer/R0/ids-2017-06-26_stitched/meshes/spread_count.txt"
count_min = 20
all_neurons = size_filter(count_path, count_min)
# Write set
with open(set_path, 'w') as sf:
# Make set of all neurons
sf.write(':'.join(map(str,all_neurons)))
set_path = "/n/coxfs01/thejohnhoffer/R0/ids-2017-06-26_stitched/meshes/top_high.txt"
count_path = "/n/coxfs01/thejohnhoffer/R0/ids-2017-06-26_stitched/meshes/high_count.txt"
count_min = 7
all_neurons = size_filter(count_path, count_min)
# Write set
with open(set_path, 'w') as sf:
# Make set of all neurons
sf.write(':'.join(map(str,all_neurons)))
sys.exit()
# DEFAULT CASE
out_path = "/n/coxfs01/thejohnhoffer/R0/ids-2017-07-12_final/meshes/big_linked_nodes.json"
set_path = "/n/coxfs01/thejohnhoffer/R0/ids-2017-07-12_final/meshes/big_linked_nodes.txt"
count_path = "/n/coxfs01/thejohnhoffer/R0/ids-2017-07-12_final/meshes/spread_count.txt"
banned_set = set([418327,224632])
count_min = 4
two_way = True
# Get big neurons with synapses
neuron_dict = size_synapse_filter(count_path, count_min, banned_set, two_way)
# Write out
with open(out_path, 'w') as jf:
json.dump(neuron_dict, jf, indent=4)
# Write set
with open(set_path, 'w') as sf:
# Make set of all neurons
all_neurons = set(neuron_dict.keys())
for n_val in neuron_dict.values():
all_neurons = all_neurons | set(n_val)
# Make into a string
all_neurons = list(all_neurons)
sf.write(':'.join(map(str,all_neurons)))
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import math
import random
import unittest
from sympy import var
from symoroutils import tools
from pysymoro.robotf import FloatingRobot as Robot
def planar2r():
    """Build the symbolic 2R planar robot model used by these tests."""
    robo = Robot('planar2r', 2, 2, 3, False, tools.SIMPLE)
    # geometric parameters: two revolute joints plus a fixed tool frame
    geo_params = {
        1: {'sigma': 0, 'mu': 1, 'theta': var('th1')},
        2: {'sigma': 0, 'mu': 1, 'alpha': 0, 'd': var('L1'), 'theta': var('th2')},
        3: {'sigma': 2, 'd': var('L2')},
    }
    robo.update_params('geos', geo_params)
    # dynamic parameters: zero out the off-plane inertia terms of link 1
    robo.update_params('dyns', {
        1: {'xx': 0, 'xy': 0, 'xz': 0, 'yy': 0, 'yz': 0, 'ia': 0},
    })
    # joint parameters for the base placeholder joint
    robo.update_params('misc', {0: {'qdots': 0, 'qddots': 0, 'torques': 0}})
    # gravity vector: only the third component is non-zero (symbolic G3)
    robo.update_params('misc', {
        0: {'gravity': 0},
        1: {'gravity': 0},
        2: {'gravity': var('G3')},
    })
    return robo
def planar2r_numerical(is_floating=False):
    """Create a 2R planar robot with fixed numerical parameters.

    Args:
        is_floating: whether the base is modelled as floating.
    Returns:
        The configured Robot instance.
    """
    robo = Robot(
        'planar2r', 2, 2, 3, is_floating,
        tools.SIMPLE, is_symbolic=False
    )
    robo.set_dyns_to_zero()
    # geometric parameters (former L1=0.5, L2=0.4)
    robo.update_params('geos', {
        1: {'sigma': 0, 'mu': 1},
        2: {'sigma': 0, 'mu': 1, 'd': 0.5},
        3: {'sigma': 2, 'd': 0.4},
    })
    # dynamic parameters: inertia (zz), friction (frc/frv), rotor
    # inertia (ia), mass, and link-2 first moments (msx/msy)
    robo.update_params('dyns', {
        1: {
            'zz': 3.7, 'frc': 0.3, 'frv': 0.3, 'ia': 0.0, 'mass': 1.2
        },
        2: {
            'zz': 0.35, 'frc': 0.25, 'frv': 0.18, 'ia': 0.0, 'mass': 0.8,
            'msx': 0.4, 'msy': 0.15
        }
    })
    # base joint state
    robo.update_params('misc', {0: {'qdots': 0, 'qddots': 0, 'torques': 0}})
    # gravity along the third axis (former G3 = -9.81)
    robo.update_params('misc', {
        0: {'gravity': 0},
        1: {'gravity': 0},
        2: {'gravity': -9.81},
    })
    return robo
def set_planar2r_joint_state(robo, q, qdot, qddot):
    """
    Set the joint states for a 2R planar robot.
    Args:
        robo: An instance of Robot class.
        q: Joint positions. A list of size 2.
        qdot: Joint velocities. A list of size 2.
        qddot: Joint accelerations. A list of size 2.
    Returns:
        The modified instance of Robot class.
    """
    # joint positions go into the geometric parameters
    robo.update_params('geos', {
        1: {'theta': q[0]},
        2: {'theta': q[1]},
    })
    # velocities and accelerations go into the misc parameters
    robo.update_params('misc', {
        1: {'qdots': qdot[0], 'qddots': qddot[0]},
        2: {'qdots': qdot[1], 'qddots': qddot[1]},
    })
    return robo
def set_planar2r_joint_torque(robo, qtorque):
    """
    Set the joint torques for a 2R planar robot.
    Args:
        robo: An instance of Robot class.
        qtorque: Joint torques. A list of size 2.
    Returns:
        The modified instance of Robot class.
    """
    robo.update_params('misc', {
        1: {'torques': qtorque[0]},
        2: {'torques': qtorque[1]},
    })
    return robo
class TestDynModelPlanar2rFixed(unittest.TestCase):
    """
    Unit test for the inverse and direct dynamic model computation for
    fixed-base robots, checked numerically: torques from the inverse
    model (IDyM) are fed into the direct model (DDyM), which must
    reproduce the original joint accelerations.
    """
    def setUp(self):
        pass

    def test_when_zero(self):
        """
        Test the dynamic model computation when joint position,
        velocity and acceleration are set to zero.
        """
        robo = planar2r_numerical()
        # set joint state
        robo = set_planar2r_joint_state(
            robo, [0.0, 0.0], [0.0, 0.0], [0.0, 0.0]
        )
        # compute IDyM
        robo.compute_idym()
        # set torque values for DDyM
        robo = set_planar2r_joint_torque(
            robo, [robo.idym.torques[1], robo.idym.torques[2]]
        )
        # compute DDyM
        robo.compute_ddym()
        # check if the result of IDyM (computed torques) are zero
        self.assertEqual(robo.idym.torques[1], 0.0)
        self.assertEqual(robo.idym.torques[2], 0.0)
        # check if the result of DDyM (computed qddots) are zero
        self.assertEqual(robo.ddym.qddots[1], 0.0)
        self.assertEqual(robo.ddym.qddots[2], 0.0)
        # check if input to IDyM is same as output of DDyM
        self.assertEqual(robo.ddym.qddots[1], robo.qddots[1])
        self.assertEqual(robo.ddym.qddots[2], robo.qddots[2])

    def test_when_random(self):
        """
        Test the dynamic model computation when joint position,
        velocity and acceleration are set to random meaningful values.
        """
        robo = planar2r_numerical()
        # initialise joint position, velocity, acceleration to random
        # values (seeded, so each run is reproducible)
        random.seed(math.pi)
        q = list(random.uniform(-math.pi, math.pi) for j in range(2))
        qdot = list(random.uniform(-math.pi, math.pi) for j in range(2))
        qddot = list(random.uniform(-math.pi, math.pi) for j in range(2))
        # set joint state
        robo = set_planar2r_joint_state(robo, q, qdot, qddot)
        # compute IDyM
        robo.compute_idym()
        print(robo.idym)
        # set torque values for DDyM
        robo = set_planar2r_joint_torque(
            robo, [robo.idym.torques[1], robo.idym.torques[2]]
        )
        # compute DDyM
        robo.compute_ddym()
        # diagnostic output for manual inspection
        print('\n')
        print(q)
        print(qdot)
        print(qddot)
        print(robo.idym.torques)
        print(robo.ddym.qddots)
        # check if input to IDyM is same as output of DDyM
        self.assertEqual(robo.ddym.qddots[1], robo.qddots[1])
        self.assertEqual(robo.ddym.qddots[2], robo.qddots[2])
class TestDynModelPlanar2rFloating(unittest.TestCase):
    """
    Unit test for the inverse and direct dynamic model computation for
    floating-base robots, checked numerically: torques from the inverse
    model (IDyM) are fed into the direct model (DDyM), which must
    reproduce the original joint accelerations.
    """
    def setUp(self):
        pass

    def test_when_zero(self):
        """
        Test the dynamic model computation when joint position,
        velocity and acceleration are set to zero.
        """
        robo = planar2r_numerical(is_floating=True)
        # set joint state
        robo = set_planar2r_joint_state(
            robo, [0.0, 0.0], [0.0, 0.0], [0.0, 0.0]
        )
        # compute IDyM
        robo.compute_idym()
        # set torque values for DDyM
        robo = set_planar2r_joint_torque(
            robo, [robo.idym.torques[1], robo.idym.torques[2]]
        )
        # compute DDyM
        robo.compute_ddym()
        # assertions
        # check if the result of IDyM (computed torques) are zero
        self.assertEqual(robo.idym.torques[1], 0.0)
        self.assertEqual(robo.idym.torques[2], 0.0)
        # check if the result of DDyM (computed qddots) are zero
        self.assertEqual(robo.ddym.qddots[1], 0.0)
        self.assertEqual(robo.ddym.qddots[2], 0.0)
        # check if input to IDyM is same as output of DDyM
        self.assertEqual(robo.ddym.qddots[1], robo.qddots[1])
        self.assertEqual(robo.ddym.qddots[2], robo.qddots[2])

    def test_when_random(self):
        """
        Test the dynamic model computation when joint position,
        velocity and acceleration are set to random meaningful values.
        """
        robo = planar2r_numerical(is_floating=True)
        # initialise joint position, velocity, acceleration to random
        # values (seeded, so each run is reproducible)
        random.seed(math.pi)
        q = list(random.uniform(-math.pi, math.pi) for j in range(2))
        qdot = list(random.uniform(-math.pi, math.pi) for j in range(2))
        qddot = list(random.uniform(-math.pi, math.pi) for j in range(2))
        # set joint state
        robo = set_planar2r_joint_state(robo, q, qdot, qddot)
        # compute IDyM
        robo.compute_idym()
        # set torque values for DDyM
        robo = set_planar2r_joint_torque(
            robo, [robo.idym.torques[1], robo.idym.torques[2]]
        )
        # compute DDyM
        robo.compute_ddym()
        # diagnostic output for manual inspection
        print('\n')
        print(q)
        print(qdot)
        print(qddot)
        print(robo.idym.torques)
        print(robo.ddym.qddots)
        # assertions
        # check if input to IDyM is same as output of DDyM
        self.assertEqual(robo.ddym.qddots[1], robo.qddots[1])
        self.assertEqual(robo.ddym.qddots[2], robo.qddots[2])
def run_tests():
    """Load and run the unittests (fixed-base suite only)."""
    # NOTE: the floating-base suite is not included here; run the module
    # under a unittest runner to execute both.
    loader = unittest.TestLoader()
    suite = loader.loadTestsFromTestCase(TestDynModelPlanar2rFixed)
    unittest.TextTestRunner(verbosity=2).run(suite)
def main():
    """Script entry point: execute the unit-test suite."""
    run_tests()
# Allow running this test module directly as a script.
if __name__ == '__main__':
    main()
| |
from __future__ import print_function
from global_config import USE_STEREOCHEMISTRY
import rdkit.Chem as Chem
from rdkit.Chem import AllChem
import numpy as np
from multiprocessing import Pool, cpu_count
from functools import partial # used for passing args to multiprocessing
class Transformer:
    '''
    The Transformer class defines an object which can be used to perform
    one-step retrosyntheses for a given molecule.

    Templates are loaded from a MongoDB collection via load(); each
    template dict holds the SMARTS plus bookkeeping fields and,
    optionally, compiled RDKit reactions for the retro ('rxn') and
    forward ('rxn_f') directions.
    '''

    def __init__(self):
        self.source = None      # MongoDB collection templates came from
        self.templates = []     # template dicts built by load()
        self.has_synth = False  # True once forward reactions are loaded
        self.has_retro = False  # True once retro reactions are loaded

    def load(self, collection, mincount = 50, get_retro = False, get_synth = True, lowe = True):
        '''
        Loads the object from a MongoDB collection containing transform
        template records.

        collection - MongoDB collection, one document per template.
        mincount - minimum 'count' for a template to be kept; only
            applied when the collection actually stores a 'count' field.
        get_retro - also compile RDKit retro reactions ('rxn').
        get_synth - also compile RDKit forward reactions ('rxn_f').
        lowe - if True, build the forward SMARTS by swapping the
            product/reactant halves; otherwise wrap the SMARTS as-is.
        '''
        # Save collection source
        self.source = collection
        # Save get_retro/get_synth:
        if get_retro: self.has_retro = True
        if get_synth: self.has_synth = True
        if mincount and 'count' in collection.find_one():
            filter_dict = {'count': { '$gte': mincount}}
        else:
            filter_dict = {}
        # Look for all templates in collection
        for document in collection.find(filter_dict, ['_id', 'reaction_smarts', 'necessary_reagent']):
            # Skip if no reaction SMARTS
            if 'reaction_smarts' not in document: continue
            reaction_smarts = str(document['reaction_smarts'])
            if not reaction_smarts: continue
            # Define dictionary with defaults for every optional field
            template = {
                'name': document['name'] if 'name' in document else '',
                'reaction_smarts': reaction_smarts,
                'incompatible_groups': document['incompatible_groups'] if 'incompatible_groups' in document else [],
                'reference': document['reference'] if 'reference' in document else '',
                'references': document['references'] if 'references' in document else [],
                'rxn_example': document['rxn_example'] if 'rxn_example' in document else '',
                'explicit_H': document['explicit_H'] if 'explicit_H' in document else False,
                '_id': document['_id'] if '_id' in document else -1,
                'product_smiles': document['product_smiles'] if 'product_smiles' in document else [],
                'necessary_reagent': document['necessary_reagent'] if 'necessary_reagent' in document else '',
            }
            # Frequency/popularity score
            if 'count' in document:
                template['count'] = document['count']
            elif 'popularity' in document:
                template['count'] = document['popularity']
            else:
                template['count'] = 1
            # Define reaction in RDKit and validate
            if get_retro:
                try:
                    # Force reactants and products to be one molecule (not really, but for bookkeeping)
                    reaction_smarts_retro = '(' + reaction_smarts.replace('>>', ')>>(') + ')'
                    rxn = AllChem.ReactionFromSmarts(str(reaction_smarts_retro))
                    #if rxn.Validate() == (0, 0):
                    if rxn.Validate()[1] == 0:
                        template['rxn'] = rxn
                    else:
                        template['rxn'] = None
                except Exception as e:
                    print('Couldnt load retro: {}: {}'.format(reaction_smarts_retro, e))
                    template['rxn'] = None
            # Define forward version, too
            if get_synth:
                try:
                    if lowe:
                        reaction_smarts_synth = '(' + reaction_smarts.split('>')[2] + ')>>(' + reaction_smarts.split('>')[0] + ')'
                    else:
                        reaction_smarts_synth = '(' + reaction_smarts.replace('>>', ')>>(') + ')'
                    rxn_f = AllChem.ReactionFromSmarts(reaction_smarts_synth)
                    #if rxn_f.Validate() == (0, 0):
                    if rxn_f.Validate()[1] == 0:
                        template['rxn_f'] = rxn_f
                    else:
                        template['rxn_f'] = None
                except Exception as e:
                    print('Couldnt load forward: {}: {}'.format(reaction_smarts_synth, e))
                    template['rxn_f'] = None
            # Need to have either a retro or forward reaction be valid
            if get_retro and get_synth:
                if not template['rxn'] and not template['rxn_f']: continue
            elif get_retro:
                if not template['rxn']: continue
            elif get_synth:
                if not template['rxn_f']: continue
            else:
                raise ValueError('Cannot run Transformer.load() with get_retro = get_synth = False')
            # Add to list
            self.templates.append(template)
        self.num_templates = len(self.templates)

    def reorder(self):
        '''
        Re-orders the list of templates (self.templates) according to
        field 'count' in descending order. This means we will apply the
        most popular templates first
        '''
        self.templates[:] = [x for x in sorted(self.templates, key = lambda z: z['count'], reverse = True)]

    def perform_forward(self, smiles, stop_if = None, progbar = False, singleonly = False):
        '''
        Performs a forward synthesis (i.e., reaction enumeration) given
        a SMILES string by applying each transformation template in
        reverse sequentially

        stop_if - can be used for testing product matching based on
        if the isomericSmiles matches with one of the products. It terminates
        early instead of going through all of the templates and returns True.

        Returns a ForwardResult, or a bool when stop_if is given.
        '''
        # Define pseudo-molecule (single molecule) to operate on
        mol = Chem.MolFromSmiles(smiles)
        # Canonicalize as sorted '.'-joined fragments for comparison below.
        smiles = '.'.join(sorted(Chem.MolToSmiles(mol, isomericSmiles = USE_STEREOCHEMISTRY).split('.')))
        # Initialize results object
        result = ForwardResult(smiles)
        # Draw a progress bar?
        if progbar:
            from tqdm import tqdm
            generator = tqdm(self.templates)
        else:
            generator = self.templates
        # Try each in turn
        for template in generator:
            # Perform
            try:
                # NOTE(review): assumes load() ran with get_synth=True;
                # otherwise 'rxn_f' is absent and the template is skipped
                # here via the broad except.
                outcomes = template['rxn_f'].RunReactants([mol])
            except Exception as e:
                #print('Forward warning: {}'.format(e))
                continue
            #print('Retro version of reaction: {}'.format(template['reaction_smarts']))
            if not outcomes: continue
            for j, outcome in enumerate(outcomes):
                # Skip outcomes that fail sanitization
                try:
                    for x in outcome:
                        x.UpdatePropertyCache()
                        Chem.SanitizeMol(x)
                except Exception as e:
                    #print(e)
                    continue
                smiles_list = []
                for x in outcome:
                    smiles_list.extend(Chem.MolToSmiles(x, isomericSmiles = USE_STEREOCHEMISTRY).split('.'))
                # Reduce to largest (longest) product only?
                if singleonly: smiles_list = [max(smiles_list, key = len)]
                product = ForwardProduct(
                    smiles_list = sorted(smiles_list),
                    template_id = template['_id'],
                    num_examples = template['count'],
                )
                if '.'.join(product.smiles_list) == smiles: continue # no transformation
                # Early termination?
                if stop_if:
                    if stop_if in product.smiles_list:
                        print('Found true product - skipping remaining templates to apply')
                        return True
                # If not early termination, we want to keep all products
                else:
                    result.add_product(product)
        # Were we trying to stop early?
        if stop_if:
            return False
        # Otherwise, return the full list of products
        return result

    def lookup_id(self, template_id):
        '''
        Find the reaction smarts for this template_id

        Returns the matching template dict, or None if no template matches.
        '''
        for template in self.templates:
            if template['_id'] == template_id:
                return template
class ForwardResult:
    '''
    Accumulates the unique products of a one-step forward synthesis
    starting from a single (canonicalized) SMILES string.
    '''

    def __init__(self, smiles):
        self.smiles = smiles
        self.products = []

    def add_product(self, product):
        '''
        Merge ``product`` into the collection, deduplicating on its
        smiles_list: duplicates contribute their template ids and
        example counts to the existing entry instead of being appended.
        '''
        for existing in self.products:
            if existing.smiles_list != product.smiles_list:
                continue
            # Same outcome seen before: merge provenance and stop.
            existing.template_ids |= set(product.template_ids)
            existing.num_examples += product.num_examples
            return
        self.products.append(product)

    def return_top(self, n = 50):
        '''
        Return at most the top ``n`` products as ranked dicts, sorted by
        descending num_examples. (Matching historical behavior, n <= 0
        returns every product.)
        '''
        ranked = sorted(self.products, key=lambda p: p.num_examples, reverse=True)
        top = []
        for position, product in enumerate(ranked, start=1):
            top.append({
                'rank': position,
                'smiles': '.'.join(product.smiles_list),
                'smiles_split': product.smiles_list,
                'num_examples': product.num_examples,
                'tforms': sorted(list(product.template_ids)),
            })
            if position == n:
                break
        return top
class ForwardProduct:
    '''
    A class to store a single forward product for reaction enumeration.

    Attributes:
        smiles_list: fragment SMILES strings making up the product.
        template_ids: set of template ids that produced it.
        num_examples: popularity count of the originating template(s).
    '''
    def __init__(self, smiles_list = None, template_id = -1, num_examples = 0):
        # BUG FIX: the previous `smiles_list=[]` default was a shared
        # mutable object; mutating one instance's list leaked into every
        # other instance constructed without an explicit list.
        self.smiles_list = smiles_list if smiles_list is not None else []
        self.template_ids = set([template_id])
        self.num_examples = num_examples
| |
# Copyright 2014 Google Inc. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Setup script for OpenHTF."""
import errno
import glob
import os
import platform
import subprocess
import sys
from distutils.command.build import build
from distutils.command.clean import clean
from distutils.cmd import Command
from setuptools import find_packages
from setuptools import setup
from setuptools.command.test import test
class CleanCommand(clean):
    """Custom logic for the clean command."""

    def run(self):
        # Run the standard distutils clean first.
        clean.run(self)
        # Extra build residue to purge: dists, egg metadata, generated
        # protobuf modules, and compiled bytecode.
        targets = [
            './dist',
            './*.egg-info',
            './openhtf/output/proto/*_pb2.py',
            './openhtf/**/*.pyc',
        ]
        # NOTE(review): 'shopt -s globstar' is a bash builtin, but
        # os.system runs /bin/sh -- the '**' glob may not expand on all
        # systems; confirm on the supported platforms.
        os.system('shopt -s globstar; rm -vrf %s' % ' '.join(targets))
class BuildProtoCommand(Command):
    """Custom setup command to build protocol buffers."""
    description = 'Builds the proto files into python files.'
    user_options = [('protoc=', None, 'Path to the protoc compiler.'),
                    ('protodir=', None, 'Path to protobuf install.'),
                    ('indir=', 'i', 'Directory containing input .proto files.'),
                    ('outdir=', 'o', 'Where to output .py files.')]

    def initialize_options(self):
        # Locate protoc and the protobuf include dir, preferring
        # pkg-config and falling back to platform-typical prefixes.
        self.skip_proto = False
        try:
            prefix = subprocess.check_output(
                'pkg-config --variable prefix protobuf'.split()).strip().decode('utf-8')
        except (subprocess.CalledProcessError, OSError):
            if platform.system() == 'Linux':
                # Default to /usr?
                prefix = '/usr'
            elif platform.system() in ['Mac', 'Darwin']:
                # Default to /usr/local for Homebrew
                prefix = '/usr/local'
            else:
                print('Warning: mfg-inspector output is not fully implemented for '
                      'Windows. OpenHTF will be installed without it.')
                self.skip_proto = True
        # NOTE(review): on the Windows branch above, 'prefix' is never
        # assigned, so the next line would raise NameError -- confirm
        # whether this path is actually reachable on Windows installs.
        maybe_protoc = os.path.join(prefix, 'bin', 'protoc')
        if os.path.isfile(maybe_protoc) and os.access(maybe_protoc, os.X_OK):
            self.protoc = maybe_protoc
        else:
            print('Warning: protoc not found at %s' % maybe_protoc)
            print('setup will attempt to run protoc with no prefix.')
            self.protoc = 'protoc'
        self.protodir = os.path.join(prefix, 'include')
        self.indir = os.getcwd()
        self.outdir = os.getcwd()

    def finalize_options(self):
        pass

    def run(self):
        # Bail out quietly when initialize_options decided to skip.
        if self.skip_proto:
            print('Skipping building protocol buffers.')
            return
        # Build regular proto files.
        protos = glob.glob(
            os.path.join(self.indir, 'openhtf', 'output', 'proto', '*.proto'))
        if protos:
            print('Attempting to build proto files:\n%s' % '\n'.join(protos))
            cmd = [
                self.protoc,
                '--proto_path', self.indir,
                '--proto_path', self.protodir,
                '--python_out', self.outdir,
            ] + protos
            try:
                subprocess.check_call(cmd)
            except OSError as e:
                # ENOENT means the compiler binary itself is missing.
                if e.errno == errno.ENOENT:
                    print('Could not find the protobuf compiler at \'%s\'' % self.protoc)
                    if sys.platform.startswith('linux'):
                        print('On many Linux systems, this is fixed by installing the '
                              '"protobuf-compiler" and "libprotobuf-dev" packages.')
                    elif sys.platform == 'darwin':
                        print('On Mac, protobuf is often installed via homebrew.')
                raise
            except subprocess.CalledProcessError:
                print('Could not build proto files.')
                print('This could be due to missing helper files. On many Linux '
                      'systems, this is fixed by installing the '
                      '"libprotobuf-dev" package.')
                raise
        else:
            print('Found no proto files to build.')
# Make building protos part of building overall.
# Prepend so the generated *_pb2.py modules exist before the regular
# build steps try to package them.
build.sub_commands.insert(0, ('build_proto', None))
# Runtime dependencies, pinned to version ranges known to work with
# this release.
INSTALL_REQUIRES = [
    'colorama>=0.3.9,<1.0',
    'contextlib2>=0.5.1,<1.0',
    'future>=0.16.0',
    'mutablerecords>=0.4.1,<2.0',
    'oauth2client>=1.5.2,<2.0',
    'protobuf>=3.6.0,<4.0',
    'PyYAML>=3.13,<4.0',
    'pyOpenSSL>=17.1.0,<18.0',
    'sockjs-tornado>=1.0.3,<2.0',
    'tornado>=4.3,<5.0',
]
# Not all versions of setuptools support semicolon syntax for specifying
# platform-specific dependencies, so we do it the old school way.
if sys.version_info < (3,4):
    # enum entered the stdlib in Python 3.4; use the backport before that.
    INSTALL_REQUIRES.append('enum34>=1.1.2,<2.0')
class PyTestCommand(test):
    # Derived from
    # https://github.com/chainreactionmfg/cara/blob/master/setup.py
    """setuptools 'test' command that delegates to pytest, with optional
    coverage reporting via --pytest-cov."""

    user_options = [
        ('pytest-args=', None, 'Arguments to pass to py.test'),
        ('pytest-cov=', None, 'Enable coverage. Choose output type: '
         'term, html, xml, annotate, or multiple with comma separation'),
    ]

    def initialize_options(self):
        # Default: run everything under ./test, coverage disabled.
        test.initialize_options(self)
        self.pytest_args = ['test']
        self.pytest_cov = None

    def finalize_options(self):
        test.finalize_options(self)
        self.test_args = []
        self.test_suite = True

    def run_tests(self):
        # Protos must be generated before test modules import them.
        self.run_command('build_proto')
        import pytest
        cov = []
        if self.pytest_cov is not None:
            outputs = []
            for output in self.pytest_cov.split(','):
                outputs.extend(['--cov-report', output])
            cov = ['--cov', 'openhtf'] + outputs
        # Strip setup.py's own argv so pytest does not try to parse it.
        sys.argv = [sys.argv[0]]
        print('invoking pytest.main with %s' % (self.pytest_args + cov))
        sys.exit(pytest.main(self.pytest_args + cov))
# Package metadata and distribution configuration for OpenHTF.
setup(
    name='openhtf',
    version='1.4.2',
    description='OpenHTF, the open hardware testing framework.',
    author='John Hawley',
    author_email='madsci@google.com',
    maintainer='Joe Ethier',
    maintainer_email='jethier@google.com',
    # BUG FIX: find_packages() iterates its `exclude` argument, so the
    # previous bare string 'examples' was treated as the single-character
    # patterns 'e','x','a',... and the examples package was never actually
    # excluded. Pass a list of patterns instead.
    packages=find_packages(exclude=['examples', 'examples.*']),
    package_data={'openhtf': ['output/proto/*.proto',
                              'output/web_gui/dist/*',
                              'output/web_gui/dist/css/*',
                              'output/web_gui/dist/js/*',
                              'output/web_gui/dist/img/*',
                              'output/web_gui/*']},
    cmdclass={
        'build_proto': BuildProtoCommand,
        'clean': CleanCommand,
        'test': PyTestCommand,
    },
    install_requires=INSTALL_REQUIRES,
    extras_require={
        'usb_plugs': [
            'libusb1>=1.3.0,<2.0',
            'M2Crypto>=0.22.3,<1.0',
        ],
        'update_units': [
            'xlrd>=1.0.0,<2.0',
        ],
        'serial_collection_plug': [
            'pyserial>=3.3.0,<4.0',
        ],
    },
    setup_requires=[
        'wheel>=0.29.0,<1.0',
    ],
    tests_require=[
        'mock>=2.0.0',
        'pandas>=0.22.0',
        'pytest>=2.9.2',
        'pytest-cov>=2.2.1',
    ],
)
| |
"""
Module for bulk-exporting IsisCB data in EBSCO format.
The strategy here is to favor extensibility/flexibility in defining output
columns, at the expense of performance. The performance hit is probably OK,
since these jobs will be performed asynchronously.
"""
from __future__ import absolute_import
from __future__ import unicode_literals
from builtins import str
from isisdata.models import *
from django.utils.text import slugify
import functools
from . import export_item_count_csv
from django.conf import settings
def _authors_editors_names(obj, extra, config={}):
    """Serialize author/editor ACRelations as a ' // '-joined string."""
    roles = dict(ACRelation.TYPE_CHOICES)
    rows = obj.acrelation_set\
        .filter(type_controlled__in=[ACRelation.EDITOR, ACRelation.AUTHOR])\
        .order_by('data_display_order')\
        .values_list('authority__id', 'authority__name', 'type_controlled')
    parts = ["AuthorityID %s || AuthorityName %s || Role %s" % (auth_id, auth_name, roles[role])
             for auth_id, auth_name, role in rows]
    return u' // '.join(parts)
def _publisher_school(obj, extra, config={}):
    """Serialize the publisher (books/chapters) or school (theses).

    For a chapter, the publisher is taken from the containing book.
    Returns '' for citation types that have neither.
    """
    def _serialize(acrelations):
        # Shared formatting for both the publisher and school branches.
        names = acrelations.values_list('authority__id', 'authority__name')
        return u' // '.join(["AuthorityID %s || AuthorityName %s" % (x[0], x[1]) for x in names])

    if obj.type_controlled in [Citation.BOOK, Citation.CHAPTER]:
        # if we have a chapter we need to get the connected book
        if obj.type_controlled == Citation.CHAPTER:
            book_ccr = obj.ccrelations.filter(type_controlled__in=[CCRelation.INCLUDES_CHAPTER])
            if book_ccr and book_ccr.first():
                obj = book_ccr.first().subject
        # get publisher (removed the unused obj_with_publisher local)
        return _serialize(obj.acrelation_set.filter(type_controlled=ACRelation.PUBLISHER))
    # school
    if obj.type_controlled in [Citation.THESIS]:
        return _serialize(obj.acrelation_set.filter(type_controlled=ACRelation.SCHOOL))
    return ""
def _journal_name(obj, extra, config={}):
    """Name of the first linked PERIODICAL authority, or ''."""
    journal_acrs = obj.acrelation_set.filter(type_controlled=ACRelation.PERIODICAL)
    if journal_acrs.count() == 0:
        return u""
    first_acr = journal_acrs.first()
    # Guard against a dangling relation with no authority attached.
    return str(first_acr.authority.name) if first_acr.authority else u""
def _volume(obj, extra, config={}):
if not hasattr(obj, 'part_details') or obj.part_details is None:
return u""
# ISISCB-1033
if obj.part_details.volume_free_text and obj.part_details.volume_free_text.strip():
return obj.part_details.volume_free_text.strip()
if obj.part_details.volume_begin or obj.part_details.volume_end:
return "-".join([str(x) for x in [_f for _f in [obj.part_details.volume_begin, obj.part_details.volume_end] if _f]])
return ''
def _pages_free_text(obj, extra, config={}):
if not getattr(obj, 'part_details', None):
return u""
if obj.part_details.pages_free_text and obj.part_details.pages_free_text.strip():
return obj.part_details.pages_free_text.strip()
if obj.part_details.page_begin or obj.part_details.page_end:
return "-".join([str(x) for x in [_f for _f in [obj.part_details.page_begin, obj.part_details.page_end] if _f]])
return ''
def _category(obj, extra, config={}):
    """' || '-joined names of the linked CATEGORY authorities."""
    rows = obj.acrelation_set\
        .filter(type_controlled=ACRelation.CATEGORY)\
        .values_list('authority__name')
    return u' || '.join(row[0] for row in rows)
def _language(obj, extra, config={}):
return u' || '.join([o for o in list(obj.language.all().values_list('name', flat=True)) if o is not None])
# check functions
# Status labels returned by the field-level _*_check validators below.
CHECK_WELL_FORMED = "Well-formed"    # value present and standard
CHECK_EMPTY = "Empty"                # value missing or blank
CHECK_NON_STANDARD = "Non-Standard"  # present, but violates conventions
CHECK_BROKEN_LINK = "BrokenLink"     # relation to an inactive/missing authority
def _title_check(obj, extra, config={}):
    """Check that the citation title is present and non-blank.

    Returns CHECK_EMPTY for a missing or whitespace-only title,
    otherwise CHECK_WELL_FORMED.
    """
    # BUG FIX: the original used 'and', which (a) crashed with an
    # AttributeError when title was None (None.strip()) and (b) let
    # whitespace-only titles through as well-formed.
    if not obj.title or not obj.title.strip():
        return CHECK_EMPTY
    return CHECK_WELL_FORMED
def _author_check(obj, extra, config={}):
    """Validate author/editor links: present, active, and person-typed."""
    author_acrs = obj.acrelation_set.filter(
        type_controlled__in=[ACRelation.EDITOR, ACRelation.AUTHOR])
    if not author_acrs:
        return CHECK_EMPTY
    return _check_acrs(author_acrs, [Authority.PERSON])
def _publisher_school_check(obj, extra, config={}):
    """Validate the publisher (books/chapters) or school (theses) links.

    Returns CHECK_EMPTY when no relevant relation exists, otherwise the
    result of the shared institution-type validation.
    """
    acrelations = None
    if obj.type_controlled in [Citation.BOOK, Citation.CHAPTER]:
        # if we have a chapter we need to get the connected book
        if obj.type_controlled == Citation.CHAPTER:
            book_ccr = obj.ccrelations.filter(type_controlled__in=[CCRelation.INCLUDES_CHAPTER])
            if book_ccr and book_ccr.first():
                obj = book_ccr.first().subject
        # get publisher (removed the unused obj_with_publisher local)
        acrelations = obj.acrelation_set.filter(type_controlled=ACRelation.PUBLISHER)
    if obj.type_controlled in [Citation.THESIS]:
        acrelations = obj.acrelation_set.filter(type_controlled=ACRelation.SCHOOL)
    if not acrelations:
        return CHECK_EMPTY
    return _check_acrs(acrelations, [Authority.INSTITUTION])
def _journal_check(obj, extra, config={}):
    """Check the periodical link of *obj*; it should point at a serial publication."""
    periodicals = obj.acrelation_set.filter(type_controlled=ACRelation.PERIODICAL)
    if not periodicals:
        return CHECK_EMPTY
    return _check_acrs(periodicals, [Authority.SERIAL_PUBLICATION])
def _year_check(obj, extra, config={}):
    """Check the publication year; years before 1970 are flagged non-standard."""
    year = obj.publication_date.year
    if not year:
        return CHECK_EMPTY
    return CHECK_NON_STANDARD if year < 1970 else CHECK_WELL_FORMED
def _vol_check(obj, extra, config={}):
    """Check the volume field: missing -> Empty; longer than 9 chars -> Non-Standard."""
    volume = _volume(obj, extra, config)
    if not volume:
        return CHECK_EMPTY
    return CHECK_NON_STANDARD if len(volume) > 9 else CHECK_WELL_FORMED
def _page_check(obj, extra, config={}):
    """Pages present -> Well-formed; otherwise -> Empty."""
    return CHECK_WELL_FORMED if _pages_free_text(obj, extra, config) else CHECK_EMPTY
def _lang_check(obj, extra, config={}):
    """Language present -> Well-formed; otherwise -> Empty."""
    return CHECK_WELL_FORMED if _language(obj, extra, config) else CHECK_EMPTY
def _cat_check(obj, extra, config={}):
    """Check that *obj* has exactly one category, from the SPWC classification system."""
    categories = obj.acrelation_set.filter(type_controlled=ACRelation.CATEGORY)
    if not categories:
        return CHECK_EMPTY
    single_spwc = (categories.count() == 1
                   and categories.first().authority.classification_system == Authority.SPWC)
    return CHECK_WELL_FORMED if single_spwc else CHECK_NON_STANDARD
def _check_acrs(acrs, authority_types):
    """Validate a set of ACRelations against the expected authority types.

    A broken link anywhere wins immediately; otherwise a single authority
    of an unexpected type makes the whole set non-standard.
    """
    saw_unexpected_type = False
    for relation in acrs:
        if is_broken_link(relation):
            return CHECK_BROKEN_LINK
        if relation.authority.type_controlled not in authority_types:
            saw_unexpected_type = True
    return CHECK_NON_STANDARD if saw_unexpected_type else CHECK_WELL_FORMED
def is_broken_link(acr):
    """An ACRelation is broken when it, or its authority, is missing or inactive."""
    if acr.record_status_value != CuratedMixin.ACTIVE:
        return True
    if not acr.authority:
        return True
    return acr.authority.record_status_value != CuratedMixin.ACTIVE
# CSV column definitions: each Column pairs a header label with an extractor
# callable of signature (obj, extra, config) -> value.
object_id = export_item_count_csv.Column(u'Record number', lambda obj, extra, config={}: obj.id)
print_status = export_item_count_csv.Column(u'Print status', export_item_count_csv._print_status)
record_status = export_item_count_csv.Column(u'Record Status', lambda obj, extra, config={}: obj.get_record_status_value_display())
curation_link = export_item_count_csv.Column('Curation Link', export_item_count_csv._curation_link)
public_link = export_item_count_csv.Column('Public Link', export_item_count_csv._public_link)
record_type = export_item_count_csv.Column('Record Type', export_item_count_csv._record_type)
# "Check" columns: data-quality flags computed by the _*_check helpers above
# (Well-formed / Empty / Non-Standard / BrokenLink).
title_check = export_item_count_csv.Column('Title Check', _title_check)
author_check = export_item_count_csv.Column('Auth/Ed Check', _author_check)
publisher_school_check = export_item_count_csv.Column('Pub/Sch Check', _publisher_school_check)
journal_check = export_item_count_csv.Column('Jrnl Check', _journal_check)
year_check = export_item_count_csv.Column('Year Check', _year_check)
vol_check = export_item_count_csv.Column('Vol Check', _vol_check)
page_check = export_item_count_csv.Column('Page Check', _page_check)
lang_check = export_item_count_csv.Column('Lang Check', _lang_check)
cat_check = export_item_count_csv.Column('Cat Check', _cat_check)
# Value columns.
citation_title = export_item_count_csv.Column(u'Title', export_item_count_csv._citation_title, Citation)
authors_editors_names = export_item_count_csv.Column('Author/Editor Names', _authors_editors_names)
publisher_school = export_item_count_csv.Column('Publisher/School', _publisher_school)
journal_name = export_item_count_csv.Column("Journal Name", _journal_name)
year_of_publication = export_item_count_csv.Column(u'Year Published', lambda obj, extra, config={}: obj.publication_date.year)
volume = export_item_count_csv.Column(u"Volume", _volume)
pages_free_text = export_item_count_csv.Column(u"Pages", _pages_free_text)
category = export_item_count_csv.Column(u"Category", _category)
language = export_item_count_csv.Column(u"Language", _language)
tracking_records = export_item_count_csv.Column('Tracking Records', export_item_count_csv._tracking_records)
record_action = export_item_count_csv.Column(u'Record Action', lambda obj, extra, config={}: obj.get_record_action_display())
related_citations = export_item_count_csv.Column('Related Citations', export_item_count_csv._related_citations)
staff_notes = export_item_count_csv.Column(u"Staff Notes", lambda obj, extra, config={}: obj.administrator_notes)
record_history = export_item_count_csv.Column(u"Record History", lambda obj, extra, config={}: obj.record_history)
dataset = export_item_count_csv.Column(u"Dataset", export_item_count_csv._dataset)
created_date = export_item_count_csv.Column(u"Created Date", export_item_count_csv._created_date)
# NOTE(review): the space after the dot below is in the original; harmless.
modified_date = export_item_count_csv.Column(u"Modified Date",export_item_count_csv. _modified_date)
# Ordered list of columns for the citation export; list order is CSV
# column order.
# NOTE(review): record_action is defined above but not included here —
# confirm whether that omission is intentional.
CITATION_COLUMNS = [
    object_id,
    print_status,
    record_status,
    curation_link,
    public_link,
    record_type,
    title_check,
    author_check,
    publisher_school_check,
    journal_check,
    year_check,
    vol_check,
    page_check,
    lang_check,
    cat_check,
    citation_title,
    authors_editors_names,
    publisher_school,
    journal_name,
    year_of_publication,
    volume,
    pages_free_text,
    category,
    language,
    tracking_records,
    related_citations,
    staff_notes,
    record_history,
    dataset,
    created_date,
    modified_date,
]
| |
# Lint as: python3
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for models.realized_volatility."""
from absl.testing import parameterized
import numpy as np
import tensorflow.compat.v2 as tf
import tf_quant_finance as tff
from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import
class RealizedVolatilityTest(parameterized.TestCase, tf.test.TestCase):
  """Tests for tff.models.realized_volatility.

  All tests draw increments with tf.random.stateless_normal (deterministic
  for a fixed `seed`) and compare the library result against an expectation
  computed directly from those increments. Since the sample paths are built
  as exp(cumsum(draws)), the log-increments of each path are exactly the
  draws, with the first sample of each series contributing no increment
  (hence the `draws[:, 1:]` slices below).
  """
  def test_log_vol_calculation(self):
    """Tests the basic calculation of log realized volatility."""
    dtype = tf.float64
    num_series = 200
    num_times = 100
    seed = (1, 2)
    draws = tf.random.stateless_normal((num_series, num_times),
                                       seed=seed,
                                       dtype=dtype)
    sample_paths = tf.math.exp(tf.math.cumsum(draws, axis=-1))
    volatilities = tff.models.realized_volatility(sample_paths)
    # Root of the sum of squared log-increments, per series.
    expected_volatilities = tf.math.sqrt(
        tf.math.reduce_sum(draws[:, 1:]**2, axis=1))
    self.assertAllClose(volatilities, expected_volatilities, 1e-6)
  def test_log_vol_scaling_factor(self):
    """Tests use of the scaling factor in log volatility calculation."""
    dtype = tf.float64
    num_series = 200
    num_times = 100
    seed = (1, 2)
    draws = tf.random.stateless_normal((num_series, num_times),
                                       seed=seed,
                                       dtype=dtype)
    sample_paths = tf.math.exp(tf.math.cumsum(draws, axis=-1))
    # scaling_factors multiplies the result; sqrt(num_times) annualizes to
    # the sampling frequency, so the squared expectation picks up num_times.
    volatilities = tff.models.realized_volatility(
        sample_paths, scaling_factors=np.sqrt(num_times), dtype=dtype)
    expected_volatilities = tf.math.sqrt(
        tf.math.reduce_sum(draws[:, 1:]**2, axis=1) * num_times)
    self.assertAllClose(volatilities, expected_volatilities, 1e-6)
  def test_log_vol_log_scale_sample(self):
    """Tests the treatment of log scale samples in log volatility calc."""
    dtype = tf.float64
    num_series = 200
    num_times = 100
    seed = (1, 2)
    draws = tf.random.stateless_normal((num_series, num_times),
                                       seed=seed,
                                       dtype=dtype)
    # Paths are passed already in log space (no exp), flagged via path_scale.
    sample_paths = tf.math.cumsum(draws, axis=-1)
    volatilities = tff.models.realized_volatility(
        sample_paths, path_scale=tff.models.PathScale.LOG)
    expected_volatilities = tf.math.sqrt(
        tf.math.reduce_sum(draws[:, 1:]**2, axis=1))
    self.assertAllClose(volatilities, expected_volatilities, 1e-6)
  def test_log_vol_non_default_times(self):
    """Tests use of non-uniform sampling times in the volatility calculation."""
    dtype = tf.float64
    num_series = 500
    num_times = 100
    seed = (1, 2)
    time_deltas = tf.random.stateless_uniform((num_series, num_times),
                                              seed=seed,
                                              dtype=dtype)
    draws = tf.random.stateless_normal((num_series, num_times),
                                       seed=seed,
                                       dtype=dtype)
    # Increments are scaled by sqrt(dt) so that dividing by the supplied
    # times inside the library recovers the plain squared draws.
    sample_paths = tf.math.exp(
        tf.math.cumsum(tf.math.sqrt(time_deltas) * draws, axis=-1))
    volatilities = tff.models.realized_volatility(
        sample_paths, times=tf.math.cumsum(time_deltas, axis=1))
    expected_volatilities = tf.math.sqrt(
        tf.math.reduce_sum(draws[:, 1:]**2, axis=1))
    self.assertAllClose(volatilities, expected_volatilities, 1e-6)
  def test_abs_volatility_calculation(self):
    """Tests the basic calculation of abs realized volatility."""
    dtype = tf.float64
    num_series = 200
    num_times = 100
    seed = (1, 2)
    draws = tf.random.stateless_normal((num_series, num_times),
                                       seed=seed,
                                       dtype=dtype)
    sample_paths = tf.math.exp(tf.math.cumsum(draws, axis=-1))
    volatilities = tff.models.realized_volatility(
        sample_paths, returns_type=tff.models.ReturnsType.ABS)
    # ABS volatility: sum of absolute relative price moves.
    diffs = tf.math.abs(tff.math.diff(sample_paths, exclusive=True))
    expected_volatilities = tf.reduce_sum(diffs / sample_paths[:, :-1], axis=1)
    self.assertAllClose(volatilities, expected_volatilities, 1e-6)
  def test_abs_volatility_scaling(self):
    """Tests abs realized volatility calculation with a scaling factor."""
    dtype = tf.float64
    num_series = 200
    num_times = 100
    seed = (1, 2)
    draws = tf.random.stateless_normal((num_series, num_times),
                                       seed=seed,
                                       dtype=dtype)
    sample_paths = tf.math.exp(tf.math.cumsum(draws, axis=-1))
    scaling = 100 * np.sqrt(np.pi / (2 * num_times))
    volatilities = tff.models.realized_volatility(
        sample_paths,
        scaling_factors=scaling,
        returns_type=tff.models.ReturnsType.ABS)
    diffs = tf.math.abs(tff.math.diff(sample_paths, exclusive=True))
    expected_volatilities = tf.reduce_sum(diffs / sample_paths[:, :-1], axis=1)
    self.assertAllClose(volatilities, scaling * expected_volatilities, 1e-6)
  def test_abs_volatility_logspace_samples(self):
    """Tests abs realized volatility for logspace sample paths."""
    dtype = tf.float64
    num_series = 200
    num_times = 100
    seed = (1, 2)
    draws = tf.random.stateless_normal((num_series, num_times),
                                       seed=seed,
                                       dtype=dtype)
    logspace_paths = tf.math.cumsum(draws, axis=-1)
    # The expectation is computed on the exponentiated paths; the library
    # receives the log-space paths plus the PathScale.LOG flag.
    sample_paths = tf.math.exp(logspace_paths)
    volatilities = tff.models.realized_volatility(
        logspace_paths,
        path_scale=tff.models.PathScale.LOG,
        returns_type=tff.models.ReturnsType.ABS)
    diffs = tf.math.abs(tff.math.diff(sample_paths, exclusive=True))
    expected_volatilities = tf.reduce_sum(diffs / sample_paths[:, :-1], axis=1)
    self.assertAllClose(volatilities, expected_volatilities, 1e-6)
  def test_abs_volatility_non_default_times(self):
    """Tests abs realized volatiltity with non-default times."""
    dtype = tf.float64
    num_series = 200
    num_times = 100
    seed = (1, 2)
    draws = tf.random.stateless_normal((num_series, num_times),
                                       seed=seed,
                                       dtype=dtype)
    time_deltas = tf.random.stateless_uniform((num_series, num_times),
                                              seed=seed,
                                              dtype=dtype)
    sample_paths = tf.math.exp(tf.math.cumsum(draws, axis=-1))
    volatilities = tff.models.realized_volatility(
        sample_paths,
        times=tf.math.cumsum(time_deltas, axis=1),
        returns_type=tff.models.ReturnsType.ABS)
    # With explicit times each relative move is additionally divided by its
    # time delta.
    numer = tf.math.abs(tff.math.diff(sample_paths, exclusive=True))
    denom = sample_paths[:, :-1] * time_deltas[:, 1:]
    expected_volatilities = tf.math.reduce_sum(numer / denom, axis=1)
    self.assertAllClose(volatilities, expected_volatilities, 1e-6)
  @parameterized.named_parameters(
      ('Abs', tff.models.ReturnsType.ABS),
      ('Log', tff.models.ReturnsType.LOG)
  )
  def test_non_default_axis(self, returns_type):
    """Tests realized volatility works with non default axis."""
    dtype = tf.float64
    num_series = 200
    num_times = 100
    seed = (1, 2)
    draws = tf.random.stateless_normal((num_series, num_times),
                                       seed=seed,
                                       dtype=dtype)
    sample_paths = tf.math.exp(tf.math.cumsum(draws, axis=-1))
    # Paths are transposed so time runs along axis 0; the expectation is
    # still computed on the untransposed layout.
    volatilities = tff.models.realized_volatility(
        tf.transpose(sample_paths),
        returns_type=returns_type,
        axis=0)
    if returns_type == tff.models.ReturnsType.ABS:
      diffs = tf.math.abs(tff.math.diff(sample_paths, exclusive=True))
      expected_volatilities = tf.reduce_sum(
          diffs / sample_paths[:, :-1], axis=1)
    elif returns_type == tff.models.ReturnsType.LOG:
      expected_volatilities = tf.math.sqrt(
          tf.math.reduce_sum(draws[:, 1:]**2, axis=1))
    self.assertAllClose(volatilities, expected_volatilities, 1e-6)
# Standard TensorFlow test entry point.
if __name__ == '__main__':
  tf.test.main()
| |
from __future__ import absolute_import
from datetime import timedelta
from sentry.snuba.models import QueryDatasets, QuerySubscription, SnubaQueryEventType
from sentry.snuba.subscriptions import (
bulk_delete_snuba_subscriptions,
create_snuba_query,
create_snuba_subscription,
delete_snuba_subscription,
update_snuba_query,
update_snuba_subscription,
)
from sentry.testutils import TestCase
class CreateSnubaQueryTest(TestCase):
    """Checks that create_snuba_query persists all fields correctly."""

    def _check_core_fields(self, snuba_query, dataset, query, aggregate, window, resolution):
        # Datasets are stored by enum value; durations as whole seconds.
        assert snuba_query.dataset == dataset.value
        assert snuba_query.query == query
        assert snuba_query.aggregate == aggregate
        assert snuba_query.time_window == int(window.total_seconds())
        assert snuba_query.resolution == int(resolution.total_seconds())

    def test(self):
        dataset = QueryDatasets.EVENTS
        query = "level:error"
        aggregate = "count()"
        window = timedelta(minutes=10)
        resolution = timedelta(minutes=1)
        snuba_query = create_snuba_query(dataset, query, aggregate, window, resolution, None)
        self._check_core_fields(snuba_query, dataset, query, aggregate, window, resolution)
        assert snuba_query.environment is None
        # ERROR is the implied default event type when none are passed.
        assert set(snuba_query.event_types) == {SnubaQueryEventType.EventType.ERROR}

    def test_environment(self):
        dataset = QueryDatasets.EVENTS
        query = "level:error"
        aggregate = "count()"
        window = timedelta(minutes=10)
        resolution = timedelta(minutes=1)
        snuba_query = create_snuba_query(
            dataset, query, aggregate, window, resolution, self.environment
        )
        self._check_core_fields(snuba_query, dataset, query, aggregate, window, resolution)
        assert snuba_query.environment == self.environment
        assert set(snuba_query.event_types) == {SnubaQueryEventType.EventType.ERROR}

    def test_event_types(self):
        dataset = QueryDatasets.EVENTS
        query = "level:error"
        aggregate = "count()"
        window = timedelta(minutes=10)
        resolution = timedelta(minutes=1)
        snuba_query = create_snuba_query(
            dataset,
            query,
            aggregate,
            window,
            resolution,
            None,
            [SnubaQueryEventType.EventType.DEFAULT],
        )
        self._check_core_fields(snuba_query, dataset, query, aggregate, window, resolution)
        assert snuba_query.environment is None
        # Explicitly passed event types replace the default.
        assert set(snuba_query.event_types) == {SnubaQueryEventType.EventType.DEFAULT}
class CreateSnubaSubscriptionTest(TestCase):
    """Checks subscription rows created by create_snuba_subscription."""

    def _new_query(self, query):
        # All tests use the same query shape; only the query string varies.
        return create_snuba_query(
            QueryDatasets.EVENTS, query, "count()",
            timedelta(minutes=10), timedelta(minutes=1), self.environment,
        )

    def test(self):
        sub_type = "something"
        snuba_query = self._new_query("level:error")
        subscription = create_snuba_subscription(self.project, sub_type, snuba_query)
        # No task has run yet: the row stays CREATING with no remote id.
        assert subscription.status == QuerySubscription.Status.CREATING.value
        assert subscription.project == self.project
        assert subscription.type == sub_type
        assert subscription.subscription_id is None

    def test_with_task(self):
        with self.tasks():
            sub_type = "something"
            snuba_query = self._new_query("level:error")
            subscription = create_snuba_subscription(self.project, sub_type, snuba_query)
            # The eagerly-run task activates the row and assigns a remote id.
            subscription = QuerySubscription.objects.get(id=subscription.id)
            assert subscription.status == QuerySubscription.Status.ACTIVE.value
            assert subscription.project == self.project
            assert subscription.type == sub_type
            assert subscription.subscription_id is not None

    def test_translated_query(self):
        sub_type = "something"
        with self.tasks():
            snuba_query = self._new_query("event.type:error")
            subscription = create_snuba_subscription(self.project, sub_type, snuba_query)
            subscription = QuerySubscription.objects.get(id=subscription.id)
            assert subscription.status == QuerySubscription.Status.ACTIVE.value
            assert subscription.project == self.project
            assert subscription.type == sub_type
            assert subscription.subscription_id is not None
class UpdateSnubaQueryTest(TestCase):
    """Tests for update_snuba_query: field updates, environment changes,
    and propagation of updates to attached subscriptions."""
    def test(self):
        # Update every field at once, then narrow the event types and check
        # the removed type is actually dropped.
        snuba_query = create_snuba_query(
            QueryDatasets.EVENTS,
            "hello",
            "count_unique(tags[sentry:user])",
            timedelta(minutes=100),
            timedelta(minutes=2),
            self.environment,
            [SnubaQueryEventType.EventType.ERROR],
        )
        dataset = QueryDatasets.TRANSACTIONS
        query = "level:error"
        aggregate = "count()"
        time_window = timedelta(minutes=10)
        resolution = timedelta(minutes=1)
        event_types = [SnubaQueryEventType.EventType.ERROR, SnubaQueryEventType.EventType.DEFAULT]
        update_snuba_query(
            snuba_query, dataset, query, aggregate, time_window, resolution, None, event_types,
        )
        # Durations are persisted as whole seconds; datasets by enum value.
        assert snuba_query.dataset == dataset.value
        assert snuba_query.query == query
        assert snuba_query.aggregate == aggregate
        assert snuba_query.time_window == int(time_window.total_seconds())
        assert snuba_query.resolution == int(resolution.total_seconds())
        assert snuba_query.environment is None
        assert set(snuba_query.event_types) == set(event_types)
        # Second update removes ERROR; only DEFAULT must remain.
        event_types = [SnubaQueryEventType.EventType.DEFAULT]
        update_snuba_query(
            snuba_query, dataset, query, aggregate, time_window, resolution, None, event_types,
        )
        assert set(snuba_query.event_types) == set(event_types)
    def test_environment(self):
        # Swapping the environment must not disturb the event types
        # (None is passed for event_types here).
        snuba_query = create_snuba_query(
            QueryDatasets.EVENTS,
            "hello",
            "count_unique(tags[sentry:user])",
            timedelta(minutes=100),
            timedelta(minutes=2),
            self.environment,
        )
        new_env = self.create_environment()
        dataset = QueryDatasets.TRANSACTIONS
        query = "level:error"
        aggregate = "count()"
        time_window = timedelta(minutes=10)
        resolution = timedelta(minutes=1)
        event_types = snuba_query.event_types
        update_snuba_query(
            snuba_query, dataset, query, aggregate, time_window, resolution, new_env, None
        )
        assert snuba_query.dataset == dataset.value
        assert snuba_query.query == query
        assert snuba_query.aggregate == aggregate
        assert snuba_query.time_window == int(time_window.total_seconds())
        assert snuba_query.resolution == int(resolution.total_seconds())
        assert snuba_query.environment == new_env
        assert set(snuba_query.event_types) == set(event_types)
    def test_subscriptions(self):
        # Updating a query must flip its attached subscriptions to UPDATING
        # so the remote side gets refreshed.
        snuba_query = create_snuba_query(
            QueryDatasets.EVENTS,
            "hello",
            "count_unique(tags[sentry:user])",
            timedelta(minutes=100),
            timedelta(minutes=2),
            self.environment,
        )
        sub = create_snuba_subscription(self.project, "hi", snuba_query)
        new_env = self.create_environment()
        dataset = QueryDatasets.TRANSACTIONS
        query = "level:error"
        aggregate = "count()"
        time_window = timedelta(minutes=10)
        resolution = timedelta(minutes=1)
        update_snuba_query(
            snuba_query, dataset, query, aggregate, time_window, resolution, new_env, None
        )
        sub.refresh_from_db()
        assert sub.snuba_query == snuba_query
        assert sub.status == QuerySubscription.Status.UPDATING.value
class UpdateSnubaSubscriptionTest(TestCase):
    """Tests for update_snuba_subscription, with and without the task runner."""
    def test(self):
        # Create with tasks enabled (so a remote subscription id exists),
        # then mutate the query model directly and call the update without
        # tasks: the row should be marked UPDATING and keep its remote id.
        old_dataset = QueryDatasets.EVENTS
        with self.tasks():
            snuba_query = create_snuba_query(
                old_dataset,
                "level:error",
                "count()",
                timedelta(minutes=10),
                timedelta(minutes=1),
                None,
            )
            subscription = create_snuba_subscription(self.project, "something", snuba_query)
        dataset = QueryDatasets.TRANSACTIONS
        query = "level:warning"
        aggregate = "count_unique(tags[sentry:user])"
        time_window = timedelta(minutes=20)
        resolution = timedelta(minutes=2)
        # Refetch: creation happened in a task.
        subscription = QuerySubscription.objects.get(id=subscription.id)
        subscription_id = subscription.subscription_id
        snuba_query.update(
            dataset=dataset.value,
            query=query,
            time_window=int(time_window.total_seconds()),
            resolution=int(resolution.total_seconds()),
            environment=self.environment,
            aggregate=aggregate,
        )
        assert subscription_id is not None
        update_snuba_subscription(subscription, old_dataset)
        assert subscription.status == QuerySubscription.Status.UPDATING.value
        assert subscription.subscription_id == subscription_id
        assert subscription.snuba_query.dataset == dataset.value
        assert subscription.snuba_query.query == query
        assert subscription.snuba_query.aggregate == aggregate
        assert subscription.snuba_query.time_window == int(time_window.total_seconds())
        assert subscription.snuba_query.resolution == int(resolution.total_seconds())
    def test_with_task(self):
        # With tasks running eagerly the update completes end-to-end: the
        # row returns to ACTIVE and receives a NEW remote subscription id.
        with self.tasks():
            old_dataset = QueryDatasets.EVENTS
            snuba_query = create_snuba_query(
                old_dataset,
                "level:error",
                "count()",
                timedelta(minutes=10),
                timedelta(minutes=1),
                None,
            )
            subscription = create_snuba_subscription(self.project, "something", snuba_query)
            dataset = QueryDatasets.TRANSACTIONS
            query = "level:warning"
            aggregate = "count_unique(tags[sentry:user])"
            time_window = timedelta(minutes=20)
            resolution = timedelta(minutes=2)
            subscription = QuerySubscription.objects.get(id=subscription.id)
            subscription_id = subscription.subscription_id
            assert subscription_id is not None
            snuba_query.update(
                dataset=dataset.value,
                query=query,
                time_window=int(time_window.total_seconds()),
                resolution=int(resolution.total_seconds()),
                environment=self.environment,
                aggregate=aggregate,
            )
            update_snuba_subscription(subscription, old_dataset)
            subscription = QuerySubscription.objects.get(id=subscription.id)
            assert subscription.status == QuerySubscription.Status.ACTIVE.value
            assert subscription.subscription_id is not None
            assert subscription.subscription_id != subscription_id
class BulkDeleteSnubaSubscriptionTest(TestCase):
    """bulk_delete_snuba_subscriptions should flip every row to DELETING."""

    def test(self):
        with self.tasks():
            snuba_query = create_snuba_query(
                QueryDatasets.EVENTS,
                "level:error",
                "count()",
                timedelta(minutes=10),
                timedelta(minutes=1),
                None,
            )
            subscription = create_snuba_subscription(self.project, "something", snuba_query)
            snuba_query = create_snuba_query(
                QueryDatasets.EVENTS,
                "level:error",
                "count()",
                timedelta(minutes=10),
                timedelta(minutes=1),
                None,
            )
            other_subscription = create_snuba_subscription(
                self.create_project(organization=self.organization), "something", snuba_query
            )
            subscriptions = [subscription, other_subscription]
            row_ids = [sub.id for sub in subscriptions]
            bulk_delete_snuba_subscriptions(subscriptions)
            # Both rows are marked DELETING but keep their remote ids until
            # the async teardown completes.
            deleting = QuerySubscription.objects.filter(
                id__in=row_ids,
                status=QuerySubscription.Status.DELETING.value,
                subscription_id__isnull=False,
            )
            assert deleting.count() == 2
class DeleteSnubaSubscriptionTest(TestCase):
    """Checks the two phases of subscription deletion."""

    def test(self):
        with self.tasks():
            snuba_query = create_snuba_query(
                QueryDatasets.EVENTS,
                "level:error",
                "count()",
                timedelta(minutes=10),
                timedelta(minutes=1),
                None,
            )
            subscription = create_snuba_subscription(self.project, "something", snuba_query)
            # Refetch since snuba creation happens in a task
            subscription = QuerySubscription.objects.get(id=subscription.id)
            remote_id = subscription.subscription_id
            assert remote_id is not None
            delete_snuba_subscription(subscription)
            # Phase one: the row is only marked DELETING; the remote id is
            # kept so the teardown task can find the snuba subscription.
            assert subscription.status == QuerySubscription.Status.DELETING.value
            assert subscription.subscription_id == remote_id

    def test_with_task(self):
        with self.tasks():
            snuba_query = create_snuba_query(
                QueryDatasets.EVENTS,
                "level:error",
                "count()",
                timedelta(minutes=10),
                timedelta(minutes=1),
                None,
            )
            subscription = create_snuba_subscription(self.project, "something", snuba_query)
            row_id = subscription.id
            delete_snuba_subscription(subscription)
            # Phase two: with tasks running eagerly the row is gone entirely.
            assert not QuerySubscription.objects.filter(id=row_id).exists()
| |
# Brython regression tests: each numbered snippet reproduces a previously
# broken construct. The literal syntax IS the test subject, so the code
# must not be "cleaned up" — only comments are added here.
# issue 3
matrix = ['%s%d'%(a,n) for a in 'abc' for n in [1,2,3]]
assert 'a1' in matrix
# issue 5
range_tuple = tuple(range(7))
assert range_tuple == (0,1,2,3,4,5,6)
# issue 6: comprehension with a condition over a zip iterator
map_tuples = zip( 'abc', [1,2,3])
map_array = ['%s%d'%(l, n) for l, n in map_tuples
                if '%s%d'%(l, n) in 'a1b2']
assert 'a1' in map_array, 'incorrect tuple %s'%map_array
# issue 7: function-local names visible inside nested comprehensions
def fail_local():
    local_abc = 'abc'
    letnum = [[letter+str(num) for letter in local_abc]
                 for num in range(3)]
    return letnum
local_fail = fail_local()
assert ['a0', 'b0', 'c0'] in local_fail, 'failed local %s'%local_fail
def fail_local1():
    local_abc = 'abc'
    letnum = dict((num,[letter+str(num) for letter in local_abc]) for num in range(3))
    return letnum
fail_local1()
# issue 14
a = {1:1,2:4}
assert a.pop(1) == 1, 'Error in pop'
assert a=={2:4}
# issue 15: default argument captured by a lambda
def no_lambda(fail_arg):
    lbd = lambda arg= fail_arg: arg
    return [i for i in lbd()]
assert no_lambda([1,2]) == [1,2], 'Fail lambda namespace'
# issue 16: list.remove on objects without __eq__ (identity comparison)
class Noeq:
    def __init__(self,oid):
        self.oid = oid
ne1, ne2 = Noeq(0),Noeq(1)
fail_rmv = [ne1, ne2]
fail_rmv.remove(ne1)
assert fail_rmv == [ne2], 'Fail remove obj from list'
# issue 17: dict comprehension referencing self inside __init__
class No_dic_comp:
    def __init__(self,oid):
        self.oid = oid
        self.ldic = {i: self.oid for i in 'ab'}
ndc = No_dic_comp(0)
# NOTE(review): the assert message `ne1` reuses an unrelated object from
# issue 16; harmless, but probably sloppy.
assert ndc.ldic['a'] == 0, ne1
# issue 18: simple single inheritance
class Base:
    pass
class No_inherit(Base):
    def __init__(self,oid,ab):
        self.oid , self.ab= oid, ab
ndc = No_inherit(0,'ab')
assert isinstance(ndc,No_inherit),'Not instance %s'%ndc
assert ndc.oid == 0, ndc.oid
# issue 19: mutating a class attribute from __init__
class No_static:
    OBJID = 0
    def __init__(self,oid):
        self.oid = oid
        self.gid = No_static.OBJID
        No_static.OBJID += 1
gids = (No_static(0).gid,No_static(1).gid)
assert gids == (0,1), 'Fail incrementing static (%d,%d)'%gids
# issue 20
assert 'fail slice string!'[5:-1] == 'slice string', 'Failure in string slicing'
#issue 21
_s=' abc '
assert _s.rjust(15, 'b') == 'bbbbbb abc '
# issue 23 : json
import json
original = [[1,1],{'1':1}]
pyjson = str(original).replace("'",'"').replace(' ','')
jsoned=json.dumps(original).replace(' ','')
pythoned=json.loads(jsoned)
assert original == pythoned, 'python %s is not json %s'%(original, pythoned)
assert jsoned == pyjson, 'json %s is not python %s'%(jsoned, pyjson)
x = """{
  "menu": {
    "id": "file",
    "value": "File",
    "popup": {
      "menuitem": [
        { "value": "New", "onclick": "CreateNewDoc()" },
        { "value": "Open", "onclick": "OpenDoc()" },
        { "value": "Close", "onclick": "CloseDoc()" }
      ]
    }
  }
}"""
y = json.loads(x)
assert y["menu"]["value"]=="File"
assert y["menu"]["popup"]["menuitem"][1]["value"]=="Open"
# issue 24: exec() binding a name (exec_zero) in module scope
import math
eval_zero = eval('math.sin(0)')
exec('exec_zero=math.sin(0)')
assert eval_zero == exec_zero, 'no math in exe or eval for sin(0) = %f'%math.sin(0)
# issue 29: same, with %-substituted source strings
import math
eval_zero = eval('math.sin(%d)'%0)
#eval_zero = 0
exec('exec_zero=math.sin(%d)'%0)
assert eval_zero == exec_zero, ' exe or eval for fails string subs = %f'%math.sin(0)
# issue 30: the name `delete` reused as function, method and variable
def delete(delete):
    return delete
class Delete:
    def delete(self):
        delete = 0
        return delete
delete = delete(Delete().delete())
assert delete == 0, 'name delete cannot be used %s'%delete
# issue 31: subclass __init__ must not recurse into the parent's
class Base:
    def __init__(self):
        global SEED
        self.value = SEED = SEED + 1
class Inherit(Base):
    def __init__(self):
        global SEED
        self.value = SEED = SEED + 1
one = (Inherit().value)
assert one == 1, 'Init recursed: %d'%one
class myclass:
@property
def getx(self):
return 5
c=myclass()
assert c.getx == 5
#issue 45
assert 2**2 == 4
assert 2.0**2 == 4.0
assert 2**2.0 == 4.0
assert 2.0**2.0 == 4.0
#also do 3**2 since 2**2 == 2*2
assert 3**2 == 9
assert 3.0**2 == 9.0
assert 3**2.0 == 9.0
assert 3.0**2.0 == 9.0
# issue 55
assert 1 <= 3 <= 5
assert not 1 <= (3+3) <= 5
# issue 70
class Dummy:
def __init__(self, foo):
self.foo = foo
dummy = Dummy(3)
assert -dummy.foo == -3
# issue 71
def rect(x,y,width, height):
pass
assert [rect(x=0, y=0, width=10, height=10) for i in range(2)], 'error in list'
# issue 75
assert {0:42}[0] == 42
# issue 80
def twice(n):
yield n
yield n
f = twice(2)
assert next(f) == 2
assert next(f) == 2
# issue #81
class foo:
def __init__(self,x):
self.x = x
def __ior__(self,z):
self.x = 33
return self
def __str__(self):
return self.x
X = foo(4)
X |= 1
assert X.x == 33
# issue 85
try:
exec("def foo(x, x, x=1, x=2):\n pass")
# -> does not raise SyntaxError
raise Exception('should have raised SyntaxError')
except SyntaxError:
pass
def foo(x, y, verbose=False):
pass
try:
foo(1, 2, 3, verbose=True)
raise Exception('should have raised TypeError')
except TypeError:
pass
try:
eval("foo(1, 2, 3, verbose=True, verbose=False)")
raise Exception('should have raised SyntaxError')
except SyntaxError:
pass
# issue #86: locals() with *args and keyword-only defaults
def foo(x, *args, verbose=False):
    assert locals()=={'verbose':False,'x':1,'args':(2,)}
foo(1, 2)
# issue #87
def foo(*args):
    assert isinstance(args,tuple)
foo(1,2)
# issue 95 : trailing comma in dict or set literals
a = {1,2,}
assert a == {1,2}
b = {'a':1,'b':2,}
assert b == {'a':1,'b':2}
#issue 101 - new style classes are the default
class MyClass(object):
    def __init__(self, s):
        self.string=s
class MyClass1:
    def __init__(self, s):
        self.string=s
_m=MyClass("abc")
_m1=MyClass1("abc")
#assert dir(_m) == dir(_m1) <=== fix me, these should be equal
assert _m.string==_m1.string
# issue 112: class body sees enclosing module names and its own names
x=0
class foo:
    y = 1
    z = [x,y]
assert foo().z == [0,1]
#issue 114
import random
_a=random.randrange(10)
assert 0 <= _a < 10
_a=random.randrange(1,10)
assert 1 <= _a < 10
_a=random.randrange(1,10,2)
assert _a in (1,3,5,7,9)
# issue 118: exponent notation variants
assert 1.27e+5 == 127000.0
assert 1.27E+5 == 127000.0
assert 1.27e+5 == 127000.0
assert 1.27E5 == 127000.0
# issue 122
class Cl(object):
    def __init__(self):
        self._x = None
    @property
    def x(self):
        return self._x
    @x.setter
    def x(self, value):
        self._x = value
# NOTE(review): `Cl` (the class itself) is bound here, not an instance, so
# the assignment below replaces the property on the class rather than going
# through the setter. Possibly `Cl()` was intended — confirm upstream; the
# assert passes either way.
my_cl = Cl
my_cl.x = 123
assert my_cl.x==123
# issue 125: list.clear / list.copy
a = [1,2]
a.clear()
assert a == []
a = [3,6,'a']
c = a
b = a.copy()
assert b == a
b.append(4)
a.append(99)
assert b != a
assert c == a
# issue 126: escaped quotes inside various string literal forms
assert ''' \'inner quote\'''', 'fails inner quote'
assert " \'inner quote\'", 'fails inner quote'
assert ' \'inner quote\'', 'fails inner quote'
assert """ \"inner quote\"""", 'fails inner quote'
assert " \"inner quote\"", 'fails inner quote'
# issue 128: overridden methods must dispatch on the runtime type, even
# when called from the parent's __init__; LIST records (instance, level)
# pairs in call order.
LIST = []
class Parent:
    def __init__(self):
        self.level = self.get_level()
        self.inherited()
    def get_level(self): return 0
    def inherited(self):
        self.override()
        return self
    def override(self):
        LIST.append((self, self.level))
        return self
class Child(Parent):
    def get_level(self): return 1
    def override(self):
        LIST.append((self, self.level))
        return self
class Sibling(Parent):
    def __init__(self):
        self.level = self.get_level()
        Parent.__init__(self)
    def get_level(self): return 1
    def override(self):
        LIST.append((self, self.level))
        return self
parent = Parent()
#assert str(parent)=='<Parent object>',str(parent)
child = Child()
#assert str(child)=='<Child object>'
sibil = Sibling()
#assert str(sibil)== '<Sibling object>'
given = sibil.override()
assert sibil.level==1
assert given.level==1
assert [l[1] for l in LIST]==[0,1,1,1]
assert parent == parent.override()
assert sibil == given
# issue 129: `return[...]` with no space before the bracket
def rect(x,y,width, height):
    pass
def comp(x,y,width, height):
    return[rect(x=x, y=y, width=10, height=10) for i in range(2)]
assert comp(1,2,3,4), 'error in list'
# issue 132
a = 1
if a is not None and not isinstance(a,int):
    raise AssertionError
# issue 134: for/else and while/else — else runs only without break
run_else = False
for i in range(4):
    pass
else:
    run_else = True
assert run_else
run_else = False
assert not run_else
for i in range(10):
    if i>7:
        break
else:
    run_else = True
assert not run_else
run_else = False
n=10
while n>5:
    n -= 1
else:
    run_else = True
assert run_else
# issue 135: pow via argument unpacking
assert pow(*(2,3)) == 8
assert pow(*(2,-3)) == 0.125
assert pow(*(-2,3)) == -8
assert pow(*(-2,-3)) == -0.125
# issue 137
assert int('-10') == -10
# issue 139
try:
    d = []
    d[3]
except (IndexError,TypeError) as err:
    pass # just check that there is no SyntaxError
# issue 157 : check that the 2nd line doesn't raise a SyntaxError
y=1
a=(1/3,-y/4)
# issue 158: instance attribute holding another user-defined instance
class A:
    def __init__(self,val):
        self.a=val
class B:
    def __init__(self,val):
        self.b=val
        self.c=A(val)
b=B(2)
#assert str(b)=='<B object>'
# issue #166: three-argument pow and its type errors
assert pow(2,3,4) == 0
assert pow(2,3,3) == 2
try:
    pow(2.2,3,4)
    raise Exception('should have raised TypeError')
except TypeError:
    pass
try:
    pow('2',3)
    raise Exception('should have raised TypeError')
except TypeError:
    pass
# issue 170: immediately-invoked lambda with *args
assert (lambda *args:args)(3,4,5)==(3, 4, 5)
# issue 173: closed-over function used inside a nested comprehension
def gofun(fun):
    def ifun():
        funi = fun
        return [fun(i) for i in (0,1)]
    return ifun()
def pr(x):
    return x
zero_one = gofun(pr)
assert zero_one == [0, 1], 'Expected [0, 1] but got: %s'% zero_one
# issue 174: %-formatting with a one-element tuple
assert '%d' % (1,) == '1'
# issue 175: functions without return yield None
def foo():
    pass
r = foo()
assert r is None
# issue 177: __new__ with keyword-only argument on an int subclass
class _ParameterKind(int):
    def __new__(self, *args, name):
        obj = int.__new__(self, *args)
        obj._name = name
        return obj
    def __str__(self):
        return self._name
    def __repr__(self):
        return '<_ParameterKind: {!r}>'.format(self._name)
_POSITIONAL_ONLY = _ParameterKind(0, name='POSITIONAL_ONLY')
# issue 198
assert 2 << 16 != 4
assert 2 << 16 == 131072
# issue 208: try/except/else without an exception
try:
    spam = 0
except:
    spam = 1
else:
    spam = 2
assert spam == 2
# issue 209: falsy conditions in conditional expressions
assert "ko" if None else "ok"=="ok"
assert ("ko" if None else "ok")=="ok"
assert ("ko" if [] else "ok")=="ok"
assert ("ko" if {} else "ok")=="ok"
assert ("ko" if False else "ok")=="ok"
# issue 210: class attribute bound to a bound method of another instance
class myRepr:
    def repr(self, a):
        return a
class myclass:
    _repr=myRepr()
    repr= _repr.repr
    def myfunc(self):
        return self.repr('test')
_m=myclass()
assert _m.myfunc()=='test'
# issue 212
class Test:
def sound(self, a=""):
return "moo: " + a
class Test2(Test):
def sound(self):
return super().sound("apple")
assert Test2().sound()=='moo: apple'
# issue 213
import math
assert str(math.atan2(0.,-0.)).startswith('3.14')
# issue 214
n = 0
assert 1+n if n else 0 == 0
n = 7
assert 1 + n*n if n else 0 == 50
# issue 217
def myfunc(code, code1):
code.append('1')
code.append('2')
code.append('3')
a=[]
b=0
myfunc(a, b)
assert a==['1', '2', '3']
# issue 218
_d = { 0: b'a', 1: b'b'}
assert [v[0] for v in _d.items()] == [0, 1]
# issue 219
y=3
w=2
w2=1
y, w, w2 = -y, -w, -w2
assert y == -3
assert w == -2
assert w2 == -1
#issue 220
assert '{} {} {}'.format(1, 2, 3) == '1 2 3'
# issue 222
atuple = ()
assert not type(atuple) != type(atuple), "type of tuple is different of itself"
# bug in assignment of attributes or subscriptions to exec() or eval()
x={}
x['a'] = eval("2")
assert x=={'a':2}
# issue 224
assert '{0}, {1}, {2}'.format('a', 'b', 'c') == 'a, b, c'
# issue 225
a = dict()
a[1,2]=3
assert a[1,2] == 3
# issue 226
b = { -20 : -1, -21 : 2, 2 : 0 }
# issue 227
b = [ ((-20, 2), 1), ((-21, 1), 2), ((-2,0), 2)]
assert sorted(b) == [((-21, 1), 2), ((-20, 2), 1), ((-2, 0), 2)]
# bug in string format
assert 'Coordinates: {latitude}, {b}'.format(latitude='2', b='4')=='Coordinates: 2, 4'
# check that trailing comma is supported in function calls
def foo(a=2,):
print(a)
# issue 236 : packing
# starred targets in unpacking assignments
a,*b,c = [1, 2, 3, 4, 5]
assert a==1
assert b==[2,3,4]
assert c==5
*a,b = [1, 2, 3, 4, 5]
assert a==[1, 2, 3, 4]
assert b==5
a,b,*c = [1, 2, 3, 4, 5, 6]
assert a==1
assert b==2
assert c==[3, 4, 5, 6]
# issue 237
# bare tuples (including ones with unary/binary expressions) as for-targets
res = []
for i in -1,0,1:
    res.append(i)
y = 2
for i in -3*y,2*y:
    res.append(i)
assert res==[-1, 0, 1, -6, 4]
# issue 238
import operator
# example used in the docs
inventory = [('apple', 3), ('banana', 2), ('pear', 5), ('orange', 1)]
getcount = operator.itemgetter(1)
assert list(map(getcount, inventory)) == [3, 2, 5, 1]
assert sorted(inventory, key=getcount) == [('orange', 1), ('banana', 2), ('apple', 3), ('pear', 5)]
# issue 239
assert '' in ''
# issue 240
d = dict.fromkeys(['a','b'],3)
assert d=={'a':3, 'b':3}
# augmented assignement in class body
class A:
    x = 8
    x += 1
assert A().x==9
# issue 243
# property setter inherited and re-overridden via Dad.prop.setter
class Dad:
    @property
    def prop(self):
        return 1
    @prop.setter
    def prop(self, val):
        print(200 + val)
x = []
class Son(Dad):
    @Dad.prop.setter
    def prop(self, val):
        x.append(400 +val)
son = Son()
son.prop = 4
assert x ==[404]
# issue 247
zlist = [0]
assert zlist == [1] or zlist == [0]
# issue 264
import itertools
assert list(itertools.permutations([1,2,3])) == [(1, 2, 3), (1, 3, 2), (2, 1, 3), (2, 3, 1), (3, 1, 2), (3, 2, 1)]
# issue 268
from base64 import b64encode, b64decode
assert b64encode(b'decode error') == b'ZGVjb2RlIGVycm9y'
assert b64decode(b'ZGVjb2RlIGVycm9y') == b'decode error'
# issue 272
assert round(-0.9) == -1
# issue 275
text='a'
assert text.split(';',1) == ['a']
# issue 276
import re
pat=re.compile(r"(\<[a-z0-9A-Z\-]+\>)")
s="this is <fun> is <it> not"
result=re.search(pat, s)
assert result.groups() == ('<fun>',)
# issue 279
# check that this doesn't raise a SyntaxError
data = [{'name': 'Data {}'.format(i+1), # comment
    'x': i+1} for i in range(10)] # x value
# issue 282
# int() with surrounding whitespace, numeric input and explicit bases
assert int('1') == 1
assert int(' 1 ') == 1
assert int('1 ') == 1
assert int(' 1') == 1
assert int(1) == 1
assert int(1.1) == 1
assert int('011', 2) == 3
assert int('011', 8) == 9
#issue 283
try:
    int("")
except ValueError:
    pass
try:
    int(" ")
except ValueError:
    pass
#issue 284
class CustomStr(str): pass
_a=CustomStr('')
assert isinstance(_a, str)
# issue 285
class Foo3(int):
    def __int__(self):
        return self
assert int(Foo3()) == 0
#issue 286
try:
    float('')
except ValueError:
    pass
#issue 287
# max()/min() argument validation plus the keyword-only key/default args
try:
    max([])
except ValueError:
    pass
try:
    max(key = lambda x:x)
except TypeError:
    pass
try:
    max(default = 'k')
except TypeError:
    pass
assert max([], default = 'k') == 'k'
assert max([1,2,3], default = 'k') == 3
try:
    max(1,2,3, default = 'k')
except TypeError:
    pass
assert max(1,2,3) == 3
assert max([1,2,3]) == 3
assert max(1,2,3, key = lambda x: -x) == 1
assert max([1,2,3], key = lambda x: -x, default = 'k') == 1
assert min(1,2,3) == 1
assert min([1,2,3]) == 1
assert min(1,2,3, key = lambda x: -x) == 3
# issue 288
# str() of str/bytes/bytearray subclasses inside a list
class CustomStr(str): pass
class CustomBytes(bytes): pass
class CustomByteArray(bytearray): pass
values = [b'100',
          bytearray(b'100'),
          CustomStr('100'),
          CustomBytes(b'100'),
          CustomByteArray(b'100')]
assert str(values)== "[b'100', bytearray(b'100'), '100', b'100', bytearray(b'100')]"
# issue 294
assert [1][::-1] == [1]
# issue 295
import datetime
assert datetime.datetime.strptime('2014-01-01','%Y-%m-%d') == datetime.datetime(2014,1,1,0,0)
print('passed all tests')
| |
import datetime
from django import http
from django.shortcuts import get_object_or_404, redirect, render
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from django.views.decorators.http import require_POST
from django.views.decorators.csrf import csrf_exempt
from django.utils.timezone import utc, make_naive
from django.db import transaction
from django.conf import settings
import requests
import pytz
from funfactory.urlresolvers import reverse
from slugify import slugify
from jsonview.decorators import json_view
from airmozilla.main.models import (
SuggestedEvent,
Event,
Channel,
SuggestedEventComment,
Location
)
from airmozilla.uploads.models import Upload
from airmozilla.comments.models import SuggestedDiscussion
from airmozilla.base.utils import tz_apply
from . import utils
from . import forms
from . import sending
def _increment_slug_if_exists(slug):
    """Return a slug unique across Event and SuggestedEvent.

    If `slug` is already taken (case-insensitive match on either model),
    keep appending '-2', '-3', ... to the original value and return the
    first free candidate.
    """
    def _taken(candidate):
        # Truthy when any existing (real or suggested) event uses the slug.
        return (
            Event.objects.filter(slug__iexact=candidate)
            or
            SuggestedEvent.objects.filter(slug__iexact=candidate)
        )
    candidate = slug
    suffix = 2
    while _taken(candidate):
        candidate = slug + '-%s' % suffix
        suffix += 1
    return candidate
@login_required
@transaction.commit_on_success
def start(request):
    """First step of the "suggest an event" wizard.

    POST: validate the StartForm, create the SuggestedEvent (with a
    unique slug and discussion enabled by default) and redirect to the
    next step, which depends on the chosen event type:
    upcoming -> description, popcorn -> popcorn URL, otherwise -> upload.
    GET: show the start form, optionally pre-selecting an existing
    upload passed as ?upload=<pk>, plus the user's previous suggestions.
    """
    data = {}
    if request.method == 'POST':
        form = forms.StartForm(request.POST, user=request.user)
        if form.is_valid():
            slug = slugify(form.cleaned_data['title']).lower()
            slug = _increment_slug_if_exists(slug)
            upcoming = False
            event_type = form.cleaned_data['event_type']
            if event_type == 'upcoming':
                upcoming = True
            event = SuggestedEvent.objects.create(
                user=request.user,
                title=form.cleaned_data['title'],
                upcoming=upcoming,
                slug=slug,
            )
            # Enable discussion on by default.
            # https://bugzilla.mozilla.org/show_bug.cgi?id=1135822
            SuggestedDiscussion.objects.create(
                event=event,
                enabled=True,
                notify_all=True,
            )
            if not event.upcoming:
                # Pre-recorded events get the default location/timezone
                # and "now" as their start time.
                location, __ = Location.objects.get_or_create(
                    name=settings.DEFAULT_PRERECORDED_LOCATION[0],
                    timezone=settings.DEFAULT_PRERECORDED_LOCATION[1]
                )
                event.location = location
                now = datetime.datetime.utcnow().replace(tzinfo=utc)
                event.start_time = now
                event.save()
            event.channels.add(
                Channel.objects.get(slug=settings.DEFAULT_CHANNEL_SLUG)
            )
            # XXX use next_url() instead?
            if event.upcoming:
                url = reverse('suggest:description', args=(event.pk,))
            elif event_type == 'popcorn':
                # this is a hack but it works well
                event.popcorn_url = 'https://'
                event.save()
                url = reverse('suggest:popcorn', args=(event.pk,))
            else:
                # Pre-recorded: remember the event in the session and go
                # straight to the upload page.
                request.session['active_suggested_event'] = event.pk
                if request.session.get('active_event'):
                    del request.session['active_event']
                url = reverse('uploads:upload')
            return redirect(url)
    else:
        initial = {
            'event_type': 'upcoming'
        }
        if request.GET.get('upload'):
            try:
                upload = Upload.objects.get(
                    pk=request.GET['upload'],
                    user=request.user
                )
                # is that upload used by some other suggested event
                # in progress?
                try:
                    suggested_event = SuggestedEvent.objects.get(
                        upload=upload
                    )
                    # that's bad!
                    messages.warning(
                        request,
                        'The file upload you selected belongs to a requested '
                        'event with the title: %s' % suggested_event.title
                    )
                    return redirect('uploads:home')
                except SuggestedEvent.DoesNotExist:
                    pass
                initial['event_type'] = 'pre-recorded'
                request.session['active_upload'] = upload.pk
            except Upload.DoesNotExist:
                # Unknown/foreign upload pk: silently fall back to defaults.
                pass
        form = forms.StartForm(user=request.user, initial=initial)
    data['suggestions'] = (
        SuggestedEvent.objects
        .filter(user=request.user)
        .order_by('modified')
    )
    data['form'] = form
    data['event'] = None
    return render(request, 'suggest/start.html', data)
@login_required
@transaction.commit_on_success
def title(request, id):
    """Edit the title of a suggested event (owner only).

    On a valid POST, save and continue to the description step;
    otherwise (GET or invalid POST) render the title form.
    """
    event = get_object_or_404(SuggestedEvent, pk=id)
    if event.user != request.user:
        return http.HttpResponseBadRequest('Not your event')
    if request.method != 'POST':
        form = forms.TitleForm(instance=event)
    else:
        form = forms.TitleForm(request.POST, instance=event)
        if form.is_valid():
            event = form.save()
            # Next wizard step. XXX use next_url() instead?
            return redirect(reverse('suggest:description', args=(event.pk,)))
    return render(request, 'suggest/title.html', {'form': form, 'event': event})
@login_required
@transaction.commit_on_success
def choose_file(request, id):
    """Attach an uploaded file to a pre-recorded suggested event.

    Owner only; upcoming events have no file so they are redirected
    straight to the description step.  On a valid POST the chosen
    Upload is linked to the event (and any other uploads previously
    linked to it are detached) before continuing to the description
    step.  A GET may pre-select an upload via ?upload=<pk>.
    """
    event = get_object_or_404(SuggestedEvent, pk=id)
    if event.user != request.user:
        return http.HttpResponseBadRequest('Not your event')
    if event.upcoming:
        return redirect(reverse('suggest:description', args=(event.pk,)))
    if request.method == 'POST':
        form = forms.ChooseFileForm(
            request.POST,
            user=request.user,
            instance=event
        )
        if form.is_valid():
            event = form.save()
            # Make the link bidirectional: the upload points back at us.
            event.upload.suggested_event = event
            event.upload.save()
            # did any *other* upload belong to this suggested event?
            other_uploads = (
                Upload.objects
                .filter(suggested_event=event)
                .exclude(pk=event.upload.pk)
            )
            for upload in other_uploads:
                upload.suggested_event = None
                upload.save()
            if request.session.get('active_suggested_event'):
                del request.session['active_suggested_event']
            # XXX use next_url() instead?
            url = reverse('suggest:description', args=(event.pk,))
            return redirect(url)
    else:
        initial = {}
        if request.GET.get('upload'):
            try:
                upload = Upload.objects.get(
                    pk=request.GET['upload'],
                    user=request.user
                )
                initial['upload'] = upload.pk
            except Upload.DoesNotExist:
                # Unknown/foreign upload pk: ignore the query parameter.
                pass
        form = forms.ChooseFileForm(
            user=request.user,
            instance=event,
            initial=initial
        )
    data = {'form': form, 'event': event}
    return render(request, 'suggest/file.html', data)
@login_required
@transaction.commit_on_success
def popcorn(request, id):
    """Edit the Popcorn URL of a pre-recorded suggested event.

    Owner only; upcoming events have no Popcorn URL so they are
    redirected straight to the description step.  On a valid POST, if
    the Popcorn page advertises an Open Graph image it is downloaded
    and stored as the event's placeholder image, then the wizard
    continues to the description step.
    """
    event = get_object_or_404(SuggestedEvent, pk=id)
    if event.user != request.user:
        return http.HttpResponseBadRequest('Not your event')
    if event.upcoming:
        return redirect(reverse('suggest:description', args=(event.pk,)))
    if request.method == 'POST':
        form = forms.PopcornForm(
            request.POST,
            instance=event
        )
        if form.is_valid():
            event = form.save()
            image_url = utils.find_open_graph_image_url(event.popcorn_url)
            if image_url:
                from django.core.files.uploadedfile import InMemoryUploadedFile
                import os
                from StringIO import StringIO
                image_content = requests.get(image_url).content
                buf = StringIO(image_content)
                # Seek to the end of the stream, so we can get its
                # length with `buf.tell()`
                buf.seek(0, 2)
                # Renamed from `file` so the builtin isn't shadowed.
                image_file = InMemoryUploadedFile(
                    buf,
                    "image",
                    os.path.basename(image_url),
                    None,
                    buf.tell(),
                    None
                )
                event.placeholder_img = image_file
                event.save()
            # XXX use next_url() instead?
            url = reverse('suggest:description', args=(event.pk,))
            return redirect(url)
    else:
        # The removed `initial = {}` was dead: an empty initial dict is
        # equivalent to omitting the argument.
        form = forms.PopcornForm(instance=event)
    data = {'form': form, 'event': event}
    return render(request, 'suggest/popcorn.html', data)
@login_required
@transaction.commit_on_success
def description(request, id):
    """Edit the description of a suggested event (owner only).

    A valid POST saves the form and moves on to the details step;
    anything else (GET or invalid POST) renders the description form.
    """
    event = get_object_or_404(SuggestedEvent, pk=id)
    if event.user != request.user:
        return http.HttpResponseBadRequest('Not your event')
    if request.method != 'POST':
        form = forms.DescriptionForm(instance=event)
    else:
        form = forms.DescriptionForm(request.POST, instance=event)
        if form.is_valid():
            form.save()
            # Next wizard step. XXX use next_url() instead?
            return redirect(reverse('suggest:details', args=(event.pk,)))
    return render(request, 'suggest/description.html',
                  {'form': form, 'event': event})
@login_required
@transaction.commit_on_success
def details(request, id):
    """Edit date/time, location and other details of a suggested event.

    Owner only.  Handles the timezone dance between the form's naive
    datetimes and the event location's timezone, and enables/disables
    the associated SuggestedDiscussion according to the form.  On
    success redirects to the discussion step (if discussion enabled)
    or to the placeholder step.
    """
    event = get_object_or_404(SuggestedEvent, pk=id)
    if event.user != request.user:
        return http.HttpResponseBadRequest('Not your event')
    try:
        discussion = SuggestedDiscussion.objects.get(event=event)
    except SuggestedDiscussion.DoesNotExist:
        discussion = None
    if request.method == 'POST':
        form = forms.DetailsForm(request.POST, instance=event)
        if form.is_valid():
            event = form.save()
            # the start_time comes to us as a string, e.g. '2014-01-01
            # 12:00:00' and that'll be converted into '2014-01-01
            # 12:00:00 tzinfo=UTC' automatically. But that's not what we want
            # so we change it first: re-interpret the wall-clock time in the
            # event location's timezone.
            event.start_time = tz_apply(
                event.start_time,
                pytz.timezone(event.location.timezone)
            )
            event.save()
            next_url = reverse('suggest:placeholder', args=(event.pk,))
            if form.cleaned_data['enable_discussion']:
                if discussion:
                    # make sure it's enabled
                    discussion.enabled = True
                    # discussion.moderate_all = (
                    #     event.privacy != Event.PRIVACY_COMPANY
                    # )
                    discussion.save()
                else:
                    discussion = SuggestedDiscussion.objects.create(
                        event=event,
                        enabled=True,
                        notify_all=True,
                        # moderate_all=event.privacy != Event.PRIVACY_COMPANY
                    )
                # The requester always moderates their own discussion.
                if request.user not in discussion.moderators.all():
                    discussion.moderators.add(request.user)
                next_url = reverse('suggest:discussion', args=(event.pk,))
            elif SuggestedDiscussion.objects.filter(event=event):
                discussion = SuggestedDiscussion.objects.get(event=event)
                discussion.enabled = False
                discussion.save()
            return redirect(next_url)
    else:
        if event.location and event.start_time:
            # The modelform presents datetimes to the user in naive
            # format using settings.TIME_ZONE, and applies
            # settings.TIME_ZONE again when saving.
            # Normally in Django templates, this is solved with
            # {% timezone "Europe/Paris" %}
            #   {{ form.as_p }}
            # {% endtimezone %}
            # But that's not going to work when working with jinja
            # so we do it manually from the view code.
            event.start_time = make_naive(
                event.start_time,
                pytz.timezone(event.location.timezone)
            )
        initial = {'enable_discussion': not (event and not discussion)}
        form = forms.DetailsForm(instance=event, initial=initial)
    data = {'form': form, 'event': event}
    return render(request, 'suggest/details.html', data)
@login_required
@transaction.commit_on_success
def discussion(request, id):
    """Edit the moderators of a suggested event's discussion.

    Owner only.  On a valid POST the moderator list is replaced with
    the users matching the submitted emails, creating password-less
    accounts for unknown addresses, then redirects to the placeholder
    step.  A GET pre-fills the form with the current moderators'
    emails (falling back to the requester's own).
    """
    event = get_object_or_404(SuggestedEvent, pk=id)
    if event.user != request.user:
        return http.HttpResponseBadRequest('Not your event')
    # NOTE(review): raises SuggestedDiscussion.DoesNotExist (500) if the
    # event has no discussion row — presumably guaranteed by the earlier
    # wizard steps; confirm.
    discussion = SuggestedDiscussion.objects.get(event=event)
    if request.method == 'POST':
        form = forms.DiscussionForm(request.POST, instance=discussion)
        if form.is_valid():
            discussion = form.save()
            discussion.moderators.clear()
            for email in form.cleaned_data['emails']:
                try:
                    user = User.objects.get(email__iexact=email)
                except User.DoesNotExist:
                    # NOTE(review): username is derived from the email's
                    # local part — two different emails with the same
                    # local part would collide on the unique username;
                    # confirm whether that is handled upstream.
                    user = User.objects.create(
                        username=email.split('@')[0],
                        email=email
                    )
                    user.set_unusable_password()
                    user.save()
                discussion.moderators.add(user)
            url = reverse('suggest:placeholder', args=(event.pk,))
            return redirect(url)
    else:
        emails = []
        for moderator in discussion.moderators.all():
            if moderator.email not in emails:
                emails.append(moderator.email)
        if not emails:
            emails.append(request.user.email)
        initial = {'emails': ', '.join(emails)}
        form = forms.DiscussionForm(instance=discussion, initial=initial)
    context = {'event': event, 'form': form, 'discussion': discussion}
    return render(request, 'suggest/discussion.html', context)
@login_required
@json_view
def autocomplete_emails(request):
    """JSON endpoint suggesting email addresses for a prefix ?q=...

    Matches existing users' emails by prefix; when nothing matches,
    falls back to q itself (if it is a valid email) or q@mozilla.com.
    Prefixes shorter than two characters yield no suggestions.
    """
    if 'q' not in request.GET:
        return http.HttpResponseBadRequest('Missing q')
    q = request.GET.get('q', '').strip()
    emails = []
    if len(q) > 1:
        matches = (
            User.objects
            .filter(email__istartswith=q)
            .exclude(email__isnull=True)
        )
        for candidate in matches.order_by('email'):
            if candidate.email not in emails:
                emails.append(candidate.email)
        if not emails:
            if utils.is_valid_email(q):
                emails.append(q)
            elif utils.is_valid_email('%s@mozilla.com' % q):
                emails.append('%s@mozilla.com' % q)
    return {'emails': emails}
@login_required
@transaction.commit_on_success
def placeholder(request, id):
    """Edit the placeholder image (upload or gallery pick) of an event.

    Owner only.  Uploading a new placeholder image clears any gallery
    picture previously selected.  On success redirects to the summary
    step.
    """
    event = get_object_or_404(SuggestedEvent, pk=id)
    if event.user != request.user:
        return http.HttpResponseBadRequest('Not your event')
    if request.method == 'POST':
        form = forms.PlaceholderForm(
            request.POST,
            request.FILES,
            instance=event
        )
        if form.is_valid():
            event = form.save()
            if form['placeholder_img'].value() != event.placeholder_img:
                # User selected a new placeholder image. Clear gallery select.
                event.picture = None
                event.save()
            # XXX use next_url() instead?
            url = reverse('suggest:summary', args=(event.pk,))
            return redirect(url)
    else:
        # NOTE(review): unlike the other steps, the GET form is not bound
        # to `instance=event`, so an existing placeholder_img is not
        # shown — confirm whether that is intentional.
        form = forms.PlaceholderForm()
        if event.picture:
            form.fields['picture'].initial = event.picture.id
    data = {'form': form, 'event': event}
    return render(request, 'suggest/placeholder.html', data)
@login_required
@transaction.commit_on_success
def summary(request, id):
    """Show the suggested event's summary; submit/retract or comment.

    Visible to the owner, or (once submitted) to producers holding the
    'main.add_event' permission.  A POST with 'save_comment' adds a
    comment (emailing producers if the event is submitted); any other
    POST toggles the submission state: retract if submitted, otherwise
    (re)submit and email the producers.
    """
    event = get_object_or_404(SuggestedEvent, pk=id)
    if event.user != request.user:
        # it's ok if it's submitted and you have the 'add_event'
        # permission
        if request.user.has_perm('main.add_event'):
            if not event.submitted:
                return http.HttpResponseBadRequest('Not submitted')
        else:
            return http.HttpResponseBadRequest('Not your event')
    comment_form = forms.SuggestedEventCommentForm()
    if request.method == 'POST':
        if request.POST.get('save_comment'):
            comment_form = forms.SuggestedEventCommentForm(data=request.POST)
            if comment_form.is_valid():
                comment = SuggestedEventComment.objects.create(
                    comment=comment_form.cleaned_data['comment'].strip(),
                    user=request.user,
                    suggested_event=event
                )
                if event.submitted:
                    sending.email_about_suggested_event_comment(
                        comment,
                        request
                    )
                    messages.info(
                        request,
                        'Comment added and producers notified by email.'
                    )
                else:
                    messages.info(
                        request,
                        'Comment added but not emailed to producers because '
                        'the event is not submitted.'
                    )
                return redirect('suggest:summary', event.pk)
        else:
            if event.submitted:
                # Already submitted: this POST retracts it.
                event.status = SuggestedEvent.STATUS_RETRACTED
                event.submitted = None
                event.save()
            else:
                now = datetime.datetime.utcnow().replace(tzinfo=utc)
                event.submitted = now
                if not event.first_submitted:
                    event.status = SuggestedEvent.STATUS_SUBMITTED
                    event.first_submitted = now
                else:
                    # it was only resubmitted if it was previously rejected
                    if event.status == SuggestedEvent.STATUS_REJECTED:
                        event.status = SuggestedEvent.STATUS_RESUBMITTED
                    else:
                        event.status = SuggestedEvent.STATUS_SUBMITTED
                event.save()
                sending.email_about_suggested_event(event, request)
            url = reverse('suggest:summary', args=(event.pk,))
            return redirect(url)
    # we don't need the label for this form layout
    comment_form.fields['comment'].label = ''
    comments = (
        SuggestedEventComment.objects
        .filter(suggested_event=event)
        # Fixed: select_related() takes the field name ('user'), not the
        # model class name ('User'); the old spelling was silently ignored
        # so the intended join never happened.
        .select_related('user')
        .order_by('created')
    )
    discussion = None
    for each in SuggestedDiscussion.objects.filter(event=event):
        discussion = each
    context = {
        'event': event,
        'comment_form': comment_form,
        'comments': comments,
        'discussion': discussion,
    }
    return render(request, 'suggest/summary.html', context)
@csrf_exempt
@require_POST
@login_required
def delete(request, id):
    """Delete a suggested event owned by the current user (POST only)."""
    event = get_object_or_404(SuggestedEvent, pk=id)
    if event.user == request.user:
        event.delete()
        return redirect('suggest:start')
    return http.HttpResponseBadRequest('Not your event')
| |
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from google.auth.transport.requests import AuthorizedSession # type: ignore
import json # type: ignore
import grpc # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.api_core import exceptions as core_exceptions
from google.api_core import retry as retries
from google.api_core import rest_helpers
from google.api_core import rest_streaming
from google.api_core import path_template
from google.api_core import gapic_v1
from requests import __version__ as requests_version
import dataclasses
import re
from typing import Callable, Dict, List, Optional, Sequence, Tuple, Union
import warnings
try:
OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault]
except AttributeError: # pragma: NO COVER
OptionalRetry = Union[retries.Retry, object] # type: ignore
from google.cloud.compute_v1.types import compute
from .base import (
ResourcePoliciesTransport,
DEFAULT_CLIENT_INFO as BASE_DEFAULT_CLIENT_INFO,
)
# Client info sent with each request; reuses the base transport's gapic
# version but reports the REST library (requests) version instead of gRPC.
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
    gapic_version=BASE_DEFAULT_CLIENT_INFO.gapic_version,
    grpc_version=None,
    rest_version=requests_version,
)
class ResourcePoliciesRestInterceptor:
    """Interceptor for ResourcePolicies.
    Interceptors are used to manipulate requests, request metadata, and responses
    in arbitrary ways.
    Example use cases include:
    * Logging
    * Verifying requests according to service or custom semantics
    * Stripping extraneous information from responses
    These use cases and more can be enabled by injecting an
    instance of a custom subclass when constructing the ResourcePoliciesRestTransport.
    .. code-block:: python
        class MyCustomResourcePoliciesInterceptor(ResourcePoliciesRestInterceptor):
            def pre_aggregated_list(request, metadata):
                logging.log(f"Received request: {request}")
                return request, metadata
            def post_aggregated_list(response):
                logging.log(f"Received response: {response}")
            def pre_delete(request, metadata):
                logging.log(f"Received request: {request}")
                return request, metadata
            def post_delete(response):
                logging.log(f"Received response: {response}")
            def pre_get(request, metadata):
                logging.log(f"Received request: {request}")
                return request, metadata
            def post_get(response):
                logging.log(f"Received response: {response}")
            def pre_get_iam_policy(request, metadata):
                logging.log(f"Received request: {request}")
                return request, metadata
            def post_get_iam_policy(response):
                logging.log(f"Received response: {response}")
            def pre_insert(request, metadata):
                logging.log(f"Received request: {request}")
                return request, metadata
            def post_insert(response):
                logging.log(f"Received response: {response}")
            def pre_list(request, metadata):
                logging.log(f"Received request: {request}")
                return request, metadata
            def post_list(response):
                logging.log(f"Received response: {response}")
            def pre_set_iam_policy(request, metadata):
                logging.log(f"Received request: {request}")
                return request, metadata
            def post_set_iam_policy(response):
                logging.log(f"Received response: {response}")
            def pre_test_iam_permissions(request, metadata):
                logging.log(f"Received request: {request}")
                return request, metadata
            def post_test_iam_permissions(response):
                logging.log(f"Received response: {response}")
        transport = ResourcePoliciesRestTransport(interceptor=MyCustomResourcePoliciesInterceptor())
        client = ResourcePoliciesClient(transport=transport)
    """
    # Every hook below is a no-op by default: pre_* returns (request,
    # metadata) unchanged and post_* returns the response unchanged.
    def pre_aggregated_list(
        self,
        request: compute.AggregatedListResourcePoliciesRequest,
        metadata: Sequence[Tuple[str, str]],
    ) -> Tuple[
        compute.AggregatedListResourcePoliciesRequest, Sequence[Tuple[str, str]]
    ]:
        """Pre-rpc interceptor for aggregated_list
        Override in a subclass to manipulate the request or metadata
        before they are sent to the ResourcePolicies server.
        """
        return request, metadata
    def post_aggregated_list(
        self, response: compute.ResourcePolicyAggregatedList
    ) -> compute.ResourcePolicyAggregatedList:
        """Post-rpc interceptor for aggregated_list
        Override in a subclass to manipulate the response
        after it is returned by the ResourcePolicies server but before
        it is returned to user code.
        """
        return response
    def pre_delete(
        self,
        request: compute.DeleteResourcePolicyRequest,
        metadata: Sequence[Tuple[str, str]],
    ) -> Tuple[compute.DeleteResourcePolicyRequest, Sequence[Tuple[str, str]]]:
        """Pre-rpc interceptor for delete
        Override in a subclass to manipulate the request or metadata
        before they are sent to the ResourcePolicies server.
        """
        return request, metadata
    def post_delete(self, response: compute.Operation) -> compute.Operation:
        """Post-rpc interceptor for delete
        Override in a subclass to manipulate the response
        after it is returned by the ResourcePolicies server but before
        it is returned to user code.
        """
        return response
    def pre_get(
        self,
        request: compute.GetResourcePolicyRequest,
        metadata: Sequence[Tuple[str, str]],
    ) -> Tuple[compute.GetResourcePolicyRequest, Sequence[Tuple[str, str]]]:
        """Pre-rpc interceptor for get
        Override in a subclass to manipulate the request or metadata
        before they are sent to the ResourcePolicies server.
        """
        return request, metadata
    def post_get(self, response: compute.ResourcePolicy) -> compute.ResourcePolicy:
        """Post-rpc interceptor for get
        Override in a subclass to manipulate the response
        after it is returned by the ResourcePolicies server but before
        it is returned to user code.
        """
        return response
    def pre_get_iam_policy(
        self,
        request: compute.GetIamPolicyResourcePolicyRequest,
        metadata: Sequence[Tuple[str, str]],
    ) -> Tuple[compute.GetIamPolicyResourcePolicyRequest, Sequence[Tuple[str, str]]]:
        """Pre-rpc interceptor for get_iam_policy
        Override in a subclass to manipulate the request or metadata
        before they are sent to the ResourcePolicies server.
        """
        return request, metadata
    def post_get_iam_policy(self, response: compute.Policy) -> compute.Policy:
        """Post-rpc interceptor for get_iam_policy
        Override in a subclass to manipulate the response
        after it is returned by the ResourcePolicies server but before
        it is returned to user code.
        """
        return response
    def pre_insert(
        self,
        request: compute.InsertResourcePolicyRequest,
        metadata: Sequence[Tuple[str, str]],
    ) -> Tuple[compute.InsertResourcePolicyRequest, Sequence[Tuple[str, str]]]:
        """Pre-rpc interceptor for insert
        Override in a subclass to manipulate the request or metadata
        before they are sent to the ResourcePolicies server.
        """
        return request, metadata
    def post_insert(self, response: compute.Operation) -> compute.Operation:
        """Post-rpc interceptor for insert
        Override in a subclass to manipulate the response
        after it is returned by the ResourcePolicies server but before
        it is returned to user code.
        """
        return response
    def pre_list(
        self,
        request: compute.ListResourcePoliciesRequest,
        metadata: Sequence[Tuple[str, str]],
    ) -> Tuple[compute.ListResourcePoliciesRequest, Sequence[Tuple[str, str]]]:
        """Pre-rpc interceptor for list
        Override in a subclass to manipulate the request or metadata
        before they are sent to the ResourcePolicies server.
        """
        return request, metadata
    def post_list(
        self, response: compute.ResourcePolicyList
    ) -> compute.ResourcePolicyList:
        """Post-rpc interceptor for list
        Override in a subclass to manipulate the response
        after it is returned by the ResourcePolicies server but before
        it is returned to user code.
        """
        return response
    def pre_set_iam_policy(
        self,
        request: compute.SetIamPolicyResourcePolicyRequest,
        metadata: Sequence[Tuple[str, str]],
    ) -> Tuple[compute.SetIamPolicyResourcePolicyRequest, Sequence[Tuple[str, str]]]:
        """Pre-rpc interceptor for set_iam_policy
        Override in a subclass to manipulate the request or metadata
        before they are sent to the ResourcePolicies server.
        """
        return request, metadata
    def post_set_iam_policy(self, response: compute.Policy) -> compute.Policy:
        """Post-rpc interceptor for set_iam_policy
        Override in a subclass to manipulate the response
        after it is returned by the ResourcePolicies server but before
        it is returned to user code.
        """
        return response
    def pre_test_iam_permissions(
        self,
        request: compute.TestIamPermissionsResourcePolicyRequest,
        metadata: Sequence[Tuple[str, str]],
    ) -> Tuple[
        compute.TestIamPermissionsResourcePolicyRequest, Sequence[Tuple[str, str]]
    ]:
        """Pre-rpc interceptor for test_iam_permissions
        Override in a subclass to manipulate the request or metadata
        before they are sent to the ResourcePolicies server.
        """
        return request, metadata
    def post_test_iam_permissions(
        self, response: compute.TestPermissionsResponse
    ) -> compute.TestPermissionsResponse:
        """Post-rpc interceptor for test_iam_permissions
        Override in a subclass to manipulate the response
        after it is returned by the ResourcePolicies server but before
        it is returned to user code.
        """
        return response
@dataclasses.dataclass
class ResourcePoliciesRestStub:
    # Plumbing shared by the per-RPC stub classes nested in the transport.
    _session: AuthorizedSession  # authorized HTTP session used to send requests
    _host: str  # fully-qualified API endpoint, e.g. "https://compute.googleapis.com"
    _interceptor: ResourcePoliciesRestInterceptor  # hooks run around each call
class ResourcePoliciesRestTransport(ResourcePoliciesTransport):
"""REST backend transport for ResourcePolicies.
The ResourcePolicies API.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends JSON representations of protocol buffers over HTTP/1.1
"""
_STUBS: Dict[str, ResourcePoliciesRestStub] = {}
    def __init__(
        self,
        *,
        host: str = "compute.googleapis.com",
        credentials: ga_credentials.Credentials = None,
        credentials_file: str = None,
        scopes: Sequence[str] = None,
        client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
        quota_project_id: Optional[str] = None,
        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
        always_use_jwt_access: Optional[bool] = False,
        url_scheme: str = "https",
        interceptor: Optional[ResourcePoliciesRestInterceptor] = None,
    ) -> None:
        """Instantiate the transport.
        Args:
            host (Optional[str]):
                 The hostname to connect to.
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is ignored if ``channel`` is provided.
            scopes (Optional(Sequence[str])): A list of scopes. This argument is
                ignored if ``channel`` is provided.
            client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client
                certificate to configure mutual TLS HTTP channel. It is ignored
                if ``channel`` is provided.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you are developing
                your own client library.
            always_use_jwt_access (Optional[bool]): Whether self signed JWT should
                be used for service account credentials.
            url_scheme: the protocol scheme for the API endpoint.  Normally
                "https", but for testing or local servers,
                "http" can be specified.
        """
        # Run the base constructor
        # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc.
        # TODO: When custom host (api_endpoint) is set, `scopes` must *also* be set on the
        # credentials object
        # Accept a bare hostname or a full URL; prepend url_scheme only when
        # the caller did not supply a scheme.
        maybe_url_match = re.match("^(?P<scheme>http(?:s)?://)?(?P<host>.*)$", host)
        if maybe_url_match is None:
            raise ValueError(
                f"Unexpected hostname structure: {host}"
            )  # pragma: NO COVER
        url_match_items = maybe_url_match.groupdict()
        host = f"{url_scheme}://{host}" if not url_match_items["scheme"] else host
        super().__init__(
            host=host,
            credentials=credentials,
            client_info=client_info,
            always_use_jwt_access=always_use_jwt_access,
        )
        # HTTP session that automatically attaches the resolved credentials.
        self._session = AuthorizedSession(
            self._credentials, default_host=self.DEFAULT_HOST
        )
        if client_cert_source_for_mtls:
            self._session.configure_mtls_channel(client_cert_source_for_mtls)
        # Fall back to the no-op interceptor when none is supplied.
        self._interceptor = interceptor or ResourcePoliciesRestInterceptor()
        self._prep_wrapped_messages(client_info)
class _AggregatedList(ResourcePoliciesRestStub):
def __hash__(self):
return hash("AggregatedList")
__REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, str] = {}
@classmethod
def _get_unset_required_fields(cls, message_dict):
return {
k: v
for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
if k not in message_dict
}
def __call__(
self,
request: compute.AggregatedListResourcePoliciesRequest,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> compute.ResourcePolicyAggregatedList:
r"""Call the aggregated list method over HTTP.
Args:
request (~.compute.AggregatedListResourcePoliciesRequest):
The request object. A request message for
ResourcePolicies.AggregatedList. See the
method description for details.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
~.compute.ResourcePolicyAggregatedList:
Contains a list of resourcePolicies.
"""
http_options: List[Dict[str, str]] = [
{
"method": "get",
"uri": "/compute/v1/projects/{project}/aggregated/resourcePolicies",
},
]
request, metadata = self._interceptor.pre_aggregated_list(request, metadata)
request_kwargs = compute.AggregatedListResourcePoliciesRequest.to_dict(
request
)
transcoded_request = path_template.transcode(http_options, **request_kwargs)
uri = transcoded_request["uri"]
method = transcoded_request["method"]
# Jsonify the query params
query_params = json.loads(
compute.AggregatedListResourcePoliciesRequest.to_json(
compute.AggregatedListResourcePoliciesRequest(
transcoded_request["query_params"]
),
including_default_value_fields=False,
use_integers_for_enums=False,
)
)
query_params.update(self._get_unset_required_fields(query_params))
# Send the request
headers = dict(metadata)
headers["Content-Type"] = "application/json"
response = getattr(self._session, method)(
"{host}{uri}".format(host=self._host, uri=uri),
timeout=timeout,
headers=headers,
params=rest_helpers.flatten_query_params(query_params),
)
# In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
# subclass.
if response.status_code >= 400:
raise core_exceptions.from_http_response(response)
# Return the response
resp = compute.ResourcePolicyAggregatedList.from_json(
response.content, ignore_unknown_fields=True
)
resp = self._interceptor.post_aggregated_list(resp)
return resp
class _Delete(ResourcePoliciesRestStub):
def __hash__(self):
return hash("Delete")
__REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, str] = {}
@classmethod
def _get_unset_required_fields(cls, message_dict):
return {
k: v
for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
if k not in message_dict
}
def __call__(
self,
request: compute.DeleteResourcePolicyRequest,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> compute.Operation:
r"""Call the delete method over HTTP.
Args:
request (~.compute.DeleteResourcePolicyRequest):
The request object. A request message for
ResourcePolicies.Delete. See the method
description for details.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
~.compute.Operation:
Represents an Operation resource. Google Compute Engine
has three Operation resources: \*
`Global </compute/docs/reference/rest/v1/globalOperations>`__
\*
`Regional </compute/docs/reference/rest/v1/regionOperations>`__
\*
`Zonal </compute/docs/reference/rest/v1/zoneOperations>`__
You can use an operation resource to manage asynchronous
API requests. For more information, read Handling API
responses. Operations can be global, regional or zonal.
- For global operations, use the ``globalOperations``
resource. - For regional operations, use the
``regionOperations`` resource. - For zonal operations,
use the ``zonalOperations`` resource. For more
information, read Global, Regional, and Zonal Resources.
"""
http_options: List[Dict[str, str]] = [
{
"method": "delete",
"uri": "/compute/v1/projects/{project}/regions/{region}/resourcePolicies/{resource_policy}",
},
]
request, metadata = self._interceptor.pre_delete(request, metadata)
request_kwargs = compute.DeleteResourcePolicyRequest.to_dict(request)
transcoded_request = path_template.transcode(http_options, **request_kwargs)
uri = transcoded_request["uri"]
method = transcoded_request["method"]
# Jsonify the query params
query_params = json.loads(
compute.DeleteResourcePolicyRequest.to_json(
compute.DeleteResourcePolicyRequest(
transcoded_request["query_params"]
),
including_default_value_fields=False,
use_integers_for_enums=False,
)
)
query_params.update(self._get_unset_required_fields(query_params))
# Send the request
headers = dict(metadata)
headers["Content-Type"] = "application/json"
response = getattr(self._session, method)(
"{host}{uri}".format(host=self._host, uri=uri),
timeout=timeout,
headers=headers,
params=rest_helpers.flatten_query_params(query_params),
)
# In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
# subclass.
if response.status_code >= 400:
raise core_exceptions.from_http_response(response)
# Return the response
resp = compute.Operation.from_json(
response.content, ignore_unknown_fields=True
)
resp = self._interceptor.post_delete(resp)
return resp
class _Get(ResourcePoliciesRestStub):
def __hash__(self):
return hash("Get")
__REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, str] = {}
@classmethod
def _get_unset_required_fields(cls, message_dict):
return {
k: v
for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
if k not in message_dict
}
def __call__(
self,
request: compute.GetResourcePolicyRequest,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> compute.ResourcePolicy:
r"""Call the get method over HTTP.
Args:
request (~.compute.GetResourcePolicyRequest):
The request object. A request message for
ResourcePolicies.Get. See the method
description for details.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
~.compute.ResourcePolicy:
Represents a Resource Policy
resource. You can use resource policies
to schedule actions for some Compute
Engine resources. For example, you can
use them to schedule persistent disk
snapshots.
"""
http_options: List[Dict[str, str]] = [
{
"method": "get",
"uri": "/compute/v1/projects/{project}/regions/{region}/resourcePolicies/{resource_policy}",
},
]
request, metadata = self._interceptor.pre_get(request, metadata)
request_kwargs = compute.GetResourcePolicyRequest.to_dict(request)
transcoded_request = path_template.transcode(http_options, **request_kwargs)
uri = transcoded_request["uri"]
method = transcoded_request["method"]
# Jsonify the query params
query_params = json.loads(
compute.GetResourcePolicyRequest.to_json(
compute.GetResourcePolicyRequest(
transcoded_request["query_params"]
),
including_default_value_fields=False,
use_integers_for_enums=False,
)
)
query_params.update(self._get_unset_required_fields(query_params))
# Send the request
headers = dict(metadata)
headers["Content-Type"] = "application/json"
response = getattr(self._session, method)(
"{host}{uri}".format(host=self._host, uri=uri),
timeout=timeout,
headers=headers,
params=rest_helpers.flatten_query_params(query_params),
)
# In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
# subclass.
if response.status_code >= 400:
raise core_exceptions.from_http_response(response)
# Return the response
resp = compute.ResourcePolicy.from_json(
response.content, ignore_unknown_fields=True
)
resp = self._interceptor.post_get(resp)
return resp
class _GetIamPolicy(ResourcePoliciesRestStub):
def __hash__(self):
return hash("GetIamPolicy")
__REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, str] = {}
@classmethod
def _get_unset_required_fields(cls, message_dict):
return {
k: v
for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
if k not in message_dict
}
def __call__(
self,
request: compute.GetIamPolicyResourcePolicyRequest,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> compute.Policy:
r"""Call the get iam policy method over HTTP.
Args:
request (~.compute.GetIamPolicyResourcePolicyRequest):
The request object. A request message for
ResourcePolicies.GetIamPolicy. See the
method description for details.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
~.compute.Policy:
An Identity and Access Management (IAM) policy, which
specifies access controls for Google Cloud resources. A
``Policy`` is a collection of ``bindings``. A
``binding`` binds one or more ``members``, or
principals, to a single ``role``. Principals can be user
accounts, service accounts, Google groups, and domains
(such as G Suite). A ``role`` is a named list of
permissions; each ``role`` can be an IAM predefined role
or a user-created custom role. For some types of Google
Cloud resources, a ``binding`` can also specify a
``condition``, which is a logical expression that allows
access to a resource only if the expression evaluates to
``true``. A condition can add constraints based on
attributes of the request, the resource, or both. To
learn which resources support conditions in their IAM
policies, see the `IAM
documentation <https://cloud.google.com/iam/help/conditions/resource-policies>`__.
**JSON example:** { "bindings": [ { "role":
"roles/resourcemanager.organizationAdmin", "members": [
"user:mike@example.com", "group:admins@example.com",
"domain:google.com",
"serviceAccount:my-project-id@appspot.gserviceaccount.com"
] }, { "role":
"roles/resourcemanager.organizationViewer", "members": [
"user:eve@example.com" ], "condition": { "title":
"expirable access", "description": "Does not grant
access after Sep 2020", "expression": "request.time <
timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag":
"BwWWja0YfJA=", "version": 3 } **YAML example:**
bindings: - members: - user:mike@example.com -
group:admins@example.com - domain:google.com -
serviceAccount:my-project-id@appspot.gserviceaccount.com
role: roles/resourcemanager.organizationAdmin - members:
- user:eve@example.com role:
roles/resourcemanager.organizationViewer condition:
title: expirable access description: Does not grant
access after Sep 2020 expression: request.time <
timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA=
version: 3 For a description of IAM and its features,
see the `IAM
documentation <https://cloud.google.com/iam/docs/>`__.
"""
http_options: List[Dict[str, str]] = [
{
"method": "get",
"uri": "/compute/v1/projects/{project}/regions/{region}/resourcePolicies/{resource}/getIamPolicy",
},
]
request, metadata = self._interceptor.pre_get_iam_policy(request, metadata)
request_kwargs = compute.GetIamPolicyResourcePolicyRequest.to_dict(request)
transcoded_request = path_template.transcode(http_options, **request_kwargs)
uri = transcoded_request["uri"]
method = transcoded_request["method"]
# Jsonify the query params
query_params = json.loads(
compute.GetIamPolicyResourcePolicyRequest.to_json(
compute.GetIamPolicyResourcePolicyRequest(
transcoded_request["query_params"]
),
including_default_value_fields=False,
use_integers_for_enums=False,
)
)
query_params.update(self._get_unset_required_fields(query_params))
# Send the request
headers = dict(metadata)
headers["Content-Type"] = "application/json"
response = getattr(self._session, method)(
"{host}{uri}".format(host=self._host, uri=uri),
timeout=timeout,
headers=headers,
params=rest_helpers.flatten_query_params(query_params),
)
# In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
# subclass.
if response.status_code >= 400:
raise core_exceptions.from_http_response(response)
# Return the response
resp = compute.Policy.from_json(
response.content, ignore_unknown_fields=True
)
resp = self._interceptor.post_get_iam_policy(resp)
return resp
class _Insert(ResourcePoliciesRestStub):
def __hash__(self):
return hash("Insert")
__REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, str] = {}
@classmethod
def _get_unset_required_fields(cls, message_dict):
return {
k: v
for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
if k not in message_dict
}
def __call__(
self,
request: compute.InsertResourcePolicyRequest,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> compute.Operation:
r"""Call the insert method over HTTP.
Args:
request (~.compute.InsertResourcePolicyRequest):
The request object. A request message for
ResourcePolicies.Insert. See the method
description for details.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
~.compute.Operation:
Represents an Operation resource. Google Compute Engine
has three Operation resources: \*
`Global </compute/docs/reference/rest/v1/globalOperations>`__
\*
`Regional </compute/docs/reference/rest/v1/regionOperations>`__
\*
`Zonal </compute/docs/reference/rest/v1/zoneOperations>`__
You can use an operation resource to manage asynchronous
API requests. For more information, read Handling API
responses. Operations can be global, regional or zonal.
- For global operations, use the ``globalOperations``
resource. - For regional operations, use the
``regionOperations`` resource. - For zonal operations,
use the ``zonalOperations`` resource. For more
information, read Global, Regional, and Zonal Resources.
"""
http_options: List[Dict[str, str]] = [
{
"method": "post",
"uri": "/compute/v1/projects/{project}/regions/{region}/resourcePolicies",
"body": "resource_policy_resource",
},
]
request, metadata = self._interceptor.pre_insert(request, metadata)
request_kwargs = compute.InsertResourcePolicyRequest.to_dict(request)
transcoded_request = path_template.transcode(http_options, **request_kwargs)
# Jsonify the request body
body = compute.ResourcePolicy.to_json(
compute.ResourcePolicy(transcoded_request["body"]),
including_default_value_fields=False,
use_integers_for_enums=False,
)
uri = transcoded_request["uri"]
method = transcoded_request["method"]
# Jsonify the query params
query_params = json.loads(
compute.InsertResourcePolicyRequest.to_json(
compute.InsertResourcePolicyRequest(
transcoded_request["query_params"]
),
including_default_value_fields=False,
use_integers_for_enums=False,
)
)
query_params.update(self._get_unset_required_fields(query_params))
# Send the request
headers = dict(metadata)
headers["Content-Type"] = "application/json"
response = getattr(self._session, method)(
"{host}{uri}".format(host=self._host, uri=uri),
timeout=timeout,
headers=headers,
params=rest_helpers.flatten_query_params(query_params),
data=body,
)
# In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
# subclass.
if response.status_code >= 400:
raise core_exceptions.from_http_response(response)
# Return the response
resp = compute.Operation.from_json(
response.content, ignore_unknown_fields=True
)
resp = self._interceptor.post_insert(resp)
return resp
class _List(ResourcePoliciesRestStub):
def __hash__(self):
return hash("List")
__REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, str] = {}
@classmethod
def _get_unset_required_fields(cls, message_dict):
return {
k: v
for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
if k not in message_dict
}
def __call__(
self,
request: compute.ListResourcePoliciesRequest,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> compute.ResourcePolicyList:
r"""Call the list method over HTTP.
Args:
request (~.compute.ListResourcePoliciesRequest):
The request object. A request message for
ResourcePolicies.List. See the method
description for details.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
~.compute.ResourcePolicyList:
"""
http_options: List[Dict[str, str]] = [
{
"method": "get",
"uri": "/compute/v1/projects/{project}/regions/{region}/resourcePolicies",
},
]
request, metadata = self._interceptor.pre_list(request, metadata)
request_kwargs = compute.ListResourcePoliciesRequest.to_dict(request)
transcoded_request = path_template.transcode(http_options, **request_kwargs)
uri = transcoded_request["uri"]
method = transcoded_request["method"]
# Jsonify the query params
query_params = json.loads(
compute.ListResourcePoliciesRequest.to_json(
compute.ListResourcePoliciesRequest(
transcoded_request["query_params"]
),
including_default_value_fields=False,
use_integers_for_enums=False,
)
)
query_params.update(self._get_unset_required_fields(query_params))
# Send the request
headers = dict(metadata)
headers["Content-Type"] = "application/json"
response = getattr(self._session, method)(
"{host}{uri}".format(host=self._host, uri=uri),
timeout=timeout,
headers=headers,
params=rest_helpers.flatten_query_params(query_params),
)
# In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
# subclass.
if response.status_code >= 400:
raise core_exceptions.from_http_response(response)
# Return the response
resp = compute.ResourcePolicyList.from_json(
response.content, ignore_unknown_fields=True
)
resp = self._interceptor.post_list(resp)
return resp
class _SetIamPolicy(ResourcePoliciesRestStub):
def __hash__(self):
return hash("SetIamPolicy")
__REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, str] = {}
@classmethod
def _get_unset_required_fields(cls, message_dict):
return {
k: v
for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
if k not in message_dict
}
def __call__(
self,
request: compute.SetIamPolicyResourcePolicyRequest,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> compute.Policy:
r"""Call the set iam policy method over HTTP.
Args:
request (~.compute.SetIamPolicyResourcePolicyRequest):
The request object. A request message for
ResourcePolicies.SetIamPolicy. See the
method description for details.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
~.compute.Policy:
An Identity and Access Management (IAM) policy, which
specifies access controls for Google Cloud resources. A
``Policy`` is a collection of ``bindings``. A
``binding`` binds one or more ``members``, or
principals, to a single ``role``. Principals can be user
accounts, service accounts, Google groups, and domains
(such as G Suite). A ``role`` is a named list of
permissions; each ``role`` can be an IAM predefined role
or a user-created custom role. For some types of Google
Cloud resources, a ``binding`` can also specify a
``condition``, which is a logical expression that allows
access to a resource only if the expression evaluates to
``true``. A condition can add constraints based on
attributes of the request, the resource, or both. To
learn which resources support conditions in their IAM
policies, see the `IAM
documentation <https://cloud.google.com/iam/help/conditions/resource-policies>`__.
**JSON example:** { "bindings": [ { "role":
"roles/resourcemanager.organizationAdmin", "members": [
"user:mike@example.com", "group:admins@example.com",
"domain:google.com",
"serviceAccount:my-project-id@appspot.gserviceaccount.com"
] }, { "role":
"roles/resourcemanager.organizationViewer", "members": [
"user:eve@example.com" ], "condition": { "title":
"expirable access", "description": "Does not grant
access after Sep 2020", "expression": "request.time <
timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag":
"BwWWja0YfJA=", "version": 3 } **YAML example:**
bindings: - members: - user:mike@example.com -
group:admins@example.com - domain:google.com -
serviceAccount:my-project-id@appspot.gserviceaccount.com
role: roles/resourcemanager.organizationAdmin - members:
- user:eve@example.com role:
roles/resourcemanager.organizationViewer condition:
title: expirable access description: Does not grant
access after Sep 2020 expression: request.time <
timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA=
version: 3 For a description of IAM and its features,
see the `IAM
documentation <https://cloud.google.com/iam/docs/>`__.
"""
http_options: List[Dict[str, str]] = [
{
"method": "post",
"uri": "/compute/v1/projects/{project}/regions/{region}/resourcePolicies/{resource}/setIamPolicy",
"body": "region_set_policy_request_resource",
},
]
request, metadata = self._interceptor.pre_set_iam_policy(request, metadata)
request_kwargs = compute.SetIamPolicyResourcePolicyRequest.to_dict(request)
transcoded_request = path_template.transcode(http_options, **request_kwargs)
# Jsonify the request body
body = compute.RegionSetPolicyRequest.to_json(
compute.RegionSetPolicyRequest(transcoded_request["body"]),
including_default_value_fields=False,
use_integers_for_enums=False,
)
uri = transcoded_request["uri"]
method = transcoded_request["method"]
# Jsonify the query params
query_params = json.loads(
compute.SetIamPolicyResourcePolicyRequest.to_json(
compute.SetIamPolicyResourcePolicyRequest(
transcoded_request["query_params"]
),
including_default_value_fields=False,
use_integers_for_enums=False,
)
)
query_params.update(self._get_unset_required_fields(query_params))
# Send the request
headers = dict(metadata)
headers["Content-Type"] = "application/json"
response = getattr(self._session, method)(
"{host}{uri}".format(host=self._host, uri=uri),
timeout=timeout,
headers=headers,
params=rest_helpers.flatten_query_params(query_params),
data=body,
)
# In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
# subclass.
if response.status_code >= 400:
raise core_exceptions.from_http_response(response)
# Return the response
resp = compute.Policy.from_json(
response.content, ignore_unknown_fields=True
)
resp = self._interceptor.post_set_iam_policy(resp)
return resp
class _TestIamPermissions(ResourcePoliciesRestStub):
def __hash__(self):
return hash("TestIamPermissions")
__REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, str] = {}
@classmethod
def _get_unset_required_fields(cls, message_dict):
return {
k: v
for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
if k not in message_dict
}
def __call__(
self,
request: compute.TestIamPermissionsResourcePolicyRequest,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> compute.TestPermissionsResponse:
r"""Call the test iam permissions method over HTTP.
Args:
request (~.compute.TestIamPermissionsResourcePolicyRequest):
The request object. A request message for
ResourcePolicies.TestIamPermissions. See
the method description for details.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
~.compute.TestPermissionsResponse:
"""
http_options: List[Dict[str, str]] = [
{
"method": "post",
"uri": "/compute/v1/projects/{project}/regions/{region}/resourcePolicies/{resource}/testIamPermissions",
"body": "test_permissions_request_resource",
},
]
request, metadata = self._interceptor.pre_test_iam_permissions(
request, metadata
)
request_kwargs = compute.TestIamPermissionsResourcePolicyRequest.to_dict(
request
)
transcoded_request = path_template.transcode(http_options, **request_kwargs)
# Jsonify the request body
body = compute.TestPermissionsRequest.to_json(
compute.TestPermissionsRequest(transcoded_request["body"]),
including_default_value_fields=False,
use_integers_for_enums=False,
)
uri = transcoded_request["uri"]
method = transcoded_request["method"]
# Jsonify the query params
query_params = json.loads(
compute.TestIamPermissionsResourcePolicyRequest.to_json(
compute.TestIamPermissionsResourcePolicyRequest(
transcoded_request["query_params"]
),
including_default_value_fields=False,
use_integers_for_enums=False,
)
)
query_params.update(self._get_unset_required_fields(query_params))
# Send the request
headers = dict(metadata)
headers["Content-Type"] = "application/json"
response = getattr(self._session, method)(
"{host}{uri}".format(host=self._host, uri=uri),
timeout=timeout,
headers=headers,
params=rest_helpers.flatten_query_params(query_params),
data=body,
)
# In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
# subclass.
if response.status_code >= 400:
raise core_exceptions.from_http_response(response)
# Return the response
resp = compute.TestPermissionsResponse.from_json(
response.content, ignore_unknown_fields=True
)
resp = self._interceptor.post_test_iam_permissions(resp)
return resp
@property
def aggregated_list(
self,
) -> Callable[
[compute.AggregatedListResourcePoliciesRequest],
compute.ResourcePolicyAggregatedList,
]:
stub = self._STUBS.get("aggregated_list")
if not stub:
stub = self._STUBS["aggregated_list"] = self._AggregatedList(
self._session, self._host, self._interceptor
)
# The return type is fine, but mypy isn't sophisticated enough to determine what's going on here.
# In C++ this would require a dynamic_cast
return stub # type: ignore
@property
def delete(
self,
) -> Callable[[compute.DeleteResourcePolicyRequest], compute.Operation]:
stub = self._STUBS.get("delete")
if not stub:
stub = self._STUBS["delete"] = self._Delete(
self._session, self._host, self._interceptor
)
# The return type is fine, but mypy isn't sophisticated enough to determine what's going on here.
# In C++ this would require a dynamic_cast
return stub # type: ignore
@property
def get(
self,
) -> Callable[[compute.GetResourcePolicyRequest], compute.ResourcePolicy]:
stub = self._STUBS.get("get")
if not stub:
stub = self._STUBS["get"] = self._Get(
self._session, self._host, self._interceptor
)
# The return type is fine, but mypy isn't sophisticated enough to determine what's going on here.
# In C++ this would require a dynamic_cast
return stub # type: ignore
@property
def get_iam_policy(
self,
) -> Callable[[compute.GetIamPolicyResourcePolicyRequest], compute.Policy]:
stub = self._STUBS.get("get_iam_policy")
if not stub:
stub = self._STUBS["get_iam_policy"] = self._GetIamPolicy(
self._session, self._host, self._interceptor
)
# The return type is fine, but mypy isn't sophisticated enough to determine what's going on here.
# In C++ this would require a dynamic_cast
return stub # type: ignore
@property
def insert(
self,
) -> Callable[[compute.InsertResourcePolicyRequest], compute.Operation]:
stub = self._STUBS.get("insert")
if not stub:
stub = self._STUBS["insert"] = self._Insert(
self._session, self._host, self._interceptor
)
# The return type is fine, but mypy isn't sophisticated enough to determine what's going on here.
# In C++ this would require a dynamic_cast
return stub # type: ignore
@property
def list(
self,
) -> Callable[[compute.ListResourcePoliciesRequest], compute.ResourcePolicyList]:
stub = self._STUBS.get("list")
if not stub:
stub = self._STUBS["list"] = self._List(
self._session, self._host, self._interceptor
)
# The return type is fine, but mypy isn't sophisticated enough to determine what's going on here.
# In C++ this would require a dynamic_cast
return stub # type: ignore
@property
def set_iam_policy(
self,
) -> Callable[[compute.SetIamPolicyResourcePolicyRequest], compute.Policy]:
stub = self._STUBS.get("set_iam_policy")
if not stub:
stub = self._STUBS["set_iam_policy"] = self._SetIamPolicy(
self._session, self._host, self._interceptor
)
# The return type is fine, but mypy isn't sophisticated enough to determine what's going on here.
# In C++ this would require a dynamic_cast
return stub # type: ignore
@property
def test_iam_permissions(
self,
) -> Callable[
[compute.TestIamPermissionsResourcePolicyRequest],
compute.TestPermissionsResponse,
]:
stub = self._STUBS.get("test_iam_permissions")
if not stub:
stub = self._STUBS["test_iam_permissions"] = self._TestIamPermissions(
self._session, self._host, self._interceptor
)
# The return type is fine, but mypy isn't sophisticated enough to determine what's going on here.
# In C++ this would require a dynamic_cast
return stub # type: ignore
    def close(self):
        """Release the underlying authorized HTTP session."""
        self._session.close()
# Explicit public API of this module.
__all__ = ("ResourcePoliciesRestTransport",)
| |
# Copyright 2011 Eldar Nugaev
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import webob
from lxml import etree
from nova.api.openstack import wsgi
from nova.api.openstack.compute.contrib import keypairs
from nova import context
from nova import db
from nova import exception
from nova import test
from nova.tests.api.openstack import fakes
def fake_keypair(name):
    """Build a canned keypair record with the given name."""
    return dict(public_key='FAKE_KEY',
                fingerprint='FAKE_FINGERPRINT',
                name=name)
def db_key_pair_get_all_by_user(self, user_id):
    """DB stub: every user owns exactly one fake keypair named 'FAKE'."""
    keypairs_for_user = [fake_keypair('FAKE')]
    return keypairs_for_user
def db_key_pair_create(self, keypair):
    """DB stub: accept any keypair and discard it."""
    return None
def db_key_pair_destroy(context, user_id, name):
    """DB stub: fail loudly unless both user_id and name are truthy."""
    if not user_id or not name:
        raise Exception()
def db_key_pair_get(context, user_id, name):
    """DB stub: succeed for any lookup (never raises a not-found error)."""
    return None
class KeypairsTest(test.TestCase):
    """Functional tests for the os-keypairs API extension.

    Covers list, create, import, duplicate detection, validation and
    delete, with the db layer stubbed out by the module-level fakes.
    """

    def setUp(self):
        super(KeypairsTest, self).setUp()
        self.controller = keypairs.KeypairController()
        fakes.stub_out_networking(self.stubs)
        fakes.stub_out_rate_limiting(self.stubs)
        # Route db calls to the module-level stubs so no real db is touched.
        self.stubs.Set(db, "key_pair_get_all_by_user",
                       db_key_pair_get_all_by_user)
        self.stubs.Set(db, "key_pair_create",
                       db_key_pair_create)
        self.stubs.Set(db, "key_pair_destroy",
                       db_key_pair_destroy)
        self.context = context.get_admin_context()

    def test_keypair_list(self):
        req = webob.Request.blank('/v2/fake/os-keypairs')
        res = req.get_response(fakes.wsgi_app())
        self.assertEqual(res.status_int, 200)
        res_dict = json.loads(res.body)
        response = {'keypairs': [{'keypair': fake_keypair('FAKE')}]}
        self.assertEqual(res_dict, response)

    def test_keypair_create(self):
        body = {'keypair': {'name': 'create_test'}}
        req = webob.Request.blank('/v2/fake/os-keypairs')
        req.method = 'POST'
        req.body = json.dumps(body)
        req.headers['Content-Type'] = 'application/json'
        res = req.get_response(fakes.wsgi_app())
        self.assertEqual(res.status_int, 200)
        res_dict = json.loads(res.body)
        # A generated keypair must return both halves of the key.
        self.assertTrue(len(res_dict['keypair']['fingerprint']) > 0)
        self.assertTrue(len(res_dict['keypair']['private_key']) > 0)

    def test_keypair_create_with_empty_name(self):
        body = {'keypair': {'name': ''}}
        req = webob.Request.blank('/v2/fake/os-keypairs')
        req.method = 'POST'
        req.body = json.dumps(body)
        req.headers['Content-Type'] = 'application/json'
        res = req.get_response(fakes.wsgi_app())
        self.assertEqual(res.status_int, 400)

    def test_keypair_create_with_invalid_name(self):
        # Names longer than 255 characters must be rejected.
        body = {
            'keypair': {
                'name': 'a' * 256
            }
        }
        req = webob.Request.blank('/v2/fake/os-keypairs')
        req.method = 'POST'
        req.body = json.dumps(body)
        req.headers['Content-Type'] = 'application/json'
        res = req.get_response(fakes.wsgi_app())
        self.assertEqual(res.status_int, 400)

    def test_keypair_import(self):
        body = {
            'keypair': {
                'name': 'create_test',
                'public_key': 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDBYIznA'
                              'x9D7118Q1VKGpXy2HDiKyUTM8XcUuhQpo0srqb9rboUp4'
                              'a9NmCwpWpeElDLuva707GOUnfaBAvHBwsRXyxHJjRaI6Y'
                              'Qj2oLJwqvaSaWUbyT1vtryRqy6J3TecN0WINY71f4uymi'
                              'MZP0wby4bKBcYnac8KiCIlvkEl0ETjkOGUq8OyWRmn7lj'
                              'j5SESEUdBP0JnuTFKddWTU/wD6wydeJaUhBTqOlHn0kX1'
                              'GyqoNTE1UEhcM5ZRWgfUZfTjVyDF2kGj3vJLCJtJ8LoGc'
                              'j7YaN4uPg1rBle+izwE/tLonRrds+cev8p6krSSrxWOwB'
                              'bHkXa6OciiJDvkRzJXzf',
            },
        }
        req = webob.Request.blank('/v2/fake/os-keypairs')
        req.method = 'POST'
        req.body = json.dumps(body)
        req.headers['Content-Type'] = 'application/json'
        res = req.get_response(fakes.wsgi_app())
        self.assertEqual(res.status_int, 200)
        # FIXME(ja): sholud we check that public_key was sent to create?
        res_dict = json.loads(res.body)
        self.assertTrue(len(res_dict['keypair']['fingerprint']) > 0)
        # Importing an existing public key must not return a private key.
        self.assertFalse('private_key' in res_dict['keypair'])

    def test_keypair_create_duplicate(self):
        # key_pair_get succeeding (returning None without raising) makes the
        # controller treat the name as already taken.
        self.stubs.Set(db, "key_pair_get", db_key_pair_get)
        body = {'keypair': {'name': 'create_duplicate'}}
        req = webob.Request.blank('/v2/fake/os-keypairs')
        req.method = 'POST'
        req.body = json.dumps(body)
        req.headers['Content-Type'] = 'application/json'
        res = req.get_response(fakes.wsgi_app())
        self.assertEqual(res.status_int, 409)

    def test_keypair_import_bad_key(self):
        body = {
            'keypair': {
                'name': 'create_test',
                'public_key': 'ssh-what negative',
            },
        }
        req = webob.Request.blank('/v2/fake/os-keypairs')
        req.method = 'POST'
        req.body = json.dumps(body)
        req.headers['Content-Type'] = 'application/json'
        res = req.get_response(fakes.wsgi_app())
        self.assertEqual(res.status_int, 400)

    def test_keypair_delete(self):
        req = webob.Request.blank('/v2/fake/os-keypairs/FAKE')
        req.method = 'DELETE'
        req.headers['Content-Type'] = 'application/json'
        res = req.get_response(fakes.wsgi_app())
        self.assertEqual(res.status_int, 202)

    def test_keypair_delete_not_found(self):

        def db_key_pair_get_not_found(context, user_id, name):
            raise exception.KeyPairNotFound()

        self.stubs.Set(db, "key_pair_get",
                       db_key_pair_get_not_found)
        req = webob.Request.blank('/v2/fake/os-keypairs/WHAT')
        res = req.get_response(fakes.wsgi_app())
        # NOTE(review): removed leftover Python-2 debug statement `print res`
        # (a syntax error under Python 3 and noise in test output).
        self.assertEqual(res.status_int, 404)
class KeypairsXMLSerializerTest(test.TestCase):
    """Tests XML serialization and deserialization of keypair payloads."""

    def setUp(self):
        super(KeypairsXMLSerializerTest, self).setUp()
        self.deserializer = wsgi.XMLDeserializer()

    def test_default_serializer(self):
        exemplar = dict(keypair=dict(
                public_key='fake_public_key',
                private_key='fake_private_key',
                fingerprint='fake_fingerprint',
                user_id='fake_user_id',
                name='fake_key_name'))
        serializer = keypairs.KeypairTemplate()
        text = serializer.serialize(exemplar)
        # NOTE(review): removed leftover Python-2 debug statement `print text`.
        tree = etree.fromstring(text)
        self.assertEqual('keypair', tree.tag)
        # Every child element must round-trip a field of the exemplar.
        for child in tree:
            self.assertTrue(child.tag in exemplar['keypair'])
            self.assertEqual(child.text, exemplar['keypair'][child.tag])

    def test_index_serializer(self):
        exemplar = dict(keypairs=[
                dict(keypair=dict(
                        name='key1_name',
                        public_key='key1_key',
                        fingerprint='key1_fingerprint')),
                dict(keypair=dict(
                        name='key2_name',
                        public_key='key2_key',
                        fingerprint='key2_fingerprint'))])
        serializer = keypairs.KeypairsTemplate()
        text = serializer.serialize(exemplar)
        # NOTE(review): removed leftover Python-2 debug statement `print text`.
        tree = etree.fromstring(text)
        self.assertEqual('keypairs', tree.tag)
        self.assertEqual(len(exemplar['keypairs']), len(tree))
        for idx, keypair in enumerate(tree):
            self.assertEqual('keypair', keypair.tag)
            kp_data = exemplar['keypairs'][idx]['keypair']
            for child in keypair:
                self.assertTrue(child.tag in kp_data)
                self.assertEqual(child.text, kp_data[child.tag])

    def test_deserializer(self):
        exemplar = dict(keypair=dict(
                name='key_name',
                public_key='public_key'))
        intext = ("<?xml version='1.0' encoding='UTF-8'?>\n"
                  '<keypair><name>key_name</name>'
                  '<public_key>public_key</public_key></keypair>')
        result = self.deserializer.deserialize(intext)['body']
        self.assertEqual(result, exemplar)
| |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for contrib.seq2seq.python.seq2seq.beam_search_decoder."""
# pylint: disable=unused-import,g-bad-import-order
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: enable=unused-import
import numpy as np
from tensorflow.contrib.rnn import core_rnn_cell
from tensorflow.contrib.seq2seq.python.ops import attention_wrapper
from tensorflow.contrib.seq2seq.python.ops import beam_search_decoder
from tensorflow.contrib.seq2seq.python.ops import beam_search_ops
from tensorflow.contrib.seq2seq.python.ops import decoder
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.layers import core as layers_core
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
# pylint: enable=g-import-not-at-top
class TestGatherTree(test.TestCase):
  """Tests the gather_tree function."""

  def test_gather_tree(self):
    # (max_time = 3, batch_size = 2, beam_width = 3)
    # Arrays are authored batch-major for readability, then swapped into the
    # time-major layout that gather_tree expects.
    def time_major(rows):
      return np.array(rows, dtype=np.int32).transpose([1, 0, 2])

    predicted_ids = time_major(
        [[[1, 2, 3], [4, 5, 6], [7, 8, 9]],
         [[2, 3, 4], [5, 6, 7], [8, 9, 10]]])
    parent_ids = time_major(
        [[[0, 0, 0], [0, 1, 1], [2, 1, 2]],
         [[0, 0, 0], [1, 2, 0], [2, 1, 1]]])
    # sequence_lengths is shaped (batch_size = 2, beam_width = 3)
    sequence_lengths = [[3, 3, 3], [3, 3, 3]]
    expected_result = np.array(
        [[[2, 2, 2], [6, 5, 6], [7, 8, 9]],
         [[2, 4, 4], [7, 6, 6], [8, 9, 10]]]).transpose([1, 0, 2])
    gathered = beam_search_ops.gather_tree(
        predicted_ids, parent_ids, sequence_lengths)
    with self.test_session() as sess:
      gathered_ = sess.run(gathered)
    self.assertAllEqual(expected_result, gathered_)
class TestEosMasking(test.TestCase):
  """Tests EOS masking used in beam search."""

  def test_eos_masking(self):
    probs = constant_op.constant([
        [[-.2, -.2, -.2, -.2, -.2], [-.3, -.3, -.3, 3, 0], [5, 6, 0, 0, 0]],
        [[-.2, -.2, -.2, -.2, 0], [-.3, -.3, -.1, 3, 0], [5, 6, 3, 0, 0]],
    ])
    eos_token = 0
    previously_finished = constant_op.constant(
        [[0, 1, 0], [0, 1, 1]], dtype=dtypes.float32)
    masked = beam_search_decoder._mask_probs(probs, eos_token,
                                             previously_finished)
    with self.test_session() as sess:
      probs_, masked_ = sess.run([probs, masked])

    # Unfinished beams pass through unchanged.
    for batch, beam in [(0, 0), (0, 2), (1, 0)]:
      self.assertAllEqual(probs_[batch][beam], masked_[batch][beam])
    # Finished beams keep probability only on the EOS token; every other
    # token is forced to the most negative representable float.
    for batch, beam in [(0, 1), (1, 1), (1, 2)]:
      self.assertEqual(masked_[batch][beam][0], 0)
      for tok in range(1, 5):
        self.assertAllClose(masked_[batch][beam][tok],
                            np.finfo('float32').min)
class TestBeamStep(test.TestCase):
  """Tests a single step of beam search.

  Uses a tiny, fully hand-checkable configuration (batch 2, beam 3,
  vocab 5) so the winning candidates of one _beam_search_step call can
  be enumerated by hand.
  """

  def setUp(self):
    super(TestBeamStep, self).setUp()
    self.batch_size = 2
    self.beam_width = 3
    self.vocab_size = 5
    self.end_token = 0
    self.length_penalty_weight = 0.6

  def test_step(self):
    """One step where no beam has finished yet."""
    dummy_cell_state = array_ops.zeros([self.batch_size, self.beam_width])
    # All beams start with equal log-probs, length 2, none finished.
    beam_state = beam_search_decoder.BeamSearchDecoderState(
        cell_state=dummy_cell_state,
        log_probs=nn_ops.log_softmax(
            array_ops.ones([self.batch_size, self.beam_width])),
        lengths=constant_op.constant(
            2, shape=[self.batch_size, self.beam_width], dtype=dtypes.int32),
        finished=array_ops.zeros(
            [self.batch_size, self.beam_width], dtype=dtypes.bool))

    # Nearly-flat logits with a handful of spikes; the spiked
    # (beam, token) pairs below determine the top-k winners asserted later.
    logits_ = np.full([self.batch_size, self.beam_width, self.vocab_size],
                      0.0001)
    logits_[0, 0, 2] = 1.9
    logits_[0, 0, 3] = 2.1
    logits_[0, 1, 3] = 3.1
    logits_[0, 1, 4] = 0.9
    logits_[1, 0, 1] = 0.5
    logits_[1, 1, 2] = 2.7
    logits_[1, 2, 2] = 10.0
    logits_[1, 2, 3] = 0.2
    logits = ops.convert_to_tensor(logits_, dtype=dtypes.float32)
    log_probs = nn_ops.log_softmax(logits)

    outputs, next_beam_state = beam_search_decoder._beam_search_step(
        time=2,
        logits=logits,
        beam_state=beam_state,
        batch_size=ops.convert_to_tensor(self.batch_size),
        beam_width=self.beam_width,
        end_token=self.end_token,
        length_penalty_weight=self.length_penalty_weight)

    with self.test_session() as sess:
      outputs_, next_state_, state_, log_probs_ = sess.run(
          [outputs, next_beam_state, beam_state, log_probs])

    self.assertAllEqual(outputs_.predicted_ids, [[3, 3, 2], [2, 2, 1]])
    self.assertAllEqual(outputs_.parent_ids, [[1, 0, 0], [2, 1, 0]])
    # No EOS was emitted, so every beam grows by one step.
    self.assertAllEqual(next_state_.lengths, [[3, 3, 3], [3, 3, 3]])
    self.assertAllEqual(next_state_.finished, [[False, False, False],
                                               [False, False, False]])

    # Expected log-prob: prior log-prob of the parent beam plus the
    # log-prob of the chosen token, reordered by parent_ids.
    expected_log_probs = []
    expected_log_probs.append(state_.log_probs[0][[1, 0, 0]])
    expected_log_probs.append(state_.log_probs[1][[2, 1, 0]])  # 0 --> 1
    expected_log_probs[0][0] += log_probs_[0, 1, 3]
    expected_log_probs[0][1] += log_probs_[0, 0, 3]
    expected_log_probs[0][2] += log_probs_[0, 0, 2]
    expected_log_probs[1][0] += log_probs_[1, 2, 2]
    expected_log_probs[1][1] += log_probs_[1, 1, 2]
    expected_log_probs[1][2] += log_probs_[1, 0, 1]
    self.assertAllEqual(next_state_.log_probs, expected_log_probs)

  def test_step_with_eos(self):
    """One step where some beams have already emitted EOS."""
    dummy_cell_state = array_ops.zeros([self.batch_size, self.beam_width])
    # Beam (0,1) and beam (1,2) are already finished (shorter lengths).
    beam_state = beam_search_decoder.BeamSearchDecoderState(
        cell_state=dummy_cell_state,
        log_probs=nn_ops.log_softmax(
            array_ops.ones([self.batch_size, self.beam_width])),
        lengths=ops.convert_to_tensor(
            [[2, 1, 2], [2, 2, 1]], dtype=dtypes.int32),
        finished=ops.convert_to_tensor(
            [[False, True, False], [False, False, True]], dtype=dtypes.bool))

    logits_ = np.full([self.batch_size, self.beam_width, self.vocab_size],
                      0.0001)
    logits_[0, 0, 2] = 1.9
    logits_[0, 0, 3] = 2.1
    logits_[0, 1, 3] = 3.1
    logits_[0, 1, 4] = 0.9
    logits_[1, 0, 1] = 0.5
    logits_[1, 1, 2] = 5.7  # why does this not work when it's 2.7?
    logits_[1, 2, 2] = 1.0
    logits_[1, 2, 3] = 0.2
    logits = ops.convert_to_tensor(logits_, dtype=dtypes.float32)
    log_probs = nn_ops.log_softmax(logits)

    outputs, next_beam_state = beam_search_decoder._beam_search_step(
        time=2,
        logits=logits,
        beam_state=beam_state,
        batch_size=ops.convert_to_tensor(self.batch_size),
        beam_width=self.beam_width,
        end_token=self.end_token,
        length_penalty_weight=self.length_penalty_weight)

    with self.test_session() as sess:
      outputs_, next_state_, state_, log_probs_ = sess.run(
          [outputs, next_beam_state, beam_state, log_probs])

    self.assertAllEqual(outputs_.parent_ids, [[1, 0, 0], [1, 2, 0]])
    # Finished parents propagate the end token (0).
    self.assertAllEqual(outputs_.predicted_ids, [[0, 3, 2], [2, 0, 1]])
    # Finished beams keep their (shorter) length; others grow by one.
    self.assertAllEqual(next_state_.lengths, [[1, 3, 3], [3, 1, 3]])
    self.assertAllEqual(next_state_.finished, [[True, False, False],
                                               [False, True, False]])

    expected_log_probs = []
    expected_log_probs.append(state_.log_probs[0][[1, 0, 0]])
    expected_log_probs.append(state_.log_probs[1][[1, 2, 0]])
    # Finished beams ([0][0] and [1][1]) accumulate no new log-prob.
    expected_log_probs[0][1] += log_probs_[0, 0, 3]
    expected_log_probs[0][2] += log_probs_[0, 0, 2]
    expected_log_probs[1][0] += log_probs_[1, 1, 2]
    expected_log_probs[1][2] += log_probs_[1, 0, 1]
    self.assertAllEqual(next_state_.log_probs, expected_log_probs)
class BeamSearchDecoderTest(test.TestCase):
  """End-to-end dynamic_decode smoke tests for BeamSearchDecoder."""

  def _testDynamicDecodeRNN(self, time_major, has_attention):
    """Build and run a beam-search decode; check output types and shapes.

    Args:
      time_major: whether dynamic_decode should emit time-major outputs.
      has_attention: if True, wrap the cell with BahdanauAttention over a
        batch-tiled random memory.
    """
    encoder_sequence_length = [3, 2, 3, 1, 0]
    decoder_sequence_length = [2, 0, 1, 2, 3]
    batch_size = 5
    decoder_max_time = 4
    input_depth = 7
    cell_depth = 9
    attention_depth = 6
    vocab_size = 20
    end_token = vocab_size - 1
    start_token = 0
    embedding_dim = 50
    max_out = max(decoder_sequence_length)
    output_layer = layers_core.Dense(vocab_size, use_bias=True, activation=None)
    beam_width = 3

    with self.test_session() as sess:
      embedding = np.random.randn(vocab_size, embedding_dim).astype(np.float32)
      cell = core_rnn_cell.LSTMCell(cell_depth)
      if has_attention:
        inputs = np.random.randn(batch_size, decoder_max_time,
                                 input_depth).astype(np.float32)
        # The memory and its lengths are tiled beam_width times because
        # the decoder runs batch_size * beam_width hypotheses in parallel.
        tiled_inputs = beam_search_decoder.tile_batch(
            inputs, multiplier=beam_width)
        tiled_sequence_length = beam_search_decoder.tile_batch(
            encoder_sequence_length, multiplier=beam_width)
        attention_mechanism = attention_wrapper.BahdanauAttention(
            num_units=attention_depth,
            memory=tiled_inputs,
            memory_sequence_length=tiled_sequence_length)
        cell = attention_wrapper.AttentionWrapper(
            cell=cell,
            attention_mechanism=attention_mechanism,
            attention_layer_size=attention_depth,
            alignment_history=False)
      cell_state = cell.zero_state(
          dtype=dtypes.float32, batch_size=batch_size * beam_width)
      bsd = beam_search_decoder.BeamSearchDecoder(
          cell=cell,
          embedding=embedding,
          start_tokens=batch_size * [start_token],
          end_token=end_token,
          initial_state=cell_state,
          beam_width=beam_width,
          output_layer=output_layer,
          length_penalty_weight=0.0)

      final_outputs, final_state, final_sequence_lengths = (
          decoder.dynamic_decode(
              bsd, output_time_major=time_major, maximum_iterations=max_out))

      def _t(shape):
        # Swap the batch and time axes when checking time-major shapes.
        if time_major:
          return (shape[1], shape[0]) + shape[2:]
        return shape

      self.assertTrue(
          isinstance(final_outputs,
                     beam_search_decoder.FinalBeamSearchDecoderOutput))
      self.assertTrue(
          isinstance(final_state, beam_search_decoder.BeamSearchDecoderState))

      # Static shapes: the time dimension is unknown (None) before running.
      beam_search_decoder_output = final_outputs.beam_search_decoder_output
      self.assertEqual(
          _t((batch_size, None, beam_width)),
          tuple(beam_search_decoder_output.scores.get_shape().as_list()))
      self.assertEqual(
          _t((batch_size, None, beam_width)),
          tuple(final_outputs.predicted_ids.get_shape().as_list()))

      sess.run(variables.global_variables_initializer())
      sess_results = sess.run({
          'final_outputs': final_outputs,
          'final_state': final_state,
          'final_sequence_lengths': final_sequence_lengths
      })

      max_sequence_length = np.max(sess_results['final_sequence_lengths'])

      # A smoke test: runtime shapes match the longest decoded sequence.
      self.assertEqual(
          _t((batch_size, max_sequence_length, beam_width)),
          sess_results['final_outputs'].beam_search_decoder_output.scores.shape)
      self.assertEqual(
          _t((batch_size, max_sequence_length, beam_width)), sess_results[
              'final_outputs'].beam_search_decoder_output.predicted_ids.shape)

  def testDynamicDecodeRNNBatchMajorNoAttention(self):
    self._testDynamicDecodeRNN(time_major=False, has_attention=False)

  def testDynamicDecodeRNNBatchMajorYesAttention(self):
    self._testDynamicDecodeRNN(time_major=False, has_attention=True)
# Standard TensorFlow test entry point: discover and run all test cases above.
if __name__ == '__main__':
  test.main()
| |
"""
Code to generate the ONCdb from Robberto et al. (2013) data
"""
# from astrodbkit import astrodb
from astropy.io import ascii
import astropy.coordinates as coord
import astropy.table as at
import astropy.units as q
import numpy as np
from astrodbkit import astrodb, astrocat
import pandas as pd
path = '/Users/jfilippazzo/Documents/Modules/ONCdb/'
# Photometry flags
# --------------------------------------
# 1:good => A
# 2:saturated but recoverd => B
# 3:saturatated => C
# 4:from drizzle/coadded image => D
# 0:undetected => E
# --------------------------------------
flags = {0:'E', 1:'A', 2:'B', 3:'C', 4:'D'}
def ONC_catalogs_to_database(ra=83.81775*q.deg, dec=-5.38788889*q.deg, radius=0.0001, count=-1):
    """
    Generate the SQL database from the input catalogs.

    Parameters
    ----------
    ra: astropy.units.quantity.Quantity
        RA of the ONC field center used for all cone searches
    dec: astropy.units.quantity.Quantity
        Dec of the ONC field center
    radius: float
        Cross-match radius passed to group_sources
        # presumably in degrees -- TODO confirm against astrocat.Catalog
    count: int
        # NOTE(review): unused in this function body -- verify before removing

    Returns
    -------
    tuple
        The (astrocat.Catalog, database) pair
    """
    # Empty instance
    onc, db = astrocat.Catalog(), None
    # ACS catalog from Robberto+2013
    onc.Vizier_query('J/ApJS/207/10/table5', 'ACS', ra, dec, 10*q.deg, group=False)
    # WFPC2 catalog from Robberto+2013
    onc.Vizier_query('J/ApJS/207/10/table6', 'WFPC2', ra, dec, 10*q.deg, group=False)
    # NICMOS catalog from Robberto+2013
    onc.Vizier_query('J/ApJS/207/10/table7', 'NICMOS', ra, dec, 10*q.deg, group=False)
    # Group sources
    onc.group_sources(radius)
    # Get the radius from the ONC center which includes all Robberto+2013 sources
    center = coord.SkyCoord(ra=ra, dec=dec, frame='icrs')
    radec = coord.SkyCoord(ra=onc.sources['ra'], dec=onc.sources['dec'], unit=(q.deg, q.deg), frame='icrs')
    onc_radius = np.max(radec.separation(center)).value*q.deg
    # Get 2MASS
    onc.Vizier_query('II/246/out', 'TMASS', ra, dec, onc_radius, group=False)
    # Get GAIA DR2
    onc.Vizier_query('I/345/gaia2', 'GAIA', ra, dec, onc_radius, ra_col='RA_ICRS', dec_col='DE_ICRS', group=False)
    # Get ALLWISE
    onc.Vizier_query('II/328/allwise', 'ALLWISE', ra, dec, onc_radius, group=False)
    # Get spectral types from Hillenbrand+2013
    onc.Vizier_query('J/AJ/146/85/table2', 'Hill13', ra, dec, onc_radius, group=False)
    # Get the LAMOST spectra (stars only)
    onc.Vizier_query('V/149/dr2', 'LAMOST', ra, dec, onc_radius, group=False, column_filters={"Class":"=STAR", "objType":"=Star"})
    # Get SDSS spectra (class 6 = star)
    onc.Vizier_query('V/147/sdss12', 'SDSS', ra, dec, onc_radius, ra_col='RA_ICRS', dec_col='DE_ICRS', column_filters={"class":"=6"}, group=False)
    # Re-group now that all catalogs are loaded
    onc.group_sources(radius)
    # Generate SQL database
    db = generate_ONCdb(onc)
    return onc, db
def generate_ONCdb(cat):
    """
    Generate the database from a list of unique sources and the Robberto+2013 data.

    Parameters
    ----------
    cat: astrodbkit.astrocat.Catalog
        The assembled catalog

    Returns
    -------
    astrodbkit.astrodb.Database
        The populated database
    """
    # Make an empty database
    astrodb.create_database(path+'orion.db', path+'orion.sql', overwrite=True)
    # Load the empty database
    db = astrodb.Database(path+'orion.db')
    # Load the source list
    source_list = at.Table(cat.sources.values, names=cat.sources.columns)
    # Rename some columns
    source_list.rename_column('flag', 'comments')
    # Populate the SOURCES table (must have 'ra' and 'dec' columns)
    db.add_data(source_list, 'sources')
    # Populate the SYSTEMS, INSTRUMENTS, TELESCOPES and PUBLICATIONS tables
    db.add_data([['name'],['Vega'],['AB']], 'systems', clean_up=False)
    db.add_data([['name','publication_shortname'],['HST',''],['2MASS',''],['WISE',''],['GAIA',''],['LAMOST',''],['SDSS','']], 'telescopes', clean_up=False)
    db.add_data([['name','publication_shortname'],['ACS',''],['NICMOS',''],['WFPC2',''],['WFC3',''],['2MASS',''],['WISE',''],['GAIA',''],['LAMOST',''],['SDSS','']], 'instruments', clean_up=False)
    db.add_data([['bibcode','shortname','DOI','description'],\
                 ['2013yCat..22070010R','Robb13','','VizieR Online Data Catalog: HST Treasury Program on the ONC'],\
                 ['2003yCat.2246....0C','Cutr03','','VizieR Online Data Catalog: 2MASS All-Sky Catalog of Point Sources'],\
                 ['2014yCat.2328....0C','Cutr13','','VizieR Online Data Catalog: AllWISE Data Release '],\
                 ['2018A&A..in.prep...','Gaia18','','Gaia DR2'],\
                 ['2016yCat.5149....0L','Luo_16','','The second data release (DR2) of the LAMOST regular survey'],\
                 ['2015ApJS..219...12A','Alam15','','The Eleventh and Twelfth Data Releases of the Sloan Digital Sky Survey: Final Data from SDSS-III']\
                 ], 'publications', clean_up=False)
    # Populate the other tables with dummy data (foreign keys disabled)
    db.query("pragma foreign_keys=OFF")
    db.modify("INSERT INTO spectra (source_id, spectrum) VALUES(0,'/foobar/test.fits')")
    db.modify("INSERT INTO spectral_types (source_id, spectral_type) VALUES(0,0)")
    db.modify("INSERT INTO parallaxes (source_id, parallax) VALUES(0,0)")
    db.query("pragma foreign_keys=ON")
    # Each catalog is ingested best-effort: a failure skips that catalog only.
    # FIX: catch Exception instead of a bare except so KeyboardInterrupt and
    # SystemExit still propagate, and report the actual error for diagnosis.
    # Add the ACS photometry
    try:
        add_acs_data(db, cat.ACS)
    except Exception as err:
        print('No ACS data added')
        print(err)
    # Add the NICMOS photometry
    try:
        add_nicmos_data(db, cat.NICMOS)
    except Exception as err:
        print('No NICMOS data added')
        print(err)
    # Add the WFPC2 photometry
    try:
        add_wpc2_data(db, cat.WFPC2)
    except Exception as err:
        print('No WFPC2 data added')
        print(err)
    db.add_data(cat.SDSS, table='photometry', bands=['SDSS.u','SDSS.g','SDSS.r','SDSS.i','SDSS.z'], rename_columns='SDSS', column_fill='SDSS', clean_up=False)
    db.add_data(cat.TMASS, table='photometry', bands=['2MASS.J','2MASS.H','2MASS.Ks'], rename_columns='2MASS', column_fill='2MASS', clean_up=False)
    db.add_data(cat.ALLWISE, table='photometry', bands=['WISE.W1','WISE.W2','WISE.W3','WISE.W4'], rename_columns='WISE', column_fill='WISE', clean_up=False)
    db.add_data(cat.GAIA, table='parallaxes', rename_columns='GAIA', column_fill='GAIA', clean_up=False)
    db.add_data(cat.GAIA, table='photometry', rename_columns='GAIA', column_fill='GAIA', clean_up=False)
    # Add the spectral types
    try:
        add_Hill13_data(db, cat.Hill13)
    except Exception as err:
        print('No Hill13 data added')
        print(err)
    # Add LAMOST spectra
    try:
        add_LAMOST_data(db, cat.LAMOST)
    except Exception as err:
        print('No LAMOST data added')
        print(err)
    return db
def add_LAMOST_data(db, cat):
    """
    Add the cross-matched LAMOST DR2 spectra and spectral types to the database.

    Parameters
    ----------
    db: astrodbkit.astrodb.Database
        The database to populate
    cat: pandas.DataFrame
        The cross-matched LAMOST catalog
    """
    lamo = at.Table.from_pandas(cat)
    # Add columns for telescope_id, instrument_id, and publication_shortname
    # FIX: the publications table defines shortname 'Luo_16' (see
    # generate_ONCdb); 'Luo_15' was an orphaned foreign key.
    lamo['publication_shortname'] = ['Luo_16']*len(lamo)
    lamo['telescope_id'] = [5]*len(lamo)
    lamo['instrument_id'] = [8]*len(lamo)
    lamo['flux_units'] = ['Wm-2um-1']*len(lamo)
    lamo['wavelength_units'] = ['A']*len(lamo)
    lamo.rename_column('Obs.Date', 'obs_date')
    # Build the CDS FTP URL for each spectrum from its plan/MJD/spectrograph/fiber IDs
    lamo['spectrum'] = ['http://cdsarc.u-strasbg.fr/ftp/cats/V/146/LAMOST/fits/{0}/spec-{1}-{0}%5Fsp{2}-{3}.fits'.format(lamo[n]['PlanId'],lamo[n]['LMJD'],lamo[n]['spId'],lamo[n]['FiberId']) for n in range(len(lamo))]
    db.query("pragma foreign_keys=OFF")
    db.add_data(lamo, table='spectra', clean_up=False)
    db.query("pragma foreign_keys=ON")
    # Collect the spectral types
    spts = lamo['SubClass']
    # Convert the spectral types to (numeric type, luminosity class) pairs
    spts = [specType(s) if isinstance(s, str) else [np.nan,''] for s in spts]
    typ, lc = np.array(spts).T
    # Add to the table
    lamo['spectral_type'] = typ
    lamo['luminosity_class'] = lc
    # Add the data
    db.query("pragma foreign_keys=OFF")
    db.add_data(lamo, table='spectral_types', clean_up=False)
    db.query("pragma foreign_keys=ON")
    db.save()
def add_acs_data(db, cat):#, file=path+'raw_data/viz_acs_with_IDs.tsv'):
    """
    Add the cross-matched Robberto+2013 ACS photometry to the database.

    Parameters
    ----------
    db: astrodbkit.astrodb.Database
        The database to populate
    cat: pandas.DataFrame
        The cross-matched ACS catalog
    """
    # Read in the data
    # acs = ascii.read(file)
    acs = at.Table.from_pandas(cat)
    # Rename some columns
    acs.rename_column('Obs', 'epoch')
    # Add columns for telescope_id, instrument_id, system_id, and publication_shortname
    # (ids match the rows inserted by generate_ONCdb: HST / ACS / Vega)
    acs['publication_shortname'] = ['Robb13']*len(acs)
    acs['telescope_id'] = [1]*len(acs)
    acs['instrument_id'] = [1]*len(acs)
    acs['system_id'] = [1]*len(acs)
    # Add the photometry to the database one band at a time
    # Rename the columns to match svo_filters
    bands = [c for c in acs.colnames if c.startswith('F')]
    for b in bands:
        try:
            # Change the column names to add the band
            acs.rename_column(b, 'magnitude')
            acs.rename_column('e_'+b, 'magnitude_unc')
            acs.rename_column('f_'+b, 'flags')
            acs.add_column(at.Column(['ACS_HRC.'+b]*len(acs), 'band'))
            # Convert flag integer to string (see the module-level `flags` map)
            acs['flags'] = at.Column([flags[i] for i in acs['flags']], 'flags')
            # Move the magnitudes into the correct column
            for row in acs:
                # Flags D (drizzle/coadd) and E (undetected): the value lives
                # in the uncertainty column, so shift it over
                if row['flags'] in ['D','E']:
                    row['magnitude'] = row['magnitude_unc']
                    row['magnitude_unc'] = np.nan
                # Flag C (saturated): keep the magnitude, drop the uncertainty
                if row['flags']=='C':
                    row['magnitude_unc'] = np.nan
                # Blank strings become NaN so the float cast below succeeds
                if not str(row['magnitude']).strip():
                    row['magnitude'] = np.nan
                if not str(row['magnitude_unc']).strip():
                    row['magnitude_unc'] = np.nan
            # Make sure the magnitudes are floats
            acs['magnitude'] = at.Column(acs['magnitude'], 'magnitude', dtype=float)
            acs['magnitude_unc'] = at.Column(acs['magnitude_unc'], 'magnitude_unc', dtype=float)
            # Add the data (foreign keys disabled during bulk insert)
            db.query("pragma foreign_keys=OFF")
            db.add_data(acs, table='photometry', clean_up=False)
            db.query("pragma foreign_keys=ON")
            # Change the column name back
            acs.rename_column('magnitude', b)
            acs.rename_column('magnitude_unc', 'e_'+b)
            acs.rename_column('flags', 'f_'+b)
            acs.remove_column('band')
        except IOError:
            # NOTE(review): only IOError is caught; a missing 'e_'+b or 'f_'+b
            # column raises KeyError mid-rename and would propagate with the
            # table partially renamed -- confirm this is the intended behavior.
            pass
    db.save()
def add_nicmos_data(db, cat):#file=path+'raw_data/viz_nicmos_with_IDs.tsv'):
    """
    Add the cross-matched Robberto+2013 NICMOS photometry to the database.

    Parameters
    ----------
    db: astrodbkit.astrodb.Database
        The database to populate
    cat: pandas.DataFrame
        The cross-matched NICMOS catalog
    """
    # Read in the data
    # nic = ascii.read(file)
    nic = at.Table.from_pandas(cat)
    # Add columns for telescope_id, instrument_id, system_id, and publication_shortname
    # (ids match the rows inserted by generate_ONCdb: HST / NICMOS / Vega)
    nic['publication_shortname'] = ['Robb13']*len(nic)
    nic['telescope_id'] = [1]*len(nic)
    nic['instrument_id'] = [2]*len(nic)
    nic['system_id'] = [1]*len(nic)
    # Add the photometry to the database one band at a time
    # Rename the columns to match svo_filters
    bands = [c for c in nic.colnames if c.startswith('F')]
    for b in bands:
        try:
            # Change the column names to add the band
            nic.rename_column(b, 'magnitude')
            nic.rename_column('e_'+b, 'magnitude_unc')
            nic.rename_column('f_'+b, 'flags')
            nic.add_column(at.Column(['NICMOS3.'+b]*len(nic), 'band'))
            # Convert flag integer to string (see the module-level `flags` map)
            nic['flags'] = at.Column([flags[i] for i in nic['flags']], 'flags')
            # Move the magnitudes into the correct column
            for row in nic:
                # Flags D (drizzle/coadd) and E (undetected): the value lives
                # in the uncertainty column, so shift it over
                if row['flags'] in ['D','E']:
                    row['magnitude'] = row['magnitude_unc']
                    row['magnitude_unc'] = np.nan
                # Flag C (saturated): keep the magnitude, drop the uncertainty
                if row['flags']=='C':
                    row['magnitude_unc'] = np.nan
                # Blank strings become NaN so the float cast below succeeds
                if not str(row['magnitude']).strip():
                    row['magnitude'] = np.nan
                if not str(row['magnitude_unc']).strip():
                    row['magnitude_unc'] = np.nan
            # Make sure the magnitudes are floats
            nic['magnitude'] = at.Column(nic['magnitude'], 'magnitude', dtype=float)
            nic['magnitude_unc'] = at.Column(nic['magnitude_unc'], 'magnitude_unc', dtype=float)
            # Add the data (foreign keys disabled during bulk insert)
            db.query("pragma foreign_keys=OFF")
            db.add_data(nic, table='photometry', clean_up=False)
            db.query("pragma foreign_keys=ON")
            # Change the column name back
            nic.rename_column('magnitude', b)
            nic.rename_column('magnitude_unc', 'e_'+b)
            nic.rename_column('flags', 'f_'+b)
            nic.remove_column('band')
        except IOError:
            # NOTE(review): only IOError is caught; a missing 'e_'+b or 'f_'+b
            # column raises KeyError mid-rename and would propagate with the
            # table partially renamed -- confirm this is the intended behavior.
            pass
    db.save()
def add_wpc2_data(db, cat):#file=path+'raw_data/viz_wfpc2_with_IDs.tsv'):
    """
    Add the cross-matched Robberto+2013 WFPC2 photometry to the database.

    Parameters
    ----------
    db: astrodbkit.astrodb.Database
        The database to populate
    cat: pandas.DataFrame
        The cross-matched WFPC2 catalog
    """
    # Read in the data
    # wpc = ascii.read(file)
    wpc = at.Table.from_pandas(cat)
    # Add columns for telescope_id, instrument_id, system_id, and publication_shortname
    # (ids match the rows inserted by generate_ONCdb: HST / WFPC2 / Vega)
    wpc['publication_shortname'] = ['Robb13']*len(wpc)
    wpc['telescope_id'] = [1]*len(wpc)
    wpc['instrument_id'] = [3]*len(wpc)
    wpc['system_id'] = [1]*len(wpc)
    # Add the photometry to the database one band at a time
    # Rename the columns to match svo_filters
    bands = [c for c in wpc.colnames if c.startswith('F')]
    for b in bands:
        try:
            # Change the column names to add the band
            wpc.rename_column(b, 'magnitude')
            wpc.rename_column('e_'+b, 'magnitude_unc')
            wpc.rename_column('f_'+b, 'flags')
            # WFPC2 filter names are upper-cased to match svo_filters
            wpc.add_column(at.Column(['WFPC2.'+b.upper()]*len(wpc), 'band'))
            # Convert flag integer to string (see the module-level `flags` map)
            wpc['flags'] = at.Column([flags[i] for i in wpc['flags']], 'flags')
            # Move the magnitudes into the correct column
            for row in wpc:
                # Flags D (drizzle/coadd) and E (undetected): the value lives
                # in the uncertainty column, so shift it over
                if row['flags'] in ['D','E']:
                    row['magnitude'] = row['magnitude_unc']
                    row['magnitude_unc'] = np.nan
                # Flag C (saturated): keep the magnitude, drop the uncertainty
                if row['flags']=='C':
                    row['magnitude_unc'] = np.nan
                # Blank strings become NaN so the float cast below succeeds
                if not str(row['magnitude']).strip():
                    row['magnitude'] = np.nan
                if not str(row['magnitude_unc']).strip():
                    row['magnitude_unc'] = np.nan
            # Make sure the magnitudes are floats
            wpc['magnitude'] = at.Column(wpc['magnitude'], 'magnitude', dtype=float)
            wpc['magnitude_unc'] = at.Column(wpc['magnitude_unc'], 'magnitude_unc', dtype=float)
            # Add the data (foreign keys disabled during bulk insert)
            db.query("pragma foreign_keys=OFF")
            db.add_data(wpc, table='photometry', clean_up=False)
            db.query("pragma foreign_keys=ON")
            # Change the column name back
            wpc.rename_column('magnitude', b)
            wpc.rename_column('magnitude_unc', 'e_'+b)
            wpc.rename_column('flags', 'f_'+b)
            wpc.remove_column('band')
        except IOError:
            # NOTE(review): only IOError is caught; a missing 'e_'+b or 'f_'+b
            # column raises KeyError mid-rename and would propagate with the
            # table partially renamed -- confirm this is the intended behavior.
            pass
    db.save()
def add_Hill13_data(db, cat):
    """
    Ingest the cross-matched Hillenbrand+2013 spectral types into the database.
    """
    # Convert the pandas catalog to an astropy table
    table = at.Table.from_pandas(cat)
    # Parse each spectral-type string into (numeric type, luminosity class)
    parsed = [specType(s) if isinstance(s, str) else [np.nan, '']
              for s in table['SpT2']]
    numeric, lum_class = np.array(parsed).T
    table['spectral_type'] = numeric
    table['luminosity_class'] = lum_class
    # Tag every row with the source publication
    table['publication_shortname'] = ['Hill13'] * len(table)
    # Insert with foreign-key checks temporarily disabled, then persist
    db.query("pragma foreign_keys=OFF")
    db.add_data(table, table='spectral_types', clean_up=False)
    db.query("pragma foreign_keys=ON")
    db.save()
def specType(SpT, types=[i for i in 'OBAFGKMLTY'], verbose=False):
    """
    Converts between float and letter/number spectral types (e.g. 14.5 => 'B4.5' and 'A3' => 23).

    Parameters
    ----------
    SpT: float, str
        Float spectral type or letter/number spectral type between O0.0 and Y9.9
    types: list
        The MK spectral type letters to include, e.g. ['M','L','T','Y']
    verbose: bool
        Print a message when the input cannot be parsed

    Returns
    -------
    list, str
        [numeric type, luminosity class] for string input, the spectral type
        string for numeric input, or [nan, ''] if the input can't be parsed
    """
    result = [np.nan, '']
    try:
        # String input
        if isinstance(SpT, (str, bytes)):
            # NOTE(review): .replace('b','') strips every lowercase 'b',
            # presumably to clean bytes-literal artifacts -- confirm
            SpT = SpT.replace("'", '').replace('b', '')
            val, LC = np.nan, ''
            # Empty check first so SpT[0] can't raise on ''
            if SpT != '' and SpT[0] in types:
                MK, LC = SpT[0], 'V'
                # Strip peculiarity/quality suffixes before parsing the subtype
                suf = SpT[1:].replace('n','').replace('e','').replace('w','')\
                             .replace('m','').replace('a','').replace('Fe','')\
                             .replace('-1','').replace(':','').replace('?','')\
                             .replace('-V','').replace('p','').replace('<','')\
                             .replace('>','')
                if suf.replace('.','').isdigit():
                    val = float(suf)
                else:
                    # Try to split off an explicit luminosity class
                    for cl in ['III','V','IV']:
                        try:
                            idx = suf.find(cl)
                            val = float(suf[:idx].split('/')[0])
                            LC = suf[idx:].split('/')[0].split(',')[0]
                            break
                        except Exception:
                            try:
                                val = float(suf)
                            except Exception:
                                continue
                # return [types.index(MK)*10+val-(4. if MK in ['M','L','T','Y'] else 0), LC]
                return [types.index(MK)*10 + val, LC]
        # Numerical input
        # FIX: the original `isinstance(SpT, float) or isinstance(SpT, int)
        # and 0.0 <= SpT < ...` bound the range check only to the int test
        # (Python gives `and` higher precedence than `or`), so any float
        # skipped the range check -- e.g. specType(-5.0) returned 'Y5'.
        elif isinstance(SpT, (float, int)) and 0.0 <= SpT < len(types)*10:
            letter = ''.join(types)[int(SpT // 10)]
            # Drop a trailing .0 on whole-number subtypes
            number = int(SpT % 10) if SpT % 10 == int(SpT % 10) else SpT % 10
            result = '{}{}'.format(letter, number)
        # Bogus input
        else:
            if verbose:
                print('Sir, Spectral type',SpT,'must be a float between 0 and',len(types)*10,'or a string of class',types)
    except Exception:
        # Any parse failure falls back to the [nan, ''] sentinel
        pass
    return result
| |
# -*- test-case-name: twisted.test.test_application,twisted.test.test_cooperator -*-
# Copyright (c) 2001-2010 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Reactor-based Services
Here are services to run clients, servers and periodic services using
the reactor.
This module (dynamically) defines various Service subclasses that let
you represent clients and servers in a Service hierarchy.
They are as follows::
TCPServer, TCPClient,
UNIXServer, UNIXClient,
SSLServer, SSLClient,
UDPServer, UDPClient,
UNIXDatagramServer, UNIXDatagramClient,
MulticastServer
These classes take arbitrary arguments in their constructors and pass
them straight on to their respective reactor.listenXXX or
reactor.connectXXX calls.
For example, the following service starts a web server on port 8080:
C{TCPServer(8080, server.Site(r))}. See the documentation for the
reactor.listen/connect* methods for more information.
Maintainer: Moshe Zadka
"""
import warnings
from twisted.python import log
from twisted.application import service
from twisted.internet import task
class _VolatileDataService(service.Service):
    """
    Service base class whose C{volatile} attributes are dropped when the
    instance is pickled (they hold live, non-picklable runtime objects).
    """

    # Names of attributes to exclude from the pickled state.
    volatile = []

    def __getstate__(self):
        state = service.Service.__getstate__(self)
        for name in self.volatile:
            state.pop(name, None)
        return state
class _AbstractServer(_VolatileDataService):
    """
    @cvar volatile: list of attribute to remove from pickling.
    @type volatile: C{list}

    @ivar method: the type of method to call on the reactor, one of B{TCP},
        B{UDP}, B{SSL} or B{UNIX}.
    @type method: C{str}

    @ivar reactor: the current running reactor.
    @type reactor: a provider of C{IReactorTCP}, C{IReactorUDP},
        C{IReactorSSL} or C{IReactorUnix}.

    @ivar _port: instance of port set when the service is started.
    @type _port: a provider of C{IListeningPort}.
    """

    volatile = ['_port']
    method = None
    reactor = None
    _port = None

    def __init__(self, *args, **kwargs):
        # All arguments except 'reactor' are stored verbatim and later
        # passed straight to reactor.listen<method>() in _getPort.
        self.args = args
        if 'reactor' in kwargs:
            self.reactor = kwargs.pop("reactor")
        self.kwargs = kwargs

    def privilegedStartService(self):
        """
        Create the listening port during the privileged startup phase
        (before startService runs).
        """
        service.Service.privilegedStartService(self)
        self._port = self._getPort()

    def startService(self):
        """
        Create the listening port, unless privilegedStartService already did.
        """
        service.Service.startService(self)
        if self._port is None:
            self._port = self._getPort()

    def stopService(self):
        """
        Stop listening; returns the deferred from stopListening (or None
        if no port was ever created).
        """
        service.Service.stopService(self)
        # TODO: if startup failed, should shutdown skip stopListening?
        # _port won't exist
        if self._port is not None:
            d = self._port.stopListening()
            del self._port
            return d

    def _getPort(self):
        """
        Wrapper around the appropriate listen method of the reactor.

        @return: the port object returned by the listen method.
        @rtype: an object providing L{IListeningPort}.
        """
        if self.reactor is None:
            # Fall back to the global reactor when none was injected.
            from twisted.internet import reactor
        else:
            reactor = self.reactor
        return getattr(reactor, 'listen%s' % (self.method,))(
            *self.args, **self.kwargs)
class _AbstractClient(_VolatileDataService):
    """
    @cvar volatile: list of attribute to remove from pickling.
    @type volatile: C{list}
    @ivar method: the type of method to call on the reactor, one of B{TCP},
        B{UDP}, B{SSL} or B{UNIX}.
    @type method: C{str}
    @ivar reactor: the current running reactor.
    @type reactor: a provider of C{IReactorTCP}, C{IReactorUDP},
        C{IReactorSSL} or C{IReactorUnix}.
    @ivar _connection: instance of connection set when the service is started.
    @type _connection: a provider of C{IConnector}.
    """

    volatile = ['_connection']
    method = None
    reactor = None
    _connection = None

    def __init__(self, *args, **kwargs):
        # An explicit reactor may be injected via the 'reactor' keyword;
        # every other argument is forwarded to reactor.connect<method>.
        self.args = args
        if 'reactor' in kwargs:
            self.reactor = kwargs.pop("reactor")
        self.kwargs = kwargs

    def startService(self):
        service.Service.startService(self)
        self._connection = self._getConnection()

    def stopService(self):
        service.Service.stopService(self)
        if self._connection is not None:
            self._connection.disconnect()
            # Deleting the instance attribute reveals the class-level None.
            del self._connection

    def _getConnection(self):
        """
        Wrapper around the appropriate connect method of the reactor.
        @return: the port object returned by the connect method.
        @rtype: an object providing L{IConnector}.
        """
        reactor = self.reactor
        if reactor is None:
            # Fall back to the global reactor when none was injected.
            from twisted.internet import reactor
        connect = getattr(reactor, 'connect%s' % (self.method,))
        return connect(*self.args, **self.kwargs)
# Docstring templates for the dynamically generated Server/Client classes
# below; %(tran)s and %(method)s are filled in from vars() of the
# generation loop.
_doc={
    'Client':
    """Connect to %(tran)s
Call reactor.connect%(method)s when the service starts, with the
arguments given to the constructor.
""",
    'Server':
    """Serve %(tran)s clients
Call reactor.listen%(method)s when the service starts, with the
arguments given to the constructor. When the service stops,
stop listening. See twisted.internet.interfaces for documentation
on arguments to the reactor method.
""",
}
# Dynamically define TCPServer, TCPClient, UNIXServer, ... (see module
# docstring).  The deprecated 'new' module (removed in Python 3) is not
# needed: the three-argument built-in type() creates classes identically.
for tran in 'TCP UNIX SSL UDP UNIXDatagram Multicast'.split():
    for side in 'Server Client'.split():
        # Multicast only exists on the listening side.
        if tran == "Multicast" and side == "Client":
            continue
        base = globals()['_Abstract' + side]
        # 'Generic' maps to the reactor's listenWith/connectWith spelling.
        method = {'Generic': 'With'}.get(tran, tran)
        doc = _doc[side] % vars()
        klass = type(tran + side, (base,), {'method': method, '__doc__': doc})
        globals()[tran + side] = klass
class GenericServer(_AbstractServer):
    """
    Serve Generic clients by calling C{reactor.listenWith} on startup with
    the constructor's arguments, and stop listening on shutdown.

    This service is deprecated, because C{reactor.listenWith} itself is
    deprecated.
    """

    method = 'With'

    def __init__(self, *args, **kwargs):
        # Emit the deprecation warning at the caller's frame.
        warnings.warn('GenericServer was deprecated in Twisted 10.1.',
                      category=DeprecationWarning, stacklevel=2)
        _AbstractServer.__init__(self, *args, **kwargs)
class GenericClient(_AbstractClient):
    """
    Connect via C{reactor.connectWith} on startup, using the constructor's
    arguments.

    This service is deprecated, because C{reactor.connectWith} itself is
    deprecated.
    """

    method = 'With'

    def __init__(self, *args, **kwargs):
        # Emit the deprecation warning at the caller's frame.
        warnings.warn('GenericClient was deprecated in Twisted 10.1.',
                      category=DeprecationWarning, stacklevel=2)
        _AbstractClient.__init__(self, *args, **kwargs)
class TimerService(_VolatileDataService):
    """Service to periodically call a function
    Every C{step} seconds call the given function with the given arguments.
    The service starts the calls when it starts, and cancels them
    when it stops.
    """
    # The LoopingCall is rebuilt on every start (see startService), so it
    # must never survive pickling.
    volatile = ['_loop']
    def __init__(self, step, callable, *args, **kwargs):
        # step: seconds between calls.
        # call: (callable, args, kwargs) invoked on every tick.
        self.step = step
        self.call = (callable, args, kwargs)
    def startService(self):
        """Start the periodic calls; the first call fires immediately."""
        service.Service.startService(self)
        callable, args, kwargs = self.call
        # we have to make a new LoopingCall each time we're started, because
        # an active LoopingCall remains active when serialized. If
        # LoopingCall were a _VolatileDataService, we wouldn't need to do
        # this.
        self._loop = task.LoopingCall(callable, *args, **kwargs)
        self._loop.start(self.step, now=True).addErrback(self._failed)
    def _failed(self, why):
        # make a note that the LoopingCall is no longer looping, so we don't
        # try to shut it down a second time in stopService. I think this
        # should be in LoopingCall. -warner
        self._loop.running = False
        log.err(why)
    def stopService(self):
        """Cancel the periodic calls (unless the loop already failed)."""
        if self._loop.running:
            self._loop.stop()
        return service.Service.stopService(self)
class CooperatorService(service.Service):
    """
    Simple L{service.IService} which starts and stops a L{twisted.internet.task.Cooperator}.
    """

    def __init__(self):
        # Created stopped; startService() actually kicks it off.
        self.coop = task.Cooperator(started=False)

    def startService(self):
        self.coop.start()

    def stopService(self):
        self.coop.stop()

    def coiterate(self, iterator):
        """Hand *iterator* to the wrapped cooperator for scheduling."""
        return self.coop.coiterate(iterator)
# Public API: the explicit service helpers plus every transport
# Server/Client class generated dynamically above.
__all__ = (['TimerService', 'CooperatorService', 'MulticastServer'] +
           [tran+side
            for tran in 'Generic TCP UNIX SSL UDP UNIXDatagram'.split()
            for side in 'Server Client'.split()])
| |
# -*- coding: utf-8 -*-
from __future__ import division, print_function, unicode_literals
from .helpers import ReaderError, SUPPRESS
from .py23compat import py23_str, py23_basestring
class _KeyLevel(object):
    """An abstract base class that provides functionality essential
    for a key"""
    def __init__(self, case=False):
        """Init the KeyLevel class"""
        # Are the keys case-sensitive by default?
        self._case = case
        if not isinstance(case, bool):
            raise ValueError('case must be bool, '
                             'given '+repr(self._case))
    def _validate_string(self, string):
        """Make sure a string has no spaces"""
        if string is None:
            return
        elif hasattr(string, 'pattern'):
            # Compiled regexes are detected by their .pattern attribute.
            # Reject patterns that could match whitespace ('\s') or any
            # character ('.').  NOTE(review): this substring check also
            # flags an escaped literal dot (r'\.') — confirm intended.
            for s in (r'\s', r'.'):
                if s in string.pattern:
                    msg = ': Regex should not allow the possibility of spaces'
                    msg += ', given "'+string.pattern+'"'
                    raise ValueError(self.name+msg)
        else:
            # Plain strings: must be non-empty and contain no whitespace.
            if len(string.split()) == 0:
                msg = ': String cannot be of zero length'
                raise ValueError(self.name+msg)
            elif len(string.split()) > 1:
                msg = ': String cannot contain spaces, given "'+string+'"'
                raise ValueError(self.name+msg)
    def _return_val(self, i, val, namespace):
        """Returns the result properly, depending on the key type
        and how the user wants it.

        Returns an (index, name, value) triple; with repeat=True the
        value accumulates into a tuple across occurrences, otherwise a
        second occurrence of the same name raises ReaderError.
        """
        # Substitute the keyname for dest if required
        name = self._dest if self._dest is not None else self.name
        # If multiple occurences of the keyname may appear, store
        # each of these in the namespace
        if self._repeat:
            # If this key has been found, check if we need to append to
            # the previous values or create the new value
            if name in namespace:
                return i, name, getattr(namespace, name)+(val,)
            # If the key jas not been found, simply return (as a tuple)
            else:
                return i, name, (val,)
        # In this case, only one instance of the keyname may appear
        # or it is an error.
        else:
            # If the keyname has already been found it is an error,
            if name in namespace:
                raise ReaderError(self.name+': The key "'+name+'" appears twice')
            # If the key has not been found, simply return
            else:
                return i, name, val
    def _add_kwargs(self, **kwargs):
        """Generic keyword arguments common to many methods"""
        # If this class defines a default default attribute, use that instead
        self._default = getattr(self, 'default', None)
        if self._default is None:
            self._default = kwargs.pop('default', None)
        # Repeat: may the key appear more than once?
        self._repeat = kwargs.pop('repeat', False)
        if not isinstance(self._repeat, bool):
            raise ValueError('repeat value must be a bool, '
                             'given '+repr(self._repeat))
        # Required: must the key appear at least once?
        self._required = kwargs.pop('required', False)
        if not isinstance(self._required, bool):
            raise ValueError('required value must be a bool, '
                             'given '+repr(self._required))
        # If this class defines a default dest attribute, use that instead
        self._dest = getattr(self, 'dest', None)
        if self._dest is None:
            self._dest = kwargs.pop('dest', None)
        if self._dest is not None and not isinstance(self._dest, py23_basestring):
            raise ValueError('dest value '+repr(self._dest)+' must be a str')
        # Depends: another key this key requires (checked elsewhere).
        self._depends = kwargs.pop('depends', None)
        # Make sure nothing extra was given
        if kwargs:
            msg = ': Unknown arguments given: '+','.join(kwargs)
            raise TypeError(self.name+msg)
class BooleanKey(_KeyLevel):
    """A key that takes no arguments and stores a fixed action value."""

    def __init__(self, keyname, action, **kwargs):
        """Defines a boolean key."""
        super(BooleanKey, self).__init__()
        self.name = keyname
        # The value stored in the namespace when the key is found.
        self._action = action
        # Generic options: default, repeat, required, dest, depends.
        self._add_kwargs(**kwargs)
        # Neither the key name nor its destination may contain spaces.
        self._validate_string(self.name)
        self._validate_string(self._dest)

    def _parse(self, f, i, namespace):
        """Parse line ``i`` of ``f``; a boolean key must appear alone."""
        if len(f[i].split()) != 1:
            raise ReaderError('The boolean "'+self.name+'" was given '
                              'arguments, this is illegal')
        return self._return_val(i, self._action, namespace)
class Regex(_KeyLevel):
    """A key that stores the regex match object for each matching line."""

    def __init__(self, handle, regex, **kwargs):
        """Defines a regex searcher."""
        super(Regex, self).__init__()
        self.name = handle
        # A pre-compiled regular expression object.
        self._regex = regex
        # Generic options: default, repeat, required, dest, depends.
        self._add_kwargs(**kwargs)
        self._validate_string(self.name)
        self._validate_string(self._dest)

    def _parse(self, f, i, namespace):
        """Parse line ``i`` of ``f``; returns the match object (or None)."""
        match = self._regex.match(f[i])
        return self._return_val(i, match, namespace)
class LineKey(_KeyLevel):
    """A class to store data on a line key.

    A line key reads ``keyname arg1 arg2 ...`` from a single line.
    ``type`` describes the fixed positional arguments; ``glob`` collects a
    variable number of trailing arguments; ``keywords`` collects trailing
    ``key=value`` pairs.  ``glob`` and ``keywords`` are mutually exclusive.
    """
    def __init__(self, keyname, type, glob, keywords, case, **kwargs):
        """Defines a line key."""
        super(LineKey, self).__init__(case=case)
        # Fill in the values
        self.name = keyname
        # Add the generic keyword arguments
        self._add_kwargs(**kwargs)
        # Check strings
        self._validate_string(self.name)
        self._validate_string(self._dest)
        # Cannot have both glob and keywords defined
        if glob and keywords:
            msg = ': Cannot define both glob and keywords'
            raise TypeError(self.name+msg)
        # Validate type
        # type given as a list
        if isinstance(type, list):
            self._type = type
            self._nolist = False
        # type given as a single value
        elif type is None:
            self._type = []
            self._nolist = False
        else:
            self._type = [type]
            # A single positional is returned bare, not inside a tuple
            self._nolist = True
        self._check_types_in_list(self._type)
        # Validate glob
        if glob:
            if not isinstance(glob, dict):
                raise ValueError(self.name+': glob must be a dict')
            if 'len' not in glob:
                raise ValueError(self.name+': "len" required for glob')
            elif glob['len'] not in ('*', '+', '?'):
                msg = ': "len" must be one of "*", "+", or "?" in glob'
                raise ValueError(self.name+msg)
            if 'type' not in glob:
                glob['type'] = str
            if isinstance(glob['type'], list):
                msg = ': list not allowed in type for glob or keywords'
                raise ValueError(self.name+msg)
            self._check_types_in_list([glob['type']])
            if 'join' not in glob:
                glob['join'] = False
            if glob['join'] and glob['len'] == '?':
                msg = ': "join=True" makes no sense for "len=?"'
                raise ValueError(self.name+msg)
            # Only len/type/join (plus an optional default) are allowed
            if set(glob.keys()) != set(['len', 'type', 'join']):
                if set(glob.keys()) != set(['len', 'type', 'join', 'default']):
                    raise TypeError(self.name+': Unknown key in glob')
            if not isinstance(glob['join'], bool):
                raise ValueError(self.name+': "join" must be a bool in glob')
            # Make the result is only a string when there is no positionals
            if not self._type and (glob['join'] or glob['len'] == '?'):
                self._nolist = True
            else:
                self._nolist = False
            self._glob = glob
        else:
            self._glob = {}  # In case glob = None
        # Validate keywords
        if keywords:
            if not isinstance(keywords, dict):
                raise ValueError(self.name+': keywords must be a dict')
            for key in keywords:
                if not isinstance(key, py23_basestring):
                    msg = ': keys in keywords must be of type str'
                    raise ValueError(self.name+msg)
                else:
                    self._validate_string(key)
                if keywords[key] is None:
                    keywords[key] = {}
                elif not isinstance(keywords[key], dict):
                    msg = ': Options for keyword "'+key+'" must be a dict'
                    raise ValueError(self.name+msg)
                if 'default' not in keywords[key]:
                    keywords[key]['default'] = SUPPRESS
                if 'type' not in keywords[key]:
                    keywords[key]['type'] = str
                if set(keywords[key].keys()) != set(['default', 'type']):
                    msg = ': Unknown key in keyword: "'+key+'"'
                    raise TypeError(self.name+msg)
                # Check the type of the keyword
                if isinstance(keywords[key]['type'], list):
                    msg = ': list not allowed in type for glob or keywords'
                    raise ValueError(self.name+msg)
                else:
                    self._check_types_in_list([keywords[key]['type']])
            self._keywords = keywords
            # Since we append this dict to the end, we must keep as a list
            # unless only the keywords are being kept
            self._nolist = True if not self._type else False
        else:
            self._keywords = {}  # In case keywords = None
        # Type, glob and keywords can't be empty
        if not (self._type or self._glob or self._keywords):
            msg = ': type, glob and keywords cannot all be empty'
            raise ValueError(self.name+msg)
    def _parse(self, f, i, namespace):
        """Parses the current line for the key. Returns the line that
        we read from and the value"""
        # Separate the arguments from the key
        if self._case:
            args = f[i].split()[1:]
        else:
            args = f[i].lower().split()[1:]
        # Check that the length of args matches the type length
        if len(args) == len(self._type):
            if not self._glob and not self._keywords:
                pass  # Not expecting anything else, we're good to go
            elif self._glob.get('len') == '+':
                msg = ': expected at least '+str(len(self._type)+1)
                msg += ' arguments, got '+str(len(args))
                raise ReaderError(self.name+msg)
            # Checking keywords will be done later
        # If the # args is less than the positional
        elif len(args) < len(self._type):
            if self._glob.get('len') == '+':
                msg = ': expected at least '+str(len(self._type)+1)
            else:
                msg = ': expected '+str(len(self._type))
            msg += ' arguments, got '+str(len(args))
            raise ReaderError(self.name+msg)
        # If there are too many arguments
        elif len(args) > len(self._type):
            if self._keywords:
                pass
            elif self._glob and self._glob['len'] in ('*', '+'):
                pass
            else:
                n = len(self._type)
                if self._glob.get('len') == '?':
                    n += 1
                    msg = ': expected at most '+str(n)
                else:
                    msg = ': expected '+str(n)
                if len(args) != n:
                    msg += ' arguments, got '+str(len(args))
                    raise ReaderError(self.name+msg)
        # Read in the arguments, making sure they match the types and choices
        val = []
        for a, t in zip(args[:len(self._type)], self._type):
            val.append(self._check_type_of_value(a, t, self._case))
        # Remove the arguments that were just read in
        try:
            args = args[len(self._type):]
        except IndexError:
            args = []
        # Read in the glob or the keywords
        glob = []
        kw = {}
        if self._glob:
            t = self._glob['type']
            for a in args:
                glob.append(self._check_type_of_value(a, t, self._case))
            # Assign the default if there was nothing
            if self._glob['join']:
                if not glob:
                    try:
                        glob = self._glob['default']
                    except KeyError:
                        pass
                else:
                    # Change all the globbed values to strings
                    for j, v in enumerate(glob):
                        glob[j] = py23_str(v)
                    glob = ' '.join(glob)
            elif not glob:
                try:
                    glob.append(self._glob['default'])
                except KeyError:
                    pass
            # Tag onto the end of val and prep val
            if not val:
                if self._nolist:
                    if isinstance(glob, py23_basestring):
                        val = glob
                    else:
                        try:
                            val = glob[0]
                        except IndexError:
                            val = ''
                else:
                    val = tuple(glob)
            elif not glob:
                if self._nolist:
                    val = val[0]
                else:
                    val = tuple(val)
            elif self._glob['join']:
                val.append(glob)
                val = tuple(val)
            else:
                val.extend(glob)
                val = tuple(val)
        elif self._keywords:
            # Each keyword is assumed to be key=value with no spaces
            for kvpair in args:
                try:
                    key, value = kvpair.split('=')
                except ValueError:
                    msg = ': Error reading keyword argument "'+kvpair+'"'
                    raise ReaderError(self.name+msg)
                # Make sure the keyword is good
                if not self._case:
                    key = key.lower()
                if key not in self._keywords:
                    raise ReaderError(self.name+': Unknown keyword: "'+key+'"')
                # Assign this keyword
                try:
                    t = self._keywords[key]['type']
                except KeyError:
                    t = str  # Default to string if not given
                kw[key] = self._check_type_of_value(value, t, self._case)
            # Assign the defaults
            for key in self._keywords:
                try:
                    default = self._keywords[key]['default']
                except KeyError:
                    continue
                if key not in kw and default is not SUPPRESS:
                    kw[key] = default
            # Tag onto the end of val and prep val
            if not val:
                val = kw
            elif not kw:
                if self._nolist:
                    val = val[0]
                else:
                    val.append({})
                    val = tuple(val)
            else:
                val.append(kw)
                val = tuple(val)
        else:
            if self._nolist:
                try:
                    val = val[0]
                except IndexError:
                    val = ''
            else:
                val = tuple(val)
        return self._return_val(i, val, namespace)
    def _check_types_in_list(self, typ):
        """Make sure each type in a list is legal. The function is recursive"""
        for t in typ:
            if isinstance(t, list):
                msg = ': Embedded lists not allowed in type'
                raise ValueError(self.name+msg)
            elif isinstance(t, tuple):
                if len(t) == 0:
                    msg = ': Empty tuple in type'
                    raise ValueError(self.name+msg)
                else:
                    self._check_types_in_list(t)
            elif not (isinstance(t, py23_basestring) or isinstance(t, int) or
                      isinstance(t, float) or t is None or
                      hasattr(t, 'pattern') or t is str or t is int or
                      t is float):
                msg = (': type must be one of None, str, float '
                       'int, or an instance of str, float, '
                       'int or regex')
                raise ValueError(self.name+msg)
            if isinstance(t, py23_basestring) or hasattr(t, 'pattern'):
                self._validate_string(t)
    def _validate_given_value(self, val, typ, case):
        """Checks that the given value is valid by checking
        its type. Raises ValueError if unsuccessful.
        """
        # Check case if necessary
        if not case:
            try:
                # BUG FIX: this previously read "type.lower()" — calling
                # .lower on the *builtin* type, which always raised
                # AttributeError and was silently swallowed, so explicit
                # string choices were never lowercased for case-insensitive
                # comparison.  Non-string typ still hits the except branch.
                typ = typ.lower()
            except AttributeError:
                pass
        # One of the core datatypes
        if typ is float or typ is int or typ is str:
            return typ(val)
        # Explicit None
        elif typ is None:
            if val.lower() == 'none':
                return None
            else:
                raise ValueError
        # Explicit choices
        elif (isinstance(typ, py23_basestring) or isinstance(typ, int) or
              isinstance(typ, float)):
            if type(typ)(val) == typ:
                return type(typ)(val)
            else:
                raise ValueError
        # Regular expression
        else:
            if typ.match(val):
                return val
            else:
                raise ValueError
    def _check_type_of_value(self, val, typ, case):
        """Checks the type of a value, accounting for
        various forms of type"""
        if isinstance(typ, tuple):
            # Accept the first alternative that validates.
            for tp in typ:
                try:
                    return self._validate_given_value(val, tp, case)
                except ValueError:
                    continue
            else:
                msg = self.name+': expected one of {0}, got "{1}"'
                t = sorted([self._make_value_readable(x) for x in typ])
                t = ', '.join(t[:-1])+' or '+t[-1]
                raise ReaderError(msg.format(t, val))
        else:
            try:
                return self._validate_given_value(val, typ, case)
            except ValueError:
                msg = self.name+': expected {0}, got "{1}"'
                raise ReaderError(msg.format(self._make_value_readable(typ), val))
    def _make_value_readable(self, val):
        """Returns a a string version of the input value."""
        if isinstance(val, int) or isinstance(val, float):
            return str(val)
        elif isinstance(val, py23_basestring):
            return '"'+str(val)+'"'
        elif val is None:
            return '"None"'
        else:
            try:
                return 'regex({0})'.format(val.pattern)
            except AttributeError:
                # e.g. "<type 'int'>" / "<class 'int'>" -> "int"
                return str(val).split()[1].strip("'><")
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import (
absolute_import, division, print_function, with_statement,
unicode_literals
)
import numpy as np
import pandas as pd
import Quandl
import os
# from sqlalchemy import sqlalchemy # from flask.ext.
class twitfin(object):
    """This is a description of the class."""

    #: An example class variable.
    aClassVariable = True

    def __init__(self, argumentName, anOptionalArg=None):
        """Initialization method.
        :param argumentName: an example argument.
        :type argumentName: string
        :param anOptionalArg: an optional argument.
        :type anOptionalArg: string
        :returns: New instance of :class:`twitfin`
        :rtype: twitfin
        """
        self.instanceVariable1 = argumentName
        if self.aClassVariable:
            print('Hello')
        if anOptionalArg:
            message = 'anOptionalArg: %s' % anOptionalArg
            print(message)
def load(*args, **kwargs):
    """Load data from Quandl into a dataframe, modify column names and
    check for non-numeric values."""
    # Grab the Quandl token
    # token = os.environ.get('QUANDL_TOKEN')
    # if token is None:
    # NOTE(review): raw_input is Python 2 only; under Python 3 these
    # prompts raise NameError.  The env-var path above is commented out,
    # so the token is always read interactively.
    token = raw_input("Enter Quandl token: ")
    ticker = raw_input("Enter Quandl ticker symbol (or hit Enter for default of YAHOO/INDEX_GSPC): ")
    if len(ticker) < 1:
        ticker = 'YAHOO/INDEX_GSPC'
    print(ticker)
    start_date = raw_input("Enter start date as YYYY-MM-DD (or hit ENTER for default of 1990-01-01): ")
    if len(start_date) < 1:
        start_date = '1990-01-01'
    print(start_date)
    # Call Quandl module, trim input by default from 1990 forward
    print('Pulling Quandl data...')
    df = Quandl.get(ticker, authtoken=token, trim_start=start_date)
    # Get the column labels
    # old_columns = list(df.columns.values)
    # Use the ticker symbol as our new prefix
    # ticker_tag = ticker.split('_')[-1] + '_'
    # Drop spaces and concatenate
    # new_labels = [ticker_tag + i.replace(' ', '') for i in old_columns]
    # Create a dictionary of old and new column labels
    # new_columns = dict(zip(old_columns, new_labels))
    # Rename the columns using our dictionary
    # df = df.rename(columns=new_columns)
    # Rows where any cell is not a real number
    nulls = df[~df.applymap(np.isreal).all(1)]
    # Check for non-numeric values
    if len(nulls) > 0:
        raise ValueError('Dataframe contains non-numeric values')
    row_count = len(df)
    print('%d rows loaded into dataframe.' % row_count)
    return df
def long_sma(df, column, *args, **kwargs):
    """Add a long-period simple moving average column to ``df``.

    :param df: target dataframe.
    :param column: target column name.
    :param args: optionally the period in days as the third positional
        argument (as the original parameter docs promised); if omitted,
        the user is prompted interactively as before.
    :param kwargs: ``label`` overrides the default label prefix; the
        resulting column is ``<label>_<period>-day`` or
        ``SMA_<column>_<period>-day``.
    :returns: ``df`` with the new SMA column added.
    """
    # Honor the documented third positional parameter; only prompt when
    # the caller did not supply a period.
    if args:
        period = int(args[0])
    else:
        try:
            _input = raw_input   # Python 2
        except NameError:
            _input = input       # Python 3
        period = int(_input("Enter the period in days for the long SMA: "))
    if 'label' in kwargs:
        column_label = kwargs['label'] + '_' + str(period) + '-day'
    else:
        column_label = 'SMA_' + column + '_' + str(period) + '-day'
    # pd.stats.moments.rolling_mean was removed from pandas; the modern
    # equivalent is Series.rolling(window).mean().
    df[column_label] = df[column].rolling(window=period).mean()
    return df
def short_sma(df, column, *args, **kwargs):
    """Add a short-period simple moving average column to ``df``.

    :param df: target dataframe.
    :param column: target column name.
    :param args: optionally the period in days as the third positional
        argument; if omitted, the user is prompted interactively.
    :param kwargs: ``label`` overrides the default label prefix; the
        resulting column is ``<label>_<period>-day`` or
        ``SMA_<column>_<period>-day``.
    :returns: ``df`` with the new SMA column added.
    """
    # Honor the documented period argument; only prompt as a fallback.
    if args:
        period = int(args[0])
    else:
        try:
            _input = raw_input   # Python 2
        except NameError:
            _input = input       # Python 3
        period = int(_input("Enter the period in days for the short SMA: "))
    if 'label' in kwargs:
        column_label = kwargs['label'] + '_' + str(period) + '-day'
    else:
        column_label = 'SMA_' + column + '_' + str(period) + '-day'
    # pd.stats.moments.rolling_mean was removed from pandas; use the
    # modern rolling(...).mean() spelling instead.
    df[column_label] = df[column].rolling(window=period).mean()
    return df
def diff(df, column_a, column_b, **kwargs):
    """Add a column holding ``column_a - column_b`` to ``df``.

    The new column is named by the optional ``label`` keyword, or by the
    default ``Delta_<prefix of column_b>_<a suffix>_<b suffix>`` where the
    prefix is the first two underscore-separated parts of ``column_b`` and
    the suffixes are each column's final underscore-separated part.
    """
    suffix_a = column_a.split('_')[-1]
    suffix_b = column_b.split('_')[-1]
    prefix = '_'.join(column_b.split('_')[:2])
    default_label = 'Delta_' + prefix + '_' + suffix_a + '_' + suffix_b
    column_label = kwargs.get('label', default_label)
    df[column_label] = df[column_a] - df[column_b]
    return df
def macd(df, column, *args, **kwargs):
    """Add a simple moving average of the MACD column to ``df``.

    :param df: target dataframe.
    :param column: target column name.
    :param args: optionally the period in days as the third positional
        argument; if omitted, the user is prompted interactively.
    :param kwargs: ``label`` overrides the default label prefix; the
        resulting column is ``<label>_<period>-day`` or
        ``SMA_<column>_<period>-day``.
    :returns: ``df`` with the new SMA column added.
    """
    # Honor the documented period argument; only prompt as a fallback.
    if args:
        period = int(args[0])
    else:
        try:
            _input = raw_input   # Python 2
        except NameError:
            _input = input       # Python 3
        period = int(_input("Enter the period in days for the SMA of the MACD: "))
    if 'label' in kwargs:
        column_label = kwargs['label'] + '_' + str(period) + '-day'
    else:
        column_label = 'SMA_' + column + '_' + str(period) + '-day'
    # pd.stats.moments.rolling_mean was removed from pandas; use the
    # modern rolling(...).mean() spelling instead.
    df[column_label] = df[column].rolling(window=period).mean()
    return df
def flag_swings(df, column, *args, **kwargs):
    """Flag sign swings in ``column``.

    Returns ``df`` with a new flag column: ``1`` for an upward swing
    (positive value after at least ``period - 1`` consistent negative
    days), ``-1`` for a downward swing, and ``0`` otherwise.

    :param df: target dataframe.
    :param column: target column name.
    :param args: optionally the minimum swing period (>= 2) as the third
        positional argument; if omitted, the user is prompted.
    :param kwargs: ``label`` overrides the default column-label prefix.
    """
    # Honor the documented third positional parameter; only prompt when
    # the caller did not supply a period.
    if args:
        period = int(args[0])
    else:
        try:
            _input = raw_input   # Python 2
        except NameError:
            _input = input       # Python 3
        period = int(_input("Enter the period in days to flag swings: "))
    if 'label' in kwargs:
        # Append custom label with period days
        column_label = kwargs['label'] + '_' + str(period) + '-day'
    else:
        column_label = 'SwingFlag_' + str(period) + '-day'
    # Trim null value artifacts in SMA columns
    df = df.dropna()
    # Work on a scratch copy so the sign bookkeeping columns never touch df
    tmp = df.copy()
    tmp['sum'] = 0
    # Sign of each day; zero counts as positive.
    tmp['sign-0'] = [1 if x >= 0 else -1 for x in df[column]]
    if period < 2:
        raise ValueError('The minimum swing period should be 2 days.')
    # Shift rows down for lateral comparison depending on period; the
    # running 'sum' aggregates the signs of the prior period-1 days.
    for i in range(1, period):
        label = 'sign-' + str(i)
        tmp[label] = tmp['sign-0'].shift(i)
        tmp['sum'] = tmp['sum'] + tmp[label]
    # Shift the sum by one day to compare against the prior sequence.
    tmp['sum-shift'] = tmp['sum'].shift(1)

    def flagger(sign_now, sign_prior, sign_run, sign_sum, period):
        # Lateral comparison of time-shifted sign data and aggregates.
        if sign_now > sign_prior and abs(sign_run) >= period - 1 and sign_sum != 0:
            # Positive sign after a sufficient run of negative signs.
            return 1
        if sign_now < sign_prior and abs(sign_run) >= period - 1 and sign_sum != 0:
            # Negative sign after a sufficient run of positive signs.
            return -1
        # Zero could still be a sign change, but the minimum sequence
        # period criterion was not met.
        return 0

    try:
        df = df.copy()
        df[column_label] = [flagger(n, p, r, s, period)
                            for n, p, r, s in zip(tmp['sign-0'], tmp['sign-1'],
                                                  tmp['sum-shift'], tmp['sum'])]
    except Exception as e:
        # Best effort, as before: report and return df without the flag
        # column.  (The original also compared the exception object to the
        # string 'SettingWithCopyWarning', which could never be equal.)
        print(e)
    return df
def sign_sequence(df, column, *args, **kwargs):
    """Add a column listing the signs of ``column`` for the prior period.

    Each row of the new ``SignSequence_<period>-days`` column is a
    comma-joined string of '1'/'-1' signs, oldest first.  The first
    ``period - 1`` rows are dropped because they lack full history.

    :param df: target dataframe.
    :param column: target column name.
    :param args: optionally the number of days as the third positional
        argument; if omitted, the user is prompted.
    """
    # Honor a positional period; only prompt when it was not supplied.
    if args:
        period = int(args[0])
    else:
        try:
            _input = raw_input   # Python 2
        except NameError:
            _input = input       # Python 3
        period = int(_input("Enter the days prior to list the signs: "))
    prior_signs_label = 'SignSequence_' + str(period) + '-days'
    # Trim null value artifacts in SMA columns
    df = df.dropna()
    # Work on a scratch copy for the sign bookkeeping columns.
    tmp = df.copy()
    # Sign of each day as a string; zero counts as positive.
    tmp['sign-0'] = ['1' if x >= 0 else '-1' for x in df[column]]
    # Shift rows down for lateral comparison depending on period.
    labels = ['sign-0']
    for i in range(1, period):
        label = 'sign-' + str(i)
        labels.append(label)
        tmp[label] = tmp['sign-0'].shift(i)
    # .ix was removed from pandas; .iloc keeps the positional intent of
    # dropping the first period-1 rows.
    tmp2 = tmp.iloc[(period - 1):]
    df2 = df.iloc[(period - 1):]
    # Oldest sign first in the joined sequence.
    labels = labels[::-1]
    try:
        df2 = df2.copy()
        df2[prior_signs_label] = tmp2[labels].apply(lambda x: ','.join(x), axis=1)
    except Exception as e:
        # Best effort, as before: report and return df2 without the new
        # column.  (The original also compared the exception object to the
        # string 'SettingWithCopyWarning', which could never be equal.)
        print(e)
    return df2
def x_days(df):
    """Add a column with a descending counter."""
    # One parameter: the target dataframe.  The newest row gets 'x-0'.
    countdown = range(len(df) - 1, -1, -1)
    df['x-day'] = ['x-' + str(n) for n in countdown]
    return df
def x_transpose(df):
    """Transpose the dataframe and set the x-days as the column labels."""
    # Assumes x_days() already added the 'x-day' column.
    transposed = df.set_index('x-day').transpose()
    # Side effect kept from the original: set the global display format.
    pd.options.display.float_format = '{:.3f}'.format
    return transposed
def read_csv(filename, *args, **kwargs):
    """read_csv is a port of the Pandas read_csv module.

    Fixed to forward keyword arguments to pandas; previously they were
    silently dropped, unlike the companion read_sql wrapper.
    """
    return pd.read_csv(filename, *args, **kwargs)
def read_sql(table, db, *args, **kwargs):
    """read_sql is a port of the Pandas read_sql module."""
    # Thin pass-through kept for API symmetry with read_csv.
    result = pd.read_sql(table, db, *args, **kwargs)
    return result
def db_connection(uri):
    """db_connection is a port of the SQLAlchemy create_engine module.

    :param uri: database URI, e.g. ``sqlite:///data/dev.db``.
    :returns: a SQLAlchemy engine.
    """
    # The module-level sqlalchemy import is commented out at the top of
    # this file, so calling this function raised NameError.  Import
    # locally so the dependency is only required when actually used.
    import sqlalchemy
    return sqlalchemy.create_engine(uri)
# Execute example IO utilities
# To write data to csv
# df.to_csv('data/example.csv')
# print('Modified dataframe saved to: data/standard-example.csv')
# print('\nData saved.')
# To read data from csv
# df = read_csv('data/example.csv')
# df = df.set_index('Date')
# print('\nData read from csv:')
# print(df_test.tail())
# To write data to sql table
# db = db_connection('sqlite:///data/dev.db')
# df.to_sql('example', db, if_exists='replace')
# print('\nData saved to data/dev.db/gspc')
# To read data from sql table
# df = read_sql('example', db)
# df = df.set_index('Date')
# print('\nData read from sql:')
# print(df_test.tail())
| |
import pytest
import numpy as np
from mirdata.datasets import billboard
from mirdata import annotations
def test_track():
    """Track '3' loads with the expected identity, paths, and property types."""
    default_trackid = "3"
    data_home = "tests/resources/mir_datasets/billboard"
    dataset = billboard.Dataset(data_home)
    track = dataset.track(default_trackid)
    # test attributes are loaded as expected
    assert track.track_id == default_trackid
    assert track._data_home == data_home
    # Each entry maps a resource name to [relative path, md5 checksum].
    assert track._track_paths == {
        "audio": [
            "audio/1960s/James Brown/I Don't Mind/audio.flac",
            "bb9f022b25c43983cf19aef562b00eac",
        ],
        "salami": [
            "McGill-Billboard/0003/salami_chords.txt",
            "8deb413e4cecadcffa5a7180a5f4c597",
        ],
        "bothchroma": [
            "McGill-Billboard/0003/bothchroma.csv",
            "c92ee46045f5bacd681543e8b9aa55b8",
        ],
        "tuning": [
            "McGill-Billboard/0003/tuning.csv",
            "31c744b447b739bc8c4ed29891dc1fb1",
        ],
        "lab_full": [
            "McGill-Billboard/0003/full.lab",
            "59c73209de645ef7e4e4293f4d6882b3",
        ],
        "lab_majmin7": [
            "McGill-Billboard/0003/majmin7.lab",
            "59c73209de645ef7e4e4293f4d6882b3",
        ],
        "lab_majmin7inv": [
            "McGill-Billboard/0003/majmin7inv.lab",
            "59c73209de645ef7e4e4293f4d6882b3",
        ],
        "lab_majmin": [
            "McGill-Billboard/0003/majmin.lab",
            "59c73209de645ef7e4e4293f4d6882b3",
        ],
        "lab_majmininv": [
            "McGill-Billboard/0003/majmininv.lab",
            "59c73209de645ef7e4e4293f4d6882b3",
        ],
    }
    assert track.title == "I Don't Mind"
    assert track.artist == "James Brown"
    # test that cached properties don't fail and have the expected type
    assert type(track.chords_full) is annotations.ChordData
    assert type(track.chords_majmin7) is annotations.ChordData
    assert type(track.chords_majmin7inv) is annotations.ChordData
    assert type(track.chords_majmin) is annotations.ChordData
    assert type(track.chords_majmininv) is annotations.ChordData
    assert type(track.chroma) is np.ndarray
    assert type(track.tuning) is list
    assert type(track.sections) is annotations.SectionData
    assert type(track.named_sections) is annotations.SectionData
    assert type(track.salami_metadata) is dict
def test_to_jams():
    """to_jams() exposes segments, named segments, metadata, and chords."""
    default_trackid = "3"
    data_home = "tests/resources/mir_datasets/billboard"
    dataset = billboard.Dataset(data_home)
    track = dataset.track(default_trackid)
    jam = track.to_jams()
    # First segment namespace: letter-labeled (SALAMI) sections.
    segments = jam.search(namespace="segment")[0]["data"]
    assert [segment.time for segment in segments] == [
        0.073469387,
        22.346394557,
        49.23802721,
        76.123990929,
        102.924353741,
        130.206598639,
    ]
    assert [segment.duration for segment in segments] == [
        22.27292517,
        26.891632653,
        26.885963719000003,
        26.800362812000003,
        27.282244897999988,
        20.70278911600002,
    ]
    assert [segment.value for segment in segments] == ["A", "B", "B", "A", "B", "A"]
    assert [segment.confidence for segment in segments] == [
        None,
        None,
        None,
        None,
        None,
        None,
    ]
    # Second segment namespace: human-readable section names.
    named_segments = jam.search(namespace="segment")[1]["data"]
    assert [segment.value for segment in named_segments] == [
        "intro",
        "verse",
        "verse",
        "interlude",
        "verse",
        "interlude",
    ]
    assert jam["file_metadata"]["title"] == "I Don't Mind"
    assert jam["file_metadata"]["artist"] == "James Brown"
    # Spot-check the first chord annotation's leading values.
    chords = jam.search(namespace="chord")[0]["data"]
    assert [chord.value for chord in chords][:10] == [
        "N",
        "N",
        "N",
        "A:min",
        "A:min",
        "C:maj",
        "C:maj",
        "A:min",
        "A:min",
        "C:maj",
    ]
    # One chord annotation per vocabulary (full, majmin7, ...).
    chords = jam.search(namespace="chord")
    assert len(chords) == 5
    assert chords[0]["sandbox"]["name"] == "Full chords"
def test_load_chords():
    """Full-vocabulary chord data for fixture track "35" loads with the expected labels."""
    default_trackid = "35"
    data_home = "tests/resources/mir_datasets/billboard"
    dataset = billboard.Dataset(data_home)
    track = dataset.track(default_trackid)
    full_chords = track.chords_full
    assert type(full_chords) == annotations.ChordData
    assert type(full_chords.intervals) is np.ndarray
    assert type(full_chords.labels) is list
    # First 36 frames: silence ("N"), a C7(#9) vamp, then an Eb5/C5 riff.
    assert full_chords.labels[:36] == [
        "N",
        "N",
        "N",
        "N",
        "N",
        "N",
        "C:7(#9)",
        "C:7(#9)",
        "C:7(#9)",
        "C:7(#9)",
        "C:7(#9)",
        "C:7(#9)",
        "C:7(#9)",
        "C:7(#9)",
        "C:7(#9)",
        "C:7(#9)",
        "C:7(#9)",
        "C:7(#9)",
        "C:7(#9)",
        "Eb:5",
        "C:5",
        "Eb:5",
        "C:5",
        "Eb:5",
        "C:5",
        "Eb:5",
        "C:5",
        "Eb:5",
        "C:5",
        "Eb:5",
        "C:5",
        "Eb:5",
        "C:5",
        "Eb:5",
        "C:5",
        "C:5",
    ]
def test_load_sections():
    """SectionData loads for track "35"; bad section types raise ValueError."""
    dataset = billboard.Dataset("tests/resources/mir_datasets/billboard")
    track = dataset.track("35")
    sections = track.sections
    assert type(sections) == annotations.SectionData
    assert type(sections.intervals) is np.ndarray
    assert type(sections.labels) is list
    # Letter-coded section labels.
    expected_letters = [
        "A'", "A", "B", "C", "A", "B", "D", "E", "F", "A'", "B", "G", "Z",
    ]
    assert np.array_equal(sections.labels, np.array(expected_letters))
    # Named sections follow the same ordering.
    expected_names = [
        "intro",
        "verse",
        "chorus",
        "solo",
        "verse",
        "chorus",
        "trans",
        "bridge",
        "solo",
        "verse",
        "chorus",
        "outro",
        "fadeout",
    ]
    named_sections = track.named_sections
    assert np.array_equal(named_sections.labels, np.array(expected_names))
    # An unknown section type must be rejected.
    with pytest.raises(ValueError):
        billboard._load_sections(
            "tests/resources/mir_datasets/billboard/McGill-Billboard/0035/salami_chords.txt",
            "no_section_type",
        )
def test_load_chroma():
    """Chroma matrices load with 25 columns and the expected frame counts.

    The original test duplicated the whole setup/assert sequence for each
    track; parametrize over (track_id, expected_frames) pairs instead.
    """
    data_home = "tests/resources/mir_datasets/billboard"
    for trackid, expected_frames in [("35", 5666), ("3", 3250)]:
        dataset = billboard.Dataset(data_home)
        track = dataset.track(trackid)
        chroma = track.chroma
        assert chroma.shape[0] == expected_frames
        # All fixture tracks have 25 columns per frame.
        assert chroma.shape[1] == 25
def test_load_tuning():
    """Tuning annotation for track "35" is a 4-element list."""
    dataset = billboard.Dataset("tests/resources/mir_datasets/billboard")
    tuning = dataset.track("35").tuning
    assert type(tuning) == list
    assert len(tuning) == 4
def test_load_metadata():
    """Metadata for track "3" matches the fixture CSV."""
    dataset = billboard.Dataset("tests/resources/mir_datasets/billboard")
    expected = {
        "title": "I Don't Mind",
        "artist": "James Brown",
        "actual_rank": 57,
        "peak_rank": 47,
        "target_rank": 56,
        "weeks_on_chart": 8,
        "chart_date": "1961-07-03",
    }
    assert dataset._metadata["3"] == expected
| |
# -*- coding: utf-8 -*-
"""
Copyright (C) 2019 Dag Wieers (@dagwieers) <dag@wieers.com>
This file implements the Kodi xbmc module, either using stubs or alternative functionality
SPDX-License-Identifier: GPL-3.0-only
See LICENSES/GPL-3.0-only.md for more information.
"""
# pylint: disable=unused-argument
from __future__ import absolute_import, division, print_function, unicode_literals
import sys
import os
import json
import time
from xbmcextra import global_settings, import_language
# Language-format selectors accepted by convertLanguage()/getLanguage().
ISO_639_1 = 0
ISO_639_2 = 1
ENGLISH_NAME = 2
# Log-level names mirroring Kodi's xbmc.LOG* constants.
LOGDEBUG = 'Debug'
LOGINFO = 'Info'
LOGNOTICE = 'Notice'
LOGWARNING = 'Warning'
LOGERROR = 'Error'
LOGSEVERE = 'Severe'
LOGFATAL = 'Fatal'
LOGNONE = 'None'
# Canned answers served by getInfoLabel().
INFO_LABELS = {
    'System.BuildVersion': '18.2',
    'Container.FolderPath': 'plugin://plugin.video.netflix/',
}
# Canned answers served by getRegion().
REGIONS = {
    'datelong': '%A, %e %B %Y',
    'dateshort': '%Y-%m-%d',
}
# Loaded once at import time from the test fixtures (see xbmcextra).
GLOBAL_SETTINGS = global_settings()
PO = import_language(language=GLOBAL_SETTINGS.get('locale.language'))
class Keyboard:
    """A stub implementation of the xbmc Keyboard class"""

    def __init__(self, line='', heading=''):
        """Stub constructor; the initial line and heading are ignored."""

    def doModal(self, autoclose=0):
        """Stub doModal(): no dialog is shown, nothing happens."""

    def isConfirmed(self):
        """Stub isConfirmed(): the keyboard is always confirmed."""
        return True

    def getText(self):
        """Stub getText(): always answers with the fixed string 'unittest'."""
        return 'unittest'
class Monitor:
    """A stub implementation of the xbmc Monitor class"""

    def __init__(self, line='', heading=''):
        """Stub constructor; arguments are accepted and ignored."""

    def abortRequested(self):
        """Stub abortRequested(): Kodi never aborts in the test harness."""
        return False

    def waitForAbort(self, timeout=0):
        """Stub waitForAbort(): returns immediately without waiting."""
        return
class Player:
    """A stub implementation of the xbmc Player class"""

    def __init__(self):
        # Number of isPlaying() polls answered so far.
        self._count = 0

    def pause(self):
        """A stub implementation for the xbmc Player class pause() method"""

    def play(self, item='', listitem=None, windowed=False, startpos=-1):
        """A stub implementation for the xbmc Player class play() method"""

    def isPlaying(self):
        """Pretend playback is active on four calls out of every five."""
        self._count += 1
        return self._count % 5 != 0

    def seekTime(self, seekTime):
        """A stub implementation for the xbmc Player class seekTime() method"""

    def showSubtitles(self, bVisible):
        """A stub implementation for the xbmc Player class showSubtitles() method"""
        return

    def setAudioStream(self):
        """A stub implementation for the xbmc Player class setAudioStream() method"""

    def setSubtitleStream(self):
        """A stub implementation for the xbmc Player class setSubtitleStream() method"""
def convertLanguage(language, format):  # pylint: disable=redefined-builtin
    """A reimplementation of the xbmc convertLanguage() function"""
    # The stub ignores `language` and always answers for English,
    # rendered in the requested format.
    if format == ISO_639_2:
        return 'eng'
    if format == ISO_639_1:
        return 'en'
    return 'English'
def executebuiltin(string, wait=False):
    """A stub implementation of the xbmc executebuiltin() function"""
    # Builtins are not executed in the test harness; this is a pure no-op.
    return
def executeJSONRPC(jsonrpccommand):
    """A reimplementation of the xbmc executeJSONRPC() function"""
    request = json.loads(jsonrpccommand)
    # Only Settings.GetSettingValue is emulated, answered from the fixtures.
    if request.get('method') == 'Settings.GetSettingValue':
        setting = request.get('params').get('setting')
        reply = dict(id=1, jsonrpc='2.0', result=dict(value=GLOBAL_SETTINGS.get(setting)))
        return json.dumps(reply)
    # Anything else is reported on stderr and answered with a JSON-RPC error.
    print("Error in executeJSONRPC, method '{method}' is not implemented".format(**request), file=sys.stderr)
    return json.dumps(dict(error=dict(code=-1, message='Not implemented'), id=1, jsonrpc='2.0'))
def getCondVisibility(string):
    """A reimplementation of the xbmc getCondVisibility() function"""
    # Every condition holds in the test harness except the Android
    # platform check.
    return string != 'system.platform.android'
def getGlobalIdleTime():
    """A reimplementation of the xbmc getGlobalIdleTime() function"""
    # The test harness always reports zero seconds of idle time.
    return 0
def getInfoLabel(key):
    """A reimplementation of the xbmc getInfoLabel() function"""
    # Answered from the canned INFO_LABELS table; unknown labels yield None.
    return INFO_LABELS.get(key)
def getLanguage(format=None, region=None):  # pylint: disable=redefined-builtin
    """A reimplementation of the xbmc getLanguage() function"""
    # The stub always reports English; `region` is accepted but ignored.
    if format == ISO_639_2:
        return 'eng'
    if format == ISO_639_1:
        return 'en'
    return 'English'
def getLocalizedString(msgctxt):
    """A reimplementation of the xbmc getLocalizedString() function"""
    wanted = '#%s' % msgctxt
    for entry in PO:
        if entry.msgctxt == wanted:
            # Prefer the translation; fall back to the msgid.
            return entry.msgstr or entry.msgid
    # Unknown ids get a recognizable placeholder.
    return 'smurf'
def getRegion(key):
    """A reimplementation of the xbmc getRegion() function"""
    # Answered from the canned REGIONS table; unknown keys yield None.
    return REGIONS.get(key)
def log(msg, level):
    """A reimplementation of the xbmc log() function"""
    # NOTE(review): the format string embeds raw terminal escape sequences
    # (presumably ANSI green/bold) so log lines are colorized on a console;
    # keep the literal bytes exactly as-is.
    print('[32;1m%s: [32;0m%s[0m' % (level, msg))
def makeLegalFilename(filename, fatX=None):  # Kodi 18
    """A reimplementation of the xbmc makeLegalFilename() function"""
    # FatX mode keeps the full path; otherwise strip down to the basename.
    return filename if fatX else os.path.basename(filename)
def setContent(self, content):
    """A stub implementation of the xbmc setContent() function"""
    # NOTE(review): the first parameter is named `self` although this is a
    # module-level function (in Kodi, setContent lives in xbmcplugin and
    # takes a plugin handle first) — presumably `self` stands in for the
    # handle; confirm against callers before renaming.
    return
def sleep(seconds):
    """A reimplementation of the xbmc sleep() function.

    Kodi's xbmc.sleep() takes its argument in *milliseconds*; the previous
    stub slept for that many seconds, so e.g. xbmc.sleep(500) stalled a test
    run for over eight minutes instead of half a second.  Convert to seconds
    here.  The parameter keeps its original (misleading) name so keyword
    callers stay compatible.
    """
    time.sleep(seconds / 1000.0)
def translatePath(path):
    """A stub implementation of the xbmc translatePath() function"""
    # Map Kodi's special:// protocol prefixes onto local test directories;
    # anything else is passed through untouched.
    if path.startswith('special://home'):
        return path.replace('special://home', os.path.join(os.getcwd(), 'test'))
    if path.startswith('special://profile'):
        # NOTE(review): 'tests/usedata' looks like a typo for 'tests/userdata'
        # (cf. the special://userdata branch below) — confirm against the
        # fixture directory layout before changing the string.
        return path.replace('special://profile', os.path.join(os.getcwd(), 'tests/usedata'))
    if path.startswith('special://userdata'):
        return path.replace('special://userdata', os.path.join(os.getcwd(), 'tests/userdata'))
    return path
| |
from google.appengine.api import memcache
__author__ = 'Gundsambuu'
from StringIO import StringIO
import gzip
import logging
import hashlib
import json
import time
import sys
from httplib import HTTPConnection
import htmlentitydefs
from urllib import quote_plus as url_quote_plus
import six
# Web-service error status codes (last.fm-style API error numbering);
# see WSError.get_id().
STATUS_INVALID_SERVICE = 2
STATUS_INVALID_METHOD = 3
STATUS_AUTH_FAILED = 4
STATUS_INVALID_FORMAT = 5
STATUS_INVALID_PARAMS = 6
STATUS_INVALID_RESOURCE = 7
STATUS_TOKEN_ERROR = 8
STATUS_INVALID_SK = 9
STATUS_INVALID_API_KEY = 10
STATUS_OFFLINE = 11
STATUS_SUBSCRIBERS_ONLY = 12
STATUS_INVALID_SIGNATURE = 13
STATUS_TOKEN_UNAUTHORIZED = 14
STATUS_TOKEN_EXPIRED = 15
# Event attendance states.
EVENT_ATTENDING = '0'
EVENT_MAYBE_ATTENDING = '1'
EVENT_NOT_ATTENDING = '2'
# Chart period identifiers.
PERIOD_OVERALL = 'overall'
PERIOD_7DAYS = "7day"
PERIOD_3MONTHS = '3month'
PERIOD_6MONTHS = '6month'
PERIOD_12MONTHS = '12month'
# Website language-domain selectors.
DOMAIN_ENGLISH = 0
DOMAIN_GERMAN = 1
DOMAIN_SPANISH = 2
DOMAIN_FRENCH = 3
DOMAIN_ITALIAN = 4
DOMAIN_POLISH = 5
DOMAIN_PORTUGUESE = 6
DOMAIN_SWEDISH = 7
DOMAIN_TURKISH = 8
DOMAIN_RUSSIAN = 9
DOMAIN_JAPANESE = 10
DOMAIN_CHINESE = 11
# Cover-art size indices.
COVER_SMALL = 0
COVER_MEDIUM = 1
COVER_LARGE = 2
COVER_EXTRA_LARGE = 3
COVER_MEGA = 4
# Image ordering modes.
IMAGES_ORDER_POPULARITY = "popularity"
IMAGES_ORDER_DATE = "dateadded"
# User genders.
USER_MALE = 'Male'
USER_FEMALE = 'Female'
class _Network(object):
"""
A music social network website such as sons.mn or
one with a sons.mn-compatible API.
"""
def __init__(
self, name, homepage, ws_server, session_key, urls):
"""
name: the name of the network
homepage: the homepage URL
ws_server: the URL of the webservices server
api_key: a provided API_KEY
api_secret: a provided API_SECRET
session_key: a generated session_key or None
password_hash: the output of pylast.md5(password) where password is
the user's password
domain_names: a dict mapping each DOMAIN_* value to a string domain
name
urls: a dict mapping types to URLs
You should use a preconfigured network object through a
get_*_network(...) method instead of creating an object
of this class, unless you know what you're doing.
"""
self.name = name
self.homepage = homepage
self.ws_server = ws_server
self.session_key = session_key
self.urls = urls
self.cache_backend = None
self.proxy_enabled = False
self.proxy = None
self.last_call_time = 0
self.limit_rate = False
"""def __repr__(self):
attributes = ("name", "homepage", "ws_server", "api_key", "api_secret",
"session_key", "submission_server", "password_hash",
"domain_names", "urls")
text = "pylast._Network(%s)"
args = []
for attr in attributes:
args.append("=".join((attr, repr(getattr(self, attr)))))
return text % ", ".join(args)
"""
def __str__(self):
return "%s Network" % self.name
def get_artist(self, artist_id):
"""
Return an Artist object
"""
response = _Request(network=self, method_name='artists/fetchById?id=' + artist_id, params={},
request_method='GET').execute(True)
artist = response['artist']
return Artist(id=artist['id'], image=artist['image'], name=artist['name'], num_albums=artist['numAlbums'],
num_tracks=artist['numTracks'])
def get_track(self, track_id):
"""
Return an Artist object
"""
response = _Request(self, 'tracks/fetchById?id=' + track_id, params={}, request_method='GET').execute(True)
track = response['track']
return Track(id=track['id'], image=track['image'], title=track['title'], album_name=track['albumName'],
length=track['length'], media_path=track['mediaPath'], origin=track['origin'],
youtube=track['youtube'], lyrics=track['lyrics'], artist_id=track['artistId'],
artist_name=track['artistName'])
def get_track_list(self, page_number=1):
response = _Request(self, 'tracks/fetchByPage?page=' + str(page_number), params={},
request_method='GET').execute(True)
tracks = response['tracks']
seq = []
for track in tracks:
seq.append(Track(id=track['id'], image=track['image'], title=track['title'], album_name=track['albumName'],
length=track['length'], media_path=track['mediaPath'], origin=track['origin'],
youtube=track['youtube'], lyrics=track['lyrics'], artist_id=track['artistId'],
artist_name=track['artistName']))
return seq
def get_album_list(self, page_number=1):
response = _Request(self, 'albums/fetchByPage?page=' + str(page_number), params={},
request_method='GET').execute(True)
albums = response['albums']
seq = []
for album in albums:
seq.append(
Album(id=album['id'], title=album['title'], image=album['image'], artist_name=album['artistName'],
artist_id=album['artistId'], year=album['year'], duration=album['duration'],
num_tracks=album['numTracks']))
return seq
def get_album(self, album_id):
"""
Return an Album object
"""
response = _Request(self, 'track/fetchById?id=' + album_id, params={}, request_method='GET').execute(True)
album = response['album']
return Album(id=album['id'], title=album['title'], image=album['image'], artist_name=album['artistName'],
artist_id=album['artistId'], year=album['year'], duration=album['duration'],
num_tracks=album['numTracks'])
def _get_url(self, domain, url_type):
return "http://%s/%s" % (
self._get_language_domain(domain), self.urls[url_type])
def _delay_call(self):
"""
Makes sure that web service calls are at least 0.2 seconds apart.
"""
# Delay time in seconds from section 4.4 of http://www.sons.mn/api/tos
DELAY_TIME = 0.2
now = time.time()
time_since_last = now - self.last_call_time
if time_since_last < DELAY_TIME:
time.sleep(DELAY_TIME - time_since_last)
self.last_call_time = now
def enable_proxy(self, host, port):
"""Enable a default web proxy"""
self.proxy = [host, _number(port)]
self.proxy_enabled = True
def disable_proxy(self):
"""Disable using the web proxy"""
self.proxy_enabled = False
def is_proxy_enabled(self):
"""Returns True if a web proxy is enabled."""
return self.proxy_enabled
def _get_proxy(self):
"""Returns proxy details."""
return self.proxy
def enable_rate_limit(self):
"""Enables rate limiting for this network"""
self.limit_rate = True
def disable_rate_limit(self):
"""Disables rate limiting for this network"""
self.limit_rate = False
def is_rate_limited(self):
"""Return True if web service calls are rate limited"""
return self.limit_rate
def enable_caching(self):
"""Enables caching request-wide for all cacheable calls.
* file_path: A file path for the backend storage file. If
None set, a temp file would probably be created, according the backend.
"""
self.cache_backend = _ShelfCacheBackend()
def disable_caching(self):
"""Disables all caching features."""
self.cache_backend = None
def is_caching_enabled(self):
"""Returns True if caching is enabled."""
return not (self.cache_backend is None)
def _get_cache_backend(self):
return self.cache_backend
def get_top_chart(self):
"""
Return an track list object
"""
response = _Request(self, 'chart/fetchAll?chartType=mongolian', params={},
request_method='GET').execute(True)
tracks = response['tracks']
seq = []
for track in tracks:
seq.append(Track(id=track['id'], image=track['image'], title=track['title'], album_name=track['albumName'],
length=track['length'], media_path=track['mediaPath'], origin=track['origin'],
youtube=track['youtube'], lyrics=track['lyrics'], artist_id=track['artistId'],
artist_name=track['artistName']))
return seq
def get_playlist_page(self, page=1):
"""
Return an track list object
"""
print 'playlists/fetchByPage?orderBy=recent&page=' + str(page)
response = _Request(self, 'playlists/fetchByPage?orderBy=rating&page=' + str(page), params={},
request_method='GET').execute(True)
playlist_list = response['playlists']
seq = []
for playlist in playlist_list:
seq.append({
"id": playlist['id'],
"image": playlist['image'],
"name": playlist['name'],
})
return seq
def fetch_playlist(self, playlist_id):
response = _Request(self, 'tracks/fetchByPlaylist?playlistId=' + str(playlist_id), params={},
request_method='GET').execute(True)
tracks = response['tracks']
seq = []
for track in tracks:
seq.append(Track(id=track['id'], image=track['image'], title=track['title'], album_name=track['albumName'],
length=track['length'], media_path=track['mediaPath'], origin=track['origin'],
youtube=track['youtube'], lyrics=track['lyrics'], artist_id=track['artistId'],
artist_name=track['artistName']))
return seq
def search_for_album(self, album_name):
"""Searches for an album by its name. Returns a AlbumSearch object.
Use get_next_page() to retrieve sequences of results."""
return AlbumSearch(album_name, self)
def search_for_artist(self, artist_name):
"""Searches of an artist by its name. Returns a ArtistSearch object.
Use get_next_page() to retrieve sequences of results."""
return ArtistSearch(artist_name, self)
def search_for_track(self, query):
"""Searches of a track by its name and its artist. Set artist to an
empty string if not available.
Returns a TrackSearch object.
Use get_next_page() to retrieve sequences of results."""
return TrackSearch(query, self)
def get_track_by_sons_id(self, sons_id):
"""Looks up a track by its MusicBrainz ID"""
params = {"id": sons_id}
doc = _Request(self, "track.getInfo", params).execute(True)
return Track(_extract(doc, "name", 1), _extract(doc, "name"), self)
def get_artist_by_sonsid(self, sons_id):
"""Loooks up an artist by its MusicBrainz ID"""
params = {"id": sons_id}
doc = _Request(self, "artist.getInfo", params).execute(True)
return Artist(_extract(doc, "name"), self)
def get_album_by_sonsid(self, sons_id):
"""Looks up an album by its MusicBrainz ID"""
params = {"id": sons_id}
doc = _Request(self, "album.getInfo", params).execute(True)
return Album(_extract(doc, "artist"), _extract(doc, "name"), self)
def get_play_links(self, link_type, things, cacheable=True):
method = link_type + ".getPlaylinks"
params = {}
for i, thing in enumerate(things):
if link_type == "artist":
params['artist[' + str(i) + ']'] = thing
elif link_type == "album":
params['artist[' + str(i) + ']'] = thing.artist
params['album[' + str(i) + ']'] = thing.title
elif link_type == "track":
params['artist[' + str(i) + ']'] = thing.artist
params['track[' + str(i) + ']'] = thing.title
doc = _Request(self, method, params).execute(cacheable)
seq = []
for node in doc.getElementsByTagName("externalids"):
spotify = _extract(node, "spotify")
seq.append(spotify)
return seq
def get_artist_play_links(self, artists, cacheable=True):
return self.get_play_links("artist", artists, cacheable)
def get_album_play_links(self, albums, cacheable=True):
return self.get_play_links("album", albums, cacheable)
def get_track_play_links(self, tracks, cacheable=True):
return self.get_play_links("track", tracks, cacheable)
class SonsNetwork(_Network):
    """A sons.mn network object

    session_key: a generated session_key or None

    Most read-only webservices only require an api_key and an api_secret, see
    about obtaining them from:
    http://www.sons.mn/api/account
    """

    def __init__(self, session_key="", ):
        _Network.__init__(
            self,
            name="sons.mn",
            homepage="http://sons.fm",
            ws_server=("sons.mn", "/api/"),
            session_key=session_key,
            urls={
                "album": "music/%(artist)s/%(album)s",
                "artist": "music/%(artist)s",
                "track": "music/%(artist)s/_/%(title)s",
                "search": "search/tracks/%(query)s"
            }
        )

    def __repr__(self):
        # The old implementation did ", ".join("'%s'" % key): joining over a
        # *string* interleaves ", " between its characters, and it also
        # reported the class as pylast.LastFMNetwork.  Format directly.
        return "pylast.SonsNetwork('%s')" % self.session_key
class _ShelfCacheBackend(object):
    """Used as a backend for caching cacheable requests."""

    # Namespace prefix and expiry for this app's memcache entries.
    _KEY_PREFIX = 'sons_tmp_'
    _TTL_SECONDS = 60

    def get_xml(self, key):
        """Return the cached response body for `key`, or None on a miss."""
        return memcache.get(self._KEY_PREFIX + key)

    def set_xml(self, key, json_string):
        """Store a response body under `key` with a short expiry."""
        memcache.set(self._KEY_PREFIX + key, json_string, self._TTL_SECONDS)
class _Request(object):
    """Representing an abstract web service operation."""

    def __init__(self, network, method_name, params=None, request_method='POST'):
        """
        network: the _Network the request is addressed to
        method_name: method/path portion of the URL (may embed a query string)
        params: optional dict of form parameters (values coerced to unicode)
        request_method: HTTP verb, 'POST' or 'GET'
        """
        self.network = network
        # Copy the parameters into a fresh dict, coercing every value to
        # unicode.  (The old signature used the mutable default `params={}`,
        # which Python shares between calls; None is the safe default.)
        self.params = {}
        for key in (params or {}):
            self.params[key] = _unicode(params[key])
        self.method_name = method_name
        self.request_method = request_method
        if network.is_caching_enabled():
            # Only set when caching is on; execute() checks before using it.
            self.cache = network._get_cache_backend()

    def _get_cache_key(self):
        """
        The cache key is a string of concatenated sorted names and values.
        """
        keys = list(self.params.keys())
        keys.sort()
        cache_key = str()
        for key in keys:
            # Credentials must not leak into (or vary) the cache key.
            if key != "api_sig" and key != "api_key" and key != "sk":
                cache_key += key + self.params[key]
        cache_key += self.method_name
        return hashlib.sha1(cache_key.encode("utf-8")).hexdigest()

    def _get_cached_response(self):
        """Returns the cached response body, downloading it on a cache miss."""
        ca = self.cache.get_xml(self._get_cache_key())
        if ca is None:
            # Miss: download, store, then read back from the cache.
            response = self._download_response()
            self.cache.set_xml(self._get_cache_key(), response)
        return self.cache.get_xml(self._get_cache_key())

    def _download_response(self):
        """Returns a response body string from the server."""
        if self.network.limit_rate:
            self.network._delay_call()
        # URL-encode the parameters into an application/x-www-form-urlencoded body.
        data = []
        for name in self.params.keys():
            data.append('='.join((
                name, url_quote_plus(_string(self.params[name])))))
        data = '&'.join(data)
        headers = {
            'Accept': 'application/json, text/plain, */*',
            'Accept-Encoding': 'gzip, deflate, sdch',
            'Accept-Language': 'en-US,en;q=0.8',
            'Connection': "keep-alive",
            'X-CSRF-Guard': 'on'
        }
        (HOST_NAME, HOST_SUBDIR) = self.network.ws_server
        if self.network.is_proxy_enabled():
            # Route the absolute URL through the configured proxy.
            conn = HTTPConnection(
                host=self.network._get_proxy()[0],
                port=self.network._get_proxy()[1])
            try:
                conn.request(
                    method=self.request_method, url="http://" + HOST_NAME + HOST_SUBDIR + self.method_name,
                    body=data, headers=headers)
            except Exception as e:
                raise NetworkError(self.network, e)
        else:
            conn = HTTPConnection(host=HOST_NAME)
            try:
                conn.request(
                    method=self.request_method, url=HOST_SUBDIR + self.method_name, body=data, headers=headers)
            except Exception as e:
                raise NetworkError(self.network, e)
        try:
            # Responses are gzip-compressed (see Accept-Encoding above).
            buf = StringIO(conn.getresponse().read())
            f = gzip.GzipFile(fileobj=buf)
            response_text = _unicode(f.read())
        except Exception as e:
            raise MalformedResponseError(self.network, e)
        # Strip the anti-JSON-hijacking prefix some frameworks prepend.
        response_text = response_text.replace(")]}',", '')
        self._check_response_for_errors(response_text)
        return response_text

    def execute(self, cacheable=False):
        """Returns the decoded JSON response of the request."""
        if self.network.is_caching_enabled() and cacheable:
            response = self._get_cached_response()
        else:
            response = self._download_response()
        return json.loads(_string(response))

    def _check_response_for_errors(self, response):
        """Checks the response for errors and raises one if any exists."""
        try:
            doc = json.loads(_string(response))
        except Exception as e:
            logging.error(response)
            raise MalformedResponseError(self.network, e)
        if not doc['success']:
            # Log the service error (the old code used a bare Python 2
            # `print` statement here, inconsistent with the logging above).
            logging.error(doc['error'])
            raise WSError(self.network, 500, response)
def _string_output(funct):
    """Decorator coercing a function's return value through _string()."""
    def wrapper(*args):
        return _string(funct(*args))
    return wrapper
def _pad_list(given_list, desired_length, padding=None):
"""
Pads a list to be of the desired_length.
"""
while len(given_list) < desired_length:
given_list.append(padding)
return given_list
class _BaseObject(object):
"""An abstract webservices object."""
network = None
def __init__(self, network, ws_prefix):
self.network = network
self.ws_prefix = ws_prefix
def _request(self, method_name, cacheable=True, params=None):
if not params:
params = self._get_params()
return _Request(self.network, method_name, params).execute(cacheable)
def _get_params(self):
"""Returns the most common set of parameters between all objects."""
return {}
def __hash__(self):
# Convert any ints (or whatever) into strings
values = map(six.text_type, self._get_params().values())
return hash(self.network) + hash(six.text_type(type(self)) + "".join(
list(self._get_params().keys()) + list(values)
).lower())
def _extract_cdata_from_request(self, method_name, tag_name, params):
doc = self._request(method_name, True, params)
return doc.getElementsByTagName(
tag_name)[0].firstChild.wholeText.strip()
class WSError(Exception):
    """Exception related to the Network web service"""

    def __init__(self, network, status, details):
        self.network = network
        self.status = status
        self.details = details

    @_string_output
    def __str__(self):
        return self.details

    def get_id(self):
        """Returns the exception ID, from one of the following:
        STATUS_INVALID_SERVICE = 2
        STATUS_INVALID_METHOD = 3
        STATUS_AUTH_FAILED = 4
        STATUS_INVALID_FORMAT = 5
        STATUS_INVALID_PARAMS = 6
        STATUS_INVALID_RESOURCE = 7
        STATUS_TOKEN_ERROR = 8
        STATUS_INVALID_SK = 9
        STATUS_INVALID_API_KEY = 10
        STATUS_OFFLINE = 11
        STATUS_SUBSCRIBERS_ONLY = 12
        STATUS_TOKEN_UNAUTHORIZED = 14
        STATUS_TOKEN_EXPIRED = 15
        """
        return self.status
class MalformedResponseError(Exception):
    """Exception conveying a malformed response from sons.mn."""

    def __init__(self, network, underlying_error):
        self.network = network
        self.underlying_error = underlying_error

    def __str__(self):
        return "Malformed response from sons.mn. Underlying error: " + str(
            self.underlying_error)
class NetworkError(Exception):
    """Exception conveying a problem in sending a request to sons.mn"""

    def __init__(self, network, underlying_error):
        self.network = network
        self.underlying_error = underlying_error

    def __str__(self):
        return "NetworkError: " + str(self.underlying_error)
class _Opus(_BaseObject):
    """An album or track."""

    artist = None
    title = None

    __hash__ = _BaseObject.__hash__

    def __init__(self, artist, title, network, ws_prefix):
        """
        Create an opus instance.
        # Parameters:
            * artist: An artist name or an Artist object.
            * title: The album or track title.
            * ws_prefix: 'album' or 'track'
        """
        _BaseObject.__init__(self, network, ws_prefix)
        # Accept either a ready-made Artist or a bare artist name.
        if isinstance(artist, Artist):
            self.artist = artist
        else:
            self.artist = Artist(artist, self.network)
        self.title = title

    def __repr__(self):
        kind = self.ws_prefix.title()
        return "pylast.%s(%s, %s, %s)" % (
            kind, repr(self.artist.name), repr(self.title), repr(self.network))

    @_string_output
    def __str__(self):
        artist_name = self.get_artist().get_name()
        return _unicode("%s - %s") % (artist_name, self.get_title())

    def __eq__(self, other):
        if type(self) != type(other):
            return False
        titles_match = self.get_title().lower() == other.get_title().lower()
        artists_match = (self.get_artist().get_name().lower() ==
                         other.get_artist().get_name().lower())
        return titles_match and artists_match

    def __ne__(self, other):
        return not self.__eq__(other)

    def _get_params(self):
        return {
            'artist': self.get_artist().get_name(),
            self.ws_prefix: self.get_title()}

    def get_artist(self):
        """Returns the associated Artist object."""
        return self.artist

    def get_title(self, properly_capitalized=False):
        """Returns the artist or track title."""
        if properly_capitalized:
            # Refresh with the server's canonical capitalization.
            self.title = _extract(
                self._request(self.ws_prefix + ".getInfo", True), "name")
        return self.title

    def get_name(self, properly_capitalized=False):
        """Returns the album or track title (alias to get_title())."""
        return self.get_title(properly_capitalized)

    def get_id(self):
        """Returns the ID on the network."""
        return _extract(
            self._request(self.ws_prefix + ".getInfo", cacheable=True), "id")

    def get_playcount(self):
        """Returns the number of plays on the network"""
        info = self._request(self.ws_prefix + ".getInfo", cacheable=True)
        return _number(_extract(info, "playcount"))

    def get_sons_id(self):
        """Returns the MusicBrainz ID of the album or track."""
        return _extract(
            self._request(self.ws_prefix + ".getInfo", cacheable=True), "sons_id")
class Album(object):
    """An album."""

    def __init__(self, id, title, image, artist_name, artist_id, year, duration, num_tracks):
        # Plain value container mirroring the web service's album payload.
        (self.id, self.title, self.image, self.artist_name, self.artist_id,
         self.year, self.duration, self.num_tracks) = (
            id, title, image, artist_name, artist_id, year, duration, num_tracks)

    def get_tracks(self, network):
        """Fetch this album's tracks from the web service as Track objects."""
        response = _Request(network, 'tracks/fetchByAlbum?albumId=' + str(self.id), params={},
                            request_method='GET').execute(True)
        return [Track(id=t['id'], image=t['image'], title=t['title'], album_name=t['albumName'],
                      length=t['length'], media_path=t['mediaPath'], origin=t['origin'],
                      youtube=t['youtube'], lyrics=t['lyrics'], artist_id=t['artistId'],
                      artist_name=t['artistName'])
                for t in response['tracks']]
class Artist():
    """An artist."""

    def __init__(self, id, image, name, num_albums, num_tracks):
        # Plain value container mirroring the web service's artist payload.
        (self.id, self.image, self.name,
         self.num_albums, self.num_tracks) = (id, image, name, num_albums, num_tracks)
class Track():
    """A sons.mn track."""

    def __init__(self, id, image, title,
                 album_name, length, media_path, origin, youtube, lyrics, artist_id, artist_name):
        # Plain value container mirroring the web service's track payload.
        (self.id, self.image, self.title, self.album_name, self.length,
         self.media_path, self.origin, self.youtube, self.lyrics,
         self.artist_id, self.artist_name) = (
            id, image, title, album_name, length, media_path, origin,
            youtube, lyrics, artist_id, artist_name)
class _Search(_BaseObject):
    """An abstract class. Use one of its derivatives."""

    def __init__(self, ws_prefix, search_terms, network):
        _BaseObject.__init__(self, network, ws_prefix)
        self._ws_prefix = ws_prefix
        self.search_terms = search_terms
        # Index of the last page retrieved; 0 means nothing fetched yet.
        self._last_page_index = 0

    def _get_params(self):
        # One request parameter per search term (shallow copy).
        return dict(self.search_terms)

    def get_total_result_count(self):
        """Returns the total count of all the results."""
        self.search_terms['page'] = 1
        return _extract(self._request(self._ws_prefix, True), "totalNumResults")

    def _retrieve_page(self, page_index):
        """Returns the node of matches to be processed"""
        params = self._get_params()
        params["page"] = page_index
        return self._request(self._ws_prefix, True, params)

    def _retrieve_next_page(self):
        self._last_page_index += 1
        return self._retrieve_page(self._last_page_index)
class AlbumSearch(_Search):
    """Search for an album by name."""
    def __init__(self, album_name, network):
        _Search.__init__(self, "album", {"album": album_name}, network)
    def get_next_page(self):
        """Returns the next page of results as a sequence of Album objects."""
        # NOTE(review): this still follows the original pylast XML flow —
        # _retrieve_next_page() now returns decoded JSON (a dict), which has
        # no getElementsByTagName(), and Album() below is invoked with three
        # arguments although this file's Album takes eight.  Presumably this
        # search path is unused/unported; confirm before relying on it.
        master_node = self._retrieve_next_page()
        seq = []
        for node in master_node.getElementsByTagName("album"):
            seq.append(Album(
                _extract(node, "artist"),
                _extract(node, "name"),
                self.network))
        return seq
class ArtistSearch(_Search):
    """Search for an artist by artist name."""
    def __init__(self, artist_name, network):
        _Search.__init__(self, "artist", {"artist": artist_name}, network)
    def get_next_page(self):
        """Returns the next page of results as a sequence of Artist objects."""
        # NOTE(review): like AlbumSearch, this is unported pylast XML code —
        # the response is a JSON dict without getElementsByTagName(), and
        # Artist() below is called with two arguments although this file's
        # Artist takes five.  Confirm before relying on it.
        master_node = self._retrieve_next_page()
        seq = []
        for node in master_node.getElementsByTagName("artist"):
            artist = Artist(_extract(node, "name"), self.network)
            artist.listener_count = _number(_extract(node, "listeners"))
            seq.append(artist)
        return seq
class TrackSearch(_Search):
    """
    Search for a track by track title. If you don't want to narrow the results
    down by specifying the artist name, set it to empty string.
    """

    def __init__(self, query, network):
        _Search.__init__(self, "search/tracks", {"keywords": query}, network)

    def get_next_page(self):
        """Returns the next page of results as a sequence of Track objects."""
        page = self._retrieve_next_page()
        return [
            Track(id=t['id'], image=t['image'], title=t['title'], album_name=t['albumName'],
                  length=t['length'], media_path=t['mediaPath'], origin=t['origin'],
                  youtube=t['youtube'], lyrics=t['lyrics'], artist_id=t['artistId'],
                  artist_name=t['artistName'])
            for t in page['results']
        ]
def md5(text):
    """Returns the md5 hash of a string."""
    digest = hashlib.md5(_unicode(text).encode("utf-8"))
    return digest.hexdigest()
def _unicode(text):
    """Coerce any value to a unicode string, decoding bytes as UTF-8."""
    if isinstance(text, six.text_type):
        return text
    if isinstance(text, six.binary_type):
        return six.text_type(text, "utf-8")
    return six.text_type(text)
def _string(string):
    """For Python2 routines that can only process str type."""
    if isinstance(string, str):
        return string
    casted = six.text_type(string)
    # On Python 2 `str` means bytes, so encode the unicode result.
    return casted.encode("utf-8") if sys.version_info[0] == 2 else casted
def _extract(node, name, index=0):
    """Extracts a value from the xml string"""
    # `index` is accepted for pylast API compatibility but unused here:
    # `node` is a decoded JSON dict, so this is a plain key lookup.
    return node[name]
def _extract_element_tree(node, index=0):
    """Extract an element tree into a multi-level dictionary
    NB: If any elements have text nodes as well as nested
    elements this will ignore the text nodes"""
    # NOTE(review): the `index` parameter is unused.
    def _recurse_build_tree(rootNode, targetDict):
        """Recursively build a multi-level dict"""
        def _has_child_elements(rootNode):
            """Check if an element has any nested (child) elements"""
            for node in rootNode.childNodes:
                if node.nodeType == node.ELEMENT_NODE:
                    return True
            return False
        for node in rootNode.childNodes:
            if node.nodeType == node.ELEMENT_NODE:
                if _has_child_elements(node):
                    # Nested elements: descend into a fresh sub-dict keyed by
                    # the tag name (text siblings, if any, are dropped).
                    targetDict[node.tagName] = {}
                    _recurse_build_tree(node, targetDict[node.tagName])
                else:
                    # Leaf element: store its unescaped text, or None when the
                    # element is empty.
                    val = None if node.firstChild is None else \
                        _unescape_htmlentity(node.firstChild.data.strip())
                    targetDict[node.tagName] = val
        return targetDict
    return _recurse_build_tree(node, {})
def _extract_all(node, name, limit_count=None):
    """Extracts all the values from the xml string. returning a list."""
    values = []
    total = len(node.getElementsByTagName(name))
    for position in range(total):
        # Stop early once the optional cap has been reached.
        if len(values) == limit_count:
            break
        values.append(_extract(node, name, position))
    return values
def _url_safe(text):
    """Does all kinds of tricks on a text to make it safe to use in a url."""
    # Quote twice (plus-style), then lowercase the result.
    quoted_once = url_quote_plus(_string(text))
    return url_quote_plus(quoted_once).lower()
def _number(string):
"""
Extracts an int from a string.
Returns a 0 if None or an empty string was passed.
"""
if not string:
return 0
elif string == "":
return 0
else:
try:
return int(string)
except ValueError:
return float(string)
def _unescape_htmlentity(string):
    """Replace named HTML entities (e.g. "&amp;") with their characters.

    Implemented as a single regex pass so text produced by one substitution
    is never rescanned: the previous replace() loop could double-unescape
    input such as "&amp;lt;" into "<" instead of the correct "&lt;".
    Unknown entity names are left untouched.
    """
    import re
    mapping = htmlentitydefs.name2codepoint

    def _substitute(match):
        name = match.group(1)
        if name in mapping:
            return unichr(mapping[name])
        # Not a known named entity: keep the original text verbatim.
        return match.group(0)

    return re.sub(r"&([A-Za-z][0-9A-Za-z]*);", _substitute, string)
def extract_items(topitems_or_libraryitems):
    """
    Extracts a sequence of items from a sequence of TopItem or
    LibraryItem objects.
    """
    return [wrapper.item for wrapper in topitems_or_libraryitems]
class ScrobblingError(Exception):
    """Base class for errors reported by the scrobbling service."""

    def __init__(self, message):
        Exception.__init__(self)
        # Human-readable reason; subclasses pass a fixed message.
        self.message = message

    # _string_output is defined elsewhere in this module; presumably it adapts
    # the unicode return value for Python 2 str() -- confirm at its definition.
    @_string_output
    def __str__(self):
        return self.message
class BannedClientError(ScrobblingError):
    """Raised when this client version has been banned by the service."""

    def __init__(self):
        super(BannedClientError, self).__init__(
            "This version of the client has been banned")
class BadAuthenticationError(ScrobblingError):
    """Raised when the authentication token is rejected."""

    def __init__(self):
        super(BadAuthenticationError, self).__init__(
            "Bad authentication token")
class BadTimeError(ScrobblingError):
    """Raised when the submitted timestamp is too far from the current time."""

    def __init__(self):
        super(BadTimeError, self).__init__(
            "Time provided is not close enough to current time")
class BadSessionError(ScrobblingError):
    """Raised when the session id is no longer valid."""

    def __init__(self):
        super(BadSessionError, self).__init__(
            "Bad session id, consider re-handshaking")
# End of file
| |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from collections import namedtuple
from io import StringIO
import click
import errno
import json
import logging
import os
import pathlib
import sys
from .benchmark.codec import JsonEncoder
from .benchmark.compare import RunnerComparator, DEFAULT_THRESHOLD
from .benchmark.runner import CppBenchmarkRunner, JavaBenchmarkRunner
from .compat import _import_pandas
from .lang.cpp import CppCMakeDefinition, CppConfiguration
from .utils.cli import ArrowBool, validate_arrow_sources, add_optional_command
from .utils.lint import linter, python_numpydoc, LintValidationException
from .utils.logger import logger, ctx as log_ctx
from .utils.source import ArrowSources
from .utils.tmpdir import tmpdir
# Set default logging to INFO in command line.
logging.basicConfig(level=logging.INFO)

# Reusable click parameter type for boolean-ish values (from .utils.cli).
BOOL = ArrowBool()
@click.group()
@click.option("--debug", type=BOOL, is_flag=True, default=False,
              help="Increase logging with debugging output.")
@click.option("--pdb", type=BOOL, is_flag=True, default=False,
              help="Invoke pdb on uncaught exception.")
@click.option("-q", "--quiet", type=BOOL, is_flag=True, default=False,
              help="Silence executed commands.")
@click.pass_context
def archery(ctx, debug, pdb, quiet):
    """ Apache Arrow developer utilities.
    See sub-commands help with `archery <cmd> --help`.
    """
    # Ensure ctx.obj exists
    ctx.ensure_object(dict)
    log_ctx.quiet = quiet
    if debug:
        logger.setLevel(logging.DEBUG)
    # Stash the flag on the click context so sub-commands can inspect it.
    ctx.debug = debug
    if pdb:
        # The module import deliberately shadows the boolean `pdb` parameter;
        # the flag is not needed past this point.
        import pdb
        sys.excepthook = lambda t, v, e: pdb.pm()
# Shared click path type for build directories (resolved to absolute paths).
build_dir_type = click.Path(dir_okay=True, file_okay=False, resolve_path=True)
# Supported build types
build_type = click.Choice(["debug", "relwithdebinfo", "release"],
                          case_sensitive=False)
# Supported warn levels
warn_level_type = click.Choice(["everything", "checkin", "production"],
                               case_sensitive=False)
# SIMD level values are matched case-sensitively.
simd_level = click.Choice(["NONE", "SSE4_2", "AVX2", "AVX512"],
                          case_sensitive=True)
def cpp_toolchain_options(cmd):
    """Attach the C/C++ toolchain selection options to *cmd*."""
    toolchain = (
        click.option("--cc", metavar="<compiler>", help="C compiler."),
        click.option("--cxx", metavar="<compiler>", help="C++ compiler."),
        click.option("--cxx-flags", help="C++ compiler flags."),
        click.option("--cpp-package-prefix",
                     help=("Value to pass for ARROW_PACKAGE_PREFIX and "
                           "use ARROW_DEPENDENCY_SOURCE=SYSTEM")),
    )
    return _apply_options(cmd, toolchain)
def java_toolchain_options(cmd):
    """Attach the Java toolchain selection options to *cmd*."""
    for decorate in (
            click.option("--java-home", metavar="<java_home>",
                         help="Path to Java Developers Kit."),
            click.option("--java-options", help="java compiler options."),
    ):
        cmd = decorate(cmd)
    return cmd
def _apply_options(cmd, options):
for option in options:
cmd = option(cmd)
return cmd
@archery.command(short_help="Initialize an Arrow C++ build")
@click.option("--src", metavar="<arrow_src>", default=None,
              callback=validate_arrow_sources,
              help="Specify Arrow source directory")
# toolchain
@cpp_toolchain_options
@click.option("--build-type", default=None, type=build_type,
              help="CMake's CMAKE_BUILD_TYPE")
@click.option("--warn-level", default="production", type=warn_level_type,
              help="Controls compiler warnings -W(no-)error.")
@click.option("--use-gold-linker", default=True, type=BOOL,
              help="Toggles ARROW_USE_LD_GOLD option.")
@click.option("--simd-level", default="SSE4_2", type=simd_level,
              help="Toggles ARROW_SIMD_LEVEL option.")
# Tests and benchmarks
@click.option("--with-tests", default=True, type=BOOL,
              help="Build with tests.")
@click.option("--with-benchmarks", default=None, type=BOOL,
              help="Build with benchmarks.")
@click.option("--with-examples", default=None, type=BOOL,
              help="Build with examples.")
@click.option("--with-integration", default=None, type=BOOL,
              help="Build with integration test executables.")
# Static checks
@click.option("--use-asan", default=None, type=BOOL,
              help="Toggle ARROW_USE_ASAN sanitizer.")
@click.option("--use-tsan", default=None, type=BOOL,
              help="Toggle ARROW_USE_TSAN sanitizer.")
@click.option("--use-ubsan", default=None, type=BOOL,
              help="Toggle ARROW_USE_UBSAN sanitizer.")
@click.option("--with-fuzzing", default=None, type=BOOL,
              help="Toggle ARROW_FUZZING.")
# Components
@click.option("--with-compute", default=None, type=BOOL,
              help="Build the Arrow compute module.")
@click.option("--with-csv", default=None, type=BOOL,
              help="Build the Arrow CSV parser module.")
@click.option("--with-cuda", default=None, type=BOOL,
              help="Build the Arrow CUDA extensions.")
@click.option("--with-dataset", default=None, type=BOOL,
              help="Build the Arrow dataset module.")
@click.option("--with-filesystem", default=None, type=BOOL,
              help="Build the Arrow filesystem layer.")
@click.option("--with-flight", default=None, type=BOOL,
              help="Build with Flight rpc support.")
@click.option("--with-gandiva", default=None, type=BOOL,
              help="Build with Gandiva expression compiler support.")
@click.option("--with-hdfs", default=None, type=BOOL,
              help="Build the Arrow HDFS bridge.")
@click.option("--with-hiveserver2", default=None, type=BOOL,
              help="Build the HiveServer2 client and arrow adapter.")
@click.option("--with-ipc", default=None, type=BOOL,
              help="Build the Arrow IPC extensions.")
@click.option("--with-json", default=None, type=BOOL,
              help="Build the Arrow JSON parser module.")
@click.option("--with-jni", default=None, type=BOOL,
              help="Build the Arrow JNI lib.")
@click.option("--with-mimalloc", default=None, type=BOOL,
              help="Build the Arrow mimalloc based allocator.")
@click.option("--with-parquet", default=None, type=BOOL,
              help="Build with Parquet file support.")
@click.option("--with-plasma", default=None, type=BOOL,
              help="Build with Plasma object store support.")
@click.option("--with-python", default=None, type=BOOL,
              help="Build the Arrow CPython extensions.")
@click.option("--with-r", default=None, type=BOOL,
              help="Build the Arrow R extensions. This is not a CMake option, "
              "it will toggle required options")
@click.option("--with-s3", default=None, type=BOOL,
              help="Build Arrow with S3 support.")
# Compressions
@click.option("--with-brotli", default=None, type=BOOL,
              help="Build Arrow with brotli compression.")
@click.option("--with-bz2", default=None, type=BOOL,
              help="Build Arrow with bz2 compression.")
@click.option("--with-lz4", default=None, type=BOOL,
              help="Build Arrow with lz4 compression.")
@click.option("--with-snappy", default=None, type=BOOL,
              help="Build Arrow with snappy compression.")
@click.option("--with-zlib", default=None, type=BOOL,
              help="Build Arrow with zlib compression.")
@click.option("--with-zstd", default=None, type=BOOL,
              help="Build Arrow with zstd compression.")
# CMake extra feature
@click.option("--cmake-extras", type=str, multiple=True,
              help="Extra flags/options to pass to cmake invocation. "
              "Can be stacked")
@click.option("--install-prefix", type=str,
              help="Destination directory where files are installed. "
              "Expands to CMAKE_INSTALL_PREFIX. Defaults to $CONDA_PREFIX "
              "if the variable exists.")
# misc
@click.option("-f", "--force", type=BOOL, is_flag=True, default=False,
              help="Delete existing build directory if found.")
@click.option("--targets", type=str, multiple=True,
              help="Generator targets to run. Can be stacked.")
@click.argument("build_dir", type=build_dir_type)
@click.pass_context
def build(ctx, src, build_dir, force, targets, **kwargs):
    """ Initialize a C++ build directory.
    The build command creates a directory initialized with Arrow's cpp source
    cmake and configuration. It can also optionally invoke the generator to
    test the build (and used in scripts).
    Note that archery will carry the caller environment. It will also not touch
    an existing directory, one must use the `--force` option to remove the
    existing directory.
    Examples:
    \b
    # Initialize build with clang8 and avx2 support in directory `clang8-build`
    \b
    archery build --cc=clang-8 --cxx=clang++-8 --cxx-flags=-mavx2 clang8-build
    \b
    # Builds and run test
    archery build --targets=all --targets=test build
    """
    # Arrow's cpp cmake configuration; the remaining kwargs map 1:1 onto the
    # CppConfiguration constructor parameters.
    conf = CppConfiguration(**kwargs)
    # This is a closure around cmake invocation, e.g. calling `def.build()`
    # yields a directory ready to be run with the generator
    cmake_def = CppCMakeDefinition(src.cpp, conf)
    # Create build directory
    build = cmake_def.build(build_dir, force=force)
    for target in targets:
        build.run(target)
# (option_name, help) pairs, one per lint check; consumed by
# decorate_lint_command() to generate --<check>/--no-<check> flags.
LintCheck = namedtuple('LintCheck', ('option_name', 'help'))
lint_checks = [
    LintCheck('clang-format', "Format C++ files with clang-format."),
    LintCheck('clang-tidy', "Lint C++ files with clang-tidy."),
    LintCheck('cpplint', "Lint C++ files with cpplint."),
    LintCheck('iwyu', "Lint changed C++ files with Include-What-You-Use."),
    LintCheck('python',
              "Format and lint Python files with autopep8 and flake8."),
    LintCheck('numpydoc', "Lint Python files with numpydoc."),
    LintCheck('cmake-format', "Format CMake files with cmake-format.py."),
    LintCheck('rat',
              "Check all sources files for license texts via Apache RAT."),
    LintCheck('r', "Lint R files."),
    LintCheck('docker', "Lint Dockerfiles with hadolint."),
]
def decorate_lint_command(cmd):
    """
    Decorate the lint() command function to add individual per-check options.
    """
    for check in lint_checks:
        flag = "--{0}/--no-{0}".format(check.option_name)
        cmd = click.option(flag, default=None, help=check.help)(cmd)
    return cmd
@archery.command(short_help="Check Arrow source tree for errors")
@click.option("--src", metavar="<arrow_src>", default=None,
              callback=validate_arrow_sources,
              help="Specify Arrow source directory")
@click.option("--fix", is_flag=True, type=BOOL, default=False,
              help="Toggle fixing the lint errors if the linter supports it.")
@click.option("--iwyu_all", is_flag=True, type=BOOL, default=False,
              help="Run IWYU on all C++ files if enabled")
@click.option("-a", "--all", is_flag=True, default=False,
              help="Enable all checks.")
@decorate_lint_command
@click.pass_context
def lint(ctx, src, fix, iwyu_all, **checks):
    # `checks` holds one tri-state value (None/True/False) per entry of
    # lint_checks, injected by decorate_lint_command.
    if checks.pop('all'):
        # "--all" is given => enable all non-selected checks
        for k, v in checks.items():
            if v is None:
                checks[k] = True
    if not any(checks.values()):
        raise click.UsageError(
            "Need to enable at least one lint check (try --help)")
    try:
        linter(src, fix, iwyu_all=iwyu_all, **checks)
    except LintValidationException:
        # Details were already reported; exit non-zero for CI consumers.
        sys.exit(1)
@archery.command(short_help="Lint python docstring with NumpyDoc")
@click.argument('symbols', nargs=-1)
@click.option("--src", metavar="<arrow_src>", default=None,
              callback=validate_arrow_sources,
              help="Specify Arrow source directory")
@click.option("--allow-rule", "-a", multiple=True,
              help="Allow only these rules")
@click.option("--disallow-rule", "-d", multiple=True,
              help="Disallow these rules")
def numpydoc(src, symbols, allow_rule, disallow_rule):
    """
    Pass list of modules or symbols as arguments to restrict the validation.
    By default all modules of pyarrow are tried to be validated.
    Examples
    --------
    archery numpydoc pyarrow.dataset
    archery numpydoc pyarrow.csv pyarrow.json pyarrow.parquet
    archery numpydoc pyarrow.array
    """
    # Default set of numpydoc rules to skip when none were given explicitly.
    disallow_rule = disallow_rule or {'GL01', 'SA01', 'EX01', 'ES01'}
    try:
        results = python_numpydoc(symbols, allow_rules=allow_rule,
                                  disallow_rules=disallow_rule)
        for result in results:
            # ok() presumably raises LintValidationException on a failed
            # validation -- confirm in utils.lint.
            result.ok()
    except LintValidationException:
        sys.exit(1)
@archery.group()
@click.pass_context
def benchmark(ctx):
    """ Arrow benchmarking.
    Use the diff sub-command to benchmark revisions, and/or build directories.
    """
    # Container group: the work happens in the list/run/diff sub-commands.
    pass
def benchmark_common_options(cmd):
    """Attach the options shared by every benchmark sub-command to *cmd*."""
    def check_language(ctx, param, value):
        # click callback: only the cpp and java runners are implemented.
        if value not in {"cpp", "java"}:
            raise click.BadParameter("cpp or java is supported now")
        return value
    options = [
        click.option("--src", metavar="<arrow_src>", show_default=True,
                     default=None, callback=validate_arrow_sources,
                     help="Specify Arrow source directory"),
        click.option("--preserve", type=BOOL, default=False, show_default=True,
                     is_flag=True,
                     help="Preserve workspace for investigation."),
        click.option("--output", metavar="<output>",
                     type=click.File("w", encoding="utf8"), default="-",
                     help="Capture output result into file."),
        click.option("--language", metavar="<lang>", type=str, default="cpp",
                     show_default=True, callback=check_language,
                     help="Specify target language for the benchmark"),
        click.option("--build-extras", type=str, multiple=True,
                     help="Extra flags/options to pass to mvn build. "
                     "Can be stacked. For language=java"),
        click.option("--benchmark-extras", type=str, multiple=True,
                     help="Extra flags/options to pass to mvn benchmark. "
                     "Can be stacked. For language=java"),
        click.option("--cmake-extras", type=str, multiple=True,
                     help="Extra flags/options to pass to cmake invocation. "
                     "Can be stacked. For language=cpp")
    ]
    # Both toolchains' options are attached so every benchmark command accepts
    # them regardless of the selected --language.
    cmd = java_toolchain_options(cmd)
    cmd = cpp_toolchain_options(cmd)
    return _apply_options(cmd, options)
def benchmark_filter_options(cmd):
    """Attach the suite/benchmark regex filter options to *cmd*."""
    for decorate in (
            click.option("--suite-filter", metavar="<regex>",
                         show_default=True, type=str, default=None,
                         help="Regex filtering benchmark suites."),
            click.option("--benchmark-filter", metavar="<regex>",
                         show_default=True, type=str, default=None,
                         help="Regex filtering benchmarks."),
    ):
        cmd = decorate(cmd)
    return cmd
@benchmark.command(name="list", short_help="List benchmark suite")
@click.argument("rev_or_path", metavar="[<rev_or_path>]",
                default="WORKSPACE", required=False)
@benchmark_common_options
@click.pass_context
def benchmark_list(ctx, rev_or_path, src, preserve, output, cmake_extras,
                   java_home, java_options, build_extras, benchmark_extras,
                   language, **kwargs):
    """ List benchmark suite.
    """
    with tmpdir(preserve=preserve) as root:
        logger.debug("Running benchmark {}".format(rev_or_path))
        if language == "cpp":
            conf = CppBenchmarkRunner.default_configuration(
                cmake_extras=cmake_extras, **kwargs)
            runner_base = CppBenchmarkRunner.from_rev_or_path(
                src, root, rev_or_path, conf)
        elif language == "java":
            # These kwargs come from cpp_toolchain_options and do not apply
            # to the Java runner.
            for key in {'cpp_package_prefix', 'cxx_flags', 'cxx', 'cc'}:
                del kwargs[key]
            conf = JavaBenchmarkRunner.default_configuration(
                java_home=java_home, java_options=java_options,
                build_extras=build_extras, benchmark_extras=benchmark_extras,
                **kwargs)
            runner_base = JavaBenchmarkRunner.from_rev_or_path(
                src, root, rev_or_path, conf)
        # --language is validated by benchmark_common_options, so runner_base
        # is always bound by one of the branches above.
        for b in runner_base.list_benchmarks:
            click.echo(b, file=output)
@benchmark.command(name="run", short_help="Run benchmark suite")
@click.argument("rev_or_path", metavar="[<rev_or_path>]",
                default="WORKSPACE", required=False)
@benchmark_common_options
@benchmark_filter_options
@click.option("--repetitions", type=int, default=-1,
              help=("Number of repetitions of each benchmark. Increasing "
                    "may improve result precision. "
                    "[default: 1 for cpp, 5 for java]"))
@click.pass_context
def benchmark_run(ctx, rev_or_path, src, preserve, output, cmake_extras,
                  java_home, java_options, build_extras, benchmark_extras,
                  language, suite_filter, benchmark_filter, repetitions,
                  **kwargs):
    """ Run benchmark suite.
    This command will run the benchmark suite for a single build. This is
    used to capture (and/or publish) the results.
    The caller can optionally specify a target which is either a git revision
    (commit, tag, special values like HEAD) or a cmake build directory.
    When a commit is referenced, a local clone of the arrow sources (specified
    via --src) is performed and the proper branch is created. This is done in
    a temporary directory which can be left intact with the `--preserve` flag.
    The special token "WORKSPACE" is reserved to specify the current git
    workspace. This imply that no clone will be performed.
    Examples:
    \b
    # Run the benchmarks on current git workspace
    \b
    archery benchmark run
    \b
    # Run the benchmarks on current previous commit
    \b
    archery benchmark run HEAD~1
    \b
    # Run the benchmarks on current previous commit
    \b
    archery benchmark run --output=run.json
    """
    with tmpdir(preserve=preserve) as root:
        logger.debug("Running benchmark {}".format(rev_or_path))
        if language == "cpp":
            conf = CppBenchmarkRunner.default_configuration(
                cmake_extras=cmake_extras, **kwargs)
            # -1 is the "use the per-language default" sentinel (1 for cpp).
            repetitions = repetitions if repetitions != -1 else 1
            runner_base = CppBenchmarkRunner.from_rev_or_path(
                src, root, rev_or_path, conf,
                repetitions=repetitions,
                suite_filter=suite_filter, benchmark_filter=benchmark_filter)
        elif language == "java":
            # Drop the C++-only toolchain kwargs collected by the shared
            # benchmark_common_options decorator.
            for key in {'cpp_package_prefix', 'cxx_flags', 'cxx', 'cc'}:
                del kwargs[key]
            conf = JavaBenchmarkRunner.default_configuration(
                java_home=java_home, java_options=java_options,
                build_extras=build_extras, benchmark_extras=benchmark_extras,
                **kwargs)
            repetitions = repetitions if repetitions != -1 else 5
            runner_base = JavaBenchmarkRunner.from_rev_or_path(
                src, root, rev_or_path, conf,
                repetitions=repetitions,
                benchmark_filter=benchmark_filter)
        json.dump(runner_base, output, cls=JsonEncoder)
@benchmark.command(name="diff", short_help="Compare benchmark suites")
@benchmark_common_options
@benchmark_filter_options
@click.option("--threshold", type=float, default=DEFAULT_THRESHOLD,
              show_default=True,
              help="Regression failure threshold in percentage.")
@click.option("--repetitions", type=int, default=1, show_default=True,
              help=("Number of repetitions of each benchmark. Increasing "
                    "may improve result precision. "
                    "[default: 1 for cpp, 5 for java]"))
@click.option("--no-counters", type=BOOL, default=False, is_flag=True,
              help="Hide counters field in diff report.")
# The two metavars intentionally render together as
# "[<contender> [<baseline>]]" in the usage line.
@click.argument("contender", metavar="[<contender>",
                default=ArrowSources.WORKSPACE, required=False)
@click.argument("baseline", metavar="[<baseline>]]", default="origin/master",
                required=False)
@click.pass_context
def benchmark_diff(ctx, src, preserve, output, language, cmake_extras,
                   suite_filter, benchmark_filter, repetitions, no_counters,
                   java_home, java_options, build_extras, benchmark_extras,
                   threshold, contender, baseline, **kwargs):
    """Compare (diff) benchmark runs.
    This command acts like git-diff but for benchmark results.
    The caller can optionally specify both the contender and the baseline. If
    unspecified, the contender will default to the current workspace (like git)
    and the baseline will default to master.
    Each target (contender or baseline) can either be a git revision
    (commit, tag, special values like HEAD) or a cmake build directory. This
    allow comparing git commits, and/or different compilers and/or compiler
    flags.
    When a commit is referenced, a local clone of the arrow sources (specified
    via --src) is performed and the proper branch is created. This is done in
    a temporary directory which can be left intact with the `--preserve` flag.
    The special token "WORKSPACE" is reserved to specify the current git
    workspace. This imply that no clone will be performed.
    Examples:
    \b
    # Compare workspace (contender) with master (baseline)
    \b
    archery benchmark diff
    \b
    # Compare master (contender) with latest version (baseline)
    \b
    export LAST=$(git tag -l "apache-arrow-[0-9]*" | sort -rV | head -1)
    \b
    archery benchmark diff master "$LAST"
    \b
    # Compare g++7 (contender) with clang++-8 (baseline) builds
    \b
    archery build --with-benchmarks=true \\
            --cxx-flags=-ftree-vectorize \\
            --cc=gcc-7 --cxx=g++-7 gcc7-build
    \b
    archery build --with-benchmarks=true \\
            --cxx-flags=-flax-vector-conversions \\
            --cc=clang-8 --cxx=clang++-8 clang8-build
    \b
    archery benchmark diff gcc7-build clang8-build
    \b
    # Compare default targets but scoped to the suites matching
    # `^arrow-compute-aggregate` and benchmarks matching `(Sum|Mean)Kernel`.
    \b
    archery benchmark diff --suite-filter="^arrow-compute-aggregate" \\
            --benchmark-filter="(Sum|Mean)Kernel"
    \b
    # Capture result in file `result.json`
    \b
    archery benchmark diff --output=result.json
    \b
    # Equivalently with no stdout clutter.
    archery --quiet benchmark diff > result.json
    \b
    # Comparing with a cached results from `archery benchmark run`
    \b
    archery benchmark run --output=run.json HEAD~1
    \b
    # This should not recompute the benchmark from run.json
    archery --quiet benchmark diff WORKSPACE run.json > result.json
    """
    with tmpdir(preserve=preserve) as root:
        logger.debug("Comparing {} (contender) with {} (baseline)"
                     .format(contender, baseline))
        if language == "cpp":
            conf = CppBenchmarkRunner.default_configuration(
                cmake_extras=cmake_extras, **kwargs)
            # -1 is the "use the per-language default" sentinel (1 for cpp).
            repetitions = repetitions if repetitions != -1 else 1
            runner_cont = CppBenchmarkRunner.from_rev_or_path(
                src, root, contender, conf,
                repetitions=repetitions,
                suite_filter=suite_filter,
                benchmark_filter=benchmark_filter)
            runner_base = CppBenchmarkRunner.from_rev_or_path(
                src, root, baseline, conf,
                repetitions=repetitions,
                suite_filter=suite_filter,
                benchmark_filter=benchmark_filter)
        elif language == "java":
            # Drop the C++-only toolchain kwargs collected by the shared
            # benchmark_common_options decorator.
            for key in {'cpp_package_prefix', 'cxx_flags', 'cxx', 'cc'}:
                del kwargs[key]
            conf = JavaBenchmarkRunner.default_configuration(
                java_home=java_home, java_options=java_options,
                build_extras=build_extras, benchmark_extras=benchmark_extras,
                **kwargs)
            repetitions = repetitions if repetitions != -1 else 5
            runner_cont = JavaBenchmarkRunner.from_rev_or_path(
                src, root, contender, conf,
                repetitions=repetitions,
                benchmark_filter=benchmark_filter)
            runner_base = JavaBenchmarkRunner.from_rev_or_path(
                src, root, baseline, conf,
                repetitions=repetitions,
                benchmark_filter=benchmark_filter)
        runner_comp = RunnerComparator(runner_cont, runner_base, threshold)
        # TODO(kszucs): test that the output is properly formatted jsonlines
        comparisons_json = _get_comparisons_as_json(runner_comp.comparisons)
        ren_counters = language == "java"
        formatted = _format_comparisons_with_pandas(comparisons_json,
                                                    no_counters, ren_counters)
        output.write(formatted)
        output.write('\n')
def _get_comparisons_as_json(comparisons):
    """Serialize *comparisons* as newline-delimited JSON (one object per line)."""
    encoded = [json.dumps(comparison, cls=JsonEncoder)
               for comparison in comparisons]
    return "".join(line + "\n" for line in encoded)
def _format_comparisons_with_pandas(comparisons_json, no_counters,
                                    ren_counters):
    """Render the jsonlines comparison report as two labelled text tables."""
    pd = _import_pandas()
    df = pd.read_json(StringIO(comparisons_json), lines=True)
    # parse change % so we can sort by it
    df['change %'] = df.pop('change').str[:-1].map(float)
    # Row count of non-regressions; after the descending sort below the first
    # `first_regression` rows are assumed to be the non-regressions (i.e.
    # regressions sort last by change %) -- TODO confirm against
    # RunnerComparator's definition of `regression`.
    first_regression = len(df) - df['regression'].sum()
    fields = ['benchmark', 'baseline', 'contender', 'change %']
    if not no_counters:
        fields += ['counters']
    df = df[fields]
    if ren_counters:
        # Java reports label this column "configurations" (see the caller's
        # `ren_counters = language == "java"`).
        df = df.rename(columns={'counters': 'configurations'})
    df = df.sort_values(by='change %', ascending=False)
    def labelled(title, df):
        # Wrap the rendered frame in a "----/title/----" banner sized to the
        # frame's first line; empty frames produce no output at all.
        if len(df) == 0:
            return ''
        title += ': ({})'.format(len(df))
        df_str = df.to_string(index=False)
        bar = '-' * df_str.index('\n')
        return '\n'.join([bar, title, bar, df_str])
    return '\n\n'.join([labelled('Non-regressions', df[:first_regression]),
                        labelled('Regressions', df[first_regression:])])
# ----------------------------------------------------------------------
# Integration testing
def _set_default(opt, default):
if opt is None:
return default
return opt
@archery.command(short_help="Execute protocol and Flight integration tests")
@click.option('--with-all', is_flag=True, default=False,
              help=('Include all known languages by default '
                    'in integration tests'))
@click.option('--random-seed', type=int, default=12345,
              help="Seed for PRNG when generating test data")
@click.option('--with-cpp', type=bool, default=False,
              help='Include C++ in integration tests')
@click.option('--with-java', type=bool, default=False,
              help='Include Java in integration tests')
@click.option('--with-js', type=bool, default=False,
              help='Include JavaScript in integration tests')
@click.option('--with-go', type=bool, default=False,
              help='Include Go in integration tests')
@click.option('--with-rust', type=bool, default=False,
              help='Include Rust in integration tests',
              envvar="ARCHERY_INTEGRATION_WITH_RUST")
@click.option('--write_generated_json', default=False,
              help='Generate test JSON to indicated path')
@click.option('--run-flight', is_flag=True, default=False,
              help='Run Flight integration tests')
@click.option('--debug', is_flag=True, default=False,
              help='Run executables in debug mode as relevant')
@click.option('--serial', is_flag=True, default=False,
              help='Run tests serially, rather than in parallel')
@click.option('--tempdir', default=None,
              help=('Directory to use for writing '
                    'integration test temporary files'))
@click.option('stop_on_error', '-x', '--stop-on-error',
              is_flag=True, default=False,
              help='Stop on first error')
@click.option('--gold-dirs', multiple=True,
              help="gold integration test file paths")
@click.option('-k', '--match',
              help=("Substring for test names to include in run, "
                    "e.g. -k primitive"))
def integration(with_all=False, random_seed=12345, **args):
    from .integration.runner import write_js_test_json, run_all_tests
    import numpy as np
    # FIXME(bkietz) Include help strings for individual testers.
    # For example, CPPTester's ARROW_CPP_EXE_PATH environment variable.
    # Make runs involving data generation deterministic
    np.random.seed(random_seed)
    # Truthy when --write_generated_json PATH was passed: only generate the
    # JSON fixtures instead of running the testers.
    gen_path = args['write_generated_json']
    languages = ['cpp', 'java', 'js', 'go', 'rust']
    enabled_languages = 0
    for lang in languages:
        param = 'with_{}'.format(lang)
        # --with-all forces every per-language flag on.
        if with_all:
            args[param] = with_all
        if args[param]:
            enabled_languages += 1
    if gen_path:
        try:
            # Create the output directory; tolerate it already existing.
            os.makedirs(gen_path)
        except OSError as e:
            if e.errno != errno.EEXIST:
                raise
        write_js_test_json(gen_path)
    else:
        if enabled_languages == 0:
            raise Exception("Must enable at least 1 language to test")
        run_all_tests(**args)
@archery.command()
@click.option('--event-name', '-n', required=True)
@click.option('--event-payload', '-p', type=click.File('r', encoding='utf8'),
              default='-', required=True)
@click.option('--arrow-token', envvar='ARROW_GITHUB_TOKEN',
              help='OAuth token for responding comment in the arrow repo')
def trigger_bot(event_name, event_payload, arrow_token):
    # Dispatch a GitHub event to the comment bot. The JSON payload is read
    # from a file or from stdin (the "-" default).
    from .bot import CommentBot, actions
    event_payload = json.loads(event_payload.read())
    bot = CommentBot(name='github-actions', handler=actions, token=arrow_token)
    bot.handle(event_name, event_payload)
@archery.group('release')
@click.option("--src", metavar="<arrow_src>", default=None,
              callback=validate_arrow_sources,
              help="Specify Arrow source directory.")
@click.option("--jira-cache", type=click.Path(), default=None,
              help="File path to cache queried JIRA issues per version.")
@click.pass_obj
def release(obj, src, jira_cache):
    """Release related commands."""
    from .release import Jira, CachedJira
    jira = Jira()
    if jira_cache is not None:
        # Wrap the client so repeated issue queries hit the on-disk cache.
        jira = CachedJira(jira_cache, jira=jira)
    # Shared state consumed by the release sub-commands via @click.pass_obj.
    obj['jira'] = jira
    obj['repo'] = src.path
@release.command('curate')
@click.argument('version')
@click.pass_obj
def release_curate(obj, version):
    """Release curation."""
    from .release import Release
    release = Release.from_jira(version, jira=obj['jira'], repo=obj['repo'])
    curation = release.curate()
    # Render the curation report for terminal display.
    click.echo(curation.render('console'))
@release.group('changelog')
def release_changelog():
    """Release changelog."""
    # Container group; the work happens in the add/generate/regenerate
    # sub-commands.
    pass
@release_changelog.command('add')
@click.argument('version')
@click.pass_obj
def release_changelog_add(obj, version):
    """Prepend the changelog with the current release"""
    from .release import Release
    jira, repo = obj['jira'], obj['repo']
    # just handle the current version
    release = Release.from_jira(version, jira=jira, repo=repo)
    if release.is_released:
        raise ValueError('This version has been already released!')
    changelog = release.changelog()
    changelog_path = pathlib.Path(repo) / 'CHANGELOG.md'
    # NOTE(review): read_text/write_text use the platform default encoding;
    # consider encoding='utf-8' -- confirm CHANGELOG.md content is ASCII-safe.
    current_content = changelog_path.read_text()
    new_content = changelog.render('markdown') + current_content
    changelog_path.write_text(new_content)
    click.echo("CHANGELOG.md is updated!")
@release_changelog.command('generate')
@click.argument('version')
@click.argument('output', type=click.File('w', encoding='utf8'), default='-')
@click.pass_obj
def release_changelog_generate(obj, version, output):
    """Generate the changelog of a specific release."""
    from .release import Release
    jira, repo = obj['jira'], obj['repo']
    # just handle the current version
    release = Release.from_jira(version, jira=jira, repo=repo)
    changelog = release.changelog()
    # Write markdown to the given file, or stdout via the "-" default.
    output.write(changelog.render('markdown'))
@release_changelog.command('regenerate')
@click.pass_obj
def release_changelog_regenerate(obj):
    """Regenerate the whole CHANGELOG.md file"""
    from .release import Release
    jira, repo = obj['jira'], obj['repo']
    changelogs = []
    # Collect the changelog of every *released* version, in whatever order
    # jira.arrow_versions() yields them -- confirm the ordering there.
    for version in jira.arrow_versions():
        if not version.released:
            continue
        release = Release.from_jira(version, jira=jira, repo=repo)
        click.echo('Querying changelog for version: {}'.format(version))
        changelogs.append(release.changelog())
    click.echo('Rendering new CHANGELOG.md file...')
    changelog_path = pathlib.Path(repo) / 'CHANGELOG.md'
    # NOTE(review): open('w') uses the platform default encoding; consider
    # encoding='utf-8'.
    with changelog_path.open('w') as fp:
        for cl in changelogs:
            fp.write(cl.render('markdown'))
@release.command('cherry-pick')
@click.argument('version')
@click.option('--dry-run/--execute', default=True,
              help="Display the git commands instead of executing them.")
@click.option('--recreate/--continue', default=True,
              help="Recreate the maintenance branch or only apply unapplied "
              "patches.")
@click.pass_obj
def release_cherry_pick(obj, version, dry_run, recreate):
    """
    Cherry pick commits.
    """
    from .release import Release, MinorRelease, PatchRelease
    release = Release.from_jira(version, jira=obj['jira'], repo=obj['repo'])
    # Cherry-picking onto a maintenance branch only makes sense for minor
    # and patch releases.
    if not isinstance(release, (MinorRelease, PatchRelease)):
        raise click.UsageError('Cherry-pick command only supported for minor '
                               'and patch releases')
    if not dry_run:
        release.cherry_pick_commits(recreate_branch=recreate)
        click.echo('Executed the following commands:\n')
    # Always echo the equivalent git commands; a dry run only prints them.
    click.echo(
        'git checkout {} -b {}'.format(release.previous.tag, release.branch)
    )
    for commit in release.commits_to_pick():
        click.echo('git cherry-pick {}'.format(commit.hexsha))
@archery.group("linking")
@click.pass_obj
def linking(obj):
    """
    Quick and dirty utilities for checking library linkage.
    """
    # Group entry point only; the subcommands below do the actual work.
    pass
@linking.command("check-dependencies")
@click.argument("paths", nargs=-1)
@click.option("--allow", "-a", "allowed", multiple=True,
              help="Name of the allowed libraries")
@click.option("--disallow", "-d", "disallowed", multiple=True,
              help="Name of the disallowed libraries")
@click.pass_obj
def linking_check_dependencies(obj, allowed, disallowed, paths):
    from .linking import check_dynamic_library_dependencies, DependencyError
    # Sets give O(1) membership tests inside the dependency checker.
    allowed, disallowed = set(allowed), set(disallowed)
    try:
        for path in map(pathlib.Path, paths):
            check_dynamic_library_dependencies(path, allowed=allowed,
                                               disallowed=disallowed)
    except DependencyError as e:
        # Re-raise as ClickException so the CLI exits non-zero with a clean
        # error message instead of a traceback.
        raise click.ClickException(str(e))
# Attach the docker/crossbow command groups (implemented in separate modules)
# to the top-level `archery` CLI.
add_optional_command("docker", module=".docker.cli", function="docker",
                     parent=archery)
add_optional_command("crossbow", module=".crossbow.cli", function="crossbow",
                     parent=archery)
# Script entry point: start the CLI with an empty shared-state dict that the
# @click.pass_obj handlers above read from.
if __name__ == "__main__":
    archery(obj={})
| |
# Copyright 2022 The T5 Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions for providing data to Mesh TF transformer."""
import functools
from absl import logging
import gin
import mesh_tensorflow.transformer.dataset as transformer_dataset
import t5.data
from t5.models import utils as model_utils
import tensorflow.compat.v1 as tf
import tensorflow_datasets as tfds
# Names of gin configurables that no longer exist in this codebase.
# NOTE(review): not referenced elsewhere in this module — presumably consumed
# by config-loading code to tolerate/strip these references from old gin
# configs; confirm against the importer.
DEPRECATED_GIN_REFERENCES = (
    "configurable_vocabulary",
    "get_sentencepiece_model_path",
    "maybe_print_dataset",
    "num_parallel_calls",
    "SentencePieceVocabulary",
    "t5.data.sentencepiece_vocabulary.SentencePieceVocabulary",
    "t5.models.mesh_transformer.get_sentencepiece_model_path",
    "train_model",
    "vocabularies.Vocabulary",
    "Vocabulary",
)
@gin.configurable()
def mesh_train_dataset_fn(
    mixture_or_task_name,
    sequence_length,
    vocabulary=None,
    dataset_split=tfds.Split.TRAIN,
    shuffle=True,
    seed=None,
    use_cached=False,
    pack=True):
  """Returns the tf.data.Dataset for training on a given mixture.
  This uses the format required for utils.run's `train_dataset_fn` argument in
  the Mesh TF transformer standalone.
  Args:
    mixture_or_task_name: string, an identifier for a Mixture or Task in the
      appropriate registry. Must be specified via gin.
    sequence_length: dict mapping feature key to the int length for that feature
      the max sequence length.
    vocabulary: unused argument, maintains compatibility with other dataset_fns.
    dataset_split: string, which split of the dataset to load. In most cases
      this should be "train".
    shuffle: Whether or not to shuffle dataset.
    seed: tf.int64 scalar tf.Tensor (or None). Used for both the global seed and
      shuffle seed for tf.data
    use_cached: bool, whether to load the cached version of this dataset.
    pack: bool, whether to pack the dataset.
  Returns:
    A tf.data.Dataset of preprocessed, tokenized, and batched examples.
  """
  del vocabulary  # Unused; kept only for dataset_fn signature compatibility.
  mixture_or_task = t5.data.get_mixture_or_task(mixture_or_task_name)
  # num_epochs=None makes the dataset repeat indefinitely for training.
  ds = mixture_or_task.get_dataset(
      sequence_length, split=dataset_split, use_cached=use_cached,
      shuffle=shuffle, num_epochs=None, seed=seed)
  # Select just the output features which are present in the dataset.
  feature_keys = tuple(k for k in mixture_or_task.output_features
                       if k in tf.data.get_output_shapes(ds))
  # Filtering feature keys is done in pack_or_pad function. However, when
  # packing is turned off, input_features aren't filtered leading to training
  # problems due to strings showing up in the input example. Filtering features
  # ensures that we don't rely on pack_or_pad to filter features for training.
  def _filter_features(ex):
    # Drop every key that is not a declared output feature.
    return {k: ex[k] for k in feature_keys}
  ds = ds.map(
      _filter_features, num_parallel_calls=tf.data.experimental.AUTOTUNE)
  # Features flagged add_eos need an explicit end-of-sequence token appended.
  eos_keys = set(
      k for k, f in mixture_or_task.output_features.items() if f.add_eos)
  ds = transformer_dataset.pack_or_pad(
      ds, sequence_length, pack=pack,
      feature_keys=feature_keys, ensure_eos=eos_keys)
  return ds
@gin.configurable()
def mesh_inference_dataset_fn(
    mixture_or_task_name,
    sequence_length,
    dataset_split,
    shuffle=False,
    seed=None,
    vocabulary=None,
    num_inference_examples=-1,
    use_cached=False,
    priming_sequence_length=None):
  """Returns all tf.data.Datasets for LM inference on a given mixture.
  For Tasks without inputs (such as language modeling), the first
  `priming_sequence_length` tokens in the target are used as the "inputs" for
  inference.
  Args:
    mixture_or_task_name: string, an identifier for a Mixture or Task in the
      appropriate registry. Must be specified via gin.
    sequence_length: dict mapping feature key to the int length for that feature
      the max sequence length. If set to None, packing and padding will be
      disabled.
    dataset_split: string, which split of the dataset to load. NOTE, this
      function does NOT receive the split specified in utils.run. It needs to be
      specified separately.
    shuffle: Whether or not to shuffle dataset.
    seed: tf.int64 scalar tf.Tensor (or None). Used as shuffle seed for tf.data.
    vocabulary: unused argument, maintains compatibility with other dataset_fns
    num_inference_examples: maximum number of examples per task to do inference
      on. If None or less than 0, use all examples.
    use_cached: bool, whether to load the cached version of this dataset.
    priming_sequence_length: If the Task only has "targets", select the first
      this many tokens from each target sequence to use as "inputs". This is
      useful for decoder-only language models where you would like to use a
      portion of the targets as a priming sequence for generation.
  Returns:
    A list of mesh_tensorflow.transformer.dataset.EvalDataset tuples.
  """
  del vocabulary  # Unused; kept only for dataset_fn signature compatibility.
  mixture_or_task = t5.data.get_mixture_or_task(mixture_or_task_name)
  def _split_targets_for_primed_inference(ex):
    # Use the first `priming_sequence_length` target tokens as the prompt and
    # keep the remainder as the targets to predict.
    ex["inputs"] = ex["targets"][:priming_sequence_length]
    ex["targets"] = ex["targets"][priming_sequence_length:]
    # Right-pad short prompts so every "inputs" has a fixed static shape.
    ex["inputs"] = tf.pad(
        ex["inputs"],
        [[0, priming_sequence_length - tf.shape(ex["inputs"])[0]]], "CONSTANT")
    ex["inputs"] = tf.reshape(ex["inputs"], shape=(priming_sequence_length,))
    return ex
  def _prepare_for_unprimed_inference(ex):
    # Empty prompt: generation starts from nothing.
    ex["inputs"] = tf.constant([], dtype=tf.int64)
    return ex
  def _get_dataset_for_single_task(task, sequence_length):
    """Get a tensorflow.data.Dataset for the provided task."""
    ds = task.get_dataset(
        sequence_length, split=dataset_split, use_cached=use_cached,
        shuffle=shuffle, seed=seed)
    if "inputs" not in ds.element_spec:
      # Targets-only (decoder-only) Task: synthesize the "inputs" feature.
      if not priming_sequence_length or priming_sequence_length <= 0:
        logging.warning("Priming sequence length not specified so priming "
                        "with the empty string.")
        ds = ds.map(_prepare_for_unprimed_inference)
      else:
        logging.info("Using the first %d tokens of each target as input.",
                     priming_sequence_length)
        ds = ds.map(_split_targets_for_primed_inference)
    elif priming_sequence_length is not None:
      raise ValueError(
          "Setting a priming sequence length only makes sense for decoder-only "
          "Tasks, which have `targets` but no `inputs`.")
    eos_keys = set(
        k for k, f in mixture_or_task.output_features.items() if f.add_eos)
    logging.info(
        "Padding '%s' with sequence lengths: %s", task.name, sequence_length)
    # pack=False: inference examples must stay one-per-row, so only pad.
    ds = transformer_dataset.pack_or_pad(
        ds,
        sequence_length,
        pack=False,
        feature_keys=tuple(task.output_features),
        ensure_eos=eos_keys)
    if num_inference_examples is not None and num_inference_examples >= 0:
      ds = ds.take(num_inference_examples)
    return ds
  outputs = []
  for task in t5.data.get_subtasks(mixture_or_task):
    if dataset_split not in task.splits:
      logging.info("Task %s has no '%s' split, skipping inference.",
                   task.name, dataset_split)
      continue
    outputs.append(
        transformer_dataset.EvalDataset(
            task.name,
            functools.partial(
                _get_dataset_for_single_task,
                task=task,
                sequence_length=sequence_length),
            task.postprocess_fn,
            task.metric_fns,
        )
    )
  if not outputs:
    logging.warning("No %s data found for %s.",
                    dataset_split, mixture_or_task_name)
  return outputs
@gin.configurable()
def mesh_eval_dataset_fn(
    mixture_or_task_name,
    sequence_length,
    dataset_split,
    vocabulary=None,
    num_eval_examples=-1,
    use_cached=False,
    pack=False,
    shuffle_eval_examples=False,
    seed=None):
  """Returns all tf.data.Datasets for evaluation on a given mixture.
  This uses the format required for utils.run's `eval_dataset_fn` argument in
  the Mesh TF transformer standalone.
  Args:
    mixture_or_task_name: string, an identifier for a Mixture or Task in the
      appropriate registry. Must be specified via gin.
    sequence_length: dict mapping feature key to the int length for that feature
      the max sequence length. If set to None, packing and padding will be
      disabled.
    dataset_split: string, which split of the dataset to load.
    vocabulary: unused argument, maintains compatibility with other dataset_fns
    num_eval_examples: maximum number of examples per task to use for continuous
      eval. If None or less than 0, use all examples.
    use_cached: bool, whether to load the cached version of this dataset.
    pack: a boolean, whether to pack examples. This is useful for perplexity
      evals but should not be used for iterative decoding.
    shuffle_eval_examples: boolean, whether to shuffle eval examples, applied
      only when num_eval_examples is not None. Intended to be able to eval on a
      different eval slice at every iteration.
    seed: tf.int64 scalar tf.Tensor (or None). Used for both the global seed and
      shuffle seed for tf.data
  Returns:
    A list of mesh_tensorflow.transformer.dataset.EvalDataset tuples.
  """
  del vocabulary  # Unused; kept only for dataset_fn signature compatibility.
  mixture_or_task = t5.data.get_mixture_or_task(mixture_or_task_name)
  def _get_dataset_for_single_task(task, sequence_length):
    """Get a tensorflow.data.Dataset for the provided task."""
    if shuffle_eval_examples and seed is None:
      # Bug fix: the old code passed a *tuple* of strings to logging.warning
      # (printing the tuple repr) and named a nonexistent flag
      # "shuffle_seed_examples".
      logging.warning("shuffle_eval_examples is true but no seed was "
                      "provided. Using a random seed.")
    ds = task.get_dataset(
        sequence_length, split=dataset_split,
        use_cached=use_cached, shuffle=shuffle_eval_examples, seed=seed,
    )
    # Features flagged add_eos need an explicit end-of-sequence token.
    eos_keys = set(
        k for k, f in mixture_or_task.output_features.items() if f.add_eos)
    if sequence_length is None:
      logging.info(
          "Skipping packing/padding for '%s' since sequence length is None.",
          task.name)
    else:
      # "Pack"/"Padd" + "ing" renders as "Packing"/"Padding" in the log.
      logging.info(
          "%sing '%s' with sequence lengths: %s",
          "Pack" if pack else "Padd", task.name, sequence_length)
      ds = transformer_dataset.pack_or_pad(
          ds,
          sequence_length,
          pack=pack,
          feature_keys=tuple(task.output_features),
          ensure_eos=eos_keys)
    if num_eval_examples is not None and num_eval_examples >= 0:
      ds = ds.take(num_eval_examples)
    return ds
  outputs = []
  for task in t5.data.get_subtasks(mixture_or_task):
    if dataset_split not in task.splits:
      logging.info(
          "Task %s has no '%s' split, skipping eval.", task.name, dataset_split
      )
      continue
    outputs.append(
        transformer_dataset.EvalDataset(
            task.name,
            functools.partial(
                _get_dataset_for_single_task,
                task=task,
                sequence_length=sequence_length),
            task.postprocess_fn,
            task.metric_fns,
        )
    )
  if not outputs:
    logging.warning("No %s data found for %s.",
                    dataset_split, mixture_or_task_name)
  return outputs
@gin.configurable()
def tsv_dataset_fn(
    filename,
    sequence_length,
    dataset_split,
    vocabulary,
    shuffle_buffer_size=10000):
  r"""Returns a dataset based on a TSV file formatted as `<input>\t<target>`."""
  # Currently `tf.gfile.glob` is broken on GCS, so we only read a file or
  # list of files.
  shuffled_lines = tf.data.TextLineDataset(filename).shuffle(
      shuffle_buffer_size)
  return transformer_dataset.packed_parallel_tsv_dataset(
      dataset=shuffled_lines,
      sequence_length=sequence_length,
      vocabulary=vocabulary,
      dataset_split=dataset_split,
      append_eos=True,
      eos_id=1)
@gin.configurable()
def get_vocabulary(mixture_or_task_name=None):
  """Get the appropriate value for the utils.run.vocabulary argument.
  Args:
    mixture_or_task_name: string, an identifier for a Mixture or Task in the
      appropriate registry. Must be specified via gin.
  Returns:
    Either a single t5.data.vocabularies.Vocabulary or a tuple of
    t5.data.vocabularies.Vocabulary for inputs and targets.
  """
  # Thin gin-configurable delegation to the shared t5.models.utils helper.
  return model_utils.get_vocabulary(mixture_or_task_name)
| |
"""
tests.test_util
~~~~~~~~~~~~~~~~~
Tests Home Assistant util methods.
"""
# pylint: disable=too-many-public-methods
import unittest
import time
from datetime import datetime, timedelta
import homeassistant.util as util
class TestUtil(unittest.TestCase):
    """ Tests util methods. """
    def test_sanitize_filename(self):
        """ Test sanitize_filename. """
        self.assertEqual("test", util.sanitize_filename("test"))
        self.assertEqual("test", util.sanitize_filename("/test"))
        self.assertEqual("test", util.sanitize_filename("..test"))
        self.assertEqual("test", util.sanitize_filename("\\test"))
        self.assertEqual("test", util.sanitize_filename("\\../test"))
    def test_sanitize_path(self):
        """ Test sanitize_path. """
        self.assertEqual("test/path", util.sanitize_path("test/path"))
        self.assertEqual("test/path", util.sanitize_path("~test/path"))
        self.assertEqual("//test/path",
                         util.sanitize_path("~/../test/path"))
    def test_slugify(self):
        """ Test slugify. """
        self.assertEqual("Test", util.slugify("T-!@#$!#@$!$est"))
        self.assertEqual("Test_More", util.slugify("Test More"))
        self.assertEqual("Test_More", util.slugify("Test_(More)"))
    def test_datetime_to_str(self):
        """ Test datetime_to_str. """
        self.assertEqual("12:00:00 09-07-1986",
                         util.datetime_to_str(datetime(1986, 7, 9, 12, 0, 0)))
    def test_str_to_datetime(self):
        """ Test str_to_datetime. """
        self.assertEqual(datetime(1986, 7, 9, 12, 0, 0),
                         util.str_to_datetime("12:00:00 09-07-1986"))
        # Unparseable input maps to None rather than raising.
        self.assertIsNone(util.str_to_datetime("not a datetime string"))
    def test_split_entity_id(self):
        """ Test split_entity_id. """
        self.assertEqual(['domain', 'object_id'],
                         util.split_entity_id('domain.object_id'))
    def test_repr_helper(self):
        """ Test repr_helper. """
        self.assertEqual("A", util.repr_helper("A"))
        self.assertEqual("5", util.repr_helper(5))
        self.assertEqual("True", util.repr_helper(True))
        self.assertEqual("test=1",
                         util.repr_helper({"test": 1}))
        self.assertEqual("12:00:00 09-07-1986",
                         util.repr_helper(datetime(1986, 7, 9, 12, 0, 0)))
    # pylint: disable=invalid-name
    def test_color_RGB_to_xy(self):
        """ Test color_RGB_to_xy. """
        self.assertEqual((0, 0), util.color_RGB_to_xy(0, 0, 0))
        self.assertEqual((0.3127159072215825, 0.3290014805066623),
                         util.color_RGB_to_xy(255, 255, 255))
        self.assertEqual((0.15001662234042554, 0.060006648936170214),
                         util.color_RGB_to_xy(0, 0, 255))
        self.assertEqual((0.3, 0.6), util.color_RGB_to_xy(0, 255, 0))
        self.assertEqual((0.6400744994567747, 0.3299705106316933),
                         util.color_RGB_to_xy(255, 0, 0))
    def test_convert(self):
        """ Test convert. """
        self.assertEqual(5, util.convert("5", int))
        self.assertEqual(5.0, util.convert("5", float))
        self.assertEqual(True, util.convert("True", bool))
        # On conversion failure (or None input) the default is returned.
        self.assertEqual(1, util.convert("NOT A NUMBER", int, 1))
        self.assertEqual(1, util.convert(None, int, 1))
    def test_ensure_unique_string(self):
        """ Test ensure_unique_string. """
        self.assertEqual(
            "Beer_3",
            util.ensure_unique_string("Beer", ["Beer", "Beer_2"]))
        self.assertEqual(
            "Beer",
            util.ensure_unique_string("Beer", ["Wine", "Soda"]))
    def test_ordered_enum(self):
        """ Test the ordered enum class. """
        class TestEnum(util.OrderedEnum):
            """ Test enum that can be ordered. """
            FIRST = 1
            SECOND = 2
            THIRD = 3
        self.assertTrue(TestEnum.SECOND >= TestEnum.FIRST)
        self.assertTrue(TestEnum.SECOND >= TestEnum.SECOND)
        self.assertFalse(TestEnum.SECOND >= TestEnum.THIRD)
        self.assertTrue(TestEnum.SECOND > TestEnum.FIRST)
        self.assertFalse(TestEnum.SECOND > TestEnum.SECOND)
        self.assertFalse(TestEnum.SECOND > TestEnum.THIRD)
        self.assertFalse(TestEnum.SECOND <= TestEnum.FIRST)
        self.assertTrue(TestEnum.SECOND <= TestEnum.SECOND)
        self.assertTrue(TestEnum.SECOND <= TestEnum.THIRD)
        self.assertFalse(TestEnum.SECOND < TestEnum.FIRST)
        self.assertFalse(TestEnum.SECOND < TestEnum.SECOND)
        self.assertTrue(TestEnum.SECOND < TestEnum.THIRD)
        # Python will raise a TypeError if the <, <=, >, >= methods
        # raise a NotImplemented error.
        self.assertRaises(TypeError,
                          lambda x, y: x < y, TestEnum.FIRST, 1)
        self.assertRaises(TypeError,
                          lambda x, y: x <= y, TestEnum.FIRST, 1)
        self.assertRaises(TypeError,
                          lambda x, y: x > y, TestEnum.FIRST, 1)
        self.assertRaises(TypeError,
                          lambda x, y: x >= y, TestEnum.FIRST, 1)
    def test_ordered_set(self):
        """ Test the OrderedSet class. """
        set1 = util.OrderedSet([1, 2, 3, 4])
        set2 = util.OrderedSet([3, 4, 5])
        self.assertEqual(4, len(set1))
        self.assertEqual(3, len(set2))
        self.assertIn(1, set1)
        self.assertIn(2, set1)
        self.assertIn(3, set1)
        self.assertIn(4, set1)
        self.assertNotIn(5, set1)
        self.assertNotIn(1, set2)
        self.assertNotIn(2, set2)
        self.assertIn(3, set2)
        self.assertIn(4, set2)
        self.assertIn(5, set2)
        set1.add(5)
        self.assertIn(5, set1)
        set1.discard(5)
        self.assertNotIn(5, set1)
        # Try again while key is not in
        set1.discard(5)
        self.assertNotIn(5, set1)
        self.assertEqual([1, 2, 3, 4], list(set1))
        self.assertEqual([4, 3, 2, 1], list(reversed(set1)))
        # pop(False) removes from the left end, pop() from the right.
        self.assertEqual(1, set1.pop(False))
        self.assertEqual([2, 3, 4], list(set1))
        self.assertEqual(4, set1.pop())
        self.assertEqual([2, 3], list(set1))
        self.assertEqual('OrderedSet()', str(util.OrderedSet()))
        self.assertEqual('OrderedSet([2, 3])', str(set1))
        # Equality against another OrderedSet is order-sensitive; equality
        # against plain sets/lists is order-insensitive.
        self.assertEqual(set1, util.OrderedSet([2, 3]))
        self.assertNotEqual(set1, util.OrderedSet([3, 2]))
        self.assertEqual(set1, set([2, 3]))
        self.assertEqual(set1, {3, 2})
        self.assertEqual(set1, [2, 3])
        self.assertEqual(set1, [3, 2])
        self.assertNotEqual(set1, {2})
        set3 = util.OrderedSet(set1)
        set3.update(set2)
        self.assertEqual([3, 4, 5, 2], set3)
        self.assertEqual([3, 4, 5, 2], set1 | set2)
        self.assertEqual([3], set1 & set2)
        self.assertEqual([2], set1 - set2)
        set1.update([1, 2], [5, 6])
        self.assertEqual([2, 3, 1, 5, 6], set1)
    def test_throttle(self):
        """ Test the add cooldown decorator. """
        calls1 = []
        @util.Throttle(timedelta(milliseconds=500))
        def test_throttle1():
            calls1.append(1)
        calls2 = []
        # NOTE(review): the second argument appears to be a separate minimum
        # interval enforced even for no_throttle=True calls — confirm against
        # util.Throttle.
        @util.Throttle(
            timedelta(milliseconds=500), timedelta(milliseconds=250))
        def test_throttle2():
            calls2.append(1)
        # Ensure init is ok
        self.assertEqual(0, len(calls1))
        self.assertEqual(0, len(calls2))
        # Call first time and ensure methods got called
        test_throttle1()
        test_throttle2()
        self.assertEqual(1, len(calls1))
        self.assertEqual(1, len(calls2))
        # Call second time. Methods should not get called
        test_throttle1()
        test_throttle2()
        self.assertEqual(1, len(calls1))
        self.assertEqual(1, len(calls2))
        # Call again, overriding throttle, only first one should fire
        test_throttle1(no_throttle=True)
        test_throttle2(no_throttle=True)
        self.assertEqual(2, len(calls1))
        self.assertEqual(1, len(calls2))
        # Sleep past the no throttle interval for throttle2
        time.sleep(.3)
        test_throttle1()
        test_throttle2()
        self.assertEqual(2, len(calls1))
        self.assertEqual(1, len(calls2))
        test_throttle1(no_throttle=True)
        test_throttle2(no_throttle=True)
        self.assertEqual(3, len(calls1))
        self.assertEqual(2, len(calls2))
        time.sleep(.5)
        test_throttle1()
        test_throttle2()
        self.assertEqual(4, len(calls1))
        self.assertEqual(3, len(calls2))
| |
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Bulkloader CSV reading and writing.
Handle the CSV format specified in a bulkloader.yaml file.
"""
import codecs
import cStringIO
import csv
import encodings
import encodings.ascii
import encodings.cp1252
import encodings.latin_1
import encodings.utf_8
from google.appengine.ext.bulkload import bulkloader_errors
from google.appengine.ext.bulkload import connector_interface
def utf8_recoder(stream, encoding):
  """Generator that reads an encoded stream and reencodes to UTF-8."""
  # Decode line-by-line with the source encoding, then emit UTF-8 bytes.
  reader = codecs.getreader(encoding)(stream)
  for decoded_line in reader:
    yield decoded_line.encode('utf-8')
class UnicodeDictWriter(object):
  """Based on UnicodeWriter in http://docs.python.org/library/csv.html."""
  def __init__(self, stream, fieldnames, encoding='utf-8', **kwds):
    """Initializer.
    Args:
      stream: Stream to write to.
      fieldnames: Fieldnames to pass to the DictWriter.
      encoding: Desired encoding.
      kwds: Additional arguments to pass to the DictWriter.
    """
    writer = codecs.getwriter(encoding)
    # For these encodings the csv module's output can be written directly to
    # the target stream; anything else is staged in an in-memory UTF-8 queue
    # and recoded per row in writerow().
    if (writer is encodings.utf_8.StreamWriter or
        writer is encodings.ascii.StreamWriter or
        writer is encodings.latin_1.StreamWriter or
        writer is encodings.cp1252.StreamWriter):
      self.no_recoding = True
      self.encoder = codecs.getencoder(encoding)
      self.writer = csv.DictWriter(stream, fieldnames, **kwds)
    else:
      self.no_recoding = False
      self.encoder = codecs.getencoder('utf-8')
      self.queue = cStringIO.StringIO()
      self.writer = csv.DictWriter(self.queue, fieldnames, **kwds)
      self.stream = writer(stream)
  def writerow(self, row):
    """Wrap writerow method."""
    # Encode every value to bytes first — presumably because the Python 2
    # csv module cannot serialize unicode objects directly.
    row_encoded = dict([(k, self.encoder(v)[0]) for (k, v) in row.iteritems()])
    self.writer.writerow(row_encoded)
    if self.no_recoding:
      return
    # Recoding path: drain the UTF-8 staging queue, decode it, and let the
    # target StreamWriter re-encode into the requested encoding.
    data = self.queue.getvalue()
    data = data.decode('utf-8')
    self.stream.write(data)
    # NOTE(review): relies on Python 2 cStringIO truncate(0) semantics to
    # empty the queue; Python 3 io.StringIO would not reset the position.
    self.queue.truncate(0)
class CsvConnector(connector_interface.ConnectorInterface):
  """Read/write a (possibly encoded) CSV file."""

  @classmethod
  def create_from_options(cls, options, name):
    """Factory using an options dictionary.
    Args:
      options: Dictionary of options:
        columns: 'from_header' or blank.
        column_list: overrides columns specifically.
        encoding: encoding of the file. e.g. 'utf-8' (default), 'windows-1252'.
        skip_import_header_row: True to ignore the header line on import.
          Defaults False, except must be True if columns=from_header.
        print_export_header_row: True to print a header line on export.
          Defaults to False except if columns=from_header.
        import_options: Other kwargs to pass in, like "dialect".
        export_options: Other kwargs to pass in, like "dialect".
      name: The name of this transformer, for use in error messages.
    Returns:
      CsvConnector object described by the specified options.
    Raises:
      InvalidConfiguration: If the config is invalid.
    """
    column_list = options.get('column_list', None)
    columns = None
    # An explicit column_list takes precedence over the 'columns' mode.
    if not column_list:
      columns = options.get('columns', 'from_header')
      if columns != 'from_header':
        raise bulkloader_errors.InvalidConfiguration(
            'CSV columns must be "from_header", or a column_list '
            'must be specified. (In transformer name %s.)' % name)
    csv_encoding = options.get('encoding', 'utf-8')
    skip_import_header_row = options.get('skip_import_header_row',
                                         columns == 'from_header')
    if columns == 'from_header' and not skip_import_header_row:
      raise bulkloader_errors.InvalidConfiguration(
          'When CSV columns are "from_header", the header row must always '
          'be skipped. (In transformer name %s.)' % name)
    print_export_header_row = options.get('print_export_header_row',
                                          columns == 'from_header')
    import_options = options.get('import_options', {})
    export_options = options.get('export_options', {})
    return cls(columns, column_list, skip_import_header_row,
               print_export_header_row, csv_encoding, import_options,
               export_options)

  def __init__(self, columns, column_list, skip_import_header_row,
               print_export_header_row, csv_encoding=None,
               import_options=None, export_options=None):
    """Initializer.
    Args:
      columns: 'from_header' or blank
      column_list: overrides columns specifically.
      skip_import_header_row: True to ignore the header line on import.
        Defaults False, except must be True if columns=from_header.
      print_export_header_row: True to print a header line on export.
        Defaults to False except if columns=from_header.
      csv_encoding: encoding of the file.
      import_options: Other kwargs to pass in, like "dialect".
      export_options: Other kwargs to pass in, like "dialect".
    """
    self.columns = columns
    self.from_header = (columns == 'from_header')
    self.column_list = column_list
    self.skip_import_header_row = skip_import_header_row
    self.print_export_header_row = print_export_header_row
    self.csv_encoding = csv_encoding
    # Import/export state; populated lazily by the methods below.
    self.dict_generator = None
    self.output_stream = None
    self.csv_writer = None
    self.bulkload_state = None
    self.import_options = import_options or {}
    self.export_options = export_options or {}

  def generate_import_record(self, filename, bulkload_state):
    """Generator, yields dicts for nodes found as described in the options.
    Args:
      filename: Filename to read.
      bulkload_state: Passed bulkload_state.
    Yields:
      Neutral dict, one per row in the CSV file.
    """
    self.bulkload_state = bulkload_state
    input_stream = open(filename)
    # Recode the input to UTF-8 so the csv module only ever sees one encoding.
    input_stream = utf8_recoder(input_stream, self.csv_encoding)
    self.dict_generator = csv.DictReader(input_stream, self.column_list,
                                         **self.import_options)
    # When columns come from the header, DictReader consumes the header row
    # itself; only discard an explicit header line otherwise.
    discard_line = self.skip_import_header_row and not self.from_header
    line_number = 0
    for input_dict in self.dict_generator:
      line_number += 1
      if discard_line:
        discard_line = False
        continue
      decoded_dict = {}
      for key, value in input_dict.iteritems():
        # DictReader files surplus row values under the None key (restkey);
        # use an identity check (was `key == None`).
        if key is None:
          raise bulkloader_errors.InvalidImportData(
              'Got more values in row than headers on line %d.'
              % (line_number))
        # Keys from the header still carry the recoder's UTF-8 bytes; an
        # explicit column_list is assumed to already be unicode.
        if not self.column_list:
          key = unicode(key, 'utf-8')
        if value:
          value = unicode(value, 'utf-8')
        decoded_dict[key] = value
      yield decoded_dict

  def initialize_export(self, filename, bulkload_state):
    """Initialize the output file.
    Args:
      filename: Filename to write.
      bulkload_state: Passed bulkload_state.
    """
    self.bulkload_state = bulkload_state
    self.output_stream = open(filename, 'wb')

  def __initialize_csv_writer(self, dictionary):
    """Actual initialization, happens on the first entity being written."""
    write_header = self.print_export_header_row
    if self.from_header:
      # Derive the column order from the first entity's keys.
      export_column_list = tuple(dictionary)
    else:
      export_column_list = self.column_list
    self.csv_writer = UnicodeDictWriter(self.output_stream, export_column_list,
                                        self.csv_encoding,
                                        **self.export_options)
    if write_header:
      # Header row maps each column name to itself.
      self.csv_writer.writerow(dict(zip(export_column_list,
                                        export_column_list)))

  def write_dict(self, dictionary):
    """Write one record for the specified entity."""
    if not self.csv_writer:
      self.__initialize_csv_writer(dictionary)
    self.csv_writer.writerow(dictionary)

  def finalize_export(self):
    """Close the output stream opened by initialize_export."""
    self.output_stream.close()
| |
import numpy as np
from pandas._libs import (index as libindex,
algos as libalgos, join as libjoin)
from pandas.core.dtypes.common import (
is_dtype_equal,
pandas_dtype,
is_float_dtype,
is_object_dtype,
is_integer_dtype,
is_bool,
is_bool_dtype,
is_scalar)
from pandas.core.common import _asarray_tuplesafe, _values_from_object
from pandas import compat
from pandas.core import algorithms
from pandas.core.indexes.base import (
Index, InvalidIndexError, _index_shared_docs)
from pandas.util._decorators import Appender, cache_readonly
import pandas.core.dtypes.concat as _concat
import pandas.core.indexes.base as ibase
_num_index_shared_docs = dict()
class NumericIndex(Index):
    """
    Provide numeric type operations
    This is an abstract class
    """
    # Flag read by shared Index machinery to mark numeric-dtype indexes.
    _is_numeric_dtype = True
    def __new__(cls, data=None, dtype=None, copy=False, name=None,
                fastpath=False):
        # Fastpath skips all validation/coercion; the caller guarantees
        # `data` is already an ndarray of the right dtype.
        if fastpath:
            return cls._simple_new(data, name=name)
        # isscalar, generators handled in coerce_to_ndarray
        data = cls._coerce_to_ndarray(data)
        if issubclass(data.dtype.type, compat.string_types):
            cls._string_data_error(data)
        # Cast to the subclass default dtype (e.g. int64/uint64) unless the
        # input already matches and no copy was requested; subclasses may
        # veto lossy casts via _assert_safe_casting.
        if copy or not is_dtype_equal(data.dtype, cls._default_dtype):
            subarr = np.array(data, dtype=cls._default_dtype, copy=copy)
            cls._assert_safe_casting(data, subarr)
        else:
            subarr = data
        # Inherit the name from a named input (e.g. a Series/Index) when the
        # caller did not supply one.
        if name is None and hasattr(data, 'name'):
            name = data.name
        return cls._simple_new(subarr, name=name)
    @Appender(_index_shared_docs['_maybe_cast_slice_bound'])
    def _maybe_cast_slice_bound(self, label, side, kind):
        assert kind in ['ix', 'loc', 'getitem', None]
        # we will try to coerce to integers
        return self._maybe_cast_indexer(label)
    def _convert_for_op(self, value):
        """ Convert value to be insertable to ndarray """
        if is_bool(value) or is_bool_dtype(value):
            # force conversion to object
            # so we don't lose the bools
            raise TypeError
        return value
    def _convert_tolerance(self, tolerance, target):
        # Validate a `tolerance` argument: must be numeric, and if
        # list-like, its size must match the target index.
        tolerance = np.asarray(tolerance)
        if target.size != tolerance.size and tolerance.size > 1:
            raise ValueError('list-like tolerance size must match '
                             'target index size')
        if not np.issubdtype(tolerance.dtype, np.number):
            if tolerance.ndim > 0:
                raise ValueError(('tolerance argument for %s must contain '
                                  'numeric elements if it is list type') %
                                 (type(self).__name__,))
            else:
                raise ValueError(('tolerance argument for %s must be numeric '
                                  'if it is a scalar: %r') %
                                 (type(self).__name__, tolerance))
        return tolerance
    @classmethod
    def _assert_safe_casting(cls, data, subarr):
        """
        Subclasses need to override this only if the process of casting data
        from some accepted dtype to the internal dtype(s) bears the risk of
        truncation (e.g. float to int).
        """
        pass
    def _concat_same_dtype(self, indexes, name):
        # Concatenate same-dtype indexes and apply the resulting name.
        return _concat._concat_index_same_dtype(indexes).rename(name)
    @property
    def is_all_dates(self):
        """
        Checks that all the labels are datetime objects
        """
        # Numeric indexes never hold datetimes.
        return False
_num_index_shared_docs['class_descr'] = """
Immutable ndarray implementing an ordered, sliceable set. The basic object
storing axis labels for all pandas objects. %(klass)s is a special case
of `Index` with purely %(ltype)s labels. %(extra)s
Parameters
----------
data : array-like (1-dimensional)
dtype : NumPy dtype (default: %(dtype)s)
copy : bool
Make a copy of input ndarray
name : object
Name to be stored in the index
Attributes
----------
inferred_type
Methods
-------
None
Notes
-----
An Index instance can **only** contain hashable objects.
See also
--------
Index : The base pandas Index type
"""
_int64_descr_args = dict(
klass='Int64Index',
ltype='integer',
dtype='int64',
extra=''
)
class Int64Index(NumericIndex):
    __doc__ = _num_index_shared_docs['class_descr'] % _int64_descr_args
    _typ = 'int64index'
    # C-implemented helpers specialized for int64 (from pandas._libs).
    _arrmap = libalgos.arrmap_int64
    _left_indexer_unique = libjoin.left_join_indexer_unique_int64
    _left_indexer = libjoin.left_join_indexer_int64
    _inner_indexer = libjoin.inner_join_indexer_int64
    _outer_indexer = libjoin.outer_join_indexer_int64
    # int64 has no representation for NaN.
    _can_hold_na = False
    _engine_type = libindex.Int64Engine
    _default_dtype = np.int64
    @property
    def inferred_type(self):
        """Always 'integer' for ``Int64Index``"""
        return 'integer'
    @property
    def asi8(self):
        # do not cache or you'll create a memory leak
        # Zero-copy reinterpretation of the underlying data as signed i8.
        return self.values.view('i8')
    @Appender(_index_shared_docs['_convert_scalar_indexer'])
    def _convert_scalar_indexer(self, key, kind=None):
        assert kind in ['ix', 'loc', 'getitem', 'iloc', None]
        # don't coerce ilocs to integers
        if kind != 'iloc':
            key = self._maybe_cast_indexer(key)
        return (super(Int64Index, self)
                ._convert_scalar_indexer(key, kind=kind))
    def _wrap_joined_index(self, joined, other):
        # Keep the index name only when both join operands agree on it.
        name = self.name if self.name == other.name else None
        return Int64Index(joined, name=name)
    @classmethod
    def _assert_safe_casting(cls, data, subarr):
        """
        Ensure incoming data can be represented as ints.
        """
        # Non-signed-integer input (e.g. floats) must round-trip exactly
        # through the int64 cast; otherwise the cast truncated values.
        if not issubclass(data.dtype.type, np.signedinteger):
            if not np.array_equal(data, subarr):
                raise TypeError('Unsafe NumPy casting, you must '
                                'explicitly cast')
# Attach the arithmetic (+, -, *, ...) and logical (any/all) index methods
# generated by the Index base class machinery.
Int64Index._add_numeric_methods()
Int64Index._add_logical_methods()
_uint64_descr_args = dict(
klass='UInt64Index',
ltype='unsigned integer',
dtype='uint64',
extra=''
)
class UInt64Index(NumericIndex):
    __doc__ = _num_index_shared_docs['class_descr'] % _uint64_descr_args

    _typ = 'uint64index'
    # C-level helpers specialised for uint64 (from pandas._libs).
    _arrmap = libalgos.arrmap_uint64
    _left_indexer_unique = libjoin.left_join_indexer_unique_uint64
    _left_indexer = libjoin.left_join_indexer_uint64
    _inner_indexer = libjoin.inner_join_indexer_uint64
    _outer_indexer = libjoin.outer_join_indexer_uint64
    # uint64 cannot represent NaN, so this index never holds missing values.
    _can_hold_na = False
    _engine_type = libindex.UInt64Engine
    _default_dtype = np.uint64

    @property
    def inferred_type(self):
        """Always 'integer' for ``UInt64Index``"""
        return 'integer'

    @property
    def asi8(self):
        # do not cache or you'll create a memory leak
        return self.values.view('u8')

    @Appender(_index_shared_docs['_convert_scalar_indexer'])
    def _convert_scalar_indexer(self, key, kind=None):
        assert kind in ['ix', 'loc', 'getitem', 'iloc', None]

        # don't coerce ilocs to integers
        if kind != 'iloc':
            key = self._maybe_cast_indexer(key)
        return (super(UInt64Index, self)
                ._convert_scalar_indexer(key, kind=kind))

    @Appender(_index_shared_docs['_convert_arr_indexer'])
    def _convert_arr_indexer(self, keyarr):
        # Cast the indexer to uint64 if possible so
        # that the values returned from indexing are
        # also uint64.
        keyarr = _asarray_tuplesafe(keyarr)
        if is_integer_dtype(keyarr):
            return _asarray_tuplesafe(keyarr, dtype=np.uint64)
        return keyarr

    @Appender(_index_shared_docs['_convert_index_indexer'])
    def _convert_index_indexer(self, keyarr):
        # Cast the indexer to uint64 if possible so
        # that the values returned from indexing are
        # also uint64.
        if keyarr.is_integer():
            return keyarr.astype(np.uint64)
        return keyarr

    def _wrap_joined_index(self, joined, other):
        # Keep the index name only when both join operands agree on it.
        name = self.name if self.name == other.name else None
        return UInt64Index(joined, name=name)

    @classmethod
    def _assert_safe_casting(cls, data, subarr):
        """
        Ensure incoming data can be represented as uints.
        """
        if not issubclass(data.dtype.type, np.unsignedinteger):
            # The cast is considered safe only if it round-trips exactly.
            if not np.array_equal(data, subarr):
                raise TypeError('Unsafe NumPy casting, you must '
                                'explicitly cast')
# Attach the generated arithmetic and logical index methods.
UInt64Index._add_numeric_methods()
UInt64Index._add_logical_methods()
_float64_descr_args = dict(
klass='Float64Index',
dtype='float64',
ltype='float',
extra=''
)
class Float64Index(NumericIndex):
    __doc__ = _num_index_shared_docs['class_descr'] % _float64_descr_args

    _typ = 'float64index'
    _engine_type = libindex.Float64Engine
    # C-level helpers specialised for float64 (from pandas._libs).
    _arrmap = libalgos.arrmap_float64
    _left_indexer_unique = libjoin.left_join_indexer_unique_float64
    _left_indexer = libjoin.left_join_indexer_float64
    _inner_indexer = libjoin.inner_join_indexer_float64
    _outer_indexer = libjoin.outer_join_indexer_float64
    _default_dtype = np.float64

    @property
    def inferred_type(self):
        """Always 'floating' for ``Float64Index``"""
        return 'floating'

    @Appender(_index_shared_docs['astype'])
    def astype(self, dtype, copy=True):
        dtype = pandas_dtype(dtype)
        if is_float_dtype(dtype):
            values = self._values.astype(dtype, copy=copy)
        elif is_integer_dtype(dtype):
            # NaN has no integer representation; fail loudly instead of
            # producing garbage values.
            if self.hasnans:
                raise ValueError('cannot convert float NaN to integer')
            values = self._values.astype(dtype, copy=copy)
        elif is_object_dtype(dtype):
            values = self._values.astype('object', copy=copy)
        else:
            raise TypeError('Setting %s dtype to anything other than '
                            'float64 or object is not supported' %
                            self.__class__)
        return Index(values, name=self.name, dtype=dtype)

    @Appender(_index_shared_docs['_convert_scalar_indexer'])
    def _convert_scalar_indexer(self, key, kind=None):
        assert kind in ['ix', 'loc', 'getitem', 'iloc', None]

        if kind == 'iloc':
            # Positional indexing on a float index must use an integer key.
            return self._validate_indexer('positional', key, kind)

        return key

    @Appender(_index_shared_docs['_convert_slice_indexer'])
    def _convert_slice_indexer(self, key, kind=None):
        # if we are not a slice, then we are done
        if not isinstance(key, slice):
            return key

        if kind == 'iloc':
            return super(Float64Index, self)._convert_slice_indexer(key,
                                                                    kind=kind)

        # translate to locations
        return self.slice_indexer(key.start, key.stop, key.step, kind=kind)

    def _format_native_types(self, na_rep='', float_format=None, decimal='.',
                             quoting=None, **kwargs):
        # Render values for CSV/console output with pandas' float formatter.
        from pandas.io.formats.format import FloatArrayFormatter
        formatter = FloatArrayFormatter(self.values, na_rep=na_rep,
                                        float_format=float_format,
                                        decimal=decimal, quoting=quoting,
                                        fixed_width=False)
        return formatter.get_result_as_array()

    def get_value(self, series, key):
        """ we always want to get an index value, never a value """
        if not is_scalar(key):
            raise InvalidIndexError

        k = _values_from_object(key)
        loc = self.get_loc(k)
        new_values = _values_from_object(series)[loc]

        return new_values

    def equals(self, other):
        """
        Determines if two Index objects contain the same elements.
        """
        if self is other:
            return True

        if not isinstance(other, Index):
            return False

        # need to compare nans locations and make sure that they are the same
        # since nans don't compare equal this is a bit tricky
        try:
            if not isinstance(other, Float64Index):
                other = self._constructor(other)
            if (not is_dtype_equal(self.dtype, other.dtype) or
                    self.shape != other.shape):
                return False
            left, right = self._values, other._values
            # equal element-wise, OR NaN at the same position on both sides
            return ((left == right) | (self._isnan & other._isnan)).all()
        except (TypeError, ValueError):
            return False

    def __contains__(self, other):
        if super(Float64Index, self).__contains__(other):
            return True

        try:
            # if other is a sequence this throws a ValueError
            return np.isnan(other) and self.hasnans
        except ValueError:
            try:
                # single-element sequences are unwrapped and re-tested
                return len(other) <= 1 and ibase._try_get_item(other) in self
            except TypeError:
                return False
        except:
            # NOTE(review): bare except maps any other failure (e.g. an
            # un-coercible type) to "not contained" -- confirm intended.
            return False

    @Appender(_index_shared_docs['get_loc'])
    def get_loc(self, key, method=None, tolerance=None):
        try:
            if np.all(np.isnan(key)):
                nan_idxs = self._nan_idxs
                try:
                    return nan_idxs.item()
                except (ValueError, IndexError):
                    # should only need to catch ValueError here but on numpy
                    # 1.7 .item() can raise IndexError when NaNs are present
                    if not len(nan_idxs):
                        raise KeyError(key)
                    return nan_idxs
        except (TypeError, NotImplementedError):
            pass
        return super(Float64Index, self).get_loc(key, method=method,
                                                 tolerance=tolerance)

    @cache_readonly
    def is_unique(self):
        # Unique apart from NaNs, which the hash engine cannot distinguish;
        # two or more NaNs make the index non-unique.
        return super(Float64Index, self).is_unique and self._nan_idxs.size < 2

    @Appender(Index.isin.__doc__)
    def isin(self, values, level=None):
        if level is not None:
            self._validate_index_level(level)
        return algorithms.isin(np.array(self), values)
# Attach arithmetic methods; logical reductions are explicitly disabled for
# float indexes.
Float64Index._add_numeric_methods()
Float64Index._add_logical_methods_disabled()
| |
# =============================================================
# Input
# ----------------------------------------------------------
# array_size   ------> Number of spectra in the dataset
# rangeFd      ------> Range of rotation measures (Faraday depths)
# rangeSig     ------> Range of noise sigmas
# rangeChi     ------> Range of ChiNot values
# rangeFlux    ------> Range of fluxes
# num_sources  ------> Number of sources (1 = simple only, 2 = complex only,
#                      3 = first half simple / second half complex)
###########################################################
# Output
# ----------------------------------------------------------
# X_2[count,:,0] ------> Normalized imaginary component of the Faraday spectrum
# X_2[count,:,1] ------> Normalized real component of the Faraday spectrum
# Y[count]       ------> Categorical array ==> 0 if simple, 1 if complex
# FD[count]      ------> Array of size array_size of all rotation measure (FD) parameters
# CH[count]      ------> Array of size array_size of all ChiNot parameters
# FL[count]      ------> Array of size array_size of all flux parameters
# SI[count]      ------> Array of size array_size of all sigma parameters
###########################################################
# =============================================================
def dmlCreateSpectrum(array_size=6, rangeFd=(-69, 69), rangeSig=(0.01, 1),
                      rangeChi=(0, 3.14), rangeFlux=(0.01, 1), num_sources=2):
    """Generate a labelled dataset of noisy Faraday spectra.

    Parameters
    ----------
    array_size : int
        Number of spectra (samples) to generate.
    rangeFd : sequence of 2 floats
        [min, max] range for the rotation measure (Faraday depth) draws.
    rangeSig : sequence of 2 floats
        [min, max] range for the Gaussian noise sigma draws.
    rangeChi : sequence of 2 floats
        [min, max] range for the ChiNot (initial angle) draws.
    rangeFlux : sequence of 2 floats
        [min, max] range for the flux draws.
    num_sources : int
        1 -> all samples single-source ("simple", label 0);
        2 -> all samples two-source ("complex", label 1);
        3 -> first half simple, second half complex.

    Returns
    -------
    X_2 : ndarray, shape (array_size, 201, 2)
        Peak-normalized Faraday spectrum; channel 0 holds the imaginary
        part, channel 1 the real part.
    Y : ndarray, shape (array_size,)
        Labels: 0 for simple, 1 for complex samples.
    FD, CH, FL, SI : ndarray, shape (array_size,)
        Drawn Faraday depth, ChiNot, flux and sigma per sample.

    Raises
    ------
    ValueError
        If ``num_sources`` is not 1, 2 or 3 (previously this silently
        returned all-zero arrays).

    Notes
    -----
    Fixes vs. the original: the builtin ``max`` is no longer shadowed; the
    unused ``matplotlib``/``time`` imports and the spurious ``plt.ion()``
    side effect were removed (nothing was ever plotted); the triplicated
    per-branch sampling code is factored into helpers; the progress print
    fires consistently every 1000 samples; defaults are immutable tuples.
    """
    import numpy as np

    c = 2.99e+08   # speed of light in m/s
    mhz = 1.0e+06  # mega (Hz per MHz)
    FWHM = 23      # -3FWHM : 3FWHM 22.4 (use 23) rad/m**2 (reference only)

    def createFrequency(numin=700., numax=1800., nchan=100.):
        # Evenly spaced frequency channels; numin/numax are in MHz.
        numax = numax * mhz
        numin = numin * mhz
        return np.arange(nchan) * (numax - numin) / (nchan - 1) + numin

    def createPOSSUMSpectrum():
        # POSSUM Early Science frequency coverage: two disjoint bands.
        band1n2 = createFrequency(700., 1300, nchan=600)
        band3 = createFrequency(1500., 1800., nchan=300)
        return np.concatenate((band1n2, band3))

    def create1Spectrum(nu, flux1=1., fdepth1=7., chinot1=0.):
        # Complex polarization of a single Faraday-thin source.
        return flux1 * np.exp(2 * 1j * (chinot1 + fdepth1 * (c / nu) ** 2))

    def create2Spectrum(nu, flux1=1., flux2=1., fdepth1=1, fdepth2=70.,
                        chinot1=0., chinot2=0.):
        # Superposition of two Faraday-thin sources.
        return (create1Spectrum(nu, flux1, fdepth1, chinot1) +
                create1Spectrum(nu, flux2, fdepth2, chinot2))

    def createNoiseSpectrum(nu, sigma):
        # Independent Gaussian noise for the real and imaginary channels.
        return (sigma * np.random.standard_normal(nu.shape),
                sigma * np.random.standard_normal(nu.shape))

    def createFaradaySpectrum(pol, nu, philow=-100, phihi=101):
        # Discrete Faraday transform evaluated at integer depths
        # philow..phihi-1 (201 bins), referenced to the mean lambda^2.
        lam2 = (c / nu) ** 2
        lam2_0 = np.mean(lam2)  # equals chinot**2 in the original code
        phi = np.arange(philow, phihi)
        F = [np.sum(pol * np.exp(-2 * 1j * far * (lam2 - lam2_0)))
             for far in phi]
        return phi, np.asarray(F) / len(nu)

    def _draw(rng):
        # One uniform draw from [rng[0], rng[1]).
        return rng[0] + np.random.rand() * (rng[1] - rng[0])

    def _sample(nu, two_source):
        # Draw parameters (same order as the original code), synthesize,
        # add noise, Faraday-transform and peak-normalize one spectrum.
        vary_ch = _draw(rangeChi)
        vary_fd = _draw(rangeFd)
        vary_fl = _draw(rangeFlux)
        vary_si = _draw(rangeSig)
        if two_source:
            vary_ch2 = _draw(rangeChi)
            vary_fd2 = _draw(rangeFd)
            spec = create2Spectrum(nu, flux2=vary_fl, fdepth2=vary_fd,
                                   chinot2=vary_ch, fdepth1=vary_fd2,
                                   chinot1=vary_ch2)
        else:
            spec = create1Spectrum(nu, flux1=vary_fl, fdepth1=vary_fd,
                                   chinot1=vary_ch)
        noise1, noise2 = createNoiseSpectrum(nu, vary_si)
        spec = spec + noise1 + noise2 * 1j
        _, faraday = createFaradaySpectrum(spec, nu)
        faraday = faraday / np.max(np.abs(faraday))
        return faraday, vary_fd, vary_ch, vary_fl, vary_si

    # Decide, per sample index, whether it is a two-source ("complex") one.
    if num_sources == 2:
        plan = [(i, True) for i in range(array_size)]
    elif num_sources == 1:
        plan = [(i, False) for i in range(array_size)]
    elif num_sources == 3:
        half = array_size // 2
        plan = ([(i, False) for i in range(half)] +
                [(i, True) for i in range(half, array_size)])
    else:
        raise ValueError('num_sources must be 1, 2 or 3, got %r'
                         % (num_sources,))

    nu = createPOSSUMSpectrum()
    X_2 = np.zeros((array_size, 201, 2))
    Y = np.zeros(array_size)
    FD = np.zeros(array_size)
    CH = np.zeros(array_size)
    FL = np.zeros(array_size)
    SI = np.zeros(array_size)

    for count, (i, two_source) in enumerate(plan):
        if i % 1000 == 0:
            print(str(i), '/', str(array_size))
        faraday, vary_fd, vary_ch, vary_fl, vary_si = _sample(nu, two_source)
        X_2[count, :, 0] = faraday.imag
        X_2[count, :, 1] = faraday.real
        Y[count] = 1 if two_source else 0
        FD[count] = vary_fd
        CH[count] = vary_ch
        FL[count] = vary_fl
        SI[count] = vary_si

    print("Done creating data")
    return X_2, Y, FD, CH, FL, SI
| |
# The MIT License (MIT)
#
# Copyright (c) 2015 Leon Jacobs
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the 'Software'), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
'''
Generic Daemon Class:
Source: https://github.com/serverdensity/python-daemon
'''
# Core modules
import atexit
import os
import sys
import time
import signal
class Daemon(object):
    '''
    A generic daemon class.

    Usage: subclass the Daemon class and override the run() method
    '''

    def __init__ (self, pidfile, stdin = os.devnull,
                  stdout = os.devnull, stderr = os.devnull,
                  home_dir = '.', umask = 022, verbose = 1, use_gevent = False):
        # Paths used to re-wire the standard streams after daemonizing.
        self.stdin = stdin
        self.stdout = stdout
        self.stderr = stderr
        # File that records the daemon's PID while it runs.
        self.pidfile = pidfile
        # Working directory the daemon chdirs into.
        self.home_dir = home_dir
        self.verbose = verbose
        self.umask = umask
        # Cleared by the SIGTERM handler so run() loops can exit cleanly.
        self.daemon_alive = True
        self.use_gevent = use_gevent

    def daemonize (self):
        '''
        Do the UNIX double-fork magic, see Stevens' 'Advanced
        Programming in the UNIX Environment' for details (ISBN 0201563177)
        http://www.erlenstar.demon.co.uk/unix/faq_2.html#SEC16
        '''
        try:
            pid = os.fork()
            if pid > 0:
                # Exit first parent
                sys.exit(0)
        except OSError, e:
            sys.stderr.write(
                'fork #1 failed: %d (%s)\n' % (e.errno, e.strerror))
            sys.exit(1)

        # Decouple from parent environment
        os.chdir(self.home_dir)
        os.setsid()  # become session leader, detach from controlling tty
        os.umask(self.umask)

        # Do second fork (so the daemon can never re-acquire a tty)
        try:
            pid = os.fork()
            if pid > 0:
                # Exit from second parent
                sys.exit(0)
        except OSError, e:
            sys.stderr.write(
                'fork #2 failed: %d (%s)\n' % (e.errno, e.strerror))
            sys.exit(1)

        # This block breaks on OS X
        if sys.platform != 'darwin':
            # Redirect standard file descriptors
            sys.stdout.flush()
            sys.stderr.flush()
            si = file(self.stdin, 'r')
            so = file(self.stdout, 'a+')
            if self.stderr:
                se = file(self.stderr, 'a+', 0)  # 0 -> unbuffered stderr
            else:
                se = so
            os.dup2(si.fileno(), sys.stdin.fileno())
            os.dup2(so.fileno(), sys.stdout.fileno())
            os.dup2(se.fileno(), sys.stderr.fileno())

        def sigtermhandler (signum, frame):
            # Flag run() loops to stop, then terminate this process.
            self.daemon_alive = False
            sys.exit()

        if self.use_gevent:
            import gevent
            gevent.reinit()
            gevent.signal(signal.SIGTERM, sigtermhandler, signal.SIGTERM, None)
            gevent.signal(signal.SIGINT, sigtermhandler, signal.SIGINT, None)
        else:
            signal.signal(signal.SIGTERM, sigtermhandler)
            signal.signal(signal.SIGINT, sigtermhandler)

        if self.verbose >= 1:
            print 'Started'

        # Make sure pid file is removed if we quit
        atexit.register(self.delpid)

        # Write pidfile
        pid = str(os.getpid())
        file(self.pidfile, 'w+').write('%s\n' % pid)

    def delpid (self):
        '''
        atexit hook: remove the pidfile on interpreter shutdown.
        '''
        os.remove(self.pidfile)

    def start (self, *args, **kwargs):
        '''
        Start the daemon
        '''
        if self.verbose >= 1:
            print 'Starting...'

        # Check for a pidfile to see if the daemon already runs
        try:
            pf = file(self.pidfile, 'r')
            pid = int(pf.read().strip())
            pf.close()
        except IOError:
            pid = None
        except SystemExit:
            pid = None

        if pid:
            # Refuse to start twice against the same pidfile.
            message = 'pidfile %s already exists. Is it already running?\n'
            sys.stderr.write(message % self.pidfile)
            sys.exit(1)

        # Start the daemon
        self.daemonize()
        self.run(*args, **kwargs)

    def stop (self):
        '''
        Stop the daemon
        '''
        if self.verbose >= 1:
            print 'Stopping...'

        # Get the pid from the pidfile
        pid = self.get_pid()

        if not pid:
            message = 'pidfile %s does not exist. Not running?\n'
            sys.stderr.write(message % self.pidfile)

            # Just to be sure. A ValueError might occur if the PID file is
            # empty but does actually exist
            if os.path.exists(self.pidfile):
                os.remove(self.pidfile)

            return  # Not an error in a restart

        # Try killing the daemon process: SIGTERM repeatedly, escalating to
        # SIGHUP every 10 attempts, until the process disappears.
        try:
            i = 0
            while 1:
                os.kill(pid, signal.SIGTERM)
                time.sleep(0.1)
                i += 1
                if i % 10 == 0:
                    os.kill(pid, signal.SIGHUP)
        except OSError, err:
            err = str(err)
            if err.find('No such process') > 0:
                # Process is gone; clean up the stale pidfile.
                if os.path.exists(self.pidfile):
                    os.remove(self.pidfile)
            else:
                print str(err)
                sys.exit(1)

        if self.verbose >= 1:
            print 'Stopped'

    def restart (self):
        '''
        Restart the daemon
        '''
        self.stop()
        self.start()

    def get_pid (self):
        '''
        Read the daemon's PID from the pidfile; None if unreadable.
        '''
        try:
            pf = file(self.pidfile, 'r')
            pid = int(pf.read().strip())
            pf.close()
        except IOError:
            pid = None
        except SystemExit:
            pid = None
        return pid

    def is_running (self):
        '''
        Report (and return) whether the recorded PID is alive, judged by
        the existence of /proc/<pid> (Linux-specific).
        '''
        pid = self.get_pid()

        if pid is None:
            print 'Process is stopped'
        elif os.path.exists('/proc/%d' % pid):
            print 'Process (pid %d) is running...' % pid
        else:
            print 'Process (pid %d) is killed' % pid

        return pid and os.path.exists('/proc/%d' % pid)

    def run (self):
        '''
        You should override this method when you subclass Daemon.
        It will be called after the process has been
        daemonized by start() or restart().
        '''
        raise NotImplementedError
| |
"""
Tests to make sure the schemas.yml file is structurally sound.
"""
import re
import unittest
import os
import cea.config
import cea.inputlocator
import cea.scripts
import cea.schemas
__author__ = "Daren Thomas"
__copyright__ = "Copyright 2017, Architecture and Building Systems - ETH Zurich"
__credits__ = ["Daren Thomas", "Jimeno A. Fonseca"]
__license__ = "MIT"
__version__ = "0.1"
__maintainer__ = "Daren Thomas"
__email__ = "cea@arch.ethz.ch"
__status__ = "Production"
# FIXME: remove this once we have fixed the remaining problems...
# Locator methods whose files cannot be documented column-by-column in
# schemas.yml (non-tabular layouts); the documentation tests below skip them.
SKIP_LMS = {
    "get_building_weekly_schedules",
    "get_optimization_individuals_in_generation",
    "get_optimization_slave_cooling_activation_pattern",
}
class TestSchemas(unittest.TestCase):
    """Structural sanity checks for schemas.yml (loaded without plugins)."""

    def test_all_locator_methods_described(self):
        """Every file-producing InputLocator method must appear in schemas.yml."""
        schemas = cea.schemas.schemas(plugins=[])
        config = cea.config.Configuration()
        locator = cea.inputlocator.InputLocator(config.scenario)
        for method in extract_locator_methods(locator):
            self.assertIn(method, schemas.keys())

    def test_all_locator_methods_have_a_file_path(self):
        """Each schema entry needs a forward-slash string file_path."""
        schemas = cea.schemas.schemas(plugins=[])
        for lm in schemas:
            self.assertIn("file_path", schemas[lm], "{lm} does not have a file_path".format(lm=lm))
            self.assertIsInstance(schemas[lm]["file_path"], str, "{lm} does not have a file_path".format(lm=lm))
            self.assertNotIn("\\", schemas[lm]["file_path"], "{lm} has backslashes in it's file_path".format(lm=lm))

    def test_all_columns_have_description(self):
        """Every declared column carries a description field."""
        schemas = cea.schemas.schemas(plugins=[])
        for lm in schemas:
            if lm == "get_database_standard_schedules_use":
                # the schema for schedules is non-standard
                continue
            if schemas[lm]["file_type"] in {"xls", "xlsx"}:
                # workbook formats nest columns one level deeper, per worksheet
                for ws in schemas[lm]["schema"]:
                    for col in schemas[lm]["schema"][ws]["columns"]:
                        self.assertIn("description", schemas[lm]["schema"][ws]["columns"][col],
                                      "Missing description for {lm}/{ws}/{col}".format(lm=lm, ws=ws, col=col))
            else:
                for col in schemas[lm]["schema"]["columns"]:
                    self.assertIn("description", schemas[lm]["schema"]["columns"][col],
                                  "Missing description for {lm}/{col}".format(lm=lm, col=col))

    def test_all_schemas_have_a_columns_entry(self):
        """Every schema (or worksheet) declares its columns."""
        schemas = cea.schemas.schemas(plugins=[])
        for lm in schemas:
            if lm == "get_database_standard_schedules_use":
                # the schema for schedules is non-standard
                continue
            if schemas[lm]["file_type"] in {"xls", "xlsx"}:
                for ws in schemas[lm]["schema"]:
                    self.assertIn("columns", schemas[lm]["schema"][ws], "Missing columns for {lm}/{ws}".format(
                        lm=lm, ws=ws))
            else:
                self.assertIn("columns", schemas[lm]["schema"], "Missing columns for {lm}".format(lm=lm))

    def test_all_schema_columns_documented(self):
        """No column may still carry the 'TODO' placeholder documentation."""
        schemas = cea.schemas.schemas(plugins=[])
        for lm in schemas.keys():
            if lm in SKIP_LMS:
                # these can't be documented properly due to the file format
                continue
            schema = schemas[lm]["schema"]
            if schemas[lm]["file_type"] in {"xls", "xlsx"}:
                for ws in schema.keys():
                    ws_schema = schema[ws]["columns"]
                    for col in ws_schema.keys():
                        self.assertNotEqual(ws_schema[col]["description"].strip(), "TODO",
                                            "Missing description for {lm}/{ws}/{col}/description".format(
                                                lm=lm, ws=ws, col=col))
                        self.assertNotEqual(ws_schema[col]["unit"].strip(), "TODO",
                                            "Missing description for {lm}/{ws}/{col}/unit".format(
                                                lm=lm, ws=ws, col=col))
                        self.assertNotEqual(ws_schema[col]["values"].strip(), "TODO",
                                            "Missing description for {lm}/{ws}/{col}/description".format(
                                                lm=lm, ws=ws, col=col))
            elif schemas[lm]["file_type"] in {"shp", "dbf", "csv"}:
                for col in schema["columns"].keys():
                    try:
                        self.assertNotEqual(schema["columns"][col]["description"].strip(), "TODO",
                                            "Missing description for {lm}/{col}/description".format(
                                                lm=lm, col=col))
                        self.assertNotEqual(schema["columns"][col]["unit"].strip(), "TODO",
                                            "Missing description for {lm}/{col}/description".format(
                                                lm=lm, col=col))
                        self.assertNotEqual(schema["columns"][col]["values"].strip(), "TODO",
                                            "Missing description for {lm}/{col}/description".format(
                                                lm=lm, col=col))
                    except BaseException as e:
                        # re-raise with context so the failing column is named
                        self.fail("Problem with lm={lm}, col={col}, message: {m}".format(lm=lm, col=col, m=e))

    def test_each_column_has_type(self):
        """Every column declares one of the known scalar/geometry types."""
        schemas = cea.schemas.schemas(plugins=[])
        valid_types = {"string", "int", "boolean", "float", "date", "Point", "Polygon", "LineString"}
        for lm in schemas.keys():
            if lm in SKIP_LMS:
                # these can't be documented properly due to the file format
                continue
            schema = schemas[lm]["schema"]
            if schemas[lm]["file_type"] in {"xls", "xlsx"}:
                for ws in schema.keys():
                    ws_schema = schema[ws]["columns"]
                    for col in ws_schema.keys():
                        self.assertIn("type", ws_schema[col],
                                      "Missing type definition for {lm}/{ws}/{col}".format(
                                          lm=lm, ws=ws, col=col))
                        col_type = ws_schema[col]["type"]
                        self.assertIn(col_type, valid_types,
                                      "Invalid type definition for {lm}/{ws}/{col}: {type}".format(
                                          lm=lm, ws=ws, col=col, type=col_type))
            elif schemas[lm]["file_type"] in {"shp", "dbf", "csv"}:
                for col in schema["columns"].keys():
                    self.assertIn("type", schema["columns"][col],
                                  "Missing type definition for {lm}/{col}".format(lm=lm, col=col))
                    col_type = schema["columns"][col]["type"]
                    self.assertIn(col_type, valid_types,
                                  "Invalid type definition for {lm}/{col}: {type}".format(lm=lm, col=col,
                                                                                          type=col_type))

    def test_each_lm_has_created_by(self):
        """Each entry lists which scripts create the file."""
        schemas = cea.schemas.schemas(plugins=[])
        for lm in schemas:
            self.assertIn("created_by", schemas[lm], "{lm} missing created_by entry".format(lm=lm))
            self.assertIsInstance(schemas[lm]["created_by"], list,
                                  "created_by entry of {lm} must be a list".format(lm=lm))

    def test_each_lm_has_used_by(self):
        """Each entry lists which scripts consume the file."""
        schemas = cea.schemas.schemas(plugins=[])
        for lm in schemas:
            self.assertIn("used_by", schemas[lm], "{lm} missing used_by entry".format(lm=lm))
            self.assertIsInstance(schemas[lm]["used_by"], list,
                                  "used_by entry of {lm} must be a list".format(lm=lm))

    def test_each_lm_has_method(self):
        """Each schemas.yml key has a matching InputLocator method."""
        schemas = cea.schemas.schemas(plugins=[])
        locator = cea.inputlocator.InputLocator(None)
        for lm in schemas:
            self.assertIn(lm, dir(locator),
                          "schemas.yml contains {lm} but no corresponding method in InputLocator".format(lm=lm))

    def test_each_folder_unique(self):
        """No two *_folder locator methods may resolve to the same path."""
        locator = cea.inputlocator.ReferenceCaseOpenLocator()
        folders = {}  # map path -> lm
        for attrib in dir(locator):
            if attrib.endswith("_folder") and not attrib.startswith("_"):
                method = getattr(locator, attrib)
                # superset of keyword arguments the folder methods accept;
                # trim it down to those this particular method declares
                parameters = {
                    "network_type": "DC",
                    "network_name": "",
                    "gen_num": 1,
                    "category": "demand",
                    "type_of_district_network": "space-heating",
                }
                for p in list(parameters.keys()):
                    # FIX: was `if not p in ...` -- use the idiomatic form
                    if p not in method.__code__.co_varnames:
                        del parameters[p]
                folder = method(**parameters)
                # normalize so case/relative differences don't hide duplicates
                folder = os.path.normcase(os.path.normpath(os.path.abspath(folder)))
                self.assertNotIn(folder, folders,
                                 "{attrib} duplicates the result of {prev}".format(
                                     attrib=attrib, prev=folders.get(folder, None)))
                folders[folder] = attrib

    def test_scripts_use_underscores_not_hyphen(self):
        """Script names referenced by schemas must use underscores."""
        schemas = cea.schemas.schemas(plugins=[])
        for lm in schemas:
            used_by = schemas[lm]["used_by"]
            created_by = schemas[lm]["created_by"]
            for script in used_by:
                self.assertNotIn("-", script, "{lm} used_by script {script} contains hyphen".format(**locals()))
            for script in created_by:
                self.assertNotIn("-", script, "{lm} created_by script {script} contains hyphen".format(**locals()))

    def test_read_glossary_df(self):
        """The glossary built from schemas must load without error."""
        import cea.glossary
        cea.glossary.read_glossary_df(plugins=[])

    def test_numerical_ranges(self):
        """The 'values' range strings must agree with min/max properties.

        NOTE(review): mismatches are only printed, not failed -- presumably
        deliberate while the data is being cleaned up; confirm.
        """
        def check_range(schema):
            # Only numeric columns with a 'values' range string are checked.
            if 'type' in schema and schema['type'] in ['float', 'int']:
                if "values" in schema:
                    values = schema['values']
                    values_min, values_max = parse_numerical_range_value(values, schema['type'])
                    schema_min = schema.get('min')
                    schema_max = schema.get('max')
                    if values_min != schema_min or values_max != schema_max:
                        raise ValueError(
                            'values property do not match range properties. '
                            'values: {values}, min: {schema_min}, max: {schema_max}'.format(
                                values=values, schema_min=schema_min, schema_max=schema_max))

        schemas = cea.schemas.schemas(plugins=[])
        for lm in schemas:
            if lm == "get_database_standard_schedules_use" or lm in SKIP_LMS:
                # the schema for schedules is non-standard
                continue
            if schemas[lm]["file_type"] in {"xls", "xlsx"}:
                for ws in schemas[lm]["schema"]:
                    for col, col_schema in schemas[lm]["schema"][ws]["columns"].items():
                        try:
                            check_range(col_schema)
                        except ValueError as e:
                            col_label = ":".join([lm, ws, col])
                            print("Error in column {col_label}:\n{message}\n".format(col_label=col_label,
                                                                                     message=e))
            else:
                for col, col_schema in schemas[lm]["schema"]["columns"].items():
                    try:
                        check_range(col_schema)
                    except ValueError as e:
                        col_label = ":".join([lm, col])
                        print(
                            "Error in column {col_label}:\n{message}\n".format(col_label=col_label, message=e))
def extract_locator_methods(locator):
    """Yield the names of locator methods that point to individual files.

    Skips private attributes, non-callables, ``*_folder`` accessors and a
    fixed list of special-case methods that do not describe a single file.
    """
    ignore = {
        "ensure_parent_folder_exists",
        "get_plant_nodes",
        "get_temporary_file",
        "get_weather_names",
        "get_zone_building_names",
        "verify_database_template",
        "get_optimization_network_all_individuals_results_file",  # TODO: remove this when we know how
        "get_optimization_network_generation_individuals_results_file",  # TODO: remove this when we know how
        "get_optimization_network_individual_results_file",  # TODO: remove this when we know how
        "get_optimization_network_layout_costs_file",  # TODO: remove this when we know how
        "get_timeseries_plots_file",  # TODO: remove this when we know how
    }
    for name in dir(locator):
        is_file_method = (callable(getattr(locator, name))
                          and not name.startswith("_")
                          and name not in ignore
                          and not name.endswith("_folder"))
        if is_file_method:
            yield name
def parse_numerical_range_value(value, num_type):
    """Parse a ``{min...max}`` (or ``{min,max}``) range string.

    Each endpoint is either a (possibly negative, possibly decimal) number
    or the literal ``n``, which maps to ``None`` (unbounded).

    :param value: the range string, e.g. ``"{0...100}"`` or ``"{n...1.5}"``
    :param num_type: ``"float"`` or ``"int"`` -- how to cast the endpoints
    :return: tuple ``(min, max)`` with ``None`` for an ``n`` endpoint
    :raises ValueError: if ``value`` does not match the expected format
    :raises TypeError: if ``num_type`` is neither ``"float"`` nor ``"int"``
    """
    def parse_string_num(string_num):
        if string_num == 'n':
            return None
        elif num_type == 'float':
            return float(string_num)
        elif num_type == 'int':
            return int(string_num)
        else:
            raise TypeError("Unable to cast type `{type}`".format(type=num_type))

    # BUG FIX: the dots were previously unescaped, so `.` matched ANY
    # character -- garbage like '{1abc2}' was silently accepted and other
    # malformed values crashed with a confusing cast error instead of the
    # intended ValueError.
    num = r'-?\d+(?:\.\d+)?'
    num_or_n = r'(?:{num}|n)'.format(num=num)
    # match {1...n}-style values (a comma separator is also accepted)
    regex = r'{{(?P<first>{num_or_n})(?:\.\.\.|,)(?P<second>{num_or_n})}}'.format(num_or_n=num_or_n)
    match = re.match(regex, value)
    if match is None:
        raise ValueError("values property not in '{{n...n}}' format. Got: '{value}'".format(value=value))
    return parse_string_num(match.group("first")), parse_string_num(match.group("second"))
# Allow running the schema checks directly as a script.
if __name__ == '__main__':
    unittest.main()
| |
from westtools import (WESTToolComponent, WESTDataReader, IterRangeSelection, WESTSubcommand,
ProgressIndicatorComponent)
import mclib
from mclib import mcbs_correltime, mcbs_ci_correl, _1D_simple_eval_block, _2D_simple_eval_block
from westpa import h5io
import numpy
from westtools.dtypes import iter_block_ci_dtype as ci_dtype
def generate_future(work_manager, name, eval_block, kwargs):
    """Submit ``eval_block`` to the work manager and return the future.

    The supplied ``kwargs`` are merged on top of a ``name`` entry, so a
    'name' key in ``kwargs`` takes precedence.
    """
    merged_kwargs = dict(name=name)
    merged_kwargs.update(kwargs)
    return work_manager.submit(eval_block, kwargs=merged_kwargs)
class WESTKineticsBase(WESTSubcommand):
    '''
    Common argument processing for w_direct/w_reweight subcommands.
    Mostly limited to handling input and output from w_assign.
    '''

    def __init__(self, parent):
        super(WESTKineticsBase, self).__init__(parent)

        # Shared tool components (argument registration + runtime services).
        self.data_reader = WESTDataReader()
        self.iter_range = IterRangeSelection()
        self.progress = ProgressIndicatorComponent()

        self.output_filename = None
        # This is actually applicable to both.
        # BUG FIX: process_args() stores the value under
        # `assignments_filename` (plural), but only the singular spelling was
        # initialised here, so the attribute did not exist before
        # process_args() ran. Initialise both; the singular name is kept for
        # backward compatibility with any external readers.
        self.assignment_filename = None
        self.assignments_filename = None

        # Open h5 file handles, populated later by the subcommands.
        self.output_file = None
        self.assignments_file = None

        self.evolution_mode = None

        # Monte Carlo block-bootstrap parameters (set from CLI args).
        self.mcbs_alpha = None
        self.mcbs_acalpha = None
        self.mcbs_nsets = None

        # Now we're adding in things that come from the old w_kinetics
        self.do_compression = True

    def add_args(self, parser):
        """Register progress, data-reader, iteration-range and I/O options."""
        self.progress.add_args(parser)
        self.data_reader.add_args(parser)
        self.iter_range.include_args['iter_step'] = True
        self.iter_range.add_args(parser)

        iogroup = parser.add_argument_group('input/output options')
        iogroup.add_argument('-a', '--assignments', default='assign.h5',
                             help='''Bin assignments and macrostate definitions are in ASSIGNMENTS
                             (default: %(default)s).''')

        iogroup.add_argument('-o', '--output', dest='output', default=self.default_output_file,
                             help='''Store results in OUTPUT (default: %(default)s).''')

    def process_args(self, args):
        """Process parsed arguments; derive a default iteration step."""
        self.progress.process_args(args)
        self.data_reader.process_args(args)
        with self.data_reader:
            self.iter_range.process_args(args, default_iter_step=None)
        if self.iter_range.iter_step is None:
            # use about 10 blocks by default
            self.iter_range.iter_step = max(1, (self.iter_range.iter_stop - self.iter_range.iter_start) // 10)

        self.output_filename = args.output
        self.assignments_filename = args.assignments
# AverageCommands provides convenience functions (adapted from w_kinavg) for
# calculating the evolution and averages of observables with the mclib library
# in a consistent manner. It is used by both w_direct and w_reweight.
class AverageCommands(WESTKineticsBase):
    # Default HDF5 file used both as kinetics input and results output.
    default_output_file = 'direct.h5'
    def __init__(self, parent):
        # Ideally, this is stuff general to all the calculations we want to perform.
        super(AverageCommands,self).__init__(parent)
        self.kinetics_filename = None
        self.kinetics_file = None
    def add_args(self, parser):
        iogroup = parser.add_argument_group('input/output options')
        # self.default_kinetics_file will be picked up as a class attribute from the appropriate subclass
        # We can do this with the output file, too...
        # ... by default, however, we're going to use {direct/reweight}.h5 for everything.
        # Modules which are called with different default values will, of course, still use those.
        iogroup.add_argument('-k', '--kinetics', default=self.default_kinetics_file,
                            help='''Populations and transition rates are stored in KINETICS
                            (default: %(default)s).''')
        cgroup = parser.add_argument_group('confidence interval calculation options')
        cgroup.add_argument('--disable-bootstrap', '-db', dest='bootstrap', action='store_const', const=False,
                            help='''Enable the use of Monte Carlo Block Bootstrapping.''')
        cgroup.add_argument('--disable-correl', '-dc', dest='correl', action='store_const', const=False,
                            help='''Disable the correlation analysis.''')
        cgroup.add_argument('--alpha', type=float, default=0.05,
                             help='''Calculate a (1-ALPHA) confidence interval'
                             (default: %(default)s)''')
        cgroup.add_argument('--autocorrel-alpha', type=float, dest='acalpha', metavar='ACALPHA',
                             help='''Evaluate autocorrelation to (1-ACALPHA) significance.
                             Note that too small an ACALPHA will result in failure to detect autocorrelation
                             in a noisy flux signal. (Default: same as ALPHA.)''')
        cgroup.add_argument('--nsets', type=int,
                             help='''Use NSETS samples for bootstrapping (default: chosen based on ALPHA)''')
        cogroup = parser.add_argument_group('calculation options')
        cogroup.add_argument('-e', '--evolution-mode', choices=['cumulative', 'blocked', 'none'], default='none',
                             help='''How to calculate time evolution of rate estimates.
                             ``cumulative`` evaluates rates over windows starting with --start-iter and getting progressively
                             wider to --stop-iter by steps of --step-iter.
                             ``blocked`` evaluates rates over windows of width --step-iter, the first of which begins at
                             --start-iter.
                             ``none`` (the default) disables calculation of the time evolution of rate estimates.''')
        cogroup.add_argument('--window-frac', type=float, default=1.0,
                             help='''Fraction of iterations to use in each window when running in ``cumulative`` mode.
                             The (1 - frac) fraction of iterations will be discarded from the start of each window.''')
        mgroup = parser.add_argument_group('misc options')
        mgroup.add_argument('--disable-averages', '-da', dest='display_averages', action='store_false',
                            help='''Whether or not the averages should be printed to the console (set to FALSE if flag is used).''')
    def process_args(self, args):
        self.kinetics_filename = args.kinetics
        # Disable the bootstrap or the correlation analysis.
        # The store_const(False) flags leave these as None when absent,
        # so None here means "enabled".
        self.mcbs_enable = args.bootstrap if args.bootstrap is not None else True
        self.do_correl = args.correl if args.correl is not None else True
        self.mcbs_alpha = args.alpha
        # NOTE(review): a literal 0.0 ACALPHA also falls back to ALPHA here,
        # since the test is truthiness rather than "is not None" -- confirm.
        self.mcbs_acalpha = args.acalpha if args.acalpha else self.mcbs_alpha
        self.mcbs_nsets = args.nsets if args.nsets else mclib.get_bssize(self.mcbs_alpha)
        self.display_averages = args.display_averages
        self.evolution_mode = args.evolution_mode
        self.evol_window_frac = args.window_frac
        if self.evol_window_frac <= 0 or self.evol_window_frac > 1:
            raise ValueError('Parameter error -- fractional window defined by --window-frac must be in (0,1]')
    def stamp_mcbs_info(self, dataset):
        # Record the bootstrap parameters on the output dataset for provenance.
        dataset.attrs['mcbs_alpha'] = self.mcbs_alpha
        dataset.attrs['mcbs_acalpha'] = self.mcbs_acalpha
        dataset.attrs['mcbs_nsets'] = self.mcbs_nsets
    def open_files(self):
        # Output is opened for append; assignments and kinetics are read-only.
        self.output_file = h5io.WESTPAH5File(self.output_filename, 'a', creating_program=True)
        h5io.stamp_creator_data(self.output_file)
        self.assignments_file = h5io.WESTPAH5File(self.assignments_filename, 'r')#, driver='core', backing_store=False)
        self.kinetics_file = h5io.WESTPAH5File(self.kinetics_filename, 'r')#, driver='core', backing_store=False)
        if not self.iter_range.check_data_iter_range_least(self.assignments_file):
            raise ValueError('assignments data do not span the requested iterations')
    def open_assignments(self):
        # Actually, I should rename this, as we're not OPENING assignments.
        # This seems to be stuff we're going to be using a lot, so.
        self.nstates = self.assignments_file.attrs['nstates']
        self.nbins = self.assignments_file.attrs['nbins']
        self.state_labels = self.assignments_file['state_labels'][...]
        assert self.nstates == len(self.state_labels)
        self.start_iter, self.stop_iter, self.step_iter = self.iter_range.iter_start, self.iter_range.iter_stop, self.iter_range.iter_step
        # Import for the reweighting code.
        # NOTE(review): state_map is read into a local but never stored or
        # returned; confirm whether this was meant to be self.state_map.
        state_map = self.assignments_file['state_map'][...]
        # We've moved this into a different step so that it's compatible with
        # loading up from the all command.
        # Otherwise, we try to load the kinetics (since we're just mixing subclasses)
        # before it's actually run, and so we fail out.
        if not self.iter_range.check_data_iter_range_least(self.kinetics_file):
            raise ValueError('kinetics data do not span the requested iterations')
    def print_averages(self, dataset, header, dim=1):
        # Pretty-print mean + confidence-interval bounds, per state (dim == 1)
        # or per state->state transition (dim == 2).
        print(header)
        maxlabellen = max(list(map(len,self.state_labels)))
        for istate in range(self.nstates):
            if dim == 1:
                print('{:{maxlabellen}s}: mean={:21.15e} CI=({:21.15e}, {:21.15e}) * tau^-1'
                    .format(self.state_labels[istate],
                            dataset['expected'][istate],
                            dataset['ci_lbound'][istate],
                            dataset['ci_ubound'][istate],
                            maxlabellen=maxlabellen))
            else:
                for jstate in range(self.nstates):
                    if istate == jstate: continue
                    print('{:{maxlabellen}s} -> {:{maxlabellen}s}: mean={:21.15e} CI=({:21.15e}, {:21.15e}) * tau^-1'
                        .format(self.state_labels[istate], self.state_labels[jstate],
                                dataset['expected'][istate,jstate],
                                dataset['ci_lbound'][istate,jstate],
                                dataset['ci_ubound'][istate,jstate],
                                maxlabellen=maxlabellen))
    def run_calculation(self, pi, nstates, start_iter, stop_iter, step_iter, dataset, eval_block, name, dim, do_averages=False, **extra):
        # We want to use the same codepath to run a quick average as we do the longer evolution sets, so...
        if do_averages:
            start_pts = [start_iter, stop_iter]
        else:
            start_pts = list(range(start_iter, stop_iter, step_iter))
        # Our evolution dataset!
        if dim == 2:
            evolution_dataset = numpy.zeros((len(start_pts), nstates, nstates), dtype=ci_dtype)
        elif dim == 1:
            evolution_dataset = numpy.zeros((len(start_pts), nstates), dtype=ci_dtype)
        else:
            # Temp.
            print("What's wrong?")
        # This is appropriate for bootstrapped quantities, I think.
        if True:
            futures = []
            for iblock, start in enumerate(start_pts):
                stop = min(start+step_iter, stop_iter)
                if self.evolution_mode == 'cumulative' or do_averages == True:
                    # Cumulative windows grow from start_iter; a window-frac
                    # below 1 trims the oldest part of each window.
                    windowsize = int(self.evol_window_frac * (stop - start_iter))
                    block_start = max(start_iter, stop - windowsize)
                else: # self.evolution_mode == 'blocked'
                    block_start = start
                # Create a basic set of kwargs for this iteration slice.
                future_kwargs = dict(iblock=iblock, start=block_start, stop=stop,
                                     nstates=nstates,
                                     mcbs_alpha=self.mcbs_alpha, mcbs_nsets=self.mcbs_nsets,
                                     mcbs_acalpha=self.mcbs_acalpha,
                                     do_correl=self.do_correl,name=name,
                                     mcbs_enable=self.mcbs_enable,
                                     data_input={},
                                     **extra)
                # Slice up the datasets for this iteration slice.
                # We're assuming they're all h5io iter blocked datasets; it's up to the calling routine
                # to ensure this is true.
                # Actually, I'm less sure how to handle this for pre-calculated datasets. Need to consider this. But for now...
                for key, value in dataset.items():
                    try:
                        future_kwargs['data_input'][key] = value.iter_slice(block_start,stop) if hasattr(value, 'iter_slice') else value[block_start:stop]
                    except:
                        # NOTE(review): bare except -- retries with an explicit
                        # trailing-axis slice, but will also mask unrelated errors.
                        future_kwargs['data_input'][key] = value.iter_slice(block_start,stop) if hasattr(value, 'iter_slice') else value[block_start:stop,:]
                    #print(future_kwargs['data_input'][key])
                # We create a future object with the appropriate name, and then append it to the work manager.
                futures.append(generate_future(self.work_manager, name, eval_block, future_kwargs))
            pi.new_operation('Calculating {}'.format(name), extent=len(futures))
            # Now, we wait to get the result back; we'll store it in the result, and return it.
            for future in self.work_manager.as_completed(futures):
                # NOTE(review): 'iblock' here is the stale loop variable left
                # over from the submission loop above, so progress does not
                # advance per completed future -- confirm intent.
                pi.progress = iblock / step_iter
                future_result = future.get_result(discard=True)
                if dim == 2:
                    for result in future_result:
                        name,iblock,istate,jstate,ci_result = result
                        evolution_dataset[iblock, istate, jstate] = ci_result
                elif dim == 1:
                    for result in future_result:
                        name,iblock,istate,ci_result = result
                        evolution_dataset[iblock, istate] = ci_result
        return evolution_dataset
| |
#
# The Python Imaging Library.
# $Id$
#
# base class for image file handlers
#
# history:
# 1995-09-09 fl Created
# 1996-03-11 fl Fixed load mechanism.
# 1996-04-15 fl Added pcx/xbm decoders.
# 1996-04-30 fl Added encoders.
# 1996-12-14 fl Added load helpers
# 1997-01-11 fl Use encode_to_file where possible
# 1997-08-27 fl Flush output in _save
# 1998-03-05 fl Use memory mapping for some modes
# 1999-02-04 fl Use memory mapping also for "I;16" and "I;16B"
# 1999-05-31 fl Added image parser
# 2000-10-12 fl Set readonly flag on memory-mapped images
# 2002-03-20 fl Use better messages for common decoder errors
# 2003-04-21 fl Fall back on mmap/map_buffer if map is not available
# 2003-10-30 fl Added StubImageFile class
# 2004-02-25 fl Made incremental parser more robust
#
# Copyright (c) 1997-2004 by Secret Labs AB
# Copyright (c) 1995-2004 by Fredrik Lundh
#
# See the README file for information on usage and redistribution.
#
from . import Image
from ._util import isPath
import io
import sys
import struct
# Largest chunk handed to a codec in one call (see also _save()).
MAXBLOCK = 65536
# Reads larger than this are chunked by _safe_read().
SAFEBLOCK = 1024*1024
# When True, truncated files yield partial image data instead of an IOError.
LOAD_TRUNCATED_IMAGES = False
# Fallback messages for codec status codes, used when the C core cannot
# supply one (see raise_ioerror()).
ERRORS = {
    -1: "image buffer overrun error",
    -2: "decoding error",
    -3: "unknown error",
    -8: "bad configuration",
    -9: "out of memory error"
}
def raise_ioerror(error):
    """Look up a human-readable message for codec status code *error*
    and raise it as an IOError."""
    try:
        # Prefer the message provided by the C core, when available.
        message = Image.core.getcodecstatus(error)
    except AttributeError:
        # Core helper missing; fall back to the static table.
        message = ERRORS.get(error)
    message = message or "decoder error %d" % error
    raise IOError(message + " when reading image file")
#
# --------------------------------------------------------------------
# Helpers
def _tilesort(t):
# sort on offset
return t[2]
#
# --------------------------------------------------------------------
# ImageFile base class
class ImageFile(Image.Image):
    "Base class for image file format handlers."
    def __init__(self, fp=None, filename=None):
        # fp may be a path (opened here, owned exclusively) or an
        # already-open stream (not owned unless a subclass says otherwise).
        Image.Image.__init__(self)
        self._min_frame = 0
        self.custom_mimetype = None
        # List of (decoder_name, extents, offset, args) tile descriptors,
        # filled in by the subclass's _open().
        self.tile = None
        self.readonly = 1 # until we know better
        self.decoderconfig = ()
        self.decodermaxblock = MAXBLOCK
        if isPath(fp):
            # filename
            self.fp = open(fp, "rb")
            self.filename = fp
            self._exclusive_fp = True
        else:
            # stream
            self.fp = fp
            self.filename = filename
            # can be overridden
            self._exclusive_fp = None
        try:
            # Subclass hook: parse the header and populate mode/size/tile.
            self._open()
        except (IndexError, # end of data
                TypeError, # end of data (ord)
                KeyError, # unsupported mode
                EOFError, # got header but not the first frame
                struct.error) as v:
            # close the file only if we have opened it this constructor
            if self._exclusive_fp:
                self.fp.close()
            raise SyntaxError(v)
        if not self.mode or self.size[0] <= 0:
            raise SyntaxError("not identified by this driver")
    def draft(self, mode, size):
        "Set draft mode"
        pass
    def get_format_mimetype(self):
        # Subclass-supplied MIME type wins over the registry lookup.
        if self.format is None:
            return
        return self.custom_mimetype or Image.MIME.get(self.format.upper())
    def verify(self):
        "Check file integrity"
        # raise exception if something's wrong. must be called
        # directly after open, and closes file when finished.
        if self._exclusive_fp:
            self.fp.close()
        self.fp = None
    def load(self):
        "Load image data based on tile list"
        pixel = Image.Image.load(self)
        if self.tile is None:
            raise IOError("cannot load this image")
        if not self.tile:
            # Nothing left to decode (already loaded).
            return pixel
        self.map = None
        # Memory mapping is only attempted for single-tile, file-backed images.
        use_mmap = self.filename and len(self.tile) == 1
        # As of pypy 2.1.0, memory mapping was failing here.
        use_mmap = use_mmap and not hasattr(sys, 'pypy_version_info')
        readonly = 0
        # look for read/seek overrides
        try:
            read = self.load_read
            # don't use mmap if there are custom read/seek functions
            use_mmap = False
        except AttributeError:
            read = self.fp.read
        try:
            seek = self.load_seek
            use_mmap = False
        except AttributeError:
            seek = self.fp.seek
        if use_mmap:
            # try memory mapping
            decoder_name, extents, offset, args = self.tile[0]
            if decoder_name == "raw" and len(args) >= 3 and \
               args[0] == self.mode and \
               args[0] in Image._MAPMODES:
                try:
                    if hasattr(Image.core, "map"):
                        # use built-in mapper WIN32 only
                        self.map = Image.core.map(self.filename)
                        self.map.seek(offset)
                        self.im = self.map.readimage(
                            self.mode, self.size, args[1], args[2]
                        )
                    else:
                        # use mmap, if possible
                        import mmap
                        # NOTE(review): opened in text mode "r"; only the
                        # fileno() is used so mmap still works, but "rb"
                        # would be the conventional choice -- confirm.
                        with open(self.filename, "r") as fp:
                            self.map = mmap.mmap(fp.fileno(), 0,
                                                 access=mmap.ACCESS_READ)
                        self.im = Image.core.map_buffer(
                            self.map, self.size, decoder_name, extents,
                            offset, args)
                    readonly = 1
                    # After trashing self.im,
                    # we might need to reload the palette data.
                    if self.palette:
                        self.palette.dirty = 1
                except (AttributeError, EnvironmentError, ImportError):
                    # Mapping failed; fall through to the decoder path below.
                    self.map = None
        self.load_prepare()
        err_code = -3 # initialize to unknown error
        if not self.map:
            # sort tiles in file order
            self.tile.sort(key=_tilesort)
            try:
                # FIXME: This is a hack to handle TIFF's JpegTables tag.
                prefix = self.tile_prefix
            except AttributeError:
                prefix = b""
            for decoder_name, extents, offset, args in self.tile:
                decoder = Image._getdecoder(self.mode, decoder_name,
                                            args, self.decoderconfig)
                try:
                    seek(offset)
                    decoder.setimage(self.im, extents)
                    if decoder.pulls_fd:
                        # Decoder reads straight from the file object.
                        decoder.setfd(self.fp)
                        status, err_code = decoder.decode(b"")
                    else:
                        # Feed the decoder incrementally in decodermaxblock
                        # sized chunks until it reports completion (n < 0).
                        b = prefix
                        while True:
                            try:
                                s = read(self.decodermaxblock)
                            except (IndexError, struct.error):
                                # truncated png/gif
                                if LOAD_TRUNCATED_IMAGES:
                                    break
                                else:
                                    raise IOError("image file is truncated")
                            if not s: # truncated jpeg
                                if LOAD_TRUNCATED_IMAGES:
                                    break
                                else:
                                    self.tile = []
                                    raise IOError("image file is truncated "
                                                  "(%d bytes not processed)" %
                                                  len(b))
                            b = b + s
                            n, err_code = decoder.decode(b)
                            if n < 0:
                                break
                            b = b[n:]
                finally:
                    # Need to cleanup here to prevent leaks
                    decoder.cleanup()
        self.tile = []
        self.readonly = readonly
        self.load_end()
        if self._exclusive_fp and self._close_exclusive_fp_after_loading:
            self.fp.close()
            self.fp = None
        if not self.map and not LOAD_TRUNCATED_IMAGES and err_code < 0:
            # still raised if decoder fails to return anything
            raise_ioerror(err_code)
        return Image.Image.load(self)
    def load_prepare(self):
        # create image memory if necessary
        if not self.im or\
           self.im.mode != self.mode or self.im.size != self.size:
            self.im = Image.core.new(self.mode, self.size)
        # create palette (optional)
        if self.mode == "P":
            Image.Image.load(self)
    def load_end(self):
        # may be overridden
        pass
    # may be defined for contained formats
    # def load_seek(self, pos):
    #     pass
    # may be defined for blocked formats (e.g. PNG)
    # def load_read(self, bytes):
    #     pass
    def _seek_check(self, frame):
        # Shared bounds check for subclasses' seek(); returns True when an
        # actual seek is needed (i.e. frame differs from the current one).
        if (frame < self._min_frame or
                # Only check upper limit on frames if additional seek operations
                # are not required to do so
                (not (hasattr(self, "_n_frames") and self._n_frames is None) and
                 frame >= self.n_frames+self._min_frame)):
            raise EOFError("attempt to seek outside sequence")
        return self.tell() != frame
class StubImageFile(ImageFile):
    """Base class for stub image loaders.

    A stub loader can identify files of a particular format, but leaves
    the actual decoding to external code obtained through the
    :py:meth:`_load` hook.
    """

    def _open(self):
        raise NotImplementedError(
            "StubImageFile subclass must implement _open"
        )

    def _load(self):
        """(Hook) Locate and return the external image loader."""
        raise NotImplementedError(
            "StubImageFile subclass must implement _load"
        )

    def load(self):
        """Hand this image to the external loader and adopt its result."""
        loader = self._load()
        if loader is None:
            raise IOError("cannot find loader for this %s file" % self.format)
        image = loader.load(self)
        assert image is not None
        # become the other object (!)
        self.__class__ = image.__class__
        self.__dict__ = image.__dict__
class Parser(object):
    """
    Incremental image parser. This class implements the standard
    feed/close consumer interface.
    """
    # Parser state; instances are single-use (see reset()).
    incremental = None
    image = None
    data = None
    decoder = None
    offset = 0
    finished = 0
    def reset(self):
        """
        (Consumer) Reset the parser. Note that you can only call this
        method immediately after you've created a parser; parser
        instances cannot be reused.
        """
        assert self.data is None, "cannot reuse parsers"
    def feed(self, data):
        """
        (Consumer) Feed data to the parser.
        :param data: A string buffer.
        :exception IOError: If the parser failed to parse the image file.
        """
        # collect data
        if self.finished:
            return
        if self.data is None:
            self.data = data
        else:
            self.data = self.data + data
        # parse what we have
        if self.decoder:
            if self.offset > 0:
                # skip header
                skip = min(len(self.data), self.offset)
                self.data = self.data[skip:]
                self.offset = self.offset - skip
                if self.offset > 0 or not self.data:
                    # Still inside the header, or nothing left to decode.
                    return
            n, e = self.decoder.decode(self.data)
            if n < 0:
                # end of stream
                self.data = None
                self.finished = 1
                if e < 0:
                    # decoding error
                    self.image = None
                    raise_ioerror(e)
                else:
                    # end of image
                    return
            self.data = self.data[n:]
        elif self.image:
            # if we end up here with no decoder, this file cannot
            # be incrementally parsed. wait until we've gotten all
            # available data
            pass
        else:
            # attempt to open this file
            try:
                with io.BytesIO(self.data) as fp:
                    im = Image.open(fp)
            except IOError:
                # traceback.print_exc()
                pass # not enough data
            else:
                flag = hasattr(im, "load_seek") or hasattr(im, "load_read")
                if flag or len(im.tile) != 1:
                    # custom load code, or multiple tiles
                    # NOTE(review): this sets 'decode', not 'decoder'; the
                    # attribute appears unused elsewhere in the class --
                    # possibly meant to be self.decoder. Confirm.
                    self.decode = None
                else:
                    # initialize decoder
                    im.load_prepare()
                    d, e, o, a = im.tile[0]
                    im.tile = []
                    self.decoder = Image._getdecoder(
                        im.mode, d, a, im.decoderconfig
                    )
                    self.decoder.setimage(im.im, e)
                    # calculate decoder offset
                    self.offset = o
                    if self.offset <= len(self.data):
                        self.data = self.data[self.offset:]
                        self.offset = 0
                self.image = im
    def __enter__(self):
        return self
    def __exit__(self, *args):
        self.close()
    def close(self):
        """
        (Consumer) Close the stream.
        :returns: An image object.
        :exception IOError: If the parser failed to parse the image file either
                            because it cannot be identified or cannot be
                            decoded.
        """
        # finish decoding
        if self.decoder:
            # get rid of what's left in the buffers
            self.feed(b"")
            self.data = self.decoder = None
            if not self.finished:
                raise IOError("image was incomplete")
        if not self.image:
            raise IOError("cannot parse this image")
        if self.data:
            # incremental parsing not possible; reopen the file
            # now that we have all data
            # NOTE(review): if Image.open raises here, the finally clause
            # still calls load() on the previous self.image -- confirm.
            with io.BytesIO(self.data) as fp:
                try:
                    self.image = Image.open(fp)
                finally:
                    self.image.load()
        return self.image
# --------------------------------------------------------------------
def _save(im, fp, tile, bufsize=0):
    """Helper to save image based on tile list
    :param im: Image object.
    :param fp: File object.
    :param tile: Tile list.
    :param bufsize: Optional buffer size
    """
    im.load()
    if not hasattr(im, "encoderconfig"):
        im.encoderconfig = ()
    tile.sort(key=_tilesort)
    # FIXME: make MAXBLOCK a configuration parameter
    # It would be great if we could have the encoder specify what it needs
    # But, it would need at least the image size in most cases. RawEncode is
    # a tricky case.
    bufsize = max(MAXBLOCK, bufsize, im.size[0] * 4) # see RawEncode.c
    if fp == sys.stdout:
        fp.flush()
        return
    try:
        # A real OS-level file descriptor lets the encoder write directly.
        fh = fp.fileno()
        fp.flush()
    except (AttributeError, io.UnsupportedOperation):
        # compress to Python file-compatible object
        for e, b, o, a in tile:
            e = Image._getencoder(im.mode, e, a, im.encoderconfig)
            if o > 0:
                fp.seek(o, 0)
            e.setimage(im.im, b)
            if e.pushes_fd:
                e.setfd(fp)
                l, s = e.encode_to_pyfd()
            else:
                # Pull encoded chunks until the encoder reports a final
                # status (s != 0; negative means error, checked below).
                while True:
                    l, s, d = e.encode(bufsize)
                    fp.write(d)
                    if s:
                        break
            if s < 0:
                raise IOError("encoder error %d when writing image file" % s)
            e.cleanup()
    else:
        # slight speedup: compress to real file object
        for e, b, o, a in tile:
            e = Image._getencoder(im.mode, e, a, im.encoderconfig)
            if o > 0:
                fp.seek(o, 0)
            e.setimage(im.im, b)
            if e.pushes_fd:
                e.setfd(fp)
                l, s = e.encode_to_pyfd()
            else:
                s = e.encode_to_file(fh, bufsize)
            if s < 0:
                raise IOError("encoder error %d when writing image file" % s)
            e.cleanup()
    if hasattr(fp, "flush"):
        fp.flush()
def _safe_read(fp, size):
"""
Reads large blocks in a safe way. Unlike fp.read(n), this function
doesn't trust the user. If the requested size is larger than
SAFEBLOCK, the file is read block by block.
:param fp: File handle. Must implement a <b>read</b> method.
:param size: Number of bytes to read.
:returns: A string containing up to <i>size</i> bytes of data.
"""
if size <= 0:
return b""
if size <= SAFEBLOCK:
return fp.read(size)
data = []
while size > 0:
block = fp.read(min(size, SAFEBLOCK))
if not block:
break
data.append(block)
size -= len(block)
return b"".join(data)
class PyCodecState(object):
    """Mutable tile geometry (offset and size) for a Python codec."""

    def __init__(self):
        # Start with an empty region anchored at the origin.
        self.xsize = 0
        self.ysize = 0
        self.xoff = 0
        self.yoff = 0

    def extents(self):
        """Return the tile rectangle as an (x0, y0, x1, y1) tuple."""
        x0, y0 = self.xoff, self.yoff
        return (x0, y0, x0 + self.xsize, y0 + self.ysize)
class PyDecoder(object):
    """
    Python implementation of a format decoder. Override this class and
    add the decoding logic in the `decode` method.
    See :ref:`Writing Your Own File Decoder in Python<file-decoders-py>`
    """
    # Subclasses set this True when they read directly from self.fd
    # instead of being fed buffers (see pulls_fd / ImageFile.load()).
    _pulls_fd = False
    def __init__(self, mode, *args):
        self.im = None
        self.state = PyCodecState()
        self.fd = None
        self.mode = mode
        self.init(args)
    def init(self, args):
        """
        Override to perform decoder specific initialization
        :param args: Array of args items from the tile entry
        :returns: None
        """
        self.args = args
    @property
    def pulls_fd(self):
        return self._pulls_fd
    def decode(self, buffer):
        """
        Override to perform the decoding process.
        :param buffer: A bytes object with the data to be decoded.
        If `handles_eof` is set, then `buffer` will be empty and `self.fd`
        will be set.
        :returns: A tuple of (bytes consumed, errcode).
        If finished with decoding return <0 for the bytes consumed.
        Err codes are from `ERRORS`
        """
        raise NotImplementedError()
    def cleanup(self):
        """
        Override to perform decoder specific cleanup
        :returns: None
        """
        pass
    def setfd(self, fd):
        """
        Called from ImageFile to set the python file-like object
        :param fd: A python file-like object
        :returns: None
        """
        self.fd = fd
    def setimage(self, im, extents=None):
        """
        Called from ImageFile to set the core output image for the decoder
        :param im: A core image object
        :param extents: a 4 tuple of (x0, y0, x1, y1) defining the rectangle
        for this tile
        :returns: None
        """
        # following c code
        self.im = im
        if extents:
            (x0, y0, x1, y1) = extents
        else:
            (x0, y0, x1, y1) = (0, 0, 0, 0)
        # An all-zero x-range means "the whole image".
        if x0 == 0 and x1 == 0:
            self.state.xsize, self.state.ysize = self.im.size
        else:
            self.state.xoff = x0
            self.state.yoff = y0
            self.state.xsize = x1 - x0
            self.state.ysize = y1 - y0
        # NOTE: this also rejects zero-sized tiles, not just negative ones.
        if self.state.xsize <= 0 or self.state.ysize <= 0:
            raise ValueError("Size cannot be negative")
        if (self.state.xsize + self.state.xoff > self.im.size[0] or
                self.state.ysize + self.state.yoff > self.im.size[1]):
            raise ValueError("Tile cannot extend outside image")
    def set_as_raw(self, data, rawmode=None):
        """
        Convenience method to set the internal image from a stream of raw data
        :param data: Bytes to be set
        :param rawmode: The rawmode to be used for the decoder.
        If not specified, it will default to the mode of the image
        :returns: None
        """
        if not rawmode:
            rawmode = self.mode
        # NOTE(review): (rawmode) is not a tuple -- presumably
        # Image._getdecoder normalizes a bare args value; confirm.
        d = Image._getdecoder(self.mode, 'raw', (rawmode))
        d.setimage(self.im, self.state.extents())
        s = d.decode(data)
        if s[0] >= 0:
            raise ValueError("not enough image data")
        if s[1] != 0:
            raise ValueError("cannot decode image data")
| |
# -*- coding: utf-8 -*-
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# google-cloud-access-approval documentation build configuration file
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath(".."))
# For plugins that can not read conf.py.
# See also: https://github.com/docascode/sphinx-docfx-yaml/issues/85
sys.path.insert(0, os.path.abspath("."))
__version__ = ""
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = "1.5.5"
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.autosummary",
"sphinx.ext.intersphinx",
"sphinx.ext.coverage",
"sphinx.ext.doctest",
"sphinx.ext.napoleon",
"sphinx.ext.todo",
"sphinx.ext.viewcode",
"recommonmark",
]
# autodoc/autosummary flags
autoclass_content = "both"
autodoc_default_options = {"members": True}
autosummary_generate = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = [".rst", ".md"]
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The root toctree document.
root_doc = "index"
# General information about the project.
project = "google-cloud-access-approval"
copyright = "2019, Google"
author = "Google APIs"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The full version, including alpha/beta/rc tags.
release = __version__
# The short X.Y version.
version = ".".join(release.split(".")[0:2])
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
# Sphinx 5+ no longer accepts None here (it warns and falls back to "en"),
# so declare the default language explicitly.
language = "en"
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = [
"_build",
"**/.nox/**/*",
"samples/AUTHORING_GUIDE.md",
"samples/CONTRIBUTING.md",
"samples/snippets/README.rst",
]
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "alabaster"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
"description": "Google Cloud Client Libraries for google-cloud-access-approval",
"github_user": "googleapis",
"github_repo": "python-access-approval",
"github_banner": True,
"font_family": "'Roboto', Georgia, sans",
"head_font_family": "'Roboto', Georgia, serif",
"code_font_family": "'Roboto Mono', 'Consolas', monospace",
}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = "google-cloud-access-approval-doc"
# -- Options for warnings ------------------------------------------------------
suppress_warnings = [
# Temporarily suppress this to avoid "more than one target found for
# cross-reference" warning, which are intractable for us to avoid while in
# a mono-repo.
# See https://github.com/sphinx-doc/sphinx/blob
# /2a65ffeef5c107c19084fabdd706cdff3f52d93c/sphinx/domains/python.py#L843
"ref.python"
]
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(
root_doc,
"google-cloud-access-approval.tex",
"google-cloud-access-approval Documentation",
author,
"manual",
)
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(
root_doc,
"google-cloud-access-approval",
"google-cloud-access-approval Documentation",
[author],
1,
)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
root_doc,
"google-cloud-access-approval",
"google-cloud-access-approval Documentation",
author,
"google-cloud-access-approval",
"google-cloud-access-approval Library",
"APIs",
)
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
"python": ("https://python.readthedocs.org/en/latest/", None),
"google-auth": ("https://googleapis.dev/python/google-auth/latest/", None),
"google.api_core": ("https://googleapis.dev/python/google-api-core/latest/", None,),
"grpc": ("https://grpc.github.io/grpc/python/", None),
"proto-plus": ("https://proto-plus-python.readthedocs.io/en/latest/", None),
"protobuf": ("https://googleapis.dev/python/protobuf/latest/", None),
}
# Napoleon settings
napoleon_google_docstring = True
napoleon_numpy_docstring = True
napoleon_include_private_with_doc = False
napoleon_include_special_with_doc = True
napoleon_use_admonition_for_examples = False
napoleon_use_admonition_for_notes = False
napoleon_use_admonition_for_references = False
napoleon_use_ivar = False
napoleon_use_param = True
napoleon_use_rtype = True
| |
import asyncore
import unittest
import select
import os
import socket
import sys
import time
import errno
import struct
import threading
from test import support
from io import BytesIO
# This suite exercises no representative hot paths, so PGO builds skip it.
if support.PGO:
    raise unittest.SkipTest("test is not helpful for PGO")
# Seconds to wait when joining helper threads at the end of a test.
TIMEOUT = 3
# Unix-domain sockets are optional (absent on some platforms, e.g. Windows).
HAS_UNIX_SOCKETS = hasattr(socket, 'AF_UNIX')
class dummysocket:
    """Stand-in for a real socket: records whether close() was called."""

    def __init__(self):
        # Starts out open; close() flips this flag.
        self.closed = False

    def fileno(self):
        # Fixed fake descriptor number; never handed to the OS.
        return 42

    def close(self):
        self.closed = True
class dummychannel:
    """Channel stand-in that owns a dummysocket; close() is delegated."""

    def __init__(self):
        self.socket = dummysocket()

    def close(self):
        # Forward the close to the wrapped fake socket.
        self.socket.close()
class exitingdummy:
    """Channel stub whose every event handler raises asyncore.ExitNow."""
    def __init__(self):
        pass
    def handle_read_event(self):
        raise asyncore.ExitNow()
    # All event entry points share the same raising implementation.
    handle_write_event = handle_read_event
    handle_close = handle_read_event
    handle_expt_event = handle_read_event
class crashingdummy:
    """Channel stub whose handlers raise a plain Exception; records whether
    asyncore routed the failure to handle_error()."""
    def __init__(self):
        self.error_handled = False
    def handle_read_event(self):
        raise Exception()
    # All event entry points share the same raising implementation.
    handle_write_event = handle_read_event
    handle_close = handle_read_event
    handle_expt_event = handle_read_event
    def handle_error(self):
        # asyncore calls this when a handler raises a non-ExitNow exception.
        self.error_handled = True
# used when testing senders; just collects what it gets until newline is sent
def capture_server(evt, buf, serv):
    """Accept one connection on *serv* and copy its payload into *buf*.

    Performs at most 200 reads (or roughly 3 seconds of polling),
    stripping newline bytes from what is written to *buf*; a newline in
    the stream terminates the capture.  *evt* is set once the server
    socket has been closed, whatever happened before.
    """
    try:
        serv.listen()
        conn, _ = serv.accept()
    except socket.timeout:
        pass
    else:
        deadline = time.monotonic() + 3.0
        remaining = 200
        while remaining > 0 and time.monotonic() < deadline:
            readable = select.select([conn], [], [], 0.1)[0]
            if readable:
                remaining -= 1
                data = conn.recv(10)
                # keep everything except for the newline terminator
                buf.write(data.replace(b'\n', b''))
                if b'\n' in data:
                    break
            time.sleep(0.01)
        conn.close()
    finally:
        serv.close()
        evt.set()
def bind_af_aware(sock, addr):
    """Bind *sock* to *addr*, accounting for AF_UNIX path semantics."""
    if HAS_UNIX_SOCKETS and sock.family == socket.AF_UNIX:
        # A stale socket file would make bind() fail; remove it first.
        support.unlink(addr)
        support.bind_unix_socket(sock, addr)
        return
    sock.bind(addr)
class HelperFunctionTests(unittest.TestCase):
    """Tests for asyncore's module-level helpers: read(), write(),
    _exception(), readwrite(), close_all() and compact_traceback()."""
    def test_readwriteexc(self):
        # Check exception handling behavior of read, write and _exception
        # check that ExitNow exceptions in the object handler method
        # bubbles all the way up through asyncore read/write/_exception calls
        tr1 = exitingdummy()
        self.assertRaises(asyncore.ExitNow, asyncore.read, tr1)
        self.assertRaises(asyncore.ExitNow, asyncore.write, tr1)
        self.assertRaises(asyncore.ExitNow, asyncore._exception, tr1)
        # check that an exception other than ExitNow in the object handler
        # method causes the handle_error method to get called
        tr2 = crashingdummy()
        asyncore.read(tr2)
        self.assertEqual(tr2.error_handled, True)
        tr2 = crashingdummy()
        asyncore.write(tr2)
        self.assertEqual(tr2.error_handled, True)
        tr2 = crashingdummy()
        asyncore._exception(tr2)
        self.assertEqual(tr2.error_handled, True)
    # asyncore.readwrite uses constants in the select module that
    # are not present in Windows systems (see this thread:
    # http://mail.python.org/pipermail/python-list/2001-October/109973.html)
    # These constants should be present as long as poll is available
    @unittest.skipUnless(hasattr(select, 'poll'), 'select.poll required')
    def test_readwrite(self):
        # Check that correct methods are called by readwrite()
        attributes = ('read', 'expt', 'write', 'closed', 'error_handled')
        expected = (
            (select.POLLIN, 'read'),
            (select.POLLPRI, 'expt'),
            (select.POLLOUT, 'write'),
            (select.POLLERR, 'closed'),
            (select.POLLHUP, 'closed'),
            (select.POLLNVAL, 'closed'),
            )
        class testobj:
            # Records which event handler readwrite() dispatched to.
            def __init__(self):
                self.read = False
                self.write = False
                self.closed = False
                self.expt = False
                self.error_handled = False
            def handle_read_event(self):
                self.read = True
            def handle_write_event(self):
                self.write = True
            def handle_close(self):
                self.closed = True
            def handle_expt_event(self):
                self.expt = True
            def handle_error(self):
                self.error_handled = True
        for flag, expectedattr in expected:
            tobj = testobj()
            self.assertEqual(getattr(tobj, expectedattr), False)
            asyncore.readwrite(tobj, flag)
            # Only the attribute modified by the routine we expect to be
            # called should be True.
            for attr in attributes:
                self.assertEqual(getattr(tobj, attr), attr==expectedattr)
            # check that ExitNow exceptions in the object handler method
            # bubbles all the way up through asyncore readwrite call
            tr1 = exitingdummy()
            self.assertRaises(asyncore.ExitNow, asyncore.readwrite, tr1, flag)
            # check that an exception other than ExitNow in the object handler
            # method causes the handle_error method to get called
            tr2 = crashingdummy()
            self.assertEqual(tr2.error_handled, False)
            asyncore.readwrite(tr2, flag)
            self.assertEqual(tr2.error_handled, True)
    def test_closeall(self):
        self.closeall_check(False)
    def test_closeall_default(self):
        self.closeall_check(True)
    def closeall_check(self, usedefault):
        # Check that close_all() closes everything in a given map
        l = []
        testmap = {}
        for i in range(10):
            c = dummychannel()
            l.append(c)
            self.assertEqual(c.socket.closed, False)
            testmap[i] = c
        if usedefault:
            # Temporarily swap in our map as asyncore's global socket_map.
            socketmap = asyncore.socket_map
            try:
                asyncore.socket_map = testmap
                asyncore.close_all()
            finally:
                testmap, asyncore.socket_map = asyncore.socket_map, socketmap
        else:
            asyncore.close_all(testmap)
        self.assertEqual(len(testmap), 0)
        for c in l:
            self.assertEqual(c.socket.closed, True)
    def test_compact_traceback(self):
        try:
            raise Exception("I don't like spam!")
        except:
            real_t, real_v, real_tb = sys.exc_info()
            r = asyncore.compact_traceback()
        else:
            self.fail("Expected exception")
        (f, function, line), t, v, info = r
        self.assertEqual(os.path.split(f)[-1], 'test_asyncore.py')
        self.assertEqual(function, 'test_compact_traceback')
        self.assertEqual(t, real_t)
        self.assertEqual(v, real_v)
        self.assertEqual(info, '[%s|%s|%s]' % (f, function, line))
class DispatcherTests(unittest.TestCase):
    """Tests for the plain asyncore.dispatcher base class (defaults,
    repr, logging and unhandled-event warnings)."""
    def setUp(self):
        pass
    def tearDown(self):
        # Drop any channels a test left behind in the global socket map.
        asyncore.close_all()
    def test_basic(self):
        d = asyncore.dispatcher()
        self.assertEqual(d.readable(), True)
        self.assertEqual(d.writable(), True)
    def test_repr(self):
        d = asyncore.dispatcher()
        self.assertEqual(repr(d), '<asyncore.dispatcher at %#x>' % id(d))
    def test_log(self):
        d = asyncore.dispatcher()
        # capture output of dispatcher.log() (to stderr)
        l1 = "Lovely spam! Wonderful spam!"
        l2 = "I don't like spam!"
        with support.captured_stderr() as stderr:
            d.log(l1)
            d.log(l2)
        lines = stderr.getvalue().splitlines()
        self.assertEqual(lines, ['log: %s' % l1, 'log: %s' % l2])
    def test_log_info(self):
        d = asyncore.dispatcher()
        # capture output of dispatcher.log_info() (to stdout via print)
        l1 = "Have you got anything without spam?"
        l2 = "Why can't she have egg bacon spam and sausage?"
        l3 = "THAT'S got spam in it!"
        with support.captured_stdout() as stdout:
            d.log_info(l1, 'EGGS')
            d.log_info(l2)
            d.log_info(l3, 'SPAM')
        lines = stdout.getvalue().splitlines()
        expected = ['EGGS: %s' % l1, 'info: %s' % l2, 'SPAM: %s' % l3]
        self.assertEqual(lines, expected)
    def test_unhandled(self):
        d = asyncore.dispatcher()
        # Empty ignore list so every unhandled event produces a warning.
        d.ignore_log_types = ()
        # capture output of dispatcher.log_info() (to stdout via print)
        with support.captured_stdout() as stdout:
            d.handle_expt()
            d.handle_read()
            d.handle_write()
            d.handle_connect()
        lines = stdout.getvalue().splitlines()
        expected = ['warning: unhandled incoming priority event',
                    'warning: unhandled read event',
                    'warning: unhandled write event',
                    'warning: unhandled connect event']
        self.assertEqual(lines, expected)
    def test_strerror(self):
        # refers to bug #8573
        err = asyncore._strerror(errno.EPERM)
        if hasattr(os, 'strerror'):
            self.assertEqual(err, os.strerror(errno.EPERM))
        # An unknown errno must still yield a non-empty message.
        err = asyncore._strerror(-1)
        self.assertTrue(err != "")
class dispatcherwithsend_noread(asyncore.dispatcher_with_send):
    """dispatcher_with_send variant that never reads and silently accepts
    the connect event (BaseTestHandler-style strictness not wanted here)."""
    def readable(self):
        return False
    def handle_connect(self):
        pass
class DispatcherWithSendTests(unittest.TestCase):
    """End-to-end buffered-send test for asyncore.dispatcher_with_send,
    against a capture_server running in a background thread."""
    def setUp(self):
        pass
    def tearDown(self):
        asyncore.close_all()
    @support.reap_threads
    def test_send(self):
        evt = threading.Event()
        sock = socket.socket()
        sock.settimeout(3)
        port = support.bind_port(sock)
        cap = BytesIO()
        args = (evt, cap, sock)
        t = threading.Thread(target=capture_server, args=args)
        t.start()
        try:
            # wait a little longer for the server to initialize (it sometimes
            # refuses connections on slow machines without this wait)
            time.sleep(0.2)
            data = b"Suppose there isn't a 16-ton weight?"
            d = dispatcherwithsend_noread()
            d.create_socket()
            d.connect((support.HOST, port))
            # give time for socket to connect
            time.sleep(0.1)
            d.send(data)
            d.send(data)
            # Trailing newline tells capture_server the transfer is done.
            d.send(b'\n')
            # Pump the event loop until the outgoing buffer drains
            # (bounded at 1000 iterations so a failure cannot hang).
            n = 1000
            while d.out_buffer and n > 0:
                asyncore.poll()
                n -= 1
            evt.wait()
            self.assertEqual(cap.getvalue(), data*2)
        finally:
            support.join_thread(t, timeout=TIMEOUT)
@unittest.skipUnless(hasattr(asyncore, 'file_wrapper'),
                     'asyncore.file_wrapper required')
class FileWrapperTest(unittest.TestCase):
    """Tests for asyncore.file_wrapper / file_dispatcher (available on
    platforms that support wrapping raw file descriptors)."""
    def setUp(self):
        self.d = b"It's not dead, it's sleeping!"
        with open(support.TESTFN, 'wb') as file:
            file.write(self.d)
    def tearDown(self):
        support.unlink(support.TESTFN)
    def test_recv(self):
        fd = os.open(support.TESTFN, os.O_RDONLY)
        w = asyncore.file_wrapper(fd)
        os.close(fd)
        # file_wrapper dups the descriptor, so closing fd above is safe.
        self.assertNotEqual(w.fd, fd)
        self.assertNotEqual(w.fileno(), fd)
        self.assertEqual(w.recv(13), b"It's not dead")
        self.assertEqual(w.read(6), b", it's")
        w.close()
        self.assertRaises(OSError, w.read, 1)
    def test_send(self):
        d1 = b"Come again?"
        d2 = b"I want to buy some cheese."
        fd = os.open(support.TESTFN, os.O_WRONLY | os.O_APPEND)
        w = asyncore.file_wrapper(fd)
        os.close(fd)
        w.write(d1)
        w.send(d2)
        w.close()
        with open(support.TESTFN, 'rb') as file:
            self.assertEqual(file.read(), self.d + d1 + d2)
    @unittest.skipUnless(hasattr(asyncore, 'file_dispatcher'),
                         'asyncore.file_dispatcher required')
    def test_dispatcher(self):
        fd = os.open(support.TESTFN, os.O_RDONLY)
        data = []
        class FileDispatcher(asyncore.file_dispatcher):
            def handle_read(self):
                data.append(self.recv(29))
        s = FileDispatcher(fd)
        os.close(fd)
        asyncore.loop(timeout=0.01, use_poll=True, count=2)
        self.assertEqual(b"".join(data), self.d)
    def test_resource_warning(self):
        # Issue #11453
        fd = os.open(support.TESTFN, os.O_RDONLY)
        f = asyncore.file_wrapper(fd)
        os.close(fd)
        # Dropping the last reference must emit a ResourceWarning.
        with support.check_warnings(('', ResourceWarning)):
            f = None
            support.gc_collect()
    def test_close_twice(self):
        fd = os.open(support.TESTFN, os.O_RDONLY)
        f = asyncore.file_wrapper(fd)
        os.close(fd)
        os.close(f.fd)  # file_wrapper dupped fd
        with self.assertRaises(OSError):
            f.close()
        self.assertEqual(f.fd, -1)
        # calling close twice should not fail
        f.close()
class BaseTestHandler(asyncore.dispatcher):
    """Dispatcher base for API tests: any callback a test does not expect
    fails loudly, and ``flag`` records that the expected event fired."""
    def __init__(self, sock=None):
        asyncore.dispatcher.__init__(self, sock)
        # Set to True by subclasses when the awaited event occurs.
        self.flag = False
    def handle_accept(self):
        raise Exception("handle_accept not supposed to be called")
    def handle_accepted(self):
        raise Exception("handle_accepted not supposed to be called")
    def handle_connect(self):
        raise Exception("handle_connect not supposed to be called")
    def handle_expt(self):
        raise Exception("handle_expt not supposed to be called")
    def handle_close(self):
        raise Exception("handle_close not supposed to be called")
    def handle_error(self):
        # Re-raise instead of asyncore's default logging so tests fail.
        raise
class BaseServer(asyncore.dispatcher):
    """A server which listens on an address and dispatches the
    connection to a handler.
    """
    def __init__(self, family, addr, handler=BaseTestHandler):
        asyncore.dispatcher.__init__(self)
        self.create_socket(family)
        self.set_reuse_addr()
        bind_af_aware(self.socket, addr)
        self.listen(5)
        self.handler = handler
    @property
    def address(self):
        # The actual bound address (port 0 resolves to a real port).
        return self.socket.getsockname()
    def handle_accepted(self, sock, addr):
        # Wrap each accepted connection in a fresh handler instance.
        self.handler(sock)
    def handle_error(self):
        # Propagate instead of logging so test failures surface.
        raise
class BaseClient(BaseTestHandler):
    """Test client that connects to *address* at construction time and
    treats the connect event as expected (no-op)."""
    def __init__(self, family, address):
        BaseTestHandler.__init__(self)
        self.create_socket(family)
        self.connect(address)
    def handle_connect(self):
        pass
class BaseTestAPI:
    """Mixin exercising the full dispatcher callback API.  Concrete
    subclasses provide ``family``, ``addr`` and ``use_poll``."""
    def tearDown(self):
        asyncore.close_all(ignore_all=True)
    def loop_waiting_for_flag(self, instance, timeout=5):
        # Pump the event loop until instance.flag goes True, for at most
        # 100 iterations spread over roughly *timeout* seconds.
        timeout = float(timeout) / 100
        count = 100
        while asyncore.socket_map and count > 0:
            asyncore.loop(timeout=0.01, count=1, use_poll=self.use_poll)
            if instance.flag:
                return
            count -= 1
            time.sleep(timeout)
        self.fail("flag not set")
    def test_handle_connect(self):
        # make sure handle_connect is called on connect()
        class TestClient(BaseClient):
            def handle_connect(self):
                self.flag = True
        server = BaseServer(self.family, self.addr)
        client = TestClient(self.family, server.address)
        self.loop_waiting_for_flag(client)
    def test_handle_accept(self):
        # make sure handle_accept() is called when a client connects
        class TestListener(BaseTestHandler):
            def __init__(self, family, addr):
                BaseTestHandler.__init__(self)
                self.create_socket(family)
                bind_af_aware(self.socket, addr)
                self.listen(5)
                self.address = self.socket.getsockname()
            def handle_accept(self):
                self.flag = True
        server = TestListener(self.family, self.addr)
        client = BaseClient(self.family, server.address)
        self.loop_waiting_for_flag(server)
    def test_handle_accepted(self):
        # make sure handle_accepted() is called when a client connects
        class TestListener(BaseTestHandler):
            def __init__(self, family, addr):
                BaseTestHandler.__init__(self)
                self.create_socket(family)
                bind_af_aware(self.socket, addr)
                self.listen(5)
                self.address = self.socket.getsockname()
            def handle_accept(self):
                asyncore.dispatcher.handle_accept(self)
            def handle_accepted(self, sock, addr):
                sock.close()
                self.flag = True
        server = TestListener(self.family, self.addr)
        client = BaseClient(self.family, server.address)
        self.loop_waiting_for_flag(server)
    def test_handle_read(self):
        # make sure handle_read is called on data received
        class TestClient(BaseClient):
            def handle_read(self):
                self.flag = True
        class TestHandler(BaseTestHandler):
            def __init__(self, conn):
                BaseTestHandler.__init__(self, conn)
                self.send(b'x' * 1024)
        server = BaseServer(self.family, self.addr, TestHandler)
        client = TestClient(self.family, server.address)
        self.loop_waiting_for_flag(client)
    def test_handle_write(self):
        # make sure handle_write is called
        class TestClient(BaseClient):
            def handle_write(self):
                self.flag = True
        server = BaseServer(self.family, self.addr)
        client = TestClient(self.family, server.address)
        self.loop_waiting_for_flag(client)
    def test_handle_close(self):
        # make sure handle_close is called when the other end closes
        # the connection
        class TestClient(BaseClient):
            def handle_read(self):
                # in order to make handle_close be called we are supposed
                # to make at least one recv() call
                self.recv(1024)
            def handle_close(self):
                self.flag = True
                self.close()
        class TestHandler(BaseTestHandler):
            def __init__(self, conn):
                BaseTestHandler.__init__(self, conn)
                self.close()
        server = BaseServer(self.family, self.addr, TestHandler)
        client = TestClient(self.family, server.address)
        self.loop_waiting_for_flag(client)
    def test_handle_close_after_conn_broken(self):
        # Check that ECONNRESET/EPIPE is correctly handled (issues #5661 and
        # #11265).
        data = b'\0' * 128
        class TestClient(BaseClient):
            def handle_write(self):
                self.send(data)
            def handle_close(self):
                self.flag = True
                self.close()
            def handle_expt(self):
                self.flag = True
                self.close()
        class TestHandler(BaseTestHandler):
            def handle_read(self):
                self.recv(len(data))
                self.close()
            def writable(self):
                return False
        server = BaseServer(self.family, self.addr, TestHandler)
        client = TestClient(self.family, server.address)
        self.loop_waiting_for_flag(client)
    @unittest.skipIf(sys.platform.startswith("sunos"),
                     "OOB support is broken on Solaris")
    def test_handle_expt(self):
        # Make sure handle_expt is called on OOB data received.
        # Note: this might fail on some platforms as OOB data is
        # tenuously supported and rarely used.
        if HAS_UNIX_SOCKETS and self.family == socket.AF_UNIX:
            self.skipTest("Not applicable to AF_UNIX sockets.")
        if sys.platform == "darwin" and self.use_poll:
            self.skipTest("poll may fail on macOS; see issue #28087")
        class TestClient(BaseClient):
            def handle_expt(self):
                self.socket.recv(1024, socket.MSG_OOB)
                self.flag = True
        class TestHandler(BaseTestHandler):
            def __init__(self, conn):
                BaseTestHandler.__init__(self, conn)
                self.socket.send(bytes(chr(244), 'latin-1'), socket.MSG_OOB)
        server = BaseServer(self.family, self.addr, TestHandler)
        client = TestClient(self.family, server.address)
        self.loop_waiting_for_flag(client)
    def test_handle_error(self):
        class TestClient(BaseClient):
            def handle_write(self):
                # Deliberate ZeroDivisionError to trigger handle_error.
                1.0 / 0
            def handle_error(self):
                self.flag = True
                try:
                    raise
                except ZeroDivisionError:
                    pass
                else:
                    raise Exception("exception not raised")
        server = BaseServer(self.family, self.addr)
        client = TestClient(self.family, server.address)
        self.loop_waiting_for_flag(client)
    def test_connection_attributes(self):
        server = BaseServer(self.family, self.addr)
        client = BaseClient(self.family, server.address)
        # we start disconnected
        self.assertFalse(server.connected)
        self.assertTrue(server.accepting)
        # this can't be taken for granted across all platforms
        #self.assertFalse(client.connected)
        self.assertFalse(client.accepting)
        # execute some loops so that client connects to server
        asyncore.loop(timeout=0.01, use_poll=self.use_poll, count=100)
        self.assertFalse(server.connected)
        self.assertTrue(server.accepting)
        self.assertTrue(client.connected)
        self.assertFalse(client.accepting)
        # disconnect the client
        client.close()
        self.assertFalse(server.connected)
        self.assertTrue(server.accepting)
        self.assertFalse(client.connected)
        self.assertFalse(client.accepting)
        # stop serving
        server.close()
        self.assertFalse(server.connected)
        self.assertFalse(server.accepting)
    def test_create_socket(self):
        s = asyncore.dispatcher()
        s.create_socket(self.family)
        self.assertEqual(s.socket.type, socket.SOCK_STREAM)
        self.assertEqual(s.socket.family, self.family)
        self.assertEqual(s.socket.gettimeout(), 0)
        self.assertFalse(s.socket.get_inheritable())
    def test_bind(self):
        if HAS_UNIX_SOCKETS and self.family == socket.AF_UNIX:
            self.skipTest("Not applicable to AF_UNIX sockets.")
        s1 = asyncore.dispatcher()
        s1.create_socket(self.family)
        s1.bind(self.addr)
        s1.listen(5)
        port = s1.socket.getsockname()[1]
        s2 = asyncore.dispatcher()
        s2.create_socket(self.family)
        # EADDRINUSE indicates the socket was correctly bound
        self.assertRaises(OSError, s2.bind, (self.addr[0], port))
    def test_set_reuse_addr(self):
        if HAS_UNIX_SOCKETS and self.family == socket.AF_UNIX:
            self.skipTest("Not applicable to AF_UNIX sockets.")
        with socket.socket(self.family) as sock:
            try:
                sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
            except OSError:
                unittest.skip("SO_REUSEADDR not supported on this platform")
            else:
                # if SO_REUSEADDR succeeded for sock we expect asyncore
                # to do the same
                s = asyncore.dispatcher(socket.socket(self.family))
                self.assertFalse(s.socket.getsockopt(socket.SOL_SOCKET,
                                                     socket.SO_REUSEADDR))
                s.socket.close()
                s.create_socket(self.family)
                s.set_reuse_addr()
                self.assertTrue(s.socket.getsockopt(socket.SOL_SOCKET,
                                                     socket.SO_REUSEADDR))
    @support.reap_threads
    def test_quick_connect(self):
        # see: http://bugs.python.org/issue10340
        if self.family not in (socket.AF_INET, getattr(socket, "AF_INET6", object())):
            self.skipTest("test specific to AF_INET and AF_INET6")
        server = BaseServer(self.family, self.addr)
        # run the thread 500 ms: the socket should be connected in 200 ms
        t = threading.Thread(target=lambda: asyncore.loop(timeout=0.1,
                                                          count=5))
        t.start()
        try:
            with socket.socket(self.family, socket.SOCK_STREAM) as s:
                s.settimeout(.2)
                s.setsockopt(socket.SOL_SOCKET, socket.SO_LINGER,
                             struct.pack('ii', 1, 0))
                try:
                    s.connect(server.address)
                except OSError:
                    pass
        finally:
            support.join_thread(t, timeout=TIMEOUT)
class TestAPI_UseIPv4Sockets(BaseTestAPI):
    # IPv4 loopback; port 0 lets the OS pick a free port.
    family = socket.AF_INET
    addr = (support.HOST, 0)
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 support required')
class TestAPI_UseIPv6Sockets(BaseTestAPI):
    # IPv6 loopback; the whole class is skipped when the host lacks IPv6.
    family = socket.AF_INET6
    addr = (support.HOSTv6, 0)
@unittest.skipUnless(HAS_UNIX_SOCKETS, 'Unix sockets required')
class TestAPI_UseUnixSockets(BaseTestAPI):
    # Guard the attribute too: socket.AF_UNIX may not exist at all.
    if HAS_UNIX_SOCKETS:
        family = socket.AF_UNIX
    addr = support.TESTFN
    def tearDown(self):
        # Remove the socket file on top of the base-class cleanup.
        support.unlink(self.addr)
        BaseTestAPI.tearDown(self)
# Concrete parameterizations: each address family crossed with the
# select()- and poll()-based event loops.
class TestAPI_UseIPv4Select(TestAPI_UseIPv4Sockets, unittest.TestCase):
    use_poll = False
@unittest.skipUnless(hasattr(select, 'poll'), 'select.poll required')
class TestAPI_UseIPv4Poll(TestAPI_UseIPv4Sockets, unittest.TestCase):
    use_poll = True
class TestAPI_UseIPv6Select(TestAPI_UseIPv6Sockets, unittest.TestCase):
    use_poll = False
@unittest.skipUnless(hasattr(select, 'poll'), 'select.poll required')
class TestAPI_UseIPv6Poll(TestAPI_UseIPv6Sockets, unittest.TestCase):
    use_poll = True
class TestAPI_UseUnixSocketsSelect(TestAPI_UseUnixSockets, unittest.TestCase):
    use_poll = False
@unittest.skipUnless(hasattr(select, 'poll'), 'select.poll required')
class TestAPI_UseUnixSocketsPoll(TestAPI_UseUnixSockets, unittest.TestCase):
    use_poll = True
if __name__ == "__main__":
    unittest.main()
| |
'''
Copyright (C) 2016 Turi
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the LICENSE file for details.
'''
import os
import unittest
import tempfile
import shutil
import uuid
import sys as _sys
from .. import util as glutil
from .. import SFrame, SArray, SGraph, get_graphlab_object_type, get_runtime_config, set_runtime_config
from . import util
class UtilTests(unittest.TestCase):
    """Tests for graphlab utility helpers: archive detection, crossproduct,
    object-type detection, SFrame equality assertions and temp-file helpers.

    Note: ``assertEquals`` is a deprecated alias that was removed in
    Python 3.12; the standard ``assertEqual`` spelling is used throughout.
    """
    def test_archive_utils(self):
        # Arrange
        sf = SFrame([1, 2, 3, 4, 5])
        # Renamed from 'dir' to avoid shadowing the builtin.
        archive_dir = tempfile.mkdtemp(prefix='archive-tests')
        try:
            sf.save(archive_dir)
            # Act & Assert
            self.assertTrue(glutil.is_directory_archive(archive_dir))
            self.assertEqual(glutil.get_archive_type(archive_dir), 'sframe')
            self.assertFalse(glutil.is_directory_archive('/tmp'))
            self.assertRaises(TypeError, lambda: glutil.get_archive_type('/tmp'))
        finally:
            shutil.rmtree(archive_dir)
    def test_crossproduct(self):
        s = util.SFrameComparer()
        d = {'opt1': [1, 2, 3],
             'opt2': ['a', 'b']}
        actual = glutil.crossproduct(d)
        actual = actual.sort('opt1')
        expected = SFrame({'opt1': [1, 1, 2, 2, 3, 3],
                           'opt2': ['a', 'b', 'a', 'b', 'a', 'b']})
        # Check columns individually since there is no
        # guaranteed ordering among columns.
        for k in d.keys():
            s._assert_sarray_equal(actual[k],
                                   expected[k])
    def _validate_gl_object_type(self, obj, expected):
        # Round-trip *obj* through disk and check the detected archive type.
        with util.TempDirectory() as temp_dir:
            obj.save(temp_dir)
            t = get_graphlab_object_type(temp_dir)
            self.assertEqual(t, expected)
    def test_get_graphlab_object_type(self):
        sf = SFrame({"a":[1,2]})
        self._validate_gl_object_type(sf, 'sframe')
        sa = SArray([1,2])
        self._validate_gl_object_type(sa, 'sarray')
        # NOTE(review): 'd' is built but never used -- presumably it was
        # intended to seed the SGraph with edges; confirm intended behavior.
        d = SFrame(
            {"__src_id":[175343, 163607, 44041, 101370, 64892],
             "__dst_id":[1011, 7928, 7718, 12966, 11080]})
        g = SGraph()
        self._validate_gl_object_type(g, 'sgraph')
    def test_sframe_equals(self):
        """Exercise every knob of glutil._assert_sframe_equal."""
        # Empty SFrames should be equal
        sf_a = SFrame()
        sf_b = SFrame()
        glutil._assert_sframe_equal(sf_a, sf_b)
        the_data = [i for i in range(0,10)]
        sf = SFrame()
        sf['ints'] = SArray(data=the_data, dtype=int)
        sf['floats'] = SArray(data=the_data, dtype=float)
        sf['floats'] = sf['floats'] * .5
        sf['strings'] = SArray(data=the_data, dtype=str)
        sf['strings'] = sf['strings'].apply(lambda x: x+x+x)
        # Make sure these aren't pointing to the same SFrame
        sf_a = sf.filter_by([43], 'ints', exclude=True)
        sf_b = sf.filter_by([43], 'ints', exclude=True)
        glutil._assert_sframe_equal(sf_a, sf_b)
        # Difference in number of columns
        sf_a['extra'] = SArray(data=the_data)
        with self.assertRaises(AssertionError):
            glutil._assert_sframe_equal(sf_a, sf_b)
        del sf_a['extra']
        glutil._assert_sframe_equal(sf_a, sf_b)
        # Difference in number of rows
        with self.assertRaises(AssertionError):
            glutil._assert_sframe_equal(sf_a, sf_b[0:5])
        # Difference in types
        sf_a['diff_type'] = sf_a['ints'].astype(str)
        sf_b['diff_type'] = sf_b['ints']
        with self.assertRaises(AssertionError):
            glutil._assert_sframe_equal(sf_a, sf_b)
        del sf_a['diff_type']
        del sf_b['diff_type']
        glutil._assert_sframe_equal(sf_a, sf_b)
        # Difference in column name
        sf_a.rename({'strings':'string'})
        with self.assertRaises(AssertionError):
            glutil._assert_sframe_equal(sf_a, sf_b)
        glutil._assert_sframe_equal(sf_a, sf_b, check_column_names=False)
        sf_a.rename({'string':'strings'})
        glutil._assert_sframe_equal(sf_a, sf_b)
        sf_a.rename({'ints':'floats1'})
        sf_a.rename({'floats':'ints'})
        sf_a.rename({'floats1':'floats'})
        glutil._assert_sframe_equal(sf_a, sf_b, check_column_names=False)
        sf_a = sf.filter_by([43], 'ints', exclude=True)
        # Difference in column order
        sf_a.swap_columns('strings', 'ints')
        with self.assertRaises(AssertionError):
            glutil._assert_sframe_equal(sf_a, sf_b)
        glutil._assert_sframe_equal(sf_a, sf_b, check_column_order=False)
        sf_a.swap_columns('strings', 'ints')
        glutil._assert_sframe_equal(sf_a, sf_b)
        # Difference in row order
        sf_a = sf_a.append(sf[0:5])
        sf_b = sf[0:5].append(sf_b)
        with self.assertRaises(AssertionError):
            glutil._assert_sframe_equal(sf_a, sf_b)
        glutil._assert_sframe_equal(sf_a, sf_b, check_row_order=False)
        # Difference in column order AND row order
        sf_a.swap_columns('floats', 'strings')
        with self.assertRaises(AssertionError):
            glutil._assert_sframe_equal(sf_a, sf_b)
        glutil._assert_sframe_equal(sf_a, sf_b, check_column_order=False, check_row_order=False)
        # Column order, row order, names
        sf_a.rename({'floats':'foo','strings':'bar','ints':'baz'})
        with self.assertRaises(AssertionError):
            glutil._assert_sframe_equal(sf_a, sf_b)
        # Illegal stuff
        with self.assertRaises(ValueError):
            glutil._assert_sframe_equal(sf_a, sf_b, check_column_names=False, check_column_order=False)
        with self.assertRaises(ValueError):
            glutil._assert_sframe_equal(sf_a, sf_b, check_column_names=False, check_column_order=False, check_row_order=False)
        with self.assertRaises(TypeError):
            glutil._assert_sframe_equal(sf_b['floats'], sf_a['foo'])
    def test_get_temp_file_location(self):
        from ..util import _get_temp_file_location
        from ..util import _convert_slashes
        location = _get_temp_file_location()
        self.assertTrue(os.path.isdir(location))
        tmp = tempfile.mkdtemp(prefix='test_gl_util')
        default_tmp = get_runtime_config()['GRAPHLAB_CACHE_FILE_LOCATIONS']
        try:
            # Redirect the cache location and confirm the helper honors it.
            set_runtime_config('GRAPHLAB_CACHE_FILE_LOCATIONS', tmp)
            location = _convert_slashes(_get_temp_file_location())
            self.assertTrue(location.startswith(_convert_slashes(tmp)))
        finally:
            shutil.rmtree(tmp)
            set_runtime_config('GRAPHLAB_CACHE_FILE_LOCATIONS', default_tmp)
    def test_make_temp_directory(self):
        from ..util import _make_temp_directory, _get_temp_file_location
        tmp_root = _get_temp_file_location()
        location = _make_temp_directory(prefix=None)
        try:
            self.assertTrue(os.path.isdir(location))
            self.assertTrue(location.startswith(tmp_root))
        finally:
            shutil.rmtree(location)
        prefix = 'abc_'
        location = _make_temp_directory(prefix=prefix)
        try:
            self.assertTrue(os.path.isdir(location))
            self.assertTrue(location.startswith(tmp_root))
            self.assertTrue(os.path.basename(location).startswith(prefix))
        finally:
            shutil.rmtree(location)
    def test_make_temp_filename(self):
        from ..util import _make_temp_filename, _get_temp_file_location
        tmp_root = _get_temp_file_location()
        # The helper must return a path without creating the file.
        location = _make_temp_filename(prefix=None)
        self.assertFalse(os.path.isfile(location))
        self.assertFalse(os.path.exists(location))
        self.assertTrue(location.startswith(tmp_root))
        self.assertTrue(isinstance(location, str))
        prefix = 'abc_'
        location = _make_temp_filename(prefix=prefix)
        self.assertFalse(os.path.isfile(location))
        self.assertFalse(os.path.exists(location))
        self.assertTrue(location.startswith(tmp_root))
        self.assertTrue(isinstance(location, str))
        self.assertTrue(os.path.basename(location).startswith(prefix))
@unittest.skipIf(_sys.platform == 'win32' or _sys.platform == 'darwin', 'Not supported on Windows or Mac')
class SubprocessExecTest(unittest.TestCase):
    """Tests for glutil.subprocess_exe: success/failure reporting, setup and
    teardown hooks, and log-file redirection.

    Note: ``assertEquals``/``assertNotEquals`` are deprecated aliases that
    were removed in Python 3.12; the standard spellings are used throughout.
    """
    def test_exec(self):
        # Plain successful command: stdout captured, no exception recorded.
        ret_dict = glutil.subprocess_exe('echo', ['hello_world'])
        self.assertEqual(ret_dict['success'], True)
        self.assertEqual(ret_dict['return_code'], 0)
        self.assertEqual(ret_dict['stdout'], 'hello_world\n')
        self.assertEqual(ret_dict['stderr'], '')
        self.assertEqual(ret_dict['python_exception'], None)
    def test_exec_with_setup_teardown(self):
        f = tempfile.NamedTemporaryFile(delete=False)
        def setup_fn():
            f.write(b'hello_world\n')
            f.close()
        def teardown_fn():
            os.remove(f.name)
        ret_dict = glutil.subprocess_exe('cat', [f.name], setup_fn, teardown_fn)
        self.assertEqual(ret_dict['success'], True)
        self.assertEqual(ret_dict['return_code'], 0)
        self.assertEqual(ret_dict['stdout'], 'hello_world\n')
        self.assertEqual(ret_dict['stderr'], '')
        self.assertEqual(ret_dict['python_exception'], None)
    def test_setup_exception(self):
        # A failing setup hook aborts the run and surfaces the exception.
        def setup_fn():
            raise RuntimeError('error')
        ret_dict = glutil.subprocess_exe('ls', [], setup_fn)
        self.assertEqual(ret_dict['success'], False)
        self.assertEqual(ret_dict['return_code'], None)
        self.assertEqual(ret_dict['stdout'], None)
        self.assertEqual(ret_dict['stderr'], None)
        self.assertEqual(type(ret_dict['python_exception']), RuntimeError)
        self.assertEqual(str(ret_dict['python_exception']), 'error')
    def test_process_exception(self):
        # 'cp' with no arguments exits non-zero and writes to stderr.
        ret_dict = glutil.subprocess_exe('cp', [])
        self.assertEqual(ret_dict['success'], False)
        self.assertNotEqual(ret_dict['return_code'], 0)
        self.assertEqual(ret_dict['stdout'], "")
        self.assertNotEqual(ret_dict['stderr'], "")
        self.assertEqual(ret_dict['python_exception'], None)
    def test_tear_down_exception(self):
        # A failing teardown must not fail the run; it is reported separately.
        def teardown_fn():
            raise RuntimeError('error')
        ret_dict = glutil.subprocess_exe('echo', [], teardown=teardown_fn)
        self.assertEqual(ret_dict['success'], True)
        self.assertEqual(ret_dict['return_code'], 0)
        self.assertEqual(ret_dict['stdout'], "\n")
        self.assertEqual(ret_dict['stderr'], "")
        self.assertEqual(ret_dict['python_exception'], None)
        self.assertEqual(type(ret_dict['_tear_down_exception']), RuntimeError)
        self.assertEqual(str(ret_dict['_tear_down_exception']), 'error')
    def test_exec_with_logfile(self):
        # With out_log_prefix, output goes to files and the dict holds paths.
        log_prefix = 'test-log-' + str(uuid.uuid4())
        expected_fout = log_prefix + '.stdout'
        expected_ferr = log_prefix + '.stderr'
        ret_dict = glutil.subprocess_exe('echo', ['hello_world'],
                                         out_log_prefix=log_prefix)
        self.assertEqual(ret_dict['success'], True)
        self.assertEqual(ret_dict['return_code'], 0)
        self.assertEqual(ret_dict['stdout'], expected_fout)
        self.assertEqual(ret_dict['stderr'], expected_ferr)
        self.assertEqual(ret_dict['python_exception'], None)
        with open(expected_fout) as f:
            stdout = f.read()
            self.assertEqual(stdout, 'hello_world\n')
        os.remove(expected_fout)
        os.remove(expected_ferr)
    def test_exec_with_logfile_exception(self):
        # An unwritable log destination is reported via _save_log_exception.
        log_prefix = 'unknown_protocol://test-log-' + str(uuid.uuid4())
        ret_dict = glutil.subprocess_exe('echo', ['hello_world'],
                                         out_log_prefix=log_prefix)
        self.assertEqual(ret_dict['success'], True)
        self.assertEqual(ret_dict['return_code'], 0)
        self.assertEqual(ret_dict['stdout'], None)
        self.assertEqual(ret_dict['stderr'], None)
        self.assertEqual(ret_dict['python_exception'], None)
        self.assertNotEqual(ret_dict['_save_log_exception'], None)
| |
# ===============================================================================
# Copyright 2015 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
from chaco.default_colormaps import color_map_name_dict
from traits.api import Int, Bool, Float, Property, on_trait_change, Enum, List, Dict, Button, Str, Color
from pychron.options.aux_plot import AuxPlot
from pychron.options.group.ideogram_group_options import IdeogramGroupOptions
from pychron.options.options import AgeOptions
from pychron.options.views.ideogram_views import VIEWS
from pychron.pychron_constants import NULL_STR, FONTS, SIZES, SIG_FIGS, MAIN, APPEARANCE, DISPLAY, GROUPS, STD_SIG_FIGS
class IdeogramAuxPlot(AuxPlot):
    """Aux-plot options for ideograms: user-facing names and the matching
    internal plot identifiers (the two lists are index-aligned)."""
    # Display names offered in the aux-plot selector.
    names = List([NULL_STR, 'Analysis Number Nonsorted', 'Analysis Number',
                  'Radiogenic 40Ar', 'K/Ca', 'K/Cl', 'Mol K39', 'Signal K39', 'Ideogram'],
                 transient=True)
    # Internal plot keys, index-aligned with ``names``.
    _plot_names = List(['', 'analysis_number_nonsorted', 'analysis_number', 'radiogenic_yield',
                        'kca', 'kcl', 'moles_k39', 'signal_k39', 'relative_probability'],
                       transient=True)
class IdeogramOptions(AgeOptions):
    """Options controlling the calculation and appearance of age ideograms.

    An ideogram is a (cumulative or kernel) probability-density plot of
    analysis ages.  This class holds the user-editable traits for the
    x-limit strategy (static / centered / asymptotic - mutually exclusive),
    the mean indicator and its labels, peak labels, analysis-number display
    and the subviews shown in the options UI.
    """
    # number of auxiliary plots available (see IdeogramAuxPlot.names)
    naux_plots = 8
    aux_plot_klass = IdeogramAuxPlot
    # buttons that open the label-template editors (see *_fired handlers)
    edit_label_format_button = Button
    edit_mean_format_button = Button
    mean_label_format = Str
    mean_label_display = Str
    # edit_label_format = Button
    # refresh_asymptotic_button = Button
    # mapping of index-attribute key -> "NN:label" display string
    # (see _index_attrs_default)
    index_attrs = Dict(transient=True)
    probability_curve_kind = Enum('cumulative', 'kernel')
    mean_calculation_kind = Enum('weighted mean', 'kernel')
    # --- x-limit strategy flags; the handlers below keep them exclusive ---
    use_centered_range = Bool
    use_static_limits = Bool
    xlow = Float
    xhigh = Float
    reverse_x_axis = Bool(False)
    centered_range = Float(0.5)
    # --- mean indicator display toggles ---
    display_mean_indicator = Bool(True)
    display_mean = Bool(True)
    display_mean_mswd = Bool(True)
    display_mean_n = Bool(True)
    display_mswd_pvalue = Bool(True)
    display_percent_error = Bool(True)
    # display_identifier_on_mean = Bool(False)
    # display_sample_on_mean = Bool(False)
    # --- peak label options ---
    label_all_peaks = Bool(True)
    peak_label_sigfigs = Int
    peak_label_bgcolor = Color
    peak_label_border = Int
    peak_label_border_color = Color
    peak_label_bgcolor_enabled = Bool(False)
    aux_plot_name = 'Ideogram'
    use_asymptotic_limits = Bool
    # asymptotic_width = Float)
    asymptotic_height_percent = Float
    analysis_number_sorting = Enum('Oldest @Top', 'Youngest @Top')
    global_analysis_number_sorting = Bool(True)
    # computed "<fontname> <size>" string (see _get_mean_indicator_font)
    mean_indicator_font = Property
    mean_indicator_fontname = Enum(*FONTS)
    mean_indicator_fontsize = Enum(*SIZES)
    mean_sig_figs = Enum(*STD_SIG_FIGS)
    mswd_sig_figs = Enum(*SIG_FIGS)
    # color-map the analysis-number points instead of using group colors
    use_cmap_analysis_number = Bool(False)
    cmap_analysis_number = Enum(list(color_map_name_dict.keys()))
    use_latest_overlay = Bool(False)
    show_results_table = Bool(False)
    show_ttest_table = Bool(False)
    show_rvalues = Bool(False)
    group_options_klass = IdeogramGroupOptions
    include_group_legend = Bool(True)
    group_legend_label_attribute = Enum('Group', 'Label Name', 'Sample', 'Aliquot')
    # shadow copies used to save/restore the mutually exclusive limit flags
    _use_centered_range = Bool
    _use_asymptotic_limits = Bool
    # guard preventing the limit-clearing handlers from firing while one
    # limit strategy is being swapped for another
    _suppress_xlimits_clear = Bool
    def initialize(self):
        """Define the subview names displayed in the options UI."""
        self.subview_names = [MAIN, 'Ideogram', APPEARANCE, 'Calculations', DISPLAY, GROUPS]
    def to_dict(self):
        """Serialize these options (including aux plots) to a dict."""
        d = super(IdeogramOptions, self).to_dict()
        aux_plots = self.to_dict_aux_plots()
        # groups = self.to_dict_groups()
        d['aux_plots'] = aux_plots
        # d['groups'] = groups
        return d
    def to_dict_aux_plots(self):
        """Serialize each aux plot to a dict."""
        return [ap.to_dict() for ap in self.aux_plots]
    def to_dict_groups(self):
        # placeholder: group serialization not implemented yet
        pass
    def to_dict_test(self, k):
        """Return True if trait *k* should be included in to_dict output."""
        return k not in ('_suppress_xlimits_clear', 'aux_plots', 'groups', 'index_attrs')
    def get_plot_dict(self, group_id, subgroup_id):
        """Return plot kwargs (colors, widths, marker, fill) for *group_id*,
        cycling through the configured groups when group_id exceeds them."""
        n = len(self.groups)
        gid = group_id % n
        fg = self.groups[gid]
        line_color = fg.line_color
        color = fg.color
        # if subgroup_id:
        #     rgb = color.red(), color.blue(), color.green()
        #     rgb = [c*0.9*subgroup_id for c in rgb]
        #     color.setRgb(*rgb)
        d = {'color': color,
             'edge_color': line_color,
             'edge_width': fg.line_width,
             'line_width': fg.line_width,
             'line_color': line_color}
        if fg.use_fill:
            # fg.alpha is a percentage (0-100); convert to the 0-1 range
            color = fg.color.toRgb()
            color.setAlphaF(fg.alpha * 0.01)
            d['fill_color'] = color
            d['type'] = 'filled_line'
        if fg.marker_non_default():
            d['marker'] = fg.marker
        if fg.marker_size_non_default():
            d['marker_size'] = fg.marker_size
        return d
    # private
    def _get_subview(self, name):
        return VIEWS[name]
    # handlers
    @on_trait_change('use_static_limits, use_centered_range')
    def _handle_use_limits(self, new):
        # persist use asymptotic limits
        # static/centered limits are mutually exclusive with asymptotic
        # limits: remember the asymptotic flag so it can be restored later
        self._suppress_xlimits_clear = True
        if new:
            self._use_asymptotic_limits = self.use_asymptotic_limits
            self.trait_set(use_asymptotic_limits=False)
        else:
            self.trait_set(use_asymptotic_limits=self._use_asymptotic_limits)
        self._suppress_xlimits_clear = False
    def _use_asymptotic_limits_changed(self, new):
        # persist use_centered range
        if not self._suppress_xlimits_clear:
            if new:
                self._use_centered_range = self.use_centered_range
                self.trait_set(use_centered_range=False)
            else:
                self.trait_set(use_centered_range=self._use_centered_range)
    @on_trait_change('xlow, xhigh')
    def _handle_static_limits(self):
        # new static limits invalidate any cached per-plot x limits
        for ap in self.aux_plots:
            ap.clear_xlimits()
    @on_trait_change('use_asymptotic_limits, asymptotic+, use_centered_range, centered_range, use_static_limits')
    def _handle_asymptotic(self, name, new):
        # if name.startswith('use') and not new:
        #     return
        if not self._suppress_xlimits_clear:
            for ap in self.aux_plots:
                ap.clear_xlimits()
    def _index_attr_changed(self):
        # a new index attribute invalidates cached per-plot y limits
        for ap in self.aux_plots:
            ap.clear_ylimits()
    def _edit_label_format_button_fired(self):
        """Open the analysis-label template editor; apply on accept."""
        from pychron.options.label_maker import LabelTemplater, LabelTemplateView
        lm = LabelTemplater(label=self.analysis_label_display)
        lv = LabelTemplateView(model=lm)
        info = lv.edit_traits()
        if info.result:
            self.analysis_label_format = lm.formatter
            self.analysis_label_display = lm.label
        # self.refresh_plot_needed = True
    def _edit_mean_format_button_fired(self):
        """Open the mean-label template editor; apply on accept."""
        from pychron.options.label_maker import MeanLabelTemplater, MeanLabelTemplateView
        lm = MeanLabelTemplater(label=self.mean_label_display)
        lv = MeanLabelTemplateView(model=lm)
        info = lv.edit_traits()
        if info.result:
            self.mean_label_format = lm.formatter
            self.mean_label_display = lm.label
    def _get_mean_indicator_font(self):
        # Property getter: combine font name and size into one string
        return '{} {}'.format(self.mean_indicator_fontname,
                              self.mean_indicator_fontsize)
    def _index_attrs_default(self):
        # keys are analysis attributes; the "NN:" prefix in the values
        # fixes the menu ordering
        return {'uage': '01:Age',
                'uF': '02:Ar40*/Ar39k',
                'Ar40/Ar36': '03:Ar40/Ar36',
                'Ar40/Ar39': '04:Ar40/Ar39',
                'Ar40/Ar38': '05:Ar40/Ar38',
                'Ar39/Ar37': '06:Ar39/Ar37',
                'uAr40/Ar36': '07:uncor. Ar40/Ar36',
                'Ar40': '08:Ar40',
                'Ar39': '09:Ar39',
                'Ar38': '10:Ar38',
                'Ar37': '11:Ar37',
                'Ar36': '12:Ar36',
                'j': '13:J'}
# ============= EOF =============================================
| |
import clustering
import matplotlib.pyplot as plt
from shapely.geometry import Polygon,MultiPolygon
import itertools
import math
from shapely.validation import explain_validity
import matplotlib
from shapely.ops import cascaded_union
import numpy
def findsubsets(S,m):
    """Return every m-element subset of S, as a set of tuples."""
    return {subset for subset in itertools.combinations(S, m)}
def nCr(n,r):
    """Return the binomial coefficient C(n, r) = n! / (r! * (n-r)!).

    Uses floor division so the result is an exact int on both Python 2
    and Python 3 (each intermediate quotient n!/r! and the final result
    are integers, so ``//`` loses nothing; true division would return a
    float under Python 3).
    """
    f = math.factorial
    return f(n) // f(r) // f(n - r)
class QuadTree:
    """Quad tree over image space used to aggregate polygon markings.

    Each node covers an axis-aligned bounding box and records, per user,
    the (polygon, poly_type) pairs intersecting that box.  Leaf boxes that
    at least three users completely cover count as "signal"; everything
    else contributes to the noise area.  (Python 2 code: note the
    tuple-parameter unpacking in __init__ and the print statements.)
    """
    def __init__(self,((lb_x,lb_y),(ub_x,ub_y)),parent=None):
        # first argument is ((lower-left x, y), (upper-right x, y))
        self.lb_x = lb_x
        self.ub_x = ub_x
        self.lb_y = lb_y
        self.ub_y = ub_y
        # children is None for a leaf, otherwise a list of 4 QuadTrees
        self.children = None
        self.parent = parent
        # maps user -> list of (polygon, poly_type) tuples
        self.polygons = {}
        try:
            self.bounding_box = Polygon([(lb_x,lb_y),(ub_x,lb_y),(ub_x,ub_y),(lb_x,ub_y)])
        except:
            # dump the offending corner points before re-raising
            print [(lb_x,lb_y),(ub_x,lb_y),(ub_x,ub_y),(lb_x,ub_y)]
            raise
        self.user_ids = []
    def __get_splits__(self):
        """Split this node into 4 equal children and return them, or []
        when the box is small, sparsely marked, or already agreed upon."""
        # stop splitting for tiny boxes or when fewer than 3 users marked it
        if (self.bounding_box.area < 5) or (len(self.polygons) <= 2):
            return []
        complete_agreement = 0
        # check to see if there are at least three users with polygons completely covering this particular area
        # if so - we consider this area to be not noise
        # todo - check if this is an actual problem (not just a hypothetical one)
        # for one area, we could have three users who have each covered this area with 2 (possibly overlapping)
        # polygons that just happen to line up perfectly so that they completely cover the area.
        # The polygons (or rectangles) of each user probably refer to two different things but since we won't
        # subdivide this box any further, we act as if all of the users are referring to only one thing
        for user in self.polygons:
            # such a user is referring to two or more different things so we should skip them
            if len(zip(*self.polygons[user])[0]) > 1:
                continue
            # extract just the polygons - don't worry about type
            u = cascaded_union(zip(*self.polygons[user])[0])
            # 1 here is a margin of error - the area will be in terms of pixels squared so a pretty large
            # number - 1 is pretty trivial compared to
            if math.fabs(self.bounding_box.intersection(u).area - self.bounding_box.area) < 1:
                # if this is the root node, the bounding box should cover every thing
                if self.parent is None:
                    x,y = self.polygons[user][0][0].exterior.xy
                    assert self.polygons[user][0][0].area <= self.bounding_box.area
                complete_agreement += 1
        # three or more users completely cover this box - no need to split
        if complete_agreement >= 3:
            return []
        # calculate the height and width of the new children nodes
        new_width = (self.lb_x+self.ub_x)/2. - self.lb_x
        new_height = (self.ub_y+self.lb_y)/2. - self.lb_y
        # corner pairs for the four child quadrants
        lower_left = (self.lb_x,self.lb_y),(self.lb_x+new_width,self.lb_y+new_height)
        lower_right = (self.lb_x+new_width,self.lb_y),(self.ub_x,self.lb_y+new_height)
        upper_left = (self.lb_x,self.lb_y+new_height),(self.lb_x+new_width,self.ub_y)
        upper_right = (self.lb_x+new_width,self.lb_y+new_height),(self.ub_x,self.ub_y)
        self.children = [QuadTree(lower_left,self) ,QuadTree(lower_right,self),QuadTree(upper_left,self),QuadTree(upper_right,self)]
        for c in self.children:
            assert isinstance(c,QuadTree)
            # each child must cover exactly a quarter of this node's area
            assert math.fabs((c.bounding_box.area - self.bounding_box.area/4.) < 0.0001)
        return self.children
    def __plot__(self,ax):
        """Recursively draw the outline of every leaf box that at least
        three users marked (drawn with pyplot; *ax* currently unused)."""
        if self.children is None:
            if len(self.polygons) >= 3:
                plt.plot((self.lb_x,self.ub_x,self.ub_x,self.lb_x,self.lb_x),(self.lb_y,self.lb_y,self.ub_y,self.ub_y,self.lb_y),color="red")
                # rect = plt.Rectangle((self.lb_x,self.lb_y),(self.ub_x-self.lb_x),(self.ub_y-self.lb_y),color="red")
                # print (self.lb_x,self.lb_y),(self.ub_x-self.lb_x),(self.ub_y-self.lb_y)
                # ax.add_artist(rect)
        else:
            for c in self.children:
                assert isinstance(c,QuadTree)
                c.__plot__(ax)
    def __aggregate__(self,total_area):
        """
        Recursively collect the aggregation results for this subtree.

        total_area is needed for calculating the "noise" area
        :param total_area: area of the whole image (or its proxy)
        :return: 4-tuple of
            - dict: poly_type -> aggregate (Multi)Polygon
            - dict: poly_type -> (vote percentage, covered area)
            - float: noise ("incorrect") area in this subtree
            - dict: user-density -> polygons covered by that many users
        """
        if self.children is None:
            if len(self.polygons) >= 3:
                # what is the majority vote for what type of "kind" this box outlines
                # for example, some people might say broad leave tree while others say it is a needle leaf tree
                # technically speaking, people could outline this region with different polygons
                # if so, we'll ignore such users, under the assumption that we don't really know
                vote_counts = {}
                for u in self.polygons:
                    polys, types = zip(*self.polygons[u])
                    # only count users whose polygons here all share one type
                    if min(types) == max(types):
                        vote = min(types)
                        if vote not in vote_counts:
                            vote_counts[vote] = 1
                        else:
                            vote_counts[vote] += 1
                # extract the most likely type according to pluraity voting
                # ties will get resolved in an arbitrary fashion
                most_likely,num_votes = sorted(vote_counts.items(),key = lambda x:x[1],reverse=True)[0]
                percentage = num_votes/float(sum(vote_counts.values()))
                # return two sets of values - one with the bounding box
                # the other with the voting results
                # last value is the noise area
                polygons_by_user_density = {i:[self.bounding_box] for i in range(3,len(self.polygons)+1)}
                return {most_likely:self.bounding_box},{most_likely:(percentage,self.bounding_box.area)},0,polygons_by_user_density
            else:
                # no polygons in this box - so no noise either
                if len(self.polygons) == 0:
                    return {},{},0,{}
                else:
                    # we have come polygons inside this box which amount to noise
                    # find out the area of these polygons - with respect to inside this box
                    # for smaller boxes, we could just use the area of the box as an approximation
                    # but for larger boxes, especially if we are the root, we need to do better
                    if (self.bounding_box.area/float(total_area)) > 0.02:
                        noise_polygons = []
                        for poly_list in self.polygons.values():
                            noise_polygons.extend(zip(*poly_list)[0])
                        combined_polygon = cascaded_union(noise_polygons).intersection(self.bounding_box)
                        return {},{},combined_polygon.area,{}
                    else:
                        return {},{},self.bounding_box.area,{}
        else:
            # interior node: merge the results of the four children
            return_polygons = {}
            return_stats = {}
            new_area = {}
            new_percentage = {}
            all_users = set()
            total_incorrect_area = 0
            total_polygons_user_density = {}
            for c in self.children:
                # get all the polygons that are in c's bounding boxes plus stats about which tools made
                # which reasons
                c_polygons,c_stats,incorrect_area,polygons_by_user_density = c.__aggregate__(total_area)
                for u in polygons_by_user_density:
                    if u not in total_polygons_user_density:
                        total_polygons_user_density[u] = polygons_by_user_density[u]
                    else:
                        total_polygons_user_density[u].extend(polygons_by_user_density[u])
                total_incorrect_area += incorrect_area
                # go through the
                for tool_id in c_polygons:
                    # first time we've seen this polygon
                    if tool_id not in return_polygons:
                        return_polygons[tool_id] = c_polygons[tool_id]
                        # return_stats[tool_id] = c_stats[tool_id]
                        new_area[tool_id] = [c_stats[tool_id][1],]
                        new_percentage[tool_id] = [c_stats[tool_id][0],]
                    else:
                        # we need to merge (yay!!)
                        # note this will probably result in more than one polygon
                        return_polygons[tool_id] = return_polygons[tool_id].union(c_polygons[tool_id])
                        # I think I could fold these values in as I go, but just to be careful (don't want to mess
                        # with the stats) save the values until end
                        new_area[tool_id].append(c_stats[tool_id][1])
                        new_percentage[tool_id].append(c_stats[tool_id][0])
            # now that we've gone through all of the children
            # calculate the weighted values
            for tool_id in new_area:
                # print new_percentage[tool_id]
                # area-weighted average of the children's vote percentages
                return_stats[tool_id] = numpy.average(new_percentage[tool_id],weights=new_area[tool_id]),sum(new_area[tool_id])
                assert not isinstance(return_polygons[tool_id],list)
            # only the root collapses the per-density polygon lists
            if self.parent == None:
                for u in total_polygons_user_density:
                    total_polygons_user_density[u] = cascaded_union(total_polygons_user_density[u])
            return return_polygons,return_stats,total_incorrect_area,total_polygons_user_density
    def __add_polygon__(self,user,polygon,poly_type):
        """Record (polygon, poly_type) for *user* if it intersects this box."""
        assert isinstance(polygon,Polygon)
        # don't add it if there is no intersection
        if self.bounding_box.intersection(polygon).is_empty:
            return
        if user not in self.polygons:
            self.polygons[user] = [(polygon,poly_type)]
            self.user_ids.append(user)
        else:
            self.polygons[user].append((polygon,poly_type))
    def __poly_iteration__(self):
        """Return an iterator yielding (user_id, (polygon, poly_type)) for
        every polygon stored in this node, grouped by user."""
        class Iterator:
            def __init__(self,node):
                assert isinstance(node,QuadTree)
                self.node = node
                # current position: index into user_ids / that user's polygons
                self.user_index = None
                self.polygon_index = None
            def __iter__(self):
                return self
            # Python 2 iterator protocol (would be __next__ on Python 3)
            def next(self):
                if self.user_index is None:
                    self.user_index = 0
                    self.polygon_index = 0
                else:
                    self.polygon_index += 1
                    # exhausted this user's polygons - move to the next user
                    if self.polygon_index == len(self.node.polygons[self.node.user_ids[self.user_index]]):
                        self.polygon_index = 0
                        self.user_index += 1
                if self.user_index == len(self.node.user_ids):
                    raise StopIteration
                user_id = self.node.user_ids[self.user_index]
                return user_id,self.node.polygons[user_id][self.polygon_index]
        return Iterator(self)
class BlobClustering(clustering.Cluster):
def __init__(self,shape,dim_reduction_alg):
assert shape != "point"
clustering.Cluster.__init__(self,shape,dim_reduction_alg)
self.rectangle = (shape == "rectangle")
def __fix_polygon__(self,points):
fixed_polygons = None
points = list(points)
validity = explain_validity(Polygon(points))
# x,y = zip(*Polygon(points).exterior.coords)
# x = list(x)
# y = list(y)
# x.append(x[0])
# y.append(y[0])
# plt.plot(x,y)
# plt.show()
assert isinstance(validity,str)
s,t = validity.split("[")
x_0,y_0 = t.split(" ")
x_0 = float(x_0)
y_0 = float(y_0[:-1])
# search for all of the line segments which touch the intersection point
# we need to wrap around to the beginning to get all of the line segments
splits = []
for line_index in range(len(points)):
(x_1,y_1) = points[line_index]
(x_2,y_2) = points[(line_index+1)%len(points)]
# the equation from a point to the nearest place on a line is from
# https://en.wikipedia.org/wiki/Distance_from_a_point_to_a_line
try:
dist = math.fabs((y_2-y_1)*x_0-(x_2-x_1)*y_0+x_2*y_1-y_2*x_1)/math.sqrt((y_2-y_1)**2+(x_2-x_1)**2)
except ZeroDivisionError:
print points
print line_index
raise
if dist < 0.01:
splits.append(line_index)
# seems to be the easiest way to dealing with needing to extract
# sublists which wrap around the end/beginning of the list
points.extend(points)
for intersection_index,line_index in enumerate(splits):
# find the index for the next line segment with intersect (x_0,y_0)
# if we have reached the end of the list then we need to wrap around
if (intersection_index+1) < len(splits):
line_index2 = splits[intersection_index+1]
else:
# keep in mind that we've doubled the length of points
line_index2 = splits[0] + len(points)/2
# always create the new polygon starting at the intersection point
new_polygon_points = [(x_0,y_0)]
new_polygon_points.extend(points[line_index+1:line_index2+1])
if explain_validity(Polygon(new_polygon_points)) != "Valid Geometry":
# if this is the first "sub"polygon - just accept it
if fixed_polygons is None:
fixed_polygons = self.__fix_polygon__(new_polygon_points)
# else try to merge the results in
else:
fixed_polygons.extend(self.__fix_polygon__(new_polygon_points))
else:
if fixed_polygons is None:
fixed_polygons = [Polygon(new_polygon_points)]
else:
fixed_polygons.append(Polygon(new_polygon_points))
return fixed_polygons
def __cluster__(self,markings,user_ids,tools,reduced_markings,dimensions):
poly_dictionary = {}
# the polygon dictionary will contain the "processed" polygons for each user along with that
# polygon's type so the points stored for those polygons might not actuall correspond to the users
# original points. This means that we cannot use those points as a reference (or pointer if you will ;))
# when dealing with followup questions
# so we will also add an index value so that we can look back at the original set of markings
for marking_index,(polygon_pts,u,t) in enumerate(zip(markings,user_ids,tools)):
# we need at least 3 points to made a valid polygon
if len(polygon_pts) < 3:
continue
poly = Polygon(polygon_pts)
validity = explain_validity(poly)
if "Too few points" in validity:
continue
# correct the geometry if we have to - will probably result in a multipolygon
# which we will keep as one object and NOT split into individual polygons
elif validity != "Valid Geometry":
corrected_polygon = self.__fix_polygon__(polygon_pts)
if u not in poly_dictionary:
poly_dictionary[u] = [(corrected_polygon,t,marking_index),]
else:
poly_dictionary[u].append((corrected_polygon,t,marking_index))
else:
if u not in poly_dictionary:
poly_dictionary[u] = [(poly,t,marking_index)]
else:
poly_dictionary[u].append((poly,t,marking_index))
# if dimensions have been provided, use those as our initial bounding box
# otherwise, use the minimum and maximum values actually found
# todo - might this be best in any case - even if we have the image dimensions?
if dimensions == (None,None):
max_x = -float("inf")
max_y = -float("inf")
min_x = float("inf")
min_y = float("inf")
for m in markings:
X,Y = zip(*m)
max_x = max(max_x,max(X))
max_y = max(max_y,max(Y))
min_x = min(min_x,min(X))
min_y = min(min_y,min(Y))
assert max_x != min_x
assert max_y != min_y
box = [[min_x,min_y],[max_x,min_y],[max_x,max_y],[min_x,max_y]]
# bit of a proxy
image_area = max_x*max_y
else:
box = [[0,0],[dimensions[1],0],[dimensions[1],dimensions[0]],[0,dimensions[1]]]
image_area = dimensions[0]*dimensions[1]
quad_root = QuadTree((box[0],box[2]))
for user,polygon_list in poly_dictionary.items():
for polygon,poly_type,index in polygon_list:
quad_root.__add_polygon__(user,polygon,poly_type)
to_process = [quad_root]
# do a depth first traversal of the tree to populate each of the nodes
while to_process != []:
node = to_process.pop(-1)
assert isinstance(node,QuadTree)
# if (we have parent => !the root) => need to read in parent's polygons
# some of which will become ours
if node.parent is not None:
for user,(poly,poly_type) in node.parent.__poly_iteration__():
node.__add_polygon__(user,poly,poly_type)
new_children = node.__get_splits__()
to_process.extend(new_children)
# now get the results - start from the root node
aggregate_polygons,aggregate_stats,total_incorrect_area,polygons_by_user_density = quad_root.__aggregate__(image_area)
for tool_id in aggregate_stats:
vote_percentage,tool_area = aggregate_stats[tool_id]
aggregate_stats[tool_id] = vote_percentage,tool_area/float(image_area)
# decompose each aggregate polygon into a list of polygons (if we have a multipolygon)
# or just a singleton list if we have just one polygon
# might not be necessary but I want to make sure that the ordering of the individual polygons
# inside of a multipolygon don't change
# also a good moment to set up the cluster members list
polygon_members = {}
for poly_type,agg_poly in aggregate_polygons.items():
polygon_members[poly_type] = []
if isinstance(agg_poly,Polygon):
aggregate_polygons[poly_type] = [agg_poly]
polygon_members[poly_type].append([])
else:
assert isinstance(agg_poly,MultiPolygon)
aggregate_polygons[poly_type] = []
for p in agg_poly:
aggregate_polygons[poly_type].append(p)
polygon_members[poly_type].append([])
incorrect_area_as_percent = total_incorrect_area/image_area
# find the users for each polygon - might be a way to do this when I am making
intersecting_users = []
# find which users have a polygon actually intersecting with this particular aggregate one
# for now - we will make some important assumes
# todo - review assumptions
# if a user has more than one polygon (or rectangle) inside this aggregate one
# those will still count towards the aggregate polygon not being noise
# however, if there are any follow up questions associated with this polygon tool
# we will not consider ANY of the answers associated with ANY of the polygons
# mainly because we don't know which one to use (since we can only really take one answer)
# moreover, there is a fundamental difference between this user and the rest
# the other
# also if the a user's polygon covers (a lot of) more than one aggregate polygon then again we'll
# ignore any related follow up questions
# find which user polygon maps to which aggregate polygon
# note that the types might not match up - the user could have said that
# a polygon outlines a region of one type of tree while the majority
# said a different kind
# however, we will ignore such mismatches since that means that the follow up questions must match up
for u in poly_dictionary:
for poly_index,(user_poly,poly_type,marking_index) in enumerate(poly_dictionary[u]):
covers = []
belongs_to = []
# were there any aggregate polygons of the right type?
if poly_type in aggregate_polygons:
for p in aggregate_polygons[poly_type]:
inter = p.intersection(user_poly)
if inter.area/p.area > 0.5:
covers.append(poly_index)
if inter.area/user_poly.area > 0.5:
belongs_to.append(poly_index)
if (len(covers) == 1) and (covers == belongs_to):
polygon_members[poly_type][covers[0]].append((u,user_poly,marking_index))
results = []
# a lot of this stuff is done in the classification code for other tool types but it makes more
# sense for polygons to do it here
for tool_id in aggregate_polygons:
# have one set of results per tool type
# the center will be a list of all polygons
for poly_index,agg_poly in enumerate(aggregate_polygons[tool_id]):
next_result = dict()
# if we are doing rectangles, make sure to keep them as rectangles
if self.rectangle:
x,y = agg_poly.exterior.xy
next_result["center"] = [(max(x),max(y)),(min(x),min(y))]
else:
next_result["center"] = [zip(agg_poly.exterior.xy[0],agg_poly.exterior.xy[0])]
# cluster members are the individual polygons
# users are the corresponding user ids
# todo - many only store the first few points
next_result["cluster members"] = []
next_result["users"] = []
next_result["tools"] = []
for user,poly,marking_index in polygon_members[tool_id][poly_index]:
next_result["cluster members"].append(markings[marking_index][:5])
next_result["users"].append(user)
next_result["tools"].append(tool_id)
next_result["tool classification"] = tool_id
results.append(next_result)
# # we will either have a multi-polygon as our aggregation result - or if we are really
# # lucky, a single polygon
# if isinstance(aggregate_polygons[tool_id],Polygon):
# next_result = dict()
#
# poly = aggregate_polygons[tool_id]
#
# next_result["cluster members"] = None
# next_result["users"] = intersecting_users
# next_result["num users"] = len(intersecting_users)
# next_result["tool classification"] = tool_id
# next_result["area"] = aggregate_stats[tool_id][1]
#
# # todo - the certainty calculation should probably be done here instead of just
# # todo relying on the returned value
# next_result["certainty"] = aggregate_stats[tool_id][0]
#
# # these are global values which are not really specific to any one polygon
# # but this seems to be the best place to store the values
# next_result["incorrect area"] = incorrect_area_as_percent
#
# results.append(next_result)
#
# elif isinstance(aggregate_polygons[tool_id], MultiPolygon):
# # todo - implement cluster membership here
# assert False
# next_result["center"] = []
# # todo - how to aggregate for multiple rectangles?
# # todo - important to figure this out for rectangles
# for poly in aggregate_polygons[tool_id]:
#
# # go through each of the individual polygons making up this multipolygon
# if isinstance(poly,Polygon):
# next_result["center"].append(zip(poly.exterior.xy[0],poly.exterior.xy[0]))
# else:
# assert False
# else:
# # unknown type
# print type(aggregate_polygons[tool_id])
# assert False
# # a value of None -> not really relevant to polygon aggregation
# # or really hard to keep track of
# next_result["users"] = None
# next_result["num users"] = None
#
#
# # todo - don't hard code this
# next_result["minimum users"] = 3
# next_result["area"] = aggregate_stats[tool_id][1]
# results.append(next_result)
# todo - find if I really need to use this
# # these results might get really big so I don't want to waste space repeatedly storing the values
# # so with slight abuse of setup ...
# for num_users,polygons in polygons_by_user_density.items():
# # probably never going to wind up a just a single polygon
# # but we can always hope
# if isinstance(polygons,Polygon):
# # todo - this will matter for rectangle aggregation
# pass
# else:
# for single_poly in polygons:
# assert isinstance(single_poly,Polygon)
# x,y = single_poly.exterior.xy
# pts = zip(x,y)
#
# next_result = {"users":None,"num users": num_users,"tool classification" : None, "area":single_poly.area, "certainty":None}
# next_result["center"] = pts
#
# results.append(next_result)
# if results == []:
# # add in a dummy polygon so that we can report back the size of
# # the incorrect area
# results = [{"area":0,"incorrect area":incorrect_area_as_percent,"certainty": -1},]
return results,0
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.