gt stringclasses 1 value | context stringlengths 2.49k 119k |
|---|---|
# -*- coding: utf-8 -*-
"""Test the functionality of the Manager."""
from datetime import datetime
from ocspdash.manager import Manager
from ocspdash.models import Chain, Result
from .constants import (
TEST_BAD_CERTIFICATE_CHAIN_UUID,
TEST_KEY_ID,
TEST_LOCATION_NAME,
TEST_PUBLIC_KEY,
)
def test_count_authorities(manager_function: Manager):
    """Verify that count_authorities reflects every authority created."""
    expected_total = 10
    for index in range(expected_total):
        manager_function.ensure_authority(
            name=f'Test Authority {index}',
            cardinality=index * 10 + 7,
        )
    assert expected_total == manager_function.count_authorities()
def test_count_responders(manager_function: Manager):
    """Verify that count_responders reflects every responder created."""
    owner = manager_function.ensure_authority(
        name='Test Authority', cardinality=1234
    )
    expected_total = 10
    for index in range(expected_total):
        manager_function.ensure_responder(
            authority=owner,
            url=f'http://test-responder.url/{index}',
            cardinality=index * 9 - 3,
        )
    assert expected_total == manager_function.count_responders()
def test_count_chains(manager_function: Manager):
    """Verify that count_chains reflects every chain committed."""
    owner = manager_function.ensure_authority(
        name='Test Authority', cardinality=1234
    )
    target = manager_function.ensure_responder(
        authority=owner, url='http://test-responder.url/', cardinality=123
    )
    session = manager_function.session
    expected_total = 10
    for index in range(expected_total):
        session.add(
            Chain(
                responder=target,
                subject=f'c{index}s'.encode('utf-8'),
                issuer=f'c{index}i'.encode('utf-8'),
            )
        )
        session.commit()
    assert expected_total == manager_function.count_chains()
def test_get_authority_by_name(manager_function: Manager):
    """Verify authority lookup by name, and None for unknown names."""
    created = manager_function.ensure_authority(
        name='Test Authority', cardinality=1234
    )
    found = manager_function.get_authority_by_name('Test Authority')
    assert found is created
    missing = manager_function.get_authority_by_name('Nonexistent Authority')
    assert missing is None
def test_ensure_authority(manager_function: Manager):
    """Verify ensure_authority creates once and updates on re-use."""
    first = manager_function.ensure_authority(
        name='Test Authority', cardinality=1234
    )
    assert first.name == 'Test Authority'
    assert first.cardinality == 1234
    # A second call with the same name must update the row, not duplicate it.
    second = manager_function.ensure_authority(
        name='Test Authority', cardinality=2345
    )
    assert second is first
    assert second.name == 'Test Authority'
    assert second.cardinality == 2345
def test_get_responder(manager_function: Manager):
    """Verify responder lookup by (authority, url), and None when absent."""
    owner = manager_function.ensure_authority(
        name='Test Authority', cardinality=1234
    )
    created = manager_function.ensure_responder(
        authority=owner, url='http://test-responder.url/', cardinality=123
    )
    found = manager_function.get_responder(owner, 'http://test-responder.url/')
    assert found is created
    missing = manager_function.get_responder(owner, 'http://non-existent.url/')
    assert missing is None
def test_ensure_responder(manager_function: Manager):
    """Verify ensure_responder creates once and updates on re-use."""
    owner = manager_function.ensure_authority(
        name='Test Authority', cardinality=1234
    )
    first = manager_function.ensure_responder(
        authority=owner, url='http://test-responder.url/', cardinality=123
    )
    assert first.authority is owner
    assert first.url == 'http://test-responder.url/'
    assert first.cardinality == 123
    # Repeating the call with the same URL must update the existing row.
    second = manager_function.ensure_responder(
        authority=owner, url='http://test-responder.url/', cardinality=234
    )
    assert second is first
    assert second.url == 'http://test-responder.url/'
    assert second.cardinality == 234
def test_get_chain_by_certificate_chain_uuid(manager_function: Manager):
    """Verify Chain retrieval by its certificate-chain UUID."""
    owner = manager_function.ensure_authority(
        name='Test Authority', cardinality=1234
    )
    target = manager_function.ensure_responder(
        authority=owner, url='http://test-responder.url/', cardinality=234
    )
    stored = Chain(responder=target, subject=b'cs', issuer=b'ci')
    manager_function.session.add(stored)
    manager_function.session.commit()
    uuid_key = stored.certificate_chain_uuid
    found = manager_function.get_chain_by_certificate_chain_uuid(uuid_key)
    assert found is stored
    # An unknown UUID must not resolve to any chain.
    missing = manager_function.get_chain_by_certificate_chain_uuid(
        TEST_BAD_CERTIFICATE_CHAIN_UUID
    )
    assert missing is None
def test_get_most_recent_chain_by_responder(manager_function: Manager):
    """Verify the chain with the newest retrieval date wins."""
    owner = manager_function.ensure_authority(
        name='Test Authority', cardinality=1234
    )
    target = manager_function.ensure_responder(
        authority=owner, url='http://test-responder.url/', cardinality=234
    )
    # Four chains, retrieved on consecutive July days; the last is newest.
    chains = [
        Chain(
            responder=target,
            subject=f'c{day}s'.encode('utf-8'),
            issuer=f'c{day}i'.encode('utf-8'),
            retrieved=datetime(2018, 7, day),
        )
        for day in range(1, 5)
    ]
    manager_function.session.add_all(chains)
    manager_function.session.commit()
    newest = manager_function.get_most_recent_chain_by_responder(target)
    assert newest is chains[-1]
def test_get_location_by_name(manager_function: Manager):
    """Test getting a location by its name.

    Fixes: the validator returned by create_location was bound but never
    used, and the bare truthiness assert is now an explicit None check.
    """
    # Only the selector is needed here; discard the unused validator.
    selector, _ = manager_function.create_location(TEST_LOCATION_NAME)
    location = manager_function.get_location_by_selector(selector)
    assert location is not None
    assert manager_function.get_location_by_name(TEST_LOCATION_NAME) is location
    assert manager_function.get_location_by_name('Nonexistent Location') is None
def test_location_invites(manager_function: Manager):
    """Exercise the invite / verify / registration lifecycle of a Location."""
    selector, validator = manager_function.create_location(TEST_LOCATION_NAME)
    location = manager_function.get_location_by_selector(selector)
    assert location.name == TEST_LOCATION_NAME
    # Only the genuine validator verifies; no key material exists yet.
    assert not location.verify(b'random wrong value')
    assert location.verify(validator)
    assert location.pubkey is None
    assert location.key_id is None
    invite_token = b''.join((selector, validator))
    registered = manager_function.process_location(invite_token, TEST_PUBLIC_KEY)
    assert registered is location
    assert isinstance(registered.b64encoded_pubkey, str)
    assert registered.b64encoded_pubkey == TEST_PUBLIC_KEY
    assert registered.key_id == TEST_KEY_ID
def test_get_all_locations(manager_function: Manager):
    """Only locations that actually have test results should be returned."""
    for location_name in ('l1', 'l2', 'l3'):
        manager_function.create_location(location_name)
    l1 = manager_function.get_location_by_name('l1')
    assert l1 is not None
    l2 = manager_function.get_location_by_name('l2')
    assert l2 is not None
    l3 = manager_function.get_location_by_name('l3')
    assert l3 is not None
    # The boolean values are irrelevant to this test; the columns are simply
    # NOT NULL, so something has to be supplied.
    results = [
        Result(location=l1, ping=True, ocsp=True),
        Result(location=l1, ping=True, ocsp=True),
        Result(location=l1, ping=True, ocsp=True),
        Result(location=l2, ping=True, ocsp=True),
    ]
    manager_function.session.add_all(results)
    manager_function.session.commit()
    with_results = manager_function.get_all_locations_with_test_results()
    assert l1 in with_results
    assert l2 in with_results
    assert l3 not in with_results
def test_get_most_recent_chains_for_authorities(manager_function: Manager):
    """Test getting the most recent chain for each top authority. This becomes the manifest."""
    session = manager_function.session
    a1 = manager_function.ensure_authority('a1', 5)
    assert a1 is not None
    a2 = manager_function.ensure_authority('a2', 5)
    assert a2 is not None
    a3 = manager_function.ensure_authority('a3', 5)
    assert a3 is not None
    assert manager_function.count_authorities() == 3
    r1 = manager_function.ensure_responder(a1, 'url1', 5)
    r2 = manager_function.ensure_responder(a1, 'url2', 5)
    r3 = manager_function.ensure_responder(a2, 'url3', 5)
    r4 = manager_function.ensure_responder(a2, 'url4', 5)
    assert manager_function.count_responders() == 4
    c1 = Chain(responder=r1, subject=b'c1s', issuer=b'c1i')
    c2 = Chain(responder=r2, subject=b'c2s', issuer=b'c2i')
    c3 = Chain(responder=r3, subject=b'c3s', issuer=b'c3i')
    c4 = Chain(responder=r4, subject=b'c4s', issuer=b'c4i')
    session.add_all([c1, c2, c3, c4])
    session.commit()
    assert manager_function.count_chains() == 4
    manifest = manager_function.get_most_recent_chains_for_authorities()
    assert len(manifest) == 4
    for expected in (c1, c2, c3, c4):
        assert expected in manifest
    # Newer chains displace older ones for the same responder.
    c5 = Chain(responder=r1, subject=b'c5s', issuer=b'c5i')
    c6 = Chain(responder=r3, subject=b'c6s', issuer=b'c6i')
    session.add_all([c5, c6])
    session.commit()
    assert manager_function.count_chains() == 6
    manifest = manager_function.get_most_recent_chains_for_authorities()
    assert len(manifest) == 4
    for expected in (c5, c2, c6, c4):
        assert expected in manifest
def test_recent_results(manager_function: Manager):
    """Smoke test: fetching recent results per location must not raise."""
    _ = manager_function.get_most_recent_result_for_each_location()
def test_get_payload(manager_function: Manager):
    """Smoke test: building the payload must not raise."""
    _ = manager_function.get_payload()
| |
# Fall 2012 6.034 Lab 2: Search
from search import *
## The graphs you will use for the problem set.
## The heuristic values
## are lower bounds on the distance to the node with the id of
## "Common Area"
# GRAPH1: the castle map.  VAL is the edge length; the heuristic table maps
# the goal 'Common Area' to a lower bound on each node's distance to it.
GRAPH1 = Graph(edgesdict = \
    [{NAME:'e1', VAL: 5, NODE1:'Common Area', NODE2:'Stairs'},
     {NAME:'e2', VAL:15, NODE1:'Entrance Hall', NODE2:'Hospital'},
     {NAME:'e3', VAL: 7, NODE1:'Classroom 11', NODE2:'Hospital'},
     {NAME:'e4', VAL:25, NODE1:'Haunted Bathroom', NODE2:'The Chamber'},
     {NAME:'e5', VAL: 5, NODE1:'Forbidden Area', NODE2:'Trophy Room'},
     {NAME:'e6', VAL: 3, NODE1:'Mirrored Room', NODE2:'Statues'},
     {NAME:'e7', VAL: 1, NODE1:'Grand Hall', NODE2:'Entrance Hall'},
     {NAME:'e8', VAL: 4, NODE1:'Dungeon 5', NODE2:'Haunted Bathroom'},
     {NAME:'e9', VAL: 2, NODE1:'Stairs', NODE2:'Grand Hall' },
     {NAME:'e10', VAL: 9, NODE1:'Statues', NODE2:'Stairs' },
     {NAME:'e11', VAL: 6, NODE1:'Entrance Hall', NODE2:'Haunted Bathroom' },
     {NAME:'e12', VAL: 4, NODE1:'Forbidden Area', NODE2:'Stairs' },
     {NAME:'e13', VAL:10, NODE1:'Classroom 11', NODE2:'Entrance Hall' },
     {NAME:'e14', VAL: 5, NODE1:'Trophy Room', NODE2:'Stairs' },
     {NAME:'e15', VAL: 8, NODE1:'Stairs', NODE2:'Mirrored Room' },
     {NAME:'e16', VAL: 3, NODE1:'Entrance Hall', NODE2:'Stairs' },
     {NAME:'e17', VAL: 8, NODE1:'Necessary Room', NODE2:'Common Area'}
     ],
    heuristic = \
    {'Common Area':
     {'Hospital':17,
      'Classroom 11':10,
      'Entrance Hall':7,
      'Haunted Bathroom':13,
      'Dungeon 5':15,
      'The Chamber':14,
      'Forbidden Area':8,
      'Trophy Room':6,
      'Stairs':4,
      'Grand Hall':6,
      'Common Area':0,
      'Statues':12,
      'Mirrored Room':10,
      'Necessary Room':6 }})
# GRAPH2: heuristic estimates are keyed by goal node 'G'.
GRAPH2 = Graph(edgesdict=[
    {NAME: 'e1', VAL:10, NODE1:'S', NODE2:'A' },
    {NAME: 'e2', VAL: 4, NODE1:'S', NODE2:'B' },
    {NAME: 'e3', VAL: 9, NODE1:'A', NODE2:'C' },
    {NAME: 'e4', VAL: 8, NODE1:'B', NODE2:'C' },
    {NAME: 'e5', VAL: 7, NODE1:'C', NODE2:'D' },
    {NAME: 'e6', VAL: 9, NODE1:'C', NODE2:'E' },
    {NAME: 'e7', VAL: 7, NODE1:'D', NODE2:'E' },
    {NAME: 'e8', VAL:13, NODE1:'D', NODE2:'F' },
    {NAME: 'e9', VAL: 8, NODE1:'E', NODE2:'F' },
    {NAME: 'e10', VAL: 5, NODE1:'E', NODE2:'G' },
    {NAME: 'e11', VAL:10, NODE1:'F', NODE2:'G' } ],
    heuristic={'G':{'S':25, 'A':20, 'B':22, 'C':15, 'D':8, 'E':3, 'F':9}})
# GRAPH3: small graph with goal 'G'.
GRAPH3 = Graph(edgesdict=[
    {NAME: 'e1', VAL: 6, NODE1:'S', NODE2:'B' },
    {NAME: 'e2', VAL:10, NODE1:'S', NODE2:'A' },
    {NAME: 'e3', VAL:10, NODE1:'A', NODE2:'B' },
    {NAME: 'e4', VAL: 7, NODE1:'B', NODE2:'C' },
    {NAME: 'e5', VAL: 4, NODE1:'A', NODE2:'D' },
    {NAME: 'e6', VAL: 2, NODE1:'C', NODE2:'D' },
    {NAME: 'e7', VAL: 6, NODE1:'C', NODE2:'G' },
    {NAME: 'e8', VAL: 8, NODE1:'G', NODE2:'D' } ],
    heuristic={'G':{"S":0,"A":2,"B":5,"C":6,"D":5}})
# GRAPH4: all edges have unit length (ladder-shaped graph, goal 'G').
GRAPH4 = Graph(edgesdict=[
    {NAME: 'e1', VAL:1, NODE1:'S', NODE2:'A' },
    {NAME: 'e2', VAL:1, NODE1:'S', NODE2:'B' },
    {NAME: 'e3', VAL:1, NODE1:'A', NODE2:'B' },
    {NAME: 'e4', VAL:1, NODE1:'C', NODE2:'A' },
    {NAME: 'e5', VAL:1, NODE1:'C', NODE2:'B' },
    {NAME: 'e6', VAL:1, NODE1:'D', NODE2:'C' },
    {NAME: 'e7', VAL:1, NODE1:'D', NODE2:'B' },
    {NAME: 'e8', VAL:1, NODE1:'E', NODE2:'C' },
    {NAME: 'e9', VAL:1, NODE1:'E', NODE2:'D' },
    {NAME: 'e10', VAL:1, NODE1:'F', NODE2:'D' },
    {NAME: 'e11', VAL:1, NODE1:'F', NODE2:'E' },
    {NAME: 'e12', VAL:1, NODE1:'G', NODE2:'E' },
    {NAME: 'e13', VAL:1, NODE1:'G', NODE2:'F' } ],
    heuristic={"G":{"S":1,"A":3,"B":3,"C":2,"D":2,"E":1,"F":1}})
# GRAPH5: heuristic for 'A' (1000) wildly overestimates — not admissible.
GRAPH5 = Graph(edgesdict=[
    {NAME: 'e1', VAL: 1, NODE1:'S', NODE2:'A' },
    {NAME: 'e2', VAL: 1, NODE1:'G', NODE2:'C' },
    {NAME: 'e3', VAL:100, NODE1:'B', NODE2:'C' },
    {NAME: 'e4', VAL: 10, NODE1:'S', NODE2:'B' },
    {NAME: 'e5', VAL: 10, NODE1:'C', NODE2:'A' } ],
    heuristic={"G":{"S":10,"A":1000,"B":5,"C":5}})
# SAQG: tiny unit-length diamond plus a direct S-G edge.  Note: these edge
# dicts use explicit string keys ('NAME'/'LENGTH') instead of the NAME/VAL
# constants used by GRAPH1-5.
SAQG = Graph(edgesdict=[
    {'NAME': 'SA', 'LENGTH': 1, 'NODE1': 'S', 'NODE2': 'A'},
    {'NAME': 'SQ', 'LENGTH': 1, 'NODE1': 'S', 'NODE2': 'Q'},
    {'NAME': 'AG', 'LENGTH': 1, 'NODE1': 'A', 'NODE2': 'G'},
    {'NAME': 'QG', 'LENGTH': 1, 'NODE1': 'Q', 'NODE2': 'G'},
    {'NAME': 'SG', 'LENGTH': 1, 'NODE1': 'S', 'NODE2': 'G'}])
# NEWGRAPH1: carries several heuristic tables, each keyed by a goal node;
# the inline comments describe the character of each table.
NEWGRAPH1 = Graph(edgesdict=[
    { 'NAME': 'e1', 'LENGTH': 6, 'NODE1': 'S', 'NODE2': 'A' },
    { 'NAME': 'e2', 'LENGTH': 4, 'NODE1': 'A', 'NODE2': 'B' },
    { 'NAME': 'e3', 'LENGTH': 7, 'NODE1': 'B', 'NODE2': 'F' },
    { 'NAME': 'e4', 'LENGTH': 6, 'NODE1': 'C', 'NODE2': 'D' },
    { 'NAME': 'e5', 'LENGTH': 3, 'NODE1': 'C', 'NODE2': 'A' },
    { 'NAME': 'e6', 'LENGTH': 7, 'NODE1': 'E', 'NODE2': 'D' },
    { 'NAME': 'e7', 'LENGTH': 6, 'NODE1': 'D', 'NODE2': 'H' },
    { 'NAME': 'e8', 'LENGTH': 2, 'NODE1': 'S', 'NODE2': 'C' },
    { 'NAME': 'e9', 'LENGTH': 2, 'NODE1': 'B', 'NODE2': 'D' },
    { 'NAME': 'e10', 'LENGTH': 25, 'NODE1': 'E', 'NODE2': 'G' },
    { 'NAME': 'e11', 'LENGTH': 5, 'NODE1': 'E', 'NODE2': 'C' } ],
    heuristic={"G":{'S': 11,
                    'A': 9,
                    'B': 6,
                    'C': 12,
                    'D': 8,
                    'E': 15,
                    'F': 1,
                    'H': 2},
               "H":{'S': 11,
                    'A': 9,
                    'B': 6,
                    'D': 12,
                    'E': 8,
                    'F': 15,
                    'G': 14},
               'A':{'S':5,   # admissible
                    "B":1,   # h(d) > h(b)+c(d->b) ... 6 > 1 + 2
                    "C":3,
                    "D":6,
                    "E":8,
                    "F":11,
                    "G":33,
                    "H":12},
               'C':{"S":2,   # consistent
                    "A":3,
                    "B":7,
                    "D":6,
                    "E":5,
                    "F":14,
                    "G":30,
                    "H":12},
               "D":{"D":3},  # dumb
               "E":{}        # empty
               })
# NEWGRAPH2: small graph whose heuristic is keyed by goal 'G'.
NEWGRAPH2 = Graph(edgesdict=
    [ { 'NAME': 'e1', 'LENGTH': 2, 'NODE1': 'D', 'NODE2': 'F' },
      { 'NAME': 'e2', 'LENGTH': 4, 'NODE1': 'C', 'NODE2': 'E' },
      { 'NAME': 'e3', 'LENGTH': 2, 'NODE1': 'S', 'NODE2': 'B' },
      { 'NAME': 'e4', 'LENGTH': 5, 'NODE1': 'S', 'NODE2': 'C' },
      { 'NAME': 'e5', 'LENGTH': 4, 'NODE1': 'S', 'NODE2': 'A' },
      { 'NAME': 'e6', 'LENGTH': 8, 'NODE1': 'F', 'NODE2': 'G' },
      { 'NAME': 'e7', 'LENGTH': 5, 'NODE1': 'D', 'NODE2': 'C' },
      { 'NAME': 'e8', 'LENGTH': 6, 'NODE1': 'D', 'NODE2': 'H' } ],
    heuristic={"G":{'S': 9,
                    'A': 1,
                    'B': 2,
                    'C': 3,
                    'D': 6,
                    'E': 5,
                    'F': 15,
                    'H': 10}})
# NEWGRAPH3: degenerate single-node graph (start == only node).
NEWGRAPH3 = Graph(nodes=["S"])
# NEWGRAPH4: larger graph with goal 'T'; nodes are listed explicitly.
NEWGRAPH4 = Graph(nodes=["S","A", "B", "C", "D", "E", "F", "H", "J", "K",
                         "L", "T" ],
    edgesdict = [{ 'NAME': 'eSA', 'LENGTH': 2, 'NODE1': 'S', 'NODE2': 'A' },
                 { 'NAME': 'eSB', 'LENGTH': 10, 'NODE1': 'S', 'NODE2':'B' },
                 { 'NAME': 'eBC', 'LENGTH': 5, 'NODE1': 'B', 'NODE2':'C' },
                 { 'NAME': 'eBF', 'LENGTH': 2, 'NODE1': 'B', 'NODE2':'F' },
                 { 'NAME': 'eCE', 'LENGTH': 5, 'NODE1': 'C', 'NODE2':'E' },
                 { 'NAME': 'eCJ', 'LENGTH': 12, 'NODE1': 'C', 'NODE2':'J' },
                 { 'NAME': 'eFH', 'LENGTH': 8, 'NODE1': 'F', 'NODE2':'H' },
                 { 'NAME': 'eHD', 'LENGTH': 3, 'NODE1': 'H', 'NODE2':'D' },
                 { 'NAME': 'eHK', 'LENGTH': 5, 'NODE1': 'H', 'NODE2':'K' },
                 { 'NAME': 'eKJ', 'LENGTH': 1, 'NODE1': 'K', 'NODE2':'J' },
                 { 'NAME': 'eJL', 'LENGTH': 4, 'NODE1': 'J', 'NODE2':'L' },
                 { 'NAME': 'eKT', 'LENGTH': 7, 'NODE1': 'K', 'NODE2':'T' },
                 { 'NAME': 'eLT', 'LENGTH': 5, 'NODE1': 'L', 'NODE2':'T' },
                 ],
    heuristic={"T":{'S': 10,
                    'A': 6,
                    'B': 5,
                    'C': 2,
                    'D': 5,
                    'E': 1,
                    'F': 100,
                    'H': 2,
                    'J': 3,
                    'K': 100,
                    'L': 4,
                    'T': 0,}})
# AGRAPH: graph used in a_star test 7 (Test 31), to differentiate using an
# extended-list vs not.  The heuristic is admissible but not consistent, so
# if you use an extended-list (as you're supposed to), it won't find an
# optimal path.
AGRAPH = Graph(nodes = ['S', 'A', 'B', 'C', 'G'],
    edgesdict = [{'NAME': 'eSA', 'LENGTH': 3, 'NODE1': 'S', 'NODE2': 'A'},
                 {'NAME': 'eSB', 'LENGTH': 1, 'NODE1': 'S', 'NODE2': 'B'},
                 {'NAME': 'eAB', 'LENGTH': 1, 'NODE1': 'A', 'NODE2': 'B'},
                 {'NAME': 'eAC', 'LENGTH': 1, 'NODE1': 'A', 'NODE2': 'C'},
                 {'NAME': 'eCG', 'LENGTH': 10, 'NODE1': 'C', 'NODE2': 'G'}],
    heuristic = {'G':{'S': 12,
                      'A': 9,
                      'B': 12,
                      'C': 8,
                      'G': 0}})
| |
import HTMLParser
import cookielib
import json
import os
import re
import sys
import urllib
import urllib2
import xbmc
import xbmcaddon
import xbmcgui
import xbmcplugin
import time
import socket, ssl
import m3u8
#import requests
## Settings
# Addon configuration, credentials and debug switches (Kodi settings.xml).
settings = xbmcaddon.Addon(id='plugin.video.digi-online')
login_User = settings.getSetting('login_User')
login_Password = settings.getSetting('login_Password')
login_Enabled = settings.getSetting('login_Enabled')      # 'true'/'false' strings
debug_Enabled = settings.getSetting('debug_Enabled')
http_log_Enable = settings.getSetting('http_log_Enable')
osdInfo_Enabled = settings.getSetting('popup_Enabled')
epgInfo_Enabled = settings.getSetting('popup_EPGinfo')
extra_streamSRV = settings.getSetting('extra_streamSRV')
# Balancer-served programme slugs excluded from normal handling elsewhere.
hiddenProgrammes = ['discoverye', 'tv5mondee', 'tlce', 'travelmixchannele', 'eentertainmente', 'connectmedia']
# Digi-Online / Digi24 endpoints.
digiMaster = 'balancer.digi24.ro'
keyMaster = 'http://balancer.digi24.ro/streamer/make_key.php'
digiwebSite = 'www.digi-online.ro'
digiURL = 'http://www.digi-online.ro'
loginURL = 'http://www.digi-online.ro/xhr-login.php'
# Spoofed browser identity; device_id encodes the same browser/OS values.
userAgent = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36'
browser='chrome'
deviceModel = '61'
deviceOS='macintel'
device_id = 'chrome_61_macintel_333806a4cf23e251087b9da0892b177c_PCBROWSER'
# Work files inside the addon's resources directory.
myLogFile = os.path.join(settings.getAddonInfo('path'), 'resources', 'plugin_video_digi-online.log')
myCookieFile = os.path.join(settings.getAddonInfo('path'), 'resources', 'cookie.txt')
myPlayFile = os.path.join(settings.getAddonInfo('path'), 'resources', 'playlist.m3u8')
stream_Quality = settings.getSetting('quality')
if stream_Quality == '':
    stream_Quality = 'hq'  # default stream quality when the setting is unset
# Artwork shipped with the addon.
search_thumb = os.path.join(settings.getAddonInfo('path'), 'resources', 'media', 'search.png')
movies_thumb = os.path.join(settings.getAddonInfo('path'), 'resources', 'media', 'movies.png')
next_thumb = os.path.join(settings.getAddonInfo('path'), 'resources', 'media', 'next.png')
addon_thumb = os.path.join(settings.getAddonInfo('path'), 'icon.png')
addon_fanart = os.path.join(settings.getAddonInfo('path'), 'fanart.jpg')
# Truncate the log file on every plugin start.
LF = open(myLogFile, 'w+')
LF.write('--- INIT -------------------' + '\n')
LF.close()
def removeSubstr(string, suffix):
    """Truncate *string* just after the first occurrence of *suffix*.

    Despite the name, this keeps everything up to AND including the first
    occurrence of *suffix* and drops the rest.  Fix: the original used
    string.index(), which raised ValueError when *suffix* was absent; now
    the string is returned unchanged in that case.
    """
    position = string.find(suffix)
    if position == -1:  # suffix absent: nothing to truncate
        return string
    return string[:position + len(suffix)]
def trimSubstrEnd(string, prefix):
    """Return the segment of *string* right after the first *prefix*.

    The string is split on every occurrence of *prefix* and the piece
    between the first and second occurrence is returned (i.e. everything
    after the first occurrence when it appears only once).  Fix: the
    original raised IndexError when *prefix* was absent; now an empty
    string is returned instead.
    """
    part = string.split(prefix)
    if len(part) < 2:  # prefix absent
        return ''
    return str(part[1])
def trimSubstr(string, suffix):
    """Return the part of *string* before the first occurrence of *suffix*.

    If *suffix* does not occur, the whole string is returned.
    """
    head = string.split(suffix)[0]
    return str(head)
def setIcon(thumb_file):
    """Return the full path of a channel thumbnail in resources/media.

    Falls back to the generic movies thumbnail if the path cannot be built.
    Fix: the original first computed an upper-cased, space-stripped variant
    of the name and immediately overwrote it — that dead statement is gone.
    """
    try:
        return os.path.join(settings.getAddonInfo('path'), 'resources', 'media', thumb_file)
    except Exception:  # narrowed from a bare except; keep the best-effort fallback
        return movies_thumb
def log_MyVars():
    """Append the effective addon configuration to the plugin log file.

    Credentials are always written masked.
    """
    lines = [
        '---- MyVars ----------------' + '\n',
        "login_User: '" + "********" + '\'\n',
        "login_Password: '" + "********" + '\'\n',
        "Digi-Online login enabled: '" + str(login_Enabled) + '\'\n',
        "OSD Info Popup: '" + str(osdInfo_Enabled) + '\'\n',
        "OSD EPG Info: '" + str(epgInfo_Enabled) + '\'\n',
        "Preferred stream_Quality: '" + stream_Quality + '\'\n',
        "http_log_Enable: '" + str(http_log_Enable) + '\'\n',
        "userAgent: '" + userAgent + '\'\n',
        "device_id: '" + device_id + '\'\n',
        '----------------------------' + '\n',
    ]
    log_file = open(myLogFile, 'a')
    log_file.writelines(lines)
    log_file.close()
def ROOT():
    """Build the top-level channel listing.

    Each addDir call registers one live channel with its stream page URL
    and a thumbnail resolved via setIcon.
    """
    addDir('Digi24', 'http://www.digi-online.ro/tv/digi24/', setIcon('Digi24.png'))
    addDir('digi24.ro', 'http://www.digi24.ro/live/digi24', setIcon('Digi24.png'))
    addDir('B1 TV', 'http://www.digi-online.ro/tv/b1+tv/', setIcon('B1TV.png'))
    addDir('Realitatea TV', 'http://www.digi-online.ro/tv/realitatea+tv/', setIcon('RealitateaTV.png'))
    addDir('Romania TV', 'http://www.digi-online.ro/tv/romania+tv/', setIcon('RomaniaTV.png'))
    addDir('France 24 [EN]', 'http://www.digi-online.ro/tv/france+24/', setIcon('France24.png'))
    addDir('TV5 Monde [FR]', 'http://www.digi-online.ro/tv/tv5+monde/', setIcon('tv5monde.png'))
    addDir('CNN [EN]', 'http://www.digi-online.ro/tv/cnn/', setIcon('CNN.png'))
    # Disabled balancer/edge stream entries kept for reference:
    #if settings.getSetting('75') == 'true':
    #addDir('TV5 Monde [FR]', 'http://' + str(extra_streamSRV) +'/digiedge2/tv5mondee' + str(stream_Quality) +'/index.m3u8?is=75&src=app&t=00000000000000000000000000000000', setIcon('tv5monde.png'))
    #if settings.getSetting('71') == 'true':
    #addDir('Discovery Channel', 'http://' + str(extra_streamSRV) +'/digiedge2/discoverye' + str(stream_Quality) +'/index.m3u8?is=71&src=app&t=00000000000000000000000000000000', setIcon('DiscoveryChannel.png'))
    #if settings.getSetting('72') == 'true':
    #addDir('TLC Entertainment', 'http://' + str(extra_streamSRV) +'/digiedge2/tlce' + str(stream_Quality) +'/index.m3u8?is=72&src=app&t=00000000000000000000000000000000', setIcon('TLC.png'))
    #if settings.getSetting('73') == 'true':
    #addDir('Epop Entertainment', 'http://' + str(extra_streamSRV) +'/digiedge2/eentertainmente' + str(stream_Quality) +'/index.m3u8?is=73&src=app&t=00000000000000000000000000000000', setIcon('tv.png'))
    #if settings.getSetting('74') == 'true':
    #addDir('Travel Mix Channel', 'http://' + str(extra_streamSRV) +'/travelmixchannele' + str(stream_Quality) +'/index.m3u8?is=74&src=app&t=00000000000000000000000000000000', setIcon('tv.png'))
    addDir('Travel Channel', 'http://www.digi-online.ro/tv/travel+channel/', setIcon('TravelChannel.png'))
    addDir('Digi Life', 'http://www.digi-online.ro/tv/digi+life/', setIcon('DigiLife.png'))
    addDir('Paprika TV', 'http://www.digi-online.ro/tv/tv+paprika/', setIcon('PaprikaTV.png'))
    addDir('Digi World', 'http://www.digi-online.ro/tv/digi+world/', setIcon('DigiWorld.png'))
    addDir('Viasat Explorer', 'http://www.digi-online.ro/tv/viasat+explorer/', setIcon('ViasatExplore.png'))
    addDir('Discovery Channel', 'http://www.digi-online.ro/tv/discovery+channel/', setIcon('DiscoveryChannel.png'))
    addDir('National Geographic', 'http://www.digi-online.ro/tv/national+geographic/', setIcon('NatGeographic.png'))
    addDir('History Channel', 'http://www.digi-online.ro/tv/history+channel/', setIcon('HistoryChannel.png'))
    addDir('Viasat History', 'http://www.digi-online.ro/tv/viasat+history/', setIcon('ViasatHistory.png'))
    addDir('National Geographic Wild', 'http://www.digi-online.ro/tv/national+geographic+wild/', setIcon('NatGeoWild.png'))
    addDir('BBC Earth', 'http://www.digi-online.ro/tv/bbc+earth/', setIcon('BBC_Earth.png'))
    addDir('Digi Animal World', 'http://www.digi-online.ro/tv/digi+animal+world/', setIcon('DigiAnimalWorld.png'))
    addDir('Viasat Nature', 'http://www.digi-online.ro/tv/viasat+nature/', setIcon('ViasatNature.png'))
    addDir('Fishing & Hunting', 'http://www.digi-online.ro/tv/fishing+and+hunting/', setIcon('PVTV.png'))
    addDir('CBS Reality', 'http://www.digi-online.ro/tv/cbs+reality/', setIcon('CBSReality.png'))
    addDir('TLC Entertainment', 'http://www.digi-online.ro/tv/tlc/', setIcon('TLC.png'))
    addDir('Travel Mix', 'http://www.digi-online.ro/tv/travel+mix/', setIcon('TravelMixTV.png'))
    addDir('E Entertainment', 'http://www.digi-online.ro/tv/e+entertainment/', setIcon('EpopEntertainment.png'))
    addDir('AXN', 'http://www.digi-online.ro/tv/axn/', setIcon('AXN.png'))
    addDir('AXN Spin', 'http://www.digi-online.ro/tv/axn+spin/', setIcon('AXN_Spin.png'))
    addDir('AXN White', 'http://www.digi-online.ro/tv/axn+white/', setIcon('AXN_White.png'))
    addDir('AXN Black', 'http://www.digi-online.ro/tv/axn+black/', setIcon('AXN_Black.png'))
    addDir('Film Cafe', 'http://www.digi-online.ro/tv/film+cafe/', setIcon('FilmCafe.png'))
    addDir('Comedy Central', 'http://www.digi-online.ro/tv/comedy+central/', setIcon('ComedyCentral.png'))
    addDir('TNT', 'http://www.digi-online.ro/tv/tnt/', setIcon('TNT2.png'))
    addDir('TV1000', 'http://www.digi-online.ro/tv/tv+1000/', setIcon('TV1000.png'))
    # NOTE(review): original indentation was lost; Digi Film is assumed to be
    # the only login-gated entry (parseInput special-cases it) — confirm which
    # entries belong under this guard.
    if login_Enabled == "true":
        addDir('Digi Film', 'http://www.digi-online.ro/tv/digi+film/', setIcon('DigiFilm.png'))
    addDir('UTV', 'http://www.digi-online.ro/tv/utv/', setIcon('UTV.png'))
    addDir('Music Channel', 'http://www.digi-online.ro/tv/music+channel/', setIcon('MusicChannel.png'))
    addDir('Kiss TV', 'http://www.digi-online.ro/tv/kiss+tv/', setIcon('KissTV.png'))
    addDir('HitMusic Channel','http://www.digi-online.ro/tv/hit+music+channel/', setIcon('HitMusicChannel.png'))
    addDir('Mezzo','http://www.digi-online.ro/tv/mezzo/', setIcon('MezzoTV.png'))
    addDir('Slager TV [HU]','http://www.digi-online.ro/tv/slager+tv/', setIcon('SlagerTV.png'))
    addDir('Disney Channel', 'http://www.digi-online.ro/tv/disney+channel/', setIcon('DisneyChannel.png'))
    addDir('Megamax', 'http://www.digi-online.ro/tv/megamax/', setIcon('Megamax.png'))
    addDir('Nickelodeon', 'http://www.digi-online.ro/tv/nickelodeon/', setIcon('Nickelodeon.png'))
    addDir('Minimax', 'http://www.digi-online.ro/tv/minimax/', setIcon('Minimax.png'))
    addDir('Disney Junior', 'http://www.digi-online.ro/tv/disney+junior/', setIcon('DisneyJunior.png'))
    addDir('Cartoon Network', 'http://www.digi-online.ro/tv/cartoon+network/', setIcon('CartoonNetw.png'))
    addDir('Boomerang', 'http://www.digi-online.ro/tv/boomerang/', setIcon('Boomerang.png'))
    addDir('Davinci Learning', 'http://www.digi-online.ro/tv/davinci+learning/', setIcon('DaVinciLearning.png'))
    addDir('DigiSport 1', 'http://www.digi-online.ro/tv/digisport+1/', setIcon('DigiSport1.png'))
    addDir('DigiSport 2', 'http://www.digi-online.ro/tv/digisport+2/', setIcon('DigiSport2.png'))
    addDir('DigiSport 3', 'http://www.digi-online.ro/tv/digisport+3/', setIcon('DigiSport3.png'))
    addDir('DigiSport 4', 'http://www.digi-online.ro/tv/digisport+4/', setIcon('DigiSport4.png'))
    addDir('EuroSport 1', 'http://www.digi-online.ro/tv/eurosport/', setIcon('EuroSport1.png'))
    addDir('EuroSport 2', 'http://www.digi-online.ro/tv/eurosport+2/', setIcon('EuroSport2.png'))
    addDir('TVR 1', 'http://www.digi-online.ro/tv/tvr+1/', setIcon('TVR1.png'))
    addDir('TVR 2', 'http://www.digi-online.ro/tv/tvr+2/', setIcon('TVR2.png'))
    addDir('Digi24 Oradea', 'http://www.digi-online.ro/tv/digi24+oradea/', setIcon('Digi24.png'))
    addDir('Digi24 Brasov', 'http://www.digi-online.ro/tv/digi24+brasov/', setIcon('Digi24.png'))
    addDir('Digi24 Cluj', 'http://www.digi24.ro/live/digi24-cluj-napoca', setIcon('Digi24.png'))
    #addDir('M1', 'https://c402-node62-cdn.connectmedia.hu/1100/746f4587970e6a9d1d77231922604086/5a19fb6f/05.m3u8', setIcon('tv.png'))
def addDir(name, url, iconimage):
    """Append one playable channel entry to the Kodi directory listing.

    Builds a plugin:// callback URL carrying the channel name, page URL and
    thumbnail, then registers it with xbmcplugin.  Returns the result of
    addDirectoryItem.
    """
    iconimage = urllib.unquote(urllib.unquote(iconimage))
    plugin_url = (sys.argv[0]
                  + "?url=" + urllib.quote_plus(url)
                  + "&name=" + urllib.quote_plus(name)
                  + "&thumb=" + urllib.quote_plus(iconimage))
    entry = xbmcgui.ListItem(name, iconImage=movies_thumb, thumbnailImage=iconimage)
    entry.setInfo('video', {
        'type': 'Video',
        'genre': 'Live Stream',
        'title': name,
        'playcount': '0',
    })
    ok = xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]), url=plugin_url, listitem=entry)
    if debug_Enabled == 'true':
        log_file = open(myLogFile, 'a')
        log_file.write("addDir: '" + name + "', '" + url + "', '" + iconimage + '\'\n')
        log_file.close()
    return ok
def getParams():
    """Parse the plugin query string (sys.argv[2]) into a dict.

    Returns an empty dict when no parameters are present.  Fixes over the
    original: (1) the fallback return value was an empty *list*, which
    breaks dict-style callers; (2) the trailing-slash trim was applied to a
    variable that was never used again and cut TWO characters instead of
    one — the slash is now stripped from the string actually parsed.
    """
    param = {}
    paramstring = sys.argv[2]
    if len(paramstring) >= 2:
        # Drop the leading '?' and any trailing '/' Kodi may append.
        cleanedparams = paramstring.replace('?', '').rstrip('/')
        for pair in cleanedparams.split('&'):
            splitparams = pair.split('=')
            if len(splitparams) == 2:
                param[splitparams[0]] = splitparams[1]
    #-----------------------------------------------------------------------------------------------------------
    #'url': 'http%3A%2F%2Fwww.digi-online.ro%2Ftv%2Frealitatea%2Btv%2F', 'name': 'Realitatea+TV'
    #-----------------------------------------------------------------------------------------------------------
    if debug_Enabled == 'true':
        LF = open(myLogFile, 'a')
        LF.write("getParams: '" + str(param) + '\'\n')
        LF.close()
    return param
def makeCookie(name, value, domain):
    """Build a non-secure session cookie scoped to *domain* at path '/'."""
    cookie_attrs = dict(
        version=0,
        name=name,
        value=value,
        port=None,
        port_specified=False,
        domain=domain,
        domain_specified=True,
        domain_initial_dot=False,
        path="/",
        path_specified=True,
        secure=False,
        expires=None,
        discard=False,
        comment=None,
        comment_url=None,
        rest=None,
    )
    return cookielib.Cookie(**cookie_attrs)
def processLink(url):
    """Fetch *url* and return the response body, or False on failure.

    For digi-online.ro channel pages this attaches the spoofed device_id
    cookie via the shared jar and, when login is enabled, performs the XHR
    login first, updating the module-level uloggedIN / sessionID state from
    the 'sid' cookie.
    """
    global myCookieJar
    global uloggedIN
    global sessionID
    # Undo HTML entity escaping in the URL (e.g. '&amp;' -> '&').
    f = HTMLParser.HTMLParser()
    url = f.unescape(url)
    if debug_Enabled == 'true':
        # NOTE(review): LF stays open for the rest of the call; the close at
        # the bottom of this function is unreachable (see final comment).
        LF = open(myLogFile, 'a')
        LF.write("processLink parse URL: '" + url + '\'\n')
    if "www.digi-online.ro/tv/" in url:
        # Channel page: send the device_id cookie with the shared jar.
        #myCookieJar.set_cookie(makeCookie("cookie_desclimer", "true", digiwebSite))
        #myCookieJar.set_cookie(makeCookie("_ga", "GA1.2.2001247683.1507822507", ".digi.online.ro"))
        #myCookieJar.set_cookie(makeCookie("_gid", "GA1.2.1402381958.1507822507", ".digi.online.ro"))
        #myCookieJar.set_cookie(makeCookie("_gat", "1", ".digi.online.ro"))
        myCookieJar.set_cookie(makeCookie("device_id", device_id, digiwebSite))
        urlopener = urllib2.build_opener(urllib2.HTTPCookieProcessor(myCookieJar))
    else:
        urlopener = urllib2.build_opener(urllib2.HTTPCookieProcessor())
    ## LOGIN
    if login_Enabled == 'true' and "www.digi-online.ro/tv/" in url:
        # Headers mimicking the site's own XHR login request.
        urlopener.addheaders = [
            ('Host', digiwebSite),
            ('Accept', '*/*'),
            ('Origin', digiURL),
            ('X-Requested-With', 'XMLHttpRequest'),
            ('User-Agent', userAgent),
            ('Content-type', 'application/x-www-form-urlencoded'),
            ('Referer', digiURL),
            ('Accept-Encoding', 'identity'),
            ('Accept-Language', 'en-ie'),
            ('Connection', 'close')
        ]
        logindata = urllib.urlencode({
            'user': login_User,
            'password': login_Password,
            'browser': browser,
            'model': deviceModel,
            'os': deviceOS
        })
        try:
            httpPost = urlopener.open(loginURL, logindata)
            response = httpPost.read()
            if debug_Enabled == 'true':
                LF.write("processLink HTTP POST: '" + loginURL + '\'\n')
                LF.write("processLink HTTP/1.1 200 OK: '" + response + '\'\n')
            # The login endpoint answers the literal string 'true' on success.
            if str(response) == 'true':
                uloggedIN = True
                for cookie in myCookieJar:
                    #print cookie.name, cookie.value, cookie.domain #etc etc
                    if str(cookie.name) == 'sid':
                        sessionID = str(cookie.value)
            else:
                xbmcgui.Dialog().ok('Login Error', response)
        except:
            xbmcgui.Dialog().ok('HTTP POST Error', 'Could not access ' + str(loginURL))
            errMsg1="processLink HTTP POST error '" + loginURL + '\'\n'
            pass
        if debug_Enabled == 'true':
            # NOTE(review): errMsg1 is only bound on the exception path above,
            # so this write looks like a NameError after a successful POST —
            # confirm intended placement.
            LF.write(errMsg1)
            LF.write("processLink uloggedIN: '" + str(uloggedIN) + '\'\n')
            LF.write("processLink sessionID: '" + str(sessionID) + '\'\n')
    ## END LOGIN
    ## Load URL
    urlopener.addheaders = [
        ('Host', digiwebSite),
        ('Upgrade-Insecure-Requests', '1'),
        ('User-Agent', userAgent),
        ('Accept', 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8'),
        ('Referer', 'http://www.digi-online.ro/tv/'),
        ('Accept-Encoding', 'identity'),
        ('Accept-Language', 'en-ie'),
        ('Connection', 'keep-alive')
    ]
    ## Load Page
    try:
        httpGet = urlopener.open(url)
        link = httpGet.read()
        ## List cookies
        if debug_Enabled == 'true':
            LF.write("processLink HTTP GET '" + url + '\'\n')
            for cookie in (myCookieJar):
                LF.write("processLink cookie: " + str(cookie) + '\n')
            if http_log_Enable == 'true':
                LF.write("processLink HTTP/1.1 200 OK: --- LINK --- ---" + '\n')
                LF.write(link + '\n')
                LF.write("processLink: --------- END LINK -----" + '\n')
        return link
    except:
        return False
    # NOTE(review): unreachable — both branches above return, so the debug
    # log handle opened earlier is never closed here.
    if debug_Enabled == 'true':
        LF.write('----------------------------' + '\n')
        LF.close()
def parseInput(url):
 """Resolve *url* to a playable stream URL and start playback in Kodi.

 Three cases are handled: hidden programmes (played directly), the
 DIGI FILM HTTPS API, and regular channels resolved via the balancer.
 Reads and mutates the module globals myCookieJar and httpURLopener;
 side effects include dialogs, notifications, playback and debug-log
 writes.
 """
 global myCookieJar
 global httpURLopener
 result = None
 item = None
 infos = None
 match = None
 errMsg1 = ''
 ## if parsed URL is one of the hidden DIGI-Online/RDS programmes
 for prog in (hiddenProgrammes):
  if prog in url:
   result = url
   httpURLopener = urllib2.build_opener()
   match = [prog]
 if debug_Enabled == 'true':
  LF = open(myLogFile, 'a')
  LF.write("parseInput URL: '" + url + '\'\n')
  # NOTE(review): 'prog' leaks from the loop above, so this line logs
  # the last hidden programme even when none matched the URL.
  LF.write("parseInput hiddenProgrammes: '" + prog + '\'\n')
 if result is None:
  # Not a hidden programme: fetch the channel page and extract the
  # balancer "scope" name for the requested channel.
  link = processLink(url)
  if epgInfo_Enabled == 'true':
   getEPGdata(link)
  ## Case 1: "scope":"digi24" (www.digi24.ro)
  if "www.digi24.ro/live" in url:
   match = re.compile('"scope":"(.+?)"').findall(link)
  ## Case 2: data-balancer-scope-name="utv" (www.digi-online.ro)
  elif "www.digi-online.ro/tv/" in url:
   match = re.compile('data-balancer-scope-name="(.+?)"').findall(link)
  if len(match) > 0:
   print match
  else:
   # Scope not found -- fall back to 'digi24' and warn the user.
   match = ['digi24']
   print match
   xbmcgui.Dialog().ok('Error', 'Could not access ' + url)
   errMsg1='\n' + "parseInput Error: Could not access '" + url + '\'\n'
 if "http://www.digi-online.ro/tv/digi+film/" in url:
  # DIGI FILM: the stream URL comes from the HTTPS API (TLSv1.1)
  # instead of the balancer used for the other channels.
  xbmcgui.Dialog().ok('Error', 'DIGI FILM not yet implemented')
  ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1_1)
  httpURLopener = urllib2.build_opener(urllib2.HTTPCookieProcessor(myCookieJar), urllib2.HTTPSHandler(context=ctx))
  httpURLopener.addheaders = [
   ('Host', digiwebSite),
   ('Accept', '*/*'),
   ('Origin', digiURL),
   ('User-Agent', userAgent),
   ('Referer', url),
   ('Accept-Encoding', 'identity'),
   ('Accept-Language', 'en-GB,en;q=0.5'),
   ('X-Requested-With', 'XMLHttpRequest'),
   ('Content-Type', 'application/x-www-form-urlencoded; charset=UTF-8'),
   ('Connection', 'close')
  ]
  link = 'http://www.digi-online.ro/xhr-gen-stream.php'
  formdata = urllib.urlencode({'scope': 'digifilm'})
  httpGet = httpURLopener.open(link, formdata)
  response = httpGet.read()
  if debug_Enabled == 'true':
   LF.write(errMsg1)
   for cookie in (myCookieJar):
    LF.write("parseInput cookie: " + str(cookie) + '\n')
   LF.write("parseInput HTTP POST: '" + link + ' ' + formdata + '\'\n')
   LF.write("parseInput HTTP/1.1 200 OK: '" + response + '\'\n')
  httpURLopener.addheaders = [
   ('Host', 'digiapis.rcs-rds.ro'),
   ('Accept', 'application/json, text/javascript, */*; q=0.01'),
   ('Origin', digiURL),
   ('User-Agent', userAgent),
   ('Referer', 'http://www.digi-online.ro/digifilm-player'),
   ('Accept-Encoding', 'identity'),
   ('Accept-Language', 'en-GB,en;q=0.5'),
   ('Connection', 'close')
  ]
  sslurl = 'https://digiapis.rcs-rds.ro/digionline/api/v11/streams_l.php?action=getStream&id_stream=7&quality=' + stream_Quality + '&id_device=' + device_id + '&platform=Browser&version_platform='+ deviceOS + '_' + browser + '_' + deviceModel + '&version_app=1.0.0&cd=0'
  #ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1_1)
  #response = urllib2.urlopen(sslurl, context=ctx)
  httpGet = httpURLopener.open(sslurl)
  mydata = httpGet.read()
  # Crude JSON scrape: strip backslashes/quotes and take the
  # stream_url value out of the first comma-separated field.
  mydata = mydata.replace('\\', '')
  split = mydata.split(',')
  result = str(split[0])
  result = result.replace('"', '')
  result = result.replace('{stream_url:', '')
  if "http://" not in result:
   result = "".join(("http:", result))
  if debug_Enabled == 'true':
   LF.write(errMsg1)
   LF.write("parseInput HTTPS GET: '" + sslurl + '\'\n')
   LF.write("parseInput HTTPS OK (list): '" + mydata + '\'\n')
   LF.write("parseInput result: '" + result + '\'\n')
   LF.write("----------------------------"+'\n')
 elif result is None and match is not None:
  # Regular channel: ask the balancer for the HLS URL of the scope.
  httpURLopener = urllib2.build_opener(urllib2.HTTPCookieProcessor(myCookieJar))
  httpURLopener.addheaders = [
   ('Host', digiMaster),
   ('Accept', '*/*'),
   ('Origin', digiURL),
   ('User-Agent', userAgent),
   ('Referer', url),
   ('Accept-Encoding', 'identity'),
   ('Accept-Language', 'en-GB,en;q=0.5'),
   ('Connection', 'close')
  ]
  httpGet = httpURLopener.open(keyMaster)
  myKey = httpGet.read()
  ## http://balancer.digi24.ro/streamer.php?&scope=digi24brasov&key=980cd632c5f0000df058486ff2df7e35&outputFormat=json&type=hls&quality=hq
  link = 'http://balancer.digi24.ro/streamer.php?&scope=' + match[0] + '&key=' + myKey + '&outputFormat=json&type=hls&quality=' + str(stream_Quality)
  if debug_Enabled == 'true':
   LF.write(errMsg1)
   LF.write("parseInput scope: '" + str(match) + '\'\n')
   LF.write("parseInput HTTP GET: '" + keyMaster + '\'\n')
   LF.write("parseInput HTTP/1.1 200 OK (key): '" + myKey + '\'\n')
   for cookie in (myCookieJar):
    LF.write("processLink cookie: " + str(cookie) + '\n')
  if login_Enabled == 'true':
   # Presumably registers the scope with the logged-in session before
   # asking the balancer for the stream -- TODO confirm.
   slink = 'http://www.digi-online.ro/xhr-gen-stream.php'
   formdata = urllib.urlencode({'scope': match[0]})
   httpURLopener.addheaders = [
    ('X-Requested-With', 'XMLHttpRequest')
   ]
   httpGet = httpURLopener.open(slink, formdata)
   response = httpGet.read()
   if debug_Enabled == 'true':
    LF.write("parseInput HTTP POST: '" + slink + ' ' + formdata +'\'\n')
    LF.write("parseInput HTTP/1.1 200 OK: '" + response + '\'\n')
  try:
   file = httpURLopener.open(link).read()
   infos = json.loads(file)
   result = infos['file']
   if "http://" not in result:
    result = "".join(("http:", result))
  except:
   xbmcgui.Dialog().ok('Error', 'Could not access ' + url)
   errMsg1="parseInput: Could not access '" + url + '\'\n'
  if debug_Enabled == 'true':
   LF.write(errMsg1)
   LF.write("parseInput HTTP GET: '" + link + '\'\n')
   LF.write("parseInput HTTP/1.1 200 OK (json): '" + str(infos) + '\'\n')
   LF.write("parseInput result: '" + result + '\'\n')
   LF.write("----------------------------"+'\n')
 ## Build ListItem
 if result is not None:
  try:
   item = xbmcgui.ListItem(path=result, iconImage=addon_thumb, thumbnailImage=nowPlayingThumb)
   itemInfo = {
    'type': 'Video',
    'genre': 'Live Stream',
    'title': nowPlayingTitle,
    'playcount': '0'
   }
   item.setInfo('video', itemInfo)
  except:
   xbmcgui.Dialog().ok('Error', 'Could not access media')
   errMsg1="parseInput: Could not access '" + result + '\'\n'
 ## Play stream
 if item is not None and result is not None:
  if debug_Enabled == 'true':
   LF.write(errMsg1)
   LF.write("xbmc.Player().play(" + result + "," + str(item) + ")" + '\n')
  xbmcplugin.setContent(int(sys.argv[1]), 'movies')
  #xbmc.Player().play(result)
  xbmc.Player().play(result, item)
  if epgInfo_Enabled == 'true':
   # Prefer the scraped EPG titles in the notification when available.
   if infoEPGnowP == '':
    xbmc.executebuiltin("Notification(Digi-Online, " + nowPlayingTitle + ")")
   else:
    xbmc.executebuiltin("Notification(" + infoEPGnowP + ", " + '\n\n' + infoEPGnext + ")")
  elif osdInfo_Enabled == 'true':
   xbmc.executebuiltin("Notification(Digi-Online, " + nowPlayingTitle + ")")
 if debug_Enabled == 'true':
  LF.write(errMsg1)
  savePlayList(result)
  LF.close()
def savePlayList(url):
 """Download the HLS playlist behind *url* and save a local copy.

 Writes a rewritten copy of the master/variant playlist to myPlayFile,
 with every .ts segment line prefixed by the playlist's origin URL.
 Side effects only (file writes, HTTP GETs, optional debug logging);
 returns nothing.
 """
 global httpURLopener
 PF = open(myPlayFile, 'w+')
 if "http://" not in url:
  url = "".join(("http:", url))
 if debug_Enabled == 'true':
  LF = open(myLogFile, 'a')
  LF.write("savePlayList URL: '" + url + '\'\n')
 if "index.m3u8" in url:
  url = removeSubstr(url, 'index.m3u8')
 response = httpURLopener.open(url)
 mydata = response.read()
 if debug_Enabled == 'true':
  LF.write("savePlayList HTTP GET: '" + url + '\'\n')
  LF.write("savePlayList HTTP/1.1 200 OK: \n" + mydata + '\n')
 PF.write(re.sub('([_A-Za-z0-9.]+).m3u8', '', mydata) + '\n')
 if ".m3u8" in mydata:
  # Master playlist: fetch the first variant and rewrite its segment
  # lines to absolute URLs.
  variant = str((re.compile('(.+?.m3u8)').findall(mydata))[0])
  origin = url.replace('index.m3u8', '')
  playlist = origin + variant
  response = httpURLopener.open(playlist)
  mydata = response.read()
  if debug_Enabled == 'true':
   LF.write("savePlayList HTTP GET: '" + playlist + '\'\n')
   LF.write("savePlayList HTTP/1.1 200 OK: \n" + mydata + '\n')
  mydata = mydata.replace('#EXTM3U', '')
  mydata = mydata.replace('#EXT-X-VERSION:3', '')
  for line in mydata.split('\n'):
   if ".ts" in line:
    nline = origin + line
    mydata = mydata.replace(line, nline)
  PF.write(mydata + '\n')
 # BUG FIX: was `PF.close` (a bare attribute access) -- the playlist
 # file was never actually closed/flushed.
 PF.close()
 if debug_Enabled == 'true':
  LF.write('----------------------------' + '\n')
  LF.close()
def getEPGdata(link):
 """Scrape 'now playing' / 'up next' EPG info from the channel page.

 Parses the epg-data HTML attribute in *link* (the page source) and
 fills the module globals infoEPGnowP and infoEPGnext with
 "title\\nHH:MM - HH:MM" strings. Best-effort: parsing failures leave
 the globals untouched.
 """
 global infoEPGnowP
 global infoEPGnext
 if '<div class="info" epg-data=' in link:
  cruft = ["[", "]", "'", "{", "}", "start:", "stop:", "title:"]
  # The attribute value has its double quotes HTML-encoded as &quot;
  # (a literal '"' would end the attribute); strip the entity first.
  # BUG FIX: the previous literal here was corrupted (an unterminated
  # quote sequence) and could not have parsed.
  myEPGInfo = str(re.compile('<div class="info" epg-data="(.+?)"').findall(link)).replace("&quot;", "")
  for token in cruft:
   myEPGInfo = myEPGInfo.replace(token, "")
  if len(myEPGInfo) > 0:
   try:
    # Expected layout after cleanup: title, start, stop, title, start,
    # stop, ... with start/stop as Unix timestamps.
    epgscrape = myEPGInfo.split(',')
    infoEPGnowP = str(epgscrape[0]) + '\n' + time.strftime("%H:%M", time.localtime(int(epgscrape[1]))) + " - " + time.strftime("%H:%M", time.localtime(int(epgscrape[2])))
    infoEPGnext = str(epgscrape[3]) + '\n' + time.strftime("%H:%M", time.localtime(int(epgscrape[4]))) + " - " + time.strftime("%H:%M", time.localtime(int(epgscrape[5])))
   except:
    # Deliberate best-effort: malformed EPG data is simply ignored.
    pass
  if debug_Enabled == 'true':
   LF = open(myLogFile, 'a')
   LF.write('----------------------------' + '\n')
   LF.write("getEPGdata infoEPGnowP: '" + infoEPGnowP.replace("\n", " ") + '\'\n')
   LF.write("getEPGdata infoEPGnext: '" + infoEPGnext.replace("\n", " ") + '\'\n')
   LF.write('----------------------------' + '\n')
   LF.close()
#### RUN Addon ###
# Plugin entry point: decode the routing parameters Kodi passed in
# sys.argv, then either render the channel list (ROOT) or resolve and
# play the requested stream (parseInput).
params = getParams()
url = None
nowPlayingThumb = None
nowPlayingTitle = None
httpURLopener = None
myCookieJar = cookielib.CookieJar()
uloggedIN = False;
sessionID = ''
infoEPGnowP = ''
infoEPGnext = ''
# Each parameter is optional; missing ones fall back to a default.
try:
 url = urllib.unquote_plus(params["url"])
except:
 pass
try:
 nowPlayingTitle = urllib.unquote_plus(params["name"])
except:
 nowPlayingTitle = str(url)
try:
 nowPlayingThumb = urllib.unquote_plus(params["thumb"])
except:
 nowPlayingThumb = movies_thumb
if debug_Enabled == 'true':
 log_MyVars()
if url is None or len(url) < 1:
 ROOT()
else:
 parseInput(url)
xbmcplugin.endOfDirectory(int(sys.argv[1]))
####################################################################################################
| |
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from uuid import uuid4
from apiclient.discovery import build
from apiclient import errors
from airflow.contrib.hooks.gcp_api_base_hook import GoogleCloudBaseHook
def _format_subscription(project, subscription):
return 'projects/{}/subscriptions/{}'.format(project, subscription)
def _format_topic(project, topic):
return 'projects/{}/topics/{}'.format(project, topic)
class PubSubException(Exception):
 """Raised when a Pub/Sub API request fails or hits a fatal state."""
 pass
class PubSubHook(GoogleCloudBaseHook):
 """Hook for accessing Google Pub/Sub.
 The GCP project against which actions are applied is determined by
 the project embedded in the Connection referenced by gcp_conn_id.
 All methods wrap apiclient HttpErrors in :class:`PubSubException`.
 """
 def __init__(self, gcp_conn_id='google_cloud_default', delegate_to=None):
  super(PubSubHook, self).__init__(gcp_conn_id, delegate_to=delegate_to)
 def get_conn(self):
  """Returns an authorized Pub/Sub service object.
  :rtype: apiclient.discovery.Resource
  """
  http_authorized = self._authorize()
  return build('pubsub', 'v1', http=http_authorized)
 def publish(self, project, topic, messages):
  """Publishes messages to a Pub/Sub topic.
  :param project: the GCP project ID in which to publish
  :type project: string
  :param topic: the Pub/Sub topic to which to publish; do not
      include the ``projects/{project}/topics/`` prefix.
  :type topic: string
  :param messages: messages to publish; if the data field in a
      message is set, it should already be base64 encoded.
  :type messages: list of PubSub messages; see
      http://cloud.google.com/pubsub/docs/reference/rest/v1/PubsubMessage
  :raises PubSubException: if the publish request fails
  """
  body = {'messages': messages}
  full_topic = _format_topic(project, topic)
  request = self.get_conn().projects().topics().publish(
   topic=full_topic, body=body)
  try:
   request.execute()
  except errors.HttpError as e:
   raise PubSubException(
    'Error publishing to topic {}'.format(full_topic), e)
 def create_topic(self, project, topic, fail_if_exists=False):
  """Creates a Pub/Sub topic, if it does not already exist.
  :param project: the GCP project ID in which to create
      the topic
  :type project: string
  :param topic: the Pub/Sub topic name to create; do not
      include the ``projects/{project}/topics/`` prefix.
  :type topic: string
  :param fail_if_exists: if set, raise an exception if the topic
      already exists
  :type fail_if_exists: bool
  """
  service = self.get_conn()
  full_topic = _format_topic(project, topic)
  try:
   service.projects().topics().create(
    name=full_topic, body={}).execute()
  except errors.HttpError as e:
   # Status code 409 indicates that the topic already exists.
   if str(e.resp['status']) == '409':
    message = 'Topic already exists: {}'.format(full_topic)
    self.log.warning(message)
    if fail_if_exists:
     raise PubSubException(message)
   else:
    raise PubSubException(
     'Error creating topic {}'.format(full_topic), e)
 def delete_topic(self, project, topic, fail_if_not_exists=False):
  """Deletes a Pub/Sub topic if it exists.
  :param project: the GCP project ID in which to delete the topic
  :type project: string
  :param topic: the Pub/Sub topic name to delete; do not
      include the ``projects/{project}/topics/`` prefix.
  :type topic: string
  :param fail_if_not_exists: if set, raise an exception if the topic
      does not exist
  :type fail_if_not_exists: bool
  """
  service = self.get_conn()
  full_topic = _format_topic(project, topic)
  try:
   service.projects().topics().delete(topic=full_topic).execute()
  except errors.HttpError as e:
   # Status code 404 indicates that the topic was not found
   if str(e.resp['status']) == '404':
    message = 'Topic does not exist: {}'.format(full_topic)
    self.log.warning(message)
    if fail_if_not_exists:
     raise PubSubException(message)
   else:
    raise PubSubException(
     'Error deleting topic {}'.format(full_topic), e)
 def create_subscription(self, topic_project, topic, subscription=None,
                         subscription_project=None, ack_deadline_secs=10,
                         fail_if_exists=False):
  """Creates a Pub/Sub subscription, if it does not already exist.
  :param topic_project: the GCP project ID of the topic that the
      subscription will be bound to.
  :type topic_project: string
  :param topic: the Pub/Sub topic name that the subscription will be
      bound to; do not include the ``projects/{project}/topics/``
      prefix.
  :type topic: string
  :param subscription: the Pub/Sub subscription name. If empty, a random
      name will be generated using the uuid module
  :type subscription: string
  :param subscription_project: the GCP project ID where the subscription
      will be created. If unspecified, ``topic_project`` will be used.
  :type subscription_project: string
  :param ack_deadline_secs: Number of seconds that a subscriber has to
      acknowledge each message pulled from the subscription
  :type ack_deadline_secs: int
  :param fail_if_exists: if set, raise an exception if the subscription
      already exists
  :type fail_if_exists: bool
  :return: subscription name which will be the system-generated value if
      the ``subscription`` parameter is not supplied
  :rtype: string
  """
  service = self.get_conn()
  full_topic = _format_topic(topic_project, topic)
  if not subscription:
   subscription = 'sub-{}'.format(uuid4())
  if not subscription_project:
   subscription_project = topic_project
  full_subscription = _format_subscription(subscription_project,
                                           subscription)
  body = {
   'topic': full_topic,
   'ackDeadlineSeconds': ack_deadline_secs
  }
  try:
   service.projects().subscriptions().create(
    name=full_subscription, body=body).execute()
  except errors.HttpError as e:
   # Status code 409 indicates that the subscription already exists.
   if str(e.resp['status']) == '409':
    message = 'Subscription already exists: {}'.format(
     full_subscription)
    self.log.warning(message)
    if fail_if_exists:
     raise PubSubException(message)
   else:
    raise PubSubException(
     'Error creating subscription {}'.format(full_subscription),
     e)
  return subscription
 def delete_subscription(self, project, subscription,
                         fail_if_not_exists=False):
  """Deletes a Pub/Sub subscription, if it exists.
  :param project: the GCP project ID where the subscription exists
  :type project: string
  :param subscription: the Pub/Sub subscription name to delete; do not
      include the ``projects/{project}/subscriptions/`` prefix.
  :type subscription: string
  :param fail_if_not_exists: if set, raise an exception if the
      subscription does not exist
  :type fail_if_not_exists: bool
  """
  service = self.get_conn()
  full_subscription = _format_subscription(project, subscription)
  try:
   service.projects().subscriptions().delete(
    subscription=full_subscription).execute()
  except errors.HttpError as e:
   # Status code 404 indicates that the subscription was not found
   if str(e.resp['status']) == '404':
    message = 'Subscription does not exist: {}'.format(
     full_subscription)
    self.log.warning(message)
    if fail_if_not_exists:
     raise PubSubException(message)
   else:
    raise PubSubException(
     'Error deleting subscription {}'.format(full_subscription),
     e)
 def pull(self, project, subscription, max_messages,
          return_immediately=False):
  """Pulls up to ``max_messages`` messages from Pub/Sub subscription.
  :param project: the GCP project ID where the subscription exists
  :type project: string
  :param subscription: the Pub/Sub subscription name to pull from; do not
      include the ``projects/{project}/subscriptions/`` prefix.
  :type subscription: string
  :param max_messages: The maximum number of messages to return from
      the Pub/Sub API.
  :type max_messages: int
  :param return_immediately: If set, the Pub/Sub API will immediately
      return if no messages are available. Otherwise, the request will
      block for an undisclosed, but bounded period of time
  :type return_immediately: bool
  :return: A list of Pub/Sub ReceivedMessage objects each containing
      an ``ackId`` property and a ``message`` property, which includes
      the base64-encoded message content. See
      https://cloud.google.com/pubsub/docs/reference/rest/v1/\
      projects.subscriptions/pull#ReceivedMessage
  """
  service = self.get_conn()
  full_subscription = _format_subscription(project, subscription)
  body = {
   'maxMessages': max_messages,
   'returnImmediately': return_immediately
  }
  try:
   response = service.projects().subscriptions().pull(
    subscription=full_subscription, body=body).execute()
   return response.get('receivedMessages', [])
  except errors.HttpError as e:
   raise PubSubException(
    'Error pulling messages from subscription {}'.format(
     full_subscription), e)
 def acknowledge(self, project, subscription, ack_ids):
  """Acknowledges messages previously pulled from a subscription.
  :param project: the GCP project ID where the subscription exists
  :type project: string
  :param subscription: the Pub/Sub subscription name on which the
      messages are acknowledged; do not include the
      ``projects/{project}/subscriptions/`` prefix.
  :type subscription: string
  :param ack_ids: List of ReceivedMessage ackIds from a previous pull
      response
  :type ack_ids: list
  """
  service = self.get_conn()
  full_subscription = _format_subscription(project, subscription)
  try:
   service.projects().subscriptions().acknowledge(
    subscription=full_subscription, body={'ackIds': ack_ids}
   ).execute()
  except errors.HttpError as e:
   raise PubSubException(
    'Error acknowledging {} messages pulled from subscription {}'
    .format(len(ack_ids), full_subscription), e)
| |
import datetime
from django import forms
from django.shortcuts import render, redirect, get_object_or_404
from django.core.paginator import Paginator, InvalidPage, EmptyPage
from djangopress.blog.models import Blog, Entry, Tag, Category, Comment, Flag
from django.utils.translation import ugettext as _
from django.conf import settings
from djangopress.core.util import get_client_ip, choose_form
from django.urls import reverse
from django.utils import timezone
# Akismet spam filtering is optional: when the library is not installed
# the import silently no-ops, and the spam check fails (and is swallowed)
# at call time instead.
try:
 import akismet
except:
 pass
def get_blog(blog_slug):
 """Fetch the Blog for *blog_slug* on the current site, or raise Http404."""
 lookup = {"slug": blog_slug, "sites__id__exact": settings.SITE_ID}
 return get_object_or_404(Blog, **lookup)
def get_entries_for_page(paginator, page):
 """Return the entries for *page* from *paginator*.

 Any value that cannot be converted to an int (including None) falls
 back to page 1. EmptyPage/InvalidPage from the paginator propagate to
 the caller, which is expected to handle them.
 """
 try:
  page = int(page)
 except (TypeError, ValueError):
  # Non-numeric page from the query string/URL: show the first page.
  page = 1
 return paginator.page(page)
def index(request, blog_slug, page=1):
 """Front page of a blog: its entries, ten per page."""
 blog = get_blog(blog_slug)
 entries_list = Entry.objects.get_entries(blog=blog)
 paginator = Paginator(entries_list, 10)
 try:
  entries = get_entries_for_page(paginator, page)
 except (EmptyPage, InvalidPage):
  # Out-of-range page: bounce the visitor to the last page.
  if page != 1:
   return redirect(blog.get_absolute_url(paginator.num_pages))
  # NOTE(review): if this exception fires with page == 1, 'entries'
  # is unbound below and the view raises NameError -- confirm this
  # case is unreachable.
 data = {
  "blog": blog,
  "entries": entries,
  "title": blog.name,
  "respond": True,
 }
 return render(request, 'blog/index.html' , data)
def archive(request, blog_slug, year, month=None):
 """Yearly (or monthly, when *month* is given) archive of a blog.

 Entries are filtered on their naive posted date (timezone override
 None) and paginated ten per page via the ?page= query parameter.
 """
 blog = get_blog(blog_slug)
 # "format" picks the date-heading format name used by the template.
 data = {"format": "YEAR_MONTH_FORMAT"}
 year = int(year)
 with timezone.override(None):
  entries_list = Entry.objects.get_entries(blog=blog).filter(posted__year=year)
  if month is None:
   month = 1
   data["format"] = "Y"
  else:
   month = int(month)
   entries_list = entries_list.filter(posted__month=month)
  data["date"] = datetime.date(year=year, month=month, day=1)
 paginator = Paginator(entries_list, 10)
 try:
  entries = get_entries_for_page(paginator, request.GET.get('page', 1))
 except (EmptyPage, InvalidPage):
  # NOTE(review): GET values are strings, so this comparison against
  # the int 1 is True for any explicit ?page= value, including "1".
  if request.GET.get('page', 1) != 1:
   kwargs = {'blog_slug':blog_slug, 'year':year, 'month':month}
   return redirect("%s?page=%s" % (reverse('blog-archive', kwargs=kwargs), paginator.num_pages))
 data.update({
  "blog": blog,
  "entries": entries,
  "title": blog.name,
  "respond": True,
 })
 return render(request, 'blog/date_archive.html' , data)
class CommentForm(forms.ModelForm):
 """Comment form for anonymous visitors; name and e-mail are required."""
 class Meta:
  model = Comment
  fields = ('user_name', 'user_email', 'user_url', 'comment_text')
 # Override the model fields so they are mandatory on this form.
 user_name = forms.CharField(required=True)
 user_email = forms.CharField(required=True)
class CommentUserForm(forms.ModelForm):
 """Comment form for authenticated users; identity comes from request.user."""
 class Meta:
  model = Comment
  fields = ('comment_text', )
def check_askmet_spam(request, entry, comment_form):
 """Return True if Akismet classifies the submitted comment as spam.

 Returns False when the Akismet API is not configured or the key is
 invalid. The *entry* argument is unused but kept for interface
 stability. Errors from the Akismet call itself propagate to the
 caller (which treats any failure as "not spam").
 """
 try:
  api = akismet.Akismet(key=settings.AKISMET_API.get('key'),
                        blog_url=settings.AKISMET_API.get('blog_url'))
 except akismet.ConfigurationError:
  return False
 except akismet.APIKeyError:
  return False
 # NOTE(review): is_authenticated is a property from Django 1.10 and no
 # longer callable on 2.0+ -- confirm the project's Django version.
 if request.user.is_authenticated():
  return api.comment_check(user_ip=get_client_ip(request),
                           user_agent=request.META.get("HTTP_USER_AGENT", ""),
                           referrer=request.META.get("HTTP_REFERER", ""),
                           comment_content=comment_form.cleaned_data["comment_text"],
                           comment_author=request.user.username,
                           comment_author_email=request.user.email,
                           comment_author_url=request.user.profile.homepage)
 # Anonymous commenter: identity fields come from the form itself.
 # BUG FIX: the form field is 'comment_text'; the old key 'message'
 # raised KeyError, which the caller silently swallowed -- effectively
 # disabling the spam check for anonymous comments.
 return api.comment_check(user_ip=get_client_ip(request),
                          user_agent=request.META.get("HTTP_USER_AGENT", ""),
                          referrer=request.META.get("HTTP_REFERER", ""),
                          comment_content=comment_form.cleaned_data["comment_text"],
                          comment_author=comment_form.cleaned_data["user_name"],
                          comment_author_email=comment_form.cleaned_data["user_email"],
                          comment_author_url=comment_form.cleaned_data["user_url"])
def post(request, blog_slug, year, month, day, slug):
 """Display a single blog entry and handle comment submission.

 Looks the entry up by its naive posted date and slug, computes
 previous/next navigation, and on POST validates and stores a comment
 (spam-checked via Akismet when available).
 """
 blog = get_blog(blog_slug)
 with timezone.override(None):
  entry = get_object_or_404(Entry, blog=blog, slug=slug, posted__year=year, posted__month=month, posted__day=day)
 try:
  previous_post = Entry.get_previous_by_posted(entry, blog=blog)
 except Entry.DoesNotExist:
  previous_post = None
 try:
  next_post = Entry.get_next_by_posted(entry, blog=blog)
 except Entry.DoesNotExist:
  next_post = None
 comments = Comment.objects.filter(entry=entry, is_public=True, is_spam=False).order_by('submit_date')
 comment_message = ""
 if entry.comments_open and blog.comments_enabled and request.method == 'POST':
  # choose_form picks CommentUserForm for logged-in users, CommentForm
  # for anonymous visitors.
  comment_form = choose_form(request, CommentUserForm, CommentForm, request.POST)
  if comment_form.is_valid():
   comment = comment_form.save(commit=False)
   # NOTE(review): is_authenticated is a property on Django >= 1.10
   # and not callable on 2.0+ -- confirm the Django version in use.
   if request.user.is_authenticated():
    comment.user = request.user
   comment.ip_address = get_client_ip(request)
   comment.entry = entry
   comment.user_agent = request.META.get("HTTP_USER_AGENT", "")
   try:
    comment.is_spam = check_askmet_spam(request, entry, comment_form)
   except:
    pass # it is not installed, no problem
   comment.save()
   comment_message = "Your comment has been saved."
 else:
  comment_form = choose_form(request, CommentUserForm, CommentForm)
 data = {
  "title": entry.title,
  "entry": entry,
  "respond": False,
  "next": next_post,
  "previous": previous_post,
  "blog": blog,
  "comments": comments,
  "comment_count": comments.count(),
  "comment_form": comment_form,
  "comment_message": comment_message,
 }
 return render(request, "blog/post.html", data)
def tag(request, blog_slug, slug, page=1):
 """List the blog entries carrying the tag *slug*, ten per page."""
 blog = get_blog(blog_slug)
 current_tag = get_object_or_404(Tag, slug=slug, blog=blog)
 tagged_entries = Entry.objects.get_entries(blog=blog).filter(tags__slug=slug)
 paginator = Paginator(tagged_entries, 10)
 try:
  entries = get_entries_for_page(paginator, page)
 except (EmptyPage, InvalidPage):
  # Out-of-range page: send the visitor back to the tag listing.
  if page != 1:
   return redirect(reverse('blog-tag', kwargs={'blog_slug': blog_slug, 'page': page}))
 context = {
  "blog": blog,
  "entries": entries,
  "title": blog.name,
  "respond": True,
  "blog_heading": _("Posts Tagged '%s'") % current_tag.name,
 }
 return render(request, 'blog/category.html', context)
def category(request, blog_slug, slug, page=1):
 """List the blog entries filed under category *slug*, ten per page."""
 blog = get_blog(blog_slug)
 current_category = get_object_or_404(Category, slug=slug, blog=blog)
 category_entries = Entry.objects.get_entries(blog=blog).filter(categories__slug=slug)
 paginator = Paginator(category_entries, 10)
 try:
  entries = get_entries_for_page(paginator, page)
 except (EmptyPage, InvalidPage):
  # Out-of-range page: send the visitor back to the category listing.
  if page != 1:
   return redirect(reverse('blog-category', kwargs={'blog_slug': blog_slug, 'page': page}))
 context = {
  "blog": blog,
  "entries": entries,
  "title": blog.name,
  "respond": True,
  "blog_heading": _("Archive for the '%s' Category") % current_category.name,
 }
 return render(request, 'blog/category.html', context)
def moved(request, blog_slug, post):
 """Permanently redirect an old post id to the entry's canonical URL."""
 blog = get_blog(blog_slug)
 entry = get_object_or_404(Entry, pk=post, blog=blog)
 target = entry.get_absolute_url()
 return redirect(target, permanent=True)
| |
# Copyright (c) 2012 NetApp, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import errno
import os
import time
from os_brick.remotefs import remotefs as remotefs_brick
from oslo_concurrency import processutils as putils
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import units
import six
from cinder import exception
from cinder.i18n import _, _LE, _LI, _LW
from cinder.image import image_utils
from cinder import utils
from cinder.volume import driver
from cinder.volume.drivers import remotefs
# Driver release version reported via the VERSION class attribute below.
VERSION = '1.3.0'
LOG = logging.getLogger(__name__)
# Configuration options registered for the generic NFS backend.
# NOTE(review): the concatenated help strings for nfs_sparsed_volumes
# are missing separating spaces ("space.'If set...") -- confirm and fix
# the literals in a follow-up.
nfs_opts = [
 cfg.StrOpt('nfs_shares_config',
            default='/etc/cinder/nfs_shares',
            help='File with the list of available nfs shares'),
 cfg.BoolOpt('nfs_sparsed_volumes',
             default=True,
             help=('Create volumes as sparsed files which take no space.'
                   'If set to False volume is created as regular file.'
                   'In such case volume creation takes a lot of time.')),
 cfg.FloatOpt('nfs_used_ratio',
              default=0.95,
              help=('Percent of ACTUAL usage of the underlying volume '
                    'before no new volumes can be allocated to the volume '
                    'destination.')),
 cfg.FloatOpt('nfs_oversub_ratio',
              default=1.0,
              help=('This will compare the allocated to available space on '
                    'the volume destination. If the ratio exceeds this '
                    'number, the destination will no longer be valid.')),
 cfg.StrOpt('nfs_mount_point_base',
            default='$state_path/mnt',
            help=('Base dir containing mount points for nfs shares.')),
 cfg.StrOpt('nfs_mount_options',
            default=None,
            help=('Mount options passed to the nfs client. See section '
                  'of the nfs man page for details.')),
 cfg.IntOpt('nfs_mount_attempts',
            default=3,
            help=('The number of attempts to mount nfs shares before '
                  'raising an error. At least one attempt will be '
                  'made to mount an nfs share, regardless of the '
                  'value specified.')),
]
CONF = cfg.CONF
CONF.register_opts(nfs_opts)
class NfsDriver(driver.ExtendVD, remotefs.RemoteFSDriver):
"""NFS based cinder driver.
Creates file on NFS share for using it as block device on hypervisor.
"""
driver_volume_type = 'nfs'
driver_prefix = 'nfs'
volume_backend_name = 'Generic_NFS'
VERSION = VERSION
 def __init__(self, execute=putils.execute, *args, **kwargs):
  """Initialize the NFS driver and its RemoteFs client.

  :param execute: callable used to run commands
      (processutils.execute by default); also handed to the
      RemoteFsClient.
  """
  self._remotefsclient = None
  super(NfsDriver, self).__init__(*args, **kwargs)
  self.configuration.append_config_values(nfs_opts)
  root_helper = utils.get_root_helper()
  # base bound to instance is used in RemoteFsConnector.
  self.base = getattr(self.configuration,
                      'nfs_mount_point_base',
                      CONF.nfs_mount_point_base)
  self.base = os.path.realpath(self.base)
  opts = getattr(self.configuration,
                 'nfs_mount_options',
                 CONF.nfs_mount_options)
  # nas_mount_options, when set, takes precedence over
  # nfs_mount_options.
  nas_mount_options = getattr(self.configuration,
                              'nas_mount_options',
                              None)
  if nas_mount_options is not None:
   LOG.debug('overriding nfs_mount_options with nas_mount_options')
   opts = nas_mount_options
  self._remotefsclient = remotefs_brick.RemoteFsClient(
   'nfs', root_helper, execute=execute,
   nfs_mount_point_base=self.base,
   nfs_mount_options=opts)
  # Presumably enables sparse-aware volume data copies in the remotefs
  # base class -- TODO confirm against RemoteFSDriver.
  self._sparse_copy_volume_data = True
def set_execute(self, execute):
super(NfsDriver, self).set_execute(execute)
if self._remotefsclient:
self._remotefsclient.set_execute(execute)
 def do_setup(self, context):
  """Any initialization the volume driver does while starting.

  Validates the shares config file and the used/oversubscription
  ratios, checks that mount.nfs is installed, and applies the NAS
  security options.

  :param context: request context (passed through to the parent)
  :raises NfsException: missing config file or mount.nfs binary
  :raises InvalidConfigurationValue: bad ratio option values
  """
  super(NfsDriver, self).do_setup(context)
  config = self.configuration.nfs_shares_config
  if not config:
   msg = (_("There's no NFS config file configured (%s)") %
          'nfs_shares_config')
   LOG.warning(msg)
   raise exception.NfsException(msg)
  if not os.path.exists(config):
   msg = (_("NFS config file at %(config)s doesn't exist") %
          {'config': config})
   LOG.warning(msg)
   raise exception.NfsException(msg)
  if not self.configuration.nfs_oversub_ratio > 0:
   msg = _("NFS config 'nfs_oversub_ratio' invalid. Must be > 0: "
           "%s") % self.configuration.nfs_oversub_ratio
   LOG.error(msg)
   raise exception.InvalidConfigurationValue(msg)
  if not ((self.configuration.nfs_used_ratio > 0) and
          (self.configuration.nfs_used_ratio <= 1)):
   msg = _("NFS config 'nfs_used_ratio' invalid. Must be > 0 "
           "and <= 1.0: %s") % self.configuration.nfs_used_ratio
   LOG.error(msg)
   raise exception.InvalidConfigurationValue(msg)
  self.shares = {}  # address : options
  # Check if mount.nfs is installed on this system; note that we don't
  # need to be root to see if the package is installed.
  package = 'mount.nfs'
  try:
   self._execute(package, check_exit_code=False,
                 run_as_root=False)
  except OSError as exc:
   if exc.errno == errno.ENOENT:
    msg = _('%s is not installed') % package
    raise exception.NfsException(msg)
   else:
    raise
  # Now that all configuration data has been loaded (shares),
  # we can "set" our final NAS file security options.
  self.set_nas_security_options(self._is_voldb_empty_at_startup)
def _ensure_share_mounted(self, nfs_share):
mnt_flags = []
if self.shares.get(nfs_share) is not None:
mnt_flags = self.shares[nfs_share].split()
num_attempts = max(1, self.configuration.nfs_mount_attempts)
for attempt in range(num_attempts):
try:
self._remotefsclient.mount(nfs_share, mnt_flags)
return
except Exception as e:
if attempt == (num_attempts - 1):
LOG.error(_LE('Mount failure for %(share)s after '
'%(count)d attempts.'), {
'share': nfs_share,
'count': num_attempts})
raise exception.NfsException(six.text_type(e))
LOG.debug('Mount attempt %(attempt)d failed: %(exc)s.\n'
'Retrying mount ...',
{'attempt': attempt, 'exc': e})
time.sleep(1)
def _find_share(self, volume_size_in_gib):
"""Choose NFS share among available ones for given volume size.
For instances with more than one share that meets the criteria, the
share with the least "allocated" space will be selected.
:param volume_size_in_gib: int size in GB
"""
if not self._mounted_shares:
raise exception.NfsNoSharesMounted()
target_share = None
target_share_reserved = 0
for nfs_share in self._mounted_shares:
if not self._is_share_eligible(nfs_share, volume_size_in_gib):
continue
_total_size, _total_available, total_allocated = \
self._get_capacity_info(nfs_share)
if target_share is not None:
if target_share_reserved > total_allocated:
target_share = nfs_share
target_share_reserved = total_allocated
else:
target_share = nfs_share
target_share_reserved = total_allocated
if target_share is None:
raise exception.NfsNoSuitableShareFound(
volume_size=volume_size_in_gib)
LOG.debug('Selected %s as target nfs share.', target_share)
return target_share
def _is_share_eligible(self, nfs_share, volume_size_in_gib):
"""Verifies NFS share is eligible to host volume with given size.
First validation step: ratio of actual space (used_space / total_space)
is less than 'nfs_used_ratio'. Second validation step: apparent space
allocated (differs from actual space used when using sparse files)
and compares the apparent available
space (total_available * nfs_oversub_ratio) to ensure enough space is
available for the new volume.
:param nfs_share: nfs share
:param volume_size_in_gib: int size in GB
"""
used_ratio = self.configuration.nfs_used_ratio
oversub_ratio = self.configuration.nfs_oversub_ratio
requested_volume_size = volume_size_in_gib * units.Gi
total_size, total_available, total_allocated = \
self._get_capacity_info(nfs_share)
apparent_size = max(0, total_size * oversub_ratio)
apparent_available = max(0, apparent_size - total_allocated)
used = (total_size - total_available) / total_size
if used > used_ratio:
# NOTE(morganfainberg): We check the used_ratio first since
# with oversubscription it is possible to not have the actual
# available space but be within our oversubscription limit
# therefore allowing this share to still be selected as a valid
# target.
LOG.debug('%s is above nfs_used_ratio', nfs_share)
return False
if apparent_available <= requested_volume_size:
LOG.debug('%s is above nfs_oversub_ratio', nfs_share)
return False
if total_allocated / total_size >= oversub_ratio:
LOG.debug('%s reserved space is above nfs_oversub_ratio',
nfs_share)
return False
return True
def _get_mount_point_for_share(self, nfs_share):
"""Needed by parent class."""
return self._remotefsclient.get_mount_point(nfs_share)
def _get_capacity_info(self, nfs_share):
"""Calculate available space on the NFS share.
:param nfs_share: example 172.18.194.100:/var/nfs
"""
run_as_root = self._execute_as_root
mount_point = self._get_mount_point_for_share(nfs_share)
df, _ = self._execute('stat', '-f', '-c', '%S %b %a', mount_point,
run_as_root=run_as_root)
block_size, blocks_total, blocks_avail = map(float, df.split())
total_available = block_size * blocks_avail
total_size = block_size * blocks_total
du, _ = self._execute('du', '-sb', '--apparent-size', '--exclude',
'*snapshot*', mount_point,
run_as_root=run_as_root)
total_allocated = float(du.split()[0])
return total_size, total_available, total_allocated
def _get_mount_point_base(self):
return self.base
def extend_volume(self, volume, new_size):
"""Extend an existing volume to the new size."""
LOG.info(_LI('Extending volume %s.'), volume['id'])
extend_by = int(new_size) - volume['size']
if not self._is_share_eligible(volume['provider_location'],
extend_by):
raise exception.ExtendVolumeError(reason='Insufficient space to'
' extend volume %s to %sG'
% (volume['id'], new_size))
path = self.local_path(volume)
LOG.info(_LI('Resizing file to %sG...'), new_size)
image_utils.resize_image(path, new_size,
run_as_root=self._execute_as_root)
if not self._is_file_size_equal(path, new_size):
raise exception.ExtendVolumeError(
reason='Resizing image file failed.')
def _is_file_size_equal(self, path, size):
"""Checks if file size at path is equal to size."""
data = image_utils.qemu_img_info(path,
run_as_root=self._execute_as_root)
virt_size = data.virtual_size / units.Gi
return virt_size == size
    def set_nas_security_options(self, is_new_cinder_install):
        """Determine the setting to use for Secure NAS options.

        Value of each NAS Security option is checked and updated. If the
        option is currently 'auto', then it is set to either true or false
        based upon if this is a new Cinder installation. The RemoteFS variable
        '_execute_as_root' will be updated for this driver.

        :param is_new_cinder_install: bool indication of new Cinder install
        :raises NfsNoSharesMounted: if no share could be mounted
        """
        doc_html = "http://docs.openstack.org/admin-guide-cloud" \
                   "/blockstorage_nfs_backend.html"
        # The probe below needs a live mount point; mount everything first.
        self._ensure_shares_mounted()
        if not self._mounted_shares:
            raise exception.NfsNoSharesMounted()
        # The first mounted share is used as representative for probing the
        # security settings of the backend.
        nfs_mount = self._get_mount_point_for_share(self._mounted_shares[0])
        self.configuration.nas_secure_file_permissions = \
            self._determine_nas_security_option_setting(
                self.configuration.nas_secure_file_permissions,
                nfs_mount, is_new_cinder_install)
        LOG.debug('NAS variable secure_file_permissions setting is: %s',
                  self.configuration.nas_secure_file_permissions)
        if self.configuration.nas_secure_file_permissions == 'false':
            LOG.warning(_LW("The NAS file permissions mode will be 666 "
                            "(allowing other/world read & write access). "
                            "This is considered an insecure NAS environment. "
                            "Please see %s for information on a secure "
                            "NFS configuration."),
                        doc_html)
        self.configuration.nas_secure_file_operations = \
            self._determine_nas_security_option_setting(
                self.configuration.nas_secure_file_operations,
                nfs_mount, is_new_cinder_install)
        # If secure NAS, update the '_execute_as_root' flag to not
        # run as the root user; run as process' user ID.
        if self.configuration.nas_secure_file_operations == 'true':
            self._execute_as_root = False
        LOG.debug('NAS variable secure_file_operations setting is: %s',
                  self.configuration.nas_secure_file_operations)
        if self.configuration.nas_secure_file_operations == 'false':
            LOG.warning(_LW("The NAS file operations will be run as "
                            "root: allowing root level access at the storage "
                            "backend. This is considered an insecure NAS "
                            "environment. Please see %s "
                            "for information on a secure NAS configuration."),
                        doc_html)
| |
# -*- coding: utf-8 -*-
import boto3
import json
import os
import re
import requests
import six
import threading
from six.moves import range
from girder import events
from girder.models.assetstore import Assetstore
from girder.models.file import File
from girder.models.folder import Folder
from girder.models.upload import Upload
from girder.models.user import User
from girder.utility import assetstore_utilities
from .. import base
from .. import mongo_replicaset
# Default two-part payload used by the upload helpers in UploadTestCase when
# a small (non-largeFile) upload is requested.
Chunk1, Chunk2 = ('hello ', 'world')
def setUpModule():
    """Start the Girder test server with the mock S3 service enabled."""
    base.startServer(mockS3=True)
def tearDownModule():
    """Shut down the Girder test server started in setUpModule."""
    base.stopServer()
def _send_s3_request(req, data=None):
    """Issue an HTTP request described by a Girder S3 request record.

    :param req: dict with 'method', 'url' and optional 'headers' keys.
    :param data: request body to send, if any.
    :returns: the ``requests`` response object.
    :raises Exception: if the (moto) S3 server answers with a non-200 status.
    """
    # Bind the response to its own name; the original rebound ``req``,
    # shadowing the request record and making the code misleading.
    resp = requests.request(
        method=req['method'], url=req['url'], headers=req.get('headers', {}),
        data=data)
    if resp.status_code != 200:
        raise Exception('Moto S3 request error %d: %s' % (resp.status_code, resp.text))
    return resp
class UploadTestCase(base.TestCase):
    """Exercise complete and partial uploads against filesystem, GridFS,
    GridFS-replica-set and S3 assetstores, including deletion of partial
    uploads.
    """

    def setUp(self):
        """Create an admin user, a regular user and pick the user's public
        folder as the upload target.
        """
        base.TestCase.setUp(self)
        admin = {
            'email': 'admin@email.com',
            'login': 'admin',
            'firstName': 'Admin',
            'lastName': 'Admin',
            'password': 'adminpassword',
            'admin': True
        }
        self.admin = User().createUser(**admin)
        user = {
            'email': 'good@email.com',
            'login': 'goodlogin',
            'firstName': 'First',
            'lastName': 'Last',
            'password': 'goodpassword',
            'admin': False
        }
        self.user = User().createUser(**user)
        folders = Folder().childFolders(parent=self.user, parentType='user', user=self.user)
        for folder in folders:
            if folder['public'] is True:
                self.folder = folder

    def _uploadFile(self, name, partial=False, largeFile=False):
        """
        Upload a file either completely or partially.

        :param name: the name of the file to upload.
        :param partial: the number of steps to complete in the uploads: 0
            initializes the upload, 1 uploads 1 chunk, etc. False
            to complete the upload.
        :param largeFile: if True, upload a file that is > 32Mb
        :returns: the upload record which includes the upload id.
        """
        if largeFile:
            chunk1 = '-' * (1024 * 1024 * 32)
            chunk2 = '-' * (1024 * 1024 * 1)
        else:
            chunk1 = Chunk1
            chunk2 = Chunk2
        resp = self.request(
            path='/file', method='POST', user=self.user, params={
                'parentType': 'folder',
                'parentId': self.folder['_id'],
                'name': name,
                'size': len(chunk1) + len(chunk2),
                'mimeType': 'text/plain'
            })
        self.assertStatusOk(resp)
        upload = resp.json
        # NOTE: ``False == 0`` in Python, so the extra ``is not False`` check
        # distinguishes "stop after init" (0) from "complete" (False).
        if partial is not False and partial == 0:
            return upload
        if 's3' not in upload:
            # Non-S3 assetstores take chunks through the /file/chunk endpoint.
            resp = self.request(
                path='/file/chunk', method='POST', user=self.user, body=chunk1, params={
                    'uploadId': upload['_id']
                }, type='text/plain')
            self.assertStatusOk(resp)
            if partial is not False:
                return resp.json
            resp = self.request(
                path='/file/chunk', method='POST', user=self.user, body=chunk2, params={
                    'offset': len(chunk1),
                    'uploadId': upload['_id']
                }, type='text/plain')
            self.assertStatusOk(resp)
            return upload
        # s3 uses a different method for uploading chunks
        # This has no error checking at all
        if not upload['s3']['chunked']:
            # Small files: send everything in one direct S3 request.
            _send_s3_request(upload['s3']['request'], chunk1 + chunk2)
            if partial is not False:
                return
        else:
            # Multipart S3 upload: initiate, then send numbered parts,
            # collecting the ETag of each part for the finalize step.
            chunk1 = chunk1 + chunk2
            s3resp = _send_s3_request(upload['s3']['request'])
            matches = re.search('<UploadId>(.*)</UploadId>', s3resp.text)
            s3uploadId = matches.groups()[0]
            offset = 0
            chunkN = 1
            etags = []
            while len(chunk1):
                params = {'offset': offset, 'uploadId': upload['_id']}
                params['chunk'] = json.dumps({'s3UploadId': s3uploadId,
                                              'partNumber': chunkN})
                resp = self.request(
                    path='/file/chunk', method='POST', user=self.user, params=params)
                self.assertStatusOk(resp)
                upload = resp.json
                if len(chunk1) > upload['s3']['chunkLength']:
                    chunk2 = chunk1[upload['s3']['chunkLength']:]
                    chunk1 = chunk1[:upload['s3']['chunkLength']]
                else:
                    chunk2 = ''
                resp = _send_s3_request(upload['s3']['request'], chunk1)
                etags.append(resp.headers['ETag'])
                chunk1 = chunk2
                if partial is not False:
                    partial -= 1
                chunkN += 1
                if partial is not False and not partial:
                    return upload
        resp = self.request(
            path='/file/completion', method='POST', user=self.user,
            params={'uploadId': upload['_id']})
        self.assertStatusOk(resp)
        if 's3FinalizeRequest' in resp.json:
            # Multipart S3 uploads need an explicit CompleteMultipartUpload
            # request listing every part number with its ETag.
            xml = '<CompleteMultipartUpload>'
            for i, tag in enumerate(etags, 1):
                xml += '<Part><PartNumber>%d</PartNumber><ETag>%s</ETag></Part>' % (i, tag)
            xml += '</CompleteMultipartUpload>'
            _send_s3_request(resp.json['s3FinalizeRequest'], data=xml)
        return upload

    def _uploadFileWithInitialChunk(self, name, partial=False, largeFile=False, oneChunk=False):
        """
        Upload a file either completely or partially, sending the first chunk
        with the initial POST.

        :param name: the name of the file to upload.
        :param partial: the number of steps to complete in the uploads: 1
            uploads 1 chunk. False to complete the upload.
        :param largeFile: if True, upload a file that is > 32Mb
        :param oneChunk: if True, upload everything as one chunk. Otherwise,
            upload one chunk when creating the upload and one via the
            file/chunk endpoint.
        :returns: the upload record which includes the upload id, or None
            when the upload was sent as a single chunk.
        """
        if not largeFile:
            chunk1 = Chunk1
            chunk2 = Chunk2
        else:
            chunk1 = '-' * (1024 * 1024 * 32)
            chunk2 = '-' * (1024 * 1024 * 1)
        if oneChunk:
            chunk1 += chunk2
            chunk2 = ''
        params = {
            'parentType': 'folder',
            'parentId': str(self.folder['_id']),
            'name': name,
            'size': len(chunk1) + len(chunk2),
            'mimeType': 'text/plain',
        }
        resp = self.request(
            path='/file', method='POST', user=self.user,
            params=params, body=chunk1, type='text/plain')
        self.assertStatusOk(resp)
        if partial is not False:
            return resp.json
        if not oneChunk:
            upload = resp.json
            params = {'offset': len(chunk1), 'uploadId': upload['_id']}
            resp = self.request(
                path='/file/chunk', method='POST', user=self.user,
                params=params, body=chunk2, type='text/plain')
            self.assertStatusOk(resp)
        else:
            upload = None
            # A one-chunk upload finalizes immediately and returns the file.
            self.assertEqual(resp.json['_modelType'], 'file')
        return upload

    def _testUpload(self):
        """
        Upload a file to the server and several partial files. Test that we
        can delete a partial upload but not a completed upload. Test that we
        can delete partial uploads that are older than a certain date.
        """
        completeUpload = self._uploadFile('complete_upload')
        # test uploading large files and one-chunk files
        self._uploadFile('complete_large_upload', largeFile=True)
        self._uploadFileWithInitialChunk('one_chunk_upload', oneChunk=True)
        # test partial uploads
        partialUploads = []
        for largeFile in (False, True):
            for partial in range(3):
                partialUploads.append(self._uploadFile(
                    'partial_upload_%d_%s' % (partial, str(largeFile)),
                    partial, largeFile))
        # The admin user should see all of the partial uploads, but not the
        # complete uploads
        resp = self.request(path='/system/uploads', user=self.admin)
        self.assertStatusOk(resp)
        self.assertEqual(len(resp.json), len(partialUploads))
        # We shouldn't be able to delete a completed upload
        resp = self.request(
            path='/system/uploads', method='DELETE', user=self.admin,
            params={'uploadId': completeUpload['_id']})
        self.assertStatusOk(resp)
        self.assertEqual(resp.json, [])
        resp = self.request(path='/system/uploads', user=self.admin)
        self.assertEqual(len(resp.json), len(partialUploads))
        # The admin should be able to ask for a partial upload by id
        resp = self.request(
            path='/system/uploads', user=self.admin,
            params={'uploadId': partialUploads[0]['_id']})
        self.assertStatusOk(resp)
        self.assertEqual(resp.json[0]['_id'], partialUploads[0]['_id'])
        # The admin should be able to ask for a partial upload by assetstore id
        resp = self.request(
            path='/system/uploads', user=self.admin,
            params={'assetstoreId': self.assetstore['_id']})
        self.assertStatusOk(resp)
        self.assertEqual(len(resp.json), len(partialUploads))
        # The admin should be able to ask for a partial upload by age.
        # Everything should be more than 0 days old
        resp = self.request(
            path='/system/uploads', user=self.admin, params={'minimumAge': 0})
        self.assertStatusOk(resp)
        self.assertEqual(len(resp.json), len(partialUploads))
        # The admin should be able to delete an upload
        resp = self.request(
            path='/system/uploads', method='DELETE', user=self.admin,
            params={'uploadId': partialUploads[0]['_id']})
        self.assertStatusOk(resp)
        self.assertEqual(resp.json[0]['_id'], partialUploads[0]['_id'])
        # We should now have one less partial upload
        resp = self.request(path='/system/uploads', user=self.admin)
        self.assertEqual(len(resp.json), len(partialUploads) - 1)
        # If we ask to delete everything more than one day old, nothing should
        # be deleted.
        resp = self.request(
            path='/system/uploads', method='DELETE', user=self.admin, params={'minimumAge': 1})
        self.assertStatusOk(resp)
        self.assertEqual(resp.json, [])
        # Delete all partial uploads
        resp = self.request(path='/system/uploads', method='DELETE', user=self.admin)
        self.assertStatusOk(resp)
        resp = self.request(path='/system/uploads', user=self.admin)
        self.assertEqual(resp.json, [])

    def testUploadWithInitialChunk(self):
        """
        Upload a file to the server and several partial files. Test that we
        can delete a partial upload but not a completed upload. Test that we
        can delete partial uploads that are older than a certain date.
        """
        self._uploadFileWithInitialChunk('upload1')
        self._uploadFileWithInitialChunk('upload2', oneChunk=True)
        # test uploading large files
        self._uploadFileWithInitialChunk('upload3', largeFile=True)
        partialUploads = []
        for largeFile in (False, True):
            for partial in range(1, 3):
                partialUploads.append(self._uploadFileWithInitialChunk(
                    'partial_upload_%d_%s' % (partial, str(largeFile)),
                    partial, largeFile))
        # check that a user cannot list partial uploads
        resp = self.request(path='/system/uploads', method='GET',
                            user=self.user)
        self.assertStatus(resp, 403)
        # The admin user should see all of the partial uploads, but not the
        # complete upload
        resp = self.request(path='/system/uploads', method='GET',
                            user=self.admin)
        self.assertStatusOk(resp)
        foundUploads = resp.json
        self.assertEqual(len(foundUploads), len(partialUploads))
        # Check that the upload model is saved when we are using one chunk
        self._uploadWasSaved = 0
        def trackUploads(*args, **kwargs):
            self._uploadWasSaved += 1
        events.bind('model.upload.save', 'uploadWithInitialChunk', trackUploads)
        self._uploadFileWithInitialChunk('upload4', oneChunk=True)
        # This can be changed to assertEqual if one chunk uploads aren't saved
        self.assertGreater(self._uploadWasSaved, 0)
        self._uploadWasSaved = 0
        # But that it is saved when using multiple chunks
        self._uploadFileWithInitialChunk('upload5')
        self.assertGreater(self._uploadWasSaved, 0)
        events.unbind('model.upload.save', 'uploadWithInitialChunk')

    def testFilesystemAssetstoreUpload(self):
        """Run the generic upload tests on the default filesystem assetstore,
        then verify that deleting a file while another upload of the same
        content is being finalized still leaves one file on disk.
        """
        self._testUpload()
        # Test that a delete during an upload still results in one file
        adapter = assetstore_utilities.getAssetstoreAdapter(self.assetstore)
        size = 101
        data = six.BytesIO(b' ' * size)
        files = []
        files.append(Upload().uploadFromFile(
            data, size, 'progress', parentType='folder', parent=self.folder,
            assetstore=self.assetstore))
        fullPath0 = adapter.fullPath(files[0])
        conditionRemoveDone = threading.Condition()
        conditionInEvent = threading.Condition()
        def waitForCondition(*args, **kwargs):
            # Signal that we are in the event and then wait to be told that
            # the delete has occurred before returning.
            with conditionInEvent:
                conditionInEvent.notify()
            with conditionRemoveDone:
                conditionRemoveDone.wait()
        def uploadFileWithWait():
            size = 101
            data = six.BytesIO(b' ' * size)
            files.append(Upload().uploadFromFile(
                data, size, 'progress', parentType='folder', parent=self.folder,
                assetstore=self.assetstore))
        events.bind('model.file.finalizeUpload.before', 'waitForCondition',
                    waitForCondition)
        # We create an upload that is bound to an event that waits during the
        # finalizeUpload.before event so that the remove will be executed
        # during this time.
        with conditionInEvent:
            t = threading.Thread(target=uploadFileWithWait)
            t.start()
            conditionInEvent.wait()
        self.assertTrue(os.path.exists(fullPath0))
        File().remove(files[0])
        # We shouldn't actually remove the file here
        self.assertTrue(os.path.exists(fullPath0))
        with conditionRemoveDone:
            conditionRemoveDone.notify()
        t.join()
        events.unbind('model.file.finalizeUpload.before', 'waitForCondition')
        fullPath1 = adapter.fullPath(files[0])
        self.assertEqual(fullPath0, fullPath1)
        self.assertTrue(os.path.exists(fullPath1))

    def testGridFSAssetstoreUpload(self):
        """Run the generic upload tests on a fresh GridFS assetstore."""
        # Clear any old DB data
        base.dropGridFSDatabase('girder_test_upload_assetstore')
        # Clear the assetstore database and create a GridFS assetstore
        Assetstore().remove(Assetstore().getCurrent())
        assetstore = Assetstore().createGridFsAssetstore(
            name='Test', db='girder_test_upload_assetstore')
        self.assetstore = assetstore
        self._testUpload()

    def testGridFSReplicaSetAssetstoreUpload(self):
        """Run the upload tests on a GridFS assetstore backed by a Mongo
        replica set, including uploads while members of the set are offline.
        """
        verbose = 0
        if 'REPLICASET' in os.environ.get('EXTRADEBUG', '').split():
            verbose = 2
        # Starting the replica sets takes time (~25 seconds)
        rscfg = mongo_replicaset.makeConfig()
        mongo_replicaset.startMongoReplicaSet(rscfg, verbose=verbose)
        # Clear the assetstore database and create a GridFS assetstore
        Assetstore().remove(Assetstore().getCurrent())
        # When the mongo connection to one of the replica sets goes down, it
        # takes twice the socket timeout for us to reconnect and get on with
        # an upload.  We can override the default timeout by passing it as a
        # mongodb uri parameter.
        assetstore = Assetstore().createGridFsAssetstore(
            name='Test', db='girder_assetstore_rs_upload_test',
            mongohost='mongodb://127.0.0.1:27070,127.0.0.1:27071,'
            '127.0.0.1:27072/?socketTimeoutMS=5000&connectTimeoutMS=2500',
            replicaset='replicaset')
        self.assetstore = assetstore
        self._testUpload()
        # Test having the primary replica set going offline and then uploading
        # again.  If the current primary goes offline, it seems to take mongo
        # 30 seconds to elect a new primary.  If we step down the current
        # primary before pausing it, then the new election will happen in 20
        # seconds.
        mongo_replicaset.stepDownMongoReplicaSet(rscfg, 0)
        mongo_replicaset.waitForRSStatus(
            rscfg,
            mongo_replicaset.getMongoClient(rscfg, 0),
            status=[2, (1, 2), (1, 2)],
            verbose=verbose)
        mongo_replicaset.pauseMongoReplicaSet(rscfg, [True], verbose=verbose)
        self._uploadFile('rs_upload_1')
        # Have a different member of the replica set go offline and the first
        # come back.  This takes a long time, so I am disabling it
        # mongo_replicaset.pauseMongoReplicaSet(rscfg, [False, True], verbose=verbose)
        # self._uploadFile('rs_upload_2')
        # Have the set come back online and upload once more
        mongo_replicaset.pauseMongoReplicaSet(rscfg, [False, False], verbose=verbose)
        self._uploadFile('rs_upload_3')
        mongo_replicaset.stopMongoReplicaSet(rscfg)

    def testS3AssetstoreUpload(self):
        """Run the generic upload tests on a mock S3 assetstore and verify
        that abandoned (untracked) multipart uploads can be listed and
        cleaned up.
        """
        # Clear the assetstore database and create an S3 assetstore
        Assetstore().remove(self.assetstore)
        params = {
            'name': 'S3 Assetstore',
            'bucket': 'bucketname',
            'prefix': 'testprefix',
            'accessKeyId': 'abc',
            'secret': '123',
            'service': base.mockS3Server.service
        }
        assetstore = Assetstore().createS3Assetstore(**params)
        self.assetstore = assetstore
        self._testUpload()
        # make an untracked upload to test that we can find and clear it
        client = boto3.client(
            's3', endpoint_url=base.mockS3Server.service, aws_access_key_id='abc',
            aws_secret_access_key='123')
        client.create_multipart_upload(Bucket='bucketname', Key='testprefix/abandoned_upload')
        resp = self.request(path='/system/uploads', user=self.admin)
        self.assertStatusOk(resp)
        self.assertEqual(len(resp.json), 1)
        # Ask to delete it
        resp = self.request(path='/system/uploads', method='DELETE', user=self.admin)
        self.assertStatusOk(resp)
        # Check that it is gone
        resp = self.request(path='/system/uploads', user=self.admin)
        self.assertStatusOk(resp)
        self.assertEqual(resp.json, [])
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""A module that contains various classifiers"""
import cv2
import numpy as np
from abc import ABCMeta, abstractmethod
from matplotlib import pyplot as plt
__author__ = "Michael Beyeler"
__license__ = "GNU GPL 3.0 or later"
class Classifier:
    """
    Abstract base class for all classifiers

    A classifier needs to implement at least two methods:
        - fit:      A method to train the classifier by fitting the model to
                    the data.
        - evaluate: A method to test the classifier by predicting labels of
                    some test data based on the trained model.

    This class also provides methods to calculate accuracy, precision,
    recall, and the confusion matrix.

    NOTE(review): this module targets Python 2 (``xrange``); the helper
    methods read ``self.mode`` and ``self.num_classes``, which subclasses
    are expected to define.
    """
    __metaclass__ = ABCMeta

    @abstractmethod
    def fit(self, X_train, y_train):
        """Train the classifier by fitting the model to (X_train, y_train)."""
        pass

    @abstractmethod
    def evaluate(self, X_test, y_test, visualize=False):
        """Predict labels for X_test and score them against y_test."""
        pass

    def _accuracy(self, y_test, Y_vote):
        """Calculates accuracy

        This method calculates the accuracy based on a vector of
        ground-truth labels (y_test) and a 2D voting matrix (Y_vote) of
        size (len(y_test), num_classes).

        :param y_test: vector of ground-truth labels
        :param Y_vote: 2D voting matrix (rows=samples, cols=class votes)
        :returns: accuracy e[0,1]
        """
        # predicted classes: the class with the most votes per sample
        y_hat = np.argmax(Y_vote, axis=1)
        # all cases where predicted class was correct
        mask = y_hat == y_test
        return np.float32(np.count_nonzero(mask)) / len(y_test)

    def _precision(self, y_test, Y_vote):
        """Calculates precision

        This method calculates precision extended to multi-class
        classification by help of a confusion matrix.

        :param y_test: vector of ground-truth labels
        :param Y_vote: 2D voting matrix (rows=samples, cols=class votes)
        :returns: precision e[0,1]
        """
        # predicted classes
        y_hat = np.argmax(Y_vote, axis=1)
        # NOTE(review): if self.mode is neither recognized value, ``prec`` is
        # never assigned and the return raises NameError.
        if self.mode == "one-vs-one":
            # need confusion matrix
            conf = self._confusion(y_test, Y_vote)
            # consider each class separately
            prec = np.zeros(self.num_classes)
            for c in xrange(self.num_classes):
                # true positives: label is c, classifier predicted c
                tp = conf[c, c]
                # false positives: label is c, classifier predicted not c
                fp = np.sum(conf[:, c]) - conf[c, c]
                if tp + fp != 0:
                    prec[c] = tp * 1. / (tp + fp)
        elif self.mode == "one-vs-all":
            # consider each class separately
            prec = np.zeros(self.num_classes)
            for c in xrange(self.num_classes):
                # true positives: label is c, classifier predicted c
                tp = np.count_nonzero((y_test == c) * (y_hat == c))
                # false positives: label is c, classifier predicted not c
                fp = np.count_nonzero((y_test == c) * (y_hat != c))
                if tp + fp != 0:
                    prec[c] = tp * 1. / (tp + fp)
        return prec

    def _recall(self, y_test, Y_vote):
        """Calculates recall

        This method calculates recall extended to multi-class
        classification by help of a confusion matrix.

        :param y_test: vector of ground-truth labels
        :param Y_vote: 2D voting matrix (rows=samples, cols=class votes)
        :returns: recall e[0,1]
        """
        # predicted classes
        y_hat = np.argmax(Y_vote, axis=1)
        # NOTE(review): same unrecognized-mode NameError hazard as in
        # _precision above.
        if self.mode == "one-vs-one":
            # need confusion matrix
            conf = self._confusion(y_test, Y_vote)
            # consider each class separately
            recall = np.zeros(self.num_classes)
            for c in xrange(self.num_classes):
                # true positives: label is c, classifier predicted c
                tp = conf[c, c]
                # false negatives: label is not c, classifier predicted c
                fn = np.sum(conf[c, :]) - conf[c, c]
                if tp + fn != 0:
                    recall[c] = tp * 1. / (tp + fn)
        elif self.mode == "one-vs-all":
            # consider each class separately
            recall = np.zeros(self.num_classes)
            for c in xrange(self.num_classes):
                # true positives: label is c, classifier predicted c
                tp = np.count_nonzero((y_test == c) * (y_hat == c))
                # false negatives: label is not c, classifier predicted c
                fn = np.count_nonzero((y_test != c) * (y_hat == c))
                if tp + fn != 0:
                    recall[c] = tp * 1. / (tp + fn)
        return recall

    def _confusion(self, y_test, Y_vote):
        """Calculates confusion matrix

        This method calculates the confusion matrix based on a vector of
        ground-truth labels (y-test) and a 2D voting matrix (Y_vote) of
        size (len(y_test), num_classes).

        Matrix element conf[r,c] will contain the number of samples that
        were predicted to have label r but have ground-truth label c.

        :param y_test: vector of ground-truth labels
        :param Y_vote: 2D voting matrix (rows=samples, cols=class votes)
        :returns: confusion matrix
        """
        y_hat = np.argmax(Y_vote, axis=1)
        conf = np.zeros((self.num_classes, self.num_classes)).astype(np.int32)
        for c_true in xrange(self.num_classes):
            # looking at all samples of a given class, c_true
            # how many were classified as c_true? how many as others?
            for c_pred in xrange(self.num_classes):
                y_this = np.where((y_test == c_true) * (y_hat == c_pred))
                conf[c_pred, c_true] = np.count_nonzero(y_this)
        return conf
class MultiClassSVM(Classifier):
"""
Multi-class classification using Support Vector Machines (SVMs)
This class implements an SVM for multi-class classification. Whereas
some classifiers naturally permit the use of more than two classes
(such as neural networks), SVMs are binary in nature.
However, we can turn SVMs into multinomial classifiers using at least
two different strategies:
* one-vs-all: A single classifier is trained per class, with the
samples of that class as positives (label 1) and all
others as negatives (label 0).
* one-vs-one: For k classes, k*(k-1)/2 classifiers are trained for each
pair of classes, with the samples of the one class as
positives (label 1) and samples of the other class as
negatives (label 0).
Each classifier then votes for a particular class label, and the final
decision (classification) is based on a majority vote.
"""
def __init__(self, num_classes, mode="one-vs-all", params=None):
"""
The constructor makes sure the correct number of classifiers is
initialized, depending on the mode ("one-vs-all" or "one-vs-one").
:param num_classes: The number of classes in the data.
:param mode: Which classification mode to use.
"one-vs-all": single classifier per class
"one-vs-one": single classifier per class pair
Default: "one-vs-all"
:param params: SVM training parameters.
For now, default values are used for all SVMs.
Hyperparameter exploration can be achieved by
embedding the MultiClassSVM process flow in a
for-loop that classifies the data with
different parameter values, then pick the
values that yield the best accuracy.
Default: None
"""
self.num_classes = num_classes
self.mode = mode
self.params = params or dict()
# initialize correct number of classifiers
self.classifiers = []
if mode == "one-vs-one":
# k classes: need k*(k-1)/2 classifiers
for _ in xrange(num_classes*(num_classes - 1) / 2):
self.classifiers.append(cv2.SVM())
elif mode == "one-vs-all":
# k classes: need k classifiers
for _ in xrange(num_classes):
self.classifiers.append(cv2.SVM())
else:
print "Unknown mode ", mode
def fit(self, X_train, y_train, params=None):
"""Fits the model to training data
This method trains the classifier on data (X_train) using either
the "one-vs-one" or "one-vs-all" strategy.
:param X_train: input data (rows=samples, cols=features)
:param y_train: vector of class labels
:param params: dict to specify training options for cv2.SVM.train
leave blank to use the parameters passed to the
constructor
"""
if params is None:
params = self.params
if self.mode == "one-vs-one":
svm_id = 0
for c1 in xrange(self.num_classes):
for c2 in xrange(c1 + 1, self.num_classes):
data_id = np.where((y_train == c1) + (y_train == c2))[0]
X_train_id = X_train[data_id, :]
y_train_id = y_train[data_id]
# set class label to 1 where class==c1, else 0
y_train_bin = np.where(y_train_id == c1, 1, 0).flatten()
self.classifiers[svm_id].train(X_train_id, y_train_bin,
params=self.params)
svm_id += 1
elif self.mode == "one-vs-all":
for c in xrange(self.num_classes):
# train c-th SVM on class c vs. all other classes
# set class label to 1 where class==c, else 0
y_train_bin = np.where(y_train == c, 1, 0).flatten()
# train SVM
self.classifiers[c].train(X_train, y_train_bin,
params=self.params)
def evaluate(self, X_test, y_test, visualize=False):
"""Evaluates the model on test data
This method evaluates the classifier's performance on test data
(X_test) using either the "one-vs-one" or "one-vs-all" strategy.
:param X_test: input data (rows=samples, cols=features)
:param y_test: vector of class labels
:param visualize: flag whether to plot the results (True) or not
(False)
:returns: accuracy, precision, recall
"""
# prepare Y_vote: for each sample, count how many times we voted
# for each class
Y_vote = np.zeros((len(y_test), self.num_classes))
if self.mode == "one-vs-one":
svm_id = 0
for c1 in xrange(self.num_classes):
for c2 in xrange(c1 + 1, self.num_classes):
data_id = np.where((y_test == c1) + (y_test == c2))[0]
X_test_id = X_test[data_id, :]
y_test_id = y_test[data_id]
# set class label to 1 where class==c1, else 0
# y_test_bin = np.where(y_test_id==c1,1,0).reshape(-1,1)
# predict labels
y_hat = self.classifiers[svm_id].predict_all(X_test_id)
for i in xrange(len(y_hat)):
if y_hat[i] == 1:
Y_vote[data_id[i], c1] += 1
elif y_hat[i] == 0:
Y_vote[data_id[i], c2] += 1
else:
print "y_hat[", i, "] = ", y_hat[i]
# we vote for c1 where y_hat is 1, and for c2 where y_hat
# is 0 np.where serves as the inner index into the data_id
# array, which in turn serves as index into the results
# array
# Y_vote[data_id[np.where(y_hat == 1)[0]], c1] += 1
# Y_vote[data_id[np.where(y_hat == 0)[0]], c2] += 1
svm_id += 1
elif self.mode == "one-vs-all":
for c in xrange(self.num_classes):
# set class label to 1 where class==c, else 0
# predict class labels
# y_test_bin = np.where(y_test==c,1,0).reshape(-1,1)
# predict labels
y_hat = self.classifiers[c].predict_all(X_test)
# we vote for c where y_hat is 1
if np.any(y_hat):
Y_vote[np.where(y_hat == 1)[0], c] += 1
# with this voting scheme it's possible to end up with samples
# that have no label at all...in this case, pick a class at
# random...
no_label = np.where(np.sum(Y_vote, axis=1) == 0)[0]
Y_vote[no_label, np.random.randint(self.num_classes,
size=len(no_label))] = 1
accuracy = self._accuracy(y_test, Y_vote)
precision = self._precision(y_test, Y_vote)
recall = self._recall(y_test, Y_vote)
return accuracy, precision, recall
| |
"""
Tests for the throttling implementations in the permissions module.
"""
from __future__ import unicode_literals
import pytest
from django.contrib.auth.models import User
from django.core.cache import cache
from django.core.exceptions import ImproperlyConfigured
from django.http import HttpRequest
from django.test import TestCase
from rest_framework.request import Request
from rest_framework.response import Response
from rest_framework.settings import api_settings
from rest_framework.test import APIRequestFactory, force_authenticate
from rest_framework.throttling import (
AnonRateThrottle, BaseThrottle, ScopedRateThrottle, SimpleRateThrottle,
UserRateThrottle
)
from rest_framework.views import APIView
class User3SecRateThrottle(UserRateThrottle):
    # Per-user throttle: at most 3 requests per second, tracked under the
    # 'seconds' cache scope.
    rate = '3/sec'
    scope = 'seconds'
class User3MinRateThrottle(UserRateThrottle):
    # Per-user throttle: at most 3 requests per minute, tracked under the
    # 'minutes' cache scope.
    rate = '3/min'
    scope = 'minutes'
class NonTimeThrottle(BaseThrottle):
    """Throttle that allows only the very first request.

    State is stored on the class object itself, so the restriction persists
    across throttle instances until the attribute is removed.
    """
    def allow_request(self, request, view):
        if hasattr(self.__class__, 'called'):
            return False
        self.__class__.called = True
        return True
class MockView(APIView):
    # Minimal view guarded by the 3-requests-per-second user throttle.
    throttle_classes = (User3SecRateThrottle,)

    def get(self, request):
        return Response('foo')
class MockView_MinuteThrottling(APIView):
    """Minimal view throttled at 3 requests/minute per user."""
    throttle_classes = (User3MinRateThrottle,)
    def get(self, request):
        return Response('foo')
class MockView_NonTimeThrottling(APIView):
    """Minimal view guarded by the one-shot, non-time-based throttle."""
    throttle_classes = (NonTimeThrottle,)
    def get(self, request):
        return Response('foo')
class ThrottlingTests(TestCase):
    """End-to-end rate-limiting checks against the mock throttled views."""

    def setUp(self):
        """
        Reset the cache so that no throttles will be active
        """
        cache.clear()
        self.factory = APIRequestFactory()

    def test_requests_are_throttled(self):
        """
        Ensure request rate is limited
        """
        request = self.factory.get('/')
        for dummy in range(4):
            response = MockView.as_view()(request)
        # The fourth request within the same second exceeds the 3/sec rate.
        assert response.status_code == 429

    def set_throttle_timer(self, view, value):
        """
        Explicitly set the timer, overriding time.time()
        """
        view.throttle_classes[0].timer = lambda self: value

    def test_request_throttling_expires(self):
        """
        Ensure request rate is limited for a limited duration only
        """
        self.set_throttle_timer(MockView, 0)
        request = self.factory.get('/')
        for dummy in range(4):
            response = MockView.as_view()(request)
        assert response.status_code == 429
        # Advance the timer by one second
        self.set_throttle_timer(MockView, 1)
        response = MockView.as_view()(request)
        assert response.status_code == 200

    def ensure_is_throttled(self, view, expect):
        """Exhaust user 'a's rate, then assert the status user 'b' receives."""
        request = self.factory.get('/')
        request.user = User.objects.create(username='a')
        for dummy in range(3):
            view.as_view()(request)
        request.user = User.objects.create(username='b')
        response = view.as_view()(request)
        assert response.status_code == expect

    def test_request_throttling_is_per_user(self):
        """
        Ensure request rate is only limited per user, not globally for
        PerUserThrottles
        """
        self.ensure_is_throttled(MockView, 200)

    def ensure_response_header_contains_proper_throttle_field(self, view, expected_headers):
        """
        Ensure the response returns an Retry-After field with status and next attributes
        set properly.
        """
        request = self.factory.get('/')
        for timer, expect in expected_headers:
            self.set_throttle_timer(view, timer)
            response = view.as_view()(request)
            if expect is not None:
                assert response['Retry-After'] == expect
            else:
                # PEP 8 membership test ('x not in y'), was "not'Retry-After' in".
                assert 'Retry-After' not in response

    def test_seconds_fields(self):
        """
        Ensure for second based throttles.
        """
        self.ensure_response_header_contains_proper_throttle_field(
            MockView, (
                (0, None),
                (0, None),
                (0, None),
                (0, '1')
            )
        )

    def test_minutes_fields(self):
        """
        Ensure for minute based throttles.
        """
        self.ensure_response_header_contains_proper_throttle_field(
            MockView_MinuteThrottling, (
                (0, None),
                (0, None),
                (0, None),
                (0, '60')
            )
        )

    def test_next_rate_remains_constant_if_followed(self):
        """
        If a client follows the recommended next request rate,
        the throttling rate should stay constant.
        """
        self.ensure_response_header_contains_proper_throttle_field(
            MockView_MinuteThrottling, (
                (0, None),
                (20, None),
                (40, None),
                (60, None),
                (80, None)
            )
        )

    def test_non_time_throttle(self):
        """
        Ensure throttles that are not time-based emit no Retry-After header.
        """
        request = self.factory.get('/')
        assert not hasattr(MockView_NonTimeThrottling.throttle_classes[0], 'called')
        response = MockView_NonTimeThrottling.as_view()(request)
        assert 'Retry-After' not in response
        assert MockView_NonTimeThrottling.throttle_classes[0].called
        response = MockView_NonTimeThrottling.as_view()(request)
        assert 'Retry-After' not in response
class ScopedRateThrottleTests(TestCase):
    """
    Tests for ScopedRateThrottle.
    """
    def setUp(self):
        # Plain instance kept for direct get_cache_key() checks.
        self.throttle = ScopedRateThrottle()
        # Classes are defined inside setUp so the class-level TIMER_SECONDS
        # fake clock starts at zero for every test method.
        class XYScopedRateThrottle(ScopedRateThrottle):
            TIMER_SECONDS = 0
            THROTTLE_RATES = {'x': '3/min', 'y': '1/min'}
            def timer(self):
                # Deterministic replacement for time.time().
                return self.TIMER_SECONDS
        class XView(APIView):
            throttle_classes = (XYScopedRateThrottle,)
            throttle_scope = 'x'
            def get(self, request):
                return Response('x')
        class YView(APIView):
            throttle_classes = (XYScopedRateThrottle,)
            throttle_scope = 'y'
            def get(self, request):
                return Response('y')
        class UnscopedView(APIView):
            # No throttle_scope attribute: the scoped throttle must ignore it.
            throttle_classes = (XYScopedRateThrottle,)
            def get(self, request):
                return Response('y')
        self.throttle_class = XYScopedRateThrottle
        self.factory = APIRequestFactory()
        self.x_view = XView.as_view()
        self.y_view = YView.as_view()
        self.unscoped_view = UnscopedView.as_view()
    def increment_timer(self, seconds=1):
        # Advance the fake clock shared by all throttle instances.
        self.throttle_class.TIMER_SECONDS += seconds
    def test_scoped_rate_throttle(self):
        request = self.factory.get('/')
        # Should be able to hit x view 3 times per minute.
        response = self.x_view(request)
        assert response.status_code == 200
        self.increment_timer()
        response = self.x_view(request)
        assert response.status_code == 200
        self.increment_timer()
        response = self.x_view(request)
        assert response.status_code == 200
        self.increment_timer()
        response = self.x_view(request)
        assert response.status_code == 429
        # Should be able to hit y view 1 time per minute.
        self.increment_timer()
        response = self.y_view(request)
        assert response.status_code == 200
        self.increment_timer()
        response = self.y_view(request)
        assert response.status_code == 429
        # Ensure throttles properly reset by advancing the rest of the minute
        self.increment_timer(55)
        # Should still be able to hit x view 3 times per minute.
        response = self.x_view(request)
        assert response.status_code == 200
        self.increment_timer()
        response = self.x_view(request)
        assert response.status_code == 200
        self.increment_timer()
        response = self.x_view(request)
        assert response.status_code == 200
        self.increment_timer()
        response = self.x_view(request)
        assert response.status_code == 429
        # Should still be able to hit y view 1 time per minute.
        self.increment_timer()
        response = self.y_view(request)
        assert response.status_code == 200
        self.increment_timer()
        response = self.y_view(request)
        assert response.status_code == 429
    def test_unscoped_view_not_throttled(self):
        # A view without throttle_scope must never be rate limited.
        request = self.factory.get('/')
        for idx in range(10):
            self.increment_timer()
            response = self.unscoped_view(request)
            assert response.status_code == 200
    def test_get_cache_key_returns_correct_key_if_user_is_authenticated(self):
        class DummyView(object):
            throttle_scope = 'user'
        request = Request(HttpRequest())
        user = User.objects.create(username='test')
        force_authenticate(request, user)
        request.user = user
        # allow_request() populates the throttle's scope before key creation.
        self.throttle.allow_request(request, DummyView())
        cache_key = self.throttle.get_cache_key(request, view=DummyView())
        assert cache_key == 'throttle_user_%s' % user.pk
class XffTestingBase(TestCase):
    """Shared fixture for X-Forwarded-For tests: a 1/day scoped throttle and
    a request carrying both REMOTE_ADDR and a three-hop XFF chain."""
    def setUp(self):
        class Throttle(ScopedRateThrottle):
            THROTTLE_RATES = {'test_limit': '1/day'}
            TIMER_SECONDS = 0
            def timer(self):
                # Deterministic replacement for time.time().
                return self.TIMER_SECONDS
        class View(APIView):
            throttle_classes = (Throttle,)
            throttle_scope = 'test_limit'
            def get(self, request):
                return Response('test_limit')
        cache.clear()
        self.throttle = Throttle()
        self.view = View.as_view()
        self.request = APIRequestFactory().get('/some_uri')
        # Direct peer address plus the forwarded chain the proxies built.
        self.request.META['REMOTE_ADDR'] = '3.3.3.3'
        self.request.META['HTTP_X_FORWARDED_FOR'] = '0.0.0.0, 1.1.1.1, 2.2.2.2'
    def config_proxy(self, num_proxies):
        # NOTE(review): mutates the global api_settings without restoring it
        # in tearDown -- later tests see whatever value was set last. Confirm
        # this leakage is acceptable.
        setattr(api_settings, 'NUM_PROXIES', num_proxies)
class IdWithXffBasicTests(XffTestingBase):
    """With NUM_PROXIES set to zero, throttling keys on REMOTE_ADDR."""

    def test_accepts_request_under_limit(self):
        self.config_proxy(0)
        response = self.view(self.request)
        assert response.status_code == 200

    def test_denies_request_over_limit(self):
        self.config_proxy(0)
        self.view(self.request)
        response = self.view(self.request)
        assert response.status_code == 429
class XffSpoofingTests(XffTestingBase):
    """Forging the client end of the X-Forwarded-For chain must not let a
    client evade the throttle."""

    def test_xff_spoofing_doesnt_change_machine_id_with_one_app_proxy(self):
        self.config_proxy(1)
        self.view(self.request)
        # Everything except the trusted proxy entry is forged.
        self.request.META['HTTP_X_FORWARDED_FOR'] = '4.4.4.4, 5.5.5.5, 2.2.2.2'
        response = self.view(self.request)
        assert response.status_code == 429

    def test_xff_spoofing_doesnt_change_machine_id_with_two_app_proxies(self):
        self.config_proxy(2)
        self.view(self.request)
        # Everything except the two trusted proxy entries is forged.
        self.request.META['HTTP_X_FORWARDED_FOR'] = '4.4.4.4, 1.1.1.1, 2.2.2.2'
        response = self.view(self.request)
        assert response.status_code == 429
class XffUniqueMachinesTest(XffTestingBase):
    """Distinct client identities derived from the XFF chain must be
    throttled independently of one another."""

    def test_unique_clients_are_counted_independently_with_one_proxy(self):
        self.config_proxy(1)
        self.view(self.request)
        # The entry the throttle keys on differs, so this is a new client.
        self.request.META['HTTP_X_FORWARDED_FOR'] = '0.0.0.0, 1.1.1.1, 7.7.7.7'
        response = self.view(self.request)
        assert response.status_code == 200

    def test_unique_clients_are_counted_independently_with_two_proxies(self):
        self.config_proxy(2)
        self.view(self.request)
        self.request.META['HTTP_X_FORWARDED_FOR'] = '0.0.0.0, 7.7.7.7, 2.2.2.2'
        response = self.view(self.request)
        assert response.status_code == 200
class BaseThrottleTests(TestCase):
    """Behaviour of the abstract BaseThrottle base class."""
    def test_allow_request_raises_not_implemented_error(self):
        # The base class is abstract: subclasses must override allow_request().
        with pytest.raises(NotImplementedError):
            BaseThrottle().allow_request(request={}, view={})
class SimpleRateThrottleTests(TestCase):
    """Unit tests for SimpleRateThrottle rate parsing, cache keys and wait()."""
    def setUp(self):
        # Give the class a valid default scope; individual tests override it.
        SimpleRateThrottle.scope = 'anon'
    def test_get_rate_raises_error_if_scope_is_missing(self):
        throttle = SimpleRateThrottle()
        with pytest.raises(ImproperlyConfigured):
            throttle.scope = None
            throttle.get_rate()
    def test_throttle_raises_error_if_rate_is_missing(self):
        # A scope with no configured rate cannot be instantiated.
        SimpleRateThrottle.scope = 'invalid scope'
        with pytest.raises(ImproperlyConfigured):
            SimpleRateThrottle()
    def test_parse_rate_returns_tuple_with_none_if_rate_not_provided(self):
        rate = SimpleRateThrottle().parse_rate(None)
        assert rate == (None, None)
    def test_allow_request_returns_true_if_rate_is_none(self):
        assert SimpleRateThrottle().allow_request(request={}, view={}) is True
    def test_get_cache_key_raises_not_implemented_error(self):
        # get_cache_key() is the subclass's responsibility.
        with pytest.raises(NotImplementedError):
            SimpleRateThrottle().get_cache_key({}, {})
    def test_allow_request_returns_true_if_key_is_none(self):
        # A None cache key means "do not throttle this request".
        throttle = SimpleRateThrottle()
        throttle.rate = 'some rate'
        throttle.get_cache_key = lambda *args: None
        assert throttle.allow_request(request={}, view={}) is True
    def test_wait_returns_correct_waiting_time_without_history(self):
        throttle = SimpleRateThrottle()
        throttle.num_requests = 1
        throttle.duration = 60
        throttle.history = []
        waiting_time = throttle.wait()
        assert isinstance(waiting_time, float)
        assert waiting_time == 30.0
    def test_wait_returns_none_if_there_are_no_available_requests(self):
        throttle = SimpleRateThrottle()
        throttle.num_requests = 1
        throttle.duration = 60
        throttle.now = throttle.timer()
        # More history entries than allowed requests: no slot will free up.
        throttle.history = [throttle.timer() for _ in range(3)]
        assert throttle.wait() is None
class AnonRateThrottleTests(TestCase):
    """Cache-key generation behaviour of AnonRateThrottle."""

    def setUp(self):
        self.throttle = AnonRateThrottle()

    def test_authenticated_user_not_affected(self):
        # Anonymous throttling must not apply to authenticated requests.
        request = Request(HttpRequest())
        user = User.objects.create(username='test')
        force_authenticate(request, user)
        request.user = user
        cache_key = self.throttle.get_cache_key(request, view={})
        assert cache_key is None

    def test_get_cache_key_returns_correct_value(self):
        # With no client identity available the key falls back to None.
        anon_request = Request(HttpRequest())
        cache_key = self.throttle.get_cache_key(anon_request, view={})
        assert cache_key == 'throttle_anon_None'
| |
# Scramble Solver v0.1.1_TE
# =========================
import solver
import os
import time
import csv
from sys import argv
LOG_STATS = True
class StatGetter:
    """Appends per-cell solver statistics to solutiondata/stats.csv."""

    def __init__(self):
        # Open the stats file by full path instead of chdir()-ing into the
        # data directory and back: the original round-trip mutated the
        # process-wide working directory, which is unsafe for other code.
        self.sourcedir = os.getcwd()
        self.datadir = self.sourcedir + "/solutiondata/"
        # 'a' preserves stats from earlier runs; the handle stays open
        # until close() is called.
        self.statfile = open(self.datadir + 'stats.csv', 'a')
        self.csv_writer = csv.writer(self.statfile)

    def logstats(self, data):
        """Write one row (a sequence of values) to the CSV file."""
        self.csv_writer.writerow(data)

    def close(self):
        """Flush and close the underlying stats file."""
        self.statfile.close()
class SolutionFile:
    """Manages the intermediate word files (big/med/sml) and concatenates
    them into solution.txt, largest words first."""
    def __init__(self):
        self.makefiles()
    def makefiles(self):
        """Open fresh intermediate files and delete any stale solution.txt."""
        self.maindir = os.getcwd()
        self.datadir = self.maindir + "/solutiondata/"
        os.chdir(self.datadir)
        #create output files
        # NOTE(review): file() is the Python 2-only spelling of open().
        self.wordsbig = file('words-big.txt' , 'w')
        self.wordsmed = file('words-med.txt', 'w')
        self.wordssml = file('words-sml.txt', 'w')
        files = os.listdir(self.datadir)
        for f in files:
            if f == 'solution.txt':
                os.remove(self.datadir + "solution.txt")
                print "Old solution file removed"
        # change back to src directory
        os.chdir(self.maindir)
    def writedata(self, data):
        # Append raw text to solution.txt (valid only after catwords()
        # has opened it).
        self.solutionfile.write(data)
    # Functions to write found words to respective files based on size
    # NOTE: probably want to eventually move this all to a post processing step
    # that can do a finer sort, collect stats, randomize, etc.
    def writebig(self, data):
        self.wordsbig.write(data)
        self.wordsbig.write('\n')
    def writemed(self, data):
        self.wordsmed.write(data)
        self.wordsmed.write('\n')
    def writesml(self, data):
        self.wordssml.write(data)
        self.wordssml.write('\n')
    def catwords(self):
        """Concatenate the big, medium and small word files (in that order)
        into solution.txt, then delete the intermediates.
        NOTE(review): this leaves the process cwd at self.datadir -- the
        chdir is never undone.  Confirm callers don't rely on cwd afterwards.
        """
        # close and reopen output files in read mode & create solutionfile
        self.close()
        os.chdir(self.datadir)
        self.solutionfile = file('solution.txt', 'w')
        self.wordsbig = file('words-big.txt' , 'r')
        self.wordsmed = file('words-med.txt', 'r')
        self.wordssml = file('words-sml.txt', 'r')
        # seek(0, 2) measures each file's size; position is then rewound.
        self.wordsbig.seek(0, 2)
        bigend = self.wordsbig.tell()
        self.wordsbig.seek(0)
        self.wordsmed.seek(0, 2)
        medend = self.wordsmed.tell()
        self.wordsmed.seek(0)
        self.wordssml.seek(0, 2)
        smlend = self.wordssml.tell()
        self.wordssml.seek(0)
        while self.wordsbig.tell() < bigend:
            data = self.wordsbig.readline()
            # NOTE(review): rstrip() returns a new string and the result is
            # discarded, so the newline is kept -- which is what keeps the
            # output one word per line.  The call itself is a no-op.
            data.rstrip('\n')
            self.writedata(data)
        while self.wordsmed.tell() < medend:
            data = self.wordsmed.readline()
            data.rstrip('\n')
            self.writedata(data)
        while self.wordssml.tell() < smlend:
            data = self.wordssml.readline()
            data.rstrip('\n')
            self.writedata(data)
        self.close()
        self.solutionfile.close()
        # lets remove the intermediate files here
        try:
            os.remove('words-big.txt')
            os.remove('words-med.txt')
            os.remove('words-sml.txt')
            print "[+] All files succesfully closed"
        except:
            # NOTE(review): bare except hides real errors; only OSError is
            # expected from os.remove here.
            print "[-] Intermediate file delete FAILED!"
    # Close all output files in one clean shot
    def close(self):
        self.wordsbig.close()
        self.wordsmed.close()
        self.wordssml.close()
class WordDictionary:
    """Sequential reader over the word list in dictionary.txt."""

    def __init__(self):
        # open() replaces the Python 2-only file() builtin used previously;
        # they are interchangeable in Python 2 and open() also works on 3.
        self.dictfile = open('dictionary.txt', 'r')

    def getword(self):
        """Return the next word, with its trailing newline stripped."""
        word = self.dictfile.readline()
        return word.rstrip('\n')

    def jump(self, index):
        """Reposition the underlying file to byte offset `index`."""
        self.dictfile.seek(index)

    def close(self):
        """Close the underlying dictionary file."""
        self.dictfile.close()
def get_input():
print ""
input_letters = raw_input("Enter Letters: ")
input_letters.upper()
return input_letters
def check_letters(letter_string):
    """Return True when the string holds exactly one letter per board cell."""
    # A valid 4x4 board needs exactly 16 letters.
    return len(letter_string) == 16
def make_game_array(input_letters):
    """Build the 4x4 board from a 16-letter string.

    A 'Q' tile represents the two-letter 'QU' tile and is expanded
    accordingly.  The original only expanded upper-case 'Q', but the
    GameEngine lower-cases command-line input, so lower-case 'q' is now
    expanded too (to 'qu', preserving the board's case).
    """
    game = []
    count = 0
    for i in range(4):
        row = []
        for j in range(4):
            letter = input_letters[count]
            if letter == 'Q':
                row.append('QU')
            elif letter == 'q':
                # Lower-cased boards get the same QU expansion.
                row.append('qu')
            else:
                row.append(letter)
            count += 1
        game.append(row)
    return game
def print_game(game_array):
    """Pretty-print the 4x4 board row by row between horizontal rules."""
    print ""
    print " ------------------"
    for row in game_array:
        print row
    print " ------------------"
    print ""
#-----
class GameEngine:
    """Drives one solve: builds the board, loads the dictionary and stats
    files, runs the solver cell by cell and writes the solution files."""
    def __init__(self, inputstr=None):
        # inputstr comes from the command line; otherwise prompt the user.
        if inputstr != None:
            # NOTE(review): CLI input is lower-cased here, but
            # make_game_array() only expands an upper-case 'Q' to 'QU',
            # so the QU tile is missed on this path.  Confirm intent.
            self.user_input = inputstr.lower()
        else:
            self.user_input = get_input()
        if check_letters(self.user_input) != True:
            print "[-] Input Error!"
            exit()
        else:
            self.game = make_game_array(self.user_input)
            print "[+] Game array created!"
        self.dictionary = WordDictionary()
        print "[+] Dictionary Loaded!"
        self.solution = SolutionFile()
        print "[+] Solution file loaded!"
        if LOG_STATS:
            self.stats = StatGetter()
            print "[+] Stats file loaded!"
        print_game(self.game)
        self.run()
    def run(self):
        """Process every board cell, then bucket and persist the found words."""
        self.worker = solver.Worker(self.dictionary, self.game)
        print "[+] Scramble solver starting!"
        t_round = time.time()
        for i in range(4):
            for j in range(4):
                time_start = time.time()
                pause_time = time_start - t_round
                # Abort once the 22-second budget is exceeded.
                # NOTE(review): break only leaves the inner (column) loop;
                # the outer row loop keeps going.  Confirm intent.
                if pause_time >= 22:
                    print "Times up!!!"
                    break
                work_coord = (i, j)
                work_letter = self.game[i][j]
                # Seed the worker with a chain rooted at this cell.
                work = solver.ChainRoot(work_letter, work_coord)
                self.worker.work.append(work)
                self.worker.process_work()
                # Progress is reported as cells completed out of 16.
                n = (i * 4) + j + 1
                progress = (n / 16.0) * 100.0
                outstr = "[+] %3.1f" % progress
                outstr += "%"
                outstr += " done"
                time_done = time.time()
                time_elapsed = time_done - time_start
                print '\n' + outstr
                print "Found %s Words" % len(self.worker.found_words)
                print "Cell time: %4.2f" % time_elapsed
                print "%s chains processed\n" % self.worker.workcount
                if LOG_STATS:
                    self.stats.logstats([n, work_letter,
                        len(self.worker.found_words), self.worker.workcount,
                        time_elapsed])
        big_count = 0
        med_count = 0
        sml_count = 0
        # Bucket each found chain by length: >6 big, 4-6 medium, else small.
        for chain in self.worker.found_words:
            data = chain[1]
            datastr = ""
            wordlength = 0
            for c in data:
                # Serialise the chain as 'row, col-' coordinate pairs.
                datastr = datastr + str(c[0]) + ', ' + str(c[1]) + '-'
                wordlength += 1
            if wordlength > 6:
                self.solution.writebig(datastr)
                big_count += 1
            elif wordlength > 3:
                self.solution.writemed(datastr)
                med_count += 1
            else:
                self.solution.writesml(datastr)
                sml_count += 1
        print "Found %s big words, %s medium words, and %s small words" % (big_count, med_count, sml_count)
        print "[+] Finalizing..."
        self.solution.catwords()
        self.solution.close()
        self.dictionary.close()
        # NOTE(review): self.stats only exists when LOG_STATS is true; this
        # unconditional close() would raise AttributeError if LOG_STATS were
        # ever disabled.
        self.stats.close()
        print "[+] All solution files successfully written!"
        print "Finished!"
if __name__=='__main__':
    # A single CLI argument supplies the 16 board letters; with no argument
    # the user is prompted interactively.
    if len(argv) > 1:
        newgame = GameEngine(inputstr=argv[1])
    else:
        newgame = GameEngine()
| |
# Lint as: python2, python3
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ==============================================================================
"""A visitor class that generates protobufs for each python object."""
import enum
import sys
import six
from google.protobuf import message
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import deprecation
from tensorflow.python.util import tf_decorator
from tensorflow.python.util import tf_inspect
from tensorflow.tools.api.lib import api_objects_pb2
# Following object need to be handled individually.
_CORNER_CASES = {
    '': {
        'tools': {}
    },
    'test.TestCase': {},
    'test.TestCase.failureException': {},
    'train.NanLossDuringTrainingError': {
        'message': {}
    },
    'estimator.NanLossDuringTrainingError': {
        'message': {}
    },
    'train.LooperThread': {
        'isAlive': {},
        'join': {},
        'native_id': {}
    }
}
# Python 2 vs. 3 differences
if sys.version_info.major == 3:
  # Map Python 3 "<class 'x'>" reprs onto the "<type 'x'>" spellings used
  # in the golden API files (which were generated under Python 2).
  _NORMALIZE_TYPE = {}
  for t in ('property', 'object', 'getset_descriptor', 'int', 'str', 'type',
            'tuple', 'module', 'collections.defaultdict', 'set', 'dict',
            'NoneType', 'frozenset', 'member_descriptor'):
    _NORMALIZE_TYPE["<class '%s'>" % t] = "<type '%s'>" % t
  for e in 'Exception', 'RuntimeError':
    _NORMALIZE_TYPE["<class '%s'>" % e] = "<type 'exceptions.%s'>" % e
  _NORMALIZE_TYPE["<class 'abc.ABCMeta'>"] = "<type 'type'>"
  # Nested-class reprs whose qualified names differ between versions.
  _NORMALIZE_ISINSTANCE = {
      "<class "
      "'tensorflow.lite.python.op_hint.OpHint.OpHintArgumentTracker'>":  # pylint: disable=line-too-long
          "<class "
          "'tensorflow.lite.python.op_hint.OpHintArgumentTracker'>",
      "<class "
      "'tensorflow.python.training.monitored_session._MonitoredSession.StepContext'>":  # pylint: disable=line-too-long
          "<class "
          "'tensorflow.python.training.monitored_session.StepContext'>",
      "<class "
      "'tensorflow.python.ops.variables.Variable.SaveSliceInfo'>":
          "<class "
          "'tensorflow.python.ops.variables.SaveSliceInfo'>"
  }
  def _SkipMember(cls, member):
    # Exclude members whose presence differs across Python versions:
    # with_traceback (Py3-only) and enum name/value attributes.
    return (member == 'with_traceback' or member in ('name', 'value') and
            isinstance(cls, type) and issubclass(cls, enum.Enum))
else:
  _NORMALIZE_TYPE = {
      "<class 'abc.ABCMeta'>":
          "<type 'type'>",
      "<class 'pybind11_type'>":
          "<class 'pybind11_builtins.pybind11_type'>",
  }
  _NORMALIZE_ISINSTANCE = {
      "<class 'pybind11_object'>":
          "<class 'pybind11_builtins.pybind11_object'>",
  }
  def _SkipMember(cls, member):  # pylint: disable=unused-argument
    # Python 2 path: no version-specific members need skipping.
    return False
# Differences created by typing implementations.
_NORMALIZE_TYPE[(
    'tensorflow.python.framework.ops.Tensor')] = (
        "<class 'tensorflow.python.framework.ops.Tensor'>")
_NORMALIZE_TYPE['typing.Generic'] = "<class 'typing.Generic'>"
# TODO(b/203104448): Remove once the golden files are generated in Python 3.7.
_NORMALIZE_TYPE["<class 'typing._GenericAlias'>"] = 'typing.Union'
# TODO(b/203104448): Remove once the golden files are generated in Python 3.9.
_NORMALIZE_TYPE["<class 'typing._UnionGenericAlias'>"] = 'typing.Union'
# TODO(b/203104448): Remove once the golden files are generated in Python 3.8.
_NORMALIZE_TYPE[
    "<class 'typing_extensions._ProtocolMeta'>"] = ("<class "
                                                    "'typing._ProtocolMeta'>")
# TODO(b/203104448): Remove once the golden files are generated in Python 3.8.
_NORMALIZE_TYPE[
    "<class 'typing_extensions.Protocol'>"] = "<class 'typing.Protocol'>"
if sys.version_info.major == 3 and sys.version_info.minor >= 8:
  _NORMALIZE_TYPE["<class '_collections._tuplegetter'>"] = "<type 'property'>"
def _NormalizeType(ty):
  """Return the canonical repr for a type string, or the string unchanged."""
  if ty in _NORMALIZE_TYPE:
    return _NORMALIZE_TYPE[ty]
  return ty
def _NormalizeIsInstance(ty):
  """Normalize an is_instance repr; unknown strings pass through as-is."""
  if ty in _NORMALIZE_ISINSTANCE:
    return _NORMALIZE_ISINSTANCE[ty]
  return ty
def _SanitizedArgSpec(obj):
  """Get an ArgSpec string that is free of addresses.
  We have callables as function arg defaults. This results in addresses in
  getargspec output. This function returns a sanitized string list of base
  classes.
  Args:
    obj: A python routine for us the create the sanitized argspec of.
  Returns:
    string, a string representation of the argspec.
  """
  output_string = ''
  unsanitized_arg_spec = tf_inspect.getargspec(obj)
  # args/varargs/keywords never embed addresses; copy them straight through.
  for clean_attr in ('args', 'varargs', 'keywords'):
    output_string += '%s=%s, ' % (clean_attr,
                                  getattr(unsanitized_arg_spec, clean_attr))
  if unsanitized_arg_spec.defaults:
    sanitized_defaults = []
    for val in unsanitized_arg_spec.defaults:
      str_val = str(val)
      # Sanitize argspecs that have hex code in them.
      # An object default's repr looks like '<f at 0x7f...>'; keep only the
      # part before the address so the output is stable across runs.
      if ' at 0x' in str_val:
        sanitized_defaults.append('%s instance>' % str_val.split(' at ')[0])
      else:
        sanitized_defaults.append(str_val)
    output_string += 'defaults=%s, ' % sanitized_defaults
  else:
    # NOTE(review): this branch has no trailing ', ' unlike the others --
    # golden files depend on the exact format, so it must not be "fixed".
    output_string += 'defaults=None'
  return output_string
def _SanitizedMRO(obj):
  """Get a list of superclasses with minimal amount of non-TF classes.
  Based on many parameters like python version, OS, protobuf implementation
  or changes in google core libraries the list of superclasses of a class
  can change. We only return the first non-TF class to be robust to non API
  affecting changes. The Method Resolution Order returned by `tf_inspect.getmro`
  is still maintained in the return value.
  Args:
    obj: A python routine for us the create the sanitized arspec of.
  Returns:
    list of strings, string representation of the class names.
  """
  return_list = []
  for cls in tf_inspect.getmro(obj):
    if cls.__name__ == '_NewClass':
      # Ignore class created by @deprecated_alias decorator.
      continue
    str_repr = _NormalizeType(str(cls))
    return_list.append(str_repr)
    # Class type that has keras in their name should also be monitored. This
    # will cover any class that imported from third_party/py/keras or
    # keras_preprocessing.
    if 'tensorflow' not in str_repr and 'keras' not in str_repr:
      # First non-TF/non-keras base reached: include it, then stop walking.
      break
    # Hack - tensorflow.test.StubOutForTesting may or may not be type <object>
    # depending on the environment. To avoid inconsistency, break after we add
    # StubOutForTesting to the return_list.
    if 'StubOutForTesting' in str_repr:
      break
  return return_list
def _IsProtoClass(obj):
  """Returns whether the passed obj is a Protocol Buffer class."""
  if not isinstance(obj, type):
    return False
  return issubclass(obj, message.Message)
class PythonObjectToProtoVisitor(object):
  """A visitor that summarizes given python objects as protobufs."""
  def __init__(self, default_path='tensorflow'):
    # A dict to store all protocol buffers.
    # Keyed by "path" to the object.
    self._protos = {}
    self._default_path = default_path
  def GetProtos(self):
    """Return the list of protos stored."""
    return self._protos
  def __call__(self, path, parent, children):
    # Visit one API node: record `parent` (module/class reachable at `path`)
    # and its `children` members as a TFAPIObject proto in self._protos.
    # The path to the object.
    lib_path = self._default_path + '.' + path if path else self._default_path
    _, parent = tf_decorator.unwrap(parent)
    # A small helper method to construct members(children) protos.
    def _AddMember(member_name, member_obj, proto):
      """Add the child object to the object being constructed."""
      _, member_obj = tf_decorator.unwrap(member_obj)
      if (_SkipMember(parent, member_name) or
          isinstance(member_obj, deprecation.HiddenTfApiAttribute)):
        return
      # Only public members (plus __init__) are part of the API surface.
      if member_name == '__init__' or not six.ensure_str(
          member_name).startswith('_'):
        if tf_inspect.isroutine(member_obj):
          new_method = proto.member_method.add()
          new_method.name = member_name
          # If member_obj is a python builtin, there is no way to get its
          # argspec, because it is implemented on the C side. It also has no
          # func_code.
          if hasattr(member_obj, '__code__'):
            new_method.argspec = _SanitizedArgSpec(member_obj)
        else:
          new_member = proto.member.add()
          new_member.name = member_name
          if tf_inspect.ismodule(member_obj):
            new_member.mtype = "<type \'module\'>"
          else:
            new_member.mtype = _NormalizeType(str(type(member_obj)))
    parent_corner_cases = _CORNER_CASES.get(path, {})
    # An empty corner-case entry for `path` means "skip this object".
    if path not in _CORNER_CASES or parent_corner_cases:
      # Decide if we have a module or a class.
      if tf_inspect.ismodule(parent):
        # Create a module object.
        module_obj = api_objects_pb2.TFAPIModule()
        for name, child in children:
          if name in parent_corner_cases:
            # If we have an empty entry, skip this object.
            if parent_corner_cases[name]:
              module_obj.member.add(**(parent_corner_cases[name]))
          else:
            _AddMember(name, child, module_obj)
        # Store the constructed module object.
        self._protos[lib_path] = api_objects_pb2.TFAPIObject(
            path=lib_path, tf_module=module_obj)
      elif _IsProtoClass(parent):
        proto_obj = api_objects_pb2.TFAPIProto()
        parent.DESCRIPTOR.CopyToProto(proto_obj.descriptor)
        # Store the constructed proto object.
        self._protos[lib_path] = api_objects_pb2.TFAPIObject(
            path=lib_path, tf_proto=proto_obj)
      elif tf_inspect.isclass(parent):
        # Construct a class.
        class_obj = api_objects_pb2.TFAPIClass()
        class_obj.is_instance.extend(
            _NormalizeIsInstance(i) for i in _SanitizedMRO(parent))
        for name, child in children:
          if name in parent_corner_cases:
            # If we have an empty entry, skip this object.
            if parent_corner_cases[name]:
              class_obj.member.add(**(parent_corner_cases[name]))
          else:
            _AddMember(name, child, class_obj)
        # Store the constructed class object.
        self._protos[lib_path] = api_objects_pb2.TFAPIObject(
            path=lib_path, tf_class=class_obj)
      else:
        logging.error('Illegal call to ApiProtoDump::_py_obj_to_proto.'
                      'Object is neither a module nor a class: %s', path)
| |
# $Id: PC.py 31 2010-01-12 15:44:40Z tlev $
# -*- coding: iso-8859-1 -*-
'''
Created on 20. okt. 2009
@author: levin
'''
from math import *
import SEMBA as S
#Create empty dictionaries to load emission data in
Vehicle_Types = {}
def create_vehicle_list():
    """Populate Vehicle_Types with 'fuel size euroclass' descriptions,
    keyed by vehicle id (column 0 of each S.H_PC row)."""
    # The original iterated items() and immediately overwrote the key with
    # k = 0 (dead code); only the values are actually needed.
    for v in S.H_PC.values():
        Vehicle_Types[v[0]] = v[2] + " " + v[3] + " " + v[4]
def CreatePC():
    """Load the private-car emission tables and build the vehicle list."""
    S.load_PC()
    S.load_PCGradient()
    create_vehicle_list()
def findGrade(gradient, TrafficSituation):
    """Snap a road gradient (percent) to the nearest supported lookup value.

    The emission tables are keyed on even gradients -10..10: a value in
    (e - 1, e + 1] maps to the even number e, and anything beyond +/-9 is
    clamped to +/-10.  The 'urban' and 'motorway' situations only support
    gradients in [-6, 6], so the result is clamped further for those.

    Args:
        gradient: road gradient in percent (e.g. 6 for 6%).
        TrafficSituation: one of 'urban', 'road', 'motorway'.

    Returns:
        An even integer gradient usable as a lookup-table key.
    """
    # The elif chain already guarantees the lower bound of each bucket, so
    # the original's redundant "and gradient > x" checks (and its
    # unreachable 999999 sentinel) are dropped.
    if gradient <= -9:
        g = -10
    elif gradient <= -7:
        g = -8
    elif gradient <= -5:
        g = -6
    elif gradient <= -3:
        g = -4
    elif gradient <= -1:
        g = -2
    elif gradient <= 1:
        g = 0
    elif gradient <= 3:
        g = 2
    elif gradient <= 5:
        g = 4
    elif gradient <= 7:
        g = 6
    elif gradient <= 9:
        g = 8
    else:
        g = 10
    if TrafficSituation == 'urban' or TrafficSituation == 'motorway':
        # These situations only support gradients between -6% and 6%.
        if g < -6:
            g = -6
        if g > 6:
            g = 6
    return g
def CalculatePC(PCID, Component, Speed, Gradient, Engine, TrafficSituation):
    """
    Calculation of emissions from private cars
    UNITS:
    Speed km/h
    Gradient 0.06 is 6%
    Emission calculated is g/km
    Fuel is calculated from CO2 emissions by factors from SFT, Norway
    Maximum speed is 125 km/h
    Engine size:
    PETROL
    small < 1.4 liters
    medium 1.4 -> 2.0 liters
    large >2.0 liters
    DIESEL
    medium <2.0 liters
    large >2.0 liters

    Returns a 5-tuple: (emission g/km, "g/km", converted value, its unit,
    list of warning strings).
    """
    WarningText = []
    CalculateFCfromCO2 = False
    # Finds the correct gradient bucket for the lookup tables
    Gradient = findGrade(Gradient, TrafficSituation)
    if Component == "FC":
        # Fuel consumption is derived from the CO2 emission functions.
        Component = "CO2"
        CalculateFCfromCO2 = True
    # Some legacy callers pass a non-string engine class (e.g. the builtin
    # ``all`` in the demo below); coerce those to the generic 'all' class.
    if not isinstance(Engine, str):
        Engine = 'all'
    # Engine size only differentiates the CO2/FC functions; every other
    # component is only tabulated for the 'all' engine class.  (The original
    # compared against the typo "C02", which forced 'all' for CO2 too and
    # made the Engine argument effectively dead.)
    if Component != "CO2":
        Engine = 'all'
    if Speed < 6:
        # The emission functions are not valid below 6 km/h; report zero.
        # (The original fell through with EuroClass/FuelType unbound and
        # crashed with a NameError on this path.)
        WarningText.append("Speed Under 6kmh")
        egps = S.Convert_gpkm_to_gps(0, Speed)
        return 0, "g/km", egps[0], egps[1], WarningText
    # Get Data from the PC dictionary
    key = str(PCID) + "_" + Component + "_" + Engine
    data = S.H_PC[key]
    VehicleID = data[0]
    EmissionComponent = data[1]
    FuelType = data[2]
    EngineSize = data[3]
    EuroClass = data[4]
    EquationType = data[5]
    Order = data[6]
    # Polynomial / power-law coefficients.
    a0 = float(data[7])
    a1 = float(data[8])
    a2 = float(data[9])
    a3 = float(data[10])
    a4 = float(data[11])
    a5 = float(data[12])
    Emission = -1
    if EquationType == 'Polyn.':
        # Fifth-order polynomial in speed.
        Emission = a0 + \
                   a1 * float(Speed) + \
                   a2 * pow(float(Speed), 2) + \
                   a3 * pow(float(Speed), 3) + \
                   a4 * pow(float(Speed), 4) + \
                   a5 * pow(float(Speed), 5)
    if EquationType == 'Power':
        Emission = a0 * pow(Speed, a1)
    if CalculateFCfromCO2:
        # SFT (Norway) factors: grams of CO2 per gram of fuel.
        if FuelType == "DIESEL":
            Emission = Emission / 3.18
            WarningText.append("Fuel Calculated from CO2 emission factor 3.18")
        if FuelType == "PETROL":
            Emission = Emission / 3.13
            WarningText.append("Fuel Calculated from CO2 emission factor 3.13")
    # Error checks (was: "Her ligger feilsjekkingsrutiner").
    if Speed > 125:
        WarningText.append("Emission Function used outside valid area")
    if (len(WarningText) == 0):
        WarningText.append("No Warnings")
    # Here comes correction for gradient
    corrFactor = 0
    GradeKey = EuroClass + "_" + TrafficSituation + "_" + str(Gradient)
    value = S.H_PCGrade[GradeKey]
    if FuelType == 'PETROL':
        if Component == 'CO':
            corrFactor = value[3]
        if Component == 'HC':
            corrFactor = value[4]
        if Component == 'NOx':
            corrFactor = value[5]
        # FC requests were renamed to CO2 above, so both share column 6.
        if Component == 'FC':
            corrFactor = value[6]
        if Component == 'CO2':
            corrFactor = value[6]
        if Component == 'PM':
            corrFactor = 1  # ARTEMIS does not correct PM for gasoline for grades
    elif FuelType == 'DIESEL':
        if Component == 'CO':
            corrFactor = value[7]
        if Component == 'HC':
            corrFactor = value[8]
        if Component == 'NOx':
            corrFactor = value[9]
        if Component == 'PM':
            corrFactor = value[10]
        # FC requests were renamed to CO2 above, so both share column 11.
        if Component == 'FC':
            corrFactor = value[11]
        if Component == 'CO2':
            corrFactor = value[11]
    CorrectedEmission = float(Emission) * float(corrFactor)
    egps = S.Convert_gpkm_to_gps(CorrectedEmission, Speed)
    return CorrectedEmission, "g/km", egps[0], egps[1], WarningText
def ListTypes():
"""
Lists all heavy duty vehicles available in the dataset that is loaded.
"""
#Function to sort as integers
def compare(a, b):
return cmp(int(a), int(b)) # compare as integers
keys = Vehicle_Types.keys()
keys.sort(compare)
print "Private car ID ; Description"
for key in keys:
print str(key)+ ' ; '+ Vehicle_Types[key]
###########____Load Data____##################
CreatePC()
#test segment for debuging purposes
if __name__ == "__main__":
import matplotlib.pyplot as plt #@UnresolvedImport
a = []
b = []
c = []
d = []
e = []
for i in range(6, 120):
b.append(i)
#def CalculateHDV(HDVID, Component, Speed, Gradient, Load):
a.append(CalculatePC(4,"FC",i,0.02,all,"urban")[0])
c.append(CalculatePC(4,"FC",i,0,all,"urban")[0])
plt.plot(b, a)
plt.plot(b, c)
#plt.plot(b, c,label='Diesel Euro 4')
#leg = plt.legend(loc=1)
#for t in leg.get_texts():
# t.set_fontsize('x-small') # the legend text fontsize
#plt.axis(ymin=0)
#plt.grid(True)
plt.ylabel('Fuel consumption Liter/10km')
plt.xlabel('Vehicle average speed')
plt.title('SEMBA PC Vehicle fuel consumption')
plt.ylim(ymin=0)
plt.show()
#PCID, Component, Speed, Gradient, Engine, TrafficSituation
print CalculatePC(3,"FC",100,0,all,"urban")
| |
__author__ = 'rencui'
from afinn import Afinn
import subprocess
import numpy
import utilities
from textstat.textstat import textstat
from sklearn.feature_extraction.text import *
from nltk.stem.porter import *
from sklearn.model_selection import train_test_split
from tokenizer import simpleTokenize
from scipy.sparse import hstack, csr_matrix
from sklearn import svm
from sklearn.neural_network import MLPClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import ExtraTreesClassifier
import sklearn.metrics
from sklearn.ensemble import AdaBoostClassifier
from sklearn.naive_bayes import MultinomialNB
import sys
from sklearn.model_selection import KFold
# Python 2 hack: reload() re-exposes sys.setdefaultencoding (removed from
# sys by site.py) so implicit str<->unicode conversions use UTF-8.
reload(sys)
sys.setdefaultencoding('utf-8')
# Weekday numbers keyed by abbreviated day name (Mon=1 .. Sun=7).
dayMapper = {'Mon': 1, 'Tue': 2, 'Wed': 3, 'Thu': 4, 'Fri': 5, 'Sat': 6, 'Sun': 7}
# Single shared Porter stemmer instance used by stemContent().
stemmer = PorterStemmer()
def stemContent(input):
    """Tokenize *input* and return the space-joined Porter stems of its tokens."""
    stems = [
        stemmer.stem(token.encode('utf-8').decode('utf-8'))
        for token in simpleTokenize(input)
    ]
    return ' '.join(stems).strip()
# vectorMode 1: tfidf, 2: binaryCount
# featureMode 0: semantic only, 1: vector only, 2: both
def runModel(groupSize, groupTitle, vectorMode, featureMode, trainMode, ablationIndex):
    """Train and evaluate one classifier configuration with 5-fold CV per group.

    :param groupSize: number of data groups to process (files 0..groupSize-1)
    :param groupTitle: directory/file stem under dataset/experiment/groups/
    :param vectorMode: 1 = tfidf, 2 = binary count, 3 = LDA topic
        distributions, anything else = no vector features
    :param featureMode: 0 = semantic only, 1 = vector only, 2 = embedding
        only, 3 = embedding + semantic, anything else = vector + semantic
    :param trainMode: 'MaxEnt', 'NaiveBayes', 'RF', 'Ada', 'MLP', else SVM
    :param ablationIndex: semantic feature family (0..8) to leave out;
        a value >= 9 keeps every feature

    Averaged precision/recall/F1/AUC/accuracy are printed and appended to
    results/<groupTitle>_<trainMode>.result.
    """
    outputFile = 'results/'+groupTitle+'_'+trainMode+'.result'
    resultFile = open(outputFile, 'a')
    mentionMapper = utilities.mapMention('dataset/experiment/mention.json')
    print groupTitle
    print trainMode
    resultFile.write(groupTitle + '\n')
    for group in range(groupSize):
        print 'group: ' + str(group)
        resultFile.write('group: ' + str(group) + '\n')
        # happy_log_probs, sad_log_probs = utilities.readSentimentList('twitter_sentiment_list.csv')
        afinn = Afinn()
        # Positive/negative samples plus precomputed parser statistics,
        # one line per tweet, aligned by position across the files.
        posFile = open('dataset/experiment/groups/' + groupTitle + '/group_' + str(group) + '.pos', 'r')
        negFile = open('dataset/experiment/groups/' + groupTitle + '/group_' + str(group) + '.neg', 'r')
        posParseLengthFile = open('dataset/experiment/groups/' + groupTitle + '/parserLength_' + str(group) + '.pos', 'r')
        negParseLengthFile = open('dataset/experiment/groups/' + groupTitle + '/parserLength_' + str(group) + '.neg', 'r')
        posHeadCountFile = open('dataset/experiment/groups/' + groupTitle + '/parserHeadCount_' + str(group) + '.pos', 'r')
        negHeadCountFile = open('dataset/experiment/groups/' + groupTitle + '/parserHeadCount_' + str(group) + '.neg', 'r')
        posPOSCountFile = open('dataset/experiment/groups/' + groupTitle + '/parserPOSCount_' + str(group) + '.pos', 'r')
        negPOSCountFile = open('dataset/experiment/groups/' + groupTitle + '/parserPOSCount_' + str(group) + '.neg', 'r')
        ids = []
        contents = []
        scores = []
        days = []
        time = []
        authorStatusCount = []
        authorFavoriteCount = []
        authorListedCount = []
        authorIntervals = []
        labels = []
        parseLength = []
        headCount = []
        usernames = []
        additionalFeatures = []
        classes = []
        POScounts = []
        followers = []
        print 'loading data...'
        # Input line fields (separated by ' :: '): [0]=score, [1]=day,
        # [2]=hour, [3]=text, [5]=id, [7]=';'-joined usernames,
        # [8]=statusCount, [9]=favoriteCount, [10]=listedCount,
        # [11]=interval, [12]=followers.
        for line in posFile:
            seg = line.strip().split(' :: ')
            text = seg[3]
            username = seg[7].split(';')
            time.append(utilities.hourMapper(int(seg[2])))
            day = seg[1]
            score = float(seg[0])
            ids.append(seg[5])
            authorStatusCount.append(float(seg[8]))
            authorFavoriteCount.append(float(seg[9]))
            authorListedCount.append(float(seg[10]))
            authorIntervals.append(float(seg[11]))
            usernames.append(username)
            followers.append(float(seg[12]))
            days.append(dayMapper[day])
            contents.append(text)
            scores.append(score)
            labels.append(1)
        for line in negFile:
            seg = line.strip().split(' :: ')
            text = seg[3]
            username = seg[7].split(';')
            time.append(utilities.hourMapper(int(seg[2])))
            day = seg[1]
            score = float(seg[0])
            ids.append(seg[5])
            authorStatusCount.append(float(seg[8]))
            authorFavoriteCount.append(float(seg[9]))
            authorListedCount.append(float(seg[10]))
            authorIntervals.append(float(seg[11]))
            usernames.append(username)
            followers.append(float(seg[12]))
            days.append(dayMapper[day])
            contents.append(text)
            scores.append(score)
            labels.append(0)
        distMapper = {}
        if vectorMode == 1:
            resultFile.write('tfidf \n')
            vectorizer = TfidfVectorizer(analyzer='word', ngram_range=(1, 5), min_df=2, stop_words='english')
            vectorMatrix = vectorizer.fit_transform(contents)
        elif vectorMode == 2:
            resultFile.write('binary count \n')
            # NOTE(review): binary='True' passes the *string* 'True', which is
            # merely truthy; binary=True was probably intended — confirm.
            vectorizer = CountVectorizer(analyzer='word', ngram_range=(1, 5), min_df=2, stop_words='english',
                                         binary='True')
            vectorMatrix = vectorizer.fit_transform(contents)
        elif vectorMode == 3:
            # LDA mode: run the external TMT topic modeler and read back the
            # per-document topic distributions, keyed by document id.
            listFile = open('LDA/LDAinput.list', 'r')
            idMapper = {}
            for index, line in enumerate(listFile):
                idMapper[index] = line.strip()
            subprocess.check_output('java -Xmx1024m -jar LDA/tmt-0.4.0.jar LDA/assign2.scala', shell=True)
            distFile = open('LDA/TMTSnapshots/document-topic-distributions.csv', 'r')
            for line in distFile:
                seg = line.strip().split(',')
                outList = []
                for item in seg[1:]:
                    outList.append(float(item))
                distMapper[idMapper[int(seg[0])]] = outList
            distFile.close()
        else:
            resultFile.write('no vector features \n')
        # NOTE(review): strip(' :: ')[0] takes the first *character* of the
        # stripped line; split(' :: ')[0] may have been intended — this only
        # works if the leading field is a single digit. Confirm file format.
        for line in posParseLengthFile:
            parseLength.append(int(line.strip(' :: ')[0]))
        for line in negParseLengthFile:
            parseLength.append(int(line.strip(' :: ')[0]))
        for line in posHeadCountFile:
            headCount.append(int(line.strip(' :: ')[0]))
        for line in negHeadCountFile:
            headCount.append(int(line.strip(' :: ')[0]))
        for line in posPOSCountFile:
            POScounts.append(utilities.POSRatio(line.strip().split(' :: ')[0].split(' ')))
        for line in negPOSCountFile:
            POScounts.append(utilities.POSRatio(line.strip().split(' :: ')[0].split(' ')))
        posHeadCountFile.close()
        negHeadCountFile.close()
        posParseLengthFile.close()
        negParseLengthFile.close()
        posPOSCountFile.close()
        negPOSCountFile.close()
        posFile.close()
        negFile.close()
        # Build the per-tweet semantic feature vector; each `ablationIndex`
        # value drops one feature family so its contribution can be measured.
        for index, content in enumerate(contents):
            temp = []
            words = simpleTokenize(content)
            twLen = float(len(words))
            sentiScore = afinn.score(content)
            # posProb, negProb = utilities.classifySentiment(words, happy_log_probs, sad_log_probs)
            readScore = textstat.coleman_liau_index(content)
            if ablationIndex != 0:
                # 0: length-normalized sentiment score
                temp.append(sentiScore / twLen)
            if ablationIndex != 1:
                # 1: length, readability and parser-based statistics
                temp.append(twLen)
                temp.append(readScore)
                temp.append(parseLength[index] / twLen)
                temp.append(headCount[index] / twLen)
            if ablationIndex != 2:
                # 2: author activity ratios
                temp.append(authorStatusCount[index]/authorIntervals[index])
                temp.append(authorFavoriteCount[index]/authorStatusCount[index])
                temp.append(authorListedCount[index]/followers[index])
            if ablationIndex != 3:
                # 3: posting day and hour bucket
                temp.append(days[index])
                temp.append(time[index])
            if ablationIndex != 4:
                # 4: flag for any digit in the text
                if any(char.isdigit() for char in content):
                    temp.append(1)
                else:
                    temp.append(0)
            if ablationIndex != 5:
                # 5: part-of-speech ratios
                temp += POScounts[index]
            if ablationIndex != 6:
                # 6: flags for URL / hashtag / user-mention placeholders
                # temp.append(content.count('URRL'))
                if content.count('http://URL') > 0:
                    temp.append(1)
                else:
                    temp.append(0)
                # temp.append(content.count('HHTTG'))
                if content.count('#HTG') > 0:
                    temp.append(1)
                else:
                    temp.append(0)
                # temp.append(content.count('USSERNM'))
                if content.count('@URNM') > 0:
                    temp.append(1)
                else:
                    temp.append(0)
            if ablationIndex != 7:
                # 7: punctuation flags
                # temp.append(content.count('!'))
                if content.count('!') > 0:
                    temp.append(1)
                else:
                    temp.append(0)
                # temp.append(content.count('?'))
                if content.count('?') > 0:
                    temp.append(1)
                else:
                    temp.append(0)
            if ablationIndex != 8:
                # 8: mentioned-user features (verified flag, mean followers)
                mentionFlag = 0
                mentionFollowers = 0
                userCount = 0.0
                for user in usernames[index]:
                    if user in mentionMapper:
                        userCount += 1
                        if mentionMapper[user][0] == 1:
                            mentionFlag = 1
                        mentionFollowers += mentionMapper[user][1]
                temp.append(mentionFlag)
                if userCount == 0:
                    temp.append(0.0)
                else:
                    temp.append(mentionFollowers / userCount)
            additionalFeatures.append(numpy.array(temp))
            classes.append(labels[index])
        # Assemble the final feature matrix for the chosen featureMode.
        if featureMode == 0:
            resultFile.write('semantic features only \n')
            features = csr_matrix(numpy.array(additionalFeatures))
        elif featureMode == 1:
            resultFile.write('vector features only \n')
            features = vectorMatrix
        elif featureMode == 2:
            resultFile.write('embedding features only \n')
            embedFeatures = []
            for id in ids:
                embedFeatures.append(numpy.array(distMapper[id]))
            features = csr_matrix(numpy.array(embedFeatures))
        elif featureMode == 3:
            resultFile.write('embedding and semantic features only \n')
            embedFeatures = []
            for id in ids:
                embedFeatures.append(numpy.array(distMapper[id]))
            features = hstack((csr_matrix(numpy.array(additionalFeatures)), csr_matrix(numpy.array(embedFeatures))), format='csr')
        else:
            resultFile.write('vector and semantic features \n')
            features = hstack((vectorMatrix, csr_matrix(numpy.array(additionalFeatures))), format='csr')
        resultFile.write('Ablation Index: '+str(ablationIndex)+'\n')
        precisionSum = 0.0
        recallSum = 0.0
        f1Sum = 0.0
        accuracySum = 0.0
        aucSum = 0.0
        resultFile.flush()
        print 'running 5-fold CV...'
        roundNum = 0
        kf = KFold(n_splits=5, shuffle=True)
        for train_indices, test_indices in kf.split(classes):
            #for i in range(5):
            #print 'case ' + str(i)
            print 'Round: ' + str(roundNum)
            #feature_train, feature_test, label_train, label_test = train_test_split(features, classes, test_size=0.2, random_state=0)
            feature_train, feature_test = features[train_indices], features[test_indices]
            label_train = []
            label_test = []
            for train_index in train_indices:
                label_train.append(classes[train_index])
            for test_index in test_indices:
                label_test.append(classes[test_index])
            roundNum += 1
            print label_test
            if trainMode == 'MaxEnt':
                model = LogisticRegression()
            elif trainMode == 'NaiveBayes':
                model = MultinomialNB()
            elif trainMode == 'RF':
                model = ExtraTreesClassifier(n_estimators=50, random_state=0)
            elif trainMode == 'Ada':
                model = AdaBoostClassifier()
            elif trainMode == 'MLP':
                model = MLPClassifier(activation='logistic', solver='sgd', learning_rate_init=0.02, learning_rate='constant', batch_size=100)
            else:
                model = svm.SVC()
            model.fit(feature_train, label_train)
            predictions = model.predict(feature_test)
            if len(predictions) != len(label_test):
                print 'inference error!'
                resultFile.write('inferece error!\n')
            accuracy = model.score(feature_test, label_test)
            precision = sklearn.metrics.precision_score(label_test, predictions)
            recall = sklearn.metrics.recall_score(label_test, predictions)
            f1 = sklearn.metrics.f1_score(label_test, predictions)
            auc = sklearn.metrics.roc_auc_score(label_test, predictions)
            aucSum += auc
            precisionSum += precision
            recallSum += recall
            f1Sum += f1
            accuracySum += accuracy
            resultFile.flush()
            # print confusion_matrix(label_test, predictions)
        # Average the five fold scores for this group and report them.
        outputPrecision = precisionSum / 5
        outputRecall = recallSum / 5
        outputAccuracy = accuracySum / 5
        outputF1 = f1Sum / 5
        outputAUC = aucSum / 5
        '''
        if (outputRecall + outputPrecision) == 0:
            outputF1 = 0.0
        else:
            outputF1 = 2 * outputRecall * outputPrecision / (outputRecall + outputPrecision)
        '''
        print outputPrecision
        print outputRecall
        print outputF1
        print outputAUC
        print outputAccuracy
        print ''
        resultFile.write(str(outputPrecision) + '\n')
        resultFile.write(str(outputRecall) + '\n')
        resultFile.write(str(outputF1) + '\n')
        resultFile.write(str(outputAUC) + '\n')
        resultFile.write(str(outputAccuracy) + '\n')
        resultFile.write('\n')
        resultFile.flush()
    resultFile.close()
if __name__ == "__main__":
    # vectorMode 1: tfidf, 2: binaryCount, 3:LDA dist
    # featureMode 0: content only, 1: ngram only, 2: embedding only, 3: embedding and semantic, 4: content and ngram
    # ablationIndex 100 never matches any feature family, so all semantic
    # features are kept (no ablation).
    runModel(1, 'totalGroup', 4, 0, 'SVM', 100)
    #runModel(1, 'totalGroup', 4, 4, 'SVM', 100)
    #for index in range(9):
    #runModel(1, 'totalGroup', 2, 0, 'SVM', index)
    #runModel(1, 'totalGroup', 2, 4, 'SVM', 100)
| |
# Copyright (c) 2010-2012 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import array
import cPickle as pickle
from collections import defaultdict
from gzip import GzipFile
from os.path import getmtime
import struct
from time import time
import os
from io import BufferedReader
from hashlib import md5
from itertools import chain
from swift.common.utils import json
from swift.common.ondisk import hash_path, validate_configuration
from swift.common.ring.utils import tiers_for_dev
class RingData(object):
    """Partitioned consistent hashing ring data (used for serialization)."""

    def __init__(self, replica2part2dev_id, devs, part_shift):
        self.devs = devs
        self._replica2part2dev_id = replica2part2dev_id
        self._part_shift = part_shift
        # Rings written before regions existed lack the key entirely;
        # default every device to region 1 for backward compatibility.
        for dev in self.devs:
            if dev is not None:
                dev.setdefault("region", 1)

    @classmethod
    def deserialize_v1(cls, gz_file):
        """Read a v1-format ring from *gz_file* (positioned past the header).

        Layout: 4-byte big-endian JSON length, the JSON metadata dict, then
        one array of 2-byte device ids per replica.

        :returns: dict with 'devs', 'part_shift' and 'replica2part2dev_id'
        """
        json_len, = struct.unpack('!I', gz_file.read(4))
        ring_dict = json.loads(gz_file.read(json_len))
        ring_dict['replica2part2dev_id'] = []
        partition_count = 1 << (32 - ring_dict['part_shift'])
        for x in xrange(ring_dict['replica_count']):
            # 'H' = unsigned 16-bit, so each partition entry is 2 bytes.
            ring_dict['replica2part2dev_id'].append(
                array.array('H', gz_file.read(2 * partition_count)))
        return ring_dict

    @classmethod
    def load(cls, filename):
        """
        Load ring data from a file.
        :param filename: Path to a file serialized by the save() method.
        :returns: A RingData instance containing the loaded data.
        """
        gz_file = GzipFile(filename, 'rb')
        # Python 2.6 GzipFile doesn't support BufferedIO
        if hasattr(gz_file, '_checkReadable'):
            gz_file = BufferedReader(gz_file)
        # See if the file is in the new format
        magic = gz_file.read(4)
        if magic == 'R1NG':
            version, = struct.unpack('!H', gz_file.read(2))
            if version == 1:
                ring_data = cls.deserialize_v1(gz_file)
            else:
                raise Exception('Unknown ring format version %d' % version)
        else:
            # Assume old-style pickled ring
            gz_file.seek(0)
            ring_data = pickle.load(gz_file)
        # deserialize_v1/old pickles may yield a plain dict; wrap it.
        if not hasattr(ring_data, 'devs'):
            ring_data = RingData(ring_data['replica2part2dev_id'],
                                 ring_data['devs'], ring_data['part_shift'])
        return ring_data

    def serialize_v1(self, file_obj):
        """Write this ring to *file_obj* in the v1 binary format."""
        # Write out new-style serialization magic and version:
        file_obj.write(struct.pack('!4sH', 'R1NG', 1))
        ring = self.to_dict()
        # sort_keys=True keeps the JSON byte-stable across runs, so
        # identical rings produce identical files (see save()).
        json_encoder = json.JSONEncoder(sort_keys=True)
        json_text = json_encoder.encode(
            {'devs': ring['devs'], 'part_shift': ring['part_shift'],
             'replica_count': len(ring['replica2part2dev_id'])})
        json_len = len(json_text)
        file_obj.write(struct.pack('!I', json_len))
        file_obj.write(json_text)
        for part2dev_id in ring['replica2part2dev_id']:
            file_obj.write(part2dev_id.tostring())

    def save(self, filename):
        """
        Serialize this RingData instance to disk.
        :param filename: File into which this instance should be serialized.
        """
        # Override the timestamp so that the same ring data creates
        # the same bytes on disk. This makes a checksum comparison a
        # good way to see if two rings are identical.
        #
        # This only works on Python 2.7; on 2.6, we always get the
        # current time in the gzip output.
        try:
            gz_file = GzipFile(filename, 'wb', mtime=1300507380.0)
        except TypeError:
            gz_file = GzipFile(filename, 'wb')
        self.serialize_v1(gz_file)
        gz_file.close()

    def to_dict(self):
        """Return the ring contents as a plain dict (used by serialize_v1)."""
        return {'devs': self.devs,
                'replica2part2dev_id': self._replica2part2dev_id,
                'part_shift': self._part_shift}
class Ring(object):
    """
    Partitioned consistent hashing ring.
    :param serialized_path: path to serialized RingData instance
    :param reload_time: time interval in seconds to check for a ring change
    :param ring_name: if given, the ring file is <serialized_path>/<ring_name>.ring.gz;
                      otherwise serialized_path is the full file path
    """

    def __init__(self, serialized_path, reload_time=15, ring_name=None):
        # Can't use the ring unless the on-disk configuration is valid
        validate_configuration()
        if ring_name:
            self.serialized_path = os.path.join(serialized_path,
                                                ring_name + '.ring.gz')
        else:
            self.serialized_path = os.path.join(serialized_path)
        self.reload_time = reload_time
        self._reload(force=True)

    def _reload(self, force=False):
        """Re-read the ring file if forced or if it changed on disk."""
        # Next time after which the accessors will re-check the file.
        self._rtime = time() + self.reload_time
        if force or self.has_changed():
            ring_data = RingData.load(self.serialized_path)
            self._mtime = getmtime(self.serialized_path)
            self._devs = ring_data.devs
            # NOTE(akscram): Replication parameters like replication_ip
            #                and replication_port are required for
            #                replication process. An old replication
            #                ring doesn't contain this parameters into
            #                device.
            for dev in self._devs:
                if dev:
                    if 'ip' in dev:
                        dev.setdefault('replication_ip', dev['ip'])
                    if 'port' in dev:
                        dev.setdefault('replication_port', dev['port'])
            self._replica2part2dev_id = ring_data._replica2part2dev_id
            self._part_shift = ring_data._part_shift
            self._rebuild_tier_data()
            # Do this now, when we know the data has changed, rather then
            # doing it on every call to get_more_nodes().
            regions = set()
            zones = set()
            self._num_devs = 0
            for dev in self._devs:
                if dev:
                    regions.add(dev['region'])
                    zones.add((dev['region'], dev['zone']))
                    self._num_devs += 1
            self._num_regions = len(regions)
            self._num_zones = len(zones)

    def _rebuild_tier_data(self):
        """Rebuild tier -> devices lookup tables from self._devs."""
        self.tier2devs = defaultdict(list)
        for dev in self._devs:
            if not dev:
                continue
            for tier in tiers_for_dev(dev):
                self.tier2devs[tier].append(dev)
        # Group tiers by tuple length, then sort the groups so shorter
        # (coarser) tiers come first.
        tiers_by_length = defaultdict(list)
        for tier in self.tier2devs:
            tiers_by_length[len(tier)].append(tier)
        self.tiers_by_length = sorted(tiers_by_length.values(),
                                      key=lambda x: len(x[0]))
        for tiers in self.tiers_by_length:
            tiers.sort()

    @property
    def replica_count(self):
        """Number of replicas (full or partial) used in the ring."""
        return len(self._replica2part2dev_id)

    @property
    def partition_count(self):
        """Number of partitions in the ring."""
        return len(self._replica2part2dev_id[0])

    @property
    def devs(self):
        """devices in the ring"""
        if time() > self._rtime:
            self._reload()
        return self._devs

    def has_changed(self):
        """
        Check to see if the ring on disk is different than the current one in
        memory.
        :returns: True if the ring on disk has changed, False otherwise
        """
        return getmtime(self.serialized_path) != self._mtime

    def _get_part_nodes(self, part):
        """Return the devices for *part*, one entry per distinct device id."""
        part_nodes = []
        seen_ids = set()
        for r2p2d in self._replica2part2dev_id:
            # A replica's array may be shorter than the partition count
            # (partial replicas); skip it if it has no entry for this part.
            if part < len(r2p2d):
                dev_id = r2p2d[part]
                if dev_id not in seen_ids:
                    part_nodes.append(self.devs[dev_id])
                    seen_ids.add(dev_id)
        return part_nodes

    def get_part(self, account, container=None, obj=None):
        """
        Get the partition for an account/container/object.
        :param account: account name
        :param container: container name
        :param obj: object name
        :returns: the partition number
        """
        key = hash_path(account, container, obj, raw_digest=True)
        if time() > self._rtime:
            self._reload()
        # Top bits of the first 4 hash bytes select the partition.
        part = struct.unpack_from('>I', key)[0] >> self._part_shift
        return part

    def get_part_nodes(self, part):
        """
        Get the nodes that are responsible for the partition. If one
        node is responsible for more than one replica of the same
        partition, it will only appear in the output once.
        :param part: partition to get nodes for
        :returns: list of node dicts
        See :func:`get_nodes` for a description of the node dicts.
        """
        if time() > self._rtime:
            self._reload()
        return self._get_part_nodes(part)

    def get_nodes(self, account, container=None, obj=None):
        """
        Get the partition and nodes for an account/container/object.
        If a node is responsible for more than one replica, it will
        only appear in the output once.
        :param account: account name
        :param container: container name
        :param obj: object name
        :returns: a tuple of (partition, list of node dicts)
        Each node dict will have at least the following keys:
        ======  ===============================================================
        id      unique integer identifier amongst devices
        weight  a float of the relative weight of this device as compared to
                others; this indicates how many partitions the builder will try
                to assign to this device
        zone    integer indicating which zone the device is in; a given
                partition will not be assigned to multiple devices within the
                same zone
        ip      the ip address of the device
        port    the tcp port of the device
        device  the device's name on disk (sdb1, for example)
        meta    general use 'extra' field; for example: the online date, the
                hardware description
        ======  ===============================================================
        """
        part = self.get_part(account, container, obj)
        return part, self._get_part_nodes(part)

    def get_more_nodes(self, part):
        """
        Generator to get extra nodes for a partition for hinted handoff.
        The handoff nodes will try to be in zones other than the
        primary zones, will take into account the device weights, and
        will usually keep the same sequences of handoffs even with
        ring changes.
        :param part: partition to get handoff nodes for
        :returns: generator of node dicts
        See :func:`get_nodes` for a description of the node dicts.
        """
        if time() > self._rtime:
            self._reload()
        primary_nodes = self._get_part_nodes(part)
        used = set(d['id'] for d in primary_nodes)
        same_regions = set(d['region'] for d in primary_nodes)
        same_zones = set((d['region'], d['zone']) for d in primary_nodes)
        parts = len(self._replica2part2dev_id[0])
        # Deterministic starting partition derived from `part`, so the
        # handoff sequence is stable across calls and ring reloads.
        start = struct.unpack_from(
            '>I', md5(str(part)).digest())[0] >> self._part_shift
        inc = int(parts / 65536) or 1
        # Multiple loops for execution speed; the checks and bookkeeping get
        # simpler as you go along
        # Pass 1: prefer devices in regions none of the primaries touch.
        hit_all_regions = len(same_regions) == self._num_regions
        for handoff_part in chain(xrange(start, parts, inc),
                                  xrange(inc - ((parts - start) % inc),
                                         start, inc)):
            if hit_all_regions:
                # At this point, there are no regions left untouched, so we
                # can stop looking.
                break
            for part2dev_id in self._replica2part2dev_id:
                if handoff_part < len(part2dev_id):
                    dev_id = part2dev_id[handoff_part]
                    dev = self._devs[dev_id]
                    region = dev['region']
                    zone = (region, dev['zone'])
                    if dev_id not in used and region not in same_regions:
                        yield dev
                        used.add(dev_id)
                        same_regions.add(region)
                        same_zones.add(zone)
                        if len(same_regions) == self._num_regions:
                            hit_all_regions = True
                            break
        # Pass 2: devices in zones no selected device touches yet.
        hit_all_zones = len(same_zones) == self._num_zones
        for handoff_part in chain(xrange(start, parts, inc),
                                  xrange(inc - ((parts - start) % inc),
                                         start, inc)):
            if hit_all_zones:
                # Much like we stopped looking for fresh regions before, we
                # can now stop looking for fresh zones; there are no more.
                break
            for part2dev_id in self._replica2part2dev_id:
                if handoff_part < len(part2dev_id):
                    dev_id = part2dev_id[handoff_part]
                    dev = self._devs[dev_id]
                    zone = (dev['region'], dev['zone'])
                    if dev_id not in used and zone not in same_zones:
                        yield dev
                        used.add(dev_id)
                        same_zones.add(zone)
                        if len(same_zones) == self._num_zones:
                            hit_all_zones = True
                            break
        # Pass 3: any device not used yet, regardless of region/zone.
        hit_all_devs = len(used) == self._num_devs
        for handoff_part in chain(xrange(start, parts, inc),
                                  xrange(inc - ((parts - start) % inc),
                                         start, inc)):
            if hit_all_devs:
                # We've used every device we have, so let's stop looking for
                # unused devices now.
                break
            for part2dev_id in self._replica2part2dev_id:
                if handoff_part < len(part2dev_id):
                    dev_id = part2dev_id[handoff_part]
                    if dev_id not in used:
                        yield self._devs[dev_id]
                        used.add(dev_id)
                        if len(used) == self._num_devs:
                            hit_all_devs = True
                            break
| |
# -*- coding: utf-8 -*-
"""
nuage_acl_learner is a tool which can be used in a clean test environment to create a set of ACL rules which are being used by the applications running in that environment.
After you configured your VRS's to point their flow logs to the IP of the server where you run this tool, you can start it and it will start listening for TCP connections on port 514.
At the start, the tool will investigate the specified domain and will create a set of learning ACL rules (both ingress and egress). These rules will be used to enable logging for all traffic.
Once a flow log message is sent to the tool from a VRS, the tool will investigate the flow and will implement a matching ACL rule entry.
The ACL rule entry will be created using either Policy Groups, Zones or Subnets, depending on the type specified at runtime. If the destination of the traffic is outside of the domain, a network macro for the destination will be created and used in the rule.
The tool can either specify 'any' as source port, or, if specified at runtime, the tool will be very strict and create a rule with the source port set to the one used in the flow. In most cases this strict policy is a bit overkill: most client connections use a random port, using a strict policy for source port would block the next traffic attempt because it is a different source port.
The original idea came from Jeroen van Bemmel.
--- Author ---
Philippe Dellaert <philippe.dellaert@nuagenetworks.net>
--- Version history ---
2016-01-22 - 1.0 - Only Ingress rules for now
--- VRS configuration ---
To configure your VRS, you have to edit the (r)syslog configuration to send everything matching 'ACLAUDIT' to the server running this tool on port 514 via a TCP connection.
Example rsyslogd rule if the tool is running on 10.167.43.23:
:msg,contains,"ACLAUDIT" @@10.167.43.23:514
--- Limitations ---
- When working with Policy Groups, it will only use one of the PG's for the rule
- If POLICYGROUP is specified as type, and a VM has no Policy Group assigned, no rule will be created
- Only creates ingress rules for now
- It does not use the commit/rollback system for ACLs as this is an automated tool. The rules impact traffic immediately
--- Usage ---
run 'python nuage_acl_learner.py -h' for an overview
--- Documentation ---
http://github.com/nuagenetworks/vspk-examples/blob/master/docs/nuage_acl_learner.md
--- Example ---
---- Set non-strict source port rules using Policy Groups ----
python nuage_acl_learner.py -d -D "Main Customer Domain" -E csp -H 10.167.43.64 -P 443 -p csproot -u csproot -S -t POLICYGROUP
"""
from future import standard_library
standard_library.install_aliases()
from builtins import str
import argparse
import getpass
import logging
import re
import string
import socketserver
import time
from vspk import v6 as vsdk
# Global variables shared between main() and the TCP handler.
nc = None                    # VSD session
nc_enterprise = None         # enterprise the domain belongs to
nc_domain = None             # domain being learned
nc_subnetmap = {}            # subnet id -> subnet info
nc_policygroupmap = {}       # policy group id -> info
nc_vportmap = {}             # vPort IP -> vPort info
nc_networkmacromap = {}      # "<ip>-<mask>" -> network macro info
ingress_learning_acl = None  # ingress ACL template the learned rules go into
egress_learning_acl = None   # egress ACL template (reserved; ingress only for now)
logger = None
configuration = {}
flows = {}                   # flow id -> created ACL entry data (dedup cache)
# Raw strings: the original patterns relied on '\w', '\d' and '\(' surviving
# as literal backslash sequences, which is a DeprecationWarning (and a future
# SyntaxError) for non-raw string literals. Patterns are unchanged otherwise.
# Extracts direction, src/dst IP and protocol number from an OVS flow log line.
ip_regex = re.compile(r'.*dir: (\w+).*ipv4\(src=([\d\.]+)[^,]*,dst=([\d\.]+)[^,]*,proto=(\w+).*')
# Extracts L4 protocol name and src/dst ports.
traffic_regex = re.compile(r'.*(tcp|udp)\(src=(\d+)[^,]*,dst=(\d+)[^\)]*\).*')
class ACLTCPHandler(socketserver.StreamRequestHandler):
    """
    Will handle ACL log messages and create appropriate ACLs
    """

    def handle(self):
        """Parse one syslog flow-log line and create a matching ingress ACL.

        Returns 0 on success or when the line is ignorable (no flow data,
        already-known flow), 1 when a required source/destination object
        cannot be resolved.
        """
        global flows, nc_networkmacromap, configuration
        data = self.rfile.readline().strip()
        # BUGFIX: on Python 3 rfile yields bytes, which cannot be matched
        # against the module's str regex patterns; decode first. On Python 2
        # data is already a str and this branch is skipped.
        if isinstance(data, bytes):
            data = data.decode('utf-8', 'replace')
        logger.debug('Received message from %s: %s' % (self.client_address[0], data))
        # Parsing message
        ip_matches = ip_regex.match(data)
        if ip_matches is None:
            logger.debug('No valid stream found')
            return 0
        flow_matches = traffic_regex.match(data)
        if flow_matches is None:
            logger.debug('No valid TCP/UDP stream found')
            return 0
        stream_type = flow_matches.group(1)
        stream_direction = ip_matches.group(1)
        stream_src_ip = ip_matches.group(2)
        stream_src_port = flow_matches.group(2)
        stream_dst_ip = ip_matches.group(3)
        stream_dst_port = flow_matches.group(3)
        stream_protocol = ip_matches.group(4)
        logger.debug('Found %s stream: direction %s - source ip %s - source port %s - destination ip %s - destination port %s - protocol %s' % (stream_type, stream_direction, stream_src_ip, stream_src_port, stream_dst_ip, stream_dst_port, stream_protocol))
        # Flow id doubles as the dedup key; without strict source ports the
        # source port is wildcarded so one rule covers all client ports.
        if configuration['strictsource']:
            flow_id = '%s_%s_%s_%s_%s' % (stream_type, stream_src_ip, stream_src_port, stream_dst_ip, stream_dst_port)
        else:
            flow_id = '%s_%s_%s_%s' % (stream_type, stream_src_ip, stream_dst_ip, stream_dst_port)
            stream_src_port = '*'
        if flow_id in flows:
            logger.info('ACL already exists in the known flows, skipping handling it.')
            return 0
        src_vport = None
        dst_vport = None
        src_subnet = None
        dst_subnet = None
        src_pg = None
        dst_pg = None
        dst_nm = None
        # Resolve the source: must be a known vPort in the domain.
        if stream_src_ip in nc_vportmap:
            src_vport = nc_vportmap[stream_src_ip]
            logger.debug('Found source vPort for IP %s with MAC %s' % (stream_src_ip, src_vport['mac']))
            if configuration['acl_type'] == 'SUBNET' or configuration['acl_type'] == 'ZONE':
                src_subnet = nc_subnetmap[src_vport['subnet']]
                logger.debug('Found source subnet for IP %s: %s-%s' % (stream_src_ip, src_subnet['address'], src_subnet['netmask']))
            if configuration['acl_type'] == 'POLICYGROUP':
                # BUGFIX: was `src_vport['policygroups'] > 0`, comparing a
                # list to an int — always True on Python 2 (even when empty)
                # and a TypeError on Python 3. Check for a non-empty list.
                if len(src_vport['policygroups']) > 0:
                    src_pg = src_vport['policygroups'][0]
                    logger.debug('Found source Policy Group %s for IP %s' % (src_pg['name'], stream_src_ip))
                else:
                    logger.error('Source vPort with IP %s does not have a Policy Group assigned, can not create ACL rules' % stream_src_ip)
                    return 1
        else:
            logger.error('Unknown vPort for source IP %s, skipping this flow' % stream_src_ip)
            return 1
        # Resolve the destination: vPort in the domain, an existing /32
        # network macro, or a freshly created /32 network macro.
        if stream_dst_ip in nc_vportmap:
            dst_vport = nc_vportmap[stream_dst_ip]
            logger.debug('Found destination vPort for IP %s with MAC %s' % (stream_dst_ip, dst_vport['mac']))
            if configuration['acl_type'] == 'SUBNET' or configuration['acl_type'] == 'ZONE':
                dst_subnet = nc_subnetmap[dst_vport['subnet']]
                logger.debug('Found destination subnet for IP %s: %s-%s' % (stream_dst_ip, dst_subnet['address'], dst_subnet['netmask']))
            if configuration['acl_type'] == 'POLICYGROUP':
                # BUGFIX: same list-vs-int comparison as on the source side.
                if len(dst_vport['policygroups']) > 0:
                    dst_pg = dst_vport['policygroups'][0]
                    logger.debug('Found destination Policy Group %s for IP %s' % (dst_pg['name'], stream_dst_ip))
                else:
                    logger.error('Destination vPort with IP %s does not have a Policy Group assigned, can not create ACL rules' % stream_src_ip)
                    return 1
        elif '%s-255.255.255.255' % stream_dst_ip in nc_networkmacromap:
            logger.debug('vPort for destination IP %s does not exist, using existing /32 Network Macro' % stream_dst_ip)
            dst_nm = nc_networkmacromap['%s-255.255.255.255' % stream_dst_ip]
            logger.debug('Found destination network macro for IP %s' % stream_dst_ip)
        else:
            logger.debug('vPort or Network Macro for destination IP %s does not exist, creating a /32 Network Macro' % stream_dst_ip)
            # BUGFIX: `string.replace(s, '.', '_')` was removed in Python 3;
            # use the equivalent str method (identical behavior on Python 2).
            temp_nm_name = ('%s-255.255.255.255' % stream_dst_ip).replace('.', '_')
            temp_nm = vsdk.NUEnterpriseNetwork(
                name=temp_nm_name,
                address=stream_dst_ip,
                netmask='255.255.255.255'
            )
            nc_enterprise.create_child(temp_nm)
            logger.info('Created new Network Macro for destination IP %s' % stream_dst_ip)
            dst_nm = {
                'id': temp_nm.id,
                'address': stream_dst_ip,
                'netmask': '255.255.255.255'
            }
            nc_networkmacromap['%s-255.255.255.255' % stream_dst_ip] = dst_nm
        # Map the resolved source to the ACL location type/id.
        src_type = None
        src_id = None
        if configuration['acl_type'] == 'ZONE':
            src_type = 'ZONE'
            src_id = src_subnet['zone']
        elif configuration['acl_type'] == 'SUBNET':
            src_type = 'SUBNET'
            src_id = src_subnet['id']
        elif configuration['acl_type'] == 'POLICYGROUP':
            src_type = 'POLICYGROUP'
            src_id = src_pg['id']
        # Map the resolved destination to the ACL network type/id; falls back
        # to the (possibly just created) enterprise network macro.
        dst_type = None
        dst_id = None
        if dst_vport is not None and configuration['acl_type'] == 'ZONE':
            dst_type = 'ZONE'
            dst_id = dst_subnet['zone']
        elif dst_vport is not None and configuration['acl_type'] == 'SUBNET':
            dst_type = 'SUBNET'
            dst_id = dst_subnet['id']
        elif dst_vport is not None and configuration['acl_type'] == 'POLICYGROUP':
            dst_type = 'POLICYGROUP'
            dst_id = dst_pg['id']
        else:
            dst_type = 'ENTERPRISE_NETWORK'
            dst_id = dst_nm['id']
        # IANA protocol numbers: 17 = UDP (default), 6 = TCP.
        stream_protocol = '17'
        if stream_type == 'tcp':
            stream_protocol = '6'
        logger.debug('Creating new Ingress ACL rule with values: action FORWARD - ether_type 0x0800 - location_type %s - location_id %s - network_type %s - network_id %s - protocol %s - source_port %s - destination_port %s - dscp * - reflexive True - priority %s' % (src_type, src_id, dst_type, dst_id, stream_protocol, stream_src_port, stream_dst_port, configuration['next_priority']))
        ingress_acl_entry = vsdk.NUIngressACLEntryTemplate(
            action='FORWARD',
            description='Learned - %s %s:%s to %s:%s' % (stream_type, stream_src_ip, stream_src_port, stream_dst_ip, stream_dst_port),
            ether_type='0x0800',
            location_type=src_type,
            location_id=src_id,
            network_type=dst_type,
            network_id=dst_id,
            protocol=stream_protocol,
            source_port=stream_src_port,
            destination_port=stream_dst_port,
            dscp='*',
            reflexive=True,
            priority=configuration['next_priority']
        )
        # For now we work without jobs, way easier...
        ingress_learning_acl.create_child(ingress_acl_entry, as_async=False)
        # Remember the flow so duplicate log lines don't create duplicate rules.
        flows[flow_id] = {
            'action': 'FORWARD',
            'description': 'Learned - %s %s:%s to %s:%s' % (stream_type, stream_src_ip, stream_src_port, stream_dst_ip, stream_dst_port),
            'ether_type': '0x0800',
            'location_type': src_type,
            'location_id': src_id,
            'network_type': dst_type,
            'network_id': dst_id,
            'protocol': stream_protocol,
            'source_port': stream_src_port,
            'destination_port': stream_dst_port,
            'dscp': '*',
            'reflexive': True,
            'priority': configuration['next_priority']
        }
        configuration['next_priority'] += 1
        return 0
def get_args():
    """Parse and return the command-line arguments for this tool.

    Returns:
        argparse.Namespace: the parsed arguments; each option maps onto one
        key of the global ``configuration`` dict in ``main()``.
    """
    arg_parser = argparse.ArgumentParser(description="Tool which will create ACLs learned from flow logs from the VRS. It will actively listen to incomming syslog connections on port 514.")
    # Output verbosity
    arg_parser.add_argument('-d', '--debug', required=False, help='Enable debug output', dest='debug', action='store_true')
    arg_parser.add_argument('-v', '--verbose', required=False, help='Enable verbose output', dest='verbose', action='store_true')
    arg_parser.add_argument('-l', '--log-file', required=False, help='File to log to (default = stdout)', dest='logfile', type=str)
    # Rule generation behaviour
    arg_parser.add_argument('-f', '--first-priority', required=False, help='The priority of the first created rule (will be incremented for each next rule), default is 100', dest='first_priority', type=int, default=100)
    arg_parser.add_argument('-s', '--strict-source-ports', required=False, help='Use strict source ports, this will set the specific source port instead of the default * setting for Ingress rules.', dest='strictsource', action='store_true')
    arg_parser.add_argument('-t', '--type', required=True, help='On what entity type should the ACLs be applied. Valid responses: POLICYGROUP, ZONE, SUBNET', dest='acl_type', type=str, choices=['POLICYGROUP', 'ZONE', 'SUBNET'])
    # Nuage VSD connection parameters
    arg_parser.add_argument('-D', '--nuage-domain', required=True, help='The domain to investigate and set ACLs on', dest='nuage_domain', type=str)
    arg_parser.add_argument('-E', '--nuage-enterprise', required=True, help='The enterprise with which to connect to the Nuage VSD/SDK host', dest='nuage_enterprise', type=str)
    arg_parser.add_argument('-H', '--nuage-host', required=True, help='The Nuage VSD/SDK endpoint to connect to', dest='nuage_host', type=str)
    arg_parser.add_argument('-P', '--nuage-port', required=False, help='The Nuage VSD/SDK server port to connect to (default = 8443)', dest='nuage_port', type=int, default=8443)
    arg_parser.add_argument('-p', '--nuage-password', required=False, help='The password with which to connect to the Nuage VSD/SDK host. If not specified, the user is prompted at runtime for a password', dest='nuage_password', type=str)
    arg_parser.add_argument('-u', '--nuage-user', required=True, help='The username with which to connect to the Nuage VSD/SDK host', dest='nuage_username', type=str)
    arg_parser.add_argument('-S', '--disable-SSL-certificate-verification', required=False, help='Disable SSL certificate verification on connect (deprecated)', dest='nosslcheck', action='store_true')
    return arg_parser.parse_args()
def wait_for_job(parent, job):
    """Create *job* under *parent* and block until it reaches a terminal state.

    Polls the job once per second. Returns the job's result when the job
    reports SUCCESS, or False when it ends in any other non-RUNNING status.
    """
    logger.debug('Creating Job with command %s' % job.command)
    parent.create_child(job)
    while True:
        logger.debug('Fetching update on the job with command %s' % job.command)
        job.fetch()
        current_status = job.status
        if current_status == 'SUCCESS':
            logger.debug('Job with command %s executed succesfully returning result %s' % (job.command, job.result))
            return job.result
        if current_status != 'RUNNING':
            # Any terminal status other than SUCCESS counts as failure.
            logger.error('Job with command %s failed, status is %s, returning False' % (job.command, job.status))
            return False
        time.sleep(1)
def main():
    """
    Entry point: parse arguments, connect to the Nuage VSD, build the lookup
    maps needed for the chosen ACL entity type, ensure the flow-logging
    Ingress/Egress ACL templates exist, then serve incoming syslog
    connections on TCP port 514 until interrupted.

    Returns 0 on a clean shutdown, 1 on connection/lookup failure.
    """
    global logger, configuration, nc, nc_enterprise, nc_domain, nc_subnetmap, nc_policygroupmap, nc_vportmap, nc_networkmacromap, ingress_learning_acl, egress_learning_acl
    # Handling arguments: copy every CLI option into the shared configuration
    # dict that the syslog handler reads.
    args = get_args()
    configuration['debug'] = args.debug
    configuration['next_priority'] = args.first_priority
    configuration['log_file'] = None
    if args.logfile:
        configuration['log_file'] = args.logfile
    configuration['nuage_domain'] = args.nuage_domain
    configuration['nuage_enterprise'] = args.nuage_enterprise
    configuration['nuage_host'] = args.nuage_host
    configuration['nuage_port'] = args.nuage_port
    configuration['nuage_password'] = None
    if args.nuage_password:
        configuration['nuage_password'] = args.nuage_password
    configuration['nuage_username'] = args.nuage_username
    configuration['strictsource'] = args.strictsource
    configuration['nosslcheck'] = args.nosslcheck
    configuration['acl_type'] = args.acl_type
    configuration['verbose'] = args.verbose
    # Logging settings: --debug wins over --verbose; default is warnings only.
    if configuration['debug']:
        log_level = logging.DEBUG
    elif configuration['verbose']:
        log_level = logging.INFO
    else:
        log_level = logging.WARNING
    logging.basicConfig(filename=configuration['log_file'], format='%(asctime)s %(levelname)s %(message)s', level=log_level)
    logger = logging.getLogger(__name__)
    # Getting user password for Nuage connection (prompt only when not given
    # on the command line).
    if configuration['nuage_password'] is None:
        logger.debug('No command line Nuage password received, requesting Nuage password from user')
        configuration['nuage_password'] = getpass.getpass(prompt='Enter password for Nuage host %s for user %s: ' % (configuration['nuage_host'], configuration['nuage_username']))
    try:
        # Connecting to Nuage
        logger.info('Connecting to Nuage server %s:%s with username %s' % (configuration['nuage_host'], configuration['nuage_port'], configuration['nuage_username']))
        nc = vsdk.NUVSDSession(username=configuration['nuage_username'], password=configuration['nuage_password'], enterprise=configuration['nuage_enterprise'], api_url="https://%s:%s" % (configuration['nuage_host'], configuration['nuage_port']))
        nc.start()
    except Exception as e:
        logger.error('Could not connect to Nuage host %s with user %s and specified password' % (configuration['nuage_host'], configuration['nuage_username']))
        logger.critical('Caught exception: %s' % str(e))
        return 1
    # Finding domain
    logger.debug('Finding domain %s' % configuration['nuage_domain'])
    nc_domain = nc.user.domains.get_first(filter="name == '%s'" % configuration['nuage_domain'])
    if nc_domain is None:
        logger.critical('Unable to find domain %s, quiting' % configuration['nuage_domain'])
        return 1
    logger.info('Found domain %s' % nc_domain.name)
    # Getting enterprise (the domain's parent), needed for Network Macros.
    logger.debug('Getting enterprise for domain %s' % nc_domain.name)
    nc_enterprise = vsdk.NUEnterprise(id=nc_domain.parent_id)
    nc_enterprise.fetch()
    if configuration['acl_type'] == 'SUBNET' or configuration['acl_type'] == 'ZONE':
        # Mapping subnets — also needed for ZONE, since a subnet entry
        # carries its parent zone id.
        logger.debug('Mapping subnets for domain %s' % nc_domain.name)
        for nc_subnet in nc_domain.subnets.get():
            logger.debug('Found subnet with network %s/%s in domain %s' % (nc_subnet.address, nc_subnet.netmask, nc_domain.name))
            nc_subnetmap[nc_subnet.id] = {
                'id': nc_subnet.id,
                'address': nc_subnet.address,
                'netmask': nc_subnet.netmask,
                'zone': nc_subnet.parent_id
            }
    if configuration['acl_type'] == 'POLICYGROUP':
        # Mapping policy groups
        logger.debug('Mapping policy groups for domain %s' % nc_domain.name)
        for nc_policygroup in nc_domain.policy_groups.get():
            logger.debug('Found policy group %s in domain %s' % (nc_policygroup.name, nc_domain.name))
            nc_policygroupmap[nc_policygroup.id] = {
                'id': nc_policygroup.id,
                'name': nc_policygroup.name
            }
    # Mapping vPorts, keyed by the first VM interface's IP address.
    # NOTE(review): nc_vport.vm_interfaces.get_first() is assumed to always
    # return an interface; a vPort without a VM interface would raise here —
    # confirm against the deployment.
    logger.debug('Mapping vPorts for domain %s' % nc_domain.name)
    for nc_vport in nc_domain.vports.get():
        logger.debug('Found vPort with IP %s and MAC %s in domain %s' % (nc_vport.vm_interfaces.get_first().ip_address, nc_vport.vm_interfaces.get_first().mac, nc_domain.name))
        nc_vportmap[nc_vport.vm_interfaces.get_first().ip_address] = {
            'id': nc_vport.id,
            'mac': nc_vport.vm_interfaces.get_first().mac,
            'subnet': nc_vport.parent_id,
            'policygroups': []
        }
        for nc_policygroup in nc_vport.policy_groups.get():
            logger.debug('Found policy group %s for vPort with %s and MAC %s in domain %s' % (nc_policygroup.name, nc_vport.vm_interfaces.get_first().ip_address, nc_vport.vm_interfaces.get_first().mac, nc_domain.name))
            nc_vportmap[nc_vport.vm_interfaces.get_first().ip_address]['policygroups'].append({
                'id': nc_policygroup.id,
                'name': nc_policygroup.name
            })
    # Mapping Network Macros, keyed by "address-netmask".
    logger.debug('Mapping Network Macros for enterprise %s' % nc_enterprise.name)
    for nc_networkmacro in nc_enterprise.enterprise_networks.get():
        logger.debug('Found Network Macro with IP %s and netmask %s for Enterprise %s' % (nc_networkmacro.address, nc_networkmacro.netmask, nc_enterprise.name))
        nc_networkmacromap['%s-%s' % (nc_networkmacro.address, nc_networkmacro.netmask)] = {
            'id': nc_networkmacro.id,
            'address': nc_networkmacro.address,
            'netmask': nc_networkmacro.netmask
        }
    # Checking if ACL logging rules are present; create the templates with
    # catch-all flow-logging entries (TCP, UDP, other) when missing.
    ingress_learning_acl = nc_domain.ingress_acl_templates.get_first(filter="name == 'Ingress Learning ACLs'")
    egress_learning_acl = nc_domain.egress_acl_templates.get_first(filter="name == 'Egress Learning ACLs'")
    if ingress_learning_acl is None:
        logger.info('Creating Ingress Learning ACLs')
        ingress_learning_acl = vsdk.NUIngressACLTemplate(
            name='Ingress Learning ACLs',
            priority_type='NONE',
            priority=100,
            default_allow_non_ip=False,
            default_allow_ip=False,
            allow_l2_address_spoof=False,
            active=True
        )
        nc_domain.create_child(ingress_learning_acl, as_async=False)
        logger.debug('Creating Ingress ACL TCP rule')
        ingress_acl_entry_1 = vsdk.NUIngressACLEntryTemplate(
            action='FORWARD',
            description='Learning ACL for TCP traffic',
            ether_type='0x0800',
            flow_logging_enabled=True,
            location_type='ANY',
            network_type='ANY',
            priority=1000,
            protocol=6,
            reflexive=True,
            source_port='*',
            destination_port='*',
            dscp='*'
        )
        ingress_learning_acl.create_child(ingress_acl_entry_1, as_async=False)
        logger.debug('Creating Ingress ACL UDP rule')
        ingress_acl_entry_2 = vsdk.NUIngressACLEntryTemplate(
            action='FORWARD',
            description='Learning ACL for UDP traffic',
            ether_type='0x0800',
            flow_logging_enabled=True,
            location_type='ANY',
            network_type='ANY',
            priority=1001,
            protocol=17,
            reflexive=True,
            source_port='*',
            destination_port='*',
            dscp='*'
        )
        ingress_learning_acl.create_child(ingress_acl_entry_2, as_async=False)
        logger.debug('Creating Ingress ACL other rule')
        ingress_acl_entry_3 = vsdk.NUIngressACLEntryTemplate(
            action='FORWARD',
            description='Learning ACL for other traffic',
            ether_type='0x0800',
            flow_logging_enabled=True,
            location_type='ANY',
            network_type='ANY',
            priority=1002,
            protocol='ANY',
            source_port=None,
            destination_port=None,
            dscp='*'
        )
        ingress_learning_acl.create_child(ingress_acl_entry_3, as_async=False)
        logger.info('Ingress ACL rules created')
    if egress_learning_acl is None:
        logger.info('Creating Egress Learning ACLs')
        egress_learning_acl = vsdk.NUEgressACLTemplate(
            name='Egress Learning ACLs',
            priority_type='NONE',
            priority=100,
            default_allow_non_ip=False,
            default_allow_ip=False,
            default_install_acl_implicit_rules=True,
            active=True
        )
        nc_domain.create_child(egress_learning_acl, as_async=False)
        logger.debug('Creating Egress ACL TCP rule')
        egress_acl_entry_1 = vsdk.NUEgressACLEntryTemplate(
            action='FORWARD',
            description='ACL for TCP traffic',
            ether_type='0x0800',
            flow_logging_enabled=True,
            location_type='ANY',
            network_type='ANY',
            priority=1000,
            protocol=6,
            reflexive=True,
            source_port='*',
            destination_port='*',
            dscp='*'
        )
        egress_learning_acl.create_child(egress_acl_entry_1, as_async=False)
        logger.debug('Creating Egress ACL UDP rule')
        egress_acl_entry_2 = vsdk.NUEgressACLEntryTemplate(
            action='FORWARD',
            description='ACL for UDP traffic',
            ether_type='0x0800',
            flow_logging_enabled=True,
            location_type='ANY',
            network_type='ANY',
            priority=1001,
            protocol=17,
            reflexive=True,
            source_port='*',
            destination_port='*',
            dscp='*'
        )
        egress_learning_acl.create_child(egress_acl_entry_2, as_async=False)
        logger.debug('Creating Egress ACL other rule')
        egress_acl_entry_3 = vsdk.NUEgressACLEntryTemplate(
            action='FORWARD',
            description='ACL for other traffic',
            ether_type='0x0800',
            flow_logging_enabled=True,
            location_type='ANY',
            network_type='ANY',
            priority=1002,
            protocol='ANY',
            source_port=None,
            destination_port=None,
            dscp='*'
        )
        egress_learning_acl.create_child(egress_acl_entry_3, as_async=False)
        logger.info('Egress ACL rules created')
    logger.info('Starting capture server on port 514')
    capture_server = socketserver.TCPServer(('0.0.0.0', 514), ACLTCPHandler)
    try:
        # Activate the server; this will keep running until you
        # interrupt the program with Ctrl-C
        capture_server.serve_forever()
    except KeyboardInterrupt:
        logger.info('Received interrupt, finishing up')
        capture_server.shutdown()
    logger.info('All done!')
    # Clean shutdown: return 0 (the previous `return 1` signalled failure by
    # shell convention even after a successful run).
    return 0
# Start program
# Entry point guard: run main() only when executed as a script, not on import.
if __name__ == "__main__":
    main()
| |
# Copyright 2009 Brian Quinlan. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
import logging
import threading
import time
from collections import namedtuple
__author__ = 'Brian Quinlan (brian@sweetapp.com)'
# Valid `return_when` values accepted by wait().
FIRST_COMPLETED = 'FIRST_COMPLETED'
FIRST_EXCEPTION = 'FIRST_EXCEPTION'
ALL_COMPLETED = 'ALL_COMPLETED'
# Internal sentinel used by as_completed() to select its waiter type.
_AS_COMPLETED = '_AS_COMPLETED'
# Possible future states (for internal use by the futures package).
PENDING = 'PENDING'
RUNNING = 'RUNNING'
# The future was cancelled by the user...
CANCELLED = 'CANCELLED'
# ...and _Waiter.add_cancelled() was called by a worker.
CANCELLED_AND_NOTIFIED = 'CANCELLED_AND_NOTIFIED'
FINISHED = 'FINISHED'
# Every state a Future can be in, in rough lifecycle order.
_FUTURE_STATES = [
    PENDING,
    RUNNING,
    CANCELLED,
    CANCELLED_AND_NOTIFIED,
    FINISHED
]
# Human-readable state labels used by Future.__repr__ (both CANCELLED
# states render identically as "cancelled").
_STATE_TO_DESCRIPTION_MAP = {
    PENDING: "pending",
    RUNNING: "running",
    CANCELLED: "cancelled",
    CANCELLED_AND_NOTIFIED: "cancelled",
    FINISHED: "finished"
}
# Logger for internal use by the futures package.
# NOTE(review): attaching a StreamHandler at import time writes to stderr
# even when the host application configures logging differently — confirm
# this is intended.
LOGGER = logging.getLogger("concurrent.futures")
STDERR_HANDLER = logging.StreamHandler()
LOGGER.addHandler(STDERR_HANDLER)
class Error(Exception):
    """Base class for all future-related exceptions."""
class CancelledError(Error):
    """The Future was cancelled."""
class TimeoutError(Error):
    """The operation exceeded the given deadline.

    Note: intentionally shadows the builtin TimeoutError within this module,
    mirroring the concurrent.futures API.
    """
class _Waiter(object):
"""Provides the event that `wait` and `as_completed` block on."""
def __init__(self):
self.event = threading.Event()
self.finished_futures = []
def add_result(self, future):
self.finished_futures.append(future)
def add_exception(self, future):
self.finished_futures.append(future)
def add_cancelled(self, future):
self.finished_futures.append(future)
class _AsCompletedWaiter(_Waiter):
    """Used by `as_completed`.

    Records every completion under a lock so as_completed() can atomically
    swap out the finished list, and wakes the consumer each time.
    """
    def __init__(self):
        super(_AsCompletedWaiter, self).__init__()
        self.lock = threading.Lock()
    def _record_locked(self, base_add, future):
        # Record via the base class and set the event, all under the lock.
        with self.lock:
            base_add(self, future)
            self.event.set()
    def add_result(self, future):
        self._record_locked(_Waiter.add_result, future)
    def add_exception(self, future):
        self._record_locked(_Waiter.add_exception, future)
    def add_cancelled(self, future):
        self._record_locked(_Waiter.add_cancelled, future)
class _FirstCompletedWaiter(_Waiter):
    """Used by `wait(return_when=FIRST_COMPLETED)`.

    Any single completion wakes the waiter immediately.
    """
    def _record_and_wake(self, base_add, future):
        base_add(self, future)
        self.event.set()
    def add_result(self, future):
        self._record_and_wake(_Waiter.add_result, future)
    def add_exception(self, future):
        self._record_and_wake(_Waiter.add_exception, future)
    def add_cancelled(self, future):
        self._record_and_wake(_Waiter.add_cancelled, future)
class _AllCompletedWaiter(_Waiter):
    """Used by `wait(return_when=FIRST_EXCEPTION and ALL_COMPLETED)`."""
    def __init__(self, num_pending_calls, stop_on_exception):
        # Number of futures that still had to complete when the wait began.
        self.num_pending_calls = num_pending_calls
        # When True (FIRST_EXCEPTION mode), any raising future wakes us early.
        self.stop_on_exception = stop_on_exception
        super(_AllCompletedWaiter, self).__init__()
    def _maybe_wake(self):
        # Wake the waiter once every pending future has been recorded.
        if self.num_pending_calls == len(self.finished_futures):
            self.event.set()
    def add_result(self, future):
        _Waiter.add_result(self, future)
        self._maybe_wake()
    def add_exception(self, future):
        _Waiter.add_exception(self, future)
        if self.stop_on_exception:
            self.event.set()
        else:
            self._maybe_wake()
    def add_cancelled(self, future):
        _Waiter.add_cancelled(self, future)
        self._maybe_wake()
class _AcquireFutures(object):
"""A context manager that does an ordered acquire of `Future`
conditions.
"""
def __init__(self, futures):
self.futures = sorted(futures, key=id)
def __enter__(self):
for future in self.futures:
future._condition.acquire()
def __exit__(self, *args):
for future in self.futures:
future._condition.release()
def _create_and_install_waiters(fs, return_when):
    """Build the waiter matching *return_when* and attach it to every future.

    Raises ValueError (before installing anything) for an unknown condition.
    """
    if return_when == _AS_COMPLETED:
        waiter = _AsCompletedWaiter()
    elif return_when == FIRST_COMPLETED:
        waiter = _FirstCompletedWaiter()
    elif return_when in (FIRST_EXCEPTION, ALL_COMPLETED):
        # Only futures that can still transition count; done ones will
        # never notify the waiter again.
        pending_count = sum(
            f._state not in [CANCELLED_AND_NOTIFIED, FINISHED] for f in fs)
        waiter = _AllCompletedWaiter(
            pending_count, stop_on_exception=(return_when == FIRST_EXCEPTION))
    else:
        raise ValueError("Invalid return condition: %r" % return_when)
    for f in fs:
        f._waiters.append(waiter)
    return waiter
def as_completed(fs, timeout=None):
    """An iterator over the given futures that yields each as it completes.
    Parameters
    ----------
    fs
        The sequence of Futures (possibly created by different Executors) to
        iterate over.
    timeout
        The maximum number of seconds to wait. If None, then there
        is no limit on the wait time.
    Returns
    -------
    An iterator that yields the given Futures as they complete (finished or
    cancelled).
    Raises
    ------
    TimeoutError
        If the entire result iterator could not be generated
        before the given timeout.
    """
    if timeout is not None:
        end_time = timeout + time.time()
    # Snapshot the state of every future while holding all their conditions,
    # and install the waiter atomically with that snapshot.
    with _AcquireFutures(fs):
        finished = set(
            f for f in fs
            if f._state in [CANCELLED_AND_NOTIFIED, FINISHED])
        pending = set(fs) - finished
        waiter = _create_and_install_waiters(fs, _AS_COMPLETED)
    try:
        # Yield everything that was already done at snapshot time.
        for future in finished:
            yield future
        while pending:
            if timeout is None:
                wait_timeout = None
            else:
                # Remaining budget; negative means the deadline has passed.
                wait_timeout = end_time - time.time()
                if wait_timeout < 0:
                    raise TimeoutError(
                        '%d (of %d) futures unfinished' % (
                            len(pending), len(fs)))
            waiter.event.wait(wait_timeout)
            # Atomically take the batch of newly finished futures and reset
            # the event so the next wait blocks until a fresh completion.
            with waiter.lock:
                finished = waiter.finished_futures
                waiter.finished_futures = []
                waiter.event.clear()
            for future in finished:
                yield future
                pending.remove(future)
    finally:
        # Always detach the waiter — also on timeout or early generator close.
        for f in fs:
            f._waiters.remove(waiter)
# Return type of wait(): the futures that were done when the wait ended,
# and those that were not.
DoneAndNotDoneFutures = namedtuple(
    'DoneAndNotDoneFutures', 'done not_done')
def wait(fs, timeout=None, return_when=ALL_COMPLETED):
    """Wait for the futures in the given sequence to complete.
    Parameters
    ----------
    fs
        The sequence of Futures (possibly created by different Executors) to
        wait upon.
    timeout
        The maximum number of seconds to wait. If None, then there
        is no limit on the wait time.
    return_when
        Indicates when this function should return. The options
        are:
        FIRST_COMPLETED - Return when any future finishes or is
                          cancelled.
        FIRST_EXCEPTION - Return when any future finishes by raising an
                          exception. If no future raises an exception
                          then it is equivalent to ALL_COMPLETED.
        ALL_COMPLETED - Return when all futures finish or are cancelled.
    Returns
    -------
    A named 2-tuple of sets. The first set, named 'done', contains the
    futures that completed (is finished or cancelled) before the wait
    completed. The second set, named 'not_done', contains uncompleted
    futures.
    """
    # Take a consistent snapshot under all the futures' condition locks;
    # several fast paths can return without ever installing a waiter.
    with _AcquireFutures(fs):
        done = set(f for f in fs
                   if f._state in [CANCELLED_AND_NOTIFIED, FINISHED])
        not_done = set(fs) - done
        if (return_when == FIRST_COMPLETED) and done:
            return DoneAndNotDoneFutures(done, not_done)
        elif (return_when == FIRST_EXCEPTION) and done:
            if any(f for f in done
                   if not f.cancelled() and f.exception() is not None):
                return DoneAndNotDoneFutures(done, not_done)
        # Everything already done: nothing to wait for.
        if len(done) == len(fs):
            return DoneAndNotDoneFutures(done, not_done)
        waiter = _create_and_install_waiters(fs, return_when)
    # Block (up to timeout) until the waiter decides the condition is met.
    waiter.event.wait(timeout)
    for f in fs:
        f._waiters.remove(waiter)
    # Merge in whatever finished while we were blocked.
    done.update(waiter.finished_futures)
    return DoneAndNotDoneFutures(done, set(fs) - done)
class Future(object):
    """Represents the result of an asynchronous computation."""
    def __init__(self):
        """Initializes the future. Should not be called by clients."""
        # _condition guards every attribute below; result()/exception()
        # block on it.
        self._condition = threading.Condition()
        self._state = PENDING
        self._result = None
        self._exception = None
        # _Waiter objects installed by wait()/as_completed().
        self._waiters = []
        self._done_callbacks = []
    def _invoke_callbacks(self):
        # Runs outside _condition; a failing callback is logged, never raised.
        for callback in self._done_callbacks:
            try:
                callback(self)
            except Exception:
                LOGGER.exception('exception calling callback for %r', self)
    def __repr__(self):
        with self._condition:
            if self._state == FINISHED:
                if self._exception:
                    return '<Future at %s state=%s raised %s>' % (
                        hex(id(self)),
                        _STATE_TO_DESCRIPTION_MAP[self._state],
                        self._exception.__class__.__name__)
                else:
                    return '<Future at %s state=%s returned %s>' % (
                        hex(id(self)),
                        _STATE_TO_DESCRIPTION_MAP[self._state],
                        self._result.__class__.__name__)
            return '<Future at %s state=%s>' % (
                hex(id(self)), _STATE_TO_DESCRIPTION_MAP[self._state])
    def cancel(self):
        """Cancel the future if possible.
        Returns True if the future was cancelled, False otherwise. A future
        cannot be cancelled if it is running or has already completed.
        """
        with self._condition:
            if self._state in [RUNNING, FINISHED]:
                return False
            if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]:
                return True
            self._state = CANCELLED
            self._condition.notify_all()
        # Callbacks run outside the lock to avoid deadlocks.
        self._invoke_callbacks()
        return True
    def cancelled(self):
        """Return True if the future was cancelled."""
        with self._condition:
            return self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]
    def running(self):
        """Return True if the future is currently executing."""
        with self._condition:
            return self._state == RUNNING
    def done(self):
        """Return True if the future was cancelled or finished executing."""
        with self._condition:
            return self._state in [CANCELLED, CANCELLED_AND_NOTIFIED, FINISHED]
    def __get_result(self):
        # Caller must hold self._condition and have verified state FINISHED.
        if self._exception:
            raise self._exception
        else:
            return self._result
    def add_done_callback(self, fn):
        """Attaches a callable that will be called when the future finishes.
        Parameters
        ----------
        fn
            A callable that will be called with this future as its only
            argument when the future completes or is cancelled. The callable
            will always be called by a thread in the same process in which
            it was added. If the future has already completed or been
            cancelled then the callable will be called immediately. These
            callables are called in the order that they were added.
        """
        with self._condition:
            if (self._state not in [CANCELLED, CANCELLED_AND_NOTIFIED,
                                    FINISHED]):
                self._done_callbacks.append(fn)
                return
        # Already done: invoke immediately, outside the lock.
        fn(self)
    def result(self, timeout=None):
        """Return the result of the call that the future represents.
        Parameters
        ----------
        timeout
            The number of seconds to wait for the result if the future
            isn't done. If None, then there is no limit on the wait time.
        Returns
        -------
        The result of the call that the future represents.
        Raises
        ------
        CancelledError
            If the future was cancelled.
        TimeoutError
            If the future didn't finish executing before the given
            timeout.
        Exception
            If the call raised then that exception will be raised.
        """
        with self._condition:
            if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]:
                raise CancelledError()
            elif self._state == FINISHED:
                return self.__get_result()
            # Block until notified by set_result/set_exception/cancel,
            # then re-check the state once.
            self._condition.wait(timeout)
            if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]:
                raise CancelledError()
            elif self._state == FINISHED:
                return self.__get_result()
            else:
                raise TimeoutError()
    def exception(self, timeout=None):
        """Return the exception raised by the call that the future represents.
        Parameters
        ----------
        timeout
            The number of seconds to wait for the exception if the
            future isn't done. If None, then there is no limit on the wait
            time.
        Returns
        -------
        The exception raised by the call that the future represents or None
        if the call completed without raising.
        Raises
        ------
        CancelledError
            If the future was cancelled.
        TimeoutError
            If the future didn't finish executing before the given
            timeout.
        """
        with self._condition:
            if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]:
                raise CancelledError()
            elif self._state == FINISHED:
                return self._exception
            self._condition.wait(timeout)
            if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]:
                raise CancelledError()
            elif self._state == FINISHED:
                return self._exception
            else:
                raise TimeoutError()
    # The following methods should only be used by Executors and in tests.
    def set_running_or_notify_cancel(self):
        """Mark the future as running or process any cancel notifications.
        Should only be used by `Executor` implementations and unit tests.
        If the future has been cancelled (`cancel` was called and returned
        True) then any threads waiting on the future completing (though calls
        to `as_completed` or `wait`) are notified and False is returned.
        If the future was not cancelled then it is put in the running state
        (future calls to `running` will return True) and True is returned.
        This method should be called by `Executor` implementations before
        executing the work associated with this future. If this method returns
        False then the work should not be executed.
        Returns
        -------
        False if the Future was cancelled, True otherwise.
        Raises
        ------
        RuntimeError
            if this method was already called or if `set_result`
            or `set_exception` was called.
        """
        with self._condition:
            if self._state == CANCELLED:
                self._state = CANCELLED_AND_NOTIFIED
                for waiter in self._waiters:
                    waiter.add_cancelled(self)
                # self._condition.notify_all() is not necessary because
                # self.cancel() triggers a notification.
                return False
            elif self._state == PENDING:
                self._state = RUNNING
                return True
            else:
                # BUGFIX: previously logged id(self.future)/self.future._state,
                # but Future has no `.future` attribute, so reaching this
                # branch raised AttributeError instead of RuntimeError.
                LOGGER.critical('Future %s in unexpected state: %s',
                                id(self),
                                self._state)
                raise RuntimeError('Future in unexpected state')
    def set_result(self, result):
        """Sets the return value of work associated with the future.
        Should only be used by `Executor` implementations and unit tests.
        """
        with self._condition:
            self._result = result
            self._state = FINISHED
            for waiter in self._waiters:
                waiter.add_result(self)
            self._condition.notify_all()
        self._invoke_callbacks()
    def set_exception(self, exception):
        """Sets the result of the future as being the given exception.
        Should only be used by `Executor` implementations and unit tests.
        """
        with self._condition:
            self._exception = exception
            self._state = FINISHED
            for waiter in self._waiters:
                waiter.add_exception(self)
            self._condition.notify_all()
        self._invoke_callbacks()
class Executor(object):
    """This is an abstract base class for concrete asynchronous executors."""
    def submit(self, fn, *args, **kwargs):
        """Submits a callable to be executed with the given arguments.
        Schedules the callable to be executed as `fn(*args, **kwargs)` and
        returns a `Future` instance representing the execution of the callable.
        Returns
        -------
        A Future representing the given call.
        """
        raise NotImplementedError()
    def map(self, fn, *iterables, **kwargs):
        """Returns a iterator equivalent to `map(fn, iter)`.
        Note: this is a generator, so the calls are only submitted when
        iteration of the returned iterator begins.
        Parameters
        ----------
        fn
            A callable that will take as many arguments as there are
            passed iterables.
        timeout
            The maximum number of seconds to wait. If None, then there
            is no limit on the wait time.
        Returns
        -------
        An iterator equivalent to: `map(func, *iterables)` but the calls may
        be evaluated out-of-order.
        Raises
        ------
        TimeoutError
            If the entire result iterator could not be generated
            before the given timeout.
        Exception
            If `fn(*args)` raises for any values.
        """
        # `timeout` is read from **kwargs (pre-keyword-only-argument style).
        timeout = kwargs.get('timeout')
        if timeout is not None:
            end_time = timeout + time.time()
        # Submit everything up front; results are yielded in submission order.
        fs = [self.submit(fn, *args) for args in zip(*iterables)]
        try:
            for future in fs:
                if timeout is None:
                    yield future.result()
                else:
                    # Each result() call gets the remaining overall budget.
                    yield future.result(end_time - time.time())
        finally:
            # On timeout, error, or early generator close, cancel what we can.
            for future in fs:
                future.cancel()
    def shutdown(self, wait=True):
        """Clean-up the resources associated with the `Executor`.
        It is safe to call this method several times. Otherwise, no other
        methods can be called after this one.
        Parameters
        ----------
        wait
            If True then shutdown will not return until all running
            futures have finished executing and the resources used by the
            executor have been reclaimed.
        """
        pass
    def __enter__(self):
        return self
    def __exit__(self, exc_type, exc_val, exc_tb):
        self.shutdown(wait=True)
        # Returning False propagates any exception raised inside the block.
        return False
| |
import os
import requests
from webstore_manager import logging_helper, util
from webstore_manager.constants import ErrorCodes
from webstore_manager.store.store import Store
logger = logging_helper.get_logger(__file__)
class ChromeStore(Store):
"""
Class representing Chrome Webstore. Holds info about the client, app and its refresh token.
"""
TARGET_PUBLIC = 0
TARGET_TRUSTED = 1
GOOGLE_OAUTH_TOKEN = 'https://www.googleapis.com/oauth2/v4/token'
def __init__(self, client_id, client_secret, refresh_token=None, app_id="", session=None):
"""
Args:
client_id:
client_secret:
refresh_token:
app_id:
session: If none, a new requests session will be created. Otherwise the supplied one will be used.
"""
super().__init__(session)
self.client_id = client_id
self.client_secret = client_secret
self.app_id = app_id
self.refresh_token = refresh_token
self.update_item_url = "https://www.googleapis.com/upload/chromewebstore/v1.1/items/{}".format(app_id)
self.new_item_url = "https://www.googleapis.com/upload/chromewebstore/v1.1/items"
self.publish_item_url = "https://www.googleapis.com/chromewebstore/v1.1/items/{}/publish?publishTarget={{}}".format(
app_id)
self.get_status_url = "https://www.googleapis.com/chromewebstore/v1.1/items/{}?projection=draft"
    def publish(self, target):
        """
        Publish an existing extension. It has to be uploaded to the Webstore first, its name is obtained from
        the ChromeStore's app_id field.
        Args:
            target: Target audience to publish to. May be ChromeStore.TARGET_PUBLIC or TARGET_TRUSTED.
        Returns:
            str: Item ID of the published extension (on success); the process
            exits on any failure.
        """
        auth_token = self.generate_access_token()
        headers = {"Authorization": "Bearer {}".format(auth_token),
                   "x-goog-api-version": "2",
                   "Content-Length": "0"}
        # Note: webstore API documentation is inconsistent whether it requires publishTarget in headers or in URL
        # so I will use both, to be sure.
        if target == self.TARGET_PUBLIC:
            target = "default"
        elif target == self.TARGET_TRUSTED:
            headers["publishTarget"] = "trustedTesters"
            target = "trustedTesters"
        else:
            # Unknown target: abort the whole process with a dedicated code.
            logger.error("Unknown publish target: {}".format(target))
            exit(ErrorCodes.chrome_publish_bad_target)
        logger.debug("Making publish query to {}".format(self.publish_item_url.format(target)))
        response = self.session.post(self.publish_item_url.format(target),
                                     headers=headers)
        try:
            res_json = response.json()
            # An empty status list, or exactly ['OK'], counts as success.
            status = res_json['status']
            if len(status) == 0 or (len(status) == 1 and status[0] == 'OK'):
                # NOTE(review): 'item_id' is assumed present on success; if it
                # is missing, the KeyError handler below reports the wrong key
                # name ('status') — confirm against the API response shape.
                self.app_id = res_json['item_id']
                logger.info("Publishing completed. Item ID: {}".format(self.app_id))
                return self.app_id
            else:
                logger.error("Status is not empty (something bad happened).")
                logger.error("Response: {}".format(res_json))
                exit(ErrorCodes.chrome_publish_bad_status)
        except KeyError as error:
            logger.error("Key 'status' not found in returned JSON.")
            logger.error(error)
            logger.error("Response: {}".format(response.json()))
            exit(ErrorCodes.chrome_upload_key_not_found)
        except ValueError:
            # response.json() raised: the body was not valid JSON.
            logger.error("Response could not be decoded as JSON.")
            logger.error("Response code: {}".format(response.status_code))
            logger.error("Response: {}".format(response.content))
            exit(ErrorCodes.response_not_json)
def upload(self, filename, new_item=False):
"""
Uploads a zip-archived extension to the webstore; either as a completely new extension, or as a
version update to an existing one.
Args:
filename(str): Path to the archive.
new_item(bool): If true, this is a new extension. If false, this is an update to an existing one.
Returns:
str: Item ID of the created or updated extension.
"""
if new_item:
logger.info("Uploading a new extension - new file: {}".format(filename))
else:
logger.info("Uploading an update - file: {}".format(filename))
if not new_item and not self.app_id:
logger.error("To upload a new version of an extension, supply the app_id parameter!")
exit(ErrorCodes.chrome_upload_no_appid)
auth_token = self.generate_access_token()
headers = {"Authorization": "Bearer {}".format(auth_token),
"x-goog-api-version": "2"}
data = open(filename, 'rb')
if new_item:
response = self.session.post(self.new_item_url,
headers=headers,
data=data)
else:
response = self.session.put(self.update_item_url,
headers=headers,
data=data)
try:
response.raise_for_status()
except requests.HTTPError as error:
logger.error(error)
logger.error("Response: {}".format(response.json()))
exit(ErrorCodes.chrome_upload_generic_error)
try:
rjson = response.json()
state = rjson['uploadState']
if not state == 'SUCCESS':
logger.error("Uploading state is not SUCCESS.")
logger.error("Response: {}".format(rjson))
exit(ErrorCodes.chrome_upload_app_not_found)
else:
self.app_id = rjson['id']
logger.info("Upload completed. Item ID: {}".format(self.app_id))
logger.info("Done.")
return self.app_id
except KeyError as error:
logger.error("Key 'uploadState' not found in returned JSON.")
logger.error(error)
logger.error("Response: {}".format(response.json()))
exit(ErrorCodes.chrome_upload_key_not_found)
    def get_uploaded_version(self):
        """
        Finds version of an extension that is currently uploaded in the web store.

        Requires self.app_id to be set (it is interpolated into the status URL).

        Returns:
            str: Version as specified in the original manifest.
        """
        auth_token = self.generate_access_token()
        # NOTE(review): "Content-Length: 0" and an empty "Expect" header are
        # presumably required by the status endpoint for a body-less GET --
        # confirm against the webstore API docs before removing.
        headers = {"Authorization": "Bearer {}".format(auth_token),
                   "x-goog-api-version": "2",
                   "Content-Length": "0",
                   "Expect": ""}
        final_url = self.get_status_url.format(self.app_id)
        logger.debug("Checking status at {}".format(final_url))
        response = self.session.get(final_url,
                                    headers=headers)
        try:
            res_json = response.json()
            reported_version = res_json['crxVersion']
            reported_state = res_json['uploadState'] # No use right now
            logger.info("Status obtained. Item ID: {}, version: {}, state: {}".format(self.app_id, reported_version,
                                                                                      reported_state))
            return reported_version
        except KeyError as error:
            logger.error("Key 'crxVersion' or 'uploadState' not found in returned JSON.")
            logger.error(error)
            logger.error("Response: {}".format(response.json()))
            exit(ErrorCodes.chrome_upload_key_not_found)
        except ValueError:
            # requests raises ValueError when the body is not valid JSON.
            logger.error("Response could not be decoded as JSON.")
            logger.error("Response code: {}".format(response.status_code))
            logger.error("Response: {}".format(response.content))
            exit(ErrorCodes.response_not_json)
def generate_access_token(self):
"""
Generate a new access token from a saved refresh token.
Returns:
Access token.
"""
auth_token = self.gen_access_token(self.client_id, self.client_secret, self.refresh_token, session=self.session)
logger.info("Obtained an auth token: {}".format(auth_token))
return auth_token
def authenticate(self, code):
"""
Authenticate by exchanging a given code for a refresh token.
Save the refresh token as a field of this ChromeStore object.
Args:
code: Code obtained from Google.
Returns:
None.
"""
_, self.refresh_token = ChromeStore.redeem_code(self.client_id, self.client_secret, code, self.session)
@staticmethod
def redeem_code(client_id, client_secret, code, session=None):
"""
Obtain access and refresh tokens from Google OAuth from client ID, secret and one-time code.
Args:
client_id(str): ID of the client (see developer console - credentials - OAuth 2.0 client IDs).
client_secret(str): Secret of the client (see developer console - credentials - OAuth 2.0 client IDs).
code(str): Auth code obtained from confirming access at
https://accounts.google.com/o/oauth2/auth?response_type=code&scope=https://www.googleapis.com/auth/chromewebstore&client_id=$CLIENT_ID&redirect_uri=urn:ietf:wg:oauth:2.0:oob.
session(requests.Session, optional): If set, use this session for HTTP requests.
Returns:
str, str: access_token, refresh_token
"""
logger.debug("Requesting tokens using parameters:")
logger.debug(" Client ID: {}".format(client_id))
logger.debug(" Client secret: {}".format(client_secret))
logger.debug(" Code: {}".format(code))
session = session or requests.Session()
response = session.post(ChromeStore.GOOGLE_OAUTH_TOKEN,
data={
"client_id": client_id,
"client_secret": client_secret,
"code": code,
"grant_type": "authorization_code",
"redirect_uri": "urn:ietf:wg:oauth:2.0:oob"
})
try:
response.raise_for_status()
except requests.HTTPError as error:
logger.error(error)
logger.error("Response: {}".format(response.json()))
exit(ErrorCodes.response_error)
res_json = response.json()
return res_json['access_token'], res_json['refresh_token']
@staticmethod
def gen_access_token(client_id, client_secret, refresh_token, session=None):
"""
Use refresh token to generate a new client access token.
Args:
client_id(str): Client ID field of Developer Console OAuth client credentials.
client_secret(str): Client secret field of Developer Console OAuth client credentials.
refresh_token(str): Refresh token obtained when calling get_tokens method.
session(requests.Session, optional): If set, use this session for HTTP requests.
Returns:
str: New user token valid (by default) for 1 hour.
"""
session = session or requests.Session()
response = session.post(ChromeStore.GOOGLE_OAUTH_TOKEN,
data={"client_id": client_id,
"client_secret": client_secret,
"refresh_token": refresh_token,
"grant_type": "refresh_token",
"redirect_uri": "urn:ietf:wg:oauth:2.0:oob"
}
)
try:
response.raise_for_status()
except requests.HTTPError as error:
logger.error(error)
logger.error("Response: {}".format(response.json()))
exit(ErrorCodes.response_error)
res_json = response.json()
return res_json['access_token']
def repack_crx(filename, target_dir=""):
    """
    Repack the given .crx file into a .zip file. Physically creates the file on disk.

    Args:
        filename(str): A .crx Chrome Extension file.
        target_dir(str, optional): If set, the zip file is created in the given
            directory (instead of the temporary build directory).

    Returns:
        str: Filename of the newly created zip file. (full path)
    """
    unpack_dir = util.build_dir
    util.unzip(filename, unpack_dir)
    base_name = os.path.basename(os.path.splitext(filename)[0])
    zip_name = base_name + ".zip"
    destination = target_dir if target_dir else util.build_dir
    return util.make_zip(zip_name, unpack_dir, destination)
| |
"""Provide access to Python's configuration information. The specific
configuration variables available depend heavily on the platform and
configuration. The values may be retrieved using
get_config_var(name), and the list of variables is available via
get_config_vars().keys(). Additional convenience functions are also
available.
Written by: Fred L. Drake, Jr.
Email: <fdrake@acm.org>
"""
import _imp
import os
import re
import sys
from .errors import DistutilsPlatformError
from .util import get_platform, get_host_platform
# These are needed in a couple of spots, so just compute them once.
PREFIX = os.path.normpath(sys.prefix)
EXEC_PREFIX = os.path.normpath(sys.exec_prefix)
# The base_* prefixes point at the underlying installation when running
# inside a virtual environment.
BASE_PREFIX = os.path.normpath(sys.base_prefix)
BASE_EXEC_PREFIX = os.path.normpath(sys.base_exec_prefix)
# Path to the base directory of the project. On Windows the binary may
# live in project/PCbuild/win32 or project/PCbuild/amd64.
# set for cross builds
if "_PYTHON_PROJECT_BASE" in os.environ:
    project_base = os.path.abspath(os.environ["_PYTHON_PROJECT_BASE"])
else:
    if sys.executable:
        project_base = os.path.dirname(os.path.abspath(sys.executable))
    else:
        # sys.executable can be empty if argv[0] has been changed and Python is
        # unable to retrieve the real program name
        project_base = os.getcwd()
def _is_python_source_dir(d):
for fn in ("Setup", "Setup.local"):
if os.path.isfile(os.path.join(d, "Modules", fn)):
return True
return False
# _home is set when Python runs from a build tree rather than an installation.
_sys_home = getattr(sys, '_home', None)
if os.name == 'nt':
    def _fix_pcbuild(d):
        # Map paths inside <prefix>/PCbuild back to the prefix itself so
        # Windows build-tree layouts resolve consistently.
        if d and os.path.normcase(d).startswith(
                os.path.normcase(os.path.join(PREFIX, "PCbuild"))):
            return PREFIX
        return d
    project_base = _fix_pcbuild(project_base)
    _sys_home = _fix_pcbuild(_sys_home)
def _python_build():
    # True when we are running from an uninstalled (source/build) Python.
    if _sys_home:
        return _is_python_source_dir(_sys_home)
    return _is_python_source_dir(project_base)
python_build = _python_build()
# Calculate the build qualifier flags if they are defined. Adding the flags
# to the include and lib directories only makes sense for an installation, not
# an in-source build.
build_flags = ''
try:
    if not python_build:
        build_flags = sys.abiflags
except AttributeError:
    # It's not a configure-based build, so the sys module doesn't have
    # this attribute, which is fine.
    pass
def get_python_version():
    """Return the major.minor Python version as a string.

    The patchlevel is left off.  Sample return values could be '1.5'
    or '2.2'.
    """
    major, minor = sys.version_info[:2]
    return '%d.%d' % (major, minor)
def get_python_inc(plat_specific=0, prefix=None):
    """Return the directory containing installed Python header files.

    If 'plat_specific' is false (the default), this is the path to the
    non-platform-specific header files, i.e. Python.h and so on;
    otherwise, this is the path to platform-specific header files
    (namely pyconfig.h).

    If 'prefix' is supplied, use it instead of sys.base_prefix or
    sys.base_exec_prefix -- i.e., ignore 'plat_specific'.

    Raises:
        DistutilsPlatformError: on an unsupported os.name.
    """
    if prefix is None:
        # Conditional expression instead of the fragile `and/or` idiom.
        prefix = BASE_EXEC_PREFIX if plat_specific else BASE_PREFIX
    if os.name == "posix":
        if python_build:
            # Assume the executable is in the build directory.  The
            # pyconfig.h file should be in the same directory.  Since
            # the build directory may not be the source directory, we
            # must use "srcdir" from the makefile to find the "Include"
            # directory.
            if plat_specific:
                return _sys_home or project_base
            incdir = os.path.join(get_config_var('srcdir'), 'Include')
            return os.path.normpath(incdir)
        python_dir = 'python' + get_python_version() + build_flags
        return os.path.join(prefix, "include", python_dir)
    elif os.name == "nt":
        if python_build:
            # Include both the include and PC dir to ensure we can find
            # pyconfig.h
            return (os.path.join(prefix, "include") + os.path.pathsep +
                    os.path.join(prefix, "PC"))
        return os.path.join(prefix, "include")
    else:
        raise DistutilsPlatformError(
            "I don't know where Python installs its C header files "
            "on platform '%s'" % os.name)
def get_python_lib(plat_specific=0, standard_lib=0, prefix=None):
    """Return the directory containing the Python library (standard or
    site additions).

    If 'plat_specific' is true, return the directory containing
    platform-specific modules, i.e. any module from a non-pure-Python
    module distribution; otherwise, return the platform-shared library
    directory.  If 'standard_lib' is true, return the directory
    containing standard Python library modules; otherwise, return the
    directory for site-specific modules.

    If 'prefix' is supplied, use it instead of sys.base_prefix or
    sys.base_exec_prefix -- i.e., ignore 'plat_specific'.

    Raises:
        DistutilsPlatformError: on an unsupported os.name.
    """
    if prefix is None:
        # Conditional expressions instead of the fragile `and/or` idiom.
        if standard_lib:
            prefix = BASE_EXEC_PREFIX if plat_specific else BASE_PREFIX
        else:
            prefix = EXEC_PREFIX if plat_specific else PREFIX
    if os.name == "posix":
        libpython = os.path.join(prefix,
                                 "lib", "python" + get_python_version())
        if standard_lib:
            return libpython
        else:
            return os.path.join(libpython, "site-packages")
    elif os.name == "nt":
        if standard_lib:
            return os.path.join(prefix, "Lib")
        else:
            return os.path.join(prefix, "Lib", "site-packages")
    else:
        raise DistutilsPlatformError(
            "I don't know where Python installs its library "
            "on platform '%s'" % os.name)
def customize_compiler(compiler):
    """Do any platform-specific customization of a CCompiler instance.

    Mainly needed on Unix, so we can plug in the information that
    varies across Unices and is stored in Python's Makefile.
    """
    if compiler.compiler_type == "unix":
        if sys.platform == "darwin":
            # Perform first-time customization of compiler-related
            # config vars on OS X now that we know we need a compiler.
            # This is primarily to support Pythons from binary
            # installers.  The kind and paths to build tools on
            # the user system may vary significantly from the system
            # that Python itself was built on.  Also the user OS
            # version and build tools may not support the same set
            # of CPU architectures for universal builds.
            global _config_vars
            # Use get_config_var() to ensure _config_vars is initialized.
            if not get_config_var('CUSTOMIZED_OSX_COMPILER'):
                import _osx_support
                _osx_support.customize_compiler(_config_vars)
                _config_vars['CUSTOMIZED_OSX_COMPILER'] = 'True'
        (cc, cxx, cflags, ccshared, ldshared, shlib_suffix, ar, ar_flags) = \
            get_config_vars('CC', 'CXX', 'CFLAGS',
                            'CCSHARED', 'LDSHARED', 'SHLIB_SUFFIX', 'AR', 'ARFLAGS')
        # Environment variables override the Makefile-derived defaults
        # fetched above; the order of the checks below matters (e.g. CFLAGS
        # is appended to both cflags and ldshared).
        if 'CC' in os.environ:
            newcc = os.environ['CC']
            if (sys.platform == 'darwin'
                    and 'LDSHARED' not in os.environ
                    and ldshared.startswith(cc)):
                # On OS X, if CC is overridden, use that as the default
                # command for LDSHARED as well
                ldshared = newcc + ldshared[len(cc):]
            cc = newcc
        if 'CXX' in os.environ:
            cxx = os.environ['CXX']
        if 'LDSHARED' in os.environ:
            ldshared = os.environ['LDSHARED']
        if 'CPP' in os.environ:
            cpp = os.environ['CPP']
        else:
            cpp = cc + " -E" # not always
        if 'LDFLAGS' in os.environ:
            ldshared = ldshared + ' ' + os.environ['LDFLAGS']
        if 'CFLAGS' in os.environ:
            cflags = cflags + ' ' + os.environ['CFLAGS']
            ldshared = ldshared + ' ' + os.environ['CFLAGS']
        if 'CPPFLAGS' in os.environ:
            cpp = cpp + ' ' + os.environ['CPPFLAGS']
            cflags = cflags + ' ' + os.environ['CPPFLAGS']
            ldshared = ldshared + ' ' + os.environ['CPPFLAGS']
        if 'AR' in os.environ:
            ar = os.environ['AR']
        if 'ARFLAGS' in os.environ:
            archiver = ar + ' ' + os.environ['ARFLAGS']
        else:
            archiver = ar + ' ' + ar_flags
        cc_cmd = cc + ' ' + cflags
        compiler.set_executables(
            preprocessor=cpp,
            compiler=cc_cmd,
            compiler_so=cc_cmd + ' ' + ccshared,
            compiler_cxx=cxx,
            linker_so=ldshared,
            linker_exe=cc,
            archiver=archiver)
        compiler.shared_lib_extension = shlib_suffix
def get_config_h_filename():
    """Return full pathname of installed pyconfig.h file."""
    if python_build:
        # In a build tree the header lives next to the sources
        # (under PC/ on Windows).
        base = _sys_home or project_base
        inc_dir = os.path.join(base, "PC") if os.name == "nt" else base
    else:
        inc_dir = get_python_inc(plat_specific=1)
    return os.path.join(inc_dir, 'pyconfig.h')
def get_makefile_filename():
    """Return full pathname of installed Makefile from the Python build."""
    if python_build:
        return os.path.join(_sys_home or project_base, "Makefile")
    lib_dir = get_python_lib(plat_specific=0, standard_lib=1)
    config_file = 'config-{}{}'.format(get_python_version(), build_flags)
    if hasattr(sys.implementation, '_multiarch'):
        config_file = '{}-{}'.format(config_file, sys.implementation._multiarch)
    return os.path.join(lib_dir, config_file, 'Makefile')
def parse_config_h(fp, g=None):
    """Parse a config.h-style file.

    A dictionary containing name/value pairs is returned.  If an
    optional dictionary is passed in as the second argument, it is
    used instead of a new dictionary.
    """
    if g is None:
        g = {}
    define_rx = re.compile("#define ([A-Z][A-Za-z0-9_]+) (.*)\n")
    undef_rx = re.compile("/[*] #undef ([A-Z][A-Za-z0-9_]+) [*]/\n")
    # readline() returns '' at EOF, which terminates the iterator.
    for line in iter(fp.readline, ''):
        m = define_rx.match(line)
        if m:
            name, value = m.group(1, 2)
            try:
                value = int(value)
            except ValueError:
                pass
            g[name] = value
            continue
        m = undef_rx.match(line)
        if m:
            # An #undef'd symbol is recorded with the value 0.
            g[m.group(1)] = 0
    return g
# Regexes needed for parsing Makefile (and similar syntaxes,
# like old-style Setup files).
_variable_rx = re.compile(r"([a-zA-Z][a-zA-Z0-9_]+)\s*=\s*(.*)")  # NAME = value
_findvar1_rx = re.compile(r"\$\(([A-Za-z][A-Za-z0-9_]*)\)")  # $(NAME) reference
_findvar2_rx = re.compile(r"\${([A-Za-z][A-Za-z0-9_]*)}")  # ${NAME} reference
def parse_makefile(fn, g=None):
    """Parse a Makefile-style file.

    A dictionary containing name/value pairs is returned.  If an
    optional dictionary is passed in as the second argument, it is
    used instead of a new dictionary.

    Works in two passes: first collect fully-literal variables in `done`
    and unresolved ones (containing $(...)/${...}) in `notdone`, then
    iteratively substitute until everything resolves.
    """
    from distutils.text_file import TextFile
    # TextFile strips comments, skips blanks and joins continuation lines.
    fp = TextFile(fn, strip_comments=1, skip_blanks=1, join_lines=1, errors="surrogateescape")
    if g is None:
        g = {}
    done = {}  # fully resolved variables
    notdone = {}  # values still containing variable references
    while True:
        line = fp.readline()
        if line is None: # eof
            break
        m = _variable_rx.match(line)
        if m:
            n, v = m.group(1, 2)
            v = v.strip()
            # `$$' is a literal `$' in make
            tmpv = v.replace('$$', '')
            if "$" in tmpv:
                notdone[n] = v
            else:
                try:
                    v = int(v)
                except ValueError:
                    # insert literal `$'
                    done[n] = v.replace('$$', '$')
                else:
                    done[n] = v
    # Variables with a 'PY_' prefix in the makefile. These need to
    # be made available without that prefix through sysconfig.
    # Special care is needed to ensure that variable expansion works, even
    # if the expansion uses the name without a prefix.
    renamed_variables = ('CFLAGS', 'LDFLAGS', 'CPPFLAGS')
    # do variable interpolation here
    while notdone:
        for name in list(notdone):
            value = notdone[name]
            m = _findvar1_rx.search(value) or _findvar2_rx.search(value)
            if m:
                n = m.group(1)
                found = True
                if n in done:
                    item = str(done[n])
                elif n in notdone:
                    # get it on a subsequent round
                    found = False
                elif n in os.environ:
                    # do it like make: fall back to environment
                    item = os.environ[n]
                elif n in renamed_variables:
                    if name.startswith('PY_') and name[3:] in renamed_variables:
                        item = ""
                    elif 'PY_' + n in notdone:
                        found = False
                    else:
                        item = str(done['PY_' + n])
                else:
                    # Unknown variable: expand to the empty string.
                    done[n] = item = ""
                if found:
                    after = value[m.end():]
                    value = value[:m.start()] + item + after
                    if "$" in after:
                        notdone[name] = value
                    else:
                        try: value = int(value)
                        except ValueError:
                            done[name] = value.strip()
                        else:
                            done[name] = value
                        del notdone[name]
                        if name.startswith('PY_') \
                        and name[3:] in renamed_variables:
                            name = name[3:]
                            if name not in done:
                                done[name] = value
            else:
                # bogus variable reference; just drop it since we can't deal
                del notdone[name]
    fp.close()
    # strip spurious spaces
    for k, v in done.items():
        if isinstance(v, str):
            done[k] = v.strip()
    # save the results in the global dictionary
    g.update(done)
    return g
def expand_makefile_vars(s, vars):
    """Expand Makefile-style variables -- "${foo}" or "$(foo)" -- in
    's' according to 'vars' (a dictionary mapping variable names to
    values).  Variables not present in 'vars' are silently expanded to the
    empty string.  The variable values in 'vars' should not contain further
    variable expansions; if 'vars' is the output of 'parse_makefile()',
    you're fine.  Returns a variable-expanded version of 's'.
    """
    # This algorithm does multiple expansion, so if vars['foo'] contains
    # "${bar}", it will expand ${foo} to ${bar}, and then expand
    # ${bar}... and so forth.  This is fine as long as 'vars' comes from
    # 'parse_makefile()', which takes care of such expansions eagerly,
    # according to make's variable expansion semantics.
    while True:
        m = _findvar1_rx.search(s) or _findvar2_rx.search(s)
        if m:
            (beg, end) = m.span()
            # Default to '' so unknown variables expand to the empty string
            # as documented; vars.get(name) alone returned None and raised
            # TypeError on the string concatenation below.
            s = s[0:beg] + vars.get(m.group(1), '') + s[end:]
        else:
            break
    return s
_config_vars = None  # lazily populated by _init_posix()/_init_nt()
def _init_posix():
    """Initialize the module as appropriate for POSIX systems."""
    # _sysconfigdata is generated at build time, see the sysconfig module
    # (_PYTHON_SYSCONFIGDATA_NAME overrides the module name, e.g. for
    # cross-compilation).
    name = os.environ.get('_PYTHON_SYSCONFIGDATA_NAME',
        '_sysconfigdata_{abi}_{platform}_{multiarch}'.format(
        abi=sys.abiflags,
        platform=sys.platform,
        multiarch=getattr(sys.implementation, '_multiarch', ''),
    ))
    _temp = __import__(name, globals(), locals(), ['build_time_vars'], 0)
    build_time_vars = _temp.build_time_vars
    global _config_vars
    _config_vars = {}
    _config_vars.update(build_time_vars)
def _init_nt():
    """Initialize the module as appropriate for NT"""
    global _config_vars
    # Build the whole variable dictionary in one literal; the contents are
    # identical to assigning the keys one at a time.
    _config_vars = {
        # set basic install directories
        'LIBDEST': get_python_lib(plat_specific=0, standard_lib=1),
        'BINLIBDEST': get_python_lib(plat_specific=1, standard_lib=1),
        # XXX hmmm.. a normal install puts include files here
        'INCLUDEPY': get_python_inc(plat_specific=0),
        'EXT_SUFFIX': _imp.extension_suffixes()[0],
        'EXE': ".exe",
        'VERSION': get_python_version().replace(".", ""),
        'BINDIR': os.path.dirname(os.path.abspath(sys.executable)),
    }
def get_config_vars(*args):
    """With no arguments, return a dictionary of all configuration
    variables relevant for the current platform.  Generally this includes
    everything needed to build extensions and install both pure modules and
    extensions.  On Unix, this means every variable defined in Python's
    installed Makefile; on Windows it's a much smaller set.

    With arguments, return a list of values that result from looking up
    each argument in the configuration variable dictionary.

    The dictionary is built lazily on first call and cached in the module
    global _config_vars.
    """
    global _config_vars
    if _config_vars is None:
        # Dispatch to _init_posix()/_init_nt() based on os.name.
        func = globals().get("_init_" + os.name)
        if func:
            func()
        else:
            _config_vars = {}
        # Normalized versions of prefix and exec_prefix are handy to have;
        # in fact, these are the standard versions used most places in the
        # Distutils.
        _config_vars['prefix'] = PREFIX
        _config_vars['exec_prefix'] = EXEC_PREFIX
        # For backward compatibility, see issue19555
        SO = _config_vars.get('EXT_SUFFIX')
        if SO is not None:
            _config_vars['SO'] = SO
        # Always convert srcdir to an absolute path
        srcdir = _config_vars.get('srcdir', project_base)
        if os.name == 'posix':
            if python_build:
                # If srcdir is a relative path (typically '.' or '..')
                # then it should be interpreted relative to the directory
                # containing Makefile.
                base = os.path.dirname(get_makefile_filename())
                srcdir = os.path.join(base, srcdir)
            else:
                # srcdir is not meaningful since the installation is
                # spread about the filesystem.  We choose the
                # directory containing the Makefile since we know it
                # exists.
                srcdir = os.path.dirname(get_makefile_filename())
        _config_vars['srcdir'] = os.path.abspath(os.path.normpath(srcdir))
        # Convert srcdir into an absolute path if it appears necessary.
        # Normally it is relative to the build directory.  However, during
        # testing, for example, we might be running a non-installed python
        # from a different directory.
        if python_build and os.name == "posix":
            base = project_base
            if (not os.path.isabs(_config_vars['srcdir']) and
                base != os.getcwd()):
                # srcdir is relative and we are not in the same directory
                # as the executable. Assume executable is in the build
                # directory and make srcdir absolute.
                srcdir = os.path.join(base, _config_vars['srcdir'])
                _config_vars['srcdir'] = os.path.normpath(srcdir)
        # OS X platforms require special customization to handle
        # multi-architecture, multi-os-version installers
        if sys.platform == 'darwin':
            import _osx_support
            _osx_support.customize_config_vars(_config_vars)
    if args:
        vals = []
        for name in args:
            vals.append(_config_vars.get(name))
        return vals
    else:
        return _config_vars
def get_config_var(name):
    """Return the value of a single variable using the dictionary
    returned by 'get_config_vars()'.  Equivalent to
    get_config_vars().get(name)
    """
    if name == 'SO':
        # 'SO' is kept only for backward compatibility; warn callers.
        import warnings
        warnings.warn('SO is deprecated, use EXT_SUFFIX', DeprecationWarning, 2)
    all_vars = get_config_vars()
    return all_vars.get(name)
| |
"""
Two-dimensional pattern generators drawing from various random distributions.
$Id$
"""
__version__='$Revision$'
import numpy
from numpy.oldnumeric import zeros,floor,where,choose,less,greater,Int,random_array
import param
from param.parameterized import ParamOverrides
from patterngenerator import PatternGenerator
from . import Composite, Gaussian
from sheetcoords import SheetCoordinateSystem
def seed(seed=None):
    """
    Set the seed on the shared RandomState instance.

    Convenience function: shortcut to RandomGenerator.random_generator.seed().
    """
    shared_state = RandomGenerator.random_generator
    shared_state.seed(seed)
class RandomGenerator(PatternGenerator):
    """2D random noise pattern generator abstract class."""
    __abstract = True
    # The orientation is ignored, so we don't show it in
    # auto-generated lists of parameters (e.g. in the GUI)
    orientation = param.Number(precedence=-1)
    random_generator = param.Parameter(
        default=numpy.random.RandomState(seed=(500,500)),precedence=-1,doc=
        """
        numpy's RandomState provides methods for generating random
        numbers (see RandomState's help for more information).
        Note that all instances will share this RandomState object,
        and hence its state. To create a RandomGenerator that has its
        own state, set this parameter to a new RandomState instance.
        """)
    def _distrib(self,shape,p):
        """Method for subclasses to override with a particular random distribution."""
        raise NotImplementedError
    # Optimization: We use a simpler __call__ method here to skip the
    # coordinate transformations (which would have no effect anyway)
    def __call__(self,**params_to_override):
        """Draw an array from the subclass distribution, apply the mask,
        then run each output function on the result in order."""
        p = ParamOverrides(self,params_to_override)
        shape = SheetCoordinateSystem(p.bounds,p.xdensity,p.ydensity).shape
        result = self._distrib(shape,p)
        self._apply_mask(p,result)
        for of in p.output_fns:
            of(result)
        return result
class UniformRandom(RandomGenerator):
    """2D uniform random noise pattern generator."""
    def _distrib(self,shape,p):
        """Draw uniformly from [offset, offset+scale) over the given shape."""
        low = p.offset
        high = p.offset + p.scale
        return p.random_generator.uniform(low, high, shape)
class BinaryUniformRandom(RandomGenerator):
    """
    2D binary uniform random noise pattern generator.

    Draws an array whose entries are 1.0 with the given on_probability
    and 0.0 otherwise, then scales it and adds the offset as for other
    patterns.  For the default scale and offset, the result is a binary
    mask where some elements are on at random.
    """
    on_probability = param.Number(default=0.5,bounds=[0.0,1.0],doc="""
        Probability (in the range 0.0 to 1.0) that the binary value
        (before scaling) is on rather than off (1.0 rather than 0.0).""")
    def _distrib(self,shape,p):
        # Shift a unit-width uniform interval so that rounding yields 1.0
        # with probability on_probability.
        lower = p.on_probability - 0.5
        sample = p.random_generator.uniform(lower, lower + 1.0, shape)
        return p.offset + p.scale * sample.round()
class GaussianRandom(RandomGenerator):
    """
    2D Gaussian random noise pattern generator.

    Each pixel is chosen independently from a Gaussian distribution
    of zero mean and unit variance, then multiplied by the given
    scale and adjusted by the given offset.
    """
    scale = param.Number(default=0.25,softbounds=(0.0,2.0))
    offset = param.Number(default=0.50,softbounds=(-2.0,2.0))
    def _distrib(self,shape,p):
        noise = p.random_generator.standard_normal(shape)
        return p.offset + p.scale * noise
# CEBALERT: in e.g. script_repr, an instance of this class appears to
# have only pattern.Constant() in its list of generators, which might
# be confusing. The Constant pattern has no effect because the
# generators list is overridden in __call__. Shouldn't the generators
# parameter be hidden for this class (and possibly for others based on
# pattern.Composite)? For that to be safe, we'd at least have to have
# a warning if someone ever sets a hidden parameter, so that having it
# revert to the default value would always be ok.
class GaussianCloud(Composite):
    """Uniform random noise masked by a circular Gaussian."""
    operator = param.Parameter(numpy.multiply)
    gaussian_size = param.Number(default=1.0,doc="Size of the Gaussian pattern.")
    aspect_ratio = param.Number(default=1.0,bounds=(0.0,None),softbounds=(0.0,2.0),
        precedence=0.31,doc="""
        Ratio of gaussian width to height; width is gaussian_size*aspect_ratio.""")
    def __call__(self,**params_to_override):
        """Override the generators with a Gaussian and a UniformRandom,
        then delegate to Composite."""
        p = ParamOverrides(self,params_to_override)
        mask = Gaussian(aspect_ratio=p.aspect_ratio, size=p.gaussian_size)
        p.generators = [mask, UniformRandom()]
        return super(GaussianCloud, self).__call__(**p)
### JABHACKALERT: This code seems to work fine when the input regions
### are all the same size and shape, but for
### e.g. examples/hierarchical.ty the resulting images in the Test
### Pattern preview window are square (instead of the actual
### rectangular shapes), matching between the eyes (instead of the
### actual two different rectangles), and with dot sizes that don't
### match between the eyes. It's not clear why this happens.
class RandomDotStereogram(PatternGenerator):
    """
    Random dot stereogram using rectangular black and white patches.

    Based on Matlab code originally from Jenny Read, reimplemented
    in Python by Tikesh Ramtohul (2006).
    """
    # Suppress unused parameters
    x = param.Number(precedence=-1)
    y = param.Number(precedence=-1)
    size = param.Number(precedence=-1)
    orientation = param.Number(precedence=-1)
    # Override defaults to make them appropriate
    scale = param.Number(default=0.5)
    offset = param.Number(default=0.5)
    # New parameters for this pattern
    #JABALERT: Should rename xdisparity and ydisparity to x and y, and simply
    #set them to different values for each pattern to get disparity
    xdisparity = param.Number(default=0.0,bounds=(-1.0,1.0),softbounds=(-0.5,0.5),
                              precedence=0.50,doc="Disparity in the horizontal direction.")
    ydisparity = param.Number(default=0.0,bounds=(-1.0,1.0),softbounds=(-0.5,0.5),
                              precedence=0.51,doc="Disparity in the vertical direction.")
    dotdensity = param.Number(default=0.5,bounds=(0.0,None),softbounds=(0.1,0.9),
                              precedence=0.52,doc="Number of dots per unit area; 0.5=50% coverage.")
    dotsize = param.Number(default=0.1,bounds=(0.0,None),softbounds=(0.05,0.15),
                           precedence=0.53,doc="Edge length of each square dot.")
    random_seed=param.Integer(default=500,bounds=(0,1000),
                              precedence=0.54,doc="Seed value for the random position of the dots.")
    def __call__(self,**params_to_override):
        """Draw random square dots on a double-sized canvas, then crop a
        window shifted by the requested disparity."""
        p = ParamOverrides(self,params_to_override)
        xsize,ysize = SheetCoordinateSystem(p.bounds,p.xdensity,p.ydensity).shape
        xsize,ysize = int(round(xsize)),int(round(ysize))
        # NOTE(review): ydisparity is scaled by xsize, not ysize -- looks
        # suspicious but preserved; confirm against the original Matlab code.
        xdisparity = int(round(xsize*p.xdisparity))
        ydisparity = int(round(xsize*p.ydisparity))
        dotsize = int(round(xsize*p.dotsize))
        # Work on a canvas twice the requested size so the disparity-shifted
        # crop below always stays in bounds.
        bigxsize = 2*xsize
        bigysize = 2*ysize
        ndots=int(round(p.dotdensity * (bigxsize+2*dotsize) * (bigysize+2*dotsize) /
                        min(dotsize,xsize) / min(dotsize,ysize)))
        halfdot = floor(dotsize/2)
        # Choose random colors and locations of square dots
        random_seed = p.random_seed
        random_array.seed(random_seed*12,random_seed*99)
        col=where(random_array.random((ndots))>=0.5, 1.0, -1.0)
        random_array.seed(random_seed*122,random_seed*799)
        xpos=floor(random_array.random((ndots))*(bigxsize+2*dotsize)) - halfdot
        random_array.seed(random_seed*1243,random_seed*9349)
        ypos=floor(random_array.random((ndots))*(bigysize+2*dotsize)) - halfdot
        # Construct arrays of points specifying the boundaries of each
        # dot, cropping them by the big image size (0,0) to (bigxsize,bigysize)
        x1=xpos.astype(Int) ; x1=choose(less(x1,0),(x1,0))
        y1=ypos.astype(Int) ; y1=choose(less(y1,0),(y1,0))
        x2=(xpos+(dotsize-1)).astype(Int) ; x2=choose(greater(x2,bigxsize),(x2,bigxsize))
        y2=(ypos+(dotsize-1)).astype(Int) ; y2=choose(greater(y2,bigysize),(y2,bigysize))
        # Draw each dot in the big image, on a blank background
        bigimage = zeros((bigysize,bigxsize))
        for i in range(ndots):
            bigimage[y1[i]:y2[i]+1,x1[i]:x2[i]+1] = col[i]
        # NOTE(review): the slicing below relies on integer (floor) division
        # of ysize/2 etc. -- this is Python 2-era code; under Python 3 these
        # would be floats and slicing would fail.  TODO confirm target version.
        result = p.offset + p.scale*bigimage[ (ysize/2)+ydisparity:(3*ysize/2)+ydisparity ,
                                              (xsize/2)+xdisparity:(3*xsize/2)+xdisparity ]
        for of in p.output_fns:
            of(result)
        return result
| |
import logging
from django.conf import settings
from djconnectwise.utils import RequestSettings
import re
import requests
from retrying import retry
class ConnectWiseAPIError(Exception):
    """Raise this, not request exceptions."""
class ConnectWiseRecordNotFoundError(ConnectWiseAPIError):
    """The record was not found."""
CW_RESPONSE_MAX_RECORDS = 1000  # The greatest number of records ConnectWise
                                # will send us in one response.
RETRY_WAIT_EXPONENTIAL_MULTAPPLIER = 1000  # Initial number of milliseconds to
                                           # wait before retrying a request.
RETRY_WAIT_EXPONENTIAL_MAX = 10000  # Maximum number of milliseconds to wait
                                    # before retrying a request.
CW_DEFAULT_PAGE = 1  # CW Pagination is 1-indexed
# Extracts the (optionally quoted) filename from a Content-Disposition
# attachment header.  NOTE(review): not a raw string; the \" escapes are
# harmless here, but r'...' would be clearer.
CONTENT_DISPOSITION_RE = re.compile(
    '^attachment; filename=\"{0,1}(.*?)\"{0,1}$'
)
logger = logging.getLogger(__name__)
class ConnectWiseAPIClient(object):
    """Base client for the ConnectWise REST API.

    Subclasses set API to the ConnectWise API name (e.g. 'project',
    'company'), which becomes part of the endpoint URL.
    """
    API = None
    def __init__(
        self,
        company_id=None,
        server_url=None,
        api_public_key=None,
        api_private_key=None,
        api_codebase=None
    ):
        """Build the base URL and auth credentials.

        Any argument left as None falls back to the Django settings.

        Raises:
            ValueError: if the subclass did not set API.
        """
        if not company_id:
            company_id = settings.CONNECTWISE_CREDENTIALS['company_id']
        if not server_url:
            server_url = settings.CONNECTWISE_SERVER_URL
        if not api_public_key:
            api_public_key = settings.CONNECTWISE_CREDENTIALS['api_public_key']
        if not api_private_key:
            api_private_key = settings.CONNECTWISE_CREDENTIALS[
                'api_private_key'
            ]
        if not api_codebase:
            api_codebase = settings.CONNECTWISE_CREDENTIALS['api_codebase']
        if not self.API:
            raise ValueError('API not specified')
        self.api_public_key = api_public_key
        self.api_private_key = api_private_key
        self.api_codebase = api_codebase
        self.server_url = '{0}/{1}/apis/3.0/{2}/'.format(
            server_url,
            self.api_codebase,
            self.API,
        )
        # HTTP Basic auth: username is "<company>+<public key>", password
        # is the private key.
        self.auth = ('{0}+{1}'.format(company_id, self.api_public_key),
                     '{0}'.format(self.api_private_key),)
        self.request_settings = RequestSettings().get_settings()
        self.timeout = self.request_settings['timeout']
    def _endpoint(self, path):
        # Join a relative API path onto the server base URL.
        return '{0}{1}'.format(self.server_url, path)
    def _log_failed(self, response):
        # Log enough context (URL, status, body) to diagnose a failed call.
        logger.error('FAILED API CALL: {0} - {1} - {2}'.format(
            response.url, response.status_code, response.content))
    def fetch_resource(self, endpoint_url, params=None, should_page=False,
                       retry_counter=None,
                       *args, **kwargs):
        """
        A convenience method for issuing a request to the
        specified REST endpoint.

        Retries with exponential backoff (see the @retry decorator below).

        Note: retry_counter is used specifically for testing.
        It is a dict in the form {'count': 0} that is passed in
        to verify the number of attempts that were made.

        Raises:
            ConnectWiseRecordNotFoundError: on HTTP 404.
            ConnectWiseAPIError: on request failure or any other non-2xx status.
        """
        @retry(stop_max_attempt_number=self.request_settings['max_attempts'],
               wait_exponential_multiplier=RETRY_WAIT_EXPONENTIAL_MULTAPPLIER,
               wait_exponential_max=RETRY_WAIT_EXPONENTIAL_MAX)
        def _fetch_resource(endpoint_url, params=None, should_page=False,
                            retry_counter=None,
                            *args, **kwargs):
            if retry_counter:
                retry_counter['count'] += 1
            if not params:
                params = {}
            if should_page:
                # Pagination parameters default to the maximum page size
                # and the first (1-indexed) page.
                params['pageSize'] = kwargs.get('page_size',
                                                CW_RESPONSE_MAX_RECORDS)
                params['page'] = kwargs.get('page', CW_DEFAULT_PAGE)
            try:
                endpoint = self._endpoint(endpoint_url)
                logger.debug('Making GET request to {}'.format(endpoint))
                response = requests.get(
                    endpoint,
                    params=params,
                    auth=self.auth,
                    timeout=self.timeout,
                )
            except requests.RequestException as e:
                # Wrap transport-level errors so callers only see
                # ConnectWiseAPIError (per the exception's docstring).
                logger.error('Request failed: GET {}: {}'.format(endpoint, e))
                raise ConnectWiseAPIError('{}'.format(e))
            if 200 <= response.status_code < 300:
                return response.json()
            if response.status_code == 404:
                msg = 'Resource {} was not found.'.format(response.url)
                logger.warning(msg)
                raise ConnectWiseRecordNotFoundError(msg)
            else:
                self._log_failed(response)
                raise ConnectWiseAPIError(response.content)
        return _fetch_resource(endpoint_url, params=params,
                               should_page=should_page,
                               *args, **kwargs)
class ProjectAPIClient(ConnectWiseAPIClient):
    """Client for the ConnectWise 'project' API."""
    API = 'project'
    ENDPOINT_PROJECTS = 'projects/'

    def get_project(self, project_id):
        """Fetch a single project record by its ID."""
        return self.fetch_resource(
            '{}/{}'.format(self.ENDPOINT_PROJECTS, project_id))

    def get_projects(self, *args, **kwargs):
        """Fetch the (paged) list of projects."""
        return self.fetch_resource(
            self.ENDPOINT_PROJECTS, should_page=True, *args, **kwargs)
class CompanyAPIClient(ConnectWiseAPIClient):
    """Client for the ConnectWise 'company' API."""
    API = 'company'
    ENDPOINT_COMPANIES = 'companies'
    ENDPOINT_COMPANY_STATUSES = '{}/statuses'.format(ENDPOINT_COMPANIES)

    def by_id(self, company_id):
        """Fetch a single company record by its ID."""
        endpoint_url = '{}/{}'.format(self.ENDPOINT_COMPANIES, company_id)
        return self.fetch_resource(endpoint_url)

    def get_companies(self, *args, **kwargs):
        """Fetch the (paged) list of companies.

        An optional 'conditions' keyword argument is translated into the
        'params' dict understood by fetch_resource.
        """
        if 'conditions' in kwargs:
            # Pop the kwarg so it is not ALSO forwarded verbatim to
            # fetch_resource, where it would be silently swallowed by
            # **kwargs (the previous code left it in place).
            kwargs['params'] = {
                'conditions': kwargs.pop('conditions')
            }
        return self.fetch_resource(self.ENDPOINT_COMPANIES, should_page=True,
                                   *args, **kwargs)

    def get_company_statuses(self, *args, **kwargs):
        """Fetch the (paged) list of company statuses."""
        return self.fetch_resource(self.ENDPOINT_COMPANY_STATUSES,
                                   should_page=True,
                                   *args, **kwargs)
class SalesAPIClient(ConnectWiseAPIClient):
    """Client for the ConnectWise 'sales' API."""
    API = 'sales'
    ENDPOINT_OPPORTUNITIES = 'opportunities'
    ENDPOINT_OPPORTUNITY_STATUSES = (
        '{}/statuses'.format(ENDPOINT_OPPORTUNITIES))
    ENDPOINT_OPPORTUNITY_TYPES = (
        '{}/types'.format(ENDPOINT_OPPORTUNITIES))

    def by_id(self, opportunity_id):
        """Fetch a single opportunity record by its ID."""
        return self.fetch_resource(
            '{}/{}'.format(self.ENDPOINT_OPPORTUNITIES, opportunity_id))

    def get_opportunities(self, *args, **kwargs):
        """Fetch the (paged) list of opportunities."""
        return self.fetch_resource(
            self.ENDPOINT_OPPORTUNITIES, should_page=True, *args, **kwargs)

    def get_opportunity_statuses(self, *args, **kwargs):
        """Fetch the (paged) list of opportunity statuses."""
        return self.fetch_resource(
            self.ENDPOINT_OPPORTUNITY_STATUSES, should_page=True,
            *args, **kwargs)

    def get_opportunity_types(self, *args, **kwargs):
        """Fetch the (paged) list of opportunity types."""
        return self.fetch_resource(
            self.ENDPOINT_OPPORTUNITY_TYPES, should_page=True,
            *args, **kwargs)
class SystemAPIClient(ConnectWiseAPIClient):
    """Client for the ConnectWise 'system' API (members, callbacks, info)."""
    API = 'system'

    # endpoints
    ENDPOINT_MEMBERS = 'members/'
    ENDPOINT_MEMBERS_IMAGE = 'members/{}/image'
    ENDPOINT_MEMBERS_COUNT = 'members/count'
    ENDPOINT_CALLBACKS = 'callbacks/'
    ENDPOINT_INFO = 'info/'

    def get_connectwise_version(self):
        """Return the server's reported version string ('' if absent)."""
        result = self.fetch_resource(self.ENDPOINT_INFO)
        return result.get('version', '')

    def get_members(self, *args, **kwargs):
        """Fetch the (paged) list of members."""
        return self.fetch_resource(self.ENDPOINT_MEMBERS,
                                   should_page=True, *args, **kwargs)

    def get_member_count(self):
        """Fetch the member-count resource."""
        return self.fetch_resource(self.ENDPOINT_MEMBERS_COUNT)

    def get_callbacks(self, *args, **kwargs):
        """Fetch the (paged) list of registered callbacks."""
        return self.fetch_resource(self.ENDPOINT_CALLBACKS,
                                   should_page=True, *args, **kwargs)

    def delete_callback(self, entry_id):
        """Delete the callback with the given ID.

        Raises ConnectWiseAPIError on a transport failure; a non-2xx
        status propagates as requests' HTTPError via raise_for_status().
        Returns the raw response object on success.
        """
        try:
            endpoint = self._endpoint(
                '{}{}'.format(self.ENDPOINT_CALLBACKS, entry_id)
            )
            logger.debug('Making DELETE request to {}'.format(endpoint))
            response = requests.request(
                'delete',
                endpoint,
                auth=self.auth,
                timeout=self.timeout,
            )
        except requests.RequestException as e:
            logger.error('Request failed: DELETE {}: {}'.format(endpoint, e))
            raise ConnectWiseAPIError('{}'.format(e))
        response.raise_for_status()
        return response

    def create_callback(self, callback_entry):
        """POST a new callback entry; return the decoded JSON on success."""
        try:
            endpoint = self._endpoint(self.ENDPOINT_CALLBACKS)
            logger.debug('Making POST request to {}'.format(endpoint))
            response = requests.request(
                'post',
                endpoint,
                json=callback_entry,
                auth=self.auth,
                timeout=self.timeout,
            )
        except requests.RequestException as e:
            logger.error('Request failed: POST {}: {}'.format(endpoint, e))
            raise ConnectWiseAPIError('{}'.format(e))
        if 200 <= response.status_code < 300:
            return response.json()
        else:
            self._log_failed(response)
            raise ConnectWiseAPIError(response.content)

    def update_callback(self, callback_entry):
        """PUT an updated callback entry; return the decoded JSON on success."""
        try:
            endpoint = self._endpoint(
                'callbacks/{0}'.format(callback_entry.entry_id)
            )
            logger.debug('Making PUT request to {}'.format(endpoint))
            response = requests.request(
                'put',
                endpoint,
                json=callback_entry,
                auth=self.auth,
                timeout=self.timeout,
            )
        except requests.RequestException as e:
            logger.error('Request failed: PUT {}: {}'.format(endpoint, e))
            raise ConnectWiseAPIError('{}'.format(e))
        if 200 <= response.status_code < 300:
            return response.json()
        else:
            self._log_failed(response)
            raise ConnectWiseAPIError(response.content)

    def get_member_by_identifier(self, identifier):
        """Fetch a single member record by its identifier."""
        return self.fetch_resource('members/{0}'.format(identifier))

    def get_member_image_by_identifier(self, identifier):
        """
        Return a (filename, content) tuple.

        On a non-2xx response the failure is logged and (None, None)
        is returned instead of raising.
        """
        try:
            endpoint = self._endpoint(
                self.ENDPOINT_MEMBERS_IMAGE.format(identifier)
            )
            logger.debug('Making GET request to {}'.format(endpoint))
            response = requests.get(
                endpoint,
                auth=self.auth,
                timeout=self.timeout,
            )
        except requests.RequestException as e:
            logger.error('Request failed: GET {}: {}'.format(endpoint, e))
            raise ConnectWiseAPIError('{}'.format(e))
        if 200 <= response.status_code < 300:
            headers = response.headers
            # requests' CaseInsensitiveDict.get accepts 'default' by keyword.
            content_disposition_header = headers.get('Content-Disposition',
                                                     default='')
            msg = "Got member '{}' image; size {} bytes " \
                  "and content-disposition header '{}'"
            logger.info(msg.format(
                identifier,
                len(response.content),
                content_disposition_header
            ))
            attachment_filename = self._attachment_filename(
                content_disposition_header)
            return attachment_filename, response.content
        else:
            self._log_failed(response)
            return None, None

    def _attachment_filename(self, content_disposition):
        """
        Return the attachment filename from the content disposition header.

        If there's no match, return None.
        """
        m = CONTENT_DISPOSITION_RE.match(content_disposition)
        return m.group(1) if m else None
class ServiceAPIClient(ConnectWiseAPIClient):
    """Client for the ConnectWise 'service' API (tickets, boards, etc.)."""
    API = 'service'
    ENDPOINT_TICKETS = 'tickets'
    ENDPOINT_BOARDS = 'boards'
    ENDPOINT_PRIORITIES = 'priorities'
    ENDPOINT_LOCATIONS = 'locations'

    def __init__(self, *args, **kwargs):
        """Accept an optional 'extra_conditions' kwarg; pass the rest up."""
        self.extra_conditions = None
        if 'extra_conditions' in kwargs:
            # Pop before delegating so the parent never sees the kwarg.
            self.extra_conditions = kwargs.pop('extra_conditions')
        super().__init__(*args, **kwargs)

    def get_conditions(self):
        """Build the ticket conditions string.

        Combines the configured default conditions with any extra
        conditions, each parenthesised and joined with ' AND '.
        """
        default_conditions = settings.DJCONNECTWISE_DEFAULT_TICKET_CONDITIONS
        condition_list = [c for c in [
            default_conditions, self.extra_conditions] if c]
        conditions = ''
        for condition in condition_list:
            condition = '({})'.format(condition)
            if conditions:
                condition = ' AND {}'.format(condition)
            conditions += condition
        return conditions

    def tickets_count(self):
        """Return the number of tickets matching the current conditions."""
        params = dict(
            conditions=self.get_conditions(),
        )
        return self.fetch_resource(
            '{}/count'.format(self.ENDPOINT_TICKETS), params
        ).get('count', 0)

    def get_ticket(self, ticket_id):
        """Fetch a single ticket record by its ID."""
        endpoint_url = '{}/{}'.format(self.ENDPOINT_TICKETS, ticket_id)
        return self.fetch_resource(endpoint_url)

    def get_tickets(self, *args, **kwargs):
        """Fetch the (paged) list of tickets matching the conditions."""
        params = dict(
            conditions=self.get_conditions()
        )
        return self.fetch_resource(self.ENDPOINT_TICKETS, should_page=True,
                                   params=params, *args, **kwargs)

    def update_ticket_status(self, ticket_id, closed_flag, status):
        """
        Update the ticket's closedFlag and status on the server.

        Issues a JSON-Patch style PATCH; returns the decoded JSON body
        on success, raises ConnectWiseAPIError otherwise.
        """
        # Yeah, this schema is a bit bizarre. See CW docs at
        # https://developer.connectwise.com/Manage/Developer_Guide#Patch
        body = [
            {
                'op': 'replace',
                'path': 'closedFlag',
                'value': closed_flag
            },
            {
                'op': 'replace',
                'path': 'status',
                'value': {
                    'id': status.id,
                    'name': status.name,
                },
            },
        ]
        try:
            endpoint = self._endpoint(
                '{}/{}'.format(self.ENDPOINT_TICKETS, ticket_id)
            )
            logger.debug('Making PATCH request to {}'.format(endpoint))
            response = requests.patch(
                endpoint,
                json=body,
                auth=self.auth,
                timeout=self.timeout,
            )
        except requests.RequestException as e:
            logger.error('Request failed: PATCH {}: {}'.format(endpoint, e))
            raise ConnectWiseAPIError('{}'.format(e))
        if 200 <= response.status_code < 300:
            return response.json()
        else:
            self._log_failed(response)
            raise ConnectWiseAPIError(response.content)

    def get_statuses(self, board_id, *args, **kwargs):
        """
        Returns the status types associated with the specified board.
        """
        endpoint_url = '{}/{}/statuses'.format(self.ENDPOINT_BOARDS, board_id)
        return self.fetch_resource(endpoint_url, should_page=True,
                                   *args, **kwargs)

    def get_boards(self, *args, **kwargs):
        """Fetch the (paged) list of boards."""
        return self.fetch_resource(self.ENDPOINT_BOARDS, should_page=True,
                                   *args, **kwargs)

    def get_board(self, board_id):
        """Fetch a single board record by its ID."""
        return self.fetch_resource('{}/{}'.format(
            self.ENDPOINT_BOARDS, board_id)
        )

    def get_priorities(self, *args, **kwargs):
        """Fetch the (paged) list of ticket priorities."""
        return self.fetch_resource(self.ENDPOINT_PRIORITIES, should_page=True,
                                   *args, **kwargs)

    def get_teams(self, board_id, *args, **kwargs):
        """Fetch the (paged) list of teams for the given board."""
        endpoint = '{}/{}/teams/'.format(self.ENDPOINT_BOARDS, board_id)
        return self.fetch_resource(endpoint, should_page=True, *args, **kwargs)

    def get_locations(self, *args, **kwargs):
        """Fetch the (paged) list of service locations."""
        return self.fetch_resource(self.ENDPOINT_LOCATIONS, should_page=True,
                                   *args, **kwargs)
| |
#!/usr/bin/env python
#
# nucmer_to_crunch.py
#
# USAGE: nucmer_to_crunch.py [-h] [-o OUTFILENAME] [-i INFILENAME] [-v]
#
# optional arguments:
# -h, --help show this help message and exit
# -o OUTFILENAME, --outfile OUTFILENAME
# Output .crunch file
# -i INFILENAME, --infile INFILENAME
# Input .coords file
# -v, --verbose Give verbose output
#
# A short script that converts the output of MUMmer's show-coords package
# to a .crunch file that can be used in Sanger's ACT comparative genomics
# visualisation tool.
#
# The script acts equivalently to the one-liner:
#
# tail -n +6 in.coords | awk \
# '{print $7" "$10" "$1" "$2" "$12" "$4" "$5" "$13}' > out.crunch
#
#
# but has the advantage that you don't have to remember which columns go in
# which order, and the Python boilerplate provides nicer logging and usage
# information.
#
# Copyright (C) 2014 The James Hutton Institute
# Author: Leighton Pritchard
#
# Contact:
# leighton.pritchard@hutton.ac.uk
#
# Leighton Pritchard,
# Information and Computing Sciences,
# James Hutton Institute,
# Errol Road,
# Invergowrie,
# Dundee,
# DD6 9LH,
# Scotland,
# UK
#
# The MIT License
#
# Copyright (c) 2010-2014 The James Hutton Institute
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
###
# IMPORTS
import logging
import logging.handlers
import os
import re
import sys
import traceback

from argparse import ArgumentParser
###
# FUNCTIONS
# Parse cmd-line
def parse_cmdline(args):
    """Parse command-line arguments.

    Args:
        args: the full argv-style argument list (program name first),
            typically sys.argv.

    Returns:
        An argparse.Namespace with outfilename, infilename and verbose.
    """
    parser = ArgumentParser(prog="nucmer_to_crunch.py")
    parser.add_argument("-o", "--outfile", dest="outfilename",
                        action="store", default=None,
                        help="Output .crunch file")
    parser.add_argument("-i", "--infile", dest="infilename",
                        action="store", default=None,
                        help="Input .coords file")
    parser.add_argument("-v", "--verbose", dest="verbose",
                        action="store_true",
                        help="Give verbose output")
    # Previously the passed-in argument list was silently ignored and
    # sys.argv was always parsed; honour the parameter (skipping the
    # program name). Callers passing sys.argv see identical behaviour.
    return parser.parse_args(args[1:])
# Report last exception as string
def last_exception():
    """Return the currently-handled exception, with traceback, as a string.

    Must be called from inside an ``except`` block (uses sys.exc_info()).
    The original code referenced ``traceback`` without importing it
    anywhere in the module, so this always raised NameError; the module's
    imports now include ``traceback``.
    """
    exc_type, exc_value, exc_traceback = sys.exc_info()
    return ''.join(traceback.format_exception(exc_type, exc_value,
                                              exc_traceback))
# Process the input stream
def process_stream(infh, outfh):
    """ Processes the input stream, assuming show-coords output, with
        five header lines, and whitespace separation.

        show-coords output has the following columns (post-processing)

        1: reference sequence start
        2: reference sequence end
        3: subject sequence start
        4: subject sequence end
        5: reference alignment length
        6: subject alignment length
        7: alignment percentage identity
        8: reference sequence ID
        9: subject sequence ID

        This is converted to .crunch (MSPcrunch) format output with the
        following columns, separated by whitespace.

        1: score (reference alignment length)
        2: alignment percentage identity
        3: reference sequence start
        4: reference sequence end
        5: reference sequence ID
        6: subject sequence start
        7: subject sequence end
        8: subject sequence ID
    """
    # The module-level 'logger' is only created when the file is run as a
    # script (inside the __main__ guard); fetch the same named logger here
    # so this function also works when the module is imported.
    logger = logging.getLogger('nucmer_to_crunch.py')
    # Read in the input stream into a list of lines
    try:
        tbldata = list(infh.readlines())
    except Exception:  # was a bare 'except:'; still best-effort log + exit
        logger.error("Could not process input (exiting)")
        logger.error(last_exception())
        sys.exit(1)
    logger.info("Read %d lines from input" % len(tbldata))
    # Drop the five show-coords header lines.
    tbldata = tbldata[5:]
    logger.info("Skipped header lines.")
    for line in [l.strip().split() for l in tbldata if
                 len(l.strip())]:
        # Due to the column marker symbols, there are offsets for the
        # columns, relative to those in the text above.
        outfh.write(' '.join([line[6], line[9], line[0], line[1],
                              line[11], line[3], line[4], line[12]]) + '\n')
###
# SCRIPT
if __name__ == '__main__':
    # Parse command-line
    args = parse_cmdline(sys.argv)

    # We set up logging, and modify loglevel according to whether we need
    # verbosity or not
    logger = logging.getLogger('nucmer_to_crunch.py')
    logger.setLevel(logging.DEBUG)
    err_handler = logging.StreamHandler(sys.stderr)
    err_formatter = logging.Formatter('%(levelname)s: %(message)s')
    err_handler.setFormatter(err_formatter)
    if args.verbose:
        err_handler.setLevel(logging.INFO)
    else:
        err_handler.setLevel(logging.WARNING)
    logger.addHandler(err_handler)

    # Report arguments, if verbose
    logger.info(args)

    # Do we have an input file? No? Then use stdin
    if args.infilename is None:
        infhandle = sys.stdin
        logger.info("Using stdin for input")
    else:
        logger.info("Using %s for input" % args.infilename)
        try:
            # 'rU' mode was deprecated and removed in Python 3.11;
            # universal newlines are the default in text mode.
            infhandle = open(args.infilename, 'r')
        except OSError:
            logger.error("Could not open input file: %s (exiting)" %
                         args.infilename)
            # sys.last_type/last_value/last_traceback are only set by the
            # interactive interpreter and raised AttributeError here;
            # report the live exception instead.
            logger.error(last_exception())
            sys.exit(1)

    # Do we have an output file? No? Then use stdout
    if args.outfilename is None:
        outfhandle = sys.stdout
        logger.info("Using stdout for output")
    else:
        logger.info("Using %s for output" % args.outfilename)
        try:
            outfhandle = open(args.outfilename, 'w')
        except OSError:
            logger.error("Could not open output file: %s (exiting)" %
                         args.outfilename)
            logger.error(last_exception())
            sys.exit(1)

    # Process input stream
    process_stream(infhandle, outfhandle)
| |
#!/usr/bin/python
#
# Copyright 2014 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Connections via pexpect to SSH and Telnet endpoints.
By deliberate side-effect, this module overwrites pexpect.spawn.__select
with an implementation based on poll(), to support use with higher file
descriptors than supported by select().
"""
import errno
import os
import re
import select
import socket
import time
import paramiko
import pexpect
import gflags
import logging
import sshclient
import push_exceptions as exceptions
FLAGS = gflags.FLAGS

# Default per-connection timeout, in seconds, used when callers pass
# timeout=None.
TIMEOUT_DEFAULT = 20.0


class Error(Exception):
  """Base class for all errors raised by this module."""
  pass


# NOTE(review): ConnectionError and TimeoutError shadow the Python 3
# builtins of the same names; callers of this module catch these classes,
# so they must not be renamed.
class ConnectionError(Error):
  """The connection failed due to an error."""


class TimeoutError(Error):
  """The operation timed-out."""


class OperationFailedError(Error):
  """The sub-process had a non-zero exit status."""


class ScpError(Error):
  """An error occurred during an SCP operation."""
def _SelectViaPoll(_, rfds, wfds, efds, timeout):
"""poll() based replacement for pexpect.spawn.__select().
As mentioned in the module docstring, this is required since Python's select
is unable to wait for events on high-numbered file descriptors. The API is
as per select.select(), however if we are interrupted by a signal, we wait
again for the remaining time.
Args:
_: An object, self, unused.
rfds: A list, file descriptors to check for read.
wfds: A list, file descriptors to check for write.
efds: A list, file descriptors to check for exceptions.
timeout: A float, timeout (seconds).
Returns:
A tuple of three lists, being the descriptors in each of the incoming lists
which are ready for read, write or have an exception, respectively.
"""
if wfds or efds:
logging.fatal('Unexpected code change in pexpect: __select '
'called with wfds=%s efds=%s', wfds, efds)
p = select.poll()
for fd in rfds:
p.register(fd, select.POLLIN)
# See pexpect.spawn.__select for timeout handling logic; this is the same
# in select() and poll(), except that the timeout argument to poll() is
# in milliseconds. poll() raises the same exception on timeout as select().
if timeout is not None:
end_time = time.time() + timeout
while True:
try:
fdstate = p.poll(int(timeout * 1000) if timeout is not None else None)
# Build a list of descriptors which select() would return as 'available
# for read' (which includes EOF conditions which may be indicated as
# POLLIN, POLLHUP or POLLIN|POLLHUP, depending on the type of file
# descriptor).
rrfds = []
for fd, state in fdstate:
if state & select.POLLIN or state & select.POLLHUP:
rrfds.append(fd)
return (rrfds, [], [])
except select.error as e:
if e[0] == errno.EINTR:
if timeout is not None:
timeout = end_time - time.time()
if timeout < 0:
return ([], [], [])
else:
raise
# Override pexpect.spawn.__select as mentioned in module docstring.
# '_spawn__select' is the name-mangled form of the private
# pexpect.spawn.__select attribute being replaced.
pexpect.spawn._spawn__select = _SelectViaPoll
class Connection(object):
  """The base class for pexpect connections."""

  def __init__(self, host, username, password=None, success=None,
               connect_command=None, timeout=None, find_prompt=False,
               enable_password=None, find_prompt_prefix=None,
               find_prompt_suffix=''):
    """Initializer.

    Args:
      host: A string, the hostname or IP address to connect to.
      username: A string, the username to use on the connection.
      password: A string, the password to use on the connection.
      success: A string, the string to expect to trigger successful completion.
      connect_command: A string, the command to connect (minus the host suffix).
      timeout: A float, the number of seconds before a connection times out.
      find_prompt: A bool, if true then success is a regexp and it's group(1)
        should be used to build self._prompt.
      enable_password: A string, the enable password to optionally use.
      find_prompt_prefix: A string, the prefix to put before group(1) from the
        success regexp to build self._prompt, if find_prompt is true.
      find_prompt_suffix: A string, appended after group(1) (and before any
        group(2) mode capture) when building self._prompt.
    """
    self._connect_timeout = timeout or TIMEOUT_DEFAULT
    self._host = host
    self._username = username
    self._password = password
    self._success = success
    self._find_prompt = find_prompt
    self._connect_command = connect_command
    self._enable_password = enable_password
    # Default prefix anchors the discovered prompt at start-of-line.
    self._find_prompt_prefix = (
        r'(?:^|\n)' if find_prompt_prefix is None else find_prompt_prefix)
    self._find_prompt_suffix = find_prompt_suffix
    # The pexpect child; set by subclasses' Connect() implementations.
    self.child = None

  def _MaybeFindPrompt(self):
    """Enable if necessary, then perform prompt discovery if required."""
    if self._enable_password:
      # Enable before prompt discovery. Set a broad prompt expression.
      password_sent = False
      logging.debug('Enabling on %r', self._host)
      self.child.sendline('enable')
      while True:
        i = self.child.expect(
            [self._success, 'Password:', 'Bad secrets',
             'Password: timeout expired!'], timeout=10)
        if i == 0:
          # Found the prompt, we're enabled.
          logging.debug('We are enabled')
          break
        elif i == 1 and not password_sent:
          # Send the enable password exactly once.
          self.child.sendline(self._enable_password)
          logging.debug('Sent enable password to %r', self._host)
          password_sent = True
        else:
          logging.debug('Got index %d back from expect', i)
        # Sleep momentarily before expecting again, break buffer swap races.
        time.sleep(0.05)

    if self._find_prompt:
      # Build the prompt regexp from the capture group(s) of the last
      # successful match: group(1) is the host part; an optional group(2)
      # is treated as a mode suffix.
      host = re.escape(self.child.match.group(1))
      if len(self.child.match.groups()) > 1:
        mode = re.escape(self.child.match.group(2))
      else:
        mode = ''
      try:
        self._prompt = (
            self._find_prompt_prefix + host + self._find_prompt_suffix + mode)
        self.re_prompt = re.compile(self._prompt)
        logging.debug('%s: prompt set to %r', self._host, self._prompt)
      except IndexError:
        logging.debug('%s: find_prompt set but no capture group - skipping',
                      self._host)
    else:
      # No prompt discovery requested: match on the success pattern as-is.
      self.re_prompt = re.compile(self._success)
class SocketSpawn(pexpect.spawn):
  """Adapter letting pexpect.spawn drive an already-open Python socket.

  The socket's lifetime belongs to the caller: the close/terminate/kill
  style methods below are deliberate no-ops and no file is ever closed
  here.
  """

  # pylint: disable=g-bad-name
  def __init__(self, sock, *args, **kwargs):
    pexpect.spawn.__init__(self, None, *args, **kwargs)
    fd = sock.fileno()
    self.child_fd = fd
    self.closed = False
    self.name = '<file descriptor %d>' % fd

  def isalive(self):
    """Report whether the wrapped descriptor still looks valid."""
    fd = self.child_fd
    if fd == -1:
      return False
    try:
      os.fstat(fd)
    except OSError:
      return False
    return True

  def __del__(self):
    # No-op: the caller owns the socket.
    return

  def close(self):
    # No-op: the caller owns the socket.
    return

  def terminate(self, force=False):
    # No-op; accepts and ignores the force flag.
    _ = force
    return

  def kill(self, sig):
    # No-op; accepts and ignores the signal number.
    _ = sig
    return
class SocketConnection(Connection):
  """IPv4 TCP socket connection class."""

  def __init__(self, host, port, username, password=None, success=None,
               timeout=None, initial_chat=None, find_prompt=False,
               find_prompt_prefix=None):
    """Creates an IPv4 TCP socket connection.

    Args:
      host: As per parent.
      port: An int, the port number to connect to.
      username: As per parent.
      password: As per parent.
      success: As per parent.
      timeout: As per parent.
      initial_chat: A tuple of tuples, each tuple in this list is a string
        to expect from the socket and a response; the chat must occur in the
        exact order specified. Intended only for telnet option negotiation.
      find_prompt: As per parent.
      find_prompt_prefix: As per parent.
    """
    super(SocketConnection, self).__init__(
        host, username=username, password=password, success=success,
        timeout=timeout, find_prompt=find_prompt,
        find_prompt_prefix=find_prompt_prefix)
    self._port = port
    self._initial_chat = initial_chat
    self._connect_timeout = timeout or TIMEOUT_DEFAULT
    if success is None:
      # Default success pattern: the username followed by a '> ' prompt.
      self._success = self._username+r'.*> '

  def Connect(self):
    """Makes the connection."""
    self._sock = socket.socket()
    self._sock.settimeout(self._connect_timeout)
    try:
      self._sock.connect((self._host, self._port))
    except socket.timeout:
      raise TimeoutError(self._connect_timeout)
    # NOTE(review): e[1] below is Python-2-only exception indexing; on
    # Python 3 this would need e.args / str(e). Left as-is (py2 module).
    except socket.gaierror as e:
      raise ConnectionError('Lookup failure for %r: %s' % (self._host, e[1]))
    except socket.error as e:
      raise ConnectionError('Connect failure for %r: %s' % (self._host, e[1]))
    if self._initial_chat is not None:
      # Scripted expect/send exchange (e.g. telnet option negotiation),
      # performed in strict order before handing the socket to pexpect.
      try:
        for expected_recv, to_send in self._initial_chat:
          actual_recv = self._sock.recv(len(expected_recv))
          if actual_recv == expected_recv:
            self._sock.send(to_send)
          else:
            raise ConnectionError('Initial chat failure for %r: expected %r, '
                                  'got %r' % (self._host, expected_recv,
                                              actual_recv))
      except socket.timeout:
        logging.debug('Initial chat timeout for %r', self._host)
        raise TimeoutError(self._connect_timeout)
    # Blocking mode from here on; pexpect handles timeouts itself.
    self._sock.settimeout(None)
    self.child = SocketSpawn(self._sock, maxread=8192)
    self.child.timeout = self._connect_timeout
    logging.debug('Socket connected to %r:%s', self._host, self._port)
    responses = self.child.compile_pattern_list([
        self._success,
        r'[Ll]ogin|[Uu]ser[Nn]ame',
        r'[Pp]assword:',
        r'Permission denied|Authentication failed'])
    self.exit_list = self.child.compile_pattern_list(pexpect.EOF)
    # Drive the login dialogue until the success pattern is seen.
    while True:
      try:
        timeout = max(1, self._connect_timeout)
        pattern = self.child.expect_list(responses, timeout=timeout)
        logging.debug('Connect() matched responses[%d]', pattern)
        if pattern == 0:
          self._MaybeFindPrompt()
          break
        elif pattern == 1:
          self.child.send(self._username+'\r')
        elif pattern == 2:
          self.child.send(self._password+'\r')
        elif pattern == 3:
          raise ConnectionError('Permission denied for %r' % self._host)
        else:
          raise ConnectionError('Unexpected pattern %d' % pattern)
      except pexpect.TIMEOUT:
        raise TimeoutError(timeout)
      except pexpect.EOF as e:
        raise ConnectionError(str(e))
    return None
class SshSpawn(pexpect.spawn):
  """Wrapper around pexpect.spawn to use a Paramiko channel."""

  # pylint: disable=g-bad-name
  def __init__(self, channel, *args, **kwargs):
    pexpect.spawn.__init__(self, None, *args, **kwargs)
    self.channel = channel
    # No OS-level fd: all I/O goes through the Paramiko channel.
    self.child_fd = None
    self.closed = False
    self.name = '<ssh channel %s>' % channel.get_id()

  def isalive(self):
    """Report whether the channel's transport is still active."""
    try:
      return self.channel.get_transport().is_active()
    except AttributeError:
      # get_transport() returned None (channel already torn down).
      return False

  def read_nonblocking(self, size=1, timeout=None):
    """See parent. This actually may or may not block based on timeout."""
    if not self.isalive():
      raise pexpect.EOF('End Of File (EOF) in read() - Not alive.')
    if timeout == -1:
      # pexpect convention: -1 means 'use the spawn's default timeout'.
      timeout = self.timeout
    self.channel.settimeout(timeout)
    try:
      s = self.channel.recv(size)
    except socket.timeout:
      raise pexpect.TIMEOUT('Timeout (%s) exceeded in read().' % timeout)
    except paramiko.SSHException as e:
      raise pexpect.EOF('Paramiko exception: %s' % e)
    except EOFError:
      raise pexpect.EOF('Paramiko reported End Of File (EOF) in read()')
    if not s:
      # Empty read means the remote side closed the channel.
      self.flag_eof = 1
      raise pexpect.EOF('End Of File (EOF) in read().')
    return s

  def send(self, s):
    """Write to the channel; returns the number of bytes sent."""
    return self.channel.send(s)

  def __del__(self):
    # No-op: the caller owns the channel.
    return

  def close(self):
    # No-op: the caller owns the channel.
    return

  def terminate(self, force=False):
    # No-op; accepts and ignores the force flag.
    _ = force
    return

  def kill(self, sig):
    # No-op; accepts and ignores the signal number.
    _ = sig
    return
class HpSshSpawn(SshSpawn):
  """Wrapped pexpect.spawn to use a Paramiko channel and HP ANSI filters.

  This also deals with the annoying pager which cannot be disabled.
  """

  # ANSI character sequences to convert to a newline.
  NEWLINE_RE = re.compile('\x1B(?:\\[0m|E)')
  # All other ANSI character sequences (removed from the output).
  # Matches all strings containing \x1B, unless they contain a truncated ANSI
  # sequence at the end of the string.
  ANSI_RE = re.compile('\x1B([^[]|\\[[^@-~]*[@-~])')

  def __init__(self, channel, *args, **kwargs):
    SshSpawn.__init__(self, channel, *args, **kwargs)
    # Holds a trailing, possibly-truncated ANSI sequence between reads.
    self._read_nonblocking_buf = ''

  def _Filter(self, text):
    """Map 'newline' ANSI codes to newlines; strip all other ANSI codes."""
    text = re.sub(self.NEWLINE_RE, '\n', text)
    text = re.sub(self.ANSI_RE, '', text)
    logging.debug('Filtered: %r', text)
    return text

  def read_nonblocking(self, size=1, timeout=None):
    """Read, handling terminal control input from an HP ProCurve.

    This may or may not actually block, as per its parent.

    Args:
      size: An int, the minimum size block to return.
      timeout: An optional float, wait only timeout seconds at most.

    Returns:
      A string, the filtered output.
    """
    start = time.time()
    if timeout == -1:
      timeout = self.timeout
    while True:
      if timeout and time.time() > start + timeout:
        return ''
      # BUGFIX: the read from the underlying channel was missing, so
      # in_data was referenced before assignment and the first loop
      # iteration raised UnboundLocalError.
      in_data = SshSpawn.read_nonblocking(self, size, timeout)
      logging.debug('Unfiltered: %r', in_data)
      # Prepend any truncated ANSI sequence held over from the last read.
      if in_data and self._read_nonblocking_buf:
        logging.debug('Prepending data: %r', self._read_nonblocking_buf)
        in_data = self._read_nonblocking_buf + in_data
        self._read_nonblocking_buf = ''
      filtered = self._Filter(in_data)
      # If a partial escape sequence remains at the end, stash it for the
      # next read and return only the clean prefix.
      escape_location = filtered.find('\x1B')
      if escape_location != -1:
        logging.debug('Partial ANSI tag in filtered data: %r', filtered)
        self._read_nonblocking_buf = filtered[escape_location:]
        filtered = filtered[:escape_location]
      if filtered:
        return filtered
class ParamikoSshConnection(Connection):
  """Base class for SSH connections using Paramiko."""

  def __init__(self, host, username, password=None, success=None,
               timeout=None, find_prompt=False, ssh_keys=None,
               enable_password=None, ssh_client=None, find_prompt_prefix=None):
    """Initializer.

    Args:
      host: As per parent.
      username: As per parent.
      password: As per parent.
      success: As per parent.
      timeout: As per parent.
      find_prompt: As per parent.
      ssh_keys: A tuple of strings, SSH private keys (optional; may be None).
      enable_password: As per parent.
      ssh_client: A instance of an object that implements an SSH client.
      find_prompt_prefix: As per parent.
    """
    super(ParamikoSshConnection, self).__init__(
        host, username, password, success, None, timeout, find_prompt,
        enable_password=enable_password, find_prompt_prefix=find_prompt_prefix)
    if success is None:
      # Default success pattern: the username followed by a '> ' prompt.
      self._success = self._username + r'.*> '
    self.ssh_client = ssh_client
    self._ssh_client = None
    self._ssh_keys = ssh_keys or ()
    # Subclasses (e.g. HpSshFilterConnection) override this with their own
    # spawn wrapper after calling this initializer. The original code also
    # checked 'if self._spawn is None' immediately after this unconditional
    # assignment; that branch was unreachable dead code and was removed.
    self._spawn = SshSpawn

  def Connect(self):
    """Makes the connection.

    We can have an instance of this class without being connected to the
    device, e.g. after a disconnect. Hence setting up the actual SSH connection
    should happen in this method, not in the constructor.

    Returns:
      None.

    Raises:
      ConnectionError: the SSH setup or the initial expect failed.
      TimeoutError: the initial prompt was not seen in time.
    """
    try:
      if self.ssh_client:
        # An SSH client was provided. Use it.
        self._ssh_client = self.ssh_client.Connect(
            hostname=self._host,
            username=self._username,
            password=self._password,
            ssh_keys=self._ssh_keys,
            timeout=self._connect_timeout)
      else:
        # The Connect() function from the sshclient module is a factory that
        # returns a paramiko.SSHClient instance.
        self._ssh_client = sshclient.Connect(
            hostname=self._host,
            username=self._username,
            password=self._password,
            ssh_keys=self._ssh_keys,
            timeout=self._connect_timeout)
    except (exceptions.ConnectError, exceptions.AuthenticationError) as e:
      raise ConnectionError(str(e))

    # We are connected. Now set up pexpect.
    logging.debug('SETTING UP PEXPECT')
    try:
      ssh_channel = self._ssh_client.invoke_shell()
      logging.debug('INVOKED A SHELL')
      ssh_channel.set_combine_stderr(True)
      logging.debug('COMBINED STDERR')
      self.child = self._spawn(ssh_channel, maxread=8192)
      logging.debug('SPAWNED')
      timeout = max(1, self._connect_timeout)
      pattern = self.child.expect([self._success], timeout=timeout)
      logging.debug('GOT PATTERN: %s', pattern)
      if pattern == 0:
        self._MaybeFindPrompt()
    except pexpect.TIMEOUT:
      raise TimeoutError(timeout)
    except pexpect.EOF as e:
      raise ConnectionError(str(e))
    except paramiko.SSHException as e:
      msg = 'SSHException connecting to %r: %s' % (self._host, e)
      raise ConnectionError(msg)

    # Used by _Disconnect in ftos.py and ios.py.
    self.exit_list = self.child.compile_pattern_list(pexpect.EOF)
    return None
class HpSshFilterConnection(ParamikoSshConnection):
  """Creates an SSH connection to an HP Switch with terminal escape filtering.

  This filters terminal escape sequences seen on the Hewlett-Packard ProCurve
  ethernet switches.
  """

  def __init__(self, host, username, password=None, success=None,
               timeout=None, find_prompt=False, ssh_keys=None,
               enable_password=None, ssh_client=None, find_prompt_prefix=None):
    """Initializer.

    All arguments are as per ParamikoSshConnection. The only difference is
    that the pexpect spawn class is replaced with HpSshSpawn, which strips
    the ProCurve terminal escape sequences from the stream.
    """
    super(HpSshFilterConnection, self).__init__(
        host, username, password, success, timeout, find_prompt,
        ssh_keys=ssh_keys, enable_password=enable_password,
        ssh_client=ssh_client, find_prompt_prefix=find_prompt_prefix)
    # Use the escape-filtering spawn class instead of the default one.
    self._spawn = HpSshSpawn

  def _MaybeFindPrompt(self):
    """Perform real login and then enable if we have an enable password.

    This always runs for HP, no matter the state of self._find_prompt.
    On success, self.re_prompt is compiled from the hostname we matched.

    Raises:
      ConnectionError: The session timed out or closed during login/enable.
    """
    self._prompt = r'(?:^|\n|\r)([A-Za-z0-9\._-]+)(?:>|#) '
    # Shake out the prompt. We may be facing a Password prompt or
    # a 'Press any key to continue' prompt.
    self.child.send('\r')
    # Only send the password once.
    password_sent = False
    try:
      # Login.
      while True:
        logging.debug('Expecting prompt %r', self._prompt)
        compiled_regexes = self.child.compile_pattern_list(
            [self._prompt, r'Press any key to continue',
             'Password:', 'Invalid password',
             'Unable to verify password'])
        i = self.child.expect(compiled_regexes, timeout=10)
        if i == 0:
          # Pin the prompt down to the exact hostname we just matched.
          re_str = (re.escape(self.child.match.group(1)) +
                    r'(?:>|#) ')
          logging.debug('Prompt set to %r', re_str)
          self.re_prompt = re.compile(re_str)
          break
        elif i == 1:
          logging.debug('Pressing any key (space)')
          self.child.send(' ')
        elif i == 2 and not password_sent:
          # Send the password only once.
          try:
            self.child.sendline(self._password)
            logging.debug('Sent user password (again) to %r', self._host)
            password_sent = True
          except (pexpect.TIMEOUT, pexpect.EOF) as e:
            self._ssh_client = None
            raise ConnectionError(str(e))
        else:
          # Reached for i == 3 ('Invalid password'), i == 4
          # ('Unable to verify password'), or a repeated 'Password:' prompt
          # after the password was already sent. Bug fix: the previous
          # condition ('i <= 3 and i < 5') never matched i == 4, so an
          # 'Unable to verify password' response spun in this loop silently.
          logging.error('CONNECT_ERROR Incorrect user password on %r',
                        self._host)
        # Sleep momentarily before expecting again to break buffer swap races.
        time.sleep(0.05)

      # Enable.
      password_sent = False
      logging.debug('Enabling for HP on %r', self._host)
      self.child.sendline('enable')
      while True:
        i = self.child.expect([self._prompt, 'Password:',
                               'Invalid password',
                               'Unable to verify password'], timeout=10)
        if i == 0:
          # Found the prompt, we're enabled.
          break
        elif i == 1 and not password_sent:
          # Prefer the dedicated enable password; fall back to the login one.
          if self._enable_password is not None:
            self.child.sendline(self._enable_password)
            logging.debug('Sent enable password to %r', self._host)
          else:
            self.child.sendline(self._password)
            logging.debug('Sent user password to %r', self._host)
          password_sent = True
        else:
          # Any remaining match here means the password was rejected
          # (equivalent to the old 'i <= 3 and i < 5' condition, which was
          # always true for the cases that reached it).
          logging.error('CONNECT_ERROR Incorrect user password on %r',
                        self._host)
        # Sleep momentarily before expecting again to break buffer swap races.
        time.sleep(0.05)
    except (pexpect.TIMEOUT, pexpect.EOF) as e:
      self._ssh_client = None
      raise ConnectionError(str(e))
class ScpPutConnection(Connection):
  """Copies a file to a remote device via SCP (RCP over SSH)."""

  def __init__(self, host, username, password=None):
    """Initializer.

    Args:
      host: As per parent.
      username: As per parent.
      password: As per parent.
    """
    super(ScpPutConnection, self).__init__(host, username, password)
    # Connect eagerly; the resulting transport is reused by every Copy().
    client = sshclient.Connect(hostname=self._host,
                               username=self._username,
                               password=self._password)
    self._ssh_client = client
    self.transport = client.get_transport()

  def Copy(self, source_data, destination_file):
    """Handles the SCP file copy.

    Args:
      source_data: The source data to copy as a string.
      destination_file: The file on the remote device.

    Raises:
      ScpError: There was an error copying the file.
    """
    try:
      sshclient.ScpPut(self.transport, source_data, destination_file,
                       self._connect_timeout)
    except sshclient.ScpError as e:
      raise ScpError('SCP put failed: %s: %s' % (e.__class__.__name__, e))
| |
#!/usr/bin/env python
#
# Azure Linux extension
#
# Copyright (c) Microsoft Corporation
# All rights reserved.
# MIT License
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
# Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from xml.etree import ElementTree as ET
import Utils.LadDiagnosticUtil as LadUtil
from Utils.lad_exceptions import LadLoggingConfigException
import Utils.mdsd_xml_templates as mxt
from Utils.omsagent_util import get_syslog_ng_src_name
syslog_src_name = 'mdsd.syslog'
class LadLoggingConfig:
"""
Utility class for obtaining syslog (rsyslog or syslog-ng) configurations for use with fluentd
(currently omsagent), and corresponding omsagent & mdsd configurations, based on the LAD 3.0
syslog config schema. This class also generates omsagent (fluentd) config for LAD 3.0's fileLogs settings
(using the fluentd tail plugin).
"""
    def __init__(self, syslogEvents, fileLogs, sinksConfig, pkey_path, cert_path, encrypt_secret):
        """
        Constructor to receive/store necessary LAD settings for the desired configuration generation.

        :param dict syslogEvents: LAD 3.0 "ladCfg" - "syslogEvents" JSON object, or a False object if it's not given
                                  in the extension settings. An example is as follows:

                                  "ladCfg": {
                                      "syslogEvents" : {
                                          "sinks": "SyslogSinkName0",
                                          "syslogEventConfiguration": {
                                              "facilityName1": "minSeverity1",
                                              "facilityName2": "minSeverity2"
                                          }
                                      }
                                  }

                                  Only the JSON object corresponding to "syslogEvents" key should be passed.

                                  facilityName1/2 is a syslog facility name (e.g., "LOG_USER", "LOG_LOCAL0").
                                  minSeverity1/2 is a syslog severity level (e.g., "LOG_ERR", "LOG_CRIT") or "NONE".
                                  "NONE" means no logs from the facility will be captured (thus it's equivalent to
                                  not specifying the facility at all).

        :param dict fileLogs: LAD 3.0 "fileLogs" JSON object, or a False object if it's not given in the ext settings.
                              An example is as follows:

                              "fileLogs": {
                                  "fileLogConfiguration": [
                                      {
                                          "file": "/var/log/mydaemonlog",
                                          "table": "MyDaemonEvents",
                                          "sinks": "FilelogSinkName1",
                                      },
                                      {
                                          "file": "/var/log/myotherdaemonelog",
                                          "table": "MyOtherDaemonEvents",
                                          "sinks": "FilelogSinkName2"
                                      }
                                  ]
                              }

                              Only the JSON array corresponding to "fileLogConfiguration" key should be passed.

                              "file" is the full path of the log file to be watched and captured. "table" is for the
                              Azure storage table into which the lines of the watched file will be placed (one row per line).
        :param LadUtil.SinkConfiguration sinksConfig: SinkConfiguration object that's created out of "sinksConfig"
                              LAD 3.0 JSON setting. Refer to LadUtil.SinkConfiguraiton documentation.
        :param str pkey_path: Path to the VM's private key that should be passed to mdsd XML for decrypting encrypted
                              secrets (EH SAS URL)
        :param str cert_path: Path to the VM's certificate that should be used to encrypt secrets (EH SAS URL)
        :param encrypt_secret: Function to encrypt a secret (string, 2nd param) with the provided cert path param (1st)
        """
        self._syslogEvents = syslogEvents
        self._fileLogs = fileLogs
        self._sinksConfig = sinksConfig
        self._pkey_path = pkey_path
        self._cert_path = cert_path
        self._encrypt_secret = encrypt_secret
        self._fac_sev_map = None
        try:
            # Create facility-severity map. E.g.: { "LOG_USER" : "LOG_ERR", "LOG_LOCAL0", "LOG_CRIT" }
            if self._syslogEvents:
                self._fac_sev_map = self._syslogEvents['syslogEventConfiguration']
            self._syslog_disabled = not self._fac_sev_map  # A convenience predicate
            if self._fileLogs:
                # Convert the 'fileLogs' JSON object array into a Python dictionary of 'file' - 'table'
                # E.g., [{ 'file': '/var/log/mydaemonlog1', 'table': 'MyDaemon1Events', 'sinks': 'File1Sink'},
                #        { 'file': '/var/log/mydaemonlog2', 'table': 'MyDaemon2Events', 'sinks': 'File2SinkA,File2SinkB'}]
                self._file_table_map = dict([(entry['file'], entry['table'] if 'table' in entry else '')
                                             for entry in self._fileLogs])
                self._file_sinks_map = dict([(entry['file'], entry['sinks'] if 'sinks' in entry else '')
                                             for entry in self._fileLogs])
            # Lazily-generated config caches; each is built on first getter call.
            self._rsyslog_config = None
            self._syslog_ng_config = None
            self._mdsd_syslog_config = None
            self._mdsd_telegraf_config = None
            self._mdsd_filelog_config = None
        except KeyError as e:
            raise LadLoggingConfigException("Invalid setting name provided (KeyError). Exception msg: {0}".format(e))
def get_rsyslog_config(self):
"""
Returns rsyslog config (for use with omsagent) that corresponds to the syslogEvents or the syslogCfg
JSON object given in the construction parameters.
:rtype: str
:return: rsyslog config string that should be appended to /etc/rsyslog.d/95-omsagent.conf (new rsyslog)
or to /etc/rsyslog.conf (old rsyslog)
"""
if not self._rsyslog_config:
if self._syslog_disabled:
self._rsyslog_config = ''
else:
# Generate/save/return rsyslog config string for the facility-severity pairs.
# E.g.: "user.err @127.0.0.1:%SYSLOG_PORT%\nlocal0.crit @127.0.0.1:%SYSLOG_PORT%\n'
self._rsyslog_config = \
'\n'.join('{0}.{1} @127.0.0.1:%SYSLOG_PORT%'.format(syslog_name_to_rsyslog_name(fac),
syslog_name_to_rsyslog_name(sev))
for fac, sev in self._fac_sev_map.iteritems()) + '\n'
return self._rsyslog_config
def get_syslog_ng_config(self):
"""
Returns syslog-ng config (for use with omsagent) that corresponds to the syslogEvents or the syslogCfg
JSON object given in the construction parameters.
:rtype: str
:return: syslog-ng config string that should be appended to /etc/syslog-ng/syslog-ng.conf
"""
if not self._syslog_ng_config:
if self._syslog_disabled:
self._syslog_ng_config = ''
else:
# Generate/save/return syslog-ng config string for the facility-severity pairs.
# E.g.: "log { source(src); filter(f_LAD_oms_f_user); filter(f_LAD_oms_ml_err); destination(d_LAD_oms); };\nlog { source(src); filter(f_LAD_oms_f_local0); filter(f_LAD_oms_ml_crit); destination(d_LAD_oms); };\n"
self._syslog_ng_config = \
'\n'.join('log {{ source({0}); filter(f_LAD_oms_f_{1}); filter(f_LAD_oms_ml_{2}); '
'destination(d_LAD_oms); }};'.format(get_syslog_ng_src_name(),
syslog_name_to_rsyslog_name(fac),
syslog_name_to_rsyslog_name(sev))
for fac, sev in self._fac_sev_map.iteritems()) + '\n'
return self._syslog_ng_config
def parse_pt_duration(self, duration):
"""
Convert the ISO8601 Time Duration into seconds.
for ex PT2H3M20S will be 7400 seconds
:param duration: The ISO8601 duration string to be converted into seconds
"""
total_seconds = 0
count = ""
for ch in duration:
if ch.lower() == 'h':
total_seconds += int(count)*3600
count = ""
elif ch.lower() == 'm':
total_seconds += int(count)*60
count = ""
elif ch.lower() == 's':
total_seconds += int(count)
count = ""
elif ch in ["0","1","2","3","4","5","6","7","8","9"]:
count += ch
return str(total_seconds)+"s"
def parse_lad_perf_settings(self, ladconfig):
"""
Parses the LAD json config to create a list of entries per metric along with it's configuration
as required by telegraf config parser. See example below -
:param ladconfig: The lad json config element
Sample OMI metric json config can be of two types, taken from .settings file
It can have sampleRate key, if not then it defaults to sampleRateInSeconds key in the larger lad_cfg element
{
u'counterSpecifier': u'/builtin/network/packetstransmitted',
u'counter': u'packetstransmitted',
u'class': u'network',
u'sampleRate': u'PT15S',
u'type': u'builtin',
u'annotation': [{
u'locale': u'en-us',
u'displayName': u'Packets sent'
}],
u'unit': u'Count'
}
"annotation": [
{
"displayName": "Disk write guest OS",
"locale": "en-us"
}
],
"class": "disk",
"condition": "IsAggregate=TRUE",
"counter": "writebytespersecond",
"counterSpecifier": "/builtin/disk/writebytespersecond",
"type": "builtin",
"unit": "BytesPerSecond"
},
"""
if not ladconfig:
return []
data = []
default_sample_rate = "15s" #Lowest supported time interval
if "sampleRateInSeconds" in ladconfig and ladconfig["sampleRateInSeconds"] != "":
default_sample_rate = str(ladconfig["sampleRateInSeconds"]) + "s" #Example, converting 15 to 15s
if 'diagnosticMonitorConfiguration' in ladconfig and "performanceCounters" in ladconfig['diagnosticMonitorConfiguration']:
data = ladconfig['diagnosticMonitorConfiguration']["performanceCounters"]
else:
return []
if "performanceCounterConfiguration" not in data or len(data["performanceCounterConfiguration"]) == 0:
return []
parsed_settings = []
perfconf = data["performanceCounterConfiguration"]
for item in perfconf:
counter = {}
counter["displayName"] = item["class"].strip().lower() + "->" + item["annotation"][0]["displayName"].strip().lower()
if "sampleRate" in item:
counter["interval"] = self.parse_pt_duration(item["sampleRate"]) #Converting ISO8601 to seconds string
else:
counter["interval"] = default_sample_rate
parsed_settings.append(counter)
"""
Sample output after parsing the OMI metric
[
{
"displayName" : "Network->Packets sent",
"interval" : "15s"
},
]
"""
return parsed_settings
    def get_mdsd_syslog_config(self, disableStorageAccount = False):
        """
        Get mdsd XML config string for syslog use with omsagent in LAD 3.0.

        :param bool disableStorageAccount: If True, skip the default 'LinuxSyslog' Azure Table RouteEvent.
        :rtype: str
        :return: XML string that should be added to the mdsd config XML tree for syslog use with omsagent in LAD 3.0.
        """
        # NOTE(review): the result is cached after the first call, so a later call with a
        # different disableStorageAccount value would return the first call's result —
        # confirm this is only ever invoked once per instance.
        if not self._mdsd_syslog_config:
            self._mdsd_syslog_config = self.__generate_mdsd_syslog_config(disableStorageAccount)
        return self._mdsd_syslog_config
    def __generate_mdsd_syslog_config(self, disableStorageAccount = False):
        """
        Helper method to generate oms_mdsd_syslog_config.

        Produces one mdsd <Source> (named 'mdsd.syslog'), RouteEvent elements
        for each destination (the default 'LinuxSyslog' Azure Table unless
        disableStorageAccount, plus one per extra sink), and
        EventStreamingAnnotation entries for EventHub sinks.
        """
        if self._syslog_disabled:
            return ''
        # For basic syslog conf (single dest table): Source name is unified as 'mdsd.syslog' and
        # dest table (eventName) is 'LinuxSyslog'. This is currently the only supported syslog conf scheme.
        syslog_routeevents = ''
        if not disableStorageAccount:
            syslog_routeevents = mxt.per_RouteEvent_tmpl.format(event_name='LinuxSyslog', opt_store_type='')
        # Add RouteEvent elements for specified "sinks" for "syslogEvents" feature
        # Also add EventStreamingAnnotation for EventHub sinks
        syslog_eh_urls = ''
        for sink_name in LadUtil.getSinkList(self._syslogEvents):
            if sink_name == 'LinuxSyslog':
                raise LadLoggingConfigException("'LinuxSyslog' can't be used as a sink name. "
                                                "It's reserved for default Azure Table name for syslog events.")
            routeevent, eh_url = self.__generate_routeevent_and_eh_url_for_extra_sink(sink_name,
                                                                                      syslog_src_name)
            syslog_routeevents += routeevent
            syslog_eh_urls += eh_url
        mdsd_event_source = ''
        if syslog_routeevents:  # Do not add MdsdEventSource element if there's no associated RouteEvent generated.
            mdsd_event_source = mxt.per_MdsdEventSource_tmpl.format(source=syslog_src_name,
                                                                    routeevents=syslog_routeevents)
        return mxt.top_level_tmpl_for_logging_only.format(
            sources=mxt.per_source_tmpl.format(name=syslog_src_name), events=mdsd_event_source, eh_urls=syslog_eh_urls)
    def get_mdsd_telegraf_config(self, namespaces):
        """
        Get mdsd XML config string for telegraf use with mdsd in LAD 3.0.
        This method is called during config generation to create source tags for mdsd xml

        :param namespaces: The list of telegraf plugins being used to source the metrics requested by the user
        :rtype: str
        :return: XML string that should be added to the mdsd config XML tree for telegraf use with mdsd in LAD 3.0.
        """
        # NOTE(review): the result is cached after the first call, so a later call with a
        # different 'namespaces' list would return the first call's result — confirm
        # single-call usage.
        if not self._mdsd_telegraf_config:
            self._mdsd_telegraf_config = self.__generate_mdsd_telegraf_config(namespaces)
        return self._mdsd_telegraf_config
def __generate_mdsd_telegraf_config(self, namespaces):
"""
Helper method to generate mdsd_telegraf_config
"""
if len(namespaces) == 0:
return ''
telegraf_sources = ""
for plugin in namespaces:
# # For telegraf conf we create a Source for each of the measurements(plugins) sent from telegraf
lad_specific_storage_plugin = "storage-" + plugin
telegraf_sources += mxt.per_source_tmpl.format(name=lad_specific_storage_plugin)
return mxt.top_level_tmpl_for_logging_only.format(sources=telegraf_sources, events="", eh_urls="")
def __generate_routeevent_and_eh_url_for_extra_sink(self, sink_name, src_name):
"""
Helper method to generate one RouteEvent element for each extra sink given.
Also generates an EventStreamingAnnotation element for EventHub sinks.
:param str sink_name: The name of the sink for the RouteEvent.
:param str src_name: The name of the ingested source that should be used for EventStreamingAnnotation.
:rtype str,str:
:return: A pair of the XML RouteEvent element string for the sink and the EventHubStreamingAnnotation
XML string.
"""
sink = self._sinksConfig.get_sink_by_name(sink_name)
if not sink:
raise LadLoggingConfigException('Sink name "{0}" is not defined in sinksConfig'.format(sink_name))
sink_type = sink['type']
if not sink_type:
raise LadLoggingConfigException('Sink type for sink "{0}" is not defined in sinksConfig'.format(sink_name))
if sink_type == 'JsonBlob':
return mxt.per_RouteEvent_tmpl.format(event_name=sink_name,
opt_store_type='storeType="JsonBlob"'),\
'' # No EventStreamingAnnotation for JsonBlob
elif sink_type == 'EventHub':
if 'sasURL' not in sink:
raise LadLoggingConfigException('sasURL is not specified for EventHub sink_name={0}'.format(sink_name))
# For syslog/filelogs (ingested events), the source name should be used for EventStreamingAnnotation name.
eh_url = mxt.per_eh_url_tmpl.format(eh_name=src_name, key_path=self._pkey_path,
enc_eh_url=self._encrypt_secret(self._cert_path, sink['sasURL']))
return '', eh_url # No RouteEvent for logging event's EventHub sink
else:
raise LadLoggingConfigException('{0} sink type (for sink_name={1}) is not supported'.format(sink_type,
sink_name))
    def get_mdsd_filelog_config(self):
        """
        Get mdsd XML config string for filelog (tail) use with omsagent in LAD 3.0.
        The generated config is cached; subsequent calls return the cached string.

        :rtype: str
        :return: XML string that should be added to the mdsd config XML tree for filelog use with omsagent in LAD 3.0.
        """
        if not self._mdsd_filelog_config:
            self._mdsd_filelog_config = self.__generate_mdsd_filelog_config()
        return self._mdsd_filelog_config
    def __generate_mdsd_filelog_config(self):
        """
        Helper method to generate oms_mdsd_filelog_config.

        For every watched file this emits one mdsd <Source>, one RouteEvent
        per destination (the Azure Table from 'table' and/or extra 'sinks'),
        and EventStreamingAnnotation URLs for EventHub sinks.
        """
        if not self._fileLogs:
            return ''
        # Per-file source name is 'mdsd.filelog<.path.to.file>' where '<.path.to.file>' is a full path
        # with all '/' replaced by '.'.
        filelogs_sources = ''
        filelogs_mdsd_event_sources = ''
        filelogs_eh_urls = ''
        # Iterate in sorted order so the generated XML is deterministic.
        for file_key in sorted(self._file_table_map):
            if not self._file_table_map[file_key] and not self._file_sinks_map[file_key]:
                raise LadLoggingConfigException('Neither "table" nor "sinks" defined for file "{0}"'.format(file_key))
            source_name = 'mdsd.filelog{0}'.format(file_key.replace('/', '.'))
            filelogs_sources += mxt.per_source_tmpl.format(name=source_name)
            per_file_routeevents = ''
            if self._file_table_map[file_key]:
                per_file_routeevents += mxt.per_RouteEvent_tmpl.format(event_name=self._file_table_map[file_key], opt_store_type='')
            if self._file_sinks_map[file_key]:
                # 'sinks' is a comma-separated list of sink names.
                for sink_name in self._file_sinks_map[file_key].split(','):
                    routeevent, eh_url = self.__generate_routeevent_and_eh_url_for_extra_sink(sink_name, source_name)
                    per_file_routeevents += routeevent
                    filelogs_eh_urls += eh_url
            if per_file_routeevents:  # Do not add MdsdEventSource element if there's no associated RouteEvent generated.
                filelogs_mdsd_event_sources += \
                    mxt.per_MdsdEventSource_tmpl.format(source=source_name, routeevents=per_file_routeevents)
        return mxt.top_level_tmpl_for_logging_only.format(sources=filelogs_sources, events=filelogs_mdsd_event_sources,
                                                          eh_urls=filelogs_eh_urls)
    def get_fluentd_syslog_src_config(self):
        """
        Get Fluentd's syslog source config that should be used for this LAD's syslog configs.

        :rtype: str
        :return: Fluentd config string that should be overwritten to
                 /etc/opt/microsoft/omsagent/LAD/conf/omsagent.d/syslog.conf
                 (after replacing '%SYSLOG_PORT%' with the assigned/picked port number)
        """
        fluentd_syslog_src_config = """
<source>
  type syslog
  port %SYSLOG_PORT%
  bind 127.0.0.1
  protocol_type udp
  include_source_host true
  tag mdsd.syslog
</source>

# Generate fields expected for existing mdsd syslog collection schema.
<filter mdsd.syslog.**>
  type record_transformer
  enable_ruby
  <record>
    # Fields for backward compatibility with Azure Shoebox V1 (Table storage)
    Ignore "syslog"
    Facility ${tag_parts[2]}
    Severity ${tag_parts[3]}
    EventTime ${time.strftime('%Y-%m-%dT%H:%M:%S%z')}
    SendingHost ${record["source_host"]}
    Msg ${record["message"]}
    # Rename 'host' key, as mdsd will add 'Host' for Azure Table and it'll be confusing
    hostname ${record["host"]}
  </record>
  remove_keys host,message,source_host # Renamed (duplicated) fields, so just remove
</filter>
"""
        # Empty config when no syslog collection is configured at all.
        return '' if self._syslog_disabled else fluentd_syslog_src_config
    def get_fluentd_filelog_src_config(self):
        """
        Get Fluentd's filelog (tail) source config that should be used for this LAD's fileLogs settings.

        :rtype: str
        :return: Fluentd config string that should be overwritten to
                 /etc/opt/microsoft/omsagent/LAD/conf/omsagent.d/file.conf
        """
        if not self._fileLogs:
            return ''
        fluentd_tail_src_config_template = """
# For all monitored files
<source>
  @type tail
  path {file_paths}
  pos_file /var/opt/microsoft/omsagent/LAD/tmp/filelogs.pos
  tag mdsd.filelog.*
  format none
  message_key Msg # LAD uses "Msg" as the field name
</source>

# Add FileTag field (existing LAD behavior)
<filter mdsd.filelog.**>
  @type record_transformer
  <record>
    FileTag ${{tag_suffix[2]}}
  </record>
</filter>
"""
        # All watched file paths are joined into one comma-separated 'path' value.
        return fluentd_tail_src_config_template.format(file_paths=','.join(self._file_table_map.keys()))
    def get_fluentd_out_mdsd_config(self):
        """
        Get Fluentd's out_mdsd output config that should be used for LAD.
        TODO This is not really syslog-specific, so should be moved outside from here.

        :rtype: str
        :return: Fluentd config string that should be overwritten to
                 /etc/opt/microsoft/omsagent/LAD/conf/omsagent.d/z_out_mdsd.conf
        """
        fluentd_out_mdsd_config_template = """
# Output to mdsd
<match mdsd.**>
  type mdsd
  log_level warn
  djsonsocket /var/run/mdsd/lad_mdsd_djson.socket  # Full path to mdsd dynamic json socket file
  acktimeoutms 5000  # max time in milli-seconds to wait for mdsd acknowledge response. If 0, no wait.
{tag_regex_cfg_line}  num_threads 1
  buffer_chunk_limit 1000k
  buffer_type file
  buffer_path /var/opt/microsoft/omsagent/LAD/state/out_mdsd*.buffer
  buffer_queue_limit 128
  flush_interval 10s
  retry_limit 3
  retry_wait 10s
</match>
"""
        # The tag-regex line is only included when syslog collection is enabled.
        tag_regex_cfg_line = '' if self._syslog_disabled \
            else r"""  mdsd_tag_regex_patterns [ "^mdsd\\.syslog" ]  # fluentd tag patterns whose match will be used as mdsd source name
"""
        return fluentd_out_mdsd_config_template.format(tag_regex_cfg_line=tag_regex_cfg_line)
# Maps LAD setting names ('LOG_USER', 'LOG_ERR', ...) to the short names that
# rsyslog/syslog-ng use in selectors and filter names.
syslog_name_to_rsyslog_name_map = {
    # facilities
    'LOG_AUTH': 'auth',
    'LOG_AUTHPRIV': 'authpriv',
    'LOG_CRON': 'cron',
    'LOG_DAEMON': 'daemon',
    'LOG_FTP': 'ftp',
    'LOG_KERN': 'kern',
    'LOG_LOCAL0': 'local0',
    'LOG_LOCAL1': 'local1',
    'LOG_LOCAL2': 'local2',
    'LOG_LOCAL3': 'local3',
    'LOG_LOCAL4': 'local4',
    'LOG_LOCAL5': 'local5',
    'LOG_LOCAL6': 'local6',
    'LOG_LOCAL7': 'local7',
    'LOG_LPR': 'lpr',
    'LOG_MAIL': 'mail',
    'LOG_NEWS': 'news',
    'LOG_SYSLOG': 'syslog',
    'LOG_USER': 'user',
    'LOG_UUCP': 'uucp',
    # severities
    'LOG_EMERG': 'emerg',
    'LOG_ALERT': 'alert',
    'LOG_CRIT': 'crit',
    'LOG_ERR': 'err',
    'LOG_WARNING': 'warning',
    'LOG_NOTICE': 'notice',
    'LOG_INFO': 'info',
    'LOG_DEBUG': 'debug'
}


def syslog_name_to_rsyslog_name(syslog_name):
    """
    Convert a syslog name (e.g., "LOG_USER") to the corresponding rsyslog name (e.g., "user")

    :param str syslog_name: A syslog name for a facility (e.g., "LOG_USER") or a severity (e.g., "LOG_ERR")
    :rtype: str
    :return: Corresponding rsyslog name (e.g., "user" or "err")
    :raises LadLoggingConfigException: If the name is neither '*' nor a known syslog name.
    """
    if syslog_name == '*':
        # We accept '*' as a facility name (also as a severity name, though it's not required)
        # to allow customers to collect for reserved syslog facility numeric IDs (12-15)
        return '*'
    rsyslog_name = syslog_name_to_rsyslog_name_map.get(syslog_name)
    if rsyslog_name is None:
        raise LadLoggingConfigException('Invalid syslog name given: {0}'.format(syslog_name))
    return rsyslog_name
def copy_sub_elems(dst_xml, src_xml, path):
    """
    Copy all children of the element at *path* in src_xml into the element
    at the same path in dst_xml.

    :param xml.etree.ElementTree.ElementTree dst_xml: Python xml tree object to which sub-elements will be copied.
    :param xml.etree.ElementTree.ElementTree src_xml: Python xml tree object from which sub-elements will be copied.
    :param str path: The path of the element whose sub-elements will be copied.
    :return: None. dst_xml will be updated with copied sub-elements
    """
    src_parent = src_xml.find(path)
    if src_parent is None:
        # Nothing to copy: the source tree has no element at this path.
        return
    dst_parent = dst_xml.find(path)
    for child in src_parent:
        dst_parent.append(child)
def copy_source_mdsdevent_eh_url_elems(mdsd_xml_tree, mdsd_logging_xml_string):
    """
    Merge generated logging (syslog/filelog) mdsd config XML elements into the mdsd config XML tree.

    Copies the children of Sources, Events/MdsdEvents and EventStreamingAnnotations
    (i.e., Source, MdsdEventSource and EventStreamingAnnotation elements) from the
    generated XML string into the destination tree.

    :param xml.etree.ElementTree.ElementTree mdsd_xml_tree: Python xml.etree.ElementTree object that's generated from mdsd config XML template
    :param str mdsd_logging_xml_string: XML string containing the generated logging (syslog/filelog) mdsd config XML elements.
    :return: None. mdsd_xml_tree object will contain the added elements.
    """
    if not mdsd_logging_xml_string:
        return
    logging_xml_tree = ET.ElementTree(ET.fromstring(mdsd_logging_xml_string))
    # Children of each of these parent elements are appended to the same
    # parent element in the destination tree.
    for parent_path in ('Sources', 'Events/MdsdEvents', 'EventStreamingAnnotations'):
        copy_sub_elems(mdsd_xml_tree, logging_xml_tree, parent_path)
| |
"""
base execnet gateway code sent to the other side for bootstrapping.
NOTE: aims to be compatible to Python 2.5-3.X, Jython and IronPython
:copyright: 2004-2015
:authors:
- Holger Krekel
- Armin Rigo
- Benjamin Peterson
- Ronny Pfannschmidt
- many others
"""
from __future__ import with_statement
import sys
import os
import weakref
import traceback
import struct
# NOTE that we want to avoid try/except style importing
# to avoid setting sys.exc_info() during import
#
# True when running under Python 3; selects the matching compat shims below.
ISPY3 = sys.version_info >= (3, 0)
if ISPY3:
    from io import BytesIO
    # exec() of a string keeps this module parseable on both major versions:
    # the py3-only and py2-only raise/exec syntaxes live inside strings.
    exec("def do_exec(co, loc): exec(co, loc)\n"
         "def reraise(cls, val, tb): raise val\n")
    unicode = str
    _long_type = int
    from _thread import interrupt_main
    SUBPROCESS32 = False
else:
    from StringIO import StringIO as BytesIO
    exec("def do_exec(co, loc): exec co in loc\n"
         "def reraise(cls, val, tb): raise cls, val, tb\n")
    bytes = str
    _long_type = long
    try:
        from thread import interrupt_main
    except ImportError:
        # e.g. Jython/IronPython builds without the thread module
        interrupt_main = None
    try:
        import subprocess32  # NOQA
        SUBPROCESS32 = True
    except ImportError:
        SUBPROCESS32 = False
    # Clear sys.exc_info() so the failed import is not left set during import
    # (Python 2-only API).
    sys.exc_clear()
# f = open("/tmp/execnet-%s" % os.getpid(), "w")
# def log_extra(*msg):
# f.write(" ".join([str(x) for x in msg]) + "\n")
class EmptySemaphore:
    """No-op semaphore stand-in used when no spawn limit is requested."""

    def acquire(self):
        return None

    def release(self):
        return None
def get_execmodel(backend):
    """Return an ExecModel for *backend* ('thread', 'eventlet' or 'gevent').

    An object that already looks like an execmodel (has a 'backend'
    attribute) is passed through unchanged. The returned ExecModel resolves
    its modules lazily on first attribute access (see __getattr__ below).
    """
    if hasattr(backend, "backend"):
        return backend
    if backend == "thread":
        # Each entry maps an attribute name to a list of "module::attr"
        # (or plain module) candidates, tried in order until one imports.
        importdef = {
            'get_ident': ['thread::get_ident', '_thread::get_ident'],
            '_start_new_thread': ['thread::start_new_thread',
                                  '_thread::start_new_thread'],
            'threading': ["threading"],
            'queue': ["queue", "Queue"],
            'sleep': ['time::sleep'],
            'subprocess': ['subprocess32' if SUBPROCESS32 else 'subprocess'],
            'socket': ['socket'],
            '_fdopen': ['os::fdopen'],
            '_lock': ['threading'],
            '_event': ['threading'],
        }

        def exec_start(self, func, args=()):
            self._start_new_thread(func, args)

    elif backend == "eventlet":
        importdef = {
            'get_ident': ['eventlet.green.thread::get_ident'],
            '_spawn_n': ['eventlet::spawn_n'],
            'threading': ['eventlet.green.threading'],
            'queue': ["eventlet.queue"],
            'sleep': ['eventlet::sleep'],
            'subprocess': ['eventlet.green.subprocess'],
            'socket': ['eventlet.green.socket'],
            '_fdopen': ['eventlet.green.os::fdopen'],
            '_lock': ['eventlet.green.threading'],
            '_event': ['eventlet.green.threading'],
        }

        def exec_start(self, func, args=()):
            self._spawn_n(func, *args)

    elif backend == "gevent":
        importdef = {
            'get_ident': ['gevent.thread::get_ident'],
            '_spawn_n': ['gevent::spawn'],
            'threading': ['threading'],
            'queue': ["gevent.queue"],
            'sleep': ['gevent::sleep'],
            'subprocess': ['gevent.subprocess'],
            'socket': ['gevent.socket'],
            # XXX
            '_fdopen': ['gevent.fileobject::FileObjectThread'],
            '_lock': ['gevent.lock'],
            '_event': ['gevent.event'],
        }

        def exec_start(self, func, args=()):
            self._spawn_n(func, *args)

    else:
        raise ValueError("unknown execmodel %r" % (backend,))

    class ExecModel:
        def __init__(self, name):
            self._importdef = importdef
            self.backend = name
            self._count = 0

        def __repr__(self):
            return "<ExecModel %r>" % self.backend

        def __getattr__(self, name):
            # Lazy import: resolve the first importable candidate, then cache
            # it as a real instance attribute so __getattr__ runs only once
            # per name.
            locs = self._importdef.get(name)
            if locs is None:
                raise AttributeError(name)
            for loc in locs:
                parts = loc.split("::")
                loc = parts.pop(0)
                try:
                    mod = __import__(loc, None, None, "__doc__")
                except ImportError:
                    pass
                else:
                    if parts:
                        mod = getattr(mod, parts[0])
                    setattr(self, name, mod)
                    return mod
            raise AttributeError(name)

        # Bind the backend-specific spawn closure selected above as start().
        start = exec_start

        def fdopen(self, fd, mode, bufsize=1):
            return self._fdopen(fd, mode, bufsize)

        def WorkerPool(self, hasprimary=False):
            return WorkerPool(self, hasprimary=hasprimary)

        def Semaphore(self, size=None):
            # No size means "unlimited": a no-op semaphore.
            if size is None:
                return EmptySemaphore()
            return self._lock.Semaphore(size)

        def Lock(self):
            return self._lock.RLock()

        def RLock(self):
            return self._lock.RLock()

        def Event(self):
            event = self._event.Event()
            if sys.version_info < (2, 7):
                # patch wait function to return event state instead of None
                real_wait = event.wait

                def wait(timeout=None):
                    real_wait(timeout=timeout)
                    return event.isSet()

                event.wait = wait
            return event

        def PopenPiped(self, args):
            PIPE = self.subprocess.PIPE
            return self.subprocess.Popen(args, stdout=PIPE, stdin=PIPE)

    return ExecModel(backend)
class Reply(object):
    """Holds the outcome of one function execution dispatched through
    WorkerPool.spawn().

    get() returns the function's result, re-raising (with traceback) any
    exception the function raised.
    """

    def __init__(self, task, threadmodel):
        self.task = task
        self._result_ready = threadmodel.Event()
        self.running = True

    def get(self, timeout=None):
        """Return the result of the asynchronous execution, re-raising
        (with its traceback) any exception the executed function raised."""
        self.waitfinish(timeout)
        if hasattr(self, "_result"):
            return self._result
        # No result was stored, so the task must have failed.
        reraise(*(self._excinfo[:3]))  # noqa

    def waitfinish(self, timeout=None):
        """Block until run() has completed; IOError on timeout."""
        finished = self._result_ready.wait(timeout)
        if not finished:
            raise IOError("timeout waiting for %r" % (self.task, ))

    def run(self):
        """Execute the task, storing either the result or the exc_info,
        then signal completion."""
        func, args, kwargs = self.task
        try:
            self._result = func(*args, **kwargs)
        except:
            # sys may be already None when shutting down the interpreter
            if sys is not None:
                self._excinfo = sys.exc_info()
        finally:
            self._result_ready.set()
            self.running = False
class WorkerPool(object):
""" A WorkerPool allows to spawn function executions
to threads, returning a reply object on which you
can ask for the result (and get exceptions reraised).
This implementation allows the main thread to integrate
itself into performing function execution through
calling integrate_as_primary_thread() which will return
when the pool received a trigger_shutdown().
"""
    def __init__(self, execmodel, hasprimary=False):
        """ by default allow unlimited number of spawns. """
        self.execmodel = execmodel
        self._running_lock = self.execmodel.Lock()
        # Reply objects currently executing; guarded by _running_lock.
        self._running = set()
        self._shuttingdown = False
        # Events set once the pool drains empty (see _perform_spawn).
        self._waitall_events = []
        if hasprimary:
            # Only the 'thread' execmodel supports integrating the calling
            # thread as an execution unit.
            if self.execmodel.backend != "thread":
                raise ValueError("hasprimary=True requires thread model")
            self._primary_thread_task_ready = self.execmodel.Event()
        else:
            self._primary_thread_task_ready = None
def integrate_as_primary_thread(self):
""" integrate the thread with which we are called as a primary
thread for executing functions triggered with spawn().
"""
assert self.execmodel.backend == "thread", self.execmodel
primary_thread_task_ready = self._primary_thread_task_ready
# interacts with code at REF1
while 1:
primary_thread_task_ready.wait()
reply = self._primary_thread_task
if reply is None: # trigger_shutdown() woke us up
break
self._perform_spawn(reply)
# we are concurrent with trigger_shutdown and spawn
with self._running_lock:
if self._shuttingdown:
break
primary_thread_task_ready.clear()
def trigger_shutdown(self):
with self._running_lock:
self._shuttingdown = True
if self._primary_thread_task_ready is not None:
self._primary_thread_task = None
self._primary_thread_task_ready.set()
def active_count(self):
return len(self._running)
def _perform_spawn(self, reply):
reply.run()
with self._running_lock:
self._running.remove(reply)
if not self._running:
while self._waitall_events:
waitall_event = self._waitall_events.pop()
waitall_event.set()
def _try_send_to_primary_thread(self, reply):
# REF1 in 'thread' model we give priority to running in main thread
# note that we should be called with _running_lock hold
primary_thread_task_ready = self._primary_thread_task_ready
if primary_thread_task_ready is not None:
if not primary_thread_task_ready.isSet():
self._primary_thread_task = reply
# wake up primary thread
primary_thread_task_ready.set()
return True
return False
def spawn(self, func, *args, **kwargs):
""" return Reply object for the asynchronous dispatch
of the given func(*args, **kwargs).
"""
reply = Reply((func, args, kwargs), self.execmodel)
with self._running_lock:
if self._shuttingdown:
raise ValueError("pool is shutting down")
self._running.add(reply)
if not self._try_send_to_primary_thread(reply):
self.execmodel.start(self._perform_spawn, (reply,))
return reply
def terminate(self, timeout=None):
""" trigger shutdown and wait for completion of all executions. """
self.trigger_shutdown()
return self.waitall(timeout=timeout)
def waitall(self, timeout=None):
""" wait until all active spawns have finished executing. """
with self._running_lock:
if not self._running:
return True
# if a Reply still runs, we let run_and_release
# signal us -- note that we are still holding the
# _running_lock to avoid race conditions
my_waitall_event = self.execmodel.Event()
self._waitall_events.append(my_waitall_event)
return my_waitall_event.wait(timeout=timeout)
# exceptions that generic handlers must never swallow
sysex = (KeyboardInterrupt, SystemExit)
# EXECNET_DEBUG selects the tracing backend: '2' -> stderr, any other
# truthy value -> per-process file in the temp dir, unset -> no-op
DEBUG = os.environ.get('EXECNET_DEBUG')
pid = os.getpid()
if DEBUG == '2':
    def trace(*msg):
        # write one trace line to stderr, tagged with our pid
        try:
            line = " ".join(map(str, msg))
            sys.stderr.write("[%s] %s\n" % (pid, line))
            sys.stderr.flush()
        except Exception:
            pass  # nothing we can do, likely interpreter-shutdown
elif DEBUG:
    import tempfile
    import os
    fn = os.path.join(tempfile.gettempdir(), 'execnet-debug-%d' % pid)
    # sys.stderr.write("execnet-debug at %r" % (fn,))
    debugfile = open(fn, 'w')
    def trace(*msg):
        # append one trace line to the per-process debug file
        try:
            line = " ".join(map(str, msg))
            debugfile.write(line + "\n")
            debugfile.flush()
        except Exception:
            try:
                v = sys.exc_info()[1]
                sys.stderr.write(
                    "[%s] exception during tracing: %r\n" % (pid, v))
            except Exception:
                pass  # nothing we can do, likely interpreter-shutdown
else:
    # tracing disabled: same callable signature, does nothing
    notrace = trace = lambda *msg: None
class Popen2IO:
    """ Raw byte-stream I/O over a pair of pipe files. """
    error = (IOError, OSError, EOFError)

    def __init__(self, outfile, infile, execmodel):
        # we need raw byte streams
        self.outfile, self.infile = outfile, infile
        if sys.platform == "win32":
            import msvcrt
            try:
                msvcrt.setmode(infile.fileno(), os.O_BINARY)
                msvcrt.setmode(outfile.fileno(), os.O_BINARY)
            except (AttributeError, IOError):
                pass
        # text-mode files expose their binary layer as .buffer
        self._read = getattr(infile, "buffer", infile).read
        self._write = getattr(outfile, "buffer", outfile).write
        self.execmodel = execmodel

    def read(self, numbytes):
        """Read exactly 'numbytes' bytes from the pipe. """
        # a file in non-blocking mode may return less bytes, so we loop
        chunks = []
        received = 0
        while received < numbytes:
            data = self._read(numbytes - received)
            if not data:
                raise EOFError(
                    "expected %d bytes, got %d" % (numbytes, received))
            chunks.append(data)
            received += len(data)
        return b"".join(chunks)

    def write(self, data):
        """write out all data bytes. """
        assert isinstance(data, bytes)
        self._write(data)
        self.outfile.flush()

    def close_read(self):
        self.infile.close()

    def close_write(self):
        self.outfile.close()
class Message:
    """ One wire-protocol message.

    Encoded as a 9-byte header -- msgcode (1 byte), channel id
    (4 bytes), payload length (4 bytes), network byte order --
    followed by the payload bytes.
    """
    _types = []

    def __init__(self, msgcode, channelid=0, data=''):
        self.msgcode = msgcode
        self.channelid = channelid
        self.data = data

    @staticmethod
    def from_io(io):
        # read exactly one message from the io object
        try:
            header = io.read(9)  # type 1, channel 4, payload 4
            if not header:
                raise EOFError("empty read")
        except EOFError:
            e = sys.exc_info()[1]
            raise EOFError('couldnt load message header, ' + e.args[0])
        code, chanid, payload_len = struct.unpack('!bii', header)
        return Message(code, chanid, io.read(payload_len))

    def to_io(self, io):
        # header plus payload in one write
        header = struct.pack('!bii', self.msgcode, self.channelid,
                             len(self.data))
        io.write(header + self.data)

    def received(self, gateway):
        # dispatch to the handler registered for this msgcode
        self._types[self.msgcode](self, gateway)

    def __repr__(self):
        name = self._types[self.msgcode].__name__.upper()
        return "<Message %s channel=%s lendata=%s>" % (
            name, self.channelid, len(self.data))
class GatewayReceivedTerminate(Exception):
    """ Receiverthread got termination message.

    Raised by the GATEWAY_TERMINATE message handler and caught in
    BaseGateway._thread_receiver to end the receive loop cleanly.
    """
def _setupmessages():
    """ Define the message handlers and register them on Message.

    The position of each handler in the ``types`` list below becomes
    its wire opcode (via the enumerate loop), so the ordering is part
    of the protocol and must not change.
    """
    def status(message, gateway):
        # we use the channelid to send back information
        # but don't instantiate a channel object
        d = {
            'numchannels': len(gateway._channelfactory._channels),
            'numexecuting': gateway._execpool.active_count(),
            'execmodel': gateway.execmodel.backend,
        }
        gateway._send(Message.CHANNEL_DATA, message.channelid,
                      dumps_internal(d))
        gateway._send(Message.CHANNEL_CLOSE, message.channelid)
    def channel_exec(message, gateway):
        # peer requests execution of source code tied to a new channel
        channel = gateway._channelfactory.new(message.channelid)
        gateway._local_schedulexec(channel=channel, sourcetask=message.data)
    def channel_data(message, gateway):
        gateway._channelfactory._local_receive(message.channelid, message.data)
    def channel_close(message, gateway):
        gateway._channelfactory._local_close(message.channelid)
    def channel_close_error(message, gateway):
        # peer closed the channel with an error; attach it locally
        remote_error = RemoteError(loads_internal(message.data))
        gateway._channelfactory._local_close(message.channelid, remote_error)
    def channel_last_message(message, gateway):
        # peer will send no further data ("sendonly" state)
        gateway._channelfactory._local_close(message.channelid, sendonly=True)
    def gateway_terminate(message, gateway):
        # unwinds the receiver loop (caught in _thread_receiver)
        raise GatewayReceivedTerminate(gateway)
    def reconfigure(message, gateway):
        # channelid 0 reconfigures the gateway-wide string coercion
        if message.channelid == 0:
            target = gateway
        else:
            target = gateway._channelfactory.new(message.channelid)
        target._strconfig = loads_internal(message.data, gateway)
    types = [
        status, reconfigure, gateway_terminate,
        channel_exec, channel_data, channel_close,
        channel_close_error, channel_last_message,
    ]
    for i, handler in enumerate(types):
        Message._types.append(handler)
        setattr(Message, handler.__name__.upper(), i)
_setupmessages()
def geterrortext(excinfo,
                 format_exception=traceback.format_exception, sysex=sysex):
    """ Return a full traceback rendering of *excinfo*; if the
    formatting itself fails, fall back to "ExcType: value".
    System-exiting exceptions are always re-raised, never swallowed.
    """
    try:
        return "".join(format_exception(*excinfo))
    except sysex:
        raise
    except:
        return '%s: %s' % (excinfo[0].__name__,
                           excinfo[1])
class RemoteError(Exception):
    """ Wraps the stringified traceback of an error that happened on
    the other side of a gateway connection. """

    def __init__(self, formatted):
        super(RemoteError, self).__init__()
        self.formatted = formatted

    def __str__(self):
        return self.formatted

    def __repr__(self):
        clsname = self.__class__.__name__
        return "%s: %s" % (clsname, self.formatted)

    def warn(self):
        # interrupt-triggered errors are expected; stay silent for them
        if self.formatted != INTERRUPT_TEXT:
            # XXX do this better
            sys.stderr.write("[%s] Warning: unhandled %r\n"
                             % (os.getpid(), self,))
class TimeoutError(IOError):
    """ Raised when a wait on a channel exceeded the given timeout. """
# default sentinel for Channel.setcallback(): no endmarker delivery wanted
NO_ENDMARKER_WANTED = object()
class Channel(object):
    """ Communication channel between two Python Interpreter execution points.

    A channel moves through the states "opened" -> "closed"/"sendonly"
    -> "deleted"; most transitions are driven from the receiver thread
    via ChannelFactory._local_close()/_local_receive().
    """
    RemoteError = RemoteError
    TimeoutError = TimeoutError
    _INTERNALWAKEUP = 1000
    _executing = False
    def __init__(self, gateway, id):
        assert isinstance(id, int)
        self.gateway = gateway
        # XXX: defaults copied from Unserializer
        self._strconfig = getattr(gateway, '_strconfig', (True, False))
        self.id = id
        # incoming items; replaced by None once a callback is registered
        self._items = self.gateway.execmodel.queue.Queue()
        self._closed = False
        # set once the remote side will send no more data
        self._receiveclosed = self.gateway.execmodel.Event()
        self._remoteerrors = []
    def _trace(self, *msg):
        self.gateway._trace(self.id, *msg)
    def setcallback(self, callback, endmarker=NO_ENDMARKER_WANTED):
        """ set a callback function for receiving items.
        All already queued items will immediately trigger the callback.
        Afterwards the callback will execute in the receiver thread
        for each received data item and calls to ``receive()`` will
        raise an error.
        If an endmarker is specified the callback will eventually
        be called with the endmarker when the channel closes.
        """
        _callbacks = self.gateway._channelfactory._callbacks
        with self.gateway._receivelock:
            if self._items is None:
                raise IOError("%r has callback already registered" % (self,))
            items = self._items
            self._items = None
            while 1:
                try:
                    olditem = items.get(block=False)
                except self.gateway.execmodel.queue.Empty:
                    # queue drained: register the callback for future items
                    if not (self._closed or self._receiveclosed.isSet()):
                        _callbacks[self.id] = (
                            callback,
                            endmarker,
                            self._strconfig,
                        )
                    break
                else:
                    if olditem is ENDMARKER:
                        items.put(olditem)  # for other receivers
                        if endmarker is not NO_ENDMARKER_WANTED:
                            callback(endmarker)
                        break
                    else:
                        callback(olditem)
    def __repr__(self):
        flag = self.isclosed() and "closed" or "open"
        return "<Channel id=%d %s>" % (self.id, flag)
    def __del__(self):
        if self.gateway is None:  # can be None in tests
            return
        self._trace("channel.__del__")
        # no multithreading issues here, because we have the last ref to 'self'
        if self._closed:
            # state transition "closed" --> "deleted"
            for error in self._remoteerrors:
                error.warn()
        elif self._receiveclosed.isSet():
            # state transition "sendonly" --> "deleted"
            # the remote channel is already in "deleted" state, nothing to do
            pass
        else:
            # state transition "opened" --> "deleted"
            # check if we are in the middle of interpreter shutdown
            # in which case the process will go away and we probably
            # don't need to try to send a closing or last message
            # (and often it won't work anymore to send things out)
            if Message is not None:
                if self._items is None:  # has_callback
                    msgcode = Message.CHANNEL_LAST_MESSAGE
                else:
                    msgcode = Message.CHANNEL_CLOSE
                try:
                    self.gateway._send(msgcode, self.id)
                except (IOError, ValueError):  # ignore problems with sending
                    pass
    def _getremoteerror(self):
        # pop the oldest remote error, falling back to a gateway-level
        # error (e.g. unexpected EOF seen by the receiver thread)
        try:
            return self._remoteerrors.pop(0)
        except IndexError:
            try:
                return self.gateway._error
            except AttributeError:
                pass
            return None
    #
    # public API for channel objects
    #
    def isclosed(self):
        """ return True if the channel is closed. A closed
        channel may still hold items.
        """
        return self._closed
    def makefile(self, mode='w', proxyclose=False):
        """ return a file-like object.
        mode can be 'w' or 'r' for writeable/readable files.
        if proxyclose is true file.close() will also close the channel.
        """
        if mode == "w":
            return ChannelFileWrite(channel=self, proxyclose=proxyclose)
        elif mode == "r":
            return ChannelFileRead(channel=self, proxyclose=proxyclose)
        raise ValueError("mode %r not availabe" % (mode,))
    def close(self, error=None):
        """ close down this channel with an optional error message.
        Note that closing of a channel tied to remote_exec happens
        automatically at the end of execution and cannot
        be done explicitely.
        """
        if self._executing:
            raise IOError("cannot explicitly close channel within remote_exec")
        if self._closed:
            self.gateway._trace(self, "ignoring redundant call to close()")
        if not self._closed:
            # state transition "opened/sendonly" --> "closed"
            # threads warning: the channel might be closed under our feet,
            # but it's never damaging to send too many CHANNEL_CLOSE messages
            # however, if the other side triggered a close already, we
            # do not send back a closed message.
            if not self._receiveclosed.isSet():
                put = self.gateway._send
                if error is not None:
                    put(Message.CHANNEL_CLOSE_ERROR, self.id,
                        dumps_internal(error))
                else:
                    put(Message.CHANNEL_CLOSE, self.id)
                self._trace("sent channel close message")
            if isinstance(error, RemoteError):
                self._remoteerrors.append(error)
            self._closed = True  # --> "closed"
            self._receiveclosed.set()
            queue = self._items
            if queue is not None:
                # wake up any pending receive() call
                queue.put(ENDMARKER)
            self.gateway._channelfactory._no_longer_opened(self.id)
    def waitclose(self, timeout=None):
        """ wait until this channel is closed (or the remote side
        otherwise signalled that no more data was being sent).
        The channel may still hold receiveable items, but not receive
        any more after waitclose() has returned. Exceptions from executing
        code on the other side are reraised as local channel.RemoteErrors.
        EOFError is raised if the reading-connection was prematurely closed,
        which often indicates a dying process.
        self.TimeoutError is raised after the specified number of seconds
        (default is None, i.e. wait indefinitely).
        """
        # wait for non-"opened" state
        self._receiveclosed.wait(timeout=timeout)
        if not self._receiveclosed.isSet():
            raise self.TimeoutError("Timeout after %r seconds" % timeout)
        error = self._getremoteerror()
        if error:
            raise error
    def send(self, item):
        """sends the given item to the other side of the channel,
        possibly blocking if the sender queue is full.
        The item must be a simple python type and will be
        copied to the other side by value. IOError is
        raised if the write pipe was prematurely closed.
        """
        if self.isclosed():
            raise IOError("cannot send to %r" % (self,))
        self.gateway._send(Message.CHANNEL_DATA, self.id, dumps_internal(item))
    def receive(self, timeout=None):
        """receive a data item that was sent from the other side.
        timeout: None [default] blocked waiting. A positive number
        indicates the number of seconds after which a channel.TimeoutError
        exception will be raised if no item was received.
        Note that exceptions from the remotely executing code will be
        reraised as channel.RemoteError exceptions containing
        a textual representation of the remote traceback.
        """
        itemqueue = self._items
        if itemqueue is None:
            raise IOError("cannot receive(), channel has receiver callback")
        try:
            x = itemqueue.get(timeout=timeout)
        except self.gateway.execmodel.queue.Empty:
            raise self.TimeoutError("no item after %r seconds" % (timeout))
        if x is ENDMARKER:
            itemqueue.put(x)  # for other receivers
            raise self._getremoteerror() or EOFError()
        else:
            return x
    def __iter__(self):
        return self
    def next(self):
        try:
            return self.receive()
        except EOFError:
            raise StopIteration
    __next__ = next
    def reconfigure(self, py2str_as_py3str=True, py3str_as_py2str=False):
        """
        set the string coercion for this channel
        the default is to try to convert py2 str as py3 str,
        but not to try and convert py3 str to py2 str
        """
        self._strconfig = (py2str_as_py3str, py3str_as_py2str)
        data = dumps_internal(self._strconfig)
        self.gateway._send(Message.RECONFIGURE, self.id, data=data)
# sentinel queued into a channel's item queue to signal "no more items"
ENDMARKER = object()
# error text used when remote execution was killed by KeyboardInterrupt
INTERRUPT_TEXT = "keyboard-interrupted"
class ChannelFactory(object):
    """ Creates and tracks the Channel objects of one gateway.

    Locally allocated channel ids start at ``startcount`` and advance
    by 2 so the two sides of a connection never collide.
    """
    def __init__(self, gateway, startcount=1):
        # weak references let unused channels reach Channel.__del__,
        # which notifies the other side
        self._channels = weakref.WeakValueDictionary()
        # id -> (callback, endmarker, strconfig) registered via setcallback()
        self._callbacks = {}
        self._writelock = gateway.execmodel.Lock()
        self.gateway = gateway
        self.count = startcount
        self.finished = False
        self._list = list  # needed during interp-shutdown
    def new(self, id=None):
        """ create a new Channel with 'id' (or create new id if None). """
        with self._writelock:
            if self.finished:
                raise IOError("connexion already closed: %s" % (self.gateway,))
            if id is None:
                id = self.count
                self.count += 2
            try:
                channel = self._channels[id]
            except KeyError:
                channel = self._channels[id] = Channel(self.gateway, id)
            return channel
    def channels(self):
        # snapshot of all live channels
        return self._list(self._channels.values())
    #
    # internal methods, called from the receiver thread
    #
    def _no_longer_opened(self, id):
        # drop bookkeeping for a channel and deliver a pending endmarker
        try:
            del self._channels[id]
        except KeyError:
            pass
        try:
            callback, endmarker, strconfig = self._callbacks.pop(id)
        except KeyError:
            pass
        else:
            if endmarker is not NO_ENDMARKER_WANTED:
                callback(endmarker)
    def _local_close(self, id, remoteerror=None, sendonly=False):
        # transition a channel to "closed" (or "sendonly") on behalf of
        # the remote side
        channel = self._channels.get(id)
        if channel is None:
            # channel already in "deleted" state
            if remoteerror:
                remoteerror.warn()
            self._no_longer_opened(id)
        else:
            # state transition to "closed" state
            if remoteerror:
                channel._remoteerrors.append(remoteerror)
            queue = channel._items
            if queue is not None:
                queue.put(ENDMARKER)
            self._no_longer_opened(id)
            if not sendonly:  # otherwise #--> "sendonly"
                channel._closed = True  # --> "closed"
            channel._receiveclosed.set()
    def _local_receive(self, id, data):
        # executes in receiver thread
        channel = self._channels.get(id)
        try:
            callback, endmarker, strconfig = self._callbacks[id]
        except KeyError:
            queue = channel and channel._items
            if queue is None:
                pass  # drop data
            else:
                item = loads_internal(data, channel)
                queue.put(item)
        else:
            try:
                data = loads_internal(data, channel, strconfig)
                callback(data)  # even if channel may be already closed
            except Exception:
                # a failing callback closes the channel with the error text
                excinfo = sys.exc_info()
                self.gateway._trace("exception during callback: %s" %
                                    excinfo[1])
                errortext = self.gateway._geterrortext(excinfo)
                self.gateway._send(Message.CHANNEL_CLOSE_ERROR,
                                   id, dumps_internal(errortext))
                self._local_close(id, errortext)
    def _finished_receiving(self):
        # called when the receiver thread stops: put all channels into
        # "sendonly" state and flush pending callbacks
        with self._writelock:
            self.finished = True
        for id in self._list(self._channels):
            self._local_close(id, sendonly=True)
        for id in self._list(self._callbacks):
            self._no_longer_opened(id)
class ChannelFile(object):
    """ Base for file-like adapters wrapping a Channel. """

    def __init__(self, channel, proxyclose=True):
        self.channel = channel
        self._proxyclose = proxyclose

    def isatty(self):
        # a channel never represents a terminal
        return False

    def close(self):
        # optionally propagate closing to the wrapped channel
        if self._proxyclose:
            self.channel.close()

    def __repr__(self):
        if self.channel.isclosed():
            state = 'closed'
        else:
            state = 'open'
        return '<ChannelFile %d %s>' % (self.channel.id, state)
class ChannelFileWrite(ChannelFile):
    """ Writeable file-like adapter: write() sends over the channel. """
    def write(self, out):
        # each write is transmitted as one channel item
        self.channel.send(out)
    def flush(self):
        # writes go out immediately; nothing to flush
        pass
class ChannelFileRead(ChannelFile):
    """ Readable file-like adapter: read()/readline() consume items
    received over the channel, buffering any surplus locally. """
    def __init__(self, channel, proxyclose=True):
        super(ChannelFileRead, self).__init__(channel, proxyclose)
        # data received but not yet returned; None until first read
        self._buffer = None
    def read(self, n):
        # pull channel items until we have n units of data or EOF
        try:
            if self._buffer is None:
                self._buffer = self.channel.receive()
            while len(self._buffer) < n:
                self._buffer += self.channel.receive()
        except EOFError:
            # remote side finished: return whatever remains buffered
            self.close()
        if self._buffer is None:
            ret = ""
        else:
            ret = self._buffer[:n]
            self._buffer = self._buffer[n:]
        return ret
    def readline(self):
        if self._buffer is not None:
            i = self._buffer.find("\n")
            if i != -1:
                return self.read(i+1)
            line = self.read(len(self._buffer)+1)
        else:
            line = self.read(1)
        # extend one unit at a time until newline or EOF
        while line and line[-1] != "\n":
            c = self.read(1)
            if not c:
                break
            line += c
        return line
class BaseGateway(object):
    """ Machinery shared by both gateway sides: channel factory,
    receiver thread, and message sending. """
    exc_info = sys.exc_info
    _sysex = sysex
    id = "<slave>"
    def __init__(self, io, id, _startcount=2):
        self.execmodel = io.execmodel
        self._io = io
        self.id = id
        self._strconfig = (Unserializer.py2str_as_py3str,
                           Unserializer.py3str_as_py2str)
        self._channelfactory = ChannelFactory(self, _startcount)
        # serializes message dispatch in the receiver thread
        self._receivelock = self.execmodel.RLock()
        # globals may be NONE at process-termination
        self.__trace = trace
        self._geterrortext = geterrortext
        self._receivepool = self.execmodel.WorkerPool()
    def _trace(self, *msg):
        self.__trace(self.id, *msg)
    def _initreceive(self):
        # start the receiver loop on a pool worker
        self._receivepool.spawn(self._thread_receiver)
    def _thread_receiver(self):
        # receive and dispatch messages until EOF/termination, then
        # tear down channels, execution and the io pair
        def log(*msg):
            self._trace("[receiver-thread]", *msg)
        log("RECEIVERTHREAD: starting to run")
        io = self._io
        try:
            while 1:
                msg = Message.from_io(io)
                log("received", msg)
                with self._receivelock:
                    msg.received(self)
                    del msg
        except (KeyboardInterrupt, GatewayReceivedTerminate):
            pass
        except EOFError:
            log("EOF without prior gateway termination message")
            self._error = self.exc_info()[1]
        except Exception:
            log(self._geterrortext(self.exc_info()))
        log('finishing receiving thread')
        # wake up and terminate any execution waiting to receive
        self._channelfactory._finished_receiving()
        log('terminating execution')
        self._terminate_execution()
        log('closing read')
        self._io.close_read()
        log('closing write')
        self._io.close_write()
        log('terminating our receive pseudo pool')
        self._receivepool.trigger_shutdown()
    def _terminate_execution(self):
        # hook for subclasses (SlaveGateway stops its exec pool here)
        pass
    def _send(self, msgcode, channelid=0, data=bytes()):
        message = Message(msgcode, channelid, data)
        try:
            message.to_io(self._io)
            self._trace('sent', message)
        except (IOError, ValueError):
            e = sys.exc_info()[1]
            self._trace('failed to send', message, e)
            # ValueError might be because the IO is already closed
            raise IOError("cannot send (already closed?)")
    def _local_schedulexec(self, channel, sourcetask):
        # base gateways do not execute code; SlaveGateway overrides this
        channel.close("execution disallowed")
    # _____________________________________________________________________
    #
    # High Level Interface
    # _____________________________________________________________________
    #
    def newchannel(self):
        """ return a new independent channel. """
        return self._channelfactory.new()
    def join(self, timeout=None):
        """ Wait for receiverthread to terminate. """
        self._trace("waiting for receiver thread to finish")
        self._receivepool.waitall()
class SlaveGateway(BaseGateway):
    """ The remote side of a gateway: executes source received over
    channels in a worker pool. """
    def _local_schedulexec(self, channel, sourcetask):
        sourcetask = loads_internal(sourcetask)
        self._execpool.spawn(self.executetask, ((channel, sourcetask)))
    def _terminate_execution(self):
        # called from receiverthread
        self._trace("shutting down execution pool")
        self._execpool.trigger_shutdown()
        if not self._execpool.waitall(5.0):
            self._trace(
                "execution ongoing after 5 secs,"" trying interrupt_main")
            # We try hard to terminate execution based on the assumption
            # that there is only one gateway object running per-process.
            if sys.platform != "win32":
                self._trace("sending ourselves a SIGINT")
                os.kill(os.getpid(), 2)  # send ourselves a SIGINT
            elif interrupt_main is not None:
                self._trace("calling interrupt_main()")
                interrupt_main()
            if not self._execpool.waitall(10.0):
                self._trace("execution did not finish in another 10 secs, "
                            "calling os._exit()")
                os._exit(1)
    def serve(self):
        # main entry of the slave process: run execution pool + receiver
        def trace(msg):
            self._trace("[serve] " + msg)
        hasprimary = self.execmodel.backend == "thread"
        self._execpool = self.execmodel.WorkerPool(hasprimary=hasprimary)
        trace("spawning receiver thread")
        self._initreceive()
        try:
            if hasprimary:
                # this will return when we are in shutdown
                trace("integrating as primary thread")
                self._execpool.integrate_as_primary_thread()
            trace("joining receiver thread")
            self.join()
        except KeyboardInterrupt:
            # in the slave we can't really do anything sensible
            trace("swallowing keyboardinterrupt, serve finished")
    def executetask(self, item):
        # execute one (channel, (source, call_name, kwargs)) work item
        try:
            channel, (source, call_name, kwargs) = item
            if not ISPY3 and kwargs:
                # some python2 versions do not accept unicode keyword params
                # note: Unserializer generally turns py2-str to py3-str objects
                newkwargs = {}
                for name, value in kwargs.items():
                    if isinstance(name, unicode):
                        name = name.encode('ascii')
                    newkwargs[name] = value
                kwargs = newkwargs
            loc = {'channel': channel, '__name__': '__channelexec__'}
            self._trace("execution starts[%s]: %s" %
                        (channel.id, repr(source)[:50]))
            channel._executing = True
            try:
                co = compile(source+'\n', '<remote exec>', 'exec')
                do_exec(co, loc)  # noqa
                if call_name:
                    self._trace('calling %s(**%60r)' % (call_name, kwargs))
                    function = loc[call_name]
                    function(channel, **kwargs)
            finally:
                channel._executing = False
            self._trace("execution finished")
        except KeyboardInterrupt:
            channel.close(INTERRUPT_TEXT)
            raise
        except:
            excinfo = self.exc_info()
            if not isinstance(excinfo[1], EOFError):
                if not channel.gateway._channelfactory.finished:
                    self._trace("got exception: %r" % (excinfo[1],))
                    errortext = self._geterrortext(excinfo)
                    channel.close(errortext)
                    return
            self._trace("ignoring EOFError because receiving finished")
            channel.close()
#
# Cross-Python pickling code, tested from test_serializer.py
#
class DataFormatError(Exception):
    """ Base class for serialization/deserialization failures. """


class DumpError(DataFormatError):
    """Error while serializing an object."""


class LoadError(DataFormatError):
    """Error while unserializing an object."""
if ISPY3:
    def bchr(n):
        # single byte from an int on py3
        return bytes([n])
else:
    # py2 chr() already returns a one-byte str
    bchr = chr
# version byte prefixed to versioned dumps; checked by Unserializer.load
DUMPFORMAT_VERSION = bchr(1)
FOUR_BYTE_INT_MAX = 2147483647  # largest value stored in a 4-byte int slot
FLOAT_FORMAT = "!d"
FLOAT_FORMAT_SIZE = struct.calcsize(FLOAT_FORMAT)
COMPLEX_FORMAT = "!dd"
COMPLEX_FORMAT_SIZE = struct.calcsize(COMPLEX_FORMAT)
class _Stop(Exception):
    """ Internal control flow: raised by Unserializer.load_stop when
    the STOP opcode is reached. """
    pass
class Unserializer(object):
    """ Decode the opcode byte stream produced by _Serializer back
    into python objects, using a value stack. """
    num2func = {}  # is filled after this class definition
    py2str_as_py3str = True  # True
    py3str_as_py2str = False  # false means py2 will get unicode
    def __init__(self, stream, channel_or_gateway=None, strconfig=None):
        # a channel/gateway (if given) supplies the string coercion
        # config and the channel factory used by CHANNEL opcodes
        gateway = getattr(channel_or_gateway, 'gateway', channel_or_gateway)
        strconfig = getattr(channel_or_gateway, '_strconfig', strconfig)
        if strconfig:
            self.py2str_as_py3str, self.py3str_as_py2str = strconfig
        self.stream = stream
        self.channelfactory = getattr(gateway, '_channelfactory', gateway)
    def load(self, versioned=False):
        # run opcodes until STOP and return the single resulting object;
        # raises LoadError on malformed input
        if versioned:
            ver = self.stream.read(1)
            if ver != DUMPFORMAT_VERSION:
                raise LoadError("wrong dumpformat version %r" % ver)
        self.stack = []
        try:
            while True:
                opcode = self.stream.read(1)
                if not opcode:
                    raise EOFError
                try:
                    loader = self.num2func[opcode]
                except KeyError:
                    raise LoadError(
                        "unkown opcode %r - "
                        "wire protocol corruption?" % (opcode,))
                loader(self)
        except _Stop:
            if len(self.stack) != 1:
                raise LoadError("internal unserialization error")
            return self.stack.pop(0)
        else:
            raise LoadError("didn't get STOP")
    def load_none(self):
        self.stack.append(None)
    def load_true(self):
        self.stack.append(True)
    def load_false(self):
        self.stack.append(False)
    def load_int(self):
        i = self._read_int4()
        self.stack.append(i)
    def load_longint(self):
        # arbitrary-size integer sent as a decimal byte string
        s = self._read_byte_string()
        self.stack.append(int(s))
    if ISPY3:
        load_long = load_int
        load_longlong = load_longint
    else:
        def load_long(self):
            i = self._read_int4()
            self.stack.append(long(i))
        def load_longlong(self):
            l = self._read_byte_string()
            self.stack.append(long(l))
    def load_float(self):
        binary = self.stream.read(FLOAT_FORMAT_SIZE)
        self.stack.append(struct.unpack(FLOAT_FORMAT, binary)[0])
    def load_complex(self):
        binary = self.stream.read(COMPLEX_FORMAT_SIZE)
        self.stack.append(complex(*struct.unpack(COMPLEX_FORMAT, binary)))
    def _read_int4(self):
        # network byte order signed 32-bit int
        return struct.unpack("!i", self.stream.read(4))[0]
    def _read_byte_string(self):
        # 4-byte length prefix followed by that many raw bytes
        length = self._read_int4()
        as_bytes = self.stream.read(length)
        return as_bytes
    def load_py3string(self):
        as_bytes = self._read_byte_string()
        if not ISPY3 and self.py3str_as_py2str:
            # XXX Should we try to decode into latin-1?
            self.stack.append(as_bytes)
        else:
            self.stack.append(as_bytes.decode("utf-8"))
    def load_py2string(self):
        as_bytes = self._read_byte_string()
        if ISPY3 and self.py2str_as_py3str:
            s = as_bytes.decode("latin-1")
        else:
            s = as_bytes
        self.stack.append(s)
    def load_bytes(self):
        s = self._read_byte_string()
        self.stack.append(s)
    def load_unicode(self):
        self.stack.append(self._read_byte_string().decode("utf-8"))
    def load_newlist(self):
        # list arrives pre-sized; SETITEM opcodes fill the slots
        length = self._read_int4()
        self.stack.append([None] * length)
    def load_setitem(self):
        if len(self.stack) < 3:
            raise LoadError("not enough items for setitem")
        value = self.stack.pop()
        key = self.stack.pop()
        self.stack[-1][key] = value
    def load_newdict(self):
        self.stack.append({})
    def _load_collection(self, type_):
        # build type_ from the topmost `length` stack entries
        length = self._read_int4()
        if length:
            res = type_(self.stack[-length:])
            del self.stack[-length:]
            self.stack.append(res)
        else:
            self.stack.append(type_())
    def load_buildtuple(self):
        self._load_collection(tuple)
    def load_set(self):
        self._load_collection(set)
    def load_frozenset(self):
        self._load_collection(frozenset)
    def load_stop(self):
        raise _Stop
    def load_channel(self):
        # look up (or create) the channel with the transmitted id
        id = self._read_int4()
        newchannel = self.channelfactory.new(id)
        self.stack.append(newchannel)
# automatically build opcodes and byte-encoding
class opcode:
    """ container for name -> num mappings. """
def _buildopcodes():
    """ Collect all Unserializer.load_* methods and assign each a
    stable one-byte opcode (bchr(64+i)); entries in later_added are
    sorted to the end so older opcodes keep their values. """
    l = []
    later_added = {
        'COMPLEX': 1,
    }
    for name, func in Unserializer.__dict__.items():
        if name.startswith("load_"):
            opname = name[5:].upper()
            l.append((opname, func))
    # sort key: (generation, name) keeps the encoding deterministic
    l.sort(key=lambda x: (later_added.get(x[0], 0), x[0]))
    for i, (opname, func) in enumerate(l):
        assert i < 26, "xxx"
        i = bchr(64+i)
        Unserializer.num2func[i] = func
        setattr(opcode, opname, i)
_buildopcodes()
def dumps(obj):
    """ serialize *obj* into a versioned bytestring.

    obj and everything it contains must be builtin python types
    (nested dicts, sets etc. are fine, user-level instances are not).
    """
    serializer = _Serializer()
    return serializer.save(obj, versioned=True)
def dump(byteio, obj):
    """ serialize *obj* and write the versioned bytestring directly
    through the given stream's write() method. """
    serializer = _Serializer(write=byteio.write)
    serializer.save(obj, versioned=True)
def loads(bytestring, py2str_as_py3str=False, py3str_as_py2str=False):
    """ return the object as deserialized from the given bytestring.

    py2str_as_py3str: if true then string (str) objects previously
                      dumped on Python2 will be loaded as Python3
                      strings which really are text objects.
    py3str_as_py2str: if true then string (str) objects previously
                      dumped on Python3 will be loaded as Python2
                      strings instead of unicode objects.

    raises ``execnet.DataFormatError`` if the bytestring was dumped
    with an incompatible protocol version or is corrupted.
    """
    return load(BytesIO(bytestring),
                py2str_as_py3str=py2str_as_py3str,
                py3str_as_py2str=py3str_as_py2str)
def load(io, py2str_as_py3str=False, py3str_as_py2str=False):
    """ deserialize an object from the specified stream.

    Behaviour and parameters are otherwise the same as with ``loads``.
    """
    unserializer = Unserializer(
        io, strconfig=(py2str_as_py3str, py3str_as_py2str))
    return unserializer.load(versioned=True)
def loads_internal(bytestring, channelfactory=None, strconfig=None):
    """ deserialize an unversioned bytestring as used on the wire. """
    stream = BytesIO(bytestring)
    return Unserializer(stream, channelfactory, strconfig).load()
def dumps_internal(obj):
    """ serialize *obj* without the version prefix (wire format). """
    return _Serializer().save(obj)
class _Serializer(object):
_dispatch = {}
def __init__(self, write=None):
if write is None:
self._streamlist = []
write = self._streamlist.append
self._write = write
def save(self, obj, versioned=False):
# calling here is not re-entrant but multiple instances
# may write to the same stream because of the common platform
# atomic-write guaruantee (concurrent writes each happen atomicly)
if versioned:
self._write(DUMPFORMAT_VERSION)
self._save(obj)
self._write(opcode.STOP)
try:
streamlist = self._streamlist
except AttributeError:
return None
return type(streamlist[0])().join(streamlist)
def _save(self, obj):
tp = type(obj)
try:
dispatch = self._dispatch[tp]
except KeyError:
methodname = 'save_' + tp.__name__
meth = getattr(self.__class__, methodname, None)
if meth is None:
raise DumpError("can't serialize %s" % (tp,))
dispatch = self._dispatch[tp] = meth
dispatch(self, obj)
def save_NoneType(self, non):
self._write(opcode.NONE)
def save_bool(self, boolean):
if boolean:
self._write(opcode.TRUE)
else:
self._write(opcode.FALSE)
def save_bytes(self, bytes_):
self._write(opcode.BYTES)
self._write_byte_sequence(bytes_)
if ISPY3:
def save_str(self, s):
self._write(opcode.PY3STRING)
self._write_unicode_string(s)
else:
def save_str(self, s):
self._write(opcode.PY2STRING)
self._write_byte_sequence(s)
def save_unicode(self, s):
self._write(opcode.UNICODE)
self._write_unicode_string(s)
def _write_unicode_string(self, s):
try:
as_bytes = s.encode("utf-8")
except UnicodeEncodeError:
raise DumpError("strings must be utf-8 encodable")
self._write_byte_sequence(as_bytes)
def _write_byte_sequence(self, bytes_):
self._write_int4(len(bytes_), "string is too long")
self._write(bytes_)
def _save_integral(self, i, short_op, long_op):
if i <= FOUR_BYTE_INT_MAX:
self._write(short_op)
self._write_int4(i)
else:
self._write(long_op)
self._write_byte_sequence(str(i).rstrip("L").encode("ascii"))
def save_int(self, i):
self._save_integral(i, opcode.INT, opcode.LONGINT)
def save_long(self, l):
self._save_integral(l, opcode.LONG, opcode.LONGLONG)
def save_float(self, flt):
self._write(opcode.FLOAT)
self._write(struct.pack(FLOAT_FORMAT, flt))
def save_complex(self, cpx):
self._write(opcode.COMPLEX)
self._write(struct.pack(COMPLEX_FORMAT, cpx.real, cpx.imag))
def _write_int4(self, i, error="int must be less than %i" %
(FOUR_BYTE_INT_MAX,)):
if i > FOUR_BYTE_INT_MAX:
raise DumpError(error)
self._write(struct.pack("!i", i))
def save_list(self, L):
    """Emit a list: NEWLIST + length, then one SETITEM per element."""
    self._write(opcode.NEWLIST)
    self._write_int4(len(L), "list is too long")
    index = 0
    for element in L:
        self._write_setitem(index, element)
        index += 1
def _write_setitem(self, key, value):
    # Key, then value, then the SETITEM opcode that consumes both.
    self._save(key)
    self._save(value)
    self._write(opcode.SETITEM)
def save_dict(self, d):
    # NEWDICT carries no length; each pair follows as a SETITEM triple.
    self._write(opcode.NEWDICT)
    for key, value in d.items():
        self._write_setitem(key, value)

def save_tuple(self, tup):
    # Elements are emitted first; BUILDTUPLE then follows with the count.
    for item in tup:
        self._save(item)
    self._write(opcode.BUILDTUPLE)
    self._write_int4(len(tup), "tuple is too long")
def _write_set(self, s, op):
    # Shared emitter for set-like containers: elements first, then the
    # closing opcode with the element count.
    for item in s:
        self._save(item)
    self._write(op)
    self._write_int4(len(s), "set is too long")

def save_set(self, s):
    self._write_set(s, opcode.SET)

def save_frozenset(self, s):
    self._write_set(s, opcode.FROZENSET)
def save_Channel(self, channel):
    # Channels travel as their numeric id only.
    self._write(opcode.CHANNEL)
    self._write_int4(channel.id)
def init_popen_io(execmodel):
    """Claim this process's stdin/stdout for gateway IO.

    Returns a Popen2IO bound to duplicates of the original fds 0/1,
    while the visible sys.stdin/sys.stdout are redirected to the null
    device so stray prints from user code cannot corrupt the protocol.
    """
    if not hasattr(os, 'dup'):  # jython
        io = Popen2IO(sys.stdout, sys.stdin, execmodel)
        import tempfile
        sys.stdin = tempfile.TemporaryFile('r')
        sys.stdout = tempfile.TemporaryFile('w')
    else:
        try:
            devnull = os.devnull
        except AttributeError:
            if os.name == 'nt':
                devnull = 'NUL'
            else:
                devnull = '/dev/null'
        # stdin: keep a duplicate of the real fd 0 for the protocol,
        # then point fd 0 at the null device for everyone else
        stdin = execmodel.fdopen(os.dup(0), 'r', 1)
        fd = os.open(devnull, os.O_RDONLY)
        os.dup2(fd, 0)
        os.close(fd)
        # stdout: same dance for fd 1
        stdout = execmodel.fdopen(os.dup(1), 'w', 1)
        fd = os.open(devnull, os.O_WRONLY)
        os.dup2(fd, 1)
        # stderr for win32: replace fd 2 as well
        if os.name == 'nt':
            sys.stderr = execmodel.fdopen(os.dup(2), 'w', 1)
            os.dup2(fd, 2)
        os.close(fd)
        io = Popen2IO(stdout, stdin, execmodel)
        # user-visible stdin/stdout now talk to the null device
        sys.stdin = execmodel.fdopen(0, 'r', 1)
        sys.stdout = execmodel.fdopen(1, 'w', 1)
    return io
def serve(io, id):
    """Run a slave gateway over the given IO channel until it finishes."""
    trace("creating slavegateway on %r" % (io,))
    gateway = SlaveGateway(io=io, id=id, _startcount=2)
    gateway.serve()
| |
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals, absolute_import
import frappe
import json
from email.utils import formataddr, parseaddr
from frappe.utils import get_url, get_formatted_email, cint, validate_email_add, split_emails, get_fullname
from frappe.utils.file_manager import get_file
from frappe.email.bulk import check_bulk_limit
from frappe.utils.scheduler import log
import frappe.email.smtp
import MySQLdb
import time
from frappe import _
from frappe.utils.background_jobs import enqueue
@frappe.whitelist()
def make(doctype=None, name=None, content=None, subject=None, sent_or_received = "Sent",
    sender=None, recipients=None, communication_medium="Email", send_email=False,
    print_html=None, print_format=None, attachments='[]', send_me_a_copy=False, cc=None, flags=None):
    """Make a new communication.

    :param doctype: Reference DocType.
    :param name: Reference Document name.
    :param content: Communication body.
    :param subject: Communication subject.
    :param sent_or_received: Sent or Received (default **Sent**).
    :param sender: Communication sender (default current user).
    :param recipients: Communication recipients as list.
    :param communication_medium: Medium of communication (default **Email**).
    :param send_email: Send via email (default **False**).
    :param print_html: HTML Print format to be sent as attachment.
    :param print_format: Print Format name of parent document to be sent as attachment.
    :param attachments: List of attachments as list of files or JSON string.
    :param send_me_a_copy: Send a copy to the sender (default **False**).
    :param cc: CC recipients.
    :param flags: Optional dict; ``ignore_doctype_permissions`` skips the email-permission check.
    """
    # "Error Report" mails from the logged-in user bypass the permission check
    is_error_report = (doctype=="User" and name==frappe.session.user and subject=="Error Report")
    send_me_a_copy = cint(send_me_a_copy)

    if doctype and name and not is_error_report and not frappe.has_permission(doctype, "email", name) and not (flags or {}).get('ignore_doctype_permissions'):
        raise frappe.PermissionError("You are not allowed to send emails related to: {doctype} {name}".format(
            doctype=doctype, name=name))

    if not sender:
        sender = get_formatted_email(frappe.session.user)

    comm = frappe.get_doc({
        "doctype":"Communication",
        "subject": subject,
        "content": content,
        "sender": sender,
        "recipients": recipients,
        "cc": cc or None,
        "communication_medium": communication_medium,
        "sent_or_received": sent_or_received,
        "reference_doctype": doctype,
        "reference_name": name
    })
    comm.insert(ignore_permissions=True)

    # if not committed, delayed task doesn't find the communication
    frappe.db.commit()

    if cint(send_email):
        comm.send(print_html, print_format, attachments, send_me_a_copy=send_me_a_copy)

    return {
        "name": comm.name,
        "emails_not_sent_to": ", ".join(comm.emails_not_sent_to) if hasattr(comm, "emails_not_sent_to") else None
    }
def validate_email(doc):
    """Validate every address in the Recipients and CC fields.

    Only outgoing email communications are checked; incoming mail
    (``doc.flags.in_receive``) is accepted as-is.
    """
    is_email_communication = (doc.communication_type == "Communication"
        and doc.communication_medium == "Email")
    if not is_email_communication or doc.flags.in_receive:
        return

    # validate recipients first, then CC; throws on the first bad address
    for field_value in (doc.recipients, doc.cc):
        for address in split_emails(field_value):
            validate_email_add(address, throw=True)
# validate sender
def notify(doc, print_html=None, print_format=None, attachments=None,
    recipients=None, cc=None, fetched_from_email_account=False):
    """Calls a delayed task 'sendmail' that enqueues email in Bulk Email queue.

    In test mode (``frappe.flags.in_test``) the mail is sent synchronously
    instead. Also records ``doc.emails_not_sent_to`` for callers.

    :param print_html: Send given value as HTML attachment
    :param print_format: Attach print format of parent document
    :param attachments: A list of filenames that should be attached when sending this email
    :param recipients: Email recipients
    :param cc: Send email as CC to
    :param fetched_from_email_account: True when pulling email, the notification shouldn't go to the main recipient
    """
    recipients, cc = get_recipients_and_cc(doc, recipients, cc,
        fetched_from_email_account=fetched_from_email_account)

    # addresses seen during filtering but dropped from the final lists
    doc.emails_not_sent_to = set(doc.all_email_addresses) - set(doc.sent_email_addresses)

    if frappe.flags.in_test:
        # for test cases, run synchronously
        doc._notify(print_html=print_html, print_format=print_format, attachments=attachments,
            recipients=recipients, cc=cc)
    else:
        check_bulk_limit(list(set(doc.sent_email_addresses)))
        # forward language and session so the background worker renders
        # with the sender's context
        enqueue(sendmail, queue="default", timeout=300, event="sendmail",
            communication_name=doc.name,
            print_html=print_html, print_format=print_format, attachments=attachments,
            recipients=recipients, cc=cc, lang=frappe.local.lang, session=frappe.local.session)
def _notify(doc, print_html=None, print_format=None, attachments=None,
    recipients=None, cc=None):
    """Hand the communication to ``frappe.sendmail`` (bulk queue).

    Resolves sender/accounts and builds attachments via
    ``prepare_to_notify`` first.
    """
    prepare_to_notify(doc, print_html, print_format, attachments)

    frappe.sendmail(
        recipients=(recipients or []) + (cc or []),
        show_as_cc=(cc or []),
        expose_recipients=True,
        sender=doc.sender,
        reply_to=doc.incoming_email_account,
        subject=doc.subject,
        content=doc.content,
        reference_doctype=doc.reference_doctype,
        reference_name=doc.reference_name,
        attachments=doc.attachments,
        message_id=doc.name,  # the communication name doubles as the Message-Id
        unsubscribe_message=_("Leave this conversation"),
        bulk=True,
        communication=doc.name
    )
def update_parent_status(doc):
    """Update status of parent document based on who is replying.

    Only applies when the parent's ``status`` field offers an "Open"
    option: received mail re-opens the parent, sent mail marks it
    "Replied" (if that option exists).
    """
    if doc.communication_type != "Communication":
        return

    parent = doc.get_parent_doc()
    if not parent:
        return

    status_field = parent.meta.get_field("status")

    if status_field and "Open" in (status_field.options or "").split("\n"):
        to_status = "Open" if doc.sent_or_received=="Received" else "Replied"

        if to_status in status_field.options.splitlines():
            parent.db_set("status", to_status)
            parent.notify_update()
def get_recipients_and_cc(doc, recipients, cc, fetched_from_email_account=False):
    """Resolve the final To/CC lists, resetting the doc's address bookkeeping.

    For fetched email the original recipients were already mailed by the
    sender's service, so only the previous sender in the thread (if any)
    is notified, and already-mailed addresses are dropped from CC.
    """
    doc.all_email_addresses = []
    doc.sent_email_addresses = []
    doc.previous_email_sender = None

    if not recipients:
        recipients = get_recipients(doc, fetched_from_email_account=fetched_from_email_account)

    if not cc:
        cc = get_cc(doc, recipients, fetched_from_email_account=fetched_from_email_account)

    if fetched_from_email_account:
        # email was already sent to the original recipient by the sender's email service
        original_recipients, recipients = recipients, []

        # send email to the sender of the previous email in the thread which this email is a reply to
        if doc.previous_email_sender:
            recipients.append(doc.previous_email_sender)

        # cc that was received in the email
        original_cc = split_emails(doc.cc)

        # don't cc to people who already received the mail from sender's email service
        cc = list(set(cc) - set(original_cc) - set(original_recipients))

    return recipients, cc
def prepare_to_notify(doc, print_html=None, print_format=None, attachments=None):
    """Prepare to make multipart MIME Email.

    Resolves sender/accounts and builds ``doc.attachments``.

    :param print_html: Send given value as HTML attachment.
    :param print_format: Attach print format of parent document.
    :param attachments: Filenames or attachment dicts; may be a JSON string.
    """
    if print_format:
        doc.content += get_attach_link(doc, print_format)

    set_incoming_outgoing_accounts(doc)

    if not doc.sender:
        doc.sender = doc.outgoing_email_account.email_id

    if not doc.sender_full_name:
        doc.sender_full_name = doc.outgoing_email_account.name or _("Notification")

    if doc.sender:
        # combine for sending to get the format 'Jane <jane@example.com>'
        doc.sender = formataddr([doc.sender_full_name, doc.sender])

    doc.attachments = []

    if print_html or print_format:
        doc.attachments.append(frappe.attach_print(doc.reference_doctype, doc.reference_name,
            print_format=print_format, html=print_html))

    if attachments:
        # NOTE: basestring — this module targets Python 2
        if isinstance(attachments, basestring):
            attachments = json.loads(attachments)

        for a in attachments:
            if isinstance(a, basestring):
                # is it a filename?
                try:
                    file = get_file(a)
                    doc.attachments.append({"fname": file[0], "fcontent": file[1]})
                except IOError:
                    frappe.throw(_("Unable to find attachment {0}").format(a))
            else:
                doc.attachments.append(a)
def set_incoming_outgoing_accounts(doc):
    """Fill ``doc.incoming_email_account`` / ``doc.outgoing_email_account``.

    Prefers Email Accounts bound to the reference doctype (``append_to``),
    falling back to the default incoming/outgoing accounts.
    """
    doc.incoming_email_account = doc.outgoing_email_account = None

    if doc.reference_doctype:
        doc.incoming_email_account = frappe.db.get_value("Email Account",
            {"append_to": doc.reference_doctype, "enable_incoming": 1}, "email_id")
        doc.outgoing_email_account = frappe.db.get_value("Email Account",
            {"append_to": doc.reference_doctype, "enable_outgoing": 1},
            ["email_id", "always_use_account_email_id_as_sender", "name"], as_dict=True)

    if not doc.incoming_email_account:
        doc.incoming_email_account = frappe.db.get_value("Email Account",
            {"default_incoming": 1, "enable_incoming": 1}, "email_id")

    if not doc.outgoing_email_account:
        # fall back to an empty frappe._dict so attribute access stays safe
        doc.outgoing_email_account = frappe.db.get_value("Email Account",
            {"default_outgoing": 1, "enable_outgoing": 1},
            ["email_id", "always_use_account_email_id_as_sender", "name"], as_dict=True) or frappe._dict()
def get_recipients(doc, fetched_from_email_account=False):
    """Build a list of email addresses for To."""
    # [EDGE CASE] doc.recipients can be None when an email is sent as BCC
    recipients = split_emails(doc.recipients)

    if fetched_from_email_account and doc.in_reply_to:
        # add sender of previous reply
        doc.previous_email_sender = frappe.db.get_value("Communication", doc.in_reply_to, "sender")
        recipients.append(doc.previous_email_sender)

    if recipients:
        # exclude addresses belonging to our own incoming Email Accounts
        # (both the email_id and login_id variants)
        exclude = [d[0] for d in
            frappe.db.get_all("Email Account", ["email_id"], {"enable_incoming": 1}, as_list=True)]
        exclude += [d[0] for d in
            frappe.db.get_all("Email Account", ["login_id"], {"enable_incoming": 1}, as_list=True)
            if d[0]]

        recipients = filter_email_list(doc, recipients, exclude)

    return recipients
def get_cc(doc, recipients=None, fetched_from_email_account=False):
    """Build a list of email addresses for CC.

    :param doc: Communication document (reads ``doc.cc`` / ``doc.sender``).
    :param recipients: Addresses already on the To list; excluded from CC
        so nobody is mailed twice.
    :param fetched_from_email_account: True when the email was pulled from
        a mailbox; followers are then added and the sender excluded.
    """
    # get a copy of CC list
    cc = split_emails(doc.cc)

    if doc.reference_doctype and doc.reference_name:
        if fetched_from_email_account:
            # if it is a fetched email, add follows to CC
            cc.append(get_owner_email(doc))
            cc += get_assignees(doc)

    if getattr(doc, "send_me_a_copy", False) and doc.sender not in cc:
        cc.append(doc.sender)

    if cc:
        # exclude email accounts, unfollows, recipients and unsubscribes
        exclude = [d[0] for d in
            frappe.db.get_all("Email Account", ["email_id"], {"enable_incoming": 1}, as_list=True)]
        exclude += [d[0] for d in
            frappe.db.get_all("Email Account", ["login_id"], {"enable_incoming": 1}, as_list=True)
            if d[0]]
        exclude += [d[0] for d in frappe.db.get_all("User", ["name"], {"thread_notify": 0}, as_list=True)]
        # BUGFIX: guard against recipients=None (its default, e.g. for a
        # BCC-only email), which previously raised a TypeError here.
        exclude += [(parseaddr(email)[1] or "").lower() for email in (recipients or [])]

        if fetched_from_email_account:
            # exclude sender when pulling email
            exclude += [parseaddr(doc.sender)[1]]

        if doc.reference_doctype and doc.reference_name:
            exclude += [d[0] for d in frappe.db.get_all("Email Unsubscribe", ["email"],
                {"reference_doctype": doc.reference_doctype, "reference_name": doc.reference_name}, as_list=True)]

        cc = filter_email_list(doc, cc, exclude, is_cc=True)

    return cc
def filter_email_list(doc, email_list, exclude, is_cc=False):
# temp variables
filtered = []
email_address_list = []
for email in list(set(email_list)):
email_address = (parseaddr(email)[1] or "").lower()
if not email_address:
continue
# this will be used to eventually find email addresses that aren't sent to
doc.all_email_addresses.append(email_address)
if (email in exclude) or (email_address in exclude):
continue
if is_cc:
is_user_enabled = frappe.db.get_value("User", email_address, "enabled")
if is_user_enabled==0:
# don't send to disabled users
continue
# make sure of case-insensitive uniqueness of email address
if email_address not in email_address_list:
# append the full email i.e. "Human <human@example.com>"
filtered.append(email)
email_address_list.append(email_address)
doc.sent_email_addresses.extend(email_address_list)
return filtered
def get_owner_email(doc):
    """Return the formatted email of the parent document's owner."""
    owner = doc.get_parent_doc().owner
    return get_formatted_email(owner) or owner
def get_assignees(doc):
    """Return formatted emails of users with an open ToDo on the document."""
    todos = frappe.db.get_all("ToDo", filters={
        "reference_type": doc.reference_doctype,
        "reference_name": doc.reference_name,
        "status": "Open"
    }, fields=["owner"])
    return [get_formatted_email(todo.owner) or todo.owner for todo in todos]
def get_attach_link(doc, print_format):
    """Returns public link for the attachment via `templates/emails/print_link.html`."""
    return frappe.get_template("templates/emails/print_link.html").render({
        "url": get_url(),
        "doctype": doc.reference_doctype,
        "name": doc.reference_name,
        "print_format": print_format,
        # the signature authorizes access to the print view from the link
        "key": doc.get_parent_doc().get_signature()
    })
def sendmail(communication_name, print_html=None, print_format=None, attachments=None,
    recipients=None, cc=None, lang=None, session=None):
    """Background job: deliver a Communication via its ``_notify`` method.

    Restores the sender's language and session, then retries up to three
    times on MySQL deadlocks (error code 1213). Any other failure is
    logged with its full payload and re-raised.

    NOTE: uses Python 2-only syntax (``except X, e``, ``xrange``).
    """
    try:
        if lang:
            frappe.local.lang = lang

        if session:
            # hack to enable access to private files in PDF
            session['data'] = frappe._dict(session['data'])
            frappe.local.session.update(session)

        # upto 3 retries
        # NOTE(review): if all three attempts deadlock, the loop exits
        # silently without sending — confirm whether a final raise is wanted.
        for i in xrange(3):
            try:
                communication = frappe.get_doc("Communication", communication_name)
                communication._notify(print_html=print_html, print_format=print_format, attachments=attachments,
                    recipients=recipients, cc=cc)
            except MySQLdb.OperationalError, e:
                # deadlock, try again
                if e.args[0]==1213:
                    frappe.db.rollback()
                    time.sleep(1)
                    continue
                else:
                    raise
            else:
                break

    except:
        # log the full payload before re-raising so the failure can be replayed
        traceback = log("frappe.core.doctype.communication.email.sendmail", frappe.as_json({
            "communication_name": communication_name,
            "print_html": print_html,
            "print_format": print_format,
            "attachments": attachments,
            "recipients": recipients,
            "cc": cc,
            "lang": lang
        }))
        frappe.logger(__name__).error(traceback)
        raise
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
""" Module for super cell.
"""
from copy import deepcopy
import numpy as np
import catplot.descriptors as dc
from catplot.grid_components import extract_plane
class SuperCell(object):
    """ Abstract base class for supercell.

    Subclasses must assign ``self.cell_vectors`` *before* calling this
    initializer: it immediately maps every node/edge/arrow coordinate
    through the basis.
    """
    def __init__(self, nodes, edges, arrows=None):
        self.nodes = nodes
        self.edges = edges
        self.arrows = [] if arrows is None else arrows

        # Change all coordinates in nodes and edges to Cartesian coordinates.
        for node in self.nodes:
            node.coordinate = np.dot(self.cell_vectors.T, node.coordinate)
        for edge in self.edges:
            edge.start = np.dot(self.cell_vectors.T, edge.start)
            edge.end = np.dot(self.cell_vectors.T, edge.end)
        for arrow in self.arrows:
            arrow.start = np.dot(self.cell_vectors.T, arrow.start)
            arrow.end = np.dot(self.cell_vectors.T, arrow.end)

    def __add__(self, other):
        """ Merge two supercells that share the same basis into a new one.

        Raises ValueError when the cell vectors differ.
        """
        if not np.array_equal(self.cell_vectors, other.cell_vectors):
            raise ValueError("Can't add two supercell with different cell vectors")

        nodes = self.nodes + other.nodes
        edges = self.edges + other.edges
        arrows = self.arrows + other.arrows

        # NOTE: here the cell_vectors will not be passed in,
        # or the coordinate mapping would be done repeatedly.
        new_supercell = self.__class__(nodes, edges, arrows)
        new_supercell.cell_vectors = self.cell_vectors

        return new_supercell

    def set_nodes_attr(self, name, value):
        """ Set an attribute to *value* on every node in the supercell.
        """
        for node in self.nodes:
            setattr(node, name, value)

    def set_edges_attr(self, name, value):
        """ Set an attribute to *value* on every edge in the supercell.
        """
        for edge in self.edges:
            setattr(edge, name, value)
class SuperCell2D(SuperCell):
    """ 2D supercell for a lattice grid.

    Parameters:
    -----------
    nodes: Node2D object list, all nodes in supercell.
    edges: Edge2D object list, all edges in supercell.
    arrows: Arrow2D object list, all arrows in supercell, default is [].
    cell_vectors: 2D-like array,
        the basis vectors for the supercell, default is [[1.0, 0.0], [0.0, 1.0]].
    """
    cell_vectors = dc.Basis2D("cell_vectors")

    def __init__(self, nodes, edges, arrows=None, cell_vectors=None):
        # The basis must be assigned before the base initializer runs,
        # because it maps all coordinates through cell_vectors.
        if cell_vectors is None:
            self.cell_vectors = np.array([[1.0, 0.0],
                                          [0.0, 1.0]])
        else:
            self.cell_vectors = np.array(cell_vectors)
        # BUGFIX: was ``super(self.__class__, self)``, which recurses
        # infinitely if a subclass inherits this __init__ unchanged.
        super(SuperCell2D, self).__init__(nodes, edges, arrows)

    def move(self, move_vector):
        """ Move the super cell along the move vector.

        Translates every node, edge and arrow in place; returns self.
        """
        for node in self.nodes:
            node.move(move_vector)
        for edge in self.edges:
            edge.move(move_vector)
        for arrow in self.arrows:
            arrow.move(move_vector)
        return self

    def clone(self, relative_position):
        """ Clone a new 2D supercell to a specific position.

        Parameters:
        -----------
        relative_position: list of two float,
            the position of the cloned supercell relative to the original.
        """
        new_supercell = deepcopy(self)
        new_supercell.move(relative_position)
        return new_supercell

    def expand(self, nx, ny, cell_vectors=None):
        """ Expand the supercell to a larger supercell.

        Parameters:
        -----------
        nx : int, the expansion number along x axis.
        ny : int, the expansion number along y axis.
        cell_vectors: 2x2 array, cell vectors for supercell expansion,
            default value is the same as cell vectors of this supercell.
        """
        if cell_vectors is None:
            cell_vectors = self.cell_vectors
        cell_vectors = np.array(cell_vectors)

        # Expand along x axis first ...
        x_expanded_supercell = self
        for i in range(1, nx):
            move_vector = cell_vectors[0, :]*i
            x_expanded_supercell += self.clone(move_vector)

        # ... then replicate the resulting row along y axis.
        expanded_supercell = x_expanded_supercell
        for j in range(1, ny):
            move_vector = cell_vectors[1, :]*j
            expanded_supercell += x_expanded_supercell.clone(move_vector)

        return expanded_supercell

    @extract_plane
    def to3d(self, **kwargs):
        """ Map a 2D supercell to 3D space.

        The target plane is injected by the ``extract_plane`` decorator
        via ``kwargs["plane"]``.
        """
        plane = kwargs["plane"]
        nodes = [n.to3d(plane=plane) for n in self.nodes]
        edges = [e.to3d(plane=plane) for e in self.edges]
        cell_vectors = kwargs.pop("cell_vectors", None)
        return SuperCell3D(nodes, edges, cell_vectors=cell_vectors)
class SuperCell3D(SuperCell2D):
    """ 3D supercell in a 3D lattice grid.

    Parameters:
    -----------
    nodes: Node3D object list, all nodes in supercell.
    edges: Edge3D object list, all edges in supercell.
    arrows: (NOT SUPPORT) Arrow3D object list, all arrows in supercell, default is [].
    cell_vectors: 3D-like array,
        the basis vectors for the supercell, default is [[1.0, 0.0, 0.0],
                                                         [0.0, 1.0, 0.0],
                                                         [0.0, 0.0, 1.0]].
    """
    cell_vectors = dc.Basis3D("cell_vectors")

    def __init__(self, nodes, edges, arrows=None, cell_vectors=None):
        # Default to the 3D identity basis.
        if cell_vectors is None:
            self.cell_vectors = np.array([[1.0, 0.0, 0.0],
                                          [0.0, 1.0, 0.0],
                                          [0.0, 0.0, 1.0]])
        else:
            self.cell_vectors = np.array(cell_vectors)
        # Deliberately skips SuperCell2D.__init__ and calls the SuperCell
        # base initializer directly (the 2D initializer would assign a
        # 2D basis).
        super(SuperCell2D, self).__init__(nodes, edges, arrows)

    @staticmethod
    @extract_plane
    def from2d(supercell2d, **kwargs):
        """ Construct 3D supercell from a 2D supercell.
        """
        return supercell2d.to3d(**kwargs)

    def expand(self, nx, ny, nz, cell_vectors=None):
        """ Expand the supercell to a larger one in 3D grid.

        Parameters:
        -----------
        nx : int, the expansion number along x axis.
        ny : int, the expansion number along y axis.
        nz : int, the expansion number along z axis.
        cell_vectors: 3x3 array, cell vectors for supercell expansion
            default value is the same as cell vectors of this supercell.
        """
        if cell_vectors is None:
            cell_vectors = self.cell_vectors
        cell_vectors = np.array(cell_vectors)

        # Expand along x axis.
        x_expanded_supercell = self
        for i in range(1, nx):
            move_vector = cell_vectors[0, :]*i
            x_expanded_supercell += self.clone(move_vector)

        # Replicate the x-row along y axis.
        y_expanded_supercell = x_expanded_supercell
        for j in range(1, ny):
            move_vector = cell_vectors[1, :]*j
            y_expanded_supercell += x_expanded_supercell.clone(move_vector)

        # Replicate the xy-layer along z axis.
        expanded_supercell = y_expanded_supercell
        for k in range(1, nz):
            move_vector = cell_vectors[2, :]*k
            expanded_supercell += y_expanded_supercell.clone(move_vector)

        return expanded_supercell
| |
import tkinter as tk
from tkinter import Button
import time
import numpy as np
from PIL import ImageTk, Image
# Use Pillow's PhotoImage so resized PNG sprites can be drawn on the canvas.
PhotoImage = ImageTk.PhotoImage
UNIT = 100  # pixels per grid cell
HEIGHT = 5  # grid height
WIDTH = 5  # grid width
TRANSITION_PROB = 1  # deterministic transitions
POSSIBLE_ACTIONS = [0, 1, 2, 3]  # up, down, left, right
ACTIONS = [(-1, 0), (1, 0), (0, -1), (0, 1)]  # actions in coordinates
REWARDS = []  # NOTE(review): appears unused in this module
class GraphicDisplay(tk.Tk):
    """Tk window visualizing policy iteration on a 5x5 grid world.

    Buttons drive the agent's evaluate/improve steps; the canvas shows
    value labels, greedy-policy arrows and the moving agent rectangle.
    """

    def __init__(self, agent):
        super(GraphicDisplay, self).__init__()
        self.title('Policy Iteration')
        self.geometry('{0}x{1}'.format(HEIGHT * UNIT, HEIGHT * UNIT + 50))
        self.texts = []   # canvas ids of value/reward text labels
        self.arrows = []  # canvas ids of policy arrows
        self.env = Env()
        self.agent = agent
        self.evaluation_count = 0
        self.improvement_count = 0
        self.is_moving = 0  # blocks reset while the agent is animating
        (self.up, self.down, self.left, self.right), self.shapes = self.load_images()
        self.canvas = self._build_canvas()
        self.text_reward(2, 2, "R : 1.0")
        self.text_reward(1, 2, "R : -1.0")
        self.text_reward(2, 1, "R : -1.0")

    def _build_canvas(self):
        """Create the canvas, the four control buttons, grid lines and sprites."""
        canvas = tk.Canvas(self, bg='white',
                           height=HEIGHT * UNIT,
                           width=WIDTH * UNIT)
        # buttons
        iteration_button = Button(self, text="Evaluate",
                                  command=self.evaluate_policy)
        iteration_button.configure(width=10, activebackground="#33B5E5")
        canvas.create_window(WIDTH * UNIT * 0.13, HEIGHT * UNIT + 10,
                             window=iteration_button)
        policy_button = Button(self, text="Improve",
                               command=self.improve_policy)
        policy_button.configure(width=10, activebackground="#33B5E5")
        canvas.create_window(WIDTH * UNIT * 0.37, HEIGHT * UNIT + 10,
                             window=policy_button)
        policy_button = Button(self, text="move", command=self.move_by_policy)
        policy_button.configure(width=10, activebackground="#33B5E5")
        canvas.create_window(WIDTH * UNIT * 0.62, HEIGHT * UNIT + 10,
                             window=policy_button)
        policy_button = Button(self, text="reset", command=self.reset)
        policy_button.configure(width=10, activebackground="#33B5E5")
        canvas.create_window(WIDTH * UNIT * 0.87, HEIGHT * UNIT + 10,
                             window=policy_button)
        # create grid lines
        for col in range(0, WIDTH * UNIT, UNIT):
            x0, y0, x1, y1 = col, 0, col, HEIGHT * UNIT
            canvas.create_line(x0, y0, x1, y1)
        for row in range(0, HEIGHT * UNIT, UNIT):
            x0, y0, x1, y1 = 0, row, HEIGHT * UNIT, row
            canvas.create_line(x0, y0, x1, y1)
        # add images to canvas: agent rectangle, two triangles, goal circle
        self.rectangle = canvas.create_image(50, 50, image=self.shapes[0])
        canvas.create_image(250, 150, image=self.shapes[1])
        canvas.create_image(150, 250, image=self.shapes[1])
        canvas.create_image(250, 250, image=self.shapes[2])
        canvas.pack()
        return canvas

    def load_images(self):
        """Load sprites; returns ((up, down, left, right), (rectangle, triangle, circle))."""
        up = PhotoImage(Image.open("../img/up.png").resize((13, 13)))
        right = PhotoImage(Image.open("../img/right.png").resize((13, 13)))
        left = PhotoImage(Image.open("../img/left.png").resize((13, 13)))
        down = PhotoImage(Image.open("../img/down.png").resize((13, 13)))
        rectangle = PhotoImage(Image.open("../img/rectangle.png").resize((65, 65)))
        triangle = PhotoImage(Image.open("../img/triangle.png").resize((65, 65)))
        circle = PhotoImage(Image.open("../img/circle.png").resize((65, 65)))
        return (up, down, left, right), (rectangle, triangle, circle)

    def reset(self):
        """Clear values/arrows, reset the agent's tables and reposition it."""
        if self.is_moving == 0:
            self.evaluation_count = 0
            self.improvement_count = 0
            for i in self.texts:
                self.canvas.delete(i)
            for i in self.arrows:
                self.canvas.delete(i)
            self.agent.value_table = [[0.0] * WIDTH for _ in range(HEIGHT)]
            # distinct inner lists per cell (the original `[[...]] * WIDTH`
            # aliased one list across a whole row)
            self.agent.policy_table = [[[0.25, 0.25, 0.25, 0.25]
                                        for _ in range(WIDTH)]
                                       for _ in range(HEIGHT)]
            # BUGFIX: was ``self.policy_table`` (no such attribute on the
            # window → AttributeError); the policy lives on the agent.
            self.agent.policy_table[2][2] = []
            # return the rectangle to the top-left cell
            x, y = self.canvas.coords(self.rectangle)
            self.canvas.move(self.rectangle, UNIT / 2 - x, UNIT / 2 - y)

    def text_value(self, row, col, contents, font='Helvetica', size=10,
                   style='normal', anchor="nw"):
        """Draw a value label inside cell (row, col); id is kept for reset."""
        # offsets place the label toward the lower-right of the cell
        x, y = 70 + (UNIT * col), 85 + (UNIT * row)
        font = (font, str(size), style)
        text = self.canvas.create_text(x, y, fill="black", text=contents,
                                       font=font, anchor=anchor)
        self.texts.append(text)

    def text_reward(self, row, col, contents, font='Helvetica', size=10,
                    style='normal', anchor="nw"):
        """Draw a reward label at the top-left corner of cell (row, col)."""
        x, y = 5 + (UNIT * col), 5 + (UNIT * row)
        font = (font, str(size), style)
        text = self.canvas.create_text(x, y, fill="black", text=contents,
                                       font=font, anchor=anchor)
        self.texts.append(text)

    def rectangle_move(self, action):
        """Move the agent rectangle one cell in the given direction, clamped to the grid."""
        base_action = np.array([0, 0])
        location = self.find_rectangle()
        self.render()
        if action == 0 and location[0] > 0:  # up
            base_action[1] -= UNIT
        elif action == 1 and location[0] < HEIGHT - 1:  # down
            base_action[1] += UNIT
        elif action == 2 and location[1] > 0:  # left
            base_action[0] -= UNIT
        elif action == 3 and location[1] < WIDTH - 1:  # right
            base_action[0] += UNIT
        # move agent
        self.canvas.move(self.rectangle, base_action[0], base_action[1])

    def find_rectangle(self):
        """Return the agent's (row, col) grid position from canvas coordinates."""
        temp = self.canvas.coords(self.rectangle)
        x = (temp[0] / 100) - 0.5
        y = (temp[1] / 100) - 0.5
        return int(y), int(x)

    def move_by_policy(self):
        """Walk the agent along the current policy until the terminal cell."""
        if self.improvement_count != 0 and self.is_moving != 1:
            self.is_moving = 1

            x, y = self.canvas.coords(self.rectangle)
            self.canvas.move(self.rectangle, UNIT / 2 - x, UNIT / 2 - y)

            x, y = self.find_rectangle()
            # the terminal cell's policy is the empty list, ending the walk
            while len(self.agent.policy_table[x][y]) != 0:
                # NOTE: rectangle_move(...) executes immediately; after(100)
                # then merely pauses 100 ms between steps.
                self.after(100,
                           self.rectangle_move(self.agent.get_action([x, y])))
                x, y = self.find_rectangle()
            self.is_moving = 0

    def draw_one_arrow(self, col, row, policy):
        """Draw an arrow for every action with non-zero probability in *policy*."""
        if col == 2 and row == 2:
            # terminal cell gets no arrows
            return

        if policy[0] > 0:  # up
            origin_x, origin_y = 50 + (UNIT * row), 10 + (UNIT * col)
            self.arrows.append(self.canvas.create_image(origin_x, origin_y,
                                                        image=self.up))
        if policy[1] > 0:  # down
            origin_x, origin_y = 50 + (UNIT * row), 90 + (UNIT * col)
            self.arrows.append(self.canvas.create_image(origin_x, origin_y,
                                                        image=self.down))
        if policy[2] > 0:  # left
            origin_x, origin_y = 10 + (UNIT * row), 50 + (UNIT * col)
            self.arrows.append(self.canvas.create_image(origin_x, origin_y,
                                                        image=self.left))
        if policy[3] > 0:  # right
            origin_x, origin_y = 90 + (UNIT * row), 50 + (UNIT * col)
            self.arrows.append(self.canvas.create_image(origin_x, origin_y,
                                                        image=self.right))

    def draw_from_policy(self, policy_table):
        """Redraw the policy arrows for every cell."""
        for i in range(HEIGHT):
            for j in range(WIDTH):
                self.draw_one_arrow(i, j, policy_table[i][j])

    def print_value_table(self, value_table):
        """Write the current state values into the grid cells."""
        for i in range(WIDTH):
            for j in range(HEIGHT):
                self.text_value(i, j, value_table[i][j])

    def render(self):
        """Refresh the window; the short sleep makes the motion visible."""
        time.sleep(0.1)
        self.canvas.tag_raise(self.rectangle)
        self.update()

    def evaluate_policy(self):
        """Run one policy-evaluation sweep and refresh the value labels."""
        self.evaluation_count += 1
        for i in self.texts:
            self.canvas.delete(i)
        self.agent.policy_evaluation()
        self.print_value_table(self.agent.value_table)

    def improve_policy(self):
        """Run one policy-improvement step and redraw the policy arrows."""
        self.improvement_count += 1
        for i in self.arrows:
            self.canvas.delete(i)
        self.agent.policy_improvement()
        self.draw_from_policy(self.agent.policy_table)
class Env:
    """Deterministic 5x5 grid-world MDP used by the policy-iteration demo.

    States are [row, col] pairs; rewards live at the goal (circle) and
    the two obstacle (triangle) cells.
    """

    def __init__(self):
        self.transition_probability = TRANSITION_PROB
        self.width = WIDTH
        self.height = HEIGHT
        # reward[row][col]: rows index HEIGHT, columns index WIDTH
        self.reward = [[0] * WIDTH for _ in range(HEIGHT)]
        self.possible_actions = POSSIBLE_ACTIONS
        self.reward[2][2] = 1  # reward 1 for circle
        self.reward[1][2] = -1  # reward -1 for triangle
        self.reward[2][1] = -1  # reward -1 for triangle
        self.all_state = []

        for x in range(WIDTH):
            for y in range(HEIGHT):
                state = [x, y]
                self.all_state.append(state)

    def get_reward(self, state, action):
        """Reward received after taking *action* from *state*."""
        next_state = self.state_after_action(state, action)
        return self.reward[next_state[0]][next_state[1]]

    def state_after_action(self, state, action_index):
        """Deterministic successor of *state* under the indexed action."""
        action = ACTIONS[action_index]
        return self.check_boundary([state[0] + action[0], state[1] + action[1]])

    @staticmethod
    def check_boundary(state):
        # BUGFIX: state[0] is the row (bounded by HEIGHT) and state[1] the
        # column (bounded by WIDTH); the original swapped the two constants,
        # which only worked because WIDTH == HEIGHT.
        state[0] = (0 if state[0] < 0 else HEIGHT - 1
                    if state[0] > HEIGHT - 1 else state[0])
        state[1] = (0 if state[1] < 0 else WIDTH - 1
                    if state[1] > WIDTH - 1 else state[1])
        return state

    def get_transition_prob(self, state, action):
        """Transition probability (constant: the dynamics are deterministic)."""
        return self.transition_probability

    def get_all_states(self):
        """Return the list of all [x, y] states."""
        return self.all_state
| |
#!/usr/bin/env python
# Copyright (c) 2014, Paessler AG <support@paessler.com>
# All rights reserved.
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the
# following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions
# and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions
# and the following disclaimer in the documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse
# or promote products derived from this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# PRTG Python Miniprobe
# Miniprobe needs at least Python 2.7 because of "importlib"
# If older python version is used you will have to install "importlib"
# import general modules
import sys
import json
import time
import gc
import logging
import socket
# import own modules
sys.path.append('./')
try:
from miniprobe import MiniProbe
import sensors
import requests
import multiprocessing
except Exception as e:
print(e)
# Implemented for internal testing only. Not for public usage!
# Passing "http" as the first CLI argument switches the probe to plain HTTP.
http = False
if sys.argv[1:] and sys.argv[1] == "http":
    http = True
class Probe(object):
    """PRTG Python MiniProbe main loop.

    Announces itself to the PRTG core server, polls the core for sensor
    tasks, runs each matching sensor in a worker process, collects the
    results from a shared queue and posts them back to the core.
    """

    def __init__(self):
        gc.enable()
        self.mini_probe = MiniProbe(http)
        self.config = self.mini_probe.read_config('./probe.conf')
        # State flags driving the loops in main().
        self.probe_stop = False
        self.announce = False
        self.task = False
        self.has_json_content = False
        # Buffers for core request payloads / responses.
        self.data_request_payload_json = []
        self.task_request_response_json = []
        self.key_sha1 = self.mini_probe.hash_access_key(self.config['key'])
        # Queue the sensor worker processes push their results into.
        self.out_queue = multiprocessing.Queue()
        self.sensor_list = self.mini_probe.get_import_sensors()
        self.announce_json = json.dumps(self.mini_probe.build_announce(self.sensor_list))
        self.announce_data = self.mini_probe.create_parameters(self.config, self.announce_json, 'announce')
        # NOTE(review): the url_* attributes are not referenced inside this
        # class (request_to_core() builds its own URLs) — presumably kept for
        # MiniProbe internals or debugging; verify before removing.
        self.url_announce = self.mini_probe.create_url(self.config, 'announce', http)
        self.url_task = self.mini_probe.create_url(self.config, 'tasks', http)
        self.task_data = self.mini_probe.build_task(self.config)
        self.url_data = self.mini_probe.create_url(self.config, 'data', http)
        self.procs = []
        # Set up debug logging. The config values arrive as strings and are
        # normalized to real booleans here.
        self.logger = logging.getLogger("")
        if self.config['debug'] == "True":
            self.config['debug'] = True
            self.logger.setLevel(logging.DEBUG)
            logging.warning("DEBUG LOGGING HAS BEEN TURNED ON!!")
            logging.getLogger("requests").setLevel(logging.INFO)
        else:
            self.config['debug'] = False
            self.logger.setLevel(logging.INFO)
            logging.info("Debug logging has been turned off!!")
            logging.getLogger("requests").setLevel(logging.WARNING)
        if self.config['cleanmem'] == "True":
            self.config['cleanmem'] = True
        else:
            self.config['cleanmem'] = False

    def send_announce(self):
        """Send the announce request to the core.

        Sets ``self.announce`` on success; on failure (or any request
        exception) waits half a base interval before the caller retries.
        """
        try:
            announce_request = self.mini_probe.request_to_core("announce", self.announce_data, self.config)
            if announce_request.status_code == requests.codes.ok:
                self.announce = True
                logging.info("Announce success.")
                logging.debug("Announce success. Details: HTTP Status %s, Message: %s"
                              % (announce_request.status_code, announce_request.text))
            else:
                logging.info("Announce pending. Trying again in %s seconds"
                             % str(int(self.config['baseinterval']) / 2))
                logging.debug("Announce pending. Details: HTTP Status %s, Message: %s"
                              % (announce_request.status_code, announce_request.text))
                time.sleep(int(self.config['baseinterval']) / 2)
        except Exception as request_exception:
            logging.error(request_exception)
            time.sleep(int(self.config['baseinterval']) / 2)

    def get_tasks(self):
        """Fetch the task list from the core.

        :returns: the response object when it contained a non-empty JSON
            task list (also sets ``self.task`` and
            ``self.has_json_content``), otherwise None — the caller is
            expected to wait and retry.
        """
        self.data_request_payload_json = []
        self.has_json_content = False
        try:
            task_request = self.mini_probe.request_to_core("tasks", self.task_data, self.config)
            try:
                if str(task_request.json()) != "[]":
                    self.has_json_content = True
                    self.task = True
                    logging.info("Task success.")
                    logging.debug("Task success. HTTP Status %s, Message: %s"
                                  % (task_request.status_code, task_request.text))
                    return task_request
                else:
                    logging.info("Task has no JSON content. Trying again in %s seconds"
                                 % (int(self.config['baseinterval']) / 2))
                    logging.debug("Task has no JSON content. Details: HTTP Status %s, Message: %s"
                                  % (task_request.status_code, task_request.text))
                    return None
            except Exception as json_exception:
                # Response body was not parseable JSON.
                logging.info(json_exception)
                logging.info("No JSON. HTTP Status: %s, Message: %s" % (task_request.status_code, task_request.text))
                return None
        except Exception as request_exception:
            logging.error(request_exception)
            logging.error("Exception. Trying again in %s seconds." % str(int(self.config['baseinterval']) / 3))
            return None

    def send_data(self):
        """Post the collected sensor results to the core.

        On success the payload buffer is cleared; on failure the data may
        be dropped (a debug log carries the details).
        """
        try:
            data_request = self.mini_probe.request_to_core("data", json.dumps(self.data_request_payload_json),
                                                           self.config)
            if data_request.status_code == requests.codes.ok:
                logging.info("Data success.")
                logging.debug("Data success. Details: HTTP Status %s, Message: %s"
                              % (data_request.status_code, data_request.text))
                self.data_request_payload_json = []
            else:
                logging.info("Data issue. Current data might be dropped, please turn on debug logging")
                logging.debug("Data issue. Details: HTTP Status %s, Message: %s"
                              % (data_request.status_code, data_request.text))
        except Exception as request_exception:
            logging.error(request_exception)

    def kill_procs(self):
        """Join and terminate finished sensor worker processes."""
        for p in self.procs:
            if not p.is_alive():
                p.join()
                p.terminate()
                del p

    def main(self):
        """Main routine for MiniProbe (Python).

        Announce to the core, then loop forever: fetch tasks, spawn one
        worker process per task, gather results and post them back.
        """
        # Doing some startup logging
        logging.info("PRTG Small Probe '%s' starting on '%s'" % (self.config['name'], socket.gethostname()))
        logging.info("Connecting to PRTG Core Server at %s:%s" % (self.config['server'], self.config['port']))
        while not self.announce:
            self.send_announce()
        while not self.probe_stop:
            self.task = False
            while not self.task:
                task_request = self.get_tasks()
                if not task_request:
                    # BUG FIX: the original fell through after the sleep and
                    # dereferenced task_request.status_code on None, raising
                    # AttributeError. Wait and retry the task request instead.
                    time.sleep(int(self.config['baseinterval']) / 2)
                    gc.collect()
                    continue
                gc.collect()
                if task_request.status_code == requests.codes.ok and self.has_json_content:
                    self.task_request_response_json = task_request.json()
                    logging.debug("JSON response: %s" % self.task_request_response_json)
                    # Split the task list into chunks so at most 'subprocs'
                    # workers run concurrently per chunk.
                    if self.config['subprocs']:
                        json_response_chunks = self.mini_probe.split_json_response(self.task_request_response_json,
                                                                                  self.config['subprocs'])
                    else:
                        json_response_chunks = self.mini_probe.split_json_response(self.task_request_response_json)
                    for element in json_response_chunks:
                        for part in element:
                            logging.debug(part)
                            # Spawn one worker per task whose kind matches an
                            # imported sensor.
                            for sensor in self.sensor_list:
                                if part['kind'] == sensor.get_kind():
                                    p = multiprocessing.Process(target=sensor.get_data, args=(part, self.out_queue),
                                                                name=part['kind'])
                                    self.procs.append(p)
                                    logging.debug("Spawning sensor %s." % p.name)
                                    p.start()
                                else:
                                    pass
                            gc.collect()
                        # Gather one result per task in this chunk from the
                        # worker output queue.
                        try:
                            while len(self.data_request_payload_json) < len(element):
                                out = self.out_queue.get()
                                self.data_request_payload_json.append(out)
                        except Exception as data_queue_exception:
                            logging.error(data_queue_exception)
                            pass
                        self.send_data()
                    # Pace the next poll depending on how many tasks came in.
                    if len(self.task_request_response_json) > 10:
                        time.sleep((int(self.config['baseinterval']) * (9 / len(self.task_request_response_json))))
                    else:
                        time.sleep(int(self.config['baseinterval']) / 2)
                elif task_request.status_code != requests.codes.ok:
                    logging.info("Task issue. Request returning incorrect status code. Turn on debugging for details")
                    logging.debug("Task issue. Details: HTTP Status %s, Message: %s"
                                  % (task_request.status_code, task_request.text))
                else:
                    logging.info("Task has no JSON content. Nothing to do. Waiting for %s seconds."
                                 % str(int(self.config['baseinterval']) / 3))
                    time.sleep(int(self.config['baseinterval']) / 3)
            self.kill_procs()
            gc.collect()
            if self.config['cleanmem']:
                # checking if clean memory option has been chosen during install then call the method to flush mem
                self.mini_probe.clean_mem()
        sys.exit()
# Script entry point: build the probe and run its main loop.
if __name__ == "__main__":
    probe = Probe()
    probe.main()
| |
"""Caches are used for multiple things:
- To speed up asset building. Filter operations every step
of the way can be cached, so that individual parts of a
build that haven't changed can be reused.
- Bundle definitions are cached when a bundle is built so we
can determine whether they have changed and whether a rebuild
is required.
This data is not necessarily all stored in the same cache. The
classes in this module provide the "environment.cache" object, but
are also used in other places.
"""
import os
from os import path
from webassets import six
from webassets.merge import BaseHunk
from webassets.filter import Filter, freezedicts
from webassets.utils import md5_constructor, pickle
__all__ = ('FilesystemCache', 'MemoryCache', 'get_cache',)
def make_hashable(data):
    """Convert ``data`` into something that supports hash().

    This mostly exists to deal with dicts; the other special value
    types used as hash keys (Hunks, Filters) already define a proper
    hash() method themselves.

    Note that for the memory cache the data is not actually hashed.

    See also ``make_md5``.
    """
    return freezedicts(data)
def make_md5(*data):
    """Build an md5 hex digest over ``data``.

    Knows how to deal with ``Hunk`` objects so that their actual
    content ends up in the digest.

    Deliberately conservative: any data type that is not explicitly
    supported raises an exception, because silently un-hashed data has
    caused painful cache debugging sessions in the past.

    md5 is preferred over sha for speed, and collisions are not a real
    concern here; plain hash() however is not good enough.
    """
    def flatten(node):
        # Yield a stream of bytes chunks representing ``node``.
        if isinstance(node, (tuple, list)):
            for entry in node:
                for chunk in flatten(entry):
                    yield chunk
        elif isinstance(node, dict):
            # Sort keys so the digest is order-independent.
            for key in sorted(node.keys()):
                for chunk in flatten(key):
                    yield chunk
                for chunk in flatten(node[key]):
                    yield chunk
        elif isinstance(node, BaseHunk):
            yield node.data().encode('utf-8')
        elif isinstance(node, Filter):
            yield str(hash(node)).encode('utf-8')
        elif isinstance(node, int):
            yield str(node).encode('utf-8')
        elif isinstance(node, six.text_type):
            yield node.encode('utf-8')
        elif isinstance(node, six.binary_type):
            yield node
        else:
            raise ValueError('Cannot MD5 type %s' % type(node))
    digest = md5_constructor()
    for chunk in flatten(data):
        digest.update(chunk)
    return digest.hexdigest()
def safe_unpickle(string):
    """Unpickle ``string``, or return ``None`` if that fails.

    A corrupt or truncated cache entry is treated as a cache miss
    rather than an error.
    """
    # NOTE: unpickling can execute arbitrary callables recorded in the
    # stream — only feed this data from the trusted local cache.
    try:
        return pickle.loads(string)
    except Exception:
        # A bare "except:" here would also swallow SystemExit and
        # KeyboardInterrupt; Exception covers every unpickling failure
        # (UnpicklingError, EOFError, AttributeError, ValueError, ...).
        return None
class BaseCache(object):
    """Abstract base class for cache backends.

    Cache keys must be values the Python hash() function can handle;
    cache values may be strings, or anything that can be pickled.

    Because one cache serves multiple purposes, all webassets-internal
    code tags its keys with an id, like so::

        key = ("tag", actual_key)

    A single cache instance may only safely be used with one
    Environment.
    """

    def get(self, key):
        """Return the cached contents for ``key``, or False."""
        raise NotImplementedError()

    def set(self, key, value):
        """Store ``value`` under ``key``."""
        raise NotImplementedError()
class MemoryCache(BaseCache):
    """Caches values in the memory of the current process.

    WARNING: Do NOT use this in a production environment, where you
    are likely going to have multiple processes serving the same app!

    Keys are stored as-is rather than being passed through hash()
    (which would make a difference:
    http://stackoverflow.com/a/9022664/15677); keeping the original
    value around is simply nicer while debugging.
    """

    def __init__(self, capacity):
        # Maximum number of entries before the oldest are evicted.
        self.capacity = capacity
        # Key order, oldest write first; drives eviction.
        self.keys = []
        self.cache = {}

    def __eq__(self, other):
        """Compare equal to the config values that would instantiate
        this cache type, or to the instance itself.
        """
        if False == other:
            return True
        if None == other:
            return True
        return id(self) == id(other)

    def get(self, key):
        return self.cache.get(make_hashable(key), None)

    def set(self, key, value):
        key = make_hashable(key)
        self.cache[key] = value
        # Re-append the key so it counts as the most recent write.
        if key in self.keys:
            self.keys.remove(key)
        self.keys.append(key)
        # Evict the oldest entries once capacity is exceeded.
        overflow = len(self.keys) - self.capacity
        if overflow > 0:
            evicted = self.keys[:overflow]
            self.keys = self.keys[overflow:]
            for stale in evicted:
                del self.cache[stale]
class FilesystemCache(BaseCache):
    """Uses a temporary directory on the disk.

    Each entry is stored in its own file, named by an md5 over the
    cache format version and the key.
    """

    V = 2  # We have changed the cache format once

    def __init__(self, directory):
        self.directory = directory

    def __eq__(self, other):
        """Return equality with the config values
        that instantiate this instance.
        """
        return True == other or \
               self.directory == other or \
               id(self) == id(other)

    def _filename(self, key):
        # Including the format version V in the hash makes a webassets
        # upgrade invalidate old entries instead of misreading them.
        return path.join(self.directory, '%s' % make_md5(self.V, key))

    def get(self, key):
        """Return the unpickled entry for ``key``, or None when the
        entry is missing or unreadable.
        """
        filename = self._filename(key)
        if not path.exists(filename):
            return None
        # "with" closes the handle even if read() raises (the original
        # used an equivalent but noisier try/finally).
        with open(filename, 'rb') as f:
            result = f.read()
        return safe_unpickle(result)

    def set(self, key, data):
        """Pickle ``data`` into the entry file for ``key``."""
        with open(self._filename(key), 'wb') as f:
            f.write(pickle.dumps(data))
def get_cache(option, env):
    """Return a cache instance based on ``option``."""
    if not option:
        return None
    # Already a cache instance, or a cache class to instantiate?
    if isinstance(option, BaseCache):
        return option
    if isinstance(option, type) and issubclass(option, BaseCache):
        return option()
    if option is True:
        # ``True`` means: use the default directory, created on demand.
        directory = path.join(env.directory, '.webassets-cache')
        if not path.exists(directory):
            os.makedirs(directory)
    else:
        directory = option
    return FilesystemCache(directory)
| |
# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Provides common functionality for integrated unit tests
"""
import collections
import random
import string
import time
from oslo_log import log as logging
from oslo_utils.fixture import uuidsentinel as uuids
import nova.conf
from nova import context
from nova.db import api as db
import nova.image.glance
from nova import objects
from nova import test
from nova.tests import fixtures as nova_fixtures
from nova.tests.functional.api import client as api_client
from nova.tests.unit import cast_as_call
from nova.tests.unit import fake_notifier
import nova.tests.unit.image.fake
from nova.tests.unit import policy_fixture
from nova.virt import fake
# Global nova configuration object and module-level logger.
CONF = nova.conf.CONF
LOG = logging.getLogger(__name__)
def generate_random_alphanumeric(length):
    """Return a random string of uppercase letters and digits of the
    specified length.
    """
    alphabet = string.ascii_uppercase + string.digits
    picks = [random.choice(alphabet) for _x in range(length)]
    return ''.join(picks)
def generate_random_numeric(length):
    """Return a random string of digits of the specified length."""
    picks = [random.choice(string.digits) for _x in range(length)]
    return ''.join(picks)
def generate_new_element(items, prefix, numeric=False):
    """Return ``prefix`` plus a random 8-character suffix that does not
    collide with any entry in ``items``.
    """
    while True:
        # Same character pools as generate_random_numeric /
        # generate_random_alphanumeric, inlined here.
        pool = string.digits if numeric else (string.ascii_uppercase +
                                              string.digits)
        candidate = prefix + ''.join(random.choice(pool)
                                     for _x in range(8))
        if candidate not in items:
            return candidate
        LOG.debug("Random collision on %s", candidate)
class _IntegratedTestBase(test.TestCase):
    """Base class for functional tests that run a full set of nova
    services (conductor, scheduler, compute, network/neutron) behind a
    real API fixture.

    Subclasses provide ``api_major_version`` and the
    ``_image_ref_parameter`` / ``_flavor_ref_parameter`` request-body
    key names referenced below (not defined in this class — TODO
    confirm against the concrete subclasses).
    """

    REQUIRES_LOCKING = True

    # When True, self.api points at the admin API endpoint.
    ADMIN_API = False

    # Override this in subclasses which use the NeutronFixture. New tests
    # should rely on Neutron since nova-network is deprecated. The default
    # value of False here is only temporary while we update the existing
    # functional tests to use Neutron.
    USE_NEUTRON = False

    def setUp(self):
        """Stub the image service, set up placement and start all
        services plus the API fixture.
        """
        super(_IntegratedTestBase, self).setUp()
        # TODO(mriedem): Fix the functional tests to work with Neutron.
        self.flags(use_neutron=self.USE_NEUTRON)
        nova.tests.unit.image.fake.stub_out_image_service(self)
        # Make RPC casts synchronous so test actions complete in-line.
        self.useFixture(cast_as_call.CastAsCall(self))
        self.useFixture(nova_fixtures.Database(database='placement'))
        placement = self.useFixture(nova_fixtures.PlacementFixture())
        self.placement_api = placement.api
        self._setup_services()
        self.addCleanup(nova.tests.unit.image.fake.FakeImageService_reset)

    def _setup_compute_service(self):
        """Start and return the compute service (subclass hook)."""
        return self.start_service('compute')

    def _setup_scheduler_service(self):
        """Start and return the scheduler service (subclass hook)."""
        return self.start_service('scheduler')

    def _setup_services(self):
        """Start conductor, consoleauth, networking, scheduler, compute
        and the OSAPI fixture, and wire up ``self.api``.
        """
        # NOTE(danms): Set the global MQ connection to that of our first cell
        # for any cells-ignorant code. Normally this is defaulted in the tests
        # which will result in us not doing the right thing.
        if 'cell1' in self.cell_mappings:
            self.flags(transport_url=self.cell_mappings['cell1'].transport_url)
        self.conductor = self.start_service('conductor')
        self.consoleauth = self.start_service('consoleauth')
        if self.USE_NEUTRON:
            self.neutron = self.useFixture(nova_fixtures.NeutronFixture(self))
        else:
            self.network = self.start_service('network',
                                              manager=CONF.network_manager)
        self.scheduler = self._setup_scheduler_service()
        self.compute = self._setup_compute_service()
        self.api_fixture = self.useFixture(
            nova_fixtures.OSAPIFixture(self.api_major_version))
        # if the class needs to run as admin, make the api endpoint
        # the admin, otherwise it's safer to run as non admin user.
        if self.ADMIN_API:
            self.api = self.api_fixture.admin_api
        else:
            self.api = self.api_fixture.api
        if hasattr(self, 'microversion'):
            self.api.microversion = self.microversion

    def get_unused_server_name(self):
        """Return a server name no existing server uses."""
        servers = self.api.get_servers()
        server_names = [server['name'] for server in servers]
        return generate_new_element(server_names, 'server')

    def get_unused_flavor_name_id(self):
        """Return a (name, id) pair that collides with no existing
        flavor.
        """
        flavors = self.api.get_flavors()
        # Previously these two lists were built with a list
        # comprehension executed purely for its side effects; a plain
        # loop is the idiomatic form.
        flavor_names = []
        flavor_ids = []
        for flavor in flavors:
            flavor_names.append(flavor['name'])
            flavor_ids.append(flavor['id'])
        return (generate_new_element(flavor_names, 'flavor'),
                int(generate_new_element(flavor_ids, '', True)))

    def get_invalid_image(self):
        """Return an image ref guaranteed not to exist."""
        return uuids.fake

    def _build_minimal_create_server_request(self, image_uuid=None):
        """Build the smallest valid server-create request body.

        :param image_uuid: image ref to use; when None the first image
            returned by the API is used.
        """
        server = {}
        # NOTE(takashin): In API version 2.36, image APIs were deprecated.
        # In API version 2.36 or greater, self.api.get_images() returns
        # a 404 error. In that case, 'image_uuid' should be specified.
        server[self._image_ref_parameter] = (image_uuid or
                                             self.api.get_images()[0]['id'])
        # Set a valid flavorId
        flavor = self.api.get_flavors()[0]
        LOG.debug("Using flavor: %s", flavor)
        server[self._flavor_ref_parameter] = ('http://fake.server/%s'
                                              % flavor['id'])
        # Set a valid server name
        server_name = self.get_unused_server_name()
        server['name'] = server_name
        return server

    def _create_flavor_body(self, name, ram, vcpus, disk, ephemeral, id, swap,
                            rxtx_factor, is_public):
        """Return the request body for a flavor-create API call."""
        return {
            "flavor": {
                "name": name,
                "ram": ram,
                "vcpus": vcpus,
                "disk": disk,
                "OS-FLV-EXT-DATA:ephemeral": ephemeral,
                "id": id,
                "swap": swap,
                "rxtx_factor": rxtx_factor,
                "os-flavor-access:is_public": is_public,
            }
        }

    def _create_flavor(self, memory_mb=2048, vcpu=2, disk=10, ephemeral=10,
                       swap=0, rxtx_factor=1.0, is_public=True,
                       extra_spec=None):
        """Create a flavor via the admin API and return its id."""
        flv_name, flv_id = self.get_unused_flavor_name_id()
        body = self._create_flavor_body(flv_name, memory_mb, vcpu, disk,
                                        ephemeral, flv_id, swap, rxtx_factor,
                                        is_public)
        self.api_fixture.admin_api.post_flavor(body)
        if extra_spec is not None:
            spec = {"extra_specs": extra_spec}
            self.api_fixture.admin_api.post_extra_spec(flv_id, spec)
        return flv_id

    def _build_server(self, flavor_id, image=None):
        """Build a server-create request body for the given flavor.

        :param image: image ref; when None the first image returned by
            the API is used.
        """
        server = {}
        if image is None:
            image = self.api.get_images()[0]
            LOG.debug("Image: %s", image)
            # We now have a valid imageId
            server[self._image_ref_parameter] = image['id']
        else:
            server[self._image_ref_parameter] = image
        # Set a valid flavorId
        flavor = self.api.get_flavor(flavor_id)
        LOG.debug("Using flavor: %s", flavor)
        server[self._flavor_ref_parameter] = ('http://fake.server/%s'
                                              % flavor['id'])
        # Set a valid server name
        server_name = self.get_unused_server_name()
        server['name'] = server_name
        return server

    def _check_api_endpoint(self, endpoint, expected_middleware):
        """Assert the /v2 wsgi app is wrapped by every middleware class
        in ``expected_middleware`` (the list is consumed in place).
        """
        app = self.api_fixture.app().get((None, '/v2'))
        # Walk down the middleware chain, ticking off expected entries.
        while getattr(app, 'application', False):
            for middleware in expected_middleware:
                if isinstance(app.application, middleware):
                    expected_middleware.remove(middleware)
                    break
            app = app.application
        self.assertEqual([],
                         expected_middleware,
                         ("The expected wsgi middlewares %s are not "
                          "existed") % expected_middleware)
class InstanceHelperMixin(object):
    """Mixin with helpers to build server requests and to wait for
    server, migration and instance-action state transitions via the
    API. Host classes are expected to provide ``self.api`` and the
    test ``fail`` method.
    """

    def _wait_for_server_parameter(self, admin_api, server, expected_params,
                                   max_retries=10):
        """Poll the server until every attribute in ``expected_params``
        matches, failing the test after ``max_retries`` attempts.

        :returns: the latest API representation of the server
        """
        retry_count = 0
        while True:
            server = admin_api.get_server(server['id'])
            if all([server[attr] == expected_params[attr]
                    for attr in expected_params]):
                break
            retry_count += 1
            if retry_count == max_retries:
                self.fail('Wait for state change failed, '
                          'expected_params=%s, server=%s'
                          % (expected_params, server))
            time.sleep(0.5)
        return server

    def _wait_for_state_change(self, admin_api, server, expected_status,
                               max_retries=10):
        """Wait until the server reaches ``expected_status``."""
        return self._wait_for_server_parameter(
            admin_api, server, {'status': expected_status}, max_retries)

    def _build_minimal_create_server_request(self, api, name, image_uuid=None,
                                             flavor_id=None, networks=None,
                                             az=None):
        """Build the smallest valid server-create request body.

        :param image_uuid: image ref; defaults to the first image.
        :param flavor_id: flavor id; defaults to the second flavor.
        :param networks: optional 'networks' value for the request.
        :param az: optional availability zone for the request.
        """
        server = {}
        # We now have a valid imageId
        server['imageRef'] = image_uuid or api.get_images()[0]['id']
        if not flavor_id:
            # Set a valid flavorId
            flavor_id = api.get_flavors()[1]['id']
        server['flavorRef'] = ('http://fake.server/%s' % flavor_id)
        server['name'] = name
        if networks is not None:
            server['networks'] = networks
        if az is not None:
            server['availability_zone'] = az
        return server

    def _wait_until_deleted(self, server):
        """Wait for the server to disappear from the API, failing the
        test if it newly enters ERROR state or never goes away.
        """
        initially_in_error = (server['status'] == 'ERROR')
        try:
            for i in range(40):
                server = self.api.get_server(server['id'])
                if not initially_in_error and server['status'] == 'ERROR':
                    # BUG FIX: the implicit string concatenation was
                    # missing a space ("instead ofdisappearing.").
                    self.fail('Server went to error state instead of '
                              'disappearing.')
                time.sleep(0.5)
            self.fail('Server failed to delete.')
        except api_client.OpenStackApiNotFoundException:
            return

    def _wait_for_action_fail_completion(
            self, server, expected_action, event_name, api=None):
        """Polls instance action events for the given instance, action and
        action event name until it finds the action event with an error
        result.
        """
        if api is None:
            api = self.api
        completion_event = None
        for attempt in range(10):
            actions = api.get_instance_actions(server['id'])
            # Look for the migrate action.
            for action in actions:
                if action['action'] == expected_action:
                    events = (
                        api.api_get(
                            '/servers/%s/os-instance-actions/%s' %
                            (server['id'], action['request_id'])
                        ).body['instanceAction']['events'])
                    # Look for the action event being in error state.
                    for event in events:
                        if (event['event'] == event_name and
                                event['result'] is not None and
                                event['result'].lower() == 'error'):
                            completion_event = event
                            # Break out of the events loop.
                            break
                    if completion_event:
                        # Break out of the actions loop.
                        break
            # We didn't find the completion event yet, so wait a bit.
            time.sleep(0.5)
        if completion_event is None:
            self.fail('Timed out waiting for %s failure event. Current '
                      'instance actions: %s' % (event_name, actions))

    def _wait_for_migration_status(self, server, expected_statuses):
        """Waits for a migration record with the given statuses to be found
        for the given server, else the test fails. The migration record, if
        found, is returned.
        """
        # Prefer the admin API when the host class provides one.
        api = getattr(self, 'admin_api', None)
        if api is None:
            api = self.api
        statuses = [status.lower() for status in expected_statuses]
        for attempt in range(10):
            migrations = api.api_get('/os-migrations').body['migrations']
            for migration in migrations:
                if (migration['instance_uuid'] == server['id'] and
                        migration['status'].lower() in statuses):
                    return migration
            time.sleep(0.5)
        self.fail('Timed out waiting for migration with status "%s" for '
                  'instance: %s' % (expected_statuses, server['id']))
class ProviderUsageBaseTestCase(test.TestCase, InstanceHelperMixin):
"""Base test class for functional tests that check provider usage
and consumer allocations in Placement during various operations.
Subclasses must define a **compute_driver** attribute for the virt driver
to use.
This class sets up standard fixtures and controller services but does not
start any compute services, that is left to the subclass.
"""
microversion = 'latest'
def setUp(self):
self.flags(compute_driver=self.compute_driver)
super(ProviderUsageBaseTestCase, self).setUp()
self.useFixture(policy_fixture.RealPolicyFixture())
self.useFixture(nova_fixtures.NeutronFixture(self))
self.useFixture(nova_fixtures.AllServicesCurrent())
fake_notifier.stub_notifier(self)
self.addCleanup(fake_notifier.reset)
placement = self.useFixture(nova_fixtures.PlacementFixture())
self.placement_api = placement.api
api_fixture = self.useFixture(nova_fixtures.OSAPIFixture(
api_version='v2.1'))
self.admin_api = api_fixture.admin_api
self.admin_api.microversion = self.microversion
self.api = self.admin_api
# the image fake backend needed for image discovery
nova.tests.unit.image.fake.stub_out_image_service(self)
self.start_service('conductor')
self.scheduler_service = self.start_service('scheduler')
self.addCleanup(nova.tests.unit.image.fake.FakeImageService_reset)
self.computes = {}
def _start_compute(self, host, cell_name=None):
"""Start a nova compute service on the given host
:param host: the name of the host that will be associated to the
compute service.
:param cell_name: optional name of the cell in which to start the
compute service (defaults to cell1)
:return: the nova compute service object
"""
fake.set_nodes([host])
self.addCleanup(fake.restore_nodes)
compute = self.start_service('compute', host=host, cell=cell_name)
self.computes[host] = compute
return compute
def _get_provider_uuid_by_host(self, host):
# NOTE(gibi): the compute node id is the same as the compute node
# provider uuid on that compute
resp = self.admin_api.api_get(
'os-hypervisors?hypervisor_hostname_pattern=%s' % host).body
return resp['hypervisors'][0]['id']
def _get_provider_usages(self, provider_uuid):
return self.placement_api.get(
'/resource_providers/%s/usages' % provider_uuid).body['usages']
def _get_allocations_by_server_uuid(self, server_uuid):
return self.placement_api.get(
'/allocations/%s' % server_uuid).body['allocations']
def _get_allocations_by_provider_uuid(self, rp_uuid):
return self.placement_api.get(
'/resource_providers/%s/allocations' % rp_uuid).body['allocations']
def _get_all_providers(self):
return self.placement_api.get(
'/resource_providers', version='1.14').body['resource_providers']
def _create_trait(self, trait):
return self.placement_api.put('/traits/%s' % trait, {}, version='1.6')
def _get_provider_traits(self, provider_uuid):
return self.placement_api.get(
'/resource_providers/%s/traits' % provider_uuid,
version='1.6').body['traits']
def _set_provider_traits(self, rp_uuid, traits):
"""This will overwrite any existing traits.
:param rp_uuid: UUID of the resource provider to update
:param traits: list of trait strings to set on the provider
:returns: APIResponse object with the results
"""
provider = self.placement_api.get(
'/resource_providers/%s' % rp_uuid).body
put_traits_req = {
'resource_provider_generation': provider['generation'],
'traits': traits
}
return self.placement_api.put(
'/resource_providers/%s/traits' % rp_uuid,
put_traits_req, version='1.6')
def _get_all_resource_classes(self):
dicts = self.placement_api.get(
'/resource_classes', version='1.2').body['resource_classes']
return [d['name'] for d in dicts]
def _get_all_traits(self):
return self.placement_api.get('/traits', version='1.6').body['traits']
def _get_provider_inventory(self, rp_uuid):
return self.placement_api.get(
'/resource_providers/%s/inventories' % rp_uuid).body['inventories']
def _get_provider_aggregates(self, rp_uuid):
return self.placement_api.get(
'/resource_providers/%s/aggregates' % rp_uuid,
version='1.1').body['aggregates']
def _post_resource_provider(self, rp_name):
return self.placement_api.post(
url='/resource_providers',
version='1.20', body={'name': rp_name}).body
def _set_inventory(self, rp_uuid, inv_body):
"""This will set the inventory for a given resource provider.
:param rp_uuid: UUID of the resource provider to update
:param inv_body: inventory to set on the provider
:returns: APIResponse object with the results
"""
return self.placement_api.post(
url= ('/resource_providers/%s/inventories' % rp_uuid),
version='1.15', body=inv_body).body
def _get_resource_provider_by_uuid(self, rp_uuid):
return self.placement_api.get(
'/resource_providers/%s' % rp_uuid, version='1.15').body
def _set_aggregate(self, rp_uuid, agg_id):
provider = self.placement_api.get(
'/resource_providers/%s' % rp_uuid).body
post_agg_req = {"aggregates": [agg_id],
"resource_provider_generation": provider['generation']}
return self.placement_api.put(
'/resource_providers/%s/aggregates' % rp_uuid, version='1.19',
body=post_agg_req).body
def _get_all_rp_uuids_in_a_tree(self, in_tree_rp_uuid):
rps = self.placement_api.get(
'/resource_providers?in_tree=%s' % in_tree_rp_uuid,
version='1.20').body['resource_providers']
return [rp['uuid'] for rp in rps]
def assertRequestMatchesUsage(self, requested_resources, root_rp_uuid):
# It matches the usages of the whole tree against the request
rp_uuids = self._get_all_rp_uuids_in_a_tree(root_rp_uuid)
# NOTE(gibi): flattening the placement usages means we cannot
# verify the structure here. However I don't see any way to define this
# function for nested and non-nested trees in a generic way.
total_usage = collections.defaultdict(int)
for rp in rp_uuids:
usage = self._get_provider_usages(rp)
for rc, amount in usage.items():
total_usage[rc] += amount
# Cannot simply do an assertEqual(expected, actual) as usages always
# contain every RC even if the usage is 0 and the flavor could also
# contain explicit 0 request for some resources.
# So if the flavor contains an explicit 0 resource request (e.g. in
# case of ironic resources:VCPU=0) then this code needs to assert that
# such resource has 0 usage in the tree. In the other hand if the usage
# contains 0 value for some resources that the flavor does not request
# then that is totally fine.
for rc, value in requested_resources.items():
self.assertIn(
rc, total_usage,
'The requested resource class not found in the total_usage of '
'the RP tree')
self.assertEqual(
value,
total_usage[rc],
'The requested resource amount does not match with the total '
'resource usage of the RP tree')
for rc, value in total_usage.items():
if value != 0:
self.assertEqual(
requested_resources[rc],
value,
'The requested resource amount does not match with the '
'total resource usage of the RP tree')
def assertFlavorMatchesUsage(self, root_rp_uuid, *flavors):
resources = collections.defaultdict(int)
for flavor in flavors:
res = self._resources_from_flavor(flavor)
for rc, value in res.items():
resources[rc] += value
self.assertRequestMatchesUsage(resources, root_rp_uuid)
def _resources_from_flavor(self, flavor):
resources = collections.defaultdict(int)
resources['VCPU'] = flavor['vcpus']
resources['MEMORY_MB'] = flavor['ram']
resources['DISK_GB'] = flavor['disk']
for key, value in flavor['extra_specs'].items():
if key.startswith('resources'):
resources[key.split(':')[1]] += value
return resources
def assertFlavorMatchesAllocation(self, flavor, consumer_uuid,
root_rp_uuid):
# NOTE(gibi): This function does not handle sharing RPs today.
expected_rps = self._get_all_rp_uuids_in_a_tree(root_rp_uuid)
allocations = self._get_allocations_by_server_uuid(consumer_uuid)
# NOTE(gibi): flattening the placement allocation means we cannot
# verify the structure here. However I don't see any way to define this
# function for nested and non-nested trees in a generic way.
total_allocation = collections.defaultdict(int)
for rp, alloc in allocations.items():
self.assertIn(rp, expected_rps, 'Unexpected, out of tree RP in the'
' allocation')
for rc, value in alloc['resources'].items():
total_allocation[rc] += value
self.assertEqual(
self._resources_from_flavor(flavor),
total_allocation,
'The resources requested in the flavor does not match with total '
'allocation in the RP tree')
def get_migration_uuid_for_instance(self, instance_uuid):
# NOTE(danms): This is too much introspection for a test like this, but
# we can't see the migration uuid from the API, so we just encapsulate
# the peek behind the curtains here to keep it out of the tests.
# TODO(danms): Get the migration uuid from the API once it is exposed
ctxt = context.get_admin_context()
migrations = db.migration_get_all_by_filters(
ctxt, {'instance_uuid': instance_uuid})
self.assertEqual(1, len(migrations),
'Test expected a single migration, '
'but found %i' % len(migrations))
return migrations[0].uuid
def _boot_and_check_allocations(self, flavor, source_hostname):
"""Boot an instance and check that the resource allocation is correct
After booting an instance on the given host with a given flavor it
asserts that both the providers usages and resource allocations match
with the resources requested in the flavor. It also asserts that
running the periodic update_available_resource call does not change the
resource state.
:param flavor: the flavor the instance will be booted with
:param source_hostname: the name of the host the instance will be
booted on
:return: the API representation of the booted instance
"""
server_req = self._build_minimal_create_server_request(
self.api, 'some-server', flavor_id=flavor['id'],
image_uuid='155d900f-4e14-4e4c-a73d-069cbf4541e6',
networks='none')
server_req['availability_zone'] = 'nova:%s' % source_hostname
LOG.info('booting on %s', source_hostname)
created_server = self.api.post_server({'server': server_req})
server = self._wait_for_state_change(
self.admin_api, created_server, 'ACTIVE')
# Verify that our source host is what the server ended up on
self.assertEqual(source_hostname, server['OS-EXT-SRV-ATTR:host'])
source_rp_uuid = self._get_provider_uuid_by_host(source_hostname)
# Before we run periodics, make sure that we have allocations/usages
# only on the source host
self.assertFlavorMatchesUsage(source_rp_uuid, flavor)
# Check that the other providers has no usage
for rp_uuid in [self._get_provider_uuid_by_host(hostname)
for hostname in self.computes.keys()
if hostname != source_hostname]:
self.assertRequestMatchesUsage({'VCPU': 0,
'MEMORY_MB': 0,
'DISK_GB': 0}, rp_uuid)
# Check that the server only allocates resource from the host it is
# booted on
self.assertFlavorMatchesAllocation(flavor, server['id'],
source_rp_uuid)
self._run_periodics()
# After running the periodics but before we start any other operation,
# we should have exactly the same allocation/usage information as
# before running the periodics
# Check usages on the selected host after boot
self.assertFlavorMatchesUsage(source_rp_uuid, flavor)
# Check that the server only allocates resource from the host it is
# booted on
self.assertFlavorMatchesAllocation(flavor, server['id'],
source_rp_uuid)
# Check that the other providers has no usage
for rp_uuid in [self._get_provider_uuid_by_host(hostname)
for hostname in self.computes.keys()
if hostname != source_hostname]:
self.assertRequestMatchesUsage({'VCPU': 0,
'MEMORY_MB': 0,
'DISK_GB': 0}, rp_uuid)
return server
    def _delete_and_check_allocations(self, server):
        """Delete the instance and asserts that the allocations are cleaned

        Waits for the instance.delete.end notification before checking, then
        asserts every compute provider shows zero usage and the server holds
        no allocations.

        :param server: The API representation of the instance to be deleted
        """
        self.api.delete_server(server['id'])
        self._wait_until_deleted(server)
        # NOTE(gibi): The resource allocation is deleted after the instance is
        # destroyed in the db so wait_until_deleted might return before the
        # the resource are deleted in placement. So we need to wait for the
        # instance.delete.end notification as that is emitted after the
        # resources are freed.
        fake_notifier.wait_for_versioned_notifications('instance.delete.end')
        for rp_uuid in [self._get_provider_uuid_by_host(hostname)
                        for hostname in self.computes.keys()]:
            self.assertRequestMatchesUsage({'VCPU': 0,
                                            'MEMORY_MB': 0,
                                            'DISK_GB': 0}, rp_uuid)
        # and no allocations for the deleted server
        allocations = self._get_allocations_by_server_uuid(server['id'])
        self.assertEqual(0, len(allocations))
def _run_periodics(self):
"""Run the update_available_resource task on every compute manager
This runs periodics on the computes in an undefined order; some child
class redefined this function to force a specific order.
"""
ctx = context.get_admin_context()
for compute in self.computes.values():
LOG.info('Running periodic for compute (%s)',
compute.manager.host)
compute.manager.update_available_resource(ctx)
LOG.info('Finished with periodics')
    def _move_and_check_allocations(self, server, request, old_flavor,
                                    new_flavor, source_rp_uuid, dest_rp_uuid):
        """Run a move server action and verify the doubled-up allocations.

        Posts the given action request, waits for VERIFY_RESIZE, and asserts
        that the instance owns a new_flavor allocation on the destination
        while the migration record owns the old_flavor allocation on the
        source — both before and after running the periodics.
        """
        self.api.post_server_action(server['id'], request)
        self._wait_for_state_change(self.api, server, 'VERIFY_RESIZE')

        def _check_allocation():
            self.assertFlavorMatchesUsage(source_rp_uuid, old_flavor)
            self.assertFlavorMatchesUsage(dest_rp_uuid, new_flavor)

            # The instance should own the new_flavor allocation against the
            # destination host created by the scheduler
            self.assertFlavorMatchesAllocation(new_flavor, server['id'],
                                               dest_rp_uuid)

            # The migration should own the old_flavor allocation against the
            # source host created by conductor
            migration_uuid = self.get_migration_uuid_for_instance(server['id'])
            self.assertFlavorMatchesAllocation(old_flavor, migration_uuid,
                                               source_rp_uuid)

        # OK, so the move operation has run, but we have not yet confirmed or
        # reverted the move operation. Before we run periodics, make sure
        # that we have allocations/usages on BOTH the source and the
        # destination hosts.
        _check_allocation()

        # The periodic must not change anything while the move is unconfirmed.
        self._run_periodics()

        _check_allocation()

        # Make sure the RequestSpec.flavor matches the new_flavor.
        ctxt = context.get_admin_context()
        reqspec = objects.RequestSpec.get_by_instance_uuid(ctxt, server['id'])
        self.assertEqual(new_flavor['id'], reqspec.flavor.flavorid)
def _migrate_and_check_allocations(self, server, flavor, source_rp_uuid,
dest_rp_uuid):
request = {
'migrate': None
}
self._move_and_check_allocations(
server, request=request, old_flavor=flavor, new_flavor=flavor,
source_rp_uuid=source_rp_uuid, dest_rp_uuid=dest_rp_uuid)
    def _resize_to_same_host_and_check_allocations(self, server, old_flavor,
                                                   new_flavor, rp_uuid):
        """Resize the server on its own host and verify doubled allocations.

        In VERIFY_RESIZE state the single provider carries both the
        instance's new_flavor allocation and the migration's old_flavor
        allocation; the periodic must not change that.
        """
        # Resize the server to the same host and check usages in VERIFY_RESIZE
        # state
        self.flags(allow_resize_to_same_host=True)
        resize_req = {
            'resize': {
                'flavorRef': new_flavor['id']
            }
        }
        self.api.post_server_action(server['id'], resize_req)
        self._wait_for_state_change(self.api, server, 'VERIFY_RESIZE')

        # Usage on the host is the sum of the old and new flavors here.
        self.assertFlavorMatchesUsage(rp_uuid, old_flavor, new_flavor)

        # The instance should hold a new_flavor allocation
        self.assertFlavorMatchesAllocation(new_flavor, server['id'],
                                           rp_uuid)

        # The migration should hold an old_flavor allocation
        migration_uuid = self.get_migration_uuid_for_instance(server['id'])
        self.assertFlavorMatchesAllocation(old_flavor, migration_uuid,
                                           rp_uuid)

        # We've resized to the same host and have doubled allocations for both
        # the old and new flavor on the same host. Run the periodic on the
        # compute to see if it tramples on what the scheduler did.
        self._run_periodics()

        # In terms of usage, it's still double on the host because the instance
        # and the migration each hold an allocation for the new and old
        # flavors respectively.
        self.assertFlavorMatchesUsage(rp_uuid, old_flavor, new_flavor)

        # The instance should hold a new_flavor allocation
        self.assertFlavorMatchesAllocation(new_flavor, server['id'],
                                           rp_uuid)

        # The migration should hold an old_flavor allocation
        self.assertFlavorMatchesAllocation(old_flavor, migration_uuid,
                                           rp_uuid)
| |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""LSTM Block Cell ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import common_shapes
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import load_library
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import rnn_cell
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.platform import resource_loader
# Load the custom C++ kernels for the block-LSTM ops from the shared library
# shipped alongside this module; fail fast if it cannot be loaded.
_lstm_ops_so = load_library.load_op_library(
    resource_loader.get_path_to_datafile("_lstm_ops.so"))
assert _lstm_ops_so, "Could not load _lstm_ops.so."
# pylint: disable=invalid-name
def _lstm_block_cell(x,
                     cs_prev,
                     h_prev,
                     w,
                     b,
                     wci=None,
                     wcf=None,
                     wco=None,
                     forget_bias=None,
                     cell_clip=None,
                     use_peephole=None,
                     name=None):
  r"""Computes the LSTM cell forward propagation for 1 time step.

  This implementation uses 1 weight matrix and 1 bias vector, there is no
  diagonal peephole connection.

  This kernel op implements the following mathematical equations:

  ```python
  xh = [x, h_prev]
  [i, f, ci, o] = xh * w + b
  f = f + forget_bias

  i = sigmoid(i)
  f = sigmoid(f)
  ci = tanh(ci)
  o = sigmoid(o)

  cs = ci .* i + cs_prev .* f
  co = tanh(cs)

  h = co .* o
  ```

  Args:
    x: A `Tensor`. Must be one of the following types: `float32`, `float64`.
      The input to the LSTM cell.
    cs_prev: A `Tensor`. Must have the same type as `x`.
    h_prev: A `Tensor`. Must have the same type as `x`.
    w: A `Tensor`. Must have the same type as `x`. The weight matrix.
    b: A `Tensor`. Must have the same type as `x`. The bias vector.
    wci: A `Tensor`. Must have the same type as `x`.
    wcf: A `Tensor`. Must have the same type as `x`.
    wco: A `Tensor`. Must have the same type as `x`.
    forget_bias: An optional `float`. Defaults to `1`. The forget gate bias.
    cell_clip: An optional `float`. Defaults to `3`.
    use_peephole: An optional `bool`. Defaults to `False`.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (i, cs, f, o, ci, co, h).
    i: A `Tensor`. Has the same type as `x`. The input gate.
    cs: A `Tensor`. Has the same type as `x`. The cell state before the tanh.
    f: A `Tensor`. Has the same type as `x`. The forget gate.
    o: A `Tensor`. Has the same type as `x`. The output gate.
    ci: A `Tensor`. Has the same type as `x`. The cell input.
    co: A `Tensor`. Has the same type as `x`. The cell after the tanh.
    h: A `Tensor`. Has the same type as `x`. The output h vector.

  Raises:
    ValueError: If cell_size is None.
  """
  if wci is None:
    # No peephole weights supplied: substitute zero vectors sized from the
    # previous cell state (shape [batch, cell_size]).
    cell_size = cs_prev.get_shape().with_rank(2)[1].value
    if cell_size is None:
      raise ValueError("cell_size from `cs_prev` should not be None.")
    # NOTE(review): the zero peephole weights are hard-coded to float32 even
    # though the docstring allows float64 inputs — confirm whether float64
    # callers always pass explicit wci/wcf/wco.
    wci = array_ops.constant(0, dtype=dtypes.float32, shape=[cell_size])
    wco = wci
    wcf = wci

  # pylint: disable=protected-access
  return _lstm_ops_so.lstm_block_cell(x=x,
                                      cs_prev=cs_prev,
                                      h_prev=h_prev,
                                      w=w,
                                      wci=wci,
                                      wco=wco,
                                      wcf=wcf,
                                      b=b,
                                      forget_bias=forget_bias,
                                      cell_clip=cell_clip,
                                      use_peephole=use_peephole,
                                      name=name)
  # pylint: enable=protected-access
# pylint: enable=protected-access
def _block_lstm(seq_len_max,
                x,
                w,
                b,
                cs_prev=None,
                h_prev=None,
                wci=None,
                wcf=None,
                wco=None,
                forget_bias=None,
                cell_clip=None,
                use_peephole=None,
                name=None):
  r"""TODO(williamchan): add doc.

  Args:
    seq_len_max: A `Tensor` of type `int64`.
    x: A list of at least 1 `Tensor` objects of the same type in: `float32`.
    w: A `Tensor`. Must have the same type as `x`.
    b: A `Tensor`. Must have the same type as `x`.
    cs_prev: A `Tensor`. Must have the same type as `x`.
    h_prev: A `Tensor`. Must have the same type as `x`.
    wci: A `Tensor`. Must have the same type as `x`.
    wcf: A `Tensor`. Must have the same type as `x`.
    wco: A `Tensor`. Must have the same type as `x`.
    forget_bias: An optional `float`. Defaults to `1`.
    cell_clip: An optional `float`. Defaults to `3`.
    use_peephole: An optional `bool`. Defaults to `False`.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (i, cs, f, o, ci, co, h).
    i: A list with the same number of `Tensor` objects as `x` of `Tensor`
      objects of the same type as x.
    cs: A list with the same number of `Tensor` objects as `x` of `Tensor`
      objects of the same type as x.
    f: A list with the same number of `Tensor` objects as `x` of `Tensor`
      objects of the same type as x.
    o: A list with the same number of `Tensor` objects as `x` of `Tensor`
      objects of the same type as x.
    ci: A list with the same number of `Tensor` objects as `x` of `Tensor`
      objects of the same type as x.
    co: A list with the same number of `Tensor` objects as `x` of `Tensor`
      objects of the same type as x.
    h: A list with the same number of `Tensor` objects as `x` of `Tensor`
      objects of the same type as x.

  Raises:
    ValueError: If `b` does not have a valid shape.
  """
  batch_size = x[0].get_shape().with_rank(2)[0].value
  cell_size4 = b.get_shape().with_rank(1)[0].value
  if cell_size4 is None:
    raise ValueError("`b` shape must not be None.")
  # The bias holds the four gates stacked, so cell_size is a quarter of it.
  # Use floor division: this module imports `division` from __future__, so
  # `/` would yield a float that is then used in tensor shapes below.
  cell_size = cell_size4 // 4
  zero_state = None
  if cs_prev is None or h_prev is None:
    # NOTE(review): the zero state is hard-coded to float32; the docstring
    # only documents float32 inputs, so this matches the stated contract.
    zero_state = array_ops.constant(0,
                                    dtype=dtypes.float32,
                                    shape=[batch_size, cell_size])
  if cs_prev is None:
    cs_prev = zero_state
  if h_prev is None:
    h_prev = zero_state
  if wci is None:
    # Default the peephole weights to zeros of length cell_size.
    wci = array_ops.constant(0, dtype=dtypes.float32, shape=[cell_size])
    wco = wci
    wcf = wci

  # pylint: disable=protected-access
  return _lstm_ops_so.block_lstm(seq_len_max=seq_len_max,
                                 x=x,
                                 cs_prev=cs_prev,
                                 h_prev=h_prev,
                                 w=w,
                                 wci=wci,
                                 wco=wco,
                                 wcf=wcf,
                                 b=b,
                                 forget_bias=forget_bias,
                                 cell_clip=cell_clip,
                                 name=name,
                                 use_peephole=use_peephole)
  # pylint: enable=protected-access
# pylint: enable=invalid-name
# Names of the first two outputs of LSTMBlockCellGrad (see the gradient
# function below, which unpacks them in this order).
_lstm_block_cell_grad_outputs = ["cs_prev_grad", "dicfo"]

# Shape inference for the forward cell op is delegated to the C++ shape fn.
ops.RegisterShape("LSTMBlockCell")(common_shapes.call_cpp_shape_fn)
@ops.RegisterGradient("LSTMBlockCell")
def _LSTMBlockCellGrad(op, *grad):
  """Gradient for LSTMBlockCell.

  Calls the lstm_block_cell_grad kernel to get the cell-state gradient and
  the combined gate gradient `dicfo`, then backpropagates `dicfo` through
  the single matmul to the input, previous hidden state, weights and bias.
  """
  (x, cs_prev, h_prev, w, wci, wco, wcf, b) = op.inputs
  (i, cs, f, o, ci, co, _) = op.outputs
  # Only the gradients w.r.t. cs and h flow in; the rest are unused.
  (_, cs_grad, _, _, _, _, h_grad) = grad

  batch_size = x.get_shape().with_rank(2)[0].value
  if batch_size is None:
    # -1 lets array_ops.slice take the full (dynamic) batch dimension.
    batch_size = -1
  input_size = x.get_shape().with_rank(2)[1].value
  if input_size is None:
    raise ValueError("input_size from `x` should not be None.")
  cell_size = cs_prev.get_shape().with_rank(2)[1].value
  if cell_size is None:
    raise ValueError("cell_size from `cs_prev` should not be None.")

  # Kernel computes the peephole gradients and dicfo = d[i, ci, f, o].
  (cs_prev_grad, dicfo, wci_grad, wcf_grad,
   wco_grad) = _lstm_ops_so.lstm_block_cell_grad(
       x,
       cs_prev,
       h_prev,
       w,
       wci,
       wcf,
       wco,
       b,
       i,
       cs,
       f,
       o,
       ci,
       co,
       cs_grad,
       h_grad,
       use_peephole=op.get_attr("use_peephole"))

  # Backprop from dicfo to xh.
  xh_grad = math_ops.matmul(dicfo, w, transpose_b=True)

  # xh was the concatenation [x, h_prev]; split its gradient back apart.
  x_grad = array_ops.slice(xh_grad, (0, 0), (batch_size, input_size))
  x_grad.get_shape().merge_with(x.get_shape())

  h_prev_grad = array_ops.slice(xh_grad, (0, input_size),
                                (batch_size, cell_size))
  h_prev_grad.get_shape().merge_with(h_prev.get_shape())

  # Backprop from dicfo to w.
  xh = array_ops.concat(1, [x, h_prev])
  w_grad = math_ops.matmul(xh, dicfo, transpose_a=True)
  w_grad.get_shape().merge_with(w.get_shape())

  # Backprop from dicfo to b.
  b_grad = nn_ops.bias_add_grad(dicfo)
  b_grad.get_shape().merge_with(b.get_shape())

  # One gradient per forward input, in op.inputs order.
  return (x_grad, cs_prev_grad, h_prev_grad, w_grad, wci_grad, wcf_grad,
          wco_grad, b_grad)
# Shape inference for the cell gradient and fused-sequence ops is delegated
# to the C++ shape functions.
ops.RegisterShape("LSTMBlockCellGrad")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("BlockLSTM")(common_shapes.call_cpp_shape_fn)
@ops.RegisterGradient("BlockLSTM")
def _BlockLSTMGrad(op, *grad):
  """Gradient for BlockLSTM.

  Unpacks the fused op's flat input/output lists by position (x occupies
  max_len inputs, each of the seven outputs occupies max_len slots) and
  delegates the math to the block_lstm_grad kernel.
  """
  max_len = op.get_attr("max_len")

  # Inputs: seq_len_max, then max_len x tensors, then the 7 trailing params.
  seq_len_max = op.inputs[0]
  x = op.inputs[1:1 + max_len]
  cs_prev = op.inputs[-7]
  h_prev = op.inputs[-6]
  w = op.inputs[-5]
  wci = op.inputs[-4]
  wco = op.inputs[-3]
  wcf = op.inputs[-2]
  b = op.inputs[-1]

  # Outputs: seven groups of max_len tensors, in (i, cs, f, o, ci, co, h)
  # order.
  i = op.outputs[0 * max_len:1 * max_len]
  cs = op.outputs[1 * max_len:2 * max_len]
  f = op.outputs[2 * max_len:3 * max_len]
  o = op.outputs[3 * max_len:4 * max_len]
  ci = op.outputs[4 * max_len:5 * max_len]
  co = op.outputs[5 * max_len:6 * max_len]
  h = op.outputs[6 * max_len:7 * max_len]

  # Only the gradients of the last two output groups (cs and h) are used.
  cs_grad = grad[-max_len * 2:-max_len]
  h_grad = grad[-max_len:]

  (x_grad, cs_prev_grad, h_prev_grad, w_grad, wci_grad, wco_grad, wcf_grad,
   b_grad) = _lstm_ops_so.block_lstm_grad(
       seq_len_max,
       x,
       cs_prev,
       h_prev,
       w,
       wci,
       wco,
       wcf,
       b,
       i,
       cs,
       f,
       o,
       ci,
       co,
       h,
       cs_grad,
       h_grad,
       use_peephole=op.get_attr("use_peephole"))

  # seq_len_max gets no gradient; the rest mirrors op.inputs order.
  return [None] + x_grad + [cs_prev_grad, h_prev_grad, w_grad, wci_grad,
                            wco_grad, wcf_grad, b_grad]
# Shape inference for the fused-sequence gradient op comes from C++.
ops.RegisterShape("BlockLSTMGrad")(common_shapes.call_cpp_shape_fn)
class LSTMBlockCell(rnn_cell.RNNCell):
  """Basic LSTM recurrent network cell.

  The implementation is based on: http://arxiv.org/abs/1409.2329.

  We add forget_bias (default: 1) to the biases of the forget gate in order to
  reduce the scale of forgetting in the beginning of the training.

  Unlike BasicLSTMCell, this is a monolithic op and should be much faster. The
  weight and bias matrices should be compatible as long as the variable scope
  matches.
  """

  def __init__(self, num_units, forget_bias=1.0, use_peephole=False):
    """Initialize the basic LSTM cell.

    Args:
      num_units: int, The number of units in the LSTM cell.
      forget_bias: float, The bias added to forget gates (see above).
      use_peephole: Whether to use peephole connections or not.
    """
    self._num_units = num_units
    self._forget_bias = forget_bias
    self._use_peephole = use_peephole

  @property
  def state_size(self):
    # The state is a (c, h) pair, each of size num_units.
    return (self._num_units,) * 2

  @property
  def output_size(self):
    return self._num_units

  def __call__(self, x, states_prev, scope=None):
    """Long short-term memory cell (LSTM)."""
    with vs.variable_scope(scope or type(self).__name__):
      x_shape = x.get_shape().with_rank(2)
      if not x_shape[1]:
        # Fixed typo in the error message ("sets" -> "set").
        raise ValueError("Expecting x_shape[1] to be set: %s" % str(x_shape))
      if len(states_prev) != 2:
        raise ValueError("Expecting states_prev to be a tuple with length 2.")
      input_size = x_shape[1]
      # Single fused weight matrix [input + hidden, 4 * num_units] and the
      # matching bias, plus the three peephole weight vectors.
      w = vs.get_variable("W", [input_size + self._num_units,
                                self._num_units * 4])
      b = vs.get_variable("b", [w.get_shape().with_rank(2)[1]],
                          initializer=init_ops.constant_initializer(0.0))
      wci = vs.get_variable("wci", [self._num_units])
      wco = vs.get_variable("wco", [self._num_units])
      wcf = vs.get_variable("wcf", [self._num_units])
      (cs_prev, h_prev) = states_prev
      (_, cs, _, _, _, _, h) = _lstm_block_cell(x,
                                                cs_prev,
                                                h_prev,
                                                w,
                                                b,
                                                wci=wci,
                                                wco=wco,
                                                wcf=wcf,
                                                forget_bias=self._forget_bias,
                                                use_peephole=self._use_peephole)

      return (h, (cs, h))
| |
import sfh_tests_multi_proc
import numpy as np
import ResolvedStellarPops as rsp
import galaxy_tests
import os
import tables
import matplotlib.pylab as plt
from TPAGBparams import snap_src
import logging
logger = logging.getLogger()
angst_data = rsp.angst_tables.AngstTables()
import model_plots
# Font sizes (points) shared by the plotting code in this module.
fontlarge = 24
fontmid = 20
fontsmall = 16
def contamination_files(filenames):
opt_eagb_contam = np.array([])
opt_rheb_contam = np.array([])
ir_eagb_contam = np.array([])
ir_rheb_contam = np.array([])
opt_ms_contam = np.array([])
opt_bheb_contam = np.array([])
ir_ms_contam = np.array([])
ir_bheb_contam = np.array([])
if type(filenames) == str:
filenames = list(filenames)
for filename in filenames:
if 'ngc404' in filename:
continue
with open(filename, 'r') as fhandle:
lines = fhandle.readlines()
names = 'MS RGB HEB BHEB RHEB EAGB TPAGB Total '.split()
# rc contamination 12::13
rgb_opt = [l for l in lines if l.startswith('rgb opt')]
rgb_data = zip(*[t.strip().split()[2:] for t in rgb_opt])
rgb_data = np.array(rgb_data, dtype=float)
eagb_in_rgb = rgb_data[5]/rgb_data[7]
rheb_in_rgb = rgb_data[4]/rgb_data[7]
opt_eagb_contam = np.append(opt_eagb_contam, np.max(eagb_in_rgb))
opt_rheb_contam = np.append(opt_rheb_contam, np.max(rheb_in_rgb))
#print filename, 'opt', np.max(eagb_in_rgb), np.max(rheb_in_rgb)
opt = [l for l in lines if l.startswith('rgb opt') or l.startswith('agb opt')]
data = zip(*[t.strip().split()[2:] for t in opt])
data = np.array(data, dtype=float)
ms_in_opt = data[0]/data[7]
bheb_in_opt = data[3]/data[7]
opt_bheb_contam = np.append(opt_bheb_contam, np.max(bheb_in_opt))
opt_ms_contam = np.append(opt_ms_contam, np.max(ms_in_opt))
print filename, 'opt', np.max(ms_in_opt), np.max(bheb_in_opt)
rgb_ir = [l for l in lines if l.startswith('rgb ir')]
rgb_data = zip(*[t.strip().split()[2:] for t in rgb_ir])
rgb_data = np.array(rgb_data, dtype=float)
eagb_in_rgb = rgb_data[5]/rgb_data[7]
rheb_in_rgb = rgb_data[4]/rgb_data[7]
ir_eagb_contam = np.append(ir_eagb_contam, np.max(eagb_in_rgb))
ir_rheb_contam = np.append(ir_rheb_contam, np.max(rheb_in_rgb))
#print filename, 'ir', np.max(eagb_in_rgb), np.max(rheb_in_rgb)
ir = [l for l in lines if l.startswith('rgb ir') or l.startswith('agb ir')]
data = zip(*[t.strip().split()[2:] for t in ir])
data = np.array(data, dtype=float)
ms_in_ir = data[0]/data[7]
bheb_in_ir = data[3]/data[7]
#print filename, 'ir', np.max(eagb_in_rgb), np.max(rheb_in_rgb)
ir_bheb_contam = np.append(ir_bheb_contam, np.max(bheb_in_ir))
ir_ms_contam = np.append(ir_ms_contam, np.max(ms_in_ir))
print 'opt eagb, rheb', np.max(opt_eagb_contam), np.max(opt_rheb_contam)
print 'ir eagb, rheb', np.max(ir_eagb_contam), np.max(ir_rheb_contam)
print 'opt bheb, ms', np.max(opt_bheb_contam), np.max(opt_ms_contam)
print 'ir bheb, ms', np.max(ir_bheb_contam), np.max(ir_ms_contam)
class StatisticalComparisons(object):
    '''Compare model luminosity functions with data for one target galaxy.

    Wraps the file bookkeeping from sfh_tests_multi_proc and computes chi2
    (and optionally p-value) statistics between the observed LF histograms
    and each model realization.
    '''
    def __init__(self, cmd_input_file, target, outfile_loc='default',
                 extra_str='', mc=True):
        self.target = target
        # setup_files returns the output directory, the model LF/narratio
        # file names, and the AGB model name derived from cmd_input_file.
        self.outfile_loc, self.fnames, self.agb_mod = \
            sfh_tests_multi_proc.setup_files(cmd_input_file, target,
                                             outfile_loc=outfile_loc,
                                             extra_str=extra_str, mc=mc)
        self.files = sfh_tests_multi_proc.FileIO()
        self.files.mc = mc

    def poission_chi2(self, hist_it_up=False, table_file='default',
                      just_gauss=False):
        # NOTE: method name keeps its historical misspelling ("poission");
        # renaming would break existing callers.
        #
        # Computes chi2 for the full LF (cut at the completeness offset) and
        # for the AGB-only region (above the TRGB minus its error times the
        # exclusion factor), in both the optical and IR bands. When
        # just_gauss is True the Gaussian chi2/p-value from self.chi2
        # replaces the stellar_prob value and p-values are also returned.
        self.files.ags = sfh_tests_multi_proc.load_default_ancient_galaxies(table_file=table_file)
        self.files.load_data_for_normalization(target=self.target, ags=self.files.ags)
        opt_gal, ir_gal = self.files.load_galaxies(hist_it_up=hist_it_up)
        # cut LF at 90% completeness
        obins, = np.nonzero(opt_gal.bins <= self.files.opt_offset)
        ibins, = np.nonzero(ir_gal.bins <= self.files.ir_offset)
        # bins brighter than the TRGB (minus error margin): the AGB region
        agb_obins, = np.nonzero(opt_gal.bins <= self.files.opt_trgb - \
                                self.files.opt_trgb_err * \
                                self.files.ags.factor[0])
        agb_ibins, = np.nonzero(ir_gal.bins <= self.files.ir_trgb - \
                                self.files.ir_trgb_err * \
                                self.files.ags.factor[1])
        opt_model_hists, opt_models_binss = self.files.load_lf_file(self.fnames[0])
        ir_model_hists, ir_models_binss = self.files.load_lf_file(self.fnames[1])
        opt_chi2 = np.array([])
        ir_chi2 = np.array([])
        opt_chi2_agb = np.array([])
        ir_chi2_agb = np.array([])
        opt_pval = np.array([])
        ir_pval = np.array([])
        opt_pval_agb = np.array([])
        ir_pval_agb = np.array([])
        # Only compare as many realizations as both bands have.
        nhists = np.min([len(ir_model_hists), len(opt_model_hists)])
        for i in range(nhists):
            chi2, pct_dif, sig = rsp.Galaxies.stellar_prob(opt_gal.hist[obins[1:]],
                                                           opt_model_hists[i][obins[1:]])
            if just_gauss is True:
                chi2, pval = self.chi2(opt_gal.hist[obins[1:]],
                                       opt_model_hists[i][obins[1:]])
                opt_pval = np.append(opt_pval, pval)
            opt_chi2 = np.append(opt_chi2, chi2)
            #print 'opt', chi2, np.mean(sig)
            chi2, pct_dif, sig = rsp.Galaxies.stellar_prob(ir_gal.hist[ibins[1:]],
                                                           ir_model_hists[i][ibins[1:]])
            if just_gauss is True:
                chi2, pval = self.chi2(ir_gal.hist[ibins[1:]],
                                       ir_model_hists[i][ibins[1:]])
                ir_pval = np.append(ir_pval, pval)
            ir_chi2 = np.append(ir_chi2, chi2)
            #print 'ir', chi2, np.mean(sig)
            chi2, pct_dif, sig = rsp.Galaxies.stellar_prob(opt_gal.hist[agb_obins[1:]],
                                                           opt_model_hists[i][agb_obins[1:]])
            if just_gauss is True:
                chi2, pval = self.chi2(opt_gal.hist[agb_obins[1:]],
                                       opt_model_hists[i][agb_obins[1:]])
                opt_pval_agb = np.append(opt_pval_agb, pval)
            opt_chi2_agb = np.append(opt_chi2_agb, chi2)
            #print 'opta', chi2, np.mean(sig)
            chi2, pct_dif, sig = rsp.Galaxies.stellar_prob(ir_gal.hist[agb_ibins[1:]],
                                                           ir_model_hists[i][agb_ibins[1:]])
            if just_gauss is True:
                chi2, pval = self.chi2(ir_gal.hist[agb_ibins[1:]],
                                       ir_model_hists[i][agb_ibins[1:]])
                ir_pval_agb = np.append(ir_pval_agb, pval)
            ir_chi2_agb = np.append(ir_chi2_agb, chi2)
            #print 'ira ', chi2, np.mean(sig)
        if just_gauss is True:
            return opt_chi2, ir_chi2, opt_chi2_agb, ir_chi2_agb, opt_pval, ir_pval, opt_pval_agb, ir_pval_agb
        else:
            return opt_chi2, ir_chi2, opt_chi2_agb, ir_chi2_agb

    def chi2(self, ohist, mhist):
        '''Reduced chi2 and p-value between observed and model histograms.

        Infinite/NaN terms (from empty observed bins) are zeroed before the
        sum; degrees of freedom is the number of nonzero terms minus one.
        '''
        from scipy import stats
        # t-test:
        #ti = (ohist - mhist) ** 2 / (ohist + mhist)
        #naners = np.isnan(ti)
        #ti[naners] = 0
        #print np.sum(ti)
        # maybe there is a better way to mask this... chiw was a typo...
        chiw = (mhist - ohist) ** 2 / ohist
        naners = np.isinf(chiw)
        chiw[naners] = 0
        naners = np.isnan(chiw)
        chiw[naners] = 0
        oinds, = np.nonzero(chiw > 0)
        chi2 = np.sum(chiw)/float(len(oinds)-1)#, len(oinds)
        pval = 1 - stats.chi2.cdf(chi2, len(oinds)-1)
        return chi2, pval
def result2dict(result_files, search=None):
    '''Collapse narratio or chi2 result files into a dict of means.

    The file kind (narratio vs chi2) is inferred from the first filename;
    keys are '<target>_<agb_mod>_<field>_mean' (plus '_std' for chi2 files).
    search='all' takes every non-target column; otherwise only columns whose
    name contains the search string.
    '''
    res_dict = {}
    # Infer which kind of result file was passed from the first name.
    if 'narratio' in result_files[0]:
        if search is None:
            search = 'ratio'
        chi2 = False
    elif 'chi2' in result_files[0]:
        if search is None:
            search = 'chi2'
        chi2 = True
    else:
        print 'either narratio file or chi2 file'
        return {}
    for result_file in result_files:
        data = rsp.fileIO.readfile(result_file, string_column=0)
        # File names encode the target and AGB model:
        # <agb_mod>_<...>_<target>_... with target as the 4th '_' field.
        target = os.path.split(result_file)[1].split('_')[3]
        agb_mod = os.path.split(result_file)[1].split(target)[0][:-1]
        if search == 'all':
            fields = [d for d in data.dtype.names if not 'target' in d]
        else:
            fields = [d for d in data.dtype.names if search in d]
        key = '%s_%s' % (target, agb_mod)
        for f in fields:
            res_dict['%s_%s_mean' % (key, f)] = np.mean(data[f])
            if chi2 is True:
                res_dict['%s_%s_std' % (key, f)] = np.std(data[f])

    # to get narratio means
    #opt_total = [v for k,v in narr_dict.items() if 'opt_ar_ratio_mean' in k]
    #ir_total = [v for k,v in narr_dict.items() if 'ir_ar_ratio_mean' in k]
    #narr_dict['%s_opt_ratio_mean' % agb_mod] = np.mean(opt_total)
    #narr_dict['%s_ir__ratio_mean' % agb_mod] = np.mean(ir_total)
    #print '%s opt $%.2f\pm%.2f$' % (self.agb_mod, np.mean(opt_total), np.std(opt_total))
    #print '%s ir $%.2f\pm%.2f$' % (self.agb_mod, np.mean(ir_total), np.std(ir_total))

    # to get chi2 means
    #agb_mods = np.unique(agb_mods)
    #for extra in extras:
    #    for band in bands:
    #        band += extra
    #        for agb_mod in agb_mods:
    #            total = [v for k,v in chi_dict.items() if '%s_%s_mean' % (agb_mod, band) in k]
    #            chi_dict['%s_%s_mean' % (agb_mod, band)] = np.mean(total)
    #            chi_dict['%s_%s_std' % (agb_mod, band)] = np.std(total)
    return res_dict
def write_chi2_table(targets, cmd_input_files, table_file='default',
                     outfile_loc='default', extra_str='', just_gauss=False):
    '''Write one chi2 table file per (target, cmd_input_file) pair.

    Runs StatisticalComparisons.poission_chi2 for each combination and dumps
    the per-realization chi2 values (and p-values when just_gauss is True)
    to '<agb_mod>_<target><extra_str>_chi2.dat'. Returns the list of files
    written.
    '''
    if just_gauss is True:
        extra_str2 = extra_str + '_gauss'
    else:
        extra_str2 = extra_str
    chi2_files = []
    for target in targets:
        for cmd_input_file in cmd_input_files:
            st = StatisticalComparisons(cmd_input_file, target,
                                        outfile_loc=outfile_loc,
                                        extra_str=extra_str)
            chi2_file = os.path.join(outfile_loc,
                                     '%s_%s%s_chi2.dat' % (st.agb_mod, target,
                                                           extra_str2))
            result = st.poission_chi2(table_file=table_file, just_gauss=just_gauss)
            if just_gauss is True:
                opt_chi2, ir_chi2, opt_chi2_agb, ir_chi2_agb, opt_pval, ir_pval, opt_pval_agb, ir_pval_agb = result
                cfmt = '%i %.3f %.3f %.3f %.3f %.3f %.3f %.3f %.3f\n'
                with open(chi2_file, 'w') as c2:
                    # Header fixed to match the 9 columns cfmt writes (it
                    # used to list a spurious extra 'ir_chi2_agb' column).
                    c2.write('# sfr opt_chi2 ir_chi2 opt_agb_chi2 ir_agb_chi2 ')
                    c2.write('opt_pval ir_pval opt_pval_agb ir_pval_agb \n')
                    for i in range(len(opt_chi2)):
                        c2.write(cfmt % (i, opt_chi2[i], ir_chi2[i],
                                         opt_chi2_agb[i], ir_chi2_agb[i],
                                         opt_pval[i], ir_pval[i],
                                         opt_pval_agb[i], ir_pval_agb[i]))
                chi2_files.append(chi2_file)
            else:
                opt_chi2, ir_chi2, opt_chi2_agb, ir_chi2_agb = result
                cfmt = '%i %.3f %.3f %.3f %.3f \n'
                with open(chi2_file, 'w') as c2:
                    c2.write('# sfr opt_chi2 ir_chi2 opt_agb_chi2 ir_agb_chi2\n')
                    for i in range(len(opt_chi2)):
                        c2.write(cfmt % (i, opt_chi2[i], ir_chi2[i],
                                         opt_chi2_agb[i], ir_chi2_agb[i]))
                chi2_files.append(chi2_file)
    return chi2_files
def get_data(table_file='default'):
    '''Return observed AGB/RGB number ratios and their uncertainties.

    Reads the ancient-galaxies TRGB table and builds a dict keyed by
    '<target>_<band>' (the nagb/nrgb ratio) and '<target>_<band>_err'
    (its count uncertainty) for the 'opt' and 'ir' bands.
    '''
    if table_file == 'default':
        table_file = snap_src + '/tables/ancients_0.1_0.2_galaxies.dat'
    ags = sfh_tests_multi_proc.AncientGalaxies()
    ags.read_trgb_table(table_file)
    data_dict = {}
    for i, target in enumerate(ags.data.target):
        for band in ('opt', 'ir'):
            nagb = ags.data[i]['n%s_agb' % band]
            nrgb = ags.data[i]['n%s_rgb' % band]
            data_dict['%s_%s' % (target, band)] = nagb / nrgb
            data_dict['%s_%s_err' % (target, band)] = \
                galaxy_tests.count_uncert_ratio(nagb, nrgb)
    return data_dict
def narratio_table(narratio_files, table_file='default'):
    '''write the latex table

    Builds one LaTeX table per band (opt, ir): one row per target plus a
    final "Mean" row; per AGB model a ratio column and a fractional
    difference column. Writes <band>_narratio_table.tex next to the input
    files and returns (ir_table, opt_table).
    '''
    data_dict = get_data(table_file=table_file)
    nar_dict = result2dict(narratio_files)
    # Targets and AGB model names are encoded in the dict keys.
    targets = list(np.unique([k.split('_')[0] for k in nar_dict.keys()]))
    agb_mods = list(np.unique(['_'.join(k.split('_')[1:4])
                               for k in nar_dict.keys()]))

    # table columns: target, data ratio, (ratio, frac diff) per agb mod
    ir_table = np.empty(shape=(len(targets) + 1, len(agb_mods)*2 + 1),
                        dtype='|S20')
    ir_table[:, :] = ''
    opt_table = np.empty(shape=(len(targets) + 1, len(agb_mods)*2 + 1),
                         dtype='|S20')
    opt_table[:, :] = ''
    # fmt for interior cells, fmt2 terminates a LaTeX row.
    fmt = r'$%.3f\pm%.3f$ & '
    fmt2 = r'$%.3f\pm%.3f$ \\'
    for key, val in nar_dict.items():
        # go through half the dict (see below)
        if 'err' in key:
            continue
        # choose which table
        if 'ir' in key:
            table = ir_table
            band = 'ir'
        if 'opt' in key:
            table = opt_table
            band = 'opt'
        # choose the correct row and column placement in the table
        target = key.split('_')[0]
        agb_mod = '_'.join(key.split('_')[1:4])
        row = targets.index(target)
        # column 0 is target, columns 1, 3, 5 have ratios
        column = (agb_mods.index(agb_mod) * 2) + 1
        #print row, column
        # target
        table[row, 0] = '%s &' % target.upper()
        # data
        dnarr = data_dict['%s_%s' % (target.lower(), band)]
        derr = data_dict['%s_%s_err' % (target.lower(), band)]
        #dstr = fmt % (dnarr, derr)
        #table[row, 1] = dstr
        # model
        mnarr = val
        # grab the error from the dict
        err_key = key.replace('mean', 'err_mean')
        mnerr = nar_dict[err_key]
        mstr = fmt % (mnarr, mnerr)
        table[row, column] = mstr
        # frac difference
        #pct_diff = (mnarr - dnarr) / dnarr
        #pct_diff_err = np.abs(pct_diff * (mnerr/mnarr + derr/dnarr))
        pct_diff = (mnarr / dnarr)
        pct_diff_err = np.abs(pct_diff * (mnerr/mnarr + derr/dnarr))
        f = fmt
        # if final column, put \\ not &
        if column + 1 == table.shape[1] - 1:
            f = fmt2
        pdstr = f % (pct_diff, pct_diff_err)
        table[row, column + 1] = pdstr

    # totals:
    nar_dict = result2dict(narratio_files, search='all')
    # Parse the numeric totals back out of the LaTeX string data_table emits.
    data_total = data_table(targets)
    data_total = np.array(data_total.translate(None, '\\$Total&').replace('pm', ' ').split(), dtype=float)
    for i, agb_mod in enumerate(agb_mods):
        for band, table in zip(['ir', 'opt'], [ir_table, opt_table]):
            f = fmt
            column = (i * 2) + 1
            nrgb = np.sum([v for k, v in nar_dict.items()
                           if agb_mod in k and band in k and 'rgb' in k])
            nagb = np.sum([v for k, v in nar_dict.items()
                           if agb_mod in k and band in k and 'agb' in k])
            ratio = nagb / nrgb
            err = galaxy_tests.count_uncert_ratio(nagb, nrgb)
            table[-1, column] = f % (ratio, err)
            column += 1
            if column == table.shape[1] - 1:
                f = fmt2
            # frac difference
            if band == 'ir':
                j = 6
            if band == 'opt':
                j = 2
            dratio = data_total[j]
            derr = data_total[j+1]
            pct_diff = (ratio / dratio)
            pct_diff_err = np.abs(pct_diff * (err/ratio + derr/dratio))
            #table[-1, column] = f % (pct_diff, pct_diff_err)
            #table[-1, 0] = 'Total & '
    # mean
    # The last row is overwritten with the column means of the target rows.
    for i, agb_mod in enumerate(agb_mods):
        for table in [ir_table, opt_table]:
            f = fmt
            column = (i * 2) + 1
            val, err = \
                np.mean(np.array([l.translate(None, ' $&\\').split('pm')
                                  for l in table[:, column][:-1]], dtype=float), axis=0)
            print agb_mod, err/val
            table[-1, column] = f % (val, err)
            column += 1
            if column == table.shape[1] - 1:
                f = fmt2
            val, err = \
                np.mean(np.array([l.translate(None, ' $&\\').split('pm')
                                  for l in table[:, column][:-1]], dtype=float), axis=0)
            table[-1, column] = f % (val, err)
            table[-1, 0] = 'Mean & '

    # write the file
    ratio_str = '$\\frac{N_{\\rm TP-AGB}}{N_{\\rm RGB}}$'
    header = 'Target & '
    for i in range(len(agb_mods)):
        header += '%s %s & Frac. Difference & ' % (ratio_str,
                                                   agb_mods[i].split('_')[-1])
    header = header[:-2] + '\\\\ \n \\hline \n'
    outfile_dir = os.path.split(narratio_files[0])[0]
    for band, table in zip(['opt', 'ir'], [opt_table, ir_table]):
        outfile = os.path.join(outfile_dir, '%s_narratio_table.tex' % band)
        with open(outfile, 'w') as out:
            out.write(header)
            np.savetxt(out, table, fmt='%s')
    return ir_table, opt_table
def data_table(targets, table_file='default'):
    """Write LaTeX table rows of per-target photometry data to data_table.tex.

    Columns: target, Av, dmod, [opt: frac comp, trgb, nrgb, nagb ratio]
    [ir: same], plus a 'Total' row summed over all targets.

    Parameters
    ----------
    targets : list of str
        Target galaxy names; each must appear in ``ags.data.target``.
    table_file : str
        Path to the ancient-galaxies table; 'default' resolves under
        ``snap_src`` (module-level path -- assumed configured elsewhere).

    Returns
    -------
    str
        The formatted 'Total' row.
    """
    # target, av, dist, [opt: frac comp, trgb, nrgb, nagb ratio] [ir: ..]
    if table_file == 'default':
        table_file = snap_src + '/tables/ancients_0.1_0.2_galaxies.dat'
    ags = sfh_tests_multi_proc.AncientGalaxies()
    ags.read_trgb_table(table_file)
    # NOTE(review): get_data is defined elsewhere in this module -- presumably
    # returns a flat {"<target>_<band>[_err]": value} mapping; verify.
    data_dict = get_data(table_file=table_file)
    comp_data = tables.read_completeness_table()
    row = ''
    row2 = ''
    ts = list(set([t.lower() for t in targets]) & set(ags.data.target))
    assert len(ts) == len(targets), 'cant find all targets in ags.data'
    inds = list([np.where(t.lower() == ags.data.target)[0][0] for t in targets])
    opt_agb_tot = np.sum(ags.data.nopt_agb[inds])
    opt_rgb_tot = np.sum(ags.data.nopt_rgb[inds])
    ir_agb_tot = np.sum(ags.data.nir_agb[inds])
    ir_rgb_tot = np.sum(ags.data.nir_rgb[inds])
    totfmt = 'Total & %i & %i & $%.3f\\pm%.3f$ & %i & %i & $%.3f\\pm%.3f$ \\\\'
    total = totfmt % (opt_agb_tot, opt_rgb_tot, opt_agb_tot/opt_rgb_tot,
                      galaxy_tests.count_uncert_ratio(opt_agb_tot, opt_rgb_tot),
                      ir_agb_tot, ir_rgb_tot, ir_agb_tot/ir_rgb_tot,
                      galaxy_tests.count_uncert_ratio(ir_agb_tot, ir_rgb_tot))
    # BUG FIX: these accumulators were previously re-initialized inside the
    # target loop, so the "max err pct" report at the end only ever saw the
    # final target. Initialize them once so the max is over all targets.
    opt_err_pct = []
    ir_err_pct = []
    for target in targets:
        if target.upper() == 'NGC2976-DEEP':
            extra_key = 'F606W,F814W'
        else:
            extra_key = None
        (Av, dmod) = [angst_data.get_item(target, i, extra_key=extra_key)
                      for i in ['Av', 'dmod']]
        comp_row = rsp.fileIO.get_row(comp_data, 'target', target)
        nstars_row = rsp.fileIO.get_row(ags.data, 'target', target)
        # per-target subset of data_dict keyed on the (normalized) target name
        sub_dict = {}
        for (k, v) in data_dict.items():
            if '404' in target:
                target = target.lower().replace('-deep', '')
            if target in k or target.lower() in k:
                sub_dict[k] = v
        opt_err_pct.append(sub_dict['%s_opt_err' % target.lower()] /
                           sub_dict['%s_opt' % target.lower()])
        ir_err_pct.append(sub_dict['%s_ir_err' % target.lower()] /
                          sub_dict['%s_ir' % target.lower()])
        row += '%s & %.2f & %.2f & ' % (target, Av, dmod)
        row += '%(opt_filter2).2f & ' % comp_row
        row += '%(opt_trgb).2f & ' % nstars_row
        row += '%(ir_filter2).2f & ' % comp_row
        row += '%(ir_trgb).2f \\\\ \n' % nstars_row
        row2 += '%s & ' % target
        row2 += '%(nopt_agb)i & %(nopt_rgb)i & ' % nstars_row
        row2 += '$%.3f\\pm%.3f$ & ' % (sub_dict['%s_opt' % target.lower()],
                                       sub_dict['%s_opt_err' % target.lower()])
        row2 += '%(nir_agb)i & %(nir_rgb)i & ' % nstars_row
        row2 += '$%.3f\\pm%.3f$ \\\\ \n' % (sub_dict['%s_ir' % target.lower()],
                                            sub_dict['%s_ir_err' % target.lower()])
    with open('data_table.tex', 'w') as out:
        out.write(row)
        out.write(row2)
        out.write(total)
        out.write('# max err pct. opt: %.3f ir: %.3f \n' %
                  (np.max(opt_err_pct), np.max(ir_err_pct)))
    return total
def chi2plot(model_dict, outfile_loc=None):
    """Plot per-target chi^2 means (with std error bars) for each AGB model.

    ``model_dict`` keys are assumed to look like
    ``<target>_<agb>_<mod>_<name>_..._{mean|std}`` with optional ``ir`` /
    ``_agb`` tokens selecting the subplot -- TODO confirm against the caller
    that builds this dict. Saves chi2_plot.png into ``outfile_loc``
    (defaults to the current working directory) and returns the axes array.

    NOTE(review): ``fontlarge``/``fontmid``/``fontsmall``, ``plt`` and
    ``model_plots`` are module-level names defined elsewhere in this file.
    """
    # derive target and model lists from the dict keys themselves
    targets = list(np.unique([k.split('_')[0] for k in model_dict.keys()]))
    agb_mods = list(np.unique(['_'.join(k.split('_')[1:4])
                    for k in model_dict.keys()]))
    cols = ['darkgreen', 'navy', 'darkred']
    # 2x2 grid: rows = full CMD vs TP-AGB-only, cols = optical vs NIR
    fig, axs = plt.subplots(ncols=2, nrows=2, sharex=True, sharey=False,
                            figsize=(10,10))
    plt.subplots_adjust(right=0.95, left=0.08, wspace=0.2, top=0.95, hspace=0.1)
    # one x-axis slot per target
    offsets = np.linspace(0, 1, len(targets))
    for key, val in model_dict.items():
        # 'std' entries are consumed together with their 'mean' partner below
        if 'std' in key:
            continue
        target = key.split('_')[0]
        errval = model_dict[key.replace('mean', 'std')]
        ioff = targets.index(target)
        agb_mod = '_'.join(key.split('_')[1:4])
        col = cols[agb_mods.index(agb_mod)]
        # marker style encodes which AGB model variant this point belongs to
        sym = 'o'
        if not agb_mod.endswith('nov13'):
            mfc='white'
        else:
            mfc = col
        if not 'nov13' in agb_mod:
            sym = '*'
        # pick the subplot from tokens in the key
        ax_row = 0
        ax_col = 0
        if not '_agb' in key:
            ax_row = 1
        if 'ir' in key:
            ax_col = 1
        ax = axs[ax_row][ax_col]
        ax.errorbar(offsets[ioff], val, yerr=errval, marker=sym, color=col, ms=12,
                    mfc=mfc, ecolor='black', mew=1.5, elinewidth=2)
        ax.set_ylabel('$\chi^2$', fontsize=fontlarge)
        ax.xaxis.set_ticks(offsets)
        ax.set_xticklabels(['$%s$' % t.replace('-deep', '').replace('-', '\!-\!').upper() for t in targets])
        [t.set_rotation(30) for t in ax.get_xticklabels()]
        ax.tick_params(labelsize=fontmid)
        #ymaxs[ax_num] = np.max([val, ymaxs[ax_num]])
    axs[0][0].set_title(r'$\rm{Optical}$', fontsize=fontlarge)
    axs[0][1].set_title(r'$\rm{NIR}$', fontsize=fontlarge)
    [ax.set_ylim(0, 25) for ax in axs[:, 0]]
    [ax.set_ylim(0, 10) for ax in axs[:, 1]]
    #fig.subplots_adjust(hspace=0.1)
    # pad the x range by half an offset step on each side
    xlims = ax.get_xlim()
    off = np.diff(offsets)[0]
    ax.set_xlim(xlims[0]-off/2, xlims[1]+off/2)
    # proxy artists for the legend (plotted off-screen at (-99, 99))
    sym = ['o', 'o', '*']
    mfc = [cols[0], 'None', 'None']
    [axs[0, 0].plot(-99, 99, sym[j], mfc=mfc[j], ms=12, mew=1.5, color=cols[j],
                    label='$%s$' % model_plots.translate_model_name(agb_mods[j].split('_')[-1]))
     for j in range(len(agb_mods))]
    axs[0, 0].legend(loc=0, numpoints=1, fontsize=fontsmall)
    [ax.annotate(r'$\rm{TP\!-\!AGB\ Only}$', (0.02, 0.02), fontsize=fontmid,
                 xycoords='axes fraction') for ax in axs[0, :]]
    if outfile_loc is None:
        outfile_loc = os.getcwd()
    outfile = os.path.join(outfile_loc, 'chi2_plot.png')
    plt.savefig(outfile, dpi=150)
    return axs
def run_match_stats(targets='ancients'):
targets = galaxy_tests.load_targets(targets)
hmc_file_loc = os.path.join(snap_src, 'data', 'sfh_parsec')
cmd_file_loc = os.path.join(hmc_file_loc, 'cmd_files')
for target in targets:
target = target.lower()
try:
target = target.replace('-deep', '')
hmc_file, = rsp.fileIO.get_files(hmc_file_loc, '%s*sfh' % target)
cmd_file, = rsp.fileIO.get_files(cmd_file_loc, '%s*cmd' % target)
except:
print target, 'cmd file not found.'
continue
rsp.match_utils.match_stats(hmc_file, cmd_file)
return
| |
'''
.. _motionevent:
Motion Event
============
The :class:`MotionEvent` is the base class used for every touch and non-touch
event. This class defines all the properties and methods needed to handle 2D and
3D movements but has many more capabilities.
.. note::
You never create the :class:`MotionEvent` yourself: this is the role of the
:mod:`~kivy.input.providers`.
Motion Event and Touch
----------------------
We differentiate between a Motion Event and a Touch event. A Touch event is a
:class:`MotionEvent` with the `pos` profile. Only these events are dispatched
throughout the widget tree.
1. The :class:`MotionEvent` 's are gathered from input providers.
2. All the :class:`MotionEvent` 's are dispatched from
:py:func:`~kivy.core.window.WindowBase.on_motion`.
3. If a :class:`MotionEvent` has a `pos` profile, we dispatch it through
:py:func:`~kivy.core.window.WindowBase.on_touch_down`,
:py:func:`~kivy.core.window.WindowBase.on_touch_move` and
:py:func:`~kivy.core.window.WindowBase.on_touch_up`.
Listening to a Motion Event
---------------------------
If you want to receive all MotionEvents, Touch or not, you can bind the
MotionEvent from the :class:`~kivy.core.window.Window` to your own callback::
def on_motion(self, etype, motionevent):
# will receive all motion events.
pass
Window.bind(on_motion=on_motion)
Profiles
--------
A capability is the ability of a :class:`MotionEvent` to store new
information or a way to indicate what is supported by the MotionEvent. For
example, you can receive a MotionEvent that has an angle, a fiducial ID, or
even a shape. You can check the :attr:`~MotionEvent.profile` attribute to check
what is currently supported by the MotionEvent and how to access it.
This is a tiny list of the supported profiles by default. Check other input
providers to see if there are other profiles available.
============== ================================================================
Profile name Description
-------------- ----------------------------------------------------------------
angle 2D angle. Use property `a`
button Mouse button (left, right, middle, scrollup, scrolldown)
Use property `button`
markerid Marker or Fiducial ID. Use property `fid`
pos            2D position. Use properties `x`, `y` or `pos`
pos3d 3D position. Use properties `x`, `y`, `z`
pressure Pressure of the contact. Use property `pressure`
shape Contact shape. Use property `shape`
============== ================================================================
If you want to know whether the current :class:`MotionEvent` has an angle::
def on_touch_move(self, touch):
if 'angle' in touch.profile:
print('The touch angle is', touch.a)
If you want to select only the fiducials::
def on_touch_move(self, touch):
if 'markerid' not in touch.profile:
return
'''
__all__ = ('MotionEvent', )
import weakref
from inspect import isroutine
from copy import copy
from time import time
from kivy.vector import Vector
class EnhancedDictionnary(dict):
    '''Dictionary whose keys can also be read and written as attributes.

    Attribute access on a missing key raises :class:`AttributeError` (not
    ``KeyError``), so ``hasattr()`` and ``getattr(obj, name, default)``
    behave as expected.
    '''

    def __getattr__(self, attr):
        # Only invoked when normal attribute lookup fails: fall back to the
        # dict storage.
        try:
            return self.__getitem__(attr)
        except KeyError:
            # BUG FIX: the previous fallback delegated to
            # super().__getattr__, which does not exist on dict and produced
            # a confusing "'super' object has no attribute '__getattr__'"
            # error. Raise a proper AttributeError naming the attribute
            # instead (same exception type callers already catch).
            raise AttributeError(attr)

    def __setattr__(self, attr, value):
        # Every attribute assignment is stored as a dict item.
        self.__setitem__(attr, value)
class MotionEventMetaclass(type):
    '''Metaclass that accumulates the ``__attrs__`` tuples declared on all
    base classes and on the class body itself into a single tuple, in
    base-first order.
    '''

    def __new__(mcs, name, bases, attrs):
        merged = []
        for base in bases:
            merged.extend(getattr(base, '__attrs__', ()))
        merged.extend(attrs.get('__attrs__', ()))
        attrs['__attrs__'] = tuple(merged)
        return super(MotionEventMetaclass, mcs).__new__(mcs, name, bases,
                                                        attrs)


MotionEventBase = MotionEventMetaclass('MotionEvent', (object, ), {})
class MotionEvent(MotionEventBase):
    '''Abstract class to represent a touch and non-touch object.

    :Parameters:
        `id` : str
            unique ID of the MotionEvent
        `args` : list
            list of parameters, passed to the depack() function
    '''

    # Class-wide counter used to hand out a unique `uid` per event.
    __uniq_id = 0

    __attrs__ = \
        ('device', 'push_attrs', 'push_attrs_stack',
         'is_touch', 'id', 'shape', 'profile',
         # current position, in 0-1 range
         'sx', 'sy', 'sz',
         # first position set, in 0-1 range
         'osx', 'osy', 'osz',
         # last position set, in 0-1 range
         'psx', 'psy', 'psz',
         # delta from the last position and current one, in 0-1 range
         'dsx', 'dsy', 'dsz',
         # current position, in screen range
         'x', 'y', 'z',
         # first position set, in screen range
         'ox', 'oy', 'oz',
         # last position set, in screen range
         'px', 'py', 'pz',
         # delta from the last position and current one, in screen range
         'dx', 'dy', 'dz',
         'time_start',
         'is_double_tap', 'double_tap_time',
         'is_triple_tap', 'triple_tap_time',
         'ud')

    def __init__(self, device, id, args):
        if self.__class__ == MotionEvent:
            raise NotImplementedError('class MotionEvent is abstract')
        MotionEvent.__uniq_id += 1

        #: True if the Motion Event is a Touch. Can also be verified by
        #: checking that `pos` is in :attr:`profile`.
        self.is_touch = False

        #: Attributes to push by default, when we use :func:`push` : x, y, z,
        #: dx, dy, dz, ox, oy, oz, px, py, pz.
        self.push_attrs_stack = []
        self.push_attrs = ('x', 'y', 'z', 'dx', 'dy', 'dz', 'ox', 'oy', 'oz',
                           'px', 'py', 'pz', 'pos')

        #: Unique ID of the touch. You can safely use this property, it will
        #: never be the same across all existing touches.
        self.uid = MotionEvent.__uniq_id

        #: Device used for creating this touch
        self.device = device

        # For grab
        self.grab_list = []
        self.grab_exclusive_class = None
        self.grab_state = False

        #: Used to determine which widget the touch is being dispatched to.
        #: Check the :func:`grab` function for more information.
        self.grab_current = None

        #: Profiles currently used in the touch
        self.profile = []

        #: Id of the touch, not unique. This is generally the Id set by the
        #: input provider, like ID in TUIO. If you have multiple TUIO sources,
        #: the same id can be used. Prefer to use the :attr:`uid` attribute
        #: instead.
        self.id = id

        #: Shape of the touch, subclass of
        #: :class:`~kivy.input.shape.Shape`.
        #: By default, the property is set to None
        self.shape = None

        #: X position, in 0-1 range
        self.sx = 0.0
        #: Y position, in 0-1 range
        self.sy = 0.0
        #: Z position, in 0-1 range
        self.sz = 0.0
        #: Origin X position, in 0-1 range.
        self.osx = None
        #: Origin Y position, in 0-1 range.
        self.osy = None
        #: Origin Z position, in 0-1 range.
        self.osz = None
        #: Previous X position, in 0-1 range.
        self.psx = None
        #: Previous Y position, in 0-1 range.
        self.psy = None
        #: Previous Z position, in 0-1 range.
        self.psz = None
        #: Delta between self.sx and self.psx, in 0-1 range.
        self.dsx = None
        #: Delta between self.sy and self.psy, in 0-1 range.
        self.dsy = None
        #: Delta between self.sz and self.psz, in 0-1 range.
        self.dsz = None
        #: X position, in window range
        self.x = 0.0
        #: Y position, in window range
        self.y = 0.0
        #: Z position, in window range
        self.z = 0.0
        #: Origin X position, in window range
        self.ox = None
        #: Origin Y position, in window range
        self.oy = None
        #: Origin Z position, in window range
        self.oz = None
        #: Previous X position, in window range
        self.px = None
        #: Previous Y position, in window range
        self.py = None
        #: Previous Z position, in window range
        self.pz = None
        #: Delta between self.x and self.px, in window range
        self.dx = None
        #: Delta between self.y and self.py, in window range
        self.dy = None
        #: Delta between self.z and self.pz, in window range
        self.dz = None
        #: Position (X, Y), in window range
        self.pos = (0.0, 0.0)

        #: Initial time of the touch creation
        self.time_start = time()

        #: Time of the last update
        self.time_update = self.time_start

        #: Time of the end event (last touch usage)
        self.time_end = -1

        #: Indicate if the touch is a double tap or not
        self.is_double_tap = False

        #: Indicate if the touch is a triple tap or not
        #:
        #: .. versionadded:: 1.7.0
        self.is_triple_tap = False

        #: If the touch is a :attr:`is_double_tap`, this is the time between
        #: the previous tap and the current touch.
        self.double_tap_time = 0

        #: If the touch is a :attr:`is_triple_tap`, this is the time between
        #: the first tap and the current touch.
        #:
        #: .. versionadded:: 1.7.0
        self.triple_tap_time = 0

        #: User data dictionary. Use this dictionary to save your own data on
        #: the touch.
        self.ud = EnhancedDictionnary()

        self.depack(args)

    def depack(self, args):
        '''Depack `args` into attributes of the class'''
        # set initial position and last position to the current one the first
        # time the event is depacked (osx is still None only then)
        if self.osx is None:
            self.psx = self.osx = self.sx
            self.psy = self.osy = self.sy
            self.psz = self.osz = self.sz
        # update the delta
        self.dsx = self.sx - self.psx
        self.dsy = self.sy - self.psy
        self.dsz = self.sz - self.psz

    def grab(self, class_instance, exclusive=False):
        '''Grab this motion event. You can grab a touch if you absolutely
        want to receive on_touch_move() and on_touch_up(), even if the touch
        is not dispatched by your parent::

            def on_touch_down(self, touch):
                touch.grab(self)

            def on_touch_move(self, touch):
                if touch.grab_current is self:
                    # I received my grabbed touch
                else:
                    # it's a normal touch

            def on_touch_up(self, touch):
                if touch.grab_current is self:
                    # I receive my grabbed touch, I must ungrab it!
                    touch.ungrab(self)
                else:
                    # it's a normal touch
                    pass
        '''
        if not self.is_touch:
            raise Exception('Grab works only for Touch MotionEvents.')
        if self.grab_exclusive_class is not None:
            raise Exception('Cannot grab the touch, touch is exclusive')
        # hold only a weak reference so grabbing never keeps a widget alive
        class_instance = weakref.ref(class_instance)
        if exclusive:
            self.grab_exclusive_class = class_instance
        self.grab_list.append(class_instance)

    def ungrab(self, class_instance):
        '''Ungrab a previously grabbed touch
        '''
        class_instance = weakref.ref(class_instance)
        if self.grab_exclusive_class == class_instance:
            self.grab_exclusive_class = None
        if class_instance in self.grab_list:
            self.grab_list.remove(class_instance)

    def move(self, args):
        '''Move the touch to another position
        '''
        # remember the current position as the previous one, then depack
        self.px = self.x
        self.py = self.y
        self.pz = self.z
        self.psx = self.sx
        self.psy = self.sy
        self.psz = self.sz
        self.time_update = time()
        self.depack(args)

    def scale_for_screen(self, w, h, p=None, rotation=0):
        '''Scale position for the screen
        '''
        sx, sy = self.sx, self.sy
        # map the normalized 0-1 position to window pixels, taking the window
        # rotation (0/90/180/270 degrees) into account
        if rotation == 0:
            self.x = sx * float(w)
            self.y = sy * float(h)
        elif rotation == 90:
            sx, sy = sy, 1 - sx
            self.x = sx * float(h)
            self.y = sy * float(w)
        elif rotation == 180:
            sx, sy = 1 - sx, 1 - sy
            self.x = sx * float(w)
            self.y = sy * float(h)
        elif rotation == 270:
            sx, sy = 1 - sy, sx
            self.x = sx * float(h)
            self.y = sy * float(w)
        if p:
            self.z = self.sz * float(p)
        # first scaling: initialize origin/previous to the current position
        if self.ox is None:
            self.px = self.ox = self.x
            self.py = self.oy = self.y
            self.pz = self.oz = self.z
        self.dx = self.x - self.px
        self.dy = self.y - self.py
        self.dz = self.z - self.pz
        # cache position
        self.pos = self.x, self.y

    def push(self, attrs=None):
        '''Push attribute values in `attrs` onto the stack
        '''
        if attrs is None:
            attrs = self.push_attrs
        values = [getattr(self, x) for x in attrs]
        self.push_attrs_stack.append((attrs, values))

    def pop(self):
        '''Pop attributes values from the stack
        '''
        attrs, values = self.push_attrs_stack.pop()
        for i in range(len(attrs)):
            setattr(self, attrs[i], values[i])

    def apply_transform_2d(self, transform):
        '''Apply a transformation on x, y, z, px, py, pz,
        ox, oy, oz, dx, dy, dz
        '''
        self.x, self.y = self.pos = transform(self.x, self.y)
        self.px, self.py = transform(self.px, self.py)
        self.ox, self.oy = transform(self.ox, self.oy)
        self.dx = self.x - self.px
        self.dy = self.y - self.py

    def copy_to(self, to):
        '''Copy some attribute to another touch object.'''
        for attr in self.__attrs__:
            to.__setattr__(attr, copy(self.__getattribute__(attr)))

    def distance(self, other_touch):
        '''Return the distance between the current touch and another touch.
        '''
        return Vector(self.pos).distance(other_touch.pos)

    def update_time_end(self):
        self.time_end = time()

    # facilities
    @property
    def dpos(self):
        '''Return delta between last position and current position, in the
        screen coordinate system (self.dx, self.dy)'''
        return self.dx, self.dy

    @property
    def opos(self):
        '''Return the initial position of the touch in the screen
        coordinate system (self.ox, self.oy)'''
        return self.ox, self.oy

    @property
    def ppos(self):
        '''Return the previous position of the touch in the screen
        coordinate system (self.px, self.py)'''
        return self.px, self.py

    @property
    def spos(self):
        '''Return the position in the 0-1 coordinate system
        (self.sx, self.sy)'''
        return self.sx, self.sy

    def __str__(self):
        basename = str(self.__class__)
        classname = basename.split('.')[-1].replace('>', '').replace('\'', '')
        return '<%s spos=%s pos=%s>' % (classname, self.spos, self.pos)

    def __repr__(self):
        out = []
        for x in dir(self):
            v = getattr(self, x)
            if x[0] == '_':
                continue
            if isroutine(v):
                continue
            out.append('%s="%s"' % (x, v))
        return '<%s %s>' % (
            self.__class__.__name__,
            ' '.join(out))

    @property
    def is_mouse_scrolling(self):
        '''Returns True if the touch is a mousewheel scrolling

        .. versionadded:: 1.6.0
        '''
        # BUG FIX: this getter previously declared a spurious ``*args``
        # parameter; property getters are only ever called with ``self``,
        # so the extra parameter was dead code.
        return 'button' in self.profile and 'scroll' in self.button
| |
from jsonrpc import ServiceProxy
import sys
import string
# ===== BEGIN USER SETTINGS =====
# if you do not set these you will be prompted for a password for every command
rpcuser = ""
rpcpass = ""
# ====== END USER SETTINGS ======
# Connect to the local daemon's JSON-RPC port (61142); credentials are only
# embedded in the URL when a password has been configured above.
if rpcpass == "":
	access = ServiceProxy("http://127.0.0.1:61142")
else:
	access = ServiceProxy("http://"+rpcuser+":"+rpcpass+"@127.0.0.1:61142")
cmd = sys.argv[1].lower()
if cmd == "backupwallet":
try:
path = raw_input("Enter destination path/filename: ")
print access.backupwallet(path)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccount":
try:
addr = raw_input("Enter a Cryptoescudo address: ")
print access.getaccount(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccountaddress":
try:
acct = raw_input("Enter an account name: ")
print access.getaccountaddress(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getaddressesbyaccount":
try:
acct = raw_input("Enter an account name: ")
print access.getaddressesbyaccount(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getbalance":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getbalance(acct, mc)
except:
print access.getbalance()
except:
print "\n---An error occurred---\n"
elif cmd == "getblockbycount":
try:
height = raw_input("Height: ")
print access.getblockbycount(height)
except:
print "\n---An error occurred---\n"
elif cmd == "getblockcount":
try:
print access.getblockcount()
except:
print "\n---An error occurred---\n"
elif cmd == "getblocknumber":
try:
print access.getblocknumber()
except:
print "\n---An error occurred---\n"
elif cmd == "getconnectioncount":
try:
print access.getconnectioncount()
except:
print "\n---An error occurred---\n"
elif cmd == "getdifficulty":
try:
print access.getdifficulty()
except:
print "\n---An error occurred---\n"
elif cmd == "getgenerate":
try:
print access.getgenerate()
except:
print "\n---An error occurred---\n"
elif cmd == "gethashespersec":
try:
print access.gethashespersec()
except:
print "\n---An error occurred---\n"
elif cmd == "getinfo":
try:
print access.getinfo()
except:
print "\n---An error occurred---\n"
elif cmd == "getnewaddress":
try:
acct = raw_input("Enter an account name: ")
try:
print access.getnewaddress(acct)
except:
print access.getnewaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaccount":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaccount(acct, mc)
except:
print access.getreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaddress":
try:
addr = raw_input("Enter a Cryptoescudo address (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaddress(addr, mc)
except:
print access.getreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "gettransaction":
try:
txid = raw_input("Enter a transaction ID: ")
print access.gettransaction(txid)
except:
print "\n---An error occurred---\n"
elif cmd == "getwork":
try:
data = raw_input("Data (optional): ")
try:
print access.gettransaction(data)
except:
print access.gettransaction()
except:
print "\n---An error occurred---\n"
elif cmd == "help":
try:
cmd = raw_input("Command (optional): ")
try:
print access.help(cmd)
except:
print access.help()
except:
print "\n---An error occurred---\n"
elif cmd == "listaccounts":
try:
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.listaccounts(mc)
except:
print access.listaccounts()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaccount":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaccount(mc, incemp)
except:
print access.listreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaddress":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaddress(mc, incemp)
except:
print access.listreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "listtransactions":
try:
acct = raw_input("Account (optional): ")
count = raw_input("Number of transactions (optional): ")
frm = raw_input("Skip (optional):")
try:
print access.listtransactions(acct, count, frm)
except:
print access.listtransactions()
except:
print "\n---An error occurred---\n"
elif cmd == "move":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.move(frm, to, amt, mc, comment)
except:
print access.move(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendfrom":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendfrom(frm, to, amt, mc, comment, commentto)
except:
print access.sendfrom(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendmany":
try:
frm = raw_input("From: ")
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.sendmany(frm,to,mc,comment)
except:
print access.sendmany(frm,to)
except:
print "\n---An error occurred---\n"
elif cmd == "sendtoaddress":
try:
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
amt = raw_input("Amount:")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendtoaddress(to,amt,comment,commentto)
except:
print access.sendtoaddress(to,amt)
except:
print "\n---An error occurred---\n"
elif cmd == "setaccount":
try:
addr = raw_input("Address: ")
acct = raw_input("Account:")
print access.setaccount(addr,acct)
except:
print "\n---An error occurred---\n"
elif cmd == "setgenerate":
try:
gen= raw_input("Generate? (true/false): ")
cpus = raw_input("Max processors/cores (-1 for unlimited, optional):")
try:
print access.setgenerate(gen, cpus)
except:
print access.setgenerate(gen)
except:
print "\n---An error occurred---\n"
elif cmd == "settxfee":
try:
amt = raw_input("Amount:")
print access.settxfee(amt)
except:
print "\n---An error occurred---\n"
elif cmd == "stop":
try:
print access.stop()
except:
print "\n---An error occurred---\n"
elif cmd == "validateaddress":
try:
addr = raw_input("Address: ")
print access.validateaddress(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrase":
try:
pwd = raw_input("Enter wallet passphrase: ")
access.walletpassphrase(pwd, 60)
print "\n---Wallet unlocked---\n"
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrasechange":
try:
pwd = raw_input("Enter old wallet passphrase: ")
pwd2 = raw_input("Enter new wallet passphrase: ")
access.walletpassphrasechange(pwd, pwd2)
print
print "\n---Passphrase changed---\n"
except:
print
print "\n---An error occurred---\n"
print
else:
print "Command not found or not supported"
| |
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Benchmarks on Keras layers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v2 as tf
import functools
import numpy as np
from keras.benchmarks import benchmark_util
from keras.benchmarks.layer_benchmarks import layer_benchmarks_test_base
def _get_metadata(name):
return {
"model_name": "ideal_layers",
"parameters": name[1] + "_shape",
}
def _get_layer_args(layer_cls, layer_args):
  """Return the constructor kwargs for `layer_cls`.

  Bidirectional is special-cased with a fresh wrapped LSTM(1) to make the
  benchmark parameters compatible with the GPU platform; every other layer
  class uses `layer_args` unchanged.
  """
  if layer_cls is not tf.keras.layers.Bidirectional:
    return layer_args
  return {"layer": tf.keras.layers.LSTM(1)}
def _get_input_data(inputs):
if "input_shape" in inputs:
return tf.ones(inputs["input_shape"])
elif "input" in inputs:
return inputs["input"]
else:
raise ValueError("Please specify either `input_shape` or `input`"
"for the benchmark test")
def _layer_call_backward(layer, x):
  """Run one forward + backward pass of `layer` on `x` (gradient discarded)."""
  with tf.GradientTape() as tape:
    out = layer(x)
    loss = tf.reduce_mean(out ** 2)
  # Gradient is computed purely for benchmarking; the result is unused.
  tape.gradient(loss, layer.trainable_variables)
# Benchmark parameter tables. Each entry is a tuple of:
#   (benchmark_name, layer_class, constructor_kwargs, input_spec, num_iters)
# where benchmark_name must follow "{layer_name}_{small|normal|large}_shape"
# and input_spec carries either an "input_shape" tuple (materialized with
# tf.ones) or a ready-made "input" value.
CORE_LAYERS = [
    ("Dense_small_shape", tf.keras.layers.Dense,
     {"units": 32, "activation": "relu"},
     {"input_shape": (1, 16)}, 100),
    ("Activation_small_shape", tf.keras.layers.Activation,
     {"activation": "relu"},
     {"input_shape": (1, 4)}, 100),
    ("Embedding_small_shape", tf.keras.layers.Embedding,
     {"input_dim": 1, "output_dim": 1, "input_length": 1},
     {"input": np.random.randint(1, size=(1, 1))}, 100),
    ("Embedding_normal_shape", tf.keras.layers.Embedding,
     {"input_dim": 1000, "output_dim": 64, "input_length": 10},
     {"input": np.random.randint(1000, size=(32, 10))}, 100),
    ("Masking_small_shape", tf.keras.layers.Masking,
     {"mask_value": 1}, {"input_shape": (1, 1)}, 100),
    ("Lambda_small_shape", tf.keras.layers.Lambda,
     {"function": lambda x: x ** 2}, {"input_shape": (1, 1)}, 100),
    ("Flatten_small_shape", tf.keras.layers.Flatten,
     {}, {"input_shape": (1, 1)}, 100),
]
CONV_LAYERS = [
    ("Conv1D_small_shape", tf.keras.layers.Conv1D,
     {"filters": 1, "kernel_size": 1, "activation": "relu"},
     {"input_shape": (1, 1, 1)}, 100),
    ("Conv2D_small_shape", tf.keras.layers.Conv2D,
     {"filters": 1, "kernel_size": 1, "activation": "relu"},
     {"input_shape": (1, 1, 1, 1)}, 100),
    ("Conv2D_normal_shape", tf.keras.layers.Conv2D,
     {"filters": 1, "kernel_size": 1, "activation": "relu"},
     {"input_shape": (64, 28, 28, 3)}, 100),
    ("Conv3D_small_shape", tf.keras.layers.Conv3D,
     {"filters": 1, "kernel_size": 1, "activation": "relu"},
     {"input_shape": (1, 1, 1, 1, 1)}, 100),
    ("Conv1DTranspose_small_shape", tf.keras.layers.Conv1DTranspose,
     {"filters": 1, "kernel_size": 1, "activation": "relu"},
     {"input_shape": (1, 1, 1)}, 100),
    ("Conv2DTranspose_small_shape", tf.keras.layers.Conv2DTranspose,
     {"filters": 1, "kernel_size": 1, "activation": "relu"},
     {"input_shape": (1, 1, 1, 1)}, 100),
    ("Conv3DTranspose_small_shape", tf.keras.layers.Conv3DTranspose,
     {"filters": 1, "kernel_size": 1, "activation": "relu"},
     {"input_shape": (1, 1, 1, 1, 1)}, 100),
    ("SeparableConv1D_small_shape", tf.keras.layers.SeparableConv1D,
     {"filters": 1, "kernel_size": 1, "activation": "relu"},
     {"input_shape": (1, 1, 1)}, 100),
    ("SeparableConv2D_small_shape", tf.keras.layers.SeparableConv2D,
     {"filters": 1, "kernel_size": 1, "activation": "relu"},
     {"input_shape": (1, 1, 1, 1)}, 100),
    ("DepthwiseConv2D_small_shape", tf.keras.layers.DepthwiseConv2D,
     {"kernel_size": 1, "activation": "relu"},
     {"input_shape": (1, 1, 1, 1)}, 100),
]
RECURRENT_LAYERS = [
    ("LSTM_small_shape", tf.keras.layers.LSTM,
     {"units": 1}, {"input_shape": (1, 1, 1)}, 100),
    ("LSTM_normal_shape", tf.keras.layers.LSTM,
     {"units": 4}, {"input_shape": (32, 10, 8)}, 100),
    ("GRU_small_shape", tf.keras.layers.GRU,
     {"units": 1}, {"input_shape": (1, 1, 1)}, 100),
    ("SimpleRNN_small_shape", tf.keras.layers.SimpleRNN,
     {"units": 1}, {"input_shape": (1, 1, 1)}, 100),
    ("TimeDistributed_small_shape", tf.keras.layers.TimeDistributed,
     {"layer": tf.keras.layers.Conv2D(1, 1)},
     {"input_shape": (1, 1, 1, 1, 1)}, 100),
    # Bidirectional's wrapped layer is supplied by _get_layer_args.
    ("Bidirectional_small_shape", tf.keras.layers.Bidirectional,
     {}, {"input_shape": (1, 1, 1)}, 100),
    ("ConvLSTM2D_small_shape", tf.keras.layers.ConvLSTM2D,
     {"filters": 1, "kernel_size": 1, "activation": "relu"},
     {"input_shape": (1, 1, 1, 1, 1)}, 100),
    ("RNN_small_shape", tf.keras.layers.RNN,
     {"cell": tf.keras.layers.LSTMCell(1)}, {"input_shape": (1, 1, 1)}, 100),
]
NORMALIZATION_LAYERS = [
    ("BatchNormalization_small_shape", tf.keras.layers.BatchNormalization,
     {"axis": -1}, {"input_shape": (1, 1, 1)}, 100),
    ("LayerNormalization_small_shape", tf.keras.layers.LayerNormalization,
     {"axis": -1}, {"input_shape": (1, 1, 1)}, 100),
]
REGULARIZATION_LAYERS = [
    ("Dropout_small_shape", tf.keras.layers.Dropout,
     {"rate": 0.2}, {"input_shape": (1, 1, 1)}, 100),
    ("SpatialDropout1D_small_shape", tf.keras.layers.SpatialDropout1D,
     {"rate": 0.2}, {"input_shape": (1, 1, 1)}, 100),
    ("SpatialDropout2D_small_shape", tf.keras.layers.SpatialDropout2D,
     {"rate": 0.2}, {"input_shape": (1, 1, 1, 1)}, 100),
    ("SpatialDropout3D_small_shape", tf.keras.layers.SpatialDropout3D,
     {"rate": 0.2}, {"input_shape": (1, 1, 1, 1, 1)}, 100),
    ("GaussianDropout_small_shape", tf.keras.layers.GaussianDropout,
     {"rate": 0.2}, {"input_shape": (1, 1, 1)}, 100),
    ("GaussianNoise_small_shape", tf.keras.layers.GaussianNoise,
     {"stddev": 0.1}, {"input_shape": (1, 1, 1)}, 100),
    ("ActivityRegularization_small_shape",
     tf.keras.layers.ActivityRegularization,
     {"l1": 0.3}, {"input_shape": (1, 1, 1)}, 100),
    ("AlphaDropout_small_shape", tf.keras.layers.AlphaDropout,
     {"rate": 0.2}, {"input_shape": (1, 1, 1)}, 100),
]
# NOTE(review): "ATTENSION" is a typo for "ATTENTION"; the name is kept
# unchanged because it is referenced by the benchmark parameter list below.
ATTENSION_LAYERS = [
    ("Attention_small_shape", tf.keras.layers.Attention,
     {"use_scale": False}, {"input": [np.ones((1, 1, 1)), np.ones((1, 1, 1))]},
     100),
    ("AdditiveAttention_small_shape", tf.keras.layers.AdditiveAttention,
     {"use_scale": True}, {"input": [np.ones((1, 1, 1)), np.ones((1, 1, 1))]},
     100),
]
POOLING_LAYERS = [
    ("MaxPooling1D_small_shape", tf.keras.layers.MaxPooling1D,
     {"pool_size": 1, "strides": 1}, {"input_shape": (1, 1, 1)}, 100),
    ("MaxPooling2D_small_shape", tf.keras.layers.MaxPooling2D,
     {"pool_size": 1, "strides": 1}, {"input_shape": (1, 1, 1, 1)}, 100),
    ("MaxPooling3D_small_shape", tf.keras.layers.MaxPooling3D,
     {"pool_size": 1, "strides": 1}, {"input_shape": (1, 1, 1, 1, 1)}, 100),
    ("AveragePooling1D_small_shape", tf.keras.layers.AveragePooling1D,
     {"pool_size": 1, "strides": 1}, {"input_shape": (1, 1, 1)}, 100),
    ("AveragePooling2D_small_shape", tf.keras.layers.AveragePooling2D,
     {"pool_size": 1, "strides": 1}, {"input_shape": (1, 1, 1, 1)}, 100),
    ("AveragePooling3D_small_shape", tf.keras.layers.AveragePooling3D,
     {"pool_size": 1, "strides": 1}, {"input_shape": (1, 1, 1, 1, 1)}, 100),
    ("GlobalMaxPooling1D_small_shape", tf.keras.layers.GlobalMaxPooling1D,
     {}, {"input_shape": (1, 1, 1)}, 100),
    ("GlobalMaxPooling2D_small_shape", tf.keras.layers.GlobalMaxPooling2D,
     {}, {"input_shape": (1, 1, 1, 1)}, 100),
    ("GlobalMaxPooling3D_small_shape", tf.keras.layers.GlobalMaxPooling3D,
     {}, {"input_shape": (1, 1, 1, 1, 1)}, 100),
    ("GlobalAveragePooling1D_small_shape",
     tf.keras.layers.GlobalAveragePooling1D,
     {}, {"input_shape": (1, 1, 1)}, 100),
    ("GlobalAveragePooling2D_small_shape",
     tf.keras.layers.GlobalAveragePooling2D,
     {}, {"input_shape": (1, 1, 1, 1)}, 100),
    ("GlobalAveragePooling3D_small_shape",
     tf.keras.layers.GlobalAveragePooling3D,
     {}, {"input_shape": (1, 1, 1, 1, 1)}, 100),
]
class KerasLayerBenchmarks(  # pylint: disable=undefined-variable
    layer_benchmarks_test_base.LayerBenchmarksBase,
    metaclass=tf.__internal__.test.ParameterizedBenchmark):
  """Benchmarks Keras layer `call` in eager, tf.function and XLA modes.

  The `ParameterizedBenchmark` metaclass expands each `benchmark_*` method
  into one benchmark per entry of `_benchmark_parameters`, so method names
  and parameter-tuple layout are load-bearing.
  """

  # The parameter of each layer benchmark is a tuple, and the first one is
  # the benchmark name. It must follow the convention of
  # "{layer_name}_{small|normal|large}_shape" to make it compatible with
  # `self.report_benchmark()` method.
  _benchmark_parameters = benchmark_util.generate_benchmark_params_cpu_gpu(
      CORE_LAYERS + CONV_LAYERS + RECURRENT_LAYERS + NORMALIZATION_LAYERS +
      REGULARIZATION_LAYERS + ATTENSION_LAYERS + POOLING_LAYERS)

  def benchmark_layer_call(self, layer_cls, layer_args, inputs, num_iters):
    """Benchmark the plain (eager) forward pass of the layer."""
    layer = layer_cls(**_get_layer_args(layer_cls, layer_args))
    x = _get_input_data(inputs)
    fn = functools.partial(layer, x)
    name = benchmark_util.get_benchmark_name(self._get_name())
    metadata = {"implementation": name[0] + ".layer.call"}
    metadata.update(_get_metadata(name))
    self.run_report(fn, num_iters, metadata)

  def benchmark_layer_call_with_function(
      self, layer_cls, layer_args, inputs, num_iters):
    """Benchmark the forward pass with `layer.call` wrapped in tf.function."""
    layer = layer_cls(**_get_layer_args(layer_cls, layer_args))
    x = _get_input_data(inputs)
    # Wrap before building the partial so the traced call is benchmarked.
    layer.call = tf.function(layer.call)
    fn = functools.partial(layer, x)
    name = benchmark_util.get_benchmark_name(self._get_name())
    metadata = {"implementation": name[0] + ".layer.call.function"}
    metadata.update(_get_metadata(name))
    self.run_report(fn, num_iters, metadata)

  def benchmark_layer_call_with_xla(
      self, layer_cls, layer_args, inputs, num_iters):
    """Benchmark the forward pass compiled with XLA (jit_compile=True)."""
    name = benchmark_util.get_benchmark_name(self._get_name())
    # TODO(b/173461426)
    if layer_cls is tf.keras.layers.Embedding and name[-1] == "GPU":
      return
    layer = layer_cls(**_get_layer_args(layer_cls, layer_args))
    x = _get_input_data(inputs)
    layer.call = tf.function(
        layer.call, jit_compile=True)
    fn = functools.partial(layer, x)
    metadata = {"implementation": name[0] + ".layer.call.xla"}
    metadata.update(_get_metadata(name))
    self.run_report(fn, num_iters, metadata)

  def benchmark_layer_call_backward(
      self, layer_cls, layer_args, inputs, num_iters):
    """Benchmark the eager forward+backward pass of the layer."""
    layer = layer_cls(**_get_layer_args(layer_cls, layer_args))
    x = _get_input_data(inputs)
    fn = functools.partial(_layer_call_backward, layer, x)
    name = benchmark_util.get_benchmark_name(self._get_name())
    metadata = {"implementation": name[0] + ".layer.call.backward"}
    metadata.update(_get_metadata(name))
    self.run_report(fn, num_iters, metadata)

  def benchmark_layer_call_backward_with_function(
      self, layer_cls, layer_args, inputs, num_iters):
    """Benchmark forward+backward with `layer.call` wrapped in tf.function."""
    layer = layer_cls(**_get_layer_args(layer_cls, layer_args))
    x = _get_input_data(inputs)
    layer.call = tf.function(layer.call)
    fn = functools.partial(_layer_call_backward, layer, x)
    name = benchmark_util.get_benchmark_name(self._get_name())
    metadata = {"implementation": name[0] + ".layer.call.backward.function"}
    metadata.update(_get_metadata(name))
    self.run_report(fn, num_iters, metadata)

  def benchmark_layer_call_backward_with_xla(
      self, layer_cls, layer_args, inputs, num_iters):
    """Benchmark forward+backward compiled with XLA, skipping known failures."""
    name = benchmark_util.get_benchmark_name(self._get_name())
    # TODO(b/153480400)
    if layer_cls in [
        tf.keras.layers.LSTM, tf.keras.layers.Bidirectional,
        tf.keras.layers.ConvLSTM2D, tf.keras.layers.GRU, tf.keras.layers.RNN,
        tf.keras.layers.SimpleRNN
    ]:
      return
    # TODO(b/173461426)
    if layer_cls is tf.keras.layers.Embedding and name[-1] == "GPU":
      return
    layer = layer_cls(**_get_layer_args(layer_cls, layer_args))
    x = _get_input_data(inputs)
    layer.call = tf.function(
        layer.call, jit_compile=True)
    fn = functools.partial(_layer_call_backward, layer, x)
    metadata = {"implementation": name[0] + ".layer.call.backward.xla"}
    metadata.update(_get_metadata(name))
    self.run_report(fn, num_iters, metadata)
# Run the benchmarks via the TensorFlow test/benchmark runner.
if __name__ == "__main__":
  tf.test.main()
| |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=unidiomatic-typecheck
"""Utility to lift subgraphs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import six
from tensorflow.python.framework import func_graph
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import resource_variable_ops
def _graph_inputs(op):
return [x.op for x in op.inputs] + list(op.control_inputs)
def _as_operation(op_or_tensor):
  """Normalize a Tensor-or-Operation argument to its Operation."""
  if not isinstance(op_or_tensor, ops.Tensor):
    return op_or_tensor
  return op_or_tensor.op
class UnliftableError(Exception):
  """Raised when a Tensor's subgraph cannot be copied out of its graph."""
def _constant_inputs(op_or_tensor):
  """Return True iff every graph input is a `Const` op with no control deps."""
  for producer in _graph_inputs(_as_operation(op_or_tensor)):
    producer_op = _as_operation(producer)
    if producer_op.type != u"Const" or producer_op.control_inputs:
      return False
  return True
def _path_from(from_op, tensor, sources):
  """Find one path from `from_op` to `tensor`, ignoring `sources`.

  Used only to build a human-readable diagnostic when lifting fails.

  Args:
    from_op: A `tf.Operation`.
    tensor: A `tf.Operation` or `tf.Tensor`.
    sources: A list of `tf.Tensor`.

  Returns:
    A python string containing the path, or "??" if none is found.
  """
  visited_ops = set([x.op for x in sources])
  ops_to_visit = [_as_operation(tensor)]
  # Maps each discovered op to the consumer through which it was reached;
  # these back-links are followed to reconstruct the path once `from_op`
  # is found.
  some_op_output = {}
  while ops_to_visit:
    op = ops_to_visit.pop()
    if op in visited_ops:
      continue
    visited_ops.add(op)
    if op == from_op:
      # Walk the recorded consumer links from `from_op` back to `tensor`.
      path_op = op
      path = [path_op]
      final_op = _as_operation(tensor)
      while path_op != final_op:
        path_op = some_op_output[path_op]
        path.append(path_op)
      return " <- ".join(["%s (%s)" % (x.name, x.type) for x in reversed(path)])
    else:
      for inp in _graph_inputs(op):
        # NOTE(review): `sources` holds Tensors while `inp` is an Operation,
        # so `inp not in sources` appears always true; source ops are instead
        # excluded via `visited_ops` seeded above — confirm intent.
        if inp not in visited_ops and inp not in sources:
          some_op_output[inp] = op
          ops_to_visit.append(inp)
  return "??"
def _map_subgraph(init_tensor, sources, disallowed_placeholders, visited_ops,
                  op_outputs, add_sources):
  """Walk a Graph and capture the subgraph between init_tensor and sources.

  Note: This function mutates visited_ops and op_outputs.

  Arguments:
    init_tensor: A Tensor or Operation where the subgraph terminates.
    sources: A set of Tensors where subgraph extraction should stop.
    disallowed_placeholders: An optional set of ops which may not appear in the
      lifted graph. Defaults to all placeholders.
    visited_ops: A set of operations which were visited in a prior pass.
    op_outputs: A defaultdict containing the outputs of an op which are to be
      copied into the new subgraph.
    add_sources: A boolean indicating whether placeholders which are not in
      sources should be allowed.

  Returns:
    The set of placeholders upon which init_tensor depends and are not in
    sources.

  Raises:
    UnliftableError: if init_tensor depends on a placeholder which is not in
      sources and add_sources is False.
  """
  # Depth-first walk from `init_tensor` back toward its inputs.
  ops_to_visit = [_as_operation(init_tensor)]
  extra_sources = set()
  while ops_to_visit:
    op = ops_to_visit.pop()
    if op in visited_ops:
      continue
    visited_ops.add(op)
    should_raise = False
    if disallowed_placeholders is not None and op in disallowed_placeholders:
      should_raise = True
    elif op.type == "Placeholder":
      if disallowed_placeholders is None and not add_sources:
        should_raise = True
      # When placeholders are tolerated, their outputs become new sources.
      extra_sources.update(op.outputs)
    if should_raise:
      raise UnliftableError(
          "Unable to lift tensor %s because it depends transitively on "
          "placeholder %s via at least one path, e.g.: %s"
          % (repr(init_tensor), repr(op), _path_from(op, init_tensor, sources)))
    for inp in _graph_inputs(op):
      # Record that `op` consumes `inp`; the caller uses these edges to
      # topologically sort the extracted subgraph.
      op_outputs[inp].add(op)
      # NOTE(review): `(sources or extra_sources)` short-circuits to `sources`
      # whenever it is non-empty, and compares an Operation against a set of
      # Tensors — possibly intended as `sources | extra_sources`; confirm
      # before relying on this stop condition.
      if inp not in visited_ops and inp not in (sources or extra_sources):
        ops_to_visit.append(inp)
  return extra_sources
def _copy_non_source(op, graph, op_map):
  """Copy an op directly to a given graph.

  Assumes every input and control input of `op` has already been copied
  (and is therefore present in `op_map`).

  Args:
    op: The op to be copied.
    graph: The destination graph.
    op_map: A dict mapping ops and tensors in the old graph to the new one.
  """
  new_inputs = [op_map[tensor] for tensor in op.inputs]
  new_controls = [op_map[ctrl] for ctrl in op.control_inputs]
  # Recreate the op on the same device and under the same control deps.
  with ops.control_dependencies(new_controls), ops.device(op.device):
    new_op = graph.create_op(
        op_type=op.type,
        inputs=new_inputs,
        dtypes=[out.dtype for out in op.outputs],
        attrs=op.node_def.attr,
        name=op.name)
  op_map[op] = new_op
  for old_output, new_output in zip(op.outputs, new_op.outputs):
    op_map[old_output] = new_output
def _copy_source(s, graph, op_map, handle_captures, inverse_captures):
  """Create a source in a graph based on a Tensor from a different graph.

  This function creates a placeholder analog of `s` in a graph with the
  following behavior:
  1) If s is a captured Tensor or Variable and handle_captures is set to True,
     simply capture it in the new graph as well.
  2) If s is a PlaceholderWithDefault whose default is a constant, preserve
     said default in the new graph.
  3) When applicable, copy resource variable metadata from `s` to the newly
     created placeholder.

  Args:
    s: The source of interest.
    graph: The destination graph.
    op_map: A dict mapping ops and tensors in the old graph to the new one.
    handle_captures: A boolean indicating whether to re-capture s in the new
      graph or simply create a vanilla placeholder.
    inverse_captures: A dict mapping s back to the Tensor or Variable that it
      captures.
  """
  if handle_captures and s in inverse_captures:
    # Case 1: re-capture the original external tensor/variable directly.
    copied_placeholder = graph.capture(inverse_captures[s], name=s.op.name)
  elif s.op.type == "PlaceholderWithDefault" and _constant_inputs(s):
    # Case 2: copy the constant default value into the destination graph,
    # then rebuild the placeholder-with-default around the copied constant.
    default_value = s.op.inputs[0]
    _copy_non_source(op=default_value.op, graph=graph, op_map=op_map)
    with ops.device(s.op.device):
      copied_placeholder = array_ops.placeholder_with_default(
          input=op_map[default_value], shape=s.shape, name=s.op.name)
  else:
    # Fallback: a vanilla placeholder with the same dtype/shape/name.
    with ops.device(s.op.device):
      copied_placeholder = array_ops.placeholder(
          dtype=s.dtype, shape=s.shape, name=s.op.name)
  # Case 3: propagate resource-variable handle shape/type metadata, if any.
  base_handle = resource_variable_ops.get_resource_handle_data(s)
  if base_handle.shape_and_type:
    resource_variable_ops._set_handle_shapes_and_types(  # pylint: disable=protected-access
        copied_placeholder,
        base_handle,
        graph_mode=True)
  op_map[s] = copied_placeholder
def lift_to_graph(init_tensors, graph, sources=None,
                  disallowed_placeholders=None, add_sources=False,
                  handle_captures=False, base_graph=None):
  """Copies the tensor and all its inputs recursively to the outer graph.

  Args:
    init_tensors: The Tensor to lift.
    graph: The graph to lift to.
    sources: Optional sequence of nodes to start from. If omitted the whole
      subgraph which feeds into `init_tensor` is lifted.
    disallowed_placeholders: An optional set of ops which may not appear in the
      lifted graph. Defaults to all placeholders.
    add_sources: A boolean indicating whether placeholders which are not in
      sources should be allowed.
    handle_captures: A boolean indicating whether to re-capture s in the new
      graph or simply create a vanilla placeholder.
    base_graph: The graph from which to lift ops. This will be inferred if not
      specified.

  Returns:
    A mapping from ops in the current default graph to ops in `graph`.

  Raises:
    UnliftableError: If a placeholder blocks lifting.
  """
  # Resource variables are passed through unchanged; only plain tensors
  # are actually lifted.
  variable_init_tensors = {i for i in init_tensors if isinstance(
      i, resource_variable_ops.ResourceVariable)}
  init_tensors = set(init_tensors).difference(variable_init_tensors)
  base_graph = base_graph or list(init_tensors)[0].graph
  # Check that the initializer does not depend on any placeholders.
  sources = set(sources or [])
  visited_ops = set([x.op for x in sources])
  op_outputs = collections.defaultdict(set)
  # First we extract the subgraph between init_tensors and sources.
  for init_tensor in init_tensors:
    sources.update(_map_subgraph(
        init_tensor=init_tensor,
        sources=sources,
        disallowed_placeholders=disallowed_placeholders,
        visited_ops=visited_ops,
        op_outputs=op_outputs,
        add_sources=add_sources))
  # Topologically sort the nodes we've extracted. Now we know how many of their
  # outputs are part of this subgraph.
  ops_to_copy = []
  marked_ops = set([])
  # Seed with the init ops whose outputs are not consumed inside the subgraph.
  ops_to_visit = [_as_operation(t) for t in init_tensors
                  if not op_outputs[_as_operation(t)]]
  while ops_to_visit:
    op = ops_to_visit.pop()
    if op in marked_ops:
      continue
    marked_ops.add(op)
    ops_to_copy.append(op)
    for inp in _graph_inputs(op):
      # Only descend into an input once all of its in-subgraph consumers are
      # marked; this makes `ops_to_copy` reverse-topologically sorted.
      if (all(x in marked_ops for x in op_outputs[inp]) and
          inp not in sources):
        ops_to_visit.append(inp)
  # When lifting from one FuncGraph to another, we will need to capture the
  # relevant tensors as well.
  captures = collections.OrderedDict()
  if (isinstance(base_graph, func_graph.FuncGraph) and
      isinstance(graph, func_graph.FuncGraph)):
    captures = base_graph.captures
  inverse_captures = {v: k for k, v in captures.items()}
  # ops_to_copy now holds a reverse topologically sorted list of ops which
  # ends in the initializer. We copy those to the outermost graph and
  # build the initialization op there.
  with graph.as_default():
    op_map = {i: i for i in variable_init_tensors}  # Pass through variables.
    source_ops = set()
    # Add the sources in the same order as the original graph.
    for s in six.itervalues(captures):
      if s in sources:
        sources.remove(s)
        source_ops.add(s.op)
        _copy_source(
            s=s,
            graph=graph,
            op_map=op_map,
            handle_captures=handle_captures,
            inverse_captures=inverse_captures)
    # Copy any remaining sources that were not ordered by the captures.
    for s in sources:
      source_ops.add(s.op)
      _copy_source(
          s=s,
          graph=graph,
          op_map=op_map,
          handle_captures=handle_captures,
          inverse_captures=inverse_captures)
    # Replay the computation in reverse topological order, skipping sources.
    for op in reversed(ops_to_copy):
      if op in source_ops:
        continue
      _copy_non_source(op=op, graph=graph, op_map=op_map)
    return op_map
| |
import logging
import networkx
import functools
from collections import deque
from typing import Optional, Tuple
logger = logging.getLogger(__name__)
def with_initialize(func):
    """Decorator ensuring an ontology is initialized before `func` runs."""
    @functools.wraps(func)
    def initialized_call(self_obj, *args, **kwargs):
        if not self_obj._initialized:
            self_obj.initialize()
        return func(self_obj, *args, **kwargs)
    return initialized_call
class IndraOntology(networkx.DiGraph):
    """A directed graph representing entities and their properties
    as nodes and ontological relationships between the entities as
    edges.
    Attributes
    ----------
    name : str
        A prefix/name for the ontology, used for the purposes of caching.
    version : str
        A version for the ontology, used for the purposes of caching.
    """
    version = None
    name = None
    def __init__(self):
        super().__init__()
        # Set to True once initialize() has populated nodes/edges.
        self._initialized = False
        # Lazily built lookup: (ns, standard name) -> (ns, id);
        # see _build_name_lookup().
        self.name_to_grounding = {}
        # Optional precomputed set of (child_label, parent_label) pairs for
        # fast isa/partof lookups; see _build_transitive_closure().
        self.transitive_closure = set()
        # Call counters, useful for profiling lookup usage.
        self._isa_counter = 0
        self._isrel_counter = 0
    def initialize(self):
        """Initialize the ontology by adding nodes and edges.
        By convention, ontologies are implemented such that the constructor
        does not add all the nodes and edges, which can take a long time.
        This function is called automatically when any of the user-facing
        methods ot IndraOntology is called. This way, the ontology is only
        fully constructed if it is used.
        """
        raise NotImplementedError('The initialize method needs to be '
                                  'implemented when subclassing '
                                  'IndraOntology')
    @with_initialize
    def _check_path(self, ns1, id1, ns2, id2, edge_types):
        """Return True if a directed path of edges with types in
        `edge_types` leads from (ns1, id1) to (ns2, id2)."""
        try:
            target = (ns2, id2)
            if target in self._transitive_rel(ns1, id1, self.child_rel,
                                              edge_types, target):
                return True
            else:
                return False
        # This typically happens if the node is missing from
        # the graph. Is there a more specific error type?
        except networkx.NetworkXError:
            return False
    @staticmethod
    def get_ns_id(node):
        """Return the name space and ID of a given node from its label.
        Parameters
        ----------
        node : str
            A node's label.
        Returns
        -------
        tuple(str, str)
            A tuple of the node's name space and ID.
        """
        return IndraOntology.reverse_label(node)
    @staticmethod
    def get_ns(node):
        """Return the name space of a given node from its label.
        Parameters
        ----------
        node : str
            A node's label.
        Returns
        -------
        str
            The node's name space.
        """
        return IndraOntology.get_ns_id(node)[0]
    @staticmethod
    def get_id(node):
        """Return the name ID a given node from its label.
        Parameters
        ----------
        node : str
            A node's label.
        Returns
        -------
        str
            The node's ID within its name space.
        """
        return IndraOntology.get_ns_id(node)[1]
    @with_initialize
    def isrel(self, ns1, id1, ns2, id2, rels):
        """Return True if the two entities are related with a given rel.
        Parameters
        ----------
        ns1 : str
            The first entity's name space.
        id1 : str
            The first entity's ID.
        ns2 : str
            The second entity's name space.
        id2 : str
            The second entity's ID.
        rels : iterable of str
            A set of edge types to traverse when determining
            if the first entity is related to the second
            entity.
        Returns
        -------
        bool
            True if the first entity is related to the second with
            a directed path containing edges with types in `rels` .
            Otherwise False.
        """
        self._isrel_counter += 1
        return self._check_path(ns1, id1, ns2, id2, rels)
    @with_initialize
    def isa(self, ns1, id1, ns2, id2):
        """Return True if the first entity is related to the second as 'isa'.
        Parameters
        ----------
        ns1 : str
            The first entity's name space.
        id1 : str
            The first entity's ID.
        ns2 : str
            The second entity's name space.
        id2 : str
            The second entity's ID.
        Returns
        -------
        bool
            True if the first entity is related to the second with
            a directed path containing edges with type `isa`.
            Otherwise False.
        """
        return self.isrel(ns1, id1, ns2, id2, rels={'isa'})
    @with_initialize
    def partof(self, ns1, id1, ns2, id2):
        """Return True if the first entity is related to the second as 'partof'.
        Parameters
        ----------
        ns1 : str
            The first entity's name space.
        id1 : str
            The first entity's ID.
        ns2 : str
            The second entity's name space.
        id2 : str
            The second entity's ID.
        Returns
        -------
        bool
            True if the first entity is related to the second with
            a directed path containing edges with type `partof`.
            Otherwise False.
        """
        return self.isrel(ns1, id1, ns2, id2, rels={'partof'})
    @with_initialize
    def isa_or_partof(self, ns1, id1, ns2, id2):
        """Return True if the first entity is related to the second as 'isa'
        or `partof`.
        Parameters
        ----------
        ns1 : str
            The first entity's name space.
        id1 : str
            The first entity's ID.
        ns2 : str
            The second entity's name space.
        id2 : str
            The second entity's ID.
        Returns
        -------
        bool
            True if the first entity is related to the second with
            a directed path containing edges with type `isa` or `partof`.
            Otherwise False.
        """
        self._isa_counter += 1
        # Use the precomputed closure when available; it avoids a graph
        # traversal per call.
        if self.transitive_closure:
            return (self.label(ns1, id1),
                    self.label(ns2, id2)) in self.transitive_closure
        return self.isrel(ns1, id1, ns2, id2, rels={'isa', 'partof'})
    @with_initialize
    def maps_to(self, ns1, id1, ns2, id2):
        """Return True if the first entity has an xref to the second.
        Parameters
        ----------
        ns1 : str
            The first entity's name space.
        id1 : str
            The first entity's ID.
        ns2 : str
            The second entity's name space.
        id2 : str
            The second entity's ID.
        Returns
        -------
        bool
            True if the first entity is related to the second with
            a directed path containing edges with type `xref`.
            Otherwise False.
        """
        return self._check_path(ns1, id1, ns2, id2, {'xref'})
    @with_initialize
    def map_to(self, ns1, id1, ns2):
        """Return an entity that is a unique xref of an entity
        in a given name space.
        This function first finds all mappings via `xrefs` edges
        from the given first entity to the given second
        name space. If exactly one such mapping target is found, the
        target is returned. Otherwise, None is returned.
        Parameters
        ----------
        ns1 : str
            The first entity's name space.
        id1 : str
            The first entity's ID.
        ns2 : str
            The second entity's name space.
        Returns
        -------
        str
            The name space of the second entity
        str
            The ID of the second entity in the given name space.
        """
        targets = [target for target in
                   self.descendants_rel(ns1, id1, {'xref'})
                   if target[0] == ns2]
        # Only an unambiguous (single) mapping is returned.
        if len(targets) == 1:
            return targets[0]
        return None
    @with_initialize
    def _transitive_rel(self, ns, id, rel_fun, rel_types, target=None):
        """Return all nodes reachable from (ns, id) via `rel_fun` edges.
        Performs a breadth-first traversal using the generator-producing
        `rel_fun` (child_rel or parent_rel) restricted to `rel_types`.
        If `target` is given, stops early and returns [target] as soon as
        it is encountered.
        """
        source = (ns, id)
        visited = {source}
        queue = deque([(source,
                        rel_fun(*source, rel_types))])
        while queue:
            parent, children = queue[0]
            try:
                child = next(children)
                if target and child == target:
                    return [target]
                if child not in visited:
                    visited.add(child)
                    queue.append((child,
                                  rel_fun(*child, rel_types)))
            except networkx.NetworkXError as e:
                logger.debug(e)
                return []
            except StopIteration:
                # This node's generator is exhausted; move on to the next.
                queue.popleft()
        # The source itself is excluded from the result.
        return list(visited - {source})
    @with_initialize
    def descendants_rel(self, ns, id, rel_types):
        # All nodes reachable by following `rel_types` edges forward.
        return self._transitive_rel(ns, id, self.child_rel, rel_types)
    @with_initialize
    def ancestors_rel(self, ns, id, rel_types):
        # All nodes reachable by following `rel_types` edges backward.
        return self._transitive_rel(ns, id, self.parent_rel, rel_types)
    @with_initialize
    def child_rel(self, ns, id, rel_types):
        # Yield direct successors of (ns, id) connected by `rel_types` edges.
        source = self.label(ns, id)
        # This is to handle the case where the node is not in the
        # graph
        try:
            succ_iter = self.successors(source)
        except networkx.NetworkXError:
            return []
        for target in succ_iter:
            if self.edges[source, target]['type'] in rel_types:
                yield self.get_ns_id(target)
    @with_initialize
    def parent_rel(self, ns, id, rel_types):
        # Yield direct predecessors of (ns, id) connected by `rel_types` edges.
        target = self.label(ns, id)
        # This is to handle the case where the node is not in the
        # graph
        try:
            pred_iter = self.predecessors(target)
        except networkx.NetworkXError:
            return []
        for source in pred_iter:
            if self.edges[source, target]['type'] in rel_types:
                yield self.get_ns_id(source)
    @with_initialize
    def get_children(self, ns, id, ns_filter=None):
        """Return all `isa` or `partof` children of a given entity.
        Importantly, `isa` and `partof` edges always point towards
        higher-level entities in the ontology but here "child" means
        lower-level entity i.e., ancestors in the graph.
        Parameters
        ----------
        ns : str
            The name space of an entity.
        id : str
            The ID of an entity.
        ns_filter : Optional[set]
            If provided, only entities within the set of given
            name spaces are returned.
        Returns
        -------
        list
            A list of entities (name space, ID pairs) that are the
            children of the given entity.
        """
        children = self.ancestors_rel(ns, id, {'isa', 'partof'})
        children = [(cns, cid) for cns, cid in children
                    if ns_filter is None or cns in ns_filter]
        return children
    @with_initialize
    def get_parents(self, ns, id):
        """Return all `isa` or `partof` parents of a given entity.
        Importantly, `isa` and `partof` edges always point towards
        higher-level entities in the ontology but here "parent" means
        higher-level entity i.e., descendants in the graph.
        Parameters
        ----------
        ns : str
            The name space of an entity.
        id : str
            The ID of an entity.
        Returns
        -------
        list
            A list of entities (name space, ID pairs) that are the
            parents of the given entity.
        """
        return self.descendants_rel(ns, id, {'isa', 'partof'})
    @with_initialize
    def get_top_level_parents(self, ns, id):
        """Return all top-level `isa` or `partof` parents of a given entity.
        Top level means that this function only returns parents which
        don't have any further `isa` or `partof` parents above them.
        Importantly, `isa` and `partof` edges always point towards
        higher-level entities in the ontology but here "parent" means
        higher-level entity i.e., descendants in the graph.
        Parameters
        ----------
        ns : str
            The name space of an entity.
        id : str
            The ID of an entity.
        Returns
        -------
        list
            A list of entities (name space, ID pairs) that are the
            top-level parents of the given entity.
        """
        parents = self.get_parents(ns, id)
        # A top-level parent is one with no parents of its own.
        return [p for p in parents if not self.get_parents(*p)]
    @with_initialize
    def get_mappings(self, ns, id):
        """Return entities that are xrefs of a given entity.
        This function returns all mappings via `xrefs` edges
        from the given entity.
        Parameters
        ----------
        ns : str
            An entity's name space.
        id : str
            An entity's ID.
        Returns
        -------
        list
            A list of entities (name space, ID pairs) that are
            direct or indirect xrefs of the given entity.
        """
        return self.descendants_rel(ns, id, {'xref'})
    @with_initialize
    def get_replacement(self, ns, id):
        """Return a replacement for a given entity or None if no replacement.
        A replacement is typically necessary if the given entity is obsolete
        and has been replaced by another entry.
        Parameters
        ----------
        ns : str
            An entity's name space.
        id : str
            An entity's ID.
        Returns
        -------
        : tuple
            A tuple of the form (ns, id) of the replacement entity
            or None if no replacement.
        """
        rep = list(self.child_rel(ns, id, {'replaced_by'}))
        # Implicitly returns None when there is no replacement.
        if rep:
            return rep[0]
    @with_initialize
    def get_name(self, ns, id):
        """Return the standard name of a given entity.
        Parameters
        ----------
        ns : str
            An entity's name space.
        id : str
            An entity's ID.
        Returns
        -------
        str or None
            The name associated with the given entity or None
            if the node is not in the ontology or doesn't
            have a standard name.
        """
        return self.get_node_property(ns, id, property='name')
    @with_initialize
    def get_type(self, ns, id):
        """Return the type of a given entity.
        Parameters
        ----------
        ns : str
            An entity's name space.
        id : str
            An entity's ID.
        Returns
        -------
        str or None
            The type associated with the given entity or None
            if the node is not in the ontology or doesn't
            have a type annotation.
        """
        return self.get_node_property(ns, id, 'type')
    @with_initialize
    def get_polarity(self, ns, id):
        """Return the polarity of a given entity.
        Parameters
        ----------
        ns : str
            An entity's name space.
        id : str
            An entity's ID.
        Returns
        -------
        str or None
            The polarity associated with the given entity or None
            if the node is not in the ontology or doesn't
            have a polarity.
        """
        return self.get_node_property(ns, id, property='polarity')
    @with_initialize
    def get_node_property(self, ns, id, property):
        """Return a given property of a given entity.
        Parameters
        ----------
        ns : str
            An entity's name space.
        id : str
            An entity's ID.
        property : str
            The property to look for on the given node.
        Returns
        -------
        str or None
            The name associated with the given entity or None
            if the node is not in the ontology or doesn't
            have the given property.
        """
        try:
            return self.nodes[self.label(ns, id)][property]
        except KeyError:
            return None
    @with_initialize
    def is_opposite(self, ns1, id1, ns2, id2):
        """Return True if the two entities are opposites of each other.
        Parameters
        ----------
        ns1 : str
            The first entity's name space.
        id1 : str
            The first entity's ID.
        ns2 : str
            The second entity's name space.
        id2 : str
            The second entity's ID.
        Returns
        -------
        bool
            True if the first entity is in an `is_opposite`
            relationship with the second. False otherwise.
        """
        # FIXME: this assumes, as is the case in practice with our
        # ontologies that we have disjunct pairs of is_opposite entities
        # more generally, we may need to allow other edge types and
        # look at the overall "polarity" of the path.
        return self._check_path(ns1, id1, ns2, id2, {'is_opposite'})
    @with_initialize
    def get_id_from_name(self, ns, name) -> Optional[Tuple[str, str]]:
        """Return an entity's ID given its name space and standard name.
        Parameters
        ----------
        ns : str
            The name space in which the standard name is defined.
        name : str
            The standard name defined in the name space.
        Returns
        -------
        :
            The pair of namespace and ID corresponding to the given
            standard name in the given name space or None if it's not
            available.
        """
        # The lookup table is built lazily on first use.
        if not self.name_to_grounding:
            self._build_name_lookup()
        return self.name_to_grounding.get((ns, name))
    @with_initialize
    def _build_name_lookup(self):
        """Populate the (ns, name) -> (ns, id) lookup table, skipping
        nodes without a name and obsolete nodes."""
        self.name_to_grounding = {
            (self.get_ns(node), data['name']): self.get_ns_id(node)
            for node, data in self.nodes(data=True)
            if 'name' in data
            and not data.get('obsolete', False)
        }
    @with_initialize
    def nodes_from_suffix(self, suffix):
        """Return all node labels which have a given suffix.
        This is useful for finding entities in ontologies where
        the IDs consist of paths like a/b/c/...
        Parameters
        ----------
        suffix : str
            A label suffix.
        Returns
        -------
        list
            A list of node labels that have the given suffix.
        """
        return [node for node in self.nodes
                if node.endswith(suffix)]
    @staticmethod
    def label(ns, id):
        """Return the label corresponding to a given entity.
        This is mostly useful for constructing the ontology
        or when adding new nodes/edges. It can be overriden
        in subclasses to change the default mapping
        from ns / id to a label.
        Parameters
        ----------
        ns : str
            An entity's name space.
        id : str
            An entity's ID.
        Returns
        -------
        str
            The label corresponding to the given entity.
        """
        return '%s:%s' % (ns, id)
    @staticmethod
    def reverse_label(label):
        """Return the name space and ID from a given label.
        This is the complement of the `label` method which
        reverses a label into a name space and ID.
        Parameters
        ----------
        label
            A node label.
        Returns
        -------
        str
            The name space corresponding to the label.
        str
            The ID corresponding to the label.
        """
        # maxsplit=1 keeps any further colons as part of the ID.
        return tuple(label.split(':', maxsplit=1))
    def _build_transitive_closure(self):
        """Precompute all (child_label, parent_label) isa/partof pairs so
        isa_or_partof() becomes a set-membership test."""
        if self.transitive_closure:
            return
        logger.info('Building transitive closure for faster '
                    'isa/partof lookups...')
        self.transitive_closure = set()
        for node in self.nodes():
            ns, id = self.get_ns_id(node)
            for pns, pid in self.descendants_rel(ns, id,
                                                 rel_types={'isa',
                                                            'partof'}):
                self.transitive_closure.add((self.label(ns, id),
                                             self.label(pns, pid)))
    @with_initialize
    def print_stats(self):
        """Log the number of nodes and edges in the ontology."""
        logger.info('Number of nodes: %d' % len(self.nodes))
        logger.info('Number of edges: %d' % len(self.edges))
| |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""subcmd_extract.py
Provides the extract subcommand for pdp.py
(c) The James Hutton Institute 2017-2019
Author: Leighton Pritchard
Contact: leighton.pritchard@hutton.ac.uk
Leighton Pritchard,
Information and Computing Sciences,
James Hutton Institute,
Errol Road,
Invergowrie,
Dundee,
DD2 5DA,
Scotland,
UK
The MIT License
Copyright (c) 2017-2019 The James Hutton Institute
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import multiprocessing
import os
import shutil
from Bio import AlignIO, SeqIO
from joblib import Parallel, delayed
from tqdm import tqdm
from diagnostic_primers import extract, load_primers
from diagnostic_primers.extract import PDPAmpliconError
from diagnostic_primers.scripts.tools import (
collect_existing_output,
create_output_directory,
load_config_json,
log_clines,
run_parallel_jobs,
)
def extract_primers(task_name, primer, coll, outdir, limits):
    """Extract amplicons for one primer set and write per-primer FASTA files.

    :param task_name:
    :param primer:
    :param coll:
    :param outdir:
    :param limits: tuple - minimum and maximum amplicon lengths to consider

    Returns dict mapping primer identity to FASTA file path
    """
    amplicons, _ = extract.extract_amplicons(task_name, primer, coll, limits)
    fasta_paths = {}
    for primer_name in amplicons.primer_names:
        # One FASTA file per primer, named after the primer itself.
        outpath = os.path.join(outdir, primer_name + ".fasta")
        amplicons.write_amplicon_sequences(primer_name, outpath)
        fasta_paths[primer_name] = outpath
    return fasta_paths
def recover_existing_aln_files(args, logger, outdir):
    """Return list of existing alignment files if in recovery mode

    :param args: Namespace of command-line arguments
    :param logger: logging object
    :param outdir: Path to output directory

    Returns an empty list, unless in recovery mode. In recovery mode, a list
    of existing output files is returned
    """
    # Guard clause: not in recovery mode, nothing to salvage.
    if not args.recovery:
        return []
    logger.warning("Entering recovery mode")
    logger.info(
        "\tIn this mode, existing comparison output from %s is reused", outdir
    )
    existingfiles = collect_existing_output(outdir, "extract", args)
    # Join directly over the iterable; the original's `[_ for _ in ...]`
    # comprehension was a redundant copy.
    logger.info("Existing files found:\n\t%s", "\n\t".join(existingfiles))
    return existingfiles
def mafft_align_sequences(args, logger, amplicon_fasta, outdir):
    """Align amplicon sequences using MAFFT

    :param args: Namespace of command-line arguments
    :param logger: logging object
    :param amplicon_fasta: dictionary of amplicon FASTA file paths, keyed by
        primer name
    :param outdir: path to output directory

    Returns a dict mapping each primer name to the path of its alignment
    (.aln) file in `outdir`.
    """
    # If we are in recovery mode, we are salvaging output from a previous
    # run, and do not necessarily need to rerun all the jobs. In this case,
    # we prepare a list of output files we want to recover from the results
    # in the output directory.
    amplicon_alnfiles = {}
    existingfiles = recover_existing_aln_files(args, logger, outdir)
    clines = []
    logger.info(
        "Compiling MAFFT alignment commands for %d amplicons", len(amplicon_fasta)
    )
    for pname, fname in tqdm(
        amplicon_fasta.items(),
        desc="compiling MAFFT commands",
        disable=args.disable_tqdm,
    ):
        alnoutfname = os.path.join(outdir, pname + ".aln")
        amplicon_alnfiles[pname] = alnoutfname
        # MAFFT needs at least two sequences; with fewer, copy the FASTA
        # file to the expected .aln location instead.
        with open(fname, "r") as ifh:
            if len(list(SeqIO.parse(ifh, "fasta"))) < 2:
                logger.warning(
                    "Output amplicon file %s cannot be aligned with MAFFT (copying)",
                    fname,
                )
                shutil.copyfile(fname, alnoutfname)
        # NOTE(review): even when the file was copied above, a MAFFT command
        # is still scheduled below (unless recovered) and will overwrite the
        # copy — confirm whether a `continue` after the copy was intended.
        if os.path.split(alnoutfname)[-1] not in existingfiles:  # skip if file exists
            # MAFFT is run with --quiet flag to suppress verbiage in STDERR
            clines.append(
                "pdp_mafft_wrapper.py {} --quiet {} {}".format(
                    args.mafft_exe, fname, alnoutfname
                )
            )
    # Pass command-lines to the appropriate scheduler
    if clines:
        logger.info("Aligning amplicons with MAFFT")
        logger.info("MAFFT command lines:\n\t%s", "\n\t".join(clines))
        pretty_clines = [str(c).replace(" -", " \\\n -") for c in clines]
        log_clines(pretty_clines, logger)
        run_parallel_jobs(clines, args, logger)
    else:
        logger.warning(
            "No MAFFT jobs were scheduled (you may see this if the --recovery option is active)"
        )
    return amplicon_alnfiles
def subcmd_extract(args, logger, use_parallelism=True):
    """Extract amplicons corresponding to primer sets.

    :param args: Namespace of command-line arguments
    :param logger: logging object
    :param use_parallelism: boolean flag for debugging
        if set to True, use joblib to parallelise tasks; set to False to aid
        with debugging/localising issues

    Writes one FASTA file per primer (MAFFT-aligned unless args.noalign)
    under a task-named subdirectory of args.outdir, plus a tab-separated
    distance-metric summary; returns 0.
    """
    logger.info("Extracting amplicons for primer set %s", args.primerfile)
    logger.info("PrimerSearch and genome information provided by %s", args.infilename)
    if not args.noalign:
        logger.info("MAFFT executable for alignment: %s", args.mafft_exe)
    # Create output directory, if needed
    task_name = os.path.splitext(os.path.split(args.primerfile)[-1])[0]
    outdir = os.path.join(args.outdir, task_name)
    create_output_directory(outdir, args.ex_force, logger)
    # Load the config file and extract the amplicons for each primer set
    # in turn. Put the amplicons into a .fasta file and record the location
    # for each primer set
    logger.info("Loading primers from %s", args.primerfile)
    primers = load_primers(args.primerfile, fmt="json")
    coll = load_config_json(args, logger)
    # Run parallel extractions of primers
    logger.info("Extracting amplicons from source genomes")
    if use_parallelism:
        # One joblib worker per CPU; each extract_primers() call returns a
        # dict of primer name -> FASTA path.
        results = Parallel(n_jobs=multiprocessing.cpu_count())(
            delayed(extract_primers)(
                task_name,
                primer,
                coll,
                outdir,
                (args.ex_minamplicon, args.ex_maxamplicon),
            )
            for primer in tqdm(
                primers, desc="extracting amplicons", disable=args.disable_tqdm
            )
        )
    else:
        # Serial fallback: identical calls, easier to step through.
        results = []
        for primer in tqdm(
            primers, desc="extracting amplicons", disable=args.disable_tqdm
        ):
            result = extract_primers(
                task_name,
                primer,
                coll,
                outdir,
                (args.ex_minamplicon, args.ex_maxamplicon),
            )
            results.append(result)
    # Flatten the per-primer dicts into one primer name -> path mapping.
    amplicon_seqfiles = dict(pair for d in results for pair in d.items())
    # Align the sequences with MAFFT
    if not args.noalign:
        amplicon_seqfiles = mafft_align_sequences(
            args, logger, amplicon_seqfiles, outdir
        )
    # Calculate distance matrix information and write to file
    logger.info("Calculating distance matrices")
    distoutfname = os.path.join(outdir, "distances_summary.tab")
    logger.info("Writing distance metric summaries to %s", distoutfname)
    with open(distoutfname, "w") as ofh:
        # Header row of the tab-separated summary table.
        ofh.write(
            "\t".join(
                [
                    "primer",
                    "dist_mean",
                    "dist_sd",
                    "dist_min",
                    "dist_max",
                    "unique",
                    "nonunique",
                    "shannon_index",
                    "shannon_evenness",
                ]
            )
            + "\n"
        )
        # Note: ordered output for the table; tqdm call returns
        # (primer filename, alignment filename)
        for fnames in tqdm(
            sorted(amplicon_seqfiles.items()),
            desc="processing alignments",
            disable=args.disable_tqdm,
        ):
            try:
                result = extract.calculate_distance(
                    AlignIO.read(open(fnames[1]), "fasta")
                )
            except PDPAmpliconError as exc:  # Catches alignment/calculation problems
                logger.warning("Distance calculation error: %s", exc)
                # Zero-valued placeholder so a row is still written.
                result = extract.DistanceResults(None, [], 0, 0, 0, 0, 0, 0, 0, 0)
            ofh.write(
                "\t".join(
                    [
                        fnames[0],
                        "%0.4f" % result.mean,
                        "%0.4f" % result.sd,
                        "%0.4f" % result.min,
                        "%0.4f" % result.max,
                        "%d" % result.unique,
                        "%d" % result.nonunique,
                        "%.02f" % result.shannon,
                        "%.02f" % result.evenness,
                    ]
                )
                + "\n"
            )
    return 0
| |
#!/usr/bin/env python
##===--- swift-bench.py -------------------------------*- coding: utf-8 -*-===##
##
## This source file is part of the Swift.org open source project
##
## Copyright (c) 2014 - 2016 Apple Inc. and the Swift project authors
## Licensed under Apache License v2.0 with Runtime Library Exception
##
## See http://swift.org/LICENSE.txt for license information
## See http://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
##
##===----------------------------------------------------------------------===##
# This file implements a test harness for running Swift performance benchmarks.
#
# Its input is a set of swift files, containing functions named 'bench_*' that
# take no arguments and returns Int. The harness makes a separate test from
# each of these functions, runs all the tests and reports aggregate results.
#
# The workflow of the harness is the following:
# o Basing on the input files, generate 'processed' files. These files contain
# a main function with simple arguments parsing, time measurement utilities
# and a loop in which the bench-functions are called.
# o When all files are processed, the harness begins to compile them, keeping
# track of all compile fails for later results reporting.
# o When all files are compiled, the harness begins to run the tests. The
# harness chooses a number of iterations for each tests to achieve the best
# accuracy in the given time limit (in order to do that, it performs several
# auxiliary test runs). When the iteration number is chosen, the measurement
# of execution time is actually performed.
# o At this point everything is ready, and the harness simply reports the
# results.
#
# Ideas for the harness improvement and development are welcomed here:
# rdar://problem/18072938
from __future__ import print_function
import subprocess
import re
import os
import sys
import argparse
import math
# Calculate the population standard deviation
def pstdev(l):
    """Return the population standard deviation of the numbers in `l`.

    Fix: the original recomputed the mean (`sum(l) / len(l)`) inside the
    generator for every element, making the function O(n^2); the mean is
    now computed once, giving O(n) with identical results.

    :param l: non-empty sequence of numbers
    :raises ZeroDivisionError: if `l` is empty (same as the original)
    """
    mean = sum(l) / float(len(l))
    return (sum((x - mean) ** 2 for x in l) / len(l)) ** 0.5
class SwiftBenchHarness:
    """Harness that processes, compiles, runs and reports Swift ``bench_*``
    micro-benchmarks (see the module header for the overall workflow)."""

    # NOTE(review): these are class-level (shared) attributes, although each
    # reads as per-instance default state — confirm single-instance use.
    sources = []          # input .swift files
    verboseLevel = 0      # logging verbosity (higher = chattier)
    compiler = ""         # Swift compiler executable
    tests = {}            # "source:benchName" -> Test
    timeLimit = 1000      # total measurement budget per test, in ms
    minSampleTime = 100   # minimum duration of one sample, in ms
    minIterTime = 1       # minimum measurable duration, in ms
    optFlags = []         # extra compiler flags

    def log(self, str, level):
        """Print `str` if verbosity is at least `level`, indented by level.

        Note: the parameter shadows the builtin ``str`` inside this method.
        """
        if self.verboseLevel >= level:
            for i in range(1,level):
                sys.stdout.write(' ')
            print(str)

    def runCommand(self, cmd):
        """Run `cmd` (an argv list), log it, return combined stdout+stderr."""
        self.log(' Executing: ' + ' '.join(cmd), 1)
        return subprocess.check_output(cmd, stderr=subprocess.STDOUT)

    def parseArguments(self):
        """Parse command-line options into harness configuration."""
        self.log("Parsing arguments.", 2)
        parser = argparse.ArgumentParser()
        parser.add_argument("-v", "--verbosity", help="increase output verbosity", type=int)
        parser.add_argument("files", help="input files", nargs='+')
        parser.add_argument('-c', '--compiler', help="compiler to use")
        parser.add_argument('-t', '--timelimit', help="Time limit for every test", type=int)
        parser.add_argument('-s', '--sampletime', help="Minimum time for every sample", type=int)
        parser.add_argument('-f', '--flags', help="Compilation flags", nargs='+')
        args = parser.parse_args()
        if args.verbosity:
            self.verboseLevel = args.verbosity
        self.sources = args.files
        if args.flags:
            self.optFlags = args.flags
        if args.compiler:
            self.compiler = args.compiler
        else:
            self.compiler = 'swiftc'
        if args.timelimit and args.timelimit > 0:
            self.timeLimit = args.timelimit
        if args.sampletime and args.sampletime > 0:
            self.minSampleTime = args.sampletime
        self.log("Sources: %s." % ', '.join(self.sources), 3)
        self.log("Compiler: %s." % self.compiler, 3)
        self.log("Opt flags: %s." % ', '.join(self.optFlags), 3)
        self.log("Verbosity: %s." % self.verboseLevel, 3)
        self.log("Time limit: %s." % self.timeLimit, 3)
        self.log("Min sample time: %s." % self.minSampleTime, 3)

    def processSource(self, name):
        """Wrap the ``bench_*`` functions of `name` in a timing main().

        Writes 'processed_<basename>' containing the original source plus
        measurement scaffolding, and registers a Test per benchmark.
        """
        self.log("Processing source file: %s." % name, 2)

        # Swift scaffolding injected around the user's benchmark code.
        header = """
@_silgen_name("mach_absolute_time") func __mach_absolute_time__() -> UInt64
@_silgen_name("opaqueGetInt32")
func _opaqueGetInt32(x: Int) -> Int
@_silgen_name("opaqueGetInt64")
func _opaqueGetInt64(x: Int) -> Int
@inline(never)
public func getInt(x: Int) -> Int {
#if arch(i386) || arch(arm)
  return _opaqueGetInt32(x)
#elseif arch(x86_64) || arch(arm64) || arch(powerpc64) || arch(powerpc64le)
  return _opaqueGetInt64(x)
#else
  return x
#endif
}
@inline(never)
func False() -> Bool { return getInt(1) == 0 }
@inline(never)
func Consume(x: Int) { if False() { println(x) } }
"""
        beforeBench = """
@inline(never)
"""
        intoBench = """
  if False() { return 0 }
"""
        mainBegin = """
func main() {
  var N = 1
  var name = ""
  if Process.arguments.count > 1 {
    N = Process.arguments[1].toInt()!
  }
"""
        mainBody = """
  name = "%s"
  if Process.arguments.count <= 2 || Process.arguments[2] == name {
    let start = __mach_absolute_time__()
    for _ in 1...N {
      bench_%s()
    }
    let end = __mach_absolute_time__()
    println("\(name),\(N),\(end - start)")
  }
"""
        mainEnd = """
}
main()
"""
        # Matches 'func bench_Name() -> Int', optionally with the opening '{'.
        benchRE = re.compile("^\s*func\s\s*bench_([a-zA-Z0-9_]+)\s*\(\s*\)\s*->\s*Int\s*({)?\s*$")
        with open(name) as f:
            lines = list(f)
        output = header
        lookingForCurlyBrace = False
        testNames = []
        for l in lines:
            if lookingForCurlyBrace:
                # Still scanning for the '{' of the previous bench function.
                output += l
                if "{" not in l:
                    continue
                lookingForCurlyBrace = False
                output += intoBench
                continue
            m = benchRE.match(l)
            if m:
                output += beforeBench
                output += l
                benchName = m.group(1)
                # TODO: Keep track of the line number as well
                self.log("Benchmark found: %s" % benchName, 3)
                self.tests[name+":"+benchName] = Test(benchName, name, "", "")
                testNames.append(benchName)
                if m.group(2):
                    output += intoBench
                else:
                    lookingForCurlyBrace = True
            else:
                output += l
        # Append the generated main() that times each discovered benchmark.
        output += mainBegin
        for n in testNames:
            output += mainBody % (n, n)
        processedName = 'processed_' + os.path.basename(name)
        output += mainEnd
        with open(processedName, 'w') as f:
            f.write(output)
        for n in testNames:
            self.tests[name+":"+n].processedSource = processedName

    def processSources(self):
        """Process every input source file."""
        self.log("Processing sources: %s." % self.sources, 2)
        for s in self.sources:
            self.processSource(s)

    def compileOpaqueCFile(self):
        """Generate and compile opaque.cpp (optimizer barriers for getInt)."""
        self.log("Generating and compiling C file with opaque functions.", 3)
        fileBody = """
#include <stdint.h>
extern "C" int32_t opaqueGetInt32(int32_t x) { return x; }
extern "C" int64_t opaqueGetInt64(int64_t x) { return x; }
"""
        with open('opaque.cpp', 'w') as f:
            f.write(fileBody)
        # TODO: Handle subprocess.CalledProcessError for this call:
        self.runCommand(['clang++', 'opaque.cpp', '-o', 'opaque.o', '-c', '-O2'])

    # Cache of processedSource -> (status, output) so each processed file
    # is compiled once even when it holds several benchmarks.
    compiledFiles = {}

    def compileSource(self, name):
        """Compile the processed source behind test `name`, recording status."""
        self.tests[name].binary = "./"+self.tests[name].processedSource.split(os.extsep)[0]
        if not self.tests[name].processedSource in self.compiledFiles:
            try:
                self.runCommand([self.compiler, self.tests[name].processedSource, "-o", self.tests[name].binary + '.o', '-c'] + self.optFlags)
                self.runCommand([self.compiler, '-o', self.tests[name].binary, self.tests[name].binary + '.o', 'opaque.o'])
                self.compiledFiles[self.tests[name].processedSource] = ('', '')
            except subprocess.CalledProcessError as e:
                self.compiledFiles[self.tests[name].processedSource] = ('COMPFAIL', e.output)
        (status, output) = self.compiledFiles[self.tests[name].processedSource]
        self.tests[name].status = status
        self.tests[name].output = output

    def compileSources(self):
        """Compile the opaque helper then all processed sources."""
        self.log("Compiling processed sources.", 2)
        self.compileOpaqueCFile()
        for t in self.tests:
            self.compileSource(t)

    def runBenchmarks(self):
        """Measure every registered benchmark."""
        self.log("Running benchmarks.", 2)
        for t in self.tests:
            self.runBench(t)

    def parseBenchmarkOutput(self, res):
        """Parse one 'TestName,iters,ns' line; return ('', 0, 0) on mismatch.

        Note: on success the groups are returned as strings, not ints.
        """
        # Parse lines like
        # TestName,NNN,MMM
        # where NNN - performed iterations number, MMM - execution time (in ns)
        RESULTS_RE = re.compile(r"(\w+),[ \t]*(\d+),[ \t]*(\d+)")
        m = RESULTS_RE.match(res)
        if not m:
            return ("", 0, 0)
        return (m.group(1), m.group(2), m.group(3))

    def computeItersNumber(self, name):
        """Choose (samples, iterations-per-sample) for test `name`.

        Doubles the iteration count until a run exceeds minIterTime, then
        scales so each sample lasts at least minSampleTime and fits as many
        samples as possible into timeLimit. Returns (0, 0) when nothing
        measurable can be found.
        """
        scale = 1
        spent = 0
        # Measure time for one iteration
        # If it's too small, increase number of iteration until it's measurable
        while (spent <= self.minIterTime):
            try:
                r = self.runCommand([self.tests[name].binary, str(scale),
                                     self.tests[name].name])
                (testName, itersComputed, execTime) = self.parseBenchmarkOutput(r)
                spent = int(execTime) / 1000000 # Convert ns to ms
                if spent <= self.minIterTime:
                    scale *= 2
                # NOTE(review): sys.maxint is Python 2-only.
                if scale > sys.maxint:
                    return (0, 0)
            except subprocess.CalledProcessError as e:
                r = e.output
                break
        if spent == 0:
            spent = 1
        # Now compute number of samples we can take in the given time limit
        mult = int(self.minSampleTime / spent)
        if mult == 0:
            mult = 1
        scale *= mult
        spent *= mult
        samples = int(self.timeLimit / spent)
        if samples == 0:
            samples = 1
        return (samples, scale)

    def runBench(self, name):
        """Run test `name` for the computed sample count; attach TestResults."""
        if not self.tests[name].status == "":
            return  # earlier compile failure; nothing to run
        (numSamples, iterScale) = self.computeItersNumber(name)
        if (numSamples, iterScale) == (0, 0):
            self.tests[name].status = "CAN'T MEASURE"
            self.tests[name].output = "Can't find number of iterations for the test to last longer than %d ms." % self.minIterTime
            return
        samples = []
        self.log("Running bench: %s, numsamples: %d" % (name, numSamples), 2)
        for i in range(0,numSamples):
            try:
                r = self.runCommand([self.tests[name].binary, str(iterScale),
                                     self.tests[name].name])
                (testName, itersComputed, execTime) = self.parseBenchmarkOutput(r)
                # TODO: Verify testName and itersComputed
                samples.append(int(execTime) / iterScale)
                self.tests[name].output = r
            except subprocess.CalledProcessError as e:
                self.tests[name].status = "RUNFAIL"
                self.tests[name].output = e.output
                break
        res = TestResults(name, samples)
        self.tests[name].results = res

    def reportResults(self):
        """Print the report for every test."""
        self.log("\nReporting results.", 2)
        print("==================================================")
        for t in self.tests:
            self.tests[t].Print()
class Test:
    """Record for one benchmark: source locations, run status and output."""

    def __init__(self, name, source, processedSource, binary):
        self.name = name
        self.source = source
        self.processedSource = processedSource
        self.binary = binary
        self.status = ""

    def Print(self):
        """Print this test's report: its results, or the failure output."""
        print("NAME: %s" % self.name)
        print("SOURCE: %s" % self.source)
        if self.status != "":
            print("STATUS: %s" % self.status)
            print("OUTPUT:")
            print(self.output)
            print("END OF OUTPUT")
        else:
            self.results.Print()
        print("")
class TestResults:
    """Aggregate statistics (min/max/avg/std/confidence) over samples."""

    def __init__(self, name, samples):
        self.name = name
        self.samples = samples
        # Statistics are only defined for a non-empty sample set.
        if samples:
            self.Process()

    def Process(self):
        """Compute summary statistics and a 95% confidence interval."""
        count = len(self.samples)
        self.minimum = min(self.samples)
        self.maximum = max(self.samples)
        self.avg = sum(self.samples) / count
        self.std = pstdev(self.samples)
        self.err = self.std / math.sqrt(count)
        self.int_min = self.avg - self.err * 1.96
        self.int_max = self.avg + self.err * 1.96

    def Print(self):
        """Print the computed statistics."""
        print("SAMPLES: %d" % len(self.samples))
        print("MIN: %3.2e" % self.minimum)
        print("MAX: %3.2e" % self.maximum)
        print("AVG: %3.2e" % self.avg)
        print("STD: %3.2e" % self.std)
        print("ERR: %3.2e (%2.1f%%)" % (self.err, self.err * 100 / self.avg))
        print("CONF INT 0.95: (%3.2e, %3.2e)" % (self.int_min, self.int_max))
        print("")
def main():
    """Entry point: run the full benchmark pipeline, stage by stage."""
    harness = SwiftBenchHarness()
    for stage in (
        harness.parseArguments,
        harness.processSources,
        harness.compileSources,
        harness.runBenchmarks,
        harness.reportResults,
    ):
        stage()


main()
| |
#!/usr/bin/env python3
# Copyright (c) 2011 Collabora Ltd. <http://www.collabora.co.uk/>
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
from .configfile import ConfigFile
from gi.repository import Gtk
class ConfigWizard():
    """Simple configuration wizard window.

    Shows one labelled entry/checkbox per configuration item plus
    Save/Cancel buttons, backed by a ConfigFile object.
    """

    def __init__(self, config_file_path):
        self._config_items = []       # item descriptor dicts (see set_config_items)
        self._config_entries = {}     # item_name -> Gtk input widget
        self._config_file_path = config_file_path
        self._config_file_obj = None  # backing ConfigFile (injected or loaded)

    def set_config_items(self, items):
        """Record the item descriptors and derive the valid-keys mapping.

        :param items: list of dicts of the form
            [ {item_label, item_type, item_name, item_with_value} , ... ]
        """
        self._config_items = items
        keys = {}
        for i in self._config_items:
            keys[i["item_name"]] = {"type": i["item_type"]}
        self._valid_keys = keys

    def set_config_file_obj(self, obj):
        """Inject an already-loaded ConfigFile object."""
        self._config_file_obj = obj

    def get_config_file_obj(self, obj=None):
        """Return the current ConfigFile object.

        Fix: the ``obj`` argument was accepted but never used (copy/paste
        from the setter); it now defaults to None so existing callers keep
        working while new code can omit it.
        """
        return self._config_file_obj

    def show(self, read_from_disc=False):
        """Build and display the wizard window.

        :param read_from_disc: when True, (re)load the config file from
            ``config_file_path``; otherwise a config object must already
            have been injected via ``set_config_file_obj``.
        :raises RuntimeError: if no config object is available.
        """
        if read_from_disc:
            self._config_file_obj = ConfigFile(self._config_file_path)
            self._config_file_obj.set_valid_keys(self._valid_keys)
            self._config_file_obj.load()
        else:
            if self._config_file_obj is None:
                raise RuntimeError("I need the run time obj")
        self._config_popup = Gtk.Window()
        self._config_popup.set_default_size(200, 200)
        self._config_popup.connect('delete_event', self._close_config_cb)
        table = Gtk.Table(12, 1, True)
        self._config_popup.add(table)
        row = 1
        # One row per configuration item...
        for i in self._config_items:
            hbox = self._create_param(i)
            table.attach(hbox, 0, 1, row, row + 1, xpadding=5, ypadding=2)
            row = row + 1
        # ...plus a final row holding the Save and Cancel buttons.
        hbox = Gtk.HBox()
        save_button = Gtk.Button.new_with_label('Save')
        save_button.set_size_request(50, 15)
        save_button.connect('pressed', self._save_config_cb)
        hbox.add(save_button)
        cancel_button = Gtk.Button.new_with_label('Cancel')
        cancel_button.set_size_request(50, 15)
        cancel_button.connect('pressed', self._close_config_cb)
        hbox.add(cancel_button)
        table.attach(hbox, 0, 1, row, row + 1, xpadding=5, ypadding=2)
        self._config_popup.show_all()

    def _save_config_cb(self, widget):
        """'Save' handler: persist values, reporting errors in a window."""
        try:
            self._do_save_config()
        except Exception as e:
            # Fix: Exception.message does not exist in Python 3 (this file
            # targets python3/gi), so the old `e.message` raised
            # AttributeError on the error path; use str(e) instead.
            w = Gtk.Window()
            ls = Gtk.Label(label=str(e))
            w.add(ls)
            w.show_all()
        finally:
            self._config_popup.hide()

    def _do_save_config(self):
        """Copy widget values into the config object and save it to disk.

        :raises RuntimeError: when an entry widget class is unrecognized.
        """
        for i in self._config_items:
            param_name = i["item_name"]
            v = self._config_entries[param_name]
            if v.__class__ is Gtk.Entry:
                value = v.get_text()
            elif v.__class__ is Gtk.CheckButton:
                value = v.get_active()
            else:
                raise RuntimeError("Don't recognize the class %s" % type(v))
            self._config_file_obj.set(param_name, value)
        self._config_file_obj.save()

    def _create_param(self, opts):
        """Build a labelled input row for one item descriptor.

        :param opts: dict {item_label, item_type, item_name, item_with_value};
            ``item_with_value`` defaults to True when absent.
        """
        param_name = opts["item_name"]
        with_value = opts["item_with_value"] if "item_with_value" in opts \
            else True
        hbox = Gtk.HBox()
        if opts["item_type"] == "text":
            entry = Gtk.Entry()
            entry.set_size_request(150, 25)
            if with_value:
                value = self._config_file_obj.get(param_name, True)
                entry.set_text(str(value))
        elif opts["item_type"] == "boolean":
            entry = Gtk.CheckButton()
            if with_value:
                value = self._config_file_obj.get(param_name, True)
                entry.set_active(value)
        self._config_entries[param_name] = entry
        label = Gtk.Label(label=opts["item_label"] + ': ')
        label.set_alignment(1.0, 0.5)
        label.set_size_request(100, 25)
        hbox.add(label)
        hbox.add(entry)
        return hbox

    def _close_config_cb(self, widget, event=None):
        """Hide the wizard window (also used as the delete-event handler)."""
        self._config_popup.hide()
def test_wizard_from_config_file_obj(test_config_file):
    """Manual smoke test: drive the wizard from a pre-loaded ConfigFile."""
    keys = {
        name: {"type": "text"}
        for name in ("nick", "account_id", "server", "port", "password", "register")
    }
    c = ConfigFile(test_config_file)
    c.set_valid_keys(keys)
    c.set("nick", "rgs")
    c.set("account_id", "rgs@andromeda")
    c.set("server", "andromeda")
    c.set("port", 5223)
    c.set("password", "97c74fa0dc3b39b8c87f119fa53cced2b7040786")
    c.set("register", True)
    c.save()
    # Re-read the freshly written file so the wizard sees persisted values.
    c = ConfigFile(test_config_file)
    c.set_valid_keys(keys)
    c.load()
    config_w = ConfigWizard(test_config_file)
    config_items = [
        {"item_label": label, "item_type": "text", "item_name": name}
        for label, name in (
            ("Nickname", "nick"),
            ("Account ID", "account_id"),
            ("Server", "server"),
            ("Port", "port"),
            ("Password", "password"),
            ("Register", "register"),
        )
    ]
    config_w.set_config_items(config_items)
    config_w.set_config_file_obj(c)
    config_w.show()
def test_wizard_from_config_file_path(test_config_file):
    """Manual smoke test: let the wizard load the config file itself."""
    keys = {
        name: {"type": "text"}
        for name in ("nick", "account_id", "server", "port", "password", "register")
    }
    c = ConfigFile(test_config_file)
    c.set_valid_keys(keys)
    c.set("nick", "rgs")
    c.set("account_id", "rgs@andromeda")
    c.set("server", "andromeda")
    c.set("port", 5223)
    c.set("password", "97c74fa0dc3b39b8c87f119fa53cced2b7040786")
    c.set("register", True)
    c.save()
    config_w = ConfigWizard(test_config_file)
    config_items = [
        {"item_label": label, "item_type": "text", "item_name": name}
        for label, name in (
            ("Nickname", "nick"),
            ("Account ID", "account_id"),
            ("Server", "server"),
            ("Port", "port"),
            ("Password", "password"),
            ("Register", "register"),
        )
    ]
    config_w.set_config_items(config_items)
    config_w.show(True)
if __name__ == "__main__":
    # Manual smoke test: builds a wizard against a throwaway path, then
    # hands control to the GTK main loop.
    # test_wizard_from_config_file_obj("/tmp/configwizard.test.0001")
    test_wizard_from_config_file_path("/tmp/configwizard.test.0002")
    Gtk.main()
| |
# encoding: utf-8
"""Object instance and class helper functions."""
import logging
import inspect
import pkg_resources
from collections import defaultdict
from functools import partial
from marrow.util.compat import binary, unicode
def flatten(x):
    """Recursively flatten nested iterables into a stream of scalars.

    Byte and text strings are treated as scalars rather than iterated,
    so they are yielded whole.

    Example:

        >>> list(flatten([[[1, 2, 3], (42, None)], [4, 5], [6], 7]))
        [1, 2, 3, 42, None, 4, 5, 6, 7]
    """
    for item in x:
        if isinstance(item, (binary, unicode)) or not hasattr(item, "__iter__"):
            yield item
        else:
            for nested in flatten(item):
                yield nested
def yield_property(iterable, name, default=None):
    """Yield ``getattr(item, name, default)`` for each item of `iterable`."""
    for item in iterable:
        yield getattr(item, name, default)
def yield_keyvalue(iterable, key, default=None):
    """Yield ``i[key]`` for each item of `iterable`, or `default` when absent.

    Bug fix: the containment test previously checked ``key in iterable``
    (the outer sequence) instead of ``key in i`` (each item), so `default`
    was yielded for every element unless the key happened to be an element
    of the outer sequence itself.
    """
    for i in iterable:
        yield i[key] if key in i else default
class _NoDefault(object):
    """Sentinel type used to distinguish "no default supplied" from None."""
    pass

# Shared sentinel instance; compare with ``value is NoDefault``.
NoDefault = _NoDefault()
def merge(s, t):
    """Merge dictionary `t` into `s` recursively, returning `s`.

    Nested dicts present in both are merged key-by-key; any other value in
    `t` (or a dict under a key absent from `s`) simply replaces/sets the
    entry in `s`.
    """
    for key, value in t.items():
        if isinstance(value, dict) and key in s:
            s[key] = merge(s[key], value)
        else:
            s[key] = value
    return s
def load_object(target, namespace=None):
    """Load an object identified by a dotted-notation string.

    For example::

        # Load class Foo from example.objects
        load_object('example.objects:Foo')

    If a plugin `namespace` is given, simple name references are resolved
    through ``pkg_resources`` entry points::

        load_object('routing', 'web.dispatch')

    Providing a namespace does not prevent full dot-colon lookup.

    :raises ValueError: when a namespaced plugin name is unknown.
    """
    if namespace and ':' not in target:
        plugins = dict((ep.name, ep) for ep in pkg_resources.iter_entry_points(namespace))
        if target not in plugins:
            raise ValueError('Unknown plugin "' + target + '"; found: ' + ', '.join(plugins))
        return plugins[target].load()

    if ':' in target:
        module_path, attr = target.split(':')
    else:
        module_path, attr = target, None

    # __import__ returns the top-level package; walk down the remainder.
    obj = __import__(module_path)
    trail = module_path.split('.')[1:]
    if attr:
        trail.append(attr)
    for part in trail:
        obj = getattr(obj, part)
    return obj
class PluginCache(defaultdict):
    """Lazily load plugins from the given namespace."""

    def __init__(self, namespace):
        super(PluginCache, self).__init__()
        # Entry-point namespace used to resolve missing keys.
        self.namespace = namespace

    def __missing__(self, key):
        # NOTE(review): the loaded object is returned but never stored, so
        # each miss re-resolves the plugin; confirm whether
        # ``self[key] = load_object(...)`` was intended for actual caching.
        return load_object(key, self.namespace)
class Cache(dict):
    """A least-recently-used (LRU) cache.

    Discards the least recently referenced object when full.

    Entries are stored as CacheElement nodes of a doubly-linked list:
    ``head`` is the most recently used entry, ``tail`` the least.

    Based on Python Cookbook contributions from multiple sources:

        * http://code.activestate.com/recipes/521871/
        * http://code.activestate.com/recipes/498110/
        * http://code.activestate.com/recipes/252524/
        * http://code.activestate.com/recipes/498245/

    And Genshi's LRUCache:

        http://genshi.edgewall.org/browser/trunk/genshi/util.py

    Warning: If memory cleanup is disabled this dictionary will leak.
    """

    class CacheElement(object):
        # Linked-list node pairing a key with its cached value.
        def __init__(self, key, value):
            self.previous = self.next = None
            self.key, self.value = key, value

        def __repr__(self):
            return repr(self.value)

    def __init__(self, capacity):
        # capacity: maximum number of entries kept before eviction.
        super(Cache, self).__init__()
        self.head = self.tail = None
        self.capacity = capacity

    def __iter__(self):
        # Iterate keys from most to least recently used.
        cur = self.head
        while cur:
            yield cur.key
            cur = cur.next

    def __getitem__(self, key):
        # A successful lookup promotes the entry to most recently used.
        element = super(Cache, self).__getitem__(key)
        self._update(element)
        return element.value

    def __setitem__(self, key, value):
        try:
            # Existing key: replace the value and promote the entry.
            element = super(Cache, self).__getitem__(key)
            element.value = value
            self._update(element)
        except KeyError:
            # Item doesn't exist, create a new wrapper element.
            element = self.CacheElement(key, value)
            super(Cache, self).__setitem__(key, element)
            self._insert(element)
        self._restrict()

    def _insert(self, element):
        # Link `element` in at the head (most recently used position).
        element.previous, element.next = None, self.head
        if self.head is not None:
            self.head.previous = element
        else:
            self.tail = element
        self.head = element

    def _restrict(self):
        # Evict least-recently-used entries until within capacity.
        while len(self) > self.capacity:
            # element = super(Cache, self).get(self.tail.key)
            del self[self.tail.key]
            if self.tail != self.head:
                self.tail = self.tail.previous
                self.tail.next = None
            else:
                self.head = self.tail = None

    def _update(self, element):
        # Unlink `element` from its position and relink it at the head.
        if self.head == element:
            return
        previous = element.previous
        previous.next = element.next
        if element.next is not None:
            element.next.previous = previous
        else:
            self.tail = previous
        element.previous, element.next = None, self.head
        self.head.previous = self.head = element
class LoggingFile(object):
    """A write-only file-like object that redirects to the standard Python logging module.

    Reading methods (``read``/``readline``/``readlines``/``next``) raise
    IOError; ``close`` and ``flush`` are no-ops.
    """

    def __init__(self, logger=None, level=logging.ERROR):
        # Falls back to the 'logfile' logger; every write logs at `level`.
        self.logger = partial((logger or logging.getLogger('logfile')).log, level)

    def write(self, text):
        self.logger(text)

    def writelines(self, lines):
        for entry in lines:
            self.logger(entry)

    def close(self, *args, **kw):
        """A no-op shared by the file-like methods that need not do anything."""
        pass

    def next(self, *args, **kw):
        """Raise IOError: these pseudo-files are write-only."""
        raise IOError("Logging files can not be read.")

    flush = close
    read = next
    readline = next
    readlines = next
class CounterMeta(type):
    '''
    A simple meta class which adds a ``_counter`` attribute to the instances of
    the classes it is used on. This counter is simply incremented for each new
    instance.
    '''
    counter = 0

    def __call__(self, *args, **kwargs):
        # `self` here is the class being instantiated.
        instance = type.__call__(self, *args, **kwargs)
        # NOTE: the counter is read from and stored on CounterMeta itself,
        # so it is shared (global) across every class using this metaclass,
        # not maintained per class.
        instance._counter = CounterMeta.counter
        CounterMeta.counter += 1
        return instance
def getargspec(obj):
    """An improved inspect.getargspec.

    Has a slightly different return value from the default getargspec.

    Returns a tuple of:
        required, optional, args, kwargs
        list, dict, bool, bool

    Required is a list of required named arguments.
    Optional is a dictionary mapping optional arguments to defaults.
    Args and kwargs are True for the respective unlimited argument type.

    :raises TypeError: if `obj` is not callable.

    NOTE(review): ``inspect.getargspec`` was removed in Python 3.11, so this
    helper needs ``getfullargspec``/``signature`` on modern interpreters.
    NOTE(review): the ``del`` pruning optional names from the returned list
    is commented out below, so the first element still contains *all* named
    arguments, not only the required ones — confirm intended contract.
    """
    argnames, varargs, varkw, _defaults = None, None, None, None

    if inspect.isfunction(obj) or inspect.ismethod(obj):
        argnames, varargs, varkw, _defaults = inspect.getargspec(obj)
    elif inspect.isclass(obj):
        # A builtin/slot __init__ (method descriptor) cannot be inspected;
        # treat the class as taking no named arguments.
        if inspect.ismethoddescriptor(obj.__init__):
            argnames, varargs, varkw, _defaults = [], False, False, None
        else:
            argnames, varargs, varkw, _defaults = inspect.getargspec(obj.__init__)
    elif hasattr(obj, '__call__'):
        argnames, varargs, varkw, _defaults = inspect.getargspec(obj.__call__)
    else:
        raise TypeError("Object not callable?")

    # Need test case to prove this is even possible.
    # if (argnames, varargs, varkw, defaults) is (None, None, None, None):
    #     raise InspectionFailed()

    # Drop the implicit `self` of methods.
    if argnames and argnames[0] == 'self':
        del argnames[0]

    if _defaults is None:
        _defaults = []
        defaults = dict()
    else:
        # Create a mapping dictionary of defaults; this is slightly more useful.
        defaults = dict()
        # Defaults align with the *tail* of argnames, so pair them up from
        # the reversed lists, then restore the original order.
        _defaults = list(_defaults)
        _defaults.reverse()

        argnames.reverse()
        for i, default in enumerate(_defaults):
            defaults[argnames[i]] = default
        argnames.reverse()

        # del argnames[-len(_defaults):]

    return argnames, defaults, True if varargs else False, True if varkw else False
class RichComparisonMixin(object):
    """Derive the full set of rich comparisons from two primitives.

    Subclasses implement ``__eq__`` and ``__lt__``; the remaining four
    operators are expressed in terms of those two.
    """

    def __eq__(self, other):
        raise NotImplementedError("Equality not implemented")

    def __ne__(self, other):
        return not self.__eq__(other)

    def __lt__(self, other):
        raise NotImplementedError("Less than not implemented")

    def __le__(self, other):
        return self.__eq__(other) or self.__lt__(other)

    def __gt__(self, other):
        # De Morgan of: not (self < other or self == other)
        return not self.__lt__(other) and not self.__eq__(other)

    def __ge__(self, other):
        return self.__eq__(other) or self.__gt__(other)
| |
from snappy import Manifold
from cypari import *
from multiprocessing import *
import copy
import sys
from ManifoldIterators import *
from VolumeUtilities import *
def prepare_pvolume_file(maniter, ofilenm, append = False, engine = 'magma', max_secs = 20, sln = 2, retrieve = True, period = 100, separator = ';'):
    """The same as calling get_volume_data(mans).write_to_csv(ofilenm) with the given parameters,
    except output will be written out every period manifolds and logs generated, instead of all at once."""
    ctr = 0          # manifolds buffered so far in the current block
    block = list()   # buffered manifolds awaiting processing
    done = False     # set once maniter is exhausted
    try:
        if append:
            f = open(ofilenm,'a')
        else:
            f = open(ofilenm,'w')
        # NOTE(review): if open() above fails, the finally clause raises
        # NameError on f; and an empty final block would raise IndexError
        # at block[0] below — confirm both edge cases are acceptable.
        while True:
            try:
                # Python 2 iterator protocol (.next()).
                block.append(maniter.next())
                ctr += 1
            except StopIteration:
                done = True
            if ctr == period or done:
                print 'Processing '+str(block[0])+' to '+str(block[-1])+'.'
                # Compute this block's volumes and flush them to the CSV.
                v = get_volume_data(ForwardingIterator(block.__iter__(),lambda m : str(m)),engine=engine,max_secs=max_secs,sln=sln,retrieve=retrieve)
                v.write_to_csv(f,append=append,separator=separator)
                append = True # we must be appending after the first time
                ctr = 0
                block = list()
                if done:
                    break
    finally:
        f.close()
# Count the number of distinct non-zero (<EPSILON) values up to sign
def _distinct_abs(vol_list, epsilon = EPSILON):
    """Count distinct absolute values among the volumes in `vol_list`.

    Values below `epsilon` are discarded first (distinctness is up to
    sign), and two values within `epsilon` of each other count as equal.
    """
    candidates = set([abs(pari(vol)) for vol in vol_list if vol >= epsilon])
    representatives = list()
    for vol in candidates:
        if not any(abs(vol - seen) <= epsilon for seen in representatives):
            representatives.append(vol)
    return len(representatives)
def get_volume_data(man_nms, engine = 'magma', max_secs = 20, retrieve = True, sln = 2, max_itf_degree = MAX_ITF):
    """ Returns a VolumeData object containing exotic volumes for manifolds with the given names
    Volumes' precision is based on pari, so set it there
    set retrieve = False to skip retrieving ptolemy data from files
    set engine = None to skip computing ptolemy data in an engine
    set max_secs to specify how long we will spend computing a given manifolds' data before killing the engine and moving on;
    specifying None means we will never give up (unless something crashes)
    if the engine given crashes, so will IDLE and SnapPy; to avoid this, run this command only from within python scripts.
    Manifolds with more than floor(max_itf_degree/2) distinct volumes to an obstruction class
    will have their data for that obstruction class removed, since this demonstrates an invariant trace field with too high ncp
    Set to None and it will be ignored.

    NOTE(review): no filtering by max_itf_degree is visible in this
    function's body despite the docstring — confirm where it happens."""
    #TODO: special case max_secs=None to not bother with processes
    if engine:
        def _use_engine(v,p): # this function will be called in a second process to facilitate time limits
            p.send(v.compute_solutions(engine = engine))
    # recs: obstruction-class polynomial -> {volume str -> [(name, class idx)]}
    recs = dict()
    for nm in man_nms:
        try:
            sols = None
            var = Manifold(nm).ptolemy_variety(sln,'all')
            try:
                if retrieve:
                    sols = var.retrieve_decomposition()
                else:
                    raise Exception("Go on and compute")
            except Exception as e: # try using engine
                if engine:
                    # Run the engine in a daemon child so we can abandon the
                    # computation after max_secs.
                    mine, theirs = Pipe(duplex = False)
                    p = Process(target=_use_engine,args=[var,theirs])
                    p.daemon = True
                    p.start()
                    if mine.poll(max_secs): # Here is the time limit stuff
                        sols = mine.recv()
                        p.terminate()
                    else:
                        p.terminate() # give up on this one
                        print 'Computation took too long; skipping '+nm
                        continue
                else:
                    print 'No engine and no data retrieved; skipping '+nm
                    continue
            if sols:
                # Per obstruction class: (number field, numerical volumes).
                data = [(c.number_field(),c.solutions(numerical = True).volume_numerical()) for c in sols.flatten()]
                for cl_idx in xrange(len(data)):
                    if data[cl_idx]: # TODO may be trivial since no check here
                        for v in data[cl_idx][1]:
                            recs.setdefault(str(data[cl_idx][0]),dict()).setdefault(str(v),list()).append((nm,cl_idx))
            else:
                print 'Got no solutions; skipping '+nm
        except Exception as e:
            print(str(e))+'; skipping '+nm
            continue
    # Deduplicate the (name, class index) pairs recorded for each volume.
    for p in recs.keys():
        for v in recs[p].keys():
            recs[p][v] = list(set(recs[p][v]))
    return VolumeData(data = recs)
def get_potential_trace_fields(poly,sln=2):
"""Given a minimal polynomial of a trace field, returns a list of minimal polynomials of the potential invariant trace fields."""
pol = pari(poly)
try:
return [str(rec[0].polredabs()) for rec in pol.nfsubfields()[1:] if _knmiss(rec[0].poldegree(),pol.poldegree(),sln=sln)] # poldegree returns int
except: # we want cypari.gen.PariError, but no idea how to reference; fortunately, anything else will just raise again
try:
pol = pol.polredabs()
except: # actually except PariError again
print 'When running trace field '+poly+' polredabs couldn\'t handle it.'
return [poly] # between this return and the above print statement, we should know when the above error happened.
return get_potential_trace_fields(str(pol),sln=sln)
def is_pitf(poly,cand,sln):
    """Degree test: can the field cut out by cand be the invariant trace
    field of the field cut out by poly, for the given sln?"""
    cand_degree = pari(cand).poldegree()
    poly_degree = pari(poly).poldegree()
    return _knmiss(cand_degree, poly_degree, sln)
def _knmiss(s,l,n):
if s <= 0 or l <= 0 or n <= 0:
return False
while s < l:
s *= n
return s == l
# Wrapper for manipulating data on pseudo-volumes
class VolumeData:
    """This class is for storage and manipulation of exotic volumes of some manifolds.
    Given a value for data, the constructor makes a VolumeData object wrapping it.
    The datastructure is {poly:{volume:[(manifold,obstruction_class_index)]}}
    It's usually not necessary to make these yourself; collection and read methods return them for you."""
    # structure: dict poly ---> dict volume ---> [(manifold,obstr_cl)]
    def __init__(self, data = None):
        # Fix: the default used to be `data = dict()`, a mutable default
        # argument shared by every VolumeData() built without arguments.
        self.data = data if data is not None else dict()
    def get_polys(self):
        """Returns (as a list of strings) the minimal polynomials of the ptolemy/trace fields for the volumes in this object."""
        return self.data.keys()
    def get_volumes(self,poly):
        """Returns (as a list of strings) the volumes that occur over the field with the given minimal polynomial."""
        return self.data[poly].keys()
    def get_manifolds(self,poly,volume):
        """Returns a list of the names of manifolds that produce the given minimal polynomial/volume pair."""
        return [p[0] for p in self.data[poly][volume]]
    def combine_with(self,other):
        """Returns a VolumeData object containing the data from self and other; in case of a conflict (which should not occur),
        the other's data takes precedence."""
        new_data = copy.deepcopy(self.data)
        for p in other.get_polys():
            for v in other.get_volumes(p):
                new_data.setdefault(p,dict()).setdefault(v,list()).extend(other.data[p][v])
        return VolumeData(data = new_data)
    # given an (open, ready to write) file object or valid filename, writes the data
    def write_to_csv(self, output_file, separator = ';', append = False):
        """Writes out the data to output_file, provided output_file is a valid (open, ready to write) File object or filename."""
        f = None
        try:
            if type(output_file) == str:
                f = open(output_file,'a') if append else open(output_file,'w')
            else:
                f = output_file
            if not append:
                # header row
                f.write('"TraceField"'+separator+'"Volume"'+separator+'"Manifold"'+separator+'"ObstructionClass"'+'\n')
            for p in self.get_polys():
                for v in self.get_volumes(p):
                    for rec in self.data[p][v]:
                        f.write('"'+p+'"'+separator)
                        f.write('"'+v+'"'+separator)
                        f.write('"'+rec[0]+'"'+separator)
                        f.write('"'+str(rec[1])+'"\n')
        finally:
            # only close handles this method opened itself
            if type(output_file) == str and f:
                f.close()
    def filter_fields(self, maxsfdegree=MAX_ITF, sln = 2):
        """This filter removes some polynomials with no subfields of degree <= maxsfdegree
        it doesn't get them all, but does avoid calling nfsubfields; it is quick and approximate."""
        def _filter(p): # for a double break
            deg = pari(p).poldegree()
            for n in range(maxsfdegree):
                if _knmiss(n+1, deg, sln):
                    return
            del self.data[p]
        # iterate over a snapshot of the keys since _filter deletes entries
        for p in list(self.data.keys()):
            _filter(p)
    # Remove all volumes that are integral multiples of another volume (including 1*)
    # To register as an integral multiple, the decimal part of big/small must be less than epsilon
    # Will remove manifolds if all their pvols were integral multiples of other pvols
    def cull_volumes(self, epsilon = EPSILON): # code adapted from VolumeProcessing
        """Removes volumes that are integral multiples of another volume over the same field."""
        for poly in self.get_polys():
            # concrete list so entries can be popped while scanning
            vols = list(self.get_volumes(poly))
            i = 0
            while i < len(vols) - 1:
                j = i + 1
                while j < len(vols):
                    try:
                        if is_int(float(vols[i])/float(vols[j]), epsilon = epsilon) and gen.pari(vols[i] + ' > ' + vols[j]) == 1:
                            # We have to throw away (culled) manifold names to let all culled manifolds have the same volume
                            # [j] divides [i] so remove [i]
                            del self.data[poly][vols.pop(i)]
                            # i is already effectively incremented by the pop, so we must offset it
                            i = i-1
                            break
                        elif is_int(float(vols[j])/float(vols[i]), epsilon = epsilon):
                            # this time, remove [j]
                            # Fix: the original popped index i here, deleting
                            # the wrong volume (and desyncing vols from data).
                            del self.data[poly][vols.pop(j)]
                            # j is effectively incremented by the pop, no need to do it
                        else:
                            j += 1
                    except (ValueError, ZeroDivisionError): # bad quotient; not a linear combination either way so...
                        j += 1
                i += 1
    def remove_nonpositive_vols(self, epsilon = EPSILON):
        """Removes any volume less than epsilon"""
        for p in self.get_polys():
            # snapshot the volume keys; entries are deleted while iterating
            for v in list(self.get_volumes(p)):
                try:
                    if float(v) < epsilon:
                        del self.data[p][v]
                except ValueError: # v did not parse as a float; treat as nonpositive
                    del self.data[p][v]
    def filter_distinct_volumes(self, maxsfdegree = MAX_ITF, epsilon = EPSILON):
        """Removes an obstruction class if there are more than floor(maxsfdegree/2) distinct (up to sign) nonzero volumes.
        If this condition is met, it means that the invariant trace fields have more than maxsfdegree,
        because they have more complex places than that degree could possibly have."""
        # This sucks, because we have to get everything by manifold,oc pairs again.
        classes = dict() # (m,oc):[(poly,vol)]
        # explicit floor division (Python 2 `/` on ints floored implicitly)
        ncp = maxsfdegree // 2
        for p in self.get_polys():
            for v in self.get_volumes(p):
                for rec in self.data[p][v]:
                    classes.setdefault(rec,list()).append((p,v))
        for rec,l in classes.items():
            if _distinct_abs([p[1] for p in l], epsilon = epsilon) > ncp: # too many distinct volumes
                for p,v in classes[rec]:
                    self.data[p][v].remove(rec)
    def clean(self, maxsfdegree = MAX_ITF, epsilon = EPSILON, n=2):
        """Runs several methods for decreasing size without losing much information
        Set maxsfdegree to None to avoid culling based on subfield degree."""
        if maxsfdegree:
            self.filter_fields(maxsfdegree = maxsfdegree, sln = n)
            self.filter_distinct_volumes(maxsfdegree = maxsfdegree, epsilon = epsilon)
        self.remove_nonpositive_vols(epsilon = epsilon)
    # Cut down to 1 manifold per poly,vol pair.
    def remove_duplicate_manifolds(self):
        """Keeps only the first (manifold, obstruction class) record for each poly/volume pair."""
        for p in self.get_polys():
            for v in self.get_volumes(p):
                self.data[p][v] = [self.data[p][v][0]]
def is_int(fl, epsilon = EPSILON):
    """Return True if fl lies within epsilon of an integer (from either side)."""
    frac = fl % 1
    return min(frac, 1 - frac) < epsilon
def read_volumedata_csv(infile, separator = ';'):
"""Given an (open, ready to read data) file object or valid filename, reads the file and returns a VolumeData that would write it."""
f = None
try:
if type(infile) == str:
f = open(infile,'r')
f.readline()
else:
f = infile
data = dict()
for l in f.readlines():
w = l.strip('\n').replace('"','').split(separator)
try:
data.setdefault(w[0],dict()).setdefault(w[1],list()).append((w[2],int(w[3])))
except IndexError: # This was initially for debugging, but it can be useful if you grabbed a file while a line was being written
print 'Malformed Input: '+str(w)
continue
return VolumeData(data = data)
finally:
if type(infile) == str and f:
f.close()
| |
# -*- coding: utf-8 -*-
#
# Author: Wan Xiaolin <wanxiaolin@mail.bnu.edu.cn>
"""Neural Computer Module
"""
import collections
import functools
import itertools
import math
import numpy
import tensorflow
from tensorflow import (concat, expand_dims, reverse, scatter_nd, shape,
split, stack, squeeze, transpose)
from tensorflow import multiply, sigmoid, tanh, norm, exp
from tensorflow import reduce_sum as rsum, cumprod
from tensorflow.python.ops.nn_ops import top_k, softplus
__all__ = ['tensorslice', 'intfcparse', 'contentaddress', 'allocmem',
'tpmemrecall', 'readweight', 'writeweight', 'memread', 'memwrite',
'NeuralComputer']
DTYPE = 'float16'
def get_rand_weights(nrows, ncols, name=None):
    """Return a (nrows, ncols) TF variable drawn from a standard normal, cast to DTYPE."""
    init = numpy.random.randn(nrows, ncols).astype(DTYPE)
    return tensorflow.Variable(init, name=name)
def get_xavier_weights(nrows, ncols, name=None):
    """Return a (nrows, ncols) TF variable with Glorot/Xavier uniform init.

    Samples uniformly from [-limit, limit], limit = sqrt(6 / (nrows + ncols)).
    """
    limit = math.sqrt(6) / math.sqrt(nrows + ncols)
    # Fix: the original drew from [4 * (-limit), limit], an asymmetric range;
    # Glorot uniform initialization is symmetric about zero.
    w = numpy.random.uniform(low=-limit, high=limit, size=(nrows, ncols))
    return tensorflow.Variable(w.astype(DTYPE), name=name)
def get_zero_weights(nrows, ncols, name=None):
    """Return a (nrows, ncols) TF variable initialized to all zeros in DTYPE."""
    init = numpy.zeros(shape=(nrows, ncols)).astype(DTYPE)
    return tensorflow.Variable(init, name=name)
def oneplus(x):
    """DNC 'oneplus' squashing: maps any real x into (1, inf)."""
    return softplus(x) + 1
def softmax(x, axis):
    """Numerically stable softmax along `axis`.

    Subtracting the per-slice max before exponentiating leaves the result
    mathematically unchanged but prevents exp overflow — important since
    the module computes in float16 (DTYPE), where exp overflows near 11.
    """
    shifted = x - tensorflow.reduce_max(x, axis=axis, keep_dims=True)
    e = exp(shifted)
    return e / rsum(e, axis=axis, keep_dims=True)
def tensorslice(tensor, n_slices):
    """Cut `tensor` into consecutive chunks along its first axis.

    Parameters
    ----------
    tensor : any sliceable object.
    n_slices : boundary offsets `[b0, b1, ..., bk]`; chunk m is
        `tensor[b_m : b_{m+1}]`.

    Returns
    -------
    list of the k chunks.
    """
    return [tensor[begin:end]
            for begin, end in zip(n_slices[:-1], n_slices[1:])]
def intfcparse(xi, word_size, n_rh):
    """Interface parse.

    Splits the controller's flat interface vector into the DNC control
    signals and applies their squashing functions.

    Parameters
    ----------
    xi : `[batch_size, (word_size*n_rh + word_size*3 + n_rh*5 + 3)]`
    word_size : i.e. `config.word_size`.
    n_rh : number of read heads, i.e. `config.n_readheads`

    Returns
    -------
    dict with keys:
    key_r : a R-list, each element `[batch_size, word_size]` (read keys).
    beta_r : a R-list, each element `[batch_size, 1]` (oneplus-squashed).
    key_w : `[batch_size, word_size]` (write key, unsquashed).
    beta_w : `[batch_size, 1]` (oneplus-squashed write strength).
    e : `[batch_size, word_size]` (sigmoid erase vector).
    v : `[batch_size, word_size]` (write vector, unsquashed).
    fg : a R-list, each element `[batch_size, 1]` (sigmoid free gates).
    g_a : `[batch_size, 1]` (sigmoid allocation gate).
    g_w : `[batch_size, 1]` (sigmoid write gate).
    pi : a R-list, each element `[batch_size, 3]` (softmax read modes).
    """
    # per-head split sizes for keys (word_size), scalars (1) and modes (3)
    rh_nsplits_1 = list(itertools.repeat(word_size, times=n_rh))
    rh_nsplits_2 = list(itertools.repeat(1, times=n_rh))
    rh_nsplits_3 = list(itertools.repeat(3, times=n_rh))
    # widths of the consecutive segments of xi, in items_name order
    items_size = [n_rh*word_size, n_rh, word_size, 1, word_size, word_size,
                  n_rh, 1, 1, n_rh*3]
    items_name = ['key_r', 'beta_r', 'key_w', 'beta_w', 'e', 'v', 'fg', 'g_a',
                  'g_w', 'pi']
    intfc = dict(zip(items_name, split(xi, items_size, axis=1)))
    # split the per-head segments and apply each signal's squashing function
    intfc['key_r'] = split(intfc['key_r'], rh_nsplits_1, axis=1)
    intfc['beta_r'] = split(oneplus(intfc['beta_r']), rh_nsplits_2, axis=1)
    intfc['key_w'] = intfc['key_w']
    intfc['beta_w'] = oneplus(intfc['beta_w'])
    intfc['e'] = sigmoid(intfc['e'])
    intfc['v'] = intfc['v']
    intfc['fg'] = split(sigmoid(intfc['fg']), rh_nsplits_2, axis=1)
    intfc['g_a'] = sigmoid(intfc['g_a'])
    intfc['g_w'] = sigmoid(intfc['g_w'])
    intfc['pi'] = [softmax(x, axis=1)
                   for x in split(intfc['pi'], rh_nsplits_3, axis=1)]
    return intfc
def contentaddress(key, mem, beta):
    """Cosine-similarity content addressing.

    Parameters
    ----------
    key : `[batch_size, word_size]` lookup key.
    mem : `[batch_size, mem_size, word_size]` memory matrix.
    beta : `[batch_size, 1]` sharpening strength.

    Returns
    -------
    `[batch_size, mem_size]` softmax of beta-scaled cosine similarities.
    """
    lookup = expand_dims(key, axis=2)
    row_norms = norm(mem, axis=2)
    key_norm = norm(key, axis=1, keep_dims=True)
    similarity = squeeze(mem @ lookup) / (key_norm * row_norms)
    return softmax(beta * similarity, axis=1)
def allocmem(u_tm1, ww_tm1, wr_tm1_ls, fg_ls):
    """Allocate Memory.

    Updates the memory usage vector and derives the allocation weighting
    over slots (DNC-style dynamic allocation).

    Parameters
    ----------
    u_tm1 : `[batch_size, mem_size]`.
    ww_tm1 : `[batch_size, mem_size]`.
    wr_tm1_ls : a list of R read weights. each element in the list has size of:
        `[batch_size, mem_size]`.
    fg_ls : a list of R free gates. each element in the list has size of:
        `[batch_size, 1]`.

    Returns
    -------
    alloc_vec : `[batch_size, mem_size]`
    u : `[batch_size, mem_size]`.
    """
    mem_size = shape(u_tm1)[1]
    # retention: per-slot fraction NOT freed by any read head's free gate
    retention = functools.reduce(
        multiply, [1 - fg * wr_tm1 for fg, wr_tm1 in zip(fg_ls, wr_tm1_ls)])
    # usage rises where we wrote last step, then decays by retention
    u = (u_tm1 + ww_tm1 - u_tm1 * ww_tm1) * retention
    # ascending usage order obtained by reversing descending top_k
    asd_u, asd_u_idx = top_k(u, k=mem_size)
    idx = reverse(asd_u_idx, axis=[1])
    # exclusive cumulative product of usages in ascending order
    prod_phi = cumprod(reverse(asd_u, axis=[1]), axis=1, exclusive=True)
    # NOTE(review): `idx` is computed but never used, and `prod_phi` is in
    # sorted order while `u` is in slot order; the DNC allocation weighting
    # normally scatters the sorted products back through the sort indices
    # (cf. the imported-but-unused `scatter_nd`). Confirm this is intended.
    alloc_vec = (1 - u) * prod_phi
    return alloc_vec, u
def tpmemrecall(tpmem_tm1, p_tm1, ww, wr_tm1_ls):
    """Temporal memory recall.

    Updates the precedence weighting and temporal link matrix, then projects
    the previous read weightings forward and backward through the links.

    Parameters
    ----------
    tpmem_tm1 : Temporal memory, `[batch_size, mem_size, mem_size]`.
    p_tm1 : `[batch_size, mem_size]`.
    ww : write weights, `[batch_size, mem_size]`
    wr_tm1_ls : read weights list, each element in the list has size of:
        `[batch_size, mem_size]`

    Returns
    -------
    tpmem : updated link matrix, `[batch_size, mem_size, mem_size]`.
    p : updated precedence weighting, `[batch_size, mem_size]`.
    fseq : forward-projected read weightings, each `[batch_size, mem_size]`.
    bseq : backward-projected read weightings, each `[batch_size, mem_size]`.
    """
    # NOTE(review): rsum(ww) with no axis reduces over *all* axes, batch
    # included; the DNC precedence update uses a per-example sum —
    # presumably rsum(ww, axis=1, keep_dims=True) was intended. Confirm.
    p = (1 - rsum(ww)) * p_tm1 + ww
    # decay the written rows/columns, then add the outer product p_tm1 (x) ww
    tpmem = ((1 - expand_dims(ww, axis=2) - expand_dims(ww, axis=1)) *
             tpmem_tm1 + expand_dims(p_tm1, axis=2) @ expand_dims(ww, axis=1))
    tp_tpmem = transpose(tpmem, perm=[0, 2, 1])
    # forward uses the link matrix, backward its transpose
    fseq = [squeeze(expand_dims(x, axis=1) @ tpmem) for x in wr_tm1_ls]
    bseq = [squeeze(expand_dims(x, axis=1) @ tp_tpmem) for x in wr_tm1_ls]
    return tpmem, p, fseq, bseq
def readweight(fseq_ls, bseq_ls, content_r_ls, pi_ls):
    """Return read weighting list `wr`.

    Each head mixes its backward, content, and forward weightings by its
    read-mode distribution pi = [backward, content, forward].

    Parameters
    ----------
    fseq_ls : forward weightings, each `[batch_size, mem_size]`.
    bseq_ls : backward weightings, each `[batch_size, mem_size]`.
    content_r_ls : content weightings, each `[batch_size, mem_size]`.
    pi_ls : read modes, each `[batch_size, 3]`.

    Returns
    -------
    list of read weightings, each `[batch_size, mem_size]`.
    """
    wr_ls = list()
    for fseq, bseq, cont_r, pi in zip(fseq_ls, bseq_ls, content_r_ls, pi_ls):
        # Fix: the third mode is the *forward* weighting; the original
        # stacked bseq twice and left fseq unused.
        modes = stack([bseq, cont_r, fseq], axis=2)
        wr_ls.append(rsum(expand_dims(pi, axis=1) * modes, axis=2))
    return wr_ls
def writeweight(alloc, content_w, g_w, g_a):
    """Return write weighting `ww`.

    Interpolates between the allocation and content weightings with the
    allocation gate, then scales by the overall write gate.

    Parameters
    ----------
    alloc : `[batch_size, mem_size]`.
    content_w : `[batch_size, mem_size]`.
    g_w : `[batch_size, 1]` write gate.
    g_a : `[batch_size, 1]` allocation gate.

    Returns
    -------
    ww : `[batch_size, mem_size]`.
    """
    blended = g_a * alloc + (1 - g_a) * content_w
    return g_w * blended
def memread(mem, wr_ls):
    """Memory read head.

    Parameters
    ----------
    mem : `[batch_size, mem_size, word_size]`.
    wr_ls : a R-list, each element `[batch_size, mem_size]`.

    Returns
    -------
    list of read vectors, each `[batch_size, word_size]`.
    """
    reads = []
    for wr in wr_ls:
        # weighted sum of memory rows: (b,1,m) @ (b,m,w) -> (b,w)
        reads.append(squeeze(expand_dims(wr, axis=1) @ mem))
    return reads
def memwrite(mem_tm1, ww, erase, write_vec):
    """Memory write head: M_t = M_{t-1} o (1 - w e^T) + w v^T.

    Parameters
    ----------
    mem_tm1 : `[batch_size, mem_size, word_size]`.
    ww : `[batch_size, mem_size]` write weighting.
    erase : `[batch_size, word_size]` erase vector.
    write_vec : `[batch_size, word_size]` write vector.

    Returns
    -------
    mem : `[batch_size, mem_size, word_size]`.
    """
    # Fix: the original multiplied the old memory by the *add* term and
    # added the erase mask; the memory must be scaled by the erase mask
    # (1 - w e^T) and then have the write content (w v^T) added.
    erase_mask = 1 - expand_dims(ww, axis=2) @ expand_dims(erase, axis=1)
    add_term = expand_dims(ww, axis=2) @ expand_dims(write_vec, axis=1)
    return mem_tm1 * erase_mask + add_term
class NeuralComputer(object):
    """Neural Computer Module.

    DNC-style architecture: an LSTM controller coupled to an external
    memory with content addressing, dynamic allocation and temporal links.
    `step` advances the whole system by one timestep.
    """
    def __init__(self, input_size, output_size, mem_size, word_size, n_reads):
        # Ordered registry of trainable parameters: LSTM gate weights and
        # biases plus the output / read-vector / interface projections.
        self.params = collections.OrderedDict.fromkeys([
            'W_f', 'W_i', 'W_c', 'W_o',
            'b_f', 'b_i', 'b_c', 'b_o',
            'W_y', 'W_r', 'W_xi'
        ])
        self.input_size = input_size
        self.output_size = output_size
        self.mem_size = mem_size
        self.word_size = word_size
        self.n_readheads = n_reads
        output_sz = self.output_size
        # hidden-state size is tied to the input size in this implementation
        h_sz = self.input_size
        # LSTM input: x concatenated with h_tm1 and the R read vectors
        x_sz = self.input_size + h_sz + self.n_readheads * self.word_size
        wr_sz = self.n_readheads * self.word_size
        # interface width: R read keys + write key/erase/write vector
        # + R betas + R free gates + R*3 read modes + beta_w + g_a + g_w
        wxi_sz = (self.word_size * self.n_readheads + self.word_size * 3 +
                  self.n_readheads * 5 + 3)
        self.params['W_f'] = get_xavier_weights(x_sz, h_sz, name='W_f')
        self.params['W_i'] = get_xavier_weights(x_sz, h_sz, name='W_i')
        self.params['W_c'] = get_xavier_weights(x_sz, h_sz, name='W_c')
        self.params['W_o'] = get_xavier_weights(x_sz, h_sz, name='W_o')
        self.params['b_f'] = get_zero_weights(1, h_sz, name='b_f')
        self.params['b_i'] = get_zero_weights(1, h_sz, name='b_i')
        self.params['b_c'] = get_zero_weights(1, h_sz, name='b_c')
        self.params['b_o'] = get_zero_weights(1, h_sz, name='b_o')
        self.params['W_y'] = get_rand_weights(h_sz, output_sz, name='W_y')
        self.params['W_r'] = get_rand_weights(wr_sz, output_sz, name='W_r')
        self.params['W_xi'] = get_rand_weights(h_sz, wxi_sz, name='W_xi')
    def step(self, x, h_tm1, c_tm1, r_tm1_ls,
             mem_tm1, tpmem_tm1, ww_tm1, wr_tm1_ls,
             u_tm1, p_tm1):
        """Advance controller and memory by one timestep.

        Parameters
        ----------
        x : `[batch_size, input_size]`
        h_tm1 : `[batch_size, input_size]` previous hidden state.
        c_tm1 : `[batch_size, input_size]` previous cell state.
        r_tm1_ls : read vectors list, each element in the list has size of:
            `[batch_size, word_size]`
        mem_tm1 : `[batch_size, mem_size, word_size]`.
            (NOTE(review): an earlier docstring said `[batch_size,
            word_size, mem_size]`, but contentaddress/memwrite index this
            tensor as mem_size x word_size.)
        tpmem_tm1 : `[batch_size, mem_size, mem_size]` temporal link matrix.
        ww_tm1 : `[batch_size, mem_size]` previous write weighting.
        wr_tm1_ls : each element in the list has size of:
            `[batch_size, mem_size]`.
        u_tm1 : `[batch_size, mem_size]` usage vector.
        p_tm1 : `[batch_size, mem_size]` precedence weighting.

        Returns
        -------
        (y, h, c, r_ls, mem, tpmem, ww, wr_ls, u, p) : the output plus the
        next-step controller and memory state, mirroring the inputs.
        """
        # controller input: external input, previous hidden state, and the
        # read vectors recalled on the previous step
        x_ = concat([x, h_tm1] + r_tm1_ls, axis=1)
        f = sigmoid(x_ @ self.params['W_f'] + self.params['b_f'])  # forget gate
        i = sigmoid(x_ @ self.params['W_i'] + self.params['b_i'])  # input gate
        o = sigmoid(x_ @ self.params['W_o'] + self.params['b_o'])  # output gate
        c_ = tanh(x_ @ self.params['W_c'] + self.params['b_c'])    # candidate cell
        c = f * c_tm1 + i * c_
        h = o * tanh(c)
        # project hidden state to the memory-interface vector and parse it
        xi = h @ self.params['W_xi']
        intfc = intfcparse(xi, self.word_size, self.n_readheads)
        # write phase: allocation, content lookup, write weighting, write
        allocvec, u = allocmem(u_tm1, ww_tm1, wr_tm1_ls, intfc['fg'])
        content_w = contentaddress(intfc['key_w'], mem_tm1, intfc['beta_w'])
        ww = writeweight(allocvec, content_w, intfc['g_w'], intfc['g_a'])
        mem = memwrite(mem_tm1, ww, intfc['e'], intfc['v'])
        # read phase: temporal links, content lookup per head, read weights
        tpmem, p, fseq_ls, bseq_ls = tpmemrecall(tpmem_tm1, p_tm1, ww,
                                                 wr_tm1_ls)
        content_r_ls = [contentaddress(k_r, mem, b_r)
                        for k_r, b_r in zip(intfc['key_r'], intfc['beta_r'])]
        wr_ls = readweight(fseq_ls, bseq_ls, content_r_ls, intfc['pi'])
        r_ls = memread(mem, wr_ls)
        # output combines controller state with the freshly read vectors
        y = h @ self.params['W_y'] + concat(r_ls, axis=1) @ self.params['W_r']
        return y, h, c, r_ls, mem, tpmem, ww, wr_ls, u, p
| |
# Copyright 2016 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Domain objects for classifier models."""
import copy
import datetime
from core.platform import models
import feconf
import utils
(classifier_models,) = models.Registry.import_models(
[models.NAMES.classifier])
class ClassifierTrainingJob(object):
    """Domain object for a classifier training job.

    A classifier training job is an abstraction of a request made by Oppia
    for training a classifier model using certain dataset and a particular ML
    algorithm denoted by the algorithm id. The classifier training jobs are
    then picked up by the Virtual Machine (VM) through APIs exposed by Oppia.
    The training_data is populated lazily when the job is fetched from the
    database upon the request from the VM.

    Attributes:
        job_id: str. The unique id of the classifier training job.
        algorithm_id: str. The id of the algorithm that will be used for
            generating the classifier.
        interaction_id: str. The id of the interaction to which the algorithm
            belongs.
        exp_id: str. The id of the exploration that contains the state
            for which the classifier will be generated.
        exp_version: int. The version of the exploration when
            the training job was generated.
        next_scheduled_check_time: datetime.datetime. The next scheduled time to
            check the job.
        state_name: str. The name of the state for which the classifier will be
            generated.
        status: str. The status of the training job request. This can be either
            NEW (default value), FAILED, PENDING or COMPLETE.
        training_data: list(dict). The training data that is used for training
            the classifier. This field is populated lazily when the job request
            is picked up by the VM. The list contains dicts where each dict
            represents a single training data group, for example:
            training_data = [
                {
                    'answer_group_index': 1,
                    'answers': ['a1', 'a2']
                },
                {
                    'answer_group_index': 2,
                    'answers': ['a2', 'a3']
                }
            ]
        classifier_data: dict. The actual classifier model used for
            classification purpose.
        data_schema_version: int. Schema version of the data used by the
            classifier. This depends on the algorithm ID.
    """

    def __init__(
            self, job_id, algorithm_id, interaction_id, exp_id,
            exp_version, next_scheduled_check_time, state_name, status,
            training_data, classifier_data, data_schema_version):
        """Constructs a ClassifierTrainingJob domain object.

        Args:
            job_id: str. The unique id of the classifier training job.
            algorithm_id: str. The id of the algorithm that will be used for
                generating the classifier.
            interaction_id: str. The id of the interaction to which the
                algorithm belongs.
            exp_id: str. The id of the exploration id that contains the state
                for which classifier will be generated.
            exp_version: int. The version of the exploration when
                the training job was generated.
            next_scheduled_check_time: datetime.datetime. The next scheduled
                time to check the job.
            state_name: str. The name of the state for which the classifier
                will be generated.
            status: str. The status of the training job request. This can be
                either NEW (default), PENDING (when a job has been picked up)
                or COMPLETE.
            training_data: list(dict). The training data that is used for
                training the classifier. This is populated lazily when the job
                request is picked up by the VM. The list contains dicts where
                each dict represents a single training data group, for example:
                training_data = [
                    {
                        'answer_group_index': 1,
                        'answers': ['a1', 'a2']
                    },
                    {
                        'answer_group_index': 2,
                        'answers': ['a2', 'a3']
                    }
                ]
            classifier_data: dict. The actual classifier model used for
                classification purpose.
            data_schema_version: int. Schema version of the data used by the
                classifier. This depends on the algorithm ID.
        """
        self._job_id = job_id
        self._algorithm_id = algorithm_id
        self._interaction_id = interaction_id
        self._exp_id = exp_id
        self._exp_version = exp_version
        self._next_scheduled_check_time = next_scheduled_check_time
        self._state_name = state_name
        self._status = status
        # deep copy so later mutation of the caller's list cannot change
        # the job's training data (and vice versa)
        self._training_data = copy.deepcopy(training_data)
        self._classifier_data = classifier_data
        self._data_schema_version = data_schema_version

    @property
    def job_id(self):
        """str. The unique id of the classifier training job."""
        return self._job_id

    @property
    def algorithm_id(self):
        """str. The id of the algorithm used for generating the classifier."""
        return self._algorithm_id

    @property
    def interaction_id(self):
        """str. The id of the interaction to which the algorithm belongs."""
        return self._interaction_id

    @property
    def exp_id(self):
        """str. The id of the exploration that contains the state for which
        the classifier will be generated."""
        return self._exp_id

    @property
    def exp_version(self):
        """int. The version of the exploration when the training job was
        generated."""
        return self._exp_version

    @property
    def next_scheduled_check_time(self):
        """datetime.datetime. The next scheduled time to check the job."""
        return self._next_scheduled_check_time

    @property
    def state_name(self):
        """str. The name of the state for which the classifier will be
        generated."""
        return self._state_name

    @property
    def status(self):
        """str. The status of the training job request. This can be either
        NEW (default), PENDING (when a job has been picked up) or COMPLETE."""
        return self._status

    @property
    def training_data(self):
        """list(dict). The training data used for training the classifier;
        populated lazily when the job request is picked up by the VM. Each
        dict is a single training data group, e.g.
        {'answer_group_index': 1, 'answers': ['a1', 'a2']}."""
        return self._training_data

    @property
    def classifier_data(self):
        """dict. The actual classifier model used for classification."""
        return self._classifier_data

    @property
    def data_schema_version(self):
        """int. Schema version of the data used by the classifier. This
        depends on the algorithm ID."""
        return self._data_schema_version

    def update_status(self, status):
        """Updates the status attribute of the ClassifierTrainingJob domain
        object.

        Args:
            status: str. The status of the classifier training job.

        Raises:
            Exception: if the transition from the current status to the new
                one is not allowed by ALLOWED_TRAINING_JOB_STATUS_CHANGES.
        """
        initial_status = self._status
        if status not in (
                feconf.ALLOWED_TRAINING_JOB_STATUS_CHANGES[initial_status]):
            raise Exception(
                'The status change %s to %s is not valid.' % (
                    initial_status, status))
        self._status = status

    def update_next_scheduled_check_time(self, next_scheduled_check_time):
        """Updates the next_scheduled_check_time attribute of the
        ClassifierTrainingJob domain object.

        Args:
            next_scheduled_check_time: datetime.datetime. The next scheduled
                time to check the job.
        """
        self._next_scheduled_check_time = next_scheduled_check_time

    def update_classifier_data(self, classifier_data):
        """Updates the classifier_data attribute of the ClassifierTrainingJob
        domain object.

        Args:
            classifier_data: dict. The classifier model used for classification.
        """
        self._classifier_data = classifier_data

    def to_dict(self):
        """Constructs a dict representation of training job domain object.

        Returns:
            A dict representation of training job domain object.
        """
        return {
            'job_id': self._job_id,
            'algorithm_id': self._algorithm_id,
            'interaction_id': self._interaction_id,
            'exp_id': self._exp_id,
            'exp_version': self._exp_version,
            'next_scheduled_check_time': self._next_scheduled_check_time,
            'state_name': self._state_name,
            'status': self._status,
            'training_data': self._training_data,
            'classifier_data': self._classifier_data,
            'data_schema_version': self._data_schema_version
        }

    def validate(self):
        """Validates the training job before it is saved to storage.

        Raises:
            ValidationError: if any attribute has an invalid type or value.
        """
        if not isinstance(self.job_id, basestring):
            raise utils.ValidationError(
                'Expected id to be a string, received %s' % self.job_id)

        if not isinstance(self.exp_id, basestring):
            raise utils.ValidationError(
                'Expected exp_id to be a string, received %s' % self.exp_id)

        if not isinstance(self.exp_version, int):
            raise utils.ValidationError(
                'Expected exp_version to be an int, received %s' %
                self.exp_version)

        if not isinstance(self.next_scheduled_check_time, datetime.datetime):
            raise utils.ValidationError(
                'Expected next_scheduled_check_time to be datetime,' +
                ' received %s' % self.next_scheduled_check_time)

        if not isinstance(self.state_name, basestring):
            raise utils.ValidationError(
                'Expected state to be a string, received %s' % self.state_name)
        utils.require_valid_name(self.state_name, 'the state name')

        if self.status not in feconf.ALLOWED_TRAINING_JOB_STATUSES:
            # Fix: the format operands were not wrapped in a tuple (so the
            # second %s had no argument) and the reported value was
            # exp_version instead of the offending status.
            raise utils.ValidationError(
                'Expected status to be in %s, received %s' % (
                    feconf.ALLOWED_TRAINING_JOB_STATUSES, self.status))

        if not isinstance(self.interaction_id, basestring):
            raise utils.ValidationError(
                'Expected interaction_id to be a string, received %s' %
                self.interaction_id)

        if self.interaction_id not in feconf.INTERACTION_CLASSIFIER_MAPPING:
            raise utils.ValidationError(
                'Invalid interaction id: %s' % self.interaction_id)

        if not isinstance(self.algorithm_id, basestring):
            raise utils.ValidationError(
                'Expected algorithm_id to be a string, received %s' %
                self.algorithm_id)

        algorithm_ids = [
            classifier_details['algorithm_id'] for classifier_details in
            feconf.INTERACTION_CLASSIFIER_MAPPING.values()]
        if self.algorithm_id not in algorithm_ids:
            raise utils.ValidationError(
                'Invalid algorithm id: %s' % self.algorithm_id)

        if not isinstance(self.training_data, list):
            raise utils.ValidationError(
                'Expected training_data to be a list, received %s' % (
                    self.training_data))

        for grouped_answers in self.training_data:
            if 'answer_group_index' not in grouped_answers:
                # Fix: a stray comma previously split this message into two
                # positional arguments to ValidationError.
                raise utils.ValidationError(
                    'Expected answer_group_index to be a key in training_data'
                    ' list item')
            if 'answers' not in grouped_answers:
                raise utils.ValidationError(
                    'Expected answers to be a key in training_data list item')
            if not isinstance(grouped_answers['answer_group_index'], int):
                raise utils.ValidationError(
                    'Expected answer_group_index to be an int, received %s' %
                    grouped_answers['answer_group_index'])
            if not isinstance(grouped_answers['answers'], list):
                raise utils.ValidationError(
                    'Expected answers to be a list, received %s' %
                    grouped_answers['answers'])

        # Classifier data can be either None (before it is stored) or a dict.
        if not isinstance(self.classifier_data, dict) and self.classifier_data:
            raise utils.ValidationError(
                'Expected classifier_data to be a dict|None, received %s' % (
                    self.classifier_data))

        if not isinstance(self.data_schema_version, int):
            raise utils.ValidationError(
                'Expected data_schema_version to be an int, received %s' %
                self.data_schema_version)
class TrainingJobExplorationMapping(object):
    """Domain object linking an exploration state to its training job.

    Each mapping is a one-to-one relation from the triple
    <exp_id, exp_version, state_name> to the id of the classifier training
    job that serves that state.

    Attributes:
        exp_id: str. ID of the exploration.
        exp_version: int. The exploration version at the time the
            corresponding classifier's training job was created.
        state_name: str. The name of the state to which the classifier
            belongs.
        job_id: str. The unique ID of the training job in the
            job-exploration mapping.
    """

    def __init__(self, exp_id, exp_version, state_name, job_id):
        """Stores the identifying triple and the associated job id.

        Args:
            exp_id: str. ID of the exploration.
            exp_version: int. The exploration version at the time the
                corresponding classifier's training job was created.
            state_name: str. The name of the state to which the classifier
                belongs.
            job_id: str. The unique ID of the training job.
        """
        self._exp_id = exp_id
        self._exp_version = exp_version
        self._state_name = state_name
        self._job_id = job_id

    @property
    def exp_id(self):
        """str. The id of the exploration."""
        return self._exp_id

    @property
    def exp_version(self):
        """int. The exploration version at the time the corresponding
        classifier's training job was created."""
        return self._exp_version

    @property
    def state_name(self):
        """str. The name of the state to which the classifier belongs."""
        return self._state_name

    @property
    def job_id(self):
        """str. The unique ID of the training job in the job-exploration
        mapping."""
        return self._job_id

    def to_dict(self):
        """Returns a dict representation of this mapping."""
        return {
            'exp_id': self._exp_id,
            'exp_version': self._exp_version,
            'state_name': self._state_name,
            'job_id': self._job_id
        }

    def validate(self):
        """Validates the mapping before it is saved to storage."""
        if not isinstance(self.exp_id, basestring):
            raise utils.ValidationError(
                'Expected exp_id to be a string, received %s' % self.exp_id)

        if not isinstance(self.exp_version, int):
            raise utils.ValidationError(
                'Expected exp_version to be an int, received %s' % (
                    self.exp_version))

        if not isinstance(self.state_name, basestring):
            raise utils.ValidationError(
                'Expected state_name to be a string, received %s' % (
                    self.state_name))

        if not isinstance(self.job_id, basestring):
            raise utils.ValidationError(
                'Expected job_id to be a string, received %s' % (
                    self.job_id))
| |
# Copyright 2012 Tsutomu Uchino
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import traceback
import uno
from bookmarks.cmdparse import \
bk_urlencode, bk_parse_qsl, bk_parse_qs, bk_command_parse
from bookmarks import \
CONFIG_NODE_CONTROLLERS, NAME_NAME
def load_controller_name(ctx, command):
    """ Get controller specific settings.

    Returns the configured name for the controller `command`, or an
    empty string when no configuration entry exists. """
    controllers = get_config(ctx, CONFIG_NODE_CONTROLLERS)
    if not controllers.hasByName(command):
        return ""
    return controllers.getByName(command).getPropertyValue(NAME_NAME)
class BookmarksCommands(object):
    """ Helpers to parse, inspect and generate bookmark command URLs.

    A bookmark command is a URL-like string (".uno:...", script/macro/file
    URLs, or the extension's own bookmarks protocol) whose query part
    carries typed arguments such as "URL:string=...". """

    from bookmarks import PROTOCOL_BOOKMARKS, DIRECTORY_POPUP_URI, \
        TAG_POPUP_URI

    # Known URL schemes (including their trailing separator).
    PROTOCOL_SCRIPT = "vnd.sun.star.script:"
    PROTOCOL_MACRO = "macro:"
    PROTOCOL_FILE = "file:"
    PROTOCOL_COMMAND = ".uno:"

    # Typed query argument names, "name:type" form.
    QUERY_NAME_URL = "URL:string"
    QUERY_NAME_PATH = "Path:string"
    QUERY_NAME_FRAME_NAME = "FrameName:string"
    QUERY_NAME_ARGUMENTS = "Arguments:string"
    QUERY_NAME_FILTER = "Filter:string"
    QUERY_NAME_FILTER_NAME = "FilterName:string"
    QUERY_NAME_FOLDER_NAME = "FolderName:string"
    QUERY_NAME_TAG = "Tag:string"
    QUERY_NAME_SUGGESTED_SAVE_AS_NAME = "SuggestedSaveAsName:string"

    # Frequently used command strings.
    COMMAND_OPEN_DOCUMENT = ".uno:Open"
    COMMAND_OPEN_DOCUMENT_WITH_URL = ".uno:Open?FrameName:string=_default&URL:string=%s"
    COMMAND_PROGRAM = "%sProgram" % PROTOCOL_BOOKMARKS
    COMMAND_SOMETHING = "%s%%s" % PROTOCOL_BOOKMARKS
    COMMAND_OPEN_FROM = ".uno:Open"
    COMMAND_SAVE_AS_INTO = ".uno:SaveAs"

    # Item type identifiers returned by extract_from_command().
    TYPE_DOCUMENT = "document"
    TYPE_COMMAND = "command"
    TYPE_MACRO = "macro"
    TYPE_EDIT = "edit"
    TYPE_PROGRAM = "program"
    TYPE_FILE = "file"
    TYPE_FOLDER = "folder"
    TYPE_WEB = "web"
    TYPE_DIRECTORY_POPUP = "directory_popup"
    TYPE_TAG = "tag"

    # Thin static wrappers around the module level helpers imported from
    # bookmarks.cmdparse, kept so that they can be reached through self.
    # Note: inside the function bodies the names resolve to the module
    # level functions, not to these class attributes.
    def bk_parse_qs(text, type=None):
        return bk_parse_qs(text, type)
    bk_parse_qs = staticmethod(bk_parse_qs)

    def bk_parse_qsl(text, type=None):
        return bk_parse_qsl(text, type)
    bk_parse_qsl = staticmethod(bk_parse_qsl)

    def bk_urlencode(qs, type=None):
        return bk_urlencode(qs, type)
    bk_urlencode = staticmethod(bk_urlencode)

    def get_query_value(self, command, name):
        """ Return the value of query argument `name` in `command`,
        or "" when the command has no query or the name is absent. """
        parts = command.split("?", 1)
        if len(parts) == 2:
            qs = self.bk_parse_qs(parts[1])
            if name in qs:
                return qs[name]
        return ""

    def bk_command_parse(self, command):
        """ Split command into (main, protocol, path, query). """
        return bk_command_parse(command)

    def extract_from_command(self, command):
        """ Extract data from command and detect command type.

        Returns (item_type, value1, value2); the meaning of the two
        values depends on the detected type (e.g. URL and filter name
        for a document command). """
        def get_qs(name):
            # Missing query arguments are treated as empty strings.
            try:
                return qs[name]
            except:
                return ""
        item_type = ""
        value1 = ""
        value2 = ""
        main, protocol, path, query = self.bk_command_parse(command)
        qs = self.bk_parse_qs(query, main)
        protocol = protocol + ":"
        if protocol == self.PROTOCOL_COMMAND:
            if command.startswith(self.COMMAND_OPEN_DOCUMENT) and \
                qs and self.QUERY_NAME_URL in qs:
                # ".uno:Open" with a URL argument opens a document.
                value1 = get_qs(self.QUERY_NAME_URL)
                item_type = self.TYPE_DOCUMENT
                value2 = get_qs(self.QUERY_NAME_FILTER_NAME)
            else:
                value1 = main  # the command without its query part
                value2 = query
                item_type = self.TYPE_COMMAND
        elif protocol == self.PROTOCOL_SCRIPT or \
            protocol == self.PROTOCOL_MACRO:
            value1 = command
            item_type = self.TYPE_MACRO
        elif protocol == self.PROTOCOL_BOOKMARKS:
            # The path part selects the sub command of the bookmarks scheme.
            flag = path
            if flag == "Program":
                if qs and self.QUERY_NAME_PATH in qs:
                    value1 = get_qs(self.QUERY_NAME_PATH)
                    if self.QUERY_NAME_ARGUMENTS in qs:
                        value2 = get_qs(self.QUERY_NAME_ARGUMENTS)
                item_type = self.TYPE_PROGRAM
            elif flag in ("File", "Folder", "Web"):
                value1 = get_qs(self.QUERY_NAME_PATH)
                item_type = flag.lower()
            elif flag == "Edit" or flag == "AddThis":
                item_type = flag.lower()
            else:
                item_type = "bookmarks"
                value1 = command
        elif main == self.TAG_POPUP_URI:
            if qs:
                item_type = self.TYPE_TAG
                value1 = get_qs(self.QUERY_NAME_TAG)
        elif main == self.DIRECTORY_POPUP_URI:
            if qs:
                item_type = self.TYPE_DIRECTORY_POPUP
                value1 = get_qs(self.QUERY_NAME_URL)
                value2 = get_qs(self.QUERY_NAME_FILTER)
        else:
            value1 = command
        return item_type, value1, value2

    def extract(self, item):
        """ Extract values from item.

        Returns (item_type, (name, description, value1, value2, tags)). """
        item_type, value1, value2 = self.extract_from_command(item.get_command())
        return (item_type, (item.get_name(), item.get_description(),
            value1, value2, ",".join(item.get_tags())))

    def extract_as_row(self, res, item, graphics, show_value=True, show_description=True, show_tags=True):
        """ Command to strings for grid view.

        Returns a tuple (icon, name[, tags][, value][, description]),
        where the optional fields are controlled by the show_* flags. """
        def _(name):
            # Translate through the resource map, fall back to the key.
            return res.get(name, name)
        def get_qs(name):
            try:
                return qs[name]
            except:
                return ""
        command = item.get_command()
        value = ""
        icon = None
        main, protocol, path, query = self.bk_command_parse(command)
        qs = self.bk_parse_qs(query, main)
        protocol = protocol + ":"
        if protocol == self.PROTOCOL_COMMAND:
            if qs and main.startswith(self.COMMAND_OPEN_FROM) and \
                self.QUERY_NAME_URL in qs:
                value = get_qs(self.QUERY_NAME_URL)
                icon = graphics["document"]
                try:
                    # Show file URLs as system paths where possible.
                    if value.startswith(self.PROTOCOL_FILE):
                        value = uno.fileUrlToSystemPath(value)
                except:
                    pass
            else:
                value = command
                icon = graphics["command"]
        elif protocol == self.PROTOCOL_SCRIPT:
            language = get_qs("language")
            # main[20:] strips the "vnd.sun.star.script:" prefix (20 chars).
            value = "%s: %s" % (language, main[20:])
            icon = graphics["macro"]
        elif protocol == self.PROTOCOL_MACRO:
            # main[6:] strips the "macro:" prefix.
            # NOTE(review): no icon is assigned for macro: URLs, so icon
            # stays None here — confirm this is intended.
            value = "Basic: %s" % main[6:]
        elif protocol == self.PROTOCOL_BOOKMARKS:
            item_type = path.lower()
            if item_type == "program":
                arguments = get_qs(self.QUERY_NAME_ARGUMENTS)
                if arguments:
                    value = "%s: %s, \n%s: %s" % (
                        _("Program"), get_qs(self.QUERY_NAME_PATH),
                        _("Arguments"), arguments)
                else:
                    value = "%s: %s" % (_("Program"),
                        get_qs(self.QUERY_NAME_PATH))
            elif item_type == "file":
                # The icon already indicates the type, so show the bare path.
                value = get_qs(self.QUERY_NAME_PATH)
            elif item_type == "folder":
                value = get_qs(self.QUERY_NAME_PATH)
            elif item_type == "web":
                value = get_qs(self.QUERY_NAME_PATH)
            elif item_type == "edit" or item_type == "addthis":
                value = ""
            else:
                value = command
            icon = graphics[path.lower()]
        elif main == self.TAG_POPUP_URI:
            if qs:
                value = get_qs(self.QUERY_NAME_TAG)
            else:
                value = command
            icon = graphics["tag"]
        elif main == self.DIRECTORY_POPUP_URI:
            if qs:
                value = get_qs(self.QUERY_NAME_URL)
                try:
                    value = uno.fileUrlToSystemPath(value)
                except:
                    pass
            else:
                value = command
            icon = graphics["directory_popup"]
        else:
            value = command
            icon = graphics["command"]
        data = [icon, item.get_name()]
        if show_tags:
            data.append(",".join(item.get_tags()))
        if show_value:
            data.append(value)
        if show_description:
            data.append(item.get_description())
        return tuple(data)

    def generate_command(self, d):
        """ Generate command from new value.

        d["type"] selects the kind of command to build; the remaining
        entries supply its arguments. Returns the command string. """
        qs = {}
        item_type = d["type"]
        main = None
        # `command` is only consulted when no query arguments are collected.
        command = None
        if item_type == "document":
            path = d["path"]
            try:
                if not path.startswith(self.PROTOCOL_FILE):
                    path = uno.systemPathToFileUrl(path)
            except:
                pass
            qs[self.QUERY_NAME_FRAME_NAME] = "_default"
            filter_name = d.get("filter", None)
            main = self.COMMAND_OPEN_DOCUMENT
            qs[self.QUERY_NAME_URL] = path
            if filter_name:
                qs[self.QUERY_NAME_FILTER_NAME] = filter_name
        elif item_type == "macro":
            command = d["command"]
        elif item_type == "command":
            if d["arguments"]:
                command = "%s?%s" % (d["command"], d["arguments"])
            else:
                command = d["command"]
        elif item_type == "program":
            path = d["path"]
            if path.startswith(self.PROTOCOL_FILE):
                try:
                    path = uno.fileUrlToSystemPath(path)
                except:
                    pass
            main = self.COMMAND_PROGRAM
            qs[self.QUERY_NAME_PATH] = path
            qs[self.QUERY_NAME_ARGUMENTS] = d["arguments"]
        elif item_type == "something":
            main = self.COMMAND_SOMETHING % d["flag"].capitalize()
            qs[self.QUERY_NAME_PATH] = d["path"]
        elif item_type == "special":
            flag = d["flag"]
            path = d["path"]
            try:
                path = uno.systemPathToFileUrl(path)
            except:
                pass
            if flag == "open_from_folder":
                main = self.COMMAND_OPEN_FROM
                qs[self.QUERY_NAME_FOLDER_NAME] = path
            elif flag == "saveas_into_folder":
                main = self.COMMAND_SAVE_AS_INTO
                qs[self.QUERY_NAME_FOLDER_NAME] = path
            elif flag == "directory_popup":
                main = self.DIRECTORY_POPUP_URI
                if "create" in d:
                    qs[self.QUERY_NAME_URL] = d["path"]
                if "filter" in d:
                    qs[self.QUERY_NAME_FILTER] = d["filter"]
            else:
                command = d["path"]
        elif item_type == "tag":
            main = self.TAG_POPUP_URI
            qs[self.QUERY_NAME_TAG] = d["tag_name"]
        else:
            command = "ERRROR"
        if qs:
            command = main + "?" + self.bk_urlencode(qs, main)
        elif command is None:
            # No query arguments were collected but a main command was
            # chosen (e.g. a directory popup without URL/filter); fall back
            # to the bare main command instead of raising UnboundLocalError.
            command = main if main is not None else ""
        return command
from bookmarks import CONFIG_NODE_SETTINGS, \
NAME_WEB_BROWSER, NAME_FILE_MANAGER, NAME_OPEN_COMMAND, \
NAME_USE_CUSTOM_WEB_BROWSER, NAME_USE_CUSTOM_FILE_MANAGER, \
NAME_USE_CUSTOM_OPEN_COMMAND
from bookmarks.tools import get_config
class DispatchExecutor(BookmarksCommands):
    """ Executes commands through the frame's dispatch framework. """

    def __init__(self, ctx):
        BookmarksCommands.__init__(self)
        self.ctx = ctx
        # The DispatchHelper service is created lazily on first dispatch.
        self.helper = None

    def dispatch(self, frame, command, target_frame="_self", flag=0, args=()):
        """ Dispatch `command` on `frame` through a DispatchHelper. """
        if self.helper is None:
            smgr = self.ctx.getServiceManager()
            self.helper = smgr.createInstanceWithContext(
                "com.sun.star.frame.DispatchHelper", self.ctx)
        self.helper.executeDispatch(frame, command, target_frame, flag, args)
class IllegalDocumentException(Exception):
    """ Raised when the document has no location (URL) of its own and
    therefore cannot be bookmarked. """
import threading
class EditWindowThread(threading.Thread):
    """ Start edit window in another thread.

    When the Bookmarks PMC tries to open the edit window directly from a
    sub entry of its menu, the PMC is removed while the edit window is
    still being created, which crashes the office. Creating the window
    from a separate thread avoids that. """
    def __init__(self, ctx, command):
        threading.Thread.__init__(self)
        self.ctx = ctx
        self.command = command

    def run(self):
        # Deferred import: the controller implementation is only loaded
        # when the thread actually runs.
        from bookmarks.imple import BookmarksControllerImple
        imple = BookmarksControllerImple.get(self.ctx, self.command)
        imple.move_to_front()
class ExecuteAddThis(DispatchExecutor):
    """ Bookmarks the document shown in the frame ("Add This"). """

    class DocumentNotStoredException(Exception):
        """ This document is not stored. """
        pass

    def __init__(self, ctx, frame, command):
        DispatchExecutor.__init__(self, ctx)
        self.frame = frame
        self.command = command

    def execute_command(self, command):
        self.dispatch(self.frame, command)

    def _get_title(self):
        """ Check the document has a valid URL and is stored.

        Returns (file_url, filter_name, title). Raises
        IllegalDocumentException for documents without a URL and
        DocumentNotStoredException when the user does not store it. """
        model = None
        try:
            model = self.frame.getController().getModel()
        except:
            pass
        if model is None or not hasattr(model, "getURL"):
            raise IllegalDocumentException("Unable to bookmark this document.")
        file_url = model.getURL()
        if not file_url:
            # Not stored yet: ask the user to save the document first.
            self.execute_command(".uno:SaveAs")
            file_url = model.getURL()
        if not file_url:
            # The nested exception class is only reachable as a class
            # attribute; the bare name is not visible inside a method.
            raise self.DocumentNotStoredException("Not stored.")
        title = ""
        try:
            if hasattr(model, "getDocumentProperties"):
                props = model.getDocumentProperties()
                if hasattr(props, "Title"):
                    title = props.Title
            if not title:
                title = model.getTitle()
                # Strip a three letter file extension; IndexError for short
                # titles is swallowed by the surrounding except.
                if title[-4] == ".":
                    title = title[0:-4]
        except:
            pass
        filter_name = ""
        try:
            if hasattr(model, "getArgs"):
                args = model.getArgs()
                for arg in args:
                    if arg.Name == "FilterName":
                        filter_name = arg.Value
                        break
        except:
            pass
        return file_url, filter_name, title

    def run(self):
        """ Collect document info and show the bookmark-this dialog. """
        try:
            file_url, filter_name, title = self._get_title()
        except Exception as e:
            print(e)
            return
        import bookmarks.manager
        from bookmarks.resource import CurrentStringResource
        res = CurrentStringResource.get(self.ctx)
        manager = bookmarks.manager.BookmarksManager.get(
            self.ctx, self.command,
            load_controller_name(self.ctx, self.command)
        )
        from bookmarks.imple import BookmarksControllerImple
        BookmarksControllerImple.lock(self.command)
        try:
            import bookmarks.dialogs
            bookmarks.dialogs.BookmarkThisDialog(
                self.ctx, res,
                manager=manager,
                command=self.command,
                file_url=file_url,
                name=title,
                filter_name=filter_name
            ).execute()
        except Exception as e:
            print(e)
            traceback.print_exc()
        finally:
            # Always release the lock, even on unexpected errors.
            BookmarksControllerImple.unlock(self.command)
class BookmarksCommandExecutor(DispatchExecutor):
    """ Executes bookmark items: dispatch URLs, programs, files and web
    addresses. """

    # Environment specific default commands, detected lazily and shared by
    # all instances; load_config() can override them from the user
    # configuration.
    OPEN_COMMAND = None
    FILE_MANAGER = None
    WEB_BROWSER = None

    def __init__(self, parent, ctx, frame, command):
        DispatchExecutor.__init__(self, ctx)
        self.parent = parent
        self.frame = frame
        self.command = command
        self.executor = None
        env = self.detect_env()
        self.is_win32 = env == "win32"
        self.load_config()
        c = self.__class__
        if c.OPEN_COMMAND is None or c.FILE_MANAGER is None or c.WEB_BROWSER is None:
            # Fill missing defaults from the environment specific module
            # bookmarks.env.<env>, if one exists.
            try:
                mod = getattr(__import__("bookmarks.env.%s" % env).env, env)
                if c.OPEN_COMMAND is None:
                    self.__class__.OPEN_COMMAND = mod.OPEN
                if c.FILE_MANAGER is None:
                    self.__class__.FILE_MANAGER = mod.FILE_MANAGER
                if c.WEB_BROWSER is None:
                    self.__class__.WEB_BROWSER = self.__class__.OPEN_COMMAND
            except:
                pass

    def popen_execute(self, path, args):
        """ Start an external program through subprocess.Popen. """
        import subprocess
        if isinstance(path, list):
            _args = list(path)
            if args:
                _args.append(args)
        else:
            if args:
                _args = [path, args]
            else:
                _args = [path]
        subprocess.Popen(_args).pid

    def win_execute(self, path, args):
        """ Start through the SystemShellExecute service (Windows). """
        self.ctx.getServiceManager().createInstanceWithContext(
            "com.sun.star.system.SystemShellExecute", self.ctx).\
                execute(path, args, 1)

    def other_execute(self, path, args):
        """ Start through os.system on a worker thread. """
        import os
        try:
            import thread
        except:
            import _thread as thread
        if args:
            # Fill in the command template; executing the bare "%s %s"
            # literal would run nothing useful.
            command = "%s %s" % (path, args)
        else:
            command = path
        thread.start_new_thread(lambda command: os.system(command), (command,))

    def detect_env(self):
        """ Detect environment type.

        Returns "win32"/"darwin" on those platforms, otherwise the name of
        the desktop session ("kde4", "kde3", ...) or "other". """
        import sys
        type = ""
        platform = sys.platform
        if platform == "win32":
            type = platform
        elif platform == "darwin":
            type = platform
        else:
            import os
            # First defined variable wins, preserving the original
            # DESKTOP_SESSION -> GDMSESSION -> XDG_SESSION_DESKTOP order.
            for name in ("DESKTOP_SESSION", "GDMSESSION",
                         "XDG_SESSION_DESKTOP"):
                if name in os.environ:
                    type = os.environ[name]
                    break
            if type == "default" or type.lower() == "kde":
                try:
                    # NOTE(review): lexicographic compare; adequate for the
                    # single digit KDE versions this code expects.
                    if os.environ["KDE_SESSION_VERSION"] >= "4":
                        type = "kde4"
                    else:
                        type = "kde3"
                except:
                    type = None
            if not type:
                # how about other session
                type = "other"
        return type

    def execute_item(self, item):
        """ Execute command on dispatch framework of the frame. """
        self.execute_command(item.get_command())

    def execute_command(self, command):
        """ Execute a bookmarks command or dispatch any other command. """
        if command.startswith(self.PROTOCOL_BOOKMARKS):
            command_type, value1, value2 = self.extract_from_command(command)
            self.execute_bookmarks_command(command_type, value1, value2)
        else:
            _command = self.decode_command(command)
            self.dispatch(self.frame, _command)

    def decode_command(self, command):
        """ Re-encode the query part as plain name=value pairs.

        Returns the command unchanged when it cannot be parsed. """
        try:
            main, protocol, path, query = self.bk_command_parse(command)
            qs = self.bk_parse_qsl(query)
            _qs = []
            for name, value in qs:
                _qs.append(name + "=" + value)
            if _qs:
                return main + "?" + "&".join(_qs)
            else:
                return main
        except:
            return command

    def execute_bookmarks_command(self, type, value1, value2):
        """ Execute command in category bookmarks. """
        try:
            # Dispatch to the matching exec_<type> method, e.g. exec_file.
            fn = getattr(self, "exec_%s" % type)
            fn(value1, value2)
        except Exception as e:
            raise e

    def exec_edit(self, value1, value2):
        # allow to edit other bookmarks
        EditWindowThread(self.ctx, self.command).start()

    def exec_addthis(self, value1, value2):
        ExecuteAddThis(self.ctx, self.frame, self.command).run()

    def _get_executor(self):
        """ Choose the best available way to start external programs. """
        try:
            # Probe only: subprocess availability decides the strategy.
            from subprocess import Popen
            self.executor = self.popen_execute
        except:
            import os
            if os.sep == "\\":
                self.executor = self.win_execute
            else:
                self.executor = self.other_execute

    def _execute(self, value1, value2):
        if self.executor is None:
            self._get_executor()
        self.executor(value1, value2)

    def exec_program(self, value1, value2):
        self._execute(value1, value2)

    def exec_file(self, value1, value2):
        if self.is_win32:
            value1 = self.escape_win32_path(value1)
        if self.OPEN_COMMAND is not None:
            self._execute(self.OPEN_COMMAND, value1)

    def exec_folder(self, value1, value2):
        if self.is_win32:
            value1 = self.escape_win32_path(value1)
        if self.FILE_MANAGER is not None:
            self._execute(self.FILE_MANAGER, value1)

    def exec_web(self, value1, value2):
        if self.WEB_BROWSER is not None:
            self._execute(self.WEB_BROWSER, value1)
        # ToDo webbrowser module

    def escape_win32_path(self, value):
        """ Escape characters the Windows shell treats specially. """
        value = value.replace("&", "^&")
        value = value.replace("~", "^~")
        value = value.replace("(", "^(")
        value = value.replace(")", "^)")
        return value

    def get_config_settings(self):
        return get_config(self.ctx, CONFIG_NODE_SETTINGS)

    def load_config(self):
        """ Override the class level defaults from user configuration. """
        config = self.get_config_settings()
        if config.getPropertyValue(NAME_USE_CUSTOM_OPEN_COMMAND):
            self.__class__.OPEN_COMMAND = config.getPropertyValue(NAME_OPEN_COMMAND)
        if config.getPropertyValue(NAME_USE_CUSTOM_FILE_MANAGER):
            self.__class__.FILE_MANAGER = config.getPropertyValue(NAME_FILE_MANAGER)
        if config.getPropertyValue(NAME_USE_CUSTOM_WEB_BROWSER):
            self.__class__.WEB_BROWSER = config.getPropertyValue(NAME_WEB_BROWSER)
| |
#!/usr/bin/env python
"""Registry for filters and abstract classes for basic filter functionality."""
import collections
import glob
import itertools
import os
import yaml
import logging
from grr.lib import config_lib
from grr.lib import registry
from grr.lib.aff4_objects import collections as collections_aff4
from grr.lib.checks import filters
from grr.lib.checks import hints
from grr.lib.checks import triggers
from grr.lib.rdfvalues import anomaly as rdf_anomaly
from grr.lib.rdfvalues import protodict as rdf_protodict
from grr.lib.rdfvalues import structs as rdf_structs
from grr.proto import anomaly_pb2
from grr.proto import checks_pb2
class Error(Exception):
  """Base error class for the checks subsystem."""
class DefinitionError(Error):
  """A check was defined badly (invalid configuration or structure)."""
class ProcessingError(Error):
  """A check generated bad results while processing host data."""
def ValidateMultiple(component, hint):
  """Validate every item; aggregate all failures into one DefinitionError.

  Args:
    component: An iterable of objects exposing a Validate() method.
    hint: Text prepended to the aggregated error message.

  Raises:
    DefinitionError: If any item failed validation; the message contains
      every individual error string.
  """
  problems = []
  for entry in component:
    try:
      entry.Validate()
    except (DefinitionError) as e:
      problems.append(str(e))
  if problems:
    raise DefinitionError("%s:\n %s" % (hint, "\n ".join(problems)))
def MatchStrToList(match=None):
  """Normalize a match specification into a list of match-type strings.

  None defaults to ["ANY"]; a whitespace-separated string is split into
  its parts; any other value (e.g. an existing list) is returned as-is.
  """
  if match is None:
    return ["ANY"]
  if isinstance(match, basestring):
    return match.split()
  return match
class CheckResult(rdf_structs.RDFProtoStruct):
  """Results of a single check performed on a host."""
  protobuf = checks_pb2.CheckResult

  # Python 2 truth hook: a result is truthy iff it carries any anomalies.
  def __nonzero__(self):
    return bool(self.anomaly)

  def ExtendAnomalies(self, other):
    """Merge anomalies from another CheckResult."""
    # `other` is an iterable of results; None entries are skipped.
    for o in other:
      if o is not None:
        self.anomaly.Extend(list(o.anomaly))
class CheckResultsCollection(collections_aff4.RDFValueCollection):
  """A collection of check results."""
  # Values stored in this collection are CheckResult messages.
  _rdf_type = CheckResult
class CheckResults(rdf_structs.RDFProtoStruct):
  """All results for a single host."""
  protobuf = checks_pb2.CheckResults

  # Python 2 truth hook: truthy iff any individual result is present.
  def __nonzero__(self):
    return bool(self.result)
class Check(rdf_structs.RDFProtoStruct):
  """A definition of a problem, and ways to detect it.

  Checks contain an identifier of a problem (check_id) that is a reference to an
  externally or internally defined vulnerability.

  Checks use one or more Methods to determine if an issue exists. Methods define
  data collection and processing, and return an Anomaly if the conditions tested
  by the method weren't met.

  Checks can define a default platform, OS or environment to target. This
  is passed to each Method, but can be overridden by more specific definitions.
  """
  protobuf = checks_pb2.Check

  def __init__(self, initializer=None, age=None, check_id=None, target=None,
               match=None, method=None, hint=None):
    super(Check, self).__init__(initializer=initializer, age=age)
    self.check_id = check_id
    # Normalize the match specification and pre-build the matcher that
    # decides whether method results amount to an anomaly.
    self.match = MatchStrToList(match)
    self.hint = Hint(hint, reformat=False)
    self.target = target
    if method is None:
      method = []
    self.triggers = triggers.Triggers()
    self.matcher = Matcher(self.match, self.hint)
    for cfg in method:
      # Use the value of "target" as a default for each method, if defined.
      # Targets defined in methods or probes override this default value.
      if hint:
        # Method-level hints inherit missing fields from the check hint.
        cfg["hint"] = hints.Overlay(child=cfg.get("hint", {}), parent=hint)
      if target:
        cfg.setdefault("target", target)
      # Create the method and add its triggers to the check.
      m = Method(**cfg)
      self.method.append(m)
      self.triggers.Update(m.triggers, callback=m)
    # All artifact names any of this check's methods may require.
    self.artifacts = set([t.artifact for t in self.triggers.conditions])

  def SelectChecks(self, conditions):
    """Identifies which check methods to use based on host attributes.

    Queries the trigger map for any check methods that apply to a combination of
    OS, CPE and/or label.

    Args:
      conditions: A list of Condition objects.

    Returns:
      A list of method callbacks that should perform checks.
    """
    return self.triggers.Calls(conditions)

  def UsesArtifact(self, artifacts):
    """Determines if the check uses the specified artifact.

    Args:
      artifacts: Either a single artifact name, or a list of artifact names

    Returns:
      True if the check uses a specific artifact.
    """
    # If artifact is a single string, see if it is in the list of artifacts
    # as-is. Otherwise, test whether any of the artifacts passed in to this
    # function exist in the list of artifacts.
    if isinstance(artifacts, basestring):
      return artifacts in self.artifacts
    else:
      return any(True for artifact in artifacts if artifact in self.artifacts)

  def Parse(self, conditions, host_data):
    """Runs methods that evaluate whether collected host_data has an issue.

    Args:
      conditions: A list of conditions to determine which Methods to trigger.
      host_data: A map of artifacts and rdf data.

    Returns:
      A CheckResult populated with Anomalies if an issue exists.
    """
    result = CheckResult(check_id=self.check_id)
    methods = self.SelectChecks(conditions)
    result.ExtendAnomalies([m.Parse(conditions, host_data) for m in methods])
    return result

  def Validate(self):
    """Check the method is well constructed."""
    if not self.check_id:
      raise DefinitionError("Check has missing check_id value")
    cls_name = self.check_id
    if not self.method:
      raise DefinitionError("Check %s has no methods" % cls_name)
    ValidateMultiple(self.method,
                     "Check %s has invalid method definitions" % cls_name)
class Method(rdf_structs.RDFProtoStruct):
  """A specific test method using 0 or more filters to process data."""
  protobuf = checks_pb2.Method

  def __init__(self, initializer=None, age=None, **kwargs):
    # Accept the configuration either as a dict passed as initializer or as
    # keyword arguments.
    if isinstance(initializer, dict):
      conf = initializer
      initializer = None
    else:
      conf = kwargs
    super(Method, self).__init__(initializer=initializer, age=age)
    probe = conf.get("probe", {})
    resource = conf.get("resource", {})
    hint = conf.get("hint", {})
    target = conf.get("target", {})
    if hint:
      # Add the hint to children: probe hints inherit missing fields from
      # the method-level hint.
      for cfg in probe:
        cfg["hint"] = hints.Overlay(child=cfg.get("hint", {}), parent=hint)
    self.probe = [Probe(**cfg) for cfg in probe]
    self.hint = Hint(hint, reformat=False)
    self.match = MatchStrToList(kwargs.get("match"))
    self.matcher = Matcher(self.match, self.hint)
    self.resource = [rdf_protodict.Dict(**r) for r in resource]
    self.target = triggers.Target(**target)
    self.triggers = triggers.Triggers()
    for p in self.probe:
      # If the probe has a target, use it. Otherwise, use the method's target.
      target = p.target or self.target
      self.triggers.Add(p.artifact, target, p)

  def Parse(self, conditions, host_data):
    """Runs probes that evaluate whether collected data has an issue.

    Args:
      conditions: The trigger conditions.
      host_data: A map of artifacts and rdf data.

    Returns:
      Anomalies if an issue exists.
    """
    processed = []
    probes = self.triggers.Calls(conditions)
    for p in probes:
      # Get the data required for the probe. A probe can use a result_context
      # (e.g. Parsers, Anomalies, Raw), to identify the data that is needed
      # from the artifact collection results.
      artifact_data = host_data.get(p.artifact)
      if not p.result_context:
        rdf_data = artifact_data["PARSER"]
      else:
        rdf_data = artifact_data.get(str(p.result_context))
      try:
        result = p.Parse(rdf_data)
      except ProcessingError as e:
        # Re-raise with the offending artifact named for easier debugging.
        raise ProcessingError("Bad artifact %s: %s" % (p.artifact, e))
      if result:
        processed.append(result)
    # Matcher compares the number of probes that triggered with results.
    return self.matcher.Detect(probes, processed)

  def Validate(self):
    """Check the Method is well constructed."""
    ValidateMultiple(self.probe, "Method has invalid probes")
    ValidateMultiple(self.target, "Method has invalid target")
    ValidateMultiple(self.hint, "Method has invalid hint")
class Probe(rdf_structs.RDFProtoStruct):
  """The suite of filters applied to host data."""
  protobuf = checks_pb2.Probe

  def __init__(self, initializer=None, age=None, **kwargs):
    # Accept the configuration either as a dict passed as initializer or as
    # keyword arguments.
    if isinstance(initializer, dict):
      conf = initializer
      initializer = None
    else:
      conf = kwargs
    conf["match"] = MatchStrToList(kwargs.get("match"))
    super(Probe, self).__init__(initializer=initializer, age=age, **conf)
    if self.filters:
      handler = filters.GetHandler(mode=self.mode)
    else:
      handler = filters.GetHandler()
    # Two handlers: one establishes the baseline data set, the other runs
    # the actual filters over (possibly baselined) data.
    self.baseliner = handler(artifact=self.artifact, filters=self.baseline)
    self.handler = handler(artifact=self.artifact, filters=self.filters)
    hinter = Hint(conf.get("hint", {}), reformat=False)
    self.matcher = Matcher(conf["match"], hinter)

  def Parse(self, rdf_data):
    """Process rdf data through filters. Test if results match expectations.

    Processing of rdf data is staged by a filter handler, which manages the
    processing of host data. The output of the filters are compared against
    expected results.

    Args:
      rdf_data: A list containing 0 or more rdf values.

    Returns:
      An anomaly if data didn't match expectations.

    Raises:
      ProcessingError: If rdf_data is not a handled type.
    """
    if not isinstance(rdf_data, (list, set)):
      raise ProcessingError("Bad host data format: %s" % type(rdf_data))
    if self.baseline:
      comparison = self.baseliner.Parse(rdf_data)
    else:
      comparison = rdf_data
    found = self.handler.Parse(comparison)
    results = self.hint.Render(found)
    return self.matcher.Detect(comparison, results)

  def Validate(self):
    """Check the test set is well constructed."""
    ValidateMultiple(self.target, "Probe has invalid target")
    self.baseliner.Validate()
    self.handler.Validate()
    self.hint.Validate()
class Filter(rdf_structs.RDFProtoStruct):
  """Generic filter to provide an interface for different types of filter."""
  protobuf = checks_pb2.Filter

  def __init__(self, initializer=None, age=None, **kwargs):
    # FIXME(sebastianw): Probe seems to pass in the configuration for filters
    # as a dict in initializer, rather than as kwargs.
    if isinstance(initializer, dict):
      conf = initializer
      initializer = None
    else:
      conf = kwargs
    super(Filter, self).__init__(initializer=initializer, age=age, **conf)
    # Fall back to the base "Filter" implementation when no type is set.
    filter_name = self.type or "Filter"
    self._filter = filters.Filter.GetFilter(filter_name)

  def Parse(self, rdf_data):
    """Process rdf data through the filter.

    Filters sift data according to filter rules. Data that passes the filter
    rule is kept, other data is dropped.

    If no filter method is provided, the data is returned as a list.
    Otherwise, the items that meet filter conditions are returned in a list.

    Args:
      rdf_data: Host data that has already been processed by a Parser into RDF.

    Returns:
      A list containing data items that matched the filter rules.
    """
    if self._filter:
      return list(self._filter.Parse(rdf_data, self.expression))
    return rdf_data

  def Validate(self):
    """The filter exists, and has valid filter and hint expressions."""
    if self.type not in filters.Filter.classes:
      raise DefinitionError("Undefined filter type %s" % self.type)
    self._filter.Validate(self.expression)
    ValidateMultiple(self.hint, "Filter has invalid hint")
class Hint(rdf_structs.RDFProtoStruct):
  """Human-formatted descriptions of problems, fixes and findings."""
  protobuf = checks_pb2.Hint

  def __init__(self, initializer=None, age=None, reformat=True, **kwargs):
    # Accept the configuration either as a dict passed as initializer or as
    # keyword arguments.
    if isinstance(initializer, dict):
      conf = initializer
      initializer = None
    else:
      conf = kwargs
    super(Hint, self).__init__(initializer=initializer, age=age, **conf)
    if not self.max_results:
      # Cap the number of rendered findings at the configured default.
      self.max_results = config_lib.CONFIG.Get("Checks.max_results")
    if reformat:
      self.hinter = hints.Hinter(self.format)
    else:
      self.hinter = hints.Hinter()

  def Render(self, rdf_data):
    """Processes data according to formatting rules."""
    # Render at most max_results items; summarize the overflow in one line.
    report_data = rdf_data[:self.max_results]
    results = [self.hinter.Render(rdf) for rdf in report_data]
    extra = len(rdf_data) - len(report_data)
    if extra > 0:
      results.append("...plus another %d issues." % extra)
    return results

  def Problem(self, state):
    """Creates an anomaly symptom/problem string."""
    # Returns None implicitly when no problem text is defined.
    if self.problem:
      return "%s: %s" % (state, self.problem.strip())

  def Fix(self):
    """Creates an anomaly explanation/fix string."""
    # Returns None implicitly when no fix text is defined.
    if self.fix:
      return self.fix.strip()

  def Validate(self):
    """Ensures that required values are set and formatting rules compile."""
    # TODO(user): Default format string.
    if self.problem:
      pass
class Matcher(object):
  """Performs comparisons between baseline and result data."""

  def __init__(self, matches, hint):
    """Maps match-type names to detector methods.

    Args:
      matches: A list of match-type names (e.g. "NONE", "ANY").
      hint: A Hint used to render findings into anomaly messages.

    Raises:
      DefinitionError: If a match name has no corresponding detector.
    """
    method_map = {"NONE": self.GotNone,
                  "ONE": self.GotSingle,
                  "SOME": self.GotMultiple,
                  "ANY": self.GotAny,
                  "ALL": self.GotAll}
    # Index into method_map explicitly: dict.get() never raises KeyError,
    # so an undefined condition would silently store a None detector and
    # only fail much later, inside Detect, with an unrelated TypeError.
    self.detectors = []
    for match in matches:
      try:
        self.detectors.append(method_map[str(match)])
      except KeyError:
        raise DefinitionError("Match uses undefined check condition: %s"
                              % match)
    self.hint = hint

  def Detect(self, baseline, host_data):
    """Run host_data through detectors and return them if a detector triggers.

    Args:
      baseline: The base set of rdf values used to evaluate whether an issue
        exists.
      host_data: The rdf values passed back by the filters.

    Returns:
      A CheckResult message containing anomalies if any detectors identified an
      issue, None otherwise.
    """
    result = CheckResult()
    for detector in self.detectors:
      for finding in detector(baseline, host_data):
        if finding:
          result.ExtendAnomalies(finding)
    if result:
      return result

  def Issue(self, state, results):
    """Collect anomalous findings into a CheckResult.

    Comparisons with anomalous conditions collect anomalies into a single
    CheckResult message. The contents of the result varies depending on whether
    the method making the comparison is a Check, Method or Probe.
    - Probes evaluate raw host data and generate Anomalies. These are condensed
      into a new CheckResult.
    - Checks and Methods evaluate the results of probes (i.e. CheckResults). If
      there are multiple probe results, all probe anomalies are aggregated into
      a single new CheckResult for the Check or Method.

    Args:
      state: A text description of what combination of results were anomalous
        (e.g. some condition was missing or present.)
      results: Anomalies or CheckResult messages.

    Returns:
      A CheckResult message.
    """
    result = CheckResult()
    # If there are CheckResults we're aggregating methods or probes.
    # Merge all current results into one CheckResult.
    # Otherwise, the results are raw host data.
    # Generate a new CheckResult and add the specific findings.
    if results and all(isinstance(r, CheckResult) for r in results):
      result.ExtendAnomalies(results)
    else:
      result.anomaly = rdf_anomaly.Anomaly(
          type=anomaly_pb2.Anomaly.AnomalyType.Name(
              anomaly_pb2.Anomaly.ANALYSIS_ANOMALY),
          symptom=self.hint.Problem(state),
          finding=self.hint.Render(results),
          explanation=self.hint.Fix())
    return result

  def GotNone(self, _, results):
    """Anomaly for no results, an empty list otherwise."""
    if not results:
      return self.Issue("Missing attribute", ["Expected state was not found"])
    return []

  def GotSingle(self, _, results):
    """Anomaly for exactly one result, an empty list otherwise."""
    if len(results) == 1:
      return self.Issue("Found one", results)
    return []

  def GotMultiple(self, _, results):
    """Anomaly for >1 result, an empty list otherwise."""
    if len(results) > 1:
      return self.Issue("Found multiple", results)
    return []

  def GotAny(self, _, results):
    """Anomaly for 1+ results, an empty list otherwise."""
    if results:
      return self.Issue("Found", results)
    return []

  def GotAll(self, baseline, results):
    """Anomaly if baseline vs result counts differ, an empty list otherwise."""
    num_base = len(baseline)
    num_rslt = len(results)
    if num_rslt > num_base:
      raise ProcessingError("Filter generated more results than base data: "
                            "%s > %s" % (num_rslt, num_base))
    if num_rslt == num_base and num_base > 0:
      return self.Issue("Found all", results)
    return []
class CheckRegistry(object):
  """A class to register the mapping between checks and host data.

  This is used to trigger all relevant checks when we collect the data.
  The method registry maps the combination of platform, environment and host
  data required by a given method.
  """
  # NOTE: both registries are class-level and shared process-wide.
  # Clear() resets them (e.g. between tests).
  checks = {}
  triggers = triggers.Triggers()
  @classmethod
  def Clear(cls):
    """Remove all checks and triggers from the registry."""
    cls.checks = {}
    cls.triggers = triggers.Triggers()
  @classmethod
  def RegisterCheck(cls, check, source="unknown", overwrite_if_exists=False):
    """Adds a check to the registry, refresh the trigger to check map.

    Args:
      check: A Check object to register.
      source: Text describing where the check was loaded from (for
        diagnostics; stored on check.loaded_from).
      overwrite_if_exists: If False, registering a duplicate check_id fails.

    Raises:
      DefinitionError: A check with the same check_id is already registered
        and overwrite_if_exists is False.
    """
    if not overwrite_if_exists and check.check_id in cls.checks:
      raise DefinitionError("Check named %s already exists and "
                            "overwrite_if_exists is set to False." %
                            check.check_id)
    check.loaded_from = source
    cls.checks[check.check_id] = check
    cls.triggers.Update(check.triggers, check)
  @staticmethod
  def _AsList(arg):
    """Encapsulates an argument in a list, if it's not already iterable."""
    # Strings are iterable but must be treated as single scalar values here.
    # basestring/collections.Iterable are Python 2 idioms.
    if isinstance(arg, basestring) or not isinstance(arg, collections.Iterable):
      return [arg]
    else:
      return list(arg)
  @classmethod
  def Conditions(cls, artifact=None, os_name=None, cpe=None, labels=None):
    """Provide a series of condition tuples.

    A Target can specify multiple artifact, os_name, cpe or label entries. These
    are expanded to all distinct tuples. When an entry is undefined or None, it
    is treated as a single definition of None, meaning that the condition does
    not apply.

    Args:
      artifact: Names of artifacts that should trigger an action.
      os_name: Names of OS' that should trigger an action.
      cpe: CPE strings that should trigger an action.
      labels: Host labels that should trigger an action.

    Yields:
      a permuted series of (artifact, os_name, cpe, label) tuples.
    """
    artifact = cls._AsList(artifact)
    os_name = cls._AsList(os_name)
    cpe = cls._AsList(cpe)
    labels = cls._AsList(labels)
    # Cartesian product expands every combination of the supplied values.
    for condition in itertools.product(artifact, os_name, cpe, labels):
      yield condition
  @classmethod
  def FindChecks(cls, artifact=None, os_name=None, cpe=None, labels=None,
                 restrict_checks=None):
    """Takes targeting info, identifies relevant checks.

    FindChecks will return results when a host has the conditions necessary for
    a check to occur. Conditions with partial results are not returned. For
    example, FindChecks will not return checks that if a check targets
    os_name=["Linux"], labels=["foo"] and a host only has the os_name=["Linux"]
    attribute.

    Args:
      artifact: 0+ artifact names.
      os_name: 0+ OS names.
      cpe: 0+ CPE identifiers.
      labels: 0+ GRR labels.
      restrict_checks: A list of check ids to restrict check processing to.

    Returns:
      the check_ids that apply.
    """
    check_ids = set()
    conditions = list(cls.Conditions(artifact, os_name, cpe, labels))
    for chk_id, chk in cls.checks.iteritems():
      if restrict_checks and chk_id not in restrict_checks:
        continue
      # A check fires as soon as any one condition tuple matches its triggers.
      for condition in conditions:
        if chk.triggers.Match(*condition):
          check_ids.add(chk_id)
          break  # No need to keep checking other conditions.
    return check_ids
  @classmethod
  def SelectArtifacts(cls, os_name=None, cpe=None, labels=None,
                      restrict_checks=None):
    """Takes targeting info, identifies artifacts to fetch.

    Args:
      os_name: 0+ OS names.
      cpe: 0+ CPE identifiers.
      labels: 0+ GRR labels.
      restrict_checks: A list of check ids whose artifacts should be fetched.

    Returns:
      the artifacts that should be collected.
    """
    results = set()
    for condition in cls.Conditions(None, os_name, cpe, labels):
      # Drop the artifact slot (always None here); triggers are keyed on
      # (os_name, cpe, label) when selecting artifacts.
      trigger = condition[1:]
      for chk in cls.checks.values():
        if restrict_checks and chk.check_id not in restrict_checks:
          continue
        results.update(chk.triggers.Artifacts(*trigger))
    return results
  @classmethod
  def Process(cls, host_data, os_name=None, cpe=None, labels=None,
              exclude_checks=None, restrict_checks=None):
    """Runs checks over all host data.

    Args:
      host_data: The data collected from a host, mapped to artifact name.
      os_name: 0+ OS names.
      cpe: 0+ CPE identifiers.
      labels: 0+ GRR labels.
      exclude_checks: A list of check ids not to run. A check id in this list
        will not get run even if included in restrict_checks.
      restrict_checks: A list of check ids that may be run, if appropriate.

    Yields:
      A CheckResult message for each check that was performed.
    """
    # All the conditions that apply to this host.
    artifacts = host_data.keys()
    check_ids = cls.FindChecks(artifacts, os_name, cpe, labels)
    conditions = list(cls.Conditions(artifacts, os_name, cpe, labels))
    for check_id in check_ids:
      # skip if check in list of excluded checks
      if exclude_checks and check_id in exclude_checks:
        continue
      if restrict_checks and check_id not in restrict_checks:
        continue
      try:
        chk = cls.checks[check_id]
        yield chk.Parse(conditions, host_data)
      except ProcessingError as e:
        # A failing check is logged but does not abort the other checks.
        logging.warn("Check ID %s raised: %s" % (check_id, e))
def CheckHost(host_data, os_name=None, cpe=None, labels=None,
              exclude_checks=None, restrict_checks=None):
  """Perform all checks on a host using acquired artifacts.

  Checks are selected based on the artifacts available and the host attributes
  (e.g. os_name/cpe/labels) provided either as parameters or through the
  knowledgebase artifact.

  A KnowledgeBase artifact should be provided that contains, at a minimum:
  - OS
  - Hostname or IP
  Other knowledgebase attributes may be required for specific checks.

  CPE is currently unused, pending addition of a CPE module in the GRR client.
  Labels are arbitrary string labels attached to a client.

  Args:
    host_data: A dictionary with artifact names as keys, and rdf data as
      values.
    os_name: An OS name (optional).
    cpe: A CPE string (optional).
    labels: An iterable of labels (optional).
    exclude_checks: A list of check ids not to run. A check id in this list
      will not get run even if included in restrict_checks.
    restrict_checks: A list of check ids that may be run, if appropriate.

  Returns:
    A CheckResults object that contains results for all checks that were
    performed on the host.
  """
  # Fall back to the KnowledgeBase artifact for attributes not supplied.
  knowledge_base = host_data.get("KnowledgeBase")
  if os_name is None:
    os_name = knowledge_base.os
  if cpe is None:
    # TODO(user): Get CPE (requires new artifact/parser)
    pass
  if labels is None:
    # TODO(user): Get labels (see grr/lib/export.py for acquisition
    # from client)
    pass
  return CheckRegistry.Process(host_data,
                               os_name=os_name,
                               cpe=cpe,
                               labels=labels,
                               restrict_checks=restrict_checks,
                               exclude_checks=exclude_checks)
def LoadConfigsFromFile(file_path):
  """Loads check definitions from a yaml file, keyed by check_id."""
  with open(file_path) as data:
    return dict((config["check_id"], config)
                for config in yaml.safe_load_all(data))
def LoadCheckFromFile(file_path, check_id, overwrite_if_exists=True):
  """Load a single check from a file.

  Args:
    file_path: Path of the yaml file containing check definitions.
    check_id: The id of the check to load from that file.
    overwrite_if_exists: Replace an already-registered check with this id.

  Returns:
    The loaded and registered Check object.

  Raises:
    DefinitionError: No check with check_id exists in the file.
  """
  configs = LoadConfigsFromFile(file_path)
  conf = configs.get(check_id)
  if conf is None:
    # BUG FIX: previously a missing check_id flowed into Check(**None),
    # raising an opaque TypeError. Fail explicitly instead.
    raise DefinitionError("Check %s was not found in %s" %
                          (check_id, file_path))
  check = Check(**conf)
  # Validate will raise DefinitionError if the check doesn't load.
  check.Validate()
  CheckRegistry.RegisterCheck(check, source="file:%s" % file_path,
                              overwrite_if_exists=overwrite_if_exists)
  logging.debug("Loaded check %s from %s", check.check_id, file_path)
  return check
def LoadChecksFromFiles(file_paths, overwrite_if_exists=True):
  """Load the checks defined in the specified files."""
  results = []
  for path in file_paths:
    for definition in LoadConfigsFromFile(path).values():
      check = Check(**definition)
      # Validate will raise if the check doesn't load.
      check.Validate()
      results.append(check)
      CheckRegistry.RegisterCheck(check, source="file:%s" % path,
                                  overwrite_if_exists=overwrite_if_exists)
      logging.debug("Loaded check %s from %s", check.check_id, path)
  return results
def LoadChecksFromDirs(dir_paths, overwrite_if_exists=True):
  """Load checks from all yaml files in the specified directories."""
  results = []
  for directory in dir_paths:
    yaml_files = glob.glob(os.path.join(directory, "*.yaml"))
    results.extend(LoadChecksFromFiles(yaml_files, overwrite_if_exists))
  return results
class CheckLoader(registry.InitHook):
  """Loads checks from the filesystem."""
  # TODO(user): Add check loading from datastore.
  def RunOnce(self):
    # Load check definitions once at startup from configured directories and
    # files; RegisterCheck populates the class-level CheckRegistry.
    LoadChecksFromDirs(config_lib.CONFIG["Checks.config_dir"])
    LoadChecksFromFiles(config_lib.CONFIG["Checks.config_files"])
    logging.debug("Loaded checks: %s", ",".join(sorted(CheckRegistry.checks)))
| |
"""Test PurePNG against PIL native plugin"""
import unittest
import pngsuite
from PIL import Image
from io import BytesIO
from PIL import PngImagePlugin as pilpng
from png import PngImagePlugin as purepng
def safe_repr(obj, short=False):
    """Return repr(obj), truncated when short=True and the repr is long."""
    _MAX_LENGTH = 80
    try:
        text = repr(obj)
    except Exception:
        # A broken __repr__ must not break the test diagnostics.
        text = object.__repr__(obj)
    if short and len(text) >= _MAX_LENGTH:
        return text[:_MAX_LENGTH] + ' [truncated]...'
    return text
# Compatibility shims so the module runs on both Python 2 and Python 3.
try:
    set
except NameError:
    # Very old Python 2: the set builtin does not exist yet.
    from sets import Set as set
try:
    reload
except NameError:
    # Python 3: reload moved out of the builtins into imp.
    from imp import reload
try:
    unicode
except NameError:
    # Python 3: there is no separate unicode type; all str are unicode.
    unicode = str
class PilImageToPyPngAdapter(object):
    """Allow reading PIL image as PurePNG rows.

    Exposes the image as a sequence of rows, each row being a flat list of
    channel values (PurePNG's boxed-row flat-pixel representation).
    """
    def __init__(self, im):
        self.im = im
        self.nowrow = 0  # index of the next row produced by iteration
    def __len__(self):
        # Number of rows == image height
        return self.im.size[1]
    def __iter__(self):
        return self
    def __next__(self):
        current = self.nowrow
        if current >= len(self):
            raise StopIteration()
        self.nowrow = current + 1
        return self[current]
    next = __next__  # Python 2 iterator protocol
    def __getitem__(self, row):
        width = self.im.size[0]
        values = []
        for col in range(width):
            px = self.im.getpixel((col, row))
            if hasattr(px, '__iter__'):
                # Multi-channel pixel: flatten the channel tuple
                values.extend(px)
            else:
                # Single channel pixel value
                values.append(px)
        return values
class BaseTest(unittest.TestCase):
    """Common testcase prototype.

    Subclasses set test_ to a file-like PNG object from the suite and
    delta to the allowed per-sample difference between the two decoders.
    """
    test_ = None
    delta = 0

    def assertAlmostEqual(self, first, second,
                          places=None, msg=None, delta=None):
        """
        Updated version which can handle iterables

        Fail if the two objects are unequal as determined by their
        difference rounded to the given number of decimal places
        (default 7) and comparing to zero, or by comparing that the
        difference between the two objects is more than the given delta.

        Note that decimal places (from zero) are usually not the same
        as significant digits (measured from the most signficant digit).

        If the two objects compare equal then they will automatically
        compare almost equal.
        """
        if first == second:
            # shortcut
            return
        if delta is not None and places is not None:
            raise TypeError("specify delta or places not both")
        # BUG FIX: default to 7 decimal places when NEITHER tolerance is
        # supplied (matching unittest's assertAlmostEqual contract). The
        # previous condition ("delta is not None and places is None") left
        # places as None in the default case, so round(diff, None) rounded
        # to whole integers and values differing by up to 0.5 incorrectly
        # compared as almost equal.
        if delta is None and places is None:
            places = 7

        def valuecompare(value1, value2):
            """Compare scalar values; return an error message or None."""
            if value1 == value2:
                # shortcut
                return
            if delta is not None:
                if abs(value1 - value2) > delta:
                    return '%s != %s within %s delta' % (safe_repr(value1),
                                                         safe_repr(value2),
                                                         safe_repr(delta))
            else:  # delta is None:
                if round(abs(value1 - value2), places) != 0:
                    return '%s != %s within %r places' % (safe_repr(value1),
                                                          safe_repr(value2),
                                                          places)

        def compareIters(first, second):
            """Compare (possibly nested) iterables element-wise."""
            passed = []
            errmsg = None
            for v1, v2 in zip(first, second):
                if hasattr(v1, '__iter__') and hasattr(v2, '__iter__'):
                    mymsg = compareIters(v1, v2)
                else:
                    mymsg = valuecompare(v1, v2)
                if mymsg is None:
                    # clean; remember it for the diff-style error report
                    passed.append(v1)
                else:
                    if errmsg is None:
                        errmsg = mymsg + '\n['
                    if passed:
                        errmsg = errmsg + '\n' +\
                            '\n'.join([repr(p) for p in passed])
                    errmsg = errmsg + '\n - ' + repr(v1) + '\n + ' + repr(v2)
                    passed = []
            if errmsg is not None:
                errmsg = errmsg + ']'
            return errmsg

        if hasattr(first, '__iter__') and hasattr(second, '__iter__'):
            standardMsg = compareIters(first, second)
        else:
            standardMsg = valuecompare(first, second)
        if standardMsg is not None:
            msg = self._formatMessage(msg, standardMsg)
            raise self.failureException(msg)

    def assertDictEqual(self, d1, d2, msg=None):
        """Comparison of dictionaries with some hacks and better printing"""
        keys = set(d1.keys())
        self.assertEqual(keys, set(d2.keys()))
        for key_ in keys:
            val1 = d1.get(key_)
            val2 = d2.get(key_)
            if isinstance(val2, unicode) and not isinstance(val1, unicode):
                # Align types before comparing: encode the unicode side
                val2 = val2.encode('utf-8')
            self.assertEqual(val1, val2,
                             'Unequal values for key ' + repr(key_))

    def compareImages(self, im1, im2):
        """Compare two images: their size, pixels and metadata"""
        self.assertEqual(im1.size, im2.size)
        # Copy info before clean it as PIL may rely on this while reading
        info1 = dict(im1.info)
        info2 = dict(im2.info)
        # Transparency will be converted to alpha later
        if 'transparency' in info1:
            del info1['transparency']
        if 'transparency' in info2:
            del info2['transparency']
        # Interlace does not affect image, only way it saved
        if 'interlace' in info1:
            del info1['interlace']
        if 'interlace' in info2:
            del info2['interlace']
        self.longMessage = True
        self.assertDictEqual(info1, info2)
        # compare pixels
        if im1.mode != im2.mode or im1.mode == 'P':
            im1 = im1.convert('RGBA')
            im2 = im2.convert('RGBA')
        pix1 = PilImageToPyPngAdapter(im1)
        pix2 = PilImageToPyPngAdapter(im2)
        if im1.mode == 'RGBA':
            self.assertAlmostEqual(pix1[0][3::4], pix2[0][3::4],
                                   delta=self.delta)  # alpha fast check
        self.assertAlmostEqual(pix1[0], pix2[0],
                               delta=self.delta)  # fast check
        self.assertAlmostEqual(pix1, pix2, delta=self.delta)  # complete check
class ReadTest(BaseTest):
    """Reading test (read via PIL, read via PurePNG and compare)"""
    def runTest(self):
        """Decode the suite file with both plugins and compare the images"""
        if self.test_ is None:
            return
        source = self.test_
        # Decode first with the PurePNG plugin
        reload(purepng)
        pure_image = Image.open(source)
        pure_image.load()
        source.seek(0)
        # Then decode the same bytes with PIL's builtin plugin
        reload(pilpng)
        pil_image = Image.open(source)
        self.compareImages(pil_image, pure_image)
class WriteTest(BaseTest):
    """Writing test (write via PurePNG, re-read and compare with source)"""
    def runTest(self):
        """Round-trip the image through PurePNG and compare with the source"""
        if self.test_ is None:
            return
        source = self.test_
        # Load via PIL default plugin
        source.seek(0)
        reload(pilpng)
        original = Image.open(source)
        # Save via PurePNG
        reload(purepng)
        buffer_ = BytesIO()
        # PIL save new named file instead of write to BytesIO
        # buffer_.name = type(self).__name__
        original.save(buffer_, 'PNG')
        # Load again, plugin unimportant after read test
        buffer_.seek(0)
        reread = Image.open(buffer_)
        self.compareImages(original, reread)
# Generate tests for each suite file
# Mapping of PngSuite test name -> file-like object with the PNG bytes.
testsuite = pngsuite.png
def getdelta(testname):
    """Max delta between PIL and PurePNG"""
    if testname.endswith('16'):
        # 16-bit samples: the decoders may differ by one unit
        return 1
    if testname == 'Basn0g03':
        # PIL ignore sBIT on 4bit greyscale, PurePNG provide more accuracy
        return 7
    return 0
# For every suite image, synthesize one ReadTest and one WriteTest subclass
# and publish them in the module namespace so unittest discovery finds them.
for tname_, test_ in (testsuite.items()):
    # Disable known bugs
    if tname_ in ('tbbn0g04', 'tbwn0g16'):
        # Greyscale + transparency does not provide alpha in PIL
        continue
    if tname_.startswith('ctf') or tname_.startswith('cth') or\
            tname_.startswith('ctj') or tname_.startswith('ctg'):
        # Unicode handling differently now
        continue
    if tname_.startswith('x'):
        # Error tests will cause errors :)
        continue
    globals()[tname_ + '_rtest'] = type(tname_ + '_rtest', (ReadTest,),
                                        {'test_': test_,
                                         'delta': getdelta(tname_)})
    globals()[tname_ + '_wtest'] = type(tname_ + '_wtest', (WriteTest,),
                                        {'test_': test_,
                                         'delta': getdelta(tname_)})
if __name__ == "__main__":
    unittest.main()
| |
# Copyright 2014 Hewlett-Packard Development Company, L.P.
#
# Author: Endre Karlson <endre.karlson@hp.com>
# Author: Dave Tucker <dave.j.tucker@hp.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Display metadata for the OVSDB Bridge table: column key -> header label.
BRIDGE = {
    "headers": {
        "uuid": "UUID",
        "name": "Name",
        "ports": "Ports",
        "mirrors": "Mirrors",
        "controller": "Controller",
        "datapath_id": "DPID",
        "datapath_type": "Datapath Type",
        "sflow": "sFlow",
        "netflow": "NetFlow",
        "protocols": "Protocols",
        "status": "Status",
        "stp_enable": "STP Enable",
        "other_config": "Other Config",
        "external_ids": "External IDs",
        "columns": "Columns"},
    "title": 'Bridge'
    }
# Display metadata for the top-level Open_vSwitch table.
OVS = {
    "headers": {
        "uuid": "UUID",
        "bridges": "Bridges",
        "ssl": "SSL",
        "next_cfg": "Next Config",
        "curr_cfg": "Current Config",
        "statistics": "Statistics",
        "ovs_version": "OVS Version",
        "db_version": "DB Version",
        "system_type": "System Type",
        "system_version": "System Version",
        "manager_options": "Manager Options",
        "other_config": "Other Config",
        "external_ids": "External IDs"},
    "title": "Open vSwitch"}
# Display metadata for the OVSDB Port table: column key -> header label.
PORT = {
    "headers": {
        "uuid": "UUID",
        "name": "Name",
        "interfaces": "Interfaces",
        "vlan_mode": "VLAN Mode",
        # NOTE(review): "native-untagged" is a vlan_mode *value* in OVS, not
        # a Port column name -- confirm this key is intended.
        "native-untagged": "Native VLAN",
        "tag": "Tag",
        "trunks": "Trunks",
        "other_config": "Other Config",
        "bond_mode": "Bond Mode",
        "bond_updelay": "Bond Up Delay",
        "bond_downdelay": "Bond Down Delay",
        # Fixed label: stray trailing space ("LACP ") removed.
        "lacp": "LACP",
        "bond_fake_iface": "Bond Fake Interface",
        # Fixed typo: the display label read "QiS".
        "qos": "QoS",
        "mac": "MAC",
        "fake_bridge": "Fake Bridge",
        "status": "Status"},
    "title": "Port"}
# Display metadata for the OVSDB Interface table: column key -> header label.
INTERFACE = {
    "headers": {
        "uuid": "UUID",
        "name": "Name",
        "ifindex": "ifindex",
        "mac_in_use": "MAC in Use?",
        "mac": "MAC",
        "ofport": "OF Port",
        "ofport_request": "OF Port Request",
        "type": "Type",
        "options": "Options",
        "admin_state": "Admin State",
        "link_state": "Link State",
        "link_resets": "Link Resets",
        "link_speed": "Link Speed",
        "duplex": "Duplex",
        "mtu": "MTU",
        "lacp_current": "LACP Current",
        "status": "Status",
        "statistics": "Statistics",
        "ingress_policing_rate": "Ingress Policing Rate",
        "ingress_policing_burst": "Ingress Policing Burst",
        "bfd": "BFD",
        "bfd_status": "BFD Status",
        "cfm_mpid": "CFM MPID",
        "cfm_fault": "CFM Fault",
        "cfm_fault_status": "CFM Fault Status",
        "cfm_remote_opstate": "CFM Remote Op State",
        "cfm_health": "CFM Health",
        "cfm_remote_mpids": "CFM Remote MPIDs",
        "other_config": "Other Config",
        "external_ids": "External IDs"},
    "title": "Interface"}
# Display metadata for the OVSDB Flow_Table table.
FLOW_TABLE = {
    "headers": {
        "uuid": "UUID",
        "name": "Name",
        "flow_limit": "Flow Limit",
        "overflow_policy": "Overflow Policy",
        "groups": "Groups"},
    "title": "Flow Table"
    }
# Display metadata for the OVSDB QoS table: column key -> header label.
QOS = {
    "headers": {
        "uuid": "UUID",
        "type": "Type",
        "queues": "Queues",
        # Fixed keys: the originals had trailing spaces ("other_config ",
        # "external_ids "), which can never match real column names.
        "other_config": "Other Config",
        "external_ids": "External IDs"},
    "title": "QoS"}
# Display metadata for the OVSDB Queue table: column key -> header label.
QUEUE = {
    "headers": {
        "dscp": "DSCP",
        "other_config": "Other Config",
        "external_ids": "External IDs"},
    "title": "Queue"}
# Display metadata for the OVSDB Mirror table: column key -> header label.
MIRROR = {
    "headers": {
        "uuid": "UUID",
        "name": "Name",
        "select_all": "Select All",
        # Fixed: "select_dst_port " had a trailing space in its key, and the
        # Source/Destination labels were swapped between dst and src.
        "select_dst_port": "Select Destination Port",
        "select_src_port": "Select Source Port",
        "select_vlan": "Select Vlan",
        "output_port": "Output Port",
        "output_vlan": "Output VLAN",
        "statistics": "Statistics",
        "external_ids": "External IDs"},
    "title": "Mirror"}
# Display metadata for the OVSDB Controller table: column key -> header label.
CONTROLLER = {
    "headers": {
        "uuid": "UUID",
        # Fixed key: "target " (trailing space) can never match the real
        # Controller column name.
        "target": "Target",
        "connection_mode": "Connection Mode",
        "max_backoff": "Max. Backoff",
        "inactivity_probe": "Inactivity Probe",
        "enable_async_messages": "Enable Async Messages",
        "controller_rate_limit": "Controller Rate Limit",
        "controller_burst_limit": "Controller Burst Limit",
        "local_ip": "Local IP",
        "local_netmask": "Local Netmask",
        "local_gateway": "Local Gateway",
        "is_connected": "Connected",
        "role": "Role",
        "status": "Status",
        "other_config": "Other Config",
        "external_ids": "External IDs"},
    "title": "Controller"
    }
# Display metadata for the OVSDB Manager table: column key -> header label.
MANAGER = {
    "headers": {
        "uuid": "UUID",
        "target": "Target",
        "connection_mode": "Connection Mode",
        "max_backoff": "Max. Backoff",
        "inactivity_probe": "Inactivity Probe",
        "is_connected": "Connected",
        "status": "Status",
        "external_ids": "External IDs",
        "other_config": "Other Config"},
    "title": "Manager"}
# Display metadata for the OVSDB NetFlow table.
# NOTE(review): title "Netflow" differs in capitalization from the
# "NetFlow" label used in BRIDGE -- confirm which is intended.
NETFLOW = {
    "headers": {
        "uuid": "UUID",
        "targets": "Targets",
        "engine_id": "Engine ID",
        "engine_type": "Engine Type",
        "active_timeout": "Active Timeout",
        "add_id_to_interface": "Add ID to Interface",
        "external_ids": "External IDs"},
    "title": "Netflow"}
# Display metadata for the OVSDB SSL table.
SSL = {
    "headers": {
        "uuid": "UUID",
        "private_key": "Private Key",
        "certificate": "Certificate",
        "ca_cert": "CA Cert",
        "bootstrap_ca_cert": "Bootstrap CA Cert",
        "external_ids": "External IDs"},
    "title": "SSL"}
# Display metadata for the OVSDB sFlow table.
SFLOW = {
    "headers": {
        "uuid": "UUID",
        "agent": "Agent",
        "header": "Header",
        "polling": "Polling",
        "sampling": "Sampling",
        "targets": "Targets",
        "external_ids": "External IDs"},
    "title": "sFlow"}
# Display metadata for the OVSDB IPFIX table.
# NOTE(review): "Obs Point Id" is inconsistently capitalized next to
# "Obs Domain ID" -- confirm the intended label.
IPFIX = {
    "headers": {
        "uuid": "UUID",
        "targets": "Targets",
        "sampling": "Sampling",
        "obs_domain_id": "Obs Domain ID",
        "obs_point_id": "Obs Point Id",
        "cache_active_timeout": "Cache Active Timeout",
        "cache_max_flows": "Cache Max Flows",
        "external_ids": "External IDs"},
    "title": "IPFIX"}
# Display metadata for the OVSDB Flow_Sample_Collector_Set table.
FLOW_SET = {
    "headers": {
        "uuid": "UUID",
        "id": "ID",
        "bridge": "Bridge",
        "ipfix": "IPFIX",
        "external_ids": "External IDs"},
    "title": "Flow Sample Collector Set"}
# Registry of OVSDB table name -> display metadata dictionary.
TABLES = {"open_vswitch": OVS,
          "bridge": BRIDGE,
          "port": PORT,
          "interface": INTERFACE,
          "flow_table": FLOW_TABLE,
          "qos": QOS,
          "queue": QUEUE,
          "mirror": MIRROR,
          "controller": CONTROLLER,
          "manager": MANAGER,
          "netflow": NETFLOW,
          "ssl": SSL,
          "sflow": SFLOW,
          "ipfix": IPFIX,
          "flow_sample_collector_set": FLOW_SET}
# Table name -> display title. A dict comprehension replaces the older
# dict([(k, v) for ...]) construction (flake8-comprehensions C404).
ITEMS = {k: v['title'] for k, v in TABLES.items()}
| |
#!/usr/bin/env python3
# -*- coding: UTF-8 -*-
# Copyright (c) 2019 The ungoogled-chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Substitute domain names in the source tree with blockable strings.
"""
from pathlib import Path
import argparse
import collections
import contextlib
import io
import os
import stat
import re
import tarfile
import tempfile
import zlib
from _extraction import extract_tar_file
from _common import ENCODING, get_logger, add_common_params
# Encodings to try on source tree files
TREE_ENCODINGS = ('UTF-8', 'ISO-8859-1')
# Constants for domain substitution cache
_INDEX_LIST = 'cache_index.list'
_INDEX_HASH_DELIMITER = '|'
_ORIG_DIR = 'orig'
# Constants for timestamp manipulation
# Delta between all file timestamps in nanoseconds
_TIMESTAMP_DELTA = 1 * 10**9
class DomainRegexList:
    """Representation of a domain_regex.list file"""
    _regex_pair_tuple = collections.namedtuple('DomainRegexPair', ('pattern', 'replacement'))

    # Delimiter between the search pattern and its replacement on each line
    _PATTERN_REPLACE_DELIM = '#'

    def __init__(self, path):
        # Keep only the non-empty lines of the regex list file
        self._data = tuple(line for line in path.read_text().splitlines() if line)

        # Lazily-built cache of compiled regex pairs
        self._compiled_regex = None

    def _compile_regex(self, line):
        """Generates a regex pair tuple for the given line"""
        pattern, replacement = line.split(self._PATTERN_REPLACE_DELIM)
        return self._regex_pair_tuple(re.compile(pattern), replacement)

    @property
    def regex_pairs(self):
        """
        Returns a tuple of compiled regex pairs
        """
        if not self._compiled_regex:
            self._compiled_regex = tuple(
                self._compile_regex(line) for line in self._data)
        return self._compiled_regex

    @property
    def search_regex(self):
        """
        Returns a single expression to search for domains
        """
        prefixes = (line.split(self._PATTERN_REPLACE_DELIM, 1)[0]
                    for line in self._data)
        return re.compile('|'.join(prefixes))
# Private Methods
def _substitute_path(path, regex_iter):
    """
    Perform domain substitution on path and add it to the domain substitution cache.

    path is a pathlib.Path to the file to be domain substituted.
    regex_iter is an iterable of regular expression namedtuple like from
    config.DomainRegexList.regex_pairs()

    Returns a tuple of the CRC32 hash of the substituted raw content and the
    original raw content; None for both entries if no substitutions were made.

    Raises FileNotFoundError if path does not exist.
    Raises UnicodeDecodeError if path's contents cannot be decoded.
    """
    if not os.access(path, os.W_OK):
        # If the file cannot be written to, it cannot be opened for updating
        print(str(path) + " cannot be opened for writing! Adding write permission...")
        path.chmod(path.stat().st_mode | stat.S_IWUSR)
    with path.open('r+b') as input_file:
        original_content = input_file.read()
        if not original_content:
            return (None, None)
        content = None
        encoding = None
        for encoding in TREE_ENCODINGS:
            try:
                content = original_content.decode(encoding)
                break
            except UnicodeDecodeError:
                continue
        if content is None:
            # BUG FIX: UnicodeDecodeError must be constructed with
            # (encoding, object, start, end, reason); the previous
            # single-string call raised TypeError instead of the
            # documented UnicodeDecodeError.
            raise UnicodeDecodeError(
                'utf-8', original_content, 0, len(original_content),
                'Unable to decode with any encoding: %s' % path)
        file_subs = 0
        for regex_pair in regex_iter:
            content, sub_count = regex_pair.pattern.subn(regex_pair.replacement, content)
            file_subs += sub_count
        if file_subs > 0:
            # Encode once and reuse the bytes for both the write and the hash
            substituted_content = content.encode(encoding)
            input_file.seek(0)
            input_file.write(substituted_content)
            input_file.truncate()
            return (zlib.crc32(substituted_content), original_content)
        return (None, None)
def _validate_file_index(index_file, resolved_tree, cache_index_files):
    """
    Validation of file index and hashes against the source tree.
    Updates cache_index_files

    index_file is a binary file object over the cache's index list.
    resolved_tree is the resolved pathlib.Path of the source tree.
    cache_index_files is a set that receives each valid relative path.

    Returns True if the file index is valid; False otherwise
    """
    all_hashes_valid = True
    # BUG FIX: CRC-32 values are written as 8 hex digits ('{:08x}'), so only
    # [0-9a-fA-F] is valid. The previous pattern ([a-zA-Z0-9]) let entries
    # like "zzzzzzzz" through to int(..., 16) below, which raised an
    # uncaught ValueError instead of being reported as invalid.
    crc32_regex = re.compile(r'^[0-9a-fA-F]{8}$')
    for entry in index_file.read().decode(ENCODING).splitlines():
        try:
            relative_path, file_hash = entry.split(_INDEX_HASH_DELIMITER)
        except ValueError as exc:
            get_logger().error('Could not split entry "%s": %s', entry, exc)
            continue
        if not relative_path or not file_hash:
            get_logger().error('Entry %s of domain substitution cache file index is not valid',
                               _INDEX_HASH_DELIMITER.join((relative_path, file_hash)))
            all_hashes_valid = False
            continue
        if not crc32_regex.match(file_hash):
            get_logger().error('File index hash for %s does not appear to be a CRC32 hash',
                               relative_path)
            all_hashes_valid = False
            continue
        if zlib.crc32((resolved_tree / relative_path).read_bytes()) != int(file_hash, 16):
            get_logger().error('Hashes do not match for: %s', relative_path)
            all_hashes_valid = False
            continue
        if relative_path in cache_index_files:
            get_logger().error('File %s shows up at least twice in the file index', relative_path)
            all_hashes_valid = False
            continue
        cache_index_files.add(relative_path)
    return all_hashes_valid
@contextlib.contextmanager
def _update_timestamp(path: os.PathLike, set_new: bool) -> None:
    """
    Context manager to set the timestamp of the path to plus or
    minus a fixed delta, regardless of modifications within the context.

    if set_new is True, the delta is added. Otherwise, the delta is subtracted.
    """
    stats = os.stat(path)
    delta = _TIMESTAMP_DELTA if set_new else -_TIMESTAMP_DELTA
    new_timestamp = (stats.st_atime_ns + delta, stats.st_mtime_ns + delta)
    try:
        yield
    finally:
        # Always apply the shifted timestamp, even if the body raised
        os.utime(path, ns=new_timestamp)
# Public Methods
def apply_substitution(regex_path, files_path, source_tree, domainsub_cache):
    """
    Substitute domains in source_tree with files and substitutions,
    and save the pre-domain substitution archive to presubdom_archive.

    regex_path is a pathlib.Path to domain_regex.list
    files_path is a pathlib.Path to domain_substitution.list
    source_tree is a pathlib.Path to the source tree.
    domainsub_cache is a pathlib.Path to the domain substitution cache.

    Raises NotADirectoryError if the patches directory is not a directory or does not exist
    Raises FileNotFoundError if the source tree or required directory does not exist.
    Raises FileExistsError if the domain substitution cache already exists.
    Raises ValueError if an entry in the domain substitution list contains the file index
    hash delimiter.
    """
    if not source_tree.exists():
        raise FileNotFoundError(source_tree)
    if not regex_path.exists():
        raise FileNotFoundError(regex_path)
    if not files_path.exists():
        raise FileNotFoundError(files_path)
    if domainsub_cache.exists():
        raise FileExistsError(domainsub_cache)
    resolved_tree = source_tree.resolve()
    regex_pairs = DomainRegexList(regex_path).regex_pairs
    fileindex_content = io.BytesIO()
    with tarfile.open(
            str(domainsub_cache), 'w:%s' % domainsub_cache.suffix[1:],
            compresslevel=1) as cache_tar:
        for relative_path in filter(len, files_path.read_text().splitlines()):
            if _INDEX_HASH_DELIMITER in relative_path:
                # Cache tar will be incomplete; remove it for convenience
                cache_tar.close()
                domainsub_cache.unlink()
                # BUG FIX: both format arguments must be a tuple applied to
                # the %-operator. The old code applied % to relative_path
                # alone (TypeError: not enough arguments for format string)
                # and passed the delimiter as a stray second exception arg.
                raise ValueError(
                    'Path "%s" contains the file index hash delimiter "%s"' %
                    (relative_path, _INDEX_HASH_DELIMITER))
            path = resolved_tree / relative_path
            if not path.exists():
                get_logger().warning('Skipping non-existant path: %s', path)
                continue
            if path.is_symlink():
                get_logger().warning('Skipping path that has become a symlink: %s', path)
                continue
            # Shift timestamps forward so substituted files are detectable
            with _update_timestamp(path, set_new=True):
                crc32_hash, orig_content = _substitute_path(path, regex_pairs)
            if crc32_hash is None:
                get_logger().info('Path has no substitutions: %s', relative_path)
                continue
            fileindex_content.write('{}{}{:08x}\n'.format(relative_path, _INDEX_HASH_DELIMITER,
                                                          crc32_hash).encode(ENCODING))
            orig_tarinfo = tarfile.TarInfo(str(Path(_ORIG_DIR) / relative_path))
            orig_tarinfo.size = len(orig_content)
            with io.BytesIO(orig_content) as orig_file:
                cache_tar.addfile(orig_tarinfo, orig_file)
        # Append the file index last so reverting can validate the cache
        fileindex_tarinfo = tarfile.TarInfo(_INDEX_LIST)
        fileindex_tarinfo.size = fileindex_content.tell()
        fileindex_content.seek(0)
        cache_tar.addfile(fileindex_tarinfo, fileindex_content)
def revert_substitution(domainsub_cache, source_tree):
    """
    Revert domain substitution on source_tree using the pre-domain
    substitution archive presubdom_archive.

    It first checks if the hashes of the substituted files match the hashes
    computed during the creation of the domain substitution cache, raising
    KeyError if there are any mismatches. Then, it proceeds to
    reverting files in the source_tree.
    domainsub_cache is removed only if all the files from the domain substitution cache
    were relocated to the source tree.

    domainsub_cache is a pathlib.Path to the domain substitution cache.
    source_tree is a pathlib.Path to the source tree.

    Raises KeyError if:
        * There is a hash mismatch while validating the cache
        * The cache's file index is corrupt or missing
        * The cache is corrupt or is not consistent with the file index
    Raises FileNotFoundError if the source tree or domain substitution cache do not exist.
    """
    # This implementation trades disk space/wear for performance (unless a ramdisk is used
    # for the source tree)
    # Assumptions made for this process:
    # * The correct tar file was provided (so no huge amount of space is wasted)
    # * The tar file is well-behaved (e.g. no files extracted outside of destination path)
    # * Cache file index and cache contents are already consistent (i.e. no files exclusive to
    #   one or the other)
    if not domainsub_cache.exists():
        raise FileNotFoundError(domainsub_cache)
    if not source_tree.exists():
        raise FileNotFoundError(source_tree)
    resolved_tree = source_tree.resolve()
    cache_index_files = set()  # All files in the file index
    # Extract into a temp dir inside the tree so replace() stays on one filesystem.
    with tempfile.TemporaryDirectory(
            prefix='domsubcache_files', dir=str(resolved_tree)) as tmp_extract_name:
        extract_path = Path(tmp_extract_name)
        get_logger().debug('Extracting domain substitution cache...')
        extract_tar_file(domainsub_cache, extract_path, None)
        # Validate source tree file hashes match
        get_logger().debug('Validating substituted files in source tree...')
        with (extract_path / _INDEX_LIST).open('rb') as index_file:  #pylint: disable=no-member
            if not _validate_file_index(index_file, resolved_tree, cache_index_files):
                raise KeyError('Domain substitution cache file index is corrupt or hashes mismatch '
                               'the source tree.')
        # Move original files over substituted ones
        get_logger().debug('Moving original files over substituted ones...')
        for relative_path in cache_index_files:
            # Shift timestamps back by the same delta apply_substitution added.
            with _update_timestamp(resolved_tree / relative_path, set_new=False):
                (extract_path / _ORIG_DIR / relative_path).replace(resolved_tree / relative_path)
        # Quick check for unused files in cache
        orig_has_unused = False
        for orig_path in (extract_path / _ORIG_DIR).rglob('*'):  #pylint: disable=no-member
            if orig_path.is_file():
                get_logger().warning('Unused file from cache: %s', orig_path)
                orig_has_unused = True
    # Only delete the cache when every archived original was restored.
    if orig_has_unused:
        get_logger().warning('Cache contains unused files. Not removing.')
    else:
        domainsub_cache.unlink()
def _callback(args):
    """Dispatch to apply or revert based on the parsed CLI arguments."""
    if not args.reverting:
        apply_substitution(args.regex, args.files, args.directory, args.cache)
    else:
        revert_substitution(args.cache, args.directory)
def main():
    """CLI entrypoint: build the argument parser and invoke the callback."""
    parser = argparse.ArgumentParser()
    add_common_params(parser)
    parser.set_defaults(callback=_callback)
    subparsers = parser.add_subparsers(title='', dest='packaging')

    # "apply" subcommand: apply substitution and create the cache.
    cmd_apply = subparsers.add_parser(
        'apply',
        help='Apply domain substitution',
        description='Applies domain substitution and creates the domain substitution cache.')
    cmd_apply.add_argument(
        '-r', '--regex', type=Path, required=True,
        help='Path to domain_regex.list')
    cmd_apply.add_argument(
        '-f', '--files', type=Path, required=True,
        help='Path to domain_substitution.list')
    cmd_apply.add_argument(
        '-c', '--cache', type=Path, required=True,
        help='The path to the domain substitution cache. The path must not already exist.')
    cmd_apply.add_argument(
        'directory', type=Path,
        help='The directory to apply domain substitution')
    cmd_apply.set_defaults(reverting=False)

    # "revert" subcommand: undo substitution using only the cache.
    cmd_revert = subparsers.add_parser(
        'revert',
        help='Revert domain substitution',
        description='Reverts domain substitution based only on the domain substitution cache.')
    cmd_revert.add_argument(
        'directory', type=Path,
        help='The directory to reverse domain substitution')
    cmd_revert.add_argument(
        '-c', '--cache', type=Path, required=True,
        help=('The path to the domain substitution cache. '
              'The path must exist and will be removed if successful.'))
    cmd_revert.set_defaults(reverting=True)

    args = parser.parse_args()
    args.callback(args)


if __name__ == '__main__':
    main()
| |
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
"""Tests for plugins."""
import inspect
import io
import os.path
from xml.etree import ElementTree
import pytest
import coverage
from coverage import env
from coverage.control import Plugins
from coverage.data import line_counts
from coverage.exceptions import CoverageException, CoverageWarning
from coverage.misc import import_local_file
import coverage.plugin
from tests.coveragetest import CoverageTest
from tests.helpers import CheckUniqueFilenames
class FakeConfig:
    """A stand-in for a coverage config object, for plugin-loading tests.

    Records every module whose options were requested in `asked_for`.
    """

    def __init__(self, plugin, options):
        self.plugin = plugin
        self.options = options
        self.asked_for = []  # modules queried via get_plugin_options, in order

    def get_plugin_options(self, module):
        """Record the query; return `options` only for the configured plugin."""
        self.asked_for.append(module)
        return self.options if module == self.plugin else {}
class LoadPluginsTest(CoverageTest):
    """Test Plugins.load_plugins directly."""

    def test_implicit_boolean(self):
        # A Plugins collection is falsy when empty, truthy once a plugin loads.
        self.make_file("plugin1.py", """\
from coverage import CoveragePlugin

class Plugin(CoveragePlugin):
    pass

def coverage_init(reg, options):
    reg.add_file_tracer(Plugin())
""")
        config = FakeConfig("plugin1", {})
        plugins = Plugins.load_plugins([], config)
        assert not plugins

        plugins = Plugins.load_plugins(["plugin1"], config)
        assert plugins

    def test_importing_and_configuring(self):
        # Loading a plugin passes that plugin's options into its constructor.
        self.make_file("plugin1.py", """\
from coverage import CoveragePlugin

class Plugin(CoveragePlugin):
    def __init__(self, options):
        self.options = options
        self.this_is = "me"

def coverage_init(reg, options):
    reg.add_file_tracer(Plugin(options))
""")
        config = FakeConfig("plugin1", {'a': 'hello'})
        plugins = list(Plugins.load_plugins(["plugin1"], config))

        assert len(plugins) == 1
        assert plugins[0].this_is == "me"
        assert plugins[0].options == {'a': 'hello'}
        assert config.asked_for == ['plugin1']

    def test_importing_and_configuring_more_than_one(self):
        # Each plugin is queried for its own options; plugins are returned in
        # the order they were named.
        self.make_file("plugin1.py", """\
from coverage import CoveragePlugin

class Plugin(CoveragePlugin):
    def __init__(self, options):
        self.options = options
        self.this_is = "me"

def coverage_init(reg, options):
    reg.add_file_tracer(Plugin(options))
""")
        self.make_file("plugin2.py", """\
from coverage import CoveragePlugin

class Plugin(CoveragePlugin):
    def __init__(self, options):
        self.options = options

def coverage_init(reg, options):
    reg.add_file_tracer(Plugin(options))
""")
        config = FakeConfig("plugin1", {'a': 'hello'})
        plugins = list(Plugins.load_plugins(["plugin1", "plugin2"], config))

        assert len(plugins) == 2
        assert plugins[0].this_is == "me"
        assert plugins[0].options == {'a': 'hello'}
        assert plugins[1].options == {}
        assert config.asked_for == ['plugin1', 'plugin2']

        # The order matters...
        config = FakeConfig("plugin1", {'a': 'second'})
        plugins = list(Plugins.load_plugins(["plugin2", "plugin1"], config))

        assert len(plugins) == 2
        assert plugins[0].options == {}
        assert plugins[1].this_is == "me"
        assert plugins[1].options == {'a': 'second'}

    def test_cant_import(self):
        # A missing plugin module surfaces as a plain ImportError.
        with pytest.raises(ImportError, match="No module named '?plugin_not_there'?"):
            _ = Plugins.load_plugins(["plugin_not_there"], None)

    def test_plugin_must_define_coverage_init(self):
        # A module without a coverage_init function is rejected with a clear
        # error naming the module.
        self.make_file("no_plugin.py", """\
from coverage import CoveragePlugin
Nothing = 0
""")
        msg_pat = "Plugin module 'no_plugin' didn't define a coverage_init function"
        with pytest.raises(CoverageException, match=msg_pat):
            list(Plugins.load_plugins(["no_plugin"], None))
class PluginTest(CoverageTest):
    """Test plugins through the Coverage class."""

    def test_plugin_imported(self):
        # Prove that a plugin will be imported.
        self.make_file("my_plugin.py", """\
from coverage import CoveragePlugin

class Plugin(CoveragePlugin):
    pass

def coverage_init(reg, options):
    reg.add_noop(Plugin())

with open("evidence.out", "w") as f:
    f.write("we are here!")
""")
        self.assert_doesnt_exist("evidence.out")
        cov = coverage.Coverage()
        cov.set_option("run:plugins", ["my_plugin"])
        cov.start()
        cov.stop()  # pragma: nested

        # The module-level code in my_plugin.py ran at import time.
        with open("evidence.out") as f:
            assert f.read() == "we are here!"

    def test_missing_plugin_raises_import_error(self):
        # Prove that a missing plugin will raise an ImportError.
        with pytest.raises(ImportError, match="No module named '?does_not_exist_woijwoicweo'?"):
            cov = coverage.Coverage()
            cov.set_option("run:plugins", ["does_not_exist_woijwoicweo"])
            cov.start()
        cov.stop()

    def test_bad_plugin_isnt_hidden(self):
        # Prove that a plugin with an error in it will raise the error.
        self.make_file("plugin_over_zero.py", "1/0")
        with pytest.raises(ZeroDivisionError):
            cov = coverage.Coverage()
            cov.set_option("run:plugins", ["plugin_over_zero"])
            cov.start()
        cov.stop()

    def test_plugin_sys_info(self):
        # A plugin's sys_info() contributes a section to the --debug=sys output.
        self.make_file("plugin_sys_info.py", """\
import coverage

class Plugin(coverage.CoveragePlugin):
    def sys_info(self):
        return [("hello", "world")]

def coverage_init(reg, options):
    reg.add_file_tracer(Plugin())
""")
        debug_out = io.StringIO()
        cov = coverage.Coverage(debug=["sys"])
        cov._debug_file = debug_out
        cov.set_option("run:plugins", ["plugin_sys_info"])
        # FIX: pytest.warns(None) was deprecated in pytest 6.2 and raises
        # TypeError in pytest >= 7.  The intent here is only to silence the
        # "plugins aren't supported on PyTracer" warning, so suppress warnings
        # with the stdlib instead of asserting on them.
        import warnings
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            cov.start()
        cov.stop()  # pragma: nested

        out_lines = [line.strip() for line in debug_out.getvalue().splitlines()]
        if env.C_TRACER:
            assert 'plugins.file_tracers: plugin_sys_info.Plugin' in out_lines
        else:
            assert 'plugins.file_tracers: plugin_sys_info.Plugin (disabled)' in out_lines
        assert 'plugins.configurers: -none-' in out_lines
        expected_end = [
            "-- sys: plugin_sys_info.Plugin -------------------------------",
            "hello: world",
            "-- end -------------------------------------------------------",
        ]
        assert expected_end == out_lines[-len(expected_end):]

    def test_plugin_with_no_sys_info(self):
        # A plugin with no sys_info() still gets an (empty) section header.
        self.make_file("plugin_no_sys_info.py", """\
import coverage

class Plugin(coverage.CoveragePlugin):
    pass

def coverage_init(reg, options):
    reg.add_configurer(Plugin())
""")
        debug_out = io.StringIO()
        cov = coverage.Coverage(debug=["sys"])
        cov._debug_file = debug_out
        cov.set_option("run:plugins", ["plugin_no_sys_info"])
        cov.start()
        cov.stop()  # pragma: nested

        out_lines = [line.strip() for line in debug_out.getvalue().splitlines()]
        assert 'plugins.file_tracers: -none-' in out_lines
        assert 'plugins.configurers: plugin_no_sys_info.Plugin' in out_lines
        expected_end = [
            "-- sys: plugin_no_sys_info.Plugin ----------------------------",
            "-- end -------------------------------------------------------",
        ]
        assert expected_end == out_lines[-len(expected_end):]

    def test_local_files_are_importable(self):
        # Plugins named in the config can import sibling modules from the
        # working directory.
        self.make_file("importing_plugin.py", """\
from coverage import CoveragePlugin
import local_module

class MyPlugin(CoveragePlugin):
    pass

def coverage_init(reg, options):
    reg.add_noop(MyPlugin())
""")
        self.make_file("local_module.py", "CONST = 1")
        self.make_file(".coveragerc", """\
[run]
plugins = importing_plugin
""")
        self.make_file("main_file.py", "print('MAIN')")

        out = self.run_command("coverage run main_file.py")
        assert out == "MAIN\n"
        out = self.run_command("coverage html -q")  # sneak in a test of -q
        assert out == ""
@pytest.mark.skipif(env.C_TRACER, reason="This test is only about PyTracer.")
class PluginWarningOnPyTracerTest(CoverageTest):
    """Test that we get a controlled exception with plugins on PyTracer."""

    def test_exception_if_plugins_on_pytracer(self):
        self.make_file("simple.py", "a = 1")

        cov = coverage.Coverage()
        cov.set_option("run:plugins", ["tests.plugin1"])

        # PyTracer can't run file-tracer plugins; coverage should warn rather
        # than silently ignore them.
        expected_warnings = [
            r"Plugin file tracers \(tests.plugin1.Plugin\) aren't supported with PyTracer",
        ]
        with self.assert_warnings(cov, expected_warnings):
            self.start_import_stop(cov, "simple")
# Base class only: the actual test methods live in the subclasses below
# (GoodFileTracerTest, BadFileTracerTest).  The skipif applies to them all.
@pytest.mark.skipif(not env.C_TRACER, reason="Plugins are only supported with the C tracer.")
class FileTracerTest(CoverageTest):
    """Tests of plugins that implement file_tracer."""
class GoodFileTracerTest(FileTracerTest):
    """Tests of file tracer plugin happy paths."""

    def test_plugin1(self):
        # tests/plugin1.py claims the fictitious file try_ABC.zz has lines
        # 105-107 and 205-207.
        self.make_file("simple.py", """\
import try_xyz
a = 1
b = 2
""")
        self.make_file("try_xyz.py", """\
c = 3
d = 4
""")

        cov = coverage.Coverage()
        CheckUniqueFilenames.hook(cov, '_should_trace')
        CheckUniqueFilenames.hook(cov, '_check_include_omit_etc')
        cov.set_option("run:plugins", ["tests.plugin1"])

        # Import the Python file, executing it.
        self.start_import_stop(cov, "simple")

        _, statements, missing, _ = cov.analysis("simple.py")
        assert statements == [1, 2, 3]
        assert missing == []
        zzfile = os.path.abspath(os.path.join("/src", "try_ABC.zz"))
        _, statements, _, _ = cov.analysis(zzfile)
        assert statements == [105, 106, 107, 205, 206, 207]

    def make_render_and_caller(self):
        """Make the render.py and caller.py files we need."""
        # plugin2 emulates a dynamic tracing plugin: the caller's locals
        # are examined to determine the source file and line number.
        # The plugin is in tests/plugin2.py.
        self.make_file("render.py", """\
def render(filename, linenum):
    # This function emulates a template renderer. The plugin
    # will examine the `filename` and `linenum` locals to
    # determine the source file and line number.
    fiddle_around = 1 # not used, just chaff.
    return "[{} @ {}]".format(filename, linenum)

def helper(x):
    # This function is here just to show that not all code in
    # this file will be part of the dynamic tracing.
    return x+1
""")
        self.make_file("caller.py", """\
import sys
from render import helper, render

assert render("foo_7.html", 4) == "[foo_7.html @ 4]"
# Render foo_7.html again to try the CheckUniqueFilenames asserts.
render("foo_7.html", 4)

assert helper(42) == 43
assert render("bar_4.html", 2) == "[bar_4.html @ 2]"
assert helper(76) == 77

# quux_5.html will be omitted from the results.
assert render("quux_5.html", 3) == "[quux_5.html @ 3]"

# For Python 2, make sure unicode is working.
assert render(u"uni_3.html", 2) == "[uni_3.html @ 2]"
""")

        # will try to read the actual source files, so make some
        # source files.
        def lines(n):
            """Make a string with n lines of text."""
            return "".join("line %d\n" % i for i in range(n))

        self.make_file("bar_4.html", lines(4))
        self.make_file("foo_7.html", lines(7))

    def test_plugin2(self):
        self.make_render_and_caller()

        cov = coverage.Coverage(omit=["*quux*"])
        CheckUniqueFilenames.hook(cov, '_should_trace')
        CheckUniqueFilenames.hook(cov, '_check_include_omit_etc')
        cov.set_option("run:plugins", ["tests.plugin2"])

        self.start_import_stop(cov, "caller")

        # The way plugin2 works, a file named foo_7.html will be claimed to
        # have 7 lines in it. If render() was called with line number 4,
        # then the plugin will claim that lines 4 and 5 were executed.
        _, statements, missing, _ = cov.analysis("foo_7.html")
        assert statements == [1, 2, 3, 4, 5, 6, 7]
        assert missing == [1, 2, 3, 6, 7]
        assert "foo_7.html" in line_counts(cov.get_data())

        _, statements, missing, _ = cov.analysis("bar_4.html")
        assert statements == [1, 2, 3, 4]
        assert missing == [1, 4]
        assert "bar_4.html" in line_counts(cov.get_data())

        # quux_5.html was omitted by the omit= pattern.
        assert "quux_5.html" not in line_counts(cov.get_data())

        _, statements, missing, _ = cov.analysis("uni_3.html")
        assert statements == [1, 2, 3]
        assert missing == [1]
        assert "uni_3.html" in line_counts(cov.get_data())

    def test_plugin2_with_branch(self):
        self.make_render_and_caller()

        cov = coverage.Coverage(branch=True, omit=["*quux*"])
        CheckUniqueFilenames.hook(cov, '_should_trace')
        CheckUniqueFilenames.hook(cov, '_check_include_omit_etc')
        cov.set_option("run:plugins", ["tests.plugin2"])

        self.start_import_stop(cov, "caller")

        # The way plugin2 works, a file named foo_7.html will be claimed to
        # have 7 lines in it. If render() was called with line number 4,
        # then the plugin will claim that lines 4 and 5 were executed.
        analysis = cov._analyze("foo_7.html")
        assert analysis.statements == {1, 2, 3, 4, 5, 6, 7}
        # Plugins don't do branch coverage yet.
        assert analysis.has_arcs() is True
        assert analysis.arc_possibilities() == []

        assert analysis.missing == {1, 2, 3, 6, 7}

    def test_plugin2_with_text_report(self):
        self.make_render_and_caller()

        cov = coverage.Coverage(branch=True, omit=["*quux*"])
        cov.set_option("run:plugins", ["tests.plugin2"])

        self.start_import_stop(cov, "caller")

        repout = io.StringIO()
        total = cov.report(file=repout, include=["*.html"], omit=["uni*.html"], show_missing=True)
        report = repout.getvalue().splitlines()
        expected = [
            'Name Stmts Miss Branch BrPart Cover Missing',
            '--------------------------------------------------------',
            'bar_4.html 4 2 0 0 50% 1, 4',
            'foo_7.html 7 5 0 0 29% 1-3, 6-7',
            '--------------------------------------------------------',
            'TOTAL 11 7 0 0 36%',
        ]
        assert expected == report
        assert round(abs(total-36.36), 2) == 0

    def test_plugin2_with_html_report(self):
        self.make_render_and_caller()

        cov = coverage.Coverage(branch=True, omit=["*quux*"])
        cov.set_option("run:plugins", ["tests.plugin2"])

        self.start_import_stop(cov, "caller")

        total = cov.html_report(include=["*.html"], omit=["uni*.html"])
        assert round(abs(total-36.36), 2) == 0

        self.assert_exists("htmlcov/index.html")
        self.assert_exists("htmlcov/bar_4_html.html")
        self.assert_exists("htmlcov/foo_7_html.html")

    def test_plugin2_with_xml_report(self):
        self.make_render_and_caller()

        cov = coverage.Coverage(branch=True, omit=["*quux*"])
        cov.set_option("run:plugins", ["tests.plugin2"])

        self.start_import_stop(cov, "caller")

        total = cov.xml_report(include=["*.html"], omit=["uni*.html"])
        assert round(abs(total-36.36), 2) == 0

        dom = ElementTree.parse("coverage.xml")
        classes = {}
        for elt in dom.findall(".//class"):
            classes[elt.get('name')] = elt

        assert classes['bar_4.html'].attrib == {
            'branch-rate': '1',
            'complexity': '0',
            'filename': 'bar_4.html',
            'line-rate': '0.5',
            'name': 'bar_4.html',
        }
        assert classes['foo_7.html'].attrib == {
            'branch-rate': '1',
            'complexity': '0',
            'filename': 'foo_7.html',
            'line-rate': '0.2857',
            'name': 'foo_7.html',
        }

    def test_defer_to_python(self):
        # A plugin that measures, but then wants built-in python reporting.
        self.make_file("fairly_odd_plugin.py", """\
# A plugin that claims all the odd lines are executed, and none of
# the even lines, and then punts reporting off to the built-in
# Python reporting.
import coverage.plugin

class Plugin(coverage.CoveragePlugin):
    def file_tracer(self, filename):
        return OddTracer(filename)
    def file_reporter(self, filename):
        return "python"

class OddTracer(coverage.plugin.FileTracer):
    def __init__(self, filename):
        self.filename = filename
    def source_filename(self):
        return self.filename
    def line_number_range(self, frame):
        lineno = frame.f_lineno
        if lineno % 2:
            return (lineno, lineno)
        else:
            return (-1, -1)

def coverage_init(reg, options):
    reg.add_file_tracer(Plugin())
""")
        self.make_file("unsuspecting.py", """\
a = 1
b = 2
c = 3
d = 4
e = 5
f = 6
""")
        cov = coverage.Coverage(include=["unsuspecting.py"])
        cov.set_option("run:plugins", ["fairly_odd_plugin"])
        self.start_import_stop(cov, "unsuspecting")

        repout = io.StringIO()
        total = cov.report(file=repout, show_missing=True)
        report = repout.getvalue().splitlines()
        expected = [
            'Name Stmts Miss Cover Missing',
            '-----------------------------------------------',
            'unsuspecting.py 6 3 50% 2, 4, 6',
            '-----------------------------------------------',
            'TOTAL 6 3 50%',
        ]
        assert expected == report
        assert total == 50

    def test_find_unexecuted(self):
        # find_executable_files lets a plugin report on files that never ran.
        self.make_file("unexecuted_plugin.py", """\
import os
import coverage.plugin

class Plugin(coverage.CoveragePlugin):
    def file_tracer(self, filename):
        if filename.endswith("foo.py"):
            return MyTracer(filename)
    def file_reporter(self, filename):
        return MyReporter(filename)
    def find_executable_files(self, src_dir):
        # Check that src_dir is the right value
        files = os.listdir(src_dir)
        assert "foo.py" in files
        assert "unexecuted_plugin.py" in files
        return ["chimera.py"]

class MyTracer(coverage.plugin.FileTracer):
    def __init__(self, filename):
        self.filename = filename
    def source_filename(self):
        return self.filename
    def line_number_range(self, frame):
        return (999, 999)

class MyReporter(coverage.FileReporter):
    def lines(self):
        return {99, 999, 9999}

def coverage_init(reg, options):
    reg.add_file_tracer(Plugin())
""")
        self.make_file("foo.py", "a = 1")

        cov = coverage.Coverage(source=['.'])
        cov.set_option("run:plugins", ["unexecuted_plugin"])
        self.start_import_stop(cov, "foo")

        # The file we executed claims to have run line 999.
        _, statements, missing, _ = cov.analysis("foo.py")
        assert statements == [99, 999, 9999]
        assert missing == [99, 9999]

        # The completely missing file is in the results.
        _, statements, missing, _ = cov.analysis("chimera.py")
        assert statements == [99, 999, 9999]
        assert missing == [99, 999, 9999]

        # But completely new filenames are not in the results.
        assert len(cov.get_data().measured_files()) == 3

        with pytest.raises(CoverageException):
            cov.analysis("fictional.py")
class BadFileTracerTest(FileTracerTest):
    """Test error handling around file tracer plugins."""

    def run_plugin(self, module_name):
        """Run a plugin with the given module_name.

        Uses a few fixed Python files.

        Returns the Coverage object.
        """
        self.make_file("simple.py", """\
import other, another
a = other.f(2)
b = other.f(3)
c = another.g(4)
d = another.g(5)
""")
        # The names of these files are important: some plugins apply themselves
        # to "*other.py".
        self.make_file("other.py", """\
def f(x):
    return x+1
""")
        self.make_file("another.py", """\
def g(x):
    return x-1
""")

        cov = coverage.Coverage()
        cov.set_option("run:plugins", [module_name])
        self.start_import_stop(cov, "simple")
        cov.save() # pytest-cov does a save after stop, so we'll do it too.
        return cov

    def run_bad_plugin(self, module_name, plugin_name, our_error=True, excmsg=None, excmsgs=None):
        """Run a file, and see that the plugin failed.

        `module_name` and `plugin_name` is the module and name of the plugin to
        use.

        `our_error` is True if the error reported to the user will be an
        explicit error in our test code, marked with an '# Oh noes!' comment.

        `excmsg`, if provided, is text that must appear in the stderr.

        `excmsgs`, if provided, is a list of messages, one of which must
        appear in the stderr.

        The plugin will be disabled, and we check that a warning is output
        explaining why.
        """
        with pytest.warns(Warning) as warns:
            self.run_plugin(module_name)

        stderr = self.stderr()
        # Collect the warning texts alongside stderr so either channel counts.
        stderr += "".join(w.message.args[0] for w in warns)
        if our_error:
            # The exception we're causing should only appear once.
            assert stderr.count("# Oh noes!") == 1

        # There should be a warning explaining what's happening, but only one.
        # The message can be in two forms:
        # Disabling plug-in '...' due to previous exception
        # or:
        # Disabling plug-in '...' due to an exception:
        print([str(w) for w in warns.list])
        warns = [w for w in warns.list if issubclass(w.category, CoverageWarning)]
        assert len(warns) == 1
        warnmsg = warns[0].message.args[0]
        assert f"Disabling plug-in '{module_name}.{plugin_name}' due to " in warnmsg

        if excmsg:
            assert excmsg in stderr
        if excmsgs:
            found_exc = any(em in stderr for em in excmsgs) # pragma: part covered
            assert found_exc, f"expected one of {excmsgs} in stderr"

    def test_file_tracer_has_no_file_tracer_method(self):
        # A registered object that isn't even a CoveragePlugin fails cleanly.
        self.make_file("bad_plugin.py", """\
class Plugin(object):
    pass

def coverage_init(reg, options):
    reg.add_file_tracer(Plugin())
""")
        self.run_bad_plugin("bad_plugin", "Plugin", our_error=False)

    def test_file_tracer_has_inherited_sourcefilename_method(self):
        # FileTracer.source_filename is abstract; inheriting it is an error.
        self.make_file("bad_plugin.py", """\
import coverage

class Plugin(coverage.CoveragePlugin):
    def file_tracer(self, filename):
        # Just grab everything.
        return FileTracer()

class FileTracer(coverage.FileTracer):
    pass

def coverage_init(reg, options):
    reg.add_file_tracer(Plugin())
""")
        self.run_bad_plugin(
            "bad_plugin", "Plugin", our_error=False,
            excmsg="Class 'bad_plugin.FileTracer' needs to implement source_filename()",
        )

    def test_plugin_has_inherited_filereporter_method(self):
        # The measurement phase works; the failure surfaces at report time.
        self.make_file("bad_plugin.py", """\
import coverage

class Plugin(coverage.CoveragePlugin):
    def file_tracer(self, filename):
        # Just grab everything.
        return FileTracer()

class FileTracer(coverage.FileTracer):
    def source_filename(self):
        return "foo.xxx"

def coverage_init(reg, options):
    reg.add_file_tracer(Plugin())
""")
        cov = self.run_plugin("bad_plugin")
        expected_msg = "Plugin 'bad_plugin.Plugin' needs to implement file_reporter()"
        with pytest.raises(NotImplementedError, match=expected_msg):
            cov.report()

    def test_file_tracer_fails(self):
        self.make_file("bad_plugin.py", """\
import coverage.plugin

class Plugin(coverage.plugin.CoveragePlugin):
    def file_tracer(self, filename):
        17/0 # Oh noes!

def coverage_init(reg, options):
    reg.add_file_tracer(Plugin())
""")
        self.run_bad_plugin("bad_plugin", "Plugin")

    def test_file_tracer_fails_eventually(self):
        # Django coverage plugin can report on a few files and then fail.
        # https://github.com/nedbat/coveragepy/issues/1011
        self.make_file("bad_plugin.py", """\
import os.path
import coverage.plugin

class Plugin(coverage.plugin.CoveragePlugin):
    def __init__(self):
        self.calls = 0

    def file_tracer(self, filename):
        print(filename)
        self.calls += 1
        if self.calls <= 2:
            return FileTracer(filename)
        else:
            17/0 # Oh noes!

class FileTracer(coverage.FileTracer):
    def __init__(self, filename):
        self.filename = filename
    def source_filename(self):
        return os.path.basename(self.filename).replace(".py", ".foo")
    def line_number_range(self, frame):
        return -1, -1

def coverage_init(reg, options):
    reg.add_file_tracer(Plugin())
""")
        self.run_bad_plugin("bad_plugin", "Plugin")

    def test_file_tracer_returns_wrong(self):
        # file_tracer must return a FileTracer or None, not a float.
        self.make_file("bad_plugin.py", """\
import coverage.plugin

class Plugin(coverage.plugin.CoveragePlugin):
    def file_tracer(self, filename):
        return 3.14159

def coverage_init(reg, options):
    reg.add_file_tracer(Plugin())
""")
        self.run_bad_plugin(
            "bad_plugin", "Plugin", our_error=False, excmsg="'float' object has no attribute",
        )

    def test_has_dynamic_source_filename_fails(self):
        self.make_file("bad_plugin.py", """\
import coverage.plugin

class Plugin(coverage.plugin.CoveragePlugin):
    def file_tracer(self, filename):
        return BadFileTracer()

class BadFileTracer(coverage.plugin.FileTracer):
    def has_dynamic_source_filename(self):
        23/0 # Oh noes!

def coverage_init(reg, options):
    reg.add_file_tracer(Plugin())
""")
        self.run_bad_plugin("bad_plugin", "Plugin")

    def test_source_filename_fails(self):
        self.make_file("bad_plugin.py", """\
import coverage.plugin

class Plugin(coverage.plugin.CoveragePlugin):
    def file_tracer(self, filename):
        return BadFileTracer()

class BadFileTracer(coverage.plugin.FileTracer):
    def source_filename(self):
        42/0 # Oh noes!

def coverage_init(reg, options):
    reg.add_file_tracer(Plugin())
""")
        self.run_bad_plugin("bad_plugin", "Plugin")

    def test_source_filename_returns_wrong(self):
        # source_filename must return a string; the exact downstream error
        # text differs across Python versions, so accept any of these.
        self.make_file("bad_plugin.py", """\
import coverage.plugin

class Plugin(coverage.plugin.CoveragePlugin):
    def file_tracer(self, filename):
        return BadFileTracer()

class BadFileTracer(coverage.plugin.FileTracer):
    def source_filename(self):
        return 17.3

def coverage_init(reg, options):
    reg.add_file_tracer(Plugin())
""")
        self.run_bad_plugin(
            "bad_plugin", "Plugin", our_error=False,
            excmsgs=[
                "expected str, bytes or os.PathLike object, not float",
                "'float' object has no attribute",
                "object of type 'float' has no len()",
                "'float' object is unsubscriptable",
            ],
        )

    def test_dynamic_source_filename_fails(self):
        self.make_file("bad_plugin.py", """\
import coverage.plugin

class Plugin(coverage.plugin.CoveragePlugin):
    def file_tracer(self, filename):
        if filename.endswith("other.py"):
            return BadFileTracer()

class BadFileTracer(coverage.plugin.FileTracer):
    def has_dynamic_source_filename(self):
        return True
    def dynamic_source_filename(self, filename, frame):
        101/0 # Oh noes!

def coverage_init(reg, options):
    reg.add_file_tracer(Plugin())
""")
        self.run_bad_plugin("bad_plugin", "Plugin")

    def test_line_number_range_raises_error(self):
        self.make_file("bad_plugin.py", """\
import coverage.plugin

class Plugin(coverage.plugin.CoveragePlugin):
    def file_tracer(self, filename):
        if filename.endswith("other.py"):
            return BadFileTracer()

class BadFileTracer(coverage.plugin.FileTracer):
    def source_filename(self):
        return "something.foo"

    def line_number_range(self, frame):
        raise Exception("borked!")

def coverage_init(reg, options):
    reg.add_file_tracer(Plugin())
""")
        self.run_bad_plugin(
            "bad_plugin", "Plugin", our_error=False, excmsg="borked!",
        )

    def test_line_number_range_returns_non_tuple(self):
        self.make_file("bad_plugin.py", """\
import coverage.plugin

class Plugin(coverage.plugin.CoveragePlugin):
    def file_tracer(self, filename):
        if filename.endswith("other.py"):
            return BadFileTracer()

class BadFileTracer(coverage.plugin.FileTracer):
    def source_filename(self):
        return "something.foo"

    def line_number_range(self, frame):
        return 42.23

def coverage_init(reg, options):
    reg.add_file_tracer(Plugin())
""")
        self.run_bad_plugin(
            "bad_plugin", "Plugin", our_error=False, excmsg="line_number_range must return 2-tuple",
        )

    def test_line_number_range_returns_triple(self):
        self.make_file("bad_plugin.py", """\
import coverage.plugin

class Plugin(coverage.plugin.CoveragePlugin):
    def file_tracer(self, filename):
        if filename.endswith("other.py"):
            return BadFileTracer()

class BadFileTracer(coverage.plugin.FileTracer):
    def source_filename(self):
        return "something.foo"

    def line_number_range(self, frame):
        return (1, 2, 3)

def coverage_init(reg, options):
    reg.add_file_tracer(Plugin())
""")
        self.run_bad_plugin(
            "bad_plugin", "Plugin", our_error=False, excmsg="line_number_range must return 2-tuple",
        )

    def test_line_number_range_returns_pair_of_strings(self):
        self.make_file("bad_plugin.py", """\
import coverage.plugin

class Plugin(coverage.plugin.CoveragePlugin):
    def file_tracer(self, filename):
        if filename.endswith("other.py"):
            return BadFileTracer()

class BadFileTracer(coverage.plugin.FileTracer):
    def source_filename(self):
        return "something.foo"

    def line_number_range(self, frame):
        return ("5", "7")

def coverage_init(reg, options):
    reg.add_file_tracer(Plugin())
""")
        self.run_bad_plugin(
            "bad_plugin", "Plugin", our_error=False,
            excmsgs=[
                "an integer is required",
                "cannot be interpreted as an integer",
            ],
        )
class ConfigurerPluginTest(CoverageTest):
    """Test configuring plugins."""

    # These tests don't create files, so no temp dir is needed.
    run_in_temp_dir = False

    def test_configurer_plugin(self):
        # tests/plugin_config registers a configurer that adds exclude_lines
        # patterns during coverage_init.
        cov = coverage.Coverage()
        cov.set_option("run:plugins", ["tests.plugin_config"])
        cov.start()
        cov.stop()  # pragma: nested
        excluded = cov.get_option("report:exclude_lines")
        assert "pragma: custom" in excluded
        assert "pragma: or whatever" in excluded
class DynamicContextPluginTest(CoverageTest):
    """Tests of plugins that implement `dynamic_context`."""

    def make_plugin_capitalized_testnames(self, filename):
        """Create a dynamic context plugin that capitalizes the part after 'test_'."""
        self.make_file(filename, """\
from coverage import CoveragePlugin

class Plugin(CoveragePlugin):
    def dynamic_context(self, frame):
        name = frame.f_code.co_name
        if name.startswith(("test_", "doctest_")):
            parts = name.split("_", 1)
            return "%s:%s" % (parts[0], parts[1].upper())
        return None

def coverage_init(reg, options):
    reg.add_dynamic_context(Plugin())
""")

    def make_plugin_track_render(self, filename):
        """Make a dynamic context plugin that tracks 'render_' functions."""
        self.make_file(filename, """\
from coverage import CoveragePlugin

class Plugin(CoveragePlugin):
    def dynamic_context(self, frame):
        name = frame.f_code.co_name
        if name.startswith("render_"):
            return 'renderer:' + name[7:]
        return None

def coverage_init(reg, options):
    reg.add_dynamic_context(Plugin())
""")

    def make_test_files(self):
        """Make some files to use while testing dynamic context plugins."""
        self.make_file("rendering.py", """\
def html_tag(tag, content):
    return '<%s>%s</%s>' % (tag, content, tag)

def render_paragraph(text):
    return html_tag('p', text)

def render_span(text):
    return html_tag('span', text)

def render_bold(text):
    return html_tag('b', text)
""")
        self.make_file("testsuite.py", """\
import rendering

def test_html_tag():
    assert rendering.html_tag('b', 'hello') == '<b>hello</b>'

def doctest_html_tag():
    assert eval('''
        rendering.html_tag('i', 'text') == '<i>text</i>'
        '''.strip())

def test_renderers():
    assert rendering.render_paragraph('hello') == '<p>hello</p>'
    assert rendering.render_bold('wide') == '<b>wide</b>'
    assert rendering.render_span('world') == '<span>world</span>'

def build_full_html():
    html = '<html><body>%s</body></html>' % (
        rendering.render_paragraph(
            rendering.render_span('hello')))
    return html
""")

    def run_all_functions(self, cov, suite_name): # pragma: nested
        """Run all functions in `suite_name` under coverage."""
        cov.start()
        suite = import_local_file(suite_name)
        try:
            # Call all functions in this module
            for name in dir(suite):
                variable = getattr(suite, name)
                if inspect.isfunction(variable):
                    variable()
        finally:
            cov.stop()

    def test_plugin_standalone(self):
        self.make_plugin_capitalized_testnames('plugin_tests.py')
        self.make_test_files()

        # Enable dynamic context plugin
        cov = coverage.Coverage()
        cov.set_option("run:plugins", ['plugin_tests'])

        # Run the tests
        self.run_all_functions(cov, 'testsuite')

        # Labeled coverage is collected
        data = cov.get_data()
        filenames = self.get_measured_filenames(data)
        expected = ['', 'doctest:HTML_TAG', 'test:HTML_TAG', 'test:RENDERERS']
        assert expected == sorted(data.measured_contexts())
        data.set_query_context("doctest:HTML_TAG")
        assert [2] == data.lines(filenames['rendering.py'])
        data.set_query_context("test:HTML_TAG")
        assert [2] == data.lines(filenames['rendering.py'])
        data.set_query_context("test:RENDERERS")
        assert [2, 5, 8, 11] == sorted(data.lines(filenames['rendering.py']))

    def test_static_context(self):
        self.make_plugin_capitalized_testnames('plugin_tests.py')
        self.make_test_files()

        # Enable dynamic context plugin for coverage with named context
        cov = coverage.Coverage(context='mytests')
        cov.set_option("run:plugins", ['plugin_tests'])

        # Run the tests
        self.run_all_functions(cov, 'testsuite')

        # Static context prefix is preserved
        data = cov.get_data()
        expected = [
            'mytests',
            'mytests|doctest:HTML_TAG',
            'mytests|test:HTML_TAG',
            'mytests|test:RENDERERS',
        ]
        assert expected == sorted(data.measured_contexts())

    def test_plugin_with_test_function(self):
        self.make_plugin_capitalized_testnames('plugin_tests.py')
        self.make_test_files()

        # Enable both a plugin and test_function dynamic context
        cov = coverage.Coverage()
        cov.set_option("run:plugins", ['plugin_tests'])
        cov.set_option("run:dynamic_context", "test_function")

        # Run the tests
        self.run_all_functions(cov, 'testsuite')

        # test_function takes precedence over plugins - only
        # functions that are not labeled by test_function are
        # labeled by plugin_tests.
        data = cov.get_data()
        filenames = self.get_measured_filenames(data)
        expected = [
            '',
            'doctest:HTML_TAG',
            'testsuite.test_html_tag',
            'testsuite.test_renderers',
        ]
        assert expected == sorted(data.measured_contexts())

        def assert_context_lines(context, lines):
            # Helper: lines of rendering.py recorded under `context`.
            data.set_query_context(context)
            assert lines == sorted(data.lines(filenames['rendering.py']))

        assert_context_lines("doctest:HTML_TAG", [2])
        assert_context_lines("testsuite.test_html_tag", [2])
        assert_context_lines("testsuite.test_renderers", [2, 5, 8, 11])

    def test_multiple_plugins(self):
        self.make_plugin_capitalized_testnames('plugin_tests.py')
        self.make_plugin_track_render('plugin_renderers.py')
        self.make_test_files()

        # Enable two plugins
        cov = coverage.Coverage()
        cov.set_option("run:plugins", ['plugin_renderers', 'plugin_tests'])

        self.run_all_functions(cov, 'testsuite')

        # It is important to note, that line 11 (render_bold function) is never
        # labeled as renderer:bold context, because it is only called from
        # test_renderers function - so it already falls under test:RENDERERS
        # context.
        #
        # render_paragraph and render_span (lines 5, 8) are directly called by
        # testsuite.build_full_html, so they get labeled by renderers plugin.
        data = cov.get_data()
        filenames = self.get_measured_filenames(data)
        expected = [
            '',
            'doctest:HTML_TAG',
            'renderer:paragraph',
            'renderer:span',
            'test:HTML_TAG',
            'test:RENDERERS',
        ]
        assert expected == sorted(data.measured_contexts())

        def assert_context_lines(context, lines):
            # Helper: lines of rendering.py recorded under `context`.
            data.set_query_context(context)
            assert lines == sorted(data.lines(filenames['rendering.py']))

        assert_context_lines("test:HTML_TAG", [2])
        assert_context_lines("test:RENDERERS", [2, 5, 8, 11])
        assert_context_lines("doctest:HTML_TAG", [2])
        assert_context_lines("renderer:paragraph", [2, 5])
        assert_context_lines("renderer:span", [2, 8])
| |
"""Certbot client interfaces."""
from abc import ABCMeta
from abc import abstractmethod
from argparse import ArgumentParser
import sys
from types import ModuleType
from typing import Any
from typing import Union
from typing import cast
from typing import Iterable
from typing import List
from typing import Optional
from typing import Type
from typing import TYPE_CHECKING
import warnings
import zope.interface
from acme.challenges import Challenge
from acme.challenges import ChallengeResponse
from acme.client import ClientBase
from certbot import configuration
from certbot.achallenges import AnnotatedChallenge
if TYPE_CHECKING:
from certbot._internal.account import Account
class AccountStorage(metaclass=ABCMeta):
    """Accounts storage interface.

    Abstract base class: concrete storages must implement :meth:`find_all`,
    :meth:`load` and :meth:`save`.
    """

    @abstractmethod
    def find_all(self) -> List['Account']:  # pragma: no cover
        """Find all accounts.

        :returns: All found accounts.
        :rtype: list

        """
        raise NotImplementedError()

    @abstractmethod
    def load(self, account_id: str) -> 'Account':  # pragma: no cover
        """Load an account by its id.

        :raises .AccountNotFound: if account could not be found
        :raises .AccountStorageError: if account could not be loaded

        :returns: The account loaded
        :rtype: .Account

        """
        raise NotImplementedError()

    @abstractmethod
    def save(self, account: 'Account', client: ClientBase) -> None:  # pragma: no cover
        """Save account.

        :raises .AccountStorageError: if account could not be saved

        """
        raise NotImplementedError()
# Legacy zope.interface declarations, kept only so that third-party code
# importing them keeps working; accessing them triggers a DeprecationWarning
# through _ZopeInterfacesDeprecationModule at the bottom of this module.
class IConfig(zope.interface.Interface):  # pylint: disable=inherit-non-class
    """Deprecated, use certbot.configuration.NamespaceConfig instead."""


class IPluginFactory(zope.interface.Interface):  # pylint: disable=inherit-non-class
    """Deprecated, use certbot.interfaces.Plugin as ABC instead."""


class IPlugin(zope.interface.Interface):  # pylint: disable=inherit-non-class
    """Deprecated, use certbot.interfaces.Plugin as ABC instead."""
class Plugin(metaclass=ABCMeta):
    """Certbot plugin.

    Objects providing this interface will be called without satisfying
    any entry point "extras" (extra dependencies) you might have defined
    for your plugin, e.g. (excerpt from ``setup.py`` script)::

      setup(
          ...
          entry_points={
              'certbot.plugins': [
                  'name=example_project.plugin[plugin_deps]',
              ],
          },
          extras_require={
              'plugin_deps': ['dep1', 'dep2'],
          }
      )

    Therefore, make sure such objects are importable and usable without
    extras. This is necessary, because CLI does the following operations
    (in order):

      - loads an entry point,
      - calls `inject_parser_options`,
      - requires an entry point,
      - creates plugin instance (`__call__`).

    """

    # Both attributes default to NotImplemented; concrete plugins override them.
    description: str = NotImplemented
    """Short plugin description"""

    name: str = NotImplemented
    """Unique name of the plugin"""

    @abstractmethod
    def __init__(self, config: Optional[configuration.NamespaceConfig], name: str) -> None:
        """Create a new `Plugin`.

        :param configuration.NamespaceConfig config: Configuration.
        :param str name: Unique plugin name.

        """
        super().__init__()

    @abstractmethod
    def prepare(self) -> None:
        """Prepare the plugin.

        Finish up any additional initialization.

        :raises .PluginError:
            when full initialization cannot be completed.
        :raises .MisconfigurationError:
            when full initialization cannot be completed. Plugin will
            be displayed on a list of available plugins.
        :raises .NoInstallationError:
            when the necessary programs/files cannot be located. Plugin
            will NOT be displayed on a list of available plugins.
        :raises .NotSupportedError:
            when the installation is recognized, but the version is not
            currently supported.

        """

    @abstractmethod
    def more_info(self) -> str:
        """Human-readable string to help the user.

        Should describe the steps taken and any relevant info to help the user
        decide which plugin to use.

        :rtype str:

        """

    @classmethod
    @abstractmethod
    def inject_parser_options(cls, parser: ArgumentParser, name: str) -> None:
        """Inject argument parser options (flags).

        1. Be nice and prepend all options and destinations with
        `~.common.option_namespace` and `~common.dest_namespace`.

        2. Inject options (flags) only. Positional arguments are not
        allowed, as this would break the CLI.

        :param ArgumentParser parser: (Almost) top-level CLI parser.
        :param str name: Unique plugin name.

        """
# Legacy zope interface; superseded by the Authenticator ABC below.
class IAuthenticator(IPlugin):  # pylint: disable=inherit-non-class
    """Deprecated, use certbot.interfaces.Authenticator as ABC instead."""
class Authenticator(Plugin):
    """Generic Certbot Authenticator.

    Class represents all possible tools/processes that have the
    ability to perform challenges and attain a certificate.

    """

    @abstractmethod
    def get_chall_pref(self, domain: str) -> Iterable[Type[Challenge]]:
        """Return `collections.Iterable` of challenge preferences.

        :param str domain: Domain for which challenge preferences are sought.

        :returns: `collections.Iterable` of challenge types (subclasses of
            :class:`acme.challenges.Challenge`) with the most
            preferred challenges first. If a type is not specified, it means the
            Authenticator cannot perform the challenge.
        :rtype: `collections.Iterable`

        """

    @abstractmethod
    def perform(self, achalls: List[AnnotatedChallenge]) -> List[ChallengeResponse]:
        """Perform the given challenge.

        :param list achalls: Non-empty (guaranteed) list of
            :class:`~certbot.achallenges.AnnotatedChallenge`
            instances, such that it contains types found within
            :func:`get_chall_pref` only.

        :returns: list of ACME
            :class:`~acme.challenges.ChallengeResponse` instances corresponding to each provided
            :class:`~acme.challenges.Challenge`.
        :rtype: :class:`collections.List` of
            :class:`acme.challenges.ChallengeResponse`,
            where responses are required to be returned in
            the same order as corresponding input challenges

        :raises .PluginError: If some or all challenges cannot be performed

        """

    @abstractmethod
    def cleanup(self, achalls: List[AnnotatedChallenge]) -> None:
        """Revert changes and shutdown after challenges complete.

        This method should be able to revert all changes made by
        perform, even if perform exited abnormally.

        :param list achalls: Non-empty (guaranteed) list of
            :class:`~certbot.achallenges.AnnotatedChallenge`
            instances, a subset of those previously passed to :func:`perform`.

        :raises PluginError: if original configuration cannot be restored

        """
# Legacy zope interface; superseded by the Installer ABC below.
class IInstaller(IPlugin):  # pylint: disable=inherit-non-class
    """Deprecated, use certbot.interfaces.Installer as ABC instead."""
class Installer(Plugin):
    """Generic Certbot Installer Interface.

    Represents any server on which an X509 certificate can be placed.

    It is assumed that :func:`save` is the only method that finalizes a
    checkpoint. This is important to ensure that checkpoints are
    restored in a consistent manner if requested by the user or in case
    of an error.

    Using :class:`certbot.reverter.Reverter` to implement checkpoints,
    rollback, and recovery can dramatically simplify plugin development.

    """

    @abstractmethod
    def get_all_names(self) -> Iterable[str]:
        """Returns all names that may be authenticated.

        :rtype: `collections.Iterable` of `str`

        """

    @abstractmethod
    def deploy_cert(self, domain: str, cert_path: str, key_path: str,
                    chain_path: str, fullchain_path: str) -> None:
        """Deploy certificate.

        :param str domain: domain to deploy certificate file
        :param str cert_path: absolute path to the certificate file
        :param str key_path: absolute path to the private key file
        :param str chain_path: absolute path to the certificate chain file
        :param str fullchain_path: absolute path to the certificate fullchain
            file (cert plus chain)

        :raises .PluginError: when cert cannot be deployed

        """

    @abstractmethod
    def enhance(self, domain: str, enhancement: str,
                options: Optional[Union[List[str], str]] = None) -> None:
        """Perform a configuration enhancement.

        :param str domain: domain for which to provide enhancement
        :param str enhancement: An enhancement as defined in
            :const:`~certbot.plugins.enhancements.ENHANCEMENTS`
        :param options: Flexible options parameter for enhancement.
            Check documentation of
            :const:`~certbot.plugins.enhancements.ENHANCEMENTS`
            for expected options for each enhancement.

        :raises .PluginError: If Enhancement is not supported, or if
            an error occurs during the enhancement.

        """

    @abstractmethod
    def supported_enhancements(self) -> List[str]:
        """Returns a `collections.Iterable` of supported enhancements.

        :returns: supported enhancements which should be a subset of
            :const:`~certbot.plugins.enhancements.ENHANCEMENTS`
        :rtype: :class:`collections.Iterable` of :class:`str`

        """

    @abstractmethod
    def save(self, title: Optional[str] = None, temporary: bool = False) -> None:
        """Saves all changes to the configuration files.

        Both title and temporary are needed because a save may be
        intended to be permanent, but the save is not ready to be a full
        checkpoint.

        It is assumed that at most one checkpoint is finalized by this
        method. Additionally, if an exception is raised, it is assumed a
        new checkpoint was not finalized.

        :param str title: The title of the save. If a title is given, the
            configuration will be saved as a new checkpoint and put in a
            timestamped directory. `title` has no effect if temporary is true.

        :param bool temporary: Indicates whether the changes made will
            be quickly reversed in the future (challenges)

        :raises .PluginError: when save is unsuccessful

        """

    @abstractmethod
    def rollback_checkpoints(self, rollback: int = 1) -> None:
        """Revert `rollback` number of configuration checkpoints.

        :raises .PluginError: when configuration cannot be fully reverted

        """

    @abstractmethod
    def recovery_routine(self) -> None:
        """Revert configuration to most recent finalized checkpoint.

        Remove all changes (temporary and permanent) that have not been
        finalized. This is useful to protect against crashes and other
        execution interruptions.

        :raises .errors.PluginError: If unable to recover the configuration

        """

    @abstractmethod
    def config_test(self) -> None:
        """Make sure the configuration is valid.

        :raises .MisconfigurationError: when the config is not in a usable state

        """

    @abstractmethod
    def restart(self) -> None:
        """Restart or refresh the server content.

        :raises .PluginError: when server cannot be restarted

        """
# Legacy zope interfaces with no ABC replacement in this module; accessing
# them emits a DeprecationWarning via _ZopeInterfacesDeprecationModule below.
class IDisplay(zope.interface.Interface):  # pylint: disable=inherit-non-class
    """Deprecated, use your own Display implementation instead."""


class IReporter(zope.interface.Interface):  # pylint: disable=inherit-non-class
    """Deprecated, use your own Reporter implementation instead."""
class RenewableCert(metaclass=ABCMeta):
    """Interface to a certificate lineage.

    Exposes the on-disk paths of the lineage's files (certificate, key,
    chain, full chain), the lineage name, and the certificate's subject
    names.
    """

    @property
    @abstractmethod
    def cert_path(self) -> str:
        """Path to the certificate file.

        :rtype: str

        """

    @property
    @abstractmethod
    def key_path(self) -> str:
        """Path to the private key file.

        :rtype: str

        """

    @property
    @abstractmethod
    def chain_path(self) -> str:
        """Path to the certificate chain file.

        :rtype: str

        """

    @property
    @abstractmethod
    def fullchain_path(self) -> str:
        """Path to the full chain file.

        The full chain is the certificate file plus the chain file.

        :rtype: str

        """

    @property
    @abstractmethod
    def lineagename(self) -> str:
        """Name given to the certificate lineage.

        :rtype: str

        """

    @abstractmethod
    def names(self) -> List[str]:
        """What are the subject names of this certificate?

        :returns: the subject names
        :rtype: `list` of `str`
        :raises .CertStorageError: if could not find cert file.

        """
# Updater interfaces
#
# When "certbot renew" is run, Certbot will iterate over each lineage and check
# if the selected installer for that lineage is a subclass of each updater
# class. If it is and the update of that type is configured to be run for that
# lineage, the relevant update function will be called for it. These functions
# are never called for other subcommands, so if an installer wants to perform
# an update during the run or install subcommand, it should do so when
# :func:`Installer.deploy_cert` is called.
class GenericUpdater(metaclass=ABCMeta):
    """Interface for update types not currently specified by Certbot.

    This class allows plugins to perform types of updates that Certbot hasn't
    defined (yet).

    To make use of this interface, the installer should implement the interface
    methods, and interfaces.GenericUpdater.register(InstallerClass) should
    be called from the installer code.

    The plugins implementing this enhancement are responsible for handling
    the saving of configuration checkpoints as well as other calls to
    interface methods of `interfaces.Installer` such as prepare() and restart()
    """

    @abstractmethod
    def generic_updates(self, lineage: RenewableCert, *args: Any, **kwargs: Any) -> None:
        """Perform any update types defined by the installer.

        If an installer is a subclass of the class containing this method, this
        function will always be called when "certbot renew" is run. If the
        update defined by the installer should be run conditionally, the
        installer needs to handle checking the conditions itself.

        This method is called once for each lineage.

        :param lineage: Certificate lineage object
        :type lineage: RenewableCert

        """
class RenewDeployer(metaclass=ABCMeta):
    """Interface for update types run when a lineage is renewed.

    This class allows plugins to perform types of updates that need to run at
    lineage renewal that Certbot hasn't defined (yet).

    To make use of this interface, the installer should implement the interface
    methods, and interfaces.RenewDeployer.register(InstallerClass) should
    be called from the installer code.
    """

    @abstractmethod
    def renew_deploy(self, lineage: RenewableCert, *args: Any, **kwargs: Any) -> None:
        """Perform updates defined by installer when a certificate has been renewed.

        If an installer is a subclass of the class containing this method, this
        function will always be called when a certificate has been renewed by
        running "certbot renew". For example if a plugin needs to copy a
        certificate over, or change configuration based on the new certificate.

        This method is called once for each lineage renewed.

        :param lineage: Certificate lineage object
        :type lineage: RenewableCert

        """
# This class takes a similar approach to the cryptography project to deprecate attributes
# in public modules. See the _ModuleWithDeprecation class here:
# https://github.com/pyca/cryptography/blob/91105952739442a74582d3e62b3d2111365b0dc7/src/cryptography/utils.py#L129
class _ZopeInterfacesDeprecationModule:
"""
Internal class delegating to a module, and displaying warnings when
attributes related to Zope interfaces are accessed.
"""
def __init__(self, module: ModuleType) -> None:
self.__dict__['_module'] = module
def __getattr__(self, attr: str) -> None:
if attr in ('IConfig', 'IPlugin', 'IPluginFactory', 'IAuthenticator',
'IInstaller', 'IDisplay', 'IReporter'):
warnings.warn('{0} attribute in certbot.interfaces module is deprecated '
'and will be removed soon.'.format(attr),
DeprecationWarning, stacklevel=2)
return getattr(self._module, attr)
def __setattr__(self, attr: str, value: Any) -> None: # pragma: no cover
setattr(self._module, attr, value)
def __delattr__(self, attr: str) -> None: # pragma: no cover
delattr(self._module, attr)
def __dir__(self) -> List[str]: # pragma: no cover
return ['_module'] + dir(self._module)
# Patching ourselves to warn about Zope interfaces deprecation and planned removal.
# Replacing this module's entry in sys.modules means every subsequent attribute
# access on certbot.interfaces is routed through _ZopeInterfacesDeprecationModule.
sys.modules[__name__] = cast(ModuleType, _ZopeInterfacesDeprecationModule(sys.modules[__name__]))
| |
# Copyright 2012, Nachi Ueno, NTT MCL, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import mock
from oslo_config import cfg
import six
from neutron.agent.common import config as a_cfg
from neutron.agent.linux import ipset_manager
from neutron.agent.linux import iptables_comments as ic
from neutron.agent.linux import iptables_firewall
from neutron.agent import securitygroups_rpc as sg_cfg
from neutron.common import constants
from neutron.tests import base
from neutron.tests.unit.api.v2 import test_base
# Shortcut for generating unique test identifiers.
_uuid = test_base._uuid
#TODO(mangelajo): replace all 'IPv4', 'IPv6' to constants
# Canned CIDR prefixes and addresses used by the filter-rule tests below.
FAKE_PREFIX = {'IPv4': '10.0.0.0/24',
               'IPv6': 'fe80::/48'}
FAKE_IP = {'IPv4': '10.0.0.1',
           'IPv6': 'fe80::1'}
#TODO(mangelajo): replace all '*_sgid' strings for the constants
FAKE_SGID = 'fake_sgid'
OTHER_SGID = 'other_sgid'
# Local aliases for the IP-version constants.
_IPv6 = constants.IPv6
_IPv4 = constants.IPv4
class BaseIptablesFirewallTestCase(base.BaseTestCase):
    """Common fixture: an IptablesFirewallDriver wired to mocked iptables."""

    def setUp(self):
        super(BaseIptablesFirewallTestCase, self).setUp()
        cfg.CONF.register_opts(a_cfg.ROOT_HELPER_OPTS, 'AGENT')
        cfg.CONF.register_opts(sg_cfg.security_group_opts, 'SECURITYGROUP')
        cfg.CONF.set_override('comment_iptables_rules', False, 'AGENT')
        # Stub out shell execution so no real iptables commands are run.
        self.utils_exec_p = mock.patch('neutron.agent.linux.utils.execute')
        self.utils_exec = self.utils_exec_p.start()
        self.iptables_cls_p = mock.patch(
            'neutron.agent.linux.iptables_manager.IptablesManager')
        manager_cls = self.iptables_cls_p.start()
        # One mock per IP version records the chain/rule calls under test;
        # the 'filter' and 'raw' tables share the same mock.
        self.iptables_inst = mock.Mock()
        self.v4filter_inst = mock.Mock()
        self.v6filter_inst = mock.Mock()
        self.iptables_inst.ipv4 = {'filter': self.v4filter_inst,
                                   'raw': self.v4filter_inst}
        self.iptables_inst.ipv6 = {'filter': self.v6filter_inst,
                                   'raw': self.v6filter_inst}
        manager_cls.return_value = self.iptables_inst
        self.firewall = iptables_firewall.IptablesFirewallDriver()
        self.firewall.iptables = self.iptables_inst
class IptablesFirewallTestCase(BaseIptablesFirewallTestCase):
def _fake_port(self):
return {'device': 'tapfake_dev',
'mac_address': 'ff:ff:ff:ff:ff:ff',
'network_id': 'fake_net',
'fixed_ips': [FAKE_IP['IPv4'],
FAKE_IP['IPv6']]}
    def test_prepare_port_filter_with_no_sg(self):
        """With no security group, only the default chains/rules are set up."""
        port = self._fake_port()
        self.firewall.prepare_port_filter(port)
        # Expected IPv4 filter-table programming, in exact call order:
        # fallback chain, per-device ingress/egress chains, spoofing chain,
        # DHCP rules, conntrack rules, and the final ACCEPT on sg-chain.
        calls = [mock.call.add_chain('sg-fallback'),
                 mock.call.add_rule(
                     'sg-fallback', '-j DROP',
                     comment=ic.UNMATCH_DROP),
                 mock.call.remove_chain('sg-chain'),
                 mock.call.add_chain('sg-chain'),
                 mock.call.add_chain('ifake_dev'),
                 mock.call.add_rule('FORWARD',
                                    '-m physdev --physdev-out tapfake_dev '
                                    '--physdev-is-bridged '
                                    '-j $sg-chain', comment=ic.VM_INT_SG),
                 mock.call.add_rule('sg-chain',
                                    '-m physdev --physdev-out tapfake_dev '
                                    '--physdev-is-bridged '
                                    '-j $ifake_dev',
                                    comment=ic.SG_TO_VM_SG),
                 mock.call.add_rule(
                     'ifake_dev',
                     '-m state --state INVALID -j DROP',
                     comment=None),
                 mock.call.add_rule(
                     'ifake_dev',
                     '-m state --state RELATED,ESTABLISHED -j RETURN',
                     comment=None),
                 mock.call.add_rule(
                     'ifake_dev',
                     '-j $sg-fallback', comment=None),
                 mock.call.add_chain('ofake_dev'),
                 mock.call.add_rule('FORWARD',
                                    '-m physdev --physdev-in tapfake_dev '
                                    '--physdev-is-bridged '
                                    '-j $sg-chain', comment=ic.VM_INT_SG),
                 mock.call.add_rule('sg-chain',
                                    '-m physdev --physdev-in tapfake_dev '
                                    '--physdev-is-bridged -j $ofake_dev',
                                    comment=ic.SG_TO_VM_SG),
                 mock.call.add_rule('INPUT',
                                    '-m physdev --physdev-in tapfake_dev '
                                    '--physdev-is-bridged -j $ofake_dev',
                                    comment=ic.INPUT_TO_SG),
                 mock.call.add_chain('sfake_dev'),
                 mock.call.add_rule(
                     'sfake_dev',
                     '-s 10.0.0.1 -m mac --mac-source FF:FF:FF:FF:FF:FF '
                     '-j RETURN',
                     comment=ic.PAIR_ALLOW),
                 mock.call.add_rule(
                     'sfake_dev', '-j DROP',
                     comment=ic.PAIR_DROP),
                 mock.call.add_rule(
                     'ofake_dev',
                     '-p udp -m udp --sport 68 --dport 67 -j RETURN',
                     comment=None),
                 mock.call.add_rule('ofake_dev', '-j $sfake_dev',
                                    comment=None),
                 mock.call.add_rule(
                     'ofake_dev',
                     '-p udp -m udp --sport 67 --dport 68 -j DROP',
                     comment=None),
                 mock.call.add_rule(
                     'ofake_dev',
                     '-m state --state INVALID -j DROP', comment=None),
                 mock.call.add_rule(
                     'ofake_dev',
                     '-m state --state RELATED,ESTABLISHED -j RETURN',
                     comment=None),
                 mock.call.add_rule(
                     'ofake_dev',
                     '-j $sg-fallback',
                     comment=None),
                 mock.call.add_rule('sg-chain', '-j ACCEPT')]
        # Only the IPv4 filter mock must have seen these calls.
        self.v4filter_inst.assert_has_calls(calls)
def test_filter_ipv4_ingress(self):
rule = {'ethertype': 'IPv4',
'direction': 'ingress'}
ingress = mock.call.add_rule('ifake_dev', '-j RETURN',
comment=None)
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_ingress_prefix(self):
prefix = FAKE_PREFIX['IPv4']
rule = {'ethertype': 'IPv4',
'direction': 'ingress',
'source_ip_prefix': prefix}
ingress = mock.call.add_rule(
'ifake_dev', '-s %s -j RETURN' % prefix, comment=None)
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_ingress_tcp(self):
rule = {'ethertype': 'IPv4',
'direction': 'ingress',
'protocol': 'tcp'}
ingress = mock.call.add_rule(
'ifake_dev', '-p tcp -m tcp -j RETURN', comment=None)
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_ingress_tcp_prefix(self):
prefix = FAKE_PREFIX['IPv4']
rule = {'ethertype': 'IPv4',
'direction': 'ingress',
'protocol': 'tcp',
'source_ip_prefix': prefix}
ingress = mock.call.add_rule('ifake_dev',
'-s %s -p tcp -m tcp -j RETURN' % prefix,
comment=None)
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_ingress_icmp(self):
rule = {'ethertype': 'IPv4',
'direction': 'ingress',
'protocol': 'icmp'}
ingress = mock.call.add_rule('ifake_dev', '-p icmp -j RETURN',
comment=None)
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_ingress_icmp_prefix(self):
prefix = FAKE_PREFIX['IPv4']
rule = {'ethertype': 'IPv4',
'direction': 'ingress',
'protocol': 'icmp',
'source_ip_prefix': prefix}
ingress = mock.call.add_rule(
'ifake_dev', '-s %s -p icmp -j RETURN' % prefix,
comment=None)
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_ingress_tcp_port(self):
rule = {'ethertype': 'IPv4',
'direction': 'ingress',
'protocol': 'tcp',
'port_range_min': 10,
'port_range_max': 10}
ingress = mock.call.add_rule('ifake_dev',
'-p tcp -m tcp --dport 10 -j RETURN',
comment=None)
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_ingress_tcp_mport(self):
rule = {'ethertype': 'IPv4',
'direction': 'ingress',
'protocol': 'tcp',
'port_range_min': 10,
'port_range_max': 100}
ingress = mock.call.add_rule(
'ifake_dev',
'-p tcp -m tcp -m multiport --dports 10:100 -j RETURN',
comment=None)
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_ingress_tcp_mport_prefix(self):
prefix = FAKE_PREFIX['IPv4']
rule = {'ethertype': 'IPv4',
'direction': 'ingress',
'protocol': 'tcp',
'port_range_min': 10,
'port_range_max': 100,
'source_ip_prefix': prefix}
ingress = mock.call.add_rule(
'ifake_dev',
'-s %s -p tcp -m tcp -m multiport --dports 10:100 '
'-j RETURN' % prefix, comment=None)
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_ingress_udp(self):
rule = {'ethertype': 'IPv4',
'direction': 'ingress',
'protocol': 'udp'}
ingress = mock.call.add_rule(
'ifake_dev', '-p udp -m udp -j RETURN', comment=None)
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_ingress_udp_prefix(self):
prefix = FAKE_PREFIX['IPv4']
rule = {'ethertype': 'IPv4',
'direction': 'ingress',
'protocol': 'udp',
'source_ip_prefix': prefix}
ingress = mock.call.add_rule('ifake_dev',
'-s %s -p udp -m udp -j RETURN' % prefix,
comment=None)
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_ingress_udp_port(self):
rule = {'ethertype': 'IPv4',
'direction': 'ingress',
'protocol': 'udp',
'port_range_min': 10,
'port_range_max': 10}
ingress = mock.call.add_rule('ifake_dev',
'-p udp -m udp --dport 10 -j RETURN',
comment=None)
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_ingress_udp_mport(self):
rule = {'ethertype': 'IPv4',
'direction': 'ingress',
'protocol': 'udp',
'port_range_min': 10,
'port_range_max': 100}
ingress = mock.call.add_rule(
'ifake_dev',
'-p udp -m udp -m multiport --dports 10:100 -j RETURN',
comment=None)
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_ingress_udp_mport_prefix(self):
prefix = FAKE_PREFIX['IPv4']
rule = {'ethertype': 'IPv4',
'direction': 'ingress',
'protocol': 'udp',
'port_range_min': 10,
'port_range_max': 100,
'source_ip_prefix': prefix}
ingress = mock.call.add_rule(
'ifake_dev',
'-s %s -p udp -m udp -m multiport --dports 10:100 '
'-j RETURN' % prefix, comment=None)
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_egress(self):
rule = {'ethertype': 'IPv4',
'direction': 'egress'}
egress = mock.call.add_rule('ofake_dev', '-j RETURN', comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_egress_prefix(self):
prefix = FAKE_PREFIX['IPv4']
rule = {'ethertype': 'IPv4',
'direction': 'egress',
'source_ip_prefix': prefix}
egress = mock.call.add_rule(
'ofake_dev', '-s %s -j RETURN' % prefix, comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_egress_tcp(self):
rule = {'ethertype': 'IPv4',
'direction': 'egress',
'protocol': 'tcp'}
egress = mock.call.add_rule(
'ofake_dev', '-p tcp -m tcp -j RETURN', comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_egress_tcp_prefix(self):
prefix = FAKE_PREFIX['IPv4']
rule = {'ethertype': 'IPv4',
'direction': 'egress',
'protocol': 'tcp',
'source_ip_prefix': prefix}
egress = mock.call.add_rule('ofake_dev',
'-s %s -p tcp -m tcp -j RETURN' % prefix,
comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_egress_icmp(self):
rule = {'ethertype': 'IPv4',
'direction': 'egress',
'protocol': 'icmp'}
egress = mock.call.add_rule('ofake_dev', '-p icmp -j RETURN',
comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_egress_icmp_prefix(self):
prefix = FAKE_PREFIX['IPv4']
rule = {'ethertype': 'IPv4',
'direction': 'egress',
'protocol': 'icmp',
'source_ip_prefix': prefix}
egress = mock.call.add_rule(
'ofake_dev', '-s %s -p icmp -j RETURN' % prefix,
comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_egress_icmp_type(self):
prefix = FAKE_PREFIX['IPv4']
rule = {'ethertype': 'IPv4',
'direction': 'egress',
'protocol': 'icmp',
'source_port_range_min': 8,
'source_ip_prefix': prefix}
egress = mock.call.add_rule(
'ofake_dev',
'-s %s -p icmp --icmp-type 8 -j RETURN' % prefix,
comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_egress_icmp_type_name(self):
prefix = FAKE_PREFIX['IPv4']
rule = {'ethertype': 'IPv4',
'direction': 'egress',
'protocol': 'icmp',
'source_port_range_min': 'echo-request',
'source_ip_prefix': prefix}
egress = mock.call.add_rule(
'ofake_dev',
'-s %s -p icmp --icmp-type echo-request -j RETURN' % prefix,
comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_egress_icmp_type_code(self):
prefix = FAKE_PREFIX['IPv4']
rule = {'ethertype': 'IPv4',
'direction': 'egress',
'protocol': 'icmp',
'source_port_range_min': 8,
'source_port_range_max': 0,
'source_ip_prefix': prefix}
egress = mock.call.add_rule(
'ofake_dev',
'-s %s -p icmp --icmp-type 8/0 -j RETURN' % prefix,
comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_egress_tcp_port(self):
rule = {'ethertype': 'IPv4',
'direction': 'egress',
'protocol': 'tcp',
'port_range_min': 10,
'port_range_max': 10}
egress = mock.call.add_rule('ofake_dev',
'-p tcp -m tcp --dport 10 -j RETURN',
comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_egress_tcp_mport(self):
rule = {'ethertype': 'IPv4',
'direction': 'egress',
'protocol': 'tcp',
'port_range_min': 10,
'port_range_max': 100}
egress = mock.call.add_rule(
'ofake_dev',
'-p tcp -m tcp -m multiport --dports 10:100 -j RETURN',
comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_egress_tcp_mport_prefix(self):
prefix = FAKE_PREFIX['IPv4']
rule = {'ethertype': 'IPv4',
'direction': 'egress',
'protocol': 'tcp',
'port_range_min': 10,
'port_range_max': 100,
'source_ip_prefix': prefix}
egress = mock.call.add_rule(
'ofake_dev',
'-s %s -p tcp -m tcp -m multiport --dports 10:100 '
'-j RETURN' % prefix, comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_egress_udp(self):
rule = {'ethertype': 'IPv4',
'direction': 'egress',
'protocol': 'udp'}
egress = mock.call.add_rule(
'ofake_dev', '-p udp -m udp -j RETURN', comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_egress_udp_prefix(self):
prefix = FAKE_PREFIX['IPv4']
rule = {'ethertype': 'IPv4',
'direction': 'egress',
'protocol': 'udp',
'source_ip_prefix': prefix}
egress = mock.call.add_rule('ofake_dev',
'-s %s -p udp -m udp -j RETURN' % prefix,
comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_egress_udp_port(self):
rule = {'ethertype': 'IPv4',
'direction': 'egress',
'protocol': 'udp',
'port_range_min': 10,
'port_range_max': 10}
egress = mock.call.add_rule('ofake_dev',
'-p udp -m udp --dport 10 -j RETURN',
comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_egress_udp_mport(self):
rule = {'ethertype': 'IPv4',
'direction': 'egress',
'protocol': 'udp',
'port_range_min': 10,
'port_range_max': 100}
egress = mock.call.add_rule(
'ofake_dev',
'-p udp -m udp -m multiport --dports 10:100 -j RETURN',
comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_egress_udp_mport_prefix(self):
prefix = FAKE_PREFIX['IPv4']
rule = {'ethertype': 'IPv4',
'direction': 'egress',
'protocol': 'udp',
'port_range_min': 10,
'port_range_max': 100,
'source_ip_prefix': prefix}
egress = mock.call.add_rule(
'ofake_dev',
'-s %s -p udp -m udp -m multiport --dports 10:100 '
'-j RETURN' % prefix, comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_ingress(self):
rule = {'ethertype': 'IPv6',
'direction': 'ingress'}
ingress = mock.call.add_rule('ifake_dev', '-j RETURN',
comment=None)
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_ingress_prefix(self):
prefix = FAKE_PREFIX['IPv6']
rule = {'ethertype': 'IPv6',
'direction': 'ingress',
'source_ip_prefix': prefix}
ingress = mock.call.add_rule(
'ifake_dev', '-s %s -j RETURN' % prefix, comment=None)
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_ingress_tcp(self):
rule = {'ethertype': 'IPv6',
'direction': 'ingress',
'protocol': 'tcp'}
ingress = mock.call.add_rule(
'ifake_dev', '-p tcp -m tcp -j RETURN', comment=None)
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_ingress_tcp_prefix(self):
prefix = FAKE_PREFIX['IPv6']
rule = {'ethertype': 'IPv6',
'direction': 'ingress',
'protocol': 'tcp',
'source_ip_prefix': prefix}
ingress = mock.call.add_rule('ifake_dev',
'-s %s -p tcp -m tcp -j RETURN' % prefix,
comment=None)
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_ingress_tcp_port(self):
rule = {'ethertype': 'IPv6',
'direction': 'ingress',
'protocol': 'tcp',
'port_range_min': 10,
'port_range_max': 10}
ingress = mock.call.add_rule('ifake_dev',
'-p tcp -m tcp --dport 10 -j RETURN',
comment=None)
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_ingress_icmp(self):
rule = {'ethertype': 'IPv6',
'direction': 'ingress',
'protocol': 'icmp'}
ingress = mock.call.add_rule(
'ifake_dev', '-p icmpv6 -j RETURN', comment=None)
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_ingress_icmp_prefix(self):
prefix = FAKE_PREFIX['IPv6']
rule = {'ethertype': 'IPv6',
'direction': 'ingress',
'protocol': 'icmp',
'source_ip_prefix': prefix}
ingress = mock.call.add_rule(
'ifake_dev', '-s %s -p icmpv6 -j RETURN' % prefix,
comment=None)
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_ingress_tcp_mport(self):
rule = {'ethertype': 'IPv6',
'direction': 'ingress',
'protocol': 'tcp',
'port_range_min': 10,
'port_range_max': 100}
ingress = mock.call.add_rule(
'ifake_dev',
'-p tcp -m tcp -m multiport --dports 10:100 -j RETURN',
comment=None)
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def _test_filter_ingress_tcp_min_port_0(self, ethertype):
rule = {'ethertype': ethertype,
'direction': 'ingress',
'protocol': 'tcp',
'port_range_min': 0,
'port_range_max': 100}
ingress = mock.call.add_rule(
'ifake_dev',
'-p tcp -m tcp -m multiport --dports 0:100 -j RETURN',
comment=None)
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
    def test_filter_ingress_tcp_min_port_0_for_ipv4(self):
        """A zero min port is honoured in IPv4 port ranges."""
        self._test_filter_ingress_tcp_min_port_0('IPv4')
    def test_filter_ingress_tcp_min_port_0_for_ipv6(self):
        """A zero min port is honoured in IPv6 port ranges."""
        self._test_filter_ingress_tcp_min_port_0('IPv6')
def test_filter_ipv6_ingress_tcp_mport_prefix(self):
prefix = FAKE_PREFIX['IPv6']
rule = {'ethertype': 'IPv6',
'direction': 'ingress',
'protocol': 'tcp',
'port_range_min': 10,
'port_range_max': 100,
'source_ip_prefix': prefix}
ingress = mock.call.add_rule(
'ifake_dev',
'-s %s -p tcp -m tcp -m multiport --dports 10:100 '
'-j RETURN' % prefix, comment=None)
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_ingress_udp(self):
rule = {'ethertype': 'IPv6',
'direction': 'ingress',
'protocol': 'udp'}
ingress = mock.call.add_rule(
'ifake_dev', '-p udp -m udp -j RETURN', comment=None)
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_ingress_udp_prefix(self):
prefix = FAKE_PREFIX['IPv6']
rule = {'ethertype': 'IPv6',
'direction': 'ingress',
'protocol': 'udp',
'source_ip_prefix': prefix}
ingress = mock.call.add_rule('ifake_dev',
'-s %s -p udp -m udp -j RETURN' % prefix,
comment=None)
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_ingress_udp_port(self):
rule = {'ethertype': 'IPv6',
'direction': 'ingress',
'protocol': 'udp',
'port_range_min': 10,
'port_range_max': 10}
ingress = mock.call.add_rule('ifake_dev',
'-p udp -m udp --dport 10 -j RETURN',
comment=None)
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_ingress_udp_mport(self):
rule = {'ethertype': 'IPv6',
'direction': 'ingress',
'protocol': 'udp',
'port_range_min': 10,
'port_range_max': 100}
ingress = mock.call.add_rule(
'ifake_dev',
'-p udp -m udp -m multiport --dports 10:100 -j RETURN',
comment=None)
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_ingress_udp_mport_prefix(self):
prefix = FAKE_PREFIX['IPv6']
rule = {'ethertype': 'IPv6',
'direction': 'ingress',
'protocol': 'udp',
'port_range_min': 10,
'port_range_max': 100,
'source_ip_prefix': prefix}
ingress = mock.call.add_rule(
'ifake_dev',
'-s %s -p udp -m udp -m multiport --dports 10:100 '
'-j RETURN' % prefix, comment=None)
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_egress(self):
rule = {'ethertype': 'IPv6',
'direction': 'egress'}
egress = mock.call.add_rule('ofake_dev', '-j RETURN', comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_egress_prefix(self):
prefix = FAKE_PREFIX['IPv6']
rule = {'ethertype': 'IPv6',
'direction': 'egress',
'source_ip_prefix': prefix}
egress = mock.call.add_rule(
'ofake_dev', '-s %s -j RETURN' % prefix, comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_egress_tcp(self):
rule = {'ethertype': 'IPv6',
'direction': 'egress',
'protocol': 'tcp'}
egress = mock.call.add_rule(
'ofake_dev', '-p tcp -m tcp -j RETURN', comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_egress_tcp_prefix(self):
prefix = FAKE_PREFIX['IPv6']
rule = {'ethertype': 'IPv6',
'direction': 'egress',
'protocol': 'tcp',
'source_ip_prefix': prefix}
egress = mock.call.add_rule('ofake_dev',
'-s %s -p tcp -m tcp -j RETURN' % prefix,
comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_egress_icmp(self):
rule = {'ethertype': 'IPv6',
'direction': 'egress',
'protocol': 'icmp'}
egress = mock.call.add_rule(
'ofake_dev', '-p icmpv6 -j RETURN', comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_egress_icmp_prefix(self):
prefix = FAKE_PREFIX['IPv6']
rule = {'ethertype': 'IPv6',
'direction': 'egress',
'protocol': 'icmp',
'source_ip_prefix': prefix}
egress = mock.call.add_rule(
'ofake_dev', '-s %s -p icmpv6 -j RETURN' % prefix,
comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_egress_icmp_type(self):
prefix = FAKE_PREFIX['IPv6']
rule = {'ethertype': 'IPv6',
'direction': 'egress',
'protocol': 'icmp',
'source_port_range_min': 8,
'source_ip_prefix': prefix}
egress = mock.call.add_rule(
'ofake_dev',
'-s %s -p icmpv6 --icmpv6-type 8 -j RETURN' % prefix,
comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_egress_icmp_type_name(self):
prefix = FAKE_PREFIX['IPv6']
rule = {'ethertype': 'IPv6',
'direction': 'egress',
'protocol': 'icmp',
'source_port_range_min': 'echo-request',
'source_ip_prefix': prefix}
egress = mock.call.add_rule(
'ofake_dev',
'-s %s -p icmpv6 --icmpv6-type echo-request -j RETURN' % prefix,
comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_egress_icmp_type_code(self):
prefix = FAKE_PREFIX['IPv6']
rule = {'ethertype': 'IPv6',
'direction': 'egress',
'protocol': 'icmp',
'source_port_range_min': 8,
'source_port_range_max': 0,
'source_ip_prefix': prefix}
egress = mock.call.add_rule(
'ofake_dev',
'-s %s -p icmpv6 --icmpv6-type 8/0 -j RETURN' % prefix,
comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_egress_tcp_port(self):
rule = {'ethertype': 'IPv6',
'direction': 'egress',
'protocol': 'tcp',
'port_range_min': 10,
'port_range_max': 10}
egress = mock.call.add_rule('ofake_dev',
'-p tcp -m tcp --dport 10 -j RETURN',
comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_egress_tcp_mport(self):
rule = {'ethertype': 'IPv6',
'direction': 'egress',
'protocol': 'tcp',
'port_range_min': 10,
'port_range_max': 100}
egress = mock.call.add_rule(
'ofake_dev',
'-p tcp -m tcp -m multiport --dports 10:100 -j RETURN',
comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_egress_tcp_mport_prefix(self):
prefix = FAKE_PREFIX['IPv6']
rule = {'ethertype': 'IPv6',
'direction': 'egress',
'protocol': 'tcp',
'port_range_min': 10,
'port_range_max': 100,
'source_ip_prefix': prefix}
egress = mock.call.add_rule(
'ofake_dev',
'-s %s -p tcp -m tcp -m multiport --dports 10:100 '
'-j RETURN' % prefix, comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_egress_udp(self):
rule = {'ethertype': 'IPv6',
'direction': 'egress',
'protocol': 'udp'}
egress = mock.call.add_rule(
'ofake_dev', '-p udp -m udp -j RETURN', comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_egress_udp_prefix(self):
prefix = FAKE_PREFIX['IPv6']
rule = {'ethertype': 'IPv6',
'direction': 'egress',
'protocol': 'udp',
'source_ip_prefix': prefix}
egress = mock.call.add_rule('ofake_dev',
'-s %s -p udp -m udp -j RETURN' % prefix,
comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_egress_udp_port(self):
rule = {'ethertype': 'IPv6',
'direction': 'egress',
'protocol': 'udp',
'port_range_min': 10,
'port_range_max': 10}
egress = mock.call.add_rule('ofake_dev',
'-p udp -m udp --dport 10 -j RETURN',
comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_egress_udp_mport(self):
rule = {'ethertype': 'IPv6',
'direction': 'egress',
'protocol': 'udp',
'port_range_min': 10,
'port_range_max': 100}
egress = mock.call.add_rule(
'ofake_dev',
'-p udp -m udp -m multiport --dports 10:100 -j RETURN',
comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_egress_udp_mport_prefix(self):
prefix = FAKE_PREFIX['IPv6']
rule = {'ethertype': 'IPv6',
'direction': 'egress',
'protocol': 'udp',
'port_range_min': 10,
'port_range_max': 100,
'source_ip_prefix': prefix}
egress = mock.call.add_rule(
'ofake_dev',
'-s %s -p udp -m udp -m multiport --dports 10:100 '
'-j RETURN' % prefix, comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
    def _test_prepare_port_filter(self,
                                  rule,
                                  ingress_expected_call=None,
                                  egress_expected_call=None):
        """Drive prepare_port_filter() for a port carrying a single SG rule.

        Builds the complete, ordered sequence of iptables-manager calls the
        firewall driver is expected to emit for a fake port (fallback chain,
        per-device chains, ICMPv6/DHCP plumbing, anti-spoofing chain, state
        rules) and splices the caller-supplied ingress/egress expectation
        into the correct position before asserting the whole sequence on the
        appropriate (v4 or v6) filter mock.
        """
        port = self._fake_port()
        ethertype = rule['ethertype']
        prefix = FAKE_IP[ethertype]
        # defaults are for IPv4; overridden below for IPv6
        filter_inst = self.v4filter_inst
        dhcp_rule = [mock.call.add_rule(
            'ofake_dev',
            '-p udp -m udp --sport 68 --dport 67 -j RETURN',
            comment=None)]
        if ethertype == 'IPv6':
            filter_inst = self.v6filter_inst
            dhcp_rule = [mock.call.add_rule('ofake_dev', '-p icmpv6 '
                                            '--icmpv6-type %s -j DROP'
                                            % constants.ICMPV6_TYPE_RA,
                                            comment=None),
                         mock.call.add_rule('ofake_dev',
                                            '-p icmpv6 -j RETURN',
                                            comment=None),
                         mock.call.add_rule('ofake_dev', '-p udp -m udp '
                                            '--sport 546 --dport 547 '
                                            '-j RETURN', comment=None)]
        sg = [rule]
        port['security_group_rules'] = sg
        self.firewall.prepare_port_filter(port)
        # base chain setup plus the ingress-side wiring for the device
        calls = [mock.call.add_chain('sg-fallback'),
                 mock.call.add_rule(
                     'sg-fallback',
                     '-j DROP',
                     comment=ic.UNMATCH_DROP),
                 mock.call.remove_chain('sg-chain'),
                 mock.call.add_chain('sg-chain'),
                 mock.call.add_chain('ifake_dev'),
                 mock.call.add_rule('FORWARD',
                                    '-m physdev --physdev-out tapfake_dev '
                                    '--physdev-is-bridged '
                                    '-j $sg-chain', comment=ic.VM_INT_SG),
                 mock.call.add_rule('sg-chain',
                                    '-m physdev --physdev-out tapfake_dev '
                                    '--physdev-is-bridged '
                                    '-j $ifake_dev',
                                    comment=ic.SG_TO_VM_SG)
                 ]
        if ethertype == 'IPv6':
            # ICMPv6 housekeeping types are always allowed on ingress
            for icmp6_type in constants.ICMPV6_ALLOWED_TYPES:
                calls.append(
                    mock.call.add_rule('ifake_dev',
                                       '-p icmpv6 --icmpv6-type %s -j RETURN' %
                                       icmp6_type, comment=None))
        calls += [
            mock.call.add_rule(
                'ifake_dev',
                '-m state --state INVALID -j DROP', comment=None
            ),
            mock.call.add_rule(
                'ifake_dev',
                '-m state --state RELATED,ESTABLISHED -j RETURN',
                comment=None
            )
        ]
        # the rule under test lands here on the ingress side
        if ingress_expected_call:
            calls.append(ingress_expected_call)
        # egress-side wiring and the anti-spoofing (sfake_dev) chain
        calls += [mock.call.add_rule('ifake_dev',
                                     '-j $sg-fallback', comment=None),
                  mock.call.add_chain('ofake_dev'),
                  mock.call.add_rule('FORWARD',
                                     '-m physdev --physdev-in tapfake_dev '
                                     '--physdev-is-bridged '
                                     '-j $sg-chain', comment=ic.VM_INT_SG),
                  mock.call.add_rule('sg-chain',
                                     '-m physdev --physdev-in tapfake_dev '
                                     '--physdev-is-bridged -j $ofake_dev',
                                     comment=ic.SG_TO_VM_SG),
                  mock.call.add_rule('INPUT',
                                     '-m physdev --physdev-in tapfake_dev '
                                     '--physdev-is-bridged -j $ofake_dev',
                                     comment=ic.INPUT_TO_SG),
                  mock.call.add_chain('sfake_dev'),
                  mock.call.add_rule(
                      'sfake_dev',
                      '-s %s -m mac --mac-source FF:FF:FF:FF:FF:FF -j RETURN'
                      % prefix,
                      comment=ic.PAIR_ALLOW),
                  mock.call.add_rule(
                      'sfake_dev', '-j DROP',
                      comment=ic.PAIR_DROP)]
        calls += dhcp_rule
        calls.append(mock.call.add_rule('ofake_dev', '-j $sfake_dev',
                                        comment=None))
        if ethertype == 'IPv4':
            calls.append(mock.call.add_rule(
                'ofake_dev',
                '-p udp -m udp --sport 67 --dport 68 -j DROP',
                comment=None))
        if ethertype == 'IPv6':
            calls.append(mock.call.add_rule(
                'ofake_dev',
                '-p udp -m udp --sport 547 --dport 546 -j DROP',
                comment=None))
        calls += [
            mock.call.add_rule(
                'ofake_dev',
                '-m state --state INVALID -j DROP', comment=None),
            mock.call.add_rule(
                'ofake_dev',
                '-m state --state RELATED,ESTABLISHED -j RETURN',
                comment=None),
        ]
        # the rule under test lands here on the egress side
        if egress_expected_call:
            calls.append(egress_expected_call)
        calls += [mock.call.add_rule('ofake_dev',
                                     '-j $sg-fallback', comment=None),
                  mock.call.add_rule('sg-chain', '-j ACCEPT')]
        filter_inst.assert_has_calls(calls)
    def _test_remove_conntrack_entries(self, ethertype, protocol,
                                       direction):
        """Verify the ``conntrack -D`` command built when an SG rule goes away.

        Registers a filtered port with one security-group rule, removes the
        rule inside a defer block, then asserts that the expected conntrack
        flush command (protocol, address family, direction-specific address
        flag, zone) was executed.
        """
        port = self._fake_port()
        port['zone_id'] = 1
        port['security_groups'] = 'fake_sg_id'
        self.firewall.filtered_ports[port['device']] = port
        self.firewall.updated_rule_sg_ids = set(['fake_sg_id'])
        self.firewall.sg_rules['fake_sg_id'] = [
            {'direction': direction, 'ethertype': ethertype,
             'protocol': protocol}]
        self.firewall.filter_defer_apply_on()
        # dropping the rule while deferred is what triggers the flush
        self.firewall.sg_rules['fake_sg_id'] = []
        self.firewall.filter_defer_apply_off()
        cmd = ['conntrack', '-D']
        if protocol:
            cmd.extend(['-p', protocol])
        if ethertype == 'IPv4':
            cmd.extend(['-f', 'ipv4'])
            if direction == 'ingress':
                cmd.extend(['-d', '10.0.0.1'])
            else:
                cmd.extend(['-s', '10.0.0.1'])
        else:
            cmd.extend(['-f', 'ipv6'])
            if direction == 'ingress':
                cmd.extend(['-d', 'fe80::1'])
            else:
                cmd.extend(['-s', 'fe80::1'])
        # -w selects the conntrack zone assigned to the port above
        cmd.extend(['-w', 1])
        calls = [
            mock.call(cmd, run_as_root=True, check_exit_code=True,
                      extra_ok_codes=[1])]
        self.utils_exec.assert_has_calls(calls)
def test_remove_conntrack_entries_for_delete_rule_ipv4(self):
for direction in ['ingress', 'egress']:
for pro in [None, 'tcp', 'icmp', 'udp']:
self._test_remove_conntrack_entries(
'IPv4', pro, direction)
def test_remove_conntrack_entries_for_delete_rule_ipv6(self):
for direction in ['ingress', 'egress']:
for pro in [None, 'tcp', 'icmp', 'udp']:
self._test_remove_conntrack_entries(
'IPv6', pro, direction)
    def test_remove_conntrack_entries_for_port_sec_group_change(self):
        """Changing a port's security group flushes its conntrack entries.

        Swaps the port's security group inside a defer block and checks
        that both the IPv4 and IPv6 fixed addresses of the port are flushed
        from conntrack (zone 1).
        """
        port = self._fake_port()
        port['zone_id'] = 1
        port['security_groups'] = ['fake_sg_id']
        self.firewall.filtered_ports[port['device']] = port
        self.firewall.updated_sg_members = set(['tapfake_dev'])
        self.firewall.filter_defer_apply_on()
        # replace the port's SG while deferred to trigger the flush
        new_port = copy.deepcopy(port)
        new_port['security_groups'] = ['fake_sg_id2']
        self.firewall.filtered_ports[port['device']] = new_port
        self.firewall.filter_defer_apply_off()
        calls = [
            mock.call(['conntrack', '-D', '-f', 'ipv4', '-d', '10.0.0.1',
                       '-w', 1],
                      run_as_root=True, check_exit_code=True,
                      extra_ok_codes=[1]),
            mock.call(['conntrack', '-D', '-f', 'ipv6', '-d', 'fe80::1',
                       '-w', 1],
                      run_as_root=True, check_exit_code=True,
                      extra_ok_codes=[1])]
        self.utils_exec.assert_has_calls(calls)
    def test_update_delete_port_filter(self):
        """Exercise the prepare/update/remove life cycle of a port filter.

        Prepares a port with an ingress rule, updates it to an egress rule,
        then removes it (also poking update/remove with an unknown device,
        which must be no-ops), and asserts the full ordered sequence of v4
        filter-table calls: the initial chain build, the teardown + rebuild
        performed by the update, and the final teardown.
        """
        port = self._fake_port()
        port['security_group_rules'] = [{'ethertype': 'IPv4',
                                         'direction': 'ingress'}]
        self.firewall.prepare_port_filter(port)
        port['security_group_rules'] = [{'ethertype': 'IPv4',
                                         'direction': 'egress'}]
        self.firewall.update_port_filter(port)
        # unknown devices must be ignored without raising
        self.firewall.update_port_filter({'device': 'no-exist-device'})
        self.firewall.remove_port_filter(port)
        self.firewall.remove_port_filter({'device': 'no-exist-device'})
        # first section: chains built by prepare_port_filter (ingress rule)
        calls = [mock.call.add_chain('sg-fallback'),
                 mock.call.add_rule(
                     'sg-fallback',
                     '-j DROP',
                     comment=ic.UNMATCH_DROP),
                 mock.call.remove_chain('sg-chain'),
                 mock.call.add_chain('sg-chain'),
                 mock.call.add_chain('ifake_dev'),
                 mock.call.add_rule(
                     'FORWARD',
                     '-m physdev --physdev-out tapfake_dev '
                     '--physdev-is-bridged -j $sg-chain',
                     comment=ic.VM_INT_SG),
                 mock.call.add_rule(
                     'sg-chain',
                     '-m physdev --physdev-out tapfake_dev '
                     '--physdev-is-bridged -j $ifake_dev',
                     comment=ic.SG_TO_VM_SG),
                 mock.call.add_rule(
                     'ifake_dev',
                     '-m state --state INVALID -j DROP', comment=None),
                 mock.call.add_rule(
                     'ifake_dev',
                     '-m state --state RELATED,ESTABLISHED -j RETURN',
                     comment=None),
                 mock.call.add_rule('ifake_dev', '-j RETURN',
                                    comment=None),
                 mock.call.add_rule(
                     'ifake_dev',
                     '-j $sg-fallback', comment=None),
                 mock.call.add_chain('ofake_dev'),
                 mock.call.add_rule(
                     'FORWARD',
                     '-m physdev --physdev-in tapfake_dev '
                     '--physdev-is-bridged -j $sg-chain',
                     comment=ic.VM_INT_SG),
                 mock.call.add_rule(
                     'sg-chain',
                     '-m physdev --physdev-in tapfake_dev '
                     '--physdev-is-bridged -j $ofake_dev',
                     comment=ic.SG_TO_VM_SG),
                 mock.call.add_rule(
                     'INPUT',
                     '-m physdev --physdev-in tapfake_dev '
                     '--physdev-is-bridged -j $ofake_dev',
                     comment=ic.INPUT_TO_SG),
                 mock.call.add_chain('sfake_dev'),
                 mock.call.add_rule(
                     'sfake_dev',
                     '-s 10.0.0.1 -m mac --mac-source FF:FF:FF:FF:FF:FF '
                     '-j RETURN',
                     comment=ic.PAIR_ALLOW),
                 mock.call.add_rule(
                     'sfake_dev', '-j DROP',
                     comment=ic.PAIR_DROP),
                 mock.call.add_rule(
                     'ofake_dev',
                     '-p udp -m udp --sport 68 --dport 67 -j RETURN',
                     comment=None),
                 mock.call.add_rule('ofake_dev', '-j $sfake_dev',
                                    comment=None),
                 mock.call.add_rule(
                     'ofake_dev',
                     '-p udp -m udp --sport 67 --dport 68 -j DROP',
                     comment=None),
                 mock.call.add_rule(
                     'ofake_dev', '-m state --state INVALID -j DROP',
                     comment=None),
                 mock.call.add_rule(
                     'ofake_dev',
                     '-m state --state RELATED,ESTABLISHED -j RETURN',
                     comment=None),
                 mock.call.add_rule(
                     'ofake_dev',
                     '-j $sg-fallback', comment=None),
                 mock.call.add_rule('sg-chain', '-j ACCEPT'),
                 # second section: update_port_filter tears down and
                 # rebuilds the chains for the new egress rule
                 mock.call.remove_chain('ifake_dev'),
                 mock.call.remove_chain('ofake_dev'),
                 mock.call.remove_chain('sfake_dev'),
                 mock.call.remove_chain('sg-chain'),
                 mock.call.add_chain('sg-chain'),
                 mock.call.add_chain('ifake_dev'),
                 mock.call.add_rule(
                     'FORWARD',
                     '-m physdev --physdev-out tapfake_dev '
                     '--physdev-is-bridged -j $sg-chain',
                     comment=ic.VM_INT_SG),
                 mock.call.add_rule(
                     'sg-chain',
                     '-m physdev --physdev-out tapfake_dev '
                     '--physdev-is-bridged -j $ifake_dev',
                     comment=ic.SG_TO_VM_SG),
                 mock.call.add_rule(
                     'ifake_dev',
                     '-m state --state INVALID -j DROP', comment=None),
                 mock.call.add_rule(
                     'ifake_dev',
                     '-m state --state RELATED,ESTABLISHED -j RETURN',
                     comment=None),
                 mock.call.add_rule(
                     'ifake_dev',
                     '-j $sg-fallback', comment=None),
                 mock.call.add_chain('ofake_dev'),
                 mock.call.add_rule(
                     'FORWARD',
                     '-m physdev --physdev-in tapfake_dev '
                     '--physdev-is-bridged -j $sg-chain',
                     comment=ic.VM_INT_SG),
                 mock.call.add_rule(
                     'sg-chain',
                     '-m physdev --physdev-in tapfake_dev '
                     '--physdev-is-bridged -j $ofake_dev',
                     comment=ic.SG_TO_VM_SG),
                 mock.call.add_rule(
                     'INPUT',
                     '-m physdev --physdev-in tapfake_dev '
                     '--physdev-is-bridged -j $ofake_dev',
                     comment=ic.INPUT_TO_SG),
                 mock.call.add_chain('sfake_dev'),
                 mock.call.add_rule(
                     'sfake_dev',
                     '-s 10.0.0.1 -m mac --mac-source FF:FF:FF:FF:FF:FF '
                     '-j RETURN',
                     comment=ic.PAIR_ALLOW),
                 mock.call.add_rule(
                     'sfake_dev', '-j DROP',
                     comment=ic.PAIR_DROP),
                 mock.call.add_rule(
                     'ofake_dev',
                     '-p udp -m udp --sport 68 --dport 67 -j RETURN',
                     comment=None),
                 mock.call.add_rule('ofake_dev', '-j $sfake_dev',
                                    comment=None),
                 mock.call.add_rule(
                     'ofake_dev',
                     '-p udp -m udp --sport 67 --dport 68 -j DROP',
                     comment=None),
                 mock.call.add_rule(
                     'ofake_dev',
                     '-m state --state INVALID -j DROP', comment=None),
                 mock.call.add_rule(
                     'ofake_dev',
                     '-m state --state RELATED,ESTABLISHED -j RETURN',
                     comment=None),
                 mock.call.add_rule('ofake_dev', '-j RETURN',
                                    comment=None),
                 mock.call.add_rule('ofake_dev',
                                    '-j $sg-fallback',
                                    comment=None),
                 mock.call.add_rule('sg-chain', '-j ACCEPT'),
                 # third section: remove_port_filter tears everything down
                 mock.call.remove_chain('ifake_dev'),
                 mock.call.remove_chain('ofake_dev'),
                 mock.call.remove_chain('sfake_dev'),
                 mock.call.remove_chain('sg-chain'),
                 mock.call.add_chain('sg-chain')]
        self.v4filter_inst.assert_has_calls(calls)
def test_remove_unknown_port(self):
port = self._fake_port()
self.firewall.remove_port_filter(port)
# checking no exception occures
self.assertFalse(self.v4filter_inst.called)
def test_defer_apply(self):
with self.firewall.defer_apply():
pass
self.iptables_inst.assert_has_calls([mock.call.defer_apply_on(),
mock.call.defer_apply_off()])
def test_filter_defer_with_exception(self):
try:
with self.firewall.defer_apply():
raise Exception("same exception")
except Exception:
pass
self.iptables_inst.assert_has_calls([mock.call.defer_apply_on(),
mock.call.defer_apply_off()])
    def _mock_chain_applies(self):
        """Replace the chain-apply hooks with an argument-copying mock.

        Returns the mock so tests can assert on the exact dict contents
        that were passed at call time rather than at assert time.
        """
        class CopyingMock(mock.MagicMock):
            """Copies arguments so mutable arguments can be asserted on.

            Copied verbatim from unittest.mock documentation.
            """
            def __call__(self, *args, **kwargs):
                args = copy.deepcopy(args)
                kwargs = copy.deepcopy(kwargs)
                return super(CopyingMock, self).__call__(*args, **kwargs)
        # Need to use CopyingMock because _{setup,remove}_chains_apply are
        # usually called with a dict that's modified between calls (i.e.,
        # self.firewall.filtered_ports).
        chain_applies = CopyingMock()
        self.firewall._setup_chains_apply = chain_applies.setup
        self.firewall._remove_chains_apply = chain_applies.remove
        return chain_applies
    def test_mock_chain_applies(self):
        """Sanity-check the CopyingMock plumbing itself.

        Each prepare/update/remove must produce a remove+setup pair that
        captured the filtered-ports dict as it was at call time.
        """
        chain_applies = self._mock_chain_applies()
        port_prepare = {'device': 'd1', 'mac_address': 'prepare'}
        port_update = {'device': 'd1', 'mac_address': 'update'}
        self.firewall.prepare_port_filter(port_prepare)
        self.firewall.update_port_filter(port_update)
        self.firewall.remove_port_filter(port_update)
        chain_applies.assert_has_calls([mock.call.remove({}, {}),
                                        mock.call.setup({'d1': port_prepare}, {}),
                                        mock.call.remove({'d1': port_prepare}, {}),
                                        mock.call.setup({'d1': port_update}, {}),
                                        mock.call.remove({'d1': port_update}, {}),
                                        mock.call.setup({}, {})])
    def test_defer_chain_apply_need_pre_defer_copy(self):
        """Deferred removal must see the pre-defer filtered-ports snapshot."""
        chain_applies = self._mock_chain_applies()
        port = self._fake_port()
        device2port = {port['device']: port}
        self.firewall.prepare_port_filter(port)
        with self.firewall.defer_apply():
            self.firewall.remove_port_filter(port)
        chain_applies.assert_has_calls([mock.call.remove({}, {}),
                                        mock.call.setup(device2port, {}),
                                        mock.call.remove(device2port, {}),
                                        mock.call.setup({}, {})])
def test_defer_chain_apply_coalesce_simple(self):
chain_applies = self._mock_chain_applies()
port = self._fake_port()
with self.firewall.defer_apply():
self.firewall.prepare_port_filter(port)
self.firewall.update_port_filter(port)
self.firewall.remove_port_filter(port)
chain_applies.assert_has_calls([mock.call.remove({}, {}),
mock.call.setup({}, {})])
def test_defer_chain_apply_coalesce_multiple_ports(self):
chain_applies = self._mock_chain_applies()
port1 = {'device': 'd1', 'mac_address': 'mac1', 'network_id': 'net1'}
port2 = {'device': 'd2', 'mac_address': 'mac2', 'network_id': 'net1'}
device2port = {'d1': port1, 'd2': port2}
with self.firewall.defer_apply():
self.firewall.prepare_port_filter(port1)
self.firewall.prepare_port_filter(port2)
chain_applies.assert_has_calls([mock.call.remove({}, {}),
mock.call.setup(device2port, {})])
    def test_ip_spoofing_filter_with_multiple_ips(self):
        """Every IPv4 fixed IP of the port gets its own anti-spoofing rule.

        The port has two IPv4 addresses (plus one IPv6, which does not show
        up in the v4 filter); the sfake_dev chain must allow each v4 address
        with the port's MAC before the final DROP.
        """
        port = {'device': 'tapfake_dev',
                'mac_address': 'ff:ff:ff:ff:ff:ff',
                'network_id': 'fake_net',
                'fixed_ips': ['10.0.0.1', 'fe80::1', '10.0.0.2']}
        self.firewall.prepare_port_filter(port)
        calls = [mock.call.add_chain('sg-fallback'),
                 mock.call.add_rule(
                     'sg-fallback', '-j DROP',
                     comment=ic.UNMATCH_DROP),
                 mock.call.remove_chain('sg-chain'),
                 mock.call.add_chain('sg-chain'),
                 mock.call.add_chain('ifake_dev'),
                 mock.call.add_rule('FORWARD',
                                    '-m physdev --physdev-out tapfake_dev '
                                    '--physdev-is-bridged '
                                    '-j $sg-chain', comment=ic.VM_INT_SG),
                 mock.call.add_rule('sg-chain',
                                    '-m physdev --physdev-out tapfake_dev '
                                    '--physdev-is-bridged '
                                    '-j $ifake_dev',
                                    comment=ic.SG_TO_VM_SG),
                 mock.call.add_rule(
                     'ifake_dev',
                     '-m state --state INVALID -j DROP', comment=None),
                 mock.call.add_rule(
                     'ifake_dev',
                     '-m state --state RELATED,ESTABLISHED -j RETURN',
                     comment=None),
                 mock.call.add_rule('ifake_dev',
                                    '-j $sg-fallback', comment=None),
                 mock.call.add_chain('ofake_dev'),
                 mock.call.add_rule('FORWARD',
                                    '-m physdev --physdev-in tapfake_dev '
                                    '--physdev-is-bridged '
                                    '-j $sg-chain', comment=ic.VM_INT_SG),
                 mock.call.add_rule('sg-chain',
                                    '-m physdev --physdev-in tapfake_dev '
                                    '--physdev-is-bridged -j $ofake_dev',
                                    comment=ic.SG_TO_VM_SG),
                 mock.call.add_rule('INPUT',
                                    '-m physdev --physdev-in tapfake_dev '
                                    '--physdev-is-bridged -j $ofake_dev',
                                    comment=ic.INPUT_TO_SG),
                 mock.call.add_chain('sfake_dev'),
                 # one allow rule per IPv4 fixed IP, then the drop
                 mock.call.add_rule(
                     'sfake_dev',
                     '-s 10.0.0.1 -m mac --mac-source FF:FF:FF:FF:FF:FF '
                     '-j RETURN',
                     comment=ic.PAIR_ALLOW),
                 mock.call.add_rule(
                     'sfake_dev',
                     '-s 10.0.0.2 -m mac --mac-source FF:FF:FF:FF:FF:FF '
                     '-j RETURN',
                     comment=ic.PAIR_ALLOW),
                 mock.call.add_rule(
                     'sfake_dev', '-j DROP',
                     comment=ic.PAIR_DROP),
                 mock.call.add_rule(
                     'ofake_dev',
                     '-p udp -m udp --sport 68 --dport 67 -j RETURN',
                     comment=None),
                 mock.call.add_rule('ofake_dev', '-j $sfake_dev',
                                    comment=None),
                 mock.call.add_rule(
                     'ofake_dev',
                     '-p udp -m udp --sport 67 --dport 68 -j DROP',
                     comment=None),
                 mock.call.add_rule(
                     'ofake_dev',
                     '-m state --state INVALID -j DROP', comment=None),
                 mock.call.add_rule(
                     'ofake_dev',
                     '-m state --state RELATED,ESTABLISHED -j RETURN',
                     comment=None),
                 mock.call.add_rule('ofake_dev',
                                    '-j $sg-fallback', comment=None),
                 mock.call.add_rule('sg-chain', '-j ACCEPT')]
        self.v4filter_inst.assert_has_calls(calls)
    def test_ip_spoofing_no_fixed_ips(self):
        """With no fixed IPs the spoofing chain matches on MAC only."""
        port = {'device': 'tapfake_dev',
                'mac_address': 'ff:ff:ff:ff:ff:ff',
                'network_id': 'fake_net',
                'fixed_ips': []}
        self.firewall.prepare_port_filter(port)
        calls = [mock.call.add_chain('sg-fallback'),
                 mock.call.add_rule(
                     'sg-fallback', '-j DROP',
                     comment=ic.UNMATCH_DROP),
                 mock.call.remove_chain('sg-chain'),
                 mock.call.add_chain('sg-chain'),
                 mock.call.add_chain('ifake_dev'),
                 mock.call.add_rule('FORWARD',
                                    '-m physdev --physdev-out tapfake_dev '
                                    '--physdev-is-bridged '
                                    '-j $sg-chain', comment=ic.VM_INT_SG),
                 mock.call.add_rule('sg-chain',
                                    '-m physdev --physdev-out tapfake_dev '
                                    '--physdev-is-bridged '
                                    '-j $ifake_dev',
                                    comment=ic.SG_TO_VM_SG),
                 mock.call.add_rule(
                     'ifake_dev',
                     '-m state --state INVALID -j DROP', comment=None),
                 mock.call.add_rule(
                     'ifake_dev',
                     '-m state --state RELATED,ESTABLISHED -j RETURN',
                     comment=None),
                 mock.call.add_rule('ifake_dev', '-j $sg-fallback',
                                    comment=None),
                 mock.call.add_chain('ofake_dev'),
                 mock.call.add_rule('FORWARD',
                                    '-m physdev --physdev-in tapfake_dev '
                                    '--physdev-is-bridged '
                                    '-j $sg-chain', comment=ic.VM_INT_SG),
                 mock.call.add_rule('sg-chain',
                                    '-m physdev --physdev-in tapfake_dev '
                                    '--physdev-is-bridged -j $ofake_dev',
                                    comment=ic.SG_TO_VM_SG),
                 mock.call.add_rule('INPUT',
                                    '-m physdev --physdev-in tapfake_dev '
                                    '--physdev-is-bridged -j $ofake_dev',
                                    comment=ic.INPUT_TO_SG),
                 mock.call.add_chain('sfake_dev'),
                 # MAC-only allow rule since there is no IP to pair with
                 mock.call.add_rule(
                     'sfake_dev',
                     '-m mac --mac-source FF:FF:FF:FF:FF:FF -j RETURN',
                     comment=ic.PAIR_ALLOW),
                 mock.call.add_rule(
                     'sfake_dev', '-j DROP',
                     comment=ic.PAIR_DROP),
                 mock.call.add_rule(
                     'ofake_dev',
                     '-p udp -m udp --sport 68 --dport 67 -j RETURN',
                     comment=None),
                 mock.call.add_rule('ofake_dev', '-j $sfake_dev',
                                    comment=None),
                 mock.call.add_rule(
                     'ofake_dev',
                     '-p udp -m udp --sport 67 --dport 68 -j DROP',
                     comment=None),
                 mock.call.add_rule(
                     'ofake_dev',
                     '-m state --state INVALID -j DROP',
                     comment=None),
                 mock.call.add_rule(
                     'ofake_dev',
                     '-m state --state RELATED,ESTABLISHED -j RETURN',
                     comment=None),
                 mock.call.add_rule('ofake_dev', '-j $sg-fallback',
                                    comment=None),
                 mock.call.add_rule('sg-chain', '-j ACCEPT')]
        self.v4filter_inst.assert_has_calls(calls)
class IptablesFirewallEnhancedIpsetTestCase(BaseIptablesFirewallTestCase):
    def setUp(self):
        """Wire a mock IpsetManager into the firewall under test."""
        super(IptablesFirewallEnhancedIpsetTestCase, self).setUp()
        self.firewall.ipset = mock.Mock()
        # keep the real name derivation so chain/set names stay realistic
        self.firewall.ipset.get_name.side_effect = (
            ipset_manager.IpsetManager.get_name)
        self.firewall.ipset.set_exists.return_value = True
def _fake_port(self, sg_id=FAKE_SGID):
return {'device': 'tapfake_dev',
'mac_address': 'ff:ff:ff:ff:ff:ff',
'network_id': 'fake_net',
'fixed_ips': [FAKE_IP['IPv4'],
FAKE_IP['IPv6']],
'security_groups': [sg_id],
'security_group_source_groups': [sg_id]}
def _fake_sg_rule_for_ethertype(self, ethertype, remote_group):
return {'direction': 'ingress', 'remote_group_id': remote_group,
'ethertype': ethertype}
def _fake_sg_rules(self, sg_id=FAKE_SGID, remote_groups=None):
remote_groups = remote_groups or {_IPv4: [FAKE_SGID],
_IPv6: [FAKE_SGID]}
rules = []
for ip_version, remote_group_list in six.iteritems(remote_groups):
for remote_group in remote_group_list:
rules.append(self._fake_sg_rule_for_ethertype(ip_version,
remote_group))
return {sg_id: rules}
def _fake_sg_members(self, sg_ids=None):
return {sg_id: copy.copy(FAKE_IP) for sg_id in (sg_ids or [FAKE_SGID])}
def test_prepare_port_filter_with_new_members(self):
self.firewall.sg_rules = self._fake_sg_rules()
self.firewall.sg_members = {'fake_sgid': {
'IPv4': ['10.0.0.1', '10.0.0.2'], 'IPv6': ['fe80::1']}}
self.firewall.pre_sg_members = {}
port = self._fake_port()
self.firewall.prepare_port_filter(port)
calls = [
mock.call.set_members('fake_sgid', 'IPv4',
['10.0.0.1', '10.0.0.2']),
mock.call.set_members('fake_sgid', 'IPv6',
['fe80::1'])
]
self.firewall.ipset.assert_has_calls(calls, any_order=True)
    def _setup_fake_firewall_members_and_rules(self, firewall):
        """Seed identical current and pre-change SG rules/members."""
        firewall.sg_rules = self._fake_sg_rules()
        firewall.pre_sg_rules = self._fake_sg_rules()
        firewall.sg_members = self._fake_sg_members()
        # pre_sg_members intentionally aliases sg_members here
        firewall.pre_sg_members = firewall.sg_members
    def _prepare_rules_and_members_for_removal(self):
        """Add OTHER_SGID to pre-change members so it qualifies for removal."""
        self._setup_fake_firewall_members_and_rules(self.firewall)
        self.firewall.pre_sg_members[OTHER_SGID] = (
            self.firewall.pre_sg_members[FAKE_SGID])
def test_determine_remote_sgs_to_remove(self):
self._prepare_rules_and_members_for_removal()
ports = [self._fake_port()]
self.assertEqual(
{_IPv4: set([OTHER_SGID]), _IPv6: set([OTHER_SGID])},
self.firewall._determine_remote_sgs_to_remove(ports))
def test_determine_remote_sgs_to_remove_ipv6_unreferenced(self):
self._prepare_rules_and_members_for_removal()
ports = [self._fake_port()]
self.firewall.sg_rules = self._fake_sg_rules(
remote_groups={_IPv4: [OTHER_SGID, FAKE_SGID],
_IPv6: [FAKE_SGID]})
self.assertEqual(
{_IPv4: set(), _IPv6: set([OTHER_SGID])},
self.firewall._determine_remote_sgs_to_remove(ports))
def test_get_remote_sg_ids_by_ipversion(self):
self.firewall.sg_rules = self._fake_sg_rules(
remote_groups={_IPv4: [FAKE_SGID], _IPv6: [OTHER_SGID]})
ports = [self._fake_port()]
self.assertEqual(
{_IPv4: set([FAKE_SGID]), _IPv6: set([OTHER_SGID])},
self.firewall._get_remote_sg_ids_sets_by_ipversion(ports))
def test_get_remote_sg_ids(self):
self.firewall.sg_rules = self._fake_sg_rules(
remote_groups={_IPv4: [FAKE_SGID, FAKE_SGID, FAKE_SGID],
_IPv6: [OTHER_SGID, OTHER_SGID, OTHER_SGID]})
port = self._fake_port()
self.assertEqual(
{_IPv4: set([FAKE_SGID]), _IPv6: set([OTHER_SGID])},
self.firewall._get_remote_sg_ids(port))
def test_determine_sg_rules_to_remove(self):
self.firewall.pre_sg_rules = self._fake_sg_rules(sg_id=OTHER_SGID)
ports = [self._fake_port()]
self.assertEqual(set([OTHER_SGID]),
self.firewall._determine_sg_rules_to_remove(ports))
def test_get_sg_ids_set_for_ports(self):
sg_ids = set([FAKE_SGID, OTHER_SGID])
ports = [self._fake_port(sg_id) for sg_id in sg_ids]
self.assertEqual(sg_ids,
self.firewall._get_sg_ids_set_for_ports(ports))
def test_clear_sg_members(self):
self.firewall.sg_members = self._fake_sg_members(
sg_ids=[FAKE_SGID, OTHER_SGID])
self.firewall._clear_sg_members(_IPv4, [OTHER_SGID])
self.assertEqual(0, len(self.firewall.sg_members[OTHER_SGID][_IPv4]))
    def test_remove_unused_sg_members(self):
        """A group with no members left in either IP version is dropped."""
        self.firewall.sg_members = self._fake_sg_members([FAKE_SGID,
                                                          OTHER_SGID])
        # FAKE_SGID loses all members; OTHER_SGID keeps its IPv4 ones.
        self.firewall.sg_members[FAKE_SGID][_IPv4] = []
        self.firewall.sg_members[FAKE_SGID][_IPv6] = []
        self.firewall.sg_members[OTHER_SGID][_IPv6] = []
        self.firewall._remove_unused_sg_members()
        self.assertIn(OTHER_SGID, self.firewall.sg_members)
        self.assertNotIn(FAKE_SGID, self.firewall.sg_members)
    def test_remove_unused_security_group_info_clears_unused_rules(self):
        """Rules for an SG not referenced by any filtered port are purged."""
        self._setup_fake_firewall_members_and_rules(self.firewall)
        self.firewall.prepare_port_filter(self._fake_port())
        # create another SG which won't be referenced by any filtered port
        fake_sg_rules = self.firewall.sg_rules['fake_sgid']
        self.firewall.pre_sg_rules[OTHER_SGID] = fake_sg_rules
        self.firewall.sg_rules[OTHER_SGID] = fake_sg_rules
        # call the cleanup function, and check the unused sg_rules are out
        self.firewall._remove_unused_security_group_info()
        self.assertNotIn(OTHER_SGID, self.firewall.sg_rules)
    def test_remove_unused_security_group_info(self):
        """Members of a group with no filtered ports are cleaned up."""
        self._setup_fake_firewall_members_and_rules(self.firewall)
        # no filtered ports in 'fake_sgid', so all rules and members
        # are not needed and we expect them to be cleaned up
        self.firewall.prepare_port_filter(self._fake_port(OTHER_SGID))
        self.firewall._remove_unused_security_group_info()
        self.assertNotIn(FAKE_SGID, self.firewall.sg_members)
    def test_remove_all_unused_info(self):
        """With no filtered ports at all, every rule and member is removed."""
        self._setup_fake_firewall_members_and_rules(self.firewall)
        self.firewall.filtered_ports = {}
        self.firewall._remove_unused_security_group_info()
        self.assertFalse(self.firewall.sg_members)
        self.assertFalse(self.firewall.sg_rules)
    def test_prepare_port_filter_with_deleted_member(self):
        """set_members receives the current (not the pre-update) members."""
        self.firewall.sg_rules = self._fake_sg_rules()
        self.firewall.pre_sg_rules = self._fake_sg_rules()
        self.firewall.sg_members = {'fake_sgid': {
            'IPv4': [
                '10.0.0.1', '10.0.0.3', '10.0.0.4', '10.0.0.5'],
            'IPv6': ['fe80::1']}}
        # '10.0.0.2' exists only in pre_sg_members, i.e. it was deleted.
        self.firewall.pre_sg_members = {'fake_sgid': {
            'IPv4': ['10.0.0.2'],
            'IPv6': ['fe80::1']}}
        self.firewall.prepare_port_filter(self._fake_port())
        calls = [
            mock.call.set_members('fake_sgid', 'IPv4',
                                  ['10.0.0.1', '10.0.0.3', '10.0.0.4',
                                   '10.0.0.5']),
            mock.call.set_members('fake_sgid', 'IPv6', ['fe80::1'])]
        self.firewall.ipset.assert_has_calls(calls, True)
    def test_remove_port_filter_with_destroy_ipset_chain(self):
        """Removing the last filtered port destroys the per-SG ipsets."""
        self.firewall.sg_rules = self._fake_sg_rules()
        port = self._fake_port()
        self.firewall.sg_members = {'fake_sgid': {
            'IPv4': ['10.0.0.1'],
            'IPv6': ['fe80::1']}}
        self.firewall.pre_sg_members = {'fake_sgid': {
            'IPv4': [],
            'IPv6': []}}
        self.firewall.prepare_port_filter(port)
        self.firewall.filter_defer_apply_on()
        # Flip members to empty and remove the port while deferred, then
        # apply: the ipsets should be torn down.
        self.firewall.sg_members = {'fake_sgid': {
            'IPv4': [],
            'IPv6': []}}
        self.firewall.pre_sg_members = {'fake_sgid': {
            'IPv4': ['10.0.0.1'],
            'IPv6': ['fe80::1']}}
        self.firewall.remove_port_filter(port)
        self.firewall.filter_defer_apply_off()
        calls = [
            mock.call.set_members('fake_sgid', 'IPv4', ['10.0.0.1']),
            mock.call.set_members('fake_sgid', 'IPv6', ['fe80::1']),
            mock.call.get_name('fake_sgid', 'IPv4'),
            mock.call.set_exists('fake_sgid', 'IPv4'),
            mock.call.get_name('fake_sgid', 'IPv6'),
            mock.call.set_exists('fake_sgid', 'IPv6'),
            mock.call.destroy('fake_sgid', 'IPv4'),
            mock.call.destroy('fake_sgid', 'IPv6')]
        self.firewall.ipset.assert_has_calls(calls, any_order=True)
    def test_prepare_port_filter_with_sg_no_member(self):
        """Referencing a remote SG that has no member entry must not break."""
        self.firewall.sg_rules = self._fake_sg_rules()
        self.firewall.sg_rules[FAKE_SGID].append(
            {'direction': 'ingress', 'remote_group_id': 'fake_sgid2',
             'ethertype': 'IPv4'})
        # NOTE(review): dict.update() with no arguments is a no-op --
        # presumably vestigial; confirm and drop.
        self.firewall.sg_rules.update()
        self.firewall.sg_members['fake_sgid'] = {
            'IPv4': ['10.0.0.1', '10.0.0.2'], 'IPv6': ['fe80::1']}
        self.firewall.pre_sg_members = {}
        port = self._fake_port()
        port['security_group_source_groups'].append('fake_sgid2')
        self.firewall.prepare_port_filter(port)
        calls = [mock.call.set_members('fake_sgid', 'IPv4',
                                       ['10.0.0.1', '10.0.0.2']),
                 mock.call.set_members('fake_sgid', 'IPv6', ['fe80::1'])]
        self.firewall.ipset.assert_has_calls(calls, any_order=True)
    def test_filter_defer_apply_off_with_sg_only_ipv6_rule(self):
        """Dropping the only IPv4 rule destroys just the IPv4 ipset."""
        self.firewall.sg_rules = self._fake_sg_rules()
        self.firewall.pre_sg_rules = self._fake_sg_rules()
        self.firewall.ipset_chains = {'IPv4fake_sgid': ['10.0.0.2'],
                                      'IPv6fake_sgid': ['fe80::1']}
        self.firewall.sg_members = {'fake_sgid': {
            'IPv4': ['10.0.0.2'],
            'IPv6': ['fe80::1']}}
        self.firewall.pre_sg_members = {'fake_sgid': {
            'IPv4': ['10.0.0.2'],
            'IPv6': ['fe80::1']}}
        self.firewall.sg_rules['fake_sgid'].remove(
            {'direction': 'ingress', 'remote_group_id': 'fake_sgid',
             'ethertype': 'IPv4'})
        # NOTE(review): dict.update() with no arguments is a no-op --
        # presumably vestigial; confirm and drop.
        self.firewall.sg_rules.update()
        self.firewall._defer_apply = True
        port = self._fake_port()
        self.firewall.filtered_ports['tapfake_dev'] = port
        self.firewall._pre_defer_filtered_ports = {}
        self.firewall._pre_defer_unfiltered_ports = {}
        self.firewall.filter_defer_apply_off()
        calls = [mock.call.destroy('fake_sgid', 'IPv4')]
        self.firewall.ipset.assert_has_calls(calls, True)
    def test_sg_rule_expansion_with_remote_ips(self):
        """A remote-group rule expands to one rule per remote member IP,
        excluding the port's own address (FAKE_IP is not in the result)."""
        other_ips = ['10.0.0.2', '10.0.0.3', '10.0.0.4']
        self.firewall.sg_members = {'fake_sgid': {
            'IPv4': [FAKE_IP['IPv4']] + other_ips,
            'IPv6': [FAKE_IP['IPv6']]}}
        port = self._fake_port()
        rule = self._fake_sg_rule_for_ethertype(_IPv4, FAKE_SGID)
        rules = self.firewall._expand_sg_rule_with_remote_ips(
            rule, port, 'ingress')
        self.assertEqual(list(rules),
                         [dict(list(rule.items()) +
                               [('source_ip_prefix', '%s/32' % ip)])
                          for ip in other_ips])
    def test_build_ipv4v6_mac_ip_list(self):
        """MACs are normalized to unix colon format and paired with the IP
        in the list matching the address's IP version."""
        mac_oth = 'ffff-ffff-ffff'
        mac_unix = 'ff:ff:ff:ff:ff:ff'
        ipv4 = FAKE_IP['IPv4']
        ipv6 = FAKE_IP['IPv6']
        fake_ipv4_pair = []
        fake_ipv4_pair.append((mac_unix, ipv4))
        fake_ipv6_pair = []
        fake_ipv6_pair.append((mac_unix, ipv6))
        mac_ipv4_pairs = []
        mac_ipv6_pairs = []
        self.firewall._build_ipv4v6_mac_ip_list(mac_oth, ipv4,
                                                mac_ipv4_pairs, mac_ipv6_pairs)
        self.assertEqual(fake_ipv4_pair, mac_ipv4_pairs)
        self.firewall._build_ipv4v6_mac_ip_list(mac_oth, ipv6,
                                                mac_ipv4_pairs, mac_ipv6_pairs)
        self.assertEqual(fake_ipv6_pair, mac_ipv6_pairs)
    def test_update_ipset_members(self):
        """An empty member list is still pushed to the ipset backend."""
        self.firewall.sg_members[FAKE_SGID][_IPv4] = []
        self.firewall.sg_members[FAKE_SGID][_IPv6] = []
        sg_info = {constants.IPv4: [FAKE_SGID]}
        self.firewall._update_ipset_members(sg_info)
        calls = [mock.call.set_members(FAKE_SGID, constants.IPv4, [])]
        self.firewall.ipset.assert_has_calls(calls)
| |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2015-2020 Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
topology pytest plugin module entry point.
This plugin provides a fixture ``topology`` that will load and build a topology
description from the module. This topology must be present in the module as a
constant variable ``TOPOLOGY``. It can be either a string and thus the method
:meth:`topology.manager.TopologyManager.parse` will be used, or a dictionary in
which case the method :meth:`topology.manager.TopologyManager.load` will be
used. Once built, the plugin registers the *unbuild* step to run after the
module has finished all of its tests.
If the ``TOPOLOGY`` variable isn't present the fixture assumes the user will
prefer to build the topology using the standard NML objects with the
:class:`pynml.manager.NMLManager` instance embedded into the
:class:`topology.manager.TopologyManager`.
To be able to select the platform engine this plugins registers the
``--topology-platform`` option that can be set in pytest command line.
For reference see:
http://pytest.org/dev/plugins.html#hook-specification-and-validation
"""
from time import time
from logging import getLogger
from os import getcwd, makedirs
from traceback import format_exc
from collections import OrderedDict
from pytest import fixture, fail, hookimpl, skip
from os.path import join, isabs, abspath, realpath, exists, isdir
from topology.args import parse_options, ExtendAction
from topology.logging import get_logger, StepLogger
log = getLogger(__name__)
class TopologyPlugin(object):
    """
    pytest plugin for Topology.

    Holds the session-wide configuration gathered from the command line
    options and reports it in the pytest header.

    :param str platform: Platform engine name to run the tests with.
    :param str plot_dir: Directory to auto-plot topologies. ``None`` if
     feature is disabled.
    :param str plot_format: Format to plot the topologies.
    :param str nml_dir: Directory to auto-export topologies. ``None`` if
     feature is disabled.
    :param dict injected_attr: A dictionary holding topology attributes to
     inject.
    :param str log_dir: Path where to store logs.
    :param list szn_dir: List of paths to directories where ``*.szn`` files
     are located.
    :param dict platform_options: Dictionary holding parameters passed directly
     to the topology platform object.
    :param int build_retries: Amount of times to retry the build stage.
    """

    def __init__(
            self, platform, plot_dir, plot_format,
            nml_dir, injected_attr, log_dir, szn_dir, platform_options,
            build_retries
    ):
        super(TopologyPlugin, self).__init__()
        # Plain storage; pytest hooks and the topology fixture read these.
        self.platform = platform
        self.plot_dir = plot_dir
        self.plot_format = plot_format
        self.nml_dir = nml_dir
        self.injected_attr = injected_attr
        self.log_dir = log_dir
        self.szn_dir = szn_dir
        self.platform_options = platform_options
        self.build_retries = build_retries

    def pytest_report_header(self, config):
        """
        pytest hook to print information of the report header.
        """
        header = ["topology: platform='{}'".format(self.platform)]
        # Only advertise the optional output directories that are enabled.
        if self.plot_dir:
            header.append(" plot_dir='{}' ({})".format(
                self.plot_dir, self.plot_format
            ))
        if self.nml_dir:
            header.append(" nml_dir='{}'".format(
                self.nml_dir
            ))
        if self.log_dir:
            header.append(" log_dir='{}'".format(
                self.log_dir
            ))
        return '\n'.join(header)
@fixture(scope='module')
def topology(request):
    """
    Fixture that injects a TopologyManager as a test fixture.

    The topology is built once per module (from its ``TOPOLOGY`` constant,
    when present) and unbuilt by the registered finalizer after the
    module's tests complete.

    See:

    - https://pytest.org/latest/fixture.html
    - https://pytest.org/latest/builtin.html#_pytest.python.FixtureRequest
    """
    from ..manager import TopologyManager
    from ..logging import manager as logmanager
    plugin = request.config._topology_plugin
    module = request.module
    topomgr = TopologyManager(
        engine=plugin.platform, options=plugin.platform_options
    )
    # Setup framework logging
    logmanager.logging_context = module.__name__
    if plugin.log_dir:
        logmanager.logging_directory = plugin.log_dir

    # Finalizer unbuilds the topology and plots/exports it
    def finalizer():
        # Do nothing if topology isn't built
        if not topomgr.is_built():
            return
        # Plot topology
        if plugin.plot_dir:
            plot_file = join(
                plugin.plot_dir,
                '{}.{}'.format(module.__name__, plugin.plot_format)
            )
            topomgr.nml.save_graphviz(
                plot_file, keep_gv=True
            )
        # Export topology as NML
        if plugin.nml_dir:
            nml_file = join(
                plugin.nml_dir,
                '{}.xml'.format(module.__name__)
            )
            topomgr.nml.save_nml(
                nml_file, pretty=True
            )
        topomgr.unbuild()

    # Autobuild topology if available.
    if hasattr(module, 'TOPOLOGY'):
        # Get topology description
        topo = module.TOPOLOGY
        # Get attributes to inject (keyed by the module's absolute path)
        suite_injected_attr = None
        if plugin.injected_attr is not None:
            suite_injected_attr = plugin.injected_attr.get(
                abspath(module.__file__), None
            )
        try:
            if isinstance(topo, dict):
                topomgr.load(topo, inject=suite_injected_attr)
            else:
                topomgr.parse(topo, inject=suite_injected_attr)
        except Exception:
            fail(
                'Error loading topology in module {}:\n{}'.format(
                    module.__name__,
                    format_exc()
                ),
                pytrace=False
            )
        # Retry the build up to --topology-build-retries extra times.
        for iteration in range(plugin.build_retries + 1):
            try:
                topomgr.build()
                log.info(
                    'Attempt {} on building topology was successful'.format(
                        iteration
                    )
                )
                break
            except Exception:
                msg = (
                    '{}\nAttempt {} to build topology failed.'
                ).format(format_exc(), iteration)
                log.warning(msg)
        else:
            # NOTE(review): format_exc() runs outside any except block here;
            # on Python 3 it reports "NoneType: None" instead of the last
            # build failure -- confirm and consider capturing the traceback
            # inside the loop.
            fail(
                'Error building topology in module {}:\n{}'.format(
                    module.__name__,
                    format_exc()
                ), pytrace=False
            )
    request.addfinalizer(finalizer)
    return topomgr
@fixture(scope='function')
def step(request):
    """
    Fixture to log a step in a test.

    Returns a step logger bound to the current test suite and test case.
    """
    context = OrderedDict()
    context['test_suite'] = request.module.__name__
    context['test_case'] = request.function.__name__
    return get_logger(context, category='step')
def pytest_addoption(parser):
    """
    pytest hook to add CLI arguments.

    Registers the ``--topology-*`` option group read later by
    pytest_sessionstart().
    """
    # Function-level import; presumably avoids loading the platform
    # registry at module import time -- confirm before moving it.
    from ..platforms.manager import platforms, DEFAULT_PLATFORM
    group = parser.getgroup('topology', 'Testing of network topologies')
    group.addoption(
        '--topology-platform',
        default=DEFAULT_PLATFORM,
        help='Select platform to run topology tests',
        choices=platforms()
    )
    group.addoption(
        '--topology-plot-dir',
        default=None,
        help='Directory to auto-plot topologies'
    )
    group.addoption(
        '--topology-plot-format',
        default='svg',
        help='Format for plotting topologies'
    )
    group.addoption(
        '--topology-nml-dir',
        default=None,
        help='Directory to export topologies as NML XML'
    )
    group.addoption(
        '--topology-inject',
        default=None,
        help='Path to an attributes injection file'
    )
    group.addoption(
        '--topology-log-dir',
        default=None,
        help='Path to a directory where logs are to be stored'
    )
    group.addoption(
        '--topology-szn-dir',
        default=None,
        action='append',
        help='Path to a directory where szn files are located. '
        'Can be used multiple times'
    )
    group.addoption(
        '--topology-platform-options',
        nargs='+',
        default=None,
        action=ExtendAction,
        help='An argument used by the topology platform '
        'with the form <key>=<value>'
    )
    group.addoption(
        '--topology-build-retries',
        default=0,
        type='int',
        help='Retry building a topology up to defined times'
    )
def pytest_sessionstart(session):
    """
    pytest hook to configure plugin.

    Reads the ``--topology-*`` command line options, prepares output
    directories, parses the optional attribute-injection file and registers
    a :class:`TopologyPlugin` instance on the pytest config.
    """
    config = session.config
    # Get registered options
    platform = config.getoption('--topology-platform')
    plot_format = config.getoption('--topology-plot-format')
    plot_dir = config.getoption('--topology-plot-dir')
    nml_dir = config.getoption('--topology-nml-dir')
    injection_file = config.getoption('--topology-inject')
    log_dir = config.getoption('--topology-log-dir')
    szn_dir = config.getoption('--topology-szn-dir')
    platform_options = config.getoption('--topology-platform-options')
    build_retries = config.getoption('--topology-build-retries')
    if build_retries < 0:
        raise Exception('--topology-build-retries can\'t be less than 0')

    # Create the given directory if missing, resolving relative paths
    # against the current working directory.
    def create_dir(path):
        if path:
            if not isabs(path):
                path = join(abspath(getcwd()), path)
            if not exists(path):
                makedirs(path)

    # Determine plot, NML and log directory paths and create them if required
    create_dir(plot_dir)
    create_dir(nml_dir)
    create_dir(log_dir)
    # Parse attributes injection file
    from pyszn.injection import parse_attribute_injection
    injected_attr = None
    if injection_file is not None:
        log.info('Processing attribute injection...')
        start_time = time()
        # Get a list of all testing directories
        search_paths = [
            realpath(arg) for arg in config.args if isdir(arg)
        ]
        injected_attr = parse_attribute_injection(
            injection_file,
            search_paths=search_paths,
            ignored_paths=config.getini('norecursedirs'),
            szn_dir=szn_dir
        )
        log.info(
            'Attribute injection completed after {}s'
            .format(time() - start_time)
        )
    # Create and register plugin
    config._topology_plugin = TopologyPlugin(
        platform,
        plot_dir,
        plot_format.lstrip('.'),
        nml_dir,
        injected_attr,
        log_dir,
        szn_dir,
        parse_options(platform_options),
        build_retries
    )
    config.pluginmanager.register(config._topology_plugin)
    # Add test_id marker
    config.addinivalue_line(
        'markers',
        'test_id(id): assign a test identifier to the test'
    )
    # Add topology_compatible marker
    config.addinivalue_line(
        'markers',
        'platform_incompatible(platforms, reason=None): '
        'mark a test as incompatible with a list of platform engines. '
        'Optionally specify a reason for better reporting'
    )
def pytest_unconfigure(config):
    """
    pytest hook to unconfigure plugin.

    Detaches the TopologyPlugin instance from the config object and
    unregisters it from the plugin manager, if one was ever registered.
    """
    registered = getattr(config, '_topology_plugin', None)
    if not registered:
        return
    del config._topology_plugin
    config.pluginmanager.unregister(registered)
@hookimpl(tryfirst=True)
def pytest_runtest_setup(item):
    """
    pytest hook to setup test before run.

    - Records the ``test_id`` marker value as a JUnit XML property when
      XML logging is active.
    - Skips the test when the selected platform appears in its
      ``platform_incompatible`` marker.
    """
    test_id_marker = item.get_closest_marker('test_id')
    incompatible_marker = item.get_closest_marker('platform_incompatible')
    # If marked and xml logging enabled
    if test_id_marker is not None and hasattr(item.config, '_xml'):
        test_id = test_id_marker.args[0]
        item.config._xml.node_reporter(item.nodeid).add_property(
            'test_id', test_id
        )
    if incompatible_marker:
        platform = item.config._topology_plugin.platform
        if platform in incompatible_marker.args[0]:
            # Prefer the user-supplied reason, fall back to a generic one.
            message = (
                incompatible_marker.kwargs.get('reason') or (
                    'Test is incompatible with {} platform'.format(platform)
                )
            )
            skip(message)
# Public API re-exported when this plugin module is star-imported.
__all__ = [
    'TopologyPlugin',
    'topology',
    'pytest_addoption',
    'StepLogger'
]
| |
import unittest
import json
from nose.plugins.skip import SkipTest
from dummyserver.server import HAS_IPV6
from dummyserver.testcase import (HTTPDummyServerTestCase,
IPv6HTTPDummyServerTestCase)
from urllib3.poolmanager import PoolManager
from urllib3.connectionpool import port_by_scheme
from urllib3.exceptions import MaxRetryError, SSLError
from urllib3.util.retry import Retry
class TestPoolManager(HTTPDummyServerTestCase):
    """Integration tests for PoolManager against the dummy HTTP server."""

    def setUp(self):
        """Build base URLs for the primary and alternate test hosts."""
        self.base_url = 'http://%s:%d' % (self.host, self.port)
        self.base_url_alt = 'http://%s:%d' % (self.host_alt, self.port)

    def test_redirect(self):
        """A 303 is surfaced with redirect=False and followed by default."""
        http = PoolManager()
        r = http.request('GET', '%s/redirect' % self.base_url,
                         fields={'target': '%s/' % self.base_url},
                         redirect=False)
        self.assertEqual(r.status, 303)
        r = http.request('GET', '%s/redirect' % self.base_url,
                         fields={'target': '%s/' % self.base_url})
        self.assertEqual(r.status, 200)
        self.assertEqual(r.data, b'Dummy server!')

    def test_redirect_twice(self):
        """Two chained redirects are followed to the final response."""
        http = PoolManager()
        r = http.request('GET', '%s/redirect' % self.base_url,
                         fields={'target': '%s/redirect' % self.base_url},
                         redirect=False)
        self.assertEqual(r.status, 303)
        r = http.request('GET', '%s/redirect' % self.base_url,
                         fields={'target': '%s/redirect?target=%s/' % (self.base_url, self.base_url)})
        self.assertEqual(r.status, 200)
        self.assertEqual(r.data, b'Dummy server!')

    def test_redirect_to_relative_url(self):
        """Relative Location headers are resolved against the pool host."""
        http = PoolManager()
        r = http.request('GET', '%s/redirect' % self.base_url,
                         fields = {'target': '/redirect'},
                         redirect = False)
        self.assertEqual(r.status, 303)
        r = http.request('GET', '%s/redirect' % self.base_url,
                         fields = {'target': '/redirect'})
        self.assertEqual(r.status, 200)
        self.assertEqual(r.data, b'Dummy server!')

    def test_cross_host_redirect(self):
        """Cross-host redirects need a retry budget; then a new pool is used."""
        http = PoolManager()
        cross_host_location = '%s/echo?a=b' % self.base_url_alt
        try:
            http.request('GET', '%s/redirect' % self.base_url,
                         fields={'target': cross_host_location},
                         timeout=1, retries=0)
            self.fail("Request succeeded instead of raising an exception like it should.")
        except MaxRetryError:
            pass
        r = http.request('GET', '%s/redirect' % self.base_url,
                         fields={'target': '%s/echo?a=b' % self.base_url_alt},
                         timeout=1, retries=1)
        self.assertEqual(r._pool.host, self.host_alt)

    def test_too_many_redirects(self):
        """Exceeding total or redirect-specific retries raises MaxRetryError."""
        http = PoolManager()
        try:
            r = http.request('GET', '%s/redirect' % self.base_url,
                             fields={'target': '%s/redirect?target=%s/' % (self.base_url, self.base_url)},
                             retries=1)
            self.fail("Failed to raise MaxRetryError exception, returned %r" % r.status)
        except MaxRetryError:
            pass
        try:
            r = http.request('GET', '%s/redirect' % self.base_url,
                             fields={'target': '%s/redirect?target=%s/' % (self.base_url, self.base_url)},
                             retries=Retry(total=None, redirect=1))
            self.fail("Failed to raise MaxRetryError exception, returned %r" % r.status)
        except MaxRetryError:
            pass

    def test_raise_on_redirect(self):
        """raise_on_redirect=False returns the redirect response itself."""
        http = PoolManager()
        r = http.request('GET', '%s/redirect' % self.base_url,
                         fields={'target': '%s/redirect?target=%s/' % (self.base_url, self.base_url)},
                         retries=Retry(total=None, redirect=1, raise_on_redirect=False))
        self.assertEqual(r.status, 303)

    def test_raise_on_status(self):
        """status_forcelist raises by default and when raise_on_status=True;
        with raise_on_status=False the final error response is returned."""
        http = PoolManager()
        try:
            # the default is to raise
            r = http.request('GET', '%s/status' % self.base_url,
                             fields={'status': '500 Internal Server Error'},
                             retries=Retry(total=1, status_forcelist=range(500, 600)))
            self.fail("Failed to raise MaxRetryError exception, returned %r" % r.status)
        except MaxRetryError:
            pass
        try:
            # raise explicitly
            r = http.request('GET', '%s/status' % self.base_url,
                             fields={'status': '500 Internal Server Error'},
                             retries=Retry(total=1, status_forcelist=range(500, 600), raise_on_status=True))
            self.fail("Failed to raise MaxRetryError exception, returned %r" % r.status)
        except MaxRetryError:
            pass
        # don't raise
        r = http.request('GET', '%s/status' % self.base_url,
                         fields={'status': '500 Internal Server Error'},
                         retries=Retry(total=1, status_forcelist=range(500, 600), raise_on_status=False))
        self.assertEqual(r.status, 500)

    def test_missing_port(self):
        # Can a URL that lacks an explicit port like ':80' succeed, or
        # will all such URLs fail with an error?
        http = PoolManager()
        # By globally adjusting `port_by_scheme` we pretend for a moment
        # that HTTP's default port is not 80, but is the port at which
        # our test server happens to be listening.
        port_by_scheme['http'] = self.port
        try:
            r = http.request('GET', 'http://%s/' % self.host, retries=0)
        finally:
            # Always restore the real default, even if the request fails.
            port_by_scheme['http'] = 80
        self.assertEqual(r.status, 200)
        self.assertEqual(r.data, b'Dummy server!')

    def test_headers(self):
        """Manager-level headers are sent by every request style, and
        per-request headers replace (not merge with) them."""
        http = PoolManager(headers={'Foo': 'bar'})
        r = http.request('GET', '%s/headers' % self.base_url)
        returned_headers = json.loads(r.data.decode())
        self.assertEqual(returned_headers.get('Foo'), 'bar')
        r = http.request('POST', '%s/headers' % self.base_url)
        returned_headers = json.loads(r.data.decode())
        self.assertEqual(returned_headers.get('Foo'), 'bar')
        r = http.request_encode_url('GET', '%s/headers' % self.base_url)
        returned_headers = json.loads(r.data.decode())
        self.assertEqual(returned_headers.get('Foo'), 'bar')
        r = http.request_encode_body('POST', '%s/headers' % self.base_url)
        returned_headers = json.loads(r.data.decode())
        self.assertEqual(returned_headers.get('Foo'), 'bar')
        r = http.request_encode_url('GET', '%s/headers' % self.base_url, headers={'Baz': 'quux'})
        returned_headers = json.loads(r.data.decode())
        self.assertEqual(returned_headers.get('Foo'), None)
        self.assertEqual(returned_headers.get('Baz'), 'quux')
        r = http.request_encode_body('GET', '%s/headers' % self.base_url, headers={'Baz': 'quux'})
        returned_headers = json.loads(r.data.decode())
        self.assertEqual(returned_headers.get('Foo'), None)
        self.assertEqual(returned_headers.get('Baz'), 'quux')

    def test_http_with_ssl_keywords(self):
        """TLS keyword args must not break plain-HTTP requests."""
        http = PoolManager(ca_certs='REQUIRED')
        r = http.request('GET', 'http://%s:%s/' % (self.host, self.port))
        self.assertEqual(r.status, 200)

    def test_http_with_ca_cert_dir(self):
        """A bogus ca_cert_dir is ignored for plain-HTTP requests."""
        http = PoolManager(ca_certs='REQUIRED', ca_cert_dir='/nosuchdir')
        r = http.request('GET', 'http://%s:%s/' % (self.host, self.port))
        self.assertEqual(r.status, 200)
class TestIPv6PoolManager(IPv6HTTPDummyServerTestCase):
    # Raised at class-definition (import) time, so the whole class is
    # skipped on hosts without IPv6 support.
    if not HAS_IPV6:
        raise SkipTest("IPv6 is not supported on this system.")

    def setUp(self):
        """Build a base URL with a bracketed IPv6 literal host."""
        self.base_url = 'http://[%s]:%d' % (self.host, self.port)

    def test_ipv6(self):
        """A simple GET against an IPv6 literal host should succeed."""
        http = PoolManager()
        http.request('GET', self.base_url)
if __name__ == '__main__':
    # Allow running this module directly as a test script.
    unittest.main()
| |
# Copyright 2014 Huawei Technologies Co. Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Miscellaneous Health Check for Compass."""
import logging
from compass.actions.health_check import base
from compass.actions.health_check import utils as health_check_utils
class MiscCheck(base.BaseCheck):
"""health check for misc."""
NAME = "Miscellaneous Check"
MISC_MAPPING = {
"yum": "rsyslog ntp iproute openssh-clients python git wget "
"python-setuptools "
"amqp mod_wsgi httpd squid "
"dhcp bind rsync yum-utils xinetd tftp-server gcc "
"net-snmp-utils net-snmp".split(" "),
"pip": "netaddr flask flask_script flask_restful amqplib "
"flask_sqlalchemy paramiko mock celery six discover daemon "
"unittest2 chef".split(" "),
"disable": "iptables ip6tables".split(" "),
"enable": "httpd squid xinetd dhcpd named sshd rsyslog cobblerd "
"ntpd compass-celeryd compass-progress-updated".split(" "),
}
def run(self):
"""do health check."""
self.check_linux_dependencies()
print "[Done]"
self.check_pip_dependencies()
print "[Done]"
self.check_ntp()
print "[Done]"
self.check_rsyslogd()
print "[Done]"
self.check_chkconfig()
print "[Done]"
self.check_selinux()
print "[Done]"
if self.code == 1:
self.messages.append(
"[%s]Info: Miscellaneous check has completed "
"No problems found, all systems go." % self.NAME)
return (self.code, self.messages)
def check_linux_dependencies(self):
"""Checks if dependencies are installed."""
print "Checking Linux dependencies....",
if self.dist in ("centos", "redhat", "fedora", "scientific linux"):
pkg_type = "yum"
else:
pkg_type = "apt"
try:
pkg_module = __import__(pkg_type)
except Exception:
self._set_status(
0,
"[%s]Error: No module named %s, "
"please install it first." % (self.NAME, pkg_type))
return True
logging.info('import %s: %s', pkg_type, pkg_module)
method_name = 'self.check_' + pkg_type + '_dependencies(pkg_module)'
eval(method_name)
def check_yum_dependencies(self, pkg_module):
"""Checks if yum dependencies are installed.
:param pkg_module : python yum library
:type pkg_module : python module
"""
print "Checking Yum dependencies......",
yum_base = pkg_module.YumBase()
uninstalled = []
for package in self.MISC_MAPPING["yum"]:
if len(yum_base.rpmdb.searchNevra(name=package)) == 0:
self._set_status(
0,
"[%s]Error: %s package is required"
% (self.NAME, package))
uninstalled.append(package)
if len(uninstalled) != 0:
self._set_status(
0,
"[%s]Info: Uninstalled yum packages: %s"
% (self.NAME, ', '.join(item for item in uninstalled)))
return True
def check_pip_dependencies(self):
"""Checks if required pip packages are installed."""
print "Checking pip dependencies......",
uninstalled = []
for module in self.MISC_MAPPING['pip']:
try:
__import__(module)
except Exception:
self._set_status(
0,
"[%s]Error: pip package %s is requred"
% (self.NAME, module))
uninstalled.append(module)
if len(uninstalled) != 0:
self._set_status(
0,
"[%s]Info: Uninstalled pip packages: %s"
% (self.NAME, ', '.join(item for item in uninstalled)))
return True
def check_ntp(self):
"""Validates ntp configuration and service."""
print "Checking NTP......",
conf_err_msg = health_check_utils.check_path(self.NAME,
'/etc/ntp.conf')
if not conf_err_msg == "":
self._set_status(0, conf_err_msg)
serv_err_msg = health_check_utils.check_service_running(self.NAME,
'ntpd')
if not serv_err_msg == "":
self._set_status(0, serv_err_msg)
return True
def check_rsyslogd(self):
"""Validates rsyslogd configuration and service."""
print "Checking rsyslog......",
conf_err_msg = health_check_utils.check_path(self.NAME,
'/etc/rsyslog.conf')
if not conf_err_msg == "":
self._set_status(0, conf_err_msg)
dir_err_msg = health_check_utils.check_path(self.NAME,
'/etc/rsyslog.d/')
if not dir_err_msg == "":
self._set_status(0, dir_err_msg)
serv_err_msg = health_check_utils.check_service_running(self.NAME,
'rsyslogd')
if not serv_err_msg == "":
self._set_status(0, serv_err_msg)
return True
def check_chkconfig(self):
"""Check if required services are enabled on the start up."""
print "Checking chkconfig......",
serv_to_disable = []
for serv in self.MISC_MAPPING["disable"]:
if health_check_utils.check_chkconfig(serv) is True:
self._set_status(
0,
"[%s]Error: %s is not disabled"
% (self.NAME, serv))
serv_to_disable.append(serv)
if len(serv_to_disable) != 0:
self._set_status(
0,
"[%s]Info: You need to disable these services "
"on system start-up: %s"
% (self.NAME,
", ".join(item for item in serv_to_disable)))
serv_to_enable = []
for serv in self.MISC_MAPPING["enable"]:
if health_check_utils.check_chkconfig(serv) is False:
self._set_status(
0, "[%s]Error: %s is disabled" % (self.NAME, serv))
serv_to_enable.append(serv)
if len(serv_to_enable) != 0:
self._set_status(0, "[%s]Info: You need to enable these "
"services on system start-up: %s"
% (self.NAME,
", ".join(item for item in serv_to_enable)))
return True
def check_selinux(self):
"""Check if SELinux is disabled."""
print "Checking Selinux......",
disabled = False
with open("/etc/selinux/config") as selinux:
for line in selinux:
if "SELINUX=disabled" in line:
disabled = True
break
if disabled is False:
self._set_status(
0,
"[%s]Selinux is not disabled, "
"please disable it in /etc/selinux/config." % self.NAME)
return True
| |
#!/usr/bin/env python3
import argparse
import datetime
import errno
import hashlib
import logging
import os
import pwd
import re
import shutil
import subprocess
import sys
import time
import json
import uuid
if False:
    # Typing imports guarded so the Python 2 runtime never evaluates them;
    # only static type checkers (mypy) read this block.
    from typing import Sequence, Set, Text, Any
DEPLOYMENTS_DIR = "/home/zulip/deployments"
LOCK_DIR = os.path.join(DEPLOYMENTS_DIR, "lock")
# Timestamp format used to name deployment directories.
TIMESTAMP_FORMAT = '%Y-%m-%d-%H-%M-%S'
# Color codes (ANSI escapes for deploy-script terminal output)
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BLACKONYELLOW = '\x1b[0;30;43m'
WHITEONRED = '\x1b[0;37;41m'
BOLDRED = '\x1B[1;31m'
GREEN = '\x1b[32m'
YELLOW = '\x1b[33m'
BLUE = '\x1b[34m'
MAGENTA = '\x1b[35m'
CYAN = '\x1b[36m'
def parse_cache_script_args(description):
    # type: (Text) -> argparse.Namespace
    """Build and parse the CLI options shared by the cache-cleanup scripts.

    :param description: help text shown at the top of ``--help`` output.
    :returns: the parsed namespace; ``verbose`` is forced on for dry runs.
    """
    parser = argparse.ArgumentParser(description=description)
    parser.add_argument(
        "--threshold", dest="threshold_days", type=int, default=14,
        nargs="?", metavar="<days>", help="Any cache which is not in "
        "use by a deployment not older than threshold days(current "
        "installation in dev) and older than threshold days will be "
        "deleted. (defaults to 14)")
    parser.add_argument(
        "--dry-run", dest="dry_run", action="store_true",
        help="If specified then script will only print the caches "
        "that it will delete/keep back. It will not delete any cache.")
    parser.add_argument(
        "--verbose", dest="verbose", action="store_true",
        help="If specified then script will print a detailed report "
        "of what is being will deleted/kept back.")
    parsed = parser.parse_args()
    if parsed.dry_run:
        # A dry run is always reported verbosely.
        parsed.verbose = True
    return parsed
def get_deployment_version(extract_path):
    # type: (str) -> str
    """Extract ZULIP_VERSION from an unpacked tarball under *extract_path*.

    Scans for a ``zulip-server*`` directory containing ``version.py`` and
    reads the version string from it; falls back to ``'0.0.0'``.
    """
    version = '0.0.0'
    for entry in os.listdir(extract_path):
        entry_path = os.path.join(extract_path, entry)
        if not entry.startswith('zulip-server') or not os.path.isdir(entry_path):
            continue
        with open(os.path.join(entry_path, 'version.py')) as fh:
            match = re.search('ZULIP_VERSION = "(.*)"', fh.read())
        if match:
            version = match.groups()[0]
            break
    return version
def _version_tuple(version):
    # type: (str) -> Any
    """Parse '1.4.3' into (1, 4, 3); return None for non-numeric versions."""
    try:
        return tuple(int(part) for part in version.split('.'))
    except ValueError:
        return None


def is_invalid_upgrade(current_version, new_version):
    # type: (str, str) -> bool
    """Return True for the unsupported jump from <=1.3.10 straight to >1.4.3.

    Versions are compared numerically component-by-component; the previous
    plain string comparison mis-ordered multi-digit components (e.g. it
    considered '1.10.0' < '1.4.3').  Non-numeric version strings fall back
    to the old string comparison.
    """
    new_parts = _version_tuple(new_version)
    current_parts = _version_tuple(current_version)
    if new_parts is None or current_parts is None:
        # Fallback for versions like '1.4.0-dev' that don't parse cleanly.
        return new_version > '1.4.3' and current_version <= '1.3.10'
    return new_parts > (1, 4, 3) and current_parts <= (1, 3, 10)
def subprocess_text_output(args):
    # type: (Sequence[str]) -> str
    """Run *args* and return its stdout as stripped text."""
    raw = subprocess.check_output(args, universal_newlines=True)
    return raw.strip()
def su_to_zulip():
    # type: () -> None
    """Drop privileges and become the "zulip" user.

    setgid is called before setuid because once the process gives up its
    uid privileges it can no longer change its group.
    """
    pwent = pwd.getpwnam("zulip")
    os.setgid(pwent.pw_gid)
    os.setuid(pwent.pw_uid)
    # HOME is pointed at the parent of DEPLOYMENTS_DIR so subsequent
    # tooling resolves paths relative to the zulip user's home.
    os.environ['HOME'] = os.path.abspath(os.path.join(DEPLOYMENTS_DIR, '..'))
def make_deploy_path():
    # type: () -> str
    """Return a fresh timestamped deployment path under DEPLOYMENTS_DIR."""
    now = datetime.datetime.now()
    return os.path.join(DEPLOYMENTS_DIR, now.strftime(TIMESTAMP_FORMAT))
if __name__ == '__main__':
    # Tiny CLI shim so shell scripts can ask this module for values,
    # e.g.: dir=$(python zulip_tools.py make_deploy_path)
    cmd = sys.argv[1]
    if cmd == 'make_deploy_path':
        print(make_deploy_path())
def get_dev_uuid_var_path(create_if_missing=False):
    # type: (bool) -> str
    """Return (and create) the per-checkout var directory for the dev
    environment.

    A UUID stored in .zulip-dev-uuid next to the checkout keys a
    directory under <checkout>/var so that multiple checkouts sharing a
    host do not collide.  With create_if_missing, a new UUID file is
    written (via sudo, since the path may live under /srv/); otherwise a
    missing UUID file raises AssertionError.
    """
    # __file__ lives three directory levels below the checkout root.
    zulip_path = os.path.realpath(os.path.dirname(os.path.dirname(os.path.dirname(os.path.realpath(__file__)))))
    uuid_path = os.path.join(os.path.realpath(os.path.dirname(zulip_path)), ".zulip-dev-uuid")
    if os.path.exists(uuid_path):
        with open(uuid_path) as f:
            zulip_uuid = f.read().strip()
    else:
        if create_if_missing:
            zulip_uuid = str(uuid.uuid4())
            # We need sudo here, since the path will be under /srv/ in the
            # development environment.
            subprocess.check_call(["sudo", "/bin/bash", "-c",
                                   "echo %s > %s" % (zulip_uuid, uuid_path)])
        else:
            raise AssertionError("Missing UUID file; please run tools/provision!")
    result_path = os.path.join(zulip_path, "var", zulip_uuid)
    os.makedirs(result_path, exist_ok=True)
    return result_path
def get_deployment_lock(error_rerun_script):
    # type: (str) -> None
    """Acquire the global deployment lock, retrying for up to 5 minutes.

    The lock is a directory: os.mkdir(LOCK_DIR) is atomic, so whichever
    process creates it first holds the lock.  On timeout, print recovery
    instructions (including error_rerun_script, the command the user
    should re-run later) and exit with status 1.
    """
    start_time = time.time()
    got_lock = False
    while time.time() - start_time < 300:
        try:
            os.mkdir(LOCK_DIR)
            got_lock = True
            break
        except OSError:
            # mkdir failing means another deployment holds the lock (or a
            # crashed one left a stale lock directory behind).
            print(WARNING + "Another deployment in progress; waiting for lock... " +
                  "(If no deployment is running, rmdir %s)" % (LOCK_DIR,) + ENDC)
            sys.stdout.flush()
            time.sleep(3)
    if not got_lock:
        print(FAIL + "Deployment already in progress. Please run\n" +
              " %s\n" % (error_rerun_script,) +
              "manually when the previous deployment finishes, or run\n" +
              " rmdir %s\n" % (LOCK_DIR,) +
              "if the previous deployment crashed." +
              ENDC)
        sys.exit(1)
def release_deployment_lock():
    # type: () -> None
    """Release the deployment lock by removing the LOCK_DIR directory."""
    shutil.rmtree(LOCK_DIR)
def run(args, **kwargs):
    # type: (Sequence[str], **Any) -> None
    """Run a subcommand, echoing it `set -x` style first.

    Args:
        args: the command and its arguments.
        **kwargs: passed through to subprocess.check_call; with
            shell=True, args is joined into a single string, as Popen
            requires.

    Raises:
        subprocess.CalledProcessError: after printing a highlighted
            error banner, when the subcommand exits nonzero.
    """
    # Compute the printable form exactly once.  Previously the except
    # branch re-joined `args` after the shell=True rebinding below had
    # turned it into a string, so " ".join(args) interleaved a space
    # between every character of the command.
    printable_cmd = " ".join(args)
    # Output what we're doing in the `set -x` style
    print("+ %s" % (printable_cmd,))
    if kwargs.get('shell'):
        # With shell=True we can only pass string to Popen
        args = printable_cmd
    try:
        subprocess.check_call(args, **kwargs)
    except subprocess.CalledProcessError:
        print()
        print(WHITEONRED + "Error running a subcommand of %s: %s" % (sys.argv[0], printable_cmd) +
              ENDC)
        print(WHITEONRED + "Actual error output for the subcommand is just above this." +
              ENDC)
        print()
        raise
def log_management_command(cmd, log_path):
    # type: (Text, Text) -> None
    """Append a timestamped record of a management command to log_path,
    creating the containing directory first if necessary."""
    parent_dir = os.path.dirname(log_path)
    if not os.path.exists(parent_dir):
        os.makedirs(parent_dir)
    handler = logging.FileHandler(log_path)
    handler.setFormatter(logging.Formatter("%(asctime)s: %(message)s"))
    logger = logging.getLogger("zulip.management")
    logger.setLevel(logging.INFO)
    logger.addHandler(handler)
    logger.info("Ran '%s'" % (cmd,))
def get_environment():
    # type: () -> Text
    """Classify this host as 'prod' (a deployments dir exists),
    'travis' (CI), or 'dev'."""
    if os.path.exists(DEPLOYMENTS_DIR):
        return "prod"
    return "travis" if os.environ.get("TRAVIS") else "dev"
def get_recent_deployments(threshold_days):
    # type: (int) -> Set[Text]
    """Return the set of deployment directories considered "recent".

    A deployment counts as recent when its timestamped name is within
    threshold_days, or when its name is not a timestamp at all.  The
    targets of recent symlinks and /root/zulip (if present) are
    included as well; non-deployment entries are skipped.
    """
    recent = set()
    threshold_date = datetime.datetime.now() - datetime.timedelta(days=threshold_days)
    for dir_name in os.listdir(DEPLOYMENTS_DIR):
        target_dir = os.path.join(DEPLOYMENTS_DIR, dir_name)
        if not os.path.isdir(target_dir):
            # Skip things like uwsgi sockets, symlinks, etc.
            continue
        if not os.path.exists(os.path.join(target_dir, "zerver")):
            # Skip things like "lock" that aren't actually a deployment directory
            continue
        try:
            date = datetime.datetime.strptime(dir_name, TIMESTAMP_FORMAT)
            if date >= threshold_date:
                recent.add(target_dir)
        except ValueError:
            # Always include deployments whose name is not in the format of a timestamp.
            recent.add(target_dir)
        # If it is a symlink then include the target as well.
        if os.path.islink(target_dir):
            recent.add(os.path.realpath(target_dir))
    if os.path.exists("/root/zulip"):
        recent.add("/root/zulip")
    return recent
def get_threshold_timestamp(threshold_days):
    # type: (int) -> int
    """Return the Unix timestamp for threshold_days days before now.

    NOTE(review): this mixes a naive local datetime.now() with
    utctimetuple()/mktime(), so the value is shifted by the local UTC
    offset — presumably harmless since callers only compare these
    timestamps against each other; confirm before reusing elsewhere.
    """
    cutoff = datetime.datetime.now() - datetime.timedelta(days=threshold_days)
    return int(time.mktime(cutoff.utctimetuple()))
def get_caches_to_be_purged(caches_dir, caches_in_use, threshold_days):
    # type: (Text, Set[Text], int) -> Set[Text]
    """Return the caches under caches_dir that are safe to delete.

    A cache may be purged only when it is not referenced by
    caches_in_use (current installation, recent deployments, or
    /root/zulip) and its ctime is older than threshold_days.
    """
    cutoff_timestamp = get_threshold_timestamp(threshold_days)
    purgeable = set()
    for entry in os.listdir(caches_dir):
        cache_path = os.path.join(caches_dir, entry)
        # Never purge a cache which is in use.
        if cache_path in caches_in_use:
            continue
        if os.path.getctime(cache_path) < cutoff_timestamp:
            purgeable.add(cache_path)
    return purgeable
def purge_unused_caches(caches_dir, caches_in_use, cache_type, args):
    # type: (Text, Set[Text], Text, argparse.Namespace) -> None
    """Delete (or, with --dry-run, merely report) the caches under
    caches_dir that get_caches_to_be_purged deems no longer needed."""
    all_caches = set(os.path.join(caches_dir, name) for name in os.listdir(caches_dir))
    purge_set = get_caches_to_be_purged(caches_dir, caches_in_use, args.threshold_days)
    keep_set = all_caches - purge_set
    may_be_perform_purging(purge_set, keep_set, cache_type, args.dry_run, args.verbose)
    if args.verbose:
        print("Done!")
def generate_sha1sum_emoji(zulip_path):
    # type: (Text) -> Text
    """Compute the success-stamp hash for the emoji build: the sha1 of
    the emoji tooling sources plus the pinned emoji-datasource version
    from package.json."""
    emoji_tools_dir = os.path.join(zulip_path, 'tools', 'setup', 'emoji')
    sha = hashlib.sha1()
    for source_name in ('emoji_map.json', 'build_emoji', 'emoji_setup_utils.py'):
        with open(os.path.join(emoji_tools_dir, source_name), 'rb') as reader:
            sha.update(reader.read())
    # Take into account the version of `emoji-datasource` package while
    # generating the success stamp.
    with open(os.path.join(zulip_path, 'package.json'), 'r') as fp:
        dependency_data = json.load(fp)['dependencies']
    if 'emoji-datasource' in dependency_data:
        datasource_version = dependency_data['emoji-datasource'].encode('utf-8')
    else:
        datasource_version = b"0"
    sha.update(datasource_version)
    return sha.hexdigest()
def may_be_perform_purging(dirs_to_purge, dirs_to_keep, dir_type, dry_run, verbose):
    # type: (Set[Text], Set[Text], Text, bool, bool) -> None
    """Delete each directory in dirs_to_purge via `sudo rm -rf`, or just
    report the plan when dry_run is set; with verbose, also list the
    directories being kept."""
    if dry_run:
        print("Performing a dry run...")
    else:
        print("Cleaning unused %ss..." % (dir_type,))
    for target in dirs_to_purge:
        if verbose:
            print("Cleaning unused %s: %s" % (dir_type, target))
        if not dry_run:
            subprocess.check_call(["sudo", "rm", "-rf", target])
    for target in dirs_to_keep:
        if verbose:
            print("Keeping used %s: %s" % (dir_type, target))
| |
#!/usr/bin/python
# pylint: disable=too-many-lines
# -*- coding: utf-8 -*-
# vim: expandtab:tabstop=4:shiftwidth=4
# Reason: Disable pylint too-many-lines because we don't want to split up this file.
# Status: Permanently disabled to keep this module as self-contained as possible.
"""Ansible module for retrieving and setting openshift related facts"""
# Ansible-required module documentation block (parsed by ansible-doc).
DOCUMENTATION = '''
---
module: openshift_facts
short_description: Cluster Facts
author: Jason DeTiberus
requirements: [ ]
'''
# Ansible-required examples block; intentionally empty.
EXAMPLES = '''
'''
import ConfigParser
import copy
import os
import StringIO
import yaml
from distutils.util import strtobool
from distutils.version import LooseVersion
import struct
import socket
def first_ip(network):
    """ Return the first IPv4 address in network

        Args:
            network (str): network in CIDR format (e.g. '10.1.0.0/16')
        Returns:
            str: first IPv4 address
    """
    address, prefix = network.split('/')
    # Previously the prefix length was fed through inet_aton(), which
    # only worked by accident (inet_aton('16') == 16); parse it as a
    # plain integer instead, and drop the lambda-bound helpers.
    address_int = struct.unpack("!I", socket.inet_aton(address))[0]
    netmask_int = (0xffffffff << (32 - int(prefix))) & 0xffffffff
    return socket.inet_ntoa(struct.pack("!I", (address_int & netmask_int) + 1))
def hostname_valid(hostname):
    """ Test if specified hostname should be considered valid

        Args:
            hostname (str): hostname to test
        Returns:
            bool: True if valid, otherwise False
    """
    if not hostname:
        return False
    if hostname.startswith('localhost') or hostname.endswith('localdomain'):
        return False
    # Require at least two dot-separated labels (an fqdn-ish name).
    return len(hostname.split('.')) >= 2
def choose_hostname(hostnames=None, fallback=''):
    """ Choose a hostname from the provided hostnames

        Prefers fqdns if they exist (excluding any that begin with
        localhost or end with localdomain) over ip addresses; returns
        fallback when nothing qualifies.

        Args:
            hostnames (list): list of hostnames
            fallback (str): default value to set if hostnames does not contain
                a valid hostname
        Returns:
            str: chosen hostname
    """
    if hostnames is None:
        return fallback
    ip_regex = r'\A\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}\Z'
    ips = [candidate for candidate in hostnames
           if (candidate is not None and isinstance(candidate, basestring)
               and re.match(ip_regex, candidate))]
    names = [candidate for candidate in hostnames
             if candidate is not None and candidate != '' and candidate not in ips]
    # Real hostnames take priority over dotted-quad addresses.
    for group in (names, ips):
        for candidate in group:
            if hostname_valid(candidate):
                return candidate
    return fallback
def query_metadata(metadata_url, headers=None, expect_json=False):
    """ Return metadata from the provided metadata_url

        Args:
            metadata_url (str): metadata url
            headers (dict): headers to set for metadata request
            expect_json (bool): does the metadata_url return json
        Returns:
            dict or list: parsed json, or the response body as a list of
            stripped lines
        Raises:
            OpenShiftFactsMetadataUnavailableError: on any non-200 response
    """
    # NOTE(review): `module` and fetch_url come from the Ansible module
    # boilerplate appended elsewhere in this file — confirm they are in
    # scope before calling this outside the Ansible runtime.
    result, info = fetch_url(module, metadata_url, headers=headers)
    if info['status'] != 200:
        raise OpenShiftFactsMetadataUnavailableError("Metadata unavailable")
    if expect_json:
        return module.from_json(result.read())
    else:
        return [line.strip() for line in result.readlines()]
def walk_metadata(metadata_url, headers=None, expect_json=False):
    """ Walk the metadata tree and return a dictionary of the entire tree

        Args:
            metadata_url (str): metadata url
            headers (dict): headers to set for metadata request
            expect_json (bool): does the metadata_url return json
        Returns:
            dict: the result of walking the metadata tree
    """
    metadata = dict()
    for line in query_metadata(metadata_url, headers, expect_json):
        # Entries ending in '/' are subtrees and are recursed into —
        # except public-keys/, whose listing format breaks the pattern.
        if line.endswith('/') and not line == 'public-keys/':
            key = line[:-1]
            metadata[key] = walk_metadata(metadata_url + line,
                                          headers, expect_json)
        else:
            results = query_metadata(metadata_url + line, headers,
                                     expect_json)
            # A single-line response is unwrapped to a scalar value.
            if len(results) == 1:
                # disable pylint maybe-no-member because overloaded use of
                # the module name causes pylint to not detect that results
                # is an array or hash
                # pylint: disable=maybe-no-member
                metadata[line] = results.pop()
            else:
                metadata[line] = results
    return metadata
def get_provider_metadata(metadata_url, supports_recursive=False,
                          headers=None, expect_json=False):
    """ Retrieve the provider metadata

        Args:
            metadata_url (str): metadata url
            supports_recursive (bool): does the provider metadata api support
                recursion
            headers (dict): headers to set for metadata request
            expect_json (bool): does the metadata_url return json
        Returns:
            dict: the provider metadata, or None when unavailable
    """
    try:
        # A recursion-capable API returns the whole tree in one request;
        # otherwise we walk it one level at a time.
        if supports_recursive:
            return query_metadata(metadata_url, headers, expect_json)
        return walk_metadata(metadata_url, headers, expect_json)
    except OpenShiftFactsMetadataUnavailableError:
        return None
def normalize_gce_facts(metadata, facts):
    """ Normalize gce facts

        Args:
            metadata (dict): provider metadata
            facts (dict): facts to update
        Returns:
            dict: the result of adding the normalized metadata to the provided
            facts dict
    """
    for interface in metadata['instance']['networkInterfaces']:
        iface_facts = dict(ips=[interface['ip']], network_type='gce')
        iface_facts['public_ips'] = [cfg['externalIp'] for cfg
                                     in interface['accessConfigs']]
        iface_facts['public_ips'].extend(interface['forwardedIps'])
        # Network IDs/zones arrive as resource URLs; keep the last segment.
        iface_facts['network_id'] = interface['network'].rpartition('/')[2]
        facts['network']['interfaces'].append(iface_facts)
    facts['zone'] = metadata['instance']['zone'].rpartition('/')[2]
    # Default to no sdn for GCE deployments
    facts['use_openshift_sdn'] = False
    # GCE currently only supports a single interface
    primary = facts['network']['interfaces'][0]
    facts['network']['ip'] = primary['ips'][0]
    facts['network']['public_ip'] = primary['public_ips'][0]
    facts['network']['hostname'] = metadata['instance']['hostname']
    # TODO: attempt to resolve public_hostname
    facts['network']['public_hostname'] = facts['network']['public_ip']
    return facts
def normalize_aws_facts(metadata, facts):
    """ Normalize aws facts

        Args:
            metadata (dict): provider metadata
            facts (dict): facts to update
        Returns:
            dict: the result of adding the normalized metadata to the provided
            facts dict
    """
    # Sort by device-number so interfaces[0] is always the primary NIC.
    for interface in sorted(
            metadata['network']['interfaces']['macs'].values(),
            key=lambda x: x['device-number']
    ):
        int_info = dict()
        var_map = {'ips': 'local-ipv4s', 'public_ips': 'public-ipv4s'}
        for ips_var, int_var in var_map.iteritems():
            ips = interface.get(int_var)
            # The EC2 metadata value is a bare string for one address and
            # a list for several; normalize to a list either way.
            if isinstance(ips, basestring):
                int_info[ips_var] = [ips]
            else:
                int_info[ips_var] = ips
        if 'vpc-id' in interface:
            int_info['network_type'] = 'vpc'
        else:
            int_info['network_type'] = 'classic'
        if int_info['network_type'] == 'vpc':
            int_info['network_id'] = interface['subnet-id']
        else:
            int_info['network_id'] = None
        facts['network']['interfaces'].append(int_info)
    facts['zone'] = metadata['placement']['availability-zone']
    # TODO: actually attempt to determine default local and public ips
    # by using the ansible default ip fact and the ipv4-associations
    # from the ec2 metadata
    facts['network']['ip'] = metadata.get('local-ipv4')
    facts['network']['public_ip'] = metadata.get('public-ipv4')
    # TODO: verify that local hostname makes sense and is resolvable
    facts['network']['hostname'] = metadata.get('local-hostname')
    # TODO: verify that public hostname makes sense and is resolvable
    facts['network']['public_hostname'] = metadata.get('public-hostname')
    return facts
def normalize_openstack_facts(metadata, facts):
    """ Normalize openstack facts

        Args:
            metadata (dict): provider metadata
            facts (dict): facts to update
        Returns:
            dict: the result of adding the normalized metadata to the provided
            facts dict
    """
    # openstack ec2 compat api does not support network interfaces and
    # the version tested on did not include the info in the openstack
    # metadata api, should be updated if neutron exposes this.
    ec2_compat = metadata['ec2_compat']
    facts['zone'] = metadata['availability_zone']
    facts['network']['ip'] = ec2_compat['local-ipv4'].split(',')[0]
    facts['network']['public_ip'] = ec2_compat['public-ipv4']
    # TODO: verify local hostname makes sense and is resolvable
    facts['network']['hostname'] = metadata['hostname']
    # TODO: verify that public hostname makes sense and is resolvable
    facts['network']['public_hostname'] = ec2_compat['public-hostname']
    return facts
def normalize_provider_facts(provider, metadata):
    """ Normalize provider facts

        Args:
            provider (str): host provider
            metadata (dict): provider metadata
        Returns:
            dict: the normalized provider facts
    """
    if provider is None or metadata is None:
        return {}
    # TODO: test for ipv6_enabled where possible (gce, aws do not support)
    # and configure ipv6 facts if available
    # TODO: add support for setting user_data if available
    facts = dict(name=provider, metadata=metadata,
                 network=dict(interfaces=[], ipv6_enabled=False))
    if provider == 'gce':
        return normalize_gce_facts(metadata, facts)
    if provider == 'ec2':
        return normalize_aws_facts(metadata, facts)
    if provider == 'openstack':
        return normalize_openstack_facts(metadata, facts)
    return facts
def set_fluentd_facts_if_unset(facts):
    """ Set fluentd facts if not already present in facts dict

        Args:
            facts (dict): existing facts
        Returns:
            dict: the facts dict updated with the generated fluentd
            facts if they were not already present
    """
    if 'common' in facts:
        facts['common'].setdefault('use_fluentd', False)
    return facts
def set_flannel_facts_if_unset(facts):
    """ Set flannel facts if not already present in facts dict

        Args:
            facts (dict): existing facts
        Returns:
            dict: the facts dict updated with the flannel
            facts if they were not already present
    """
    if 'common' in facts:
        facts['common'].setdefault('use_flannel', False)
    return facts
def set_node_schedulability(facts):
    """ Set schedulable facts if not already present in facts dict

        Args:
            facts (dict): existing facts
        Returns:
            dict: the facts dict updated with the generated schedulable
            facts if they were not already present
    """
    if 'node' in facts and 'schedulable' not in facts['node']:
        # Hosts that are also masters default to unschedulable nodes.
        facts['node']['schedulable'] = 'master' not in facts
    return facts
def set_master_selectors(facts):
    """ Set selectors facts if not already present in facts dict

        Args:
            facts (dict): existing facts
        Returns:
            dict: the facts dict updated with the generated selectors
            facts if they were not already present
    """
    if 'master' in facts and 'infra_nodes' in facts['master']:
        # Online deployments label infra nodes differently.
        if facts['common']['deployment_type'] == 'online':
            selector = "type=infra"
        else:
            selector = "region=infra"
        facts['master'].setdefault('router_selector', selector)
        facts['master'].setdefault('registry_selector', selector)
    return facts
def set_metrics_facts_if_unset(facts):
    """ Set cluster metrics facts if not already present in facts dict

        Args:
            facts (dict): existing facts
        Returns:
            dict: the facts dict updated with the generated cluster metrics
            facts if they were not already present
    """
    if 'common' in facts:
        facts['common'].setdefault('use_cluster_metrics', False)
    return facts
def set_project_cfg_facts_if_unset(facts):
    """ Set Project Configuration facts if not already present in facts dict

        Args:
            facts (dict): existing facts
        Returns:
            dict: the facts dict updated with the generated Project Configuration
            facts if they were not already present
    """
    defaults = {
        'default_node_selector': '',
        'project_request_message': '',
        'project_request_template': '',
        'mcs_allocator_range': 's0:/2',
        'mcs_labels_per_project': 5,
        'uid_allocator_range': '1000000000-1999999999/10000'
    }
    if 'master' in facts:
        for fact_name, default in defaults.items():
            facts['master'].setdefault(fact_name, default)
    return facts
def set_identity_providers_if_unset(facts):
    """ Set identity_providers fact if not already present in facts dict

        Args:
            facts (dict): existing facts
        Returns:
            dict: the facts dict updated with the generated identity providers
            facts if they were not already present
    """
    if 'master' in facts:
        deployment_type = facts['common']['deployment_type']
        if 'identity_providers' not in facts['master']:
            # Enterprise variants lock logins down by default; everything
            # else (origin/online) allows all logins.
            if deployment_type in ['enterprise', 'atomic-enterprise', 'openshift-enterprise']:
                provider = dict(
                    name='deny_all', challenge=True, login=True,
                    kind='DenyAllPasswordIdentityProvider'
                )
            else:
                provider = dict(
                    name='allow_all', challenge=True, login=True,
                    kind='AllowAllPasswordIdentityProvider'
                )
            facts['master']['identity_providers'] = [provider]
    return facts
def set_url_facts_if_unset(facts):
    """ Set url facts if not already present in facts dict

        Args:
            facts (dict): existing facts
        Returns:
            dict: the facts dict updated with the generated url facts if they
            were not already present
    """
    if 'master' in facts:
        api_use_ssl = facts['master']['api_use_ssl']
        api_port = facts['master']['api_port']
        console_use_ssl = facts['master']['console_use_ssl']
        console_port = facts['master']['console_port']
        console_path = facts['master']['console_path']
        etcd_use_ssl = facts['master']['etcd_use_ssl']
        etcd_hosts = facts['master']['etcd_hosts']
        etcd_port = facts['master']['etcd_port']
        hostname = facts['common']['hostname']
        public_hostname = facts['common']['public_hostname']
        # cluster_* hostnames (HA setups) take priority over the local
        # hostname when building the API/console URLs below.
        cluster_hostname = facts['master'].get('cluster_hostname')
        cluster_public_hostname = facts['master'].get('cluster_public_hostname')
        if 'etcd_urls' not in facts['master']:
            etcd_urls = []
            if etcd_hosts != '':
                # External etcd cluster: one URL per configured host.
                facts['master']['etcd_port'] = etcd_port
                facts['master']['embedded_etcd'] = False
                for host in etcd_hosts:
                    etcd_urls.append(format_url(etcd_use_ssl, host,
                                                etcd_port))
            else:
                # Embedded etcd: a single URL on the master itself.
                etcd_urls = [format_url(etcd_use_ssl, hostname,
                                        etcd_port)]
            facts['master']['etcd_urls'] = etcd_urls
        if 'api_url' not in facts['master']:
            api_hostname = cluster_hostname if cluster_hostname else hostname
            facts['master']['api_url'] = format_url(api_use_ssl, api_hostname,
                                                    api_port)
        if 'public_api_url' not in facts['master']:
            api_public_hostname = cluster_public_hostname if cluster_public_hostname else public_hostname
            facts['master']['public_api_url'] = format_url(api_use_ssl,
                                                           api_public_hostname,
                                                           api_port)
        if 'console_url' not in facts['master']:
            console_hostname = cluster_hostname if cluster_hostname else hostname
            facts['master']['console_url'] = format_url(console_use_ssl,
                                                        console_hostname,
                                                        console_port,
                                                        console_path)
        if 'public_console_url' not in facts['master']:
            console_public_hostname = cluster_public_hostname if cluster_public_hostname else public_hostname
            facts['master']['public_console_url'] = format_url(console_use_ssl,
                                                               console_public_hostname,
                                                               console_port,
                                                               console_path)
    return facts
def set_aggregate_facts(facts):
    """ Set aggregate facts

        Computes common.all_hostnames and common.internal_hostnames:
        every name/IP this host answers to, plus (on masters) the
        kubernetes/openshift service names and the first service IP.

        Args:
            facts (dict): existing facts
        Returns:
            dict: the facts dict updated with aggregated facts
    """
    all_hostnames = set()
    internal_hostnames = set()
    if 'common' in facts:
        all_hostnames.add(facts['common']['hostname'])
        all_hostnames.add(facts['common']['public_hostname'])
        all_hostnames.add(facts['common']['ip'])
        all_hostnames.add(facts['common']['public_ip'])
        internal_hostnames.add(facts['common']['hostname'])
        internal_hostnames.add(facts['common']['ip'])
        cluster_domain = facts['common']['dns_domain']
        if 'master' in facts:
            if 'cluster_hostname' in facts['master']:
                all_hostnames.add(facts['master']['cluster_hostname'])
            if 'cluster_public_hostname' in facts['master']:
                all_hostnames.add(facts['master']['cluster_public_hostname'])
            svc_names = ['openshift', 'openshift.default', 'openshift.default.svc',
                         'openshift.default.svc.' + cluster_domain, 'kubernetes', 'kubernetes.default',
                         'kubernetes.default.svc', 'kubernetes.default.svc.' + cluster_domain]
            all_hostnames.update(svc_names)
            internal_hostnames.update(svc_names)
            # The first IP of portal_net is the kubernetes service VIP.
            first_svc_ip = first_ip(facts['master']['portal_net'])
            all_hostnames.add(first_svc_ip)
            internal_hostnames.add(first_svc_ip)
        facts['common']['all_hostnames'] = list(all_hostnames)
        facts['common']['internal_hostnames'] = list(internal_hostnames)
    return facts
def set_etcd_facts_if_unset(facts):
    """
    If using embedded etcd, loads the data directory from master-config.yaml.
    If using standalone etcd, loads ETCD_DATA_DIR from etcd.conf.
    If anything goes wrong parsing these, the fact will not be set.
    """
    if 'master' in facts and facts['master']['embedded_etcd']:
        etcd_facts = facts['etcd'] if 'etcd' in facts else dict()
        if 'etcd_data_dir' not in etcd_facts:
            try:
                # Parse master config to find actual etcd data dir:
                master_cfg_path = os.path.join(facts['common']['config_base'],
                                               'master/master-config.yaml')
                master_cfg_f = open(master_cfg_path, 'r')
                config = yaml.safe_load(master_cfg_f.read())
                master_cfg_f.close()
                etcd_facts['etcd_data_dir'] = \
                    config['etcdConfig']['storageDirectory']
                facts['etcd'] = etcd_facts
            # We don't want exceptions bubbling up here:
            # pylint: disable=broad-except
            except Exception:
                pass
    else:
        etcd_facts = facts['etcd'] if 'etcd' in facts else dict()
        # Read ETCD_DATA_DIR from /etc/etcd/etcd.conf:
        try:
            # Add a fake section for parsing (the file is plain KEY=value
            # shell syntax, which ConfigParser can't read section-less):
            ini_str = '[root]\n' + open('/etc/etcd/etcd.conf', 'r').read()
            ini_fp = StringIO.StringIO(ini_str)
            config = ConfigParser.RawConfigParser()
            config.readfp(ini_fp)
            etcd_data_dir = config.get('root', 'ETCD_DATA_DIR')
            # Strip the surrounding shell-style quotes, if any.
            if etcd_data_dir.startswith('"') and etcd_data_dir.endswith('"'):
                etcd_data_dir = etcd_data_dir[1:-1]
            etcd_facts['etcd_data_dir'] = etcd_data_dir
            facts['etcd'] = etcd_facts
        # We don't want exceptions bubbling up here:
        # pylint: disable=broad-except
        except Exception:
            pass
    return facts
def set_deployment_facts_if_unset(facts):
    """ Set Facts that vary based on deployment_type. This currently
        includes common.service_type, common.config_base, master.registry_url,
        node.registry_url, node.storage_plugin_deps

        Args:
            facts (dict): existing facts
        Returns:
            dict: the facts dict updated with the generated deployment_type
            facts
    """
    # disabled to avoid breaking up facts related to deployment type into
    # multiple methods for now.
    # pylint: disable=too-many-statements, too-many-branches
    if 'common' in facts:
        deployment_type = facts['common']['deployment_type']
        if 'service_type' not in facts['common']:
            service_type = 'atomic-openshift'
            if deployment_type == 'origin':
                service_type = 'origin'
            elif deployment_type in ['enterprise']:
                service_type = 'openshift'
            facts['common']['service_type'] = service_type
        if 'config_base' not in facts['common']:
            config_base = '/etc/origin'
            if deployment_type in ['enterprise', 'online']:
                config_base = '/etc/openshift'
            # Handle upgrade scenarios when symlinks don't yet exist:
            if not os.path.exists(config_base) and os.path.exists('/etc/openshift'):
                config_base = '/etc/openshift'
            facts['common']['config_base'] = config_base
        if 'data_dir' not in facts['common']:
            data_dir = '/var/lib/origin'
            if deployment_type in ['enterprise', 'online']:
                data_dir = '/var/lib/openshift'
            # Handle upgrade scenarios when symlinks don't yet exist:
            if not os.path.exists(data_dir) and os.path.exists('/var/lib/openshift'):
                data_dir = '/var/lib/openshift'
            facts['common']['data_dir'] = data_dir
    # Both masters and nodes need a registry_url for pulling component images.
    for role in ('master', 'node'):
        if role in facts:
            deployment_type = facts['common']['deployment_type']
            if 'registry_url' not in facts[role]:
                registry_url = 'openshift/origin-${component}:${version}'
                if deployment_type in ['enterprise', 'online', 'openshift-enterprise']:
                    registry_url = 'openshift3/ose-${component}:${version}'
                elif deployment_type == 'atomic-enterprise':
                    registry_url = 'aep3_beta/aep-${component}:${version}'
                facts[role]['registry_url'] = registry_url
    # atomic-enterprise masters disable the build/console features.
    if 'master' in facts:
        deployment_type = facts['common']['deployment_type']
        openshift_features = ['Builder', 'S2IBuilder', 'WebConsole']
        if 'disabled_features' in facts['master']:
            if deployment_type == 'atomic-enterprise':
                curr_disabled_features = set(facts['master']['disabled_features'])
                facts['master']['disabled_features'] = list(curr_disabled_features.union(openshift_features))
        else:
            if deployment_type == 'atomic-enterprise':
                facts['master']['disabled_features'] = openshift_features
    if 'node' in facts:
        deployment_type = facts['common']['deployment_type']
        if 'storage_plugin_deps' not in facts['node']:
            if deployment_type in ['openshift-enterprise', 'atomic-enterprise']:
                facts['node']['storage_plugin_deps'] = ['ceph', 'glusterfs']
            else:
                facts['node']['storage_plugin_deps'] = []
    return facts
def set_version_facts_if_unset(facts):
    """ Set version facts. This currently includes common.version and
        common.version_greater_than_3_1_or_1_1.

        Args:
            facts (dict): existing facts
        Returns:
            dict: the facts dict updated with version facts.
    """
    if 'common' not in facts:
        return facts
    deployment_type = facts['common']['deployment_type']
    version = get_openshift_version()
    facts['common']['version'] = version
    # Unknown versions are assumed to be newer than the 3.1/1.1 cutover.
    if version is None:
        newer_than_3_1_or_1_1 = True
    elif deployment_type == 'origin':
        newer_than_3_1_or_1_1 = LooseVersion(version) > LooseVersion('1.0.6')
    else:
        newer_than_3_1_or_1_1 = LooseVersion(version) > LooseVersion('3.0.2.900')
    facts['common']['version_greater_than_3_1_or_1_1'] = newer_than_3_1_or_1_1
    return facts
def set_sdn_facts_if_unset(facts, system_facts):
    """ Set sdn facts if not already present in facts dict

        Args:
            facts (dict): existing facts
            system_facts (dict): ansible_facts
        Returns:
            dict: the facts dict updated with the generated sdn facts if they
            were not already present
    """
    if 'common' in facts:
        use_sdn = facts['common']['use_openshift_sdn']
        # Normalize string flag values ('yes', 'true', ...) into a bool.
        if not (use_sdn == '' or isinstance(use_sdn, bool)):
            facts['common']['use_openshift_sdn'] = bool(strtobool(str(use_sdn)))
        if 'sdn_network_plugin_name' not in facts['common']:
            # NOTE(review): this tests the raw (pre-strtobool) value, so a
            # string like 'false' is truthy here — confirm intended.
            facts['common']['sdn_network_plugin_name'] = (
                'redhat/openshift-ovs-subnet' if use_sdn else '')
    if 'master' in facts:
        facts['master'].setdefault('sdn_cluster_network_cidr', '10.1.0.0/16')
        facts['master'].setdefault('sdn_host_subnet_length', '8')
    if 'node' in facts and 'sdn_mtu' not in facts['node']:
        node_ip = facts['common']['ip']
        # default MTU if interface MTU cannot be detected
        facts['node']['sdn_mtu'] = '1450'
        for sys_fact in system_facts.itervalues():
            if isinstance(sys_fact, dict) and 'mtu' in sys_fact:
                if 'ipv4' in sys_fact and sys_fact['ipv4'].get('address') == node_ip:
                    # Reserve 50 bytes of headroom for the SDN overlay.
                    facts['node']['sdn_mtu'] = str(sys_fact['mtu'] - 50)
    return facts
def format_url(use_ssl, hostname, port, path=''):
    """ Format url based on ssl flag, hostname, port and path

        Args:
            use_ssl (bool): is ssl enabled
            hostname (str): hostname
            port (str): port
            path (str): url path
        Returns:
            str: The generated url string
    """
    scheme = 'https' if use_ssl else 'http'
    # Leave the scheme's default port implicit.
    default_port = '443' if use_ssl else '80'
    netloc = hostname if port == default_port else "%s:%s" % (hostname, port)
    return urlparse.urlunparse((scheme, netloc, path, '', '', ''))
def get_current_config(facts):
    """ Get current openshift config

        Args:
            facts (dict): existing facts
        Returns:
            dict: the facts dict updated with the current openshift config
    """
    current_config = dict()
    # Every top-level facts key except these bookkeeping ones is a role.
    roles = [role for role in facts if role not in ['common', 'provider']]
    for role in roles:
        if 'roles' in current_config:
            current_config['roles'].append(role)
        else:
            current_config['roles'] = [role]
        # TODO: parse the /etc/sysconfig/openshift-{master,node} config to
        # determine the location of files.
        # TODO: I suspect this isn't working right now, but it doesn't prevent
        # anything from working properly as far as I can tell, perhaps because
        # we override the kubeconfig path everywhere we use it?
        # Query kubeconfig settings
        kubeconfig_dir = '/var/lib/origin/openshift.local.certificates'
        if role == 'node':
            kubeconfig_dir = os.path.join(
                kubeconfig_dir, "node-%s" % facts['common']['hostname']
            )
        kubeconfig_path = os.path.join(kubeconfig_dir, '.kubeconfig')
        if (os.path.isfile('/usr/bin/openshift')
                and os.path.isfile(kubeconfig_path)):
            try:
                _, output, _ = module.run_command(
                    ["/usr/bin/openshift", "ex", "config", "view", "-o",
                     "json", "--kubeconfig=%s" % kubeconfig_path],
                    check_rc=False
                )
                config = json.loads(output)
                cad = 'certificate-authority-data'
                # Mask credential material before it lands in facts/logs.
                try:
                    for cluster in config['clusters']:
                        config['clusters'][cluster][cad] = 'masked'
                except KeyError:
                    pass
                try:
                    for user in config['users']:
                        config['users'][user][cad] = 'masked'
                        config['users'][user]['client-key-data'] = 'masked'
                except KeyError:
                    pass
                current_config['kubeconfig'] = config
            # override pylint broad-except warning, since we do not want
            # to bubble up any exceptions if oc config view
            # fails
            # pylint: disable=broad-except
            except Exception:
                pass
    return current_config
def get_openshift_version():
    """ Get current version of openshift on the host

        Returns:
            version: the current openshift version; '' when the binary is
            present but reports no 'openshift' component, None when
            /usr/bin/openshift is not installed
    """
    #TODO: acknowledge the possility of a containerized install
    if not os.path.isfile('/usr/bin/openshift'):
        return None
    _, output, _ = module.run_command(['/usr/bin/openshift', 'version'])
    # Each line looks like "openshift v1.0.6"; split on ' v'.
    versions = dict(line.split(' v') for line in output.splitlines() if ' v' in line)
    return versions.get('openshift', '')
def apply_provider_facts(facts, provider_facts):
    """ Apply provider facts to supplied facts dict

        Args:
            facts (dict): facts dict to update
            provider_facts (dict): provider facts to apply
        Returns:
            dict: the merged facts
    """
    # Nothing to apply when no provider was detected.
    if not provider_facts:
        return facts

    sdn_flag = provider_facts.get('use_openshift_sdn')
    if isinstance(sdn_flag, bool):
        facts['common']['use_openshift_sdn'] = sdn_flag

    # For each hostname/ip pair, prefer the provider-reported IP and pick
    # the best hostname candidate, falling back to that IP.
    for hostname_var, ip_var in (('hostname', 'ip'), ('public_hostname', 'public_ip')):
        provider_ip = provider_facts['network'].get(ip_var)
        if provider_ip:
            facts['common'][ip_var] = provider_ip

        facts['common'][hostname_var] = choose_hostname(
            [provider_facts['network'].get(hostname_var)],
            facts['common'][ip_var]
        )

    facts['provider'] = provider_facts
    return facts
def merge_facts(orig, new, additive_facts_to_overwrite):
    """ Recursively merge facts dicts

        Nested dicts are merged recursively; additive facts (currently only
        'named_certificates') are combined as an ordered, de-duplicated union
        unless explicitly listed in additive_facts_to_overwrite; all other
        keys present in both dicts take the value from new.

        Args:
            orig (dict): existing facts
            new (dict): facts to update
            additive_facts_to_overwrite (list): additive facts to overwrite in jinja
                                                '.' notation ex: ['master.named_certificates']
        Returns:
            dict: the merged facts
    """
    additive_facts = ['named_certificates']
    facts = dict()
    # items() instead of the Python-2-only iteritems() keeps this module
    # working under both Python 2 and Python 3.
    for key, value in orig.items():
        if key in new:
            if isinstance(value, dict) and isinstance(new[key], dict):
                # Keep additive_facts_to_overwrite entries that target this
                # subtree and pass them down the recursion.
                relevant_additive_facts = []
                for item in additive_facts_to_overwrite:
                    if '.' in item and item.startswith(key + '.'):
                        relevant_additive_facts.append(item)
                facts[key] = merge_facts(value, new[key], relevant_additive_facts)
            elif key in additive_facts and key not in [x.split('.')[-1] for x in additive_facts_to_overwrite]:
                # Fact is additive so we'll combine orig and new: ordered
                # union, first occurrence wins.
                if isinstance(value, list) and isinstance(new[key], list):
                    new_fact = []
                    for item in copy.deepcopy(value) + copy.copy(new[key]):
                        if item not in new_fact:
                            new_fact.append(item)
                    facts[key] = new_fact
                # NOTE(review): an additive fact that is not a list on both
                # sides is silently dropped here — preserved as-is from the
                # original logic.
            else:
                # Plain overwrite: the new value wins.
                facts[key] = copy.copy(new[key])
        else:
            # Key only present in orig: keep it.
            facts[key] = copy.deepcopy(value)
    # Keys only present in new are copied over verbatim.
    new_keys = set(new.keys()) - set(orig.keys())
    for key in new_keys:
        facts[key] = copy.deepcopy(new[key])
    return facts
def save_local_facts(filename, facts):
    """ Save local facts

        Args:
            filename (str): local facts file
            facts (dict): facts to set
        Raises:
            OpenShiftFactsFileWriteError: when the directory or file cannot
                be written
    """
    try:
        # Create the containing directory on first use.
        target_dir = os.path.dirname(filename)
        if not os.path.exists(target_dir):
            os.makedirs(target_dir)
        with open(filename, 'w') as handle:
            handle.write(module.jsonify(facts))
    except (IOError, OSError) as err:
        raise OpenShiftFactsFileWriteError(
            "Could not create fact file: %s, error: %s" % (filename, err)
        )
def get_local_facts_from_file(filename):
    """ Retrieve local facts from fact file

        Tries the legacy INI format first; on a parse failure falls back to
        reading the file as JSON. Missing or unreadable files yield {}.

        Args:
            filename (str): local facts file
        Returns:
            dict: the retrieved facts
    """
    local_facts = dict()
    try:
        # Handle conversion of INI style facts file to json style
        parser = ConfigParser.SafeConfigParser()
        parser.read(filename)
        for section in parser.sections():
            local_facts[section] = dict(parser.items(section))
    except (ConfigParser.MissingSectionHeaderError,
            ConfigParser.ParsingError):
        # Not INI — assume the file is already JSON.
        try:
            with open(filename, 'r') as handle:
                local_facts = json.load(handle)
        except (ValueError, IOError):
            pass
    return local_facts
class OpenShiftFactsUnsupportedRoleError(Exception):
    """Raised when the requested role is not one of the known roles."""
class OpenShiftFactsFileWriteError(Exception):
    """Raised when the local facts file cannot be created or written."""
class OpenShiftFactsMetadataUnavailableError(Exception):
    """Raised when cloud-provider metadata cannot be retrieved."""
class OpenShiftFacts(object):
    """ Origin Facts

        Builds the complete 'openshift' fact dict for a host by layering:
        role defaults -> detected cloud-provider facts -> local facts from
        disk, then running the set_*_facts_if_unset helper chain.

        Attributes:
            facts (dict): facts for the host
        Args:
            role (str): role for setting local facts
            filename (str): local facts file to use
            local_facts (dict): local facts to set
            additive_facts_to_overwrite (list): additive facts to overwrite in jinja
                                                '.' notation ex: ['master.named_certificates']
        Raises:
            OpenShiftFactsUnsupportedRoleError: if role is not in known_roles
    """
    # Roles this module knows how to generate defaults for.
    known_roles = ['common', 'master', 'node', 'master_sdn', 'node_sdn', 'etcd']

    def __init__(self, role, filename, local_facts, additive_facts_to_overwrite=False):
        self.changed = False
        self.filename = filename
        if role not in self.known_roles:
            raise OpenShiftFactsUnsupportedRoleError(
                "Role %s is not supported by this module" % role
            )
        self.role = role
        # ansible_facts() reads the global AnsibleModule created in main().
        self.system_facts = ansible_facts(module)
        self.facts = self.generate_facts(local_facts, additive_facts_to_overwrite)

    def generate_facts(self, local_facts, additive_facts_to_overwrite):
        """ Generate facts

            Args:
                local_facts (dict): local_facts for overriding generated
                                    defaults
                additive_facts_to_overwrite (list): additive facts to overwrite in jinja
                                                    '.' notation ex: ['master.named_certificates']
            Returns:
                dict: The generated facts, wrapped under the 'openshift' key
        """
        local_facts = self.init_local_facts(local_facts, additive_facts_to_overwrite)
        roles = local_facts.keys()
        defaults = self.get_defaults(roles)
        provider_facts = self.init_provider_facts()
        # Provider facts are applied over defaults; local facts then
        # override everything via merge_facts.
        facts = apply_provider_facts(defaults, provider_facts)
        facts = merge_facts(facts, local_facts, additive_facts_to_overwrite)
        facts['current_config'] = get_current_config(facts)
        # Derived/defaulted facts; each helper only fills values not
        # already set.
        facts = set_url_facts_if_unset(facts)
        facts = set_project_cfg_facts_if_unset(facts)
        facts = set_fluentd_facts_if_unset(facts)
        facts = set_flannel_facts_if_unset(facts)
        facts = set_node_schedulability(facts)
        facts = set_master_selectors(facts)
        facts = set_metrics_facts_if_unset(facts)
        facts = set_identity_providers_if_unset(facts)
        facts = set_sdn_facts_if_unset(facts, self.system_facts)
        facts = set_deployment_facts_if_unset(facts)
        facts = set_version_facts_if_unset(facts)
        facts = set_aggregate_facts(facts)
        facts = set_etcd_facts_if_unset(facts)
        return dict(openshift=facts)

    def get_defaults(self, roles):
        """ Get default fact values

            Args:
                roles (list): list of roles for this host
            Returns:
                dict: The generated default facts
        """
        defaults = dict()

        ip_addr = self.system_facts['default_ipv4']['address']
        exit_code, output, _ = module.run_command(['hostname', '-f'])
        hostname_f = output.strip() if exit_code == 0 else ''
        # Candidates in preference order: `hostname -f`, nodename, fqdn;
        # choose_hostname falls back to the IP if none qualifies.
        hostname_values = [hostname_f, self.system_facts['nodename'],
                           self.system_facts['fqdn']]
        hostname = choose_hostname(hostname_values, ip_addr)

        common = dict(use_openshift_sdn=True, ip=ip_addr, public_ip=ip_addr,
                      deployment_type='origin', hostname=hostname,
                      public_hostname=hostname, use_manageiq=False)
        # Binary names changed between releases; prefer the newer ones when
        # present on disk.
        common['client_binary'] = 'oc' if os.path.isfile('/usr/bin/oc') else 'osc'
        common['admin_binary'] = 'oadm' if os.path.isfile('/usr/bin/oadm') else 'osadm'
        common['dns_domain'] = 'cluster.local'
        common['install_examples'] = True
        defaults['common'] = common

        if 'master' in roles:
            master = dict(api_use_ssl=True, api_port='8443',
                          console_use_ssl=True, console_path='/console',
                          console_port='8443', etcd_use_ssl=True, etcd_hosts='',
                          etcd_port='4001', portal_net='172.30.0.0/16',
                          embedded_etcd=True, embedded_kube=True,
                          embedded_dns=True, dns_port='53',
                          bind_addr='0.0.0.0', session_max_seconds=3600,
                          session_name='ssn', session_secrets_file='',
                          access_token_max_seconds=86400,
                          auth_token_max_seconds=500,
                          oauth_grant_method='auto')
            defaults['master'] = master

        if 'node' in roles:
            node = dict(labels={}, annotations={}, portal_net='172.30.0.0/16',
                        iptables_sync_period='5s', set_node_ip=False)
            defaults['node'] = node

        return defaults

    def guess_host_provider(self):
        """ Guess the host provider from DMI/virtualization system facts

            Returns:
                dict: keys 'name' (provider id or None) and 'metadata'
                    (provider metadata dict or None)
        """
        # TODO: cloud provider facts should probably be submitted upstream
        product_name = self.system_facts['product_name']
        product_version = self.system_facts['product_version']
        virt_type = self.system_facts['virtualization_type']
        virt_role = self.system_facts['virtualization_role']
        provider = None
        metadata = None

        # TODO: this is not exposed through module_utils/facts.py in ansible,
        # need to create PR for ansible to expose it
        bios_vendor = get_file_content(
            '/sys/devices/virtual/dmi/id/bios_vendor'
        )
        if bios_vendor == 'Google':
            provider = 'gce'
            metadata_url = ('http://metadata.google.internal/'
                            'computeMetadata/v1/?recursive=true')
            headers = {'Metadata-Flavor': 'Google'}
            metadata = get_provider_metadata(metadata_url, True, headers,
                                             True)

            # Filter sshKeys and serviceAccounts from gce metadata
            if metadata:
                metadata['project']['attributes'].pop('sshKeys', None)
                metadata['instance'].pop('serviceAccounts', None)
        elif (virt_type == 'xen' and virt_role == 'guest'
              and re.match(r'.*\.amazon$', product_version)):
            provider = 'ec2'
            metadata_url = 'http://169.254.169.254/latest/meta-data/'
            metadata = get_provider_metadata(metadata_url)
        elif re.search(r'OpenStack', product_name):
            provider = 'openstack'
            metadata_url = ('http://169.254.169.254/openstack/latest/'
                            'meta_data.json')
            metadata = get_provider_metadata(metadata_url, True, None,
                                             True)
            if metadata:
                ec2_compat_url = 'http://169.254.169.254/latest/meta-data/'
                metadata['ec2_compat'] = get_provider_metadata(
                    ec2_compat_url
                )

                # disable pylint maybe-no-member because overloaded use of
                # the module name causes pylint to not detect that results
                # is an array or hash
                # pylint: disable=maybe-no-member
                # Filter public_keys and random_seed from openstack metadata
                metadata.pop('public_keys', None)
                metadata.pop('random_seed', None)

                # Without the EC2-compat endpoint the metadata is unusable.
                if not metadata['ec2_compat']:
                    metadata = None

        return dict(name=provider, metadata=metadata)

    def init_provider_facts(self):
        """ Initialize the provider facts

            Returns:
                dict: The normalized provider facts
        """
        provider_info = self.guess_host_provider()
        provider_facts = normalize_provider_facts(
            provider_info.get('name'),
            provider_info.get('metadata')
        )
        return provider_facts

    def init_local_facts(self, facts=None, additive_facts_to_overwrite=False):
        """ Initialize the local facts

            Merges the supplied facts into the on-disk local facts and, when
            the result differs, persists it (unless running in check mode).

            Args:
                facts (dict): local facts to set
                additive_facts_to_overwrite (list): additive facts to overwrite in jinja
                                                    '.' notation ex: ['master.named_certificates']
            Returns:
                dict: The result of merging the provided facts with existing
                      local facts
        """
        changed = False
        facts_to_set = {self.role: dict()}
        if facts is not None:
            facts_to_set[self.role] = facts

        local_facts = get_local_facts_from_file(self.filename)

        for arg in ['labels', 'annotations']:
            # NOTE(review): facts_to_set is keyed by role, so 'labels' /
            # 'annotations' can only appear at its top level if the role
            # itself carries that name — this looks like it was meant to
            # inspect facts_to_set[self.role][arg]; confirm upstream intent.
            if arg in facts_to_set and isinstance(facts_to_set[arg],
                                                  basestring):
                facts_to_set[arg] = module.from_json(facts_to_set[arg])

        new_local_facts = merge_facts(local_facts, facts_to_set, additive_facts_to_overwrite)
        for facts in new_local_facts.values():
            keys_to_delete = []
            # Drop empty-string/None values so they don't mask defaults.
            for fact, value in facts.iteritems():
                if value == "" or value is None:
                    keys_to_delete.append(fact)
            for key in keys_to_delete:
                del facts[key]

        if new_local_facts != local_facts:
            changed = True
            if not module.check_mode:
                save_local_facts(self.filename, new_local_facts)

        self.changed = changed
        return new_local_facts
def main():
    """ Ansible entry point: gather, merge and persist OpenShift facts. """
    # 'module' must be global: the helper functions above call
    # run_command/jsonify/... on it.
    # pylint: disable=global-variable-undefined, invalid-name
    global module
    module = AnsibleModule(
        argument_spec=dict(
            role=dict(default='common', required=False,
                      choices=OpenShiftFacts.known_roles),
            local_facts=dict(default=None, type='dict', required=False),
            additive_facts_to_overwrite=dict(default=[], type='list', required=False),
        ),
        supports_check_mode=True,
        add_file_common_args=True,
    )

    fact_file = '/etc/ansible/facts.d/openshift.fact'
    openshift_facts = OpenShiftFacts(
        module.params['role'],
        fact_file,
        module.params['local_facts'],
        module.params['additive_facts_to_overwrite'],
    )

    # Apply any file attribute arguments (owner/mode/...) to the fact file
    # and fold the result into the changed status.
    file_params = module.params.copy()
    file_params['path'] = fact_file
    file_args = module.load_file_common_arguments(file_params)
    changed = module.set_fs_attributes_if_different(file_args,
                                                    openshift_facts.changed)

    return module.exit_json(changed=changed,
                            ansible_facts=openshift_facts.facts)
# ignore pylint errors related to the module_utils import
# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.facts import *
from ansible.module_utils.urls import *
# Run the module when invoked directly by Ansible.
if __name__ == '__main__':
    main()
| |
from django.conf.urls import include, url
from django.contrib.auth.decorators import login_required
from django.contrib.auth.views import LoginView
from django.contrib.auth.views import LogoutView
from django.views.i18n import JavaScriptCatalog
from django.views.static import serve
from devices.views import *
from network.views import *
from devicetypes.views import *
from main.views import *
from api.views import *
from mail.views import *
from devicegroups.views import *
from devicetags.views import *
from locations.views import *
from history.views import *
from users.views import *
from main.ajax import WidgetAdd, WidgetRemove, WidgetToggle, WidgetMove
from devices.ajax import AutocompleteName, AutocompleteDevice, AutocompleteSmallDevice, \
LoadExtraform, LoadMailtemplate, PreviewMail, AddDeviceField, \
UserLendings, PuppetDetails, PuppetSoftware
from devicetypes.ajax import GetTypeAttributes
from rest_framework.urlpatterns import format_suffix_patterns
from . import settings
from django.contrib import admin
from django.views.decorators.clickjacking import xframe_options_exempt
# Main URL table. Order matters: Django resolves against the first matching
# pattern, so keep more specific routes ahead of catch-alls.
urlpatterns = [
    # Home, auth and global search
    url(r'^$', login_required(Home.as_view()), name="home"),
    url(r'^accounts/login/$', LoginView.as_view(), name="login"),
    url(r'^accounts/logout/$', LogoutView.as_view(), name="logout"),
    url(r'^search/$', Search.as_view(), name="search"),
    # Devices: listing (with optional page/department/sorting/filter), CRUD
    # and per-device actions
    url(r'^devices/$', DeviceList.as_view(), name="device-list"),
    url(r'^devices/page/(?P<page>[0-9]*)$', DeviceList.as_view(), name="device-list"),
    url(r'^devices/department/(?P<department>[^/]*)/sorting/(?P<sorting>[^/]*)/filter/(?P<filter>[^/]*)$', DeviceList.as_view(), name="device-list"),
    url(r'^devices/page/(?P<page>[0-9]*)/department/(?P<department>[^/]*)/sorting/(?P<sorting>[^/]*)/filter/(?P<filter>[^/]*)$', DeviceList.as_view(), name="device-list"),
    url(r'^devices/add$', DeviceCreate.as_view(), name="device-add"),
    url(r'^devices/add/template/(?P<templateid>[0-9]*)$', DeviceCreate.as_view(), name="device-add"),
    url(r'^devices/add/copy/(?P<copyid>[0-9]*)$', DeviceCreate.as_view(), name="device-add-copy"),
    url(r'^devices/(?P<pk>[0-9]*)/$', DeviceDetail.as_view(), name="device-detail"),
    url(r'^devices/(?P<pk>[0-9]*)/edit/$', DeviceUpdate.as_view(), name="device-edit"),
    url(r'^devices/(?P<pk>[0-9]*)/delete/$', DeviceDelete.as_view(), name="device-delete"),
    url(r'^devices/(?P<pk>[0-9]*)/archive/$', DeviceArchive.as_view(), name="device-archive"),
    url(r'^devices/(?P<pk>[0-9]*)/trash/$', DeviceTrash.as_view(), name="device-trash"),
    url(r'^devices/(?P<pk>[0-9]*)/storage/$', DeviceStorage.as_view(), name="device-storage"),
    url(r'^devices/(?P<pk>[0-9]*)/mail/$', DeviceMail.as_view(), name="device-mail"),
    url(r'^devices/(?P<pk>[0-9]*)/ipaddress/$', DeviceIpAddress.as_view(), name="device-ipaddress"),
    url(r'^devices/(?P<pk>[0-9]*)/ipaddress/(?P<ipaddress>[0-9]*)/remove$', DeviceIpAddressRemove.as_view(), name="device-ipaddress-remove"),
    url(r'^devices/(?P<pk>[0-9]*)/ipaddress/(?P<ipaddress>[0-9]*)/purpose$', DeviceIpAddressPurpose.as_view(), name="device-ipaddress-purpose"),
    url(r'^devices/(?P<pk>[0-9]*)/tags/$', DeviceTags.as_view(), name="device-tags"),
    url(r'^devices/(?P<pk>[0-9]*)/tags/(?P<tag>[0-9]*)$', DeviceTagRemove.as_view(), name="device-tag-remove"),
    url(r'^devices/(?P<pk>[0-9]*)/lending/$', DeviceLendingList.as_view(), name="device-lending-list"),
    url(r'^devices/(?P<pk>[0-9]*)/lending/(?P<page>[0-9]*)$', DeviceLendingList.as_view(), name="device-lending-list"),
    url(r'^devices/(?P<pk>[0-9]*)/inventoried/$', DeviceInventoried.as_view(), name="device-inventoried"),
    url(r'^devices/(?P<pk>[0-9]*)/bookmark/$', DeviceBookmark.as_view(), name="device-bookmark"),
    # Device notes and pictures
    url(r'^devices/(?P<pk>[0-9]*)/notes/create/$', NoteCreate.as_view(), name="device-note-create"),
    url(r'^devices/(?P<pk>[0-9]*)/notes/edit/$', NoteUpdate.as_view(), name="device-note-edit"),
    url(r'^devices/(?P<device>[0-9]*)/notes/(?P<pk>[0-9]*)/delete/$', NoteDelete.as_view(), name="device-note-delete"),
    url(r'^devices/(?P<pk>[0-9]*)/pictures/create/$', PictureCreate.as_view(), name="device-picture-create"),
    url(r'^devices/(?P<device>[0-9]*)/pictures/(?P<pk>[0-9]*)/edit/$', PictureUpdate.as_view(), name="device-picture-edit"),
    url(r'^devices/(?P<device>[0-9]*)/pictures/(?P<pk>[0-9]*)/delete/$', PictureDelete.as_view(), name="device-picture-delete"),
    # Lending, export and returns
    url(r'^devices/lend/$', DeviceLend.as_view(), name="device-lend"),
    url(r'^devices/lend/(?P<pk>[0-9]*)$', DeviceLend.as_view(), name="device-lend"),
    url(r'^devices/export/csv/$', ExportCsv.as_view(), name='export-csv'),
    url(r'^devices/return/(?P<lending>[0-9]*)$', DeviceReturn.as_view(), name="device-return"),
    # Public (unauthenticated) device views; xframe_options_exempt allows
    # embedding them in iframes.
    url(r'^devices/public/$', xframe_options_exempt(PublicDeviceListView.as_view()), name="public-device-list"),
    url(r'^devices/public/(?P<page>[0-9]*)$', xframe_options_exempt(PublicDeviceListView.as_view()), name="public-device-list"),
    url(r'^devices/public/sorting/(?P<sorting>[^/]*)$', xframe_options_exempt(PublicDeviceListView.as_view()), name="public-device-list"),
    url(r'^devices/public/page/(?P<page>[0-9]*)/sorting/(?P<sorting>[^/]*)$', xframe_options_exempt(PublicDeviceListView.as_view()), name="public-device-list"),
    url(r'^devices/public/sorting/(?P<sorting>[^/]*)/group/(?P<group>[^/]*)$', xframe_options_exempt(PublicDeviceListView.as_view()), name="public-device-list"),
    url(r'^devices/public/page/(?P<page>[0-9]*)/sorting/(?P<sorting>[^/]*)/group/(?P<group>[^/]*)$', xframe_options_exempt(PublicDeviceListView.as_view()), name="public-device-list"),
    url(r'^devices/public/sorting/(?P<sorting>[^/]*)/group/(?P<group>[^/]*)/filter/(?P<filter>[^/]*)$', xframe_options_exempt(PublicDeviceListView.as_view()), name="public-device-list"),
    url(r'^devices/public/page/(?P<page>[0-9]*)/sorting/(?P<sorting>[^/]*)/group/(?P<group>[^/]*)/filter/(?P<filter>[^/]*)$', xframe_options_exempt(PublicDeviceListView.as_view()), name="public-device-list"),
    url(r'^devices/public/(?P<pk>[0-9]*)/$', xframe_options_exempt(PublicDeviceDetailView.as_view()), name="public-device-detail"),
    # Device templates
    url(r'^devices/templates/$', TemplateList.as_view(), name="template-list"),
    url(r'^devices/templates/(?P<page>[0-9]*)$', TemplateList.as_view(), name="template-list"),
    url(r'^devices/templates/add$', TemplateCreate.as_view(), name="template-add"),
    url(r'^devices/templates/(?P<pk>[0-9]*)/edit/$', TemplateUpdate.as_view(), name="template-edit"),
    url(r'^devices/templates/(?P<pk>[0-9]*)/delete/$', TemplateDelete.as_view(), name="template-delete"),
    # Device types and their attributes
    url(r'^types/$', TypeList.as_view(), name="type-list"),
    url(r'^types/(?P<page>[0-9]*)$', TypeList.as_view(), name="type-list"),
    url(r'^types/sorting/(?P<sorting>[^/]*)$', TypeList.as_view(), name="type-list"),
    url(r'^types/page/(?P<page>[0-9]*)/sorting/(?P<sorting>[^/]*)$', TypeList.as_view(), name="type-list"),
    url(r'^types/sorting/(?P<sorting>[^/]*)/filter/(?P<filter>[^/]*)$', TypeList.as_view(), name="type-list"),
    url(r'^types/page/(?P<page>[0-9]*)/sorting/(?P<sorting>[^/]*)/filter/(?P<filter>[^/]*)$', TypeList.as_view(), name="type-list"),
    url(r'^types/add$', TypeCreate.as_view(), name="type-add"),
    url(r'^types/edit/(?P<pk>[0-9]*)$', TypeUpdate.as_view(), name="type-edit"),
    url(r'^types/delete/(?P<pk>[0-9]*)$', TypeDelete.as_view(), name="type-delete"),
    url(r'^types/view/(?P<pk>[0-9]*)$', TypeDetail.as_view(), name="type-detail"),
    url(r'^types/merge/(?P<oldpk>[0-9]*)/(?P<newpk>[0-9]*)$', TypeMerge.as_view(), name="type-merge"),
    url(r'^types/attribute/add$', TypeAttributeCreate.as_view(), name="typeattribute-add"),
    url(r'^types/attribute/edit/(?P<pk>[0-9]*)$', TypeAttributeUpdate.as_view(), name="typeattribute-edit"),
    url(r'^types/attribute/delete/(?P<pk>[0-9]*)$', TypeAttributeDelete.as_view(), name="typeattribute-delete"),
    # Rooms
    url(r'^rooms/$', RoomList.as_view(), name="room-list"),
    url(r'^rooms/(?P<page>[0-9]*)$', RoomList.as_view(), name="room-list"),
    url(r'^rooms/sorting/(?P<sorting>[^/]*)$', RoomList.as_view(), name="room-list"),
    url(r'^rooms/page/(?P<page>[0-9]*)/sorting/(?P<sorting>[^/]*)$', RoomList.as_view(), name="room-list"),
    url(r'^rooms/sorting/(?P<sorting>[^/]*)/filter/(?P<filter>[^/]*)$', RoomList.as_view(), name="room-list"),
    url(r'^rooms/page/(?P<page>[0-9]*)/sorting/(?P<sorting>[^/]*)/filter/(?P<filter>[^/]*)$', RoomList.as_view(), name="room-list"),
    url(r'^rooms/add$', RoomCreate.as_view(), name="room-add"),
    url(r'^rooms/edit/(?P<pk>[^/]*)$', RoomUpdate.as_view(), name="room-edit"),
    url(r'^rooms/delete/(?P<pk>[^/]*)$', RoomDelete.as_view(), name="room-delete"),
    url(r'^rooms/view/(?P<pk>[^/]*)$', RoomDetail.as_view(), name="room-detail"),
    url(r'^rooms/merge/(?P<oldpk>[0-9]*)/(?P<newpk>[0-9]*)$', RoomMerge.as_view(), name="room-merge"),
    # Buildings
    url(r'^buildings/$', BuildingList.as_view(), name="building-list"),
    url(r'^buildings/(?P<page>[0-9]*)$', BuildingList.as_view(), name="building-list"),
    url(r'^buildings/sorting/(?P<sorting>[^/]*)$', BuildingList.as_view(), name="building-list"),
    url(r'^buildings/page/(?P<page>[0-9]*)/sorting/(?P<sorting>[^/]*)$', BuildingList.as_view(), name="building-list"),
    url(r'^buildings/sorting/(?P<sorting>[^/]*)/filter/(?P<filter>[^/]*)$', BuildingList.as_view(), name="building-list"),
    url(r'^buildings/page/(?P<page>[0-9]*)/sorting/(?P<sorting>[^/]*)/filter/(?P<filter>[^/]*)$', BuildingList.as_view(), name="building-list"),
    url(r'^buildings/add$', BuildingCreate.as_view(), name="building-add"),
    url(r'^buildings/edit/(?P<pk>[^/]*)$', BuildingUpdate.as_view(), name="building-edit"),
    url(r'^buildings/delete/(?P<pk>[^/]*)$', BuildingDelete.as_view(), name="building-delete"),
    url(r'^buildings/view/(?P<pk>[^/]*)$', BuildingDetail.as_view(), name="building-detail"),
    url(r'^buildings/merge/(?P<oldpk>[0-9]*)/(?P<newpk>[0-9]*)$', BuildingMerge.as_view(), name="building-merge"),
    # Manufacturers
    url(r'^manufacturers/$', ManufacturerList.as_view(), name="manufacturer-list"),
    url(r'^manufacturers/page/(?P<page>[0-9]*)$', ManufacturerList.as_view(), name="manufacturer-list"),
    url(r'^manufacturers/sorting/(?P<sorting>[^/]*)$', ManufacturerList.as_view(), name="manufacturer-list"),
    url(r'^manufacturers/page/(?P<page>[0-9]*)/sorting/(?P<sorting>[^/]*)$', ManufacturerList.as_view(), name="manufacturer-list"),
    url(r'^manufacturers/sorting/(?P<sorting>[^/]*)/filter/(?P<filter>[^/]*)$', ManufacturerList.as_view(), name="manufacturer-list"),
    url(r'^manufacturers/page/(?P<page>[0-9]*)/sorting/(?P<sorting>[^/]*)/filter/(?P<filter>[^/]*)$', ManufacturerList.as_view(), name="manufacturer-list"),
    url(r'^manufacturers/add$', ManufacturerCreate.as_view(), name="manufacturer-add"),
    url(r'^manufacturers/edit/(?P<pk>[^/]*)$', ManufacturerUpdate.as_view(), name="manufacturer-edit"),
    url(r'^manufacturers/delete/(?P<pk>[^/]*)$', ManufacturerDelete.as_view(), name="manufacturer-delete"),
    url(r'^manufacturers/view/(?P<pk>[^/]*)$', ManufacturerDetail.as_view(), name="manufacturer-detail"),
    url(r'^manufacturers/merge/(?P<oldpk>[0-9]*)/(?P<newpk>[0-9]*)$', ManufacturerMerge.as_view(), name="manufacturer-merge"),
    # Mail templates
    url(r'^mails/$', MailList.as_view(), name="mail-list"),
    url(r'^mails/(?P<page>[0-9]*)$', MailList.as_view(), name="mail-list"),
    url(r'^mails/add$', MailCreate.as_view(), name="mail-add"),
    url(r'^mails/edit/(?P<pk>[^/]*)$', MailUpdate.as_view(), name="mail-edit"),
    url(r'^mails/view/(?P<pk>[^/]*)$', MailDetail.as_view(), name="mail-detail"),
    url(r'^mails/delete/(?P<pk>[^/]*)$', MailDelete.as_view(), name="mail-delete"),
    # Device groups
    url(r'^devicegroups/$', DevicegroupList.as_view(), name="devicegroup-list"),
    url(r'^devicegroups/(?P<page>[0-9]*)$', DevicegroupList.as_view(), name="devicegroup-list"),
    url(r'^devicegroups/department/(?P<department>[^/]*)/sorting/(?P<sorting>[^/]*)/filter/(?P<filter>[^/]*)$', DevicegroupList.as_view(), name="devicegroup-list"),
    url(r'^devicegroups/page/(?P<page>[0-9]*)/department/(?P<department>[^/]*)/sorting/(?P<sorting>[^/]*)/filter/(?P<filter>[^/]*)$', DevicegroupList.as_view(), name="devicegroup-list"),
    url(r'^devicegroups/add$', DevicegroupCreate.as_view(), name="devicegroup-add"),
    url(r'^devicegroups/edit/(?P<pk>[^/]*)$', DevicegroupUpdate.as_view(), name="devicegroup-edit"),
    url(r'^devicegroups/view/(?P<pk>[^/]*)$', DevicegroupDetail.as_view(), name="devicegroup-detail"),
    url(r'^devicegroups/delete/(?P<pk>[^/]*)$', DevicegroupDelete.as_view(), name="devicegroup-delete"),
    # Device tags
    url(r'^devicetags/$', DevicetagList.as_view(), name="devicetag-list"),
    url(r'^devicetags/(?P<page>[0-9]*)$', DevicetagList.as_view(), name="devicetag-list"),
    url(r'^devicetags/sorting/(?P<sorting>[^/]*)$', DevicetagList.as_view(), name="devicetag-list"),
    url(r'^devicetags/page/(?P<page>[0-9]*)/sorting/(?P<sorting>[^/]*)$', DevicetagList.as_view(), name="devicetag-list"),
    url(r'^devicetags/sorting/(?P<sorting>[^/]*)/filter/(?P<filter>[^/]*)$', DevicetagList.as_view(), name="devicetag-list"),
    url(r'^devicetags/page/(?P<page>[0-9]*)/sorting/(?P<sorting>[^/]*)/filter/(?P<filter>[^/]*)$', DevicetagList.as_view(), name="devicetag-list"),
    url(r'^devicetags/add$', DevicetagCreate.as_view(), name="devicetag-add"),
    url(r'^devicetags/edit/(?P<pk>[^/]*)$', DevicetagUpdate.as_view(), name="devicetag-edit"),
    url(r'^devicetags/delete/(?P<pk>[^/]*)$', DevicetagDelete.as_view(), name="devicetag-delete"),
    # Sections
    url(r'^sections/$', SectionList.as_view(), name="section-list"),
    url(r'^sections/(?P<page>[0-9]*)$', SectionList.as_view(), name="section-list"),
    url(r'^sections/sorting/(?P<sorting>[^/]*)$', SectionList.as_view(), name="section-list"),
    url(r'^sections/page/(?P<page>[0-9]*)/sorting/(?P<sorting>[^/]*)$', SectionList.as_view(), name="section-list"),
    url(r'^sections/sorting/(?P<sorting>[^/]*)/filter/(?P<filter>[^/]*)$', SectionList.as_view(), name="section-list"),
    url(r'^sections/page/(?P<page>[0-9]*)/sorting/(?P<sorting>[^/]*)/filter/(?P<filter>[^/]*)$', SectionList.as_view(), name="section-list"),
    url(r'^sections/add$', SectionCreate.as_view(), name="section-add"),
    url(r'^sections/edit/(?P<pk>[^/]*)$', SectionUpdate.as_view(), name="section-edit"),
    url(r'^sections/view/(?P<pk>[^/]*)$', SectionDetail.as_view(), name="section-detail"),
    url(r'^sections/delete/(?P<pk>[^/]*)$', SectionDelete.as_view(), name="section-delete"),
    url(r'^sections/merge/(?P<oldpk>[0-9]*)/(?P<newpk>[0-9]*)$', SectionMerge.as_view(), name="section-merge"),
    # Departments
    url(r'^departments/$', DepartmentList.as_view(), name="department-list"),
    url(r'^departments/(?P<page>[0-9]*)$', DepartmentList.as_view(), name="department-list"),
    url(r'^departments/sorting/(?P<sorting>[^/]*)$', DepartmentList.as_view(), name="department-list"),
    url(r'^departments/page/(?P<page>[0-9]*)/sorting/(?P<sorting>[^/]*)$', DepartmentList.as_view(), name="department-list"),
    url(r'^departments/sorting/(?P<sorting>[^/]*)/filter/(?P<filter>[^/]*)$', DepartmentList.as_view(), name="department-list"),
    url(r'^departments/page/(?P<page>[0-9]*)/sorting/(?P<sorting>[^/]*)/filter/(?P<filter>[^/]*)$', DepartmentList.as_view(), name="department-list"),
    url(r'^departments/add$', DepartmentCreate.as_view(), name="department-add"),
    url(r'^departments/edit/(?P<pk>[^/]*)$', DepartmentUpdate.as_view(), name="department-edit"),
    url(r'^departments/adduser/(?P<pk>[^/]*)$', DepartmentAddUser.as_view(), name="department-add-user"),
    url(r'^departments/removeuser/(?P<pk>[^/]*)$', DepartmentDeleteUser.as_view(), name="department-remove-user"),
    url(r'^departments/view/(?P<pk>[^/]*)$', DepartmentDetail.as_view(), name="department-detail"),
    url(r'^departments/delete/(?P<pk>[^/]*)$', DepartmentDelete.as_view(), name="department-delete"),
    # IP addresses
    url(r'^ipaddresses/$', IpAddressList.as_view(), name="ipaddress-list"),
    url(r'^ipaddresses/department/(?P<department>[^/]*)$', IpAddressList.as_view(), name="ipaddress-list"),
    url(r'^ipaddresses/department/(?P<department>[^/]*)/filter/(?P<filter>[^/]*)$', IpAddressList.as_view(), name="ipaddress-list"),
    url(r'^ipaddresses/department/(?P<department>[^/]*)/filter/(?P<filter>[^/]*)/search/(?P<search>[^/]*)$', IpAddressList.as_view(), name="ipaddress-list"),
    url(r'^ipaddresses/page/(?P<page>[0-9]*)$', IpAddressList.as_view(), name="ipaddress-list"),
    url(r'^ipaddresses/page/(?P<page>[0-9]*)/department/(?P<department>[^/]*)$', IpAddressList.as_view(), name="ipaddress-list"),
    url(r'^ipaddresses/page/(?P<page>[0-9]*)/department/(?P<department>[^/]*)/filter/(?P<filter>[^/]*)$', IpAddressList.as_view(), name="ipaddress-list"),
    url(r'^ipaddresses/page/(?P<page>[0-9]*)/department/(?P<department>[^/]*)/filter/(?P<filter>[^/]*)/search/(?P<search>[^/]*)$', IpAddressList.as_view(), name="ipaddress-list"),
    url(r'^ipaddresses/add$', IpAddressCreate.as_view(), name="ipaddress-add"),
    url(r'^ipaddresses/edit/(?P<pk>[^/]*)$', IpAddressUpdate.as_view(), name="ipaddress-edit"),
    url(r'^ipaddresses/delete/(?P<pk>[^/]*)$', IpAddressDelete.as_view(), name="ipaddress-delete"),
    url(r'^ipaddresses/view/(?P<pk>[^/]*)$', IpAddressDetail.as_view(), name="ipaddress-detail"),
    # Users and profile/settings pages
    url(r'^users/$', UserList.as_view(), name="user-list"),
    url(r'^users/department/(?P<department>[^/]*)/filter/(?P<filter>[^/]*)$', UserList.as_view(), name="user-list"),
    url(r'^users/page/(?P<page>[0-9]*)/department/(?P<department>[^/]*)/filter/(?P<filter>[^/]*)$', UserList.as_view(), name="user-list"),
    # NOTE(review): the name "userprofile" is used both here and for the
    # /profile route below; reverse('userprofile') resolves only the later
    # pattern — confirm this is intentional.
    url(r'^users/view/(?P<pk>[0-9]*)$', ProfileView.as_view(), name="userprofile"),
    url(r'^users/view/(?P<pk>[0-9]*)/ipaddress/$', UserIpAddress.as_view(), name="user-ipaddress"),
    url(r'^users/view/(?P<pk>[0-9]*)/ipaddress/(?P<ipaddress>[0-9]*)$', UserIpAddressRemove.as_view(), name="user-ipaddress-remove"),
    url(r'^profile', login_required(UserprofileView.as_view()), name="userprofile"),
    url(r'^settings', login_required(UsersettingsView.as_view()), name="usersettings"),
    # History
    url(r'^history/global/$', Globalhistory.as_view(), name="globalhistory"),
    url(r'^history/global/(?P<page>[0-9]*)$', Globalhistory.as_view(), name="globalhistory"),
    url(r'^history/(?P<content_type_id>[0-9]*)/(?P<object_id>[0-9]*)$', HistoryList.as_view(), name="history-list"),
    url(r'^history/(?P<content_type_id>[0-9]*)/(?P<object_id>[0-9]*)/(?P<page>[0-9]*)$', HistoryList.as_view(), name="history-list"),
    url(r'^history/version/(?P<pk>[0-9]*)$', HistoryDetail.as_view(), name="history-detail"),
    # Admin, i18n and bundled apps
    url(r'^admin/', admin.site.urls),
    url(r'^i18n/', include('django.conf.urls.i18n')),
    url(r'^jsi18n/', JavaScriptCatalog.as_view(), name='javascript-catalog'),
    url(r'^oauth2/', include('oauth2_provider.urls', namespace='oauth2_provider')),
    url(r'^', include('favicon.urls')),
    # AJAX endpoints (all login-protected)
    url(r'^ajax/add_widget', login_required(WidgetAdd.as_view()), name="widget_add"),
    url(r'^ajax/remove_widget', login_required(WidgetRemove.as_view()), name="widget_remove"),
    url(r'^ajax/toggle_widget', login_required(WidgetToggle.as_view()), name="widget_toggle"),
    url(r'^ajax/move_widget', login_required(WidgetMove.as_view()), name="widget_move"),
    url(r'^ajax/autocomplete_name', login_required(AutocompleteName.as_view()), name="autocomplete-name"),
    url(r'^ajax/autocomplete_device', login_required(AutocompleteDevice.as_view()), name="autocomplete-device"),
    url(r'^ajax/autocomplete_smalldevice', login_required(AutocompleteSmallDevice.as_view()), name="autocomplete-smalldevice"),
    url(r'^ajax/load_extraform', login_required(LoadExtraform.as_view()), name="load-extraform"),
    url(r'^ajax/load_mailtemplate', login_required(LoadMailtemplate.as_view()), name="load-mailtemplate"),
    url(r'^ajax/preview_mail', login_required(PreviewMail.as_view()), name="preview-mail"),
    url(r'^ajax/add_device_field', login_required(AddDeviceField.as_view()), name="add-device-field"),
    url(r'^ajax/get_attributes', login_required(GetTypeAttributes.as_view()), name="get-attributes"),
    url(r'^ajax/user_lendings', login_required(UserLendings.as_view()), name="get-user-lendings"),
    url(r'^ajax/puppetdetails/(?P<device>[0-9]*)', login_required(PuppetDetails.as_view()), name="puppet-details"),
    url(r'^ajax/puppetsoftware/(?P<device>[0-9]*)', login_required(PuppetSoftware.as_view()), name="puppet-software"),
]
# REST API routes. format_suffix_patterns additionally lets each endpoint be
# addressed with an explicit .json/.html suffix.
urlpatterns += format_suffix_patterns([
    url(r'^api/$', api_root),
    url(r'^api/devices/$', DeviceApiList.as_view(), name='device-api-list'),
    url(r'^api/devices/create/$', DeviceApiCreate.as_view(), name='device-api-create'),
    url(r'^api/devices/(?P<pk>\d+)/$', DeviceApiDetail.as_view(), name='device-api-detail'),
    url(r'^api/devices/(?P<pk>\d+)/bookmark/$', DeviceApiBookmark.as_view(), name='device-api-bookmark'),
    url(r'^api/devices/(?P<pk>\d+)/changeroom/$', DeviceApiRoomChange.as_view(), name='device-api-room'),
    url(r'^api/devices/(?P<pk>\d+)/pictures/$', DeviceApiListPictures.as_view(), name='device-api-pictures'),
    url(r'^api/devices/(?P<device>\d+)/pictures/(?P<pk>\d+)$', DeviceApiPicture.as_view(), name='device-api-picture'),
    url(r'^api/devices/(?P<device>\d+)/pictures/(?P<pk>\d+)/rotate/(?P<orientation>[a-z]*)$', DeviceApiPictureRotate.as_view(), name='device-api-picture-rotate'),
    url(r'^api/devices/lend/$', DeviceApiLend.as_view(), name='device-api-lend'),
    url(r'^api/devices/return/$', DeviceApiReturn.as_view(), name='device-api-return'),
    url(r'^api/smalldevices/$', SmallDeviceApiList.as_view(), name='smalldevice-api-lend'),
    url(r'^api/smalldevices/(?P<subpart>[^/]*)/$', SmallDeviceApiList.as_view(), name='smalldevice-api-lend'),
    url(r'^api/manufacturers/$', ManufacturerApiList.as_view(), name='manufacturer-api-list'),
    url(r'^api/manufacturers/create/$', ManufacturerApiCreate.as_view(), name='manufacturer-api-create'),
    url(r'^api/manufacturers/(?P<pk>\d+)/$', ManufacturerApiDetail.as_view(), name='manufacturer-api-detail'),
    url(r'^api/rooms/$', RoomApiList.as_view(), name='room-api-list'),
    url(r'^api/rooms/create/$', RoomApiCreate.as_view(), name='room-api-create'),
    url(r'^api/rooms/(?P<pk>\d+)/$', RoomApiDetail.as_view(), name='room-api-detail'),
    url(r'^api/types/$', TypeApiList.as_view(), name='type-api-list'),
    url(r'^api/types/create/$', TypeApiCreate.as_view(), name='type-api-create'),
    url(r'^api/types/(?P<pk>\d+)/$', TypeApiDetail.as_view(), name='type-api-detail'),
    url(r'^api/buildings/$', BuildingApiList.as_view(), name='building-api-list'),
    url(r'^api/buildings/create/$', BuildingApiCreate.as_view(), name='building-api-create'),
    url(r'^api/buildings/(?P<pk>\d+)/$', BuildingApiDetail.as_view(), name='building-api-detail'),
    url(r'^api/templates/$', TemplateApiList.as_view(), name='template-api-list'),
    url(r'^api/templates/create/$', TemplateApiCreate.as_view(), name='template-api-create'),
    url(r'^api/templates/(?P<pk>\d+)/$', TemplateApiDetail.as_view(), name='template-api-detail'),
    url(r'^api/ipaddresses/$', IpAddressApiList.as_view(), name='ipaddress-api-list'),
    url(r'^api/ipaddresses/create/$', IpAddressApiCreate.as_view(), name='ipaddress-api-create'),
    url(r'^api/ipaddresses/(?P<pk>\d+)/$', IpAddressApiDetail.as_view(), name='ipaddress-api-detail'),
    url(r'^api/users/$', UserApiList.as_view(), name='user-api-list'),
    url(r'^api/users/(?P<pk>\d+)/$', UserApiDetail.as_view(), name='user-api-detail'),
    # Fixed: the pattern previously ended in '$$'. '$' is a zero-width
    # anchor so the doubled anchor matched the same URLs, but it was a typo
    # and misleading to readers.
    url(r'^api/users/profile/$', UserApiProfile.as_view(), name='user-api-profile'),
    url(r'^api/useravatar/(?P<username>[^/]*)/$', UserApiAvatar.as_view(), name='user-api-avatar'),
    url(r'^api/groups/$', GroupApiList.as_view(), name='group-api-list'),
    url(r'^api/groups/(?P<pk>\d+)/$', GroupApiDetail.as_view(), name='group-api-detail'),
    url(r'^api/auth/', include('rest_framework.urls', namespace='rest_framework')),
], allowed=["json", "html"])
# Development-only routes: the debug toolbar and direct serving of uploaded
# media files (in production the front-end web server serves MEDIA_ROOT).
if settings.DEBUG:
    import debug_toolbar
    # static files (images, css, javascript, etc.)
    urlpatterns += [
        url(r'^__debug__/', include(debug_toolbar.urls)),
        url(r'^media/(?P<path>.*)$', serve, {
            'document_root': settings.MEDIA_ROOT,
        }),
    ]
| |
import pytest
import types
from boto import ec2
from moto import mock_ec2, mock_s3
from mangrove.declarative import ServiceDeclaration, ServicePoolDeclaration
from mangrove.constants import WILDCARD_ALL_REGIONS
from mangrove.exceptions import InvalidServiceError, DoesNotExistError
class TestServiceDeclaration:
    """Unit tests for mangrove.declarative.ServiceDeclaration.

    Covers the from_string/from_dict loaders and the service_name,
    regions and default_region property setters.

    Fix: test_from_string_with_an_invalid_service_name_raises was defined
    twice with an identical body; in a class body the second ``def``
    silently replaces the first, so the duplicate was dead code and has
    been removed.
    """

    @mock_ec2
    def test_from_string_with_a_valid_service_name_declaration(self):
        sd = ServiceDeclaration()
        sd.from_string('ec2')
        # A bare service name implies the all-regions wildcard.
        expected_regions = [r.name for r in ec2.regions()]
        assert sd.service_name == 'ec2'
        assert sd.regions == expected_regions

    def test_from_string_with_an_invalid_service_name_raises(self):
        sd = ServiceDeclaration()
        with pytest.raises(InvalidServiceError):
            sd.from_string('ec3')

    def test_from_string_with_an_invalid_type_raises(self):
        sd = ServiceDeclaration()
        with pytest.raises(TypeError):
            sd.from_string(123)

    @mock_ec2
    def test_from_dict_with_a_valid_declaration(self):
        sd = ServiceDeclaration()
        sd.from_dict({
            'ec2': {
                'regions': ['eu-west-1', 'us-east-1'],
                'default_region': 'eu-west-1'
            }
        })
        assert sd.service_name == 'ec2'
        assert sd.regions == ['eu-west-1', 'us-east-1']
        assert sd.default_region == 'eu-west-1'

    def test_from_dict_with_an_invalid_service_name_raises(self):
        sd = ServiceDeclaration()
        with pytest.raises(InvalidServiceError):
            sd.from_dict({'ec3': {}})

    def test_from_dict_with_invalid_declaration_type_raises(self):
        sd = ServiceDeclaration()
        with pytest.raises(TypeError):
            sd.from_dict(123)

    def test_module_getter_without_service_name_is_none(self):
        sd = ServiceDeclaration()
        assert sd.service_name is None
        assert sd.module is None

    def test_module_getter_with_invalid_service_name(self):
        # The setter itself raises, so sd.module is never reached; the
        # module getter can only ever see a validated service name.
        sd = ServiceDeclaration()
        with pytest.raises(InvalidServiceError):
            sd.service_name = 'ec3'
            sd.module

    def test_module_getter_with_valid_service_name(self):
        sd = ServiceDeclaration()
        sd.service_name = 'ec2'
        assert sd.module is not None
        assert isinstance(sd.module, types.ModuleType)

    def test_service_name_setter_raises_with_non_existing_boto_module(self):
        sd = ServiceDeclaration()
        with pytest.raises(InvalidServiceError):
            sd.service_name = 'ec3'

    def test_regions_setter_raises_with_invalid_type(self):
        sd = ServiceDeclaration()
        with pytest.raises(TypeError):
            sd.regions = 123

    def test_regions_setter_with_non_wildcard_string_raises(self):
        sd = ServiceDeclaration()
        with pytest.raises(ValueError):
            sd.regions = 'abc'

    def test_regions_setter_with_wildcard_but_unset_module_raises(self):
        # The wildcard needs a boto module to enumerate regions from.
        sd = ServiceDeclaration()
        with pytest.raises(ValueError):
            sd._module = None
            sd.regions = '*'

    @mock_ec2
    def test_regions_setter_with_wildcard_converts_to_regions_list(self):
        sd = ServiceDeclaration()
        sd.service_name = 'ec2'
        sd.regions = '*'
        expected_regions = [r.name for r in ec2.regions()]
        assert sd.regions == expected_regions

    @mock_ec2
    def test_regions_setter_with_wildcard_list_converts_to_regions_list(self):
        sd = ServiceDeclaration()
        sd.service_name = 'ec2'
        sd.regions = ['*']
        expected_regions = [r.name for r in ec2.regions()]
        assert sd.regions == expected_regions

    def test_regions_setter_with_regions_list(self):
        sd = ServiceDeclaration()
        sd.service_name = 'ec2'
        sd.regions = ['us-east-1', 'eu-west-1']
        assert sd.regions == ['us-east-1', 'eu-west-1']

    def test_regions_setter_with_none_value(self):
        sd = ServiceDeclaration()
        sd.service_name = 'ec2'
        sd.regions = None
        assert sd.regions is None

    def test_default_region_setter_with_empty_regions_raises(self):
        sd = ServiceDeclaration()
        sd.service_name = 'ec2'
        sd.regions = []
        with pytest.raises(ValueError):
            sd.default_region = 'us-east-1'

    def test_default_region_setter_with_region_not_part_of_regions(self):
        sd = ServiceDeclaration()
        sd.service_name = 'ec2'
        sd.regions = ['us-west-1']
        with pytest.raises(ValueError):
            sd.default_region = 'us-east-1'

    def test_default_region_setter_with_none_value(self):
        sd = ServiceDeclaration()
        sd.service_name = 'ec2'
        sd.regions = ['us-east-1']
        sd.default_region = None
        assert sd.default_region is None

    def test_default_region_setter(self):
        sd = ServiceDeclaration()
        sd.service_name = 'ec2'
        sd.regions = ['us-east-1']
        sd.default_region = 'us-east-1'
        assert sd.default_region == 'us-east-1'

    def test_load_invalid_type_raises(self):
        with pytest.raises(TypeError):
            ServiceDeclaration(123)
class TestServicePoolDeclaration:
    """Unit tests for mangrove.declarative.ServicePoolDeclaration."""

    @mock_ec2
    @mock_s3
    def test_from_dict_with_valid_declaration(self):
        pool = ServicePoolDeclaration()
        declaration = {
            'ec2': {},
            's3': {'region': 'us-east-1'},
        }
        pool.from_dict(declaration)
        # Each declared service maps to a ServiceDeclaration keyed by name.
        for service in ('ec2', 's3'):
            assert isinstance(pool[service], ServiceDeclaration) is True
            assert pool[service].service_name == service

    def test_from_dict_with_invalid_declaration_raises(self):
        pool = ServicePoolDeclaration()
        bogus_declaration = {
            'abc': {'123': 'easy as'},
        }
        with pytest.raises(InvalidServiceError):
            pool.from_dict(bogus_declaration)
| |
# Copyright (c) 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import urllib
from itertools import ifilter
from swift.common import bufferedhttp
from swift.common import exceptions
from swift.common import http
class Sender(object):
    """
    Sends SSYNC requests to the object server.
    These requests are eventually handled by
    :py:mod:`.ssync_receiver` and full documentation about the
    process is there.
    """
    def __init__(self, daemon, node, job, suffixes, remote_check_objs=None):
        # daemon supplies configuration (conn/node/http timeouts,
        # network_chunk_size), the diskfile manager and the logger.
        self.daemon = daemon
        self.df_mgr = self.daemon._diskfile_mgr
        # node: ring node dict for the remote end; keys used below include
        # 'replication_ip', 'replication_port', 'device' and (optionally)
        # 'index'.
        self.node = node
        # job: replication job dict; keys used below include 'device',
        # 'partition', 'policy' and (optionally) 'frag_index' and
        # 'sync_diskfile_builder'.
        self.job = job
        self.suffixes = suffixes
        self.connection = None
        self.response = None
        # Buffer of response bytes already read but not yet consumed by
        # readline(); see readline() for the chunked-transfer decoding.
        self.response_buffer = ''
        # Bytes left in the current response chunk; -1 is the EOF sentinel.
        self.response_chunk_left = 0
        # available_map has an entry for each object in given suffixes that
        # is available to be sync'd; each entry is a hash => timestamp
        self.available_map = {}
        # When remote_check_objs is given in job, ssync_sender trys only to
        # make sure those objects exist or not in remote.
        self.remote_check_objs = remote_check_objs
        # send_list has an entry for each object that the receiver wants to
        # be sync'ed; each entry is an object hash
        self.send_list = []
        self.failures = 0
    def __call__(self):
        """
        Perform ssync with remote node.
        :returns: a 2-tuple, in the form (success, can_delete_objs) where
                  success is a boolean and can_delete_objs is the map of
                  objects that are in sync with the receiver. Each entry in
                  can_delete_objs maps a hash => timestamp
        """
        if not self.suffixes:
            return True, {}
        try:
            # Double try blocks in case our main error handler fails.
            try:
                # The general theme for these functions is that they should
                # raise exceptions.MessageTimeout for client timeouts and
                # exceptions.ReplicationException for common issues that will
                # abort the replication attempt and log a simple error. All
                # other exceptions will be logged with a full stack trace.
                self.connect()
                self.missing_check()
                if self.remote_check_objs is None:
                    self.updates()
                    can_delete_obj = self.available_map
                else:
                    # when we are initialized with remote_check_objs we don't
                    # *send* any requested updates; instead we only collect
                    # what's already in sync and safe for deletion
                    in_sync_hashes = (set(self.available_map.keys()) -
                                      set(self.send_list))
                    can_delete_obj = dict((hash_, self.available_map[hash_])
                                          for hash_ in in_sync_hashes)
                self.disconnect()
                if not self.failures:
                    return True, can_delete_obj
                else:
                    return False, {}
            except (exceptions.MessageTimeout,
                    exceptions.ReplicationException) as err:
                # Expected replication problems: log a one-line error, no
                # stack trace, and fall through to the failure return.
                self.daemon.logger.error(
                    '%s:%s/%s/%s %s', self.node.get('replication_ip'),
                    self.node.get('replication_port'), self.node.get('device'),
                    self.job.get('partition'), err)
            except Exception:
                # We don't want any exceptions to escape our code and possibly
                # mess up the original replicator code that called us since it
                # was originally written to shell out to rsync which would do
                # no such thing.
                self.daemon.logger.exception(
                    '%s:%s/%s/%s EXCEPTION in replication.Sender',
                    self.node.get('replication_ip'),
                    self.node.get('replication_port'),
                    self.node.get('device'), self.job.get('partition'))
        except Exception:
            # We don't want any exceptions to escape our code and possibly
            # mess up the original replicator code that called us since it
            # was originally written to shell out to rsync which would do
            # no such thing.
            # This particular exception handler does the minimal amount as it
            # would only get called if the above except Exception handler
            # failed (bad node or job data).
            self.daemon.logger.exception('EXCEPTION in replication.Sender')
        return False, {}
    def connect(self):
        """
        Establishes a connection and starts an SSYNC request
        with the object server.
        """
        with exceptions.MessageTimeout(
                self.daemon.conn_timeout, 'connect send'):
            self.connection = bufferedhttp.BufferedHTTPConnection(
                '%s:%s' % (self.node['replication_ip'],
                           self.node['replication_port']))
            self.connection.putrequest('SSYNC', '/%s/%s' % (
                self.node['device'], self.job['partition']))
            # The request body is streamed, so it must be chunk-encoded.
            self.connection.putheader('Transfer-Encoding', 'chunked')
            self.connection.putheader('X-Backend-Storage-Policy-Index',
                                      int(self.job['policy']))
            # a sync job must use the node's index for the frag_index of the
            # rebuilt fragments instead of the frag_index from the job which
            # will be rebuilding them
            self.connection.putheader(
                'X-Backend-Ssync-Frag-Index', self.node.get(
                    'index', self.job.get('frag_index', '')))
            # a revert job to a handoff will not have a node index
            self.connection.putheader('X-Backend-Ssync-Node-Index',
                                      self.node.get('index', ''))
            self.connection.endheaders()
        with exceptions.MessageTimeout(
                self.daemon.node_timeout, 'connect receive'):
            self.response = self.connection.getresponse()
            if self.response.status != http.HTTP_OK:
                # Drain the body before raising so the connection state
                # stays consistent.
                self.response.read()
                raise exceptions.ReplicationException(
                    'Expected status %s; got %s' %
                    (http.HTTP_OK, self.response.status))
    def readline(self):
        """
        Reads a line from the SSYNC response body.
        httplib has no readline and will block on read(x) until x is
        read, so we have to do the work ourselves. A bit of this is
        taken from Python's httplib itself.

        Decodes the chunked transfer-encoding by hand:
        self.response_chunk_left tracks bytes remaining in the current
        chunk (0 = need a new chunk-size line, -1 = final 0-length chunk
        already seen, i.e. EOF).
        """
        data = self.response_buffer
        self.response_buffer = ''
        while '\n' not in data and len(data) < self.daemon.network_chunk_size:
            if self.response_chunk_left == -1:  # EOF-already indicator
                break
            if self.response_chunk_left == 0:
                # Start of a new chunk: read the hex chunk-size line.
                line = self.response.fp.readline()
                i = line.find(';')
                if i >= 0:
                    line = line[:i]  # strip chunk-extensions
                try:
                    self.response_chunk_left = int(line.strip(), 16)
                except ValueError:
                    # close the connection as protocol synchronisation is
                    # probably lost
                    self.response.close()
                    raise exceptions.ReplicationException('Early disconnect')
                if self.response_chunk_left == 0:
                    # A zero-length chunk terminates the chunked body.
                    self.response_chunk_left = -1
                    break
            chunk = self.response.fp.read(min(
                self.response_chunk_left,
                self.daemon.network_chunk_size - len(data)))
            if not chunk:
                # close the connection as protocol synchronisation is
                # probably lost
                self.response.close()
                raise exceptions.ReplicationException('Early disconnect')
            self.response_chunk_left -= len(chunk)
            if self.response_chunk_left == 0:
                self.response.fp.read(2)  # discard the trailing \r\n
            data += chunk
        if '\n' in data:
            # Return one line; keep whatever followed it for the next call.
            data, self.response_buffer = data.split('\n', 1)
            data += '\n'
        return data
    def missing_check(self):
        """
        Handles the sender-side of the MISSING_CHECK step of a
        SSYNC request.
        Full documentation of this can be found at
        :py:meth:`.Receiver.missing_check`.
        """
        # First, send our list.
        with exceptions.MessageTimeout(
                self.daemon.node_timeout, 'missing_check start'):
            msg = ':MISSING_CHECK: START\r\n'
            # '%x\r\n%s\r\n' frames each message as one HTTP chunk.
            self.connection.send('%x\r\n%s\r\n' % (len(msg), msg))
        hash_gen = self.df_mgr.yield_hashes(
            self.job['device'], self.job['partition'],
            self.job['policy'], self.suffixes,
            frag_index=self.job.get('frag_index'))
        if self.remote_check_objs is not None:
            # Restrict the check to the caller-supplied hashes; each yielded
            # item is a (path, object_hash, timestamp) tuple.
            hash_gen = ifilter(
                lambda path_objhash_timestamp:
                path_objhash_timestamp[1] in
                self.remote_check_objs, hash_gen)
        for path, object_hash, timestamp in hash_gen:
            self.available_map[object_hash] = timestamp
            with exceptions.MessageTimeout(
                    self.daemon.node_timeout,
                    'missing_check send line'):
                msg = '%s %s\r\n' % (
                    urllib.quote(object_hash),
                    urllib.quote(timestamp))
                self.connection.send('%x\r\n%s\r\n' % (len(msg), msg))
        with exceptions.MessageTimeout(
                self.daemon.node_timeout, 'missing_check end'):
            msg = ':MISSING_CHECK: END\r\n'
            self.connection.send('%x\r\n%s\r\n' % (len(msg), msg))
        # Now, retrieve the list of what they want.
        while True:
            with exceptions.MessageTimeout(
                    self.daemon.http_timeout, 'missing_check start wait'):
                line = self.readline()
            if not line:
                raise exceptions.ReplicationException('Early disconnect')
            line = line.strip()
            if line == ':MISSING_CHECK: START':
                break
            elif line:
                raise exceptions.ReplicationException(
                    'Unexpected response: %r' % line[:1024])
        while True:
            with exceptions.MessageTimeout(
                    self.daemon.http_timeout, 'missing_check line wait'):
                line = self.readline()
            if not line:
                raise exceptions.ReplicationException('Early disconnect')
            line = line.strip()
            if line == ':MISSING_CHECK: END':
                break
            # Each non-empty line names an object hash the receiver wants.
            parts = line.split()
            if parts:
                self.send_list.append(parts[0])
    def updates(self):
        """
        Handles the sender-side of the UPDATES step of an SSYNC
        request.
        Full documentation of this can be found at
        :py:meth:`.Receiver.updates`.
        """
        # First, send all our subrequests based on the send_list.
        with exceptions.MessageTimeout(
                self.daemon.node_timeout, 'updates start'):
            msg = ':UPDATES: START\r\n'
            self.connection.send('%x\r\n%s\r\n' % (len(msg), msg))
        for object_hash in self.send_list:
            try:
                df = self.df_mgr.get_diskfile_from_hash(
                    self.job['device'], self.job['partition'], object_hash,
                    self.job['policy'], frag_index=self.job.get('frag_index'))
            except exceptions.DiskFileNotExist:
                # The object vanished between missing_check and now; the
                # receiver simply won't get an update for it.
                continue
            url_path = urllib.quote(
                '/%s/%s/%s' % (df.account, df.container, df.obj))
            try:
                df.open()
                # EC reconstructor may have passed a callback to build
                # an alternative diskfile...
                df = self.job.get('sync_diskfile_builder', lambda *args: df)(
                    self.job, self.node, df.get_metadata())
            except exceptions.DiskFileDeleted as err:
                # Deleted locally: propagate the deletion instead of a PUT.
                self.send_delete(url_path, err.timestamp)
            except exceptions.DiskFileError:
                pass
            else:
                self.send_put(url_path, df)
        with exceptions.MessageTimeout(
                self.daemon.node_timeout, 'updates end'):
            msg = ':UPDATES: END\r\n'
            self.connection.send('%x\r\n%s\r\n' % (len(msg), msg))
        # Now, read their response for any issues.
        while True:
            with exceptions.MessageTimeout(
                    self.daemon.http_timeout, 'updates start wait'):
                line = self.readline()
            if not line:
                raise exceptions.ReplicationException('Early disconnect')
            line = line.strip()
            if line == ':UPDATES: START':
                break
            elif line:
                raise exceptions.ReplicationException(
                    'Unexpected response: %r' % line[:1024])
        while True:
            with exceptions.MessageTimeout(
                    self.daemon.http_timeout, 'updates line wait'):
                line = self.readline()
            if not line:
                raise exceptions.ReplicationException('Early disconnect')
            line = line.strip()
            if line == ':UPDATES: END':
                break
            elif line:
                raise exceptions.ReplicationException(
                    'Unexpected response: %r' % line[:1024])
    def send_delete(self, url_path, timestamp):
        """
        Sends a DELETE subrequest with the given information.
        """
        msg = ['DELETE ' + url_path, 'X-Timestamp: ' + timestamp.internal]
        msg = '\r\n'.join(msg) + '\r\n\r\n'
        with exceptions.MessageTimeout(
                self.daemon.node_timeout, 'send_delete'):
            self.connection.send('%x\r\n%s\r\n' % (len(msg), msg))
    def send_put(self, url_path, df):
        """
        Sends a PUT subrequest for the url_path using the source df
        (DiskFile) and content_length.
        """
        msg = ['PUT ' + url_path, 'Content-Length: ' + str(df.content_length)]
        # Sorted to make it easier to test.
        for key, value in sorted(df.get_metadata().items()):
            if key not in ('name', 'Content-Length'):
                msg.append('%s: %s' % (key, value))
        msg = '\r\n'.join(msg) + '\r\n\r\n'
        with exceptions.MessageTimeout(self.daemon.node_timeout, 'send_put'):
            self.connection.send('%x\r\n%s\r\n' % (len(msg), msg))
        # Stream the object body, one HTTP chunk per diskfile chunk.
        for chunk in df.reader():
            with exceptions.MessageTimeout(
                    self.daemon.node_timeout, 'send_put chunk'):
                self.connection.send('%x\r\n%s\r\n' % (len(chunk), chunk))
    def disconnect(self):
        """
        Closes down the connection to the object server once done
        with the SSYNC request.
        """
        try:
            with exceptions.MessageTimeout(
                    self.daemon.node_timeout, 'disconnect'):
                # A zero-length chunk terminates the chunked request body.
                self.connection.send('0\r\n\r\n')
        except (Exception, exceptions.Timeout):
            pass  # We're okay with the above failing.
        self.connection.close()
| |
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.core.urlresolvers import NoReverseMatch # noqa
from django.core.urlresolvers import reverse
from django.http import HttpResponse # noqa
from django.template import defaultfilters as filters
from django.utils import html
from django.utils.http import urlencode
from django.utils import safestring
from django.utils.translation import string_concat # noqa
from django.utils.translation import ugettext_lazy as _
from django.utils.translation import ungettext_lazy
from horizon import exceptions
from horizon import tables
from openstack_dashboard import api
from openstack_dashboard.api import cinder
from openstack_dashboard import policy
# Volume statuses in which DeleteVolume.allowed permits deletion.
DELETABLE_STATES = ("available", "error", "error_extending")
class VolumePolicyTargetMixin(policy.PolicyTargetMixin):
    """Resolve the policy "project_id" target from the volume's tenant attr."""
    policy_target_attrs = (("project_id", 'os-vol-tenant-attr:tenant_id'),)
class LaunchVolume(tables.LinkAction):
    """Row action: open the launch-instance workflow booting from this volume."""
    name = "launch_volume"
    verbose_name = _("Launch as Instance")
    url = "horizon:project:instances:launch"
    classes = ("ajax-modal", "btn-launch")
    icon = "cloud-upload"
    policy_rules = (("compute", "compute:create"),)
    def get_link_url(self, datum):
        """Return the launch URL with this volume preselected as boot source."""
        base_url = reverse(self.url)
        # "<id>:vol" is the source_id format the launch workflow expects.
        vol_id = "%s:vol" % self.table.get_object_id(datum)
        params = urlencode({"source_type": "volume_id",
                            "source_id": vol_id})
        return "?".join([base_url, params])
    def allowed(self, request, volume=None):
        # Only bootable volumes that are not attached ('available') can
        # be launched as an instance.
        if getattr(volume, 'bootable', '') == 'true':
            return volume.status == "available"
        return False
class DeleteVolume(VolumePolicyTargetMixin, tables.DeleteAction):
    """Batch/row action: delete one or more volumes via cinder."""
    @staticmethod
    def action_present(count):
        return ungettext_lazy(
            u"Delete Volume",
            u"Delete Volumes",
            count
        )
    @staticmethod
    def action_past(count):
        return ungettext_lazy(
            u"Scheduled deletion of Volume",
            u"Scheduled deletion of Volumes",
            count
        )
    policy_rules = (("volume", "volume:delete"),)
    def delete(self, request, obj_id):
        """Delete the volume; surface a snapshot-dependency hint on failure."""
        obj = self.table.get_object_by_id(obj_id)
        name = self.table.get_object_display(obj)
        try:
            cinder.volume_delete(request, obj_id)
        except Exception:
            # check_message only swallows errors that mention snapshots /
            # dependents; anything else is re-raised below.
            msg = _('Unable to delete volume "%s". One or more snapshots '
                    'depend on it.')
            exceptions.check_message(["snapshots", "dependent"], msg % name)
            raise
    def allowed(self, request, volume=None):
        # When called without a datum (table-level check), allow the button.
        if volume:
            return volume.status in DELETABLE_STATES
        return True
class CreateVolume(tables.LinkAction):
    """Table action: open the create-volume modal; disabled when over quota."""
    name = "create"
    verbose_name = _("Create Volume")
    url = "horizon:project:volumes:volumes:create"
    classes = ("ajax-modal",)
    icon = "plus"
    policy_rules = (("volume", "volume:create"),)
    # ajax=True so allowed() is re-evaluated via single() after table updates.
    ajax = True
    def __init__(self, attrs=None, **kwargs):
        kwargs['preempt'] = True
        super(CreateVolume, self).__init__(attrs, **kwargs)
    def allowed(self, request, volume=None):
        """Always allow, but toggle the disabled style based on quota."""
        limits = api.cinder.tenant_absolute_limits(request)
        # Remaining gigabyte and volume-count quota; missing limits are
        # treated as unlimited (inf).
        gb_available = (limits.get('maxTotalVolumeGigabytes', float("inf"))
                        - limits.get('totalGigabytesUsed', 0))
        volumes_available = (limits.get('maxTotalVolumes', float("inf"))
                             - limits.get('totalVolumesUsed', 0))
        if gb_available <= 0 or volumes_available <= 0:
            # Over quota: grey the button out and annotate its label.
            if "disabled" not in self.classes:
                self.classes = [c for c in self.classes] + ['disabled']
                self.verbose_name = string_concat(self.verbose_name, ' ',
                                                  _("(Quota exceeded)"))
        else:
            # Back under quota: restore the original label and classes.
            self.verbose_name = _("Create Volume")
            classes = [c for c in self.classes if c != "disabled"]
            self.classes = classes
        return True
    def single(self, table, request, object_id=None):
        """AJAX refresh hook: re-run allowed() and re-render the button."""
        self.allowed(request, None)
        return HttpResponse(self.render())
class ExtendVolume(VolumePolicyTargetMixin, tables.LinkAction):
    """Row action: open the extend-volume modal."""
    name = "extend"
    verbose_name = _("Extend Volume")
    url = "horizon:project:volumes:volumes:extend"
    classes = ("ajax-modal", "btn-extend")
    policy_rules = (("volume", "volume:extend"),)
    def allowed(self, request, volume=None):
        # NOTE(review): unlike DeleteVolume.allowed, this assumes a non-None
        # volume datum is always supplied — confirm with the table framework.
        return volume.status == "available"
class EditAttachments(tables.LinkAction):
    """Row action: open the manage-attachments modal for a volume."""
    name = "attachments"
    verbose_name = _("Manage Attachments")
    url = "horizon:project:volumes:volumes:attach"
    classes = ("ajax-modal",)
    icon = "pencil"
    def allowed(self, request, volume=None):
        if volume:
            # Policy checks are scoped to the volume's owning project.
            project_id = getattr(volume, "os-vol-tenant-attr:tenant_id", None)
            attach_allowed = \
                policy.check((("compute", "compute:attach_volume"),),
                             request,
                             {"project_id": project_id})
            detach_allowed = \
                policy.check((("compute", "compute:detach_volume"),),
                             request,
                             {"project_id": project_id})
            # Either capability justifies showing the attachments dialog.
            if attach_allowed or detach_allowed:
                return volume.status in ("available", "in-use")
        return False
class CreateSnapshot(VolumePolicyTargetMixin, tables.LinkAction):
    """Row action: open the create-snapshot modal; disabled when over quota."""
    name = "snapshots"
    verbose_name = _("Create Snapshot")
    url = "horizon:project:volumes:volumes:create_snapshot"
    classes = ("ajax-modal",)
    icon = "camera"
    policy_rules = (("volume", "volume:create_snapshot"),)
    def allowed(self, request, volume=None):
        try:
            limits = api.cinder.tenant_absolute_limits(request)
        except Exception:
            # Best effort: if limits can't be fetched, report and treat
            # quota as unknown/unlimited.
            exceptions.handle(request, _('Unable to retrieve tenant limits.'))
            limits = {}
        snapshots_available = (limits.get('maxTotalSnapshots', float("inf"))
                               - limits.get('totalSnapshotsUsed', 0))
        if snapshots_available <= 0 and "disabled" not in self.classes:
            # Over quota: grey the action out and annotate its label.
            self.classes = [c for c in self.classes] + ['disabled']
            self.verbose_name = string_concat(self.verbose_name, ' ',
                                              _("(Quota exceeded)"))
        return volume.status in ("available", "in-use")
class CreateBackup(VolumePolicyTargetMixin, tables.LinkAction):
    """Row action: open the create-backup modal (when the backup service exists)."""
    name = "backups"
    verbose_name = _("Create Backup")
    url = "horizon:project:volumes:volumes:create_backup"
    classes = ("ajax-modal",)
    policy_rules = (("volume", "backup:create"),)
    def allowed(self, request, volume=None):
        # Requires the cinder-backup service and an unattached volume.
        return (cinder.volume_backup_supported(request) and
                volume.status == "available")
class UploadToImage(VolumePolicyTargetMixin, tables.LinkAction):
    """Row action: upload the volume's contents to the image service."""
    name = "upload_to_image"
    verbose_name = _("Upload to Image")
    url = "horizon:project:volumes:volumes:upload_to_image"
    classes = ("ajax-modal",)
    icon = "cloud-upload"
    policy_rules = (("volume", "volume:upload_to_image"),)
    def allowed(self, request, volume=None):
        # Requires glance access in addition to a usable volume status.
        has_image_service_perm = \
            request.user.has_perm('openstack.services.image')
        return (volume.status in ("available", "in-use") and
                has_image_service_perm)
class EditVolume(VolumePolicyTargetMixin, tables.LinkAction):
    """Row action: open the edit-volume (name/description) modal."""
    name = "edit"
    verbose_name = _("Edit Volume")
    url = "horizon:project:volumes:volumes:update"
    classes = ("ajax-modal",)
    icon = "pencil"
    policy_rules = (("volume", "volume:update"),)
    def allowed(self, request, volume=None):
        return volume.status in ("available", "in-use")
class RetypeVolume(VolumePolicyTargetMixin, tables.LinkAction):
    """Row action: open the change-volume-type modal (if retype is supported)."""
    name = "retype"
    verbose_name = _("Change Volume Type")
    url = "horizon:project:volumes:volumes:retype"
    classes = ("ajax-modal",)
    icon = "pencil"
    policy_rules = (("volume", "volume:retype"),)
    def allowed(self, request, volume=None):
        retype_supported = cinder.retype_supported()
        return volume.status in ("available", "in-use") and retype_supported
class UpdateRow(tables.Row):
    """AJAX-refreshable table row backed by a fresh cinder volume lookup."""
    ajax = True
    def get_data(self, request, volume_id):
        """Fetch the current volume state for an AJAX row update."""
        volume = cinder.volume_get(request, volume_id)
        return volume
def get_size(volume):
    """Render the volume's size as a translated '<n>GB' string."""
    return _("%sGB") % volume.size
def get_attachment_name(request, attachment):
    """Return HTML linking to the instance a volume attachment belongs to.

    Prefers the instance name embedded in the attachment dict; otherwise
    looks the server up via nova. Falls back to the plain name when no
    detail URL can be reversed.
    """
    server_id = attachment.get("server_id", None)
    if "instance" in attachment and attachment['instance']:
        name = attachment["instance"].name
    else:
        try:
            server = api.nova.server_get(request, server_id)
            name = server.name
        except Exception:
            # NOTE(review): on lookup failure `name` stays None and is
            # rendered as-is below — confirm this is the intended display.
            name = None
            exceptions.handle(request, _("Unable to retrieve "
                                         "attachment information."))
    try:
        url = reverse("horizon:project:instances:detail", args=(server_id,))
        # Name is escaped; the surrounding markup is trusted.
        instance = '<a href="%s">%s</a>' % (url, html.escape(name))
    except NoReverseMatch:
        instance = name
    return instance
class AttachmentColumn(tables.Column):
    """Customized column class.
    So it that does complex processing on the attachments
    for a volume instance.
    """
    def get_raw_data(self, volume):
        """Render 'Attached to <instance> on <device>' for each attachment."""
        request = self.table.request
        link = _('Attached to %(instance)s on %(dev)s')
        attachments = []
        # Filter out "empty" attachments which the client returns...
        for attachment in [att for att in volume.attachments if att]:
            # When a volume is attached it may return the server_id
            # without the server name...
            instance = get_attachment_name(request, attachment)
            vals = {"instance": instance,
                    "dev": html.escape(attachment.get("device", ""))}
            attachments.append(link % vals)
        # mark_safe: instance links are pre-escaped in get_attachment_name.
        return safestring.mark_safe(", ".join(attachments))
def get_volume_type(volume):
    """Return the volume's type name, or None when cinder reports "None"."""
    type_name = volume.volume_type
    if type_name == "None":
        return None
    return type_name
def get_encrypted_value(volume):
    """Render the volume's encryption flag: "-" unknown, else Yes/No."""
    encrypted = getattr(volume, 'encrypted', None)
    if encrypted is None:
        return "-"
    if encrypted is False:
        return _("No")
    return _("Yes")
class VolumesTableBase(tables.DataTable):
    """Shared column definitions for volume tables."""
    # (status, is_terminal) pairs: None means "still transitioning",
    # True/False mark success/failure end states for row polling.
    STATUS_CHOICES = (
        ("in-use", True),
        ("available", True),
        ("creating", None),
        ("error", False),
        ("error_extending", False),
    )
    name = tables.Column("name",
                         verbose_name=_("Name"),
                         link="horizon:project:volumes:volumes:detail")
    description = tables.Column("description",
                                verbose_name=_("Description"),
                                truncate=40)
    size = tables.Column(get_size,
                         verbose_name=_("Size"),
                         attrs={'data-type': 'size'})
    status = tables.Column("status",
                           filters=(filters.title,),
                           verbose_name=_("Status"),
                           status=True,
                           status_choices=STATUS_CHOICES)
    def get_object_display(self, obj):
        """Use the volume's name as its human-readable identifier."""
        return obj.name
class VolumesFilterAction(tables.FilterAction):
    def filter(self, table, volumes, filter_string):
        """Naive case-insensitive search."""
        q = filter_string.lower()
        # NOTE(review): assumes every volume has a non-None name — confirm.
        return [volume for volume in volumes
                if q in volume.name.lower()]
class VolumesTable(VolumesTableBase):
    """The main project volumes table, with full row and table actions."""
    # Redeclared to control column ordering relative to the extra columns.
    name = tables.Column("name",
                         verbose_name=_("Name"),
                         link="horizon:project:volumes:volumes:detail")
    volume_type = tables.Column(get_volume_type,
                                verbose_name=_("Type"),
                                empty_value="-")
    attachments = AttachmentColumn("attachments",
                                   verbose_name=_("Attached To"))
    availability_zone = tables.Column("availability_zone",
                                      verbose_name=_("Availability Zone"))
    bootable = tables.Column('is_bootable',
                             verbose_name=_("Bootable"),
                             filters=(filters.yesno, filters.capfirst))
    encryption = tables.Column(get_encrypted_value,
                               verbose_name=_("Encrypted"))
    class Meta:
        name = "volumes"
        verbose_name = _("Volumes")
        # Poll rows (via UpdateRow) while 'status' is in a transient state.
        status_columns = ["status"]
        row_class = UpdateRow
        table_actions = (CreateVolume, DeleteVolume, VolumesFilterAction)
        row_actions = (EditVolume, ExtendVolume, LaunchVolume, EditAttachments,
                       CreateSnapshot, CreateBackup, RetypeVolume,
                       UploadToImage, DeleteVolume)
class DetachVolume(tables.BatchAction):
    """Batch action: detach attachments from their instances via nova."""
    name = "detach"
    classes = ('btn-danger', 'btn-detach')
    policy_rules = (("compute", "compute:detach_volume"),)
    @staticmethod
    def action_present(count):
        return ungettext_lazy(
            u"Detach Volume",
            u"Detach Volumes",
            count
        )
    # This action is asynchronous.
    @staticmethod
    def action_past(count):
        return ungettext_lazy(
            u"Detaching Volume",
            u"Detaching Volumes",
            count
        )
    def action(self, request, obj_id):
        """Detach the attachment row's volume from its server."""
        attachment = self.table.get_object_by_id(obj_id)
        api.nova.instance_volume_detach(request,
                                        attachment.get('server_id', None),
                                        obj_id)
    def get_success_url(self, request):
        """Return to the volumes index after the batch completes."""
        return reverse('horizon:project:volumes:index')
class AttachedInstanceColumn(tables.Column):
    """Customized column class that does complex processing on the attachments
    for a volume instance.
    """
    def get_raw_data(self, attachment):
        """Render the (pre-escaped) instance link for this attachment row."""
        request = self.table.request
        return safestring.mark_safe(get_attachment_name(request, attachment))
class AttachmentsTable(tables.DataTable):
    """Table of a volume's attachments (rows are attachment dicts)."""
    instance = AttachedInstanceColumn(get_attachment_name,
                                      verbose_name=_("Instance"))
    device = tables.Column("device",
                           verbose_name=_("Device"))
    def get_object_id(self, obj):
        # Attachment rows are dicts, not objects with an .id attribute.
        return obj['id']
    def get_object_display(self, attachment):
        """Describe the attachment as 'Volume <v> on instance <i>'."""
        instance_name = get_attachment_name(self.request, attachment)
        vals = {"volume_name": attachment['volume_name'],
                "instance_name": html.strip_tags(instance_name)}
        return _("Volume %(volume_name)s on instance %(instance_name)s") % vals
    def get_object_by_id(self, obj_id):
        """Linear search of the table data for the attachment with obj_id."""
        for obj in self.data:
            if self.get_object_id(obj) == obj_id:
                return obj
        raise ValueError('No match found for the id "%s".' % obj_id)
    class Meta:
        name = "attachments"
        verbose_name = _("Attachments")
        table_actions = (DetachVolume,)
        row_actions = (DetachVolume,)
| |
import numpy as np
import os
import cv2
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
import glob
# Import everything needed to process video clip for later
from moviepy.editor import VideoFileClip
from IPython.display import HTML
# Load in the chessboard calibration images to a list.
cal_image_loc = glob.glob('camera_cal/*.jpg')
calibration_images = [mpimg.imread(fname) for fname in cal_image_loc]
# Prepare object points: the (x, y, 0) grid positions of the 9x6 inner
# chessboard corners (z is 0 because the board is planar).
objp = np.zeros((6*9, 3), np.float32)
objp[:, :2] = np.mgrid[0:9, 0:6].T.reshape(-1, 2)
# Arrays for later storing object points and image points.
objpoints = []
imgpoints = []
# Iterate through images for their points.
for image in calibration_images:
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    ret, corners = cv2.findChessboardCorners(gray, (9, 6), None)
    if ret:  # idiom fix: was `if ret == True:`
        objpoints.append(objp)
        imgpoints.append(corners)
        # Annotate the image in place for visual verification.
        cv2.drawChessboardCorners(image, (9, 6), corners, ret)
# Compute the camera calibration (mtx/dist are used by pipeline() below).
# NOTE(review): this reuses `gray` from the last loop iteration for the image
# size and will raise if no calibration image yielded corners — confirm the
# camera_cal/ directory always contains detectable chessboards.
ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, gray.shape[::-1], None, None)
# Define a class to receive the characteristics of each line detection
class Left_Line():
    """State holder for the left lane line, carried across video frames."""
    def __init__(self):
        """Start with no detection history."""
        # Whether the previous frame produced a usable fit.
        self.detected = False
        # History of recent polynomial coefficient sets.
        self.recent_fit = []
        # Coefficients averaged over the stored history.
        self.best_fit = None
        # Coefficients from the most recent fit attempt.
        self.current_fit = [np.array([False])]
        # Coefficient delta between the last and the newest fit.
        self.diffs = np.array([0, 0, 0], dtype='float')
        # Pixel x positions of the detected line.
        self.allx = None
        # Pixel y positions of the detected line.
        self.ally = None
        # Bad-frame counter; a reset is triggered after 5 problem frames.
        self.counter = 0
class Right_Line():
    """State holder for the right lane line, carried across video frames."""
    def __init__(self):
        """Start with no detection history."""
        # Whether the previous frame produced a usable fit.
        self.detected = False
        # History of recent polynomial coefficient sets.
        self.recent_fit = []
        # Coefficients averaged over the stored history.
        self.best_fit = None
        # Coefficients from the most recent fit attempt.
        self.current_fit = [np.array([False])]
        # Coefficient delta between the last and the newest fit.
        self.diffs = np.array([0, 0, 0], dtype='float')
        # Pixel x positions of the detected line.
        self.allx = None
        # Pixel y positions of the detected line.
        self.ally = None
        # Bad-frame counter; a reset is triggered after 5 problem frames.
        self.counter = 0
def pipeline(img, s_thresh=(125, 255), sx_thresh=(10, 100), R_thresh = (200, 255), sobel_kernel = 3):
    """Create a binary activation image from a road image.

    Thresholds the R color channel, the HLS S channel, and the Sobel-x
    gradient of the L channel; a pixel is activated when at least two of
    the three individual thresholds agree.

    Args:
        img: RGB road image; undistorted internally using global mtx/dist.
        s_thresh: inclusive (low, high) threshold for the S channel.
        sx_thresh: inclusive (low, high) threshold for scaled Sobel-x.
        R_thresh: inclusive (low, high) threshold for the R channel.
        sobel_kernel: kernel size for the Sobel operator.

    Returns:
        2D binary (0/1) image of combined activations.
    """
    distorted_img = np.copy(img)
    # Undistort using the camera calibration computed at module load
    dst = cv2.undistort(distorted_img, mtx, dist, None, mtx)
    # Pull the red channel (lane lines are bright in R)
    R = dst[:,:,0]
    # Convert to HLS colorspace.
    # BUG FIX: np.float was deprecated in NumPy 1.20 and removed in 1.24;
    # np.float64 is the equivalent concrete dtype.
    hls = cv2.cvtColor(dst, cv2.COLOR_RGB2HLS).astype(np.float64)
    l_channel = hls[:,:,1]
    s_channel = hls[:,:,2]
    # Sobel-x: derivative in x, absolute value, then rescale to 0-255
    sobelx = cv2.Sobel(l_channel, cv2.CV_64F, 1, 0, ksize = sobel_kernel)
    abs_sobelx = np.absolute(sobelx)
    scaled_sobelx = np.uint8(255*abs_sobelx/np.max(abs_sobelx))
    # Threshold x gradient
    sxbinary = np.zeros_like(scaled_sobelx)
    sxbinary[(scaled_sobelx >= sx_thresh[0]) & (scaled_sobelx <= sx_thresh[1])] = 1
    # Threshold R color channel
    R_binary = np.zeros_like(R)
    R_binary[(R >= R_thresh[0]) & (R <= R_thresh[1])] = 1
    # Threshold S color channel
    s_binary = np.zeros_like(s_channel)
    s_binary[(s_channel >= s_thresh[0]) & (s_channel <= s_thresh[1])] = 1
    # Activate where any two of the three binaries are both activated
    combined_binary = np.zeros_like(sxbinary)
    combined_binary[((s_binary == 1) & (sxbinary == 1)) | ((sxbinary == 1) & (R_binary == 1))
                    | ((s_binary == 1) & (R_binary == 1))] = 1
    return combined_binary
def birds_eye(img, mtx, dist):
    """Warp a road image to a top-down ("bird's eye") perspective.

    Runs the image through `pipeline` to obtain the thresholded binary
    image, undistorts it, then perspective-warps it using fixed source
    points placed along the lane-line edges.

    Returns:
        tuple: (top-down binary image, perspective transform matrix M)
    """
    # Binary activation image of the road
    binary_img = pipeline(img)
    # Undistort with the supplied calibration.
    # NOTE(review): pipeline() already undistorts internally; this second
    # undistort reproduces the original behavior -- confirm intent.
    undist = cv2.undistort(binary_img, mtx, dist, None, mtx)
    w, h = undist.shape[1], undist.shape[0]
    img_size = (w, h)
    # Source quad: hand-picked points along the lane-line edges
    src = np.float32([[690,450],[1110,h],[175,h],[595,450]])
    # Destination quad: rectangle inset horizontally by a fixed offset
    offset = 300
    dst = np.float32([[w-offset, 0],[w-offset, h],
                      [offset, h],[offset, 0]])
    # Compute the transform matrix and warp to the top-down view
    M = cv2.getPerspectiveTransform(src, dst)
    top_down = cv2.warpPerspective(undist, M, img_size)
    return top_down, M
def count_check(line):
    """Force a fresh sliding-window search after five consecutive failures.

    Clears ``line.detected`` once ``line.counter`` has reached 5, which
    makes the next frame fall back to ``first_lines``.
    """
    if line.counter < 5:
        return
    line.detected = False
def first_lines(img, mtx, dist):
    """Locate both lane lines from scratch with a sliding-window search.

    Builds a histogram of binary activations over the bottom half of the
    bird's-eye image, starts a window at each histogram peak, and steps the
    windows up the image, recentering on the mean pixel position. Updates
    the global ``left_line``/``right_line`` state in place; returns None.

    Args:
        img: RGB video frame.
        mtx: camera matrix from calibration.
        dist: distortion coefficients from calibration.
    """
    # Load the birds eye image and transform matrix from birds_eye
    binary_warped, perspective_M = birds_eye(img, mtx, dist)
    # Histogram of the bottom half of the image.
    # BUG FIX: `shape[0]/2` is a float and raises TypeError when used as a
    # slice index on modern NumPy; use integer floor division instead.
    histogram = np.sum(binary_warped[binary_warped.shape[0]//2:,:], axis=0)
    # Output image to draw on and visualize the result
    out_img = np.dstack((binary_warped, binary_warped, binary_warped))*255
    # Peaks of the left and right histogram halves are the starting points.
    # BUG FIX: np.int was removed in NumPy 1.24; plain int arithmetic via //
    # is equivalent here.
    midpoint = histogram.shape[0]//2
    leftx_base = np.argmax(histogram[:midpoint])
    rightx_base = np.argmax(histogram[midpoint:]) + midpoint
    # Number of sliding windows and their height
    nwindows = 9
    window_height = binary_warped.shape[0]//nwindows
    # x and y positions of all nonzero pixels in the image
    nonzero = binary_warped.nonzero()
    nonzeroy = np.array(nonzero[0])
    nonzerox = np.array(nonzero[1])
    # Current window centers, updated as the windows step upward
    leftx_current = leftx_base
    rightx_current = rightx_base
    # Window half-width and minimum pixel count needed to recenter
    margin = 100
    minpix = 50
    # Per-window lists of left/right lane pixel indices
    left_lane_inds = []
    right_lane_inds = []
    # Step through the windows one by one, bottom to top
    for window in range(nwindows):
        win_y_low = binary_warped.shape[0] - (window+1)*window_height
        win_y_high = binary_warped.shape[0] - window*window_height
        win_xleft_low = leftx_current - margin
        win_xleft_high = leftx_current + margin
        win_xright_low = rightx_current - margin
        win_xright_high = rightx_current + margin
        # Draw the windows on the visualization image
        cv2.rectangle(out_img,(win_xleft_low,win_y_low),(win_xleft_high,win_y_high),(0,255,0), 2)
        cv2.rectangle(out_img,(win_xright_low,win_y_low),(win_xright_high,win_y_high),(0,255,0), 2)
        # Nonzero pixels in x and y that fall inside each window
        good_left_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) & (nonzerox >= win_xleft_low) & (nonzerox < win_xleft_high)).nonzero()[0]
        good_right_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) & (nonzerox >= win_xright_low) & (nonzerox < win_xright_high)).nonzero()[0]
        left_lane_inds.append(good_left_inds)
        right_lane_inds.append(good_right_inds)
        # If > minpix pixels found, recenter the next window on their mean
        if len(good_left_inds) > minpix:
            leftx_current = int(np.mean(nonzerox[good_left_inds]))
        if len(good_right_inds) > minpix:
            rightx_current = int(np.mean(nonzerox[good_right_inds]))
    # Concatenate the per-window index arrays
    left_lane_inds = np.concatenate(left_lane_inds)
    right_lane_inds = np.concatenate(right_lane_inds)
    # Extract left and right line pixel positions
    leftx = nonzerox[left_lane_inds]
    lefty = nonzeroy[left_lane_inds]
    rightx = nonzerox[right_lane_inds]
    righty = nonzeroy[right_lane_inds]

    def _fit_line(line, ys, xs):
        """Fit a 2nd-order polynomial and update one line's rolling state.

        np.polyfit can raise on empty/degenerate input (seen on the
        challenge videos); on failure mark the line undetected so the
        caller keeps using the last good fit.
        """
        n = 5  # number of recent fits to average
        try:
            line.current_fit = np.polyfit(ys, xs, 2)
            line.all_x = xs
            line.all_y = ys
            line.recent_fit.append(line.current_fit)
            if len(line.recent_fit) > 1:
                line.diffs = (line.recent_fit[-2] - line.recent_fit[-1]) / line.recent_fit[-2]
            line.recent_fit = line.recent_fit[-n:]
            line.best_fit = np.mean(line.recent_fit, axis = 0)
            line.detected = True
            line.counter = 0
        except (TypeError, np.linalg.LinAlgError):
            line.detected = False

    # Fit each line; results live on the global line objects (the original
    # code also bound unused local left_fit/right_fit variables).
    _fit_line(left_line, lefty, leftx)
    _fit_line(right_line, righty, rightx)
def second_ord_poly(line, val):
    """Evaluate the quadratic with coefficients ``line`` == [a, b, c] at ``val``.

    Used by draw_lines to find where each lane line meets the bottom of the
    image when computing the car's distance from lane center.
    """
    a, b, c = line[0], line[1], line[2]
    return a * val ** 2 + b * val + c
def draw_lines(img, mtx, dist):
    """Draw the detected lane area and annotations onto a video frame.

    If either line was lost on the previous frame, falls back to the full
    sliding-window search in ``first_lines``; otherwise searches only within
    a margin around the previous polynomial fits. Annotates the frame with
    the radius of curvature and the car's offset from lane center, then
    overlays the lane polygon warped back into the camera view.

    Args:
        img: RGB video frame.
        mtx: camera matrix from calibration.
        dist: distortion coefficients from calibration.

    Returns:
        The frame with the lane polygon and text annotations drawn.
    """
    # Pull in the top-down binary image and its perspective matrix
    binary_warped, perspective_M = birds_eye(img, mtx, dist)
    # Re-run the full window search when either line was lost.
    # BUG FIX: the original `left_line.detected == False | right_line.detected
    # == False` parsed as a chained comparison (| binds tighter than ==) and
    # only fired when BOTH flags were False; use explicit boolean logic.
    if not left_line.detected or not right_line.detected:
        first_lines(img, mtx, dist)
    # Start from the most recent fits
    left_fit = left_line.current_fit
    right_fit = right_line.current_fit
    # Lane-pixel candidates within `margin` of the previous fits
    nonzero = binary_warped.nonzero()
    nonzeroy = np.array(nonzero[0])
    nonzerox = np.array(nonzero[1])
    margin = 100
    left_lane_inds = ((nonzerox > (left_fit[0]*(nonzeroy**2) + left_fit[1]*nonzeroy + left_fit[2] - margin)) & (nonzerox < (left_fit[0]*(nonzeroy**2) + left_fit[1]*nonzeroy + left_fit[2] + margin)))
    right_lane_inds = ((nonzerox > (right_fit[0]*(nonzeroy**2) + right_fit[1]*nonzeroy + right_fit[2] - margin)) & (nonzerox < (right_fit[0]*(nonzeroy**2) + right_fit[1]*nonzeroy + right_fit[2] + margin)))
    # Pixel positions on each line
    leftx = nonzerox[left_lane_inds]
    lefty = nonzeroy[left_lane_inds]
    rightx = nonzerox[right_lane_inds]
    righty = nonzeroy[right_lane_inds]
    # Refit a second-order polynomial to each line. np.polyfit can raise on
    # empty/degenerate input; fall back to the best averaged fit on failure.
    n = 5  # number of recent fits to average
    # Left line first
    try:
        left_line.current_fit = np.polyfit(lefty, leftx, 2)
        left_line.all_x = leftx
        left_line.all_y = lefty
        left_line.recent_fit.append(left_line.current_fit)
        if len(left_line.recent_fit) > 1:
            left_line.diffs = (left_line.recent_fit[-2] - left_line.recent_fit[-1]) / left_line.recent_fit[-2]
        left_line.recent_fit = left_line.recent_fit[-n:]
        left_line.best_fit = np.mean(left_line.recent_fit, axis = 0)
        left_fit = left_line.current_fit
        left_line.detected = True
        left_line.counter = 0
    except (TypeError, np.linalg.LinAlgError):
        left_fit = left_line.best_fit
        # BUG FIX: the failure counter was never incremented anywhere, so
        # count_check could never trigger a reset; count consecutive
        # failures here (successes above reset it to 0).
        left_line.counter += 1
        count_check(left_line)
    # Now the right line
    try:
        right_line.current_fit = np.polyfit(righty, rightx, 2)
        right_line.all_x = rightx
        right_line.all_y = righty
        right_line.recent_fit.append(right_line.current_fit)
        if len(right_line.recent_fit) > 1:
            right_line.diffs = (right_line.recent_fit[-2] - right_line.recent_fit[-1]) / right_line.recent_fit[-2]
        right_line.recent_fit = right_line.recent_fit[-n:]
        right_line.best_fit = np.mean(right_line.recent_fit, axis = 0)
        right_fit = right_line.current_fit
        right_line.detected = True
        right_line.counter = 0
    except (TypeError, np.linalg.LinAlgError):
        right_fit = right_line.best_fit
        right_line.counter += 1
        count_check(right_line)
    # Generate x and y values for plotting
    fity = np.linspace(0, binary_warped.shape[0]-1, binary_warped.shape[0] )
    fit_leftx = left_fit[0]*fity**2 + left_fit[1]*fity + left_fit[2]
    fit_rightx = right_fit[0]*fity**2 + right_fit[1]*fity + right_fit[2]
    # Images for visualizing the selection window
    out_img = np.dstack((binary_warped, binary_warped, binary_warped))*255
    window_img = np.zeros_like(out_img)
    # Color in left (red) and right (blue) line pixels
    out_img[nonzeroy[left_lane_inds], nonzerox[left_lane_inds]] = [255, 0, 0]
    out_img[nonzeroy[right_lane_inds], nonzerox[right_lane_inds]] = [0, 0, 255]
    # Polygons illustrating the search window area, recast into the
    # point format cv2.fillPoly expects
    left_line_window1 = np.array([np.transpose(np.vstack([fit_leftx-margin, fity]))])
    left_line_window2 = np.array([np.flipud(np.transpose(np.vstack([fit_leftx+margin, fity])))])
    left_line_pts = np.hstack((left_line_window1, left_line_window2))
    right_line_window1 = np.array([np.transpose(np.vstack([fit_rightx-margin, fity]))])
    right_line_window2 = np.array([np.flipud(np.transpose(np.vstack([fit_rightx+margin, fity])))])
    right_line_pts = np.hstack((right_line_window1, right_line_window2))
    # Pixel-space curve radius (recomputed in meters below)
    y_eval = np.max(fity)
    left_curverad = ((1 + (2*left_fit[0]*y_eval + left_fit[1])**2)**1.5) / np.absolute(2*left_fit[0])
    right_curverad = ((1 + (2*right_fit[0]*y_eval + right_fit[1])**2)**1.5) / np.absolute(2*right_fit[0])
    # Conversions from pixel space to meters
    ym_per_pix = 30/720 # meters per pixel in y dimension
    xm_per_pix = 3.7/700 # meters per pixel in x dimension
    # Fit new polynomials to x,y in world space
    left_fit_cr = np.polyfit(left_line.all_y*ym_per_pix, left_line.all_x*xm_per_pix, 2)
    right_fit_cr = np.polyfit(right_line.all_y*ym_per_pix, right_line.all_x*xm_per_pix, 2)
    # Radii of curvature in meters, averaged for display
    left_curverad = ((1 + (2*left_fit_cr[0]*y_eval*ym_per_pix + left_fit_cr[1])**2)**1.5) / np.absolute(2*left_fit_cr[0])
    right_curverad = ((1 + (2*right_fit_cr[0]*y_eval*ym_per_pix + right_fit_cr[1])**2)**1.5) / np.absolute(2*right_fit_cr[0])
    avg_rad = round(np.mean([left_curverad, right_curverad]),0)
    rad_text = "Radius of Curvature = {}(m)".format(avg_rad)
    # Camera (car) position: the middle of the image, in meters
    middle_of_image = img.shape[1] / 2
    car_position = middle_of_image * xm_per_pix
    # Middle of the lane: the two line bases at the bottom of the image
    left_line_base = second_ord_poly(left_fit_cr, img.shape[0] * ym_per_pix)
    right_line_base = second_ord_poly(right_fit_cr, img.shape[0] * ym_per_pix)
    lane_mid = (left_line_base+right_line_base)/2
    # Distance from center, worded differently for left vs right
    dist_from_center = lane_mid - car_position
    if dist_from_center >= 0:
        center_text = "{} meters left of center".format(round(dist_from_center,2))
    else:
        center_text = "{} meters right of center".format(round(-dist_from_center,2))
    # Annotate position and curvature on the frame
    font = cv2.FONT_HERSHEY_SIMPLEX
    cv2.putText(img, center_text, (10,50), font, 1,(255,255,255),2)
    cv2.putText(img, rad_text, (10,100), font, 1,(255,255,255),2)
    # Invert the perspective matrix to map the lane polygon back to camera view
    Minv = np.linalg.inv(perspective_M)
    # Blank warped-space canvas to draw the lane polygon on
    warp_zero = np.zeros_like(binary_warped).astype(np.uint8)
    color_warp = np.dstack((warp_zero, warp_zero, warp_zero))
    # Recast the x and y points into usable format for cv2.fillPoly()
    pts_left = np.array([np.transpose(np.vstack([fit_leftx, fity]))])
    pts_right = np.array([np.flipud(np.transpose(np.vstack([fit_rightx, fity])))])
    pts = np.hstack((pts_left, pts_right))
    # Draw the lane onto the warped blank image
    cv2.fillPoly(color_warp, np.int_([pts]), (0,255, 0))
    # Warp back to original image space and overlay onto the frame
    newwarp = cv2.warpPerspective(color_warp, Minv, (img.shape[1], img.shape[0]))
    result = cv2.addWeighted(img, 1, newwarp, 0.3, 0)
    return result
def process_image(image):
    """Run the full lane-detection pipeline on a single video frame.

    Returns the frame annotated with the detected lane area, the radius of
    curvature, and the car's position relative to lane center.
    """
    return draw_lines(image, mtx, dist)
# Instantiate the global line-state objects read and written by the
# functions above (first_lines / draw_lines mutate them in place)
left_line = Left_Line()
right_line = Right_Line()
# Convert to video
# vid_output is where the annotated video will be saved to
vid_output = 'reg_vid.mp4'
# The file referenced in clip1 is the original video before anything has been done to it
# NOTE(review): assumes VideoFileClip (moviepy) was imported earlier -- confirm.
clip1 = VideoFileClip("project_video.mp4")
# fl_image applies process_image to every frame
# NOTE: this function expects color images
vid_clip = clip1.fl_image(process_image)
vid_clip.write_videofile(vid_output, audio=False)
| |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
# Generic return type used by the per-operation `cls` response hooks
T = TypeVar('T')
# Signature of the optional `cls` callback:
# (pipeline response, deserialized body, response headers) -> Any
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class FirewallPolicyRuleGroupsOperations:
    """FirewallPolicyRuleGroupsOperations async operations.
    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.
    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.network.v2019_06_01.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """
    # NOTE: auto-generated code (see file header); hand edits will be lost
    # on regeneration. Only comments/docstrings are added here.
    models = _models
    def __init__(self, client, config, serializer, deserializer) -> None:
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config
    async def _delete_initial(
        self,
        resource_group_name: str,
        firewall_policy_name: str,
        rule_group_name: str,
        **kwargs: Any
    ) -> None:
        """Send the initial DELETE request of the long-running delete operation.

        Returns None on 200/202/204; ``begin_delete`` drives polling to
        completion. Raises HttpResponseError on any other status code.
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-06-01"
        # Construct URL
        url = self._delete_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'firewallPolicyName': self._serialize.url("firewall_policy_name", firewall_policy_name, 'str'),
            'ruleGroupName': self._serialize.url("rule_group_name", rule_group_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        request = self._client.delete(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200, 202, 204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        if cls:
            return cls(pipeline_response, None, {})
    _delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/firewallPolicies/{firewallPolicyName}/ruleGroups/{ruleGroupName}'}  # type: ignore
    async def begin_delete(
        self,
        resource_group_name: str,
        firewall_policy_name: str,
        rule_group_name: str,
        **kwargs: Any
    ) -> AsyncLROPoller[None]:
        """Deletes the specified FirewallPolicyRuleGroup.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param firewall_policy_name: The name of the Firewall Policy.
        :type firewall_policy_name: str
        :param rule_group_name: The name of the FirewallPolicyRuleGroup.
        :type rule_group_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling.
         Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[None]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        # Only issue the initial request when not resuming from a token
        if cont_token is None:
            raw_result = await self._delete_initial(
                resource_group_name=resource_group_name,
                firewall_policy_name=firewall_policy_name,
                rule_group_name=rule_group_name,
                cls=lambda x,y,z: x,
                **kwargs
            )
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            # Delete has no body; just pass the raw response through `cls`
            if cls:
                return cls(pipeline_response, None, {})
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'firewallPolicyName': self._serialize.url("firewall_policy_name", firewall_policy_name, 'str'),
            'ruleGroupName': self._serialize.url("rule_group_name", rule_group_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        # Delete LROs resolve their final state via the `location` header
        if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/firewallPolicies/{firewallPolicyName}/ruleGroups/{ruleGroupName}'}  # type: ignore
    async def get(
        self,
        resource_group_name: str,
        firewall_policy_name: str,
        rule_group_name: str,
        **kwargs: Any
    ) -> "_models.FirewallPolicyRuleGroup":
        """Gets the specified FirewallPolicyRuleGroup.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param firewall_policy_name: The name of the Firewall Policy.
        :type firewall_policy_name: str
        :param rule_group_name: The name of the FirewallPolicyRuleGroup.
        :type rule_group_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: FirewallPolicyRuleGroup, or the result of cls(response)
        :rtype: ~azure.mgmt.network.v2019_06_01.models.FirewallPolicyRuleGroup
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.FirewallPolicyRuleGroup"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-06-01"
        accept = "application/json"
        # Construct URL
        url = self.get.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'firewallPolicyName': self._serialize.url("firewall_policy_name", firewall_policy_name, 'str'),
            'ruleGroupName': self._serialize.url("rule_group_name", rule_group_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = self._deserialize('FirewallPolicyRuleGroup', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/firewallPolicies/{firewallPolicyName}/ruleGroups/{ruleGroupName}'}  # type: ignore
    async def _create_or_update_initial(
        self,
        resource_group_name: str,
        firewall_policy_name: str,
        rule_group_name: str,
        parameters: "_models.FirewallPolicyRuleGroup",
        **kwargs: Any
    ) -> "_models.FirewallPolicyRuleGroup":
        """Send the initial PUT request of the long-running create-or-update.

        Returns the deserialized FirewallPolicyRuleGroup from a 200/201
        response; ``begin_create_or_update`` drives polling to completion.
        Raises HttpResponseError on any other status code.
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.FirewallPolicyRuleGroup"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-06-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"
        # Construct URL
        url = self._create_or_update_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'firewallPolicyName': self._serialize.url("firewall_policy_name", firewall_policy_name, 'str'),
            'ruleGroupName': self._serialize.url("rule_group_name", rule_group_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(parameters, 'FirewallPolicyRuleGroup')
        body_content_kwargs['content'] = body_content
        request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200, 201]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        if response.status_code == 200:
            deserialized = self._deserialize('FirewallPolicyRuleGroup', pipeline_response)
        if response.status_code == 201:
            deserialized = self._deserialize('FirewallPolicyRuleGroup', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    _create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/firewallPolicies/{firewallPolicyName}/ruleGroups/{ruleGroupName}'}  # type: ignore
    async def begin_create_or_update(
        self,
        resource_group_name: str,
        firewall_policy_name: str,
        rule_group_name: str,
        parameters: "_models.FirewallPolicyRuleGroup",
        **kwargs: Any
    ) -> AsyncLROPoller["_models.FirewallPolicyRuleGroup"]:
        """Creates or updates the specified FirewallPolicyRuleGroup.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param firewall_policy_name: The name of the Firewall Policy.
        :type firewall_policy_name: str
        :param rule_group_name: The name of the FirewallPolicyRuleGroup.
        :type rule_group_name: str
        :param parameters: Parameters supplied to the create or update FirewallPolicyRuleGroup
         operation.
        :type parameters: ~azure.mgmt.network.v2019_06_01.models.FirewallPolicyRuleGroup
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling.
         Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either FirewallPolicyRuleGroup or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2019_06_01.models.FirewallPolicyRuleGroup]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.FirewallPolicyRuleGroup"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        # Only issue the initial request when not resuming from a token
        if cont_token is None:
            raw_result = await self._create_or_update_initial(
                resource_group_name=resource_group_name,
                firewall_policy_name=firewall_policy_name,
                rule_group_name=rule_group_name,
                parameters=parameters,
                cls=lambda x,y,z: x,
                **kwargs
            )
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            # Deserialize the final response body into the model type
            deserialized = self._deserialize('FirewallPolicyRuleGroup', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'firewallPolicyName': self._serialize.url("firewall_policy_name", firewall_policy_name, 'str'),
            'ruleGroupName': self._serialize.url("rule_group_name", rule_group_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        # Create/update LROs resolve their final state via Azure-AsyncOperation
        if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/firewallPolicies/{firewallPolicyName}/ruleGroups/{ruleGroupName}'}  # type: ignore
    def list(
        self,
        resource_group_name: str,
        firewall_policy_name: str,
        **kwargs: Any
    ) -> AsyncIterable["_models.FirewallPolicyRuleGroupListResult"]:
        """Lists all FirewallPolicyRuleGroups in a FirewallPolicy resource.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param firewall_policy_name: The name of the Firewall Policy.
        :type firewall_policy_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either FirewallPolicyRuleGroupListResult or the result of cls(response)
        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2019_06_01.models.FirewallPolicyRuleGroupListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.FirewallPolicyRuleGroupListResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-06-01"
        accept = "application/json"
        def prepare_request(next_link=None):
            # Build the first-page request from metadata, or follow next_link
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
            if not next_link:
                # Construct URL
                url = self.list.metadata['url']  # type: ignore
                path_format_arguments = {
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                    'firewallPolicyName': self._serialize.url("firewall_policy_name", firewall_policy_name, 'str'),
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                # next_link already embeds the query string
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request
        async def extract_data(pipeline_response):
            # Deserialize one page and hand back (next link, items)
            deserialized = self._deserialize('FirewallPolicyRuleGroupListResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)
        async def get_next(next_link=None):
            # Fetch one page, mapping error status codes to exceptions
            request = prepare_request(next_link)
            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
            return pipeline_response
        return AsyncItemPaged(
            get_next, extract_data
        )
    list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/firewallPolicies/{firewallPolicyName}/ruleGroups'}  # type: ignore
| |
# -*- coding: utf-8 -*-
"""
@file
@brief Defines runpython directives.
See `Tutorial: Writing a simple extension
<https://www.sphinx-doc.org/en/master/development/tutorials/helloworld.html>`_
"""
import sys
import os
from contextlib import redirect_stdout, redirect_stderr
import traceback
import warnings
from io import StringIO
import sphinx
from docutils import nodes, core
from docutils.parsers.rst import Directive, directives
from docutils.statemachine import StringList
from sphinx.util.nodes import nested_parse_with_titles
from ..loghelper.flog import run_cmd
from ..texthelper.texts_language import TITLES
from ..pycode.code_helper import remove_extra_spaces_and_pep8
from .sphinx_collapse_extension import collapse_node
class RunPythonCompileError(Exception):
    """Raised when a piece of code included in the documentation
    fails to compile."""
class RunPythonExecutionError(Exception):
    """Raised when a piece of code included in the documentation
    raises an exception while it is executed."""
def run_python_script(script, params=None, comment=None, setsysvar=None, process=False,
                      exception=False, warningout=None, chdir=None, context=None,
                      store_in_file=None):
    """
    Executes a script :epkg:`python` as a string.

    @param  script          python script
    @param  params          params to add before the execution
    @param  comment         message to add in a exception when the script fails
    @param  setsysvar       if not None, add a member to module *sys*,
                            set up this variable to True,
                            if is remove after the execution
    @param  process         run the script in a separate process
    @param  exception       expects an exception to be raised,
                            fails if it is not, the function returns no output and the
                            error message
    @param  warningout      warning to disable (name of warnings)
    @param  chdir           change directory before running this script (if not None)
    @param  context         if not None, added to the local context
    @param  store_in_file   stores the script into this file
                            and calls tells python the source can be found here,
                            that is useful is the script is using module
                            ``inspect`` to retrieve the source which are not
                            stored in memory
    @return                 stdout, stderr, context

    If the execution throws an exception such as
    ``NameError: name 'math' is not defined`` after importing
    the module ``math``. It comes from the fact
    the domain name used by the function
    `exec <https://docs.python.org/3/library/functions.html#exec>`_
    contains the declared objects. Example:

    ::

        import math
        def coordonnees_polaires(x,y):
            rho = math.sqrt(x*x+y*y)
            theta = math.atan2 (y,x)
            return rho, theta
        coordonnees_polaires(1, 1)

    The code can be modified into:

    ::

        def fake_function():
            import math
            def coordonnees_polaires(x,y):
                rho = math.sqrt(x*x+y*y)
                theta = math.atan2 (y,x)
                return rho, theta
            coordonnees_polaires(1, 1)
        fake_function()

    Section :ref:`l-image-rst-runpython` explains
    how to display an image with this directive.

    .. versionchanged:: 1.9
        Parameter *store_in_file* was added.
    """
    def warning_filter(warningout):
        # Maps *warningout* onto warnings.simplefilter calls:
        # None/'' enables all warnings, a string is split on whitespace and
        # re-processed as a list, a list is eval'ed into warning classes
        # which are then individually ignored.
        if warningout in (None, ''):
            warnings.simplefilter("always")
        elif isinstance(warningout, str):
            li = [_.strip() for _ in warningout.split()]
            warning_filter(li)
        elif isinstance(warningout, list):
            def interpret(s):
                # NOTE(review): eval on a warning-class name coming from the
                # directive options -- assumed trusted documentation input.
                return eval(s) if isinstance(s, str) else s
            warns = [interpret(w) for w in warningout]
            for w in warns:
                warnings.simplefilter("ignore", w)
        else:
            raise ValueError(
                "Unexpected value for warningout: {0}".format(warningout))

    if params is None:
        params = {}

    if process:
        # --- subprocess execution path ---
        if context is not None and len(context) != 0:
            raise RunPythonExecutionError(  # pragma: no cover
                "context cannot be used if the script runs in a separate process.")

        cmd = sys.executable
        header = ["# coding: utf-8", "import sys"]
        if setsysvar:
            header.append("sys.{0} = True".format(setsysvar))
        # Tries to make the documented project importable from the subprocess
        # by appending a 'source' (then 'src') folder found in sys.path.
        add = 0
        for path in sys.path:
            if path.endswith("source") or path.endswith("source/") or path.endswith("source\\"):
                header.append("sys.path.append('{0}')".format(
                    path.replace("\\", "\\\\")))
                add += 1
        if add == 0:
            for path in sys.path:
                if path.endswith("src") or path.endswith("src/") or path.endswith("src\\"):
                    header.append("sys.path.append('{0}')".format(
                        path.replace("\\", "\\\\")))
                    add += 1
        if add == 0:
            # It did not find any path linked to the copy of
            # the current module in the documentation
            # it assumes the first path of `sys.path` is part
            # of the unit test.
            path = sys.path[0]
            path = os.path.join(path, "..", "..", "src")
            if os.path.exists(path):
                header.append("sys.path.append('{0}')".format(
                    path.replace("\\", "\\\\")))
                add += 1
            else:
                path = sys.path[0]
                path = os.path.join(path, "src")
                if os.path.exists(path):
                    header.append("sys.path.append('{0}')".format(
                        path.replace("\\", "\\\\")))
                    add += 1
        if add == 0:
            # We do nothing unless the execution failed.
            exc_path = RunPythonExecutionError(
                "Unable to find a path to add:\n{0}".format("\n".join(sys.path)))
        else:
            exc_path = None
        header.append('')
        script = "\n".join(header) + script

        if store_in_file is not None:
            # Persist the script so tools like inspect can find its source;
            # the file path is passed on the command line instead of stdin.
            with open(store_in_file, "w", encoding="utf-8") as f:
                f.write(script)
            script_arg = None
            cmd += ' ' + store_in_file
        else:
            script_arg = script

        try:
            out, err = run_cmd(cmd, script_arg, wait=True, change_path=chdir)
            return out, err, None
        except Exception as ee:  # pragma: no cover
            if not exception:
                message = ("--SCRIPT--\n{0}\n--PARAMS--\n{1}\n--COMMENT--\n"
                           "{2}\n--ERR--\n{3}\n--OUT--\n{4}\n--EXC--\n{5}"
                           "").format(script, params, comment, "",
                                      str(ee), ee)
                if exc_path:
                    message += "\n---EXC--\n{0}".format(exc_path)
                raise RunPythonExecutionError(message) from ee
            # Expected failure: return the error text as both streams.
            return str(ee), str(ee), None
    else:
        # --- in-process execution path (compile + exec) ---
        if store_in_file:
            raise NotImplementedError(
                "store_in_file is only implemented if process is True.")
        try:
            obj = compile(script, "", "exec")
        except Exception as ec:  # pragma: no cover
            if comment is None:
                comment = ""
            if not exception:
                message = "SCRIPT:\n{0}\nPARAMS\n{1}\nCOMMENT\n{2}".format(
                    script, params, comment)
                raise RunPythonCompileError(message) from ec
            # NOTE(review): message text looks garbled -- probably meant
            # "Cannot compile due to {0}"; confirm before changing it.
            return "", "Cannot compile the do to {0}".format(ec), None

        globs = globals().copy()
        loc = locals()
        for k, v in params.items():
            loc[k] = v
        loc["__dict__"] = params
        if context is not None:
            # Restored variables are exposed to the script through globals
            # under a '__runpython__' prefix (see the directive's :restore:).
            for k, v in context.items():
                globs["__runpython__" + k] = v
        globs['__runpython__script__'] = script

        if setsysvar is not None:
            sys.__dict__[setsysvar] = True

        sout = StringIO()
        serr = StringIO()
        # NOTE(review): both stdout and stderr are redirected into *sout*,
        # so *serr* always stays empty and gerr below is "" -- confirm
        # whether redirect_stderr(serr) was intended.
        with redirect_stdout(sout):
            with redirect_stderr(sout):
                with warnings.catch_warnings():
                    warning_filter(warningout)

                    if chdir is not None:
                        current = os.getcwd()
                        os.chdir(chdir)

                    try:
                        exec(obj, globs, loc)
                    except Exception as ee:
                        if chdir is not None:
                            os.chdir(current)
                        if setsysvar is not None:
                            del sys.__dict__[setsysvar]
                        if comment is None:
                            comment = ""
                        gout = sout.getvalue()
                        gerr = serr.getvalue()

                        # Strip this module's own frames from the traceback.
                        excs = traceback.format_exc()
                        lines = excs.split("\n")
                        excs = "\n".join(
                            _ for _ in lines if "sphinx_runpython_extension.py" not in _)

                        if not exception:
                            message = ("--SCRIPT--\n{0}\n--PARAMS--\n{1}\n--COMMENT--"
                                       "\n{2}\n--ERR--\n{3}\n--OUT--\n{4}\n--EXC--"
                                       "\n{5}\n--TRACEBACK--\n{6}").format(
                                script, params, comment, gout, gerr,
                                ee, excs)
                            raise RunPythonExecutionError(message) from ee
                        # Expected exception: fold it into the returned streams.
                        return (gout + "\n" + gerr), (gerr + "\n" + excs), None

        if chdir is not None:
            os.chdir(current)
        if setsysvar is not None:
            del sys.__dict__[setsysvar]

        gout = sout.getvalue()
        gerr = serr.getvalue()
        # Variables stored under the '__runpython__' prefix are returned so a
        # later directive can restore them; len("__runpython__") == 13.
        avoid = {"__runpython____WD__",
                 "__runpython____k__", "__runpython____w__"}
        context = {k[13:]: v for k, v in globs.items() if k.startswith(
            "__runpython__") and k not in avoid}
        return gout, gerr, context
class runpython_node(nodes.Structural, nodes.Element):
    """Docutils node produced by the ``.. runpython::`` directive."""
class RunPythonDirective(Directive):
    """
    Extracts script to run described by ``.. runpython::``
    and modifies the documentation.

    .. exref::
        :title: A python script which generates documentation

        The following code prints the version of Python
        on the standard output. It is added to the documentation::

            .. runpython::
                :showcode:

                import sys
                print("sys.version_info=", str(sys.version_info))

        If give the following results:

        .. runpython::

            import sys
            print("sys.version_info=", str(sys.version_info))

        Options *showcode* can be used to display the code.

    The option *rst* will assume the output is in RST format and must be
    interpreted. *showout* will complement the RST output with the raw format.

    The directive has a couple of options:

    * ``:assert:`` condition to validate at the end of the execution
      to check it went right
    * ``:current:`` runs the script in the source file directory
    * ``:exception:`` the code throws an exception but it is expected. The error is displayed.
    * ``:indent:<int>`` to indent the output
    * ``:language:``: changes ``::`` into ``.. code-block:: language``
    * ``:linenos:`` to show line numbers
    * ``:nopep8:`` if present, leaves the code as it is and does not apply pep8 by default,
      see @see fn remove_extra_spaces_and_pep8.
    * ``:numpy_precision: <precision>``, run ``numpy.set_printoptions(precision=...)``,
      precision is 3 by default
    * ``:process:`` run the script in an another process
    * ``:restore:`` restore the local context stored in :epkg:`sphinx` application
      by the previous call to *runpython*
    * ``:rst:`` to interpret the output, otherwise, it is considered as raw text
    * ``:setsysvar:`` adds a member to *sys* module, the module can act differently based on that information,
      if the value is left empty, *sys.enable_disabled_documented_pieces_of_code* will be be set up to *True*.
    * ``:showcode:`` to show the code before its output
    * ``:showout`` if *:rst:* is set up, this flag adds the raw rst output to check what is happening
    * ``:sin:<text_for_in>`` which text to display before the code (by default *In*)
    * ``:sout:<text_for_in>`` which text to display before the output (by default *Out*)
    * ``:sphinx:`` by default, function `nested_parse_with_titles
      <https://www.sphinx-doc.org/en/master/extdev/markupapi.html?highlight=nested_parse#parsing-directive-content-as-rest>`_ is
      used to parse the output of the script, if this option is set to false,
      `public_doctree <http://code.nabla.net/doc/docutils/api/docutils/core/docutils.core.publish_doctree.html>`_.
    * ``:store:`` stores the local context in :epkg:`sphinx` application to restore it later
      by another call to *runpython*
    * ``:toggle:`` add a button to hide or show the code, it takes the values
      ``code`` or ``out`` or ``both``. The direction then hides the given section
      but adds a button to show it.
    * ``:warningout:`` name of warnings to disable (ex: ``ImportWarning``),
      separated by spaces
    * ``:store_in_file:`` the directive store the script in a file,
      then executes this file (only if ``:process:`` is enabled),
      this trick is needed when the script to executes relies on
      function such :epkg:`*py:inspect:getsource` which requires
      the script to be stored somewhere in order to retrieve it.

    Option *rst* can be used the following way::

        .. runpython::
            :rst:

            for l in range(0,10):
                print("**line**", "*" +str(l)+"*")
                print('')

    Which displays interpreted :epkg:`RST`:

    .. runpython::
        :rst:

        for l in range(0,10):
            print("**line**", "*" +str(l)+"*")
            print('')

    If the directive produces RST text to be included later in the documentation,
    it is able to interpret
    `docutils directives <http://docutils.sourceforge.net/docs/ref/rst/directives.html>`_
    and `Sphinx directives
    <https://www.sphinx-doc.org/en/master/usage/restructuredtext/directives.html>`_
    with function `nested_parse_with_titles <http://sphinx-doc.org/extdev/
    markupapi.html?highlight=nested_parse>`_. However, if this text contains
    titles, it is better to use option ``:sphinx: false``.
    Unless *process* option is enabled, global variables cannot be used.
    `sphinx-autorun <https://pypi.org/project/sphinx-autorun/>`_ offers a similar
    service except it cannot produce compile :epkg:`RST` content,
    hide the source and a couple of other options.
    Option *toggle* can hide or unhide the piece of code
    or/and its output.

    The directive also adds local variables such as
    ``__WD__`` which contains the path to the documentation
    which contains the directive. It is useful to load additional
    files ``os.path.join(__WD__, ...)``.

    .. runpython::
        :toggle: out
        :showcode:

        print("Hide or unhide this output.")

    .. versionchanged:: 1.9
        Options *store_in_file* was added.
    """
    # Directive contract: no arguments, everything comes from options/content.
    required_arguments = 0
    optional_arguments = 0
    final_argument_whitespace = True
    option_spec = {
        'indent': directives.unchanged,
        'showcode': directives.unchanged,
        'showout': directives.unchanged,
        'rst': directives.unchanged,
        'sin': directives.unchanged,
        'sout': directives.unchanged,
        'sphinx': directives.unchanged,
        'sout2': directives.unchanged,
        'setsysvar': directives.unchanged,
        'process': directives.unchanged,
        'exception': directives.unchanged,
        'nopep8': directives.unchanged,
        'warningout': directives.unchanged,
        'toggle': directives.unchanged,
        'current': directives.unchanged,
        'assert': directives.unchanged,
        'language': directives.unchanged,
        'store': directives.unchanged,
        'restore': directives.unchanged,
        'numpy_precision': directives.unchanged,
        'store_in_file': directives.unchanged,
        'linenos': directives.unchanged,
    }
    has_content = True
    # Node class to instantiate; subclasses may override it.
    runpython_class = runpython_node

    def run(self):
        """
        Extracts the information in a dictionary,
        runs the script.

        @return      a list of nodes
        """
        # settings
        sett = self.state.document.settings
        language_code = sett.language_code
        lineno = self.lineno

        # add the instance to the global settings
        if hasattr(sett, "out_runpythonlist"):
            sett.out_runpythonlist.append(self)

        # env
        if hasattr(self.state.document.settings, "env"):
            env = self.state.document.settings.env
        else:
            env = None

        if env is None:
            docname = "___unknown_docname___"
        else:
            docname = env.docname

        # post
        # Accepted truthy spellings for option values; bool_set_ also treats
        # a bare flag (empty string value) as true.
        bool_set = (True, 1, "True", "1", "true")
        bool_set_ = (True, 1, "True", "1", "true", '')
        p = {
            'showcode': 'showcode' in self.options,
            'linenos': 'linenos' in self.options,
            'showout': 'showout' in self.options,
            'rst': 'rst' in self.options,
            'sin': self.options.get('sin', TITLES[language_code]["In"]),
            'sout': self.options.get('sout', TITLES[language_code]["Out"]),
            'sout2': self.options.get('sout2', TITLES[language_code]["Out2"]),
            'sphinx': 'sphinx' not in self.options or self.options['sphinx'] in bool_set,
            'setsysvar': self.options.get('setsysvar', None),
            'process': 'process' in self.options and self.options['process'] in bool_set_,
            'exception': 'exception' in self.options and self.options['exception'] in bool_set_,
            'nopep8': 'nopep8' in self.options and self.options['nopep8'] in bool_set_,
            'warningout': self.options.get('warningout', '').strip(),
            'toggle': self.options.get('toggle', '').strip(),
            'current': 'current' in self.options and self.options['current'] in bool_set_,
            'assert': self.options.get('assert', '').strip(),
            'language': self.options.get('language', '').strip(),
            'store_in_file': self.options.get('store_in_file', None),
            'numpy_precision': self.options.get('numpy_precision', '3').strip(),
            'store': 'store' in self.options and self.options['store'] in bool_set_,
            'restore': 'restore' in self.options and self.options['restore'] in bool_set_,
        }

        if p['setsysvar'] is not None and len(p['setsysvar']) == 0:
            p['setsysvar'] = 'enable_disabled_documented_pieces_of_code'
        dind = 0 if p['rst'] else 4
        p['indent'] = int(self.options.get("indent", dind))

        # run the script
        # The user's content is wrapped into a function (or an `if True:`
        # block when running in a separate process) to isolate its names.
        name = "run_python_script_{0}".format(id(p))
        if p['process']:
            content = ["if True:"]
        else:
            content = ["def {0}():".format(name)]

        if "numpy" in "\n".join(self.content) and p['numpy_precision'] not in (None, 'None', '-', ''):
            try:
                import numpy  # pylint: disable=W0611
                prec = int(p['numpy_precision'])
                content.append("    import numpy")
                content.append("    numpy.set_printoptions(%d)" % prec)
            except (ImportError, ValueError):  # pragma: no cover
                pass

        # Placeholder replaced below by the __WD__ assignment.
        content.append('    ## __WD__ ##')

        if p["restore"]:
            context = getattr(env, "runpython_context", None)
            for k in sorted(context):
                content.append(
                    "    {0} = globals()['__runpython__{0}']".format(k))
        else:
            context = None

        modified_content = self.modify_script_before_running(
            "\n".join(self.content))

        if p['assert']:
            # Appends one `if not(cond): raise AssertionError` per line of
            # the :assert: option.
            footer = []
            assert_condition = p['assert'].split('\n')
            for cond in assert_condition:
                footer.append("if not({0}):".format(cond))
                footer.append(
                    "    raise AssertionError('''Condition '{0}' failed.''')".format(cond))
            modified_content += "\n\n" + "\n".join(footer)

        for line in modified_content.split("\n"):
            content.append("    " + line)

        if p["store"]:
            content.append('    for __k__, __v__ in locals().copy().items():')
            content.append(
                "        globals()['__runpython__' + __k__] = __v__")

        if not p['process']:
            content.append("{0}()".format(name))
        script = "\n".join(content)
        script_disp = "\n".join(self.content)
        if not p["nopep8"]:
            try:
                script_disp = remove_extra_spaces_and_pep8(
                    script_disp, is_string=True)
            except Exception as e:  # pragma: no cover
                if '.' in docname:
                    comment = ' File "{0}", line {1}'.format(docname, lineno)
                else:
                    comment = ' File "{0}.rst", line {1}\n File "{0}.py", line {1}\n'.format(
                        docname, lineno)
                raise ValueError(
                    "Pep8 issue with\n'{0}'\n---SCRIPT---\n{1}".format(docname, script)) from e

        # if an exception is raised, the documentation should report a warning
        # return [document.reporter.warning('messagr', line=self.lineno)]
        current_source = self.state.document.current_source
        docstring = ":docstring of " in current_source
        if docstring:
            current_source = current_source.split(":docstring of ")[0]

        if os.path.exists(current_source):
            comment = ' File "{0}", line {1}'.format(current_source, lineno)
            if docstring:
                new_name = os.path.split(current_source)[0] + ".py"
                comment += '\n File "{0}", line {1}'.format(new_name, lineno)
            cs_source = current_source
        else:
            if '.' in docname:
                comment = ' File "{0}", line {1}'.format(docname, lineno)
            else:
                comment = ' File "{0}.rst", line {1}\n File "{0}.py", line {1}\n'.format(
                    docname, lineno)
            cs_source = docname

        # Add __WD__.
        cs_source_dir = os.path.dirname(cs_source).replace("\\", "/")
        script = script.replace(
            '## __WD__ ##', "__WD__ = '{0}'".format(cs_source_dir))

        out, err, context = run_python_script(script, comment=comment, setsysvar=p['setsysvar'],
                                              process=p["process"], exception=p['exception'],
                                              warningout=p['warningout'],
                                              chdir=cs_source_dir if p['current'] else None,
                                              context=context, store_in_file=p['store_in_file'])

        if p['store']:
            # Stores modified local context.
            setattr(env, "runpython_context", context)
        else:
            context = {}
            setattr(env, "runpython_context", context)

        if out is not None:
            out = out.rstrip(" \n\r\t")
        if err is not None:
            err = err.rstrip(" \n\r\t")
        content = out
        if len(err) > 0:
            content += "\n[runpythonerror]\n" + err

        # add member
        self.exe_class = p.copy()
        self.exe_class.update(dict(out=out, err=err, script=script))

        # add indent
        def add_indent(content, nbind):
            "Indents every line of *content* by *nbind* spaces."
            lines = content.split("\n")
            if nbind > 0:
                lines = [(" " * nbind + _) for _ in lines]
            content = "\n".join(lines)
            return content

        content = add_indent(content, p['indent'])

        # build node
        node = self.__class__.runpython_class(rawsource=content, indent=p["indent"],
                                              showcode=p["showcode"], rst=p["rst"],
                                              sin=p["sin"], sout=p["sout"])

        if p["showcode"]:
            # Optional collapsible wrapper around the displayed source code.
            if 'code' in p['toggle'] or 'both' in p['toggle']:
                hide = TITLES[language_code]['hide'] + \
                    ' ' + TITLES[language_code]['code']
                unhide = TITLES[language_code]['unhide'] + \
                    ' ' + TITLES[language_code]['code']
                secin = collapse_node(hide=hide, unhide=unhide, show=False)
                node += secin
            else:
                secin = node
            pin = nodes.paragraph(text=p["sin"])
            if p['language'] in (None, ''):
                p['language'] = 'python'
            if p['language']:
                pcode = nodes.literal_block(
                    script_disp, script_disp, language=p['language'],
                    linenos=p['linenos'])
            else:
                pcode = nodes.literal_block(
                    script_disp, script_disp, linenos=p['linenos'])
            secin += pin
            secin += pcode
        elif len(self.options.get('sout', '')) == 0:
            p["sout"] = ''
            p["sout2"] = ''

        # RST output.
        if p["rst"]:
            settings_overrides = {}
            try:
                sett.output_encoding
            except KeyError:  # pragma: no cover
                settings_overrides["output_encoding"] = "unicode"
            # try:
            #     sett.doctitle_xform
            # except KeyError:
            #     settings_overrides["doctitle_xform"] = True
            try:
                sett.warning_stream
            except KeyError:  # pragma: no cover
                settings_overrides["warning_stream"] = StringIO()
            # 'initial_header_level': 2,

            secout = node
            if 'out' in p['toggle'] or 'both' in p['toggle']:
                hide = TITLES[language_code]['hide'] + \
                    ' ' + TITLES[language_code]['outl']
                unhide = TITLES[language_code]['unhide'] + \
                    ' ' + TITLES[language_code]['outl']
                secout = collapse_node(hide=hide, unhide=unhide, show=False)
                node += secout
            elif len(p["sout"]) > 0:
                secout += nodes.paragraph(text=p["sout"])

            try:
                if p['sphinx']:
                    st = StringList(content.replace("\r", "").split("\n"))
                    nested_parse_with_titles(self.state, st, secout)
                    dt = None
                else:
                    dt = core.publish_doctree(
                        content, settings=sett,
                        settings_overrides=settings_overrides)
            except Exception as e:  # pragma: no cover
                # Parsing failed: render a literal block containing the
                # traceback plus diagnostic context instead of the output.
                tab = content
                content = ["::"]
                st = StringIO()
                traceback.print_exc(file=st)
                content.append("")
                trace = st.getvalue()
                trace += "\n----------------------OPT\n" + str(p)
                trace += "\n----------------------EXC\n" + str(e)
                trace += "\n----------------------SETT\n" + str(sett)
                trace += "\n----------------------ENV\n" + str(env)
                trace += "\n----------------------DOCNAME\n" + str(docname)
                trace += "\n----------------------CODE\n"
                content.extend("    " + _ for _ in trace.split("\n"))
                content.append("")
                content.append("")
                content.extend("    " + _ for _ in tab.split("\n"))
                content = "\n".join(content)
                pout = nodes.literal_block(content, content)
                secout += pout
                dt = None

            if dt is not None:
                for ch in dt.children:
                    node += ch

        # Regular output.
        if not p["rst"] or p["showout"]:
            text = p["sout2"] if p["rst"] else p["sout"]
            secout = node
            if 'out' in p['toggle'] or 'both' in p['toggle']:
                hide = TITLES[language_code]['hide'] + \
                    ' ' + TITLES[language_code]['outl']
                unhide = TITLES[language_code]['unhide'] + \
                    ' ' + TITLES[language_code]['outl']
                secout = collapse_node(hide=hide, unhide=unhide, show=False)
                node += secout
            elif len(text) > 0:
                pout2 = nodes.paragraph(text=text)
                node += pout2
            pout = nodes.literal_block(content, content)
            secout += pout

        p['runpython'] = node

        # classes
        node['classes'] += ["runpython"]
        ns = [node]
        return ns

    def modify_script_before_running(self, script):
        """
        Takes the script as a string
        and returns another string before it is run.
        It does not modify what is displayed.
        The function can be overwritten by any class
        based on this one.
        """
        return script
def visit_runpython_node(self, node):
    """
    Entering a @see cl runpython_node: intentionally a no-op.
    A format-specific builder may register a different callable
    instead of this one if it needs a different behaviour.
    """
def depart_runpython_node(self, node):
    """
    Leaving a @see cl runpython_node: intentionally a no-op.
    A format-specific builder may register a different callable
    instead of this one if it needs a different behaviour.
    """
def setup(app):
    """
    Registers the ``runpython`` directive, its node and the
    ``out_runpythonlist`` config value with :epkg:`sphinx`.
    """
    app.add_config_value('out_runpythonlist', [], 'env')
    if hasattr(app, "add_mapping"):
        app.add_mapping('runpython', runpython_node)

    # The same (visit, depart) pair is used for every output format.
    handlers = (visit_runpython_node, depart_runpython_node)
    formats = ('html', 'epub', 'elatex', 'latex', 'rst', 'md', 'text')
    app.add_node(runpython_node, **{fmt: handlers for fmt in formats})

    app.add_directive('runpython', RunPythonDirective)
    return {'version': sphinx.__display_version__, 'parallel_read_safe': True}
| |
# pylint: disable=star-args, locally-disabled, too-few-public-methods, no-self-use, invalid-name
"""test_cmds.py - Unittests related to command implementations."""
import sys, os, unittest
sys.path.append(os.path.join(os.path.dirname(__file__), '..', '..'))
from haproxy import cmds
class TestCommands(unittest.TestCase):
"""Tests all of the commands."""
def setUp(self):
    """Builds the table of expected command strings (self.Resp) shared by every test."""
    self.maxDiff = None
    # PEM bundle (leaf certificate + RSA private key + intermediate CA)
    # embedded in the 'update-ssl-cert' command payload below.
    # NOTE(review): appears to be a Let's Encrypt staging ("Fake LE") chain
    # for test.andeman.de -- test fixture only, not a real secret in use.
    self.pem_cert_content = """
-----BEGIN CERTIFICATE-----
MIIGNjCCBR6gAwIBAgITAPoWnilNUBNcAb8iJ2dgK1eXeTANBgkqhkiG9w0BAQsF
ADAiMSAwHgYDVQQDDBdGYWtlIExFIEludGVybWVkaWF0ZSBYMTAeFw0yMTAyMDMw
ODQ2MTBaFw0yMTA1MDQwODQ2MTBaMBoxGDAWBgNVBAMTD3Rlc3QuYW5kZW1hbi5k
ZTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAL7DSlOfRdoKZdX825O4
Q+uEN85NYR/SJtSLDfaaRebanbDzxp90PEIHCqZyf0q7Zz5eF6qd2ycldtJSVk8b
lVOyJjPIOLUrUAeF6I07b/AOBO/8DU9G3lARSOQkPmC80ahGAW3F1eaccf08qncW
CGxKKXmeL9mbAsA4k6+6pIq8YRBqMCE2bkRQ/scAa8pL7ms5hceONWfqjHC12zIp
yavvnfNVZ6z7QlwHEh3Rajk1IaHLyE7+9+oQ3zXqFtM6sBvXlvVhwsizgkH3ZodN
81ycvHoP1MWqHGHX0klREQ9qRrHuSuqHsjJHX8gtbqI2Z9DVOUUEunbIkImTwqYj
e5tp7g4RQJUgAdsauyN02NTdeUeci+JDvA3FHJpAtA7tDXIeNcyPjRho17i4VUIc
Yasu5JDF0iSPDT/Srxt6EsDntDFDco1HXMsFqUhMbY2+gUWC3P0n98VWSO+BCtAd
Fbc4+N3QEM8RnQKI86WHR/vnVDoigOhALupXa6czjLGMjaSLDI0nyJ5M81r8ZuBZ
Wu2Q6HTikNmoWl3w6x+9WvY6TQd9OpCjQUu13UMVAco8CGEOj0ZqhhLTccX8dxPK
/01bXMtFRivJfe6vML+O0N54JbI5caXmaEdcEuazAVJWt1ZPGFTMjiw/O0S6Hb0V
YJKXqjJs9t95O5MpL9W4YvGxAgMBAAGjggJrMIICZzAOBgNVHQ8BAf8EBAMCBaAw
HQYDVR0lBBYwFAYIKwYBBQUHAwEGCCsGAQUFBwMCMAwGA1UdEwEB/wQCMAAwHQYD
VR0OBBYEFHQLXiD/GxQD11ocGiFauejS5RRmMB8GA1UdIwQYMBaAFMDMA0a5WCDM
XHJw8+EuyyCm9Wg6MHcGCCsGAQUFBwEBBGswaTAyBggrBgEFBQcwAYYmaHR0cDov
L29jc3Auc3RnLWludC14MS5sZXRzZW5jcnlwdC5vcmcwMwYIKwYBBQUHMAKGJ2h0
dHA6Ly9jZXJ0LnN0Zy1pbnQteDEubGV0c2VuY3J5cHQub3JnLzAaBgNVHREEEzAR
gg90ZXN0LmFuZGVtYW4uZGUwTAYDVR0gBEUwQzAIBgZngQwBAgEwNwYLKwYBBAGC
3xMBAQEwKDAmBggrBgEFBQcCARYaaHR0cDovL2Nwcy5sZXRzZW5jcnlwdC5vcmcw
ggEDBgorBgEEAdZ5AgQCBIH0BIHxAO8AdQAW6GnB0ZXq18P4lxrj8HYB94zhtp0x
qFIYtoN/MagVCAAAAXdnSPbpAAAEAwBGMEQCICAST5iJD7DVrcKRvu9rvNVVnkOW
hAYUgihWr/1Gu6VdAiAcRcZYBP0hIHmFExM9ehJ+J7YmqM35SyiC7s0chsNdHQB2
AN2ZNPyl5ySAyVZofYE0mQhJskn3tWnYx7yrP1zB825kAAABd2dI+N0AAAQDAEcw
RQIgaaUndm8O3+nCl5OHTf6rOdi9VF9szVckdgDargdWKkgCIQCAjW4UvuMIv4Bt
c6auowPcpdqHjL8XRcztJA3XUGRGHTANBgkqhkiG9w0BAQsFAAOCAQEABza4/ocY
J/XwN8PP+Ane7fVerqL7mRfhzJhxz4mbCPfv4Drq3kUu9fnhR/vaGgdaNdnO83a9
PUBCm6FCPMcVwX0uKDJ9J4Xj+SVjnVu4+7uhS5LyygtaegoBZyMb5ppxWH1n5r47
10ug+KptERFf1datb8/jsEVF7rYCtPXBygjfGAbGuCxViakr4BNcOBPNL+MusfvP
qpH8kEyPAIwHX02XvvpLTy77qiyTpQSuFOusOJptNNqBUeBehqpf8FHn01fnKkcW
pKmFJ2e2VSnTZIBJvD58HMR+WNAEp7tHffHk2z/mPPtdRdxW5Zieoe5+6+HDtwgG
+VCAIWMkC36Dvg==
-----END CERTIFICATE-----
-----BEGIN RSA PRIVATE KEY-----
MIIJKgIBAAKCAgEAvsNKU59F2gpl1fzbk7hD64Q3zk1hH9Im1IsN9ppF5tqdsPPG
n3Q8QgcKpnJ/SrtnPl4Xqp3bJyV20lJWTxuVU7ImM8g4tStQB4XojTtv8A4E7/wN
T0beUBFI5CQ+YLzRqEYBbcXV5pxx/TyqdxYIbEopeZ4v2ZsCwDiTr7qkirxhEGow
ITZuRFD+xwBrykvuazmFx441Z+qMcLXbMinJq++d81VnrPtCXAcSHdFqOTUhocvI
Tv736hDfNeoW0zqwG9eW9WHCyLOCQfdmh03zXJy8eg/UxaocYdfSSVERD2pGse5K
6oeyMkdfyC1uojZn0NU5RQS6dsiQiZPCpiN7m2nuDhFAlSAB2xq7I3TY1N15R5yL
4kO8DcUcmkC0Du0Nch41zI+NGGjXuLhVQhxhqy7kkMXSJI8NP9KvG3oSwOe0MUNy
jUdcywWpSExtjb6BRYLc/Sf3xVZI74EK0B0Vtzj43dAQzxGdAojzpYdH++dUOiKA
6EAu6ldrpzOMsYyNpIsMjSfInkzzWvxm4Fla7ZDodOKQ2ahaXfDrH71a9jpNB306
kKNBS7XdQxUByjwIYQ6PRmqGEtNxxfx3E8r/TVtcy0VGK8l97q8wv47Q3nglsjlx
peZoR1wS5rMBUla3Vk8YVMyOLD87RLodvRVgkpeqMmz233k7kykv1bhi8bECAwEA
AQKCAgEAswbSPXJPetahRdcdNyAKVgBq4ykJinSOTpAF1bZo/cOTlFrjwAe0+X5k
R1tTDQ6dURG7AjtNTgrB3Za6O1m2paqeYaB5X8U7QSQx4EG0xsRRa+vPjeQDhX8D
OmCtTdpGpLa2Zo/xM5EFBVUm4cYCt6ZOED4dyAnK5hzytUvjWfR6343Yh4LurxyY
TqidgGgMZALDA0n54wFjNe/lu8kt5Ddns9MmDlhrqbRVEzjSiMfNPWvjHAf7IGcf
JBkBvNDqL+b/XGCYDgUxrLkDNt44E2VhGOi8lZkVM9n5FyeGbEIgAKKTGlGpMbh8
MoA4wPFwMrO5IIXUfN+zjfnnBkZsnAomGQYDh/hrsQPwU7MoyfO0Wzw+RzLWK8JH
EnjR7O/Lgh+A2AdLhCLiRC5td2uuJ2yLRIRUlcQPsCsYnCCL6Ip9IwK1idmQySGw
bG83decXNSJUv5h3qF6f3fl+JPrHnAbviBzEJ67xAf1MdHbFxwYvRFVfEHj9RZ3W
z+cw7ofD8XVHTfXn0XipvYqI/bVsitMXI35pOt+/ZV8rjJlXopw+IV6U9/60cBkk
BXC7ONDyH2pNwxPbRgcLm2sEK0L9qhxRzCj0iD1WyOAiFJX4ytVbJhR7pt0goiun
i2XDh2l8hoK1lKZNS/yJ+VhnbX595mdqScmIXD8utlgK8f0bLfECggEBAORXimSK
gzegnsBjieTtzC6MmRRxxN46vnMZ2LCeLMxhs3vM7LBcBfsQYqbt/FVFtYBRpr+d
TGTmfPXqKuSqbtAbghxAMo/lECXzALa0nQSsz1fFhX8B7slFarsDmmCb1GmXF/kG
ku/Uoa7jmY3htBj5rjVHjDKPZFVetU+2wbuwlU17Bj4nlSzqud4NMlu56pm3FZ/1
BAhMxm3z6dLnOgqJzpN1QmKZHNkjLmi8fza/HQM5pP3DpQcPiyuLzywGIqHaO1qT
OIdpZfLEvNpMV7bJ2bagv5nX3TVRWWsBkh0HCAuH30qqaVPpQvkPem1zsM3x+D5q
+PhMIPGpbQiUyCUCggEBANXefd0ZcJymG15WJyO44eFwzgMz9ezfdB8INa+vCOiZ
Y7FtYDgEKu4uzBxtMjO4mQO6DCkfi7JwTJFN4ag3dJEJNGmrf7Xe84IAImJQk0Of
BojAXCFAuNf1Xl3prkvnvtzNirwQMHCUbv5wYzOqglgj2i/hjIj3/Wbt91riq5j+
4qQT4kkw/XgCtbQ27HohKIcC/mXbHchEi7NtXrGoM1xqmu1mGH1uul3LQ6p5VwHc
ZFiIAC0awsx9Qe9khZ5EGpZuS0tqJsREcv8ygYMvWcPJEv8aMQM7Nj4biA5rKEgo
L+66ibpntldvbz2qntEvJ2rKzGci0RDUQHy4sW8/d50CggEBAKCZaX7ZZPzk/YL2
/2+CSQ+cV7ZnZj2fN4Ag96UROxTsyp4SPY60yogQuDIMRGN9SfDcfNlcOvTkn5Me
hdiafqHkFxjjlixawYbPaPsYAS/ek156UDBKHbZ2GmE6YYP9VeKGIJhHpWUFOkqV
TdTaoB7IzVwv3E1bSQg6Om+8bHoj8n6yPmvMz0DuPpgM1BRrqLNAb/c3DwT/ari+
ywBJHSt4TVCtMmnCouWdtvB3U0ogFLnF+2N4DUPwDMQt6yJdllIb+Y706NdkrA2Z
jfJDq5WmVnf6i4gaqTzs4GVAj5HW9jOV9ti/DqGz+CTQXB1LN1lCDIVqG34XnTwb
G9LjQfkCggEAZwYAt4tTtgJGWNFDlW+wT/sZIm3bX7ncpD4+Ll0w+2s4nPXFTfaj
/4zHgkIP1t5rx2HODdlGYDS8jZpow7HDE0LN3sFgienWf5808QtDhWWLrkCLoPEe
mdl3FeJFtgby6EaTODjMPM8kEKlvACp5E6BhsIMEQc7EYNrtNvjOFKtj3go+DWfu
EeusQB3dGI/0h+UnS0WcOSbb7RkYbphJ9ZDdBNMTpQi7+ga6l9pP0XOrWwJYo2Gq
yPrl0j4oJ69C54hF+RQvjIg0pT5dKSacJTYtUnn5dkcFwDFe/yMbinbhcCynwAXJ
zqC9g4U3cCk44bbDdENPVr4IOox13NND+QKCAQEAilm2oMZoP3WGkBMTSzJl6OGd
F8NnE95noleknNFYuThhCT6T4Z1s28VpxXV7d0DTNOtXj+TzeZq4jrwkgOSZbif0
8ky4gRZmm0iFwvAu8ZXk1olHbhMZnCOfh0Qhd4bU2tSoWgWVIAQWEHUhDI7Q1rsX
s4sCjYHKuNMEKdfYvxtKeiunoFqdmT65hwM9o3TfvJfm/RChb7i/nVruXQ6IhPEM
9WYZS7hlKyqVBESJuonR15biy7Xov5ELl6A821cskZO3vTwtlBSeCDiqaeVLpKR3
aYwf5YZo7v+N8KBSLEdLNjoKK4PfXUdczD7uOUllbd4/MRgCn4EmFvmpljGiEQ==
-----END RSA PRIVATE KEY-----
-----BEGIN CERTIFICATE-----
MIIEqzCCApOgAwIBAgIRAIvhKg5ZRO08VGQx8JdhT+UwDQYJKoZIhvcNAQELBQAw
GjEYMBYGA1UEAwwPRmFrZSBMRSBSb290IFgxMB4XDTE2MDUyMzIyMDc1OVoXDTM2
MDUyMzIyMDc1OVowIjEgMB4GA1UEAwwXRmFrZSBMRSBJbnRlcm1lZGlhdGUgWDEw
ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDtWKySDn7rWZc5ggjz3ZB0
8jO4xti3uzINfD5sQ7Lj7hzetUT+wQob+iXSZkhnvx+IvdbXF5/yt8aWPpUKnPym
oLxsYiI5gQBLxNDzIec0OIaflWqAr29m7J8+NNtApEN8nZFnf3bhehZW7AxmS1m0
ZnSsdHw0Fw+bgixPg2MQ9k9oefFeqa+7Kqdlz5bbrUYV2volxhDFtnI4Mh8BiWCN
xDH1Hizq+GKCcHsinDZWurCqder/afJBnQs+SBSL6MVApHt+d35zjBD92fO2Je56
dhMfzCgOKXeJ340WhW3TjD1zqLZXeaCyUNRnfOmWZV8nEhtHOFbUCU7r/KkjMZO9
AgMBAAGjgeMwgeAwDgYDVR0PAQH/BAQDAgGGMBIGA1UdEwEB/wQIMAYBAf8CAQAw
HQYDVR0OBBYEFMDMA0a5WCDMXHJw8+EuyyCm9Wg6MHoGCCsGAQUFBwEBBG4wbDA0
BggrBgEFBQcwAYYoaHR0cDovL29jc3Auc3RnLXJvb3QteDEubGV0c2VuY3J5cHQu
b3JnLzA0BggrBgEFBQcwAoYoaHR0cDovL2NlcnQuc3RnLXJvb3QteDEubGV0c2Vu
Y3J5cHQub3JnLzAfBgNVHSMEGDAWgBTBJnSkikSg5vogKNhcI5pFiBh54DANBgkq
hkiG9w0BAQsFAAOCAgEABYSu4Il+fI0MYU42OTmEj+1HqQ5DvyAeyCA6sGuZdwjF
UGeVOv3NnLyfofuUOjEbY5irFCDtnv+0ckukUZN9lz4Q2YjWGUpW4TTu3ieTsaC9
AFvCSgNHJyWSVtWvB5XDxsqawl1KzHzzwr132bF2rtGtazSqVqK9E07sGHMCf+zp
DQVDVVGtqZPHwX3KqUtefE621b8RI6VCl4oD30Olf8pjuzG4JKBFRFclzLRjo/h7
IkkfjZ8wDa7faOjVXx6n+eUQ29cIMCzr8/rNWHS9pYGGQKJiY2xmVC9h12H99Xyf
zWE9vb5zKP3MVG6neX1hSdo7PEAb9fqRhHkqVsqUvJlIRmvXvVKTwNCP3eCjRCCI
PTAvjV+4ni786iXwwFYNz8l3PmPLCyQXWGohnJ8iBm+5nk7O2ynaPVW0U2W+pt2w
SVuvdDM5zGv2f9ltNWUiYZHJ1mmO97jSY/6YfdOUH66iRtQtDkHBRdkNBsMbD+Em
2TgBldtHNSJBfB3pm9FblgOcJ0FSWcUDWJ7vO0+NTXlgrRofRT6pVywzxVo6dND0
WzYlTWeUVsO40xJqhgUQRER9YLOLxJ0O6C8i0xFxAMKOtSdodMB3RIwt7RFQ0uyt
n5Z5MqkYhlMI3J1tPRTp1nEt9fyGspBOO05gi148Qasp+3N+svqKomoQglNoAxU=
-----END CERTIFICATE-----
"""
    # Expected command string for each named test case; keys mirror the
    # test method names below.
    self.Resp = {
        "disable": "disable server redis-ro/redis-ro0",
        "set-server-agent": "set server redis-ro/redis-ro0 agent up",
        "set-server-health": "set server redis-ro/redis-ro0 health stopping",
        "set-server-state": "set server redis-ro/redis-ro0 state drain",
        "set-server-weight": "set server redis-ro/redis-ro0 weight 10",
        "frontends": "show stat",
        "info": "show info",
        "sessions": "show sess",
        "servers": "show stat",
        "show-ssl-crt-lists": "show ssl crt-list",
        "show-ssl-crt-list": "show ssl crt-list -n /tmp/haproxy/ssl/601a7392cc9984.99301413.certlist",
        "show-ssl-certs": "show ssl cert",
        "show-ssl-cert": "show ssl cert /tmp/haproxy/ssl/601a70e4844b0.pem",
        "add-to-crt-list": "add ssl crt-list /tmp/haproxy/ssl/601a7392cc9984.99301413.certlist /tmp/haproxy/ssl/601a70e4844b0.pem",
        "del-from-crt-list": "del ssl crt-list /tmp/haproxy/ssl/601a7392cc9984.99301413.certlist /tmp/haproxy/ssl/601a70e4844b0.pem",
        "new-ssl-cert": "new ssl cert /tmp/haproxy/ssl/601a70e4844b0.pem",
        "update-ssl-cert": "set ssl cert /tmp/haproxy/ssl/601a70e4844b0.pem <<\n%s" % self.pem_cert_content,
        "del-ssl-cert": "del ssl cert /tmp/haproxy/ssl/601a70e4844b0.pem",
        "commit-ssl-cert": "commit ssl cert /tmp/haproxy/ssl/601a70e4844b0.pem",
        "abort-ssl-cert": "abort ssl cert /tmp/haproxy/ssl/601a70e4844b0.pem",
    }
    # Every command sent on the HAProxy socket is terminated with CRLF.
    self.Resp = dict([(k, v + "\r\n") for k, v in self.Resp.items()])
def test_setServerAgent(self):
    """Verify rendering of the 'set server agent' command."""
    rendered = cmds.setServerAgent(
        backend="redis-ro", server="redis-ro0", value="up").getCmd()
    self.assertEqual(rendered, self.Resp["set-server-agent"])
def test_setServerHealth(self):
    """Verify rendering of the 'set server health' command."""
    rendered = cmds.setServerHealth(
        backend="redis-ro", server="redis-ro0", value="stopping").getCmd()
    self.assertEqual(rendered, self.Resp["set-server-health"])
def test_setServerState(self):
    """Verify rendering of the 'set server state' command."""
    rendered = cmds.setServerState(
        backend="redis-ro", server="redis-ro0", value="drain").getCmd()
    self.assertEqual(rendered, self.Resp["set-server-state"])
def test_setServerWeight(self):
    """Verify rendering of the 'set server weight' command."""
    rendered = cmds.setServerWeight(
        backend="redis-ro", server="redis-ro0", value="10").getCmd()
    self.assertEqual(rendered, self.Resp["set-server-weight"])
def test_showFrontends(self):
    """Verify rendering of the frontends/backends ('show stat') command."""
    self.assertEqual(cmds.showFrontends().getCmd(), self.Resp["frontends"])
def test_showInfo(self):
    """Verify rendering of the 'show info' command."""
    self.assertEqual(cmds.showInfo().getCmd(), self.Resp["info"])
def test_showSessions(self):
    """Verify rendering of the 'show sess' command."""
    self.assertEqual(cmds.showSessions().getCmd(), self.Resp["sessions"])
def test_showServers(self):
    """Verify rendering of the per-backend 'show stat' command."""
    rendered = cmds.showServers(backend="redis-ro").getCmd()
    self.assertEqual(rendered, self.Resp["servers"])
def test_showSslCrtLists(self):
    """Verify rendering of the 'show ssl crt-list' command."""
    self.assertEqual(cmds.showSslCrtLists().getCmd(),
                     self.Resp["show-ssl-crt-lists"])
def test_showSslCrtList(self):
    """Verify rendering of 'show ssl crt-list -n <crt-list>'."""
    rendered = cmds.showSslCrtList(
        crt_list="/tmp/haproxy/ssl/601a7392cc9984.99301413.certlist").getCmd()
    self.assertEqual(rendered, self.Resp["show-ssl-crt-list"])
def test_showSslCerts(self):
    """Verify rendering of the 'show ssl cert' command."""
    self.assertEqual(cmds.showSslCerts().getCmd(), self.Resp["show-ssl-certs"])
def test_showSslCert(self):
    """Verify rendering of 'show ssl cert <certfile>'."""
    rendered = cmds.showSslCert(
        certfile="/tmp/haproxy/ssl/601a70e4844b0.pem").getCmd()
    self.assertEqual(rendered, self.Resp["show-ssl-cert"])
def test_addToSslCrtList(self):
    """Verify rendering of 'add ssl crt-list <crt-list> <certfile>'."""
    rendered = cmds.addToSslCrtList(
        crt_list="/tmp/haproxy/ssl/601a7392cc9984.99301413.certlist",
        certfile="/tmp/haproxy/ssl/601a70e4844b0.pem").getCmd()
    self.assertEqual(rendered, self.Resp["add-to-crt-list"])
def test_delFromSslCrtList(self):
    """Verify rendering of 'del ssl crt-list <crt-list> <certfile>'."""
    rendered = cmds.delFromSslCrtList(
        crt_list="/tmp/haproxy/ssl/601a7392cc9984.99301413.certlist",
        certfile="/tmp/haproxy/ssl/601a70e4844b0.pem").getCmd()
    self.assertEqual(rendered, self.Resp["del-from-crt-list"])
def test_newSslCrt(self):
    """Verify rendering of 'new ssl cert <certfile>'."""
    rendered = cmds.newSslCrt(
        certfile="/tmp/haproxy/ssl/601a70e4844b0.pem").getCmd()
    self.assertEqual(rendered, self.Resp["new-ssl-cert"])
def test_updateSslCrt(self):
    """Verify rendering of 'set ssl cert <certfile> <payload>'."""
    rendered = cmds.updateSslCrt(
        certfile="/tmp/haproxy/ssl/601a70e4844b0.pem",
        payload="%s" % self.pem_cert_content).getCmd()
    self.assertEqual(rendered, self.Resp["update-ssl-cert"])
def test_delSslCrt(self):
    """Verify rendering of 'del ssl cert <certfile>'."""
    rendered = cmds.delSslCrt(
        certfile="/tmp/haproxy/ssl/601a70e4844b0.pem").getCmd()
    self.assertEqual(rendered, self.Resp["del-ssl-cert"])
def test_commitSslCrt(self):
    """Verify rendering of 'commit ssl cert <certfile>'."""
    rendered = cmds.commitSslCrt(
        certfile="/tmp/haproxy/ssl/601a70e4844b0.pem").getCmd()
    self.assertEqual(rendered, self.Resp["commit-ssl-cert"])
def test_abortSslCrt(self):
    """Verify rendering of 'abort ssl cert <certfile>'."""
    rendered = cmds.abortSslCrt(
        certfile="/tmp/haproxy/ssl/601a70e4844b0.pem").getCmd()
    self.assertEqual(rendered, self.Resp["abort-ssl-cert"])
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for TFGAN classifier_metrics."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tarfile
import tempfile
import numpy as np
from google.protobuf import text_format
from tensorflow.contrib.gan.python.eval.python import classifier_metrics_impl as classifier_metrics
from tensorflow.core.framework import graph_pb2
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
mock = test.mock
def _numpy_softmax(x):
e_x = np.exp(x - np.max(x, axis=1)[:, None])
return e_x / np.sum(e_x, axis=1)[:, None]
def _expected_inception_score(logits):
    """Reference Inception score: exp(mean_i KL(p(y|x_i) || p(y)))."""
    # Row-wise softmax (inlined, numerically stabilized).
    exps = np.exp(logits - np.max(logits, axis=1)[:, None])
    p = exps / np.sum(exps, axis=1)[:, None]
    # Marginal class distribution, kept 2-D so it broadcasts against p.
    q = np.expand_dims(np.mean(p, 0), 0)
    per_example_kl = np.sum(p * (np.log(p) - np.log(q)), 1)
    return np.exp(np.mean(per_example_kl))
def _approximate_matrix_sqrt(mat, eps=1e-8):
# Unlike tensorflow, numpy's return order is (u, s, v)
u, s, v = np.linalg.svd(mat)
si = np.where(s < eps, s, np.sqrt(s))
# Note the "v" returned by numpy is actually v = V^T
# (when referencing the SVD equation A = U S V^T)
# This is unlike Tensorflow which returns v = V
return np.dot(np.dot(u, np.diag(si)), v)
def _expected_fid(real_imgs, gen_imgs):
    """Reference Frechet distance between two activation sets:
    |m - m_v|^2 + Tr(C + C_v - 2 sqrt(C C_v))."""
    m = np.mean(real_imgs, axis=0)
    m_v = np.mean(gen_imgs, axis=0)
    sigma = np.cov(real_imgs, rowvar=False)
    sigma_v = np.cov(gen_imgs, rowvar=False)
    # Approximate sqrt of sigma.sigma_v via SVD (inlined helper; singular
    # values below 1e-8 are kept as-is rather than rooted).
    u, s, v_t = np.linalg.svd(np.dot(sigma, sigma_v))
    si = np.where(s < 1e-8, s, np.sqrt(s))
    sqcc = np.dot(np.dot(u, np.diag(si)), v_t)
    mean_term = np.square(m - m_v).sum()
    trace_term = np.trace(sigma + sigma_v - 2 * sqcc)
    return mean_term + trace_term
# A dummy GraphDef string with the minimum number of Ops the tests need:
# an "inputs" placeholder plus placeholders standing in for the Inception
# logits ([-1, 1001]) and final-pool ([-1, 2048]) output tensors.
graphdef_string = """
node {
name: "inputs"
op: "Placeholder"
attr {
key: "dtype"
value {
type: DT_FLOAT
}
}
attr {
key: "shape"
value {
shape {
dim {
size: -1
}
dim {
size: 299
}
dim {
size: 299
}
dim {
size: 3
}
}
}
}
}
node {
name: "InceptionV3/Logits/SpatialSqueeze"
op: "Placeholder"
attr {
key: "dtype"
value {
type: DT_FLOAT
}
}
attr {
key: "shape"
value {
shape {
dim {
size: -1
}
dim {
size: 1001
}
}
}
}
}
node {
name: "InceptionV3/Logits/AvgPool_1a_8x8/AvgPool"
op: "Placeholder"
attr {
key: "dtype"
value {
type: DT_FLOAT
}
}
attr {
key: "shape"
value {
shape {
dim {
size: -1
}
dim {
size: 2048
}
}
}
}
}
versions {
producer: 24
}
"""
def _get_dummy_graphdef():
    """Parse `graphdef_string` into a fresh GraphDef proto."""
    gdef = graph_pb2.GraphDef()
    text_format.Merge(graphdef_string, gdef)
    return gdef
def _run_with_mock(function, *args, **kwargs):
    """Call `function` while stubbing the Inception tarball download so
    the dummy GraphDef is used instead of a network fetch."""
    patcher = mock.patch.object(
        classifier_metrics, 'get_graph_def_from_url_tarball')
    with patcher as tarball_mock:
        tarball_mock.return_value = _get_dummy_graphdef()
        return function(*args, **kwargs)
class ClassifierMetricsTest(test.TestCase):
    """Graph-construction and value tests for TFGAN classifier metrics."""

    def test_run_inception_graph(self):
        """Test `run_inception` graph construction."""
        batch_size = 7
        img = array_ops.ones([batch_size, 299, 299, 3])
        # `_run_with_mock` substitutes the dummy GraphDef for the real
        # Inception tarball download.
        logits = _run_with_mock(classifier_metrics.run_inception, img)
        self.assertTrue(isinstance(logits, ops.Tensor))
        logits.shape.assert_is_compatible_with([batch_size, 1001])
        # Check that none of the model variables are trainable.
        self.assertListEqual([], variables.trainable_variables())

    def test_run_inception_graph_pool_output(self):
        """Test `run_inception` graph construction with pool output."""
        batch_size = 3
        img = array_ops.ones([batch_size, 299, 299, 3])
        pool = _run_with_mock(
            classifier_metrics.run_inception, img,
            output_tensor=classifier_metrics.INCEPTION_V3_FINAL_POOL)
        self.assertTrue(isinstance(pool, ops.Tensor))
        pool.shape.assert_is_compatible_with([batch_size, 2048])
        # Check that none of the model variables are trainable.
        self.assertListEqual([], variables.trainable_variables())

    def test_inception_score_graph(self):
        """Test `inception_score` graph construction."""
        score = _run_with_mock(classifier_metrics.inception_score,
                               array_ops.zeros([6, 299, 299, 3]), num_batches=3)
        self.assertTrue(isinstance(score, ops.Tensor))
        score.shape.assert_has_rank(0)
        # Check that none of the model variables are trainable.
        self.assertListEqual([], variables.trainable_variables())

    def test_frechet_inception_distance_graph(self):
        """Test `frechet_inception_distance` graph construction."""
        img = array_ops.ones([7, 299, 299, 3])
        distance = _run_with_mock(
            classifier_metrics.frechet_inception_distance, img, img)
        self.assertTrue(isinstance(distance, ops.Tensor))
        distance.shape.assert_has_rank(0)
        # Check that none of the model variables are trainable.
        self.assertListEqual([], variables.trainable_variables())

    def test_run_inception_multicall(self):
        """Test that `run_inception` can be called multiple times."""
        for batch_size in (7, 3, 2):
            img = array_ops.ones([batch_size, 299, 299, 3])
            _run_with_mock(classifier_metrics.run_inception, img)

    def test_invalid_input(self):
        """Test that functions properly fail on invalid input."""
        with self.assertRaisesRegexp(ValueError, 'Shapes .* are incompatible'):
            classifier_metrics.run_inception(array_ops.ones([7, 50, 50, 3]))

        p = array_ops.zeros([8, 10])
        p_logits = array_ops.zeros([8, 10])
        q = array_ops.zeros([10])
        # `_kl_divergence` validates dtypes first, then ranks.
        with self.assertRaisesRegexp(ValueError, 'must be floating type'):
            classifier_metrics._kl_divergence(
                array_ops.zeros([8, 10], dtype=dtypes.int32), p_logits, q)
        with self.assertRaisesRegexp(ValueError, 'must be floating type'):
            classifier_metrics._kl_divergence(
                p, array_ops.zeros([8, 10], dtype=dtypes.int32), q)
        with self.assertRaisesRegexp(ValueError, 'must be floating type'):
            classifier_metrics._kl_divergence(
                p, p_logits, array_ops.zeros([10], dtype=dtypes.int32))
        with self.assertRaisesRegexp(ValueError, 'must have rank 2'):
            classifier_metrics._kl_divergence(array_ops.zeros([8]), p_logits, q)
        with self.assertRaisesRegexp(ValueError, 'must have rank 2'):
            classifier_metrics._kl_divergence(p, array_ops.zeros([8]), q)
        with self.assertRaisesRegexp(ValueError, 'must have rank 1'):
            classifier_metrics._kl_divergence(p, p_logits, array_ops.zeros([10, 8]))

    def test_inception_score_value(self):
        """Test that `inception_score` gives the correct value."""
        logits = np.array([np.array([1, 2] * 500 + [4]),
                           np.array([4, 5] * 500 + [6])])
        unused_image = array_ops.zeros([2, 299, 299, 3])
        incscore = _run_with_mock(classifier_metrics.inception_score, unused_image)

        with self.test_session(use_gpu=True) as sess:
            # Feed the known logits straight into the graph in place of the
            # (mocked) Inception output.
            incscore_np = sess.run(incscore, {'concat:0': logits})

        self.assertAllClose(_expected_inception_score(logits), incscore_np)

    def test_frechet_classifier_distance_value(self):
        """Test that `frechet_classifier_distance` gives the correct value."""
        np.random.seed(0)
        test_pool_real_a = np.float32(np.random.randn(64, 256))
        test_pool_gen_a = np.float32(np.random.randn(64, 256))

        # `classifier_fn` is the identity, so FID is computed directly on
        # the random "pool" activations.
        fid_op = _run_with_mock(classifier_metrics.frechet_classifier_distance,
                                test_pool_real_a, test_pool_gen_a,
                                classifier_fn=lambda x: x)

        with self.test_session() as sess:
            actual_fid = sess.run(fid_op)

        expected_fid = _expected_fid(test_pool_real_a, test_pool_gen_a)
        self.assertAllClose(expected_fid, actual_fid, 0.01)

    def test_preprocess_image_graph(self):
        """Test `preprocess_image` graph construction."""
        incorrectly_sized_image = array_ops.zeros([520, 240, 3])
        correct_image = classifier_metrics.preprocess_image(
            image=incorrectly_sized_image)
        _run_with_mock(classifier_metrics.run_inception,
                       array_ops.expand_dims(correct_image, 0))

    def test_get_graph_def_from_url_tarball(self):
        """Test `get_graph_def_from_url_tarball`."""
        # Write dummy binary GraphDef to tempfile.
        with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
            tmp_file.write(_get_dummy_graphdef().SerializeToString())
        relative_path = os.path.relpath(tmp_file.name)
        # Create gzip tarball.
        tar_dir = tempfile.mkdtemp()
        tar_filename = os.path.join(tar_dir, 'tmp.tar.gz')
        with tarfile.open(tar_filename, 'w:gz') as tar:
            tar.add(relative_path)
        # Stub the download so the tarball just written is "fetched".
        with mock.patch.object(classifier_metrics, 'urllib') as mock_urllib:
            mock_urllib.request.urlretrieve.return_value = tar_filename, None
            graph_def = classifier_metrics.get_graph_def_from_url_tarball(
                'unused_url', relative_path)
        self.assertIsInstance(graph_def, graph_pb2.GraphDef)
        self.assertEqual(_get_dummy_graphdef(), graph_def)
# Standard TensorFlow test entry point.
if __name__ == '__main__':
    test.main()
| |
# -*- coding: utf-8 -*-
import arcpy
import mds
import mds.messages
import numpy
import netCDF4
import os.path
#
# LIMITATIONS:
# > Attributes:
# Attribute values are copied wholesale from the original variable. Hence,
# if these values describe the values in the new variable, i.e. as with
# valid_range, actual_range, unpacked_range, they will be incorrect and should
# be manually altered. This affects all statistics types, but is only
# problematic with the RANGE, STD, SUM, and VARIANCE.
#
class GetVariableStatisticsOverDimension(object):
def __init__(self):
"""Define the tool (tool name is the name of the class)."""
self.label = "Get Variable Statistics Over Dimension"
self.description = "Calculates statistics for a variable in a " + \
"multidimensional dataset, such as netCDF or HDF, over a specified" + \
"dimension. "
self.canRunInBackground = False
# Statistics choices
statistics_numpy = {'MAXIMUM':'max', \
'MEAN':'mean', \
'MINIMUM':'min', \
'RANGE':'ptp', \
'STD':'std', \
'SUM':'sum', \
'VARIANCE':'var'}
# List of dictionaries of statistics
# Sublist elements indices:
# 0: object
# 1: dictionary defined by 'displayname':'methodname'
# where object.methodname() is valid and displayname is what is
# shown to the user
self.statistics = [[numpy.ma, statistics_numpy]]
self.default_statistic = "MEAN"
def getParameterInfo(self):
"""Define parameter definitions"""
parameters = []
# Input parameter
parameters.append(arcpy.Parameter(
displayName="Input File or URL String",
name="in_file",
datatype=["DEFile","GPString"],
parameterType="Required",
direction="Input"))
# Variable parameter
parameters.append(arcpy.Parameter(
displayName="Variable",
name="variable",
datatype="GPString",
parameterType="Required",
direction="Input"))
parameters[-1].parameterDependencies = [parameters[-2].name]
# Dimension parameter
parameters.append(arcpy.Parameter(
displayName="Dimension",
name="dimension",
datatype="GPString",
parameterType="Required",
direction="Input"))
parameters[-1].parameterDependencies = [parameters[-2].name]
# Output parameter
parameters.append(arcpy.Parameter(
displayName="Output netCDF File",
name="out_netcdf_file",
datatype="DEFile",
multiValue=False,
parameterType="Required",
direction="Output"))
# Output variable parameter
parameters.append(arcpy.Parameter(
displayName="Output Variable Name",
name="out_variable",
datatype="GPString",
multiValue=False,
parameterType="Optional",
direction="Output"))
# Type parameter
parameters.append(arcpy.Parameter(
displayName="Statistic Type",
name="statistic_type",
datatype="GPString",
parameterType="Optional",
direction="Input"))
parameters[-1].filter.type = "ValueList"
parameters[-1].filter.list = sorted([key for stat in \
self.statistics for key in stat[1].keys()])
parameters[-1].value = self.default_statistic
return parameters
def isLicensed(self):
"""Set whether tool is licensed to execute."""
return True
def updateParameters(self, parameters):
"""Modify the values and properties of parameters before internal
validation is performed. This method is called whenever a parameter
has been changed."""
return
def updateMessages(self, parameters):
"""Modify the messages created by internal validation for each tool
parameter. This method is called after internal validation."""
input_parameter = parameters[0]
variable_parameter = parameters[1]
dimension_parameter = parameters[2]
output_parameter = parameters[3]
output_var_parameter = parameters[4]
type_parameter = parameters[5]
dataset = None
# Open dataset and populate variable names
if input_parameter.value is not None:
try:
dataset = mds.netcdf.Dataset(input_parameter.valueAsText, '')
except RuntimeError, exception:
if "No such file or directory" in str(exception) or \
"Invalid argument" in str(exception):
input_parameter.setErrorMessage(
mds.messages.INPUT_DATASET_DOES_NOT_RESOLVE_TO_FILENAME.format(
input_parameter.valueAsText))
elif "Malformed or inaccessible DAP DDS" in str(exception):
input_parameter.setErrorMessage(
mds.messages.INPUT_DATASET_URL_MALFORMED.format(
input_parameter.valueAsText))
else:
input_parameter.setErrorMessage(
mds.messages.INPUT_DATASET_GENERIC_ERROR.format(
input_parameter.valueAsText, str(exception)))
except Exception, exception:
input_parameter.setErrorMessage(
mds.messages.INPUT_DATASET_GENERIC_ERROR.format(
input_parameter.valueAsText, str(exception)))
if dataset is not None:
# Fill variable list
variable_parameter.filter.type = "ValueList"
variable_parameter.filter.list = list(dataset.variable_names())
else:
# Clear variable list if no input specified
variable_parameter.filter.type = "ValueList"
variable_parameter.filter.list = []
variable_parameter.value = ""
# Clear dimension list if no input specified
dimension_parameter.filter.type = "ValueList"
dimension_parameter.filter.list = []
dimension_parameter.value = ""
# Update dimension list
if (variable_parameter.value is not None) and (dataset is not None):
# Fill dimensions list
dimension_parameter.filter.type = "ValueList"
dimension_parameter.filter.list = list(
dataset.variable_dimension_names(variable_parameter.valueAsText))
else:
# Clear dimension list if no input specified
dimension_parameter.filter.type = "ValueList"
dimension_parameter.filter.list = []
dimension_parameter.value = ""
# Ensure an output variable name is entered
if (output_var_parameter.altered) and (output_var_parameter.value is None):
output_var_parameter.setErrorMessage(
'%s: Must input a variable name.' % output_var_parameter.name)
# Ensure output variable name is not the same as an existing variable's
if (output_var_parameter.value is not None) and \
(dataset is not None) and (output_var_parameter.value in \
dataset.variable_names()):
output_var_parameter.setErrorMessage(
'%s: Name cannot be the same as that of an existing variable.' \
% output_var_parameter.name)
# Populate a default output variable name and update it with changes
# to other parameters as long as the user hasn't modified it themself
if (variable_parameter.value is not None) and \
(dimension_parameter.value is not None) and \
(not output_var_parameter.altered):
if type_parameter.value is None:
output_var_parameter.value = variable_parameter.value + \
"_MEAN" + dimension_parameter.value
else:
output_var_parameter.value = variable_parameter.value + \
"_" + type_parameter.value + dimension_parameter.value
# Ensure output file has a .nc extension
if output_parameter.value is not None:
output_filename = output_parameter.valueAsText
if os.path.splitext(output_filename)[1] != ".nc":
output_parameter.setErrorMessage(
mds.messages.OUTPUT_FILE_EXTENSION_MUST_BE_NC)
return
# ---------------------------------------------------------
# Statistics
def calculate_statistic(self, variable, dimension, statistic):
# Apply statistic
for stat in self.statistics:
if statistic in stat[1]:
func = getattr(stat[0], stat[1][statistic])
break
else:
# Default
func = getattr(numpy.ma, 'mean')
return func(variable, axis=dimension)
# ---------------------------------------------------------
def execute(self, parameters, messages):
"""The source code of the tool."""
input_parameter = parameters[0]
variable_parameter = parameters[1]
dimension_parameter = parameters[2]
output_parameter = parameters[3]
output_var_parameter = parameters[4]
type_parameter = parameters[5]
dataset_name = input_parameter.valueAsText
# Open dataset
try:
dataset = mds.netcdf.Dataset(dataset_name,'')
except RuntimeError, exception:
# Handle errors not detected by updateMessages.
messages.addErrorMessage(str(exception))
raise arcpy.ExecuteError
# Variable of interest
var1 = dataset.variable(variable_parameter.valueAsText)
# Dimension of interest
dim1 = var1.dimensions.index(dimension_parameter.valueAsText)
# Perform statistic
result1 = self.calculate_statistic(var1[:], dim1, \
type_parameter.valueAsText)
# Collect output dataset information
output_dims = list(dataset.variable_dimension_names(
variable_parameter.valueAsText))
output_dims.remove(dimension_parameter.valueAsText)
output_dims = tuple(output_dims)
output_filename = output_parameter.valueAsText
output_name = output_var_parameter.valueAsText
# Create new dataset
dataset.xcopy(dataset.data_variable_names(), output_filename)
# Create new variable in dataset
with netCDF4.Dataset(output_filename, mode="a") as newdataset:
newvar = newdataset.createVariable(output_name, var1.dtype, \
output_dims)
for attribute_name in var1.ncattrs():
newvar.setncattr(attribute_name, var1.getncattr(attribute_name))
newvar[:] = result1
# Output new variable name
arcpy.SetParameter(5, output_name)
return
| |
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
try:
import simplejson as json
except ImportError:
import json
from libcloud.container.base import (
ContainerDriver,
Container,
ContainerCluster,
ContainerImage,
)
from libcloud.container.types import ContainerState
from libcloud.container.utils.docker import RegistryClient
from libcloud.common.aws import SignedAWSConnection, AWSJsonResponse
__all__ = ["ElasticContainerDriver"]

# Date-stamped AWS API versions for ECS and ECR.
ECS_VERSION = "2014-11-13"
ECR_VERSION = "2015-09-21"
# Endpoint host templates; '%s' is filled with the AWS region name.
ECS_HOST = "ecs.%s.amazonaws.com"
ECR_HOST = "ecr.%s.amazonaws.com"
# All requests are POSTed to the service root path.
ROOT = "/"
# Target prefixes used to build per-action request headers, e.g.
# "AmazonEC2ContainerServiceV20141113" (version with dashes removed).
ECS_TARGET_BASE = "AmazonEC2ContainerServiceV%s" % (ECS_VERSION.replace("-", ""))
ECR_TARGET_BASE = "AmazonEC2ContainerRegistry_V%s" % (ECR_VERSION.replace("-", ""))
class ECSJsonConnection(SignedAWSConnection):
    """Signed AWS connection for the ECS JSON API."""
    version = ECS_VERSION
    host = ECS_HOST
    responseCls = AWSJsonResponse
    service_name = "ecs"
class ECRJsonConnection(SignedAWSConnection):
    """Signed AWS connection for the ECR (container registry) JSON API."""
    version = ECR_VERSION
    host = ECR_HOST
    responseCls = AWSJsonResponse
    service_name = "ecr"
class ElasticContainerDriver(ContainerDriver):
    """Driver for Amazon ECS (tasks/clusters) with ECR registry support."""
    name = "Amazon Elastic Container Service"
    website = "https://aws.amazon.com/ecs/details/"
    # Template for ECR registry hostnames: (repository_id, region).
    ecr_repository_host = "%s.dkr.ecr.%s.amazonaws.com"
    connectionCls = ECSJsonConnection
    ecrConnectionClass = ECRJsonConnection
    supports_clusters = False
    # NOTE(review): only RUNNING is mapped; other ECS task statuses are
    # presumably handled by a default in _to_containers -- confirm.
    status_map = {"RUNNING": ContainerState.RUNNING}
def __init__(self, access_id, secret, region):
    """Create an ECS driver plus a secondary signed connection for ECR.

    :param access_id: AWS access key id
    :param secret: AWS secret key
    :param region: AWS region name, e.g. ``us-east-1``
    """
    super(ElasticContainerDriver, self).__init__(access_id, secret)
    self.region = region
    self.region_name = region
    self.connection.host = ECS_HOST % (region)
    # ECR lives on a different endpoint, so build a second connection
    # with the same credentials and signature settings.
    extra_kwargs = self._ex_connection_class_kwargs()
    self.ecr_connection = self.ecrConnectionClass(
        access_id, secret, **extra_kwargs)
    self.ecr_connection.host = ECR_HOST % (region)
    self.ecr_connection.driver = self
    self.ecr_connection.connect()
def _ex_connection_class_kwargs(self):
    """Extra kwargs for the connection class: force SigV4 signing."""
    return dict(signature_version="4")
def list_images(self, ex_repository_name):
    """
    List the images in an ECR repository

    :param ex_repository_name: The name of the repository to check
        defaults to the default repository.
    :type  ex_repository_name: ``str``

    :return: a list of images
    :rtype: ``list`` of :class:`libcloud.container.base.ContainerImage`
    """
    response = self.ecr_connection.request(
        ROOT,
        method="POST",
        data=json.dumps({"repositoryName": ex_repository_name}),
        headers=self._get_ecr_headers("ListImages"),
    ).object
    # Resolve the registry host so image names can be fully qualified.
    repository_id = self.ex_get_repository_id(ex_repository_name)
    registry_host = self._get_ecr_host(repository_id)
    return self._to_images(
        response["imageIds"], registry_host, ex_repository_name)
def list_clusters(self):
    """
    List the container clusters in this account/region.

    :rtype: ``list`` of :class:`libcloud.container.base.ContainerCluster`
    """
    # First fetch the cluster ARNs, then describe them in one batch.
    arns = self.connection.request(
        ROOT,
        method="POST",
        data=json.dumps({}),
        headers=self._get_headers("ListClusters"),
    ).object["clusterArns"]
    described = self.connection.request(
        ROOT,
        method="POST",
        data=json.dumps({"clusters": arns}),
        headers=self._get_headers("DescribeClusters"),
    ).object
    return self._to_clusters(described)
def create_cluster(self, name, location=None):
    """
    Create a container cluster

    :param name: The name of the cluster
    :type  name: ``str``

    :param location: The location to create the cluster in (unused by ECS)
    :type  location: :class:`libcloud.container.base.ClusterLocation`

    :rtype: :class:`libcloud.container.base.ContainerCluster`
    """
    created = self.connection.request(
        ROOT,
        method="POST",
        data=json.dumps({"clusterName": name}),
        headers=self._get_headers("CreateCluster"),
    ).object
    return self._to_cluster(created["cluster"])
def destroy_cluster(self, cluster):
    """
    Delete a cluster

    :param cluster: The cluster to delete
    :type  cluster: :class:`libcloud.container.base.ContainerCluster`

    :return: ``True`` if the destroy was successful, otherwise ``False``.
    :rtype: ``bool``
    """
    result = self.connection.request(
        ROOT,
        method="POST",
        data=json.dumps({"cluster": cluster.id}),
        headers=self._get_headers("DeleteCluster"),
    ).object
    # ECS marks deleted clusters INACTIVE rather than removing them.
    return result["cluster"]["status"] == "INACTIVE"
def list_containers(self, image=None, cluster=None):
    """
    List the deployed container images

    :param image: Filter to containers with a certain image
    :type  image: :class:`libcloud.container.base.ContainerImage`

    :param cluster: Filter to containers in a cluster
    :type  cluster: :class:`libcloud.container.base.ContainerCluster`

    :rtype: ``list`` of :class:`libcloud.container.base.Container`
    """
    filters = {"cluster": "default" if cluster is None else cluster.id}
    if image is not None:
        # ECS groups task definitions by "family", named after the image.
        filters["family"] = image.name
    task_arns = self.connection.request(
        ROOT,
        method="POST",
        data=json.dumps(filters),
        headers=self._get_headers("ListTasks"),
    ).object["taskArns"]
    if len(task_arns) == 0:
        return []
    return self.ex_list_containers_for_task(task_arns)
def deploy_container(
    self,
    name,
    image,
    cluster=None,
    parameters=None,
    start=True,
    ex_cpu=10,
    ex_memory=500,
    ex_container_port=None,
    ex_host_port=None,
):
    """
    Creates a task definition from a container image that can be run
    in a cluster.

    :param name: The name of the new container
    :type name: ``str``

    :param image: The container image to deploy
    :type image: :class:`libcloud.container.base.ContainerImage`

    :param cluster: The cluster to deploy to, None is default
        (accepted but not referenced in this implementation)
    :type cluster: :class:`libcloud.container.base.ContainerCluster`

    :param parameters: Container Image parameters
        (accepted but not referenced in this implementation)
    :type parameters: ``str``

    :param start: Start the container on deployment
    :type start: ``bool``

    :param ex_cpu: CPU units to reserve for the container
    :type ex_cpu: ``int``

    :param ex_memory: Memory (MiB) to reserve for the container
    :type ex_memory: ``int``

    :param ex_container_port: Container port for the port mapping
    :type ex_container_port: ``int``

    :param ex_host_port: Host port for the port mapping
    :type ex_host_port: ``int``

    :rtype: :class:`libcloud.container.base.Container`
    """
    data = {}
    # NOTE(review): when only one of ex_container_port/ex_host_port is
    # given, a mapping containing a None value is sent to ECS -- confirm
    # that is intended.
    if ex_container_port is None and ex_host_port is None:
        port_maps = []
    else:
        port_maps = [{"containerPort": ex_container_port, "hostPort": ex_host_port}]
    data["containerDefinitions"] = [
        {
            "mountPoints": [],
            "name": name,
            "image": image.name,
            "cpu": ex_cpu,
            "environment": [],
            "memory": ex_memory,
            "portMappings": port_maps,
            "essential": True,
            "volumesFrom": [],
        }
    ]
    data["family"] = name
    response = self.connection.request(
        ROOT,
        method="POST",
        data=json.dumps(data),
        headers=self._get_headers("RegisterTaskDefinition"),
    ).object
    if start:
        # Launch one task from the freshly registered definition and
        # return its first container.
        return self.ex_start_task(response["taskDefinition"]["taskDefinitionArn"])[
            0
        ]
    else:
        # NOTE(review): the unstarted container is reported with id=None
        # and state RUNNING -- the state looks questionable; confirm
        # against callers before changing.
        return Container(
            id=None,
            name=name,
            image=image,
            state=ContainerState.RUNNING,
            ip_addresses=[],
            extra={
                "taskDefinitionArn": response["taskDefinition"]["taskDefinitionArn"]
            },
            driver=self.connection.driver,
        )
def get_container(self, id):
    """
    Get a container by ID

    :param id: The ID (task ARN) of the container to get
    :type  id: ``str``

    :rtype: :class:`libcloud.container.base.Container`
    """
    # DescribeTasks accepts a batch; ask for just this one and return
    # the first container of the task.
    return self.ex_list_containers_for_task([id])[0]
def start_container(self, container, count=1):
    """
    Start a deployed task

    :param container: The container to start
    :type  container: :class:`libcloud.container.base.Container`

    :param count: Number of containers to start
    :type  count: ``int``

    :rtype: :class:`libcloud.container.base.Container`
    """
    task_definition_arn = container.extra["taskDefinitionArn"]
    return self.ex_start_task(task_definition_arn, count)
def stop_container(self, container):
    """
    Stop a deployed container

    :param container: The container to stop
    :type container: :class:`libcloud.container.base.Container`

    :rtype: ``list`` of :class:`libcloud.container.base.Container`
    """
    request = {"task": container.extra["taskArn"]}
    response = self.connection.request(
        ROOT,
        method="POST",
        data=json.dumps(request),
        headers=self._get_headers("StopTask"),
    ).object
    containers = []
    # NOTE(review): this returns a list of the stopped task's containers,
    # not a single Container as the base-class convention suggests --
    # confirm callers expect a list before changing.
    containers.extend(
        self._to_containers(response["task"], container.extra["taskDefinitionArn"])
    )
    return containers
def restart_container(self, container):
    """
    Restart a deployed container

    :param container: The container to restart
    :type container: :class:`libcloud.container.base.Container`

    :rtype: :class:`libcloud.container.base.Container`
    """
    # Stop the running task, then launch a fresh one from the same
    # task definition.
    self.stop_container(container)
    return self.start_container(container)
def destroy_container(self, container):
    """
    Destroy a deployed container

    :param container: The container to destroy
    :type container: :class:`libcloud.container.base.Container`

    :rtype: :class:`libcloud.container.base.Container`
    """
    # ECS tasks are only ever stopped, not deleted, so destroying
    # delegates to stop_container.
    return self.stop_container(container)
def ex_start_task(self, task_arn, count=1):
    """
    Run a task definition and get the containers

    :param task_arn: The task ARN to Run
    :type  task_arn: ``str``

    :param count: The number of containers to start
    :type  count: ``int``

    :rtype: ``list`` of :class:`libcloud.container.base.Container`
    """
    # (Removed a dead ``request = None`` that was immediately overwritten.)
    request = {"count": count, "taskDefinition": task_arn}
    response = self.connection.request(
        ROOT,
        method="POST",
        data=json.dumps(request),
        headers=self._get_headers("RunTask"),
    ).object
    containers = []
    # Each launched task may host several containers; flatten them all.
    for task in response["tasks"]:
        containers.extend(self._to_containers(task, task_arn))
    return containers
def ex_list_containers_for_task(self, task_arns):
    """
    Get a list of containers by ID collection (ARN)

    :param task_arns: The list of ARNs
    :type  task_arns: ``list`` of ``str``

    :rtype: ``list`` of :class:`libcloud.container.base.Container`
    """
    describe_response = self.connection.request(
        ROOT,
        method="POST",
        data=json.dumps({"tasks": task_arns}),
        headers=self._get_headers("DescribeTasks"),
    ).object
    containers = []
    # Each task may host several containers; flatten them all.
    for task in describe_response["tasks"]:
        containers.extend(self._to_containers(task, task["taskDefinitionArn"]))
    return containers
def ex_create_service(self, name, cluster, task_definition, desired_count=1):
"""
Runs and maintains a desired number of tasks from a specified
task definition. If the number of tasks running in a service
drops below desired_count, Amazon ECS spawns another
instantiation of the task in the specified cluster.
:param name: the name of the service
:type name: ``str``
:param cluster: The cluster to run the service on
:type cluster: :class:`libcloud.container.base.ContainerCluster`
:param task_definition: The task definition name or ARN for the
service
:type task_definition: ``str``
:param desired_count: The desired number of tasks to be running
at any one time
:type desired_count: ``int``
:rtype: ``object`` The service object
"""
request = {
"serviceName": name,
"taskDefinition": task_definition,
"desiredCount": desired_count,
"cluster": cluster.id,
}
response = self.connection.request(
ROOT,
method="POST",
data=json.dumps(request),
headers=self._get_headers("CreateService"),
).object
return response["service"]
def ex_list_service_arns(self, cluster=None):
"""
List the services
:param cluster: The cluster hosting the services
:type cluster: :class:`libcloud.container.base.ContainerCluster`
:rtype: ``list`` of ``str``
"""
request = {}
if cluster is not None:
request["cluster"] = cluster.id
response = self.connection.request(
ROOT,
method="POST",
data=json.dumps(request),
headers=self._get_headers("ListServices"),
).object
return response["serviceArns"]
def ex_describe_service(self, service_arn):
"""
Get the details of a service
:param cluster: The hosting cluster
:type cluster: :class:`libcloud.container.base.ContainerCluster`
:param service_arn: The service ARN to describe
:type service_arn: ``str``
:return: The service object
:rtype: ``object``
"""
request = {"services": [service_arn]}
response = self.connection.request(
ROOT,
method="POST",
data=json.dumps(request),
headers=self._get_headers("DescribeServices"),
).object
return response["services"][0]
def ex_destroy_service(self, service_arn):
"""
Deletes a service
:param cluster: The target cluster
:type cluster: :class:`libcloud.container.base.ContainerCluster`
:param service_arn: The service ARN to destroy
:type service_arn: ``str``
"""
request = {"service": service_arn}
response = self.connection.request(
ROOT,
method="POST",
data=json.dumps(request),
headers=self._get_headers("DeleteService"),
).object
return response["service"]
def ex_get_registry_client(self, repository_name):
"""
Get a client for an ECR repository
:param repository_name: The unique name of the repository
:type repository_name: ``str``
:return: a docker registry API client
:rtype: :class:`libcloud.container.utils.docker.RegistryClient`
"""
repository_id = self.ex_get_repository_id(repository_name)
token = self.ex_get_repository_token(repository_id)
host = self._get_ecr_host(repository_id)
return RegistryClient(host=host, username="AWS", password=token)
def ex_get_repository_token(self, repository_id):
"""
Get the authorization token (12 hour expiry) for a repository
:param repository_id: The ID of the repository
:type repository_id: ``str``
:return: A token for login
:rtype: ``str``
"""
request = {"RegistryIds": [repository_id]}
response = self.ecr_connection.request(
ROOT,
method="POST",
data=json.dumps(request),
headers=self._get_ecr_headers("GetAuthorizationToken"),
).object
return response["authorizationData"][0]["authorizationToken"]
def ex_get_repository_id(self, repository_name):
"""
Get the ID of a repository
:param repository_name: The unique name of the repository
:type repository_name: ``str``
:return: The repository ID
:rtype: ``str``
"""
request = {"repositoryNames": [repository_name]}
list_response = self.ecr_connection.request(
ROOT,
method="POST",
data=json.dumps(request),
headers=self._get_ecr_headers("DescribeRepositories"),
).object
repository_id = list_response["repositories"][0]["registryId"]
return repository_id
    def _get_ecr_host(self, repository_id):
        # ``ecr_repository_host`` is a %-style template taking the
        # registry/repository ID and the region, in that order.
        return self.ecr_repository_host % (repository_id, self.region)
def _get_headers(self, action):
"""
Get the default headers for a request to the ECS API
"""
return {
"x-amz-target": "%s.%s" % (ECS_TARGET_BASE, action),
"Content-Type": "application/x-amz-json-1.1",
}
def _get_ecr_headers(self, action):
"""
Get the default headers for a request to the ECR API
"""
return {
"x-amz-target": "%s.%s" % (ECR_TARGET_BASE, action),
"Content-Type": "application/x-amz-json-1.1",
}
def _to_clusters(self, data):
clusters = []
for cluster in data["clusters"]:
clusters.append(self._to_cluster(cluster))
return clusters
    def _to_cluster(self, data):
        # Map a single ECS cluster API record onto a libcloud
        # ContainerCluster, keyed by its ARN.
        return ContainerCluster(
            id=data["clusterArn"],
            name=data["clusterName"],
            driver=self.connection.driver,
        )
def _to_containers(self, data, task_definition_arn):
clusters = []
for cluster in data["containers"]:
clusters.append(self._to_container(cluster, task_definition_arn))
return clusters
    def _to_container(self, data, task_definition_arn):
        # Map an ECS container record onto a libcloud Container. The
        # embedded ContainerImage is a stub carrying only the container
        # name; id/path/version are left unset.
        return Container(
            id=data["containerArn"],
            name=data["name"],
            image=ContainerImage(
                id=None,
                name=data["name"],
                path=None,
                version=None,
                driver=self.connection.driver,
            ),
            ip_addresses=None,
            # Translate the ECS lastStatus string via the driver's status
            # map; unknown statuses become None.
            state=self.status_map.get(data["lastStatus"], None),
            extra={
                "taskArn": data["taskArn"],
                "taskDefinitionArn": task_definition_arn,
            },
            driver=self.connection.driver,
        )
def _to_images(self, data, host, repository_name):
images = []
for image in data:
images.append(self._to_image(image, host, repository_name))
return images
    def _to_image(self, data, host, repository_name):
        # Fully qualified image path: <registry-host>/<repository>:<tag>.
        path = "%s/%s:%s" % (host, repository_name, data["imageTag"])
        return ContainerImage(
            id=None,
            name=path,
            path=path,
            version=data["imageTag"],
            driver=self.connection.driver,
        )
| |
import json
from unittest import mock
import colander
import pytest
import requests_mock
from redis import RedisError
from requests.exceptions import RequestException
from ichnaea.api.exceptions import LocationNotFound
from ichnaea.api.locate.constants import DataSource
from ichnaea.api.locate.fallback import (
ExternalResult,
FallbackCache,
FallbackPositionSource,
DEFAULT_SCHEMA,
COMBAIN_V1_SCHEMA,
GOOGLEMAPS_V1_SCHEMA,
GOOGLEMAPS_V1_OUTBOUND_SCHEMA,
ICHNAEA_V1_OUTBOUND_SCHEMA,
ICHNAEA_V1_RESULT_SCHEMA,
UNWIREDLABS_V1_SCHEMA,
UNWIREDLABS_V1_OUTBOUND_SCHEMA,
UNWIREDLABS_V1_RESULT_SCHEMA,
)
from ichnaea.api.locate.query import Query
from ichnaea.api.locate.result import Position, PositionResultList
from ichnaea.api.locate.tests.base import BaseSourceTest, DummyModel
from ichnaea.api.locate.tests.test_query import QueryTest
from ichnaea.models import Radio
from ichnaea.tests.factories import (
BlueShardFactory,
CellShardFactory,
KeyFactory,
WifiShardFactory,
)
# API key configured for the UnwiredLabs external fallback provider;
# shared by the UnwiredLabs-specific tests below. The URL fragment after
# '#' carries the provider token.
UNWIREDLABS_KEY = KeyFactory(
    fallback_name="labs",
    fallback_schema=UNWIREDLABS_V1_SCHEMA,
    fallback_url="http://127.0.0.1:9/process.php#my_secret_token",
)
def _mock_redis_client():
client = mock.Mock()
client.pipeline.return_value = client
client.__enter__ = mock.Mock(return_value=client)
client.__exit__ = mock.Mock(return_value=None)
client.expire.return_value = mock.Mock()
client.get.return_value = mock.Mock()
client.mget.return_value = mock.Mock()
client.set.return_value = mock.Mock()
client.mset.return_value = mock.Mock()
return client
class TestExternalResult(object):
    """Tests for the ExternalResult helper."""

    def test_not_found(self):
        res = ExternalResult(None, None, None, None)
        assert res.not_found()

    def test_not_found_accuracy(self):
        # A position without an accuracy still counts as not found.
        res = ExternalResult(1.0, 1.0, None, None)
        assert res.not_found()

    def test_found(self):
        res = ExternalResult(1.0, 1.0, 10, None)
        assert not res.not_found()

    def test_found_fallback(self):
        res = ExternalResult(1.0, 1.0, 10, "lacf")
        assert not res.not_found()

    def test_score(self):
        res = ExternalResult(1.0, 1.0, 10, None)
        assert res.score == 10.0

    def test_score_fallback(self):
        # A fallback-derived result scores at half the accuracy value.
        res = ExternalResult(1.0, 1.0, 10, "lacf")
        assert res.score == 5.0
class TestGoogleMapsV1OutboundSchema(object):
    """Tests for the outbound (request) schema of the Google Maps v1 API."""

    def _call(self, *args, **kw):
        # Helper: deserialize through the schema under test.
        return GOOGLEMAPS_V1_OUTBOUND_SCHEMA.deserialize(*args, **kw)
    def test_empty(self):
        # Empty or unknown input serializes to just considerIp=False.
        assert self._call({}) == {"considerIp": False}
        assert self._call({"unknown_field": 1}) == {"considerIp": False}
    def test_fallback(self):
        # The ipf/lacf fallback flags are dropped for this provider.
        assert self._call({"fallbacks": {"ipf": True, "lacf": False}}) == {
            "considerIp": False
        }
    def test_query(self):
        query = Query()
        data = self._call(query.json())
        assert data == {"considerIp": False}
    def test_cell(self):
        # Known cell keys pass through; unknown_field is stripped.
        cell = CellShardFactory.build(radio=Radio.lte)
        query = Query(
            cell=[
                {
                    "radioType": cell.radio,
                    "mobileCountryCode": cell.mcc,
                    "mobileNetworkCode": cell.mnc,
                    "locationAreaCode": cell.lac,
                    "cellId": cell.cid,
                    "age": 1200,
                    "signalStrength": -70,
                    "timingAdvance": 15,
                    "unknown_field": "foo",
                }
            ]
        )
        data = self._call(query.json())
        assert data == {
            "cellTowers": [
                {
                    "radioType": cell.radio.name,
                    "mobileCountryCode": cell.mcc,
                    "mobileNetworkCode": cell.mnc,
                    "locationAreaCode": cell.lac,
                    "cellId": cell.cid,
                    "age": 1200,
                    "signalStrength": -70,
                    "timingAdvance": 15,
                }
            ],
            "considerIp": False,
        }
    def test_wifi(self):
        # Wifi networks serialize into the wifiAccessPoints list.
        wifis = WifiShardFactory.build_batch(2)
        query = Query(
            wifi=[
                {
                    "macAddress": wifi.mac,
                    "age": 2000,
                    "channel": 11,
                    "signalStrength": -90,
                    "signalToNoiseRatio": 13,
                }
                for wifi in wifis
            ]
        )
        data = self._call(query.json())
        assert data == {
            "wifiAccessPoints": [
                {
                    "macAddress": wifis[0].mac,
                    "age": 2000,
                    "channel": 11,
                    "signalStrength": -90,
                    "signalToNoiseRatio": 13,
                },
                {
                    "macAddress": wifis[1].mac,
                    "age": 2000,
                    "channel": 11,
                    "signalStrength": -90,
                    "signalToNoiseRatio": 13,
                },
            ],
            "considerIp": False,
        }
class TestIchnaeaV1ResultSchema(object):
    """Tests for the Ichnaea v1 result (response) schema."""

    def _call(self, *args, **kw):
        # Helper: deserialize through the schema under test.
        return ICHNAEA_V1_RESULT_SCHEMA.deserialize(*args, **kw)
    def test_empty(self):
        with pytest.raises(colander.Invalid):
            self._call({})
    def test_accuracy_float(self):
        data = self._call({"location": {"lat": 1.0, "lng": 1.0}, "accuracy": 11.6})
        assert data == {"lat": 1.0, "lon": 1.0, "accuracy": 11.6, "fallback": None}
    def test_accuracy_missing(self):
        with pytest.raises(colander.Invalid):
            self._call({"location": {"lat": 1.0, "lng": 1.0}, "fallback": "lacf"})
    def test_fallback(self):
        data = self._call(
            {"location": {"lat": 1.0, "lng": 1.0}, "accuracy": 10.0, "fallback": "lacf"}
        )
        assert data == {"lat": 1.0, "lon": 1.0, "accuracy": 10.0, "fallback": "lacf"}
    def test_fallback_invalid(self):
        # Unknown fallback names are normalized to None.
        data = self._call(
            {"location": {"lat": 1.0, "lng": 1.0}, "accuracy": 10.0, "fallback": "cidf"}
        )
        assert data == {"lat": 1.0, "lon": 1.0, "accuracy": 10.0, "fallback": None}
    def test_fallback_missing(self):
        data = self._call({"location": {"lat": 1.0, "lng": 1.0}, "accuracy": 10.0})
        assert data == {"lat": 1.0, "lon": 1.0, "accuracy": 10.0, "fallback": None}
    def test_location_incomplete(self):
        with pytest.raises(colander.Invalid):
            self._call({"location": {"lng": 1.0}, "accuracy": 10.0, "fallback": "lacf"})
    def test_location_missing(self):
        with pytest.raises(colander.Invalid):
            self._call({"accuracy": 10.0, "fallback": "lacf"})
class TestIchnaeaV1OutboundSchema(object):
    """Tests for the Ichnaea v1 outbound (request) schema."""

    def _call(self, *args, **kw):
        # Helper: deserialize through the schema under test.
        return ICHNAEA_V1_OUTBOUND_SCHEMA.deserialize(*args, **kw)
    def test_empty(self):
        with pytest.raises(colander.Invalid):
            self._call({})
        with pytest.raises(colander.Invalid):
            self._call({"unknown_field": 1})
    def test_fallback(self):
        # Only the lacf flag survives; ipf is stripped and lacf
        # defaults to True when absent.
        assert self._call({"fallbacks": {"ipf": False}}) == {
            "fallbacks": {"lacf": True}
        }
        assert self._call({"fallbacks": {"lacf": False}}) == {
            "fallbacks": {"lacf": False}
        }
        assert self._call({"fallbacks": {"ipf": True, "lacf": False}}) == {
            "fallbacks": {"lacf": False}
        }
    def test_query(self):
        query = Query()
        data = self._call(query.json())
        assert data == {"fallbacks": {"lacf": True}}
    def test_blue(self):
        # Bluetooth beacons serialize into the bluetoothBeacons list.
        blues = BlueShardFactory.build_batch(2)
        query = Query(
            blue=[
                {
                    "macAddress": blue.mac,
                    "age": 1500,
                    "name": "beacon",
                    "signalStrength": -90,
                }
                for blue in blues
            ]
        )
        data = self._call(query.json())
        assert data == {
            "bluetoothBeacons": [
                {
                    "macAddress": blues[0].mac,
                    "age": 1500,
                    "name": "beacon",
                    "signalStrength": -90,
                },
                {
                    "macAddress": blues[1].mac,
                    "age": 1500,
                    "name": "beacon",
                    "signalStrength": -90,
                },
            ],
            "fallbacks": {"lacf": True},
        }
    def test_cell(self):
        # Known cell keys pass through; a None asu and unknown_field
        # are dropped from the serialized output.
        cell = CellShardFactory.build(radio=Radio.lte)
        query = Query(
            cell=[
                {
                    "radioType": cell.radio,
                    "mobileCountryCode": cell.mcc,
                    "mobileNetworkCode": cell.mnc,
                    "locationAreaCode": cell.lac,
                    "cellId": cell.cid,
                    "age": 1200,
                    "asu": None,
                    "primaryScramblingCode": 5,
                    "signalStrength": -70,
                    "timingAdvance": 15,
                    "unknown_field": "foo",
                }
            ]
        )
        data = self._call(query.json())
        assert data == {
            "cellTowers": [
                {
                    "radioType": cell.radio.name,
                    "mobileCountryCode": cell.mcc,
                    "mobileNetworkCode": cell.mnc,
                    "locationAreaCode": cell.lac,
                    "cellId": cell.cid,
                    "primaryScramblingCode": 5,
                    "age": 1200,
                    "signalStrength": -70,
                    "timingAdvance": 15,
                }
            ],
            "fallbacks": {"lacf": True},
        }
    def test_wifi(self):
        # Wifi networks serialize into the wifiAccessPoints list.
        wifis = WifiShardFactory.build_batch(2)
        query = Query(
            wifi=[
                {
                    "macAddress": wifi.mac,
                    "age": 2000,
                    "signalStrength": -90,
                    "ssid": "wifi",
                }
                for wifi in wifis
            ]
        )
        data = self._call(query.json())
        assert data == {
            "wifiAccessPoints": [
                {
                    "macAddress": wifis[0].mac,
                    "age": 2000,
                    "signalStrength": -90,
                    "ssid": "wifi",
                },
                {
                    "macAddress": wifis[1].mac,
                    "age": 2000,
                    "signalStrength": -90,
                    "ssid": "wifi",
                },
            ],
            "fallbacks": {"lacf": True},
        }
class TestUnwiredlabsV1ResultSchema(object):
    """Tests for the UnwiredLabs v1 result (response) schema."""

    def _call(self, *args, **kw):
        # Helper: deserialize through the schema under test.
        return UNWIREDLABS_V1_RESULT_SCHEMA.deserialize(*args, **kw)
    def test_empty(self):
        with pytest.raises(colander.Invalid):
            self._call({})
    def test_not_found(self):
        # The provider signals "no match" via an error status with a
        # specific message; it maps to an all-None result.
        data = self._call({"status": "error", "message": "No matches found"})
        assert data == {"lat": None, "lon": None, "accuracy": None, "fallback": None}
    def test_status_error(self):
        # Any other error status is invalid even with position fields.
        with pytest.raises(colander.Invalid):
            self._call(
                {
                    "status": "error",
                    "message": "Invalid request",
                    "lat": 1.0,
                    "lon": 1.0,
                    "accuracy": 11,
                }
            )
    def test_status_missing(self):
        with pytest.raises(colander.Invalid):
            self._call({"lat": 1.0, "lon": 1.0, "accuracy": 11})
    def test_accuracy_float(self):
        data = self._call({"status": "ok", "lat": 1.0, "lon": 1.0, "accuracy": 11.6})
        assert data == {"lat": 1.0, "lon": 1.0, "accuracy": 11.6, "fallback": None}
    def test_accuracy_missing(self):
        with pytest.raises(colander.Invalid):
            self._call({"status": "ok", "lat": 1.0, "lon": 1.0})
    def test_fallback(self):
        data = self._call(
            {"status": "ok", "lat": 1.0, "lon": 1.0, "accuracy": 10, "fallback": "lacf"}
        )
        assert data == {"lat": 1.0, "lon": 1.0, "accuracy": 10.0, "fallback": "lacf"}
    def test_fallback_invalid(self):
        # Unknown fallback names are normalized to None.
        data = self._call(
            {"status": "ok", "lat": 1.0, "lon": 1.0, "accuracy": 10, "fallback": "scf"}
        )
        assert data == {"lat": 1.0, "lon": 1.0, "accuracy": 10.0, "fallback": None}
    def test_fallback_missing(self):
        data = self._call({"status": "ok", "lat": 1.0, "lon": 1.0, "accuracy": 10})
        assert data == {"lat": 1.0, "lon": 1.0, "accuracy": 10.0, "fallback": None}
    def test_lat_missing(self):
        with pytest.raises(colander.Invalid):
            self._call({"status": "ok", "lon": 1.0, "accuracy": 10})
    def test_lon_missing(self):
        with pytest.raises(colander.Invalid):
            self._call({"status": "ok", "lat": 1.0, "accuracy": 10})
class TestUnwiredlabsV1OutboundSchema(object):
    """Tests for the UnwiredLabs v1 outbound (request) schema."""

    def _call(self, *args, **kw):
        # Helper: deserialize through the schema under test.
        return UNWIREDLABS_V1_OUTBOUND_SCHEMA.deserialize(*args, **kw)
    def test_empty(self):
        with pytest.raises(colander.Invalid):
            self._call({})
        with pytest.raises(colander.Invalid):
            self._call({"unknown_field": 1})
    def test_fallback(self):
        # Only lacf survives; ipf is stripped, and a token slot is
        # always present (None unless supplied).
        assert self._call({"fallbacks": {"ipf": False}}) == {
            "fallbacks": {"lacf": True},
            "token": None,
        }
        assert self._call({"fallbacks": {"lacf": False}}) == {
            "fallbacks": {"lacf": False},
            "token": None,
        }
        assert self._call({"fallbacks": {"ipf": True, "lacf": False}}) == {
            "fallbacks": {"lacf": False},
            "token": None,
        }
    def test_query(self):
        query = Query()
        data = self._call(query.json())
        assert data == {"fallbacks": {"lacf": True}, "token": None}
    def test_cell(self):
        # Cell keys are renamed to the provider's short names
        # (radio/mcc/mnc/lac/cid/psc/signal/tA).
        cell = CellShardFactory.build(radio=Radio.lte)
        query = Query(
            cell=[
                {
                    "radioType": cell.radio,
                    "mobileCountryCode": cell.mcc,
                    "mobileNetworkCode": cell.mnc,
                    "locationAreaCode": cell.lac,
                    "cellId": cell.cid,
                    "asu": 17,
                    "primaryScramblingCode": 5,
                    "signalStrength": -70,
                    "timingAdvance": 15,
                    "unknown_field": "foo",
                }
            ]
        )
        data = self._call(query.json())
        assert data == {
            "cells": [
                {
                    "radio": cell.radio.name,
                    "mcc": cell.mcc,
                    "mnc": cell.mnc,
                    "lac": cell.lac,
                    "cid": cell.cid,
                    "asu": 17,
                    "psc": 5,
                    "signal": -70,
                    "tA": 15,
                }
            ],
            "fallbacks": {"lacf": True},
            "token": None,
        }
    def test_wifi(self):
        # Wifi keys are renamed (macAddress -> bssid, signalStrength ->
        # signal); ssid is dropped.
        wifis = WifiShardFactory.build_batch(2)
        query = Query(
            wifi=[
                {
                    "macAddress": wifi.mac,
                    "channel": 3,
                    "frequency": 2412,
                    "signalStrength": -70,
                    "signalToNoiseRatio": 13,
                    "ssid": "wifi",
                }
                for wifi in wifis
            ]
        )
        data = self._call(query.json())
        assert data == {
            "wifi": [
                {
                    "bssid": wifis[0].mac,
                    "channel": 3,
                    "frequency": 2412,
                    "signal": -70,
                    "signalToNoiseRatio": 13,
                },
                {
                    "bssid": wifis[1].mac,
                    "channel": 3,
                    "frequency": 2412,
                    "signal": -70,
                    "signalToNoiseRatio": 13,
                },
            ],
            "fallbacks": {"lacf": True},
            "token": None,
        }
@pytest.fixture(scope="function")
def cache(raven, redis, session):
    # FallbackCache using the default (Ichnaea) schema.
    yield FallbackCache(raven, redis)
@pytest.fixture(scope="function")
def unwiredlabs_cache(raven, redis, session):
    # FallbackCache keyed to the UnwiredLabs schema, so its Redis keys
    # do not collide with the default cache's.
    yield FallbackCache(raven, redis, schema=UNWIREDLABS_V1_SCHEMA)
class TestCache(QueryTest):
    """Tests for the FallbackCache get/set behavior and its metrics."""

    # Metric tag for the default test API key's fallback provider.
    fallback_tag = "fallback_name:fall"
    def _query(self, **kwargs):
        # Build a Query with a caching-enabled API key unless one was
        # passed explicitly.
        if "api_key" not in kwargs:
            kwargs["api_key"] = KeyFactory(fallback_cache_expire=60)
        return Query(**kwargs)
    def test_get_blue(self, cache, metricsmock):
        """An uncached blue-only query is a cache miss."""
        blues = BlueShardFactory.build_batch(2)
        query = self._query(blue=self.blue_model_query(blues))
        assert cache.get(query) is None
        metricsmock.assert_incr_once(
            "locate.fallback.cache", tags=[self.fallback_tag, "status:miss"]
        )
    def test_set_blue(self, cache, metricsmock):
        """A stored blue result is returned on the next identical query."""
        blues = BlueShardFactory.build_batch(2)
        blue = blues[0]
        query = self._query(blue=self.blue_model_query(blues))
        result = ExternalResult(blue.lat, blue.lon, blue.radius, None)
        cache.set(query, result)
        assert cache.get(query) == result
        metricsmock.assert_incr_once(
            "locate.fallback.cache", tags=[self.fallback_tag, "status:hit"]
        )
    def test_get_cell(self, cache, metricsmock):
        """An uncached single-cell query is a cache miss."""
        cells = CellShardFactory.build_batch(1)
        query = self._query(cell=self.cell_model_query(cells))
        assert cache.get(query) is None
        metricsmock.assert_incr_once(
            "locate.fallback.cache", tags=[self.fallback_tag, "status:miss"]
        )
    def test_set_cell(self, cache, redis, metricsmock):
        """A stored cell result lands under the ichnaea key with a TTL."""
        cell = CellShardFactory.build()
        query = self._query(cell=self.cell_model_query([cell]))
        result = ExternalResult(cell.lat, cell.lon, cell.radius, None)
        cache.set(query, result, expire=60)
        keys = redis.keys("cache:fallback:ichnaea:v1:1:cell:*")
        assert len(keys) == 1
        assert 50 < redis.ttl(keys[0]) <= 60
        assert cache.get(query) == result
        metricsmock.assert_incr_once(
            "locate.fallback.cache", tags=[self.fallback_tag, "status:hit"]
        )
    def test_get_cell_unwiredlabs(self, unwiredlabs_cache, metricsmock):
        """The UnwiredLabs cache reports misses under its own tag."""
        cells = CellShardFactory.build_batch(1)
        query = self._query(api_key=UNWIREDLABS_KEY, cell=self.cell_model_query(cells))
        assert unwiredlabs_cache.get(query) is None
        metricsmock.assert_incr_once(
            "locate.fallback.cache", tags=["fallback_name:labs", "status:miss"]
        )
    def test_set_cell_unwiredlabs(self, unwiredlabs_cache, redis, metricsmock):
        """UnwiredLabs results cache under a provider-specific key prefix."""
        cell = CellShardFactory.build()
        query = self._query(api_key=UNWIREDLABS_KEY, cell=self.cell_model_query([cell]))
        result = ExternalResult(cell.lat, cell.lon, cell.radius, None)
        unwiredlabs_cache.set(query, result, expire=60)
        keys = redis.keys("cache:fallback:unwiredlabs:v1:1:cell:*")
        assert len(keys) == 1
        assert 50 < redis.ttl(keys[0]) <= 60
        assert unwiredlabs_cache.get(query) == result
        metricsmock.assert_incr_once(
            "locate.fallback.cache", tags=["fallback_name:labs", "status:hit"]
        )
    def test_set_cell_not_found(self, cache, redis, metricsmock):
        """Not-found results are cached as the sentinel string '404'."""
        cell = CellShardFactory.build()
        query = self._query(cell=self.cell_model_query([cell]))
        result = ExternalResult(None, None, None, None)
        cache.set(query, result)
        keys = redis.keys("cache:fallback:ichnaea:v1:1:cell:*")
        assert len(keys) == 1
        assert redis.get(keys[0]) == b'"404"'
        assert cache.get(query) == result
        metricsmock.assert_incr_once(
            "locate.fallback.cache", tags=[self.fallback_tag, "status:hit"]
        )
    def test_get_cell_multi(self, cache, metricsmock):
        """Queries with multiple cells bypass the cache."""
        cells = CellShardFactory.build_batch(2)
        query = self._query(cell=self.cell_model_query(cells))
        assert cache.get(query) is None
        metricsmock.assert_incr_once(
            "locate.fallback.cache", tags=[self.fallback_tag, "status:bypassed"]
        )
    def test_get_wifi(self, cache, metricsmock):
        """An uncached wifi query is a cache miss."""
        wifis = WifiShardFactory.build_batch(2)
        query = self._query(wifi=self.wifi_model_query(wifis))
        assert cache.get(query) is None
        metricsmock.assert_incr_once(
            "locate.fallback.cache", tags=[self.fallback_tag, "status:miss"]
        )
    def test_set_wifi(self, cache, metricsmock):
        """A stored wifi result is returned on the next identical query."""
        wifis = WifiShardFactory.build_batch(2)
        wifi = wifis[0]
        query = self._query(wifi=self.wifi_model_query(wifis))
        result = ExternalResult(wifi.lat, wifi.lon, wifi.radius, None)
        cache.set(query, result)
        assert cache.get(query) == result
        metricsmock.assert_incr_once(
            "locate.fallback.cache", tags=[self.fallback_tag, "status:hit"]
        )
    def test_set_wifi_inconsistent(self, cache, metricsmock):
        """Combined wifi queries merge nearby cached positions but reject
        cached entries that disagree too much."""
        wifis1 = WifiShardFactory.build_batch(2)
        cache.set(
            self._query(wifi=self.wifi_model_query(wifis1)),
            ExternalResult(wifis1[0].lat, wifis1[0].lon, 100, None),
        )
        # similar lat/lon, worse accuracy
        wifis2 = WifiShardFactory.build_batch(
            2, lat=wifis1[0].lat + 0.0001, lon=wifis1[0].lon
        )
        cache.set(
            self._query(wifi=self.wifi_model_query(wifis2)),
            ExternalResult(wifis2[0].lat, wifis2[0].lon, 200, None),
        )
        # check combined query, avg lat/lon, max accuracy
        query = self._query(wifi=self.wifi_model_query(wifis1 + wifis2))
        cached = cache.get(query)
        assert cached[0] == (wifis1[0].lat + wifis2[0].lat) / 2.0
        assert cached[1] == wifis1[0].lon
        assert round(cached[2], 2) == 205.56
        assert cached[3] is None
        # different lat/lon
        wifis3 = WifiShardFactory.build_batch(2, lat=wifis1[0].lat + 10.0)
        cache.set(
            self._query(wifi=self.wifi_model_query(wifis3)),
            ExternalResult(wifis3[0].lat, wifis3[0].lon, 300, None),
        )
        # check combined query, inconsistent result
        query = self._query(wifi=self.wifi_model_query(wifis1 + wifis2 + wifis3))
        assert cache.get(query) is None
        metricsmock.assert_incr_once(
            "locate.fallback.cache", tags=[self.fallback_tag, "status:hit"]
        )
        metricsmock.assert_incr_once(
            "locate.fallback.cache", tags=[self.fallback_tag, "status:inconsistent"]
        )
    @pytest.mark.parametrize(
        "mix1,mix2", [("cell", "wifi"), ("blue", "cell"), ("blue", "wifi")]
    )
    def test_get_mixed(self, cache, metricsmock, mix1, mix2):
        """A fallback query with mixed station types is not cached."""
        kwargs = {}
        mix = set((mix1, mix2))
        assert len(mix) == 2
        if "cell" in mix:
            kwargs["cell"] = self.cell_model_query(CellShardFactory.build_batch(1))
        if "blue" in mix:
            kwargs["blue"] = self.blue_model_query(BlueShardFactory.build_batch(2))
        if "wifi" in mix:
            kwargs["wifi"] = self.wifi_model_query(WifiShardFactory.build_batch(2))
        query = self._query(**kwargs)
        assert cache.get(query) is None
        metricsmock.assert_incr_once(
            "locate.fallback.cache", tags=[self.fallback_tag, "status:bypassed"]
        )
class BaseFallbackTest(object):
    """Shared fixtures and tests for fallback position source subclasses."""

    # API key permitted to use the external fallback provider.
    api_key = KeyFactory(valid_key="test", allow_fallback=True)
    # Position the mocked provider reports back.
    fallback_model = DummyModel(lat=51.5366, lon=0.03989, radius=1500.0)
    # HTTP status the provider uses to signal "no position found".
    fallback_not_found_status = 404
    Source = FallbackPositionSource
    @property
    def fallback_tag(self):
        # Metric tag derived from the key's configured provider name.
        return "fallback_name:%s" % self.api_key.fallback_name
    @property
    def fallback_result(self):
        # Successful provider response body matching fallback_model.
        return {
            "location": {
                "lat": self.fallback_model.lat,
                "lng": self.fallback_model.lon,
            },
            "accuracy": float(self.fallback_model.radius),
            "fallback": "lacf",
        }
    @property
    def fallback_not_found(self):
        # Not-found provider response body.
        return LocationNotFound().json_body()
    def _check_success_fallbacks(self, request_json):
        # The outbound payload keeps the fallback flags and is not the
        # Google-style considerIp form.
        assert "considerIp" not in request_json
        assert request_json["fallbacks"] == {"lacf": True, "ipf": False}
    def test_success(self, geoip_db, http_session, session, source, metricsmock):
        """A successful lookup returns the provider's position."""
        cell = CellShardFactory.build()
        with requests_mock.Mocker() as mock_request:
            mock_request.register_uri(
                "POST", requests_mock.ANY, json=self.fallback_result
            )
            query = self.model_query(
                geoip_db,
                http_session,
                session,
                cells=[cell],
                fallback={"lacf": True, "ipf": False},
            )
            results = source.search(query)
            self.check_model_results(results, [self.fallback_model])
            assert results.best().score == 5.0
            request_json = mock_request.request_history[0].json()
            self._check_success_fallbacks(request_json)
        metricsmock.assert_incr_once(
            "locate.fallback.cache", tags=[self.fallback_tag, "status:miss"]
        )
        metricsmock.assert_timing_once(
            "locate.fallback.lookup.timing", tags=[self.fallback_tag]
        )
    def test_cache_empty_result(
        self, geoip_db, http_session, session, source, metricsmock
    ):
        """A not-found answer is cached; the second query skips the call."""
        cell = CellShardFactory.build()
        with requests_mock.Mocker() as mock_request:
            mock_request.register_uri(
                "POST",
                requests_mock.ANY,
                json=self.fallback_not_found,
                status_code=self.fallback_not_found_status,
            )
            query = self.model_query(geoip_db, http_session, session, cells=[cell])
            results = source.search(query)
            self.check_model_results(results, None)
            assert mock_request.call_count == 1
            metricsmock.assert_incr_once(
                "locate.fallback.cache", tags=[self.fallback_tag, "status:miss"]
            )
            metricsmock.assert_incr_once(
                "locate.fallback.lookup",
                tags=[self.fallback_tag, "status:%s" % self.fallback_not_found_status],
            )
            metricsmock.clear_records()
            query = self.model_query(geoip_db, http_session, session, cells=[cell])
            results = source.search(query)
            self.check_model_results(results, None)
            assert mock_request.call_count == 1
            metricsmock.assert_incr_once(
                "locate.fallback.cache", tags=[self.fallback_tag, "status:hit"]
            )
class TestDefaultFallback(BaseFallbackTest, BaseSourceTest):
    @property
    def fallback_cached_result(self):
        """JSON-encoded cached form of the fallback result fixture."""
        return json.dumps(
            {
                "lat": self.fallback_model.lat,
                "lon": self.fallback_model.lon,
                "accuracy": float(self.fallback_model.radius),
                "fallback": "lacf",
            }
        )
    def test_failed_call(self, geoip_db, http_session, raven, session, source):
        """A transport-level error yields no result and is sent to Sentry."""
        cell = CellShardFactory.build()
        with requests_mock.Mocker() as mock_request:
            def raise_request_exception(request, context):
                raise RequestException()
            mock_request.register_uri(
                "POST", requests_mock.ANY, json=raise_request_exception
            )
            query = self.model_query(geoip_db, http_session, session, cells=[cell])
            results = source.search(query)
            self.check_model_results(results, None)
        raven.check([("RequestException", 1)])
    def test_invalid_json(self, geoip_db, http_session, raven, session, source):
        """A JSON body of the wrong shape fails schema validation."""
        cell = CellShardFactory.build()
        with requests_mock.Mocker() as mock_request:
            mock_request.register_uri("POST", requests_mock.ANY, json=["invalid json"])
            query = self.model_query(geoip_db, http_session, session, cells=[cell])
            results = source.search(query)
            self.check_model_results(results, None)
        raven.check([("Invalid", 1)])
    def test_malformed_json(self, geoip_db, http_session, raven, session, source):
        """A body that is not valid JSON raises a decode error internally."""
        cell = CellShardFactory.build()
        with requests_mock.Mocker() as mock_request:
            mock_request.register_uri(
                "POST", requests_mock.ANY, content=b"[invalid json"
            )
            query = self.model_query(geoip_db, http_session, session, cells=[cell])
            results = source.search(query)
            self.check_model_results(results, None)
        raven.check([("JSONDecodeError", 1)])
    def test_403_response(
        self, geoip_db, http_session, raven, session, source, metricsmock
    ):
        """A 403 from the provider is reported and counted in metrics."""
        cell = CellShardFactory.build()
        with requests_mock.Mocker() as mock_request:
            mock_request.register_uri("POST", requests_mock.ANY, status_code=403)
            query = self.model_query(geoip_db, http_session, session, cells=[cell])
            results = source.search(query)
            self.check_model_results(results, None)
        raven.check([("HTTPError", 1)])
        metricsmock.assert_incr_once(
            "locate.fallback.lookup", tags=[self.fallback_tag, "status:403"]
        )
    def test_404_response(
        self, geoip_db, http_session, raven, session, source, metricsmock
    ):
        """A 404 not-found answer is expected and not reported to Sentry."""
        cell = CellShardFactory.build()
        with requests_mock.Mocker() as mock_request:
            mock_request.register_uri(
                "POST",
                requests_mock.ANY,
                json=LocationNotFound().json_body(),
                status_code=404,
            )
            query = self.model_query(geoip_db, http_session, session, cells=[cell])
            results = source.search(query)
            self.check_model_results(results, None)
        raven.check([("HTTPError", 0)])
        metricsmock.assert_incr_once(
            "locate.fallback.lookup", tags=[self.fallback_tag, "status:404"]
        )
    def test_500_response(
        self, geoip_db, http_session, raven, session, source, metricsmock
    ):
        """A 500 is reported to Sentry and counted, and timing is recorded."""
        cell = CellShardFactory.build()
        with requests_mock.Mocker() as mock_request:
            mock_request.register_uri("POST", requests_mock.ANY, status_code=500)
            query = self.model_query(geoip_db, http_session, session, cells=[cell])
            results = source.search(query)
            self.check_model_results(results, None)
        raven.check([("HTTPError", 1)])
        metricsmock.assert_incr_once(
            "locate.fallback.lookup", tags=[self.fallback_tag, "status:500"]
        )
        metricsmock.assert_timing_once(
            "locate.fallback.lookup.timing", tags=[self.fallback_tag]
        )
    def test_api_key_disallows(self, geoip_db, http_session, session, source):
        """Keys without allow_fallback never trigger a fallback search."""
        api_key = KeyFactory(allow_fallback=False)
        cells = CellShardFactory.build_batch(2)
        wifis = WifiShardFactory.build_batch(2)
        query = self.model_query(
            geoip_db, http_session, session, cells=cells, wifis=wifis, api_key=api_key
        )
        self.check_should_search(source, query, False)
    def test_check_one_blue(self, geoip_db, http_session, session, source):
        """A single bluetooth beacon is not enough to search."""
        blue = BlueShardFactory.build()
        query = self.model_query(geoip_db, http_session, session, blues=[blue])
        self.check_should_search(source, query, False)
    def test_check_one_wifi(self, geoip_db, http_session, session, source):
        """A single wifi network is not enough to search."""
        wifi = WifiShardFactory.build()
        query = self.model_query(geoip_db, http_session, session, wifis=[wifi])
        self.check_should_search(source, query, False)
    def test_check_empty(self, geoip_db, http_session, session, source):
        """An empty query never triggers a fallback search."""
        query = self.model_query(geoip_db, http_session, session)
        self.check_should_search(source, query, False)
    def test_check_invalid_cell(self, geoip_db, http_session, session, source):
        """A cell with an out-of-range MCC does not trigger a search."""
        malformed_cell = CellShardFactory.build()
        malformed_cell.mcc = 99999
        query = self.model_query(
            geoip_db, http_session, session, cells=[malformed_cell]
        )
        self.check_should_search(source, query, False)
    def test_check_invalid_wifi(self, geoip_db, http_session, session, source):
        """One valid plus one malformed wifi is still not enough to search."""
        wifi = WifiShardFactory.build()
        malformed_wifi = WifiShardFactory.build()
        malformed_wifi.mac = "abcd"
        query = self.model_query(
            geoip_db, http_session, session, wifis=[wifi, malformed_wifi]
        )
        self.check_should_search(source, query, False)
    def test_check_empty_result(self, geoip_db, http_session, session, source):
        """With no prior result, two wifi networks are enough to search."""
        wifis = WifiShardFactory.build_batch(2)
        query = self.model_query(geoip_db, http_session, session, wifis=wifis)
        self.check_should_search(source, query, True)
    def test_check_geoip_result(
        self, london_model, geoip_db, http_session, session, source
    ):
        """A GeoIP-only prior result still allows a fallback search."""
        wifis = WifiShardFactory.build_batch(2)
        results = PositionResultList(
            Position(
                source=DataSource.geoip,
                lat=london_model.lat,
                lon=london_model.lon,
                accuracy=float(london_model.radius),
                score=0.6,
            )
        )
        query = self.model_query(
            geoip_db, http_session, session, wifis=wifis, ip=london_model.ip
        )
        self.check_should_search(source, query, True, results=results)
    def test_check_already_good_result(self, geoip_db, http_session, session, source):
        """An accurate internal result suppresses the fallback search."""
        wifis = WifiShardFactory.build_batch(2)
        results = PositionResultList(
            Position(
                source=DataSource.internal, lat=1.0, lon=1.0, accuracy=100.0, score=1.0
            )
        )
        query = self.model_query(geoip_db, http_session, session, wifis=wifis)
        self.check_should_search(source, query, False, results=results)
    def test_rate_limit_allow(self, geoip_db, http_session, session, source):
        """Searches up to the key's rate limit all succeed."""
        cell = CellShardFactory()
        with requests_mock.Mocker() as mock_request:
            mock_request.register_uri(
                "POST", requests_mock.ANY, json=self.fallback_result
            )
            for _ in range(self.api_key.fallback_ratelimit):
                query = self.model_query(geoip_db, http_session, session, cells=[cell])
                results = source.search(query)
                self.check_model_results(results, [self.fallback_model])
def test_rate_limit_blocks(self, geoip_db, http_session, redis, session, source):
cell = CellShardFactory()
with requests_mock.Mocker() as mock_request:
mock_request.register_uri(
"POST", requests_mock.ANY, json=self.fallback_result
)
ratelimit_key = source._ratelimit_key(
self.api_key.fallback_name, self.api_key.fallback_ratelimit_interval
)
redis.set(ratelimit_key, self.api_key.fallback_ratelimit)
query = self.model_query(geoip_db, http_session, session, cells=[cell])
results = source.search(query)
self.check_model_results(results, None)
def test_rate_limit_redis_failure(self, geoip_db, http_session, session, source):
cell = CellShardFactory.build()
mock_redis_client = _mock_redis_client()
mock_redis_client.pipeline.side_effect = RedisError()
with requests_mock.Mocker() as mock_request:
mock_request.register_uri(
"POST", requests_mock.ANY, json=self.fallback_result
)
with mock.patch.object(source, "redis_client", mock_redis_client):
query = self.model_query(geoip_db, http_session, session, cells=[cell])
results = source.search(query)
self.check_model_results(results, None)
assert mock_redis_client.pipeline.called
assert not mock_request.called
    def test_get_cache_redis_failure(
        self, geoip_db, http_session, raven, session, source, metricsmock
    ):
        """A Redis error while reading the cache falls through to the network.

        The lookup still succeeds via the fallback provider; the error is
        reported to Sentry and a cache ``status:failure`` metric is emitted.
        """
        cell = CellShardFactory.build()
        mock_redis_client = _mock_redis_client()
        # Make every cache read blow up.
        mock_redis_client.mget.side_effect = RedisError()
        with requests_mock.Mocker() as mock_request:
            mock_request.register_uri(
                "POST", requests_mock.ANY, json=self.fallback_result
            )
            with mock.patch.object(
                source.caches[DEFAULT_SCHEMA], "redis_client", mock_redis_client
            ):
                query = self.model_query(geoip_db, http_session, session, cells=[cell])
                results = source.search(query)
                self.check_model_results(results, [self.fallback_model])
            # The cache read was attempted, then the external request was made.
            assert mock_redis_client.mget.called
            assert mock_request.called
        raven.check([("RedisError", 1)])
        metricsmock.assert_incr_once(
            "locate.fallback.cache", tags=[self.fallback_tag, "status:failure"]
        )
    def test_set_cache_redis_failure(
        self, geoip_db, http_session, raven, session, source, metricsmock
    ):
        """A Redis error while writing the cache does not break the lookup.

        The read misses, the write fails, yet the network result is still
        returned; the error goes to Sentry and a ``status:miss`` metric is
        emitted.
        """
        cell = CellShardFactory.build()
        mock_redis_client = _mock_redis_client()
        # Empty read -> cache miss; every write-path call raises.
        mock_redis_client.mget.return_value = []
        mock_redis_client.mset.side_effect = RedisError()
        mock_redis_client.expire.side_effect = RedisError()
        mock_redis_client.execute.side_effect = RedisError()
        with requests_mock.Mocker() as mock_request:
            mock_request.register_uri(
                "POST", requests_mock.ANY, json=self.fallback_result
            )
            with mock.patch.object(
                source.caches[DEFAULT_SCHEMA], "redis_client", mock_redis_client
            ):
                query = self.model_query(geoip_db, http_session, session, cells=[cell])
                results = source.search(query)
                self.check_model_results(results, [self.fallback_model])
            assert mock_redis_client.mget.called
            assert mock_redis_client.mset.called
            assert mock_request.called
        raven.check([("RedisError", 1)])
        metricsmock.assert_incr_once(
            "locate.fallback.cache", tags=[self.fallback_tag, "status:miss"]
        )
    def test_cache_single_cell(
        self, geoip_db, http_session, session, source, metricsmock
    ):
        """A single-cell query is cached; a repeat query is served from cache.

        Signal strength is deliberately excluded from the cache key, so a
        second query differing only in signal strength must not trigger a
        second network request.
        """
        cell = CellShardFactory.build()
        with requests_mock.Mocker() as mock_request:
            mock_request.register_uri(
                "POST", requests_mock.ANY, json=self.fallback_result
            )
            query = self.model_query(geoip_db, http_session, session, cells=[cell])
            query.cell[0].signalStrength = -77
            results = source.search(query)
            self.check_model_results(results, [self.fallback_model])
            assert results.best().score == 5.0
            # First search: cache miss, exactly one upstream request.
            assert mock_request.call_count == 1
            metricsmock.assert_incr_once(
                "locate.fallback.cache", tags=["fallback_name:fall", "status:miss"]
            )
            metricsmock.assert_incr_once(
                "locate.fallback.lookup", tags=["fallback_name:fall", "status:200"]
            )
            metricsmock.assert_timing_once(
                "locate.fallback.lookup.timing", tags=["fallback_name:fall"]
            )
            metricsmock.clear_records()
            # vary the signal strength, not part of cache key
            query.cell[0].signalStrength = -82
            results = source.search(query)
            self.check_model_results(results, [self.fallback_model])
            assert results.best().score == 5.0
            # Still one upstream request: the second search hit the cache.
            assert mock_request.call_count == 1
            metricsmock.assert_incr_once(
                "locate.fallback.cache", tags=["fallback_name:fall", "status:hit"]
            )
            metricsmock.assert_incr_once(
                "locate.source",
                tags=["key:test", "source:fallback", "accuracy:medium", "status:hit"],
            )
def test_dont_recache(self, geoip_db, http_session, session, source, metricsmock):
cell = CellShardFactory.build()
mock_redis_client = _mock_redis_client()
mock_redis_client.mget.return_value = [self.fallback_cached_result]
with requests_mock.Mocker() as mock_request:
mock_request.register_uri(
"POST", requests_mock.ANY, json=self.fallback_result
)
with mock.patch.object(
source.caches[DEFAULT_SCHEMA], "redis_client", mock_redis_client
):
query = self.model_query(geoip_db, http_session, session, cells=[cell])
results = source.search(query)
self.check_model_results(results, [self.fallback_model])
assert mock_redis_client.mget.called
assert not mock_redis_client.mset.called
metricsmock.assert_incr_once(
"locate.fallback.cache", tags=[self.fallback_tag, "status:hit"]
)
class TestCombainV1Fallback(BaseFallbackTest, BaseSourceTest):
    """Run the shared fallback test suite against the Combain v1 schema."""

    api_key = KeyFactory(fallback_name="combain", fallback_schema=COMBAIN_V1_SCHEMA)
class TestGoogleMapsV1Fallback(BaseFallbackTest, BaseSourceTest):
    """Run the shared fallback test suite against the Google Maps v1 schema."""

    api_key = KeyFactory(
        fallback_schema=GOOGLEMAPS_V1_SCHEMA, fallback_name="googlemaps"
    )

    def _check_success_fallbacks(self, request_json):
        # The Google schema carries no "fallbacks" field and must not
        # allow IP-based positioning.
        assert "fallbacks" not in request_json
        assert request_json["considerIp"] is False
class TestUnwiredLabsV1Fallback(BaseFallbackTest, BaseSourceTest):
    """Run the shared fallback test suite against the UnwiredLabs v1 schema."""

    api_key = UNWIREDLABS_KEY
    # UnwiredLabs signals "not found" with an HTTP 200 plus an error body.
    fallback_not_found_status = 200

    @property
    def fallback_result(self):
        model = self.fallback_model
        return {
            "lat": model.lat,
            "lon": model.lon,
            "accuracy": int(model.radius),
            "fallback": "lacf",
            "status": "ok",
        }

    @property
    def fallback_not_found(self):
        body = {"status": "error", "message": "No matches found"}
        return body
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated by generateDS.py.
#
import sys
import getopt
import re as re_
# Module-level parser state: etree_ holds whichever ElementTree-compatible
# implementation was importable, XMLParser_import_library records which one.
etree_ = None
Verbose_import_ = False
( XMLParser_import_none, XMLParser_import_lxml,
    XMLParser_import_elementtree
) = range(3)
XMLParser_import_library = None

# Try the candidate XML libraries from most to least preferred:
# lxml, then the stdlib C and pure-Python ElementTree, then the
# standalone (pre-2.5) cElementTree/elementtree packages.
try:
    # lxml
    from lxml import etree as etree_
    XMLParser_import_library = XMLParser_import_lxml
    if Verbose_import_:
        print("running with lxml.etree")
except ImportError:
    try:
        # cElementTree from Python 2.5+
        import xml.etree.cElementTree as etree_
        XMLParser_import_library = XMLParser_import_elementtree
        if Verbose_import_:
            print("running with cElementTree on Python 2.5+")
    except ImportError:
        try:
            # ElementTree from Python 2.5+
            import xml.etree.ElementTree as etree_
            XMLParser_import_library = XMLParser_import_elementtree
            if Verbose_import_:
                print("running with ElementTree on Python 2.5+")
        except ImportError:
            try:
                # normal cElementTree install
                import cElementTree as etree_
                XMLParser_import_library = XMLParser_import_elementtree
                if Verbose_import_:
                    print("running with cElementTree")
            except ImportError:
                try:
                    # normal ElementTree install
                    import elementtree.ElementTree as etree_
                    XMLParser_import_library = XMLParser_import_elementtree
                    if Verbose_import_:
                        print("running with ElementTree")
                except ImportError:
                    raise ImportError("Failed to import ElementTree from any known place")
def parsexml_(*args, **kwargs):
    """Parse an XML document with whichever etree implementation was loaded."""
    using_lxml = XMLParser_import_library == XMLParser_import_lxml
    if using_lxml and 'parser' not in kwargs:
        # Use the lxml ElementTree compatible parser so that, e.g.,
        # we ignore comments.
        kwargs['parser'] = etree_.ETCompatXMLParser()
    return etree_.parse(*args, **kwargs)
#
# User methods
#
# Calls to the methods in these classes are generated by generateDS.py.
# You can replace these methods by re-implementing the following class
# in a module named generatedssuper.py.
try:
    from generatedssuper import GeneratedsSuper
except ImportError, exp:
    # Fallback used when no project-local generatedssuper module is
    # installed to override these hooks.  NOTE: this file is Python 2
    # (old-style except binding, basestring elsewhere).
    class GeneratedsSuper(object):
        """Default format/validate hooks shared by every generated class.

        The gds_format_* methods turn Python values into output strings;
        the gds_validate_* methods check parsed text and return it.
        """
        def gds_format_string(self, input_data, input_name=''):
            return input_data
        def gds_validate_string(self, input_data, node, input_name=''):
            return input_data
        def gds_format_integer(self, input_data, input_name=''):
            return '%d' % input_data
        def gds_validate_integer(self, input_data, node, input_name=''):
            return input_data
        def gds_format_integer_list(self, input_data, input_name=''):
            return '%s' % input_data
        def gds_validate_integer_list(self, input_data, node, input_name=''):
            # NOTE(review): items are checked with float(), so "1.5" passes
            # despite the "sequence of integers" message -- confirm intended.
            values = input_data.split()
            for value in values:
                try:
                    fvalue = float(value)
                except (TypeError, ValueError), exp:
                    raise_parse_error(node, 'Requires sequence of integers')
            return input_data
        def gds_format_float(self, input_data, input_name=''):
            return '%f' % input_data
        def gds_validate_float(self, input_data, node, input_name=''):
            return input_data
        def gds_format_float_list(self, input_data, input_name=''):
            return '%s' % input_data
        def gds_validate_float_list(self, input_data, node, input_name=''):
            values = input_data.split()
            for value in values:
                try:
                    fvalue = float(value)
                except (TypeError, ValueError), exp:
                    raise_parse_error(node, 'Requires sequence of floats')
            return input_data
        def gds_format_double(self, input_data, input_name=''):
            return '%e' % input_data
        def gds_validate_double(self, input_data, node, input_name=''):
            return input_data
        def gds_format_double_list(self, input_data, input_name=''):
            return '%s' % input_data
        def gds_validate_double_list(self, input_data, node, input_name=''):
            values = input_data.split()
            for value in values:
                try:
                    fvalue = float(value)
                except (TypeError, ValueError), exp:
                    raise_parse_error(node, 'Requires sequence of doubles')
            return input_data
        def gds_format_boolean(self, input_data, input_name=''):
            return '%s' % input_data
        def gds_validate_boolean(self, input_data, node, input_name=''):
            return input_data
        def gds_format_boolean_list(self, input_data, input_name=''):
            return '%s' % input_data
        def gds_validate_boolean_list(self, input_data, node, input_name=''):
            values = input_data.split()
            for value in values:
                if value not in ('true', '1', 'false', '0', ):
                    raise_parse_error(node, 'Requires sequence of booleans ("true", "1", "false", "0")')
            return input_data
        def gds_str_lower(self, instring):
            return instring.lower()
        def get_path_(self, node):
            # Build a /-separated path from the document root down to node.
            path_list = []
            self.get_path_list_(node, path_list)
            path_list.reverse()
            path = '/'.join(path_list)
            return path
        # Strips the '{namespace}' prefix from a qualified tag name.
        Tag_strip_pattern_ = re_.compile(r'\{.*\}')
        def get_path_list_(self, node, path_list):
            # Walk ancestors, collecting local tag names (leaf first).
            # NOTE(review): node.getparent() is lxml-specific; plain
            # ElementTree nodes have no such method -- confirm lxml is in use.
            if node is None:
                return
            tag = GeneratedsSuper.Tag_strip_pattern_.sub('', node.tag)
            if tag:
                path_list.append(tag)
            self.get_path_list_(node.getparent(), path_list)
#
# If you have installed IPython you can uncomment and use the following.
# IPython is available from http://ipython.scipy.org/.
#
## from IPython.Shell import IPShellEmbed
## args = ''
## ipshell = IPShellEmbed(args,
## banner = 'Dropping into IPython',
## exit_msg = 'Leaving Interpreter, back to program.')
# Then use the following line where and when you want to drop into the
# IPython shell:
# ipshell('<some message> -- Entering ipshell.\nHit Ctrl-D to exit')
#
# Globals
#
# Encoding applied when exported text is written out.
ExternalEncoding = 'ascii'
# Splits a qualified tag into its optional '{namespace}' part and local name.
Tag_pattern_ = re_.compile(r'({.*})?(.*)')
# Matches runs of whitespace (including newlines) for cleanup.
STRING_CLEANUP_PAT = re_.compile(r"[\n\r\s]+")
#
# Support/utility functions.
#
def showIndent(outfile, level):
    """Write four spaces of indentation per *level* to *outfile*."""
    for _ in range(level):
        outfile.write('    ')
def quote_xml(inStr):
    """Escape '&', '<' and '>' in *inStr* for use as XML text content.

    A falsy input yields the empty string; non-string input is first
    converted with '%s' formatting.
    """
    if not inStr:
        return ''
    s1 = (isinstance(inStr, basestring) and inStr or
          '%s' % inStr)
    # BUG FIX: the replacement targets had been garbled into no-ops
    # (e.g. replace('&', '&')); restored the standard XML entities.
    s1 = s1.replace('&', '&amp;')
    s1 = s1.replace('<', '&lt;')
    s1 = s1.replace('>', '&gt;')
    return s1
def quote_attrib(inStr):
    """Escape *inStr* and wrap it in quotes for use as an XML attribute value.

    Prefers double quotes; falls back to single quotes when the value
    contains a double quote, and to '&quot;' entities when it contains both
    quote characters.
    """
    s1 = (isinstance(inStr, basestring) and inStr or
          '%s' % inStr)
    # BUG FIX: the replacement targets had been garbled into no-ops and the
    # '&quot;' literal was truncated into an unterminated string; restored
    # the standard XML entity escaping.
    s1 = s1.replace('&', '&amp;')
    s1 = s1.replace('<', '&lt;')
    s1 = s1.replace('>', '&gt;')
    if '"' in s1:
        if "'" in s1:
            s1 = '"%s"' % s1.replace('"', "&quot;")
        else:
            s1 = "'%s'" % s1
    else:
        s1 = '"%s"' % s1
    return s1
def quote_python(inStr):
    """Return *inStr* wrapped in Python string-literal quotes.

    Single quotes are preferred; values containing single quotes are
    double-quoted (escaping embedded double quotes), and values with
    newlines get the triple-quoted form.
    """
    text = inStr
    if "'" not in text:
        return "'%s'" % text if '\n' not in text else "'''%s'''" % text
    if '"' in text:
        text = text.replace('"', '\\"')
    return '"%s"' % text if '\n' not in text else '"""%s"""' % text
def get_all_text_(node):
    """Collect *node*'s direct text plus the tail text of every child."""
    pieces = [node.text] if node.text is not None else []
    pieces.extend(child.tail for child in node if child.tail is not None)
    return ''.join(pieces)
def find_attr_value_(attr_name, node):
    """Look up *attr_name* on *node*, un-namespaced first, then per namespace."""
    attrs = node.attrib
    value = attrs.get(attr_name)
    if value is not None:
        return value
    # Not found bare: try every namespace declared on the node.
    # (nsmap/itervalues is the lxml Python-2 API.)
    for namespace in node.nsmap.itervalues():
        value = attrs.get('{%s}%s' % (namespace, attr_name, ))
        if value is not None:
            break
    return value
class GDSParseError(Exception):
    """Raised by raise_parse_error when parsed content fails validation."""
    pass
def raise_parse_error(node, msg):
    """Raise GDSParseError for *node*, including its source line when known."""
    if XMLParser_import_library == XMLParser_import_lxml:
        # lxml tracks source line numbers; plain ElementTree does not.
        detail = '%s (element %s/line %d)' % (msg, node.tag, node.sourceline, )
    else:
        detail = '%s (element %s)' % (msg, node.tag, )
    raise GDSParseError(detail)
class MixedContainer:
    """Holder for one piece of mixed XML content: plain text, a simple
    typed value, or a nested complex object."""
    # Constants for category:
    CategoryNone = 0
    CategoryText = 1
    CategorySimple = 2
    CategoryComplex = 3
    # Constants for content_type:
    TypeNone = 0
    TypeText = 1
    TypeString = 2
    TypeInteger = 3
    TypeFloat = 4
    TypeDecimal = 5
    TypeDouble = 6
    TypeBoolean = 7
    def __init__(self, category, content_type, name, value):
        self.category = category
        self.content_type = content_type
        self.name = name
        self.value = value
    def getCategory(self):
        return self.category
    def getContenttype(self, content_type):
        # NOTE(review): the content_type argument is ignored; the stored
        # value is returned unchanged.
        return self.content_type
    def getValue(self):
        return self.value
    def getName(self):
        return self.name
    def export(self, outfile, level, name, namespace):
        # Serialize by category: raw text, a simple typed element, or
        # delegate to the nested complex object's own export.
        if self.category == MixedContainer.CategoryText:
            # Prevent exporting empty content as empty lines.
            if self.value.strip():
                outfile.write(self.value)
        elif self.category == MixedContainer.CategorySimple:
            self.exportSimple(outfile, level, name)
        else:    # category == MixedContainer.CategoryComplex
            self.value.export(outfile, level, namespace,name)
    def exportSimple(self, outfile, level, name):
        # Format the value with the printf conversion matching content_type.
        if self.content_type == MixedContainer.TypeString:
            outfile.write('<%s>%s</%s>' % (self.name, self.value, self.name))
        elif self.content_type == MixedContainer.TypeInteger or \
                self.content_type == MixedContainer.TypeBoolean:
            outfile.write('<%s>%d</%s>' % (self.name, self.value, self.name))
        elif self.content_type == MixedContainer.TypeFloat or \
                self.content_type == MixedContainer.TypeDecimal:
            outfile.write('<%s>%f</%s>' % (self.name, self.value, self.name))
        elif self.content_type == MixedContainer.TypeDouble:
            outfile.write('<%s>%g</%s>' % (self.name, self.value, self.name))
    def exportLiteral(self, outfile, level, name):
        # Emit this container as a Python constructor-call literal.
        if self.category == MixedContainer.CategoryText:
            showIndent(outfile, level)
            outfile.write('model_.MixedContainer(%d, %d, "%s", "%s"),\n' % \
                (self.category, self.content_type, self.name, self.value))
        elif self.category == MixedContainer.CategorySimple:
            showIndent(outfile, level)
            outfile.write('model_.MixedContainer(%d, %d, "%s", "%s"),\n' % \
                (self.category, self.content_type, self.name, self.value))
        else:    # category == MixedContainer.CategoryComplex
            showIndent(outfile, level)
            outfile.write('model_.MixedContainer(%d, %d, "%s",\n' % \
                (self.category, self.content_type, self.name,))
            self.value.exportLiteral(outfile, level + 1)
            showIndent(outfile, level)
            outfile.write(')\n')
class MemberSpec_(object):
    """Metadata for one generated-class member: its name, schema data
    type (a string or a restriction chain), and container flag."""

    def __init__(self, name='', data_type='', container=0):
        self.name = name
        self.data_type = data_type
        self.container = container

    def set_name(self, name):
        self.name = name

    def get_name(self):
        return self.name

    def set_data_type(self, data_type):
        self.data_type = data_type

    def get_data_type_chain(self):
        # The raw data type: either a plain string or the full chain.
        return self.data_type

    def get_data_type(self):
        # A list is a restriction chain; the most derived type comes last.
        if not isinstance(self.data_type, list):
            return self.data_type
        return self.data_type[-1] if len(self.data_type) > 0 else 'xs:string'

    def set_container(self, container):
        self.container = container

    def get_container(self):
        return self.container
def _cast(typ, value):
if typ is None or value is None:
return value
return typ(value)
#
# Data representation classes.
#
class SpecialDate(GeneratedsSuper):
    """Generated class for the SpecialDate element: simple xs:date text
    content plus a SpecialProperty attribute."""
    member_data_items_ = [
        MemberSpec_('SpecialProperty', 'xs:string', 0),
        MemberSpec_('valueOf_', 'xs:date', 0),
        ]
    subclass = None
    superclass = None
    def __init__(self, SpecialProperty=None, valueOf_=None):
        self.SpecialProperty = _cast(None, SpecialProperty)
        self.valueOf_ = valueOf_
    def factory(*args_, **kwargs_):
        # Instantiate a registered subclass when one has been installed.
        if SpecialDate.subclass:
            return SpecialDate.subclass(*args_, **kwargs_)
        else:
            return SpecialDate(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_SpecialProperty(self): return self.SpecialProperty
    def set_SpecialProperty(self, SpecialProperty): self.SpecialProperty = SpecialProperty
    def get_valueOf_(self): return self.valueOf_
    def set_valueOf_(self, valueOf_): self.valueOf_ = valueOf_
    def export(self, outfile, level, namespace_='', name_='SpecialDate', namespacedef_=''):
        # Write this element as XML; self-closing when there is no content.
        showIndent(outfile, level)
        outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        self.exportAttributes(outfile, level, [], namespace_, name_='SpecialDate')
        if self.hasContent_():
            outfile.write('>')
            outfile.write(self.valueOf_.encode(ExternalEncoding))
            self.exportChildren(outfile, level + 1, namespace_, name_)
            outfile.write('</%s%s>\n' % (namespace_, name_))
        else:
            outfile.write('/>\n')
    def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='SpecialDate'):
        if self.SpecialProperty is not None and 'SpecialProperty' not in already_processed:
            already_processed.append('SpecialProperty')
            outfile.write(' SpecialProperty=%s' % (self.gds_format_string(quote_attrib(self.SpecialProperty).encode(ExternalEncoding), input_name='SpecialProperty'), ))
    def exportChildren(self, outfile, level, namespace_='', name_='SpecialDate', fromsubclass_=False):
        # Simple content: there are no child elements to write.
        pass
    def hasContent_(self):
        if (
            self.valueOf_
            ):
            return True
        else:
            return False
    def exportLiteral(self, outfile, level, name_='SpecialDate'):
        # Emit this element as a Python constructor-argument literal.
        level += 1
        self.exportLiteralAttributes(outfile, level, [], name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
            showIndent(outfile, level)
            outfile.write('valueOf_ = """%s""",\n' % (self.valueOf_,))
    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        if self.SpecialProperty is not None and 'SpecialProperty' not in already_processed:
            already_processed.append('SpecialProperty')
            showIndent(outfile, level)
            outfile.write('SpecialProperty = "%s",\n' % (self.SpecialProperty,))
    def exportLiteralChildren(self, outfile, level, name_):
        pass
    def build(self, node):
        # Populate this object from a parsed XML node.
        self.buildAttributes(node, node.attrib, [])
        self.valueOf_ = get_all_text_(node)
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
    def buildAttributes(self, node, attrs, already_processed):
        value = find_attr_value_('SpecialProperty', node)
        if value is not None and 'SpecialProperty' not in already_processed:
            already_processed.append('SpecialProperty')
            self.SpecialProperty = value
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        pass
# end class SpecialDate
class ExtremeDate(GeneratedsSuper):
    """Generated class for the ExtremeDate element: simple text content
    plus an ExtremeProperty attribute."""
    member_data_items_ = [
        MemberSpec_('ExtremeProperty', 'xs:string', 0),
        MemberSpec_('valueOf_', ['RelationType', 'RelationType2', 'RelationType3', 'xs:string'], 0),
        ]
    subclass = None
    superclass = None
    def __init__(self, ExtremeProperty=None, valueOf_=None):
        self.ExtremeProperty = _cast(None, ExtremeProperty)
        self.valueOf_ = valueOf_
    def factory(*args_, **kwargs_):
        # Instantiate a registered subclass when one has been installed.
        if ExtremeDate.subclass:
            return ExtremeDate.subclass(*args_, **kwargs_)
        else:
            return ExtremeDate(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_ExtremeProperty(self): return self.ExtremeProperty
    def set_ExtremeProperty(self, ExtremeProperty): self.ExtremeProperty = ExtremeProperty
    def get_valueOf_(self): return self.valueOf_
    def set_valueOf_(self, valueOf_): self.valueOf_ = valueOf_
    def export(self, outfile, level, namespace_='', name_='ExtremeDate', namespacedef_=''):
        # Write this element as XML; self-closing when there is no content.
        showIndent(outfile, level)
        outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        self.exportAttributes(outfile, level, [], namespace_, name_='ExtremeDate')
        if self.hasContent_():
            outfile.write('>')
            outfile.write(self.valueOf_.encode(ExternalEncoding))
            self.exportChildren(outfile, level + 1, namespace_, name_)
            outfile.write('</%s%s>\n' % (namespace_, name_))
        else:
            outfile.write('/>\n')
    def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='ExtremeDate'):
        if self.ExtremeProperty is not None and 'ExtremeProperty' not in already_processed:
            already_processed.append('ExtremeProperty')
            outfile.write(' ExtremeProperty=%s' % (self.gds_format_string(quote_attrib(self.ExtremeProperty).encode(ExternalEncoding), input_name='ExtremeProperty'), ))
    def exportChildren(self, outfile, level, namespace_='', name_='ExtremeDate', fromsubclass_=False):
        # Simple content: there are no child elements to write.
        pass
    def hasContent_(self):
        if (
            self.valueOf_
            ):
            return True
        else:
            return False
    def exportLiteral(self, outfile, level, name_='ExtremeDate'):
        # Emit this element as a Python constructor-argument literal.
        level += 1
        self.exportLiteralAttributes(outfile, level, [], name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
            showIndent(outfile, level)
            outfile.write('valueOf_ = """%s""",\n' % (self.valueOf_,))
    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        if self.ExtremeProperty is not None and 'ExtremeProperty' not in already_processed:
            already_processed.append('ExtremeProperty')
            showIndent(outfile, level)
            outfile.write('ExtremeProperty = "%s",\n' % (self.ExtremeProperty,))
    def exportLiteralChildren(self, outfile, level, name_):
        pass
    def build(self, node):
        # Populate this object from a parsed XML node.
        self.buildAttributes(node, node.attrib, [])
        self.valueOf_ = get_all_text_(node)
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
    def buildAttributes(self, node, attrs, already_processed):
        value = find_attr_value_('ExtremeProperty', node)
        if value is not None and 'ExtremeProperty' not in already_processed:
            already_processed.append('ExtremeProperty')
            self.ExtremeProperty = value
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        # BUG FIX: the generated code called
        # super(ExtremeDate, self).buildChildren(...), but superclass is None
        # and the bundled fallback GeneratedsSuper defines no buildChildren,
        # so encountering any child element raised AttributeError.  Simple
        # content has no children to process; do nothing, matching the
        # sibling SpecialDate/singleExtremeDate classes.
        pass
# end class ExtremeDate
class singleExtremeDate(GeneratedsSuper):
    """Generated class for the singleExtremeDate element: simple xs:string
    text content plus an ExtremeProperty attribute."""
    member_data_items_ = [
        MemberSpec_('ExtremeProperty', 'xs:string', 0),
        MemberSpec_('valueOf_', 'xs:string', 0),
        ]
    subclass = None
    superclass = None
    def __init__(self, ExtremeProperty=None, valueOf_=None):
        self.ExtremeProperty = _cast(None, ExtremeProperty)
        self.valueOf_ = valueOf_
    def factory(*args_, **kwargs_):
        # Instantiate a registered subclass when one has been installed.
        if singleExtremeDate.subclass:
            return singleExtremeDate.subclass(*args_, **kwargs_)
        else:
            return singleExtremeDate(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_ExtremeProperty(self): return self.ExtremeProperty
    def set_ExtremeProperty(self, ExtremeProperty): self.ExtremeProperty = ExtremeProperty
    def get_valueOf_(self): return self.valueOf_
    def set_valueOf_(self, valueOf_): self.valueOf_ = valueOf_
    def export(self, outfile, level, namespace_='', name_='singleExtremeDate', namespacedef_=''):
        # Write this element as XML; self-closing when there is no content.
        showIndent(outfile, level)
        outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        self.exportAttributes(outfile, level, [], namespace_, name_='singleExtremeDate')
        if self.hasContent_():
            outfile.write('>')
            outfile.write(self.valueOf_.encode(ExternalEncoding))
            self.exportChildren(outfile, level + 1, namespace_, name_)
            outfile.write('</%s%s>\n' % (namespace_, name_))
        else:
            outfile.write('/>\n')
    def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='singleExtremeDate'):
        if self.ExtremeProperty is not None and 'ExtremeProperty' not in already_processed:
            already_processed.append('ExtremeProperty')
            outfile.write(' ExtremeProperty=%s' % (self.gds_format_string(quote_attrib(self.ExtremeProperty).encode(ExternalEncoding), input_name='ExtremeProperty'), ))
    def exportChildren(self, outfile, level, namespace_='', name_='singleExtremeDate', fromsubclass_=False):
        # Simple content: there are no child elements to write.
        pass
    def hasContent_(self):
        if (
            self.valueOf_
            ):
            return True
        else:
            return False
    def exportLiteral(self, outfile, level, name_='singleExtremeDate'):
        # Emit this element as a Python constructor-argument literal.
        level += 1
        self.exportLiteralAttributes(outfile, level, [], name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
            showIndent(outfile, level)
            outfile.write('valueOf_ = """%s""",\n' % (self.valueOf_,))
    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        if self.ExtremeProperty is not None and 'ExtremeProperty' not in already_processed:
            already_processed.append('ExtremeProperty')
            showIndent(outfile, level)
            outfile.write('ExtremeProperty = "%s",\n' % (self.ExtremeProperty,))
    def exportLiteralChildren(self, outfile, level, name_):
        pass
    def build(self, node):
        # Populate this object from a parsed XML node.
        self.buildAttributes(node, node.attrib, [])
        self.valueOf_ = get_all_text_(node)
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
    def buildAttributes(self, node, attrs, already_processed):
        value = find_attr_value_('ExtremeProperty', node)
        if value is not None and 'ExtremeProperty' not in already_processed:
            already_processed.append('ExtremeProperty')
            self.ExtremeProperty = value
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        pass
# end class singleExtremeDate
class containerType(GeneratedsSuper):
    """Generated class for the container element: a list of simplefactoid
    children plus an optional mixedfactoid child."""
    member_data_items_ = [
        MemberSpec_('simplefactoid', 'simpleFactoidType', 1),
        MemberSpec_('mixedfactoid', 'mixedFactoidType', 0),
        ]
    subclass = None
    superclass = None
    def __init__(self, simplefactoid=None, mixedfactoid=None):
        # Avoid a shared mutable default for the repeated child list.
        if simplefactoid is None:
            self.simplefactoid = []
        else:
            self.simplefactoid = simplefactoid
        self.mixedfactoid = mixedfactoid
    def factory(*args_, **kwargs_):
        # Instantiate a registered subclass when one has been installed.
        if containerType.subclass:
            return containerType.subclass(*args_, **kwargs_)
        else:
            return containerType(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_simplefactoid(self): return self.simplefactoid
    def set_simplefactoid(self, simplefactoid): self.simplefactoid = simplefactoid
    def add_simplefactoid(self, value): self.simplefactoid.append(value)
    def insert_simplefactoid(self, index, value): self.simplefactoid[index] = value
    def get_mixedfactoid(self): return self.mixedfactoid
    def set_mixedfactoid(self, mixedfactoid): self.mixedfactoid = mixedfactoid
    def export(self, outfile, level, namespace_='', name_='containerType', namespacedef_=''):
        # Write this element as XML; self-closing when there is no content.
        showIndent(outfile, level)
        outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        self.exportAttributes(outfile, level, [], namespace_, name_='containerType')
        if self.hasContent_():
            outfile.write('>\n')
            self.exportChildren(outfile, level + 1, namespace_, name_)
            showIndent(outfile, level)
            outfile.write('</%s%s>\n' % (namespace_, name_))
        else:
            outfile.write('/>\n')
    def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='containerType'):
        pass
    def exportChildren(self, outfile, level, namespace_='', name_='containerType', fromsubclass_=False):
        for simplefactoid_ in self.simplefactoid:
            simplefactoid_.export(outfile, level, namespace_, name_='simplefactoid')
        if self.mixedfactoid:
            self.mixedfactoid.export(outfile, level, namespace_, name_='mixedfactoid', )
    def hasContent_(self):
        if (
            self.simplefactoid or
            self.mixedfactoid is not None
            ):
            return True
        else:
            return False
    def exportLiteral(self, outfile, level, name_='containerType'):
        # Emit this element as a Python constructor-argument literal.
        level += 1
        self.exportLiteralAttributes(outfile, level, [], name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        pass
    def exportLiteralChildren(self, outfile, level, name_):
        # Emit the repeated simplefactoid list, then the optional
        # mixedfactoid, as nested constructor calls.
        showIndent(outfile, level)
        outfile.write('simplefactoid=[\n')
        level += 1
        for simplefactoid_ in self.simplefactoid:
            showIndent(outfile, level)
            outfile.write('model_.simpleFactoidType(\n')
            simplefactoid_.exportLiteral(outfile, level, name_='simpleFactoidType')
            showIndent(outfile, level)
            outfile.write('),\n')
        level -= 1
        showIndent(outfile, level)
        outfile.write('],\n')
        if self.mixedfactoid is not None:
            showIndent(outfile, level)
            outfile.write('mixedfactoid=model_.mixedFactoidType(\n')
            self.mixedfactoid.exportLiteral(outfile, level, name_='mixedfactoid')
            showIndent(outfile, level)
            outfile.write('),\n')
    def build(self, node):
        # Populate this object from a parsed XML node.
        self.buildAttributes(node, node.attrib, [])
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
    def buildAttributes(self, node, attrs, already_processed):
        pass
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        # Dispatch on the child element's local name.
        if nodeName_ == 'simplefactoid':
            obj_ = simpleFactoidType.factory()
            obj_.build(child_)
            self.simplefactoid.append(obj_)
        elif nodeName_ == 'mixedfactoid':
            obj_ = mixedFactoidType.factory()
            obj_.build(child_)
            self.set_mixedfactoid(obj_)
# end class containerType
class simpleFactoidType(GeneratedsSuper):
    """Generated class for the simpleFactoid element: one optional
    relation child of (restricted) string type."""
    member_data_items_ = [
        MemberSpec_('relation', ['RelationType', 'RelationType2', 'RelationType3', 'xs:string'], 0),
        ]
    subclass = None
    superclass = None
    def __init__(self, relation=None):
        self.relation = relation
    def factory(*args_, **kwargs_):
        # Instantiate a registered subclass when one has been installed.
        if simpleFactoidType.subclass:
            return simpleFactoidType.subclass(*args_, **kwargs_)
        else:
            return simpleFactoidType(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_relation(self): return self.relation
    def set_relation(self, relation): self.relation = relation
    def validate_RelationType(self, value):
        # Validate type RelationType, a restriction on RelationType2.
        # (No facets were generated, so this is intentionally a no-op.)
        pass
    def export(self, outfile, level, namespace_='', name_='simpleFactoidType', namespacedef_=''):
        # Write this element as XML; self-closing when there is no content.
        showIndent(outfile, level)
        outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        self.exportAttributes(outfile, level, [], namespace_, name_='simpleFactoidType')
        if self.hasContent_():
            outfile.write('>\n')
            self.exportChildren(outfile, level + 1, namespace_, name_)
            showIndent(outfile, level)
            outfile.write('</%s%s>\n' % (namespace_, name_))
        else:
            outfile.write('/>\n')
    def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='simpleFactoidType'):
        pass
    def exportChildren(self, outfile, level, namespace_='', name_='simpleFactoidType', fromsubclass_=False):
        if self.relation is not None:
            showIndent(outfile, level)
            outfile.write('<%srelation>%s</%srelation>\n' % (namespace_, self.gds_format_string(quote_xml(self.relation).encode(ExternalEncoding), input_name='relation'), namespace_))
    def hasContent_(self):
        if (
            self.relation is not None
            ):
            return True
        else:
            return False
    def exportLiteral(self, outfile, level, name_='simpleFactoidType'):
        # Emit this element as a Python constructor-argument literal.
        level += 1
        self.exportLiteralAttributes(outfile, level, [], name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        pass
    def exportLiteralChildren(self, outfile, level, name_):
        if self.relation is not None:
            showIndent(outfile, level)
            outfile.write('relation=%s,\n' % quote_python(self.relation).encode(ExternalEncoding))
    def build(self, node):
        # Populate this object from a parsed XML node.
        self.buildAttributes(node, node.attrib, [])
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
    def buildAttributes(self, node, attrs, already_processed):
        pass
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        if nodeName_ == 'relation':
            relation_ = child_.text
            relation_ = self.gds_validate_string(relation_, node, 'relation')
            self.relation = relation_
            self.validate_RelationType(self.relation)    # validate type RelationType
# end class simpleFactoidType
class mixedFactoidType(GeneratedsSuper):
    """Generated binding for an XML type with mixed content and an
    optional 'relation' child.

    NOTE(review): generateDS-style machine-generated code; only comments
    and docstrings were added here -- regenerate instead of hand-editing.
    """
    member_data_items_ = [
        MemberSpec_('relation', ['RelationType', 'RelationType2', 'RelationType3', 'xs:string'], 0),
        MemberSpec_('valueOf_', [], 0),
        ]
    subclass = None
    superclass = None
    def __init__(self, relation=None, valueOf_=None, mixedclass_=None, content_=None):
        self.relation = relation
        self.valueOf_ = valueOf_
        # mixedclass_ wraps interleaved text/element content nodes.
        if mixedclass_ is None:
            self.mixedclass_ = MixedContainer
        else:
            self.mixedclass_ = mixedclass_
        if content_ is None:
            self.content_ = []
        else:
            self.content_ = content_
        self.valueOf_ = valueOf_
    def factory(*args_, **kwargs_):
        # Instantiate the registered subclass when one has been installed.
        if mixedFactoidType.subclass:
            return mixedFactoidType.subclass(*args_, **kwargs_)
        else:
            return mixedFactoidType(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_relation(self): return self.relation
    def set_relation(self, relation): self.relation = relation
    def validate_RelationType(self, value):
        # Validate type RelationType, a restriction on RelationType2.
        pass
    def get_valueOf_(self): return self.valueOf_
    def set_valueOf_(self, valueOf_): self.valueOf_ = valueOf_
    def export(self, outfile, level, namespace_='', name_='mixedFactoidType', namespacedef_=''):
        # Serialize this element and its mixed content.
        showIndent(outfile, level)
        outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        self.exportAttributes(outfile, level, [], namespace_, name_='mixedFactoidType')
        if self.hasContent_():
            outfile.write('>\n')
            self.exportChildren(outfile, level + 1, namespace_, name_)
            showIndent(outfile, level)
            outfile.write('</%s%s>\n' % (namespace_, name_))
        else:
            outfile.write('/>\n')
    def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='mixedFactoidType'):
        # No attributes on this type.
        pass
    def exportChildren(self, outfile, level, namespace_='', name_='mixedFactoidType', fromsubclass_=False):
        # Mixed content: delegate to each stored content node in order.
        if not fromsubclass_:
            for item_ in self.content_:
                item_.export(outfile, level, item_.name, namespace_)
    def hasContent_(self):
        if (
            self.relation is not None or
            self.valueOf_
            ):
            return True
        else:
            return False
    def exportLiteral(self, outfile, level, name_='mixedFactoidType'):
        level += 1
        self.exportLiteralAttributes(outfile, level, [], name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
        showIndent(outfile, level)
        outfile.write('valueOf_ = """%s""",\n' % (self.valueOf_,))
    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        pass
    def exportLiteralChildren(self, outfile, level, name_):
        showIndent(outfile, level)
        outfile.write('content_ = [\n')
        for item_ in self.content_:
            item_.exportLiteral(outfile, level, name_)
        showIndent(outfile, level)
        outfile.write('],\n')
        pass
    def build(self, node):
        # Populate from an ElementTree node, preserving mixed text runs.
        self.buildAttributes(node, node.attrib, [])
        self.valueOf_ = get_all_text_(node)
        if node.text is not None:
            obj_ = self.mixedclass_(MixedContainer.CategoryText,
                MixedContainer.TypeNone, '', node.text)
            self.content_.append(obj_)
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
    def buildAttributes(self, node, attrs, already_processed):
        pass
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        # Record the <relation> child and any trailing text as content nodes.
        if nodeName_ == 'relation' and child_.text is not None:
            valuestr_ = child_.text
            obj_ = self.mixedclass_(MixedContainer.CategorySimple,
                MixedContainer.TypeString, 'relation', valuestr_)
            self.content_.append(obj_)
        if not fromsubclass_ and child_.tail is not None:
            obj_ = self.mixedclass_(MixedContainer.CategoryText,
                MixedContainer.TypeNone, '', child_.tail)
            self.content_.append(obj_)
# end class mixedFactoidType
class BaseType(GeneratedsSuper):
    """Generated binding for XML type BaseType: two string attributes
    plus simple text content.

    NOTE(review): generateDS-style machine-generated code; only comments
    and docstrings were added here -- regenerate instead of hand-editing.
    """
    member_data_items_ = [
        MemberSpec_('BaseProperty1', 'xs:string', 0),
        MemberSpec_('BaseProperty2', 'xs:string', 0),
        MemberSpec_('valueOf_', 'xs:string', 0),
        ]
    subclass = None
    superclass = None
    def __init__(self, BaseProperty1=None, BaseProperty2=None, valueOf_=None):
        self.BaseProperty1 = _cast(None, BaseProperty1)
        self.BaseProperty2 = _cast(None, BaseProperty2)
        self.valueOf_ = valueOf_
    def factory(*args_, **kwargs_):
        # Instantiate the registered subclass when one has been installed.
        if BaseType.subclass:
            return BaseType.subclass(*args_, **kwargs_)
        else:
            return BaseType(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_BaseProperty1(self): return self.BaseProperty1
    def set_BaseProperty1(self, BaseProperty1): self.BaseProperty1 = BaseProperty1
    def get_BaseProperty2(self): return self.BaseProperty2
    def set_BaseProperty2(self, BaseProperty2): self.BaseProperty2 = BaseProperty2
    def get_valueOf_(self): return self.valueOf_
    def set_valueOf_(self, valueOf_): self.valueOf_ = valueOf_
    def export(self, outfile, level, namespace_='', name_='BaseType', namespacedef_=''):
        # Serialize this element: attributes, then text content and children.
        showIndent(outfile, level)
        outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        self.exportAttributes(outfile, level, [], namespace_, name_='BaseType')
        if self.hasContent_():
            outfile.write('>')
            outfile.write(self.valueOf_.encode(ExternalEncoding))
            self.exportChildren(outfile, level + 1, namespace_, name_)
            outfile.write('</%s%s>\n' % (namespace_, name_))
        else:
            outfile.write('/>\n')
    def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='BaseType'):
        if self.BaseProperty1 is not None and 'BaseProperty1' not in already_processed:
            already_processed.append('BaseProperty1')
            outfile.write(' BaseProperty1=%s' % (self.gds_format_string(quote_attrib(self.BaseProperty1).encode(ExternalEncoding), input_name='BaseProperty1'), ))
        if self.BaseProperty2 is not None and 'BaseProperty2' not in already_processed:
            already_processed.append('BaseProperty2')
            outfile.write(' BaseProperty2=%s' % (self.gds_format_string(quote_attrib(self.BaseProperty2).encode(ExternalEncoding), input_name='BaseProperty2'), ))
    def exportChildren(self, outfile, level, namespace_='', name_='BaseType', fromsubclass_=False):
        pass
    def hasContent_(self):
        if (
            self.valueOf_
            ):
            return True
        else:
            return False
    def exportLiteral(self, outfile, level, name_='BaseType'):
        level += 1
        self.exportLiteralAttributes(outfile, level, [], name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
        showIndent(outfile, level)
        outfile.write('valueOf_ = """%s""",\n' % (self.valueOf_,))
    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        if self.BaseProperty1 is not None and 'BaseProperty1' not in already_processed:
            already_processed.append('BaseProperty1')
            showIndent(outfile, level)
            outfile.write('BaseProperty1 = "%s",\n' % (self.BaseProperty1,))
        if self.BaseProperty2 is not None and 'BaseProperty2' not in already_processed:
            already_processed.append('BaseProperty2')
            showIndent(outfile, level)
            outfile.write('BaseProperty2 = "%s",\n' % (self.BaseProperty2,))
    def exportLiteralChildren(self, outfile, level, name_):
        pass
    def build(self, node):
        # Populate this object from an ElementTree node.
        self.buildAttributes(node, node.attrib, [])
        self.valueOf_ = get_all_text_(node)
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
    def buildAttributes(self, node, attrs, already_processed):
        value = find_attr_value_('BaseProperty1', node)
        if value is not None and 'BaseProperty1' not in already_processed:
            already_processed.append('BaseProperty1')
            self.BaseProperty1 = value
        value = find_attr_value_('BaseProperty2', node)
        if value is not None and 'BaseProperty2' not in already_processed:
            already_processed.append('BaseProperty2')
            self.BaseProperty2 = value
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        pass
# end class BaseType
class DerivedType(BaseType):
    """Generated binding for XML type DerivedType, extending BaseType
    with two more string attributes (exported with xsi:type).

    NOTE(review): generateDS-style machine-generated code; only comments
    and docstrings were added here -- regenerate instead of hand-editing.
    """
    member_data_items_ = [
        MemberSpec_('DerivedProperty1', 'xs:string', 0),
        MemberSpec_('DerivedProperty2', 'xs:string', 0),
        MemberSpec_('valueOf_', 'BaseType', 0),
        ]
    subclass = None
    superclass = BaseType
    def __init__(self, BaseProperty1=None, BaseProperty2=None, DerivedProperty1=None, DerivedProperty2=None, valueOf_=None):
        super(DerivedType, self).__init__(BaseProperty1, BaseProperty2, valueOf_, )
        self.DerivedProperty1 = _cast(None, DerivedProperty1)
        self.DerivedProperty2 = _cast(None, DerivedProperty2)
        self.valueOf_ = valueOf_
    def factory(*args_, **kwargs_):
        # Instantiate the registered subclass when one has been installed.
        if DerivedType.subclass:
            return DerivedType.subclass(*args_, **kwargs_)
        else:
            return DerivedType(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_DerivedProperty1(self): return self.DerivedProperty1
    def set_DerivedProperty1(self, DerivedProperty1): self.DerivedProperty1 = DerivedProperty1
    def get_DerivedProperty2(self): return self.DerivedProperty2
    def set_DerivedProperty2(self, DerivedProperty2): self.DerivedProperty2 = DerivedProperty2
    def get_valueOf_(self): return self.valueOf_
    def set_valueOf_(self, valueOf_): self.valueOf_ = valueOf_
    def export(self, outfile, level, namespace_='', name_='DerivedType', namespacedef_=''):
        # Serialize with an explicit xsi:type so the derived type survives
        # round-tripping through a BaseType-typed element.
        showIndent(outfile, level)
        outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        self.exportAttributes(outfile, level, [], namespace_, name_='DerivedType')
        outfile.write(' xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"')
        outfile.write(' xsi:type="DerivedType"')
        if self.hasContent_():
            outfile.write('>')
            outfile.write(self.valueOf_.encode(ExternalEncoding))
            self.exportChildren(outfile, level + 1, namespace_, name_)
            outfile.write('</%s%s>\n' % (namespace_, name_))
        else:
            outfile.write('/>\n')
    def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='DerivedType'):
        # Base attributes first, then the derived ones.
        super(DerivedType, self).exportAttributes(outfile, level, already_processed, namespace_, name_='DerivedType')
        if self.DerivedProperty1 is not None and 'DerivedProperty1' not in already_processed:
            already_processed.append('DerivedProperty1')
            outfile.write(' DerivedProperty1=%s' % (self.gds_format_string(quote_attrib(self.DerivedProperty1).encode(ExternalEncoding), input_name='DerivedProperty1'), ))
        if self.DerivedProperty2 is not None and 'DerivedProperty2' not in already_processed:
            already_processed.append('DerivedProperty2')
            outfile.write(' DerivedProperty2=%s' % (self.gds_format_string(quote_attrib(self.DerivedProperty2).encode(ExternalEncoding), input_name='DerivedProperty2'), ))
    def exportChildren(self, outfile, level, namespace_='', name_='DerivedType', fromsubclass_=False):
        super(DerivedType, self).exportChildren(outfile, level, namespace_, name_, True)
        pass
    def hasContent_(self):
        if (
            self.valueOf_ or
            super(DerivedType, self).hasContent_()
            ):
            return True
        else:
            return False
    def exportLiteral(self, outfile, level, name_='DerivedType'):
        level += 1
        self.exportLiteralAttributes(outfile, level, [], name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
        showIndent(outfile, level)
        outfile.write('valueOf_ = """%s""",\n' % (self.valueOf_,))
    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        if self.DerivedProperty1 is not None and 'DerivedProperty1' not in already_processed:
            already_processed.append('DerivedProperty1')
            showIndent(outfile, level)
            outfile.write('DerivedProperty1 = "%s",\n' % (self.DerivedProperty1,))
        if self.DerivedProperty2 is not None and 'DerivedProperty2' not in already_processed:
            already_processed.append('DerivedProperty2')
            showIndent(outfile, level)
            outfile.write('DerivedProperty2 = "%s",\n' % (self.DerivedProperty2,))
        super(DerivedType, self).exportLiteralAttributes(outfile, level, already_processed, name_)
    def exportLiteralChildren(self, outfile, level, name_):
        super(DerivedType, self).exportLiteralChildren(outfile, level, name_)
        pass
    def build(self, node):
        self.buildAttributes(node, node.attrib, [])
        self.valueOf_ = get_all_text_(node)
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
    def buildAttributes(self, node, attrs, already_processed):
        # Derived attributes first; the base class handles the rest via
        # the shared already_processed list.
        value = find_attr_value_('DerivedProperty1', node)
        if value is not None and 'DerivedProperty1' not in already_processed:
            already_processed.append('DerivedProperty1')
            self.DerivedProperty1 = value
        value = find_attr_value_('DerivedProperty2', node)
        if value is not None and 'DerivedProperty2' not in already_processed:
            already_processed.append('DerivedProperty2')
            self.DerivedProperty2 = value
        super(DerivedType, self).buildAttributes(node, attrs, already_processed)
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        super(DerivedType, self).buildChildren(child_, node, nodeName_, True)
        pass
# end class DerivedType
class MyInteger(GeneratedsSuper):
    """Generated binding for an xs:integer simple type with a MyAttr
    string attribute.

    NOTE(review): generateDS-style machine-generated code; only comments
    and docstrings were added here -- regenerate instead of hand-editing.
    """
    member_data_items_ = [
        MemberSpec_('MyAttr', 'xs:string', 0),
        MemberSpec_('valueOf_', 'xs:integer', 0),
        ]
    subclass = None
    superclass = None
    def __init__(self, MyAttr=None, valueOf_=None):
        self.MyAttr = _cast(None, MyAttr)
        self.valueOf_ = valueOf_
    def factory(*args_, **kwargs_):
        if MyInteger.subclass:
            return MyInteger.subclass(*args_, **kwargs_)
        else:
            return MyInteger(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_MyAttr(self): return self.MyAttr
    def set_MyAttr(self, MyAttr): self.MyAttr = MyAttr
    def get_valueOf_(self): return self.valueOf_
    def set_valueOf_(self, valueOf_): self.valueOf_ = valueOf_
    def export(self, outfile, level, namespace_='', name_='MyInteger', namespacedef_=''):
        showIndent(outfile, level)
        outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        self.exportAttributes(outfile, level, [], namespace_, name_='MyInteger')
        if self.hasContent_():
            outfile.write('>')
            outfile.write(self.valueOf_.encode(ExternalEncoding))
            self.exportChildren(outfile, level + 1, namespace_, name_)
            outfile.write('</%s%s>\n' % (namespace_, name_))
        else:
            outfile.write('/>\n')
    def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='MyInteger'):
        if self.MyAttr is not None and 'MyAttr' not in already_processed:
            already_processed.append('MyAttr')
            outfile.write(' MyAttr=%s' % (self.gds_format_string(quote_attrib(self.MyAttr).encode(ExternalEncoding), input_name='MyAttr'), ))
    def exportChildren(self, outfile, level, namespace_='', name_='MyInteger', fromsubclass_=False):
        pass
    def hasContent_(self):
        if (
            self.valueOf_
            ):
            return True
        else:
            return False
    def exportLiteral(self, outfile, level, name_='MyInteger'):
        level += 1
        self.exportLiteralAttributes(outfile, level, [], name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
        showIndent(outfile, level)
        outfile.write('valueOf_ = """%s""",\n' % (self.valueOf_,))
    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        if self.MyAttr is not None and 'MyAttr' not in already_processed:
            already_processed.append('MyAttr')
            showIndent(outfile, level)
            outfile.write('MyAttr = "%s",\n' % (self.MyAttr,))
    def exportLiteralChildren(self, outfile, level, name_):
        pass
    def build(self, node):
        self.buildAttributes(node, node.attrib, [])
        self.valueOf_ = get_all_text_(node)
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
    def buildAttributes(self, node, attrs, already_processed):
        value = find_attr_value_('MyAttr', node)
        if value is not None and 'MyAttr' not in already_processed:
            already_processed.append('MyAttr')
            self.MyAttr = value
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        pass
# end class MyInteger
class MyBoolean(GeneratedsSuper):
    """Generated binding for an xs:boolean simple type with a MyAttr
    string attribute.

    NOTE(review): generateDS-style machine-generated code; only comments
    and docstrings were added here -- regenerate instead of hand-editing.
    """
    member_data_items_ = [
        MemberSpec_('MyAttr', 'xs:string', 0),
        MemberSpec_('valueOf_', 'xs:boolean', 0),
        ]
    subclass = None
    superclass = None
    def __init__(self, MyAttr=None, valueOf_=None):
        self.MyAttr = _cast(None, MyAttr)
        self.valueOf_ = valueOf_
    def factory(*args_, **kwargs_):
        if MyBoolean.subclass:
            return MyBoolean.subclass(*args_, **kwargs_)
        else:
            return MyBoolean(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_MyAttr(self): return self.MyAttr
    def set_MyAttr(self, MyAttr): self.MyAttr = MyAttr
    def get_valueOf_(self): return self.valueOf_
    def set_valueOf_(self, valueOf_): self.valueOf_ = valueOf_
    def export(self, outfile, level, namespace_='', name_='MyBoolean', namespacedef_=''):
        showIndent(outfile, level)
        outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        self.exportAttributes(outfile, level, [], namespace_, name_='MyBoolean')
        if self.hasContent_():
            outfile.write('>')
            outfile.write(self.valueOf_.encode(ExternalEncoding))
            self.exportChildren(outfile, level + 1, namespace_, name_)
            outfile.write('</%s%s>\n' % (namespace_, name_))
        else:
            outfile.write('/>\n')
    def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='MyBoolean'):
        if self.MyAttr is not None and 'MyAttr' not in already_processed:
            already_processed.append('MyAttr')
            outfile.write(' MyAttr=%s' % (self.gds_format_string(quote_attrib(self.MyAttr).encode(ExternalEncoding), input_name='MyAttr'), ))
    def exportChildren(self, outfile, level, namespace_='', name_='MyBoolean', fromsubclass_=False):
        pass
    def hasContent_(self):
        if (
            self.valueOf_
            ):
            return True
        else:
            return False
    def exportLiteral(self, outfile, level, name_='MyBoolean'):
        level += 1
        self.exportLiteralAttributes(outfile, level, [], name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
        showIndent(outfile, level)
        outfile.write('valueOf_ = """%s""",\n' % (self.valueOf_,))
    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        if self.MyAttr is not None and 'MyAttr' not in already_processed:
            already_processed.append('MyAttr')
            showIndent(outfile, level)
            outfile.write('MyAttr = "%s",\n' % (self.MyAttr,))
    def exportLiteralChildren(self, outfile, level, name_):
        pass
    def build(self, node):
        self.buildAttributes(node, node.attrib, [])
        self.valueOf_ = get_all_text_(node)
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
    def buildAttributes(self, node, attrs, already_processed):
        value = find_attr_value_('MyAttr', node)
        if value is not None and 'MyAttr' not in already_processed:
            already_processed.append('MyAttr')
            self.MyAttr = value
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        pass
# end class MyBoolean
class MyFloat(GeneratedsSuper):
    """Generated binding for an xs:float simple type with a MyAttr
    string attribute.

    NOTE(review): generateDS-style machine-generated code; only comments
    and docstrings were added here -- regenerate instead of hand-editing.
    """
    member_data_items_ = [
        MemberSpec_('MyAttr', 'xs:string', 0),
        MemberSpec_('valueOf_', 'xs:float', 0),
        ]
    subclass = None
    superclass = None
    def __init__(self, MyAttr=None, valueOf_=None):
        self.MyAttr = _cast(None, MyAttr)
        self.valueOf_ = valueOf_
    def factory(*args_, **kwargs_):
        if MyFloat.subclass:
            return MyFloat.subclass(*args_, **kwargs_)
        else:
            return MyFloat(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_MyAttr(self): return self.MyAttr
    def set_MyAttr(self, MyAttr): self.MyAttr = MyAttr
    def get_valueOf_(self): return self.valueOf_
    def set_valueOf_(self, valueOf_): self.valueOf_ = valueOf_
    def export(self, outfile, level, namespace_='', name_='MyFloat', namespacedef_=''):
        showIndent(outfile, level)
        outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        self.exportAttributes(outfile, level, [], namespace_, name_='MyFloat')
        if self.hasContent_():
            outfile.write('>')
            outfile.write(self.valueOf_.encode(ExternalEncoding))
            self.exportChildren(outfile, level + 1, namespace_, name_)
            outfile.write('</%s%s>\n' % (namespace_, name_))
        else:
            outfile.write('/>\n')
    def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='MyFloat'):
        if self.MyAttr is not None and 'MyAttr' not in already_processed:
            already_processed.append('MyAttr')
            outfile.write(' MyAttr=%s' % (self.gds_format_string(quote_attrib(self.MyAttr).encode(ExternalEncoding), input_name='MyAttr'), ))
    def exportChildren(self, outfile, level, namespace_='', name_='MyFloat', fromsubclass_=False):
        pass
    def hasContent_(self):
        if (
            self.valueOf_
            ):
            return True
        else:
            return False
    def exportLiteral(self, outfile, level, name_='MyFloat'):
        level += 1
        self.exportLiteralAttributes(outfile, level, [], name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
        showIndent(outfile, level)
        outfile.write('valueOf_ = """%s""",\n' % (self.valueOf_,))
    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        if self.MyAttr is not None and 'MyAttr' not in already_processed:
            already_processed.append('MyAttr')
            showIndent(outfile, level)
            outfile.write('MyAttr = "%s",\n' % (self.MyAttr,))
    def exportLiteralChildren(self, outfile, level, name_):
        pass
    def build(self, node):
        self.buildAttributes(node, node.attrib, [])
        self.valueOf_ = get_all_text_(node)
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
    def buildAttributes(self, node, attrs, already_processed):
        value = find_attr_value_('MyAttr', node)
        if value is not None and 'MyAttr' not in already_processed:
            already_processed.append('MyAttr')
            self.MyAttr = value
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        pass
# end class MyFloat
class MyDouble(GeneratedsSuper):
    """Generated binding for an xs:double simple type with a MyAttr
    string attribute.

    NOTE(review): generateDS-style machine-generated code; only comments
    and docstrings were added here -- regenerate instead of hand-editing.
    """
    member_data_items_ = [
        MemberSpec_('MyAttr', 'xs:string', 0),
        MemberSpec_('valueOf_', 'xs:double', 0),
        ]
    subclass = None
    superclass = None
    def __init__(self, MyAttr=None, valueOf_=None):
        self.MyAttr = _cast(None, MyAttr)
        self.valueOf_ = valueOf_
    def factory(*args_, **kwargs_):
        if MyDouble.subclass:
            return MyDouble.subclass(*args_, **kwargs_)
        else:
            return MyDouble(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_MyAttr(self): return self.MyAttr
    def set_MyAttr(self, MyAttr): self.MyAttr = MyAttr
    def get_valueOf_(self): return self.valueOf_
    def set_valueOf_(self, valueOf_): self.valueOf_ = valueOf_
    def export(self, outfile, level, namespace_='', name_='MyDouble', namespacedef_=''):
        showIndent(outfile, level)
        outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        self.exportAttributes(outfile, level, [], namespace_, name_='MyDouble')
        if self.hasContent_():
            outfile.write('>')
            outfile.write(self.valueOf_.encode(ExternalEncoding))
            self.exportChildren(outfile, level + 1, namespace_, name_)
            outfile.write('</%s%s>\n' % (namespace_, name_))
        else:
            outfile.write('/>\n')
    def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='MyDouble'):
        if self.MyAttr is not None and 'MyAttr' not in already_processed:
            already_processed.append('MyAttr')
            outfile.write(' MyAttr=%s' % (self.gds_format_string(quote_attrib(self.MyAttr).encode(ExternalEncoding), input_name='MyAttr'), ))
    def exportChildren(self, outfile, level, namespace_='', name_='MyDouble', fromsubclass_=False):
        pass
    def hasContent_(self):
        if (
            self.valueOf_
            ):
            return True
        else:
            return False
    def exportLiteral(self, outfile, level, name_='MyDouble'):
        level += 1
        self.exportLiteralAttributes(outfile, level, [], name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
        showIndent(outfile, level)
        outfile.write('valueOf_ = """%s""",\n' % (self.valueOf_,))
    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        if self.MyAttr is not None and 'MyAttr' not in already_processed:
            already_processed.append('MyAttr')
            showIndent(outfile, level)
            outfile.write('MyAttr = "%s",\n' % (self.MyAttr,))
    def exportLiteralChildren(self, outfile, level, name_):
        pass
    def build(self, node):
        self.buildAttributes(node, node.attrib, [])
        self.valueOf_ = get_all_text_(node)
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
    def buildAttributes(self, node, attrs, already_processed):
        value = find_attr_value_('MyAttr', node)
        if value is not None and 'MyAttr' not in already_processed:
            already_processed.append('MyAttr')
            self.MyAttr = value
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        pass
# end class MyDouble
# Command-line usage banner for the script entry point.
USAGE_TEXT = """
Usage: python <Parser>.py [ -s ] <in_xml_file>
"""
def usage():
    # Print the usage banner and exit non-zero.
    # NOTE(review): Python 2 print statement -- this module is Python 2 only.
    print USAGE_TEXT
    sys.exit(1)
def get_root_tag(node):
    """Return (localname, generated-class-or-None) for an element node.

    The class is looked up by the element's local tag name among this
    module's globals; unknown tags yield None for the class.
    """
    localname = Tag_pattern_.match(node.tag).groups()[-1]
    return localname, globals().get(localname)
def parse(inFileName):
    """Parse the XML file *inFileName* and return the built root object."""
    document = parsexml_(inFileName)
    root_node = document.getroot()
    root_tag, root_class = get_root_tag(root_node)
    if root_class is None:
        # Unrecognized root element: fall back to the generic container.
        root_tag = 'container'
        root_class = containerType
    root_obj = root_class.factory()
    root_obj.build(root_node)
    # Drop the reference so Python can collect the space used by the DOM.
    document = None
    return root_obj
def parseString(inString):
    """Parse XML from the string *inString* and return the root object."""
    from StringIO import StringIO
    document = parsexml_(StringIO(inString))
    root_node = document.getroot()
    root_tag, root_class = get_root_tag(root_node)
    if root_class is None:
        # Unrecognized root element: fall back to the generic container.
        root_tag = 'container'
        root_class = containerType
    root_obj = root_class.factory()
    root_obj.build(root_node)
    # Drop the reference so Python can collect the space used by the DOM.
    document = None
    return root_obj
def parseLiteral(inFileName):
    """Parse *inFileName* and return the root object.

    Identical to parse(); the literal-export step is left commented out
    in the generated template, so only the build is performed.
    """
    document = parsexml_(inFileName)
    root_node = document.getroot()
    root_tag, root_class = get_root_tag(root_node)
    if root_class is None:
        # Unrecognized root element: fall back to the generic container.
        root_tag = 'container'
        root_class = containerType
    root_obj = root_class.factory()
    root_obj.build(root_node)
    # Drop the reference so Python can collect the space used by the DOM.
    document = None
    return root_obj
def main():
    """Command-line driver: parse the single XML file argument."""
    arguments = sys.argv[1:]
    if len(arguments) == 1:
        parse(arguments[0])
    else:
        usage()
if __name__ == '__main__':
    #import pdb; pdb.set_trace()
    # Run the command-line driver only when executed as a script.
    main()
# Explicit public API of this generated module.
__all__ = [
    "BaseType",
    "DerivedType",
    "ExtremeDate",
    "MyBoolean",
    "MyDouble",
    "MyFloat",
    "MyInteger",
    "SpecialDate",
    "containerType",
    "mixedFactoidType",
    "simpleFactoidType",
    "singleExtremeDate"
    ]
| |
'''
All primary_storage operations for test.
@author: Youyk
'''
import apibinding.api_actions as api_actions
import zstackwoodpecker.test_util as test_util
import account_operations
import apibinding.inventory as inventory
def create_primary_storage(primary_storage_option, session_uuid=None):
    '''
    Create a primary storage, dispatching on the option's type
    (Ceph vs. NFS), and return the new inventory.

    Bug fix: the original passed session_uuid=None to the delegate
    functions, silently discarding the caller's session.
    '''
    if primary_storage_option.type == inventory.CEPH_PRIMARY_STORAGE_TYPE:
        return create_ceph_primary_storage(primary_storage_option, session_uuid=session_uuid)
    return create_nfs_primary_storage(primary_storage_option, session_uuid=session_uuid)
def create_nfs_primary_storage(primary_storage_option, session_uuid=None):
    '''
    Add an NFS primary storage described by *primary_storage_option*
    and return its inventory.
    '''
    nfs_action = api_actions.AddNfsPrimaryStorageAction()
    nfs_action.timeout = 30000
    nfs_action.name = primary_storage_option.get_name()
    nfs_action.description = primary_storage_option.get_description()
    nfs_action.type = primary_storage_option.get_type()
    nfs_action.url = primary_storage_option.get_url()
    nfs_action.zoneUuid = primary_storage_option.get_zone_uuid()
    evt = account_operations.execute_action_with_session(nfs_action, session_uuid)
    test_util.action_logger(
        'Create Primary Storage [uuid:] %s [name:] %s'
        % (evt.inventory.uuid, nfs_action.name))
    return evt.inventory
def create_local_primary_storage(primary_storage_option, session_uuid=None):
    '''
    Add a local primary storage described by *primary_storage_option*
    and return its inventory.
    '''
    local_action = api_actions.AddLocalPrimaryStorageAction()
    local_action.timeout = 30000
    local_action.name = primary_storage_option.get_name()
    local_action.description = primary_storage_option.get_description()
    local_action.type = primary_storage_option.get_type()
    local_action.url = primary_storage_option.get_url()
    local_action.zoneUuid = primary_storage_option.get_zone_uuid()
    evt = account_operations.execute_action_with_session(local_action, session_uuid)
    test_util.action_logger(
        'Create Primary Storage [uuid:] %s [name:] %s'
        % (evt.inventory.uuid, local_action.name))
    return evt.inventory
def add_mon_to_ceph_primary_storage(mon_urls, ceph_ps_uuid, system_tag=None, user_tag=None, session_uuid=None):
    # Add monitor nodes (mon_urls) to an existing Ceph primary storage.
    action = api_actions.AddMonToCephPrimaryStorageAction()
    action.timeout = 300000
    action.monUrls = mon_urls
    action.uuid = ceph_ps_uuid
    action.systemTags = system_tag
    action.userTags = user_tag
    evt = account_operations.execute_action_with_session(action, session_uuid)
    # NOTE(review): action.name is never assigned on this action --
    # presumably it defaults to None on the action object; confirm the
    # log call cannot raise AttributeError.
    test_util.action_logger('Add Mon To Ceph Primary Storage [uuid:] %s [name:] %s' % \
        (evt.inventory.uuid, action.name))
    return evt.inventory
def remove_mon_from_ceph_primary_storage(mon_hostnames, ceph_ps_uuid, system_tag=None, user_tag=None, session_uuid=None):
    '''
    Remove monitor nodes (by hostname) from a Ceph primary storage.

    Fixes two copy/paste defects in the original:
    - it instantiated RemoveMonFromCephBackupStorageAction (the backup
      storage action) instead of the primary storage one;
    - the log line said 'Add Mon To' instead of 'Remove Mon From'.
    '''
    action = api_actions.RemoveMonFromCephPrimaryStorageAction()
    action.timeout = 300000
    action.monHostnames = mon_hostnames
    action.uuid = ceph_ps_uuid
    action.systemTags = system_tag
    action.userTags = user_tag
    evt = account_operations.execute_action_with_session(action, session_uuid)
    test_util.action_logger('Remove Mon From Ceph Primary Storage [uuid:] %s [name:] %s' % \
        (evt.inventory.uuid, action.name))
    return evt.inventory
def create_ceph_primary_storage(primary_storage_option, session_uuid=None):
    # Add a Ceph primary storage: monitors plus the three RBD pools
    # (image cache, data volume, root volume) taken from the option.
    action = api_actions.AddCephPrimaryStorageAction()
    action.timeout = 300000
    action.name = primary_storage_option.get_name()
    action.description = primary_storage_option.get_description()
    action.type = primary_storage_option.get_type()
    action.monUrls = primary_storage_option.get_monUrls()
    action.imageCachePoolName = \
        primary_storage_option.get_imageCachePoolName()
    action.dataVolumePoolName = \
        primary_storage_option.get_dataVolumePoolName()
    action.rootVolumePoolName = \
        primary_storage_option.get_rootVolumePoolName()
    action.zoneUuid = primary_storage_option.get_zone_uuid()
    evt = account_operations.execute_action_with_session(action, session_uuid)
    test_util.action_logger('Create Primary Storage [uuid:] %s [name:] %s' % \
        (evt.inventory.uuid, action.name))
    return evt.inventory
def add_ceph_primary_storage_pool(primary_storage_uuid, pool_name, aliasName=None, isCreate=None, resourceUuid=None, poolType="Root", description=None, session_uuid=None):
    # Register (or create, when isCreate is truthy) a Ceph pool on an
    # existing Ceph primary storage; poolType defaults to "Root".
    action = api_actions.AddCephPrimaryStoragePoolAction()
    action.timeout = 300000
    action.primaryStorageUuid = primary_storage_uuid
    action.poolName = pool_name
    action.aliasName = aliasName
    action.description = description
    action.isCreate = isCreate
    action.type = poolType
    action.resourceUuid = resourceUuid
    evt = account_operations.execute_action_with_session(action, session_uuid)
    test_util.action_logger('Create Primary Storage [uuid:] %s Pool [uuid:] %s [name:] %s' % \
        (action.primaryStorageUuid, evt.inventory.uuid, action.poolName))
    return evt.inventory
def create_sharedblock_primary_storage(primary_storage_option, disk_uuid, session_uuid=None):
    '''
    Add a SharedBlock group primary storage over the given disks and
    return its inventory.
    '''
    sb_action = api_actions.AddSharedBlockGroupPrimaryStorageAction()
    sb_action.timeout = 300000
    sb_action.name = primary_storage_option.get_name()
    sb_action.zoneUuid = primary_storage_option.get_zone_uuid()
    sb_action.description = primary_storage_option.get_description()
    sb_action.diskUuids = disk_uuid
    evt = account_operations.execute_action_with_session(sb_action, session_uuid)
    test_util.action_logger(
        'Create SharedBlock Primary Storage [uuid:] %s [name:] %s'
        % (evt.inventory.uuid, sb_action.name))
    return evt.inventory
def delete_primary_storage(primary_storage_uuid, session_uuid=None):
    '''
    Delete a primary storage.

    Deleting a PS also deletes every VM and volume that uses it.
    '''
    delete_action = api_actions.DeletePrimaryStorageAction()
    delete_action.uuid = primary_storage_uuid
    delete_action.timeout = 600000
    test_util.action_logger('Delete Primary Storage [uuid:] %s' % primary_storage_uuid)
    evt = account_operations.execute_action_with_session(delete_action, session_uuid)
    return evt.inventory
def delete_ceph_primary_storage_pool(primary_storage_pool_uuid, session_uuid=None):
    '''
    Delete a Ceph primary storage pool.

    Deleting a PS pool also deletes every volume that uses it.
    '''
    delete_action = api_actions.DeleteCephPrimaryStoragePoolAction()
    delete_action.uuid = primary_storage_pool_uuid
    delete_action.timeout = 600000
    test_util.action_logger('Delete Primary Storage Pool [uuid:] %s' % primary_storage_pool_uuid)
    evt = account_operations.execute_action_with_session(delete_action, session_uuid)
    return evt.inventory
def attach_primary_storage(primary_storage_uuid, cluster_uuid, session_uuid=None):
    '''
    Attach a primary storage to a cluster and return the inventory.
    '''
    attach_action = api_actions.AttachPrimaryStorageToClusterAction()
    attach_action.clusterUuid = cluster_uuid
    attach_action.primaryStorageUuid = primary_storage_uuid
    attach_action.timeout = 300000
    test_util.action_logger(
        'Attach Primary Storage [uuid:] %s to Cluster [uuid:] %s'
        % (primary_storage_uuid, cluster_uuid))
    evt = account_operations.execute_action_with_session(attach_action, session_uuid)
    return evt.inventory
def detach_primary_storage(primary_storage_uuid, cluster_uuid, \
        session_uuid=None):
    '''
    Detach PS will stop all VMs using this volume.
    '''
    # Detach a primary storage from a cluster; returns the inventory.
    action = api_actions.DetachPrimaryStorageFromClusterAction()
    action.clusterUuid = cluster_uuid
    action.primaryStorageUuid = primary_storage_uuid
    action.timeout = 300000
    test_util.action_logger('Detach Primary Storage [uuid:] %s from Cluster [uuid:] %s' % \
        (primary_storage_uuid, cluster_uuid))
    evt = account_operations.execute_action_with_session(action, session_uuid)
    return evt.inventory
def change_primary_storage_state(primary_storage_uuid, state, session_uuid=None):
    '''Apply a state-change event (e.g. enable/disable) to a primary storage.'''
    act = api_actions.ChangePrimaryStorageStateAction()
    act.uuid = primary_storage_uuid
    act.stateEvent = state
    act.timeout = 300000
    test_util.action_logger('Change Primary Storage [uuid:] %s to [state:] %s' \
            % (primary_storage_uuid, state))
    return account_operations.execute_action_with_session(act, session_uuid).inventory
def cleanup_imagecache_on_primary_storage(primary_storage_uuid, session_uuid=None):
    '''Trigger an image-cache cleanup on a primary storage.'''
    act = api_actions.CleanUpImageCacheOnPrimaryStorageAction()
    act.uuid = primary_storage_uuid
    act.timeout = 300000
    test_util.action_logger('Cleanup Imagecache on Primary Storage [uuid:] %s' \
            % (primary_storage_uuid))
    return account_operations.execute_action_with_session(act, session_uuid).inventory
def reconnect_primary_storage(primary_storage_uuid, session_uuid=None):
    '''
    Reconnect a primary storage.
    '''
    act = api_actions.ReconnectPrimaryStorageAction()
    act.uuid = primary_storage_uuid
    act.timeout = 6000000
    test_util.action_logger('Reconnect Primary Storage [uuid:] %s' % primary_storage_uuid)
    return account_operations.execute_action_with_session(act, session_uuid).inventory
def get_trash_on_primary_storage(primary_storage_uuid, session_uuid=None):
    '''Query trash on a primary storage. Returns the whole event (not .inventory).'''
    act = api_actions.GetTrashOnPrimaryStorageAction()
    act.uuid = primary_storage_uuid
    act.timeout = 6000000
    test_util.action_logger('Get Trash On Primary Storage [uuid:] %s' % primary_storage_uuid)
    return account_operations.execute_action_with_session(act, session_uuid)
def clean_up_trash_on_primary_storage(primary_storage_uuid, trash_id=None, session_uuid=None):
    '''Clean up trash items on a primary storage.

    Returns the whole event (not .inventory).
    '''
    act = api_actions.CleanUpTrashOnPrimaryStorageAction()
    act.uuid = primary_storage_uuid
    act.trashId = trash_id
    act.timeout = 6000000
    test_util.action_logger('Clean Up Trash On Primary Storage [uuid:] %s' % primary_storage_uuid)
    return account_operations.execute_action_with_session(act, session_uuid)
| |
#!/usr/bin/env python
# IMPORT python base modules
import argparse
import glob
import subprocess
import sys
import os
from PySide.QtCore import *
from PySide.QtGui import *
# IMPORT custom modules
import BasketGlobals as config
import BasketBuilder
import gui.GUI_Launch as LauncherGUI
import utils.appconfig as appconfig
class Launcher:
    """Locate and launch the newest scene file for the active SEQ/SHOT.

    On construction the local project structure is (re)built so launched
    applications can find their files locally.
    """
    def __init__(self):
        # Mirror the server project structure locally before launching.
        localize = LocalizeProject()
        localize.buildlocal()
    def launch(self, appPath, filePath):
        """Open ``filePath`` with the application at ``appPath``.

        Nuke is started in NukeX mode in its own console window; Maya gets
        the project directory and a MEL startup script; everything else is
        launched with the file as its only argument.
        """
        if appPath == config.applicationPath('.nk'):
            print("Launching Nuke File for %s - %s" % (os.environ['SEQ'], os.environ['SHOT']))
            subprocess.Popen([appPath, '--nukex', filePath], creationflags=subprocess.CREATE_NEW_CONSOLE)
            return
        if appPath == config.applicationPath('.ma'):
            # appconfig.get_config_value('app', 'parsepath')
            subprocess.Popen([appPath, '-file', filePath, '-proj', appconfig.get_config_value('project', 'projdir'), '-script', r'\\awexpress.westphal.drexel.edu\digm_share\Classof2017\LobstersAreWeird\basket\maya\mayaLaunchCall.mel'])
            return
        else:
            subprocess.Popen([appPath, filePath])
            return
    def createNewFile(self, appPath):
        """Launch ``appPath`` without a file so the application starts a new scene."""
        # NUKE is a Special Snowflake
        if appPath == config.applicationPath('.nk'):
            print("Launching New Nuke File")
            subprocess.Popen([appPath, '--nukex'], creationflags=subprocess.CREATE_NEW_CONSOLE)
            return
        # Maya Needs its special little MEL file
        if appPath == config.applicationPath('.ma'):
            print("Launching New Maya File")
            subprocess.Popen([appPath, '-proj', appconfig.get_config_value('project', 'projdir'), '-script', r'\\awexpress.westphal.drexel.edu\digm_share\Classof2017\LobstersAreWeird\basket\maya\mayaLaunchCall.mel'])
            return
        # Houdini and Premiere are Chill AF
        else:
            print("Launching New Houdini/Premiere File")
            subprocess.Popen(appPath)
            return
    # Get the latest file for a stage
    def latestfile(self, stage, tag):
        """Return ``[path, mtime]`` of the newest file for ``stage``.

        If ``tag`` is given, only files whose basename contains ``tag`` are
        considered; otherwise the newest file matching the stage's glob
        pattern is used.
        """
        filetypes = ['', '*.ma', '*.ma', '*.ma', '*.hip', '*.ma', '*.ma', '*.nk', '*.pproj']
        stagedir = config.stageDir(stage)
        # If the user passes a tag filter:
        # sadly glob doesn't seem to have a way to filter by substring,
        # so scan the directory manually.
        if tag is not None:
            matchedfiles = [f for f in os.listdir(stagedir)
                            if os.path.basename(f).find(tag) > -1]
            # BUGFIX: the old loop seeded the comparison with the directory
            # itself (ctime of the ''-join), which could discard every match
            # and return the bare directory path. Pick the newest match
            # directly instead.
            newest = max(matchedfiles,
                         key=lambda f: os.path.getctime(os.path.join(stagedir, f)))
            # rebuild the file's path before returning to the launcher
            newest = os.path.join(stagedir, newest)
        else:
            newest = max(glob.iglob(os.path.join(stagedir, filetypes[stage])), key=os.path.getmtime)
        return [newest, os.path.getmtime(newest)]
    @Slot(int, str)
    def goLaunch(self, stage, tag):
        self.launch(config.applicationPath(stage), self.latestfile(stage, tag)[0])
    @Slot(int)
    def goNewFile(self, stage):
        self.createNewFile(
            config.applicationPath(stage)
        )
    @Slot(str)
    def goAsset(self, path):
        """Open an existing asset file, setting SEQ/SHOT from its path."""
        config.setSeq('assets')
        # BUGFIX: str.strip() removes a *set of characters* from both ends,
        # not a prefix -- slice the asset root off the front instead.
        assetRoot = str(os.path.join(config.serverDir(), 'working', 'assets'))
        relPath = path[len(assetRoot):] if path.startswith(assetRoot) else path
        psuedoShot = os.path.dirname(relPath)
        config.setShot(psuedoShot)
        filename, file_extension = os.path.splitext(path)
        self.launch(config.applicationPath(file_extension), path)
    @Slot(str)
    def goNewAsset(self, name):
        """Create the directories for a brand-new asset and open a Maya scene."""
        config.setSeq('assets')
        config.setShot(str(name))
        BasketBuilder.make_dir(os.path.join('working', os.getenv('SEQ'), os.getenv('SHOT')))
        self.createNewFile(config.applicationPath('.ma'))
    @Slot(int, str, str)
    def renderScene(self, stage, tag, cam):
        """Build (and currently only print) a RenderMan render command line."""
        cmd = '"C:\\Program Files\\Autodesk\\Maya2016.5\\bin\\render.exe" -r rman'
        project = ' -proj %s' % appconfig.get_config_value('project', 'projdir')
        shadingRate = ' -setAttr ShadingRate 16'
        if cam != '':
            camera = ' -cam %s' % cam
        else:
            camera = ''
        file = ' "%s"' % self.latestfile(stage, tag)[0]
        render_cmd = '%s%s%s%s%s' % (cmd, project, shadingRate, camera, file)
        # print() with a single argument works on both Python 2 and 3.
        print(render_cmd)
        # subprocess.Popen(render_cmd)
        # return
class LocalizeProject:
    """Build/refresh the local copy of the server project structure."""
    def __init__(self):
        # print() with a single argument works on both Python 2 and 3;
        # the rest of this file already uses the function form.
        print('Setting up Local Project Structure')
    def buildlocal(self):
        """Create the base local structure and mirror the production folders."""
        BasketBuilder.build_base_local()
        # Copy the remaining (working, publish, frames) folders down
        BasketBuilder.rep_prod_dir()
''' BEGIN FUNCTIONS
Run the command-line program and parse incoming arguments. '''
# Catch the initial input.
# The user can choose to enter command-line mode if they want.
def catch():
    """Parse the initial -c/--cmd flag and dispatch to CLI or GUI mode."""
    initParse = argparse.ArgumentParser()
    initParse.add_argument("-c", "--cmd",
                           help="Enter CommandLine Mode",
                           action="store_true")
    if initParse.parse_args().cmd:
        initialize()
    else:
        goUI()
def initialize():
    """Command-line mode: parse seq/shot/stage arguments and launch the file."""
    basketLaunch = Launcher()
    # Build the command-line argument parser.
    cli = argparse.ArgumentParser(
        prog="BasketLauncher",
        description="Application launcher to keep Ian's head on straight through Senior Project")
    cli.add_argument("--show",
                     help="Define the show",
                     type=str)
    cli.add_argument("-s", "--seq",
                     required=True,
                     help="Define the sequence",
                     type=str)
    cli.add_argument("-sh", "--shot",
                     required=True,
                     help="Define the shot",
                     type=str)
    cli.add_argument("-st", "--stage",
                     required=True,
                     help="Define the step of the process",
                     type=int)
    cli.add_argument("-t", "--tag",
                     help="Define a specific tag to open the most recent file",
                     type=str)
    # store_true: the flag takes no value and yields True when present.
    cli.add_argument("-r", "--render",
                     help="# # # NO ACTION # # #",
                     action="store_true")
    args = cli.parse_args()
    # Point the environment at the requested sequence/shot, then launch.
    config.setSeq(args.seq)
    config.setShot(args.shot)
    basketLaunch.launch(
        config.applicationPath(args.stage),
        basketLaunch.latestfile(args.stage, args.tag)[0]
    )
def goUI():
    """GUI mode: build the Qt application and wire up the launcher signals."""
    appLaunch = Launcher()
    app = QApplication(sys.argv)
    gui = LauncherGUI.Launcher()
    gui.setWindowTitle('LAWncher')
    signals = gui.centralWidget()
    signals.launch.connect(appLaunch.goLaunch)
    signals.createnew.connect(appLaunch.goNewFile)
    signals.openasset.connect(appLaunch.goAsset)
    signals.newasset.connect(appLaunch.goNewAsset)
    # signals.renderscene.connect(appLaunch.renderScene)
    os.environ['RMS_SCRIPT_PATHS'] = appconfig.get_config_value('project', 'rmsworkspace')
    sys.exit(app.exec_())
# Entry point: runs only when this file is executed directly, NOT imported.
if __name__ == "__main__":
    catch()
| |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from collections import OrderedDict, Mapping
from .copyutils import do_copy
import numpy as np
from astropy import log
from astropy.extern.six import string_types
import astropy.units as u
from .numpyutils import is_numeric_array
__all__ = ['BaseDescriptor', 'AdvancedDescriptor',
'ArrayData', 'PlainArrayData', 'Mask', 'ArrayMask', 'Meta', 'Unit',
'WCS', 'Flags', 'Uncertainty', 'UncertaintyData']
class BaseDescriptor(object):
    """Descriptor implementing a `property`-like attribute that stores its \
value in a private (underscore-prefixed) instance attribute.

    Can be applied directly as a class attribute or used as a decorator on
    a (bodyless) method, in which case the method's name and docstring are
    taken over.

    Parameters
    ----------
    attr : `str` or `types.MethodType`
        The attribute name, or -- when used as decorator -- the method
        whose name (and docstring, if present) define the attribute.  The
        method body itself is never executed.
    doc : `str`, optional
        Docstring for the class attribute.  Ignored when ``attr`` is a
        method that already carries a docstring.
        Default is ``""``.
    copy : `bool`, optional
        If ``True`` the setter stores a copy of the value instead of a
        reference.
        Default is ``False``.

    Notes
    -----
    The getter returns ``None`` when the attribute was never set, the
    deleter removes the private attribute entirely, and accessing the
    attribute on the class (not an instance) returns the descriptor
    object itself.
    """
    def __init__(self, attr, doc="", copy=False):
        # A non-string ``attr`` means the class is being used as decorator:
        # harvest the name (and the docstring, if any) from the method.
        if not isinstance(attr, string_types):
            if attr.__doc__:
                doc = attr.__doc__
            attr = attr.__name__
        self.__doc__ = doc
        # Values are stored under a leading-underscore ("private") name.
        self.attr = '_' + attr
        self.copy = copy
    def __get__(self, instance, owner=None):
        # Class-level access returns the descriptor object itself -- needed,
        # for example, so sphinx can document the attribute.
        if instance is None:
            return self
        return getattr(instance, self.attr, None)
    def __set__(self, instance, value):
        # Optionally store a copy so later mutation of the original object
        # does not leak into this instance.
        setattr(instance, self.attr, do_copy(value) if self.copy else value)
    def __delete__(self, instance):
        delattr(instance, self.attr)
class AdvancedDescriptor(BaseDescriptor):
    """Like `BaseDescriptor` but allows to specify a default for the getter \
and conditions/conversions for the setter.

    Getting an unset attribute stores and returns :meth:`create_default`
    instead of ``None``; setting first runs the value through
    :meth:`process_value`; deleting resets the attribute to the default
    instead of removing it.

    Parameters
    ----------
    args, kwargs :
        See `BaseDescriptor`.

    Notes
    -----
    Using this descriptor directly is discouraged; it does the same as
    `BaseDescriptor` but less efficiently.  Subclass it and override
    :meth:`create_default` and/or :meth:`process_value` to get an easily
    extendable attribute.
    """
    def create_default(self):
        """Create a default value for the property and return it.

        It is called during:

        - ``__get__``: if there is no private attribute (or it is ``None``).
        - ``__set__``: if the value is ``None``.
        - ``__delete__``: always.

        The returned value is saved as the private attribute afterwards to
        ensure correct behaviour for mutable defaults.

        Returns ``None`` unless overridden.
        """
    def _set_get_default(self, instance):
        """Create the default, save it as private attribute and return it.

        Should not be overridden in subclasses; it only exists to reduce
        code repetition.
        """
        # In case we are dealing with mutable types we must return the very
        # instance we saved!
        default = self.create_default()
        setattr(instance, self.attr, default)
        return default
    def process_value(self, instance, value):
        """Take the value, do appropriate conversions or checks, return it.

        Parameters
        ----------
        instance, value : any type
            The two parameters given to ``__set__``.

        Returns
        -------
        verified_value : any type
            The checked or converted value that will be set.

        Notes
        -----
        Subclasses should raise an exception when the value is invalid.
        If ``copy`` is enabled, the value is only copied afterwards when
        this method returned the *same* object it was given -- so an
        implementation that converts (and thereby already copies) the
        value must make sure the conversion really copied it.
        """
        return value
    def __get__(self, instance, owner=None):
        # The super call also makes sure the descriptor itself is returned
        # for class-level access.
        result = super(AdvancedDescriptor, self).__get__(instance, owner)
        # The superclass getter defaults to None; interpret that as "create
        # (and store) the default value".
        if result is None:
            result = self._set_get_default(instance)
        return result
    def __set__(self, instance, value):
        if value is None:
            # ``None`` means "reset to the default"; defaults never need to
            # be copied (create_default returns a fresh object).
            self._set_get_default(instance)
            return
        # May raise if the value doesn't meet the subclass' conditions.
        v_value = self.process_value(instance, value)
        if self.copy and value is v_value:
            # Only copy when process_value returned the very same object; a
            # different object is assumed to have been copied already.
            v_value = do_copy(v_value)
        # Store directly instead of temporarily toggling ``self.copy``
        # around a super() call (as the previous implementation did): the
        # descriptor instance is shared by every owner instance, so
        # mutating it is not thread-safe and would have left copying
        # permanently disabled if the super call raised in between.
        setattr(instance, self.attr, v_value)
    def __delete__(self, instance):
        # No super call: we don't want to delete the attribute, just reset
        # it to the default value.
        self._set_get_default(instance)
class WCS(BaseDescriptor):
    """Plain `BaseDescriptor` used for WCS attributes; adds no behavior."""
class Mask(BaseDescriptor):
    """Plain `BaseDescriptor` used for mask attributes; adds no behavior."""
class Flags(BaseDescriptor):
    """Plain `BaseDescriptor` used for flags attributes; adds no behavior."""
class UncertaintyData(BaseDescriptor):
    """Plain `BaseDescriptor` used for uncertainty data; adds no behavior."""
class Meta(AdvancedDescriptor):
    """An `AdvancedDescriptor` that defaults to an empty \
`~collections.OrderedDict` and only accepts `~collections.Mapping` values.

    Parameters
    ----------
    args, kwargs :
        see :class:`AdvancedDescriptor`.
    """
    def create_default(self):
        """Return a new, empty `~collections.OrderedDict`."""
        return OrderedDict()
    def process_value(self, instance, value):
        """Pass ``value`` through if it is a `~collections.Mapping`.

        Parameters
        ----------
        args, kwargs :
            see :meth:`AdvancedDescriptor.process_value`.

        Raises
        ------
        TypeError
            If the value is not a subclass of `~collections.Mapping`.

        Returns
        -------
        value : subclass of `~collections.Mapping`.
            The value that is being set as private attribute.
        """
        if isinstance(value, Mapping):
            return value
        raise TypeError("attribute '{0}' must be dict-like"
                        "".format(self.attr))
class ArrayData(AdvancedDescriptor):
    """An `AdvancedDescriptor` that accepts numerical `numpy.ndarray`-like \
values or converts the value to a numerical `numpy.ndarray`.

    Parameters
    ----------
    args, kwargs :
        see :class:`AdvancedDescriptor`.
    """
    def create_default(self):
        """No default value, this returns ``None``.
        """
    def process_value(self, instance, value):
        """Accept the value if it is `numpy.ndarray`-like, else convert it.

        Parameters
        ----------
        args, kwargs :
            see :meth:`AdvancedDescriptor.process_value`.

        Raises
        ------
        TypeError
            If the value is not a `numpy.ndarray` (or similar enough) or
            its dtype is not numerical.

        Returns
        -------
        value : `numpy.ndarray`-like
            The value that is being set as private attribute.

        See also
        --------
        nddata.utils.numpyutils.is_numeric_array

        Notes
        -----
        "ndarray-like" here means the value has ``shape``, ``__getitem__``
        and ``__array__``; anything else is cast with `numpy.asarray`
        (no subclass is kept, since mask/unit/etc. are stored elsewhere).
        """
        # Keep the original class name around for a helpful error message
        # in case the value cannot be converted.
        name = value.__class__.__name__
        looks_like_ndarray = all(hasattr(value, a)
                                 for a in ('shape', '__getitem__', '__array__'))
        if not looks_like_ndarray:
            value = np.asarray(value)
        # Final check that the array is numeric. This internally uses
        # np.asarray again -- usually cheap, but if setting data is ever
        # slow check whether this call is the bottleneck.
        if not is_numeric_array(value):
            raise TypeError("could not convert {0} to numeric numpy array."
                            "".format(name))
        return value
class PlainArrayData(AdvancedDescriptor):
    """An `AdvancedDescriptor` that casts the value to a plain (base class) \
`numpy.ndarray` and verifies its dtype is numerical.

    Parameters
    ----------
    args, kwargs :
        see :class:`AdvancedDescriptor`.
    """
    def create_default(self):
        """No default value, this returns ``None``.
        """
    def process_value(self, instance, value):
        """Cast the value to a plain `numpy.ndarray` and check its dtype.

        Parameters
        ----------
        args, kwargs :
            see :meth:`AdvancedDescriptor.process_value`.

        Raises
        ------
        TypeError
            If the value is not numerical.

        Returns
        -------
        value : `numpy.ndarray`
            The value that is being set as private attribute.

        See also
        --------
        nddata.utils.numpyutils.is_numeric_array
        """
        # Remember the original class name for the error message in case
        # the value cannot be converted to an allowed numpy.ndarray.
        name = value.__class__.__name__
        value = np.asarray(value)
        # is_numeric_array internally uses np.asarray again -- normally
        # cheap, but check here first if setting data ever becomes slow.
        if not is_numeric_array(value):
            raise TypeError("could not convert {0} to numeric numpy array."
                            "".format(name))
        return value
class ArrayMask(AdvancedDescriptor):
    """An `AdvancedDescriptor` which checks if the value looks like a \
boolean `numpy.ndarray` or converts it to one.

    Parameters
    ----------
    args, kwargs :
        see :class:`AdvancedDescriptor`.

    Notes
    -----
    Any value accepted by `numpy.asarray` can be set; it is converted with
    ``dtype=bool``, so every element is evaluated for truthiness.  A value
    that already is a boolean `numpy.ndarray` is stored as-is (no copy).

    One use would be to override the ``mask`` of an NDData-like class if a
    more `numpy.ma.MaskedArray`-like behaviour is wanted, e.g.
    ``NDData.mask = ArrayMask('mask', 'docstring', copy=False)`` -- but be
    aware that this affects all instances in the current session.
    """
    def create_default(self):
        """No default value, this returns ``None``.
        """
    def process_value(self, instance, value):
        """Check if the value is a boolean `numpy.ndarray` or cast it to one.

        Parameters
        ----------
        args, kwargs :
            see :meth:`AdvancedDescriptor.process_value`.

        Returns
        -------
        value : `numpy.ndarray`-like
            The value that is being set as private attribute.
        """
        # Fast path: already an ndarray (or subclass) with boolean dtype.
        if isinstance(value, np.ndarray) and value.dtype == bool:
            return value
        # Cast to a base-class boolean ndarray.  np.asarray only copies
        # when required, matching the former ``np.array(..., copy=False,
        # subok=False)`` call while staying compatible with NumPy >= 2.0,
        # where ``copy=False`` raises if a copy is unavoidable.
        return np.asarray(value, dtype=bool)
class Unit(AdvancedDescriptor):
    """An `AdvancedDescriptor` that stores the value as an \
`~astropy.units.Unit`.

    Parameters
    ----------
    args, kwargs :
        see :class:`AdvancedDescriptor`.
    """
    def create_default(self):
        """No default value, this returns ``None``.
        """
    def process_value(self, instance, value):
        """Coerce the value with `~astropy.units.Unit`.

        Parameters
        ----------
        args, kwargs :
            see :meth:`AdvancedDescriptor.process_value`.

        Raises
        ------
        UnitConversionError
            If the value is not castable to `~astropy.units.Unit`.

        Returns
        -------
        value : `~astropy.units.Unit`
            The value that is being set as private attribute.
        """
        # The Unit constructor raises if the conversion is impossible.
        return u.Unit(value)
class Uncertainty(AdvancedDescriptor):
    """An `AdvancedDescriptor` which ensures that \
    `~nddata.nddata.meta.NDUncertainty` is setup correctly as \
    uncertainty.
    Parameters
    ----------
    args, kwargs :
        see :class:`AdvancedDescriptor`.
    """
    def create_default(self):
        """No default value, this returns ``None``.
        """
    def process_value(self, instance, value):
        """Makes sure the uncertainty is setup correctly when set.
        Parameters
        ----------
        args, kwargs :
            see :meth:`AdvancedDescriptor.process_value`.
        Returns
        -------
        value : `~nddata.nddata.meta.NDUncertainty`-like
            The value that is being set as private attribute.
        Notes
        -----
        During ``__set__`` it checks if the value has an ``uncertainty_type``
        attribute. If it hasn't the value is wrapped as
        `~nddata.nddata.UnknownUncertainty`.
        Then if it's a subclass of `~nddata.nddata.meta.NDUncertainty` which
        already has a ``parent`` then it's wrapped as **reference** in another
        class (same class as before) so we have two uncertainties each linking
        to their own parent instead of stealing the ``parent``. Then the
        ``parent_nddata`` is set to the instance the setter was called on.
        """
        # NOTE(review): imported lazily inside the method, presumably to
        # avoid a circular import with the nddata package -- confirm.
        from ..nddata.meta import NDUncertainty
        # There is one requirements on the uncertainty: That
        # it has an attribute 'uncertainty_type'.
        # If it does not match this requirement convert it to an unknown
        # uncertainty.
        if not hasattr(value, 'uncertainty_type'):
            from ..nddata import UnknownUncertainty
            log.info('uncertainty should have attribute uncertainty_type.')
            # This wrapping would make the parents think that the value
            # was already copied so we must make sure it's copied here!
            value = UnknownUncertainty(value, copy=self.copy)
        # If it is a subclass of NDUncertainty we must set the
        # parent_nddata attribute. (#4152)
        if isinstance(value, NDUncertainty):
            # In case the uncertainty already has a parent create a new
            # instance because we need to assume that we don't want to
            # steal the uncertainty from another NDData object
            # NOTE(review): this reads the private ``_parent_nddata`` --
            # looks intentional to avoid the resolving getter, but verify
            # against the NDUncertainty implementation.
            if value._parent_nddata is not None:
                # FIXME: Unfortunatly printing a log info pops up far too often
                # so there is no hint when a new uncertainty was created
                # because the old one already had a parent...
                # log.info('created another uncertainty because the '
                #          'uncertainty already had a parent.')
                # Copy it if necessary because the parent will think it was
                # already copied since it's another instance!
                value = value.__class__(value, copy=self.copy)
            # Then link it to this NDData instance (internally this needs
            # to be saved as weakref but that's done by NDUncertainty
            # setter).
            value.parent_nddata = instance
        return value
| |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright 2012 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Command-line flag library.
Emulates gflags by wrapping cfg.ConfigOpts.
The idea is to move fully to cfg eventually, and this wrapper is a
stepping stone.
"""
import os
import socket
import sys
from nova.openstack.common import cfg
FLAGS = cfg.CONF
def parse_args(argv, default_config_files=None):
    """Run ``argv[1:]`` through cfg; return ``[prog]`` plus the leftovers."""
    FLAGS.disable_interspersed_args()
    leftovers = FLAGS(argv[1:],
                      project='nova',
                      default_config_files=default_config_files)
    return argv[:1] + leftovers
class UnrecognizedFlag(Exception):
    """Raised by DECLARE when a flag is not defined by the named module."""
def DECLARE(name, module_string, flag_values=FLAGS):
    """Import ``module_string`` if needed and verify it defined flag ``name``."""
    if module_string not in sys.modules:
        __import__(module_string, globals(), locals())
    if name in flag_values:
        return
    raise UnrecognizedFlag('%s not defined by %s' % (name, module_string))
def _get_my_ip():
"""
Returns the actual ip of the local machine.
This code figures out what source address would be used if some traffic
were to be sent out to some well known address on the Internet. In this
case, a Google DNS server is used, but the specific address does not
matter much. No traffic is actually sent.
"""
try:
csock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
csock.connect(('8.8.8.8', 80))
(addr, port) = csock.getsockname()
csock.close()
return addr
except socket.error:
return "127.0.0.1"
# Options that control where nova's code, state and config live.  They are
# registered as command-line options (below) so they are available before
# any other configuration is resolved.
core_opts = [
    cfg.StrOpt('connection_type',
               default=None,
               help='Deprecated (use compute_driver instead): Virtualization '
                    'api connection type : libvirt, xenapi, or fake'),
    cfg.StrOpt('sql_connection',
               default='sqlite:///$state_path/$sqlite_db',
               help='The SQLAlchemy connection string used to connect to the '
                    'database'),
    cfg.StrOpt('api_paste_config',
               default="api-paste.ini",
               help='File name for the paste.deploy config for nova-api'),
    cfg.StrOpt('pybasedir',
               default=os.path.abspath(os.path.join(os.path.dirname(__file__),
                                                    '../')),
               help='Directory where the nova python module is installed'),
    cfg.StrOpt('bindir',
               default='$pybasedir/bin',
               help='Directory where nova binaries are installed'),
    cfg.StrOpt('state_path',
               default='$pybasedir',
               help="Top-level directory for maintaining nova's state"),
    cfg.StrOpt('lock_path',
               default='$pybasedir',
               help='Directory to use for lock files'),
]
# Options that only make sense while developing or debugging.
debug_opts = [
    cfg.BoolOpt('fake_network',
                default=False,
                help='If passed, use fake network devices and addresses'),
    cfg.IntOpt('sql_connection_debug',
               default=0,
               help='Verbosity of SQL debugging information. 0=None, '
                    '100=Everything'),
    cfg.BoolOpt('sql_connection_trace',
                default=False,
                help='Add python stack traces to SQL as comment strings'),
]
# Register both groups as CLI options so they can be set on the command line
# as well as in configuration files.
FLAGS.register_cli_opts(core_opts)
FLAGS.register_cli_opts(debug_opts)
# Options shared across the nova services.  Unlike core_opts/debug_opts
# these are registered as regular configuration options (not CLI options).
global_opts = [
    cfg.StrOpt('my_ip',
               # NOTE: resolved once, at module import time.
               default=_get_my_ip(),
               help='ip address of this host'),
    cfg.ListOpt('region_list',
                default=[],
                help='list of region=fqdn pairs separated by commas'),
    cfg.StrOpt('aws_access_key_id',
               default='admin',
               help='AWS Access ID'),
    cfg.StrOpt('aws_secret_access_key',
               default='admin',
               help='AWS Access Key'),
    cfg.StrOpt('glance_host',
               default='$my_ip',
               help='default glance hostname or ip'),
    cfg.IntOpt('glance_port',
               default=9292,
               help='default glance port'),
    cfg.ListOpt('glance_api_servers',
                default=['$glance_host:$glance_port'],
                help='A list of the glance api servers available to nova. '
                     'Prefix with https:// for ssl-based glance api servers. '
                     '([hostname|ip]:port)'),
    cfg.BoolOpt('glance_api_insecure',
                default=False,
                help='Allow to perform insecure SSL (https) requests to '
                     'glance'),
    cfg.IntOpt('glance_num_retries',
               default=0,
               help='Number retries when downloading an image from glance'),
    cfg.IntOpt('s3_port',
               default=3333,
               help='port used when accessing the s3 api'),
    cfg.StrOpt('s3_host',
               default='$my_ip',
               help='hostname or ip for openstack to use when accessing '
                    'the s3 api'),
    # Message topics the various nova services listen on.
    cfg.StrOpt('cert_topic',
               default='cert',
               help='the topic cert nodes listen on'),
    cfg.StrOpt('compute_topic',
               default='compute',
               help='the topic compute nodes listen on'),
    cfg.StrOpt('console_topic',
               default='console',
               help='the topic console proxy nodes listen on'),
    cfg.StrOpt('scheduler_topic',
               default='scheduler',
               help='the topic scheduler nodes listen on'),
    cfg.StrOpt('volume_topic',
               default='volume',
               help='the topic volume nodes listen on'),
    cfg.StrOpt('network_topic',
               default='network',
               help='the topic network nodes listen on'),
    cfg.BoolOpt('api_rate_limit',
                default=True,
                help='whether to rate limit the api'),
    cfg.ListOpt('enabled_apis',
                default=['ec2', 'osapi_compute', 'osapi_volume', 'metadata'],
                help='a list of APIs to enable by default'),
    # EC2-compatibility API endpoint settings.
    cfg.StrOpt('ec2_host',
               default='$my_ip',
               help='the ip of the ec2 api server'),
    cfg.StrOpt('ec2_dmz_host',
               default='$my_ip',
               help='the internal ip of the ec2 api server'),
    cfg.IntOpt('ec2_port',
               default=8773,
               help='the port of the ec2 api server'),
    cfg.StrOpt('ec2_scheme',
               default='http',
               help='the protocol to use when connecting to the ec2 api '
                    'server (http, https)'),
    cfg.StrOpt('ec2_path',
               default='/services/Cloud',
               help='the path prefix used to call the ec2 api server'),
    # OpenStack API (osapi) extension loading.
    cfg.ListOpt('osapi_compute_ext_list',
                default=[],
                help='Specify list of extensions to load when using osapi_'
                     'compute_extension option with nova.api.openstack.'
                     'compute.contrib.select_extensions'),
    cfg.MultiStrOpt('osapi_compute_extension',
                    default=[
                        'nova.api.openstack.compute.contrib.standard_extensions'
                    ],
                    help='osapi compute extension to load'),
    cfg.ListOpt('osapi_volume_ext_list',
                default=[],
                help='Specify list of extensions to load when using osapi_'
                     'volume_extension option with nova.api.openstack.'
                     'volume.contrib.select_extensions'),
    cfg.MultiStrOpt('osapi_volume_extension',
                    default=[
                        'nova.api.openstack.volume.contrib.standard_extensions'
                    ],
                    help='osapi volume extension to load'),
    cfg.StrOpt('osapi_path',
               default='/v1.1/',
               help='the path prefix used to call the openstack api server'),
    cfg.StrOpt('osapi_compute_link_prefix',
               default=None,
               help='Base URL that will be presented to users in links '
                    'to the OpenStack Compute API'),
    cfg.StrOpt('osapi_glance_link_prefix',
               default=None,
               help='Base URL that will be presented to users in links '
                    'to glance resources'),
    cfg.IntOpt('osapi_max_limit',
               default=1000,
               help='the maximum number of items returned in a single '
                    'response from a collection resource'),
    cfg.StrOpt('metadata_host',
               default='$my_ip',
               help='the ip for the metadata api server'),
    cfg.IntOpt('metadata_port',
               default=8775,
               help='the port for the metadata api port'),
    cfg.StrOpt('default_image',
               default='ami-11111',
               help='default image to use, testing only'),
    cfg.StrOpt('default_instance_type',
               default='m1.small',
               help='default instance type to use, testing only'),
    cfg.StrOpt('null_kernel',
               default='nokernel',
               help='kernel image that indicates not to use a kernel, but to '
                    'use a raw disk image instead'),
    cfg.StrOpt('vpn_image_id',
               default='0',
               help='image id used when starting up a cloudpipe vpn server'),
    cfg.StrOpt('vpn_key_suffix',
               default='-vpn',
               help='Suffix to add to project name for vpn key and secgroups'),
    # Database connection behaviour.
    cfg.StrOpt('sqlite_db',
               default='nova.sqlite',
               help='the filename to use with sqlite'),
    cfg.BoolOpt('sqlite_synchronous',
                default=True,
                help='If passed, use synchronous mode for sqlite'),
    cfg.IntOpt('sql_idle_timeout',
               default=3600,
               help='timeout before idle sql connections are reaped'),
    cfg.IntOpt('sql_max_retries',
               default=10,
               help='maximum db connection retries during startup. '
                    '(setting -1 implies an infinite retry count)'),
    cfg.IntOpt('sql_retry_interval',
               default=10,
               help='interval between retries of opening a sql connection'),
    # Fully-qualified class names of the pluggable managers/drivers.
    cfg.StrOpt('compute_manager',
               default='nova.compute.manager.ComputeManager',
               help='full class name for the Manager for compute'),
    cfg.StrOpt('console_manager',
               default='nova.console.manager.ConsoleProxyManager',
               help='full class name for the Manager for console proxy'),
    cfg.StrOpt('cert_manager',
               default='nova.cert.manager.CertManager',
               help='full class name for the Manager for cert'),
    cfg.StrOpt('instance_dns_manager',
               default='nova.network.dns_driver.DNSDriver',
               help='full class name for the DNS Manager for instance IPs'),
    cfg.StrOpt('instance_dns_domain',
               default='',
               help='full class name for the DNS Zone for instance IPs'),
    cfg.StrOpt('floating_ip_dns_manager',
               default='nova.network.dns_driver.DNSDriver',
               help='full class name for the DNS Manager for floating IPs'),
    cfg.StrOpt('network_manager',
               default='nova.network.manager.VlanManager',
               help='full class name for the Manager for network'),
    cfg.StrOpt('volume_manager',
               default='nova.volume.manager.VolumeManager',
               help='full class name for the Manager for volume'),
    cfg.StrOpt('scheduler_manager',
               default='nova.scheduler.manager.SchedulerManager',
               help='full class name for the Manager for scheduler'),
    cfg.StrOpt('host',
               # NOTE: resolved once, at module import time.
               default=socket.gethostname(),
               help='Name of this node. This can be an opaque identifier. '
                    'It is not necessarily a hostname, FQDN, or IP address. '
                    'However, the node name must be valid within '
                    'an AMQP key, and if using ZeroMQ, a valid '
                    'hostname, FQDN, or IP address'),
    cfg.StrOpt('node_availability_zone',
               default='nova',
               help='availability zone of this node'),
    cfg.ListOpt('memcached_servers',
                default=None,
                help='Memcached servers or None for in process cache.'),
    cfg.StrOpt('instance_usage_audit_period',
               default='month',
               help='time period to generate instance usages for. '
                    'Time period must be hour, day, month or year'),
    cfg.IntOpt('bandwidth_poll_interval',
               # Keeps configs written against the old misspelled name working.
               deprecated_name='bandwith_poll_interval',
               default=600,
               help='interval to pull bandwidth usage info'),
    cfg.BoolOpt('start_guests_on_host_boot',
                default=False,
                help='Whether to restart guests when the host reboots'),
    cfg.BoolOpt('resume_guests_state_on_host_boot',
                default=False,
                help='Whether to start guests that were running before the '
                     'host rebooted'),
    cfg.StrOpt('default_ephemeral_format',
               default=None,
               help='The default format an ephemeral_volume will be '
                    'formatted with on creation.'),
    cfg.StrOpt('root_helper',
               default='sudo',
               help='Deprecated: command to use for running commands as root'),
    cfg.StrOpt('rootwrap_config',
               default=None,
               help='Path to the rootwrap configuration file to use for '
                    'running commands as root'),
    cfg.StrOpt('network_driver',
               default='nova.network.linux_net',
               help='Driver to use for network creation'),
    cfg.BoolOpt('use_ipv6',
                default=False,
                help='use ipv6'),
    cfg.BoolOpt('enable_instance_password',
                default=True,
                help='Allows use of instance password during '
                     'server creation'),
    cfg.IntOpt('password_length',
               default=12,
               help='Length of generated instance admin passwords'),
    cfg.BoolOpt('monkey_patch',
                default=False,
                help='Whether to log monkey patching'),
    cfg.ListOpt('monkey_patch_modules',
                default=[
                    'nova.api.ec2.cloud:nova.notifier.api.notify_decorator',
                    'nova.compute.api:nova.notifier.api.notify_decorator'
                ],
                help='List of modules/decorators to monkey patch'),
    cfg.BoolOpt('allow_resize_to_same_host',
                default=False,
                help='Allow destination machine to match source for resize. '
                     'Useful when testing in single-host environments.'),
    cfg.IntOpt('reclaim_instance_interval',
               default=0,
               help='Interval in seconds for reclaiming deleted instances'),
    cfg.IntOpt('zombie_instance_updated_at_window',
               default=172800,
               help='Number of seconds zombie instances are cleaned up.'),
    cfg.IntOpt('service_down_time',
               default=60,
               help='maximum time since last check-in for up service'),
    cfg.StrOpt('default_schedule_zone',
               default=None,
               help='availability zone to use when user doesn\'t specify one'),
    cfg.ListOpt('isolated_images',
                default=[],
                help='Images to run on isolated host'),
    cfg.ListOpt('isolated_hosts',
                default=[],
                help='Host reserved for specific images'),
    cfg.StrOpt('cache_images',
               default='all',
               help='Cache glance images locally. `all` will cache all'
                    ' images, `some` will only cache images that have the'
                    ' image_property `cache_in_nova=True`, and `none` turns'
                    ' off caching entirely'),
    cfg.BoolOpt('use_cow_images',
                default=True,
                help='Whether to use cow images'),
    cfg.StrOpt('compute_api_class',
               default='nova.compute.api.API',
               help='The full class name of the compute API class to use'),
    cfg.StrOpt('network_api_class',
               default='nova.network.api.API',
               help='The full class name of the network API class to use'),
    cfg.StrOpt('volume_api_class',
               default='nova.volume.api.API',
               help='The full class name of the volume API class to use'),
    cfg.StrOpt('security_group_handler',
               default='nova.network.sg.NullSecurityGroupHandler',
               help='The full class name of the security group handler class'),
    cfg.StrOpt('default_access_ip_network_name',
               default=None,
               help='Name of network to use to set access ips for instances'),
    cfg.StrOpt('auth_strategy',
               default='noauth',
               help='The strategy to use for auth: noauth or keystone.'),
    cfg.ListOpt('non_inheritable_image_properties',
                default=['cache_in_nova',
                         'instance_uuid',
                         'user_id',
                         'image_type',
                         'backup_type',
                         'min_ram',
                         'min_disk'],
                help='These are image properties which a snapshot should not'
                     ' inherit from an instance'),
    cfg.BoolOpt('defer_iptables_apply',
                default=False,
                help='Whether to batch up the application of IPTables rules'
                     ' during a host restart and apply all at the end of the'
                     ' init phase'),
]
FLAGS.register_opts(global_opts)
| |
# Each setting is documented at:
# https://docs.djangoproject.com/en/1.9/ref/settings/
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
import os
from datetime import timedelta
from django.utils.translation import ugettext_lazy as _
# Project root: two directory levels above this settings module.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this is a placeholder value; it must be replaced (e.g. via
# an environment variable) before any deployment, since JWT_AUTH below
# also signs tokens with it.
SECRET_KEY = 'ACHTUNG_choose-a-secret-key-and-keep-it-secret_ACHTUNG'
# All of the HTTPS/security hardening below is switched off; these
# defaults suit local development only.
SECURE_BROWSER_XSS_FILTER = False
SECURE_CONTENT_TYPE_NOSNIFF = False
SECURE_HSTS_INCLUDE_SUBDOMAINS = False
SECURE_HSTS_SECONDS = 0
SECURE_PROXY_SSL_HEADER = None
SECURE_REDIRECT_EXEMPT = []
SECURE_SSL_HOST = None
SECURE_SSL_REDIRECT = False
USE_X_FORWARDED_HOST = False
USE_X_FORWARDED_PORT = False
X_FRAME_OPTIONS = 'SAMEORIGIN'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
DEBUG_PROPAGATE_EXCEPTIONS = False
SILENCED_SYSTEM_CHECKS = []
MESSAGE_STORAGE = 'django.contrib.messages.storage.fallback.FallbackStorage'
AUTHENTICATION_BACKENDS = [
    'django.contrib.auth.backends.ModelBackend',
]
AUTH_USER_MODEL = 'auth.User'
LOGIN_REDIRECT_URL = '/accounts/profile/'
LOGIN_URL = '/accounts/login/'
PASSWORD_RESET_TIMEOUT_DAYS = 1
PASSWORD_HASHERS = [
    'django.contrib.auth.hashers.PBKDF2PasswordHasher'
]
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.'
                'UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.'
                'MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.'
                'CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.'
                'NumericPasswordValidator',
    },
]
ABSOLUTE_URL_OVERRIDES = {}
ADMINS = []
MANAGERS = ADMINS
ALLOWED_HOSTS = []
ALLOWED_INCLUDE_ROOTS = []
APPEND_SLASH = True
PREPEND_WWW = False
DISALLOWED_USER_AGENTS = []
INTERNAL_IPS = []
CACHES = {
    'default': {
        'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
        # 'BACKEND': 'django.core.cache.backends.db.DatabaseCache',
        # 'BACKEND': 'django.core.cache.backends.dummy.DummyCache',
        # 'BACKEND': 'django.core.cache.backends.filebased.FileBasedCache',
        # 'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
        # 'BACKEND': 'django.core.cache.backends.memcached.PyLibMCCache',
    }
}
CORS_ORIGIN_ALLOW_ALL = True
CSRF_COOKIE_AGE = 31449600  # one year
CSRF_COOKIE_DOMAIN = None
CSRF_COOKIE_HTTPONLY = False
CSRF_COOKIE_NAME = 'csrftoken'
CSRF_COOKIE_PATH = '/'
CSRF_COOKIE_SECURE = False
CSRF_FAILURE_VIEW = 'django.views.csrf.csrf_failure'
CSRF_HEADER_NAME = 'HTTP_X_CSRFTOKEN'
CSRF_TRUSTED_ORIGINS = []
SIGNING_BACKEND = 'django.core.signing.TimestampSigner'
SESSION_CACHE_ALIAS = 'default'
SESSION_COOKIE_AGE = 1209600  # two weeks
SESSION_COOKIE_DOMAIN = None
SESSION_COOKIE_HTTPONLY = True
SESSION_COOKIE_NAME = 'sessionid'
SESSION_COOKIE_PATH = '/'
SESSION_COOKIE_SECURE = False
SESSION_ENGINE = 'django.contrib.sessions.backends.db'
# SESSION_ENGINE = 'django.contrib.sessions.backends.file'
# SESSION_ENGINE = 'django.contrib.sessions.backends.cache'
# SESSION_ENGINE = 'django.contrib.sessions.backends.cached_db'
# SESSION_ENGINE = 'django.contrib.sessions.backends.signed_cookies'
SESSION_EXPIRE_AT_BROWSER_CLOSE = False
SESSION_FILE_PATH = None
SESSION_SAVE_EVERY_REQUEST = False
SESSION_SERIALIZER = 'django.contrib.sessions.serializers.JSONSerializer'
# PostgreSQL over the local UNIX socket by default; uncomment HOST/PORT
# below to connect over TCP/IP instead.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql',
        'NAME': 'heimdalerp',
        'USER': 'heimdalerp',
        'PASSWORD': None,
        #'HOST': '127.0.0.1',  # Uncomment to use TCP/IP
        #'PORT': '5432',  # Uncomment to use TCP/IP
        'ATOMIC_REQUESTS': False,
        'AUTOCOMMIT': True,
        'CONN_MAX_AGE': 0,
        'OPTIONS': {},
        'TIME_ZONE': None,
        'TEST': {
            'NAME': 'test_heimdalerp',
            'CHARSET': None,
            'COLLATION': None,
            'DEPENDENCIES': [],
            'MIRROR': None,
            'SERIALIZE': True,
            'USER': 'heimdalerp',
            'PASSWORD': None,
        },
    }
}
DATABASE_ROUTERS = []
REST_FRAMEWORK = {
    'DEFAULT_AUTHENTICATION_CLASSES': (
        'rest_framework.authentication.BasicAuthentication',  # Disable in production.
        'rest_framework.authentication.SessionAuthentication',  # Disable in production.
        'rest_framework_jwt.authentication.JSONWebTokenAuthentication',
    ),
    'DEFAULT_PERMISSION_CLASSES': [
        'rest_framework.permissions.IsAuthenticated'
    ],
    # NOTE(review): very large page size -- effectively disables pagination
    # for typical result sets; confirm this is intentional.
    'PAGE_SIZE': 100000,
    'DEFAULT_FILTER_BACKENDS': (
        'rest_framework.filters.DjangoFilterBackend',
    ),
    'TEST_REQUEST_DEFAULT_FORMAT': 'json'
}
# JWT tokens are signed with SECRET_KEY (HS256) and expire after 12 hours.
JWT_AUTH = {
    'JWT_ENCODE_HANDLER':
    'rest_framework_jwt.utils.jwt_encode_handler',
    'JWT_DECODE_HANDLER':
    'rest_framework_jwt.utils.jwt_decode_handler',
    'JWT_PAYLOAD_HANDLER':
    'rest_framework_jwt.utils.jwt_payload_handler',
    'JWT_PAYLOAD_GET_USER_ID_HANDLER':
    'rest_framework_jwt.utils.jwt_get_user_id_from_payload_handler',
    'JWT_RESPONSE_PAYLOAD_HANDLER':
    'rest_framework_jwt.utils.jwt_response_payload_handler',
    'JWT_SECRET_KEY': SECRET_KEY,
    'JWT_ALGORITHM': 'HS256',
    'JWT_VERIFY': True,
    'JWT_VERIFY_EXPIRATION': True,
    'JWT_LEEWAY': 0,
    'JWT_EXPIRATION_DELTA': timedelta(hours=12),
    'JWT_AUDIENCE': None,
    'JWT_ISSUER': None,
    'JWT_ALLOW_REFRESH': False,
    'JWT_REFRESH_EXPIRATION_DELTA': timedelta(days=7),
    'JWT_AUTH_HEADER_PREFIX': 'JWT',
}
DEFAULT_CHARSET = 'utf-8'
DEFAULT_CONTENT_TYPE = 'text/html'
DEFAULT_EXCEPTION_REPORTER_FILTER = (
    'django.views.debug.SafeExceptionReporterFilter'
)
DEFAULT_FILE_STORAGE = 'django.core.files.storage.FileSystemStorage'
FILE_CHARSET = 'utf-8'
FILE_UPLOAD_HANDLERS = [
    'django.core.files.uploadhandler.MemoryFileUploadHandler',
    'django.core.files.uploadhandler.TemporaryFileUploadHandler'
]
FILE_UPLOAD_MAX_MEMORY_SIZE = 52428800  # 50MB
FILE_UPLOAD_DIRECTORY_PERMISSIONS = None
FILE_UPLOAD_PERMISSIONS = None
FILE_UPLOAD_TEMP_DIR = None  # defaults to '/tmp' on UNIX-like OSs
DEFAULT_FROM_EMAIL = 'webmaster@localhost'
SERVER_EMAIL = "root@localhost"
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_HOST = 'localhost'
EMAIL_PORT = 25
EMAIL_HOST_PASSWORD = ''
EMAIL_HOST_USER = ''
EMAIL_SUBJECT_PREFIX = '[HeimdalERP] '
EMAIL_USE_TLS = False
EMAIL_USE_SSL = False
EMAIL_SSL_CERTFILE = None
EMAIL_SSL_KEYFILE = None
EMAIL_TIMEOUT = None
FIRST_DAY_OF_WEEK = 0  # Sunday
FIXTURE_DIRS = []
FORCE_SCRIPT_NAME = None
FORMAT_MODULE_PATH = None
IGNORABLE_404_URLS = []
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'rest_framework',
    'rest_framework_jwt',
    'corsheaders',
    'reversion',
    # HeimdalERP Core Apps
    'geo',
    'persons',
    'hr',
    'contact',
    'invoice',
    'accounting',
    # HeimdalERP Custom Apps
    'rest_framework_proxy',
    'invoice_ar',
]
MIDDLEWARE_CLASSES = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'corsheaders.middleware.CorsMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    'django.middleware.locale.LocaleMiddleware',
]
USE_ETAGS = False
ROOT_URLCONF = 'heimdalerp.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'heimdalerp.wsgi.application'
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'es'
LANGUAGE_COOKIE_AGE = None
LANGUAGE_COOKIE_DOMAIN = None
LANGUAGE_COOKIE_NAME = 'django_language'
LANGUAGE_COOKIE_PATH = '/'
LOCALE_PATHS = [
    'locale'
]
LANGUAGES = [
    ('en', _('English')),
    ('es', _('Spanish')),
]
TIME_ZONE = 'America/Argentina/Buenos_Aires'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_ROOT = ''  # '/var/www/heimdalerp/static/'
STATIC_URL = '/static/'
# NOTE(review): Django's setting is named STATICFILES_DIRS; this name is
# ignored by Django -- confirm whether it should be renamed.
STATICFILE_DIRS = []
STATICFILES_STORAGE = 'django.contrib.staticfiles.storage.StaticFilesStorage'
STATICFILES_FINDERS = [
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder'
]
MEDIA_ROOT = ''  # '/var/www/heimdalerp/media/'
MEDIA_URL = '/media/'  # It could also be: 'http://media.example.com/'
TEST_RUNNER = 'django.test.runner.DiscoverRunner'
TEST_NON_SERIALIZED_APPS = []
#
# CUSTOM MODULES SETTINGS
#
# invoice_ar
REST_PROXY = {
    'HOST': 'https://soa.afip.gob.ar'
}
| |
"""Hypergeometric and Meijer G-functions"""
from __future__ import print_function, division
from sympy.core import S, I, pi, oo, ilcm, Mod, C
from sympy.core.function import Function, Derivative, ArgumentIndexError
from sympy.core.containers import Tuple
from sympy.core.compatibility import reduce, range
from sympy.core.mul import Mul
from sympy.functions import (sqrt, exp, log, sin, cos, asin, atan,
sinh, cosh, asinh, acosh, atanh, acoth)
class TupleArg(Tuple):
    """A Tuple of parameters that supports taking limits elementwise."""

    def limit(self, x, xlim, dir='+'):
        """Compute the limit x -> xlim of every element."""
        from sympy.series.limits import limit
        limited = [limit(expr, x, xlim, dir) for expr in self.args]
        return TupleArg(*limited)
# TODO should __new__ accept **options?
# TODO should constructors check whether the parameters are sensible?
def _prep_tuple(v):
    """
    Turn an iterable of parameters *v* into a TupleArg, unpolarifying each
    entry along the way: both hypergeometric and meijer g-functions are
    unbranched in their parameters.

    Examples:

    >>> from sympy.functions.special.hyper import _prep_tuple
    >>> _prep_tuple([1, 2, 3])
    (1, 2, 3)
    >>> _prep_tuple((4, 5))
    (4, 5)
    >>> _prep_tuple((7, 8, 9))
    (7, 8, 9)
    """
    from sympy.simplify.simplify import unpolarify
    return TupleArg(*(unpolarify(p) for p in v))
class TupleParametersBase(Function):
    """Base class handling differentiation when some of the arguments are
    actually tuples."""
    # Not deduced automatically since there are Tuples as arguments.
    is_commutative = True

    def _eval_derivative(self, s):
        try:
            total = 0
            # Chain rule through the (tuple) parameters, when they depend
            # on the differentiation variable.
            if self.args[0].has(s) or self.args[1].has(s):
                for idx, param in enumerate(self._diffargs):
                    dparam = param.diff(s)
                    if dparam != 0:
                        total += self.fdiff((1, idx)) * dparam
            # Plus the derivative with respect to the main argument.
            return total + self.fdiff(3) * self.args[2].diff(s)
        except (ArgumentIndexError, NotImplementedError):
            return Derivative(self, s)
class hyper(TupleParametersBase):
    r"""
    The (generalized) hypergeometric function is defined by a series where
    the ratios of successive terms are a rational function of the summation
    index. When convergent, it is continued analytically to the largest
    possible domain.

    The hypergeometric function depends on two vectors of parameters, called
    the numerator parameters :math:`a_p`, and the denominator parameters
    :math:`b_q`. It also has an argument :math:`z`. The series definition is

    .. math ::
        {}_pF_q\left(\begin{matrix} a_1, \dots, a_p \\ b_1, \dots, b_q \end{matrix}
                     \middle| z \right)
        = \sum_{n=0}^\infty \frac{(a_1)_n \dots (a_p)_n}{(b_1)_n \dots (b_q)_n}
                            \frac{z^n}{n!},

    where :math:`(a)_n = (a)(a+1)\dots(a+n-1)` denotes the rising factorial.

    If one of the :math:`b_q` is a non-positive integer then the series is
    undefined unless one of the :math:`a_p` is a larger (i.e. smaller in
    magnitude) non-positive integer. If none of the :math:`b_q` is a
    non-positive integer and one of the :math:`a_p` is a non-positive
    integer, then the series reduces to a polynomial. To simplify the
    following discussion, we assume that none of the :math:`a_p` or
    :math:`b_q` is a non-positive integer. For more details, see the
    references.

    The series converges for all :math:`z` if :math:`p \le q`, and thus
    defines an entire single-valued function in this case. If :math:`p =
    q+1` the series converges for :math:`|z| < 1`, and can be continued
    analytically into a half-plane. If :math:`p > q+1` the series is
    divergent for all :math:`z`.

    Note: The hypergeometric function constructor currently does *not* check
    if the parameters actually yield a well-defined function.

    Examples
    ========

    The parameters :math:`a_p` and :math:`b_q` can be passed as arbitrary
    iterables, for example:

    >>> from sympy.functions import hyper
    >>> from sympy.abc import x, n, a
    >>> hyper((1, 2, 3), [3, 4], x)
    hyper((1, 2, 3), (3, 4), x)

    There is also pretty printing (it looks better using unicode):

    >>> from sympy import pprint
    >>> pprint(hyper((1, 2, 3), [3, 4], x), use_unicode=False)
      _
     |_  /1, 2, 3 |  \
     |   |        | x|
    3  2 \  3, 4  |  /

    The parameters must always be iterables, even if they are vectors of
    length one or zero:

    >>> hyper((1, ), [], x)
    hyper((1,), (), x)

    But of course they may be variables (but if they depend on x then you
    should not expect much implemented functionality):

    >>> hyper((n, a), (n**2,), x)
    hyper((n, a), (n**2,), x)

    The hypergeometric function generalizes many named special functions.
    The function hyperexpand() tries to express a hypergeometric function
    using named special functions.
    For example:

    >>> from sympy import hyperexpand
    >>> hyperexpand(hyper([], [], x))
    exp(x)

    You can also use expand_func:

    >>> from sympy import expand_func
    >>> expand_func(x*hyper([1, 1], [2], -x))
    log(x + 1)

    More examples:

    >>> from sympy import S
    >>> hyperexpand(hyper([], [S(1)/2], -x**2/4))
    cos(x)
    >>> hyperexpand(x*hyper([S(1)/2, S(1)/2], [S(3)/2], x**2))
    asin(x)

    We can also sometimes hyperexpand parametric functions:

    >>> from sympy.abc import a
    >>> hyperexpand(hyper([-a], [], x))
    (-x + 1)**a

    See Also
    ========

    sympy.simplify.hyperexpand
    sympy.functions.special.gamma_functions.gamma
    meijerg

    References
    ==========

    .. [1] Luke, Y. L. (1969), The Special Functions and Their Approximations,
           Volume 1
    .. [2] http://en.wikipedia.org/wiki/Generalized_hypergeometric_function
    """

    def __new__(cls, ap, bq, z):
        # TODO should we check convergence conditions?
        return Function.__new__(cls, _prep_tuple(ap), _prep_tuple(bq), z)

    @classmethod
    def eval(cls, ap, bq, z):
        from sympy import unpolarify
        # For p <= q the series defines an entire single-valued function
        # (see the class docstring), so the function is unbranched in z
        # and the argument may be unpolarified.
        if len(ap) <= len(bq):
            nz = unpolarify(z)
            if z != nz:
                return hyper(ap, bq, nz)

    def fdiff(self, argindex=3):
        """Derivative with respect to the argument z (argindex 3 only)."""
        if argindex != 3:
            raise ArgumentIndexError(self, argindex)
        # d/dz pFq(ap; bq; z) = prod(ap)/prod(bq) * pFq(ap+1; bq+1; z)
        nap = Tuple(*[a + 1 for a in self.ap])
        nbq = Tuple(*[b + 1 for b in self.bq])
        fac = Mul(*self.ap)/Mul(*self.bq)
        return fac*hyper(nap, nbq, self.argument)

    def _eval_expand_func(self, **hints):
        from sympy import gamma, hyperexpand
        if len(self.ap) == 2 and len(self.bq) == 1 and self.argument == 1:
            # Gauss' summation theorem for 2F1(a, b; c; 1).
            a, b = self.ap
            c = self.bq[0]
            return gamma(c)*gamma(c - a - b)/gamma(c - a)/gamma(c - b)
        return hyperexpand(self)

    def _eval_rewrite_as_Sum(self, ap, bq, z):
        """Rewrite as the defining series, guarded by the convergence
        statement (falls back to the unevaluated function otherwise)."""
        from sympy.functions import factorial, RisingFactorial, Piecewise
        n = C.Dummy("n", integer=True)
        rfap = Tuple(*[RisingFactorial(a, n) for a in ap])
        rfbq = Tuple(*[RisingFactorial(b, n) for b in bq])
        coeff = Mul(*rfap) / Mul(*rfbq)
        return Piecewise((C.Sum(coeff * z**n / factorial(n), (n, 0, oo)),
                          self.convergence_statement), (self, True))

    @property
    def argument(self):
        """ Argument of the hypergeometric function. """
        return self.args[2]

    @property
    def ap(self):
        """ Numerator parameters of the hypergeometric function. """
        return Tuple(*self.args[0])

    @property
    def bq(self):
        """ Denominator parameters of the hypergeometric function. """
        return Tuple(*self.args[1])

    @property
    def _diffargs(self):
        # Parameters that TupleParametersBase._eval_derivative applies the
        # chain rule over.
        return self.ap + self.bq

    @property
    def eta(self):
        """ A quantity related to the convergence of the series. """
        return sum(self.ap) - sum(self.bq)

    @property
    def radius_of_convergence(self):
        """
        Compute the radius of convergence of the defining series.

        Note that even if this is not oo, the function may still be evaluated
        outside of the radius of convergence by analytic continuation. But if
        this is zero, then the function is not actually defined anywhere else.

        >>> from sympy.functions import hyper
        >>> from sympy.abc import z
        >>> hyper((1, 2), [3], z).radius_of_convergence
        1
        >>> hyper((1, 2, 3), [4], z).radius_of_convergence
        0
        >>> hyper((1, 2), (3, 4), z).radius_of_convergence
        oo
        """
        # Non-positive integer parameters either truncate the series
        # (making it a polynomial, radius oo) or leave it undefined
        # (radius 0); see the class docstring. Sort that out first.
        if any(a.is_integer and (a <= 0) == True for a in self.ap + self.bq):
            aints = [a for a in self.ap if a.is_Integer and (a <= 0) == True]
            bints = [a for a in self.bq if a.is_Integer and (a <= 0) == True]
            if len(aints) < len(bints):
                return S(0)
            popped = False
            # Every non-positive integer b must be "cancelled" by some
            # numerator parameter a with b <= a <= 0, otherwise the series
            # is undefined.
            for b in bints:
                cancelled = False
                while aints:
                    a = aints.pop()
                    if a >= b:
                        cancelled = True
                        break
                    popped = True
                if not cancelled:
                    return S(0)
            if aints or popped:
                # There are still non-positive numerator parameters.
                # This is a polynomial.
                return oo
        if len(self.ap) == len(self.bq) + 1:
            return S(1)
        elif len(self.ap) <= len(self.bq):
            return oo
        else:
            return S(0)

    @property
    def convergence_statement(self):
        """ Return a condition on z under which the series converges. """
        from sympy import And, Or, re, Ne, oo
        R = self.radius_of_convergence
        if R == 0:
            return False
        if R == oo:
            return True
        # The special functions and their approximations, page 44
        e = self.eta
        z = self.argument
        c1 = And(re(e) < 0, abs(z) <= 1)
        c2 = And(0 <= re(e), re(e) < 1, abs(z) <= 1, Ne(z, 1))
        c3 = And(re(e) >= 1, abs(z) < 1)
        return Or(c1, c2, c3)

    def _eval_simplify(self, ratio, measure):
        # Simplification delegates to hyperexpand, which tries to express
        # the function in terms of named special functions.
        from sympy.simplify.hyperexpand import hyperexpand
        return hyperexpand(self)

    def _sage_(self):
        """Convert to the corresponding Sage hypergeometric object."""
        import sage.all as sage
        ap = [arg._sage_() for arg in self.args[0]]
        bq = [arg._sage_() for arg in self.args[1]]
        return sage.hypergeometric(ap, bq, self.argument._sage_())
class meijerg(TupleParametersBase):
    r"""
    The Meijer G-function is defined by a Mellin-Barnes type integral that
    resembles an inverse Mellin transform. It generalizes the hypergeometric
    functions.
    The Meijer G-function depends on four sets of parameters. There are
    "*numerator parameters*"
    :math:`a_1, \dots, a_n` and :math:`a_{n+1}, \dots, a_p`, and there are
    "*denominator parameters*"
    :math:`b_1, \dots, b_m` and :math:`b_{m+1}, \dots, b_q`.
    Confusingly, it is traditionally denoted as follows (note the position
    of `m`, `n`, `p`, `q`, and how they relate to the lengths of the four
    parameter vectors):
    .. math ::
        G_{p,q}^{m,n} \left(\begin{matrix}a_1, \dots, a_n & a_{n+1}, \dots, a_p \\
                                          b_1, \dots, b_m & b_{m+1}, \dots, b_q
                            \end{matrix} \middle| z \right).
    However, in sympy the four parameter vectors are always available
    separately (see examples), so that there is no need to keep track of the
    decorating sub- and super-scripts on the G symbol.
    The G function is defined as the following integral:
    .. math ::
         \frac{1}{2 \pi i} \int_L \frac{\prod_{j=1}^m \Gamma(b_j - s)
         \prod_{j=1}^n \Gamma(1 - a_j + s)}{\prod_{j=m+1}^q \Gamma(1- b_j +s)
         \prod_{j=n+1}^p \Gamma(a_j - s)} z^s \mathrm{d}s,
    where :math:`\Gamma(z)` is the gamma function. There are three possible
    contours which we will not describe in detail here (see the references).
    If the integral converges along more than one of them the definitions
    agree. The contours all separate the poles of :math:`\Gamma(1-a_j+s)`
    from the poles of :math:`\Gamma(b_k-s)`, so in particular the G function
    is undefined if :math:`a_j - b_k \in \mathbb{Z}_{>0}` for some
    :math:`j \le n` and :math:`k \le m`.
    The conditions under which one of the contours yields a convergent integral
    are complicated and we do not state them here, see the references.
    Note: Currently the Meijer G-function constructor does *not* check any
    convergence conditions.
    Examples
    ========
    You can pass the parameters either as four separate vectors:
    >>> from sympy.functions import meijerg
    >>> from sympy.abc import x, a
    >>> from sympy.core.containers import Tuple
    >>> from sympy import pprint
    >>> pprint(meijerg((1, 2), (a, 4), (5,), [], x), use_unicode=False)
     __1, 2 /1, 2  a, 4 |  \
    /__     |           | x|
    \_|4, 1 \ 5         |  /
    or as two nested vectors:
    >>> pprint(meijerg([(1, 2), (3, 4)], ([5], Tuple()), x), use_unicode=False)
     __1, 2 /1, 2  3, 4 |  \
    /__     |           | x|
    \_|4, 1 \ 5         |  /
    As with the hypergeometric function, the parameters may be passed as
    arbitrary iterables. Vectors of length zero and one also have to be
    passed as iterables. The parameters need not be constants, but if they
    depend on the argument then not much implemented functionality should be
    expected.
    All the subvectors of parameters are available:
    >>> from sympy import pprint
    >>> g = meijerg([1], [2], [3], [4], x)
    >>> pprint(g, use_unicode=False)
     __1, 1 /1  2 |  \
    /__     |      | x|
    \_|2, 2 \3  4 |  /
    >>> g.an
    (1,)
    >>> g.ap
    (1, 2)
    >>> g.aother
    (2,)
    >>> g.bm
    (3,)
    >>> g.bq
    (3, 4)
    >>> g.bother
    (4,)
    The Meijer G-function generalizes the hypergeometric functions.
    In some cases it can be expressed in terms of hypergeometric functions,
    using Slater's theorem. For example:
    >>> from sympy import hyperexpand
    >>> from sympy.abc import a, b, c
    >>> hyperexpand(meijerg([a], [], [c], [b], x), allow_hyper=True)
    x**c*gamma(-a + c + 1)*hyper((-a + c + 1,),
        (-b + c + 1,), -x)/gamma(-b + c + 1)
    Thus the Meijer G-function also subsumes many named functions as special
    cases. You can use expand_func or hyperexpand to (try to) rewrite a
    Meijer G-function in terms of named special functions. For example:
    >>> from sympy import expand_func, S
    >>> expand_func(meijerg([[],[]], [[0],[]], -x))
    exp(x)
    >>> hyperexpand(meijerg([[],[]], [[S(1)/2],[0]], (x/2)**2))
    sin(x)/sqrt(pi)
    See Also
    ========
    hyper
    sympy.simplify.hyperexpand
    References
    ==========
    .. [1] Luke, Y. L. (1969), The Special Functions and Their Approximations,
           Volume 1
    .. [2] http://en.wikipedia.org/wiki/Meijer_G-function
    """
    def __new__(cls, *args):
        """Build a G-function from five arguments (an, aother, bm, bother, z)
        or three arguments ((an, aother), (bm, bother), z)."""
        if len(args) == 5:
            # Normalize the flat 5-argument form to the nested 3-argument form.
            args = [(args[0], args[1]), (args[2], args[3]), args[4]]
        if len(args) != 3:
            raise TypeError("args must eiter be as, as', bs, bs', z or "
                            "as, bs, z")
        def tr(p):
            # Sanity-check and sympify one (vector, vector) parameter pair.
            if len(p) != 2:
                raise TypeError("wrong argument")
            return TupleArg(_prep_tuple(p[0]), _prep_tuple(p[1]))
        # TODO should we check convergence conditions?
        return Function.__new__(cls, tr(args[0]), tr(args[1]), args[2])
    def fdiff(self, argindex=3):
        """Derivative w.r.t. the argument z (argindex 3) via contiguous
        relations; any other argindex is delegated to _diff_wrt_parameter.
        (For parameters, argindex is indexable — see TupleParametersBase.)"""
        if argindex != 3:
            return self._diff_wrt_parameter(argindex[1])
        if len(self.an) >= 1:
            # Shift the first numerator parameter down by one.
            a = list(self.an)
            a[0] -= 1
            G = meijerg(a, self.aother, self.bm, self.bother, self.argument)
            return 1/self.argument * ((self.an[0] - 1)*self + G)
        elif len(self.bm) >= 1:
            # Otherwise shift the first denominator parameter up by one.
            b = list(self.bm)
            b[0] += 1
            G = meijerg(self.an, self.aother, b, self.bother, self.argument)
            return 1/self.argument * (self.bm[0]*self - G)
        else:
            return S.Zero
    def _diff_wrt_parameter(self, idx):
        # Differentiation wrt a parameter can only be done in very special
        # cases. In particular, if we want to differentiate with respect to
        # `a`, all other gamma factors have to reduce to rational functions.
        #
        # Let MT denote mellin transform. Suppose T(-s) is the gamma factor
        # appearing in the definition of G. Then
        #
        #   MT(log(z)G(z)) = d/ds T(s) = d/da T(s) + ...
        #
        # Thus d/da G(z) = log(z)G(z) - ...
        # The ... can be evaluated as a G function under the above conditions,
        # the formula being most easily derived by using
        #
        # d  Gamma(s + n)    Gamma(s + n) / 1    1              1     \
        # -- ------------ =  ------------ | - + ----  + ... + --------- |
        # ds  Gamma(s)        Gamma(s)    \ s   s + 1         s + n - 1 /
        #
        # which follows from the difference equation of the digamma function.
        # (There is a similar equation for -n instead of +n).
        # We first figure out how to pair the parameters.
        an = list(self.an)
        ap = list(self.aother)
        bm = list(self.bm)
        bq = list(self.bother)
        if idx < len(an):
            an.pop(idx)
        else:
            idx -= len(an)
            if idx < len(ap):
                ap.pop(idx)
            else:
                idx -= len(ap)
                if idx < len(bm):
                    bm.pop(idx)
                else:
                    bq.pop(idx - len(bm))
        pairs1 = []
        pairs2 = []
        for l1, l2, pairs in [(an, bq, pairs1), (ap, bm, pairs2)]:
            while l1:
                x = l1.pop()
                found = None
                for i, y in enumerate(l2):
                    if not Mod((x - y).simplify(), 1):
                        found = i
                        break
                if found is None:
                    raise NotImplementedError('Derivative not expressible '
                                              'as G-function?')
                # NOTE(review): at this point found == i (set just before the
                # break), so l2[i] is the matched element; using `found` here
                # would be clearer and less fragile.
                y = l2[i]
                l2.pop(i)
                pairs.append((x, y))
        # Now build the result.
        res = log(self.argument)*self
        for a, b in pairs1:
            sign = 1
            n = a - b
            base = b
            if n < 0:
                sign = -1
                n = b - a
                base = a
            for k in range(n):
                res -= sign*meijerg(self.an + (base + k + 1,), self.aother,
                                    self.bm, self.bother + (base + k + 0,),
                                    self.argument)
        for a, b in pairs2:
            sign = 1
            n = b - a
            base = a
            if n < 0:
                sign = -1
                n = a - b
                base = b
            for k in range(n):
                res -= sign*meijerg(self.an, self.aother + (base + k + 1,),
                                    self.bm + (base + k + 0,), self.bother,
                                    self.argument)
        return res
    def get_period(self):
        """
        Return a number P such that G(x*exp(I*P)) == G(x).
        >>> from sympy.functions.special.hyper import meijerg
        >>> from sympy.abc import z
        >>> from sympy import pi, S
        >>> meijerg([1], [], [], [], z).get_period()
        2*pi
        >>> meijerg([pi], [], [], [], z).get_period()
        oo
        >>> meijerg([1, 2], [], [], [], z).get_period()
        oo
        >>> meijerg([1,1], [2], [1, S(1)/2, S(1)/3], [1], z).get_period()
        12*pi
        """
        # This follows from slater's theorem.
        def compute(l):
            # first check that no two differ by an integer
            for i, b in enumerate(l):
                if not b.is_Rational:
                    return oo
                for j in range(i + 1, len(l)):
                    if not Mod((b - l[j]).simplify(), 1):
                        return oo
            # Period is determined by the lcm of the denominators.
            return reduce(ilcm, (x.q for x in l), 1)
        beta = compute(self.bm)
        alpha = compute(self.an)
        p, q = len(self.ap), len(self.bq)
        if p == q:
            if beta == oo or alpha == oo:
                return oo
            return 2*pi*ilcm(alpha, beta)
        elif p < q:
            return 2*pi*beta
        else:
            return 2*pi*alpha
    def _eval_expand_func(self, **hints):
        """Expand into named special functions where hyperexpand knows how."""
        from sympy import hyperexpand
        return hyperexpand(self)
    def _eval_evalf(self, prec):
        # The default code is insufficient for polar arguments.
        # mpmath provides an optional argument "r", which evaluates
        # G(z**(1/r)). I am not sure what its intended use is, but we hijack it
        # here in the following way: to evaluate at a number z of |argument|
        # less than (say) n*pi, we put r=1/n, compute z' = root(z, n)
        # (carefully so as not to lose the branch information), and evaluate
        # G(z'**(1/r)) = G(z'**n) = G(z).
        from sympy.functions import exp_polar, ceiling
        from sympy import Expr
        import mpmath
        z = self.argument
        znum = self.argument._eval_evalf(prec)
        if znum.has(exp_polar):
            znum, branch = znum.as_coeff_mul(exp_polar)
            if len(branch) != 1:
                return
            branch = branch[0].args[0]/I
        else:
            branch = S(0)
        n = ceiling(abs(branch/S.Pi)) + 1
        znum = znum**(S(1)/n)*exp(I*branch / n)
        # Convert all args to mpf or mpc
        try:
            [z, r, ap, bq] = [arg._to_mpmath(prec)
                              for arg in [znum, 1/n, self.args[0], self.args[1]]]
        except ValueError:
            return
        with mpmath.workprec(prec):
            v = mpmath.meijerg(ap, bq, z, r)
        return Expr._from_mpmath(v, prec)
    def integrand(self, s):
        """ Get the defining integrand D(s). """
        from sympy import gamma
        return self.argument**s \
            * Mul(*(gamma(b - s) for b in self.bm)) \
            * Mul(*(gamma(1 - a + s) for a in self.an)) \
            / Mul(*(gamma(1 - b + s) for b in self.bother)) \
            / Mul(*(gamma(a - s) for a in self.aother))
    @property
    def argument(self):
        """ Argument of the Meijer G-function. """
        return self.args[2]
    @property
    def an(self):
        """ First set of numerator parameters. """
        return Tuple(*self.args[0][0])
    @property
    def ap(self):
        """ Combined numerator parameters. """
        return Tuple(*(self.args[0][0] + self.args[0][1]))
    @property
    def aother(self):
        """ Second set of numerator parameters. """
        return Tuple(*self.args[0][1])
    @property
    def bm(self):
        """ First set of denominator parameters. """
        return Tuple(*self.args[1][0])
    @property
    def bq(self):
        """ Combined denominator parameters. """
        return Tuple(*(self.args[1][0] + self.args[1][1]))
    @property
    def bother(self):
        """ Second set of denominator parameters. """
        return Tuple(*self.args[1][1])
    @property
    def _diffargs(self):
        return self.ap + self.bq
    @property
    def nu(self):
        """ A quantity related to the convergence region of the integral,
            c.f. references. """
        return sum(self.bq) - sum(self.ap)
    @property
    def delta(self):
        """ A quantity related to the convergence region of the integral,
            c.f. references. """
        return len(self.bm) + len(self.an) - S(len(self.ap) + len(self.bq))/2
class HyperRep(Function):
    """
    A base class for "hyper representation functions".
    This is used exclusively in hyperexpand(), but fits more logically here.
    pFq is branched at 1 if p == q+1. For use with slater-expansion, we want
    to define an "analytic continuation" to all polar numbers, which is
    continuous on circles and on the ray t*exp_polar(I*pi). Moreover, we want
    a "nice" expression for the various cases.
    This base class contains the core logic, concrete derived classes only
    supply the actual functions.
    """
    @classmethod
    def eval(cls, *args):
        # Strip polar decoration from the parameters (but not from the final
        # argument, which carries the branch information) so that equivalent
        # invocations unify to the same expression.
        from sympy import unpolarify
        newargs = tuple(map(unpolarify, args[:-1])) + args[-1:]
        if args != newargs:
            return cls(*newargs)
    @classmethod
    def _expr_small(cls, x):
        """ An expression for F(x) which holds for |x| < 1. """
        raise NotImplementedError
    @classmethod
    def _expr_small_minus(cls, x):
        """ An expression for F(-x) which holds for |x| < 1. """
        raise NotImplementedError
    @classmethod
    def _expr_big(cls, x, n):
        """ An expression for F(exp_polar(2*I*pi*n)*x), |x| > 1. """
        raise NotImplementedError
    @classmethod
    def _expr_big_minus(cls, x, n):
        """ An expression for F(exp_polar(2*I*pi*n + pi*I)*x), |x| > 1. """
        raise NotImplementedError
    def _eval_rewrite_as_nonrep(self, *args):
        """Rewrite as a Piecewise of ordinary functions, split at |x| = 1."""
        from sympy import Piecewise
        x, n = self.args[-1].extract_branch_factor(allow_half=True)
        minus = False
        newargs = self.args[:-1] + (x,)
        # A half-integer branch factor means the argument carries an extra
        # exp_polar(I*pi), i.e. we are really looking at F(-x).
        if not n.is_Integer:
            minus = True
            n -= S(1)/2
        newerargs = newargs + (n,)
        if minus:
            small = self._expr_small_minus(*newargs)
            big = self._expr_big_minus(*newerargs)
        else:
            small = self._expr_small(*newargs)
            big = self._expr_big(*newerargs)
        if big == small:
            return small
        return Piecewise((big, abs(x) > 1), (small, True))
    def _eval_rewrite_as_nonrepsmall(self, *args):
        """Rewrite using only the |x| < 1 expression (caller guarantees |x| < 1)."""
        x, n = self.args[-1].extract_branch_factor(allow_half=True)
        args = self.args[:-1] + (x,)
        if not n.is_Integer:
            return self._expr_small_minus(*args)
        return self._expr_small(*args)
class HyperRep_power1(HyperRep):
    """Branch representative of hyper([-a], [], z) == (1 - z)**a."""
    @classmethod
    def _expr_small(cls, a, x):
        # F(x) for |x| < 1.
        return (1 - x)**a
    @classmethod
    def _expr_small_minus(cls, a, x):
        # F(-x) for |x| < 1.
        return (1 + x)**a
    @classmethod
    def _expr_big(cls, a, x, n):
        # Integer exponents are single-valued: no branch phase needed.
        if a.is_integer:
            return cls._expr_small(a, x)
        phase = exp((2*n - 1)*pi*I*a)
        return (x - 1)**a*phase
    @classmethod
    def _expr_big_minus(cls, a, x, n):
        if a.is_integer:
            return cls._expr_small_minus(a, x)
        phase = exp(2*n*pi*I*a)
        return (1 + x)**a*phase
class HyperRep_power2(HyperRep):
    """Branch representative of hyper([a, a - 1/2], [2*a], z)."""
    @classmethod
    def _expr_small(cls, a, x):
        return 2**(2*a - 1)*(1 + sqrt(1 - x))**(1 - 2*a)
    @classmethod
    def _expr_small_minus(cls, a, x):
        return 2**(2*a - 1)*(1 + sqrt(1 + x))**(1 - 2*a)
    @classmethod
    def _expr_big(cls, a, x, n):
        # Odd windings flip the sign of the sqrt branch and absorb one turn.
        if n.is_odd:
            sgn = 1
            n = n - 1
        else:
            sgn = -1
        return 2**(2*a - 1)*(1 + sgn*I*sqrt(x - 1))**(1 - 2*a)*exp(-2*n*pi*I*a)
    @classmethod
    def _expr_big_minus(cls, a, x, n):
        # Note: n.is_odd may be None for symbolic n; that is treated as even,
        # matching truthiness of the conditional.
        sgn = -1 if n.is_odd else 1
        return sgn*2**(2*a - 1)*(sqrt(1 + x) + sgn)**(1 - 2*a)*exp(-2*pi*I*a*n)
class HyperRep_log1(HyperRep):
    """Branch representative of -z*hyper([1, 1], [2], z) == log(1 - z)."""
    @classmethod
    def _expr_small(cls, x):
        return log(1 - x)
    @classmethod
    def _expr_small_minus(cls, x):
        return log(1 + x)
    @classmethod
    def _expr_big(cls, x, n):
        # Crossing the cut contributes an odd multiple of pi*I.
        branch_shift = (2*n - 1)*pi*I
        return log(x - 1) + branch_shift
    @classmethod
    def _expr_big_minus(cls, x, n):
        branch_shift = 2*n*pi*I
        return log(1 + x) + branch_shift
class HyperRep_atanh(HyperRep):
    """ Represent hyper([1/2, 1], [3/2], z) == atanh(sqrt(z))/sqrt(z). """
    # BUG FIX: _expr_small_minus, _expr_big and _expr_big_minus were missing
    # the @classmethod decorator. Sibling classes (see HyperRep_asin2) and
    # hyperexpand call these _expr_* hooks directly on the class, which would
    # raise a TypeError for an undecorated function (missing `cls`).
    @classmethod
    def _expr_small(cls, x):
        return atanh(sqrt(x))/sqrt(x)
    @classmethod
    def _expr_small_minus(cls, x):
        return atan(sqrt(x))/sqrt(x)
    @classmethod
    def _expr_big(cls, x, n):
        # The imaginary offset depends on the parity of the winding number.
        if n.is_even:
            return (acoth(sqrt(x)) + I*pi/2)/sqrt(x)
        else:
            return (acoth(sqrt(x)) - I*pi/2)/sqrt(x)
    @classmethod
    def _expr_big_minus(cls, x, n):
        if n.is_even:
            return atan(sqrt(x))/sqrt(x)
        else:
            return (atan(sqrt(x)) - pi)/sqrt(x)
class HyperRep_asin1(HyperRep):
    """Branch representative of hyper([1/2, 1/2], [3/2], z) == asin(sqrt(z))/sqrt(z)."""
    @classmethod
    def _expr_small(cls, z):
        root = sqrt(z)
        return asin(root)/root
    @classmethod
    def _expr_small_minus(cls, z):
        root = sqrt(z)
        return asinh(root)/root
    @classmethod
    def _expr_big(cls, z, n):
        root = sqrt(z)
        return S(-1)**n*((S(1)/2 - n)*pi/root + I*acosh(root)/root)
    @classmethod
    def _expr_big_minus(cls, z, n):
        root = sqrt(z)
        return S(-1)**n*(asinh(root)/root + n*pi*I/root)
class HyperRep_asin2(HyperRep):
    """Branch representative of hyper([1, 1], [3/2], z) == asin(sqrt(z))/sqrt(z)/sqrt(1-z)."""
    # TODO this can be nicer
    # Built as the quotient of the asin1 and power1 representatives, so each
    # factor carries the correct branch continuation.
    @classmethod
    def _expr_small(cls, z):
        numer = HyperRep_asin1._expr_small(z)
        denom = HyperRep_power1._expr_small(S(1)/2, z)
        return numer/denom
    @classmethod
    def _expr_small_minus(cls, z):
        numer = HyperRep_asin1._expr_small_minus(z)
        denom = HyperRep_power1._expr_small_minus(S(1)/2, z)
        return numer/denom
    @classmethod
    def _expr_big(cls, z, n):
        numer = HyperRep_asin1._expr_big(z, n)
        denom = HyperRep_power1._expr_big(S(1)/2, z, n)
        return numer/denom
    @classmethod
    def _expr_big_minus(cls, z, n):
        numer = HyperRep_asin1._expr_big_minus(z, n)
        denom = HyperRep_power1._expr_big_minus(S(1)/2, z, n)
        return numer/denom
class HyperRep_sqrts1(HyperRep):
    """ Return a representative for hyper([-a, 1/2 - a], [1/2], z). """
    @classmethod
    def _expr_small(cls, a, z):
        return ((1 - sqrt(z))**(2*a) + (1 + sqrt(z))**(2*a))/2
    @classmethod
    def _expr_small_minus(cls, a, z):
        return (1 + z)**a*cos(2*a*atan(sqrt(z)))
    @classmethod
    def _expr_big(cls, a, z, n):
        # The two sqrt branches pick up different monodromy phases; the parity
        # of the winding number n determines how the winding splits between them.
        if n.is_even:
            return ((sqrt(z) + 1)**(2*a)*exp(2*pi*I*n*a) +
                    (sqrt(z) - 1)**(2*a)*exp(2*pi*I*(n - 1)*a))/2
        else:
            n -= 1
            return ((sqrt(z) - 1)**(2*a)*exp(2*pi*I*a*(n + 1)) +
                    (sqrt(z) + 1)**(2*a)*exp(2*pi*I*a*n))/2
    @classmethod
    def _expr_big_minus(cls, a, z, n):
        # Continuation of the F(-z) form; the odd case carries an extra
        # -2*pi*a phase inside the cosine.
        if n.is_even:
            return (1 + z)**a*exp(2*pi*I*n*a)*cos(2*a*atan(sqrt(z)))
        else:
            return (1 + z)**a*exp(2*pi*I*n*a)*cos(2*a*atan(sqrt(z)) - 2*pi*a)
class HyperRep_sqrts2(HyperRep):
    """ Return a representative for
          sqrt(z)/2*[(1-sqrt(z))**2a - (1 + sqrt(z))**2a]
          == -2*z/(2*a+1) d/dz hyper([-a - 1/2, -a], [1/2], z)"""
    @classmethod
    def _expr_small(cls, a, z):
        return sqrt(z)*((1 - sqrt(z))**(2*a) - (1 + sqrt(z))**(2*a))/2
    @classmethod
    def _expr_small_minus(cls, a, z):
        return sqrt(z)*(1 + z)**a*sin(2*a*atan(sqrt(z)))
    @classmethod
    def _expr_big(cls, a, z, n):
        # Parity of the winding number n decides the phase split of the two
        # sqrt branches, mirroring HyperRep_sqrts1._expr_big.
        if n.is_even:
            return sqrt(z)/2*((sqrt(z) - 1)**(2*a)*exp(2*pi*I*a*(n - 1)) -
                              (sqrt(z) + 1)**(2*a)*exp(2*pi*I*a*n))
        else:
            n -= 1
            return sqrt(z)/2*((sqrt(z) - 1)**(2*a)*exp(2*pi*I*a*(n + 1)) -
                              (sqrt(z) + 1)**(2*a)*exp(2*pi*I*a*n))
    # BUG FIX: _expr_big_minus was missing @classmethod; the _expr_* hooks are
    # invoked on the class (cf. HyperRep base class and HyperRep_asin2), which
    # would raise a TypeError for an undecorated function.
    @classmethod
    def _expr_big_minus(cls, a, z, n):
        if n.is_even:
            return (1 + z)**a*exp(2*pi*I*n*a)*sqrt(z)*sin(2*a*atan(sqrt(z)))
        else:
            return (1 + z)**a*exp(2*pi*I*n*a)*sqrt(z) \
                *sin(2*a*atan(sqrt(z)) - 2*pi*a)
class HyperRep_log2(HyperRep):
    """ Represent log(1/2 + sqrt(1 - z)/2) == -z/4*hyper([3/2, 1, 1], [2, 2], z) """
    @classmethod
    def _expr_small(cls, z):
        return log(S(1)/2 + sqrt(1 - z)/2)
    @classmethod
    def _expr_small_minus(cls, z):
        return log(S(1)/2 + sqrt(1 + z)/2)
    @classmethod
    def _expr_big(cls, z, n):
        # The imaginary part of the continuation flips sign with the parity
        # of the winding number n.
        if n.is_even:
            return (n - S(1)/2)*pi*I + log(sqrt(z)/2) + I*asin(1/sqrt(z))
        else:
            return (n - S(1)/2)*pi*I + log(sqrt(z)/2) - I*asin(1/sqrt(z))
    # BUG FIX: _expr_big_minus was missing @classmethod; these _expr_* hooks
    # are invoked on the class (cf. HyperRep base class and HyperRep_asin2),
    # which would raise a TypeError for an undecorated function.
    @classmethod
    def _expr_big_minus(cls, z, n):
        if n.is_even:
            return pi*I*n + log(S(1)/2 + sqrt(1 + z)/2)
        else:
            return pi*I*n + log(sqrt(1 + z)/2 - S(1)/2)
class HyperRep_cosasin(HyperRep):
    """Branch representative of hyper([a, -a], [1/2], z) == cos(2*a*asin(sqrt(z)))."""
    # Many equivalent closed forms exist (e.g. as powers of a sum of square
    # roots); these keep the branch structure explicit.
    @classmethod
    def _expr_small(cls, a, z):
        root = sqrt(z)
        return cos(2*a*asin(root))
    @classmethod
    def _expr_small_minus(cls, a, z):
        root = sqrt(z)
        return cosh(2*a*asinh(root))
    @classmethod
    def _expr_big(cls, a, z, n):
        phase = a*pi*I*(2*n - 1)
        return cosh(2*a*acosh(sqrt(z)) + phase)
    @classmethod
    def _expr_big_minus(cls, a, z, n):
        phase = 2*a*pi*I*n
        return cosh(2*a*asinh(sqrt(z)) + phase)
class HyperRep_sinasin(HyperRep):
    """Branch representative of 2*a*z*hyper([1 - a, 1 + a], [3/2], z)
    == sqrt(z)/sqrt(1-z)*sin(2*a*asin(sqrt(z)))."""
    @classmethod
    def _expr_small(cls, a, z):
        root = sqrt(z)
        return root/sqrt(1 - z)*sin(2*a*asin(root))
    @classmethod
    def _expr_small_minus(cls, a, z):
        root = sqrt(z)
        return -root/sqrt(1 + z)*sinh(2*a*asinh(root))
    @classmethod
    def _expr_big(cls, a, z, n):
        phase = a*pi*I*(2*n - 1)
        return -1/sqrt(1 - 1/z)*sinh(2*a*acosh(sqrt(z)) + phase)
    @classmethod
    def _expr_big_minus(cls, a, z, n):
        phase = 2*a*pi*I*n
        return -1/sqrt(1 + 1/z)*sinh(2*a*asinh(sqrt(z)) + phase)
| |
import datetime
import os
import tempfile
from dateutil import relativedelta
import netCDF4
import numpy as np
import pytest
# Constants shared by the fixtures below.
_UNITS_STD_TIME = "days since 1990-01-01"  # CF-style time units string
_CALENDAR_NO_LEAP = "noleap"  # 365-day calendar (no leap days)
_DATESTR_FORMAT_MONTHLY = "Foo.bar.ha.%Y-%m.nc"  # strftime pattern for monthly files
@pytest.fixture(scope="module")
def cleandir():
    """Run this module's tests from a fresh temporary working directory.

    BUG FIX: the original fixture changed the process cwd and never restored
    it, leaking the directory change into every subsequently-run test module.
    Converted to a yield fixture that restores the previous cwd on teardown;
    the fixture value remains None, as before.
    """
    oldpath = os.getcwd()
    newpath = tempfile.mkdtemp()
    os.chdir(newpath)
    yield
    os.chdir(oldpath)
@pytest.fixture(scope="module")
def tempdestdir():
    """Module-scoped temporary destination directory."""
    path = tempfile.mkdtemp()
    return path
@pytest.fixture(scope="module")
def tempsrcdir():
    """Module-scoped temporary source directory."""
    path = tempfile.mkdtemp()
    return path
@pytest.fixture(scope="module")
def random_field():
    """A 5x5 array of uniform random values, shared across the module."""
    field = np.random.rand(5, 5)
    return field
@pytest.fixture(scope="module")
def mask4tests():
    # NOTE(review): np.random.randint(1, ...) always returns 0, so this mask
    # is all-False and masks nothing; possibly randint(2, ...) was intended
    # to get a random boolean mask -- confirm before changing.
    return np.random.randint(1, size=(1, 5, 5))
@pytest.fixture(scope="module")
def random_masked_field(mask4tests):
    """A (1, 5, 5) masked random array whose mask is taken from mask4tests."""
    data = np.random.rand(1, 5, 5)
    field = np.ma.masked_array(data)
    field.mask = mask4tests
    return field
@pytest.fixture(scope="module")
def hdf_file(random_field, tempsrcdir):
    """Write random_field into an HDF5 file; return None if h5py is unusable."""
    try:
        import h5py
        filename = os.path.join(tempsrcdir, "testhdf.hdf5")
        handle = h5py.File(filename, "w")
        dset = handle.create_dataset("random_field", random_field.shape, dtype="f")
        dset[:, :] = random_field
        handle.close()
        return filename
    except (ImportError, AttributeError):
        # h5py missing or incompatible -- dependent tests should skip.
        return None
@pytest.fixture(scope="module")
def foo_nc(random_field, tempsrcdir):
    """A NETCDF3_CLASSIC file holding random_field at time value 1.0."""
    path = os.path.join(tempsrcdir, "foo.nc")
    nrows, ncols = random_field.shape
    ds = netCDF4.Dataset(path, "w", format="NETCDF3_CLASSIC")
    ds.createDimension("dim0", nrows)
    ds.createDimension("dim1", ncols)
    ds.createDimension("time", None)  # unlimited time dimension
    var = ds.createVariable("random", "f8", ("time", "dim0", "dim1"))
    time = ds.createVariable("time", "f8", ("time",))
    time.units = _UNITS_STD_TIME
    var[0, :, :] = random_field
    time[:] = 1.0
    ds.close()
    return path
@pytest.fixture(scope="module")
def bar_nc(random_field, tempsrcdir):
    """ a simple netCDF file with a random field * 2"""
    # NOTE(review): unlike foo_nc this omits format= and so gets the library
    # default (NETCDF4) -- confirm whether the format difference is intended.
    filename = os.path.join(tempsrcdir, "bar.nc")
    dataset = netCDF4.Dataset(filename, "w")
    shape = random_field.shape
    dataset.createDimension("dim0", shape[0])
    dataset.createDimension("dim1", shape[1])
    dataset.createDimension("time", None)
    var = dataset.createVariable("random", "f8", ("time", "dim0", "dim1"))
    time = dataset.createVariable("time", "f8", ("time",))
    time.units = _UNITS_STD_TIME
    var[0, :, :] = random_field * 2.0
    time[:] = 2.0
    dataset.close()
    return filename
@pytest.fixture(scope="module")
def bar_mask_nc(random_masked_field, tempsrcdir):
    """ a simple netCDF file with a random masked field * 2"""
    # NOTE(review): writes to the same path ("bar.nc") as the bar_nc fixture;
    # if both module-scoped fixtures are used in one module the second one
    # overwrites the first -- confirm this cannot happen in practice.
    filename = os.path.join(tempsrcdir, "bar.nc")
    dataset = netCDF4.Dataset(filename, "w", format="NETCDF3_CLASSIC")
    shape = random_masked_field.shape
    dataset.createDimension("dim0", shape[1])
    dataset.createDimension("dim1", shape[2])
    dataset.createDimension("time", 1)
    var = dataset.createVariable("random", "f8", ("time", "dim0", "dim1"))
    time = dataset.createVariable("time", "f8", ("time",))
    time.units = _UNITS_STD_TIME
    var[:, :, :] = random_masked_field * 2.0
    time[:] = 2.0
    dataset.close()
    return filename
@pytest.fixture(scope="module")
def monthly_filelist(random_field, monthlydatetimelist, tempsrcdir):
    """Write one netCDF file per month in monthlydatetimelist; return the paths."""
    paths = []
    nrows, ncols = random_field.shape
    for date in monthlydatetimelist:
        path = os.path.join(tempsrcdir, date.strftime(_DATESTR_FORMAT_MONTHLY))
        ds = netCDF4.Dataset(path, "w")
        ds.createDimension("dim0", nrows)
        ds.createDimension("dim1", ncols)
        ds.createDimension("time", 1)
        var = ds.createVariable("random", "f8", ("time", "dim0", "dim1"))
        time = ds.createVariable("time", "f8", ("time",))
        time.units = _UNITS_STD_TIME
        time.calendar = _CALENDAR_NO_LEAP
        var[:, :, :] = random_field
        time[:] = netCDF4.date2num(date, _UNITS_STD_TIME, calendar=_CALENDAR_NO_LEAP)
        ds.close()
        paths.append(path)
    return paths
@pytest.fixture(scope="module")
def testfiles8589(random_field, tempsrcdir):
    """Write one annual netCDF file per year 1985-1989; return the paths."""
    paths = []
    nrows, ncols = random_field.shape
    for year in range(1985, 1990):
        date = datetime.datetime(year, 1, 1)
        path = os.path.join(tempsrcdir, date.strftime("%y.nc"))
        ds = netCDF4.Dataset(path, "w")
        ds.createDimension("dim0", nrows)
        ds.createDimension("dim1", ncols)
        ds.createDimension("time")  # unlimited
        var = ds.createVariable("random", "f8", ("time", "dim0", "dim1"))
        time = ds.createVariable("time", "f8", ("time",))
        time.units = _UNITS_STD_TIME
        time.calendar = _CALENDAR_NO_LEAP
        var[0, :, :] = random_field
        time[:] = netCDF4.date2num(date, _UNITS_STD_TIME, calendar=_CALENDAR_NO_LEAP)
        ds.close()
        paths.append(path)
    return paths
@pytest.fixture(scope="module")
def testfile85(random_field, tempsrcdir):
    """A single netCDF file holding 365 daily steps of 1985 (noleap calendar)."""
    start = datetime.datetime(1985, 1, 1)
    dates = [start + datetime.timedelta(days=offset) for offset in range(0, 365)]
    path = os.path.join(tempsrcdir, "85.nc")
    ds = netCDF4.Dataset(path, "w")
    nrows, ncols = random_field.shape
    ds.createDimension("dim0", nrows)
    ds.createDimension("dim1", ncols)
    ds.createDimension("time", len(dates))
    var = ds.createVariable("random", "f8", ("time", "dim0", "dim1"))
    time = ds.createVariable("time", "f8", ("time",))
    time.units = _UNITS_STD_TIME
    time.calendar = _CALENDAR_NO_LEAP
    var[:, :, :] = random_field  # broadcast over the time axis
    time[:] = netCDF4.date2num(dates, _UNITS_STD_TIME, calendar=_CALENDAR_NO_LEAP)
    ds.close()
    return path
@pytest.fixture(scope="module")
def testfileglobal(tempsrcdir):
    """A single-timestep global 1-degree lat/lon netCDF file of random data."""
    dates = [datetime.datetime.now()]
    path = os.path.join(tempsrcdir, "global.nc")
    ds = netCDF4.Dataset(path, "w")
    random_field = np.random.rand(1, 180, 360)  # 1degree resolution
    _, nlat, nlon = random_field.shape
    ds.createDimension("lat", nlat)
    ds.createDimension("lon", nlon)
    ds.createDimension("time", len(dates))
    var = ds.createVariable("random", "f8", ("time", "lat", "lon"))
    lon = ds.createVariable("lon", "f8", ("lon",))
    lat = ds.createVariable("lat", "f8", ("lat",))
    time = ds.createVariable("time", "f8", ("time",))
    time.units = _UNITS_STD_TIME
    time.calendar = _CALENDAR_NO_LEAP
    var[:, :, :] = random_field
    time[:] = netCDF4.date2num(dates, _UNITS_STD_TIME, calendar=_CALENDAR_NO_LEAP)
    lat[:] = np.linspace(-90.0, 90.0, nlat)
    lon[:] = np.linspace(-180.0, 180, nlon)
    ds.close()
    return path
@pytest.fixture(scope="module")
def monthlydatetimelist():
    """Twelve datetimes, one per month of the year 2000."""
    start = datetime.datetime(2000, 1, 1)
    return [start + relativedelta.relativedelta(months=offset)
            for offset in range(12)]
@pytest.fixture(scope="module")
def foo3c(tempsrcdir):
    """An empty NETCDF3_CLASSIC file."""
    path = os.path.join(tempsrcdir, "foo_3c.nc")
    handle = netCDF4.Dataset(path, "w", format="NETCDF3_CLASSIC")
    handle.close()
    return path
@pytest.fixture(scope="module")
def foo364(tempsrcdir):
    """An empty NETCDF3_64BIT file."""
    path = os.path.join(tempsrcdir, "foo_364.nc")
    handle = netCDF4.Dataset(path, "w", format="NETCDF3_64BIT")
    handle.close()
    return path
@pytest.fixture(scope="module")
def foo4c(tempsrcdir):
    """An empty NETCDF4_CLASSIC file."""
    path = os.path.join(tempsrcdir, "foo_4c.nc")
    handle = netCDF4.Dataset(path, "w", format="NETCDF4_CLASSIC")
    handle.close()
    return path
| |
#!/usr/bin/env python
# fibaro_motion_sensor_a.py
# Copyright (C) ContinuumBridge Limited, 2014-2015 - All Rights Reserved
# Written by Peter Claydon
#
# Module identity and timing constants for the adaptor.
ModuleName = "fibaro_motion_sensor"
BATTERY_CHECK_INTERVAL = 10800      # How often to check battery (secs) = 3 hours
SENSOR_POLL_INTERVAL = 600          # How often to request sensor values = 10 mins
TIME_CUTOFF = 1800                  # Data older than this is considered "stale"
import sys
import time
import json
import os
from pprint import pprint
from cbcommslib import CbAdaptor
from cbconfig import *
from twisted.internet import threads
from twisted.internet import reactor
class Adaptor(CbAdaptor):
    """Bridge adaptor for the Fibaro Z-Wave motion sensor.

    Translates Z-Wave command-class messages into bridge characteristics
    (binary_sensor, temperature, luminance, humidity, battery, connected)
    and relays them to the apps that subscribed via onAppRequest.
    """
    def __init__(self, argv):
        self.status = "ok"
        self.state = "stopped"
        # App IDs subscribed to each characteristic.
        self.apps = {"binary_sensor": [],
                     "temperature": [],
                     "luminance": [],
                     "battery": [],
                     "connected": []}
        # Device-side timestamps of the last reading forwarded per
        # characteristic, used to drop duplicate/stale readings.
        self.lastTemperatureTime = 0
        self.lastHumidityTime = 0
        self.lastLuminanceTime = 0
        self.lastBinaryTime = 0
        self.lastBatteryTime = 0
        # super's __init__ must be called:
        #super(Adaptor, self).__init__(argv)
        CbAdaptor.__init__(self, argv)
    def setState(self, action):
        """Update self.state per the requested action and notify the manager."""
        #self.cbLog("debug", "setting state to: " + action)
        # error is only ever set from the running state, so set back to running if error is cleared
        if action == "error":
            # BUG FIX: was `self.state == "error"` (a no-op comparison), so
            # the adaptor never actually entered the error state.
            self.state = "error"
        elif action == "clear_error":
            self.state = "running"
        else:
            self.state = action
        msg = {"id": self.id,
               "status": "state",
               "state": self.state}
        self.sendManagerMessage(msg)
    def sendCharacteristic(self, characteristic, data, timeStamp):
        """Send a characteristic update to every app subscribed to it."""
        msg = {"id": self.id,
               "content": "characteristic",
               "characteristic": characteristic,
               "data": data,
               "timeStamp": timeStamp}
        for a in self.apps[characteristic]:
            self.sendMessage(msg, a)
    def checkBattery(self):
        """Request a battery reading (command class 128) and reschedule."""
        self.cbLog("debug", "checkBattery")
        cmd = {"id": self.id,
               "request": "post",
               "address": self.addr,
               "instance": "0",
               "commandClass": "128",
               "action": "Get",
               "value": ""
              }
        self.sendZwaveMessage(cmd)
        reactor.callLater(BATTERY_CHECK_INTERVAL, self.checkBattery)
    def pollSensors(self):
        """Request multilevel sensor values (command class 49) and reschedule."""
        cmd = {"id": self.id,
               "request": "post",
               "address": self.addr,
               "instance": "0",
               "commandClass": "49",
               "action": "Get",
               "value": ""
              }
        self.sendZwaveMessage(cmd)
        reactor.callLater(SENSOR_POLL_INTERVAL, self.pollSensors)
    def forceInterview(self):
        """Ask the Z-Wave layer to re-interview the device."""
        self.cbLog("debug", "forceInterview")
        cmd = {"id": self.id,
               "request": "force_interview",
               "address": self.addr
              }
        self.sendZwaveMessage(cmd)
    def checkConnected(self):
        """Report connectivity: connected iff updateTime advanced since last check."""
        self.cbLog("debug", "checkConnected, updateTime: " + str(self.updateTime) + ", lastUpdateTime: " + str(self.lastUpdateTime))
        if self.updateTime == self.lastUpdateTime:
            self.connected = False
        else:
            self.connected = True
        self.sendCharacteristic("connected", self.connected, time.time())
        self.lastUpdateTime = self.updateTime
        reactor.callLater(SENSOR_POLL_INTERVAL * 2, self.checkConnected)
    def onZwaveMessage(self, message):
        """Handle messages from the Z-Wave layer.

        "init" configures the device (subscriptions, associations, LED and
        wakeup settings) and starts the periodic tasks; "data" forwards new
        sensor readings to subscribed apps, dropping duplicates and readings
        older than TIME_CUTOFF.
        """
        self.cbLog("debug", "onZwaveMessage, message: " + str(json.dumps(message, indent=4)))
        if message["content"] == "init":
            self.updateTime = 0
            self.lastUpdateTime = time.time()
            # Alarm command class
            cmd = {"id": self.id,
                   "request": "get",
                   "address": self.addr,
                   "instance": "0",
                   "commandClass": "48",
                   "value": "1"
                  }
            self.sendZwaveMessage(cmd)
            # Temperature
            cmd = {"id": self.id,
                   "request": "get",
                   "address": self.addr,
                   "instance": "0",
                   "commandClass": "49",
                   "value": "1"
                  }
            self.sendZwaveMessage(cmd)
            # luminance
            cmd = {"id": self.id,
                   "request": "get",
                   "address": self.addr,
                   "instance": "0",
                   "commandClass": "49",
                   "value": "3"
                  }
            self.sendZwaveMessage(cmd)
            # Battery
            cmd = {"id": self.id,
                   "request": "get",
                   "address": self.addr,
                   "instance": "0",
                   "commandClass": "128"
                  }
            self.sendZwaveMessage(cmd)
            # Associate PIR alarm with this controller
            cmd = {"id": self.id,
                   "request": "post",
                   "address": self.addr,
                   "instance": "0",
                   "commandClass": "133",
                   "action": "Set",
                   "value": "1,1"
                  }
            self.sendZwaveMessage(cmd)
            # Associate temperature/luminance with this controller
            cmd = {"id": self.id,
                   "request": "post",
                   "address": self.addr,
                   "instance": "0",
                   "commandClass": "133",
                   "action": "Set",
                   "value": "2,1"
                  }
            self.sendZwaveMessage(cmd)
            # Associate temperature/luminance with this controller
            cmd = {"id": self.id,
                   "request": "post",
                   "address": self.addr,
                   "instance": "0",
                   "commandClass": "133",
                   "action": "Set",
                   "value": "3,1"
                  }
            self.sendZwaveMessage(cmd)
            # Turn off LED for motion
            cmd = {"id": self.id,
                   "request": "post",
                   "address": self.addr,
                   "instance": "0",
                   "commandClass": "112",
                   "action": "Set",
                   "value": "80,0,1"
                  }
            self.sendZwaveMessage(cmd)
            # Turn off LED for tamper
            cmd = {"id": self.id,
                   "request": "post",
                   "address": self.addr,
                   "instance": "0",
                   "commandClass": "112",
                   "action": "Set",
                   "value": "89,0,1"
                  }
            self.sendZwaveMessage(cmd)
            # Change motion cancellation delay from 30s to 60s
            cmd = {"id": self.id,
                   "request": "post",
                   "address": self.addr,
                   "instance": "0",
                   "commandClass": "112",
                   "action": "Set",
                   "value": "6,60,2"
                  }
            self.sendZwaveMessage(cmd)
            # Wakeup every 5 minutes
            cmd = {"id": self.id,
                   "request": "post",
                   "address": self.addr,
                   "instance": "0",
                   "commandClass": "132",
                   "action": "Set",
                   "value": "300,1"
                  }
            self.sendZwaveMessage(cmd)
            reactor.callLater(300, self.checkBattery)
            reactor.callLater(30, self.pollSensors)
            reactor.callLater(300, self.checkConnected)
        elif message["content"] == "data":
            try:
                if message["commandClass"] == "49":
                    if message["value"] == "1":
                        temperature = message["data"]["val"]["value"]
                        updateTime = message["data"]["val"]["updateTime"]
                        # Only send if we don't already have an update from this time and the update is recent (not stale after restart)
                        if updateTime != self.lastTemperatureTime and time.time() - updateTime < TIME_CUTOFF:
                            self.cbLog("debug", "onZwaveMessage, temperature: " + str(temperature))
                            self.sendCharacteristic("temperature", temperature, updateTime)
                            self.lastTemperatureTime = updateTime
                    elif message["value"] == "3":
                        luminance = message["data"]["val"]["value"]
                        updateTime = message["data"]["val"]["updateTime"]
                        # NOTE(review): luminance/humidity are stamped with
                        # time.time() while temperature uses the device's
                        # updateTime -- confirm whether intentional.
                        if updateTime != self.lastLuminanceTime and time.time() - updateTime < TIME_CUTOFF:
                            self.cbLog("debug", "onZwaveMessage, luminance: " + str(luminance))
                            self.sendCharacteristic("luminance", luminance, time.time())
                            self.lastLuminanceTime = updateTime
                    elif message["value"] == "5":
                        humidity = message["data"]["val"]["value"]
                        updateTime = message["data"]["val"]["updateTime"]
                        if updateTime != self.lastHumidityTime and time.time() - updateTime < TIME_CUTOFF:
                            self.cbLog("debug", "onZwaveMessage, humidity: " + str(humidity))
                            self.sendCharacteristic("humidity", humidity, time.time())
                            self.lastHumidityTime = updateTime
                elif message["commandClass"] == "48":
                    if message["value"] == "1":
                        updateTime = message["data"]["level"]["updateTime"]
                        if updateTime != self.lastBinaryTime and time.time() - updateTime < TIME_CUTOFF:
                            if message["data"]["level"]["value"]:
                                b = "on"
                            else:
                                b = "off"
                            self.cbLog("debug", "onZwaveMessage, alarm: " + b)
                            self.sendCharacteristic("binary_sensor", b, time.time())
                            self.lastBinaryTime = updateTime
                elif message["commandClass"] == "128":
                    updateTime = message["data"]["last"]["updateTime"]
                    if (updateTime != self.lastBatteryTime) and (time.time() - updateTime < TIME_CUTOFF):
                        battery = message["data"]["last"]["value"]
                        self.cbLog("debug", "battery: " + str(battery))
                        msg = {"id": self.id,
                               "status": "battery_level",
                               "battery_level": battery}
                        self.sendManagerMessage(msg)
                        self.sendCharacteristic("battery", battery, time.time())
                        self.lastBatteryTime = updateTime
                self.updateTime = message["data"]["updateTime"]
            except Exception as ex:
                self.cbLog("warning", "onZwaveMessage, unexpected message: " + str(message))
                self.cbLog("warning", "Exception: " + str(type(ex)) + str(ex.args))
    def onAppInit(self, message):
        """Describe the services this adaptor offers to a connecting app."""
        self.cbLog("debug", "onAppInit, message: " + str(message))
        resp = {"name": self.name,
                "id": self.id,
                "status": "ok",
                "service": [{"characteristic": "binary_sensor", "interval": 0, "type": "pir"},
                            {"characteristic": "temperature", "interval": 600},
                            {"characteristic": "luminance", "interval": 600},
                            {"characteristic": "battery", "interval": 600},
                            {"characteristic": "connected", "interval": 600}],
                "content": "service"}
        self.sendMessage(resp, message["id"])
        self.setState("running")
    def onAppRequest(self, message):
        """Record which characteristics the requesting app subscribes to."""
        # Switch off anything that already exists for this app
        for a in self.apps:
            if message["id"] in self.apps[a]:
                self.apps[a].remove(message["id"])
        # Now update details based on the message
        for f in message["service"]:
            if message["id"] not in self.apps[f["characteristic"]]:
                self.apps[f["characteristic"]].append(message["id"])
        self.cbLog("debug", "apps: " + str(self.apps))
    def onAppCommand(self, message):
        """A sensor accepts no commands; log whatever arrives."""
        if "data" not in message:
            self.cbLog("warning", "app message without data: " + str(message))
        else:
            self.cbLog("warning", "This is a sensor. Message not understood: " + str(message))
    def onAction(self, action):
        """Handle a manager action; only "interview" is supported."""
        self.cbLog("debug", "onAction")
        if action == "interview":
            self.forceInterview()
        else:
            self.cbLog("warning", "onAction. Unrecognised action: " + str(action))
    def onConfigureMessage(self, config):
        #self.cbLog("debug", "onConfigureMessage, config: " + str(config))
        """Config is based on what apps are to be connected.
        May be called again if there is a new configuration, which
        could be because a new app has been added.
        """
        self.setState("starting")
if __name__ == '__main__':
    # Start the adaptor, handing it the process command-line arguments.
    Adaptor(sys.argv)
| |
# Copyright 2016 CityGrid Media, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
''' Organize options from config files '''
import os
import re
import sys
import ConfigParser
import c3.aws.ec2.ebs
import c3.utils.naming
import c3.utils.accounts
from c3.utils import logging
from ConfigParser import SafeConfigParser
def get_account_from_conf(conf=None):
    ''' Loads config only so we can get the account for ClusterConfig.

    :param conf: path to an INI file expected to hold a [cluster] section
        with an aws_account option.
    :returns: the aws_account value, or None when the section or option
        is missing (the parser error is logged).
    '''
    scp = SafeConfigParser()
    scp.read(conf)
    try:
        return scp.get('cluster', 'aws_account')
    except (ConfigParser.NoSectionError, ConfigParser.NoOptionError), msg:
        logging.error(msg)
        return None
def get_hvm_instances():
    ''' HVM instance types that are not compatible with paravirtual AMIs '''
    return [
        'cc2.8xlarge',
        'i2.xlarge', 'i2.2xlarge', 'i2.4xlarge', 'i2.8xlarge',
        'r3.large', 'r3.xlarge', 'r3.2xlarge', 'r3.4xlarge', 'r3.8xlarge',
        't2.micro', 't2.small', 't2.medium',
    ]
def verify_az(avz):
    ''' Verify an availability-zone name (e.g. "us-east-1a") via regex. '''
    return re.match(r"^\w+-\w+-\d\w$", avz) is not None
class TooManyAMIsError(Exception):
    ''' Raised when an AMI pattern matches more than one image. '''
    def __init__(self, value):
        self.value = value
        super(TooManyAMIsError, self).__init__(value)

    def __str__(self):
        return self.value
class AMINotFoundError(Exception):
    ''' Raised when no AMI matches the requested pattern. '''
    def __init__(self, value):
        self.value = value
        super(AMINotFoundError, self).__init__(value)

    def __str__(self):
        return self.value
class InvalidAZError(Exception):
    ''' Raised when an availability-zone name fails validation. '''
    def __init__(self, value):
        self.value = value
        super(InvalidAZError, self).__init__(value)

    def __str__(self):
        return self.value
class InvalidCIDRNameError(Exception):
    ''' Raised when a named network cannot be resolved to a CIDR. '''
    def __init__(self, value):
        self.value = value
        super(InvalidCIDRNameError, self).__init__(value)

    def __str__(self):
        return self.value
class ConfigNotFoundException(Exception):
    ''' Raised when a requested config file does not exist. '''
    def __init__(self, value):
        self.value = value
        super(ConfigNotFoundException, self).__init__(value)

    def __str__(self):
        return self.value
class EBSConfig(object):
    ''' Container for EBS volume configuration data. '''
    def __init__(self):
        # One dict per volume, plus the AZs the volumes live in.
        self.volumes = []
        self.azs = []

    def add_volumes(self, vol_type, device, size, iops):
        ''' Record a single volume definition. '''
        entry = {'type': vol_type, 'device': device,
                 'size': size, 'iops': iops}
        self.volumes.append(entry)

    def get_volumes(self):
        ''' All recorded volume definitions. '''
        return self.volumes

    def set_azs(self, azs):
        ''' Remember the availability zones for the volumes. '''
        self.azs = azs

    def get_azs(self):
        ''' Availability zones previously set. '''
        return self.azs
class ELBConfig(object):
    ''' Holds ELB (load balancer) configuration data. '''
    # pylint: disable=too-many-instance-attributes
    # Appropriate number of attributes for an ELB
    def __init__(self):
        self.enabled = None
        self.protocol = None
        self.public_port = None
        self.private_port = None
        self.vip_number = None
        self.hc_access_point = None
        self.hc_interval = None
        self.hc_target = None
        self.hc_healthy_threshold = None
        self.hc_unhealthy_threshold = None
        self.azs = []

    def validate(self):
        ''' Check every required option is set; disable the ELB otherwise. '''
        if not self.enabled:
            return self.enabled
        required = (
            'protocol', 'public_port', 'private_port',
            'hc_access_point', 'hc_interval', 'hc_target',
            'hc_healthy_threshold', 'hc_unhealthy_threshold', 'vip_number')
        for option in required:
            if getattr(self, option) is None:
                logging.error('%s not set, disabling ELB' % option)
                self.enabled = False
        return self.enabled

    def set_azs(self, azs):
        ''' Remember the ELB availability zones. '''
        self.azs = azs

    def get_azs(self):
        ''' The ELB availability zones. '''
        return self.azs
class SGConfig(object):
    ''' Accumulates security-group rule definitions. '''
    def __init__(self):
        self.cidr_rules = []
        self.sg_rules = []

    def add_cidr(self, proto, fport, lport, cidr):
        ''' Record one CIDR-based ingress rule. '''
        rule = {'proto': proto, 'fport': fport,
                'lport': lport, 'cidr': cidr}
        self.cidr_rules.append(rule)

    def add_sg(self, proto, fport, lport, owner, sgrp):
        ''' Record one security-group-based ingress rule. '''
        # pylint: disable=too-many-arguments
        # Appropriate number arguments of for add_sg
        rule = {'proto': proto, 'fport': fport,
                'lport': lport, 'owner': owner, 'sg': sgrp}
        self.sg_rules.append(rule)

    def get_cidr(self):
        ''' All recorded CIDR rules. '''
        return self.cidr_rules

    def get_sg(self):
        ''' All recorded SG rules. '''
        return self.sg_rules
class RAIDConfig(object):
    ''' Holds software-RAID configuration data. '''
    def __init__(self):
        self.enabled = None
        self.level = None
        self.device = None

    def set_level(self, level):
        ''' Record the RAID level. '''
        self.level = level

    def get_level(self):
        ''' The recorded RAID level. '''
        return self.level

    def set_device(self, device):
        ''' Record the RAID device. '''
        self.device = device

    def get_device(self):
        ''' The recorded RAID device. '''
        return self.device
class RDSSGConfig(object):
    ''' This Class is used to build RDS SG Authorizations. '''
    def __init__(self):
        self.cidr_rules = []
        self.sg_rules = []

    def add_cidr(self, cidr):
        ''' Record one CIDR authorization. '''
        self.cidr_rules.append({'cidr': cidr})

    def add_sg(self, oid, sid):
        ''' Record one security-group authorization. '''
        self.sg_rules.append({'oid': oid, 'sid': sid})

    def get_cidr(self):
        ''' All recorded CIDR authorizations. '''
        return self.cidr_rules

    def get_sg(self):
        ''' All recorded SG authorizations. '''
        return self.sg_rules
class RDSPGConfig(object):
    ''' This Class is used to get RDS Parameters. '''
    def __init__(self):
        self.rds_parameters = []

    def add_parameter(self, parameter):
        ''' Record one RDS parameter. '''
        self.rds_parameters.append(parameter)

    def get_parameters(self):
        ''' All recorded RDS parameters. '''
        return self.rds_parameters
class RDSInstanceConfig(object):
    ''' Manages storing RDS config items. '''
    def __init__(self):
        self.rds_conf = {}

    def add_config(self, key, value):
        ''' Store one RDS config item. '''
        self.rds_conf[key] = value

    def get_config(self):
        ''' The RDS config dictionary. '''
        return self.rds_conf
class ClusterConfig(object):
    """ A class to hold and manage cluster configuration data
    Config will come from all of the following, in priority order:
    1) The command line, via the Set methods
    2) The Class config file, named $env$class.ini
    3) The $AWS_CONF_DIR/cluster_defaults.ini.$AWS_PROFILE_NAME
    4) The $AWS_CONF_DIR/cluster_defaults.ini
    5) The $HOME/.cluster_defaults.ini (for "sshkey" only)
    """
    # pylint: disable=too-many-instance-attributes
    # Appropriate number of attributes for ClusterConfig
    def __init__(self, ini_file=None, account_name=None, prv_type='ec2',
                 verbose=False, no_defaults=False):
        self.no_defaults = no_defaults # only read self.classfile if True
        self.defaults = os.getenv('AWS_CONF_DIR') + '/cluster_defaults.ini'
        # This should hold only the ssh key
        self.personal_defaults = os.getenv('HOME') + '/.cluster_defaults.ini'
        self.classfile = ini_file
        if not os.path.exists(ini_file):
            raise ConfigNotFoundException('Invalid config: %s' % ini_file)
        self.verbose = verbose
        self.account_name = account_name
        self.global_ssg = None
        self.primary_sg = None
        # These aren't IN the config file, they're implied by the name
        self.server_env = None
        self.server_class = None
        # This should be looked up from the AZs used
        self.server_datacenter = None
        self.user_data_raw = None
        self.domain = None
        # Sub-config holders, populated by read_sections()
        self.ebs = EBSConfig()
        self.elb = ELBConfig()
        self.sgrp = SGConfig()
        self.raid = RAIDConfig()
        self.rds = RDSInstanceConfig()
        self.rds_sg = RDSSGConfig()
        self.rds_pg = RDSPGConfig()
        self.tagset = dict()
        # Values set via the Set methods override anything from the INI files
        self.overrides = dict()
        self.ini_files = list()
        self.ini = None
        # Read the in the INI files
        if self.no_defaults:
            self.read_files([self.classfile])
        else:
            self.read_files(
                [self.personal_defaults, self.defaults,
                 "%s-%s" % (self.defaults, self.account_name), self.classfile])
        self.read_sections(prv_type)
    def read_sections(self, prv_type):
        ''' Read sections based on provisioning type (ec2, rds or s3) '''
        if prv_type == 'ec2':
            self.get_meta_data()
            self.server_datacenter = self.get_cg_region()
            if self.ini.has_section('ebs'):
                self.read_ebs_config()
            if self.ini.has_section('elb'):
                self.read_elb_config()
            if self.ini.has_section('securitygroup'):
                self.read_sg_config()
            if self.ini.has_section('raid'):
                self.read_raid_config()
        elif prv_type == 'rds':
            self.get_meta_data()
            self.server_datacenter = self.get_cg_region()
            if self.ini.has_section('rds_provision'):
                self.read_rds_config()
            if self.ini.has_section('rds_securitygroup'):
                self.read_rds_sg_config()
            if self.ini.has_section('rds_parameters'):
                self.read_rds_pg_config()
        elif prv_type == 's3':
            # Nothing extra to read for s3 provisioning
            pass
    def get_meta_data(self):
        ''' Get metadata from classfile.

        The file name encodes the environment (first 3 chars) and the
        server class (next 3 chars), e.g. "prdweb.ini".
        '''
        self.server_env = os.path.basename(self.classfile)[:3]
        self.server_class = os.path.basename(self.classfile)[3:6]
        self.primary_sg = '%s%s' % (self.server_env, self.server_class)
        self.global_ssg = 'ssg-management'
    def get_server_env(self):
        ''' Return the server Environment '''
        return self.server_env
    def get_primary_sg(self):
        ''' Return the primary SG '''
        return self.primary_sg
    def get_global_ssg(self):
        ''' Return the Global SG '''
        return self.global_ssg
    def get_aws_region(self):
        ''' We work in only one region, so we can just take the first '''
        if self.get_azs()[0] == 'auto':
            return 'us-east-1'
        else:
            # AZ minus its trailing letter is the region, e.g. us-east-1a
            return self.get_azs()[0][:-1]
    def get_cg_region(self):
        ''' Return region from c3.utils.naming.get_aws_dc '''
        return c3.utils.naming.get_aws_dc(self.get_aws_region())
    def read_files(self, conf_files):
        ''' Read in ini files (silently skipping any that do not exist) '''
        logging.debug('Trying %s' % conf_files, self.verbose)
        for ini in conf_files:
            if os.path.exists(ini):
                self.ini_files.append(ini)
        logging.debug('Reading %s' % self.ini_files, self.verbose)
        # The env vars are exposed as ConfigParser defaults for interpolation
        self.ini = ConfigParser.ConfigParser({
            'AWS_BASE_DIR': os.getenv('AWS_BASE_DIR'),
            'AWS_CONF_DIR': os.getenv('AWS_CONF_DIR')})
        self.ini.read(self.ini_files)
    def get_ini(self, section, name, castf, fallback=None):
        ''' Get a setting from the ini files, cast through castf.

        Returns fallback (and logs) when the section or option is missing.
        '''
        try:
            return castf(self.ini.get(section, name))
        except ConfigParser.NoSectionError, msg:
            logging.error(msg)
            return fallback
        except ConfigParser.NoOptionError, msg:
            logging.error(msg)
            return fallback
        return fallback  # NOTE(review): unreachable -- both branches above return
    def set_ami(self, ami):
        ''' Set AMI '''
        self.overrides['ami'] = ami
    def get_ami(self):
        ''' Return the AMI.

        A literal "VTYPE" in the configured AMI is replaced with "hvm" or
        "paravirtual" depending on the configured instance size.
        '''
        if 'ami' in self.overrides:
            return self.overrides['ami']
        instance_type = self.get_size()
        raw_ami = self.get_ini('cluster', 'ami', str)
        if raw_ami.count('VTYPE'):
            if instance_type in get_hvm_instances():
                return raw_ami.replace('VTYPE', 'hvm')
            else:
                return raw_ami.replace('VTYPE', 'paravirtual')
        else:
            return raw_ami
    def get_whitelist_url(self):
        ''' Return the whitelist URL for puppet whitelisting '''
        if 'whitelisturl' in self.overrides:
            return self.overrides['whitelisturl']
        return self.get_ini("cluster", "whitelisturl", str)
    def get_resolved_ami(self, node_db):
        ''' Return resolved AMI.

        Graffiti-style names are looked up in node_db; a literal ami-...
        id is passed through (with a complaint). Raises AMINotFoundError
        or TooManyAMIsError when the lookup is not unambiguous.
        '''
        ami = self.get_ami()
        if ami[:4] == 'ami-':
            logging.error(
                'AMI statically set to %s. Please use graffiti values' % ami)
            return ami
        try:
            amis = node_db.get_amis(self.get_cg_region(), ami)
        # NOTE(review): bare except hides the real lookup failure
        except:
            raise AMINotFoundError("No AMI matching '%s' found" % ami)
        if amis is None:
            raise AMINotFoundError("No AMI matching '%s' found" % ami)
        if len(amis) == 1:
            newami = amis.values()[0]
            self.set_ami(newami)
            logging.debug(
                "Converted '%s' to '%s'" % (ami, newami), self.verbose)
            return newami
        elif len(amis) > 1:
            raise TooManyAMIsError("%s matches too many AMIs: %s" % (ami, amis))
    def limit_azs(self, limit):
        ''' Limit the number of AZs to use; returns how many were dropped '''
        if limit > 0:
            oldazs = self.get_azs()
            newazs = oldazs[:limit]
            self.set_azs(','.join(newazs))
            return len(oldazs) - len(newazs)
        else:
            logging.error("Trying to limit AZs to %d" % limit)
            return 0
    def set_azs(self, azs):
        ''' Set comma separated list of AZs; raises InvalidAZError '''
        for avz in azs.split(","):
            if not verify_az(avz):
                raise InvalidAZError("AZ '%s' is invalid" % avz)
        self.overrides['azs'] = azs.split(",")
    def get_azs(self):
        ''' Return AZ information '''
        zones = list()
        if 'azs' in self.overrides:
            return self.overrides['azs']
        ret = self.get_ini("cluster", "zone", str)
        if ret:
            for avz in ret.split(","):
                if not verify_az(avz):
                    raise InvalidAZError("AZ '%s' is invalid" % avz)
                zones.append(avz.strip())
        return zones
    def get_next_az(self):
        ''' Round-robin over the AZs.

        We'll need them in a list to do this, stick in overrides.
        '''
        if 'azs' not in self.overrides:
            self.overrides['azs'] = self.get_azs()
        # Rotate: take the head, push it to the back
        avz = self.overrides['azs'].pop(0)
        self.overrides['azs'].append(avz)
        return avz
    def get_count_azs(self):
        ''' Get the count of unique AZs '''
        return len(set(self.get_azs()))
    def set_count(self, count):
        ''' Set the instance count '''
        self.overrides['count'] = int(count)
    def get_count(self):
        ''' Return the instance count '''
        if 'count' in self.overrides:
            return self.overrides['count']
        return self.get_ini("cluster", "instance_count", int)
    def set_size(self, size):
        ''' Set the instance size '''
        self.overrides['size'] = size
    def get_size(self):
        ''' Return the instance size '''
        if 'size' in self.overrides:
            return self.overrides['size']
        return self.get_ini("cluster", "instance_size", str)
    def set_ssh_key(self, sshkey):
        ''' Set the ssh key '''
        self.overrides['sshkey'] = sshkey
    def get_ssh_key(self):
        ''' Return the ssh key '''
        if 'sshkey' in self.overrides:
            return self.overrides['sshkey']
        return self.get_ini("ssh", "sshkey", str)
    def get_dc(self):
        ''' Get the AWS region (warns if the deprecated INI option is set) '''
        if self.get_ini("DEFAULT", "datacenter", str) is not None:
            logging.error(
                "The 'datacenter' option is no longer read from the INI file")
        return self.get_cg_region()
    def get_user_data_file(self):
        ''' Return the userdata file '''
        return self.get_ini("cluster", "user_data_file", str, None)
    def get_user_data(self, replacements=None):
        ''' Get userdata and set replacements.

        :param replacements: optional dict of literal key -> value
            substitutions applied to the file contents.
        :returns: the (possibly substituted) userdata, stripped, or None
            when the file cannot be read.
        '''
        path = self.get_user_data_file()
        logging.debug('user_data_file: %s' % path, self.verbose)
        # Cache the raw file contents across calls
        if not self.user_data_raw:
            if os.path.exists(path):
                try:
                    udfile = file(path, "r")
                except IOError, msg:
                    logging.error(msg)
                    return None
                self.user_data_raw = udfile.read()
                udfile.close()
        udata = self.user_data_raw
        if replacements:
            for key in replacements.keys():
                logging.debug(
                    'Replacing %s with %s in %s' %
                    (key, replacements[key], path), self.verbose)
                udata = udata.replace(key, replacements[key])
        return udata.strip()
    def get_tagset(self):
        ''' Return the tagset cost tags.

        Component/Env come straight from the INI for meta.ini based
        configs; otherwise Component is prefixed with the server class
        and Env is derived from the classfile name.
        '''
        self.tagset['BusinessUnit'] = self.get_ini("tags", "business_unit", str)
        self.tagset['Team'] = self.get_ini("tags", "team", str)
        self.tagset['Project'] = self.get_ini("tags", "project", str)
        if any(ent for ent in self.ini_files if ent.endswith('meta.ini')):
            self.tagset['Component'] = self.get_ini("tags", "component", str)
            self.tagset['Env'] = self.get_ini("tags", "env", str)
        else:
            comp = self.get_ini("tags", "component", str)
            # Avoid double-prefixing when the component already starts
            # with "<class> "
            if comp[:4] == self.server_class + ' ':
                self.tagset['Component'] = self.get_ini(
                    "tags", "component", str)
            else:
                self.tagset['Component'] = "%s %s" % (
                    self.server_class, self.get_ini("tags", "component", str))
            self.tagset['Env'] = self.get_server_env()
        return self.tagset
    def get_launch_timeout(self):
        ''' Return launch timeout '''
        return self.get_ini("cluster", "launch_timeout", int)
    def get_sleep_step(self):
        ''' Return sleep step '''
        return self.get_ini("cluster", "sleep_step", int)
    def add_sg(self, sgp):
        ''' Adding additional SGs '''
        if 'other_sgs' not in self.overrides:
            self.overrides['other_sgs'] = self.get_additional_sgs()
        self.overrides['other_sgs'].append(sgp)
    def get_additional_sgs(self):
        ''' Returns additional SGs '''
        other_sgs = list()
        if 'other_sgs' in self.overrides:
            return self.overrides['other_sgs']
        ret = self.get_ini("cluster", "additional_sgs", str)
        if ret:
            for sgr in ret.split(','):
                other_sgs.append(sgr.strip())
        return other_sgs
    def get_sgs(self):
        ''' Return all SGs (additional SGs plus the primary env+class SG) '''
        ret = self.get_additional_sgs()
        ret.append("%s%s" % (self.server_env, self.server_class))
        return ret
    def get_node_groups(self):
        ''' Return Node groups '''
        node_groups = list()
        if 'node_groups' in self.overrides:
            return self.overrides['node_groups']
        ret = self.get_ini("cluster", "node_groups", str)
        if ret:
            for ngrp in ret.split(','):
                node_groups.append(ngrp.strip())
        return node_groups
    def set_allocate_eips(self):
        ''' Set allocated EIPs '''
        self.overrides['allocate_eips'] = True
        return True
    def get_allocate_eips(self):
        ''' Return allocated EIPs '''
        if 'allocate_eips' in self.overrides:
            return self.overrides['allocate_eips']
        # Only the literal string "True" in the INI enables EIP allocation
        if self.get_ini("cluster", "allocate_eip", str) == "True":
            allocate_eips = True
        else:
            allocate_eips = False
        return allocate_eips
    def set_use_ebs_optimized(self):
        ''' Set use EBS optimized '''
        self.overrides['use_ebs_optimized'] = True
    def get_use_ebs_optimized(self):
        ''' Get EBS optimized option '''
        if 'use_ebs_optimized' in self.overrides:
            return self.overrides['use_ebs_optimized']
        # NOTE(review): any non-empty INI value (even "False") is truthy here
        if self.get_ini("cluster", "use_ebs_optimized", str):
            use_ebs_optimized = True
        else:
            use_ebs_optimized = False
        return use_ebs_optimized
    def get_aws_account(self):
        ''' Returns AWS account name. '''
        return self.account_name
    def get_domain(self):
        ''' Returns domain '''
        return self.get_ini('cluster', 'domain', str)
    def get_fs_type(self):
        ''' Get the filesystem type '''
        return self.get_ini('cluster', 'fs_type', str)
    def read_ebs_config(self):
        ''' Read EBS config options.

        Each [ebs] entry maps a device name to either
        "<type> <size> <iops>" or "<type> <size>".
        '''
        for vol in self.ini.items("ebs"):
            if len(vol[1].split()) == 3:
                device = vol[0]
                (vol_type, size, iops) = vol[1].split(" ")
                self.ebs.add_volumes(
                    vol_type, "/dev/" + device, size, iops)
            elif len(vol[1].split()) == 2:
                device = vol[0]
                (vol_type, size) = vol[1].split(" ")
                self.ebs.add_volumes(
                    vol_type, "/dev/" + device, size, None)
    def get_ebs_config(self):
        ''' Return EBS config options '''
        return self.ebs.get_volumes()
    def read_elb_config(self):
        ''' Read in ELB config options '''
        if self.get_ini("elb", "enabled", str) == "True":
            self.elb.enabled = True
        else:
            # Not enabled: record that and skip reading the rest
            self.elb.enabled = False
            return False
        self.elb.protocol = self.get_ini("elb", "protocol", str)
        self.elb.public_port = self.get_ini("elb", "public_port", int)
        self.elb.private_port = self.get_ini("elb", "private_port", int)
        self.elb.vip_number = self.get_ini("elb", "vip_number", int) or 1
        self.elb.hc_access_point = self.get_ini(
            "healthcheck", "hc_access_point", str)
        self.elb.hc_interval = self.get_ini("healthcheck", "hc_interval", int)
        self.elb.hc_target = self.get_ini("healthcheck", "hc_target", str)
        self.elb.hc_healthy_threshold = self.get_ini(
            "healthcheck", "hc_healthy_threshold", int)
        self.elb.hc_unhealthy_threshold = self.get_ini(
            "healthcheck", "hc_unhealthy_threshold", int)
        self.elb.validate()
    def get_elb_config(self):
        ''' Return ELB config '''
        return self.elb
    def get_elb_name(self):
        ''' Return the name of the ELB, based on cluster and ELB configs '''
        return "%s%svip%s%d" % (
            self.get_cg_region(), self.server_env[:1],
            self.server_class, self.elb.vip_number)
    def read_sg_config(self):
        ''' Reads in SG config options.

        Each [securitygroup] ingress rule looks like
        "ingress <proto> <ports> <remote>" where <remote> is one of
        CIDR:<cidr>, Net:<name> or SG:<account>/<group>.
        '''
        for item in self.ini.items("securitygroup"):
            if item[1][:7] == "ingress":
                (rtype, proto, ports, remote) = item[1].split(" ")
                # "None" means all ports (-1/-1); "a-b" is a range
                if ports == "None":
                    (prt1, prt2) = [-1, -1]
                elif '-' in ports:
                    (prt1, prt2) = ports.split("-")
                else:
                    prt1 = prt2 = ports
                prt1 = int(prt1)
                prt2 = int(prt2)
                if remote[:5] == 'CIDR:':
                    self.sgrp.add_cidr(proto, prt1, prt2, remote[5:])
                elif remote[:4] == 'Net:':
                    cidr = c3.utils.naming.get_cidr(remote[4:])
                    if not cidr:
                        raise InvalidCIDRNameError(
                            "Network '%s' is invalid" % remote[4:])
                    self.sgrp.add_cidr(proto, prt1, prt2, cidr)
                elif remote[:3] == 'SG:':
                    acct, sgrp = remote[3:].split("/")
                    if acct == 'self':
                        acctid = c3.utils.accounts.get_account_id(
                            account_name=self.get_aws_account())
                    elif acct == 'amazon-elb':
                        logging.debug('acctid set to amazon-elb', self.verbose)
                        acctid = 'amazon-elb'
                    else:
                        acctid = c3.utils.accounts.get_account_id(
                            account_name=acct)
                    logging.debug('%s == %s' % (acct, acctid), self.verbose)
                    if acctid:
                        self.sgrp.add_sg(proto, prt1, prt2, acctid, sgrp)
                    else:
                        logging.error("Can't find my own account.")
                logging.debug(
                    "Allowing %s %s for ports %d to %d from %s" %
                    (rtype, proto, prt1, prt2, remote), self.verbose)
    def get_sg_rules(self):
        ''' Return SG rules '''
        return self.sgrp.get_sg()
    def get_cidr_rules(self):
        ''' Return CIDR rules '''
        return self.sgrp.get_cidr()
    def read_raid_config(self):
        ''' Read in RAID config options '''
        if self.get_ini("raid", "enabled", str) == 'True':
            self.raid.enabled = True
        else:
            # Not enabled: record that and skip level/device
            self.raid.enabled = False
            return False
        self.raid.set_level(self.get_ini("raid", "level", str))
        self.raid.set_device(self.get_ini("raid", "device", str))
    def read_rds_sg_config(self):
        ''' Reads RDS SG authorizations from ini files.

        Rules are "<type>:<value>" with type Net, CIDR or SG.
        '''
        for rule in self.ini.items('rds_securitygroup'):
            if re.match('.*rule', rule[0]):
                (rtype, rvalue) = rule[1].split(':')
                if rtype == 'Net':
                    cidr = c3.utils.naming.get_cidr(rvalue)
                    if cidr:
                        logging.debug('Appending RDS CIDR rule %s' % cidr,
                                      self.verbose)
                        self.rds_sg.add_cidr(cidr)
                elif rtype == 'CIDR':
                    logging.debug('Appending RDS CIDR rule %s' % rvalue,
                                  self.verbose)
                    self.rds_sg.add_cidr(rvalue)
                elif rtype == 'SG':
                    (oid, sid) = rvalue.split('/')
                    if oid != 'self':
                        acctid = c3.utils.accounts.get_account_id(oid)
                    else:
                        acctid = c3.utils.accounts.get_account_id(
                            self.get_aws_account())
                    if acctid:
                        logging.debug(
                            'Appending RDS SG rule %s:%s' % (acctid, sid),
                            self.verbose)
                        self.rds_sg.add_sg(acctid, sid)
                    else:
                        logging.warn("Can't find account for %s" % oid)
    def get_rds_sg_rules(self):
        ''' Returns list of RDS SG rules. '''
        return self.rds_sg.get_sg()
    def get_rds_cidr_rules(self):
        ''' Returns list of RDS CIDR rules. '''
        return self.rds_sg.get_cidr()
    def read_rds_pg_config(self):
        ''' Reads RDS parameters from ini files. '''
        for param in self.ini.items('rds_parameters'):
            if re.match('.*parameter', param[0]):
                (name, value, method) = param[1].split()
                self.rds_pg.add_parameter((name, value, method))
    def get_rds_parameters(self):
        ''' Returns list of RDS parameters. '''
        return self.rds_pg.get_parameters()
    def read_rds_config(self):
        ''' Reads RDS config items from config and store into dictionary. '''
        for item in self.ini.items('rds_provision'):
            # The literal string 'False' is stored as None
            if item[1] == 'False':
                self.rds.add_config(item[0], None)
            else:
                self.rds.add_config(item[0], item[1])
    def get_rds_config(self):
        ''' Returns dictionary of RDS config items. '''
        return self.rds.get_config()
    def get_placement_group(self):
        ''' Returns placement group '''
        return self.get_ini("cluster", "placement_group", str)
| |
from collections import Counter
from bottle_inject import inject, Injector, InjectError, Plugin
import unittest
import functools
def as_implicit(ip):
    """Mark an injection point as implicit and hand it back."""
    setattr(ip, 'implicit', True)
    return ip
class TestInjector(unittest.TestCase):
    """Unit tests for the Injector dependency-injection core."""

    def setUp(self):
        self.seq = range(10)

    def test_inject_compare(self):
        self.assertEqual(inject('x'), inject('x'))
        self.assertEqual(inject('x', bar=6, foo=5), inject('x', foo=5, bar=6))
        self.assertNotEqual(inject('x', foo=5), inject('x'))
        self.assertNotEqual(inject('y'), inject('x'))
        # Fixed: the original asserted inject('x') != inject('x'), which
        # directly contradicts the first assertion above. Injection points
        # that differ only in keyword values must compare unequal.
        self.assertNotEqual(inject('x', foo=5), inject('x', foo=6))
        self.assertNotEqual(inject('x'), 'x')

    def _common_checks(self, results):
        # Shared assertions for the inspection tests below.
        self.assertEqual(as_implicit(inject('a')), results['a'])
        self.assertEqual(as_implicit(inject('_b')), results['_b'])
        self.assertEqual(None, results.get('c'))
        self.assertEqual(inject('x'), results['d'])
        self.assertFalse(results['d'].implicit)
        self.assertEqual(inject('x2', foo='foo', bar="baz"), results['e'])
        self.assertFalse(results['e'].implicit)
        self.assertEqual(None, results.get('f'))
        self.assertEqual(None, results.get('g'))

    def test_inspection(self):
        def test(a, _b, c=5, d=inject('x'), e=inject('x2', foo='foo', bar="baz"), *f, **g): pass
        self._common_checks(Injector().inspect(test))

    def test_inspect_class(self):
        class Foo:
            def __init__(self, a, _b, c=5, d=inject('x'), e=inject('x2', foo='foo', bar="baz"), *f, **g):
                pass
        self._common_checks(Injector().inspect(Foo))

    def test_inspect_blacklist(self):
        def test(self, a): pass
        # Fixed: dict.keys() is a view on Python 3 and never equals a list;
        # assertEquals is a deprecated alias of assertEqual.
        self.assertEqual(['a'], list(Injector().inspect(test).keys()))

    def test_inspect_wrapped(self):
        def test(a, _b, c=5, d=inject('x'), e=inject('x2', foo='foo', bar="baz"), *f, **g): pass
        @functools.wraps(test)
        def wrapped(): pass
        if not hasattr(wrapped, '__wrapped__'):
            # Python 3.2 added this. Without it we cannot unwrap.
            # This is just to satisfy the coverage in unsupported python versions
            wrapped.__wrapped__ = test
        self._common_checks(Injector().inspect(wrapped))

    def test_inject_value(self):
        ij = Injector()
        value = []
        ij.add_value('val', value)
        def test(val, other=inject('val')):
            self.assertTrue(val is other)
            self.assertTrue(val is value)
            val.append(5)
        ij.call_inject(test)
        self.assertEqual([5], value)

    def test_inject_provider(self):
        def provider():
            counter['provider_called'] += 1
            return counter
        def test(c, other=inject('c')):
            self.assertTrue(other is c)
            c['counter_used'] += 1
        counter = Counter()
        ij = Injector()
        ij.add_provider('c', provider)
        ij.call_inject(test)
        self.assertEqual(2, counter['provider_called'])
        self.assertEqual(1, counter['counter_used'])

    def test_inject_provider_decorator(self):
        counter = Counter()
        ij = Injector()
        @ij.provider('c')
        def provider():
            counter['provider_called'] += 1
            return counter
        def test(c, other=inject('c')):
            self.assertTrue(other is c)
            c['counter_used'] += 1
        ij.call_inject(test)
        self.assertEqual(2, counter['provider_called'])
        self.assertEqual(1, counter['counter_used'])

    def test_inject_resolver(self):
        counter = Counter()
        def resolver(keyname='provider_called', increment=1):
            counter['resolver_called'] += 1
            def provider():
                counter[keyname] += increment
                return counter
            return provider
        def test(c, other=inject('c', keyname='special_called', increment=10)):
            self.assertTrue(other is c)
            c['counter_used'] += 1
        ij = Injector()
        ij.add_resolver('c', resolver)
        ij.call_inject(test)
        self.assertEqual(2, counter['resolver_called'])
        self.assertEqual(1, counter['provider_called'])
        self.assertEqual(10, counter['special_called'])
        self.assertEqual(1, counter['counter_used'])
        ij.call_inject(test)
        self.assertEqual(2, counter['resolver_called'])  # !!! Should be cached and not called again
        self.assertEqual(2, counter['provider_called'])
        self.assertEqual(20, counter['special_called'])
        self.assertEqual(2, counter['counter_used'])

    def test_inject_resolver_decorator(self):
        counter = Counter()
        ij = Injector()
        @ij.resolver('c')
        def resolver(keyname='provider_called', increment=1):
            counter['resolver_called'] += 1
            def provider():
                counter[keyname] += increment
                return counter
            return provider
        def test(c, other=inject('c', keyname='special_called', increment=10)):
            self.assertTrue(other is c)
            c['counter_used'] += 1
        ij.call_inject(test)
        self.assertEqual(2, counter['resolver_called'])
        self.assertEqual(1, counter['provider_called'])
        self.assertEqual(10, counter['special_called'])
        self.assertEqual(1, counter['counter_used'])
        ij.call_inject(test)
        self.assertEqual(2, counter['resolver_called'])  # !!! Should be cached and not called again
        self.assertEqual(2, counter['provider_called'])
        self.assertEqual(20, counter['special_called'])
        self.assertEqual(2, counter['counter_used'])

    def test_remove_provider(self):
        ij = Injector()
        ij.add_value('val', 5)
        ij.remove('val')
        def test(val): pass
        with self.assertRaises(InjectError):
            ij.call_inject(test)

    def test_resolver_alias(self):
        counter = Counter()
        def resolver(keyname='provider_called', increment=1):
            counter['resolver_called'] += 1
            def provider():
                counter[keyname] += increment
                return counter
            return provider
        def test(a, b, c, d, e, f):
            self.assertTrue(a is b)
            self.assertTrue(a is c)
            self.assertTrue(a is d)
            self.assertTrue(a is e)
            self.assertTrue(a is f)
            a['counter_used'] += 1
        ij = Injector()
        ij.add_resolver('a', resolver, alias="b")
        ij.add_resolver('c', resolver, alias=("d", "e", "f"))
        ij.call_inject(test)
        self.assertEqual(6, counter['resolver_called'])
        self.assertEqual(6, counter['provider_called'])
        self.assertEqual(1, counter['counter_used'])

    def test_wrap_decorator(self):
        ij = Injector()
        @ij.wrap
        def test(a):
            return a
        with self.assertRaises(InjectError):
            test()
        ij.add_value('a', 5)
        # assertEquals is a deprecated alias; use assertEqual.
        self.assertEqual(5, test())
        self.assertEqual(6, test(a=6))
import bottle
class TestBottlePlugin(unittest.TestCase):
    """Tests for the bottle plugin integration."""

    def test_autoinject(self):
        app = bottle.Bottle()
        ij = app.install(Plugin())
        @app.get('/')
        def get_route(req, res, injector):
            # Fixed: "bottle.reqest" was a typo for "bottle.request";
            # assertEquals is a deprecated alias of assertEqual.
            self.assertEqual(bottle.request, req)
            self.assertEqual(bottle.response, res)
            self.assertEqual(ij, injector)
if __name__ == '__main__':
    # Run the unittest suite when this file is executed directly.
    unittest.main()
| |
#!/usr/bin/env python
"""
--------------------------------------------------------------------------
Routine to perform aperture photometry on CHIMERA science frames.
Usage: python photometry.py [options] image coords
Author:
Navtej Saini
Organization:
Caltech, Pasadena, CA, USA
Version:
20 December 2015 0.1dev Initial implementation
9 Feb 2016 0.2 User can input photometric zero point
--------------------------------------------------------------------------
"""
import os, sys
from pyraf import iraf
import numpy as np, warnings
from StringIO import StringIO
from optparse import OptionParser
try:
import matplotlib.pylab as plt
except ImportError:
plot_flag = False
else:
try:
import seaborn
except ImportError:
pass
plot_flag = True
import chimera
def dump(infile, keywords):
    """
    Dump keyword data from DAOPHOT output photometry file.
    Parameters
    ----------
    infile : string
        DAOPHOT photometry file to read
    keywords : string
        Comma separated fields that have to be extracted from phot file
    Returns
    -------
    indata : numpy array
        Photometry data array
    """
    # Load iraf packages
    iraf.noao(_doprint = 0)
    iraf.noao.digiphot(_doprint = 0)
    iraf.noao.digiphot.ptools(_doprint = 0)
    indata = iraf.pdump(infile, keywords, "yes", Stdout = 1)
    # NOTE(review): pdump with Stdout=1 returns a list of output lines and
    # only the second element is returned here -- presumably the single data
    # row of interest. Confirm this holds for multi-star coordinate files.
    return indata[1]
def plotter(phot_data, nframes, exptime, outfile):
    """
    Plot the normalized light curve and save it to disk.
    Parameters
    ----------
    phot_data : numpy array
        Photometry array (records with DATETIME and FLUX_ADU fields)
    nframes : int
        Number of image cube frames
    exptime : float
        Kinetic or accumulation time
    outfile : string
        Name of the out png image
    Returns
    -------
    None
    """
    rc_settings = {'backend': 'ps',
                   'font.size': 10,
                   'axes.labelweight': 'medium',
                   'figure.dpi' : 300,
                   'savefig.dpi': 300,
                   'savefig.jpeg_quality': 100
                   }
    plt.rcParams.update(rc_settings)
    time_axis = np.linspace(0, nframes*exptime, nframes)
    plt.figure(figsize=(6,4))
    obs_date = phot_data[0]['DATETIME'].split('T')[0]
    plt.title("Normalized Light Curve : %s" % obs_date)
    plt.xlabel("Time (secs)")
    plt.ylabel("Normalized Flux")
    #dt = [item.split('T')[1] for item in phot_data['DATETIME']]
    #plt.xticks(np.arange(min(ts), max(ts)+10, 60), dt, rotation = 45)
    flux = phot_data['FLUX_ADU']
    plt.plot(time_axis, flux/np.mean(flux), "r-")
    plt.savefig(outfile, dpi = 300, bbox_inches = "tight")
    return
def process(infile, coords, fwhmpsf, sigma, aperture, annulus, dannulus, output, zmag, debug):
    """
    Entry point function to process science image.

    Parameters
    ----------
    infile : string
        Science image or list of science images (prefix "@" means a text
        file listing one FITS cube per line; otherwise a comma separated
        list of FITS images)

    coords : string
        Input text file with coordinates of stars

    fwhmpsf : float
        FWHM of the stellar psf in pixels

    sigma : float
        Sky background sigma

    aperture : float or None
        Photometry aperture radius in pixels; derived with daocog() when
        empty/None

    annulus : int
        Inner sky annulus radius in pixels

    dannulus : int
        Radius of sky annulus in pixels

    output : string
        Output file name

    zmag : string
        Photometric zero point (empty string keeps Aperphot's default)

    debug : bool
        If truthy, keep DAOPHOT phot files, save per-cube npy data and
        plot per-cube light curves

    Returns
    -------
    None
    """
    print "PHOTOMETRY: CHIMERA Aperture Photometry Routine"

    # Option values arrive as strings from the command line parser.
    fwhmpsf = float(fwhmpsf)
    sigma = float(sigma)
    annulus = int(annulus)
    dannulus = int(dannulus)

    # Check if input is a string of FITS images or a text file with file names
    if infile[0] == "@":
        infile = infile[1:]

        if not os.path.exists(infile):
            raise IOError("PHOTOMETRY: Not able to locate file %s. Stopping." %infile)

        image_cubes = []
        with open(infile, "r") as fd:
            for line in fd.readlines():
                # Skip blank lines (a bare newline has length 1).
                if len(line) > 1:
                    image_cubes.append(line.replace("\n", ""))
    else:
        image_cubes = infile.split(",")

    # Number of images
    ncubes = len(image_cubes)

    # Check if the input coordinates file exists. Take a tmp copy of the input
    # file and use that. Delete at the end of processing.
    if not os.path.exists(coords):
        raise IOError("Input coordinate file %s does not exist. Stopping." %coords)
    else:
        tmp_coords = coords + ".tmp"
        iraf.copy(coords, tmp_coords)

    # Fields to extract from phot file
    fields = "XCEN,YCEN,CIER,MSKY,STDEV,NSKY,SIER,SUM,AREA,FLUX,MERR,PIER"

    total_phot_data = []
    img_sec = []
    for i in range(ncubes):
        sci_file = image_cubes[i]
        if not os.path.exists(sci_file):
            raise IOError("FITS image %s does not exist. Stopping." %sci_file)

        fpath, fname = os.path.split(sci_file)

        print "\n Processing science image %s" %fname

        # Instantiate an Aperphot object
        ap = chimera.Aperphot(sci_file, coords)

        # Set fwhmpsf, sigma, annulus, dannulus and zero point
        ap.fwhmpsf = fwhmpsf
        ap.sigma = sigma
        ap.annulus = annulus
        ap.dannulus = dannulus
        if zmag != "":
            ap.zmag = float(zmag)

        # Read the input FITS image; keep the header of the FIRST cube only,
        # it is reused when writing the image sections at the end.
        if i == 0:
            img, imghdr = chimera.fitsread(ap.sci_file, header = True)
        else:
            img = chimera.fitsread(ap.sci_file)

        # Determine nominal aperture radius for photometry (first cube only;
        # the same radius is reused for all subsequent cubes).
        if i == 0:
            if aperture:
                nom_aper = float(aperture)
            else:
                nom_aper = ap.daocog()

            print " Nominal aperture radius : %4.1f pixels" %nom_aper

        # Perform aperture photometry on all the frames
        dtype = [("DATETIME", "S25"),("XCEN", "f4"),("YCEN", "f4"),("CIER", "i4"),("MSKY", "f4"),("STDEV", "f4"),("NSKY", "i4"),("SIER", "i4"),("SUM", "f4"),("AREA", "f4"),("FLUX_ADU", "f4"),("FLUX_ELEC", "f4"),("FERR", "f4"),("MAG", "f4"),("MERR", "f4"),("PIER", "i4"),]
        phot_data = np.zeros([ap.nframes], dtype = dtype)
        for j in range(ap.nframes):
            print " Processing frame number : %d" %(j+1)

            outfile = sci_file.replace(".fits", "_" + str(j) + ".phot.1")
            ap.daophot(j+1, tmp_coords, outfile, nom_aper)
            # Overwrite tmp_coords with the measured centroid so the
            # aperture tracks the star from frame to frame.
            objcen = dump(outfile, "XCEN,YCEN")
            with open(tmp_coords, "w") as fd:
                fd.write(objcen + '\n')

            aperphot_data = dump(outfile, fields).split()
            phot_data[j]['DATETIME'] = ap.addtime(j * ap.kintime).isoformat()
            phot_data[j]['XCEN'] = float(aperphot_data[0])
            phot_data[j]['YCEN'] = float(aperphot_data[1])
            phot_data[j]['CIER'] = int(aperphot_data[2])
            phot_data[j]['MSKY'] = float(aperphot_data[3])
            phot_data[j]['STDEV'] = float(aperphot_data[4])
            phot_data[j]['NSKY'] = int(aperphot_data[5])
            phot_data[j]['SIER'] = int(aperphot_data[6])
            phot_data[j]['SUM'] = float(aperphot_data[7])
            phot_data[j]['AREA'] = float(aperphot_data[8])
            phot_data[j]['FLUX_ADU'] = float(aperphot_data[9])
            phot_data[j]['FLUX_ELEC'] = float(aperphot_data[9]) * ap.epadu
            phot_data[j]['MAG'] = ap.zmag - 2.5 * np.log10(phot_data[j]['FLUX_ELEC']/ap.exptime)
            # IRAF reports an undefined magnitude error as 'INDEF';
            # record it with the sentinel value -10.
            if aperphot_data[10] == 'INDEF':
                phot_data[j]['MERR'] = -10
            else:
                phot_data[j]['MERR'] = float(aperphot_data[10])
            phot_data[j]['PIER'] = int(aperphot_data[11])

            # Calculate error in flux - using the formula
            # err = sqrt(flux * gain + npix * (1 + (npix/nsky)) * (flux_sky * gain + R**2))
            phot_data[j]['FERR'] = np.sqrt(phot_data[j]['FLUX_ELEC'] + phot_data[j]['AREA'] * (1 + phot_data[j]['AREA']/phot_data[j]['NSKY']) * (phot_data[j]['MSKY'] * ap.epadu + ap.readnoise**2))

            # Save a 51x51 image section of the object
            # NOTE(review): near the frame edge xmin/ymin can go negative,
            # in which case the slice wraps and the section is not 51x51 --
            # confirm targets stay >25 px from the edges.
            xmin, xmax = int(phot_data[j]['XCEN']) - 25, int(phot_data[j]['XCEN']) + 25
            ymin, ymax = int(phot_data[j]['YCEN']) - 25, int(phot_data[j]['YCEN']) + 25
            img_sec.append(img[j, ymin:ymax, xmin:xmax])

        # Save photometry of all the image cubes in a single file
        total_phot_data.append(phot_data)

        # If debug mode -
        #   1. save DAOPHOT phot files
        #   2. save individual phot data as npy file
        #   3. plot light curve for each data cube separately
        if debug:
            # Save photometry data in numpy binary format
            print " Saving photometry data as numpy binary"
            if output != "":
                npy_outfile = output + ".npy"
            else:
                npy_outfile = coords + "_phot.npy"
            if os.path.exists(npy_outfile):
                os.remove(npy_outfile)
            np.save(npy_outfile, phot_data)

            # Plot first pass light curve (only if matplotlib imported OK)
            if plot_flag:
                print " Plotting normalized light curve"
                if output != "":
                    plt_outfile = output + ".png"
                else:
                    plt_outfile = coords + "_lc.png"
                plotter(phot_data, ap.nframes, ap.kintime, plt_outfile)
        else:
            # Delete intermediate phot files in non-debug mode
            iraf.delete(os.path.join(fpath, '*.phot.1'))

    # Convert the total_phot_data to a single flat array
    print ' Saving consolidated photometry data...'
    total_phot_data_arr = np.concatenate(total_phot_data)

    # Save the array as npy file
    np.save(coords + "_total.phot.npy", total_phot_data_arr)

    # Save the image section with object as FITS file, reusing the header
    # captured from the first cube.
    print ' Saving image section with object as FITS image...'
    img_sec_arr = np.asarray(img_sec)
    img_fname = coords + "_obj.fits"
    if os.path.exists(img_fname):
        os.remove(img_fname)
    chimera.fitswrite(img_sec_arr, coords + "_obj.fits", header = imghdr)

    # Delete temporary coordinate file
    if os.path.exists(tmp_coords):
        os.remove(tmp_coords)

    return
if __name__ == "__main__":
    # Command line interface: two positional arguments (science image(s)
    # and coordinate file) plus photometry tuning options.
    usage = "Usage: python %prog [options] sci_image coords"
    description = "Description. Utility to perform aperture photometry in CHIMERA science images."

    parser = OptionParser(usage = usage, version = "%prog 0.2", description = description)
    # NOTE(review): -v and -q share dest="verbose"; per optparse, the later
    # declared default (True) wins, so output is verbose unless -q is given
    # and -v is effectively a no-op.
    parser.add_option("-v", "--verbose",
                      action="store_true", dest="verbose", default = False,
                      help = "print result messages to stdout"
                      )
    parser.add_option("-q", "--quiet",
                      action="store_false", dest="verbose", default = True,
                      help = "don't print result messages to stdout"
                      )
    parser.add_option("-f", "--fwhmpsf", dest = "fwhmpsf",
                      action="store", metavar="FWHMPSF", help = "FWHM of PSF (default is 6 pixels)",
                      default = 6
                      )
    parser.add_option("-s", "--sigma", dest = "sigma",
                      action="store", metavar="SIGMA", help = "Sky background sigma (default is 10)",
                      default = 10
                      )
    parser.add_option("-a", "--aperture", dest = "aperture",
                      action="store", metavar="APERTURE", help = "Photometry aperture radius (default is None)",
                      default = None
                      )
    parser.add_option("-r", "--annulus", dest = "annulus",
                      action="store", metavar="ANNULUS", help = "Inner radius of sky annlus in pixels (default is 14)",
                      default = 14
                      )
    parser.add_option("-d", "--dannulus", dest = "dannulus",
                      action="store", metavar="DANNULUS", help = "Radius of sky annulus in piexls (default is 16)",
                      default = 16
                      )
    parser.add_option("-o", "--output", dest = "output",
                      action="store", metavar="OUTPUT", help = "Output file name",
                      default=""
                      )
    parser.add_option("-z", "--zmag", dest = "zmag",
                      action="store", metavar="ZMAG", help = "Photometric zero point",
                      default = ""
                      )
    # NOTE(review): action="store" means -b takes a string argument, and any
    # non-empty value (even "False") is truthy downstream; action="store_true"
    # is likely intended, but changing it would alter the CLI contract.
    parser.add_option("-b", "--debug", dest = "debug",
                      action="store", metavar="DEBUG", help = "Debug modes? Default is False",
                      default = False
                      )

    (options, args) = parser.parse_args()
    if len(args) != 2:
        parser.error("PHOTOMETRY: Incorrect number of arguments")

    # Check verbosity: in quiet mode, swallow stdout into a StringIO buffer.
    if not options.verbose:
        output = StringIO()
        old_stdout = sys.stdout
        sys.stdout = output

    # Switch off warnings
    warnings.filterwarnings('ignore')

    process(args[0], args[1], options.fwhmpsf, options.sigma, options.aperture, options.annulus, options.dannulus, options.output, options.zmag, options.debug)

    # Reset verbosity
    if not options.verbose:
        sys.stdout = old_stdout
| |
#!/usr/bin/python2.4
#
# Unit tests for Mox.
#
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import cStringIO
import unittest
import re
import mox
import mox_test_helper
OS_LISTDIR = mox_test_helper.os.listdir
class ExpectedMethodCallsErrorTest(unittest.TestCase):
    """Exercise construction and string rendering of ExpectedMethodCallsError."""

    def testAtLeastOneMethod(self):
        """Constructing the error with an empty method list is a ValueError."""
        self.assertRaises(ValueError, mox.ExpectedMethodCallsError, [])

    def testOneError(self):
        """A single missed call is rendered with its arguments and return."""
        method = mox.MockMethod("testMethod", [], False)
        method(1, 2).AndReturn('output')
        err = mox.ExpectedMethodCallsError([method])
        expected = ("Verify: Expected methods never called:\n"
                    "  0.  testMethod(1, 2) -> 'output'")
        self.assertEqual(expected, str(err))

    def testManyErrors(self):
        """Several missed calls are rendered as a numbered list."""
        first = mox.MockMethod("testMethod", [], False)
        first(1, 2).AndReturn('output')
        second = mox.MockMethod("testMethod", [], False)
        second(a=1, b=2, c="only named")
        third = mox.MockMethod("testMethod2", [], False)
        third().AndReturn(44)
        fourth = mox.MockMethod("testMethod", [], False)
        fourth(1, 2).AndReturn('output')
        err = mox.ExpectedMethodCallsError([first, second, third, fourth])
        expected = ("Verify: Expected methods never called:\n"
                    "  0.  testMethod(1, 2) -> 'output'\n"
                    "  1.  testMethod(a=1, b=2, c='only named') -> None\n"
                    "  2.  testMethod2() -> 44\n"
                    "  3.  testMethod(1, 2) -> 'output'")
        self.assertEqual(expected, str(err))
class OrTest(unittest.TestCase):
    """Verify the Or comparator matches when any operand matches."""

    def testValidOr(self):
        """Or evaluates True when either wrapped Comparator matches."""
        dict_or_str = mox.Or(mox.IsA(dict), mox.IsA(str))
        self.assert_(dict_or_str == {})
        self.assert_(dict_or_str == 'test')
        self.assert_(mox.Or(mox.IsA(str), mox.IsA(str)) == 'test')

    def testInvalidOr(self):
        """Or evaluates False when neither wrapped Comparator matches."""
        self.failIf(mox.Or(mox.IsA(dict), mox.IsA(str)) == 0)
class AndTest(unittest.TestCase):
    """Verify the And comparator requires every operand to match."""

    def testValidAnd(self):
        """And evaluates True when both wrapped Comparators match."""
        self.assert_(mox.And(mox.IsA(str), mox.IsA(str)) == '1')

    def testClauseOneFails(self):
        """And evaluates False as soon as one wrapped Comparator fails."""
        self.failIf(mox.And(mox.IsA(dict), mox.IsA(str)) == '1')

    def testAdvancedUsage(self):
        """And composes with other Comparators.

        Note: this test is reliant on In and ContainsKeyValue.
        """
        test_dict = {"mock" : "obj", "testing" : "isCOOL"}
        in_and_contains = mox.And(mox.In("testing"),
                                  mox.ContainsKeyValue("mock", "obj"))
        self.assert_(in_and_contains == test_dict)

    def testAdvancedUsageFails(self):
        """Note: this test is reliant on In and ContainsKeyValue."""
        test_dict = {"mock" : "obj", "testing" : "isCOOL"}
        in_and_contains = mox.And(mox.In("NOTFOUND"),
                                  mox.ContainsKeyValue("mock", "obj"))
        self.failIf(in_and_contains == test_dict)
class SameElementsAsTest(unittest.TestCase):
    """Verify SameElementsAs ignores ordering when comparing sequences."""

    def testSortedLists(self):
        """Two identically ordered lists compare equal."""
        self.assert_(mox.SameElementsAs([1, 2.0, 'c']) == [1, 2.0, 'c'])

    def testUnsortedLists(self):
        """Lists with the same elements in a different order compare equal."""
        self.assert_(mox.SameElementsAs([1, 2.0, 'c']) == [2.0, 'c', 1])

    def testUnhashableLists(self):
        """Unhashable elements (dicts) still match irrespective of order."""
        self.assert_(mox.SameElementsAs([{'a': 1}, {2: 'b'}]) ==
                     [{2: 'b'}, {'a': 1}])

    def testEmptyLists(self):
        """Two empty lists compare equal."""
        self.assert_(mox.SameElementsAs([]) == [])

    def testUnequalLists(self):
        """A missing element makes the comparison fail."""
        self.failIf(mox.SameElementsAs([1, 2.0, 'c']) == [2.0, 'c'])

    def testUnequalUnhashableLists(self):
        """A missing unhashable element makes the comparison fail."""
        self.failIf(mox.SameElementsAs([{'a': 1}, {2: 'b'}]) == [{2: 'b'}])
class ContainsKeyValueTest(unittest.TestCase):
    """Verify ContainsKeyValue matches dicts holding a given key/value pair."""

    def testValidPair(self):
        """Matches when the key is present with the expected value."""
        self.assert_(mox.ContainsKeyValue("key", 1) == {"key": 1})

    def testInvalidValue(self):
        """Does not match when the key maps to a different value."""
        self.failIf(mox.ContainsKeyValue("key", 1) == {"key": 2})

    def testInvalidKey(self):
        """Does not match when the key is absent from the dict."""
        self.failIf(mox.ContainsKeyValue("qux", 1) == {"key": 2})
class ContainsAttributeValueTest(unittest.TestCase):
    """Test ContainsAttributeValue correctly identifies properties in an object.
    """

    def setUp(self):
        """Create an object to test with."""

        class TestObject(object):
            key = 1

        self.test_object = TestObject()

    def testValidPair(self):
        """Should return True if the object has the key attribute and it matches."""
        self.assert_(mox.ContainsAttributeValue("key", 1) == self.test_object)

    def testInvalidValue(self):
        """Should return False if the value is not correct."""
        # Bug fix: previously used ContainsKeyValue, which can never match a
        # plain object (no __getitem__), so the failIf passed vacuously and
        # ContainsAttributeValue was never exercised here.
        self.failIf(mox.ContainsAttributeValue("key", 2) == self.test_object)

    def testInvalidKey(self):
        """Should return False if the object doesn't have the attribute."""
        # Same copy-paste fix as above: exercise ContainsAttributeValue.
        self.failIf(mox.ContainsAttributeValue("qux", 1) == self.test_object)
class InTest(unittest.TestCase):
    """Verify In matches membership in lists and dicts."""

    def testItemInList(self):
        """Matches when the item is an element of the list."""
        self.assert_(mox.In(1) == [1, 2, 3])

    def testKeyInDict(self):
        """Matches when the item is a key of the dict."""
        self.assert_(mox.In("test") == {"test" : "module"})
class NotTest(unittest.TestCase):
    """Verify Not inverts the wrapped Comparator's verdict."""

    def testItemInList(self):
        """Matches when the item is NOT in the list."""
        self.assert_(mox.Not(mox.In(42)) == [1, 2, 3])

    def testKeyInDict(self):
        """Matches when the item is NOT a key in the dict."""
        self.assert_(mox.Not(mox.In("foo")) == {"key" : 42})

    def testInvalidKeyWithNot(self):
        """Matches when the wrapped ContainsKeyValue fails to match."""
        self.assert_(mox.Not(mox.ContainsKeyValue("qux", 1)) == {"key": 2})
class StrContainsTest(unittest.TestCase):
    """Verify StrContains checks substring occurrence within a parameter."""

    def testValidSubstringAtStart(self):
        """Matches a substring at the start of the string."""
        self.assert_(mox.StrContains("hello") == "hello world")

    def testValidSubstringInMiddle(self):
        """Matches a substring in the middle of the string."""
        self.assert_(mox.StrContains("lo wo") == "hello world")

    def testValidSubstringAtEnd(self):
        """Matches a substring at the end of the string."""
        self.assert_(mox.StrContains("ld") == "hello world")

    def testInvaildSubstring(self):
        """Does not match when the substring never occurs."""
        self.failIf(mox.StrContains("AAA") == "hello world")

    def testMultipleMatches(self):
        """Matches when the substring occurs several times."""
        self.assert_(mox.StrContains("abc") == "ababcabcabcababc")
class RegexTest(unittest.TestCase):
    """Verify Regex matches parameters against a regular expression."""

    def testIdentifyBadSyntaxDuringInit(self):
        """A malformed pattern raises re.error at construction time."""
        self.assertRaises(re.error, mox.Regex, '(a|b')

    def testPatternInMiddle(self):
        """Matches anywhere in the string (re.search, not anchored matching).
        """
        self.assert_(mox.Regex(r"a\s+b") == "x y z a b c")

    def testNonMatchPattern(self):
        """Does not match when the pattern is absent from the string."""
        self.failIf(mox.Regex(r"a\s+b") == "x y z")

    def testFlagsPassedCorrectly(self):
        """Flags such as IGNORECASE are honored."""
        self.assert_(mox.Regex(r"A", re.IGNORECASE) == "a")

    def testReprWithoutFlags(self):
        """repr shows the pattern when no flags were given."""
        self.assert_(repr(mox.Regex(r"a\s+b")) == r"<regular expression 'a\s+b'>")

    def testReprWithFlags(self):
        """repr shows both the pattern and the flags."""
        self.assert_(repr(mox.Regex(r"a\s+b", flags=4)) ==
                     r"<regular expression 'a\s+b', flags=4>")
class IsATest(unittest.TestCase):
    """Verify IsA compares on class type rather than value."""

    def testEqualityValid(self):
        """== matches an instance of the expected type."""
        self.assert_(mox.IsA(str) == 'test')

    def testEqualityInvalid(self):
        """== rejects an instance of a different type."""
        self.failIf(mox.IsA(str) == 10)

    def testInequalityValid(self):
        """!= detects instances of a different type."""
        self.assert_(mox.IsA(str) != 10)

    def testInequalityInvalid(self):
        """!= rejects instances of the same type."""
        self.failIf(mox.IsA(str) != "test")

    def testEqualityInListValid(self):
        """IsA comparators nested in lists participate in list equality."""
        self.assert_([mox.IsA(str), mox.IsA(str)] == ["abc", "def"])

    def testEquailtyInListInvalid(self):
        """A non-matching element makes the list comparison fail."""
        self.failIf([mox.IsA(str), mox.IsA(str)] == ["abc", 123])

    def testSpecialTypes(self):
        """IsA can be seeded with an instance of a special type, such as
        cStringIO.StringIO, and still match another instance of it."""
        self.assert_(mox.IsA(cStringIO.StringIO()) == cStringIO.StringIO())
class IsAlmostTest(unittest.TestCase):
    """Verify IsAlmost checks approximate equality of floating point numbers."""

    def testEqualityValid(self):
        """Nearly identical floats compare equal."""
        self.assertEquals(mox.IsAlmost(1.8999999999), 1.9)

    def testEqualityInvalid(self):
        """Floats differing beyond the default precision compare unequal."""
        self.assertNotEquals(mox.IsAlmost(1.899), 1.9)

    def testEqualityWithPlaces(self):
        """Reducing the number of places loosens the comparison."""
        self.assertNotEquals(mox.IsAlmost(1.899), 1.9)
        self.assertEquals(mox.IsAlmost(1.899, places=2), 1.9)

    def testNonNumericTypes(self):
        """Non-numeric operands never compare equal, in either position."""
        self.assertNotEquals(mox.IsAlmost(1.8999999999), '1.9')
        self.assertNotEquals(mox.IsAlmost('1.8999999999'), 1.9)
        self.assertNotEquals(mox.IsAlmost('1.8999999999'), '1.9')
class MockMethodTest(unittest.TestCase):
    """Test class to verify that the MockMethod class is working correctly."""

    def setUp(self):
        # NOTE(review): the third MockMethod argument presumably toggles
        # replay mode (False = record, True = replay) -- confirm against the
        # mox.MockMethod definition.
        self.expected_method = mox.MockMethod("testMethod", [], False)(['original'])
        self.mock_method = mox.MockMethod("testMethod", [self.expected_method],
                                          True)

    def testNameAttribute(self):
        """Should provide a __name__ attribute."""
        self.assertEquals('testMethod', self.mock_method.__name__)

    def testAndReturnNoneByDefault(self):
        """Should return None by default."""
        return_value = self.mock_method(['original'])
        self.assert_(return_value == None)

    def testAndReturnValue(self):
        """Should return a specificed return value."""
        expected_return_value = "test"
        self.expected_method.AndReturn(expected_return_value)
        return_value = self.mock_method(['original'])
        self.assert_(return_value == expected_return_value)

    def testAndRaiseException(self):
        """Should raise a specified exception."""
        expected_exception = Exception('test exception')
        self.expected_method.AndRaise(expected_exception)
        self.assertRaises(Exception, self.mock_method)

    def testWithSideEffects(self):
        """Should call state modifier."""
        local_list = ['original']

        def modifier(mutable_list):
            # The mock must pass the *same* list object to the side effect.
            self.assertTrue(local_list is mutable_list)
            mutable_list[0] = 'mutation'

        self.expected_method.WithSideEffects(modifier).AndReturn(1)
        self.mock_method(local_list)
        self.assertEquals('mutation', local_list[0])

    def testWithReturningSideEffects(self):
        """Should call state modifier and propagate its return value."""
        local_list = ['original']
        expected_return = 'expected_return'

        def modifier_with_return(mutable_list):
            self.assertTrue(local_list is mutable_list)
            mutable_list[0] = 'mutation'
            return expected_return

        self.expected_method.WithSideEffects(modifier_with_return)
        actual_return = self.mock_method(local_list)
        self.assertEquals('mutation', local_list[0])
        self.assertEquals(expected_return, actual_return)

    def testWithReturningSideEffectsWithAndReturn(self):
        """Should call state modifier and ignore its return value."""
        local_list = ['original']
        expected_return = 'expected_return'
        unexpected_return = 'unexpected_return'

        def modifier_with_return(mutable_list):
            self.assertTrue(local_list is mutable_list)
            mutable_list[0] = 'mutation'
            return unexpected_return

        # An explicit AndReturn takes precedence over the side effect's return.
        self.expected_method.WithSideEffects(modifier_with_return).AndReturn(
            expected_return)
        actual_return = self.mock_method(local_list)
        self.assertEquals('mutation', local_list[0])
        self.assertEquals(expected_return, actual_return)

    def testEqualityNoParamsEqual(self):
        """Methods with the same name and without params should be equal."""
        expected_method = mox.MockMethod("testMethod", [], False)
        self.assertEqual(self.mock_method, expected_method)

    def testEqualityNoParamsNotEqual(self):
        """Methods with different names and without params should not be equal."""
        expected_method = mox.MockMethod("otherMethod", [], False)
        self.failIfEqual(self.mock_method, expected_method)

    def testEqualityParamsEqual(self):
        """Methods with the same name and parameters should be equal."""
        params = [1, 2, 3]
        expected_method = mox.MockMethod("testMethod", [], False)
        # Poke the private _params directly to isolate __eq__ from __call__.
        expected_method._params = params
        self.mock_method._params = params
        self.assertEqual(self.mock_method, expected_method)

    def testEqualityParamsNotEqual(self):
        """Methods with the same name and different params should not be equal."""
        expected_method = mox.MockMethod("testMethod", [], False)
        expected_method._params = [1, 2, 3]
        self.mock_method._params = ['a', 'b', 'c']
        self.failIfEqual(self.mock_method, expected_method)

    def testEqualityNamedParamsEqual(self):
        """Methods with the same name and same named params should be equal."""
        named_params = {"input1": "test", "input2": "params"}
        expected_method = mox.MockMethod("testMethod", [], False)
        expected_method._named_params = named_params
        self.mock_method._named_params = named_params
        self.assertEqual(self.mock_method, expected_method)

    def testEqualityNamedParamsNotEqual(self):
        """Methods with the same name and diffnamed params should not be equal."""
        expected_method = mox.MockMethod("testMethod", [], False)
        expected_method._named_params = {"input1": "test", "input2": "params"}
        self.mock_method._named_params = {"input1": "test2", "input2": "params2"}
        self.failIfEqual(self.mock_method, expected_method)

    def testEqualityWrongType(self):
        """Method should not be equal to an object of a different type."""
        self.failIfEqual(self.mock_method, "string?")

    def testObjectEquality(self):
        """Equality of objects should work without a Comparator"""
        instA = TestClass();
        instB = TestClass();

        params = [instA, ]
        expected_method = mox.MockMethod("testMethod", [], False)
        expected_method._params = params
        self.mock_method._params = [instB, ]
        self.assertEqual(self.mock_method, expected_method)

    def testStrConversion(self):
        """str() should render the recorded call signature and return value."""
        method = mox.MockMethod("f", [], False)
        method(1, 2, "st", n1=8, n2="st2")
        self.assertEqual(str(method), ("f(1, 2, 'st', n1=8, n2='st2') -> None"))

        method = mox.MockMethod("testMethod", [], False)
        method(1, 2, "only positional")
        self.assertEqual(str(method), "testMethod(1, 2, 'only positional') -> None")

        method = mox.MockMethod("testMethod", [], False)
        method(a=1, b=2, c="only named")
        self.assertEqual(str(method),
                         "testMethod(a=1, b=2, c='only named') -> None")

        method = mox.MockMethod("testMethod", [], False)
        method()
        self.assertEqual(str(method), "testMethod() -> None")

        method = mox.MockMethod("testMethod", [], False)
        method(x="only 1 parameter")
        self.assertEqual(str(method), "testMethod(x='only 1 parameter') -> None")

        method = mox.MockMethod("testMethod", [], False)
        method().AndReturn('return_value')
        self.assertEqual(str(method), "testMethod() -> 'return_value'")

        method = mox.MockMethod("testMethod", [], False)
        method().AndReturn(('a', {1: 2}))
        self.assertEqual(str(method), "testMethod() -> ('a', {1: 2})")
class MockAnythingTest(unittest.TestCase):
    """Verify that the MockAnything class works as expected."""

    def setUp(self):
        # A fresh, fully permissive mock for every test.
        self.mock_object = mox.MockAnything()

    def testRepr(self):
        """Calling repr on a MockAnything instance must work."""
        self.assertEqual('<MockAnything instance>', repr(self.mock_object))

    def testSetupMode(self):
        """Verify the mock will accept any call."""
        self.mock_object.NonsenseCall()
        self.assert_(len(self.mock_object._expected_calls_queue) == 1)

    def testReplayWithExpectedCall(self):
        """Verify the mock replays method calls as expected."""
        self.mock_object.ValidCall()  # setup method call
        self.mock_object._Replay()  # start replay mode
        self.mock_object.ValidCall()  # make method call

    def testReplayWithUnexpectedCall(self):
        """Unexpected method calls should raise UnexpectedMethodCallError."""
        self.mock_object.ValidCall()  # setup method call
        self.mock_object._Replay()  # start replay mode
        self.assertRaises(mox.UnexpectedMethodCallError,
                          self.mock_object.OtherValidCall)

    def testVerifyWithCompleteReplay(self):
        """Verify should not raise an exception for a valid replay."""
        self.mock_object.ValidCall()  # setup method call
        self.mock_object._Replay()  # start replay mode
        self.mock_object.ValidCall()  # make method call
        self.mock_object._Verify()

    def testVerifyWithIncompleteReplay(self):
        """Verify should raise an exception if the replay was not complete."""
        self.mock_object.ValidCall()  # setup method call
        self.mock_object._Replay()  # start replay mode
        # ValidCall() is never made
        self.assertRaises(mox.ExpectedMethodCallsError, self.mock_object._Verify)

    def testSpecialClassMethod(self):
        """Verify should not raise an exception when special methods are used."""
        # Indexing is recorded/replayed through __getitem__ like any method.
        self.mock_object[1].AndReturn(True)
        self.mock_object._Replay()
        returned_val = self.mock_object[1]
        self.assert_(returned_val)
        self.mock_object._Verify()

    def testNonzero(self):
        """You should be able to use the mock object in an if."""
        self.mock_object._Replay()
        if self.mock_object:
            pass

    def testNotNone(self):
        """Mock should be comparable to None."""
        self.mock_object._Replay()
        if self.mock_object is not None:
            pass

        if self.mock_object is None:
            pass

    def testEquals(self):
        """A mock should be able to compare itself to another object."""
        self.mock_object._Replay()
        self.assertEquals(self.mock_object, self.mock_object)

    def testEqualsMockFailure(self):
        """Verify equals identifies unequal objects."""
        self.mock_object.SillyCall()
        self.mock_object._Replay()
        self.assertNotEquals(self.mock_object, mox.MockAnything())

    def testEqualsInstanceFailure(self):
        """Verify equals identifies that objects are different instances."""
        self.mock_object._Replay()
        self.assertNotEquals(self.mock_object, TestClass())

    def testNotEquals(self):
        """Verify not equals works."""
        self.mock_object._Replay()
        self.assertFalse(self.mock_object != self.mock_object)

    def testNestedMockCallsRecordedSerially(self):
        """Test that nested calls work when recorded serially."""
        self.mock_object.CallInner().AndReturn(1)
        self.mock_object.CallOuter(1)
        self.mock_object._Replay()

        self.mock_object.CallOuter(self.mock_object.CallInner())

        self.mock_object._Verify()

    def testNestedMockCallsRecordedNested(self):
        """Test that nested calls work when recorded in a nested fashion."""
        self.mock_object.CallOuter(self.mock_object.CallInner().AndReturn(1))
        self.mock_object._Replay()

        self.mock_object.CallOuter(self.mock_object.CallInner())

        self.mock_object._Verify()

    def testIsCallable(self):
        """Test that MockAnything can even mock a simple callable.

        This is handy for "stubbing out" a method in a module with a mock, and
        verifying that it was called.
        """
        self.mock_object().AndReturn('mox0rd')
        self.mock_object._Replay()

        self.assertEquals('mox0rd', self.mock_object())

        self.mock_object._Verify()

    def testIsReprable(self):
        """Test that MockAnythings can be repr'd without causing a failure."""
        self.failUnless('MockAnything' in repr(self.mock_object))
class MethodCheckerTest(unittest.TestCase):
    """Tests MockMethod's use of MethodChecker method.

    Each test records a MockMethod bound to a real method of
    CheckCallTestClass (defined below) and checks that calls which do not
    match the real signature raise AttributeError.
    """

    def testNoParameters(self):
        """A no-argument method rejects any positional or named argument."""
        method = mox.MockMethod('NoParameters', [], False,
                                CheckCallTestClass.NoParameters)
        method()
        self.assertRaises(AttributeError, method, 1)
        self.assertRaises(AttributeError, method, 1, 2)
        self.assertRaises(AttributeError, method, a=1)
        self.assertRaises(AttributeError, method, 1, b=2)

    def testOneParameter(self):
        """A one-argument method accepts it positionally or by name."""
        method = mox.MockMethod('OneParameter', [], False,
                                CheckCallTestClass.OneParameter)
        self.assertRaises(AttributeError, method)
        method(1)
        method(a=1)
        self.assertRaises(AttributeError, method, b=1)
        self.assertRaises(AttributeError, method, 1, 2)
        self.assertRaises(AttributeError, method, 1, a=2)
        self.assertRaises(AttributeError, method, 1, b=2)

    def testTwoParameters(self):
        """Both arguments are required; name/position may be mixed."""
        method = mox.MockMethod('TwoParameters', [], False,
                                CheckCallTestClass.TwoParameters)
        self.assertRaises(AttributeError, method)
        self.assertRaises(AttributeError, method, 1)
        self.assertRaises(AttributeError, method, a=1)
        self.assertRaises(AttributeError, method, b=1)
        method(1, 2)
        method(1, b=2)
        method(a=1, b=2)
        method(b=2, a=1)
        self.assertRaises(AttributeError, method, b=2, c=3)
        self.assertRaises(AttributeError, method, a=1, b=2, c=3)
        self.assertRaises(AttributeError, method, 1, 2, 3)
        self.assertRaises(AttributeError, method, 1, 2, 3, 4)
        self.assertRaises(AttributeError, method, 3, a=1, b=2)

    def testOneDefaultValue(self):
        """A defaulted argument may be omitted, passed, or named."""
        method = mox.MockMethod('OneDefaultValue', [], False,
                                CheckCallTestClass.OneDefaultValue)
        method()
        method(1)
        method(a=1)
        self.assertRaises(AttributeError, method, b=1)
        self.assertRaises(AttributeError, method, 1, 2)
        self.assertRaises(AttributeError, method, 1, a=2)
        self.assertRaises(AttributeError, method, 1, b=2)

    def testTwoDefaultValues(self):
        """Required arguments must be bound before defaults can be used."""
        method = mox.MockMethod('TwoDefaultValues', [], False,
                                CheckCallTestClass.TwoDefaultValues)
        self.assertRaises(AttributeError, method)
        self.assertRaises(AttributeError, method, c=3)
        self.assertRaises(AttributeError, method, 1)
        self.assertRaises(AttributeError, method, 1, d=4)
        self.assertRaises(AttributeError, method, 1, d=4, c=3)
        method(1, 2)
        method(a=1, b=2)
        method(1, 2, 3)
        method(1, 2, 3, 4)
        method(1, 2, c=3)
        method(1, 2, c=3, d=4)
        method(1, 2, d=4, c=3)
        method(d=4, c=3, a=1, b=2)
        self.assertRaises(AttributeError, method, 1, 2, 3, 4, 5)
        self.assertRaises(AttributeError, method, 1, 2, e=9)
        self.assertRaises(AttributeError, method, a=1, b=2, e=9)

    def testArgs(self):
        """*args absorbs extra positionals but not unknown keywords."""
        method = mox.MockMethod('Args', [], False, CheckCallTestClass.Args)
        self.assertRaises(AttributeError, method)
        self.assertRaises(AttributeError, method, 1)
        method(1, 2)
        method(a=1, b=2)
        method(1, 2, 3)
        method(1, 2, 3, 4)
        self.assertRaises(AttributeError, method, 1, 2, a=3)
        self.assertRaises(AttributeError, method, 1, 2, c=3)

    def testKwargs(self):
        """**kwargs absorbs extra keywords but not extra positionals."""
        method = mox.MockMethod('Kwargs', [], False, CheckCallTestClass.Kwargs)
        self.assertRaises(AttributeError, method)
        method(1)
        method(1, 2)
        method(a=1, b=2)
        method(b=2, a=1)
        self.assertRaises(AttributeError, method, 1, 2, 3)
        self.assertRaises(AttributeError, method, 1, 2, a=3)
        method(1, 2, c=3)
        method(a=1, b=2, c=3)
        method(c=3, a=1, b=2)
        method(a=1, b=2, c=3, d=4)
        self.assertRaises(AttributeError, method, 1, 2, 3, 4)

    def testArgsAndKwargs(self):
        """*args and **kwargs together accept most call shapes."""
        method = mox.MockMethod('ArgsAndKwargs', [], False,
                                CheckCallTestClass.ArgsAndKwargs)
        self.assertRaises(AttributeError, method)
        method(1)
        method(1, 2)
        method(1, 2, 3)
        method(a=1)
        method(1, b=2)
        # 'a' is already bound positionally here, so naming it again fails.
        self.assertRaises(AttributeError, method, 1, a=2)
        method(b=2, a=1)
        method(c=3, b=2, a=1)
        method(1, 2, c=3)
class CheckCallTestClass(object):
  """Signature fixtures for the MockMethod call-checking tests above.

  Each method exists only for its signature; no body is ever executed.
  MockMethod is handed the unbound method and must accept/reject calls
  exactly as Python's own argument binding would.
  """
  def NoParameters(self):
    pass
  def OneParameter(self, a):
    pass
  def TwoParameters(self, a, b):
    pass
  def OneDefaultValue(self, a=1):
    pass
  def TwoDefaultValues(self, a, b, c=1, d=2):
    pass
  def Args(self, a, b, *args):
    pass
  def Kwargs(self, a, b=2, **kwargs):
    pass
  def ArgsAndKwargs(self, a, *args, **kwargs):
    pass
class MockObjectTest(unittest.TestCase):
  """Verify that the MockObject class works as expected."""
  def setUp(self):
    self.mock_object = mox.MockObject(TestClass)
  def testSetupModeWithValidCall(self):
    """Verify the mock object properly mocks a basic method call."""
    self.mock_object.ValidCall()
    self.assert_(len(self.mock_object._expected_calls_queue) == 1)
  def testSetupModeWithInvalidCall(self):
    """UnknownMethodCallError should be raised if a non-member method is called.
    """
    # Note: assertRaises does not catch exceptions thrown by MockObject's
    # __getattr__
    try:
      self.mock_object.InvalidCall()
      self.fail("No exception thrown, expected UnknownMethodCallError")
    except mox.UnknownMethodCallError:
      pass
    except Exception:
      self.fail("Wrong exception type thrown, expected UnknownMethodCallError")
  def testReplayWithInvalidCall(self):
    """UnknownMethodCallError should be raised if a non-member method is called.
    """
    self.mock_object.ValidCall()  # setup method call
    self.mock_object._Replay()  # start replay mode
    # Note: assertRaises does not catch exceptions thrown by MockObject's
    # __getattr__
    try:
      self.mock_object.InvalidCall()
      self.fail("No exception thrown, expected UnknownMethodCallError")
    except mox.UnknownMethodCallError:
      pass
    except Exception:
      self.fail("Wrong exception type thrown, expected UnknownMethodCallError")
  def testIsInstance(self):
    """Mock should be able to pass as an instance of the mocked class."""
    self.assert_(isinstance(self.mock_object, TestClass))
  def testFindValidMethods(self):
    """Mock should be able to mock all public methods."""
    self.assert_('ValidCall' in self.mock_object._known_methods)
    self.assert_('OtherValidCall' in self.mock_object._known_methods)
    self.assert_('MyClassMethod' in self.mock_object._known_methods)
    self.assert_('MyStaticMethod' in self.mock_object._known_methods)
    self.assert_('_ProtectedCall' in self.mock_object._known_methods)
    # Private methods are only visible under their name-mangled form.
    self.assert_('__PrivateCall' not in self.mock_object._known_methods)
    self.assert_('_TestClass__PrivateCall' in self.mock_object._known_methods)
  def testFindsSuperclassMethods(self):
    """Mock should be able to mock superclasses methods."""
    self.mock_object = mox.MockObject(ChildClass)
    self.assert_('ValidCall' in self.mock_object._known_methods)
    self.assert_('OtherValidCall' in self.mock_object._known_methods)
    self.assert_('MyClassMethod' in self.mock_object._known_methods)
    self.assert_('ChildValidCall' in self.mock_object._known_methods)
  def testAccessClassVariables(self):
    """Class variables should be accessible through the mock."""
    self.assert_('SOME_CLASS_VAR' in self.mock_object._known_vars)
    self.assert_('_PROTECTED_CLASS_VAR' in self.mock_object._known_vars)
    self.assertEquals('test_value', self.mock_object.SOME_CLASS_VAR)
  def testEquals(self):
    """A mock should be able to compare itself to another object."""
    self.mock_object._Replay()
    self.assertEquals(self.mock_object, self.mock_object)
  def testEqualsMockFailure(self):
    """Verify equals identifies unequal objects."""
    self.mock_object.ValidCall()
    self.mock_object._Replay()
    self.assertNotEquals(self.mock_object, mox.MockObject(TestClass))
  def testEqualsInstanceFailure(self):
    """Verify equals identifies that objects are different instances."""
    self.mock_object._Replay()
    self.assertNotEquals(self.mock_object, TestClass())
  def testNotEquals(self):
    """Verify not equals works."""
    self.mock_object._Replay()
    self.assertFalse(self.mock_object != self.mock_object)
  def testMockSetItem_ExpectedSetItem_Success(self):
    """Test that __setitem__() gets mocked in Dummy.
    In this test, _Verify() succeeds.
    """
    dummy = mox.MockObject(TestClass)
    dummy['X'] = 'Y'
    dummy._Replay()
    dummy['X'] = 'Y'
    dummy._Verify()
  def testMockSetItem_ExpectedSetItem_NoSuccess(self):
    """Test that __setitem__() gets mocked in Dummy.
    In this test, _Verify() fails.
    """
    dummy = mox.MockObject(TestClass)
    dummy['X'] = 'Y'
    dummy._Replay()
    # NOT doing dummy['X'] = 'Y'
    self.assertRaises(mox.ExpectedMethodCallsError, dummy._Verify)
  def testMockSetItem_ExpectedNoSetItem_Success(self):
    """Test that __setitem__() gets mocked in Dummy."""
    dummy = mox.MockObject(TestClass)
    # NOT doing dummy['X'] = 'Y'
    dummy._Replay()
    def call(): dummy['X'] = 'Y'
    self.assertRaises(mox.UnexpectedMethodCallError, call)
  def testMockSetItem_ExpectedNoSetItem_NoSuccess(self):
    """Test that __setitem__() gets mocked in Dummy.
    In this test, _Verify() fails.
    """
    dummy = mox.MockObject(TestClass)
    # NOT doing dummy['X'] = 'Y'
    dummy._Replay()
    # NOT doing dummy['X'] = 'Y'
    dummy._Verify()
  def testMockSetItem_ExpectedSetItem_NonmatchingParameters(self):
    """Test that __setitem__() fails if other parameters are expected."""
    dummy = mox.MockObject(TestClass)
    dummy['X'] = 'Y'
    dummy._Replay()
    def call(): dummy['wrong'] = 'Y'
    self.assertRaises(mox.UnexpectedMethodCallError, call)
    dummy._Verify()
  def testMockSetItem_WithSubClassOfNewStyleClass(self):
    class NewStyleTestClass(object):
      def __init__(self):
        self.my_dict = {}
      def __setitem__(self, key, value):
        # Fixed: the original read "self.my_dict[key], value", which built
        # and discarded a tuple instead of storing the value.
        self.my_dict[key] = value
    class TestSubClass(NewStyleTestClass):
      pass
    dummy = mox.MockObject(TestSubClass)
    dummy[1] = 2
    dummy._Replay()
    dummy[1] = 2
    dummy._Verify()
  def testMockGetItem_ExpectedGetItem_Success(self):
    """Test that __getitem__() gets mocked in Dummy.
    In this test, _Verify() succeeds.
    """
    dummy = mox.MockObject(TestClass)
    dummy['X'].AndReturn('value')
    dummy._Replay()
    self.assertEqual(dummy['X'], 'value')
    dummy._Verify()
  def testMockGetItem_ExpectedGetItem_NoSuccess(self):
    """Test that __getitem__() gets mocked in Dummy.
    In this test, _Verify() fails.
    """
    dummy = mox.MockObject(TestClass)
    dummy['X'].AndReturn('value')
    dummy._Replay()
    # NOT doing dummy['X']
    self.assertRaises(mox.ExpectedMethodCallsError, dummy._Verify)
  def testMockGetItem_ExpectedNoGetItem_NoSuccess(self):
    """Test that __getitem__() gets mocked in Dummy."""
    dummy = mox.MockObject(TestClass)
    # NOT doing dummy['X']
    dummy._Replay()
    def call(): return dummy['X']
    self.assertRaises(mox.UnexpectedMethodCallError, call)
  def testMockGetItem_ExpectedGetItem_NonmatchingParameters(self):
    """Test that __getitem__() fails if other parameters are expected."""
    dummy = mox.MockObject(TestClass)
    dummy['X'].AndReturn('value')
    dummy._Replay()
    def call(): return dummy['wrong']
    self.assertRaises(mox.UnexpectedMethodCallError, call)
    dummy._Verify()
  def testMockGetItem_WithSubClassOfNewStyleClass(self):
    class NewStyleTestClass(object):
      def __getitem__(self, key):
        return {1: '1', 2: '2'}[key]
    class TestSubClass(NewStyleTestClass):
      pass
    dummy = mox.MockObject(TestSubClass)
    dummy[1].AndReturn('3')
    dummy._Replay()
    self.assertEquals('3', dummy.__getitem__(1))
    dummy._Verify()
  def testMockIter_ExpectedIter_Success(self):
    """Test that __iter__() gets mocked in Dummy.
    In this test, _Verify() succeeds.
    """
    dummy = mox.MockObject(TestClass)
    iter(dummy).AndReturn(iter(['X', 'Y']))
    dummy._Replay()
    self.assertEqual([x for x in dummy], ['X', 'Y'])
    dummy._Verify()
  def testMockContains_ExpectedContains_Success(self):
    """Test that __contains__ gets mocked in Dummy.
    In this test, _Verify() succeeds.
    """
    dummy = mox.MockObject(TestClass)
    dummy.__contains__('X').AndReturn(True)
    dummy._Replay()
    self.failUnless('X' in dummy)
    dummy._Verify()
  def testMockContains_ExpectedContains_NoSuccess(self):
    """Test that __contains__() gets mocked in Dummy.
    In this test, _Verify() fails.
    """
    dummy = mox.MockObject(TestClass)
    dummy.__contains__('X').AndReturn('True')
    dummy._Replay()
    # NOT doing 'X' in dummy
    self.assertRaises(mox.ExpectedMethodCallsError, dummy._Verify)
  def testMockContains_ExpectedContains_NonmatchingParameter(self):
    """Test that __contains__ fails if other parameters are expected."""
    dummy = mox.MockObject(TestClass)
    dummy.__contains__('X').AndReturn(True)
    dummy._Replay()
    def call(): return 'Y' in dummy
    self.assertRaises(mox.UnexpectedMethodCallError, call)
    dummy._Verify()
  def testMockIter_ExpectedIter_NoSuccess(self):
    """Test that __iter__() gets mocked in Dummy.
    In this test, _Verify() fails.
    """
    dummy = mox.MockObject(TestClass)
    iter(dummy).AndReturn(iter(['X', 'Y']))
    dummy._Replay()
    # NOT doing self.assertEqual([x for x in dummy], ['X', 'Y'])
    self.assertRaises(mox.ExpectedMethodCallsError, dummy._Verify)
  def testMockIter_ExpectedNoIter_NoSuccess(self):
    """Test that __iter__() gets mocked in Dummy."""
    dummy = mox.MockObject(TestClass)
    # NOT doing iter(dummy)
    dummy._Replay()
    def call(): return [x for x in dummy]
    self.assertRaises(mox.UnexpectedMethodCallError, call)
  def testMockIter_ExpectedGetItem_Success(self):
    """Test that __iter__() gets mocked in Dummy using getitem."""
    # The mocked class has __getitem__ but no __iter__, so iteration falls
    # back to the sequence protocol and stops on IndexError.
    dummy = mox.MockObject(SubscribtableNonIterableClass)
    dummy[0].AndReturn('a')
    dummy[1].AndReturn('b')
    dummy[2].AndRaise(IndexError)
    dummy._Replay()
    self.assertEquals(['a', 'b'], [x for x in dummy])
    dummy._Verify()
  def testMockIter_ExpectedNoGetItem_NoSuccess(self):
    """Test that __iter__() gets mocked in Dummy using getitem."""
    dummy = mox.MockObject(SubscribtableNonIterableClass)
    # NOT doing dummy[index]
    dummy._Replay()
    function = lambda: [x for x in dummy]
    self.assertRaises(mox.UnexpectedMethodCallError, function)
  def testMockGetIter_WithSubClassOfNewStyleClass(self):
    class NewStyleTestClass(object):
      def __iter__(self):
        return iter([1, 2, 3])
    class TestSubClass(NewStyleTestClass):
      pass
    dummy = mox.MockObject(TestSubClass)
    iter(dummy).AndReturn(iter(['a', 'b']))
    dummy._Replay()
    self.assertEquals(['a', 'b'], [x for x in dummy])
    dummy._Verify()
  def testInstantiationWithAdditionalAttributes(self):
    mock_object = mox.MockObject(TestClass, attrs={"attr1": "value"})
    self.assertEquals(mock_object.attr1, "value")
  def testCantOverrideMethodsWithAttributes(self):
    self.assertRaises(ValueError, mox.MockObject, TestClass,
                      attrs={"ValidCall": "value"})
  def testCantMockNonPublicAttributes(self):
    self.assertRaises(mox.PrivateAttributeError, mox.MockObject, TestClass,
                      attrs={"_protected": "value"})
    self.assertRaises(mox.PrivateAttributeError, mox.MockObject, TestClass,
                      attrs={"__private": "value"})
class MoxTest(unittest.TestCase):
  """Verify Mox works correctly."""
  def setUp(self):
    self.mox = mox.Mox()
  def testCreateObject(self):
    """Mox should create a mock object."""
    mock_obj = self.mox.CreateMock(TestClass)
  def testVerifyObjectWithCompleteReplay(self):
    """Mox should replay and verify all objects it created."""
    mock_obj = self.mox.CreateMock(TestClass)
    mock_obj.ValidCall()
    mock_obj.ValidCallWithArgs(mox.IsA(TestClass))
    self.mox.ReplayAll()
    mock_obj.ValidCall()
    mock_obj.ValidCallWithArgs(TestClass("some_value"))
    self.mox.VerifyAll()
  def testVerifyObjectWithIncompleteReplay(self):
    """Mox should raise an exception if a mock didn't replay completely."""
    mock_obj = self.mox.CreateMock(TestClass)
    mock_obj.ValidCall()
    self.mox.ReplayAll()
    # ValidCall() is never made
    self.assertRaises(mox.ExpectedMethodCallsError, self.mox.VerifyAll)
  def testEntireWorkflow(self):
    """Test the whole work flow."""
    mock_obj = self.mox.CreateMock(TestClass)
    mock_obj.ValidCall().AndReturn("yes")
    self.mox.ReplayAll()
    ret_val = mock_obj.ValidCall()
    self.assertEquals("yes", ret_val)
    self.mox.VerifyAll()
  def testCallableObject(self):
    """Test recording calls to a callable object works."""
    mock_obj = self.mox.CreateMock(CallableClass)
    mock_obj("foo").AndReturn("qux")
    self.mox.ReplayAll()
    ret_val = mock_obj("foo")
    self.assertEquals("qux", ret_val)
    self.mox.VerifyAll()
  def testInheritedCallableObject(self):
    """Test recording calls to an object inheriting from a callable object."""
    mock_obj = self.mox.CreateMock(InheritsFromCallable)
    mock_obj("foo").AndReturn("qux")
    self.mox.ReplayAll()
    ret_val = mock_obj("foo")
    self.assertEquals("qux", ret_val)
    self.mox.VerifyAll()
  def testCallOnNonCallableObject(self):
    """Test that you cannot call a non-callable object."""
    mock_obj = self.mox.CreateMock(TestClass)
    self.assertRaises(TypeError, mock_obj)
  def testCallableObjectWithBadCall(self):
    """Test verifying calls to a callable object works."""
    mock_obj = self.mox.CreateMock(CallableClass)
    mock_obj("foo").AndReturn("qux")
    self.mox.ReplayAll()
    self.assertRaises(mox.UnexpectedMethodCallError, mock_obj, "ZOOBAZ")
  def testCallableObjectVerifiesSignature(self):
    """A mocked callable should reject calls its __call__ cannot accept."""
    mock_obj = self.mox.CreateMock(CallableClass)
    # Too many arguments
    self.assertRaises(AttributeError, mock_obj, "foo", "bar")
  def testUnorderedGroup(self):
    """Test that using one unordered group works."""
    mock_obj = self.mox.CreateMockAnything()
    mock_obj.Method(1).InAnyOrder()
    mock_obj.Method(2).InAnyOrder()
    self.mox.ReplayAll()
    mock_obj.Method(2)
    mock_obj.Method(1)
    self.mox.VerifyAll()
  def testUnorderedGroupsInline(self):
    """Unordered groups should work in the context of ordered calls."""
    mock_obj = self.mox.CreateMockAnything()
    mock_obj.Open()
    mock_obj.Method(1).InAnyOrder()
    mock_obj.Method(2).InAnyOrder()
    mock_obj.Close()
    self.mox.ReplayAll()
    mock_obj.Open()
    mock_obj.Method(2)
    mock_obj.Method(1)
    mock_obj.Close()
    self.mox.VerifyAll()
  def testMultipleUnorderdGroups(self):
    """Multiple unordered groups should work."""
    mock_obj = self.mox.CreateMockAnything()
    mock_obj.Method(1).InAnyOrder()
    mock_obj.Method(2).InAnyOrder()
    mock_obj.Foo().InAnyOrder('group2')
    mock_obj.Bar().InAnyOrder('group2')
    self.mox.ReplayAll()
    mock_obj.Method(2)
    mock_obj.Method(1)
    mock_obj.Bar()
    mock_obj.Foo()
    self.mox.VerifyAll()
  def testMultipleUnorderdGroupsOutOfOrder(self):
    """Multiple unordered groups should maintain external order"""
    mock_obj = self.mox.CreateMockAnything()
    mock_obj.Method(1).InAnyOrder()
    mock_obj.Method(2).InAnyOrder()
    mock_obj.Foo().InAnyOrder('group2')
    mock_obj.Bar().InAnyOrder('group2')
    self.mox.ReplayAll()
    mock_obj.Method(2)
    # Bar() belongs to the second group; calling it before the first group
    # is exhausted must fail.
    self.assertRaises(mox.UnexpectedMethodCallError, mock_obj.Bar)
  def testUnorderedGroupWithReturnValue(self):
    """Unordered groups should work with return values."""
    mock_obj = self.mox.CreateMockAnything()
    mock_obj.Open()
    mock_obj.Method(1).InAnyOrder().AndReturn(9)
    mock_obj.Method(2).InAnyOrder().AndReturn(10)
    mock_obj.Close()
    self.mox.ReplayAll()
    mock_obj.Open()
    actual_two = mock_obj.Method(2)
    actual_one = mock_obj.Method(1)
    mock_obj.Close()
    self.assertEquals(9, actual_one)
    self.assertEquals(10, actual_two)
    self.mox.VerifyAll()
  def testUnorderedGroupWithComparator(self):
    """Unordered groups should work with comparators"""
    def VerifyOne(cmd):
      if not isinstance(cmd, str):
        self.fail('Unexpected type passed to comparator: ' + str(cmd))
      return cmd == 'test'
    def VerifyTwo(cmd):
      return True
    mock_obj = self.mox.CreateMockAnything()
    mock_obj.Foo(['test'], mox.Func(VerifyOne), bar=1).InAnyOrder().\
        AndReturn('yes test')
    mock_obj.Foo(['test'], mox.Func(VerifyTwo), bar=1).InAnyOrder().\
        AndReturn('anything')
    self.mox.ReplayAll()
    mock_obj.Foo(['test'], 'anything', bar=1)
    mock_obj.Foo(['test'], 'test', bar=1)
    self.mox.VerifyAll()
  def testMultipleTimes(self):
    """Test if MultipleTimesGroup works."""
    mock_obj = self.mox.CreateMockAnything()
    mock_obj.Method(1).MultipleTimes().AndReturn(9)
    mock_obj.Method(2).AndReturn(10)
    mock_obj.Method(3).MultipleTimes().AndReturn(42)
    self.mox.ReplayAll()
    actual_one = mock_obj.Method(1)
    second_one = mock_obj.Method(1)  # This tests MultipleTimes.
    actual_two = mock_obj.Method(2)
    actual_three = mock_obj.Method(3)
    mock_obj.Method(3)
    mock_obj.Method(3)
    self.mox.VerifyAll()
    self.assertEquals(9, actual_one)
    self.assertEquals(9, second_one)  # Repeated calls should return same number.
    self.assertEquals(10, actual_two)
    self.assertEquals(42, actual_three)
  def testMultipleTimesUsingIsAParameter(self):
    """Test if MultipleTimesGroup works with a IsA parameter."""
    mock_obj = self.mox.CreateMockAnything()
    mock_obj.Open()
    mock_obj.Method(mox.IsA(str)).MultipleTimes("IsA").AndReturn(9)
    mock_obj.Close()
    self.mox.ReplayAll()
    mock_obj.Open()
    actual_one = mock_obj.Method("1")
    second_one = mock_obj.Method("2")  # This tests MultipleTimes.
    mock_obj.Close()
    self.mox.VerifyAll()
    self.assertEquals(9, actual_one)
    self.assertEquals(9, second_one)  # Repeated calls should return same number.
  def testMutlipleTimesUsingFunc(self):
    """Test that the Func is not evaluated more times than necessary.
    If a Func() has side effects, it can cause a passing test to fail.
    """
    self.counter = 0
    def MyFunc(actual_str):
      """Increment the counter if actual_str == 'foo'."""
      if actual_str == 'foo':
        self.counter += 1
      return True
    mock_obj = self.mox.CreateMockAnything()
    mock_obj.Open()
    mock_obj.Method(mox.Func(MyFunc)).MultipleTimes()
    mock_obj.Close()
    self.mox.ReplayAll()
    mock_obj.Open()
    mock_obj.Method('foo')
    mock_obj.Method('foo')
    mock_obj.Method('not-foo')
    mock_obj.Close()
    self.mox.VerifyAll()
    # Each replayed call must evaluate the Func exactly once.
    self.assertEquals(2, self.counter)
  def testMultipleTimesThreeMethods(self):
    """Test if MultipleTimesGroup works with three or more methods."""
    mock_obj = self.mox.CreateMockAnything()
    mock_obj.Open()
    mock_obj.Method(1).MultipleTimes().AndReturn(9)
    mock_obj.Method(2).MultipleTimes().AndReturn(8)
    mock_obj.Method(3).MultipleTimes().AndReturn(7)
    mock_obj.Method(4).AndReturn(10)
    mock_obj.Close()
    self.mox.ReplayAll()
    mock_obj.Open()
    actual_three = mock_obj.Method(3)
    mock_obj.Method(1)
    actual_two = mock_obj.Method(2)
    mock_obj.Method(3)
    actual_one = mock_obj.Method(1)
    actual_four = mock_obj.Method(4)
    mock_obj.Close()
    self.assertEquals(9, actual_one)
    self.assertEquals(8, actual_two)
    self.assertEquals(7, actual_three)
    self.assertEquals(10, actual_four)
    self.mox.VerifyAll()
  def testMultipleTimesMissingOne(self):
    """Test if MultipleTimesGroup fails if one method is missing."""
    mock_obj = self.mox.CreateMockAnything()
    mock_obj.Open()
    mock_obj.Method(1).MultipleTimes().AndReturn(9)
    mock_obj.Method(2).MultipleTimes().AndReturn(8)
    mock_obj.Method(3).MultipleTimes().AndReturn(7)
    mock_obj.Method(4).AndReturn(10)
    mock_obj.Close()
    self.mox.ReplayAll()
    mock_obj.Open()
    # Method(1) is never called, so the group cannot complete and Method(4)
    # (which follows the group) is unexpected.
    mock_obj.Method(3)
    mock_obj.Method(2)
    mock_obj.Method(3)
    mock_obj.Method(3)
    mock_obj.Method(2)
    self.assertRaises(mox.UnexpectedMethodCallError, mock_obj.Method, 4)
  def testMultipleTimesTwoGroups(self):
    """Test if MultipleTimesGroup works with a group after a
    MultipleTimesGroup.
    """
    mock_obj = self.mox.CreateMockAnything()
    mock_obj.Open()
    mock_obj.Method(1).MultipleTimes().AndReturn(9)
    mock_obj.Method(3).MultipleTimes("nr2").AndReturn(42)
    mock_obj.Close()
    self.mox.ReplayAll()
    mock_obj.Open()
    actual_one = mock_obj.Method(1)
    mock_obj.Method(1)
    actual_three = mock_obj.Method(3)
    mock_obj.Method(3)
    mock_obj.Close()
    self.assertEquals(9, actual_one)
    self.assertEquals(42, actual_three)
    self.mox.VerifyAll()
  def testMultipleTimesTwoGroupsFailure(self):
    """Test if MultipleTimesGroup fails with a group after a
    MultipleTimesGroup.
    """
    mock_obj = self.mox.CreateMockAnything()
    mock_obj.Open()
    mock_obj.Method(1).MultipleTimes().AndReturn(9)
    mock_obj.Method(3).MultipleTimes("nr2").AndReturn(42)
    mock_obj.Close()
    self.mox.ReplayAll()
    mock_obj.Open()
    actual_one = mock_obj.Method(1)
    mock_obj.Method(1)
    actual_three = mock_obj.Method(3)
    # Once the second group has started, the first group is closed.
    self.assertRaises(mox.UnexpectedMethodCallError, mock_obj.Method, 1)
  def testWithSideEffects(self):
    """Test side effect operations actually modify their target objects."""
    def modifier(mutable_list):
      mutable_list[0] = 'mutated'
    mock_obj = self.mox.CreateMockAnything()
    mock_obj.ConfigureInOutParameter(['original']).WithSideEffects(modifier)
    mock_obj.WorkWithParameter(['mutated'])
    self.mox.ReplayAll()
    local_list = ['original']
    mock_obj.ConfigureInOutParameter(local_list)
    mock_obj.WorkWithParameter(local_list)
    self.mox.VerifyAll()
  def testWithSideEffectsException(self):
    """Test side effect operations actually modify their target objects."""
    # The side effect must run even when the call also raises.
    def modifier(mutable_list):
      mutable_list[0] = 'mutated'
    mock_obj = self.mox.CreateMockAnything()
    method = mock_obj.ConfigureInOutParameter(['original'])
    method.WithSideEffects(modifier).AndRaise(Exception('exception'))
    mock_obj.WorkWithParameter(['mutated'])
    self.mox.ReplayAll()
    local_list = ['original']
    self.failUnlessRaises(Exception,
                          mock_obj.ConfigureInOutParameter,
                          local_list)
    mock_obj.WorkWithParameter(local_list)
    self.mox.VerifyAll()
  def testStubOutMethod(self):
    """Test that a method is replaced with a MockAnything."""
    test_obj = TestClass()
    # Replace OtherValidCall with a mock.
    self.mox.StubOutWithMock(test_obj, 'OtherValidCall')
    self.assert_(isinstance(test_obj.OtherValidCall, mox.MockAnything))
    test_obj.OtherValidCall().AndReturn('foo')
    self.mox.ReplayAll()
    actual = test_obj.OtherValidCall()
    self.mox.VerifyAll()
    self.mox.UnsetStubs()
    self.assertEquals('foo', actual)
    # UnsetStubs must restore the real method.
    self.failIf(isinstance(test_obj.OtherValidCall, mox.MockAnything))
  def testStubOutClass_OldStyle(self):
    """Test a mocked class whose __init__ returns a Mock."""
    self.mox.StubOutWithMock(mox_test_helper, 'TestClassFromAnotherModule')
    self.assert_(isinstance(mox_test_helper.TestClassFromAnotherModule,
                            mox.MockObject))
    mock_instance = self.mox.CreateMock(
        mox_test_helper.TestClassFromAnotherModule)
    mox_test_helper.TestClassFromAnotherModule().AndReturn(mock_instance)
    mock_instance.Value().AndReturn('mock instance')
    self.mox.ReplayAll()
    a_mock = mox_test_helper.TestClassFromAnotherModule()
    actual = a_mock.Value()
    self.mox.VerifyAll()
    self.mox.UnsetStubs()
    self.assertEquals('mock instance', actual)
  def testStubOutClass(self):
    """StubOutClassWithMocks should hand back one mock per construction."""
    self.mox.StubOutClassWithMocks(mox_test_helper, 'CallableClass')
    # Instance one
    mock_one = mox_test_helper.CallableClass(1, 2)
    mock_one.Value().AndReturn('mock')
    # Instance two
    mock_two = mox_test_helper.CallableClass(8, 9)
    mock_two('one').AndReturn('called mock')
    self.mox.ReplayAll()
    one = mox_test_helper.CallableClass(1, 2)
    actual_one = one.Value()
    two = mox_test_helper.CallableClass(8, 9)
    actual_two = two('one')
    self.mox.VerifyAll()
    self.mox.UnsetStubs()
    # Verify the correct mocks were returned
    self.assertEquals(mock_one, one)
    self.assertEquals(mock_two, two)
    # Verify
    self.assertEquals('mock', actual_one)
    self.assertEquals('called mock', actual_two)
  def testStubOutClass_NotAClass(self):
    """Stubbing a function with StubOutClassWithMocks is a TypeError."""
    self.assertRaises(TypeError, self.mox.StubOutClassWithMocks,
                      mox_test_helper, 'MyTestFunction')
  def testStubOutClassNotEnoughCreated(self):
    """VerifyAll should fail when fewer instances were built than recorded."""
    self.mox.StubOutClassWithMocks(mox_test_helper, 'CallableClass')
    mox_test_helper.CallableClass(1, 2)
    mox_test_helper.CallableClass(8, 9)
    self.mox.ReplayAll()
    mox_test_helper.CallableClass(1, 2)
    self.assertRaises(mox.ExpectedMockCreationError, self.mox.VerifyAll)
    self.mox.UnsetStubs()
  def testStubOutClassWrongSignature(self):
    """A stubbed class must enforce its constructor signature."""
    self.mox.StubOutClassWithMocks(mox_test_helper, 'CallableClass')
    self.assertRaises(AttributeError, mox_test_helper.CallableClass)
    self.mox.UnsetStubs()
  def testStubOutClassWrongParameters(self):
    """Constructing with unexpected arguments should fail at replay."""
    self.mox.StubOutClassWithMocks(mox_test_helper, 'CallableClass')
    mox_test_helper.CallableClass(1, 2)
    self.mox.ReplayAll()
    self.assertRaises(mox.UnexpectedMethodCallError,
                      mox_test_helper.CallableClass, 8, 9)
    self.mox.UnsetStubs()
  def testStubOutClassTooManyCreated(self):
    """Building more instances than were recorded should fail."""
    self.mox.StubOutClassWithMocks(mox_test_helper, 'CallableClass')
    mox_test_helper.CallableClass(1, 2)
    self.mox.ReplayAll()
    mox_test_helper.CallableClass(1, 2)
    self.assertRaises(mox.UnexpectedMockCreationError,
                      mox_test_helper.CallableClass, 8, 9)
    self.mox.UnsetStubs()
  def testWarnsUserIfMockingMock(self):
    """Test that user is warned if they try to stub out a MockAnything."""
    self.mox.StubOutWithMock(TestClass, 'MyStaticMethod')
    self.assertRaises(TypeError, self.mox.StubOutWithMock, TestClass,
                      'MyStaticMethod')
  def testStubOutFirstClassMethodVerifiesSignature(self):
    """A stubbed module-level function still checks its call signature."""
    self.mox.StubOutWithMock(mox_test_helper, 'MyTestFunction')
    # Wrong number of arguments
    self.assertRaises(AttributeError, mox_test_helper.MyTestFunction, 1)
    self.mox.UnsetStubs()
  def testStubOutObject(self):
    """Test than object is replaced with a Mock."""
    class Foo(object):
      def __init__(self):
        self.obj = TestClass()
    foo = Foo()
    self.mox.StubOutWithMock(foo, "obj")
    self.assert_(isinstance(foo.obj, mox.MockObject))
    foo.obj.ValidCall()
    self.mox.ReplayAll()
    foo.obj.ValidCall()
    self.mox.VerifyAll()
    self.mox.UnsetStubs()
    self.failIf(isinstance(foo.obj, mox.MockObject))
  def testForgotReplayHelpfulMessage(self):
    """If there is an AttributeError on a MockMethod, give users a helpful msg.
    """
    foo = self.mox.CreateMockAnything()
    bar = self.mox.CreateMockAnything()
    foo.GetBar().AndReturn(bar)
    bar.ShowMeTheMoney()
    # Forgot to replay!
    try:
      foo.GetBar().ShowMeTheMoney()
    except AttributeError, e:
      self.assertEquals('MockMethod has no attribute "ShowMeTheMoney". '
          'Did you remember to put your mocks in replay mode?', str(e))
class ReplayTest(unittest.TestCase):
  """Verify Replay works properly."""

  def testReplay(self):
    """mox.Replay should flip a mock from record mode into replay mode."""
    recording_mock = mox.MockObject(TestClass)
    # A freshly created mock starts out recording expectations.
    self.assertFalse(recording_mock._replay_mode)
    mox.Replay(recording_mock)
    # After Replay() the mock verifies calls instead of recording them.
    self.assertTrue(recording_mock._replay_mode)
class MoxTestBaseTest(unittest.TestCase):
  """Verify that all tests in a class derived from MoxTestBase are wrapped."""
  def setUp(self):
    # self.mox drives this test; self.test_mox/self.test_stubs are injected
    # into the test-under-test so we can observe MoxTestBase's cleanup calls.
    self.mox = mox.Mox()
    self.test_mox = mox.Mox()
    self.test_stubs = mox.stubout.StubOutForTesting()
    self.result = unittest.TestResult()
  def tearDown(self):
    # In case one of our tests exited early without unsetting everything.
    self.mox.UnsetStubs()
    self.test_mox.UnsetStubs()
    self.test_stubs.UnsetAll()
    self.test_stubs.SmartUnsetAll()
  def _setUpTestClass(self):
    """Replacement for setUp in the test class instance.
    Assigns a mox.Mox instance as the mox attribute of the test class instance.
    This replacement Mox instance is under our control before setUp is called
    in the test class instance.
    """
    self.test.mox = self.test_mox
    self.test.stubs = self.test_stubs
  def _CreateTest(self, test_name):
    """Create a test from our example mox class.
    The created test instance is assigned to this instances test attribute.
    """
    self.test = mox_test_helper.ExampleMoxTest(test_name)
    self.mox.stubs.Set(self.test, 'setUp', self._setUpTestClass)
  def _VerifySuccess(self):
    """Run the checks to confirm test method completed successfully."""
    # Record the cleanup calls MoxTestBase is expected to make on success.
    self.mox.StubOutWithMock(self.test_mox, 'UnsetStubs')
    self.mox.StubOutWithMock(self.test_mox, 'VerifyAll')
    self.mox.StubOutWithMock(self.test_stubs, 'UnsetAll')
    self.mox.StubOutWithMock(self.test_stubs, 'SmartUnsetAll')
    self.test_mox.UnsetStubs()
    self.test_mox.VerifyAll()
    self.test_stubs.UnsetAll()
    self.test_stubs.SmartUnsetAll()
    self.mox.ReplayAll()
    self.test.run(result=self.result)
    self.assertTrue(self.result.wasSuccessful())
    self.mox.VerifyAll()
    self.mox.UnsetStubs()  # Needed to call the real VerifyAll() below.
    self.test_mox.VerifyAll()
  def testSuccess(self):
    """Successful test method execution test."""
    self._CreateTest('testSuccess')
    self._VerifySuccess()
  def testSuccessNoMocks(self):
    """Let testSuccess() unset all the mocks, and verify they've been unset."""
    self._CreateTest('testSuccess')
    self.test.run(result=self.result)
    self.assertTrue(self.result.wasSuccessful())
    self.assertEqual(OS_LISTDIR, mox_test_helper.os.listdir)
  def testStubs(self):
    """Test that "self.stubs" is provided as is useful."""
    self._CreateTest('testHasStubs')
    self._VerifySuccess()
  def testStubsNoMocks(self):
    """Let testHasStubs() unset the stubs by itself."""
    self._CreateTest('testHasStubs')
    self.test.run(result=self.result)
    self.assertTrue(self.result.wasSuccessful())
    self.assertEqual(OS_LISTDIR, mox_test_helper.os.listdir)
  def testExpectedNotCalled(self):
    """Stubbed out method is not called."""
    self._CreateTest('testExpectedNotCalled')
    self.mox.StubOutWithMock(self.test_mox, 'UnsetStubs')
    self.mox.StubOutWithMock(self.test_stubs, 'UnsetAll')
    self.mox.StubOutWithMock(self.test_stubs, 'SmartUnsetAll')
    # Don't stub out VerifyAll - that's what causes the test to fail
    self.test_mox.UnsetStubs()
    self.test_stubs.UnsetAll()
    self.test_stubs.SmartUnsetAll()
    self.mox.ReplayAll()
    self.test.run(result=self.result)
    self.failIf(self.result.wasSuccessful())
    self.mox.VerifyAll()
  def testExpectedNotCalledNoMocks(self):
    """Let testExpectedNotCalled() unset all the mocks by itself."""
    self._CreateTest('testExpectedNotCalled')
    self.test.run(result=self.result)
    self.failIf(self.result.wasSuccessful())
    self.assertEqual(OS_LISTDIR, mox_test_helper.os.listdir)
  def testUnexpectedCall(self):
    """Stubbed out method is called with unexpected arguments."""
    self._CreateTest('testUnexpectedCall')
    self.mox.StubOutWithMock(self.test_mox, 'UnsetStubs')
    self.mox.StubOutWithMock(self.test_stubs, 'UnsetAll')
    self.mox.StubOutWithMock(self.test_stubs, 'SmartUnsetAll')
    # Ensure no calls are made to VerifyAll()
    self.mox.StubOutWithMock(self.test_mox, 'VerifyAll')
    self.test_mox.UnsetStubs()
    self.test_stubs.UnsetAll()
    self.test_stubs.SmartUnsetAll()
    self.mox.ReplayAll()
    self.test.run(result=self.result)
    self.failIf(self.result.wasSuccessful())
    self.mox.VerifyAll()
  def testFailure(self):
    """Failing assertion in test method."""
    self._CreateTest('testFailure')
    self.mox.StubOutWithMock(self.test_mox, 'UnsetStubs')
    self.mox.StubOutWithMock(self.test_stubs, 'UnsetAll')
    self.mox.StubOutWithMock(self.test_stubs, 'SmartUnsetAll')
    # Ensure no calls are made to VerifyAll()
    self.mox.StubOutWithMock(self.test_mox, 'VerifyAll')
    self.test_mox.UnsetStubs()
    self.test_stubs.UnsetAll()
    self.test_stubs.SmartUnsetAll()
    self.mox.ReplayAll()
    self.test.run(result=self.result)
    self.failIf(self.result.wasSuccessful())
    self.mox.VerifyAll()
  def testMixin(self):
    """Run test from mix-in test class, ensure it passes."""
    self._CreateTest('testStat')
    self._VerifySuccess()
  def testMixinAgain(self):
    """Run same test as above but from the current test class.
    This ensures metaclass properly wrapped test methods from all base classes.
    If unsetting of stubs doesn't happen, this will fail.
    """
    self._CreateTest('testStatOther')
    self._VerifySuccess()
class VerifyTest(unittest.TestCase):
  """Verify Verify works properly."""

  def testVerify(self):
    """mox.Verify must raise when an expected call never happened.

    An expectation is recorded and the mock is put into replay mode, but the
    call is never replayed, so verification has to fail.
    """
    unmet_mock = mox.MockObject(TestClass)
    unmet_mock.ValidCall()  # expectation recorded, never satisfied
    unmet_mock._Replay()
    self.assertRaises(mox.ExpectedMethodCallsError, mox.Verify, unmet_mock)
class ResetTest(unittest.TestCase):
  """Verify Reset works properly."""

  def testReset(self):
    """mox.Reset should clear queued expectations and re-enter record mode."""
    target = mox.MockObject(TestClass)
    target.ValidCall()
    # Recording an expectation does not flip replay mode by itself.
    self.assertFalse(target._replay_mode)
    target._Replay()
    self.assertTrue(target._replay_mode)
    self.assertEquals(1, len(target._expected_calls_queue))
    mox.Reset(target)
    # Reset drops the recorded expectation and puts the mock back to record.
    self.assertFalse(target._replay_mode)
    self.assertEquals(0, len(target._expected_calls_queue))
class MyTestCase(unittest.TestCase):
  """Simulate the use of a fake wrapper around Python's unittest library."""
  def setUp(self):
    super(MyTestCase, self).setUp()
    # Both attributes are checked by MoxTestBaseMultipleInheritanceTest to
    # prove every setUp in the MRO ran.
    self.critical_variable = 42
    self.another_critical_variable = 42
  def testMethodOverride(self):
    """Should be properly overridden in a derived class."""
    self.assertEquals(42, self.another_critical_variable)
    self.another_critical_variable += 1
class MoxTestBaseMultipleInheritanceTest(mox.MoxTestBase, MyTestCase):
  """Test that multiple inheritance can be used with MoxTestBase."""
  def setUp(self):
    # Runs both base setUp()s via the MRO, then overwrites one attribute so
    # the override ordering below can be observed.
    super(MoxTestBaseMultipleInheritanceTest, self).setUp()
    self.another_critical_variable = 99
  def testMultipleInheritance(self):
    """Should be able to access members created by all parent setUp()."""
    self.assert_(isinstance(self.mox, mox.Mox))
    self.assertEquals(42, self.critical_variable)
  def testMethodOverride(self):
    """Should run before MyTestCase.testMethodOverride."""
    self.assertEquals(99, self.another_critical_variable)
    self.another_critical_variable = 42
    # Delegate to the base implementation, which asserts 42 and increments.
    super(MoxTestBaseMultipleInheritanceTest, self).testMethodOverride()
    self.assertEquals(43, self.another_critical_variable)
class MoxTestDontMockProperties(MoxTestBaseTest):
  # NOTE(review): subclassing MoxTestBaseTest re-runs all of its test methods
  # in addition to the one below — presumably intentional, to reuse its
  # setUp/tearDown; confirm before changing the base class.
  def testPropertiesArentMocked(self):
    """CreateMock must skip properties, so reading one is an unknown call."""
    mock_class = self.mox.CreateMock(ClassWithProperties)
    self.assertRaises(mox.UnknownMethodCallError, lambda:
                      mock_class.prop_attr)
class TestClass:
    """Fixture class used only for testing the mock framework."""

    SOME_CLASS_VAR = "test_value"
    _PROTECTED_CLASS_VAR = "protected value"

    def __init__(self, ivar=None):
        self.__ivar = ivar
        # Backing store for the mapping protocol (__getitem__ et al.).
        # It was previously never initialized, so item access raised
        # AttributeError instead of the expected KeyError.
        self.d = {}

    def __eq__(self, rhs):
        return self.__ivar == rhs

    def __ne__(self, rhs):
        return not self.__eq__(rhs)

    def ValidCall(self):
        pass

    def MethodWithArgs(self, one, two, nine=None):
        pass

    def OtherValidCall(self):
        pass

    def ValidCallWithArgs(self, *args, **kwargs):
        pass

    @classmethod
    def MyClassMethod(cls):
        pass

    @staticmethod
    def MyStaticMethod():
        pass

    def _ProtectedCall(self):
        pass

    def __PrivateCall(self):
        pass

    def __DoNotMock(self):
        pass

    # NOTE: an empty duplicate __getitem__ stub used to precede this
    # definition; it was dead code (silently replaced) and was removed.
    def __getitem__(self, key):
        """Return the value for key."""
        return self.d[key]

    def __setitem__(self, key, value):
        """Set the value for key to value."""
        self.d[key] = value

    def __contains__(self, key):
        """Returns True if d contains the key."""
        return key in self.d

    def __iter__(self):
        pass
class ChildClass(TestClass):
    """Subclass of TestClass; used to check inherited members are mockable."""

    def __init__(self):
        # Explicit base-class call (TestClass may be an old-style class,
        # so super() is deliberately avoided).
        TestClass.__init__(self)

    def ChildValidCall(self):
        pass
class CallableClass(object):
    """A callable object; instances should be mockable as callables."""

    def __init__(self):
        pass

    def __call__(self, param):
        """Echo back the single positional argument."""
        return param
class ClassWithProperties(object):
    """Exposes a property built from explicit getter/setter methods."""

    def setter_attr(self, value):
        pass

    def getter_attr(self):
        pass

    # Keep the explicit methods public; the property merely wires them up.
    prop_attr = property(fget=getter_attr, fset=setter_attr)
class SubscribtableNonIterableClass(object):
    """Supports indexing but always fails, so it is not usefully iterable."""

    def __getitem__(self, index):
        # Raising IndexError immediately ends the legacy iteration protocol.
        raise IndexError
class InheritsFromCallable(CallableClass):
    """Callable through inheritance; must be just as mockable as its base."""

    pass
# Run the full test suite when this module is executed directly.
if __name__ == '__main__':
    unittest.main()
| |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Feb 15 13:47:44 2018
@author: BallBlueMeercat
"""
import random
import numpy as np
import zodesolve
import tools
def redshift_picks(zmin, zmax, n):
    """Draw n random redshifts between zmin and zmax.

    Takes in:
        zmin = integer lowest redshift;
        zmax = integer highest redshift;
        n = integer number of redshifts to be generated.
    Returns:
        zpicks = sorted list of n redshifts sampled from [zmin, zmax].
    """
    # Grid spacing chosen so the candidate grid holds roughly 2n points.
    step = (zmax - zmin) / (n * 2)
    candidates = tools.flist(zmin, zmax, step)
    return sorted(random.sample(candidates, n))
def magn(params, zpicks, firstderivs_key, plot_key=False):
    """Compute apparent magnitudes of supernovae at the given redshifts.

    Takes in:
        params = dictionary with true parameters;
        zpicks = list of redshifts to integrate over, in ascending order;
        firstderivs_key = string, indicates which firstderivs to integrate;
        plot_key = Boolean, to plot or not to plot model figures.
    Returns:
        mag = np.ndarray of apparent mag corresponding to input redshifts.
    """
    if firstderivs_key == 'LCDM':
        # LCDM has no interaction term: drop 'gamma' if present.
        # (The old code assigned params['gamma'] = 0 and immediately
        # deleted it — a pointless assignment, and a KeyError hazard if
        # refactored; pop() has the same net effect safely.)
        params.pop('gamma', None)

    # Absolute brightness of supernovae.
    M = -19

    dlpc, plot_var = zodesolve.zodesolve(params, zpicks, firstderivs_key)

    # Distance modulus formula: luminosity distance (pc) -> apparent mag.
    mag = 5 * np.log10(dlpc / 10) + M

    if plot_key:
        # Checking evolution of the model.
        import plots
        plots.modelcheck(mag, zpicks, plot_var, firstderivs_key)

    return mag
def model_comparison(params, zpicks, firstderivs_key, gamma_list=False):
    """Plot one model with several gammas, or several models with one gamma.

    Takes in:
        params = dictionary with true parameters;
        zpicks = list of redshifts to integrate over, in ascending order;
        firstderivs_key = list of strings, which firstderivs to integrate;
        gamma_list = list of floats or integers, interaction constants.
    Action:
        plots one model evolution with different gamma,
        or evolution of different models with the same gamma.
    """
    import plots

    # Absolute brightness of supernovae.
    M = -19

    def _apparent_mag(dlpc):
        # Distance modulus formula: luminosity distance (pc) -> magnitude.
        return 5 * np.log10(dlpc / 10) + M

    plot_var_dict = {}

    if gamma_list:
        for j, gamma in enumerate(gamma_list, start=1):
            params['gamma'] = gamma
            dlpc, plot_var = zodesolve.zodesolve(params, zpicks,
                                                 firstderivs_key)
            mag = _apparent_mag(dlpc)
            plot_var_dict['plot_var_' + str(j)] = plot_var
            plot_var_dict['mag_' + str(j)] = mag
        # Plotting evolution of the model with different gamma.
        plots.gammacheck(mag, zpicks, firstderivs_key, plot_var_dict)
    elif len(firstderivs_key) > 1:
        for j, key in enumerate(firstderivs_key, start=1):
            dlpc, plot_var = zodesolve.zodesolve(params, zpicks, key)
            mag = _apparent_mag(dlpc)
            plot_var_dict['plot_var_' + str(j)] = plot_var
            plot_var_dict['mag_' + str(j)] = mag
        # Plotting evolution of different models with same gamma.
        plots.ivcdmcheck(mag, zpicks, firstderivs_key, plot_var_dict)
    return
def gnoise(mag, mu, sigma):
    """Offset each magnitude by its own Gaussian draw.

    Returns:
        mag = input magnitudes plus per-point Gaussian noise
              drawn from N(mu, sigma).
    """
    noise = np.random.normal(mu, sigma, len(mag))
    return mag + noise
def noisy_mag(zpicks, mu, sigma, params, firstderivs_key):
    """Simulate model magnitudes for zpicks and perturb them with noise."""
    clean = np.asarray(magn(params, zpicks, firstderivs_key))
    return gnoise(clean, mu, sigma)
def makensavemagnz(m_true, g_true, mu, sigma, zpicks, data_key, filename):
    '''Simulate noisy magnitudes and pickle (mag, zpicks) to ./data/filename.

    Takes in:
        Parameters used to simulate magnitude:
            m_true = e_m(t)/e_crit(t0) at t=t0;
            de_true = 1 - m_true = e_de(t)/e_crit(t0) at t=t0;
            g_true = interaction term, rate at which DE decays into matter.
        Statistical parameters of gaussian noise added to data:
            mu = mean;
            sigma = standard deviation.
        Model type:
            data_key = string, key for dictionary of interaction modes in
            firstderivs.  Options: 'Hdecay', 'rdecay', 'rdecay_de',
            'rdecay_m', 'interacting', 'LCDM'.
            Length of parameters has to correspond to the model being tested.
        filename = string, name of file data is saved to.
    Returns:
        Nothing.  Generates redshifts and corresponding magnitudes
        (offset by Gaussian noise) and saves them into a binary file
        called filename in the ./data directory.
    '''
    import os
    import pickle

    # LCDM has no interaction constant.
    if data_key == 'LCDM':
        data_params = {'m': m_true}
    else:
        data_params = {'m': m_true, 'gamma': g_true}

    mag = noisy_mag(zpicks, mu, sigma, data_params, data_key)
    output = mag, zpicks

    # Make sure the output directory exists before writing, and close the
    # file deterministically (the old code leaked the file handle).
    save_dir = './data'
    os.makedirs(save_dir, exist_ok=True)
    save_path = os.path.join(save_dir, filename)
    with open(save_path, 'wb') as f:
        pickle.dump(output, f)
    return
| |
from nose.tools import assert_equal, assert_true, assert_false, assert_raises
import networkx as nx
from networkx.algorithms import flow
from networkx.algorithms.connectivity import minimum_st_edge_cut
from networkx.algorithms.connectivity import minimum_st_node_cut
from networkx.utils import arbitrary_element
# Max-flow backends exercised by every test in this module.
flow_funcs = [
    flow.boykov_kolmogorov,
    flow.dinitz,
    flow.edmonds_karp,
    flow.preflow_push,
    flow.shortest_augmenting_path,
]

# Template used to point assertion failures at the offending flow function.
msg = "Assertion failed in function: {0}"
# Tests for node and edge cutsets
def _generate_no_biconnected(max_attempts=50):
    """Yield random graphs that are connected but not biconnected.

    Raises Exception after max_attempts consecutive failures to find one.
    """
    attempts = 0
    while True:
        G = nx.fast_gnp_random_graph(100, 0.0575)
        if nx.is_connected(G) and not nx.is_biconnected(G):
            attempts = 0
            yield G
        elif attempts >= max_attempts:
            # BUG FIX: the old code formatted the message with `attempts`
            # and then applied `% max_attempts` to the already-formatted
            # string, raising TypeError instead of the intended Exception
            # (it also shadowed the module-level `msg`).
            raise Exception(
                "Tried %d times: no suitable Graph." % max_attempts)
        else:
            attempts += 1
def test_articulation_points():
    """A 1-connected graph's minimum node cut is one articulation point."""
    graph_gen = _generate_no_biconnected()
    for flow_func in flow_funcs:
        label = msg.format(flow_func.__name__)
        for _ in range(3):
            G = next(graph_gen)
            cut = nx.minimum_node_cut(G, flow_func=flow_func)
            assert_true(len(cut) == 1, msg=label)
            assert_true(cut.pop() in set(nx.articulation_points(G)),
                        msg=label)
def test_brandes_erlebach_book():
    # Figure 1 chapter 7: Connectivity
    # http://www.informatik.uni-augsburg.de/thi/personen/kammer/Graph_Connectivity.pdf
    G = nx.Graph()
    G.add_edges_from([(1, 2), (1, 3), (1, 4), (1, 5), (2, 3), (2, 6), (3, 4),
                      (3, 6), (4, 6), (4, 7), (5, 7), (6, 8), (6, 9), (7, 8),
                      (7, 10), (8, 11), (9, 10), (9, 11), (10, 11)])
    for flow_func in flow_funcs:
        label = msg.format(flow_func.__name__)
        kwargs = dict(flow_func=flow_func)
        # edge cutsets
        assert_equal(3, len(nx.minimum_edge_cut(G, 1, 11, **kwargs)),
                     msg=label)
        edge_cut = nx.minimum_edge_cut(G, **kwargs)
        # Node 5 has only two edges, so the global edge cut has size 2.
        assert_equal(2, len(edge_cut), msg=label)
        reduced = G.copy()
        reduced.remove_edges_from(edge_cut)
        assert_false(nx.is_connected(reduced), msg=label)
        # node cuts
        assert_equal(set([6, 7]), minimum_st_node_cut(G, 1, 11, **kwargs),
                     msg=label)
        assert_equal(set([6, 7]), nx.minimum_node_cut(G, 1, 11, **kwargs),
                     msg=label)
        node_cut = nx.minimum_node_cut(G, **kwargs)
        assert_equal(2, len(node_cut), msg=label)
        reduced = G.copy()
        reduced.remove_nodes_from(node_cut)
        assert_false(nx.is_connected(reduced), msg=label)
def test_white_harary_paper():
    # Figure 1b white and harary (2001)
    # http://eclectic.ss.uci.edu/~drwhite/sm-w23.PDF
    # A graph with high adhesion (edge connectivity) and low cohesion
    # (node connectivity)
    G = nx.disjoint_union(nx.complete_graph(4), nx.complete_graph(4))
    G.remove_node(7)
    for i in range(4, 7):
        G.add_edge(0, i)
    G = nx.disjoint_union(G, nx.complete_graph(4))
    G.remove_node(G.order() - 1)
    for i in range(7, 10):
        G.add_edge(0, i)
    for flow_func in flow_funcs:
        label = msg.format(flow_func.__name__)
        kwargs = dict(flow_func=flow_func)
        # edge cuts
        edge_cut = nx.minimum_edge_cut(G, **kwargs)
        assert_equal(3, len(edge_cut), msg=label)
        reduced = G.copy()
        reduced.remove_edges_from(edge_cut)
        assert_false(nx.is_connected(reduced), msg=label)
        # node cuts: removing node 0 alone disconnects the graph
        node_cut = nx.minimum_node_cut(G, **kwargs)
        assert_equal(set([0]), node_cut, msg=label)
        reduced = G.copy()
        reduced.remove_nodes_from(node_cut)
        assert_false(nx.is_connected(reduced), msg=label)
def test_petersen_cutset():
    """Petersen graph: both global cuts have size 3."""
    G = nx.petersen_graph()
    for flow_func in flow_funcs:
        label = msg.format(flow_func.__name__)
        kwargs = dict(flow_func=flow_func)
        # edge cuts
        edge_cut = nx.minimum_edge_cut(G, **kwargs)
        assert_equal(3, len(edge_cut), msg=label)
        reduced = G.copy()
        reduced.remove_edges_from(edge_cut)
        assert_false(nx.is_connected(reduced), msg=label)
        # node cuts
        node_cut = nx.minimum_node_cut(G, **kwargs)
        assert_equal(3, len(node_cut), msg=label)
        reduced = G.copy()
        reduced.remove_nodes_from(node_cut)
        assert_false(nx.is_connected(reduced), msg=label)
def test_octahedral_cutset():
    """Octahedral graph: both global cuts have size 4."""
    G = nx.octahedral_graph()
    for flow_func in flow_funcs:
        label = msg.format(flow_func.__name__)
        kwargs = dict(flow_func=flow_func)
        # edge cuts
        edge_cut = nx.minimum_edge_cut(G, **kwargs)
        assert_equal(4, len(edge_cut), msg=label)
        reduced = G.copy()
        reduced.remove_edges_from(edge_cut)
        assert_false(nx.is_connected(reduced), msg=label)
        # node cuts
        node_cut = nx.minimum_node_cut(G, **kwargs)
        assert_equal(4, len(node_cut), msg=label)
        reduced = G.copy()
        reduced.remove_nodes_from(node_cut)
        assert_false(nx.is_connected(reduced), msg=label)
def test_icosahedral_cutset():
    """Icosahedral graph: both global cuts have size 5."""
    G = nx.icosahedral_graph()
    for flow_func in flow_funcs:
        label = msg.format(flow_func.__name__)
        kwargs = dict(flow_func=flow_func)
        # edge cuts
        edge_cut = nx.minimum_edge_cut(G, **kwargs)
        assert_equal(5, len(edge_cut), msg=label)
        reduced = G.copy()
        reduced.remove_edges_from(edge_cut)
        assert_false(nx.is_connected(reduced), msg=label)
        # node cuts
        node_cut = nx.minimum_node_cut(G, **kwargs)
        assert_equal(5, len(node_cut), msg=label)
        reduced = G.copy()
        reduced.remove_nodes_from(node_cut)
        assert_false(nx.is_connected(reduced), msg=label)
def test_node_cutset_exception():
    """A disconnected undirected graph has no global node cut."""
    G = nx.Graph()
    G.add_edges_from([(1, 2), (3, 4)])
    for flow_func in flow_funcs:
        assert_raises(nx.NetworkXError, nx.minimum_node_cut, G,
                      flow_func=flow_func)
def test_node_cutset_random_graphs():
    """Minimum node cut size must equal node connectivity and disconnect."""
    for flow_func in flow_funcs:
        label = msg.format(flow_func.__name__)
        for _ in range(3):
            G = nx.fast_gnp_random_graph(50, 0.25)
            if not nx.is_connected(G):
                # Stitch the components together so a cutset exists.
                ccs = iter(nx.connected_components(G))
                start = arbitrary_element(next(ccs))
                G.add_edges_from((start, arbitrary_element(c)) for c in ccs)
            cutset = nx.minimum_node_cut(G, flow_func=flow_func)
            assert_equal(nx.node_connectivity(G), len(cutset), msg=label)
            G.remove_nodes_from(cutset)
            assert_false(nx.is_connected(G), msg=label)
def test_edge_cutset_random_graphs():
    """Minimum edge cut size must equal edge connectivity and disconnect."""
    for flow_func in flow_funcs:
        label = msg.format(flow_func.__name__)
        for _ in range(3):
            G = nx.fast_gnp_random_graph(50, 0.25)
            if not nx.is_connected(G):
                # Stitch the components together so a cutset exists.
                ccs = iter(nx.connected_components(G))
                start = arbitrary_element(next(ccs))
                G.add_edges_from((start, arbitrary_element(c)) for c in ccs)
            cutset = nx.minimum_edge_cut(G, flow_func=flow_func)
            assert_equal(nx.edge_connectivity(G), len(cutset), msg=label)
            G.remove_edges_from(cutset)
            assert_false(nx.is_connected(G), msg=label)
def test_empty_graphs():
    """Empty graphs (directed or not) are pointless inputs for cuts."""
    G = nx.Graph()
    D = nx.DiGraph()
    for interface_func in [nx.minimum_node_cut, nx.minimum_edge_cut]:
        for flow_func in flow_funcs:
            for graph in (G, D):
                assert_raises(nx.NetworkXPointlessConcept, interface_func,
                              graph, flow_func=flow_func)
def test_unbounded():
    """In K5 the minimum s-t edge cut has size 4 for every backend."""
    G = nx.complete_graph(5)
    for flow_func in flow_funcs:
        cut = minimum_st_edge_cut(G, 1, 4, flow_func=flow_func)
        assert_equal(4, len(cut))
def test_missing_source():
    """A source node absent from the graph must raise NetworkXError."""
    G = nx.path_graph(4)
    for interface_func in [nx.minimum_edge_cut, nx.minimum_node_cut]:
        for flow_func in flow_funcs:
            assert_raises(nx.NetworkXError, interface_func, G, 10, 1,
                          flow_func=flow_func)
def test_missing_target():
    """A target node absent from the graph must raise NetworkXError."""
    G = nx.path_graph(4)
    for interface_func in [nx.minimum_edge_cut, nx.minimum_node_cut]:
        for flow_func in flow_funcs:
            assert_raises(nx.NetworkXError, interface_func, G, 1, 10,
                          flow_func=flow_func)
def test_not_weakly_connected():
    """Two disjoint directed paths: no global cut is defined."""
    G = nx.DiGraph()
    nx.add_path(G, [1, 2, 3])
    nx.add_path(G, [4, 5])
    for interface_func in [nx.minimum_edge_cut, nx.minimum_node_cut]:
        for flow_func in flow_funcs:
            assert_raises(nx.NetworkXError, interface_func, G,
                          flow_func=flow_func)
def test_not_connected():
    """Two disjoint undirected paths: no global cut is defined."""
    G = nx.Graph()
    nx.add_path(G, [1, 2, 3])
    nx.add_path(G, [4, 5])
    for interface_func in [nx.minimum_edge_cut, nx.minimum_node_cut]:
        for flow_func in flow_funcs:
            assert_raises(nx.NetworkXError, interface_func, G,
                          flow_func=flow_func)
def tests_min_cut_complete():
    """Every global minimum cut in K5 has size 4."""
    G = nx.complete_graph(5)
    for interface_func in [nx.minimum_edge_cut, nx.minimum_node_cut]:
        for flow_func in flow_funcs:
            cut = interface_func(G, flow_func=flow_func)
            assert_equal(4, len(cut))
def tests_min_cut_complete_directed():
    """Every global minimum cut in directed K5 has size 4."""
    G = nx.complete_graph(5).to_directed()
    for interface_func in [nx.minimum_edge_cut, nx.minimum_node_cut]:
        for flow_func in flow_funcs:
            cut = interface_func(G, flow_func=flow_func)
            assert_equal(4, len(cut))
def tests_minimum_st_node_cut():
    """Adjacent s and t (7, 11): no node cut can separate them."""
    G = nx.Graph()
    G.add_nodes_from([0, 1, 2, 3, 7, 8, 11, 12])
    G.add_edges_from([(7, 11), (1, 11), (1, 12), (12, 8), (0, 1)])
    nodelist = minimum_st_node_cut(G, 7, 11)
    assert nodelist == []
def test_invalid_auxiliary():
    """Passing the original graph as the auxiliary digraph is rejected."""
    G = nx.complete_graph(5)
    assert_raises(nx.NetworkXError, minimum_st_node_cut, G, 0, 3,
                  auxiliary=G)
def test_interface_only_source():
    """Supplying only s (no t) must raise NetworkXError."""
    G = nx.complete_graph(5)
    for interface_func in [nx.minimum_node_cut, nx.minimum_edge_cut]:
        assert_raises(nx.NetworkXError, interface_func, G, s=0)
def test_interface_only_target():
    """Supplying only t (no s) must raise NetworkXError."""
    G = nx.complete_graph(5)
    for interface_func in [nx.minimum_node_cut, nx.minimum_edge_cut]:
        assert_raises(nx.NetworkXError, interface_func, G, t=3)
| |
###############################################################################
##
## Copyright (C) 2006-2011, University of Utah.
## All rights reserved.
## Contact: contact@vistrails.org
##
## This file is part of VisTrails.
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
##
## - Redistributions of source code must retain the above copyright notice,
## this list of conditions and the following disclaimer.
## - Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in the
## documentation and/or other materials provided with the distribution.
## - Neither the name of the University of Utah nor the names of its
## contributors may be used to endorse or promote products derived from
## this software without specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
## THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
## PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
## OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
## WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
## OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
## ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
##
###############################################################################
""" This module defines the following classes:
- QShellDialog
- QShell
QShell is based on ideas and code from PyCute developed by Gerard Vermeulen.
Used with the author's permission.
More information on PyCute, visit:
http://gerard.vermeulen.free.fr/html/pycute-intro.html
"""
from PyQt4 import QtGui, QtCore
from code import InteractiveInterpreter
import copy
import sys
import time
import os.path
import api
from core.configuration import get_vistrails_configuration
from core.interpreter.default import get_default_interpreter
import core.modules.module_registry
import core.system
from core.vistrail.port_spec import PortSpec
from gui.vistrails_palette import QVistrailsPaletteInterface
from core.utils import all
################################################################################
class QShellDialog(QtGui.QWidget, QVistrailsPaletteInterface):
    """This class incorporates the QShell into a dockable widget for use in the
    VisTrails environment"""

    def __init__(self, parent=None):
        """Build the palette and embed a QShell, seeding it with a copy of
        this frame's locals() as the interpreter namespace."""
        QtGui.QWidget.__init__(self, parent=parent)
        #locals() returns the original dictionary, not a copy as
        #the docs say
        self.firstLocals = copy.copy(locals())
        self.shell = QShell(self.firstLocals, None)
        layout = QtGui.QVBoxLayout()
        layout.setMargin(0)
        layout.setSpacing(0)
        layout.addWidget(self.shell)
        self.setLayout(layout)
        # self.setWidget(self.shell)
        self.setWindowTitle(self.shell.windowTitle())
        # self.setTitleBarWidget(QtGui.QLabel(self.shell.windowTitle()))
        # self.monitorWindowTitle(self.shell)
        self.vistrails_interpreter = get_default_interpreter()

    def createMenu(self):
        """createMenu() -> None
        Creates a menu bar and adds it to the main layout.
        """
        self.newSessionAct = QtGui.QAction(self.tr("&Restart"), self)
        self.newSessionAct.setShortcut(self.tr("Ctrl+R"))
        self.connect(self.newSessionAct, QtCore.SIGNAL("triggered()"),
                     self.newSession)
        self.saveSessionAct = QtGui.QAction(self.tr("&Save"), self)
        self.saveSessionAct.setShortcut(self.tr("Ctrl+S"))
        self.connect(self.saveSessionAct, QtCore.SIGNAL("triggered()"),
                     self.saveSession)
        self.closeSessionAct = QtGui.QAction(self.tr("Close"), self)
        self.closeSessionAct.setShortcut(self.tr("Ctrl+W"))
        self.connect(self.closeSessionAct, QtCore.SIGNAL("triggered()"),
                     self.closeSession)
        self.menuBar = QtGui.QMenuBar(self)
        menu = self.menuBar.addMenu(self.tr("&Session"))
        menu.addAction(self.newSessionAct)
        menu.addAction(self.saveSessionAct)
        menu.addAction(self.closeSessionAct)
        self.layout().setMenuBar(self.menuBar)

    def closeEvent(self, e):
        """closeEvent(e) -> None
        Event handler called when the dialog is about to close."""
        self.closeSession()
        self.emit(QtCore.SIGNAL("shellHidden()"))

    def showEvent(self, e):
        """showEvent(e) -> None
        Event handler called when the dialog acquires focus
        """
        self.shell.show()

    def closeSession(self):
        """closeSession() -> None.
        Hides the dialog instead of closing it, so the session continues open.
        """
        self.hide()

    def newSession(self):
        """newSession() -> None
        Tells the shell to start a new session passing a copy of the original
        locals dictionary.
        """
        # NOTE(review): QShell.restart is not defined in the visible part
        # of this file — presumably implemented elsewhere; confirm.
        self.shell.restart(copy.copy(self.firstLocals))

    def saveSession(self):
        """saveSession() -> None
        Opens a File Save dialog and passes the filename to shell's saveSession.
        """
        # Default name embeds a timestamp, e.g. visTrails-20110101-1200.log.
        default = 'visTrails' + '-' + time.strftime("%Y%m%d-%H%M.log")
        default = os.path.join(core.system.vistrails_file_directory(), default)
        fileName = QtGui.QFileDialog.getSaveFileName(self,
                                                     "Save Session As..",
                                                     default,
                                                     "Log files (*.log)")
        if not fileName:
            return
        self.shell.saveSession(str(fileName))

    def visibility_changed(self, visible):
        """Show or hide the embedded shell together with the palette."""
        QVistrailsPaletteInterface.visibility_changed(self, visible)
        if visible:
            self.shell.show()
        else:
            self.shell.hide()
##############################################################################
# QShell
class vistrails_port(object):
    """Proxy for a single port of a wrapped VisTrails module."""

    def __init__(self, vistrails_module, port_spec):
        self._vistrails_module = vistrails_module
        self._port_spec = port_spec

    def __call__(self, *args, **kwargs):
        """With arguments: forward them to the module's port-update hook
        and return None.  Without arguments: act as an accessor and
        return this proxy itself."""
        if args or kwargs:
            self._vistrails_module._update_func(self._port_spec,
                                                *args, **kwargs)
            return None
        return self
class vistrails_module(object):
    """Python-side proxy for a VisTrails pipeline module.

    Concrete subclasses are generated dynamically via type() with
    `_module_desc` (a module descriptor) and `_package` class attributes;
    attribute access is translated into port lookups and updates on the
    underlying pipeline module.
    """

    def __init__(self, *args, **kwargs):
        # Lazily create the underlying pipeline module the first time a
        # proxy instance is built; kwargs set input ports by name.
        if not hasattr(self, '_module'):
            self._module = \
                api.add_module_from_descriptor(self._module_desc)
            # FIXME if constant, we can use args
            module_desc = self._module_desc
            for attr_name, value in kwargs.iteritems():
                self._process_attr_value(attr_name, value)

    def _process_attr_value(self, attr_name, value):
        # Route a named value to the matching input port; a tuple value is
        # spread over multiple parameters.
        if self._module.has_port_spec(attr_name, 'input'):
            port_spec = self._module.get_port_spec(attr_name, 'input')

            args = None
            # FIXME want this to be any iterable
            if type(value) == tuple:
                args = value
            else:
                args = (value,)
            self._update_func(port_spec, *args)
        else:
            raise AttributeError("type object '%s' has no "
                                 "attribute '%s'" % \
                                     (self.__class__.__name__,
                                      attr_name))

    def __getattr__(self, attr_name):
        # Unknown attributes resolve to vistrails_port proxies; output
        # ports take precedence over input ports of the same name.
        def create_port(port_spec):
            return vistrails_port(self, port_spec)
        try:
            return self.__dict__[attr_name]
        except KeyError:
            if self._module.has_port_spec(attr_name, 'output'):
                port_spec = \
                    self._module.get_port_spec(attr_name, 'output')
                return create_port(port_spec)
            elif self._module.has_port_spec(attr_name, 'input'):
                port_spec = \
                    self._module.get_port_spec(attr_name, 'input')
                return create_port(port_spec)
            else:
                raise AttributeError("type object '%s' has no "
                                     "attribute '%s'" % \
                                         (self.__class__.__name__,
                                          attr_name))

    def __setattr__(self, attr_name, value):
        # Underscore-prefixed names are real attributes; everything else
        # is interpreted as setting an input port.
        if attr_name.startswith('_'):
            self.__dict__[attr_name] = value
        else:
            self._process_attr_value(attr_name, value)

    def _update_func(self, port_spec, *args, **kwargs):
        """Connect or parameterize the input port described by port_spec.

        args may mix literals with vistrails_port / vistrails_module
        proxies; mixed or multiple port arguments are funneled through an
        intermediate basic:Tuple module.
        """
        if port_spec.type != 'input':
            # Caller handed us an output spec; fall back to the input
            # port with the same name if one exists.
            if self._module.has_port_spec(port_spec.name, 'input'):
                port_spec = \
                    self._module.get_port_spec(port_spec.name, 'input')
            else:
                raise Exception("cannot update an output port spec")

        # FIXME deal with kwargs
        num_ports = 0
        num_params = 0
        for value in args:
            if isinstance(value, vistrails_port):
                # make connection to specified output port
                num_ports += 1
            elif isinstance(value, vistrails_module):
                # make connection to 'self' output port of value
                num_ports += 1
            else:
                # literal parameter value
                num_params += 1
        if num_ports > 1 or (num_ports == 1 and num_params > 0):
            # Mixed inputs: build a basic:Tuple module whose output feeds
            # this port, then wire each argument into the tuple.
            reg = core.modules.module_registry.get_module_registry()
            tuple_desc = \
                reg.get_descriptor_by_name('edu.utah.sci.vistrails.basic',
                                           'Tuple', '')

            d = {'_module_desc': tuple_desc,
                 '_package': self._package,}
            tuple = type('module', (vistrails_module,), d)()

            output_port_spec = PortSpec(id=-1,
                                        name='value',
                                        type='output',
                                        sigstring=port_spec.sigstring)
            api.add_port_spec(tuple._module.id, output_port_spec)
            self._update_func(port_spec, *[tuple.value()])
            # NOTE(review): assumes one descriptor per positional arg —
            # confirm against PortSpec.descriptors().
            assert len(port_spec.descriptors()) == len(args)
            for i, descriptor in enumerate(port_spec.descriptors()):
                arg_name = 'arg%d' % i
                sigstring = "(" + descriptor.sigstring + ")"
                tuple_port_spec = PortSpec(id=-1,
                                           name=arg_name,
                                           type='input',
                                           sigstring=sigstring)
                api.add_port_spec(tuple._module.id, tuple_port_spec)
                tuple._process_attr_value(arg_name, args[i])

            # create tuple object
            pass
        elif num_ports == 1:
            other = args[0]
            if isinstance(other, vistrails_port):
                if other._port_spec.type != 'output':
                    # Map an input-port proxy onto the producing module's
                    # output port of the same name, if any.
                    other_module = other._vistrails_module._module
                    if other_module.has_port_spec(port_spec.name,
                                                  'output'):
                        other_port_spec = \
                            other_module.get_port_spec(port_spec.name,
                                                       'output')
                    else:
                        raise Exception("cannot update an input "
                                        "port spec")
                else:
                    other_port_spec = other._port_spec

                api.add_connection(other._vistrails_module._module.id,
                                   other_port_spec,
                                   self._module.id,
                                   port_spec)
            elif isinstance(other, vistrails_module):
                other_port_spec = \
                    other._module.get_port_spec('self', 'output')
                api.add_connection(other._module.id,
                                   other_port_spec,
                                   self._module.id,
                                   port_spec)
        else:
            # All literals: set them as string parameters on the port.
            api.change_parameter(self._module.id,
                                 port_spec.name,
                                 [str(x) for x in args])
class QShell(QtGui.QTextEdit):
"""This class embeds a python interperter in a QTextEdit Widget
It is based on PyCute developed by Gerard Vermeulen.
"""
def __init__(self, locals=None, parent=None):
    """Constructor.
    The optional 'locals' argument specifies the dictionary in which code
    will be executed; it defaults to a newly created dictionary with key
    "__name__" set to "__console__" and key "__doc__" set to None.
    The optional 'log' argument specifies the file in which the interpreter
    session is to be logged.
    The optional 'parent' argument specifies the parent widget. If no parent
    widget has been specified, it is possible to exit the interpreter
    by Ctrl-D.
    """
    QtGui.QTextEdit.__init__(self, parent)
    self.setReadOnly(False)
    self.setWindowTitle("Console")
    # to exit the main interpreter by a Ctrl-D if QShell has no parent
    if parent is None:
        self.eofKey = QtCore.Qt.Key_D
    else:
        self.eofKey = None
    # flag for knowing when selecting text
    self.selectMode = False
    self.interpreter = None
    self.controller = None
    # storing current state
    #this is not working on mac
    #self.prev_stdout = sys.stdout
    #self.prev_stdin = sys.stdin
    #self.prev_stderr = sys.stderr
    # capture all interactive input/output
    #sys.stdout = self
    #sys.stderr = self
    #sys.stdin = self
    # user interface setup
    self.setAcceptRichText(False)
    self.setWordWrapMode(QtGui.QTextOption.WrapAnywhere)
    conf = get_vistrails_configuration()
    shell_conf = conf.shell
    # font: fixed-pitch font taken from the VisTrails configuration
    font = QtGui.QFont(shell_conf.font_face, shell_conf.font_size)
    font.setFixedPitch(1)
    self.setFont(font)
    self.reset(locals)
def load_package(self, pkg_name):
    """Build and return a namespace object mirroring a VisTrails package,
    so its modules can be instantiated from the shell by attribute access."""
    reg = core.modules.module_registry.get_module_registry()
    package = reg.get_package_by_name(pkg_name)

    def create_dict(modules, ns, m, mdesc):
        # Recursively nest module descriptors under their namespace path.
        md = {}
        if len(ns) == 0:
            d = {'_module_desc': mdesc,
                 '_package': pkg,}
            modules[m] = type('module', (vistrails_module,), d)
        else:
            if ns[0] in modules:
                md = create_dict(modules[ns[0]], ns[1:], m, mdesc)
            else:
                md = create_dict(md, ns[1:], m, mdesc)
            modules[ns[0]] = md
        return modules

    def create_namespace_path(root, modules):
        # Convert the nested dicts into attribute-accessible objects.
        for k, v in modules.iteritems():
            if type(v) == type({}):
                d = create_namespace_path(k, v)
                modules[k] = d

        if root is not None:
            modules['_package'] = pkg
            return type(root, (object,), modules)()
        else:
            return modules

    def get_module_init(module_desc):
        # NOTE(review): defined but apparently unused in the visible
        # code — confirm before removing.
        def init(self, *args, **kwargs):
            self.__dict__['module'] = \
                api.add_module_from_descriptor(module_desc)
        return init

    def get_module(package):
        # __getattr__ for the package proxy: resolve attribute names to
        # dynamically created vistrails_module subclasses.
        def getter(self, attr_name):
            desc_tuple = (attr_name, '')
            if desc_tuple in package.descriptors:
                module_desc = package.descriptors[desc_tuple]
                d = {'_module_desc': module_desc,
                     '_package': self,}
                return type('module', (vistrails_module,), d)
            else:
                raise AttributeError("type object '%s' has no attribute "
                                     "'%s'" % (self.__class__.__name__,
                                               attr_name))
        return getter

    d = {'__getattr__': get_module(package),}
    pkg = type(package.name, (object,), d)()

    modules = {}
    for (m, ns) in package.descriptors:
        module_desc = package.descriptors[(m, ns)]
        modules = create_dict(modules, ns.split('|'), m, module_desc)
    modules = create_namespace_path(None, modules)
    for (k, v) in modules.iteritems():
        setattr(pkg, k, v)
    return pkg
def selected_modules(self):
    """Wrap each currently-selected pipeline module in a shell proxy."""
    wrapped = []
    for module in api.get_selected_modules():
        proxy_cls = type('module', (vistrails_module,), {'_module': module})
        wrapped.append(proxy_cls())
    return wrapped
def reset(self, locals):
    """reset(locals) -> None
    Reset shell preparing it for a new session.

    Installs the shell helpers into `locals`, recreates the interpreter,
    clears line/history state and prints the banner and first prompt.
    """
    locals['load_package'] = self.load_package
    locals['selected_modules'] = self.selected_modules
    if self.interpreter:
        del self.interpreter
    self.interpreter = InteractiveInterpreter(locals)
    # last line + last incomplete lines
    self.line = QtCore.QString()
    self.lines = []
    # the cursor position in the last line
    self.point = 0
    # flag: the interpreter needs more input to run the last lines.
    self.more = 0
    # flag: readline() is being used for e.g. raw_input() and input()
    self.reading = 0
    # history
    self.history = []
    self.pointer = 0
    self.last = 0
    # Interpreter prompts: define them only when missing.  (The old code
    # evaluated `sys.ps1` / `sys.ps2` as bare no-op expressions in the
    # hasattr branch.)
    if not hasattr(sys, "ps1"):
        sys.ps1 = ">>> "
    if not hasattr(sys, "ps2"):
        sys.ps2 = "... "
    # interpreter banner
    self.write('VisTrails shell running Python %s on %s.\n' %
               (sys.version, sys.platform))
    self.write('Type "copyright", "credits" or "license"'
               ' for more information on Python.\n')
    self.write(sys.ps1)
def flush(self):
    """flush() -> None.
    No-op; present so the widget can stand in for stdin/stdout/stderr.
    """
    pass
def isatty(self):
    """isatty() -> int
    Pretend to be a terminal so code probing stdin/stdout/stderr behaves.
    """
    return 1
def readline(self):
    """readline() -> str
    Simulate stdin: block (pumping Qt events) until a line is entered.
    """
    self.reading = 1
    self.__clearLine()
    cursor = self.textCursor()
    cursor.movePosition(QtGui.QTextCursor.End)
    self.setTextCursor(cursor)
    # BUG FIX: the old loop called `qApp.processOneEvent()` — `qApp` was
    # never imported (NameError) and processOneEvent is a Qt3-era API.
    # Pump the event loop through the QCoreApplication instead.
    while self.reading:
        QtCore.QCoreApplication.processEvents()
    if self.line.length() == 0:
        return '\n'
    else:
        return str(self.line)
def write(self, text):
    """write(text: str) -> None
    Append text at the end of the document, acting as stdout/stderr.
    """
    end_cursor = self.textCursor()
    end_cursor.movePosition(QtGui.QTextCursor.End)
    end_cursor.clearSelection()
    self.setTextCursor(end_cursor)
    self.insertPlainText(text)
    # Remember where the editable region now starts.
    self.last = self.textCursor().position()
def insertFromMimeData(self, source):
    """Paste plain text from the clipboard at the end of the buffer."""
    if not source.hasText():
        return
    cursor = self.textCursor()
    cursor.movePosition(QtGui.QTextCursor.End)
    cursor.clearSelection()
    self.setTextCursor(cursor)
    self.__insertText(source.text())
def scroll_bar_at_bottom(self):
    """Return True when the vertical scroll bar is absent or already
    scrolled to its maximum position."""
    bar = self.verticalScrollBar()
    return (not bar) or bar.value() == bar.maximum()
def __run(self):
    """__run() -> None
    Append the last line to the history list, let the interpreter execute
    the last line(s), and clean up accounting for the interpreter results:
    (1) the interpreter succeeds
    (2) the interpreter fails, finds no errors and wants more line(s)
    (3) the interpreter fails, finds errors and writes them to sys.stderr
    """
    cursor = self.textCursor()
    cursor.movePosition(QtGui.QTextCursor.End)
    self.setTextCursor(cursor)
    # self.set_controller()
    # Capture scroll state before output is appended so we can keep the
    # view pinned to the bottom afterwards.
    should_scroll = self.scroll_bar_at_bottom()
    self.pointer = 0
    self.history.append(QtCore.QString(self.line))
    self.lines.append(str(self.line))
    source = '\n'.join(self.lines)
    self.write('\n')
    # runsource() returns True when more input is needed (case 2 above).
    self.more = self.interpreter.runsource(source)
    if self.more:
        self.write(sys.ps2)
    else:
        self.write(sys.ps1)
        self.lines = []
    self.__clearLine()
    if should_scroll:
        bar = self.verticalScrollBar()
        if bar:
            bar.setValue(bar.maximum())
def __clearLine(self):
    """Reset the input-line buffer and the cursor offset within it."""
    self.point = 0
    self.line.truncate(0)
def __insertText(self, text):
    """Insert *text* at the widget cursor and mirror it into the line
    buffer at the current offset, advancing the offset."""
    width = text.length()
    self.insertPlainText(text)
    self.line.insert(self.point, text)
    self.point += width
# def add_pipeline(self, p):
# """
# add_pipeline(p) -> None
# Set the active pipeline in the command shell. This replaces the modules
# variable with the list of current active modules of the selected pipeline.
# """
# if self.controller:
# self.interpreter.active_pipeline = self.controller.current_pipeline
# else:
# self.interpreter.active_pipeline = p
# cmd = 'active_pipeline = self.shell.interpreter.active_pipeline'
# self.interpreter.runcode(cmd)
# cmd = 'modules = self.vistrails_interpreter.find_persistent_entities(active_pipeline)[0]'
# self.interpreter.runcode(cmd)
def set_controller(self, controller=None):
    """set_controller(controller: VistrailController) -> None

    Attach *controller* to the shell and, when one is given, expose its
    current pipeline (and that pipeline's persistent modules) inside the
    interpreter namespace.
    """
    self.controller = controller
    if not controller:
        return
    self.interpreter.active_pipeline = self.controller.current_pipeline
    for cmd in ('active_pipeline = self.shell.interpreter.active_pipeline',
                'modules = self.vistrails_interpreter.'
                'find_persistent_entities(active_pipeline)[0]'):
        self.interpreter.runcode(cmd)
# def set_pipeline(self):
# """set_active_pipeline() -> None
# Makes sure that the pipeline being displayed is present in the shell for
# direct inspection and manipulation
# """
# self.add_pipeline(None)
def keyPressEvent(self, e):
    """keyPressEvent(e) -> None
    Handle user input a key at a time.
    Notice that text might come more than one keypress at a time
    if user is a fast enough typist!
    """
    text = e.text()
    key = e.key()
    # NB: Sometimes len(str(text)) > 1!
    # Printable ASCII with no relevant modifier: plain typing.
    if text.length() and all(ord(x) >= 32 and
                             ord(x) < 127
                             for x in str(text)):
        # exit select mode and jump to end of text
        cursor = self.textCursor()
        if self.selectMode or cursor.hasSelection():
            self.selectMode = False
            cursor.movePosition(QtGui.QTextCursor.End)
            cursor.clearSelection()
            self.setTextCursor(cursor)
        self.__insertText(text)
        return
    # Meta+EOF key closes the whole session.
    if e.modifiers() & QtCore.Qt.MetaModifier and key == self.eofKey:
        self.parent().closeSession()
    # Ctrl shortcuts: copy / paste-at-end / select-all.
    if e.modifiers() & QtCore.Qt.ControlModifier:
        if key == QtCore.Qt.Key_C or key == QtCore.Qt.Key_Insert:
            self.copy()
        elif key == QtCore.Qt.Key_V:
            cursor = self.textCursor()
            cursor.movePosition(QtGui.QTextCursor.End)
            cursor.clearSelection()
            self.setTextCursor(cursor)
            self.paste()
        elif key == QtCore.Qt.Key_A:
            self.selectAll()
            self.selectMode = True
        else:
            e.ignore()
        return
    # Shift+Insert pastes; any other shifted non-printable key is ignored.
    if e.modifiers() & QtCore.Qt.ShiftModifier:
        if key == QtCore.Qt.Key_Insert:
            cursor = self.textCursor()
            cursor.movePosition(QtGui.QTextCursor.End)
            cursor.clearSelection()
            self.setTextCursor(cursor)
            self.paste()
        else:
            e.ignore()
        return
    # exit select mode and jump to end of text
    cursor = self.textCursor()
    if self.selectMode or cursor.hasSelection():
        self.selectMode = False
        cursor.movePosition(QtGui.QTextCursor.End)
        cursor.clearSelection()
        self.setTextCursor(cursor)
    # Editing/navigation keys: keep self.point and self.line in sync
    # with what QTextEdit displays.
    if key == QtCore.Qt.Key_Backspace:
        if self.point:
            QtGui.QTextEdit.keyPressEvent(self, e)
            self.point -= 1
            self.line.remove(self.point, 1)
    elif key == QtCore.Qt.Key_Delete:
        QtGui.QTextEdit.keyPressEvent(self, e)
        self.line.remove(self.point, 1)
    elif key == QtCore.Qt.Key_Return or key == QtCore.Qt.Key_Enter:
        if self.reading:
            self.reading = 0
        else:
            self.__run()
    elif key == QtCore.Qt.Key_Tab:
        self.__insertText(text)
    elif key == QtCore.Qt.Key_Left:
        if self.point:
            QtGui.QTextEdit.keyPressEvent(self, e)
            self.point -= 1
    elif key == QtCore.Qt.Key_Right:
        if self.point < self.line.length():
            QtGui.QTextEdit.keyPressEvent(self, e)
            self.point += 1
    elif key == QtCore.Qt.Key_Home:
        cursor = self.textCursor()
        cursor.movePosition(QtGui.QTextCursor.StartOfLine)
        # Skip past the prompt; NOTE(review): the 4 hard-codes the
        # width of sys.ps1 (">>> ") -- confirm ps2 lines behave.
        cursor.setPosition(cursor.position() + 4)
        self.setTextCursor(cursor)
        self.point = 0
    elif key == QtCore.Qt.Key_End:
        QtGui.QTextEdit.keyPressEvent(self, e)
        self.point = self.line.length()
    elif key == QtCore.Qt.Key_Up:
        # History: wrap backwards from the newest entry.
        if len(self.history):
            if self.pointer == 0:
                self.pointer = len(self.history)
            self.pointer -= 1
            self.__recall()
    elif key == QtCore.Qt.Key_Down:
        # History: wrap forwards past the oldest entry.
        if len(self.history):
            self.pointer += 1
            if self.pointer == len(self.history):
                self.pointer = 0
            self.__recall()
    else:
        e.ignore()
def __recall(self):
    """__recall() -> None

    Display the current item from the command history.

    Replaces the current input line (everything after self.last) with
    history[self.pointer].
    """
    cursor = self.textCursor()
    cursor.setPosition(self.last)
    cursor.select(QtGui.QTextCursor.LineUnderCursor)
    cursor.removeSelectedText()
    self.setTextCursor(cursor)
    # Re-draw the prompt wiped out by the selection removal.
    # NOTE(review): always uses sys.ps1, even when the shell is in a
    # sys.ps2 continuation -- confirm this is intended.
    self.insertPlainText(sys.ps1)
    self.__clearLine()
    self.__insertText(self.history[self.pointer])
def focusNextPrevChild(self, next):
    """Suppress Tab-driven focus changes while a multi-line command is
    pending (self.more); otherwise defer to QTextEdit."""
    return 0 if (next and self.more) \
        else QtGui.QTextEdit.focusNextPrevChild(self, next)
def mousePressEvent(self, e):
    """Record that a left-click may start a selection, then let
    QTextEdit handle the event normally."""
    if e.button() == QtCore.Qt.LeftButton:
        self.selectMode = True
    QtGui.QTextEdit.mousePressEvent(self, e)
def hide(self):
    """hide() -> None

    Restore the real stdin/stdout/stderr when the parent window is
    hidden (undoes the redirection installed by show()).
    """
    for stream in ('stdout', 'stderr', 'stdin'):
        setattr(sys, stream, getattr(sys, '__%s__' % stream))
def show(self):
    """show() -> None

    Redirect all interactive input/output (stdin, stdout, stderr) to
    this widget and grab keyboard focus; hide() restores the originals.
    """
    sys.stdout = sys.stderr = sys.stdin = self
    self.setFocus()
def saveSession(self, fileName):
    """saveSession(fileName: str) -> None

    Write the entire console transcript to *fileName*.

    Uses a context manager so the file handle is closed even if the
    write raises; the original open()/close() pair leaked the handle
    on error.
    """
    with open(str(fileName), 'w') as output:
        output.write(self.toPlainText())
def restart(self, locals=None):
    """restart(locals=None) -> None

    Restart a new session: wipe the widget contents and re-initialize
    the shell with the optional *locals* namespace.
    """
    self.clear()
    self.reset(locals)
def contentsContextMenuEvent(self, ev):
    """Suppress the right-button context menu by consuming the event."""
    return None
| |
#!/usr/bin/env python3
import os
import shutil
import unittest
import create_and_write_file
import mvtools_test_fixture
import path_utils
import mvtools_envvars
import dsl_type20
def getcontents(filename):
    """Return the full contents of *filename* as a string, or None when
    the file does not exist."""
    if not os.path.exists(filename):
        return None
    with open(filename) as f:
        return f.read()
class DSLType20Test(unittest.TestCase):
def setUp(self):
    """Snapshot the process environment, then run the delegated setup;
    abort the test (after cleanup) if either step fails."""
    self.mvtools_envvars_inst = mvtools_envvars.Mvtools_Envvars()
    ok, msg = self.mvtools_envvars_inst.make_copy_environ()
    if not ok:
        self.tearDown()
        self.fail(msg)
    ok, msg = self.delegate_setUp()
    if not ok:
        self.tearDown()
        self.fail(msg)
def delegate_setUp(self):
    """Build the on-disk fixtures for every test.

    Creates the scratch folder, checks the two reserved test envvars are
    undefined, composes a set of valid (ok) and invalid (fail) DSL
    documents and writes each one to its own file.  Returns (bool, str):
    success flag plus an error message on failure.
    """
    v, r = mvtools_test_fixture.makeAndGetTestFolder("dsl_type20_test")
    if not v:
        return v, r
    self.test_base_dir = r[0]
    self.test_dir = r[1]
    # Both reserved envvars must be free so tests can define them.
    v, r = mvtools_envvars.mvtools_envvar_read_test_dsltype20_reserved_1()
    if v:
        return False, "DSLType20's first test envvar is defined. This test requires it to be undefined."
    self.reserved_test_env_var_1 = "$MVTOOLS_TEST_DSLTYPE20_RESERVED_1"
    v, r = mvtools_envvars.mvtools_envvar_read_test_dsltype20_reserved_2()
    if v:
        return False, "DSLType20's second test envvar is defined. This test requires it to be undefined."
    self.reserved_test_env_var_2 = "$MVTOOLS_TEST_DSLTYPE20_RESERVED_2"
    self.reserved_path_with_user_1 = "/tmp/folder"
    # ok_1: variables with every option arity (none / flag / valued / mixed).
    self.contents_cfg_test_ok_1 = "var1 = \"val1\"\n"
    self.contents_cfg_test_ok_1 += "var2 {opt1} = \"val2\"\n"
    self.contents_cfg_test_ok_1 += "var3 {opt2: \"val3\"} = \"val4\"\n"
    self.contents_cfg_test_ok_1 += "var4 {opt3: \"val5\" / opt4: \"val6\"} = \"val7\"\n"
    self.contents_cfg_test_ok_1 += "var5 {opt5 / opt6: \"val8\" / opt7: \"val9\"} = \"val10\"\n"
    self.cfg_test_ok_1 = path_utils.concat_path(self.test_dir, "test_ok_1.t20")
    # ok_2: paths, envvar values and repeated option names.
    self.contents_cfg_test_ok_2 = "var1 = \"val1\"\n"
    self.contents_cfg_test_ok_2 += "var2 = \"a/path/valid1\"\n"
    self.contents_cfg_test_ok_2 += "var3 {opt1 / opt2: \"a/path/valid2\"} = \"a/path/valid3\"\n"
    self.contents_cfg_test_ok_2 += "var4 = \"$SOME_ENV_VAR\"\n"
    self.contents_cfg_test_ok_2 += "var5 {r1: \"\" / r1} = \"repeated1\"\n"
    self.contents_cfg_test_ok_2 += "var5 {r2 / r2} = \"repeated2\"\n"
    self.cfg_test_ok_2 = path_utils.concat_path(self.test_dir, "test_ok_2.t20")
    # ok_3: minimal two-variable document.
    self.contents_cfg_test_ok_3 = "var1 = \"val1\"\n"
    self.contents_cfg_test_ok_3 += "var2 = \"val2\"\n"
    self.cfg_test_ok_3 = path_utils.concat_path(self.test_dir, "test_ok_3.t20")
    # ok_4: envvar and user-path expansion inputs.
    self.contents_cfg_test_ok_4 = ("var1 = \"%s\"\n") % self.reserved_test_env_var_1
    self.contents_cfg_test_ok_4 += ("var2 {opt1: \"%s\"} = \"val1\"\n") % self.reserved_test_env_var_2
    self.contents_cfg_test_ok_4 += ("var3 = \"%s\"\n") % self.reserved_path_with_user_1
    self.cfg_test_ok_4 = path_utils.concat_path(self.test_dir, "test_ok_4.t20")
    # ok_5: trailing comment.
    self.contents_cfg_test_ok_5 = "var1 = \"val1\" # comment\n"
    self.cfg_test_ok_5 = path_utils.concat_path(self.test_dir, "test_ok_5.t20")
    # fail_1..fail_5: malformed documents that must be rejected.
    self.contents_cfg_test_fail_1 = "var1 = val1\n"
    self.cfg_test_fail_1 = path_utils.concat_path(self.test_dir, "test_fail_1.t20")
    self.contents_cfg_test_fail_2 = "var1\n"
    self.cfg_test_fail_2 = path_utils.concat_path(self.test_dir, "test_fail_2.t20")
    self.contents_cfg_test_fail_3 = "{var1 = \"val1\"}\n"
    self.cfg_test_fail_3 = path_utils.concat_path(self.test_dir, "test_fail_3.t20")
    self.contents_cfg_test_fail_4 = "{fakeopt} var1 = \"val1\"\n"
    self.cfg_test_fail_4 = path_utils.concat_path(self.test_dir, "test_fail_4.t20")
    self.contents_cfg_test_fail_5 = "var1 {opt1: \"val1} = \"val2\"\n"
    self.cfg_test_fail_5 = path_utils.concat_path(self.test_dir, "test_fail_5.t20")
    create_and_write_file.create_file_contents(self.cfg_test_ok_1, self.contents_cfg_test_ok_1)
    create_and_write_file.create_file_contents(self.cfg_test_ok_2, self.contents_cfg_test_ok_2)
    create_and_write_file.create_file_contents(self.cfg_test_ok_3, self.contents_cfg_test_ok_3)
    create_and_write_file.create_file_contents(self.cfg_test_ok_4, self.contents_cfg_test_ok_4)
    create_and_write_file.create_file_contents(self.cfg_test_ok_5, self.contents_cfg_test_ok_5)
    create_and_write_file.create_file_contents(self.cfg_test_fail_1, self.contents_cfg_test_fail_1)
    create_and_write_file.create_file_contents(self.cfg_test_fail_2, self.contents_cfg_test_fail_2)
    create_and_write_file.create_file_contents(self.cfg_test_fail_3, self.contents_cfg_test_fail_3)
    create_and_write_file.create_file_contents(self.cfg_test_fail_4, self.contents_cfg_test_fail_4)
    create_and_write_file.create_file_contents(self.cfg_test_fail_5, self.contents_cfg_test_fail_5)
    return True, ""
def tearDown(self):
    """Remove the scratch folder and restore the saved environment."""
    shutil.rmtree(self.test_base_dir)
    ok, msg = self.mvtools_envvars_inst.restore_copy_environ()
    if not ok:
        self.fail(msg)
def parse_test_aux(self, filename, _dsl_t20_opts):
    """Parse *filename* with a fresh DSLType20 built from the given
    options; return only the success flag."""
    contents = getcontents(filename)
    if contents is None:
        self.fail("Unable to open and read file [%s]" % filename)
    v, r = dsl_type20.DSLType20(_dsl_t20_opts).parse(contents)
    return v
# --- sanitize_line: '#'-comment stripping and whitespace trimming ---
def testDslType20_SanitizeLine(self):
    """sanitize_line removes '#' comments and trims surrounding blanks;
    '//' is not a comment marker."""
    dsl = dsl_type20.DSLType20(dsl_type20.DSLType20_Options())
    self.assertEqual(dsl.sanitize_line(""), "")
    self.assertEqual(dsl.sanitize_line("abc"), "abc")
    self.assertEqual(dsl.sanitize_line("#abc"), "")
    self.assertEqual(dsl.sanitize_line("abc#def"), "abc")
    self.assertEqual(dsl.sanitize_line(" abc "), "abc")
    self.assertEqual(dsl.sanitize_line(" # abc "), "")
    self.assertEqual(dsl.sanitize_line(" abc #"), "abc")
    self.assertEqual(dsl.sanitize_line(" abc // def # xyz"), "abc // def")
# --- parse: the ok_* fixtures must parse, the fail_* fixtures must not ---
def testDslType20_Parse1(self):
    self.assertTrue(self.parse_test_aux(self.cfg_test_ok_1, dsl_type20.DSLType20_Options()))
def testDslType20_Parse2(self):
    self.assertTrue(self.parse_test_aux(self.cfg_test_ok_2, dsl_type20.DSLType20_Options()))
def testDslType20_Parse3(self):
    self.assertFalse(self.parse_test_aux(self.cfg_test_fail_1, dsl_type20.DSLType20_Options()))
def testDslType20_Parse4(self):
    self.assertFalse(self.parse_test_aux(self.cfg_test_fail_2, dsl_type20.DSLType20_Options()))
def testDslType20_Parse5(self):
    self.assertFalse(self.parse_test_aux(self.cfg_test_fail_3, dsl_type20.DSLType20_Options()))
def testDslType20_Parse6(self):
    self.assertFalse(self.parse_test_aux(self.cfg_test_fail_4, dsl_type20.DSLType20_Options()))
def testDslType20_Parse7(self):
    self.assertFalse(self.parse_test_aux(self.cfg_test_fail_5, dsl_type20.DSLType20_Options()))
def testDslType20_Parse8(self):
    self.assertTrue(self.parse_test_aux(self.cfg_test_ok_5, dsl_type20.DSLType20_Options()))
def testDslType20_Parse9(self):
    """A file whose directory and name are both a single blank must
    still be readable and parseable."""
    blanksub = path_utils.concat_path(self.test_dir, " ")
    self.assertFalse(os.path.exists(blanksub))
    os.mkdir(blanksub)
    self.assertTrue(os.path.exists(blanksub))
    blankfile = path_utils.concat_path(blanksub, " ")
    self.assertFalse(os.path.exists(blankfile))
    create_and_write_file.create_file_contents(blankfile, self.contents_cfg_test_ok_1)
    self.assertTrue(os.path.exists(blankfile))
    self.assertTrue(self.parse_test_aux(blankfile, dsl_type20.DSLType20_Options()))
# --- get_vars / get_all_vars: (name, value, [(opt, optval), ...]) tuples ---
def testDslType20_GetVars1(self):
    dsl = dsl_type20.DSLType20(dsl_type20.DSLType20_Options())
    v, r = dsl.parse(self.contents_cfg_test_ok_1)
    self.assertTrue(v)
    self.assertEqual(dsl.get_vars("var0"), [])
    self.assertEqual(dsl.get_vars("var1"), [("var1", "val1", [])])
    self.assertEqual(dsl.get_vars("var2"), [("var2", "val2", [("opt1", None)])])
    self.assertEqual(dsl.get_vars("var3"), [("var3", "val4", [("opt2", "val3")])])
    self.assertEqual(dsl.get_vars("var4"), [("var4", "val7", [("opt3", "val5"), ("opt4", "val6")])])
    self.assertEqual(dsl.get_vars("var5"), [("var5", "val10", [("opt5", None), ("opt6", "val8"), ("opt7", "val9")])])
    self.assertEqual(dsl.get_vars("var6"), [])
    self.assertTrue(dsl_type20.hasopt_var(dsl.get_vars("var2")[0], "opt1"))
    self.assertTrue(dsl_type20.hasopt_var(dsl.get_vars("var3")[0], "opt2"))
    self.assertFalse(dsl_type20.hasopt_var(dsl.get_vars("var3")[0], "opt3"))
    self.assertTrue(dsl_type20.hasopt_opts(dsl.get_vars("var2")[0][2], "opt1"))
    self.assertTrue(dsl_type20.hasopt_opts(dsl.get_vars("var3")[0][2], "opt2"))
    self.assertEqual(dsl_type20.getopts(dsl.get_vars("var2")[0], "opt1"), [("opt1", None)])
def testDslType20_GetVars2(self):
    """Repeated variable names and repeated option names are preserved."""
    dsl = dsl_type20.DSLType20(dsl_type20.DSLType20_Options())
    v, r = dsl.parse(self.contents_cfg_test_ok_2)
    self.assertTrue(v)
    self.assertEqual(dsl.get_vars("var0"), [])
    self.assertEqual(dsl.get_vars("var1"), [("var1", "val1", [])])
    self.assertEqual(dsl.get_vars("var2"), [("var2", "a/path/valid1", [])])
    self.assertEqual(dsl.get_vars("var3"), [("var3", "a/path/valid3", [("opt1", None), ("opt2", "a/path/valid2")])])
    self.assertEqual(dsl.get_vars("var4"), [("var4", "$SOME_ENV_VAR", [])])
    self.assertEqual(dsl.get_vars("var5"), [("var5", "repeated1", [("r1", ""), ("r1", None)]), ("var5", "repeated2", [("r2", None), ("r2", None)])])
    self.assertEqual(dsl.get_vars("var6"), [])
    self.assertEqual(dsl_type20.getopts(dsl.get_vars("var5")[0], "r1"), [("r1", ""), ("r1", None)])
    self.assertEqual(dsl_type20.getopts(dsl.get_vars("var5")[1], "r1"), [])
    self.assertEqual(dsl_type20.getopts(dsl.get_vars("var5")[1], "r2"), [("r2", None), ("r2", None)])
def testDslType20_GetVars3(self):
    dsl = dsl_type20.DSLType20(dsl_type20.DSLType20_Options())
    v, r = dsl.parse(self.contents_cfg_test_ok_3)
    self.assertTrue(v)
    self.assertEqual(dsl.get_all_vars(), [("var1", "val1", []), ("var2", "val2", [])])
def testDslType20_GetVars4(self):
    """With expansion enabled, envvar and user-path values are resolved."""
    os.environ[ (self.reserved_test_env_var_1[1:]) ] = "test-value-1"
    os.environ[ (self.reserved_test_env_var_2[1:]) ] = "test-value-2"
    dsl = dsl_type20.DSLType20(dsl_type20.DSLType20_Options(True, True))
    v, r = dsl.parse(self.contents_cfg_test_ok_4)
    self.assertTrue(v)
    self.assertEqual(dsl.get_all_vars(), [("var1", "test-value-1", []), ("var2", "val1", [("opt1","test-value-2")]), ("var3", path_utils.concat_path(os.path.expanduser("/tmp/folder")), [])])
def testDslType20_GetVars5(self):
    dsl = dsl_type20.DSLType20(dsl_type20.DSLType20_Options())
    self.assertTrue(dsl.add_context("ctx1", [("opt1", "val1")])[0])
    self.assertTrue(dsl.add_var("var1", "val2", [ ], "ctx1" )[0])
    self.assertEqual(dsl.get_all_vars("ctx1"), [("var1", "val2", [])])
def testDslType20_GetVars6(self):
    """vars_auto_ctx_options folds the context's options into each var."""
    dsl = dsl_type20.DSLType20(dsl_type20.DSLType20_Options(vars_auto_ctx_options=True))
    self.assertTrue(dsl.add_context("ctx1", [("opt1", "val1")])[0])
    self.assertTrue(dsl.add_var("var1", "val2", [ ], "ctx1" )[0])
    self.assertEqual(dsl.get_all_vars("ctx1"), [("var1", "val2", [("opt1", "val1")])])
# --- inline parsing basics and the variable_decorator option ---
def testDslType20_TestVanilla1(self):
    dsl = dsl_type20.DSLType20(dsl_type20.DSLType20_Options())
    v, r = dsl.parse("var1 = \"val1\"")
    self.assertTrue(v)
    self.assertEqual(dsl.get_all_vars(), [("var1", "val1", [])])
def testDslType20_TestVanilla2(self):
    dsl = dsl_type20.DSLType20(dsl_type20.DSLType20_Options())
    v, r = dsl.parse("var1 {opt1} = \"val1\"")
    self.assertTrue(v)
    self.assertEqual(dsl.get_all_vars(), [("var1", "val1", [("opt1", None)])])
def testDslType20_TestParseDecoratedVar1(self):
    """A decorated line fails without the matching decorator option."""
    decorated_var = "* var1 {opt1} = \"val1\""
    dsl1 = dsl_type20.DSLType20(dsl_type20.DSLType20_Options())
    v, r = dsl1.parse(decorated_var)
    self.assertFalse(v)
    dsl2 = dsl_type20.DSLType20(dsl_type20.DSLType20_Options(variable_decorator = "* "))
    v, r = dsl2.parse(decorated_var)
    self.assertTrue(v)
    self.assertEqual(dsl2.get_all_vars(), [("var1", "val1", [("opt1", None)])])
def testDslType20_TestParseDecoratedVar2(self):
    dsl = dsl_type20.DSLType20(dsl_type20.DSLType20_Options(variable_decorator = "**"))
    v, r = dsl.parse("** var1 {opt1} = \"val1\"")
    self.assertTrue(v)
def testDslType20_TestParseDecoratedVarFail1(self):
    dsl = dsl_type20.DSLType20(dsl_type20.DSLType20_Options(variable_decorator = "* "))
    v, r = dsl.parse("*var1 {opt1} = \"val1\"")
    self.assertFalse(v)
def testDslType20_TestParseDecoratedVarFail2(self):
    dsl = dsl_type20.DSLType20(dsl_type20.DSLType20_Options(variable_decorator = "* "))
    v, r = dsl.parse("* ")
    self.assertFalse(v)
def testDslType20_TestParseDecoratedVarFail3(self):
    dsl = dsl_type20.DSLType20(dsl_type20.DSLType20_Options(variable_decorator = "**"))
    v, r = dsl.parse("* var1 {opt1} = \"val1\"")
    self.assertFalse(v)
# --- quote escaping, special characters and option-list shapes ---
def testDslType20_TestNonEscapedQuoteVarVal(self):
    dsl = dsl_type20.DSLType20(dsl_type20.DSLType20_Options())
    v, r = dsl.parse("var1 = \"val \"1\"\"")
    self.assertFalse(v)
def testDslType20_TestNonEscapedQuoteVarOptVal(self):
    dsl = dsl_type20.DSLType20(dsl_type20.DSLType20_Options())
    v, r = dsl.parse("var1 {opt1: \"val \"2\"\"} = \"val \\\"1\\\"\"")
    self.assertFalse(v)
def testDslType20_TestSlashInsideVarOptVal1(self):
    """Escaped quotes, backslashes and '/' inside values must survive."""
    dsl = dsl_type20.DSLType20(dsl_type20.DSLType20_Options())
    v, r = dsl.parse("var1 { _opt-1 : \"val \\\\ = \\\"1/2\\\"\" / opt2 : \"val = \\\"2/4\\\"\" } = \"val = \\\"1\\\"\" ")
    self.assertTrue(v)
    self.assertEqual(dsl.get_all_vars(), [("var1", "val = \"1\"", [("_opt-1", "val \\ = \"1/2\""), ("opt2", "val = \"2/4\"")])])
def testDslType20_TestCommentInsideVarOptVal1(self):
    """A '#' inside a quoted value (URL) is not a comment."""
    dsl = dsl_type20.DSLType20(dsl_type20.DSLType20_Options())
    v, r = dsl.parse("var1 { remote_link : \"https://www.url.net/folder/whatever\" } = \"svn.py\"")
    self.assertTrue(v)
    self.assertEqual(dsl.get_all_vars(), [("var1", "svn.py", [("remote_link", "https://www.url.net/folder/whatever")])])
def testDslType20_TestBlankAndNoneOption(self):
    dsl = dsl_type20.DSLType20(dsl_type20.DSLType20_Options())
    v, r = dsl.parse("var1 {opt1: \"\" / opt2} = \"val1\"")
    self.assertTrue(v)
    self.assertEqual(dsl.get_all_vars(), [("var1", "val1", [("opt1", ""), ("opt2", None)])])
def testDslType20_TestSpacedOptionValueless1(self):
    dsl = dsl_type20.DSLType20(dsl_type20.DSLType20_Options())
    v, r = dsl.parse(" var1 { the_option } = \"val1\" ")
    self.assertTrue(v)
    self.assertEqual(dsl.get_all_vars(), [("var1", "val1", [("the_option", None)])])
def testDslType20_TestSpacedOptionValueless2(self):
    dsl = dsl_type20.DSLType20(dsl_type20.DSLType20_Options())
    v, r = dsl.parse(" var1 { the_option1 / the_option2 } = \"val1\" ")
    self.assertTrue(v)
    self.assertEqual(dsl.get_all_vars(), [("var1", "val1", [("the_option1", None), ("the_option2", None)])])
def testDslType20_TestOptionsAlternated1(self):
    dsl = dsl_type20.DSLType20(dsl_type20.DSLType20_Options())
    v, r = dsl.parse("var1 {opt1 / opt2: \"val2\" / opt3 / opt4: \"val3\"} = \"val1\"")
    self.assertTrue(v)
    self.assertEqual(dsl.get_all_vars(), [("var1", "val1", [("opt1", None), ("opt2", "val2"), ("opt3", None), ("opt4", "val3")])])
def testDslType20_TestOptionsAlternated2(self):
    dsl = dsl_type20.DSLType20(dsl_type20.DSLType20_Options())
    v, r = dsl.parse("var1 {opt2: \"val2\" / opt1 / opt4: \"val3\" / opt3} = \"val1\"")
    self.assertTrue(v)
    self.assertEqual(dsl.get_all_vars(), [("var1", "val1", [("opt2", "val2"), ("opt1", None), ("opt4", "val3"), ("opt3", None)])])
# --- malformed inputs and whitespace preservation inside values ---
def testDslType20_TestMalformedOptName(self):
    dsl = dsl_type20.DSLType20(dsl_type20.DSLType20_Options())
    v, r = dsl.parse("var1 {opt=1: \"optval\"} = \"val1\"")
    self.assertFalse(v)
def testDslType20_TestMalformedValueQuotesEscaped1(self):
    dsl = dsl_type20.DSLType20(dsl_type20.DSLType20_Options())
    v, r = dsl.parse("var1 = \"val1\\\"")
    self.assertFalse(v)
def testDslType20_TestMalformedValueQuotesEscaped2(self):
    dsl = dsl_type20.DSLType20(dsl_type20.DSLType20_Options())
    v, r = dsl.parse("var1 = \\\"val1\"")
    self.assertFalse(v)
def testDslType20_TestVarValueParsing1(self):
    """Leading/trailing/inner spaces inside quoted values are kept."""
    dsl = dsl_type20.DSLType20(dsl_type20.DSLType20_Options())
    v, r = dsl.parse("var1 = \" val1\"")
    self.assertTrue(v)
    self.assertEqual(dsl.get_all_vars(), [("var1", " val1", [])])
def testDslType20_TestVarValueParsing2(self):
    dsl = dsl_type20.DSLType20(dsl_type20.DSLType20_Options())
    v, r = dsl.parse("var1 = \"val1 \"")
    self.assertTrue(v)
    self.assertEqual(dsl.get_all_vars(), [("var1", "val1 ", [])])
def testDslType20_TestVarValueParsing3(self):
    dsl = dsl_type20.DSLType20(dsl_type20.DSLType20_Options())
    v, r = dsl.parse("var1 = \"val1 val2\"")
    self.assertTrue(v)
    self.assertEqual(dsl.get_all_vars(), [("var1", "val1 val2", [])])
def testDslType20_TestOptValueParsing1(self):
    dsl = dsl_type20.DSLType20(dsl_type20.DSLType20_Options())
    v, r = dsl.parse("var1 {opt1: \" val2\"} = \"val1\"")
    self.assertTrue(v)
    self.assertEqual(dsl.get_all_vars(), [("var1", "val1", [("opt1", " val2")])])
def testDslType20_TestOptValueParsing2(self):
    dsl = dsl_type20.DSLType20(dsl_type20.DSLType20_Options())
    v, r = dsl.parse("var1 {opt1: \"val2 \"} = \"val1\"")
    self.assertTrue(v)
    self.assertEqual(dsl.get_all_vars(), [("var1", "val1", [("opt1", "val2 ")])])
def testDslType20_TestOptValueParsing3(self):
    dsl = dsl_type20.DSLType20(dsl_type20.DSLType20_Options())
    v, r = dsl.parse("var1 {opt1: \"val2 val3\"} = \"val1\"")
    self.assertTrue(v)
    self.assertEqual(dsl.get_all_vars(), [("var1", "val1", [("opt1", "val2 val3")])])
def testDslType20_TestUnspacing(self):
    """A fully unspaced line must still parse."""
    dsl = dsl_type20.DSLType20(dsl_type20.DSLType20_Options())
    v, r = dsl.parse("var1{opt1/opt2/opt3:\"val\\\"2\"}=\"val1\"")
    self.assertTrue(v)
    self.assertEqual(dsl.get_all_vars(), [("var1", "val1", [("opt1", None), ("opt2", None), ("opt3", "val\"2")])])
# --- stray tokens anywhere on the line must be rejected ---
def testDslType20_TestLeftoversFail1(self):
    dsl = dsl_type20.DSLType20(dsl_type20.DSLType20_Options())
    v, r = dsl.parse("var1 abc = \"val1\"")
    self.assertFalse(v)
def testDslType20_TestLeftoversFail2(self):
    dsl = dsl_type20.DSLType20(dsl_type20.DSLType20_Options())
    v, r = dsl.parse("var1 = abc \"val1\"")
    self.assertFalse(v)
def testDslType20_TestLeftoversFail3(self):
    dsl = dsl_type20.DSLType20(dsl_type20.DSLType20_Options())
    v, r = dsl.parse("var1 = \"val1\" abc")
    self.assertFalse(v)
def testDslType20_TestLeftoversFail4(self):
    dsl = dsl_type20.DSLType20(dsl_type20.DSLType20_Options())
    v, r = dsl.parse("var1 = {opt1} \"val1\"")
    self.assertFalse(v)
def testDslType20_TestLeftoversFail5(self):
    dsl = dsl_type20.DSLType20(dsl_type20.DSLType20_Options())
    v, r = dsl.parse("var1 {opt1 opt2} = \"val1\"")
    self.assertFalse(v)
def testDslType20_TestLeftoversFail6(self):
    dsl = dsl_type20.DSLType20(dsl_type20.DSLType20_Options())
    v, r = dsl.parse("var1 {opt1 /} = \"val1\"")
    self.assertFalse(v)
def testDslType20_TestLeftoversFail7(self):
    dsl = dsl_type20.DSLType20(dsl_type20.DSLType20_Options())
    v, r = dsl.parse("var1 {{opt1} = \"val1\"")
    self.assertFalse(v)
def testDslType20_TestLeftoversFail8(self):
    dsl = dsl_type20.DSLType20(dsl_type20.DSLType20_Options())
    v, r = dsl.parse("var1 {opt1}} = \"val1\"")
    self.assertFalse(v)
def testDslType20_TestLeftoversFail9(self):
    dsl = dsl_type20.DSLType20(dsl_type20.DSLType20_Options())
    v, r = dsl.parse("var1 {opt1} abc = \"val1\"")
    self.assertFalse(v)
def testDslType20_TestLeftoversFail10(self):
    dsl = dsl_type20.DSLType20(dsl_type20.DSLType20_Options())
    v, r = dsl.parse("var1 == \"val1\"")
    self.assertFalse(v)
def testDslType20_TestLeftoversFail11(self):
    dsl = dsl_type20.DSLType20(dsl_type20.DSLType20_Options())
    v, r = dsl.parse("var1 {opt1} == \"val1\"")
    self.assertFalse(v)
def testDslType20_TestExceedMaxNumberOptionsFail(self):
    # max_number_options caps how many options a single variable accepts.
    dsl = dsl_type20.DSLType20(dsl_type20.DSLType20_Options())
    dsl.max_number_options = 2
    v, r = dsl.parse("var1 {opt1 / opt2 / opt3} == \"val1\"")
    self.assertFalse(v)
# --- contexts: "[ @name {opts} ... ]" blocks and option inheritance ---
def testDslType20_TestNewContextVanilla1(self):
    dsl = dsl_type20.DSLType20(dsl_type20.DSLType20_Options())
    v, r = dsl.parse("var1 = \"val1\"\n[\n@ctx1\n]")
    self.assertTrue(v)
    self.assertEqual(dsl.get_all_vars(), [("var1", "val1", [])])
def testDslType20_TestNewContextVanilla2(self):
    dsl = dsl_type20.DSLType20(dsl_type20.DSLType20_Options())
    v, r = dsl.parse("var1 = \"val1\"\n[\n@ctx1\nvar2 = \"val2\"\n]")
    self.assertTrue(v)
    self.assertEqual(dsl.get_all_vars(), [("var1", "val1", [])])
    self.assertEqual(dsl.get_all_vars("ctx1"), [("var2", "val2", [])])
def testDslType20_TestNewContextVanilla3(self):
    dsl = dsl_type20.DSLType20(dsl_type20.DSLType20_Options())
    v, r = dsl.parse("var1 = \"val1\"\n[\n@ctx1\nvar2 {opt1 / opt2} = \"val2\"\n]\nvar3 = \"val3\"\n[\n@ctx2\nvar4 = \"val4\"\n]")
    self.assertTrue(v)
    self.assertEqual(dsl.get_all_vars(), [("var1", "val1", []), ("var3", "val3", [])])
    self.assertEqual(dsl.get_all_vars("ctx1"), [("var2", "val2", [("opt1", None), ("opt2", None)])])
    self.assertEqual(dsl.get_all_vars("ctx2"), [("var4", "val4", [])])
def testDslType20_TestNewContextWithOptions1(self):
    dsl = dsl_type20.DSLType20(dsl_type20.DSLType20_Options(vars_auto_ctx_options=True))
    v, r = dsl.parse("[\n@ctx1 {opt1}\nvar1 = \"val1\"\n]")
    self.assertEqual(dsl.get_vars("var1", "ctx1"), [("var1", "val1", [("opt1", None)])])
    self.assertTrue(v)
def testDslType20_TestNewContextWithOptions2(self):
    dsl = dsl_type20.DSLType20(dsl_type20.DSLType20_Options(vars_auto_ctx_options=True))
    v, r = dsl.parse("[\n@ctx1 {opt2: \"val2\"}\nvar1 = \"val1\"\n]")
    self.assertTrue(v)
    self.assertEqual(dsl.get_vars("var1", "ctx1"), [("var1", "val1", [("opt2", "val2")])])
def testDslType20_TestNewContextWithOptions3(self):
    """Context options come before the variable's own options."""
    dsl = dsl_type20.DSLType20(dsl_type20.DSLType20_Options(vars_auto_ctx_options=True))
    v, r = dsl.parse("[\n@ctx1 {opt3: \"val3\"}\nvar1 {opt4: \"val4\"} = \"val1\"\n]")
    self.assertTrue(v)
    self.assertEqual(dsl.get_vars("var1", "ctx1"), [("var1", "val1", [("opt3", "val3"), ("opt4", "val4")])])
def testDslType20_TestNewContextWithOptions4(self):
    dsl = dsl_type20.DSLType20(dsl_type20.DSLType20_Options(vars_auto_ctx_options=True))
    v, r = dsl.parse("[\n@ctx1 {opt1: \"val1\"}\nvar1 {opt1: \"val3\"} = \"val2\"\n]")
    self.assertTrue(v)
    self.assertEqual(dsl.get_vars("var1", "ctx1"), [("var1", "val2", [("opt1", "val1"), ("opt1", "val3")])])
def testDslType20_TestNewContextWithOptions5(self):
    """With allow_dupes=False the variable's option wins over the context's."""
    dsl = dsl_type20.DSLType20(dsl_type20.DSLType20_Options(allow_dupes=False))
    v, r = dsl.parse("[\n@ctx1 {opt1: \"val1\"}\nvar1 {opt1: \"val3\"} = \"val2\"\n]")
    self.assertTrue(v)
    self.assertEqual(dsl.get_vars("var1", "ctx1"), [("var1", "val2", [("opt1", "val3")])])
def testDslType20_TestNewContextWithOptions6(self):
    test_envvar_value = "dsltype20-test-value"
    os.environ[ (self.reserved_test_env_var_1[1:]) ] = test_envvar_value
    dsl = dsl_type20.DSLType20(dsl_type20.DSLType20_Options(vars_auto_ctx_options=True, expand_envvars=True))
    v, r = dsl.parse("[\n@ctx1 {opt1: \"%s\"}\nvar1 = \"val2\"\n]" % self.reserved_test_env_var_1)
    self.assertEqual(dsl.get_vars("var1", "ctx1"), [("var1", "val2", [("opt1", test_envvar_value)])])
    self.assertTrue(v)
def testDslType20_TestNewContextWithOptions7(self):
    test_envvar_value = "dsltype20-test-value"
    os.environ[ (self.reserved_test_env_var_1[1:]) ] = test_envvar_value
    dsl = dsl_type20.DSLType20(dsl_type20.DSLType20_Options(vars_auto_ctx_options=True, expand_envvars=False))
    v, r = dsl.parse("[\n@ctx1 {opt1: \"%s\"}\nvar1 = \"val2\"\n]" % self.reserved_test_env_var_1)
    self.assertEqual(dsl.get_vars("var1", "ctx1"), [("var1", "val2", [("opt1", self.reserved_test_env_var_1)])])
    self.assertTrue(v)
# --- malformed context blocks and lookups into unknown contexts ---
def testDslType20_TestNewContextFail1(self):
    dsl = dsl_type20.DSLType20(dsl_type20.DSLType20_Options())
    v, r = dsl.parse("var1 = \"val1\"\n@ctx1\nvar2 = \"val2\"\n]")
    self.assertFalse(v)
def testDslType20_TestNewContextFail2(self):
    dsl = dsl_type20.DSLType20(dsl_type20.DSLType20_Options())
    v, r = dsl.parse("var1 = \"val1\"\n[\n[\n@ctx1\n]")
    self.assertFalse(v)
def testDslType20_TestNewContextFail3(self):
    dsl = dsl_type20.DSLType20(dsl_type20.DSLType20_Options())
    v, r = dsl.parse("var1 = \"val1\"\n[\n]")
    self.assertFalse(v)
def testDslType20_TestNewContextFail4(self):
    dsl = dsl_type20.DSLType20(dsl_type20.DSLType20_Options())
    v, r = dsl.parse("var1 = \"val1\"\n[\n")
    self.assertFalse(v)
def testDslType20_TestNewContextFail5(self):
    dsl = dsl_type20.DSLType20(dsl_type20.DSLType20_Options())
    v, r = dsl.parse("var1 = \"val1\"\n[\n@@ctx1\n]")
    self.assertFalse(v)
def testDslType20_TestNewContextFail6(self):
    dsl = dsl_type20.DSLType20(dsl_type20.DSLType20_Options())
    v, r = dsl.parse("var1 = \"val1\"\n[\n@ctx 1\n]")
    self.assertFalse(v)
def testDslType20_TestNewContextFail7(self):
    dsl = dsl_type20.DSLType20(dsl_type20.DSLType20_Options())
    v, r = dsl.parse("[\n@ctx1\nvar1 = \"val1\"\n")
    self.assertFalse(v)
def testDslType20_TestNewContextFail8(self):
    dsl = dsl_type20.DSLType20(dsl_type20.DSLType20_Options())
    v, r = dsl.parse("\n[\nvar1 = \"val1\"\n]\n")
    self.assertFalse(v)
def testDslType20_TestNewContextFail9(self):
    dsl = dsl_type20.DSLType20(dsl_type20.DSLType20_Options())
    v, r = dsl.parse("\n[\n]\n")
    self.assertFalse(v)
def testDslType20_TestContextFail1(self):
    # Internal API: adding a variable to an unknown context must fail.
    dsl = dsl_type20.DSLType20(dsl_type20.DSLType20_Options())
    v, r = dsl._parse_variable("var1 = \"val1\"", "nonexistent context")
    self.assertFalse(v)
def testDslType20_TestContextReopenFail(self):
    dsl = dsl_type20.DSLType20(dsl_type20.DSLType20_Options())
    v, r = dsl.parse("[\n@ctx1\nvar1 = \"val1\"\n]\nvar2 = \"val2\"\n[\n@ctx1\nvar3 = \"val3\"\n]")
    self.assertFalse(v)
def testDslType20_TestContextGetAllVarsFail1(self):
    # Unknown context name yields None, not an empty list.
    dsl = dsl_type20.DSLType20(dsl_type20.DSLType20_Options())
    v, r = dsl.parse("[\n@ctx1\nvar1 = \"val1\"\n]")
    self.assertTrue(v)
    self.assertEqual(dsl.get_all_vars("ctx1"), [("var1", "val1", [])])
    self.assertEqual(dsl.get_all_vars("nonexistent context"), None)
def testDslType20_TestContextGetVarsFail1(self):
    dsl = dsl_type20.DSLType20(dsl_type20.DSLType20_Options())
    v, r = dsl.parse("[\n@ctx1\nvar1 = \"val1\"\n]")
    self.assertTrue(v)
    self.assertEqual(dsl.get_vars("var1", None), [])
    self.assertEqual(dsl.get_vars("var1", "ctx1"), [("var1", "val1", [])])
    self.assertEqual(dsl.get_vars("var1", "nonexistent context"), None)
def testDslType20_TestAddContext1(self):
    """A plain alphanumeric context name is accepted."""
    d = dsl_type20.DSLType20(dsl_type20.DSLType20_Options())
    ok, _ = d.add_context("ok", [])
    self.assertTrue(ok)

def testDslType20_TestAddContext2(self):
    """A context name ending in a digit is accepted."""
    d = dsl_type20.DSLType20(dsl_type20.DSLType20_Options())
    ok, _ = d.add_context("ok1", [])
    self.assertTrue(ok)

def testDslType20_TestAddContext3(self):
    """Underscores, dashes and digits are all valid in a context name."""
    d = dsl_type20.DSLType20(dsl_type20.DSLType20_Options())
    ok, _ = d.add_context("_ok-90", [])
    self.assertTrue(ok)

def testDslType20_TestAddContext4(self):
    """A context may be created together with an options list."""
    d = dsl_type20.DSLType20(dsl_type20.DSLType20_Options())
    ok, _ = d.add_context("ok", [("var1", "val1")])
    self.assertTrue(ok)

def testDslType20_TestGetAllContexts1(self):
    """get_all_contexts reports contexts in insertion order."""
    d = dsl_type20.DSLType20(dsl_type20.DSLType20_Options())
    ok, _ = d.add_context("ctx1", [])
    self.assertTrue(ok)
    ok, _ = d.add_context("ctx2", [])
    self.assertTrue(ok)
    self.assertEqual(d.get_all_contexts(), ["ctx1", "ctx2"])

def testDslType20_TestGetAllContexts2(self):
    """A rejected context name leaves no trace in get_all_contexts."""
    d = dsl_type20.DSLType20(dsl_type20.DSLType20_Options())
    ok, _ = d.add_context("ctx1", [])
    self.assertTrue(ok)
    ok, _ = d.add_context("ctx2", [])
    self.assertTrue(ok)
    bad, _ = d.add_context("ctx 3", [])
    self.assertFalse(bad)
    ok, _ = d.add_context("ctx4", [])
    self.assertTrue(ok)
    self.assertEqual(d.get_all_contexts(), ["ctx1", "ctx2", "ctx4"])
def testDslType20_TestAddContextFail1(self):
    """A non-string context name (a list) is rejected."""
    d = dsl_type20.DSLType20(dsl_type20.DSLType20_Options())
    ok, _ = d.add_context([], [])
    self.assertFalse(ok)

def testDslType20_TestAddContextFail2(self):
    """An empty context name is rejected."""
    d = dsl_type20.DSLType20(dsl_type20.DSLType20_Options())
    ok, _ = d.add_context("", [])
    self.assertFalse(ok)

def testDslType20_TestAddContextFail3(self):
    """Punctuation such as '!' is not allowed in a context name."""
    d = dsl_type20.DSLType20(dsl_type20.DSLType20_Options())
    ok, _ = d.add_context("ok!", [])
    self.assertFalse(ok)

def testDslType20_TestAddContextFail4(self):
    """A name carrying the '@' decorator character is rejected."""
    d = dsl_type20.DSLType20(dsl_type20.DSLType20_Options())
    ok, _ = d.add_context("@ok", [])
    self.assertFalse(ok)

def testDslType20_TestAddContextFail5(self):
    """Adding the same context twice fails on the second attempt."""
    d = dsl_type20.DSLType20(dsl_type20.DSLType20_Options())
    first, _ = d.add_context("ok", [])
    self.assertTrue(first)
    second, _ = d.add_context("ok", [])
    self.assertFalse(second)

def testDslType20_TestAddContextFail6(self):
    """Context options must be a list, not a bare string."""
    d = dsl_type20.DSLType20(dsl_type20.DSLType20_Options())
    ok, _ = d.add_context("ok", "nok")
    self.assertFalse(ok)

def testDslType20_TestAddContextFail7(self):
    """A context option whose value spans lines is rejected."""
    d = dsl_type20.DSLType20(dsl_type20.DSLType20_Options())
    ok = d.add_context("ok", [("var1", "first line\nsecond line")])[0]
    self.assertFalse(ok)
def testDslType20_TestAddVar1(self):
    """A variable with an empty value is stored as-is."""
    d = dsl_type20.DSLType20(dsl_type20.DSLType20_Options())
    self.assertTrue(d.add_var("var1", "", [])[0])
    self.assertEqual(d.get_all_vars(), [("var1", "", [])])

def testDslType20_TestAddVar2(self):
    """A simple name/value pair round-trips through get_all_vars."""
    d = dsl_type20.DSLType20(dsl_type20.DSLType20_Options())
    self.assertTrue(d.add_var("var1", "val1", [])[0])
    self.assertEqual(d.get_all_vars(), [("var1", "val1", [])])

def testDslType20_TestAddVar3(self):
    """A variable keeps the single option it was created with."""
    d = dsl_type20.DSLType20(dsl_type20.DSLType20_Options())
    self.assertTrue(d.add_var("var1", "val1", [("opt1", "val2")])[0])
    self.assertEqual(d.get_all_vars(), [("var1", "val1", [("opt1", "val2")])])

def testDslType20_TestAddVar4(self):
    """Duplicate options are preserved verbatim when dupes are allowed."""
    d = dsl_type20.DSLType20(dsl_type20.DSLType20_Options())
    opts = [("opt1", "val2"), ("opt1", "val2")]
    self.assertTrue(d.add_var("var1", "val1", opts)[0])
    self.assertEqual(d.get_all_vars(), [("var1", "val1", opts)])

def testDslType20_TestAddVar5(self):
    """Two identical add_var calls yield two stored entries."""
    d = dsl_type20.DSLType20(dsl_type20.DSLType20_Options())
    opts = [("opt1", "val2"), ("opt1", "val2")]
    self.assertTrue(d.add_var("var1", "val1", opts)[0])
    self.assertTrue(d.add_var("var1", "val1", opts)[0])
    self.assertEqual(d.get_all_vars(),
                     [("var1", "val1", opts), ("var1", "val1", opts)])

def testDslType20_TestAddVar6(self):
    """vars_auto_ctx_options prepends the context options to the var's."""
    d = dsl_type20.DSLType20(dsl_type20.DSLType20_Options(vars_auto_ctx_options=True))
    self.assertTrue(d.add_context("ctx1", [("opt1", "val1")])[0])
    self.assertTrue(d.add_var("var1", "val2", [("opt2", "val3")], "ctx1")[0])
    merged = [("var1", "val2", [("opt1", "val1"), ("opt2", "val3")])]
    self.assertEqual(d.get_vars("var1", "ctx1"), merged)
    self.assertEqual(d.get_all_vars("ctx1"), merged)

def testDslType20_TestAddVar7(self):
    """Auto-merged context options keep same-named var options too."""
    d = dsl_type20.DSLType20(dsl_type20.DSLType20_Options(vars_auto_ctx_options=True))
    self.assertTrue(d.add_context("ctx1", [("opt1", "val1")])[0])
    self.assertTrue(d.add_var("var1", "val2", [("opt1", "val3")], "ctx1")[0])
    merged = [("var1", "val2", [("opt1", "val1"), ("opt1", "val3")])]
    self.assertEqual(d.get_vars("var1", "ctx1"), merged)
    self.assertEqual(d.get_all_vars("ctx1"), merged)

def testDslType20_TestAddVar8(self):
    """Without vars_auto_ctx_options, context options are not merged in."""
    d = dsl_type20.DSLType20(dsl_type20.DSLType20_Options(allow_dupes=False))
    self.assertTrue(d.add_context("ctx1", [("opt1", "val1")])[0])
    self.assertTrue(d.add_var("var1", "val2", [("opt1", "val3")], "ctx1")[0])
    expected = [("var1", "val2", [("opt1", "val3")])]
    self.assertEqual(d.get_vars("var1", "ctx1"), expected)
    self.assertEqual(d.get_all_vars("ctx1"), expected)

def testDslType20_TestAddVar9(self):
    """A value-less (flag style) option is stored with value None."""
    d = dsl_type20.DSLType20(dsl_type20.DSLType20_Options())
    self.assertTrue(d.add_var("var1", "val1", [("opt1", None)])[0])
    self.assertEqual(d.get_all_vars(), [("var1", "val1", [("opt1", None)])])
def testDslType20_TestAddVarFail1(self):
    """A None value is rejected."""
    d = dsl_type20.DSLType20(dsl_type20.DSLType20_Options())
    self.assertFalse(d.add_var("var1", None, [])[0])

def testDslType20_TestAddVarFail2(self):
    """A non-string value (a list) is rejected."""
    d = dsl_type20.DSLType20(dsl_type20.DSLType20_Options())
    self.assertFalse(d.add_var("var1", [], [])[0])

def testDslType20_TestAddVarFail3(self):
    """An option entry that is a bare string, not a pair, is rejected."""
    d = dsl_type20.DSLType20(dsl_type20.DSLType20_Options())
    self.assertFalse(d.add_var("var1", "val1", [("opt")])[0])

def testDslType20_TestAddVarFail4(self):
    """A non-string option value (an int) is rejected."""
    d = dsl_type20.DSLType20(dsl_type20.DSLType20_Options())
    self.assertFalse(d.add_var("var1", "val1", [("opt", 1)])[0])

def testDslType20_TestAddVarFail5(self):
    """An option triple (three elements) is rejected."""
    d = dsl_type20.DSLType20(dsl_type20.DSLType20_Options())
    self.assertFalse(d.add_var("var1", "val1", [("opt", "val", "again")])[0])

def testDslType20_TestAddVarFail6(self):
    """An option expressed as a list instead of a tuple is rejected."""
    d = dsl_type20.DSLType20(dsl_type20.DSLType20_Options())
    self.assertFalse(d.add_var("var1", "val1", [["opt", "val"]])[0])

def testDslType20_TestAddVarFail7(self):
    """A None entry in the options list is rejected."""
    d = dsl_type20.DSLType20(dsl_type20.DSLType20_Options())
    self.assertFalse(d.add_var("var1", "val1", [None])[0])

def testDslType20_TestAddVarFail8(self):
    """Passing None instead of an options list is rejected."""
    d = dsl_type20.DSLType20(dsl_type20.DSLType20_Options())
    self.assertFalse(d.add_var("var1", "val1", None)[0])

def testDslType20_TestAddVarFail9(self):
    """A non-string variable name (an int) is rejected."""
    d = dsl_type20.DSLType20(dsl_type20.DSLType20_Options())
    self.assertFalse(d.add_var(1, "val1", [("opt", "val")])[0])

def testDslType20_TestAddVarFail10(self):
    """A None variable name is rejected."""
    d = dsl_type20.DSLType20(dsl_type20.DSLType20_Options())
    self.assertFalse(d.add_var(None, "val1", [("opt", "val")])[0])

def testDslType20_TestAddVarFail11(self):
    """A variable value spanning multiple lines is rejected."""
    d = dsl_type20.DSLType20(dsl_type20.DSLType20_Options())
    self.assertFalse(d.add_var("var1", "first line\nsecond line", [])[0])

def testDslType20_TestAddVarFail12(self):
    """An option value spanning multiple lines is rejected."""
    d = dsl_type20.DSLType20(dsl_type20.DSLType20_Options())
    self.assertFalse(d.add_var("var1", "val1", [("opt1", "first line\nsecond line")])[0])
def testDslType20_TestCountOccurrenceFirstOfPair1(self):
    """No pair starts with the probed key -> count is zero."""
    count = dsl_type20.count_occurrence_first_of_pair([("b", None)], "a")
    self.assertEqual(count, 0)

def testDslType20_TestCountOccurrenceFirstOfPair2(self):
    """A single matching first element counts once."""
    count = dsl_type20.count_occurrence_first_of_pair([("b", None)], "b")
    self.assertEqual(count, 1)

def testDslType20_TestCountOccurrenceFirstOfPair3(self):
    """Every matching pair is counted, duplicates included."""
    count = dsl_type20.count_occurrence_first_of_pair([("b", None), ("b", None)], "b")
    self.assertEqual(count, 2)
def testDslType20_TestDisallowDupes1(self):
    """With allow_dupes=False, a second var of the same name is refused."""
    d = dsl_type20.DSLType20(dsl_type20.DSLType20_Options(allow_dupes=False))
    self.assertTrue(d.add_var("var1", "val1", [])[0])
    self.assertFalse(d.add_var("var1", "val2", [])[0])

def testDslType20_TestDisallowDupes2(self):
    """A single option on a var is still fine with dupes disallowed."""
    d = dsl_type20.DSLType20(dsl_type20.DSLType20_Options(allow_dupes=False))
    self.assertTrue(d.add_var("var1", "val1", [("opt1", None)])[0])

def testDslType20_TestDisallowDupes3(self):
    """Duplicate options on one var are refused with dupes disallowed."""
    d = dsl_type20.DSLType20(dsl_type20.DSLType20_Options(allow_dupes=False))
    self.assertFalse(d.add_var("var1", "val1", [("opt1", None), ("opt1", None)])[0])

def testDslType20_TestDisallowDupes4(self):
    """Different vars may each carry the same option name."""
    d = dsl_type20.DSLType20(dsl_type20.DSLType20_Options(allow_dupes=False))
    self.assertTrue(d.add_var("var1", "val1", [("opt1", None)])[0])
    self.assertTrue(d.add_var("var2", "val1", [("opt1", None)])[0])

def testDslType20_TestDisallowDupesParse1(self):
    """Parsing a document with a repeated var fails when dupes are off."""
    d = dsl_type20.DSLType20(dsl_type20.DSLType20_Options(allow_dupes=False))
    doc = "var1 = \"val1\"\n"
    doc += "var1 = \"val2\"\n"
    ok, _ = d.parse(doc)
    self.assertFalse(ok)
def testDslType20_TestRemVar1(self):
    """rem_var without an index removes the variable."""
    d = dsl_type20.DSLType20(dsl_type20.DSLType20_Options())
    self.assertTrue(d.add_var("var1", "val1", [])[0])
    self.assertEqual(d.get_all_vars(), [("var1", "val1", [])])
    self.assertTrue(d.rem_var("var1"))
    self.assertEqual(d.get_all_vars(), [])

def testDslType20_TestRemVar2(self):
    """rem_var with index 0 removes the only occurrence."""
    d = dsl_type20.DSLType20(dsl_type20.DSLType20_Options())
    self.assertTrue(d.add_var("var1", "val1", [])[0])
    self.assertEqual(d.get_all_vars(), [("var1", "val1", [])])
    self.assertTrue(d.rem_var("var1", 0))
    self.assertEqual(d.get_all_vars(), [])

def testDslType20_TestRemVar3(self):
    """rem_var with an out-of-range index fails and changes nothing."""
    d = dsl_type20.DSLType20(dsl_type20.DSLType20_Options())
    self.assertTrue(d.add_var("var1", "val1", [])[0])
    self.assertEqual(d.get_all_vars(), [("var1", "val1", [])])
    self.assertFalse(d.rem_var("var1", 1))
    self.assertEqual(d.get_all_vars(), [("var1", "val1", [])])

def testDslType20_TestRemVar4(self):
    """rem_var with index 1 removes only the second occurrence."""
    d = dsl_type20.DSLType20(dsl_type20.DSLType20_Options())
    self.assertTrue(d.add_var("var1", "val1", [])[0])
    self.assertTrue(d.add_var("var1", "val2", [])[0])
    self.assertEqual(d.get_all_vars(), [("var1", "val1", []), ("var1", "val2", [])])
    self.assertTrue(d.rem_var("var1", 1))
    self.assertEqual(d.get_all_vars(), [("var1", "val1", [])])

def testDslType20_TestRemVar5(self):
    """Removing an unknown variable fails and changes nothing."""
    d = dsl_type20.DSLType20(dsl_type20.DSLType20_Options())
    self.assertTrue(d.add_var("var1", "val1", [])[0])
    self.assertEqual(d.get_all_vars(), [("var1", "val1", [])])
    self.assertFalse(d.rem_var("var2"))
    self.assertEqual(d.get_all_vars(), [("var1", "val1", [])])

def testDslType20_TestRemVar6(self):
    """Removal in one context leaves the default context untouched."""
    d = dsl_type20.DSLType20(dsl_type20.DSLType20_Options())
    self.assertTrue(d.add_var("var1", "val2", [])[0])
    self.assertTrue(d.add_var("var1", "val4", [], "ctx1")[0])
    self.assertEqual(d.get_all_vars(), [("var1", "val2", [])])
    self.assertEqual(d.get_all_vars("ctx1"), [("var1", "val4", [])])
    self.assertTrue(d.rem_var("var1", None, "ctx1"))
    self.assertEqual(d.get_all_vars(), [("var1", "val2", [])])
    self.assertEqual(d.get_all_vars("ctx1"), [])
def testDslType20_TestRemCtx1(self):
    """rem_ctx(None) is rejected."""
    d = dsl_type20.DSLType20(dsl_type20.DSLType20_Options())
    self.assertFalse(d.rem_ctx(None))

def testDslType20_TestRemCtx2(self):
    """The default context cannot be removed."""
    d = dsl_type20.DSLType20(dsl_type20.DSLType20_Options())
    self.assertFalse(d.rem_ctx(d.default_context_id))

def testDslType20_TestRemCtx3(self):
    """Removing a context that does not exist fails."""
    d = dsl_type20.DSLType20(dsl_type20.DSLType20_Options())
    self.assertFalse(d.rem_ctx("ctx1"))

def testDslType20_TestRemCtx4(self):
    """An added context can be removed again."""
    d = dsl_type20.DSLType20(dsl_type20.DSLType20_Options())
    self.assertTrue(d.add_context("ctx1", [])[0])
    self.assertIn("ctx1", d.get_all_contexts())
    self.assertTrue(d.rem_ctx("ctx1"))
    self.assertNotIn("ctx1", d.get_all_contexts())

def testDslType20_TestRemCtx5(self):
    """Removing one context leaves its siblings in place."""
    d = dsl_type20.DSLType20(dsl_type20.DSLType20_Options())
    self.assertTrue(d.add_context("ctx1", [])[0])
    self.assertTrue(d.add_context("ctx2", [])[0])
    self.assertIn("ctx1", d.get_all_contexts())
    self.assertIn("ctx2", d.get_all_contexts())
    self.assertTrue(d.rem_ctx("ctx1"))
    self.assertNotIn("ctx1", d.get_all_contexts())
    self.assertIn("ctx2", d.get_all_contexts())
def _check_produce_roundtrip(self, dsl_1, expected, reparse_options=None):
    """Shared tail of every Produce test (was copy-pasted 24 times).

    Asserts that dsl_1.produce() equals *expected*, then re-parses that
    output with a fresh DSLType20 (configured with *reparse_options*,
    default options when None) and checks the re-parsed model and its
    produce() output match the original — i.e. produce/parse round-trips.
    """
    self.assertEqual(dsl_1.produce(), expected)
    if reparse_options is None:
        reparse_options = dsl_type20.DSLType20_Options()
    dsl_2 = dsl_type20.DSLType20(reparse_options)
    v, r = dsl_2.parse(dsl_1.produce())
    self.assertTrue(v)
    self.assertEqual(r, None)
    self.assertEqual(dsl_1.get_all_vars(), dsl_2.get_all_vars())
    self.assertEqual(dsl_1.produce(), dsl_2.produce())

def testDslType20_TestProduce1(self):
    """One plain variable in the default context."""
    dsl_1 = dsl_type20.DSLType20(dsl_type20.DSLType20_Options())
    self.assertTrue(dsl_1.add_var("var1", "val1", [])[0])
    self.assertEqual(dsl_1.get_all_vars(), [("var1", "val1", [])])
    self._check_produce_roundtrip(dsl_1, "var1 = \"val1\"")

def testDslType20_TestProduce2(self):
    """Two variables in the default context, newline separated."""
    dsl_1 = dsl_type20.DSLType20(dsl_type20.DSLType20_Options())
    self.assertTrue(dsl_1.add_var("var1", "val1", [])[0])
    self.assertTrue(dsl_1.add_var("var2", "val2", [])[0])
    self.assertEqual(dsl_1.get_all_vars(), [("var1", "val1", []), ("var2", "val2", [])])
    self._check_produce_roundtrip(dsl_1, "var1 = \"val1\"\nvar2 = \"val2\"")

def testDslType20_TestProduce3(self):
    """Default-context vars come first, then each named context block."""
    dsl_1 = dsl_type20.DSLType20(dsl_type20.DSLType20_Options())
    self.assertTrue(dsl_1.add_var("var1", "val1", [], "ctx1")[0])
    self.assertTrue(dsl_1.add_var("var2", "val2", [])[0])
    self.assertTrue(dsl_1.add_var("var1", "val1", [], "ctx2")[0])
    self.assertTrue(dsl_1.add_var("var2", "val2", [], "ctx2")[0])
    self.assertEqual(dsl_1.get_all_vars(), [("var2", "val2", [])])
    self.assertEqual(dsl_1.get_all_vars("ctx1"), [("var1", "val1", [])])
    self.assertEqual(dsl_1.get_all_vars("ctx2"), [("var1", "val1", []), ("var2", "val2", [])])
    self._check_produce_roundtrip(
        dsl_1,
        "var2 = \"val2\"\n[\n@ctx1\nvar1 = \"val1\"\n]\n\n[\n@ctx2\nvar1 = \"val1\"\nvar2 = \"val2\"\n]")

def testDslType20_TestProduce4(self):
    """Embedded quotes in a value are escaped on output."""
    dsl_1 = dsl_type20.DSLType20(dsl_type20.DSLType20_Options())
    self.assertTrue(dsl_1.add_var("var1", "val\"1\"", [], "ctx1")[0])
    self.assertEqual(dsl_1.get_all_vars("ctx1"), [("var1", "val\"1\"", [])])
    self._check_produce_roundtrip(dsl_1, "[\n@ctx1\nvar1 = \"val\\\"1\\\"\"\n]")

def testDslType20_TestProduce5(self):
    """A value-less option renders as a bare {opt1} flag."""
    dsl_1 = dsl_type20.DSLType20(dsl_type20.DSLType20_Options())
    self.assertTrue(dsl_1.add_var("var1", "val\"1\"", [("opt1", None)], "ctx1")[0])
    self.assertEqual(dsl_1.get_all_vars("ctx1"), [("var1", "val\"1\"", [("opt1", None)])])
    self._check_produce_roundtrip(dsl_1, "[\n@ctx1\nvar1 {opt1} = \"val\\\"1\\\"\"\n]")

def testDslType20_TestProduce6(self):
    """A valued option renders as {opt1: \"val2\"}."""
    dsl_1 = dsl_type20.DSLType20(dsl_type20.DSLType20_Options())
    self.assertTrue(dsl_1.add_var("var1", "val1", [("opt1", "val2")], "ctx1")[0])
    self.assertEqual(dsl_1.get_all_vars("ctx1"), [("var1", "val1", [("opt1", "val2")])])
    self._check_produce_roundtrip(dsl_1, "[\n@ctx1\nvar1 {opt1: \"val2\"} = \"val1\"\n]")

def testDslType20_TestProduce7(self):
    """Quotes are escaped in both option values and variable values."""
    dsl_1 = dsl_type20.DSLType20(dsl_type20.DSLType20_Options())
    self.assertTrue(dsl_1.add_var("var1", "val\"1\"", [("opt1", "val\"2\"")], "ctx1")[0])
    self.assertEqual(dsl_1.get_all_vars("ctx1"), [("var1", "val\"1\"", [("opt1", "val\"2\"")])])
    self._check_produce_roundtrip(dsl_1, "[\n@ctx1\nvar1 {opt1: \"val\\\"2\\\"\"} = \"val\\\"1\\\"\"\n]")

def testDslType20_TestProduce8(self):
    """Context options print on the @ctx line, not on each variable."""
    dsl_1 = dsl_type20.DSLType20(dsl_type20.DSLType20_Options(vars_auto_ctx_options=True))
    self.assertTrue(dsl_1.add_context("ctx1", [("opt1", "val2")])[0])
    self.assertTrue(dsl_1.add_var("var1", "val1", [], "ctx1")[0])
    self.assertEqual(dsl_1.get_all_vars("ctx1"), [("var1", "val1", [("opt1", "val2")])])
    self._check_produce_roundtrip(dsl_1, "[\n@ctx1 {opt1: \"val2\"}\nvar1 = \"val1\"\n]")

def testDslType20_TestProduce9(self):
    """Context and variable options print separately despite merging."""
    dsl_1 = dsl_type20.DSLType20(dsl_type20.DSLType20_Options(vars_auto_ctx_options=True))
    self.assertTrue(dsl_1.add_context("ctx1", [("opt1", "val1")])[0])
    self.assertTrue(dsl_1.add_var("var1", "val2", [("opt1", "val3")], "ctx1")[0])
    self.assertEqual(dsl_1.get_all_vars("ctx1"), [("var1", "val2", [("opt1", "val1"), ("opt1", "val3")])])
    self._check_produce_roundtrip(dsl_1, "[\n@ctx1 {opt1: \"val1\"}\nvar1 {opt1: \"val3\"} = \"val2\"\n]")

def testDslType20_TestProduce10(self):
    """Round-trip also holds when re-parsing with allow_dupes=False."""
    dsl_1 = dsl_type20.DSLType20(dsl_type20.DSLType20_Options(allow_dupes=False))
    self.assertTrue(dsl_1.add_context("ctx1", [("opt1", "val1")])[0])
    self.assertTrue(dsl_1.add_var("var1", "val2", [("opt1", "val3")], "ctx1")[0])
    self.assertEqual(dsl_1.get_all_vars("ctx1"), [("var1", "val2", [("opt1", "val3")])])
    self._check_produce_roundtrip(dsl_1,
                                  "[\n@ctx1 {opt1: \"val1\"}\nvar1 {opt1: \"val3\"} = \"val2\"\n]",
                                  dsl_type20.DSLType20_Options(allow_dupes=False))

def testDslType20_TestProduce11(self):
    """A custom variable decorator prefixes every produced var line."""
    dsl_1 = dsl_type20.DSLType20(dsl_type20.DSLType20_Options(variable_decorator="* "))
    self.assertTrue(dsl_1.add_var("var1", "val1", [])[0])
    self.assertEqual(dsl_1.get_all_vars(), [("var1", "val1", [])])
    self._check_produce_roundtrip(dsl_1, "* var1 = \"val1\"",
                                  dsl_type20.DSLType20_Options(variable_decorator="* "))

def testDslType20_TestProduce12(self):
    """Multiple context options are joined with ' / '."""
    dsl_1 = dsl_type20.DSLType20(dsl_type20.DSLType20_Options(allow_dupes=False))
    self.assertTrue(dsl_1.add_context("ctx1", [("opt1", "val1"), ("opt2", "val2")])[0])
    self.assertTrue(dsl_1.add_var("var1", "val4", [("opt1", "val3")], "ctx1")[0])
    self.assertEqual(dsl_1.get_all_vars("ctx1"), [("var1", "val4", [("opt1", "val3")])])
    self._check_produce_roundtrip(dsl_1,
                                  "[\n@ctx1 {opt1: \"val1\" / opt2: \"val2\"}\nvar1 {opt1: \"val3\"} = \"val4\"\n]",
                                  dsl_type20.DSLType20_Options(allow_dupes=False))

def testDslType20_TestProduce13(self):
    """An empty value renders as an empty quoted string."""
    dsl_1 = dsl_type20.DSLType20(dsl_type20.DSLType20_Options(allow_dupes=False))
    self.assertTrue(dsl_1.add_context("ctx1", [])[0])
    self.assertTrue(dsl_1.add_var("var1", "", [], "ctx1")[0])
    self.assertEqual(dsl_1.get_all_vars("ctx1"), [("var1", "", [])])
    self._check_produce_roundtrip(dsl_1, "[\n@ctx1\nvar1 = \"\"\n]",
                                  dsl_type20.DSLType20_Options(allow_dupes=False))

def testDslType20_TestProduce14(self):
    """Empty value plus a flag option."""
    dsl_1 = dsl_type20.DSLType20(dsl_type20.DSLType20_Options(allow_dupes=False))
    self.assertTrue(dsl_1.add_context("ctx1", [])[0])
    self.assertTrue(dsl_1.add_var("var1", "", [("opt1", None)], "ctx1")[0])
    self.assertEqual(dsl_1.get_all_vars("ctx1"), [("var1", "", [("opt1", None)])])
    self._check_produce_roundtrip(dsl_1, "[\n@ctx1\nvar1 {opt1} = \"\"\n]",
                                  dsl_type20.DSLType20_Options(allow_dupes=False))

def testDslType20_TestProduce15(self):
    """A flag option on the context header line."""
    dsl_1 = dsl_type20.DSLType20(dsl_type20.DSLType20_Options(allow_dupes=False))
    self.assertTrue(dsl_1.add_context("ctx1", [("opt1", None)])[0])
    self.assertTrue(dsl_1.add_var("var1", "", [], "ctx1")[0])
    self.assertEqual(dsl_1.get_all_vars("ctx1"), [("var1", "", [])])
    self._check_produce_roundtrip(dsl_1, "[\n@ctx1 {opt1}\nvar1 = \"\"\n]",
                                  dsl_type20.DSLType20_Options(allow_dupes=False))

def testDslType20_TestProduce16(self):
    """An empty-string option value still renders quoted."""
    dsl_1 = dsl_type20.DSLType20(dsl_type20.DSLType20_Options(allow_dupes=False))
    self.assertTrue(dsl_1.add_context("ctx1", [])[0])
    self.assertTrue(dsl_1.add_var("var1", "", [("opt1", "")], "ctx1")[0])
    self.assertEqual(dsl_1.get_all_vars("ctx1"), [("var1", "", [("opt1", "")])])
    self._check_produce_roundtrip(dsl_1, "[\n@ctx1\nvar1 {opt1: \"\"} = \"\"\n]",
                                  dsl_type20.DSLType20_Options(allow_dupes=False))

def testDslType20_TestProduce17(self):
    """An empty-string context option value renders quoted."""
    dsl_1 = dsl_type20.DSLType20(dsl_type20.DSLType20_Options(allow_dupes=False))
    self.assertTrue(dsl_1.add_context("ctx1", [("opt1", "")])[0])
    self.assertTrue(dsl_1.add_var("var1", "", [], "ctx1")[0])
    self.assertEqual(dsl_1.get_all_vars("ctx1"), [("var1", "", [])])
    self._check_produce_roundtrip(dsl_1, "[\n@ctx1 {opt1: \"\"}\nvar1 = \"\"\n]",
                                  dsl_type20.DSLType20_Options(allow_dupes=False))

def testDslType20_TestProduce18(self):
    """Two flag options on a variable are joined with ' / '."""
    dsl_1 = dsl_type20.DSLType20(dsl_type20.DSLType20_Options(allow_dupes=False))
    self.assertTrue(dsl_1.add_context("ctx1", [])[0])
    self.assertTrue(dsl_1.add_var("var1", "", [("opt1", None), ("opt2", None)], "ctx1")[0])
    self.assertEqual(dsl_1.get_all_vars("ctx1"), [("var1", "", [("opt1", None), ("opt2", None)])])
    self._check_produce_roundtrip(dsl_1, "[\n@ctx1\nvar1 {opt1 / opt2} = \"\"\n]",
                                  dsl_type20.DSLType20_Options(allow_dupes=False))

def testDslType20_TestProduce19(self):
    """Two empty-valued options on a variable."""
    dsl_1 = dsl_type20.DSLType20(dsl_type20.DSLType20_Options(allow_dupes=False))
    self.assertTrue(dsl_1.add_context("ctx1", [])[0])
    self.assertTrue(dsl_1.add_var("var1", "", [("opt1", ""), ("opt2", "")], "ctx1")[0])
    self.assertEqual(dsl_1.get_all_vars("ctx1"), [("var1", "", [("opt1", ""), ("opt2", "")])])
    self._check_produce_roundtrip(dsl_1, "[\n@ctx1\nvar1 {opt1: \"\" / opt2: \"\"} = \"\"\n]",
                                  dsl_type20.DSLType20_Options(allow_dupes=False))

def testDslType20_TestProduce20(self):
    """Two valued options on a variable."""
    dsl_1 = dsl_type20.DSLType20(dsl_type20.DSLType20_Options(allow_dupes=False))
    self.assertTrue(dsl_1.add_context("ctx1", [])[0])
    self.assertTrue(dsl_1.add_var("var1", "", [("opt1", "abc"), ("opt2", "def")], "ctx1")[0])
    self.assertEqual(dsl_1.get_all_vars("ctx1"), [("var1", "", [("opt1", "abc"), ("opt2", "def")])])
    self._check_produce_roundtrip(dsl_1, "[\n@ctx1\nvar1 {opt1: \"abc\" / opt2: \"def\"} = \"\"\n]",
                                  dsl_type20.DSLType20_Options(allow_dupes=False))

def testDslType20_TestProduce21(self):
    """Two flag options on a context header."""
    dsl_1 = dsl_type20.DSLType20(dsl_type20.DSLType20_Options(allow_dupes=False))
    self.assertTrue(dsl_1.add_context("ctx1", [("opt1", None), ("opt2", None)])[0])
    self.assertTrue(dsl_1.add_var("var1", "", [], "ctx1")[0])
    self.assertEqual(dsl_1.get_all_vars("ctx1"), [("var1", "", [])])
    self._check_produce_roundtrip(dsl_1, "[\n@ctx1 {opt1 / opt2}\nvar1 = \"\"\n]",
                                  dsl_type20.DSLType20_Options(allow_dupes=False))

def testDslType20_TestProduce22(self):
    """Two empty-valued options on a context header."""
    dsl_1 = dsl_type20.DSLType20(dsl_type20.DSLType20_Options(allow_dupes=False))
    self.assertTrue(dsl_1.add_context("ctx1", [("opt1", ""), ("opt2", "")])[0])
    self.assertTrue(dsl_1.add_var("var1", "", [], "ctx1")[0])
    self.assertEqual(dsl_1.get_all_vars("ctx1"), [("var1", "", [])])
    self._check_produce_roundtrip(dsl_1, "[\n@ctx1 {opt1: \"\" / opt2: \"\"}\nvar1 = \"\"\n]",
                                  dsl_type20.DSLType20_Options(allow_dupes=False))

def testDslType20_TestProduce23(self):
    """Two valued options on a context header."""
    dsl_1 = dsl_type20.DSLType20(dsl_type20.DSLType20_Options(allow_dupes=False))
    self.assertTrue(dsl_1.add_context("ctx1", [("opt1", "abc"), ("opt2", "def")])[0])
    self.assertTrue(dsl_1.add_var("var1", "", [], "ctx1")[0])
    self.assertEqual(dsl_1.get_all_vars("ctx1"), [("var1", "", [])])
    self._check_produce_roundtrip(dsl_1, "[\n@ctx1 {opt1: \"abc\" / opt2: \"def\"}\nvar1 = \"\"\n]",
                                  dsl_type20.DSLType20_Options(allow_dupes=False))

def testDslType20_TestProduce24(self):
    """A leading '#' in a value is quoted, not treated as a comment."""
    dsl_1 = dsl_type20.DSLType20(dsl_type20.DSLType20_Options())
    self.assertTrue(dsl_1.add_var("var1", "#val1", [])[0])
    self.assertEqual(dsl_1.get_all_vars(), [("var1", "#val1", [])])
    self._check_produce_roundtrip(dsl_1, "var1 = \"#val1\"")
def testDslType20_TestGetContextOptions1(self):
    """A context reports the single option it was created with."""
    d = dsl_type20.DSLType20(dsl_type20.DSLType20_Options())
    self.assertTrue(d.add_context("ctx1", [("var1", "val1")])[0])
    self.assertEqual(d.get_context_options("ctx1"), [("var1", "val1")])

def testDslType20_TestGetContextOptions2(self):
    """Multiple context options are reported in creation order."""
    d = dsl_type20.DSLType20(dsl_type20.DSLType20_Options())
    opts = [("var1", "val1"), ("var2", "val2")]
    self.assertTrue(d.add_context("ctx1", opts)[0])
    self.assertEqual(d.get_context_options("ctx1"), opts)
# Run the test suite when this module is executed directly.
if __name__ == '__main__':
    unittest.main()
| |
#! /usr/bin/env python
"""Check design."""
import os
import sys
import luigi
import shutil
from luigi import LocalTarget
from luigi.util import inherits, requires
import pandas as pd
import gffutils
import glob
DIR = os.path.dirname(os.path.realpath(__file__))
script_dir = os.path.abspath(os.path.join(DIR, "../../scripts"))
os.environ["PATH"] += ":" + script_dir
sys.path.insert(0, script_dir)
import logging
import json
from Bio.Seq import Seq
from Bio.Alphabet import generic_dna
import re
from functools import reduce
from piret.miscs import RefFile
class conversions(luigi.Task):
    """Convert gene count, RPKM, fold change table to GeneID or locus tag
    and also to ones that have EC# or KO# when available."""

    # NOTE(review): the declared parameters below are never read by output()
    # or run(); those methods instead read self.workdir, self.kingdom,
    # self.exp_design, self.p_value, self.pathway, self.GAGE and
    # self.org_code, none of which are declared here. This class looks
    # copy-pasted from an edgeR task — confirm its parameter list before use.
    gff_file = luigi.Parameter()
    gene_count_table = luigi.Parameter()
    gene_RPKM_table = luigi.Parameter()
    gene_CPM_table = luigi.Parameter()
    gene_fc_table = luigi.Parameter()

    def output(self):
        """Expected output of DGE using edgeR."""
        # Task is considered complete once the edgeR summary CSV exists.
        edger_dir = os.path.join(self.workdir, "edgeR", self.kingdom)
        out_filepath = os.path.join(edger_dir, "summary_updown.csv")
        return LocalTarget(out_filepath)

    def run(self):
        """Run edgeR."""
        fcount_dir = os.path.join(self.workdir, "featureCounts", self.kingdom)
        edger_dir = os.path.join(self.workdir, "edgeR", self.kingdom)
        if not os.path.exists(edger_dir):
            os.makedirs(edger_dir)
        # Run edgeR once per featureCounts .tsv table.
        for file in os.listdir(fcount_dir):
            if file.endswith("tsv"):
                # e.g. "..._gene_count.tsv" -> feature name is the
                # second-to-last underscore-separated token.
                name = file.split("_")[-2]
                edger_list = ["-r", os.path.join(fcount_dir, file),
                              "-e", self.exp_design,
                              "-p", self.p_value,
                              "-n", name,
                              "-o", edger_dir]
                # TODO: get the output that has locus tag
                # NOTE(review): EdgeR, plot_pathway and gage_analysis are not
                # defined/imported in this file — presumably command wrappers
                # (indexing with an argument list suggests plumbum-style
                # commands); verify where they come from.
                edger_cmd = EdgeR[edger_list]
                logger = logging.getLogger('luigi-interface')
                logger.info(edger_cmd)
                edger_cmd()
                if file == "gene_count.tsv":
                    # TODO:convert the first column to locus tag
                    if self.pathway is True:
                        path_list = ["-d", edger_dir,
                                     "-m", "edgeR", "-c",
                                     self.org_code]  # get pathway information
                        path_cmd = plot_pathway[path_list]
                        logger.info(path_cmd)
                        path_cmd()
                    if self.GAGE is True:
                        gage_list = ["-d", edger_dir, "-m",
                                     "edgeR", "-c", self.org_code]
                        gage_cmd = gage_analysis[gage_list]
                        logger.info(gage_cmd)
                        gage_cmd()
        # NOTE(review): summ_summ is not defined in the visible portion of
        # this class — confirm it exists on a base class or mixin.
        self.summ_summ()
class conver2json(luigi.Task):
""" Summarizes and converts all the results to one big JSON file."""
gff_file = luigi.Parameter()
fasta_file = luigi.Parameter()
pathway = luigi.BoolParameter()
kingdom = luigi.Parameter()
workdir = luigi.Parameter()
method = luigi.ListParameter()
NovelRegions = luigi.BoolParameter()
def requires(self):
    """Require the per-method count tables this task summarizes.

    Returns a RefFile requirement for each enabled DGE method.

    Bug fix: the original used ``elif`` for the DESeq2 branch, so when
    ``self.method`` contained both "edgeR" and "DESeq2" the DESeq2 FPKM
    table was never required — even though gff2json() below summarizes
    both methods independently. The two checks are now independent.
    """
    flist = []
    if "edgeR" in self.method:
        cpm_file = os.path.join(self.workdir, "processes", "edgeR",
                                self.kingdom, "gene",
                                "gene" + "_count_CPM.csv")
        flist.append(cpm_file)
    if "DESeq2" in self.method:
        fpm_file = os.path.join(self.workdir, "processes", "DESeq2",
                                self.kingdom, "gene",
                                "gene" + "_count_FPKM.csv")
        flist.append(fpm_file)
    return [RefFile(f) for f in flist]
def output(self):
    """Expected output JSON (one file per supported kingdom)."""
    # Same mapping as run(): kingdom -> output JSON filename.
    filenames = {"prokarya": "prokarya_out.json",
                 "eukarya": "eukarya_out.json"}
    if self.kingdom in filenames:
        return LocalTarget(os.path.join(self.workdir, filenames[self.kingdom]))
def run(self):
    """Create the per-kingdom JSON summary file.

    Bug fix: for any kingdom other than "prokarya"/"eukarya" the original
    left ``jfile`` unbound and crashed with a NameError; raise a clear
    ValueError instead.
    """
    filenames = {"prokarya": "prokarya_out.json",
                 "eukarya": "eukarya_out.json"}
    if self.kingdom not in filenames:
        raise ValueError("unsupported kingdom: %s" % self.kingdom)
    jfile = os.path.join(self.workdir, filenames[self.kingdom])
    self.gff2json(jfile)
    def gff2json(self, out_json):
        """Convert the annotation gff plus all per-feature summaries to JSON.

        Builds (or reopens) a gffutils database for ``self.gff_file``, walks
        every feature, attaches read counts, CPM/RPKM-style scores, DGE
        results, ballgown/stringtie values and emapper annotations keyed by
        feature id, then dumps the resulting list of dicts to *out_json*.
        """
        # read in the gff file to a database
        if os.path.exists(os.path.join(self.workdir, "processes",
                                       "databases")) is False:
            os.makedirs(os.path.join(self.workdir, "processes",
                                     "databases"))
        # NOTE(review): db_out adds a kingdom subdirectory that the makedirs
        # above does not create — confirm gffutils/sqlite can create it.
        db_out = os.path.join(self.workdir, "processes", "databases",
                              self.kingdom,
                              "piret.db")
        if os.path.exists(db_out) is False:
            # create db if not already present
            db = gffutils.create_db(self.gff_file, dbfn=db_out, force=True,
                                    keep_order=True,
                                    merge_strategy="create_unique")
        else:
            # read db if its already present
            db = gffutils.FeatureDB(db_out, keep_order=True)
        # Summaries for methods that were not requested default to empty
        # containers so the per-feature lookups below yield None instead of
        # failing.
        if "edgeR" in self.method:
            edger_summ_cds = self.pm_summary("CDS", "edgeR")
            edger_summ_genes = self.pm_summary("gene", "edgeR")
            dge_edger_cds = self.dge_summary("CDS", "edgeR")
            dge_edger_gene = self.dge_summary("gene", "edgeR")
        else:
            edger_summ_cds = ({}, {})
            edger_summ_genes = ({}, {})
            dge_edger_cds = {}
            dge_edger_gene = {}
        if "DESeq2" in self.method:
            deseq_summ_cds = self.pm_summary("CDS", "DESeq2")
            deseq_summ_genes = self.pm_summary("gene", "DESeq2")
            dge_deseq_cds = self.dge_summary("CDS", "DESeq2")
            dge_deseq_gene = self.dge_summary("gene", "DESeq2")
        else:
            deseq_summ_cds = ({}, {})
            deseq_summ_genes = ({}, {})
            dge_deseq_cds = {}
            dge_deseq_gene = {}
        if "ballgown" in self.method:
            ballgown_gene_pm = self.pm_summary_ballgown()
        else:
            ballgown_gene_pm = {}
        stringtie_tpms = self.stringtie_tpm()
        read_summ_cds = self.read_summary("CDS")
        read_summ_gene = self.read_summary("gene")
        read_summ_rRNA = self.read_summary("rRNA")
        read_summ_tRNA = self.read_summary("tRNA")
        read_summ_exon = self.read_summary("exon")
        # NOTE(review): read_summ_NovelRegion is only bound when the flag is
        # set; the NovelRegion branch below raises NameError if a NovelRegion
        # feature occurs while self.NovelRegions is False — confirm intended.
        if self.NovelRegions is True:
            read_summ_NovelRegion = self.read_summary("NovelRegion")
        emaps = self.get_emapper()
        with open(out_json, "w") as json_file:
            json_list = []
            for feat_obj in db.all_features():
                feat_dic = {}  # an empty dictionary to append features
                feat_dic['seqid'] = feat_obj.seqid
                feat_dic['id'] = feat_obj.id
                feat_dic['source'] = feat_obj.source
                feat_type = feat_obj.featuretype
                feat_dic['featuretype'] = feat_type
                feat_dic['start'] = feat_obj.start
                feat_dic['end'] = feat_obj.end
                feat_dic["length"] = abs(feat_obj.end - feat_obj.start) + 1
                feat_dic['strand'] = feat_obj.strand
                feat_dic['frame'] = feat_obj.frame
                try:
                    feat_dic['locus_tag'] = feat_obj.attributes['locus_tag'][0]
                except KeyError:
                    pass
                try:
                    feat_dic['Note'] = feat_obj.attributes['Note']
                except KeyError:
                    pass
                feat_dic['extra'] = feat_obj.extra
                if feat_type != "region":
                    try:
                        nt_seqs = feat_obj.sequence(self.fasta_file)
                        nt_obj = Seq(nt_seqs, generic_dna)
                        feat_dic['nt_seq'] = nt_seqs
                    # NOTE(review): when the sequence lookup fails, nt_obj
                    # stays unbound (or stale from a previous feature) for
                    # the CDS branch below — confirm acceptable.
                    except KeyError:
                        pass
                # ============================================================================#
                if feat_type == "CDS":
                    # translate the CDS
                    feat_dic['aa_seqs'] = self.translate(nt_obj, "CDS")
                    # assign FPKMs and FPMs
                    self.assign_scores(feat_dic=feat_dic,
                                       edger_sdic=edger_summ_cds,
                                       deseq_sdic=deseq_summ_cds,
                                       feat_id=feat_obj.id)
                    # assign read numbers
                    try:
                        feat_dic["read_count"] = read_summ_cds[feat_obj.id]
                    except KeyError:
                        feat_dic["read_count"] = None
                    # assign dge information
                    self.assign_dges(feat_type="CDS", feat_dic=feat_dic,
                                     feat_id=feat_obj.id,
                                     method="edgeR", dge_dict=dge_edger_cds)
                    self.assign_dges(feat_type="CDS", feat_dic=feat_dic,
                                     feat_id=feat_obj.id,
                                     method="DESeq2", dge_dict=dge_deseq_cds)
                    # assign EC#s, KOs, etc.
                    # NOTE(review): get_emapper() returns None when its input
                    # file is absent; indexing None raises TypeError (not the
                    # KeyError caught here) — confirm.
                    try:
                        feat_dic["emapper"] = emaps[feat_obj.id]
                    except KeyError:
                        feat_dic["emapper"] = None
                # ============================================================================#
                elif feat_type == "NovelRegion":
                    try:
                        feat_dic["read_count"] = read_summ_NovelRegion[feat_obj.id]
                    except KeyError:
                        feat_dic["read_count"] = None
                # ============================================================================#
                elif feat_type == 'rRNA':
                    try:
                        feat_dic["read_count"] = read_summ_rRNA[feat_obj.id]
                    except KeyError:
                        feat_dic["read_count"] = None
                # ============================================================================#
                elif feat_type == 'tRNA':
                    try:
                        feat_dic["read_count"] = read_summ_tRNA[feat_obj.id]
                    except KeyError:
                        feat_dic["read_count"] = None
                # ============================================================================#
                elif feat_type == 'exon':
                    try:
                        feat_dic["read_count"] = read_summ_exon[feat_obj.id]
                    except KeyError:
                        feat_dic["read_count"] = None
                # ============================================================================#
                elif feat_type == "gene":
                    # assign scores
                    self.assign_scores(feat_dic=feat_dic,
                                       edger_sdic=edger_summ_genes,
                                       deseq_sdic=deseq_summ_genes,
                                       feat_id=feat_obj.id)
                    # assign read numbers
                    try:
                        feat_dic["read_count"] = read_summ_gene[feat_obj.id]
                    except KeyError:
                        feat_dic["read_count"] = None
                    # assign ballgown info
                    try:
                        feat_dic["ballgown_values"] = ballgown_gene_pm[feat_obj.id]
                    except KeyError:
                        feat_dic["ballgown_values"] = None
                    # assign stringtie
                    # NOTE(review): stringtie_tpm() keys are (GeneID,
                    # Reference) tuples, so this plain-id lookup may always
                    # take the KeyError path — confirm.
                    try:
                        feat_dic["stringtie_values"] = stringtie_tpms[feat_obj.id]
                    except KeyError:
                        feat_dic["stringtie_values"] = None
                    # assign dge information
                    self.assign_dges(feat_type="gene", feat_dic=feat_dic,
                                     feat_id=feat_obj.id,
                                     method="edgeR", dge_dict=dge_edger_gene)
                    self.assign_dges(feat_type="gene", feat_dic=feat_dic,
                                     feat_id=feat_obj.id,
                                     method="DESeq2", dge_dict=dge_deseq_gene)
                else:
                    pass
                # just to make sure that keys are strings, else json dump fails
                feat_dic_str = {}
                for key, value in feat_dic.items():
                    feat_dic_str[str(key)] = value
                json_list.append(feat_dic_str)
            json.dump(json_list, json_file, indent=4)
            # meta_list = ["seqid", "id", "source", "featuretype", "start",
            #              "end", "length", "strand", "frame", "locus_tag",
            #              "extra"]
            # df = pd.io.json.json_normalize(json_list, errors="ignore")
def assign_scores(self, feat_dic, edger_sdic, deseq_sdic, feat_id):
"""Assign scores from edger and deseq to summary dic."""
try:
feat_dic["edger_cpm"] = edger_sdic[0][feat_id]
except KeyError:
feat_dic["edger_cpm"] = None
try:
feat_dic["deseq_fpm"] = deseq_sdic[0][feat_id]
except KeyError:
feat_dic["deseq_fpm"] = None
try:
feat_dic["edger_rpkm"] = edger_sdic[1][feat_id]
except KeyError:
feat_dic["edger_rpkm"] = None
try:
feat_dic["deseq_fpkm"] = deseq_sdic[1][feat_id]
except KeyError:
feat_dic["deseq_fpkm"] = None
    def get_emapper(self):
        """Return the emapper annotation table as a dict keyed by query name.

        Reads ``emapper.emapper.annotations`` from the work directory and
        returns ``{query_name: {column: value}}``; returns None when the
        file is absent (callers must handle that).
        """
        emapper_files = os.path.join(self.workdir, "processes", "emapper",
                                     self.kingdom,
                                     "emapper.emapper.annotations")
        if os.path.exists(emapper_files) is True:
            # skiprows/skipfooter trim the emapper comment header and
            # trailing summary lines; the first remaining row is promoted
            # to the column header below.
            emap = pd.read_csv(emapper_files, sep='\t', skiprows=[0,1,2],
                               skipinitialspace=True, skipfooter=3,
                               header=None, engine='python')
            emap1 = emap.reset_index()
            emap1.columns = emap1.iloc[0]
            emap2 = emap1.drop(0).drop([0], axis=1).set_index('#query_name').to_dict(orient="index")
            return emap2
        else:
            return None
def read_summary(self, feat_type):
"""Get read values as a dictionary."""
read_file = os.path.join(self.workdir, "processes", "featureCounts",
self.kingdom, feat_type + "_count_sorted.csv")
if os.path.exists(read_file) is True:
read_data = pd.read_csv(read_file, sep=",",
index_col="Geneid")
read_dict = read_data.drop(["Unnamed: 0", "Chr", "Start", "End",
"Strand", "Length"], axis=1).to_dict(orient="index")
else:
read_dict = {}
for feat, count_dic in read_dict.items():
int_read = {}
feat_read = {}
for samp, count in count_dic.items():
int_read[samp] = int(count)
feat_read[feat] = int_read
# print(feat_read)
read_dict.update(feat_read)
# print(read_dict)
return read_dict
    def dge_summary(self, feat_type, method):
        """Summarize DGE results from edgeR or DESeq2.

        Returns {comparison_name: {feature_id: {column: value}}} built from
        every ``*et.csv`` table under the method's result directory.
        """
        dge_dir = os.path.join(self.workdir, "processes", method,
                               self.kingdom, feat_type)
        # NOTE(review): no path separator before "**" — the recursive glob
        # matches "<dir>...et.csv"; confirm this matches the result layout.
        dge_files = [f for f in glob.glob(dge_dir + "**/*et.csv", recursive=True)]
        dge_dicts = {}
        for file in dge_files:
            dge_df = pd.read_csv(file, sep=",", index_col=0)
            if method == "edgeR":
                # edgeR tables carry featureCounts columns that must be dropped
                dge_dict = dge_df.drop(["Geneid", "Chr", "Start", "End",
                                        "Strand", "Length"],
                                       axis=1).to_dict(orient="index")
            elif method == "DESeq2":
                dge_dict = dge_df.to_dict(orient="index")
            # NOTE(review): dge_dict is unbound here if *method* is neither
            # "edgeR" nor "DESeq2" — callers only pass those two.
            dge_dicts[str(os.path.basename(file).replace(".csv", ""))] = dge_dict
        return dge_dicts
def assign_dges(self, feat_type, feat_dic, feat_id, method, dge_dict):
"""Assign dge values in JSON file."""
dge_dir = os.path.join(self.workdir, "processes", method,
self.kingdom, feat_type)
dge_files = [os.path.basename(f).replace(".csv", "")
for f in glob.glob(dge_dir + "**/*et.csv",
recursive=True)]
if len(dge_files) < 1:
pass
else:
for key, value in dge_dict.items():
try:
feat_dic[key + "__" + method] = dge_dict[key][feat_id]
except KeyError:
feat_dic[key + "__" + method] = None
def pm_summary(self, feat_type, method):
"""Get FPKM values."""
if method == "edgeR":
cpm_file = os.path.join(self.workdir, "processes", method,
self.kingdom, feat_type,
feat_type + "_count_CPM.csv")
rpkm_file = os.path.join(self.workdir, "processes", method,
self.kingdom, feat_type,
feat_type + "_count_RPKM.csv")
elif method == "DESeq2":
cpm_file = os.path.join(self.workdir, "processes", method,
self.kingdom, feat_type,
feat_type + "_count_FPM.csv")
rpkm_file = os.path.join(self.workdir, "processes", method,
self.kingdom, feat_type,
feat_type + "_count_FPKM.csv")
if all([os.path.exists(cpm_file), os.path.exists(rpkm_file)]) is False:
return ({}, {})
elif all([os.path.exists(cpm_file), os.path.exists(rpkm_file)]) is True:
cpm_dict = pd.read_csv(cpm_file, sep=",",
index_col=0).to_dict(orient="index")
rpkm_dict = pd.read_csv(rpkm_file, sep=",",
index_col=0).to_dict(orient="index")
return(cpm_dict, rpkm_dict)
elif os.path.exists(cpm_file) is True and os.path.exists(rpkm_file) is False:
cpm_dict = pd.read_csv(cpm_file, sep=",",
index_col=0).to_dict(orient="index")
return(cpm_dict, {})
elif os.path.exists(rpkm_file) is True and os.path.exists(rpkm_file) is False:
rpkm_dict = pd.read_csv(rpkm_file, sep=",",
index_col=0).to_dict(orient="index")
return({}, rpkm_dict)
    def pm_summary_ballgown(self):
        """Return ballgown per-gene PM values keyed by gene, or {} if absent."""
        # NOTE(review): "summpary_PMs.csv" looks misspelled but must match
        # the producing task's output name — confirm before renaming.
        pm_file = os.path.join(self.workdir, "processes", "ballgown",
                               self.kingdom, "summpary_PMs.csv")
        if os.path.exists(pm_file) is True:
            # index_col=6 keys the dict on the seventh column; the listed
            # positional/annotation columns are dropped from the values
            pm_dict = pd.read_csv(pm_file, sep=",",
                                  index_col=6).drop(["t_id", "chr", "strand",
                                                     "start", "end",
                                                     "num_exons", "length",
                                                     "gene_id", "gene_name"],
                                                    axis=1).to_dict(orient="index")
            return pm_dict
        else:
            return {}
    def stringtie_tpm(self):
        """Collect per-sample stringtie coverage/FPKM/TPM values.

        Merges every ``*_sTie.tab`` table on (GeneID, Reference) and
        returns a dict keyed by that (GeneID, Reference) tuple.
        """
        stie_dir = os.path.join(self.workdir, "processes", "stringtie")
        stie_files = [f for f in glob.glob(stie_dir + "/**/*sTie.tab",
                                           recursive=True)]
        dflist = []
        for f in stie_files:
            df = pd.read_csv(f, sep="\t").drop(["Gene Name", "Strand",
                                                "Start", "End"], axis=1)
            # sample name = filename without the _sTie.tab suffix
            samp_name = os.path.basename(f)
            samp = re.sub("_sTie.tab", "", samp_name)
            df.columns = ["GeneID", "Reference", samp + "_cov",
                          samp + "_FPKM", samp + "_TPM"]
            dflist.append(df)
        # NOTE(review): reduce() over an empty dflist raises TypeError when
        # no *_sTie.tab files exist — confirm stringtie always ran first.
        finaldf = reduce(lambda df1, df2: pd.merge(df1, df2, on=['GeneID', 'Reference']), dflist).drop_duplicates()
        finaldic = finaldf.set_index(['GeneID', 'Reference']).to_dict(orient="index")
        return finaldic
def translate(self, nucleotide, type):
"""Takes in a string of nucleotides and translate to AA."""
if type == "CDS":
aa = nucleotide.translate()
elif type == "exon":
aa = nucleotide.translate()
else:
aa = "not translated"
return str(aa)
| |
""" Modified version of build_ext that handles fortran source files.
"""
import os
import subprocess
from glob import glob
from distutils.dep_util import newer_group
from distutils.command.build_ext import build_ext as old_build_ext
from distutils.errors import DistutilsFileError, DistutilsSetupError,\
DistutilsError
from distutils.file_util import copy_file
from numpy.distutils import log
from numpy.distutils.exec_command import filepath_from_subprocess_output
from numpy.distutils.system_info import combine_paths
from numpy.distutils.misc_util import (
filter_sources, get_ext_source_files, get_numpy_include_dirs,
has_cxx_sources, has_f_sources, is_sequence
)
from numpy.distutils.command.config_compiler import show_fortran_compilers
class build_ext (old_build_ext):
description = "build C/C++/F extensions (compile/link to build directory)"
user_options = old_build_ext.user_options + [
('fcompiler=', None,
"specify the Fortran compiler type"),
('parallel=', 'j',
"number of parallel jobs"),
('warn-error', None,
"turn all warnings into errors (-Werror)"),
]
help_options = old_build_ext.help_options + [
('help-fcompiler', None, "list available Fortran compilers",
show_fortran_compilers),
]
boolean_options = old_build_ext.boolean_options + ['warn-error']
    def initialize_options(self):
        """Set this command's extra options to their pre-finalization defaults."""
        old_build_ext.initialize_options(self)
        self.fcompiler = None
        self.parallel = None
        self.warn_error = None
    def finalize_options(self):
        """Validate options and merge include dirs into the distribution.

        Raises ValueError if --parallel/-j is not an integer; inherits
        'parallel' and 'warn_error' from the parent 'build' command when
        unset.
        """
        if self.parallel:
            try:
                self.parallel = int(self.parallel)
            except ValueError:
                raise ValueError("--parallel/-j argument must be an integer")
        # Ensure that self.include_dirs and self.distribution.include_dirs
        # refer to the same list object. finalize_options will modify
        # self.include_dirs, but self.distribution.include_dirs is used
        # during the actual build.
        # self.include_dirs is None unless paths are specified with
        # --include-dirs.
        # The include paths will be passed to the compiler in the order:
        # numpy paths, --include-dirs paths, Python include path.
        if isinstance(self.include_dirs, str):
            self.include_dirs = self.include_dirs.split(os.pathsep)
        incl_dirs = self.include_dirs or []
        if self.distribution.include_dirs is None:
            self.distribution.include_dirs = []
        self.include_dirs = self.distribution.include_dirs
        self.include_dirs.extend(incl_dirs)
        old_build_ext.finalize_options(self)
        self.set_undefined_options('build',
                                   ('parallel', 'parallel'),
                                   ('warn_error', 'warn_error'),
                                   )
    def run(self):
        """Build every configured extension.

        Runs build_src/build_clib first, sets up the C compiler plus (only
        when needed) C++, Fortran 77 and Fortran 90 compilers, propagates
        build_clib library info into each extension, builds them, and
        finally copies generated .dll files next to each package root.
        """
        if not self.extensions:
            return
        # Make sure that extension sources are complete.
        self.run_command('build_src')
        if self.distribution.has_c_libraries():
            if self.inplace:
                if self.distribution.have_run.get('build_clib'):
                    log.warn('build_clib already run, it is too late to '
                             'ensure in-place build of build_clib')
                    build_clib = self.distribution.get_command_obj(
                        'build_clib')
                else:
                    build_clib = self.distribution.get_command_obj(
                        'build_clib')
                    build_clib.inplace = 1
                    build_clib.ensure_finalized()
                    build_clib.run()
                    self.distribution.have_run['build_clib'] = 1
            else:
                self.run_command('build_clib')
                build_clib = self.get_finalized_command('build_clib')
            self.library_dirs.append(build_clib.build_clib)
        else:
            build_clib = None
        # Not including C libraries to the list of
        # extension libraries automatically to prevent
        # bogus linking commands. Extensions must
        # explicitly specify the C libraries that they use.
        from distutils.ccompiler import new_compiler
        from numpy.distutils.fcompiler import new_fcompiler
        compiler_type = self.compiler
        # Initialize C compiler:
        self.compiler = new_compiler(compiler=compiler_type,
                                     verbose=self.verbose,
                                     dry_run=self.dry_run,
                                     force=self.force)
        self.compiler.customize(self.distribution)
        self.compiler.customize_cmd(self)
        if self.warn_error:
            self.compiler.compiler.append('-Werror')
            self.compiler.compiler_so.append('-Werror')
        self.compiler.show_customization()
        # Setup directory for storing generated extra DLL files on Windows
        self.extra_dll_dir = os.path.join(self.build_temp, '.libs')
        if not os.path.isdir(self.extra_dll_dir):
            os.makedirs(self.extra_dll_dir)
        # Create mapping of libraries built by build_clib:
        clibs = {}
        if build_clib is not None:
            for libname, build_info in build_clib.libraries or []:
                if libname in clibs and clibs[libname] != build_info:
                    log.warn('library %r defined more than once,'
                             ' overwriting build_info\n%s... \nwith\n%s...'
                             % (libname, repr(clibs[libname])[:300], repr(build_info)[:300]))
                clibs[libname] = build_info
        # .. and distribution libraries:
        for libname, build_info in self.distribution.libraries or []:
            if libname in clibs:
                # build_clib libraries have a precedence before distribution ones
                continue
            clibs[libname] = build_info
        # Determine if C++/Fortran 77/Fortran 90 compilers are needed.
        # Update extension libraries, library_dirs, and macros.
        all_languages = set()
        for ext in self.extensions:
            ext_languages = set()
            c_libs = []
            c_lib_dirs = []
            macros = []
            for libname in ext.libraries:
                if libname in clibs:
                    binfo = clibs[libname]
                    c_libs += binfo.get('libraries', [])
                    c_lib_dirs += binfo.get('library_dirs', [])
                    for m in binfo.get('macros', []):
                        if m not in macros:
                            macros.append(m)
                for l in clibs.get(libname, {}).get('source_languages', []):
                    ext_languages.add(l)
            if c_libs:
                new_c_libs = ext.libraries + c_libs
                log.info('updating extension %r libraries from %r to %r'
                         % (ext.name, ext.libraries, new_c_libs))
                ext.libraries = new_c_libs
                ext.library_dirs = ext.library_dirs + c_lib_dirs
            if macros:
                log.info('extending extension %r defined_macros with %r'
                         % (ext.name, macros))
                ext.define_macros = ext.define_macros + macros
            # determine extension languages
            if has_f_sources(ext.sources):
                ext_languages.add('f77')
            if has_cxx_sources(ext.sources):
                ext_languages.add('c++')
            l = ext.language or self.compiler.detect_language(ext.sources)
            if l:
                ext_languages.add(l)
            # reset language attribute for choosing proper linker
            # (priority: c++ over f90 over f77 over plain c)
            if 'c++' in ext_languages:
                ext_language = 'c++'
            elif 'f90' in ext_languages:
                ext_language = 'f90'
            elif 'f77' in ext_languages:
                ext_language = 'f77'
            else:
                ext_language = 'c'  # default
            if l and l != ext_language and ext.language:
                log.warn('resetting extension %r language from %r to %r.' %
                         (ext.name, l, ext_language))
            ext.language = ext_language
            # global language
            all_languages.update(ext_languages)
        need_f90_compiler = 'f90' in all_languages
        need_f77_compiler = 'f77' in all_languages
        need_cxx_compiler = 'c++' in all_languages
        # Initialize C++ compiler:
        if need_cxx_compiler:
            self._cxx_compiler = new_compiler(compiler=compiler_type,
                                              verbose=self.verbose,
                                              dry_run=self.dry_run,
                                              force=self.force)
            compiler = self._cxx_compiler
            compiler.customize(self.distribution, need_cxx=need_cxx_compiler)
            compiler.customize_cmd(self)
            compiler.show_customization()
            self._cxx_compiler = compiler.cxx_compiler()
        else:
            self._cxx_compiler = None
        # Initialize Fortran 77 compiler:
        if need_f77_compiler:
            ctype = self.fcompiler
            self._f77_compiler = new_fcompiler(compiler=self.fcompiler,
                                               verbose=self.verbose,
                                               dry_run=self.dry_run,
                                               force=self.force,
                                               requiref90=False,
                                               c_compiler=self.compiler)
            fcompiler = self._f77_compiler
            if fcompiler:
                ctype = fcompiler.compiler_type
                fcompiler.customize(self.distribution)
            if fcompiler and fcompiler.get_version():
                fcompiler.customize_cmd(self)
                fcompiler.show_customization()
            else:
                self.warn('f77_compiler=%s is not available.' %
                          (ctype))
                self._f77_compiler = None
        else:
            self._f77_compiler = None
        # Initialize Fortran 90 compiler:
        if need_f90_compiler:
            ctype = self.fcompiler
            self._f90_compiler = new_fcompiler(compiler=self.fcompiler,
                                               verbose=self.verbose,
                                               dry_run=self.dry_run,
                                               force=self.force,
                                               requiref90=True,
                                               c_compiler=self.compiler)
            fcompiler = self._f90_compiler
            if fcompiler:
                ctype = fcompiler.compiler_type
                fcompiler.customize(self.distribution)
            if fcompiler and fcompiler.get_version():
                fcompiler.customize_cmd(self)
                fcompiler.show_customization()
            else:
                self.warn('f90_compiler=%s is not available.' %
                          (ctype))
                self._f90_compiler = None
        else:
            self._f90_compiler = None
        # Build extensions
        self.build_extensions()
        # Copy over any extra DLL files
        # FIXME: In the case where there are more than two packages,
        # we blindly assume that both packages need all of the libraries,
        # resulting in a larger wheel than is required. This should be fixed,
        # but it's so rare that I won't bother to handle it.
        pkg_roots = {
            self.get_ext_fullname(ext.name).split('.')[0]
            for ext in self.extensions
        }
        for pkg_root in pkg_roots:
            shared_lib_dir = os.path.join(pkg_root, '.libs')
            if not self.inplace:
                shared_lib_dir = os.path.join(self.build_lib, shared_lib_dir)
            for fn in os.listdir(self.extra_dll_dir):
                if not os.path.isdir(shared_lib_dir):
                    os.makedirs(shared_lib_dir)
                if not fn.lower().endswith('.dll'):
                    continue
                runtime_lib = os.path.join(self.extra_dll_dir, fn)
                copy_file(runtime_lib, shared_lib_dir)
def swig_sources(self, sources, extensions=None):
# Do nothing. Swig sources have been handled in build_src command.
return sources
    def build_extension(self, ext):
        """Compile and link one extension, dispatching C, C++, f77 and f90
        sources to the compilers prepared by run().

        Skips the build when all outputs are up to date; raises
        DistutilsSetupError for malformed 'sources' and DistutilsError when
        a required C++/Fortran compiler is missing.
        """
        sources = ext.sources
        if sources is None or not is_sequence(sources):
            raise DistutilsSetupError(
                ("in 'ext_modules' option (extension '%s'), " +
                 "'sources' must be present and must be " +
                 "a list of source filenames") % ext.name)
        sources = list(sources)
        if not sources:
            return
        fullname = self.get_ext_fullname(ext.name)
        if self.inplace:
            modpath = fullname.split('.')
            package = '.'.join(modpath[0:-1])
            base = modpath[-1]
            build_py = self.get_finalized_command('build_py')
            package_dir = build_py.get_package_dir(package)
            ext_filename = os.path.join(package_dir,
                                        self.get_ext_filename(base))
        else:
            ext_filename = os.path.join(self.build_lib,
                                        self.get_ext_filename(fullname))
        depends = sources + ext.depends
        if not (self.force or newer_group(depends, ext_filename, 'newer')):
            log.debug("skipping '%s' extension (up-to-date)", ext.name)
            return
        else:
            log.info("building '%s' extension", ext.name)
        extra_args = ext.extra_compile_args or []
        macros = ext.define_macros[:]
        for undef in ext.undef_macros:
            macros.append((undef,))
        c_sources, cxx_sources, f_sources, fmodule_sources = \
            filter_sources(ext.sources)
        if self.compiler.compiler_type == 'msvc':
            if cxx_sources:
                # Needed to compile kiva.agg._agg extension.
                extra_args.append('/Zm1000')
            # this hack works around the msvc compiler attributes
            # problem, msvc uses its own convention :(
            c_sources += cxx_sources
            cxx_sources = []
        # Set Fortran/C++ compilers for compilation and linking.
        if ext.language == 'f90':
            fcompiler = self._f90_compiler
        elif ext.language == 'f77':
            fcompiler = self._f77_compiler
        else:  # in case ext.language is c++, for instance
            fcompiler = self._f90_compiler or self._f77_compiler
        if fcompiler is not None:
            fcompiler.extra_f77_compile_args = (ext.extra_f77_compile_args or []) if hasattr(
                ext, 'extra_f77_compile_args') else []
            fcompiler.extra_f90_compile_args = (ext.extra_f90_compile_args or []) if hasattr(
                ext, 'extra_f90_compile_args') else []
        cxx_compiler = self._cxx_compiler
        # check for the availability of required compilers
        # NOTE(review): this message lacks a space between "sources" and
        # "but" when concatenated.
        if cxx_sources and cxx_compiler is None:
            raise DistutilsError("extension %r has C++ sources"
                                 "but no C++ compiler found" % (ext.name))
        if (f_sources or fmodule_sources) and fcompiler is None:
            raise DistutilsError("extension %r has Fortran sources "
                                 "but no Fortran compiler found" % (ext.name))
        if ext.language in ['f77', 'f90'] and fcompiler is None:
            self.warn("extension %r has Fortran libraries "
                      "but no Fortran linker found, using default linker" % (ext.name))
        if ext.language == 'c++' and cxx_compiler is None:
            self.warn("extension %r has C++ libraries "
                      "but no C++ linker found, using default linker" % (ext.name))
        kws = {'depends': ext.depends}
        output_dir = self.build_temp
        include_dirs = ext.include_dirs + get_numpy_include_dirs()
        c_objects = []
        if c_sources:
            log.info("compiling C sources")
            c_objects = self.compiler.compile(c_sources,
                                              output_dir=output_dir,
                                              macros=macros,
                                              include_dirs=include_dirs,
                                              debug=self.debug,
                                              extra_postargs=extra_args,
                                              **kws)
        if cxx_sources:
            log.info("compiling C++ sources")
            c_objects += cxx_compiler.compile(cxx_sources,
                                              output_dir=output_dir,
                                              macros=macros,
                                              include_dirs=include_dirs,
                                              debug=self.debug,
                                              extra_postargs=extra_args,
                                              **kws)
        extra_postargs = []
        f_objects = []
        if fmodule_sources:
            log.info("compiling Fortran 90 module sources")
            module_dirs = ext.module_dirs[:]
            module_build_dir = os.path.join(
                self.build_temp, os.path.dirname(
                    self.get_ext_filename(fullname)))
            self.mkpath(module_build_dir)
            # when the compiler has no module-dir switch, .mod files land in
            # the cwd; snapshot what already exists so new ones can be moved
            if fcompiler.module_dir_switch is None:
                existing_modules = glob('*.mod')
            extra_postargs += fcompiler.module_options(
                module_dirs, module_build_dir)
            f_objects += fcompiler.compile(fmodule_sources,
                                           output_dir=self.build_temp,
                                           macros=macros,
                                           include_dirs=include_dirs,
                                           debug=self.debug,
                                           extra_postargs=extra_postargs,
                                           depends=ext.depends)
            if fcompiler.module_dir_switch is None:
                for f in glob('*.mod'):
                    if f in existing_modules:
                        continue
                    t = os.path.join(module_build_dir, f)
                    if os.path.abspath(f) == os.path.abspath(t):
                        continue
                    if os.path.isfile(t):
                        os.remove(t)
                    try:
                        self.move_file(f, module_build_dir)
                    except DistutilsFileError:
                        log.warn('failed to move %r to %r' %
                                 (f, module_build_dir))
        if f_sources:
            log.info("compiling Fortran sources")
            f_objects += fcompiler.compile(f_sources,
                                           output_dir=self.build_temp,
                                           macros=macros,
                                           include_dirs=include_dirs,
                                           debug=self.debug,
                                           extra_postargs=extra_postargs,
                                           depends=ext.depends)
        if f_objects and not fcompiler.can_ccompiler_link(self.compiler):
            unlinkable_fobjects = f_objects
            objects = c_objects
        else:
            unlinkable_fobjects = []
            objects = c_objects + f_objects
        if ext.extra_objects:
            objects.extend(ext.extra_objects)
        extra_args = ext.extra_link_args or []
        libraries = self.get_libraries(ext)[:]
        library_dirs = ext.library_dirs[:]
        linker = self.compiler.link_shared_object
        # Always use system linker when using MSVC compiler.
        if self.compiler.compiler_type in ('msvc', 'intelw', 'intelemw'):
            # expand libraries with fcompiler libraries as we are
            # not using fcompiler linker
            self._libs_with_msvc_and_fortran(
                fcompiler, libraries, library_dirs)
        elif ext.language in ['f77', 'f90'] and fcompiler is not None:
            linker = fcompiler.link_shared_object
        if ext.language == 'c++' and cxx_compiler is not None:
            linker = cxx_compiler.link_shared_object
        if fcompiler is not None:
            objects, libraries = self._process_unlinkable_fobjects(
                objects, libraries,
                fcompiler, library_dirs,
                unlinkable_fobjects)
        linker(objects, ext_filename,
               libraries=libraries,
               library_dirs=library_dirs,
               runtime_library_dirs=ext.runtime_library_dirs,
               extra_postargs=extra_args,
               export_symbols=self.get_export_symbols(ext),
               debug=self.debug,
               build_temp=self.build_temp,
               target_lang=ext.language)
    def _add_dummy_mingwex_sym(self, c_sources):
        """Compile the gfortran/VS2003 hack source and archive it as the
        static library ``_gfortran_workaround`` under build_clib.

        *c_sources* is unused; kept for call-site compatibility.
        """
        build_src = self.get_finalized_command("build_src").build_src
        build_clib = self.get_finalized_command("build_clib").build_clib
        objects = self.compiler.compile([os.path.join(build_src,
                                                      "gfortran_vs2003_hack.c")],
                                        output_dir=self.build_temp)
        self.compiler.create_static_lib(
            objects, "_gfortran_workaround", output_dir=build_clib, debug=self.debug)
    def _process_unlinkable_fobjects(self, objects, libraries,
                                     fcompiler, library_dirs,
                                     unlinkable_fobjects):
        """Expand fake ``.fobjects``/``.cobjects`` static libraries into real
        object lists and wrap unlinkable Fortran objects into linkable ones.

        Returns a new (objects, libraries) pair; the input lists are copied,
        not mutated.
        """
        libraries = list(libraries)
        objects = list(objects)
        unlinkable_fobjects = list(unlinkable_fobjects)
        # Expand possible fake static libraries to objects
        for lib in list(libraries):
            for libdir in library_dirs:
                fake_lib = os.path.join(libdir, lib + '.fobjects')
                if os.path.isfile(fake_lib):
                    # Replace fake static library
                    libraries.remove(lib)
                    with open(fake_lib, 'r') as f:
                        unlinkable_fobjects.extend(f.read().splitlines())
                    # Expand C objects
                    # NOTE(review): assumes a '.cobjects' file always
                    # accompanies the '.fobjects' file — confirm.
                    c_lib = os.path.join(libdir, lib + '.cobjects')
                    with open(c_lib, 'r') as f:
                        objects.extend(f.read().splitlines())
        # Wrap unlinkable objects to a linkable one
        if unlinkable_fobjects:
            fobjects = [os.path.relpath(obj) for obj in unlinkable_fobjects]
            wrapped = fcompiler.wrap_unlinkable_objects(
                fobjects, output_dir=self.build_temp,
                extra_dll_dir=self.extra_dll_dir)
            objects.extend(wrapped)
        return objects, libraries
    def _libs_with_msvc_and_fortran(self, fcompiler, c_libraries,
                                    c_library_dirs):
        """Augment *c_libraries*/*c_library_dirs* in place so the MSVC linker
        can resolve Fortran-compiled (``lib*.a``-style) libraries.

        No-op when *fcompiler* is None.
        """
        if fcompiler is None:
            return
        for libname in c_libraries:
            if libname.startswith('msvc'):
                continue
            fileexists = False
            for libdir in c_library_dirs or []:
                libfile = os.path.join(libdir, '%s.lib' % (libname))
                if os.path.isfile(libfile):
                    fileexists = True
                    break
            if fileexists:
                continue
            # make g77-compiled static libs available to MSVC
            fileexists = False
            for libdir in c_library_dirs:
                libfile = os.path.join(libdir, 'lib%s.a' % (libname))
                if os.path.isfile(libfile):
                    # copy libname.a file to name.lib so that MSVC linker
                    # can find it
                    libfile2 = os.path.join(self.build_temp, libname + '.lib')
                    copy_file(libfile, libfile2)
                    if self.build_temp not in c_library_dirs:
                        c_library_dirs.append(self.build_temp)
                    fileexists = True
                    break
            if fileexists:
                continue
            log.warn('could not find library %r in directories %s'
                     % (libname, c_library_dirs))
        # Always use system linker when using MSVC compiler.
        f_lib_dirs = []
        for dir in fcompiler.library_dirs:
            # correct path when compiling in Cygwin but with normal Win
            # Python
            if dir.startswith('/usr/lib'):
                try:
                    dir = subprocess.check_output(['cygpath', '-w', dir])
                except (OSError, subprocess.CalledProcessError):
                    pass
                else:
                    dir = filepath_from_subprocess_output(dir)
            f_lib_dirs.append(dir)
        c_library_dirs.extend(f_lib_dirs)
        # make g77-compiled static libs available to MSVC
        for lib in fcompiler.libraries:
            if not lib.startswith('msvc'):
                c_libraries.append(lib)
                p = combine_paths(f_lib_dirs, 'lib' + lib + '.a')
                if p:
                    dst_name = os.path.join(self.build_temp, lib + '.lib')
                    if not os.path.isfile(dst_name):
                        copy_file(p[0], dst_name)
                    if self.build_temp not in c_library_dirs:
                        c_library_dirs.append(self.build_temp)
def get_source_files(self):
self.check_extensions_list(self.extensions)
filenames = []
for ext in self.extensions:
filenames.extend(get_ext_source_files(ext))
return filenames
def get_outputs(self):
self.check_extensions_list(self.extensions)
outputs = []
for ext in self.extensions:
if not ext.sources:
continue
fullname = self.get_ext_fullname(ext.name)
outputs.append(os.path.join(self.build_lib,
self.get_ext_filename(fullname)))
return outputs
| |
# written by John Gregoire
# edited by Allison Schubauer and Daisy Hernandez
# 6/26/2013
# first version of figure of merit functions for automated
# data processing
from intermediatefunctions_firstversion import numpy
import intermediatefunctions_firstversion as inter
# this dictionary is required to know which figures of merit should
# be calculated for each type of experiment
# TO DO: come up with a better naming convention for this dictionary
# Maps experiment type -> {figure-of-merit function name: list of argument
# lists the function is evaluated with}; an empty list means the function
# is called with its defaults only.
EXPERIMENT_FUNCTIONS = {'CV': {'TafelSlopeVPerDec': [], 'TafelEstart': [],
                               'TafelFitVRange': [], 'TafelLogIex': [],
                               'Max': [['I(A)'], ['I(A)_LinSub']], 'Min': [['I(A)'], ['I(A)_LinSub']],
                               'EatIThresh': [['I(A)'], ['I(A)_LinSub']],
                               'IllDiff': [['I(A)', 'max'], ['I(A)', 'min'],
                                           ['I(A)_LinSub', 'max'], ['I(A)_LinSub', 'min']]},
                        'OCV': {'Final': [['Ewe(V)_E0']], 'Avg': [['Ewe(V)_E0']],
                                'ArrSS': [['Ewe(V)_E0']], 'IllDiff': [['Ewe(V)_E0', 'avg']]},
                        'CP': {'Final': [['Ewe(V)_E0']], 'Avg': [['Ewe(V)_E0']],
                               'ArrSS': [['Ewe(V)_E0']], 'IllDiff': [['Ewe(V)_E0', 'avg']]},
                        'CA': {'Final': [['I(A)']], 'Avg': [['I(A)']],
                               'ArrSS': [['I(A)']], 'IllDiff': [['I(A)', 'avg']]}}
zero_thresh = 5.e-8  # threshold below which measured value is equivalent to zero -
                     # this is a property of the instrument
"""necessary arguments:
vshift=-(.187-0.045)
booldev_frac = 0.5
booldev_nout = 3
dydev_frac = 0.2
dydev_nout = 5
dydev_abs = 0.
dx = 1.
maxfracoutliers = 0.5
critsegVrange = 0.04
critsegIend = 3.e-5
critsegVend = 0.36
SGpts = 10 (nptsoneside for Savitzky-Golay smoothing)
"""
def TafelSlopeVPerDec(rawd, interd, var='I(A)', vk='Ewe(V)_E0', boolDevFrac=0.5, boolDevNOut=3,
                      dyDevFrac=0.2, dyDevNOut=5, dyDevAbs=0.,
                      dx=1., maxFracOutliers=0.5, critSegVRange=0.04, critSegIEnd=3.e-5,
                      critSegVEnd=0.36, SavGolPts=10, inverty=False):
    """Compute the Tafel slope (V per decade of current) figure of merit.

    Fills interd['TafelSlope'], ['TafelEstart'], ['TafelFitVrange'] and
    ['TafelLogIex'] with one entry (value or NaN) per experiment segment and
    returns the first non-NaN Tafel slope as the scalar FOM, or NaN if no
    segment produced a usable fit.
    """
    # initialize the arrays to hold Tafel values (considered both
    # intermediate data and figures of merit)
    interd['TafelSlope'] = []
    interd['TafelEstart'] = []
    # BUGFIX: key was 'TafelFitErange'; every append below and the
    # TafelFitVRange() reader use 'TafelFitVrange', so the old key was never
    # read and the first per-segment append raised KeyError.
    interd['TafelFitVrange'] = []
    interd['TafelLogIex'] = []
    booldn_segstart = 3 * boolDevNOut
    dn_segstart = 3 * dyDevNOut
    inter.calcsegind(rawd, interd, SGpts=SavGolPts)  # breaks experiment into segments
    inter.calccurvregions(rawd, interd, SGpts=SavGolPts, inverty=inverty)  # runs on all segments
    linsub = inter.calcLinSub(rawd, interd, var=var, inverty=inverty)  # returns 1 if successful, 0 if not
    if inverty:
        yinv = -1.
    else:
        yinv = 1.
    if not linsub:
        # linear subtraction failed: record NaN for every segment and bail out
        nanlist = [float('nan')] * len(interd['segprops_dlist'])
        interd['TafelSlope'] = nanlist
        interd['TafelEstart'] = nanlist
        interd['TafelFitVrange'] = nanlist
        interd['TafelLogIex'] = nanlist
        return float('nan')
    # Savitzky-Golay smoothing of the linear-subtracted current.
    # BUGFIX: was SGpts=SGpts, but no name 'SGpts' exists in this scope
    # (the parameter is SavGolPts) -> NameError on every call reaching here.
    inter.SegSG(rawd, interd, SGpts=SavGolPts, order=1, k=var + '_LinSub')
    for seg in range(len(interd['segprops_dlist'])):
        inds = interd['segprops_dlist'][seg]['inds']
        # NOTE(review): key is hard-coded to 'I(A)_LinSub_SG' even though the
        # smoothing above used var + '_LinSub' -- confirm whether this should
        # track `var` for var != 'I(A)'.
        i = interd['I(A)_LinSub_SG'][inds] * yinv
        # potential may live in the raw data or in the intermediates
        if vk in rawd.keys():
            v = rawd[vk]
        else:
            v = interd[vk]
        v = v[inds]
        invboolarr = numpy.float32(i <= zero_thresh)
        istart_segs, len_segs, fitdy_segs, fitinterc_segs = inter.findzerosegs(
            invboolarr, boolDevFrac, boolDevNOut, booldn_segstart, SGnpts=SavGolPts,
            dx=dx, maxfracoutliers=maxFracOutliers)
        if len(istart_segs) == 0:
            # no Tafel segments
            interd['TafelSlope'].append(float('nan'))
            interd['TafelEstart'].append(float('nan'))
            interd['TafelFitVrange'].append(float('nan'))
            interd['TafelLogIex'].append(float('nan'))
            continue
        # use the longest above-threshold run as the candidate fit window
        ind = numpy.argmax(len_segs)
        i0 = istart_segs[ind]
        i1 = i0 + len_segs[ind]
        taffitinds = numpy.arange(i0, i1)
        interd['segprops_dlist'][seg]['TafelFitInds'] = inds[taffitinds]
        i = i[i0:i1]
        i[i < zero_thresh] = zero_thresh  # needed due to outliers
        v = v[i0:i1]
        il = numpy.log10(i)
        try:
            istart_segs, len_segs, fitdy_segs, fitinterc_segs, dy = inter.findlinearsegs(
                il, dyDevFrac, dyDevNOut, dn_segstart, dydev_abs=dyDevAbs, dx=dx, critdy_fracmaxdy=None)
        except Exception:
            # BUGFIX: last key was 'Tafel_logExCurrent', which is never
            # initialized (KeyError) nor read back; use 'TafelLogIex' like
            # every other NaN branch.
            interd['TafelSlope'].append(float('nan'))
            interd['TafelEstart'].append(float('nan'))
            interd['TafelFitVrange'].append(float('nan'))
            interd['TafelLogIex'].append(float('nan'))
            continue
        if len(istart_segs) == 0:
            # no Tafel segments
            interd['TafelSlope'].append(float('nan'))
            interd['TafelEstart'].append(float('nan'))
            interd['TafelFitVrange'].append(float('nan'))
            interd['TafelLogIex'].append(float('nan'))
            continue
        # only take those segments covering a certain V range and with a min
        # current for the top 10th of the V range in the segment and positive
        # slope from there on out, and then take the steepest one.
        ind = None
        maxdy = 0
        npts = critSegVRange / dx
        npts2 = max(2, npts // 10 + 1)
        for count2, (it0, slen, dyv) in enumerate(zip(istart_segs, len_segs, fitdy_segs)):
            if slen < npts:
                continue
            it1 = it0 + slen
            if numpy.mean(i[it1 - npts2:it1]) < critSegIEnd:
                continue
            if numpy.mean(v[it1 - npts2:it1]) < critSegVEnd:
                continue
            if numpy.any(dy[it1:] < 0.):
                continue
            if dyv > maxdy:
                maxdy = dyv
                ind = count2
        if ind is None:
            # no Tafel segments
            interd['TafelSlope'].append(float('nan'))
            interd['TafelEstart'].append(float('nan'))
            interd['TafelFitVrange'].append(float('nan'))
            interd['TafelLogIex'].append(float('nan'))
            continue
        i0 = istart_segs[ind]
        i1 = i0 + len_segs[ind]
        tafinds = numpy.arange(i0, i1)
        it = il[tafinds]
        vt = v[tafinds]
        # linear fit of log10(current) vs potential: slope -> Tafel slope,
        # intercept -> log10 of the exchange current
        fitdy, fitint = numpy.polyfit(vt, it, 1)
        interd['TafelSlope'].append(1. / fitdy)
        interd['TafelEstart'].append(v[0])
        interd['TafelFitVrange'].append(vt.max() - vt.min())
        interd['TafelLogIex'].append(fitint)
        interd['segprops_dlist'][seg]['TafelInds'] = inds[taffitinds][tafinds]
    # FOMs (the entire list): in interd there is a fom for each segment,
    # but save the first non-NaN one as the scalar FOM
    fomarr = numpy.array(interd['TafelSlope'])
    goodinds = numpy.where(numpy.logical_not(numpy.isnan(fomarr)))[0]
    if len(goodinds) == 0:
        return float('nan')
    return fomarr[goodinds[0]]
def TafelEstart(rawd, interd):
    """Scalar FOM: first non-NaN per-segment Tafel start potential.

    `rawd` is unused; kept for the uniform FOM-function signature.
    """
    values = numpy.array(interd['TafelEstart'])
    valid = numpy.where(~numpy.isnan(values))[0]
    if not len(valid):
        return float('nan')
    return values[valid[0]]
def TafelFitVRange(rawd, interd):
    """Scalar FOM: first non-NaN per-segment Tafel fit voltage range.

    `rawd` is unused; kept for the uniform FOM-function signature.
    """
    values = numpy.array(interd['TafelFitVrange'])
    valid = numpy.where(~numpy.isnan(values))[0]
    if not len(valid):
        return float('nan')
    return values[valid[0]]
def TafelLogIex(rawd, interd):
    """Scalar FOM: first non-NaN per-segment log10 exchange current.

    `rawd` is unused; kept for the uniform FOM-function signature.
    """
    values = numpy.array(interd['TafelLogIex'])
    valid = numpy.where(~numpy.isnan(values))[0]
    if not len(valid):
        return float('nan')
    return values[valid[0]]
def ArrSS(rawd, interd, x=['Ewe(V)_E0', 'I(A)', 'I(A)_LinSub'],
          weightExp=1., numTestPts=10):
    """Steady-state mean of series x.

    Grows a prefix window in steps of numTestPts while the weighted standard
    deviation std(prefix)/n**weightExp keeps decreasing, then returns the
    mean of the last accepted window.  The list default for `x` documents the
    keys callers are expected to pass (see EXPERIMENT_FUNCTIONS).
    """
    data = rawd[x] if x in rawd else interd[x]
    n = numTestPts
    # seed the comparison value above the first window's weighted std
    prev = data[:n].std() / n ** weightExp + 1
    while data[:n].std() / n ** weightExp < prev and n < len(data):
        prev = data[:n].std() / n ** weightExp
        n += numTestPts
    return data[:n].mean()
def EatIThresh(rawd, interd, i=['I(A)', 'I(A)_LinSub'], v='Ewe(V)_E0', iThresh=1e-5,
               numConsecPts=20, setAbove=1, noThresh=numpy.nan):
    """Mean potential over the first run of numConsecPts points whose current
    stays above (setAbove=1) or below (setAbove=0) iThresh.

    Returns `noThresh` (NaN by default) when the threshold is never held for
    numConsecPts consecutive points.  The list default for `i` documents the
    keys callers are expected to pass (see EXPERIMENT_FUNCTIONS).
    """
    # 'I(A)_LinSub' lives in the intermediates; anything else in the raw data
    if i == 'I(A)_LinSub':
        i = interd[i]
    else:
        i = rawd[i]
    if v in rawd.keys():
        v = rawd[v]
    else:
        v = interd[v]
    if not setAbove:  # 0 for below, 1 for above
        # BUGFIX: negate a copy -- the previous `i *= -1` mutated the
        # caller's data array in place, corrupting rawd/interd.
        i = -1 * i
        iThresh *= -1
    keyPts = numpy.int16(i >= iThresh)
    # product over a sliding window is 1 only if all points pass
    keyPtsConsec = [keyPts[x:x+numConsecPts].prod()
                    for x in range(len(keyPts)-numConsecPts)]
    if True in keyPtsConsec:
        ival = keyPtsConsec.index(True)
        return v[ival:ival+numConsecPts].mean()
    else:
        # return value indicating threshold not reached
        return noThresh
def Avg(rawd, interd, x=['Ewe(V)_E0', 'I(A)', 'I(A)_LinSub'], t='t(s)', interval=1000,
        numStdDevs=2., numPts=1000, startAtEnd=0):
    """Mean of series x over the first (or, with startAtEnd, last) `interval`
    seconds, after mean/std outlier removal.

    The list default for `x` documents the keys callers are expected to pass
    (see EXPERIMENT_FUNCTIONS).
    """
    data = rawd[x] if x in rawd else interd[x]
    times = rawd[t]
    if startAtEnd:
        # start from the end of the run: reverse both series
        data = data[::-1]
        times = times[::-1]
    # keep only the requested time window relative to the (possibly reversed) start
    data = data[numpy.abs(times - times[0]) < interval]
    # strip outliers via the mean/std criterion; // = integer division
    data = inter.removeoutliers_meanstd(data, numPts // 2, numStdDevs)
    return data.mean()
def Final(rawd, interd, x=['Ewe(V)_E0', 'I(A)', 'I(A)_LinSub']):
    """Return the final measured value of series x (raw data takes precedence)."""
    source = rawd if x in rawd else interd
    return source[x][-1]
def Max(rawd, interd, x=['Ewe(V)_E0', 'I(A)', 'I(A)_LinSub']):
    """Return the maximum of series x (raw data takes precedence)."""
    series = rawd[x] if x in rawd else interd[x]
    return numpy.max(series)
def Min(rawd, interd, x=['Ewe(V)_E0', 'I(A)', 'I(A)_LinSub']):
    """Return the minimum of series x (raw data takes precedence)."""
    series = rawd[x] if x in rawd else interd[x]
    return numpy.min(series)
def IllDiff(rawd, interd, illum='Illum', thisvar=['Ewe(V)_E0', 'I(A)', 'I(A)_LinSub'],
            othervar='I(A)', t='t(s)', fomName=['min', 'max', 'avg'],
            lightStart=0.4, lightEnd=0.95, darkStart=0.4, darkEnd=0.95,
            illSigKey='Ach(V)', sigTimeShift=0., illThresh=0.8,
            illInvert=1):
    """Light/dark difference FOM for an illumination experiment.

    Uses inter.calcdiff_ill_caller to compute illuminated-minus-dark
    differences of `thisvar` and returns their min, max or mean according to
    fomName.  Returns NaN (and discards any 'ill*' intermediates) when the
    data does not look like an illumination experiment.  The list defaults
    for `thisvar`/`fomName` document the values callers are expected to pass
    (see EXPERIMENT_FUNCTIONS).
    """
    if thisvar.startswith('I(A)'):
        # when the FOM variable is a current, diff against the potential instead
        othervar = 'Ewe(V)_E0'
    if sigTimeShift:
        # add intermediate value 'IllumMod': time-shifted illumination signal
        interd['IllumMod'] = inter.illumtimeshift(rawd, illSigKey, t, sigTimeShift)
        illSigKey = 'IllumMod'
        if illInvert:  # logical invert
            # multiply illumination signal by -1
            interd['IllumMod'] *= -1
    elif illInvert:  # logical invert
        # add intermediate value 'IllumMod': inverted illumination signal
        interd['IllumMod'] = -1*rawd[illSigKey]
        illSigKey = 'IllumMod'
    err = inter.calcdiff_ill_caller(rawd, interd, ikey = illSigKey,
                                    thresh = illThresh, ykeys = [thisvar],
                                    xkeys = [othervar, t],
                                    illfracrange = (lightStart, lightEnd),
                                    darkfracrange = (darkStart, darkEnd))
    if err:
        # if this is not an illumination experiment, intermediate
        # illumination values aren't necessary.
        # BUGFIX: snapshot the matching keys before deleting -- deleting
        # while iterating a live keys() view (through lazy filter) raises
        # RuntimeError on Python 3.
        for illIntermed in [key for key in interd.keys() if 'ill' in key.lower()]:
            del interd[illIntermed]
        return float('nan')
    if fomName == 'min':
        return min(interd[thisvar+'_illdiff'])
    if fomName == 'max':
        return max(interd[thisvar+'_illdiff'])
    else:
        return interd[thisvar+'_illdiffmean']
| |
# pylint: disable=g-bad-file-header
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for basic_session_run_hooks."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os.path
import shutil
import tempfile
import time
from tensorflow.python.client import session as session_lib
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import meta_graph
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables as variables_lib
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
from tensorflow.python.summary import summary as summary_lib
from tensorflow.python.summary.writer import fake_summary_writer
from tensorflow.python.summary.writer import writer_cache
from tensorflow.python.training import basic_session_run_hooks
from tensorflow.python.training import checkpoint_utils
from tensorflow.python.training import monitored_session
from tensorflow.python.training import session_run_hook
from tensorflow.python.training import training_util
# Provide a realistic start time for unit tests where we need to mock out
# calls to time.time().  Value is seconds since the Unix epoch; the exact
# instant is arbitrary -- it only needs to be fixed and plausibly large.
MOCK_START_TIME = 1484695987.209386
class MockCheckpointSaverListener(
    basic_session_run_hooks.CheckpointSaverListener):
  """Counts listener callbacks; can request a training stop after a save."""

  def __init__(self):
    self.begin_count = 0
    self.before_save_count = 0
    self.after_save_count = 0
    self.end_count = 0
    # When set by a test, after_save() asks the session to stop.
    self.ask_for_stop = False

  def begin(self):
    self.begin_count += 1

  def before_save(self, session, global_step):
    self.before_save_count += 1

  def after_save(self, session, global_step):
    self.after_save_count += 1
    return True if self.ask_for_stop else None

  def end(self, session, global_step):
    self.end_count += 1

  def get_counts(self):
    """Return a snapshot of all callback counters."""
    return {'begin': self.begin_count,
            'before_save': self.before_save_count,
            'after_save': self.after_save_count,
            'end': self.end_count}
class SecondOrStepTimerTest(test.TestCase):
  """Unit tests for basic_session_run_hooks.SecondOrStepTimer."""
  @test_util.run_deprecated_v1
  def test_raise_in_both_secs_and_steps(self):
    # Exactly one of every_secs/every_steps may be supplied.
    with self.assertRaises(ValueError):
      basic_session_run_hooks.SecondOrStepTimer(every_secs=2.0, every_steps=10)
  @test_util.run_deprecated_v1
  def test_raise_in_none_secs_and_steps(self):
    # ...and at least one of them must be supplied.
    with self.assertRaises(ValueError):
      basic_session_run_hooks.SecondOrStepTimer()
  @test.mock.patch.object(time, 'time')
  def test_every_secs(self, mock_time):
    """Triggers once per mocked wall-clock second, and only for a new step."""
    mock_time.return_value = MOCK_START_TIME
    timer = basic_session_run_hooks.SecondOrStepTimer(every_secs=1.0)
    self.assertTrue(timer.should_trigger_for_step(1))
    timer.update_last_triggered_step(1)
    self.assertFalse(timer.should_trigger_for_step(1))
    self.assertFalse(timer.should_trigger_for_step(2))
    # Advance the mocked clock past the trigger interval.
    mock_time.return_value += 1.0
    self.assertFalse(timer.should_trigger_for_step(1))  # same step: no trigger
    self.assertTrue(timer.should_trigger_for_step(2))
  def test_every_steps(self):
    """Triggers when the step advances by more than every_steps."""
    timer = basic_session_run_hooks.SecondOrStepTimer(every_steps=3)
    self.assertTrue(timer.should_trigger_for_step(1))
    timer.update_last_triggered_step(1)
    self.assertFalse(timer.should_trigger_for_step(1))
    self.assertFalse(timer.should_trigger_for_step(2))
    self.assertFalse(timer.should_trigger_for_step(3))
    self.assertTrue(timer.should_trigger_for_step(4))
  def test_update_last_triggered_step(self):
    """update_last_triggered_step reports elapsed secs/steps since last call."""
    timer = basic_session_run_hooks.SecondOrStepTimer(every_steps=1)
    elapsed_secs, elapsed_steps = timer.update_last_triggered_step(1)
    # First call has no previous trigger to measure against.
    self.assertEqual(None, elapsed_secs)
    self.assertEqual(None, elapsed_steps)
    elapsed_secs, elapsed_steps = timer.update_last_triggered_step(5)
    self.assertLess(0, elapsed_secs)
    self.assertEqual(4, elapsed_steps)
    elapsed_secs, elapsed_steps = timer.update_last_triggered_step(7)
    self.assertLess(0, elapsed_secs)
    self.assertEqual(2, elapsed_steps)
class StopAtStepTest(test.TestCase):
  """Unit tests for basic_session_run_hooks.StopAtStepHook."""
  def test_raise_in_both_last_step_and_num_steps(self):
    # last_step and num_steps are mutually exclusive.
    with self.assertRaises(ValueError):
      basic_session_run_hooks.StopAtStepHook(num_steps=10, last_step=20)
  def test_stop_based_on_last_step(self):
    """Hook requests a stop once global_step reaches last_step (absolute)."""
    h = basic_session_run_hooks.StopAtStepHook(last_step=10)
    with ops.Graph().as_default():
      global_step = training_util.get_or_create_global_step()
      no_op = control_flow_ops.no_op()
      h.begin()
      with session_lib.Session() as sess:
        mon_sess = monitored_session._HookedSession(sess, [h])
        sess.run(state_ops.assign(global_step, 5))
        h.after_create_session(sess, None)
        mon_sess.run(no_op)
        self.assertFalse(mon_sess.should_stop())
        sess.run(state_ops.assign(global_step, 9))
        mon_sess.run(no_op)
        self.assertFalse(mon_sess.should_stop())
        sess.run(state_ops.assign(global_step, 10))
        mon_sess.run(no_op)
        self.assertTrue(mon_sess.should_stop())
        # Past last_step the hook keeps requesting a stop even after a reset.
        sess.run(state_ops.assign(global_step, 11))
        mon_sess._should_stop = False
        mon_sess.run(no_op)
        self.assertTrue(mon_sess.should_stop())
  def test_stop_based_on_num_step(self):
    """num_steps counts from the step seen at session creation (5 + 10 = 15)."""
    h = basic_session_run_hooks.StopAtStepHook(num_steps=10)
    with ops.Graph().as_default():
      global_step = training_util.get_or_create_global_step()
      no_op = control_flow_ops.no_op()
      h.begin()
      with session_lib.Session() as sess:
        mon_sess = monitored_session._HookedSession(sess, [h])
        sess.run(state_ops.assign(global_step, 5))
        h.after_create_session(sess, None)
        mon_sess.run(no_op)
        self.assertFalse(mon_sess.should_stop())
        sess.run(state_ops.assign(global_step, 13))
        mon_sess.run(no_op)
        self.assertFalse(mon_sess.should_stop())
        sess.run(state_ops.assign(global_step, 14))
        mon_sess.run(no_op)
        self.assertFalse(mon_sess.should_stop())
        sess.run(state_ops.assign(global_step, 15))
        mon_sess.run(no_op)
        self.assertTrue(mon_sess.should_stop())
        # Past the target the hook keeps requesting a stop even after a reset.
        sess.run(state_ops.assign(global_step, 16))
        mon_sess._should_stop = False
        mon_sess.run(no_op)
        self.assertTrue(mon_sess.should_stop())
  def test_stop_based_with_multiple_steps(self):
    """Stop triggers even when one run jumps past the target step."""
    h = basic_session_run_hooks.StopAtStepHook(num_steps=10)
    with ops.Graph().as_default():
      global_step = training_util.get_or_create_global_step()
      no_op = control_flow_ops.no_op()
      h.begin()
      with session_lib.Session() as sess:
        mon_sess = monitored_session._HookedSession(sess, [h])
        sess.run(state_ops.assign(global_step, 5))
        h.after_create_session(sess, None)
        mon_sess.run(no_op)
        self.assertFalse(mon_sess.should_stop())
        sess.run(state_ops.assign(global_step, 15))
        mon_sess.run(no_op)
        self.assertTrue(mon_sess.should_stop())
class LoggingTensorHookTest(test.TestCase):
  """Unit tests for basic_session_run_hooks.LoggingTensorHook.

  tf_logging.info is patched in setUp so each test can inspect the most
  recently logged message via self.logged_message.
  """
  def setUp(self):
    # Mock out logging calls so we can verify whether correct tensors are being
    # monitored.
    self._actual_log = tf_logging.info
    self.logged_message = None
    def mock_log(*args, **kwargs):
      # Record the args, then forward to the real logger.
      self.logged_message = args
      self._actual_log(*args, **kwargs)
    tf_logging.info = mock_log
  def tearDown(self):
    # Restore the real logging function patched in setUp.
    tf_logging.info = self._actual_log
  def test_illegal_args(self):
    # every_n_iter must be positive, and exactly one of
    # every_n_iter/every_n_secs must be given.
    with self.assertRaisesRegexp(ValueError, 'nvalid every_n_iter'):
      basic_session_run_hooks.LoggingTensorHook(tensors=['t'], every_n_iter=0)
    with self.assertRaisesRegexp(ValueError, 'nvalid every_n_iter'):
      basic_session_run_hooks.LoggingTensorHook(tensors=['t'], every_n_iter=-10)
    with self.assertRaisesRegexp(ValueError, 'xactly one of'):
      basic_session_run_hooks.LoggingTensorHook(
          tensors=['t'], every_n_iter=5, every_n_secs=5)
    with self.assertRaisesRegexp(ValueError, 'xactly one of'):
      basic_session_run_hooks.LoggingTensorHook(tensors=['t'])
  def test_print_at_end_only(self):
    """With at_end=True only, nothing is logged until hook.end()."""
    with ops.Graph().as_default(), session_lib.Session() as sess:
      t = constant_op.constant(42.0, name='foo')
      train_op = constant_op.constant(3)
      hook = basic_session_run_hooks.LoggingTensorHook(
          tensors=[t.name], at_end=True)
      hook.begin()
      mon_sess = monitored_session._HookedSession(sess, [hook])
      self.evaluate(variables_lib.global_variables_initializer())
      self.logged_message = ''
      for _ in range(3):
        mon_sess.run(train_op)
        # assertNotRegexpMatches is not supported by python 3.1 and later
        self.assertEqual(str(self.logged_message).find(t.name), -1)
      hook.end(sess)
      self.assertRegexpMatches(str(self.logged_message), t.name)
  def _validate_print_every_n_steps(self, sess, at_end):
    """Shared body: logs on the first run, then every 10th iteration."""
    t = constant_op.constant(42.0, name='foo')
    train_op = constant_op.constant(3)
    hook = basic_session_run_hooks.LoggingTensorHook(
        tensors=[t.name], every_n_iter=10, at_end=at_end)
    hook.begin()
    mon_sess = monitored_session._HookedSession(sess, [hook])
    self.evaluate(variables_lib.global_variables_initializer())
    mon_sess.run(train_op)
    self.assertRegexpMatches(str(self.logged_message), t.name)
    for _ in range(3):
      self.logged_message = ''
      for _ in range(9):
        mon_sess.run(train_op)
        # assertNotRegexpMatches is not supported by python 3.1 and later
        self.assertEqual(str(self.logged_message).find(t.name), -1)
      mon_sess.run(train_op)
      self.assertRegexpMatches(str(self.logged_message), t.name)
    # Add additional run to verify proper reset when called multiple times.
    self.logged_message = ''
    mon_sess.run(train_op)
    # assertNotRegexpMatches is not supported by python 3.1 and later
    self.assertEqual(str(self.logged_message).find(t.name), -1)
    self.logged_message = ''
    hook.end(sess)
    if at_end:
      self.assertRegexpMatches(str(self.logged_message), t.name)
    else:
      # assertNotRegexpMatches is not supported by python 3.1 and later
      self.assertEqual(str(self.logged_message).find(t.name), -1)
  def test_print_every_n_steps(self):
    with ops.Graph().as_default(), session_lib.Session() as sess:
      self._validate_print_every_n_steps(sess, at_end=False)
      # Verify proper reset.
      self._validate_print_every_n_steps(sess, at_end=False)
  def test_print_every_n_steps_and_end(self):
    with ops.Graph().as_default(), session_lib.Session() as sess:
      self._validate_print_every_n_steps(sess, at_end=True)
      # Verify proper reset.
      self._validate_print_every_n_steps(sess, at_end=True)
  def test_print_first_step(self):
    # if it runs every iteration, first iteration has None duration.
    with ops.Graph().as_default(), session_lib.Session() as sess:
      t = constant_op.constant(42.0, name='foo')
      train_op = constant_op.constant(3)
      hook = basic_session_run_hooks.LoggingTensorHook(
          tensors={'foo': t}, every_n_iter=1)
      hook.begin()
      mon_sess = monitored_session._HookedSession(sess, [hook])
      self.evaluate(variables_lib.global_variables_initializer())
      mon_sess.run(train_op)
      self.assertRegexpMatches(str(self.logged_message), 'foo')
      # in first run, elapsed time is None.
      self.assertEqual(str(self.logged_message).find('sec'), -1)
  def _validate_print_every_n_secs(self, sess, at_end, mock_time):
    """Shared body: logs on the first run, then once per mocked second."""
    t = constant_op.constant(42.0, name='foo')
    train_op = constant_op.constant(3)
    hook = basic_session_run_hooks.LoggingTensorHook(
        tensors=[t.name], every_n_secs=1.0, at_end=at_end)
    hook.begin()
    mon_sess = monitored_session._HookedSession(sess, [hook])
    self.evaluate(variables_lib.global_variables_initializer())
    mon_sess.run(train_op)
    self.assertRegexpMatches(str(self.logged_message), t.name)
    # assertNotRegexpMatches is not supported by python 3.1 and later
    self.logged_message = ''
    mon_sess.run(train_op)
    self.assertEqual(str(self.logged_message).find(t.name), -1)
    # Advance the mocked clock past the 1-second logging interval.
    mock_time.return_value += 1.0
    self.logged_message = ''
    mon_sess.run(train_op)
    self.assertRegexpMatches(str(self.logged_message), t.name)
    self.logged_message = ''
    hook.end(sess)
    if at_end:
      self.assertRegexpMatches(str(self.logged_message), t.name)
    else:
      # assertNotRegexpMatches is not supported by python 3.1 and later
      self.assertEqual(str(self.logged_message).find(t.name), -1)
  @test.mock.patch.object(time, 'time')
  def test_print_every_n_secs(self, mock_time):
    with ops.Graph().as_default(), session_lib.Session() as sess:
      mock_time.return_value = MOCK_START_TIME
      self._validate_print_every_n_secs(sess, at_end=False, mock_time=mock_time)
      # Verify proper reset.
      self._validate_print_every_n_secs(sess, at_end=False, mock_time=mock_time)
  @test.mock.patch.object(time, 'time')
  def test_print_every_n_secs_and_end(self, mock_time):
    with ops.Graph().as_default(), session_lib.Session() as sess:
      mock_time.return_value = MOCK_START_TIME
      self._validate_print_every_n_secs(sess, at_end=True, mock_time=mock_time)
      # Verify proper reset.
      self._validate_print_every_n_secs(sess, at_end=True, mock_time=mock_time)
  def test_print_formatter(self):
    """A custom formatter receives the tensor values dict and formats the log."""
    with ops.Graph().as_default(), session_lib.Session() as sess:
      t = constant_op.constant(42.0, name='foo')
      train_op = constant_op.constant(3)
      hook = basic_session_run_hooks.LoggingTensorHook(
          tensors=[t.name], every_n_iter=10,
          formatter=lambda items: 'qqq=%s' % items[t.name])
      hook.begin()
      mon_sess = monitored_session._HookedSession(sess, [hook])
      self.evaluate(variables_lib.global_variables_initializer())
      mon_sess.run(train_op)
      self.assertEqual(self.logged_message[0], 'qqq=42.0')
class CheckpointSaverHookTest(test.TestCase):
def setUp(self):
self.model_dir = tempfile.mkdtemp()
self.graph = ops.Graph()
with self.graph.as_default():
self.scaffold = monitored_session.Scaffold()
self.global_step = training_util.get_or_create_global_step()
self.train_op = training_util._increment_global_step(1)
def tearDown(self):
shutil.rmtree(self.model_dir, ignore_errors=True)
def test_saves_when_saver_and_scaffold_both_missing(self):
with self.graph.as_default():
hook = basic_session_run_hooks.CheckpointSaverHook(
self.model_dir, save_steps=1)
hook.begin()
self.scaffold.finalize()
with session_lib.Session() as sess:
sess.run(self.scaffold.init_op)
mon_sess = monitored_session._HookedSession(sess, [hook])
mon_sess.run(self.train_op)
self.assertEqual(1,
checkpoint_utils.load_variable(self.model_dir,
self.global_step.name))
def test_raise_when_saver_and_scaffold_both_present(self):
with self.assertRaises(ValueError):
basic_session_run_hooks.CheckpointSaverHook(
self.model_dir, saver=self.scaffold.saver, scaffold=self.scaffold)
@test_util.run_deprecated_v1
def test_raise_in_both_secs_and_steps(self):
with self.assertRaises(ValueError):
basic_session_run_hooks.CheckpointSaverHook(
self.model_dir, save_secs=10, save_steps=20)
@test_util.run_deprecated_v1
def test_raise_in_none_secs_and_steps(self):
with self.assertRaises(ValueError):
basic_session_run_hooks.CheckpointSaverHook(self.model_dir)
def test_save_secs_saves_in_first_step(self):
with self.graph.as_default():
hook = basic_session_run_hooks.CheckpointSaverHook(
self.model_dir, save_secs=2, scaffold=self.scaffold)
hook.begin()
self.scaffold.finalize()
with session_lib.Session() as sess:
sess.run(self.scaffold.init_op)
mon_sess = monitored_session._HookedSession(sess, [hook])
mon_sess.run(self.train_op)
self.assertEqual(1,
checkpoint_utils.load_variable(self.model_dir,
self.global_step.name))
def test_save_secs_calls_listeners_at_begin_and_end(self):
with self.graph.as_default():
listener = MockCheckpointSaverListener()
hook = basic_session_run_hooks.CheckpointSaverHook(
self.model_dir,
save_secs=2,
scaffold=self.scaffold,
listeners=[listener])
hook.begin()
self.scaffold.finalize()
with session_lib.Session() as sess:
sess.run(self.scaffold.init_op)
mon_sess = monitored_session._HookedSession(sess, [hook])
mon_sess.run(self.train_op) # hook runs here
mon_sess.run(self.train_op) # hook won't run here, so it does at end
hook.end(sess) # hook runs here
self.assertEqual({
'begin': 1,
'before_save': 2,
'after_save': 2,
'end': 1
}, listener.get_counts())
def test_listener_with_monitored_session(self):
with ops.Graph().as_default():
scaffold = monitored_session.Scaffold()
global_step = training_util.get_or_create_global_step()
train_op = training_util._increment_global_step(1)
listener = MockCheckpointSaverListener()
hook = basic_session_run_hooks.CheckpointSaverHook(
self.model_dir,
save_steps=1,
scaffold=scaffold,
listeners=[listener])
with monitored_session.SingularMonitoredSession(
hooks=[hook],
scaffold=scaffold,
checkpoint_dir=self.model_dir) as sess:
sess.run(train_op)
sess.run(train_op)
global_step_val = sess.raw_session().run(global_step)
listener_counts = listener.get_counts()
self.assertEqual(2, global_step_val)
self.assertEqual({
'begin': 1,
'before_save': 3,
'after_save': 3,
'end': 1
}, listener_counts)
def test_listener_stops_training_in_after_save(self):
with ops.Graph().as_default():
scaffold = monitored_session.Scaffold()
training_util.get_or_create_global_step()
train_op = training_util._increment_global_step(1)
listener = MockCheckpointSaverListener()
hook = basic_session_run_hooks.CheckpointSaverHook(
self.model_dir, save_steps=1, scaffold=scaffold, listeners=[listener])
with monitored_session.SingularMonitoredSession(
hooks=[hook], scaffold=scaffold,
checkpoint_dir=self.model_dir) as sess:
sess.run(train_op)
self.assertFalse(sess.should_stop())
sess.run(train_op)
self.assertFalse(sess.should_stop())
listener.ask_for_stop = True
sess.run(train_op)
self.assertTrue(sess.should_stop())
def test_listener_with_default_saver(self):
with ops.Graph().as_default():
global_step = training_util.get_or_create_global_step()
train_op = training_util._increment_global_step(1)
listener = MockCheckpointSaverListener()
hook = basic_session_run_hooks.CheckpointSaverHook(
self.model_dir,
save_steps=1,
listeners=[listener])
with monitored_session.SingularMonitoredSession(
hooks=[hook],
checkpoint_dir=self.model_dir) as sess:
sess.run(train_op)
sess.run(train_op)
global_step_val = sess.raw_session().run(global_step)
listener_counts = listener.get_counts()
self.assertEqual(2, global_step_val)
self.assertEqual({
'begin': 1,
'before_save': 3,
'after_save': 3,
'end': 1
}, listener_counts)
with ops.Graph().as_default():
global_step = training_util.get_or_create_global_step()
with monitored_session.SingularMonitoredSession(
checkpoint_dir=self.model_dir) as sess2:
global_step_saved_val = sess2.run(global_step)
self.assertEqual(2, global_step_saved_val)
def test_two_listeners_with_default_saver(self):
with ops.Graph().as_default():
global_step = training_util.get_or_create_global_step()
train_op = training_util._increment_global_step(1)
listener1 = MockCheckpointSaverListener()
listener2 = MockCheckpointSaverListener()
hook = basic_session_run_hooks.CheckpointSaverHook(
self.model_dir,
save_steps=1,
listeners=[listener1, listener2])
with monitored_session.SingularMonitoredSession(
hooks=[hook],
checkpoint_dir=self.model_dir) as sess:
sess.run(train_op)
sess.run(train_op)
global_step_val = sess.raw_session().run(global_step)
listener1_counts = listener1.get_counts()
listener2_counts = listener2.get_counts()
self.assertEqual(2, global_step_val)
self.assertEqual({
'begin': 1,
'before_save': 3,
'after_save': 3,
'end': 1
}, listener1_counts)
self.assertEqual(listener1_counts, listener2_counts)
with ops.Graph().as_default():
global_step = training_util.get_or_create_global_step()
with monitored_session.SingularMonitoredSession(
checkpoint_dir=self.model_dir) as sess2:
global_step_saved_val = sess2.run(global_step)
self.assertEqual(2, global_step_saved_val)
@test.mock.patch.object(time, 'time')
def test_save_secs_saves_periodically(self, mock_time):
with self.graph.as_default():
mock_time.return_value = MOCK_START_TIME
hook = basic_session_run_hooks.CheckpointSaverHook(
self.model_dir, save_secs=2, scaffold=self.scaffold)
hook.begin()
self.scaffold.finalize()
with session_lib.Session() as sess:
sess.run(self.scaffold.init_op)
mon_sess = monitored_session._HookedSession(sess, [hook])
mock_time.return_value = MOCK_START_TIME
mon_sess.run(self.train_op) # Saved.
mock_time.return_value = MOCK_START_TIME + 0.5
mon_sess.run(self.train_op) # Not saved.
self.assertEqual(1,
checkpoint_utils.load_variable(self.model_dir,
self.global_step.name))
# Simulate 2.5 seconds of sleep.
mock_time.return_value = MOCK_START_TIME + 2.5
mon_sess.run(self.train_op) # Saved.
mock_time.return_value = MOCK_START_TIME + 2.6
mon_sess.run(self.train_op) # Not saved.
mock_time.return_value = MOCK_START_TIME + 2.7
mon_sess.run(self.train_op) # Not saved.
self.assertEqual(3,
checkpoint_utils.load_variable(self.model_dir,
self.global_step.name))
# Simulate 7.5 more seconds of sleep (10 seconds from start.
mock_time.return_value = MOCK_START_TIME + 10
mon_sess.run(self.train_op) # Saved.
self.assertEqual(6,
checkpoint_utils.load_variable(self.model_dir,
self.global_step.name))
@test.mock.patch.object(time, 'time')
def test_save_secs_calls_listeners_periodically(self, mock_time):
with self.graph.as_default():
mock_time.return_value = MOCK_START_TIME
listener = MockCheckpointSaverListener()
hook = basic_session_run_hooks.CheckpointSaverHook(
self.model_dir,
save_secs=2,
scaffold=self.scaffold,
listeners=[listener])
hook.begin()
self.scaffold.finalize()
with session_lib.Session() as sess:
sess.run(self.scaffold.init_op)
mon_sess = monitored_session._HookedSession(sess, [hook])
mock_time.return_value = MOCK_START_TIME + 0.5
mon_sess.run(self.train_op) # hook runs here
mock_time.return_value = MOCK_START_TIME + 0.5
mon_sess.run(self.train_op)
mock_time.return_value = MOCK_START_TIME + 3.0
mon_sess.run(self.train_op) # hook runs here
mock_time.return_value = MOCK_START_TIME + 3.5
mon_sess.run(self.train_op)
mock_time.return_value = MOCK_START_TIME + 4.0
mon_sess.run(self.train_op)
mock_time.return_value = MOCK_START_TIME + 6.5
mon_sess.run(self.train_op) # hook runs here
mock_time.return_value = MOCK_START_TIME + 7.0
mon_sess.run(self.train_op) # hook won't run here, so it does at end
mock_time.return_value = MOCK_START_TIME + 7.5
hook.end(sess) # hook runs here
self.assertEqual({
'begin': 1,
'before_save': 4,
'after_save': 4,
'end': 1
}, listener.get_counts())
def test_save_steps_saves_in_first_step(self):
with self.graph.as_default():
hook = basic_session_run_hooks.CheckpointSaverHook(
self.model_dir, save_steps=2, scaffold=self.scaffold)
hook.begin()
self.scaffold.finalize()
with session_lib.Session() as sess:
sess.run(self.scaffold.init_op)
mon_sess = monitored_session._HookedSession(sess, [hook])
mon_sess.run(self.train_op)
self.assertEqual(1,
checkpoint_utils.load_variable(self.model_dir,
self.global_step.name))
def test_save_steps_saves_periodically(self):
with self.graph.as_default():
hook = basic_session_run_hooks.CheckpointSaverHook(
self.model_dir, save_steps=2, scaffold=self.scaffold)
hook.begin()
self.scaffold.finalize()
with session_lib.Session() as sess:
sess.run(self.scaffold.init_op)
mon_sess = monitored_session._HookedSession(sess, [hook])
mon_sess.run(self.train_op)
mon_sess.run(self.train_op)
# Not saved
self.assertEqual(1,
checkpoint_utils.load_variable(self.model_dir,
self.global_step.name))
mon_sess.run(self.train_op)
# saved
self.assertEqual(3,
checkpoint_utils.load_variable(self.model_dir,
self.global_step.name))
mon_sess.run(self.train_op)
# Not saved
self.assertEqual(3,
checkpoint_utils.load_variable(self.model_dir,
self.global_step.name))
mon_sess.run(self.train_op)
# saved
self.assertEqual(5,
checkpoint_utils.load_variable(self.model_dir,
self.global_step.name))
def test_save_saves_at_end(self):
with self.graph.as_default():
hook = basic_session_run_hooks.CheckpointSaverHook(
self.model_dir, save_secs=2, scaffold=self.scaffold)
hook.begin()
self.scaffold.finalize()
with session_lib.Session() as sess:
sess.run(self.scaffold.init_op)
mon_sess = monitored_session._HookedSession(sess, [hook])
mon_sess.run(self.train_op)
mon_sess.run(self.train_op)
hook.end(sess)
self.assertEqual(2,
checkpoint_utils.load_variable(self.model_dir,
self.global_step.name))
  def test_summary_writer_defs(self):
    """The hook adds the meta graph def to the summary writer at session start."""
    # Install the fake writer so the hook's FileWriterCache lookup returns it.
    fake_summary_writer.FakeSummaryWriter.install()
    writer_cache.FileWriterCache.clear()
    summary_writer = writer_cache.FileWriterCache.get(self.model_dir)

    with self.graph.as_default():
      hook = basic_session_run_hooks.CheckpointSaverHook(
          self.model_dir, save_steps=2, scaffold=self.scaffold)
      hook.begin()
      self.scaffold.finalize()
      with session_lib.Session() as sess:
        sess.run(self.scaffold.init_op)
        mon_sess = monitored_session._HookedSession(sess, [hook])
        hook.after_create_session(sess, None)
        mon_sess.run(self.train_op)
      summary_writer.assert_summaries(
          test_case=self,
          expected_logdir=self.model_dir,
          expected_added_meta_graphs=[
              meta_graph.create_meta_graph_def(
                  graph_def=self.graph.as_graph_def(add_shapes=True),
                  saver_def=self.scaffold.saver.saver_def)
          ])

    # Restore the real summary writer for subsequent tests.
    fake_summary_writer.FakeSummaryWriter.uninstall()
  def test_save_checkpoint_before_first_train_step(self):
    """after_create_session() saves a step-0 checkpoint before training starts."""
    with self.graph.as_default():
      hook = basic_session_run_hooks.CheckpointSaverHook(
          self.model_dir, save_steps=2, scaffold=self.scaffold)
      hook.begin()
      self.scaffold.finalize()
      with session_lib.Session() as sess:
        mon_sess = monitored_session._HookedSession(sess, [hook])
        sess.run(self.scaffold.init_op)
        hook.after_create_session(sess, None)
        # Verifies that checkpoint is saved at step 0.
        self.assertEqual(0,
                         checkpoint_utils.load_variable(self.model_dir,
                                                        self.global_step.name))
        # Verifies that no checkpoint is saved after one training step.
        mon_sess.run(self.train_op)
        self.assertEqual(0,
                         checkpoint_utils.load_variable(self.model_dir,
                                                        self.global_step.name))
        # Verifies that checkpoint is saved after save_steps.
        mon_sess.run(self.train_op)
        self.assertEqual(2,
                         checkpoint_utils.load_variable(self.model_dir,
                                                        self.global_step.name))
  def test_save_graph_def(self):
    """With save_graph_def=True the hook writes graph.pbtxt and .meta files."""
    with self.graph.as_default():
      hook = basic_session_run_hooks.CheckpointSaverHook(
          self.model_dir, save_steps=1, scaffold=self.scaffold,
          save_graph_def=True)
      hook.begin()
      self.scaffold.finalize()
      with session_lib.Session() as sess:
        sess.run(self.scaffold.init_op)
        mon_sess = monitored_session._HookedSession(sess, [hook])
        sess.run(self.scaffold.init_op)
        hook.after_create_session(sess, None)

        self.assertIn('graph.pbtxt', os.listdir(self.model_dir))
        # Should have a single .meta file for step 0
        self.assertLen(gfile.Glob(os.path.join(self.model_dir, '*.meta')), 1)

        mon_sess.run(self.train_op)
        # One more .meta file after the step-1 save.
        self.assertLen(gfile.Glob(os.path.join(self.model_dir, '*.meta')), 2)
  def test_save_graph_def_false(self):
    """With save_graph_def=False no graph.pbtxt or .meta files are written."""
    with self.graph.as_default():
      hook = basic_session_run_hooks.CheckpointSaverHook(
          self.model_dir, save_steps=1, scaffold=self.scaffold,
          save_graph_def=False)
      hook.begin()
      self.scaffold.finalize()
      with session_lib.Session() as sess:
        sess.run(self.scaffold.init_op)
        mon_sess = monitored_session._HookedSession(sess, [hook])
        sess.run(self.scaffold.init_op)
        hook.after_create_session(sess, None)

        self.assertNotIn('graph.pbtxt', os.listdir(self.model_dir))
        # Should have a single .meta file for step 0
        self.assertEmpty(gfile.Glob(os.path.join(self.model_dir, '*.meta')))

        mon_sess.run(self.train_op)
        # Still no meta graphs after a checkpoint save.
        self.assertEmpty(gfile.Glob(os.path.join(self.model_dir, '*.meta')))
class CheckpointSaverHookMultiStepTest(test.TestCase):
  """Tests CheckpointSaverHook when each session.run advances multiple steps.

  The hook's save cadence is expressed in global steps, while the train op
  increments the global step by steps_per_run (5) per run call, so saves
  land on the first run boundary at or after the requested step count.
  """

  def setUp(self):
    """Create a temp checkpoint dir and a train op that adds steps_per_run."""
    self.model_dir = tempfile.mkdtemp()
    self.graph = ops.Graph()
    self.steps_per_run = 5
    with self.graph.as_default():
      self.scaffold = monitored_session.Scaffold()
      self.global_step = training_util.get_or_create_global_step()
      self.train_op = training_util._increment_global_step(self.steps_per_run)

  def tearDown(self):
    """Remove the temporary checkpoint directory."""
    shutil.rmtree(self.model_dir, ignore_errors=True)

  def test_save_steps_saves_in_first_step(self):
    """A checkpoint is written on the very first run call (step 5)."""
    with self.graph.as_default():
      hook = basic_session_run_hooks.CheckpointSaverHook(
          self.model_dir,
          save_steps=2*self.steps_per_run,
          scaffold=self.scaffold)
      hook._set_steps_per_run(self.steps_per_run)
      hook.begin()
      self.scaffold.finalize()
      with session_lib.Session() as sess:
        sess.run(self.scaffold.init_op)
        mon_sess = monitored_session._HookedSession(sess, [hook])
        mon_sess.run(self.train_op)
        self.assertEqual(5,
                         checkpoint_utils.load_variable(self.model_dir,
                                                        self.global_step.name))

  def test_save_steps_saves_periodically(self):
    """Saves land every other run call (every 2*steps_per_run global steps)."""
    with self.graph.as_default():
      hook = basic_session_run_hooks.CheckpointSaverHook(
          self.model_dir,
          save_steps=2*self.steps_per_run,
          scaffold=self.scaffold)
      hook._set_steps_per_run(self.steps_per_run)
      hook.begin()
      self.scaffold.finalize()
      with session_lib.Session() as sess:
        sess.run(self.scaffold.init_op)
        mon_sess = monitored_session._HookedSession(sess, [hook])
        mon_sess.run(self.train_op)
        # Saved (step=5)
        self.assertEqual(5,
                         checkpoint_utils.load_variable(self.model_dir,
                                                        self.global_step.name))
        mon_sess.run(self.train_op)
        # Not saved (step=10)
        self.assertEqual(5,
                         checkpoint_utils.load_variable(self.model_dir,
                                                        self.global_step.name))
        mon_sess.run(self.train_op)
        # Saved (step=15)
        self.assertEqual(15,
                         checkpoint_utils.load_variable(self.model_dir,
                                                        self.global_step.name))
        mon_sess.run(self.train_op)
        # Not saved (step=20)
        self.assertEqual(15,
                         checkpoint_utils.load_variable(self.model_dir,
                                                        self.global_step.name))
        mon_sess.run(self.train_op)
        # Saved (step=25)
        self.assertEqual(25,
                         checkpoint_utils.load_variable(self.model_dir,
                                                        self.global_step.name))

  def test_save_steps_saves_at_end(self):
    """hook.end() writes a final checkpoint at the last global step."""
    with self.graph.as_default():
      hook = basic_session_run_hooks.CheckpointSaverHook(
          self.model_dir,
          save_steps=2*self.steps_per_run,
          scaffold=self.scaffold)
      hook._set_steps_per_run(self.steps_per_run)
      hook.begin()
      self.scaffold.finalize()
      with session_lib.Session() as sess:
        sess.run(self.scaffold.init_op)
        mon_sess = monitored_session._HookedSession(sess, [hook])
        mon_sess.run(self.train_op)
        mon_sess.run(self.train_op)
        hook.end(sess)
        self.assertEqual(10,
                         checkpoint_utils.load_variable(self.model_dir,
                                                        self.global_step.name))
class ResourceCheckpointSaverHookTest(test.TestCase):
  """Tests CheckpointSaverHook when the global step is a resource variable."""

  def setUp(self):
    """Create a temp checkpoint dir and a resource-variable global step."""
    self.model_dir = tempfile.mkdtemp()
    self.graph = ops.Graph()
    with self.graph.as_default():
      self.scaffold = monitored_session.Scaffold()
      with variable_scope.variable_scope('foo', use_resource=True):
        self.global_step = training_util.get_or_create_global_step()
        self.train_op = training_util._increment_global_step(1)

  def tearDown(self):
    # Fix: setUp creates a temp directory but it was never cleaned up,
    # leaking one directory per test run. Mirrors the sibling test classes.
    shutil.rmtree(self.model_dir, ignore_errors=True)

  def test_save_steps_saves_periodically(self):
    """Saves happen on the first step and then every save_steps=2 steps."""
    with self.graph.as_default():
      hook = basic_session_run_hooks.CheckpointSaverHook(
          self.model_dir, save_steps=2, scaffold=self.scaffold)
      hook.begin()
      self.scaffold.finalize()
      with session_lib.Session() as sess:
        sess.run(self.scaffold.init_op)
        mon_sess = monitored_session._HookedSession(sess, [hook])
        mon_sess.run(self.train_op)
        mon_sess.run(self.train_op)
        # Not saved
        self.assertEqual(1,
                         checkpoint_utils.load_variable(self.model_dir,
                                                        self.global_step.name))
        mon_sess.run(self.train_op)
        # saved
        self.assertEqual(3,
                         checkpoint_utils.load_variable(self.model_dir,
                                                        self.global_step.name))
        mon_sess.run(self.train_op)
        # Not saved
        self.assertEqual(3,
                         checkpoint_utils.load_variable(self.model_dir,
                                                        self.global_step.name))
        mon_sess.run(self.train_op)
        # saved
        self.assertEqual(5,
                         checkpoint_utils.load_variable(self.model_dir,
                                                        self.global_step.name))
class StepCounterHookTest(test.TestCase):
  """Tests StepCounterHook's global_step/sec summaries and warnings."""

  def setUp(self):
    """Create a temporary log directory for summaries."""
    self.log_dir = tempfile.mkdtemp()

  def tearDown(self):
    """Remove the temporary log directory."""
    shutil.rmtree(self.log_dir, ignore_errors=True)

  @test.mock.patch.object(time, 'time')
  def test_step_counter_every_n_steps(self, mock_time):
    """With every_n_steps=10, summaries are emitted at steps 11 and 21."""
    mock_time.return_value = MOCK_START_TIME
    with ops.Graph().as_default() as g, session_lib.Session() as sess:
      training_util.get_or_create_global_step()
      train_op = training_util._increment_global_step(1)
      summary_writer = fake_summary_writer.FakeSummaryWriter(self.log_dir, g)
      hook = basic_session_run_hooks.StepCounterHook(
          summary_writer=summary_writer, every_n_steps=10)
      hook.begin()
      self.evaluate(variables_lib.global_variables_initializer())
      mon_sess = monitored_session._HookedSession(sess, [hook])
      with test.mock.patch.object(tf_logging, 'warning') as mock_log:
        for _ in range(30):
          # Advance the mocked clock so steps/sec is finite and positive.
          mock_time.return_value += 0.01
          mon_sess.run(train_op)
        # logging.warning should not be called.
        self.assertIsNone(mock_log.call_args)
      hook.end(sess)
      summary_writer.assert_summaries(
          test_case=self,
          expected_logdir=self.log_dir,
          expected_graph=g,
          expected_summaries={})
      self.assertItemsEqual([11, 21], summary_writer.summaries.keys())
      for step in [11, 21]:
        summary_value = summary_writer.summaries[step][0].value[0]
        self.assertEqual('global_step/sec', summary_value.tag)
        self.assertGreater(summary_value.simple_value, 0)

  @test.mock.patch.object(time, 'time')
  def test_step_counter_every_n_secs(self, mock_time):
    """With every_n_secs=0.1, summaries appear after each 0.2s mock advance."""
    mock_time.return_value = MOCK_START_TIME
    with ops.Graph().as_default() as g, session_lib.Session() as sess:
      training_util.get_or_create_global_step()
      train_op = training_util._increment_global_step(1)
      summary_writer = fake_summary_writer.FakeSummaryWriter(self.log_dir, g)
      hook = basic_session_run_hooks.StepCounterHook(
          summary_writer=summary_writer, every_n_steps=None, every_n_secs=0.1)

      hook.begin()
      self.evaluate(variables_lib.global_variables_initializer())
      mon_sess = monitored_session._HookedSession(sess, [hook])
      mon_sess.run(train_op)
      mock_time.return_value += 0.2
      mon_sess.run(train_op)
      mock_time.return_value += 0.2
      mon_sess.run(train_op)
      hook.end(sess)

      summary_writer.assert_summaries(
          test_case=self,
          expected_logdir=self.log_dir,
          expected_graph=g,
          expected_summaries={})
      self.assertTrue(summary_writer.summaries, 'No summaries were created.')
      self.assertItemsEqual([2, 3], summary_writer.summaries.keys())
      for summary in summary_writer.summaries.values():
        summary_value = summary[0].value[0]
        self.assertEqual('global_step/sec', summary_value.tag)
        self.assertGreater(summary_value.simple_value, 0)

  def test_global_step_name(self):
    """A custom-named global step yields a matching '<name>/sec' summary tag."""
    with ops.Graph().as_default() as g, session_lib.Session() as sess:
      with variable_scope.variable_scope('bar'):
        # Register 'bar/foo' as the global step via the GLOBAL_STEP collection.
        variable_scope.get_variable(
            'foo',
            initializer=0,
            trainable=False,
            collections=[
                ops.GraphKeys.GLOBAL_STEP, ops.GraphKeys.GLOBAL_VARIABLES
            ])
      train_op = training_util._increment_global_step(1)
      summary_writer = fake_summary_writer.FakeSummaryWriter(self.log_dir, g)
      hook = basic_session_run_hooks.StepCounterHook(
          summary_writer=summary_writer, every_n_steps=1, every_n_secs=None)

      hook.begin()
      self.evaluate(variables_lib.global_variables_initializer())
      mon_sess = monitored_session._HookedSession(sess, [hook])
      mon_sess.run(train_op)
      mon_sess.run(train_op)
      hook.end(sess)

      summary_writer.assert_summaries(
          test_case=self,
          expected_logdir=self.log_dir,
          expected_graph=g,
          expected_summaries={})
      self.assertTrue(summary_writer.summaries, 'No summaries were created.')
      self.assertItemsEqual([2], summary_writer.summaries.keys())
      summary_value = summary_writer.summaries[2][0].value[0]
      self.assertEqual('bar/foo/sec', summary_value.tag)

  def test_log_warning_if_global_step_not_increased(self):
    """A warning is logged when the global step stays flat between runs."""
    with ops.Graph().as_default(), session_lib.Session() as sess:
      training_util.get_or_create_global_step()
      train_op = training_util._increment_global_step(0)  # keep same.
      self.evaluate(variables_lib.global_variables_initializer())
      hook = basic_session_run_hooks.StepCounterHook(
          every_n_steps=1, every_n_secs=None)
      hook.begin()
      mon_sess = monitored_session._HookedSession(sess, [hook])
      mon_sess.run(train_op)  # Run one step to record global step.
      with test.mock.patch.object(tf_logging, 'log_first_n') as mock_log:
        for _ in range(30):
          mon_sess.run(train_op)
        self.assertRegexpMatches(
            str(mock_log.call_args),
            'global step.*has not been increased')
      hook.end(sess)

  def _setup_steps_per_run_test(self,
                                every_n_steps,
                                steps_per_run,
                                graph,
                                sess):
    """Build hook/op/writer fixtures for the steps_per_run variants below."""
    training_util.get_or_create_global_step()
    self.train_op = training_util._increment_global_step(steps_per_run)
    self.summary_writer = fake_summary_writer.FakeSummaryWriter(
        self.log_dir, graph)
    self.hook = basic_session_run_hooks.StepCounterHook(
        summary_writer=self.summary_writer, every_n_steps=every_n_steps)
    self.hook._set_steps_per_run(steps_per_run)
    self.hook.begin()
    self.evaluate(variables_lib.global_variables_initializer())
    self.mon_sess = monitored_session._HookedSession(sess, [self.hook])

  @test.mock.patch.object(time, 'time')
  def test_steps_per_run_less_than_every_n_steps(self, mock_time):
    """steps_per_run=5 < every_n_steps=10: logs on run boundaries 15 and 25."""
    mock_time.return_value = MOCK_START_TIME
    with ops.Graph().as_default() as g, session_lib.Session() as sess:
      self._setup_steps_per_run_test(10, 5, g, sess)
      # Logs at 15, 25
      for _ in range(5):
        mock_time.return_value += 0.01
        self.mon_sess.run(self.train_op)
      self.hook.end(sess)
      self.summary_writer.assert_summaries(
          test_case=self,
          expected_logdir=self.log_dir,
          expected_graph=g,
          expected_summaries={})
      self.assertItemsEqual([15, 25], self.summary_writer.summaries.keys())
      for step in [15, 25]:
        summary_value = self.summary_writer.summaries[step][0].value[0]
        self.assertEqual('global_step/sec', summary_value.tag)
        self.assertGreater(summary_value.simple_value, 0)

  @test.mock.patch.object(time, 'time')
  def test_steps_per_run_equal_every_n_steps(self, mock_time):
    """steps_per_run == every_n_steps == 5: logs on every run after the first."""
    mock_time.return_value = MOCK_START_TIME
    with ops.Graph().as_default() as g, session_lib.Session() as sess:
      self._setup_steps_per_run_test(5, 5, g, sess)
      # Logs at 10, 15, 20, 25
      for _ in range(5):
        mock_time.return_value += 0.01
        self.mon_sess.run(self.train_op)
      self.hook.end(sess)
      self.summary_writer.assert_summaries(
          test_case=self,
          expected_logdir=self.log_dir,
          expected_graph=g,
          expected_summaries={})
      self.assertItemsEqual([10, 15, 20, 25],
                            self.summary_writer.summaries.keys())
      for step in [10, 15, 20, 25]:
        summary_value = self.summary_writer.summaries[step][0].value[0]
        self.assertEqual('global_step/sec', summary_value.tag)
        self.assertGreater(summary_value.simple_value, 0)

  @test.mock.patch.object(time, 'time')
  def test_steps_per_run_greater_than_every_n_steps(self, mock_time):
    """steps_per_run=10 > every_n_steps=5: logs on every run after the first."""
    mock_time.return_value = MOCK_START_TIME
    with ops.Graph().as_default() as g, session_lib.Session() as sess:
      self._setup_steps_per_run_test(5, 10, g, sess)
      # Logs at 20, 30, 40, 50
      for _ in range(5):
        mock_time.return_value += 0.01
        self.mon_sess.run(self.train_op)
      self.hook.end(sess)
      self.summary_writer.assert_summaries(
          test_case=self,
          expected_logdir=self.log_dir,
          expected_graph=g,
          expected_summaries={})
      self.assertItemsEqual([20, 30, 40, 50],
                            self.summary_writer.summaries.keys())
      for step in [20, 30, 40, 50]:
        summary_value = self.summary_writer.summaries[step][0].value[0]
        self.assertEqual('global_step/sec', summary_value.tag)
        self.assertGreater(summary_value.simple_value, 0)
@test_util.run_deprecated_v1
class SummarySaverHookTest(test.TestCase):
  """Tests SummarySaverHook argument validation and save cadences."""

  def setUp(self):
    """Build summary ops over an assign_add counter and a fake writer."""
    test.TestCase.setUp(self)

    self.log_dir = 'log/dir'
    self.summary_writer = fake_summary_writer.FakeSummaryWriter(self.log_dir)

    var = variables_lib.Variable(0.0)
    tensor = state_ops.assign_add(var, 1.0)
    tensor2 = tensor * 2
    self.summary_op = summary_lib.scalar('my_summary', tensor)
    self.summary_op2 = summary_lib.scalar('my_summary2', tensor2)

    training_util.get_or_create_global_step()
    self.train_op = training_util._increment_global_step(1)

  def test_raise_when_scaffold_and_summary_op_both_missing(self):
    """Exactly one of scaffold/summary_op must be provided: neither raises."""
    with self.assertRaises(ValueError):
      basic_session_run_hooks.SummarySaverHook()

  def test_raise_when_scaffold_and_summary_op_both_present(self):
    """Exactly one of scaffold/summary_op must be provided: both raises."""
    with self.assertRaises(ValueError):
      basic_session_run_hooks.SummarySaverHook(
          scaffold=monitored_session.Scaffold(), summary_op=self.summary_op)

  def test_raise_in_both_secs_and_steps(self):
    """Specifying both save_secs and save_steps is rejected."""
    with self.assertRaises(ValueError):
      basic_session_run_hooks.SummarySaverHook(
          save_secs=10, save_steps=20, summary_writer=self.summary_writer)

  def test_raise_in_none_secs_and_steps(self):
    """Specifying neither save_secs nor save_steps is rejected."""
    with self.assertRaises(ValueError):
      basic_session_run_hooks.SummarySaverHook(
          save_secs=None, save_steps=None, summary_writer=self.summary_writer)

  def test_save_steps(self):
    """With save_steps=8 summaries land at steps 1, 9, 17, 25 over 30 runs."""
    hook = basic_session_run_hooks.SummarySaverHook(
        save_steps=8,
        summary_writer=self.summary_writer,
        summary_op=self.summary_op)

    with self.cached_session() as sess:
      hook.begin()
      self.evaluate(variables_lib.global_variables_initializer())
      mon_sess = monitored_session._HookedSession(sess, [hook])
      for _ in range(30):
        mon_sess.run(self.train_op)
      hook.end(sess)

    self.summary_writer.assert_summaries(
        test_case=self,
        expected_logdir=self.log_dir,
        expected_summaries={
            1: {
                'my_summary': 1.0
            },
            9: {
                'my_summary': 2.0
            },
            17: {
                'my_summary': 3.0
            },
            25: {
                'my_summary': 4.0
            },
        })

  def test_multiple_summaries(self):
    """A list of summary ops is evaluated and written together."""
    hook = basic_session_run_hooks.SummarySaverHook(
        save_steps=8,
        summary_writer=self.summary_writer,
        summary_op=[self.summary_op, self.summary_op2])

    with self.cached_session() as sess:
      hook.begin()
      self.evaluate(variables_lib.global_variables_initializer())
      mon_sess = monitored_session._HookedSession(sess, [hook])
      for _ in range(10):
        mon_sess.run(self.train_op)
      hook.end(sess)

    self.summary_writer.assert_summaries(
        test_case=self,
        expected_logdir=self.log_dir,
        expected_summaries={
            1: {
                'my_summary': 1.0,
                'my_summary2': 2.0
            },
            9: {
                'my_summary': 2.0,
                'my_summary2': 4.0
            },
        })

  @test.mock.patch.object(time, 'time')
  def test_save_secs_saving_once_every_step(self, mock_time):
    """save_secs=0.5 with 0.5s per step yields a summary on every step."""
    mock_time.return_value = MOCK_START_TIME
    hook = basic_session_run_hooks.SummarySaverHook(
        save_secs=0.5,
        summary_writer=self.summary_writer,
        summary_op=self.summary_op)

    with self.cached_session() as sess:
      hook.begin()
      self.evaluate(variables_lib.global_variables_initializer())
      mon_sess = monitored_session._HookedSession(sess, [hook])
      for _ in range(4):
        mon_sess.run(self.train_op)
        # Advance the mocked clock past the save interval each step.
        mock_time.return_value += 0.5
      hook.end(sess)

    self.summary_writer.assert_summaries(
        test_case=self,
        expected_logdir=self.log_dir,
        expected_summaries={
            1: {
                'my_summary': 1.0
            },
            2: {
                'my_summary': 2.0
            },
            3: {
                'my_summary': 3.0
            },
            4: {
                'my_summary': 4.0
            },
        })

  @test.mock.patch.object(time, 'time')
  def test_save_secs_saving_once_every_three_steps(self, mock_time):
    """save_secs=9 with 3.1s per step yields a summary every third step."""
    mock_time.return_value = 1484695987.209386
    hook = basic_session_run_hooks.SummarySaverHook(
        save_secs=9.,
        summary_writer=self.summary_writer,
        summary_op=self.summary_op)

    with self.cached_session() as sess:
      hook.begin()
      self.evaluate(variables_lib.global_variables_initializer())
      mon_sess = monitored_session._HookedSession(sess, [hook])
      for _ in range(8):
        mon_sess.run(self.train_op)
        mock_time.return_value += 3.1
      hook.end(sess)

    # 24.8 seconds passed (3.1*8), it saves every 9 seconds starting from first:
    self.summary_writer.assert_summaries(
        test_case=self,
        expected_logdir=self.log_dir,
        expected_summaries={
            1: {
                'my_summary': 1.0
            },
            4: {
                'my_summary': 2.0
            },
            7: {
                'my_summary': 3.0
            },
        })
class GlobalStepWaiterHookTest(test.TestCase):
  """Tests GlobalStepWaiterHook's blocking behavior in before_run."""

  def test_not_wait_for_step_zero(self):
    """wait_until_step=0 returns immediately without polling."""
    with ops.Graph().as_default():
      training_util.get_or_create_global_step()
      hook = basic_session_run_hooks.GlobalStepWaiterHook(wait_until_step=0)
      hook.begin()
      with session_lib.Session() as sess:
        # Before run should return without waiting gstep increment.
        hook.before_run(
            session_run_hook.SessionRunContext(
                original_args=None, session=sess))

  @test.mock.patch.object(time, 'sleep')
  def test_wait_for_step(self, mock_sleep):
    """before_run polls (via time.sleep) until the global step is reached."""
    with ops.Graph().as_default():
      gstep = training_util.get_or_create_global_step()
      hook = basic_session_run_hooks.GlobalStepWaiterHook(wait_until_step=1000)
      hook.begin()

      with session_lib.Session() as sess:
        # Mock out calls to time.sleep() to update the global step.

        class Context(object):
          # Mutable counter shared with the side-effect closure below.
          counter = 0

        def mock_sleep_side_effect(seconds):
          del seconds  # argument is ignored
          Context.counter += 1
          if Context.counter == 1:
            # The first time sleep() is called, we update the global_step from
            # 0 to 500.
            sess.run(state_ops.assign(gstep, 500))
          elif Context.counter == 2:
            # The second time sleep() is called, we update the global_step from
            # 500 to 1100.
            sess.run(state_ops.assign(gstep, 1100))
          else:
            raise AssertionError(
                'Expected before_run() to terminate after the second call to '
                'time.sleep()')

        mock_sleep.side_effect = mock_sleep_side_effect

        # Run the mocked-out interaction with the hook.
        self.evaluate(variables_lib.global_variables_initializer())
        run_context = session_run_hook.SessionRunContext(
            original_args=None, session=sess)
        hook.before_run(run_context)
        # Exactly two sleeps: one below the target step, one above it.
        self.assertEqual(Context.counter, 2)
class FinalOpsHookTest(test.TestCase):
  """Tests FinalOpsHook's evaluation of ops at session end."""

  def test_final_ops_is_scalar_tensor(self):
    """A scalar final op's value is exposed via final_ops_values."""
    with ops.Graph().as_default():
      expected_value = 4
      final_ops = constant_op.constant(expected_value)

      hook = basic_session_run_hooks.FinalOpsHook(final_ops)
      hook.begin()

      with session_lib.Session() as session:
        hook.end(session)
        self.assertEqual(expected_value,
                         hook.final_ops_values)

  def test_final_ops_is_tensor(self):
    """A vector final op's values are exposed as an array."""
    with ops.Graph().as_default():
      expected_values = [1, 6, 3, 5, 2, 4]
      final_ops = constant_op.constant(expected_values)

      hook = basic_session_run_hooks.FinalOpsHook(final_ops)
      hook.begin()

      with session_lib.Session() as session:
        hook.end(session)
        self.assertListEqual(expected_values,
                             hook.final_ops_values.tolist())

  def test_final_ops_triggers_out_of_range_error(self):
    """An exhausted iterator in final_ops raises OutOfRangeError with a hint."""
    with ops.Graph().as_default():
      dataset = dataset_ops.Dataset.range(1)
      iterator = dataset_ops.make_one_shot_iterator(dataset)
      read_ops = iterator.get_next()
      final_ops = read_ops

      hook = basic_session_run_hooks.FinalOpsHook(final_ops)
      hook.begin()

      with session_lib.Session() as session:
        # Drain the single-element dataset so end() hits end-of-sequence.
        session.run(read_ops)
        with test.mock.patch.object(tf_logging, 'warning') as mock_log:
          with self.assertRaisesRegexp(errors.OutOfRangeError,
                                       'End of sequence'):
            hook.end(session)
          # The hook logs a hint about input-source dependencies.
          self.assertRegexpMatches(
              str(mock_log.call_args),
              'dependency back to some input source')

  def test_final_ops_with_dictionary(self):
    """A feed dict supplied at construction is used when running final_ops."""
    with ops.Graph().as_default():
      expected_values = [4, -3]
      final_ops = array_ops.placeholder(dtype=dtypes.float32)
      final_ops_feed_dict = {final_ops: expected_values}

      hook = basic_session_run_hooks.FinalOpsHook(
          final_ops, final_ops_feed_dict)
      hook.begin()

      with session_lib.Session() as session:
        hook.end(session)
        self.assertListEqual(expected_values,
                             hook.final_ops_values.tolist())
@test_util.run_deprecated_v1
class ResourceSummarySaverHookTest(test.TestCase):
  """Tests SummarySaverHook with resource variables and a scoped global step."""

  def setUp(self):
    """Build a resource-variable summary op and a fake summary writer."""
    test.TestCase.setUp(self)

    self.log_dir = 'log/dir'
    self.summary_writer = fake_summary_writer.FakeSummaryWriter(self.log_dir)

    var = variable_scope.get_variable('var', initializer=0.0, use_resource=True)
    tensor = state_ops.assign_add(var, 1.0)
    self.summary_op = summary_lib.scalar('my_summary', tensor)

    with variable_scope.variable_scope('foo', use_resource=True):
      training_util.create_global_step()
    self.train_op = training_util._increment_global_step(1)

  def test_save_steps(self):
    """With save_steps=8 summaries land at steps 1, 9, 17, 25 over 30 runs."""
    hook = basic_session_run_hooks.SummarySaverHook(
        save_steps=8,
        summary_writer=self.summary_writer,
        summary_op=self.summary_op)

    with self.cached_session() as sess:
      hook.begin()
      self.evaluate(variables_lib.global_variables_initializer())
      mon_sess = monitored_session._HookedSession(sess, [hook])
      for _ in range(30):
        mon_sess.run(self.train_op)
      hook.end(sess)

    self.summary_writer.assert_summaries(
        test_case=self,
        expected_logdir=self.log_dir,
        expected_summaries={
            1: {
                'my_summary': 1.0
            },
            9: {
                'my_summary': 2.0
            },
            17: {
                'my_summary': 3.0
            },
            25: {
                'my_summary': 4.0
            },
        })
class FeedFnHookTest(test.TestCase):
  """Tests FeedFnHook's injection of feed values into session runs."""

  def test_feeding_placeholder(self):
    """The feed_fn's dict supplies the placeholder value on each run."""
    with ops.Graph().as_default(), session_lib.Session() as sess:
      x = array_ops.placeholder(dtype=dtypes.float32)
      y = x + 1
      hook = basic_session_run_hooks.FeedFnHook(
          feed_fn=lambda: {x: 1.0})
      hook.begin()
      mon_sess = monitored_session._HookedSession(sess, [hook])
      # y = x + 1 = 2.0 only if the feed dict was applied.
      self.assertEqual(mon_sess.run(y), 2)
class ProfilerHookTest(test.TestCase):
  """Tests ProfilerHook's timeline-file output and save cadences."""

  def setUp(self):
    """Create an output dir, a glob pattern for timelines, and a train op."""
    super(ProfilerHookTest, self).setUp()
    self.output_dir = tempfile.mkdtemp()
    self.graph = ops.Graph()
    self.filepattern = os.path.join(self.output_dir, 'timeline-*.json')
    with self.graph.as_default():
      self.global_step = training_util.get_or_create_global_step()
      self.train_op = state_ops.assign_add(self.global_step, 1)

  def tearDown(self):
    """Remove the temporary output directory."""
    super(ProfilerHookTest, self).tearDown()
    shutil.rmtree(self.output_dir, ignore_errors=True)

  def _count_timeline_files(self):
    """Return the number of timeline JSON files written so far."""
    return len(gfile.Glob(self.filepattern))

  @test_util.run_deprecated_v1
  def test_raise_in_both_secs_and_steps(self):
    """Specifying both save_secs and save_steps is rejected."""
    with self.assertRaises(ValueError):
      basic_session_run_hooks.ProfilerHook(save_secs=10, save_steps=20)

  @test_util.run_deprecated_v1
  def test_raise_in_none_secs_and_steps(self):
    """Specifying neither save_secs nor save_steps is rejected."""
    with self.assertRaises(ValueError):
      basic_session_run_hooks.ProfilerHook(save_secs=None, save_steps=None)

  def test_save_secs_does_not_save_in_first_step(self):
    """Unlike CheckpointSaverHook, no timeline is written on the first step."""
    with self.graph.as_default():
      hook = basic_session_run_hooks.ProfilerHook(
          save_secs=2, output_dir=self.output_dir)
      with monitored_session.SingularMonitoredSession(hooks=[hook]) as sess:
        sess.run(self.train_op)
        self.assertEqual(0, self._count_timeline_files())

  @test.mock.patch.object(time, 'time')
  def test_save_secs_saves_periodically(self, mock_time):
    """Timelines are written only when save_secs has elapsed on the mock clock."""
    # Pick a fixed start time.
    with self.graph.as_default():
      mock_time.return_value = MOCK_START_TIME
      hook = basic_session_run_hooks.ProfilerHook(
          save_secs=2, output_dir=self.output_dir)
      with monitored_session.SingularMonitoredSession(hooks=[hook]) as sess:
        sess.run(self.train_op)  # Not saved.
        self.assertEqual(0, self._count_timeline_files())
        # Simulate 2.5 seconds of sleep.
        mock_time.return_value = MOCK_START_TIME + 2.5
        sess.run(self.train_op)  # Saved.
        self.assertEqual(1, self._count_timeline_files())

        # Pretend some small amount of time has passed.
        mock_time.return_value = MOCK_START_TIME + 2.6
        sess.run(self.train_op)  # Not saved.
        # Edge test just before we should save the timeline.
        mock_time.return_value = MOCK_START_TIME + 4.4
        sess.run(self.train_op)  # Not saved.
        self.assertEqual(1, self._count_timeline_files())

        mock_time.return_value = MOCK_START_TIME + 4.5
        sess.run(self.train_op)  # Saved.
        self.assertEqual(2, self._count_timeline_files())

  def test_save_steps_does_not_save_in_first_step(self):
    """Even with save_steps=1, the very first step writes no timeline."""
    with self.graph.as_default():
      hook = basic_session_run_hooks.ProfilerHook(
          save_steps=1, output_dir=self.output_dir)
      with monitored_session.SingularMonitoredSession(hooks=[hook]) as sess:
        sess.run(self.train_op)  # Not saved.
        self.assertEqual(0, self._count_timeline_files())

  def test_save_steps_saves_periodically(self):
    """With save_steps=2, timelines are written on every second run."""
    with self.graph.as_default():
      hook = basic_session_run_hooks.ProfilerHook(
          save_steps=2, output_dir=self.output_dir)
      with monitored_session.SingularMonitoredSession(hooks=[hook]) as sess:
        self.assertEqual(0, self._count_timeline_files())
        sess.run(self.train_op)  # Not saved.
        self.assertEqual(0, self._count_timeline_files())
        sess.run(self.train_op)  # Saved.
        self.assertEqual(1, self._count_timeline_files())
        sess.run(self.train_op)  # Not saved.
        self.assertEqual(1, self._count_timeline_files())
        sess.run(self.train_op)  # Saved.
        self.assertEqual(2, self._count_timeline_files())
        sess.run(self.train_op)  # Not saved.
        self.assertEqual(2, self._count_timeline_files())

  def test_run_metadata_saves(self):
    """Run metadata is forwarded to the summary writer under 'step_N' keys."""
    writer_cache.FileWriterCache.clear()
    fake_summary_writer.FakeSummaryWriter.install()
    fake_writer = writer_cache.FileWriterCache.get(self.output_dir)
    with self.graph.as_default():
      hook = basic_session_run_hooks.ProfilerHook(
          save_steps=1, output_dir=self.output_dir)
      with monitored_session.SingularMonitoredSession(hooks=[hook]) as sess:
        sess.run(self.train_op)  # Not saved.
        sess.run(self.train_op)  # Saved.
        self.assertEqual(
            list(fake_writer._added_run_metadata.keys()), ['step_2'])
    # Restore the real summary writer for subsequent tests.
    fake_summary_writer.FakeSummaryWriter.uninstall()
# Run all tests in this module when executed as a script.
if __name__ == '__main__':
  test.main()
| |
"""Support for MQTT sensors."""
from __future__ import annotations
from datetime import timedelta
import functools
import logging
import voluptuous as vol
from homeassistant.components import sensor
from homeassistant.components.sensor import (
DEVICE_CLASSES_SCHEMA,
STATE_CLASSES_SCHEMA,
SensorEntity,
)
from homeassistant.const import (
CONF_DEVICE_CLASS,
CONF_FORCE_UPDATE,
CONF_NAME,
CONF_UNIT_OF_MEASUREMENT,
CONF_VALUE_TEMPLATE,
)
from homeassistant.core import HomeAssistant, callback
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.event import async_track_point_in_utc_time
from homeassistant.helpers.reload import async_setup_reload_service
from homeassistant.helpers.typing import ConfigType
from homeassistant.util import dt as dt_util
from . import CONF_QOS, CONF_STATE_TOPIC, DOMAIN, PLATFORMS, subscription
from .. import mqtt
from .debug_info import log_messages
from .mixins import (
MQTT_ENTITY_COMMON_SCHEMA,
MqttAvailability,
MqttEntity,
async_setup_entry_helper,
)
_LOGGER = logging.getLogger(__name__)

# Configuration keys specific to the MQTT sensor platform.
CONF_EXPIRE_AFTER = "expire_after"
CONF_LAST_RESET_TOPIC = "last_reset_topic"
CONF_LAST_RESET_VALUE_TEMPLATE = "last_reset_value_template"
CONF_STATE_CLASS = "state_class"

# Attributes the entity manages itself; they must not be overridden by a
# JSON attributes topic payload.
MQTT_SENSOR_ATTRIBUTES_BLOCKED = frozenset(
    {
        sensor.ATTR_LAST_RESET,
        sensor.ATTR_STATE_CLASS,
    }
)

DEFAULT_NAME = "MQTT Sensor"
DEFAULT_FORCE_UPDATE = False

# Validation schema for YAML and discovery configuration of a sensor.
PLATFORM_SCHEMA = vol.All(
    # Deprecated, remove in Home Assistant 2021.11
    cv.deprecated(CONF_LAST_RESET_TOPIC),
    cv.deprecated(CONF_LAST_RESET_VALUE_TEMPLATE),
    mqtt.MQTT_RO_PLATFORM_SCHEMA.extend(
        {
            vol.Optional(CONF_DEVICE_CLASS): DEVICE_CLASSES_SCHEMA,
            vol.Optional(CONF_EXPIRE_AFTER): cv.positive_int,
            vol.Optional(CONF_FORCE_UPDATE, default=DEFAULT_FORCE_UPDATE): cv.boolean,
            vol.Optional(CONF_LAST_RESET_TOPIC): mqtt.valid_subscribe_topic,
            vol.Optional(CONF_LAST_RESET_VALUE_TEMPLATE): cv.template,
            vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
            vol.Optional(CONF_STATE_CLASS): STATE_CLASSES_SCHEMA,
            vol.Optional(CONF_UNIT_OF_MEASUREMENT): cv.string,
        }
    ).extend(MQTT_ENTITY_COMMON_SCHEMA.schema),
)
async def async_setup_platform(
    hass: HomeAssistant, config: ConfigType, async_add_entities, discovery_info=None
) -> None:
    """Set up MQTT sensors through configuration.yaml."""
    # Register the reload service before creating entities so YAML reloads work.
    await async_setup_reload_service(hass, DOMAIN, PLATFORMS)
    await _async_setup_entity(hass, async_add_entities, config)
async def async_setup_entry(hass, config_entry, async_add_entities) -> None:
    """Set up MQTT sensors dynamically through MQTT discovery."""
    # Bind the entity factory so the discovery helper can call it per message.
    setup = functools.partial(
        _async_setup_entity, hass, async_add_entities, config_entry=config_entry
    )
    await async_setup_entry_helper(hass, sensor.DOMAIN, setup, PLATFORM_SCHEMA)
async def _async_setup_entity(
    hass, async_add_entities, config: ConfigType, config_entry=None, discovery_data=None
) -> None:
    """Set up MQTT sensor."""
    async_add_entities([MqttSensor(hass, config, config_entry, discovery_data)])
class MqttSensor(MqttEntity, SensorEntity):
"""Representation of a sensor that can be updated using MQTT."""
_attr_last_reset = None
_attributes_extra_blocked = MQTT_SENSOR_ATTRIBUTES_BLOCKED
def __init__(self, hass, config, config_entry, discovery_data):
"""Initialize the sensor."""
self._state = None
self._expiration_trigger = None
expire_after = config.get(CONF_EXPIRE_AFTER)
if expire_after is not None and expire_after > 0:
self._expired = True
else:
self._expired = None
MqttEntity.__init__(self, hass, config, config_entry, discovery_data)
    @staticmethod
    def config_schema():
        """Return the config schema."""
        # Shared module-level schema; also validates discovery payloads.
        return PLATFORM_SCHEMA
def _setup_from_config(self, config):
"""(Re)Setup the entity."""
template = self._config.get(CONF_VALUE_TEMPLATE)
if template is not None:
template.hass = self.hass
last_reset_template = self._config.get(CONF_LAST_RESET_VALUE_TEMPLATE)
if last_reset_template is not None:
last_reset_template.hass = self.hass
    async def _subscribe_topics(self):
        """(Re)Subscribe to topics."""
        topics = {}

        @callback
        @log_messages(self.hass, self.entity_id)
        def message_received(msg):
            """Handle new MQTT messages."""
            payload = msg.payload
            # auto-expire enabled?
            expire_after = self._config.get(CONF_EXPIRE_AFTER)
            if expire_after is not None and expire_after > 0:
                # When expire_after is set, and we receive a message, assume device is not expired since it has to be to receive the message
                self._expired = False

                # Reset old trigger
                if self._expiration_trigger:
                    # Calling the stored handle cancels the scheduled callback.
                    self._expiration_trigger()
                    self._expiration_trigger = None

                # Set new trigger
                expiration_at = dt_util.utcnow() + timedelta(seconds=expire_after)

                self._expiration_trigger = async_track_point_in_utc_time(
                    self.hass, self._value_is_expired, expiration_at
                )

            template = self._config.get(CONF_VALUE_TEMPLATE)
            if template is not None:
                # Expose entity_id to the template; the previous state is the
                # template's default when rendering fails.
                variables = {"entity_id": self.entity_id}
                payload = template.async_render_with_possible_json_value(
                    payload,
                    self._state,
                    variables=variables,
                )
            self._state = payload
            self.async_write_ha_state()

        topics["state_topic"] = {
            "topic": self._config[CONF_STATE_TOPIC],
            "msg_callback": message_received,
            "qos": self._config[CONF_QOS],
        }

        @callback
        @log_messages(self.hass, self.entity_id)
        def last_reset_message_received(msg):
            """Handle new last_reset messages."""
            payload = msg.payload
            template = self._config.get(CONF_LAST_RESET_VALUE_TEMPLATE)
            if template is not None:
                variables = {"entity_id": self.entity_id}
                payload = template.async_render_with_possible_json_value(
                    payload,
                    self._state,
                    variables=variables,
                )

            if not payload:
                _LOGGER.debug("Ignoring empty last_reset message from '%s'", msg.topic)
                return
            try:
                last_reset = dt_util.parse_datetime(payload)
                if last_reset is None:
                    # Treat unparseable datetimes the same as a parse error.
                    raise ValueError
                self._attr_last_reset = last_reset
            except ValueError:
                _LOGGER.warning(
                    "Invalid last_reset message '%s' from '%s'", msg.payload, msg.topic
                )
            self.async_write_ha_state()

        # Only subscribe to the deprecated last_reset topic when configured.
        if CONF_LAST_RESET_TOPIC in self._config:
            topics["last_reset_topic"] = {
                "topic": self._config[CONF_LAST_RESET_TOPIC],
                "msg_callback": last_reset_message_received,
                "qos": self._config[CONF_QOS],
            }

        self._sub_state = await subscription.async_subscribe_topics(
            self.hass, self._sub_state, topics
        )
@callback
def _value_is_expired(self, *_):
    """Mark the value as expired once the expiry timer fires."""
    # The one-shot timer has fired; drop its handle and flag the value stale.
    self._expired = True
    self._expiration_trigger = None
    self.async_write_ha_state()
@property
def native_unit_of_measurement(self):
    """Return the unit this state is expressed in (from config, may be None)."""
    return self._config.get(CONF_UNIT_OF_MEASUREMENT)
@property
def force_update(self):
    """Return whether state writes are forced even when the value is unchanged."""
    return self._config[CONF_FORCE_UPDATE]
@property
def native_value(self):
    """Return the state of the entity (last received, possibly templated, payload)."""
    return self._state
@property
def device_class(self) -> str | None:
    """Return the device class of the sensor (from config, may be None)."""
    return self._config.get(CONF_DEVICE_CLASS)
@property
def state_class(self) -> str | None:
    """Return the state class of the sensor (from config, may be None)."""
    return self._config.get(CONF_STATE_CLASS)
@property
def available(self) -> bool:
    """Return true if the device is available and value has not expired."""
    expire_after = self._config.get(CONF_EXPIRE_AFTER)
    # Combine base MQTT availability with the expiry state: the value counts
    # as fresh when expiry is disabled (None) or the timer has not yet fired.
    # mypy doesn't know about fget: https://github.com/python/mypy/issues/6185
    return MqttAvailability.available.fget(self) and (  # type: ignore[attr-defined]
        expire_after is None or not self._expired
    )
| |
from flask import render_template, flash, redirect, url_for, request
from webapp import app, db
from webapp.forms import SignUpForm
from webapp.models import Monkey, Friendship, BestFriend
from config import PER_PAGE
@app.route('/', methods=['GET', 'POST'])
def home():
    """Landing page with the sign-up form; a valid POST creates a monkey."""
    form = SignUpForm()
    if not form.validate_on_submit():
        return render_template('home.html', title='Home', form=form)
    new_monkey = Monkey(name=form.name.data,
                        age=form.age.data,
                        email=form.email.data)
    db.session.add(new_monkey)
    db.session.commit()
    flash(new_monkey.name + ' came to life!')
    return redirect(url_for('profile', monkey_id=new_monkey.monkey_id))
@app.route('/monkeys/<int:page>/')
def monkeys(page,
            monkeys=None,
            alphabetically=False,
            best_friend=False,
            number_of_friend=False,
            current_mode='monkeys'):
    """Render the paginated monkey listing.

    The sorting views reuse this function by passing their own pre-sorted
    ``monkeys`` sequence and ``current_mode``.
    """
    if not monkeys:
        monkeys = Monkey.query.paginate(page, PER_PAGE).items
    rows = []
    for monkey in monkeys:
        best_id = None
        best_name = None
        relation = BestFriend.query.filter_by(monkey_id=monkey.monkey_id)
        if relation.count():
            best_id = relation.first().best_friend_id
            best = Monkey.query.get(best_id)
            if best:
                best_name = best.name
        friend_count = (Friendship.query.filter_by(
            monkey_id=monkey.monkey_id).count())
        rows.append((monkey.monkey_id,
                     monkey.name,
                     best_id,
                     best_name,
                     friend_count))
    # Ceiling division gives the page count in one expression.
    total_monkey = Monkey.query.count()
    number_of_pages = -(-total_monkey // PER_PAGE)
    return render_template('monkeys.html', title='Monkeys',
                           monkeys=rows,
                           current_page=page,
                           number_of_pages=number_of_pages,
                           current_mode=current_mode)
@app.route('/monkeys/alphabetically/<int:page>/')
def alphabetically(page):
    """List monkeys sorted by their own name."""
    page_items = (Monkey.query
                  .order_by(Monkey.name)
                  .paginate(page, PER_PAGE)
                  .items)
    flash('Sorted alphabetically')
    return monkeys(page,
                   monkeys=page_items,
                   alphabetically=True,
                   current_mode='alphabetically')
@app.route('/monkeys/best_friend/<int:page>/')
def best_friend(page):
    """List monkeys sorted by their best friend's name."""
    # Pairs of (owner monkey_id, best friend's name); used as an outer join
    # target so monkeys without a best friend are still listed.
    subquery = (db.session.query(BestFriend.monkey_id, Monkey.name).
                filter(Monkey.monkey_id == BestFriend.best_friend_id).
                subquery())
    sorted_monkeys = (Monkey.query.
                      outerjoin(subquery,
                                Monkey.monkey_id == subquery.c.monkey_id).
                      order_by(subquery.c.name).
                      paginate(page, PER_PAGE).items)
    flash("Sorted by best friend's name")
    # Delegate rendering to the shared listing view.
    return monkeys(page,
                   monkeys=sorted_monkeys,
                   best_friend=True,
                   current_mode='best_friend')
@app.route('/monkeys/number_of_friend/<int:page>/')
def number_of_friend(page):
    """List monkeys sorted by how many friends each one has."""
    # (monkey_id, friend count) per monkey; the outer join keeps monkeys
    # that have no Friendship rows at all.
    subquery = (db.session.
                query(Friendship.monkey_id, db.func.count('*').
                      label('number_of_friend')).
                group_by(Friendship.monkey_id).
                subquery())
    sorted_monkeys = (Monkey.query.
                      outerjoin(subquery,
                                Monkey.monkey_id == subquery.c.monkey_id).
                      order_by(subquery.c.number_of_friend).
                      paginate(page, PER_PAGE).items)
    flash("Sorted by number of friends")
    # Delegate rendering to the shared listing view.
    return monkeys(page,
                   monkeys=sorted_monkeys,
                   number_of_friend=True,
                   current_mode='number_of_friend')
@app.route('/monkeys/profile/<int:monkey_id>/', methods=['GET', 'POST'])
def profile(monkey_id):
    """Show a monkey's profile; a valid POST edits the monkey's basic info."""
    form = SignUpForm()
    monkey = Monkey.query.get(monkey_id)
    # NOTE(review): query.get returns None for an unknown id, in which case
    # the attribute accesses below raise — confirm ids are always valid here.
    best_friend_name = 'Not yet'
    bf_row = BestFriend.query.filter_by(monkey_id=monkey_id)
    if bf_row.count():
        best_friend_name = Monkey.query.get(bf_row.first().best_friend_id).name
    # All monkeys this one is friends with, joined through Friendship.
    friends_subquery = (Friendship.query
                        .filter_by(monkey_id=monkey_id)
                        .subquery())
    friends = (Monkey.query.join(friends_subquery,
                                 Monkey.monkey_id ==
                                 friends_subquery.c.friend_id))
    if form.validate_on_submit():
        # The sign-up form doubles as the edit form on this page.
        monkey.name = form.name.data
        monkey.age = form.age.data
        monkey.email = form.email.data
        db.session.add(monkey)
        db.session.commit()
        flash('Monkey info edited')
        return redirect(url_for('profile', monkey_id=monkey_id))
    return render_template('profile.html',
                           title='Profile',
                           monkey=monkey,
                           monkey_id=monkey_id,
                           form=form,
                           best_friend_name=best_friend_name,
                           friends=friends)
@app.route('/monkeys/profile/<int:monkey_id>/terminate')
def terminate(monkey_id):
    """Delete a monkey together with every relationship referencing it."""
    # Remove best-friend rows where the monkey appears on either side.
    for row in BestFriend.query.filter(
            (BestFriend.monkey_id == monkey_id) |
            (BestFriend.best_friend_id == monkey_id)):
        db.session.delete(row)
    # Remove friendship rows where the monkey appears on either side.
    for row in Friendship.query.filter(
            (Friendship.monkey_id == monkey_id) |
            (Friendship.friend_id == monkey_id)):
        db.session.delete(row)
    # Finally remove the monkey record itself.
    doomed = Monkey.query.get(monkey_id)
    name = doomed.name
    db.session.delete(doomed)
    db.session.commit()
    flash('{monkey} was terminated'.format(monkey=name))
    return redirect(url_for('home'))
@app.route('/monkeys/profile/<int:monkey_id>/add_friends',
           methods=['GET', 'POST'])
def add_friends(monkey_id):
    """List monkeys that are not yet friends; create friendships on POST."""
    monkey = Monkey.query.get(monkey_id)
    header = 'Add monkey friends to {monkey}'.format(monkey=monkey.name)
    # Anti-join on Friendship: keep only monkeys with no friendship row to
    # this monkey, and exclude the monkey itself.
    monkeys = (db.session.
               query(Monkey).
               outerjoin(
                   Friendship,
                   db.and_(Friendship.monkey_id == monkey_id,
                           Friendship.friend_id == Monkey.monkey_id)).
               filter(Friendship.monkey_id.is_(None)).
               filter(Monkey.monkey_id != monkey_id))
    if request.method == 'POST':
        # Each submitted form key is the id of a monkey to befriend; rows
        # are inserted in both directions to keep the relation symmetric.
        for key in request.form:
            friend_name = Monkey.query.get(int(key)).name
            friend = Friendship(monkey_id=monkey_id, friend_id=int(key))
            friend_reciprocal = Friendship(monkey_id=int(key),
                                           friend_id=monkey_id)
            db.session.add(friend)
            db.session.add(friend_reciprocal)
            db.session.commit()
            flash('{monkey} became friend with {friend} and vice versa'.
                  format(monkey=monkey.name, friend=friend_name))
        return redirect(url_for('profile', monkey_id=monkey_id))
    return render_template('friendship.html',
                           message=header,
                           monkeys=monkeys,
                           monkey_id=monkey.monkey_id,
                           view='add_friends')
@app.route('/monkeys/profile/<int:monkey_id>/remove_friends',
           methods=['GET', 'POST'])
def remove_friends(monkey_id):
    """List a monkey's friends; delete the selected friendships on POST."""
    monkey = Monkey.query.get(monkey_id)
    header = ("Remove monkey friends from {name}'s friend list".
              format(name=monkey.name))
    # Current friends of this monkey, joined through Friendship.
    friends_subquery = (Friendship.query
                        .filter_by(monkey_id=monkey_id)
                        .subquery())
    monkeys = (Monkey.query.join(friends_subquery,
                                 Monkey.monkey_id ==
                                 friends_subquery.c.friend_id))
    if request.method == 'POST':
        # Each submitted form key is the id of a friend to remove; both
        # directions of the friendship row pair are deleted.
        for key in request.form:
            friend = (Friendship.query.
                      filter(Friendship.monkey_id == monkey_id,
                             Friendship.friend_id == int(key)).one())
            friend_reciprocal = (Friendship.query.
                                 filter(
                                     Friendship.monkey_id == int(key),
                                     Friendship.friend_id == monkey_id).one())
            reciprocal_name = Monkey.query.get(int(key)).name
            db.session.delete(friend)
            db.session.delete(friend_reciprocal)
            db.session.commit()
            flash("{friend} was removed from {monkey}'s\
 friend list and vice versa".
                  format(friend=reciprocal_name, monkey=monkey.name))
        return redirect(url_for('profile', monkey_id=monkey_id))
    return render_template('friendship.html',
                           message=header,
                           monkeys=monkeys,
                           monkey_id=monkey_id,
                           view='remove_friends')
@app.route('/monkeys/profile/<int:monkey_id>/choose_best_friend',
           methods=['GET', 'POST'])
def choose_best_friend(monkey_id):
    """Pick (or replace) a monkey's single best friend."""
    current_bf_name = ''
    monkey = Monkey.query.get(monkey_id)
    header = 'Choose best friend for {name}'.format(name=monkey.name)
    bf_row = BestFriend.query.filter_by(monkey_id=monkey_id)
    if bf_row.count():
        current_bf_name = Monkey.query.get(bf_row.first().best_friend_id).name
    # Every other monkey is a candidate.
    monkeys = Monkey.query.filter(Monkey.monkey_id != monkey_id)
    if request.method == 'POST':
        # The form submits the selected monkey id as a form value.
        for value in request.form.values():
            if bf_row.count():
                # Re-point the existing BestFriend row at the new choice.
                bf_row.first().best_friend_id = int(value)
                db.session.add(bf_row.first())
                best_friend_name = (Monkey.query.
                                    get(bf_row.first().best_friend_id).name)
            else:
                # First-time choice: insert a fresh BestFriend row.
                bf = BestFriend(monkey_id=monkey_id, best_friend_id=int(value))
                best_friend_name = Monkey.query.get(int(value)).name
                db.session.add(bf)
            db.session.commit()
            flash('{monkey} chose {bf} as his/her best friend'.
                  format(monkey=monkey.name, bf=best_friend_name))
        return redirect(url_for('profile', monkey_id=monkey_id))
    return render_template('friendship.html',
                           message=header,
                           monkeys=monkeys,
                           monkey_id=monkey_id,
                           view='choose_best_friend',
                           current_bf=current_bf_name)
@app.errorhandler(404)
def not_found_error(error):
    """Render the friendly 404 page."""
    return render_template(
        'error.html',
        error=error,
        message="I searched the database, top to bottom, it's just not there!")
@app.errorhandler(500)
def internal_error(error):
    """Roll back the failed session and render the 500 page."""
    db.session.rollback()
    message = "You brought my database to its knees; I 'rollback', it's ok!"
    return render_template('error.html',
                           error='500: Internal Server Error',
                           message=message)
| |
# Copyright 2013-2017 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file contains the detection logic for miscellaneous external dependencies.
import functools
import os
import re
import shlex
import sysconfig
from pathlib import Path
from .. import mlog
from .. import mesonlib
from ..environment import detect_cpu_family
from .base import (
DependencyException, DependencyMethods, ExternalDependency,
ExternalProgram, ExtraFrameworkDependency, PkgConfigDependency,
ConfigToolDependency,
)
class MPIDependency(ExternalDependency):
    """MPI dependency.

    Detection order: pkg-config (only OpenMPI ships .pc files), then
    compiler wrapper scripts (mpicc and friends, possibly overridden via
    environment variables), then MS-MPI environment variables on Windows.
    """

    def __init__(self, environment, kwargs):
        language = kwargs.get('language', 'c')
        super().__init__('mpi', environment, language, kwargs)
        kwargs['required'] = False
        kwargs['silent'] = True
        self.is_found = False

        # NOTE: Only OpenMPI supplies a pkg-config file at the moment.
        if language == 'c':
            env_vars = ['MPICC']
            pkgconfig_files = ['ompi-c']
            default_wrappers = ['mpicc']
        elif language == 'cpp':
            env_vars = ['MPICXX']
            pkgconfig_files = ['ompi-cxx']
            default_wrappers = ['mpic++', 'mpicxx', 'mpiCC']
        elif language == 'fortran':
            env_vars = ['MPIFC', 'MPIF90', 'MPIF77']
            pkgconfig_files = ['ompi-fort']
            default_wrappers = ['mpifort', 'mpif90', 'mpif77']
        else:
            raise DependencyException('Language {} is not supported with MPI.'.format(language))

        # 1) pkg-config.
        for pkg in pkgconfig_files:
            try:
                pkgdep = PkgConfigDependency(pkg, environment, kwargs, language=self.language)
                if pkgdep.found():
                    self.compile_args = pkgdep.get_compile_args()
                    self.link_args = pkgdep.get_link_args()
                    self.version = pkgdep.get_version()
                    self.is_found = True
                    self.pcdep = pkgdep
                    break
            except Exception:
                pass

        # 2) Compiler wrappers.
        if not self.is_found:
            # Prefer environment.
            for var in env_vars:
                if var in os.environ:
                    wrappers = [os.environ[var]]
                    break
            else:
                # Or search for default wrappers.
                wrappers = default_wrappers

            for prog in wrappers:
                # Try the OpenMPI-style '--showme' interface first, then the
                # MPICH/Intel-style '-show' interface.
                result = self._try_openmpi_wrapper(prog)
                if result is not None:
                    self.is_found = True
                    self.version = result[0]
                    self.compile_args = self._filter_compile_args(result[1])
                    self.link_args = self._filter_link_args(result[2])
                    break
                result = self._try_other_wrapper(prog)
                if result is not None:
                    self.is_found = True
                    self.version = result[0]
                    self.compile_args = self._filter_compile_args(result[1])
                    self.link_args = self._filter_link_args(result[2])
                    break

        # 3) MS-MPI on Windows.
        if not self.is_found and mesonlib.is_windows():
            result = self._try_msmpi()
            if result is not None:
                self.is_found = True
                self.version, self.compile_args, self.link_args = result

    def _filter_compile_args(self, args):
        """
        MPI wrappers return a bunch of garbage args.
        Drop -O2 and everything that is not needed.

        Keeps -D*, -f*, -pthread, include flags (and, for Fortran, the
        compiler's module-dir flags) plus -W* warnings other than
        -Wall/-Werror*; a flag in multi_args carries its path as the NEXT
        argument, which is kept too.
        """
        result = []
        multi_args = ('-I', )
        if self.language == 'fortran':
            fc = self.env.coredata.compilers['fortran']
            multi_args += fc.get_module_incdir_args()

        include_next = False
        for f in args:
            if f.startswith(('-D', '-f') + multi_args) or f == '-pthread' \
                    or (f.startswith('-W') and f != '-Wall' and not f.startswith('-Werror')):
                result.append(f)
                if f in multi_args:
                    # Path is a separate argument.
                    include_next = True
            elif include_next:
                include_next = False
                result.append(f)
        return result

    def _filter_link_args(self, args):
        """
        MPI wrappers return a bunch of garbage args.
        Drop -O2 and everything that is not needed.

        Keeps -L/-l/-Xlinker flags, -pthread and -W* warnings other than
        -Wall/-Werror*; bare '-L'/'-Xlinker' carry their value as the NEXT
        argument, which is kept too.
        """
        result = []
        include_next = False
        for f in args:
            if f.startswith(('-L', '-l', '-Xlinker')) or f == '-pthread' \
                    or (f.startswith('-W') and f != '-Wall' and not f.startswith('-Werror')):
                result.append(f)
                if f in ('-L', '-Xlinker'):
                    include_next = True
            elif include_next:
                include_next = False
                result.append(f)
        return result

    def _try_openmpi_wrapper(self, prog):
        """Query an OpenMPI wrapper; return (version, cargs, libs) or None."""
        prog = ExternalProgram(prog, silent=True)
        if prog.found():
            cmd = prog.get_command() + ['--showme:compile']
            p, o, e = mesonlib.Popen_safe(cmd)
            p.wait()
            if p.returncode != 0:
                mlog.debug('Command', mlog.bold(cmd), 'failed to run:')
                mlog.debug(mlog.bold('Standard output\n'), o)
                mlog.debug(mlog.bold('Standard error\n'), e)
                return
            cargs = shlex.split(o)

            cmd = prog.get_command() + ['--showme:link']
            p, o, e = mesonlib.Popen_safe(cmd)
            p.wait()
            if p.returncode != 0:
                mlog.debug('Command', mlog.bold(cmd), 'failed to run:')
                mlog.debug(mlog.bold('Standard output\n'), o)
                mlog.debug(mlog.bold('Standard error\n'), e)
                return
            libs = shlex.split(o)

            cmd = prog.get_command() + ['--showme:version']
            p, o, e = mesonlib.Popen_safe(cmd)
            p.wait()
            if p.returncode != 0:
                mlog.debug('Command', mlog.bold(cmd), 'failed to run:')
                mlog.debug(mlog.bold('Standard output\n'), o)
                mlog.debug(mlog.bold('Standard error\n'), e)
                return
            # Fixed: was the non-raw pattern '\d+.\d+.\d+' with unescaped
            # dots, which matched any character between the numbers.
            version = re.search(r'\d+\.\d+\.\d+', o)
            if version:
                version = version.group(0)
            else:
                version = None

            return version, cargs, libs

    def _try_other_wrapper(self, prog):
        """Query an MPICH-style wrapper via '-show'; version is unknown."""
        prog = ExternalProgram(prog, silent=True)
        if prog.found():
            cmd = prog.get_command() + ['-show']
            p, o, e = mesonlib.Popen_safe(cmd)
            p.wait()
            if p.returncode != 0:
                mlog.debug('Command', mlog.bold(cmd), 'failed to run:')
                mlog.debug(mlog.bold('Standard output\n'), o)
                mlog.debug(mlog.bold('Standard error\n'), e)
                return
            args = shlex.split(o)

            version = None

            return version, args, args

    def _try_msmpi(self):
        """Detect MS-MPI from its environment variables on Windows."""
        if self.language == 'cpp':
            # MS-MPI does not support the C++ version of MPI, only the standard C API.
            return
        if 'MSMPI_INC' not in os.environ:
            return
        incdir = os.environ['MSMPI_INC']
        arch = detect_cpu_family(self.env.coredata.compilers)
        if arch == 'x86':
            if 'MSMPI_LIB32' not in os.environ:
                return
            libdir = os.environ['MSMPI_LIB32']
            post = 'x86'
        elif arch == 'x86_64':
            if 'MSMPI_LIB64' not in os.environ:
                return
            libdir = os.environ['MSMPI_LIB64']
            post = 'x64'
        else:
            return
        if self.language == 'fortran':
            return (None,
                    ['-I' + incdir, '-I' + os.path.join(incdir, post)],
                    [os.path.join(libdir, 'msmpi.lib'), os.path.join(libdir, 'msmpifec.lib')])
        else:
            return (None,
                    ['-I' + incdir, '-I' + os.path.join(incdir, post)],
                    [os.path.join(libdir, 'msmpi.lib')])
class OpenMPDependency(ExternalDependency):
    """OpenMP dependency, detected by asking the compiler for the _OPENMP
    macro, whose value is the spec release date."""

    # Map date of specification release (which is the macro value) to a version.
    VERSIONS = {
        '201811': '5.0',
        '201511': '4.5',
        '201307': '4.0',
        '201107': '3.1',
        '200805': '3.0',
        '200505': '2.5',
        '200203': '2.0',
        '199810': '1.0',
    }

    def __init__(self, environment, kwargs):
        language = kwargs.get('language')
        super().__init__('openmp', environment, language, kwargs)
        self.is_found = False
        try:
            openmp_date = self.clib_compiler.get_define('_OPENMP', '', self.env, [], [self])
        except mesonlib.EnvironmentException as e:
            mlog.debug('OpenMP support not available in the compiler')
            mlog.debug(e)
            openmp_date = False

        if openmp_date:
            try:
                self.version = self.VERSIONS[openmp_date]
            except KeyError:
                # A compiler may report a spec date we do not know about yet
                # (e.g. a draft release); warn and treat OpenMP as not found
                # instead of crashing with a KeyError.
                mlog.log(mlog.yellow('WARNING:'),
                         'Unknown OpenMP specification date ' + str(openmp_date))
                return
            if self.clib_compiler.has_header('omp.h', '', self.env, dependencies=[self]):
                self.is_found = True
            else:
                mlog.log(mlog.yellow('WARNING:'), 'OpenMP found but omp.h missing.')

    def need_openmp(self):
        """Tell the backend to add the compiler's OpenMP flags."""
        return True
class ThreadDependency(ExternalDependency):
    """Threading support; always considered found because the compilers
    themselves supply the required flags."""

    def __init__(self, environment, kwargs):
        super().__init__('threads', environment, None, kwargs)
        self.name = 'threads'
        self.is_found = True

    def need_threads(self):
        """Tell the backend that thread flags must be added."""
        return True
class Python3Dependency(ExternalDependency):
    """Python 3 dependency detected through sysconfig.

    The sysconfig path is mainly useful on Windows; elsewhere pkg-config
    or the OSX framework (see _factory) are used instead.
    """

    def __init__(self, environment, kwargs):
        super().__init__('python3', environment, None, kwargs)

        # sysconfig describes the *running* interpreter, so it is useless
        # when cross-compiling.
        if self.want_cross:
            return

        self.name = 'python3'
        self.static = kwargs.get('static', False)
        # We can only be sure that it is Python 3 at this point
        self.version = '3'
        self._find_libpy3_windows(environment)

    @classmethod
    def _factory(cls, environment, kwargs):
        """Return candidate dependency constructors for each allowed method."""
        methods = cls._process_method_kw(kwargs)
        candidates = []

        if DependencyMethods.PKGCONFIG in methods:
            candidates.append(functools.partial(PkgConfigDependency, 'python3', environment, kwargs))

        if DependencyMethods.SYSCONFIG in methods:
            candidates.append(functools.partial(Python3Dependency, environment, kwargs))

        if DependencyMethods.EXTRAFRAMEWORK in methods:
            # In OSX the Python 3 framework does not have a version
            # number in its name.
            # There is a python in /System/Library/Frameworks, but that's
            # python 2, Python 3 will always be in /Library
            candidates.append(functools.partial(
                ExtraFrameworkDependency, 'python', False, '/Library/Frameworks',
                environment, kwargs.get('language', None), kwargs))

        return candidates

    @staticmethod
    def get_windows_python_arch():
        """Return '32' or '64' for the running Python, or None if unknown."""
        pyplat = sysconfig.get_platform()
        if pyplat == 'mingw':
            pycc = sysconfig.get_config_var('CC')
            if pycc.startswith('x86_64'):
                return '64'
            elif pycc.startswith(('i686', 'i386')):
                return '32'
            else:
                # Fixed: the two adjacent literals used to concatenate
                # without a space ("...please filea bug").
                mlog.log('MinGW Python built with unknown CC {!r}, please file '
                         'a bug'.format(pycc))
                return None
        elif pyplat == 'win32':
            return '32'
        elif pyplat in ('win64', 'win-amd64'):
            return '64'
        mlog.log('Unknown Windows Python platform {!r}'.format(pyplat))
        return None

    def get_windows_link_args(self):
        """Return link args for the python3 library, or None if not found."""
        pyplat = sysconfig.get_platform()
        if pyplat.startswith('win'):
            vernum = sysconfig.get_config_var('py_version_nodot')
            if self.static:
                libname = 'libpython{}.a'.format(vernum)
            else:
                libname = 'python{}.lib'.format(vernum)
            lib = Path(sysconfig.get_config_var('base')) / 'libs' / libname
        elif pyplat == 'mingw':
            if self.static:
                libname = sysconfig.get_config_var('LIBRARY')
            else:
                libname = sysconfig.get_config_var('LDLIBRARY')
            lib = Path(sysconfig.get_config_var('LIBDIR')) / libname
        else:
            # Fixed: previously fell through with 'lib' unbound and raised
            # NameError on unknown platforms.
            return None
        if not lib.exists():
            mlog.log('Could not find Python3 library {!r}'.format(str(lib)))
            return None
        return [str(lib)]

    def _find_libpy3_windows(self, env):
        '''
        Find python3 libraries on Windows and also verify that the arch matches
        what we are building for.
        '''
        pyarch = self.get_windows_python_arch()
        if pyarch is None:
            self.is_found = False
            return
        arch = detect_cpu_family(env.coredata.compilers)
        if arch == 'x86':
            arch = '32'
        elif arch == 'x86_64':
            arch = '64'
        else:
            # We can't cross-compile Python 3 dependencies on Windows yet
            mlog.log('Unknown architecture {!r} for'.format(arch),
                     mlog.bold(self.name))
            self.is_found = False
            return
        # Pyarch ends in '32' or '64'
        if arch != pyarch:
            mlog.log('Need', mlog.bold(self.name), 'for {}-bit, but '
                     'found {}-bit'.format(arch, pyarch))
            self.is_found = False
            return
        # This can fail if the library is not found
        largs = self.get_windows_link_args()
        if largs is None:
            self.is_found = False
            return
        self.link_args = largs
        # Compile args
        inc = sysconfig.get_path('include')
        platinc = sysconfig.get_path('platinclude')
        self.compile_args = ['-I' + inc]
        if inc != platinc:
            self.compile_args.append('-I' + platinc)
        self.version = sysconfig.get_config_var('py_version')
        self.is_found = True

    @staticmethod
    def get_methods():
        if mesonlib.is_windows():
            return [DependencyMethods.PKGCONFIG, DependencyMethods.SYSCONFIG]
        elif mesonlib.is_osx():
            return [DependencyMethods.PKGCONFIG, DependencyMethods.EXTRAFRAMEWORK]
        else:
            return [DependencyMethods.PKGCONFIG]

    def log_tried(self):
        return 'sysconfig'
class PcapDependency(ExternalDependency):
    """libpcap dependency, found via pkg-config or the pcap-config tool."""

    def __init__(self, environment, kwargs):
        super().__init__('pcap', environment, None, kwargs)

    @classmethod
    def _factory(cls, environment, kwargs):
        """Return candidate dependency constructors for each allowed method."""
        methods = cls._process_method_kw(kwargs)
        candidates = []

        if DependencyMethods.PKGCONFIG in methods:
            candidates.append(functools.partial(PkgConfigDependency, 'pcap', environment, kwargs))

        if DependencyMethods.CONFIG_TOOL in methods:
            candidates.append(functools.partial(ConfigToolDependency.factory,
                                                'pcap', environment, None,
                                                kwargs, ['pcap-config'],
                                                'pcap-config',
                                                PcapDependency.tool_finish_init))

        return candidates

    @staticmethod
    def tool_finish_init(ctdep):
        """Fill in args and version once pcap-config has been located."""
        ctdep.compile_args = ctdep.get_config_value(['--cflags'], 'compile_args')
        ctdep.link_args = ctdep.get_config_value(['--libs'], 'link_args')
        ctdep.version = PcapDependency.get_pcap_lib_version(ctdep)

    @staticmethod
    def get_methods():
        return [DependencyMethods.PKGCONFIG, DependencyMethods.CONFIG_TOOL]

    @staticmethod
    def get_pcap_lib_version(ctdep):
        """Return the libpcap version string, stripped of vendor decoration."""
        # Since we seem to need to run a program to discover the pcap version,
        # we can't do that when cross-compiling
        if ctdep.want_cross:
            return None

        v = ctdep.clib_compiler.get_return_value('pcap_lib_version', 'string',
                                                 '#include <pcap.h>', ctdep.env, [], [ctdep])
        v = re.sub(r'libpcap version ', '', v)
        v = re.sub(r' -- Apple version.*$', '', v)
        return v
class CupsDependency(ExternalDependency):
    """CUPS dependency: pkg-config, cups-config, or the OSX framework."""

    def __init__(self, environment, kwargs):
        super().__init__('cups', environment, None, kwargs)

    @classmethod
    def _factory(cls, environment, kwargs):
        """Return candidate dependency constructors for each allowed method."""
        methods = cls._process_method_kw(kwargs)
        candidates = []

        if DependencyMethods.PKGCONFIG in methods:
            candidates.append(functools.partial(PkgConfigDependency, 'cups', environment, kwargs))

        if DependencyMethods.CONFIG_TOOL in methods:
            candidates.append(functools.partial(ConfigToolDependency.factory,
                                                'cups', environment, None,
                                                kwargs, ['cups-config'],
                                                'cups-config', CupsDependency.tool_finish_init))

        # The framework is only meaningful on OSX.
        if DependencyMethods.EXTRAFRAMEWORK in methods:
            if mesonlib.is_osx():
                candidates.append(functools.partial(
                    ExtraFrameworkDependency, 'cups', False, None, environment,
                    kwargs.get('language', None), kwargs))

        return candidates

    @staticmethod
    def tool_finish_init(ctdep):
        """Fill in compile/link args once cups-config has been located."""
        ctdep.compile_args = ctdep.get_config_value(['--cflags'], 'compile_args')
        ctdep.link_args = ctdep.get_config_value(['--ldflags', '--libs'], 'link_args')

    @staticmethod
    def get_methods():
        if mesonlib.is_osx():
            return [DependencyMethods.PKGCONFIG, DependencyMethods.CONFIG_TOOL, DependencyMethods.EXTRAFRAMEWORK]
        else:
            return [DependencyMethods.PKGCONFIG, DependencyMethods.CONFIG_TOOL]
class LibWmfDependency(ExternalDependency):
    """libwmf dependency, found via pkg-config or the libwmf-config tool."""

    def __init__(self, environment, kwargs):
        super().__init__('libwmf', environment, None, kwargs)

    @classmethod
    def _factory(cls, environment, kwargs):
        """Return candidate dependency constructors for each allowed method."""
        methods = cls._process_method_kw(kwargs)
        candidates = []
        if DependencyMethods.PKGCONFIG in methods:
            pkg_candidate = functools.partial(
                PkgConfigDependency, 'libwmf', environment, kwargs)
            candidates.append(pkg_candidate)
        if DependencyMethods.CONFIG_TOOL in methods:
            tool_candidate = functools.partial(
                ConfigToolDependency.factory,
                'libwmf', environment, None, kwargs,
                ['libwmf-config'], 'libwmf-config',
                LibWmfDependency.tool_finish_init)
            candidates.append(tool_candidate)
        return candidates

    @staticmethod
    def tool_finish_init(ctdep):
        """Fill in compile/link args once libwmf-config has been located."""
        ctdep.compile_args = ctdep.get_config_value(['--cflags'], 'compile_args')
        ctdep.link_args = ctdep.get_config_value(['--libs'], 'link_args')

    @staticmethod
    def get_methods():
        return [DependencyMethods.PKGCONFIG, DependencyMethods.CONFIG_TOOL]
class LibGCryptDependency(ExternalDependency):
    """libgcrypt dependency: pkg-config or the libgcrypt-config tool."""

    def __init__(self, environment, kwargs):
        super().__init__('libgcrypt', environment, None, kwargs)

    @classmethod
    def _factory(cls, environment, kwargs):
        """Return candidate dependency constructors for each allowed method."""
        methods = cls._process_method_kw(kwargs)
        candidates = []

        if DependencyMethods.PKGCONFIG in methods:
            candidates.append(functools.partial(PkgConfigDependency, 'libgcrypt', environment, kwargs))

        if DependencyMethods.CONFIG_TOOL in methods:
            candidates.append(functools.partial(ConfigToolDependency.factory,
                                                'libgcrypt', environment, None, kwargs, ['libgcrypt-config'],
                                                'libgcrypt-config',
                                                LibGCryptDependency.tool_finish_init))

        return candidates

    @staticmethod
    def tool_finish_init(ctdep):
        """Fill in args and version once libgcrypt-config has been located."""
        ctdep.compile_args = ctdep.get_config_value(['--cflags'], 'compile_args')
        ctdep.link_args = ctdep.get_config_value(['--libs'], 'link_args')
        # --version prints a single line; take it as the version string.
        ctdep.version = ctdep.get_config_value(['--version'], 'version')[0]

    @staticmethod
    def get_methods():
        return [DependencyMethods.PKGCONFIG, DependencyMethods.CONFIG_TOOL]
| |
# Python Version: 2.7
import sys
import socket
import urlparse
from bs4 import BeautifulSoup
# GLOBAL CONSTANTS
CRLF = '\r\n'
# HEADER NAMES
HEADER_HOST = 'Host: '
HEADER_LOCATION = 'Location: '
HEADER_SET_COOKIE = 'Set-Cookie: '
HEADER_COOKIE = 'Cookie: '
HEADER_CONTENT_LENGTH = 'Content-Length: '
HEADER_CONNECTION = 'Connection: '
HEADER_KEEP_ALIVE = 'Keep-Alive: '
def __status_code__(http_response):
    """Return the status code (as a string) of a raw HTTP response.

    The code is the second token of the status line, e.g. '200' in
    'HTTP/1.1 200 OK'.
    """
    status_line = http_response.split(CRLF + CRLF)[0].split(CRLF)[0]
    return status_line.split(' ')[1]
def __header_value__(http_response, header_string):
    """Return the value of the first header matching ``header_string``.

    ``header_string`` includes the trailing ': ' (see the HEADER_*
    constants).  Returns '' when no header matches.
    """
    header_block = http_response.split(CRLF + CRLF)[0]
    for line in header_block.split(CRLF):
        if line.startswith(header_string):
            return line.partition(header_string)[2]
    return ''
def __header_value_multiple__(http_response, header_string):
    """Return the values of every header matching ``header_string``.

    ``header_string`` includes the trailing ': '.  Returns [] when no
    header matches.
    """
    header_block = http_response.split(CRLF + CRLF)[0]
    return [line.partition(header_string)[2]
            for line in header_block.split(CRLF)
            if line.startswith(header_string)]
def __set_cookies__(http_response):
    """Return the raw 'Set-Cookie' header values, or None if there are none."""
    values = __header_value_multiple__(http_response, HEADER_SET_COOKIE)
    return values if values else None
# HEADER EXTRACTORS: String -> String
# GIVEN: a http response string
# RETURNS: the value corresponding to the header suggested by the
# function name (or '' when the header is absent)
def __location__(response_30x):
    """Return the 'Location' header value of a 30x redirect response."""
    return __header_value__(response_30x, HEADER_LOCATION)
def __connection__(http_response):
    """Return the 'Connection' header value."""
    return __header_value__(http_response, HEADER_CONNECTION)
def __keep_alive__(http_response):
    """Return the 'Keep-Alive' header value."""
    return __header_value__(http_response, HEADER_KEEP_ALIVE)
def get_cookies(http_response):
    """Return a dict of cookie name/value pairs from a raw HTTP response.

    Only the leading 'name=value' part of each Set-Cookie header is kept;
    attributes such as Path or Expires are discarded.  Returns None when
    the response sets no cookies.
    """
    raw_cookies = __set_cookies__(http_response)
    if raw_cookies is None:
        return None
    pairs = [item.split(';')[0].split('=') for item in raw_cookies]
    return dict(pairs)
def merge_cookies(current, new):
    """Merge two cookie dicts; values from ``new`` win on conflicts.

    Either argument may be None, in which case the other is returned
    unchanged.  Neither input dict is mutated.
    """
    if current is None:
        return new
    if new is None:
        return current
    merged = dict(current)
    merged.update(new)
    return merged
def make_cookie_header(cookies):
    """Format a cookie dict as a 'Cookie: ' request header line.

    Returns '' when ``cookies`` is None or empty.  (The original
    reduce()-based join raised TypeError on an empty dict because reduce
    has no initializer for an empty sequence.)
    """
    if not cookies:
        return ''
    pairs = [name + '=' + value for name, value in cookies.items()]
    return HEADER_COOKIE + ';'.join(pairs) + CRLF
# make_content_length_header: String -> String
# GIVEN: a value for content length, already converted to a string
# (callers pass str(len(payload)); '+' would fail on an int)
# RETURNS: a formatted header for the same
def make_content_length_header(content_length):
    """Return a 'Content-Length: <n>' header line terminated by CRLF."""
    return HEADER_CONTENT_LENGTH + content_length + CRLF
def make_host_header(url):
    """Return a 'Host: ' header for the host part of the given URL.

    Any ':port' suffix in the network location is stripped via parse_port
    before the header is built.
    """
    netloc = urlparse.urlparse(url).netloc
    host = parse_port(netloc)[0]
    return HEADER_HOST + host + CRLF
# make_connection_header: String -> String
# GIVEN: a value for the connection type (e.g. 'keep-alive' or 'close')
# RETURNS: a formatted header for the same
def make_connection_header(connection_type):
    """Return a 'Connection: <type>' header line terminated by CRLF."""
    return HEADER_CONNECTION + connection_type + CRLF
def make_post_headers(url, cookies, payload):
    """Return the headers for a POST request, terminated by a blank line.

    Includes Cookie (when cookies are given), Content-Length for the
    payload, and Host derived from the URL.
    """
    header_lines = [
        make_cookie_header(cookies),
        make_content_length_header(str(len(payload))),
        make_host_header(url),
    ]
    return ''.join(header_lines) + CRLF
def make_get_headers(url, cookies):
    """Return the headers for a GET request, terminated by a blank line."""
    header_block = make_cookie_header(cookies) + make_host_header(url)
    return header_block + CRLF
def url_encode_char(char):
    """Return the URL (percent) encoding of a single-character string.

    Unreserved characters (ASCII letters, digits, '*', '-', '.', '_')
    are returned unchanged.  Anything else is encoded as one '%XX'
    escape per UTF-8 byte, always two uppercase hex digits.  The
    original implementation dropped the leading zero for code points
    below 0x10 ('\\t' became '%9') and crashed on non-ASCII input via
    .encode('UTF-8').encode('ASCII').
    """
    if char in '*-._' or \
            'a' <= char <= 'z' or 'A' <= char <= 'Z' or '0' <= char <= '9':
        return char
    # bytearray yields ints in both Python 2 and Python 3.
    return ''.join('%{0:02X}'.format(byte)
                   for byte in bytearray(char.encode('UTF-8')))
def url_encode(string):
    """Return ``string`` with every character URL-encoded as needed.

    Uses str.join instead of reduce() so that the empty string encodes
    to '' instead of raising TypeError (reduce has no initializer for an
    empty sequence).
    """
    return ''.join(url_encode_char(ch) for ch in string)
def url_decode(encoded_url):
    """Return the given URL with every '%XX' escape decoded."""
    pieces = []
    pos = 0
    total = len(encoded_url)
    while pos < total:
        ch = encoded_url[pos]
        if ch == '%':
            # Consume '%' plus the two hex digits that follow it.
            pieces.append(chr(int(encoded_url[pos + 1:pos + 3], 16)))
            pos += 3
        else:
            pieces.append(ch)
            pos += 1
    return ''.join(pieces)
# hidden_parameters: String -> [String, String]
# GIVEN: a html source tree
# RETURNS: a list containing [name, value] pairs of the <input> tags
# with type="hidden"
def hidden_parameters(html):
    """Return [name, value] pairs for every hidden <input> in ``html``.

    NOTE(review): assumes each hidden input carries both 'name' and
    'value' attributes -- a missing one raises KeyError; confirm against
    the pages this client actually scrapes.
    """
    soup = BeautifulSoup(html)
    input_tags = soup.find_all('input', type='hidden')
    return map(lambda x: [x['name'], x['value']], input_tags)
# parameters_encoded: List<[String, String]> -> List<[String, String]>
# GIVEN: [name, value] parameter pairs
# RETURNS: the same pairs with each value url-encoded
def parameters_encoded(params):
    # map (not a comprehension) keeps the original return type.
    return map(lambda pair: [pair[0], url_encode(pair[1])], params)
# make_post_payload: List<[String, String]> -> String
# GIVEN: key,value pairs as parameters to a POST request
# RETURNS: a payload constructed from the given data
def make_post_payload(parameters):
    # '&'.join handles an empty parameter list — the previous reduce()
    # with no initializer raised TypeError on empty input.
    fields = ['='.join([pair[0], pair[1]]) for pair in parameters_encoded(parameters)]
    return '&'.join(fields) + CRLF
# get_path: String -> String
# GIVEN: a URL
# RETURNS: the server path present in the URL
def get_path(url):
    parsed = urlparse.urlparse(url)
    return parsed.path
# post_path: String, String -> String
# GIVEN: a string containing html and the base url
# RETURNS: the path for the request line of the POST request.
def post_path(html, base_url):
    soup = BeautifulSoup(html)
    form = soup.find('form', method='post')
    # urljoin signature is (base, url): resolve the form's action attribute
    # against the page URL. The original call had the arguments reversed,
    # which produced the wrong path whenever the action was relative.
    return get_path(urlparse.urljoin(base_url, form['action']))
# tcp_socket: String, String -> socket
# GIVEN: a hostname and a port number
# RETURNS: a tcp socket connected to the given hostname and port;
# exits the process on connection failure
def tcp_socket(hostname, port):
    sock = socket.socket()
    address = (socket.gethostbyname(hostname), int(port))
    try:
        sock.connect(address)
    except socket.error as err:
        print('Exception caught: Socket Error' + str(err))
        sys.exit(1)
    return sock
# parse_port: String -> (String, String)
# GIVEN: a hostname
# RETURNS: a 2-tuple (host, port). 'host' is the part of the given hostname
# before the first ':'. If a port number was appended to the given hostname,
# then 'port' is extracted from it, else 'port' defaults to '80'.
def parse_port(hostname):
    # partition guarantees a 2-tuple result; the previous split()-based
    # version returned a 3+ element tuple for inputs with multiple colons,
    # breaking callers that unpack (host, port).
    host, sep, port = hostname.partition(':')
    if sep:
        return (host, port)
    return (hostname, '80')
# internet_address: String -> (String, String)
# GIVEN: a URL
# RETURNS: a (host, port) 2-tuple for the given URL; port defaults to '80'
# when the URL does not carry an explicit one
def internet_address(url):
    netloc = urlparse.urlparse(url).netloc
    # Split an explicit port out of the netloc. The previous version
    # returned the whole netloc (e.g. 'host:8080') as the host together
    # with a hard-coded '80', so URLs with explicit ports never resolved.
    host, _, port = netloc.partition(':')
    return (host, port or '80')
# get: String -> String
# GIVEN: a URL and a dictionary of cookies
# RETURNS: the raw http response to a GET request sent to the given URL,
# a sentinel string (' abandon' / ' badrequest') for client-error statuses,
# or '' for unrecognized statuses
# NOTE(review): send/recv here exchange str, which implies a Python 2
# runtime; under Python 3 these calls would need bytes.
def get(url, cookies):
    host, port = internet_address(url)
    request_line = 'GET ' + url + ' HTTP/1.0' + CRLF
    headers = make_get_headers(url, cookies)
    # The extra CRLF is the blank line that terminates the header section.
    request = request_line + headers + CRLF
    sock = tcp_socket(host, port)
    sock.send(request)
    message = ''
    response = None
    buffer_length = 2048
    # HTTP/1.0, no keep-alive: read until the server closes the connection
    # (recv returns the empty string).
    while response != '':
        response = sock.recv(buffer_length)
        message = message + response
    status_code = __status_code__(message)
    if status_code == '200':
        return message
    elif status_code in ['301', '302', '303']:
        # Follow the redirect, carrying over any cookies the server set.
        new_url = get_location(message)
        return get(new_url, merge_cookies(cookies, get_cookies(message)))
    elif status_code in ['401', '403', '404']:
        # Sentinel consumed by the crawler's status handling.
        return ' abandon'
    elif status_code == '400':
        return ' badrequest'
    elif status_code == '500':
        # Retry the identical request on an internal server error.
        return get(url, cookies)
    return ''
# get_location: String -> String
# GIVEN: a http response
# RETURNS: the location specified in its headers
def get_location(http_response):
    raw_location = __location__(http_response)
    # Keep only the header value itself, dropping everything after CRLF.
    return url_decode(raw_location).split(CRLF)[0]
# post: String, String, Dict{cookie_name, value}, String -> String
# GIVEN: a URL, the path for the request line, a dictionary containing cookies
# and a payload string
# RETURNS: a raw string containing the http response to a POST request constructed
# from the given data
# NOTE(review): send/recv here exchange str (Python 2 assumption), and the
# indentation below is reconstructed from whitespace-mangled source —
# verify the 500/200 branch structure against the original file.
def post(url, path, cookies, payload):
    request_line = 'POST ' + path + ' HTTP/1.0' + CRLF
    headers_line = make_post_headers(url, cookies, payload)
    request = request_line + headers_line + payload
    host, port = internet_address(url)
    s = tcp_socket(host, port)
    s.send(request)
    message = ''
    buffer = None
    # Read until the server closes the connection (recv returns '').
    while buffer != '':
        buffer = s.recv(4096)
        message += buffer
    status_code = __status_code__(message)
    if status_code in ['301', '302', '303']:
        # A redirect after a POST is fetched with GET (per 303 semantics).
        new_path = get_location(message)
        return get(new_path, merge_cookies(cookies, get_cookies(message)))
    elif status_code in ['401', '403', '404']:
        return 'abandon post: ' + status_code
    elif status_code == '400':
        return 'bad request'
    elif status_code == '500':
        # Fall through to the retry below.
        pass
        # print('Internal Server Error')
    elif status_code == '200':
        # A 200 on this login POST means the credentials were rejected.
        print('Login unsuccessful')
        exit(0)
    # Retry the POST with any newly received cookies merged in.
    return post(url, path, merge_cookies(cookies, get_cookies(message)), payload)
    # NOTE(review): unreachable — kept from the original source.
    return ''
# link_within_domain: String, String -> Boolean
# GIVEN: a host address and a URL
# RETURNS: True if and only if the given URL is within the given domain
def link_within_domain(host, link):
    parsed_link = urlparse.urlparse(link)
    return parsed_link.netloc == host
# get_abs_url: String, String -> String
# GIVEN: a host address and a relative URL
# RETURNS: a canonical http URL constructed from the given data
def get_abs_url(host, rel_link):
    components = ('http', host, rel_link, '', '', '')
    return urlparse.urlunparse(components)
# scrape_all_links: String -> List<String>
# GIVEN: a string containing html
# RETURNS: the href values of all <a> tags whose href starts with '/'
def scrape_all_links(html):
    anchors = BeautifulSoup(html).find_all('a')
    # map/filter (not comprehensions) keep the original return type.
    hrefs = map(lambda anchor: anchor['href'], anchors)
    return filter(lambda href: href.startswith('/'), hrefs)
# krawll: Dict{cookie_name, value}, String, String, Function, Function -> Dict
# GIVEN: a dictionary containing cookies, the home page of the website, the
# host address, a function to extract the required data and a function that
# returns whether or not the crawler needs to stop at that point in time
# extractor: String -> Dict
# terminator: Dict -> Boolean
# RETURNS: the data collected by the extractor function, or None when the
# crawl hits a bad request or an unrecognized status
# NOTE(review): indentation below is reconstructed from whitespace-mangled
# source — verify the abandon/extraction branch boundaries.
def krawll(cookies, home_page, host, extractor, terminator):
    # Traversal algorithm: BFS
    queue = []
    visited = set()
    data_extracted = {}
    current_page = home_page
    current_link = 'no idea'
    abandon = False
    while True:
        visited.add(current_link)
        if not abandon:
            # Only extract from / expand pages that were fetched successfully.
            data_extracted.update(extractor(current_page))
            if terminator(data_extracted):
                break
            links = map(lambda x: get_abs_url(host, x), scrape_all_links(current_page))
            for link in links:
                # Enqueue only unseen same-domain links.
                if link_within_domain(host, link) and (link not in visited) and (link not in queue):
                    queue.append(link)
        abandon = False
        if len(queue) == 0:
            # Frontier exhausted: return everything collected so far.
            return data_extracted
        # BFS: pop from the front of the queue.
        current_link = queue.pop(0)
        current_page = get(current_link, cookies)
        status = __status_code__(current_page)
        if status == 'abandon':
            # get() returned its client-error sentinel: skip this page.
            abandon = True
        elif status == 'badrequest':
            print(status)
            return None
        elif status == '200':
            continue
        else:
            return None
    # Reached via the terminator's break above.
    return data_extracted
| |
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
# South data migration: backfills UserInfo.follower_count from Redis.
class Migration(DataMigration):

    def forwards(self, orm):
        """Populate each UserInfo.follower_count from the user's Redis follower set."""
        # Imported here (not at module level) so the migration only depends
        # on current app code when it actually runs.
        from canvas.models import UserRedis
        for ui in orm['canvas.UserInfo'].objects.all():
            # zcard gives the size of the new_followers sorted set.
            ui.follower_count = UserRedis(ui.user_id).new_followers.zcard()
            ui.save()

    def backwards(self, orm):
        """No-op: the previous follower_count values are not recoverable."""

    # Frozen ORM snapshot generated by South — do not edit by hand.
    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '254', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'canvas.apiapp': {
            'Meta': {'object_name': 'APIApp'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'})
        },
        'canvas.apiauthtoken': {
            'Meta': {'unique_together': "(('user', 'app'),)", 'object_name': 'APIAuthToken'},
            'app': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['canvas.APIApp']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'token': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '40'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
        },
        'canvas.bestof': {
            'Meta': {'object_name': 'BestOf'},
            'category': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'best_of'", 'null': 'True', 'blank': 'True', 'to': "orm['canvas.Category']"}),
            'chosen_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
            'comment': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'best_of'", 'to': "orm['canvas.Comment']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'timestamp': ('canvas.util.UnixTimestampField', [], {})
        },
        'canvas.category': {
            'Meta': {'object_name': 'Category'},
            'description': ('django.db.models.fields.CharField', [], {'max_length': '140'}),
            'founded': ('django.db.models.fields.FloatField', [], {'default': '1298956320'}),
            'founder': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'founded_groups'", 'null': 'True', 'blank': 'True', 'to': "orm['auth.User']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'moderators': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'moderated_categories'", 'symmetrical': 'False', 'to': "orm['auth.User']"}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '20'}),
            'visibility': ('django.db.models.fields.IntegerField', [], {'default': '0', 'db_index': 'True'})
        },
        'canvas.comment': {
            'Meta': {'object_name': 'Comment'},
            'anonymous': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'author': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'comments'", 'null': 'True', 'blank': 'True', 'to': "orm['auth.User']"}),
            'category': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'comments'", 'null': 'True', 'blank': 'True', 'to': "orm['canvas.Category']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'ip': ('django.db.models.fields.IPAddressField', [], {'default': "'0.0.0.0'", 'max_length': '15'}),
            'judged': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'ot_hidden': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'parent_comment': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'replies'", 'null': 'True', 'blank': 'True', 'to': "orm['canvas.Comment']"}),
            'parent_content': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'comments'", 'null': 'True', 'to': "orm['canvas.Content']"}),
            'posted_on_quest_of_the_day': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'replied_comment': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['canvas.Comment']", 'null': 'True', 'blank': 'True'}),
            'reply_content': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'used_in_comments'", 'null': 'True', 'to': "orm['canvas.Content']"}),
            'reply_text': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'blank': 'True'}),
            'score': ('django.db.models.fields.FloatField', [], {'default': '0', 'db_index': 'True'}),
            'skip_moderation': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'timestamp': ('canvas.util.UnixTimestampField', [], {'default': '0'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '140', 'blank': 'True'}),
            'visibility': ('django.db.models.fields.IntegerField', [], {'default': '0', 'db_index': 'True'})
        },
        'canvas.commentflag': {
            'Meta': {'object_name': 'CommentFlag'},
            'comment': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'flags'", 'to': "orm['canvas.Comment']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'ip': ('django.db.models.fields.IPAddressField', [], {'max_length': '15'}),
            'timestamp': ('canvas.util.UnixTimestampField', [], {}),
            'type_id': ('django.db.models.fields.IntegerField', [], {}),
            'undone': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'flags'", 'to': "orm['auth.User']"})
        },
        'canvas.commentmoderationlog': {
            'Meta': {'object_name': 'CommentModerationLog'},
            'comment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['canvas.Comment']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'moderator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
            'note': ('django.db.models.fields.TextField', [], {}),
            'timestamp': ('canvas.util.UnixTimestampField', [], {}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'moderated_comments_log'", 'to': "orm['auth.User']"}),
            'visibility': ('django.db.models.fields.IntegerField', [], {})
        },
        'canvas.commentpin': {
            'Meta': {'object_name': 'CommentPin'},
            'auto': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'comment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['canvas.Comment']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'timestamp': ('canvas.util.UnixTimestampField', [], {}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
        },
        'canvas.commentsticker': {
            'Meta': {'object_name': 'CommentSticker'},
            'comment': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'stickers'", 'to': "orm['canvas.Comment']"}),
            'epic_message': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '140', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'ip': ('django.db.models.fields.IPAddressField', [], {'max_length': '15'}),
            'timestamp': ('canvas.util.UnixTimestampField', [], {}),
            'type_id': ('django.db.models.fields.IntegerField', [], {}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
        },
        'canvas.commentstickerlog': {
            'Meta': {'object_name': 'CommentStickerLog'},
            'comment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['canvas.Comment']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
        },
        'canvas.content': {
            'Meta': {'object_name': 'Content'},
            'alpha': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'animated': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'id': ('django.db.models.fields.CharField', [], {'max_length': '40', 'primary_key': 'True'}),
            'ip': ('django.db.models.fields.IPAddressField', [], {'default': "'0.0.0.0'", 'max_length': '15'}),
            'remix_of': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'remixes'", 'null': 'True', 'to': "orm['canvas.Content']"}),
            'remix_text': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1000', 'blank': 'True'}),
            'source_url': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '4000', 'blank': 'True'}),
            'stamps_used': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'used_as_stamp'", 'blank': 'True', 'to': "orm['canvas.Content']"}),
            'timestamp': ('canvas.util.UnixTimestampField', [], {}),
            'url_mapping': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['canvas.ContentUrlMapping']", 'null': 'True', 'blank': 'True'}),
            'visibility': ('django.db.models.fields.IntegerField', [], {'default': '0', 'db_index': 'True'})
        },
        'canvas.contenturlmapping': {
            'Meta': {'object_name': 'ContentUrlMapping'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
        },
        'canvas.emailunsubscribe': {
            'Meta': {'object_name': 'EmailUnsubscribe'},
            'email': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
        },
        'canvas.externalcontent': {
            'Meta': {'object_name': 'ExternalContent'},
            '_data': ('django.db.models.fields.TextField', [], {'default': "'{}'"}),
            'content_type': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'parent_comment': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'external_content'", 'to': "orm['canvas.Comment']"}),
            'source_url': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '4000', 'null': 'True', 'blank': 'True'})
        },
        'canvas.facebookinvite': {
            'Meta': {'object_name': 'FacebookInvite'},
            'fb_message_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'invited_fbid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'invitee': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'facebook_invited_from'", 'null': 'True', 'blank': 'True', 'to': "orm['auth.User']"}),
            'inviter': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'facebook_sent_invites'", 'null': 'True', 'blank': 'True', 'to': "orm['auth.User']"})
        },
        'canvas.facebookuser': {
            'Meta': {'object_name': 'FacebookUser'},
            'email': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'fb_uid': ('django.db.models.fields.BigIntegerField', [], {'unique': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'gender': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'last_invited': ('canvas.util.UnixTimestampField', [], {'default': '0'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True', 'null': 'True', 'blank': 'True'})
        },
        'canvas.followcategory': {
            'Meta': {'unique_together': "(('user', 'category'),)", 'object_name': 'FollowCategory'},
            'category': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'followers'", 'to': "orm['canvas.Category']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'following'", 'to': "orm['auth.User']"})
        },
        'canvas.invitecode': {
            'Meta': {'object_name': 'InviteCode'},
            'code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'invitee': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'invited_from'", 'null': 'True', 'blank': 'True', 'to': "orm['auth.User']"}),
            'inviter': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'sent_invites'", 'null': 'True', 'blank': 'True', 'to': "orm['auth.User']"})
        },
        'canvas.remixplugin': {
            'Meta': {'object_name': 'RemixPlugin'},
            'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            's3md5': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'timestamp': ('canvas.util.UnixTimestampField', [], {'default': '0'})
        },
        'canvas.stashcontent': {
            'Meta': {'object_name': 'StashContent'},
            'content': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['canvas.Content']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
        },
        'canvas.userinfo': {
            'Meta': {'object_name': 'UserInfo'},
            'avatar': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['canvas.Content']", 'null': 'True'}),
            'bio_text': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'blank': 'True'}),
            'enable_timeline': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'enable_timeline_posts': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'facebook_id': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'follower_count': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
            'free_invites': ('django.db.models.fields.IntegerField', [], {'default': '10'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'invite_bypass': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
            'is_qa': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'post_anonymously': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'profile_image': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['canvas.Comment']", 'null': 'True'}),
            'trust_changed': ('canvas.util.UnixTimestampField', [], {'null': 'True', 'blank': 'True'}),
            'trusted': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
            'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'})
        },
        'canvas.usermoderationlog': {
            'Meta': {'object_name': 'UserModerationLog'},
            'action': ('django.db.models.fields.IntegerField', [], {}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'moderator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
            'note': ('django.db.models.fields.TextField', [], {}),
            'timestamp': ('canvas.util.UnixTimestampField', [], {}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'moderation_log'", 'to': "orm['auth.User']"})
        },
        'canvas.userwarning': {
            'Meta': {'object_name': 'UserWarning'},
            'comment': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['canvas.Comment']", 'null': 'True', 'blank': 'True'}),
            'confirmed': ('canvas.util.UnixTimestampField', [], {'default': '0'}),
            'custom_message': ('django.db.models.fields.TextField', [], {}),
            'disable_user': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'issued': ('canvas.util.UnixTimestampField', [], {}),
            'stock_message': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'user_warnings'", 'to': "orm['auth.User']"}),
            'viewed': ('canvas.util.UnixTimestampField', [], {'default': '0'})
        },
        'canvas.welcomeemailrecipient': {
            'Meta': {'object_name': 'WelcomeEmailRecipient'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'recipient': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'unique': 'True'})
        },
        'canvas_auth.user': {
            'Meta': {'object_name': 'User', 'db_table': "'auth_user'", '_ormbases': ['auth.User'], 'proxy': 'True'}
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        }
    }

    complete_apps = ['canvas']
| |
import re
import logging
import xmltodict
from share.transform.chain import ChainTransformer, ctx, links as tools
from share.transform.chain.exceptions import InvalidIRI
from share.transform.chain.links import GuessAgentTypeLink
from share.transform.chain.parsers import Parser
from share.transform.chain.utils import force_text
from share.transform.chain.utils import oai_allowed_by_sets
logger = logging.getLogger(__name__)
def get_list(dct, key):
    """Return dct[key] as a list, wrapping single values; [] when missing."""
    value = dct.get(key, [])
    if isinstance(value, list):
        return value
    return [value]
#### Identifiers ####
# Parses a WorkIdentifier from a MODS identifier element.
class MODSWorkIdentifier(Parser):
    schema = 'WorkIdentifier'
    # Coerce the (possibly nested xmltodict) value to plain text.
    uri = tools.RunPython(force_text, ctx)

    class Extra:
        # Preserve the element's @type attribute as raw extra data.
        identifier_type = tools.Try(ctx['@type'])
# Parses an AgentIdentifier whose URI is the context value itself.
class MODSAgentIdentifier(Parser):
    schema = 'AgentIdentifier'
    uri = ctx
#### Agents ####
# An agent named by an affiliation string; agent type is guessed from the
# name, defaulting to organization.
class AffiliatedAgent(Parser):
    schema = tools.GuessAgentType(ctx, default='organization')
    name = ctx
# Relation linking an agent to the AffiliatedAgent built from the context.
class IsAffiliatedWith(Parser):
    related = tools.Delegate(AffiliatedAgent, ctx)
# Parses an agent from a MODS name element (displayForm or joined nameParts).
class MODSAgent(Parser):
    # Agent type is derived from the element's @type (see get_agent_schema).
    schema = tools.RunPython('get_agent_schema', ctx)
    # Prefer the explicit displayForm; fall back to joining the nameParts.
    name = tools.OneOf(
        tools.RunPython(force_text, ctx['mods:displayForm']),
        tools.RunPython('squash_name_parts', ctx)
    )
    # Each non-empty mods:affiliation becomes an IsAffiliatedWith relation.
    related_agents = tools.Map(tools.Delegate(IsAffiliatedWith), tools.Concat(tools.Try(
        tools.Filter(lambda x: bool(x), tools.RunPython(force_text, ctx['mods:affiliation']))
    )))
    # mods:nameIdentifier values (excluding ones flagged 'invalid') parsed
    # as IRIs, de-duplicated, and emitted as AgentIdentifiers.
    identifiers = tools.Map(
        tools.Delegate(MODSAgentIdentifier),
        tools.Unique(tools.Map(
            tools.Try(tools.IRI(), exceptions=(InvalidIRI, )),
            tools.Map(
                tools.RunPython(force_text),
                tools.Filter(
                    lambda obj: 'invalid' not in obj,
                    tools.Try(ctx['mods:nameIdentifier']),
                )
            )
        ))
    )

    class Extra:
        # Raw MODS name sub-elements preserved verbatim.
        name_type = tools.Try(ctx['@type'])
        name_part = tools.Try(ctx['mods:namePart'])
        affiliation = tools.Try(ctx['mods:affiliation'])
        description = tools.Try(ctx['mods:description'])
        display_form = tools.Try(ctx['mods:displayForm'])
        etal = tools.Try(ctx['mods:etal'])
        name_identifier = tools.Try(ctx['mods:nameIdentifier'])

    def squash_name_parts(self, name):
        # Join all mods:namePart values into a single space-separated name.
        name_parts = get_list(name, 'mods:namePart')
        return ' '.join([force_text(n) for n in name_parts])

    def get_agent_schema(self, obj):
        # Map the MODS name @type onto a SHARE agent schema; for corporate
        # or untyped names, guess the agent type from the squashed name.
        name_type = obj.get('@type')
        if name_type == 'personal':
            return 'person'
        if name_type == 'conference':
            return 'organization'
        # TODO SHARE-718
        # if name_type == 'family':
        #     return 'family'
        if name_type == 'corporate':
            return GuessAgentTypeLink(default='organization').execute(self.squash_name_parts(obj))
        return GuessAgentTypeLink().execute(self.squash_name_parts(obj))
# Person parser used when nameParts carry @type markers (given/family):
# splits the name into its typed components instead of a single string.
class MODSPersonSplitName(MODSAgent):
    schema = 'person'
    # Suppress the inherited single-string name in favor of split fields.
    name = None
    family_name = tools.RunPython('get_name_part', ctx, 'family')
    given_name = tools.RunPython('get_name_part', ctx, 'given')
    suffix = tools.RunPython('get_name_part', ctx, 'termsOfAddress')

    def get_name_part(self, obj, type):
        # Join all nameParts whose @type matches the requested part type.
        name_parts = get_list(obj, 'mods:namePart')
        return ' '.join([force_text(n) for n in name_parts if n.get('@type') == type])
# Agent built from a bare name string; type guessed, defaulting to organization.
class MODSSimpleAgent(Parser):
    schema = tools.GuessAgentType(ctx, default='organization')
    name = ctx
# Publisher relation wrapping a MODSSimpleAgent built from a name string.
class MODSSimplePublisher(Parser):
    schema = 'Publisher'
    agent = tools.Delegate(MODSSimpleAgent, ctx)
#### Tags/Subjects ####
# Subject named by the context value.
class MODSSubject(Parser):
    schema = 'Subject'
    name = ctx
# Through-table relation attaching a MODSSubject to a work.
class MODSThroughSubjects(Parser):
    schema = 'ThroughSubjects'
    subject = tools.Delegate(MODSSubject, ctx)
# Tag named by the context value.
class MODSTag(Parser):
    schema = 'Tag'
    name = ctx
# Through-table relation attaching a MODSTag to a work.
class MODSThroughTags(Parser):
    schema = 'ThroughTags'
    tag = tools.Delegate(MODSTag, ctx)
#### Work Relations ####

# Maps a mods:relatedItem @type onto a SHARE work-relation schema.
# Unmapped types fall back to 'WorkRelation' (see map_relation_type).
RELATION_MAP = {
    # 'preceding':
    # 'succeeding':
    'original': 'IsDerivedFrom',
    'host': 'IsPartOf',
    'constituent': 'IsPartOf',
    'series': 'IsPartOf',
    # 'otherVersion':
    # 'otherFormat':
    'isReferencedBy': 'References',
    'references': 'References',
    'reviewOf': 'Reviews',
}

# relatedItem @types whose relation points *at* this work rather than from
# it, so subject and related are swapped (see work_relation_parser).
REVERSE_RELATIONS = {
    'isReferencedBy',
    'constituent',
}
# Finds the generated subclass of MODSCreativeWork
def related_work_parser(_):
    creative_work = next(parser for parser in ctx.parsers if isinstance(parser, MODSCreativeWork))
    return type(creative_work)
def map_relation_type(obj):
    """Map a relatedItem @type to a relation schema, defaulting to 'WorkRelation'."""
    relation_type = obj['@type']
    return RELATION_MAP.get(relation_type, 'WorkRelation')
# Work relation where the related item is the *subject* (reverse direction).
class MODSReverseWorkRelation(Parser):
    schema = tools.RunPython(map_relation_type)
    subject = tools.Delegate(related_work_parser, ctx)
# Work relation where the related item is the *object* (forward direction).
class MODSWorkRelation(Parser):
    schema = tools.RunPython(map_relation_type)
    related = tools.Delegate(related_work_parser, ctx)
def work_relation_parser(obj):
    """Choose the reverse or forward relation parser based on the @type."""
    is_reverse = obj['@type'] in REVERSE_RELATIONS
    return MODSReverseWorkRelation if is_reverse else MODSWorkRelation
#### Agent-work relations ####
def agent_parser(name):
    """Pick the split-name parser when any namePart carries a given/family @type."""
    for part in get_list(name, 'mods:namePart'):
        if isinstance(part, dict) and part.get('@type') in ('given', 'family'):
            return MODSPersonSplitName
    return MODSAgent
# Base agent-work relation; the agent parser is chosen per name element.
class MODSAgentWorkRelation(Parser):
    schema = 'AgentWorkRelation'
    agent = tools.Delegate(agent_parser, ctx)
    # The citation form comes from the displayForm, when present.
    cited_as = tools.RunPython(force_text, tools.Try(ctx['mods:displayForm']))
# Host variant of the agent-work relation.
class MODSHost(MODSAgentWorkRelation):
    schema = 'Host'
# Funder variant of the agent-work relation.
class MODSFunder(MODSAgentWorkRelation):
    schema = 'Funder'
# Contributor variant of the agent-work relation.
class MODSContributor(MODSAgentWorkRelation):
    schema = 'Contributor'
# Creator variant of the contributor relation; preserves citation order.
class MODSCreator(MODSContributor):
    schema = 'Creator'
    # Position of this name within the source document's name list.
    order_cited = ctx('index')
# Publisher variant of the agent-work relation.
class MODSPublisher(MODSAgentWorkRelation):
    schema = 'Publisher'
#### Works ####
class MODSCreativeWork(Parser):
default_type = 'CreativeWork'
type_map = None
role_map = None
schema = tools.RunPython(
'get_schema',
tools.OneOf(
tools.RunPython(force_text, ctx['mods:genre']),
tools.Static(None)
)
)
title = tools.RunPython('join_title_info', ctx)
# Abstracts have the optional attribute "shareable". Don't bother checking for it, because
# abstracts that are not shareable should not have been shared with SHARE.
description = tools.Join(tools.RunPython(force_text, tools.Try(ctx['mods:abstract']), '\n'))
identifiers = tools.Map(
tools.Delegate(MODSWorkIdentifier),
tools.Filter(
lambda obj: 'invalid' not in obj,
tools.Concat(
tools.Try(ctx['mods:identifier']),
tools.Try(ctx.header['identifier']),
tools.Try(ctx['mods:location']['mods:url']),
)
)
)
related_works = tools.Concat(
tools.Map(
tools.Delegate(work_relation_parser),
tools.Try(ctx['mods:relatedItem'])
)
)
related_agents = tools.Concat(
tools.Map(
tools.Delegate(MODSCreator),
tools.RunPython('filter_names', ctx, 'creator')
),
tools.Map(
tools.Delegate(MODSFunder),
tools.RunPython('filter_names', ctx, 'funder')
),
tools.Map(
tools.Delegate(MODSHost),
tools.RunPython('filter_names', ctx, 'host')
),
tools.Map(
tools.Delegate(MODSPublisher),
tools.RunPython('filter_names', ctx, 'publisher')
),
tools.Map(
tools.Delegate(MODSContributor),
tools.RunPython('filter_names', ctx, 'creator', 'funder', 'host', 'publisher', invert=True)
),
tools.Map(
tools.Delegate(MODSSimplePublisher),
tools.Try(ctx['mods:originInfo']['mods:publisher']),
),
)
rights = tools.RunPython(force_text, tools.Try(ctx['mods:accessCondition']), '\n')
language = tools.ParseLanguage(
tools.Try(ctx['mods:language']['mods:languageTerm']),
)
subjects = tools.Map(
tools.Delegate(MODSThroughSubjects),
tools.Subjects(
tools.Concat(
tools.Try(ctx['mods:subject']['mods:topic']),
)
)
)
tags = tools.Map(
tools.Delegate(MODSThroughTags),
tools.Concat(
tools.Map(
tools.RunPython('tokenize'),
tools.Map(
tools.RunPython(force_text),
tools.Try(ctx.header.setSpec),
tools.Try(ctx['mods:genre']),
tools.Try(ctx['mods:classification']),
tools.Try(ctx['mods:subject']['mods:topic']),
)
),
deep=True
)
)
date_updated = tools.ParseDate(tools.Try(ctx.header.datestamp))
# TODO (in regulator) handle date ranges, uncertain dates ('1904-1941', '1890?', '1980-', '19uu', etc.)
date_published = tools.OneOf(
tools.ParseDate(tools.RunPython(force_text, tools.Try(ctx['mods:originInfo']['mods:dateIssued']))),
tools.Static(None)
)
is_deleted = tools.RunPython(lambda status: status == 'deleted', tools.Try(ctx.record.header['@status']))
    class Extra:
        """Raw MODS elements preserved alongside the normalized fields.

        Fields that are combined or filtered by the parser above are relisted
        here as singular elements matching their original entries, so the raw
        data structure of the source record is preserved.
        """
        # (dc:description) http://www.loc.gov/standards/mods/userguide/abstract.html
        abstract = tools.Try(ctx['mods:abstract'])
        # (dc:rights) http://www.loc.gov/standards/mods/userguide/accesscondition.html
        accessConditions = tools.Try(ctx['mods:accessCondition'])
        # (dc:subject) http://www.loc.gov/standards/mods/userguide/classification.html
        classification = tools.Try(ctx['mods:classification'])
        # (N/A) http://www.loc.gov/standards/mods/userguide/extension.html
        extension = tools.Try(ctx['mods:extension'])
        # SHARE type
        # (dc:type) http://www.loc.gov/standards/mods/userguide/genre.html
        genre = tools.Try(ctx['mods:genre'])
        # (dc:identifier) http://www.loc.gov/standards/mods/userguide/identifier.html
        identifier = tools.Try(ctx['mods:identifier'])
        # (dc:language) http://www.loc.gov/standards/mods/userguide/language.html
        language = tools.Try(ctx['mods:language'])
        # (dc:identifier for url) http://www.loc.gov/standards/mods/userguide/location.html
        location = tools.Try(ctx['mods:location'])
        # (dc:creator|dc:contributor) http://www.loc.gov/standards/mods/userguide/name.html
        name = tools.Try(ctx['mods:name'])
        # (dc:description) http://www.loc.gov/standards/mods/userguide/note.html
        note = tools.Try(ctx['mods:note'])
        # (dc:publisher|dc:date) http://www.loc.gov/standards/mods/userguide/origininfo.html
        originInfo = tools.Try(ctx['mods:originInfo'])
        # Extra
        # (dc:title) http://www.loc.gov/standards/mods/userguide/part.html
        part = tools.Try(ctx['mods:part'])
        # (dc:format or N/A) http://www.loc.gov/standards/mods/userguide/physicaldescription.html
        physicalDescription = tools.Try(ctx['mods:physicalDescription'])
        # Metadata information
        # (N/A) http://www.loc.gov/standards/mods/userguide/recordinfo.html
        recordInfo = tools.Try(ctx['mods:recordInfo'])
        # (dc:relation) http://www.loc.gov/standards/mods/userguide/relateditem.html
        relatedItem = tools.Try(ctx['mods:relatedItem'])
        # (dc:subject|dc:type|dc:coverage|N/A) http://www.loc.gov/standards/mods/userguide/subject.html
        subject = tools.Try(ctx['mods:subject'])
        # (dc:description) http://www.loc.gov/standards/mods/userguide/tableofcontents.html
        tableOfContents = tools.Try(ctx['mods:tableOfContents'])
        # (N/A) http://www.loc.gov/standards/mods/userguide/targetaudience.html
        targetAudience = tools.Try(ctx['mods:targetAudience'])
        # (dc:title) http://www.loc.gov/standards/mods/userguide/titleinfo.html
        titleInfo = tools.Try(ctx['mods:titleInfo'])
        # Extra
        # (dc:type) http://www.loc.gov/standards/mods/userguide/typeofresource.html
        typeOfResource = tools.Try(ctx['mods:typeOfResource'])
def get_schema(self, types):
if not types or not self.type_map:
return self.default_type
if isinstance(types, str):
types = [types]
for t in types:
if isinstance(t, dict):
t = t['#text']
t = t.lower()
if t in self.type_map:
return self.type_map[t]
return self.default_type
def tokenize(self, data):
if isinstance(data, str):
data = [data]
tokens = []
for item in data:
tokens.extend([x.strip() for x in re.split(r'(?: - )|\.|,', item) if x])
return tokens
# Map titleInfos to a string: https://www.loc.gov/standards/mods/userguide/titleinfo.html#mappings
def join_title_info(self, obj):
def get_part(title_info, part_name, delimiter=''):
part = force_text(title_info.get(part_name, ''), ' ').strip()
return delimiter + part if part else ''
title_infos = get_list(obj, 'mods:titleInfo')
titles = []
for title_info in title_infos:
title = ''
title += get_part(title_info, 'mods:nonSort')
title += get_part(title_info, 'mods:title')
title += get_part(title_info, 'mods:subTitle', ': ')
title += get_part(title_info, 'mods:partNumber', '. ')
title += get_part(title_info, 'mods:partName', ': ')
if title:
titles.append(title)
return '. '.join(titles)
def filter_names(self, obj, *roles, invert=False):
names = get_list(obj, 'mods:name')
filtered = [*names] if invert else []
for name in names:
name_roles = get_list(name, 'mods:role')
for role in name_roles:
role_terms = get_list(role, 'mods:roleTerm')
name_roles = {force_text(r).lower() for r in role_terms}
name_roles.update({self.role_map[r] for r in name_roles if r in self.role_map})
if name_roles.intersection(roles):
if invert:
filtered.remove(name)
else:
filtered.append(name)
return filtered
class MODSTransformer(ChainTransformer):
    """Transformer for the MODS OAI-PMH metadata format.

    transformer_kwargs (TODO explain):
        emitted_type
        approved_sets
        blocked_sets
        type_map
        role_map
    """
    VERSION = 1

    # MARC relator codes (plus a few literal role names) normalized to the
    # role names used by the name-filtering parser above.
    marc_roles = {
        'fnd': 'funder',
        'hst': 'host',
        'his': 'host',
        'pbl': 'publisher',
        'cre': 'creator',
        'aut': 'creator',
        'author': 'creator',
    }

    def get_root_parser(self, unwrapped, emitted_type='creativework', type_map=None, role_map=None, **kwargs):
        """Build a MODSCreativeWork subclass configured for this source.

        Type and role mappings are merged over the defaults with all keys
        lowercased (role values are lowercased too).
        """
        combined_types = {root.lower(): root for root in self.allowed_roots}
        combined_types.update({key.lower(): value for key, value in (type_map or {}).items()})

        combined_roles = dict(self.marc_roles)
        combined_roles.update({key.lower(): value.lower() for key, value in (role_map or {}).items()})

        class RootParser(MODSCreativeWork):
            default_type = emitted_type.lower()
            type_map = combined_types
            role_map = combined_roles

        return RootParser

    def do_transform(self, datum, approved_sets=None, blocked_sets=None, **kwargs):
        """Transform one datum, unless OAI set filtering excludes it."""
        if oai_allowed_by_sets(datum, blocked_sets, approved_sets):
            return super().do_transform(datum, **kwargs)
        return (None, None)

    def unwrap_data(self, data, namespaces=None, **kwargs):
        """Parse the OAI XML and merge the mods:mods payload with its header.

        The record's OAI header is attached under the 'header' key so the
        parser can reach setSpec, datestamp and the deleted-record status.
        """
        parsed = xmltodict.parse(data, process_namespaces=True, namespaces=(namespaces or self.NAMESPACES))
        record = parsed['record']
        unwrapped = dict(record.get('metadata', {}).get('mods:mods', {}))
        unwrapped['header'] = record['header']
        return unwrapped
| |
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from oslo_log import log as logging
from tempest.common import negative_rest_client
from tempest import config
from tempest import exceptions
from tempest.lib.services.compute.agents_client import AgentsClient
from tempest.lib.services.compute.aggregates_client import AggregatesClient
from tempest.lib.services.compute.availability_zone_client import \
AvailabilityZoneClient
from tempest.lib.services.compute.baremetal_nodes_client import \
BaremetalNodesClient
from tempest.lib.services.compute.certificates_client import \
CertificatesClient
from tempest.lib.services.compute.extensions_client import \
ExtensionsClient
from tempest.lib.services.compute.fixed_ips_client import FixedIPsClient
from tempest.lib.services.compute.flavors_client import FlavorsClient
from tempest.lib.services.compute.floating_ip_pools_client import \
FloatingIPPoolsClient
from tempest.lib.services.compute.floating_ips_bulk_client import \
FloatingIPsBulkClient
from tempest.lib.services.compute.floating_ips_client import \
FloatingIPsClient as ComputeFloatingIPsClient
from tempest.lib.services.compute.hosts_client import HostsClient
from tempest.lib.services.compute.hypervisor_client import \
HypervisorClient
from tempest.lib.services.compute.images_client import ImagesClient \
as ComputeImagesClient
from tempest.services.volume.base.base_versions_client import BaseVersionsClient \
as BaseVersionsClient
from tempest.lib.services.compute.instance_usage_audit_log_client import \
InstanceUsagesAuditLogClient
from tempest.lib.services.compute.interfaces_client import InterfacesClient
from tempest.lib.services.compute.keypairs_client import KeyPairsClient
from tempest.lib.services.compute.limits_client import LimitsClient
from tempest.lib.services.compute.migrations_client import MigrationsClient
from tempest.lib.services.compute.networks_client import NetworksClient \
as ComputeNetworksClient
from tempest.lib.services.compute.quota_classes_client import \
QuotaClassesClient
from tempest.lib.services.compute.quotas_client import QuotasClient
from tempest.lib.services.compute.security_group_default_rules_client import \
SecurityGroupDefaultRulesClient
from tempest.lib.services.compute.security_group_rules_client import \
SecurityGroupRulesClient as ComputeSecurityGroupRulesClient
from tempest.lib.services.compute.security_groups_client import \
SecurityGroupsClient as ComputeSecurityGroupsClient
from tempest.lib.services.compute.server_groups_client import \
ServerGroupsClient
from tempest.lib.services.compute.servers_client import ServersClient
from tempest.lib.services.compute.services_client import ServicesClient
from tempest.lib.services.compute.snapshots_client import \
SnapshotsClient as ComputeSnapshotsClient
from tempest.lib.services.compute.tenant_networks_client import \
TenantNetworksClient
from tempest.lib.services.compute.tenant_usages_client import \
TenantUsagesClient
from tempest.lib.services.compute.versions_client import VersionsClient
from tempest.lib.services.compute.volumes_client import \
VolumesClient as ComputeVolumesClient
from tempest.lib.services.identity.v2.token_client import TokenClient
from tempest.lib.services.identity.v3.token_client import V3TokenClient
from tempest.lib.services.network.agents_client import AgentsClient \
as NetworkAgentsClient
from tempest.lib.services.network.extensions_client import \
ExtensionsClient as NetworkExtensionsClient
from tempest.lib.services.network.floating_ips_client import FloatingIPsClient
from tempest.lib.services.network.metering_label_rules_client import \
MeteringLabelRulesClient
from tempest.lib.services.network.metering_labels_client import \
MeteringLabelsClient
from tempest.lib.services.network.networks_client import NetworksClient
from tempest.lib.services.network.ports_client import PortsClient
from tempest.lib.services.network.quotas_client import QuotasClient \
as NetworkQuotasClient
from tempest.lib.services.network.security_group_rules_client import \
SecurityGroupRulesClient
from tempest.lib.services.network.security_groups_client import \
SecurityGroupsClient
from tempest.lib.services.network.subnetpools_client import SubnetpoolsClient
from tempest.lib.services.network.subnets_client import SubnetsClient
from tempest import manager
from tempest.services.baremetal.v1.json.baremetal_client import \
BaremetalClient
from tempest.services.data_processing.v1_1.data_processing_client import \
DataProcessingClient
from tempest.services.database.json.flavors_client import \
DatabaseFlavorsClient
from tempest.services.database.json.limits_client import \
DatabaseLimitsClient
from tempest.services.database.json.versions_client import \
DatabaseVersionsClient
from tempest.services.identity.v2.json.endpoints_client import EndpointsClient
from tempest.services.identity.v2.json.identity_client import IdentityClient
from tempest.services.identity.v2.json.roles_client import RolesClient
from tempest.services.identity.v2.json.services_client import \
ServicesClient as IdentityServicesClient
from tempest.services.identity.v2.json.tenants_client import TenantsClient
from tempest.services.identity.v2.json.users_client import UsersClient
from tempest.services.identity.v3.json.credentials_client import \
CredentialsClient
from tempest.services.identity.v3.json.domains_client import DomainsClient
from tempest.services.identity.v3.json.endpoints_client import \
EndPointsClient as EndPointsV3Client
from tempest.services.identity.v3.json.groups_client import GroupsClient
from tempest.services.identity.v3.json.identity_client import \
IdentityClient as IdentityV3Client
from tempest.services.identity.v3.json.policies_client import PoliciesClient
from tempest.services.identity.v3.json.projects_client import ProjectsClient
from tempest.services.identity.v3.json.regions_client import RegionsClient
from tempest.services.identity.v3.json.roles_client import \
RolesClient as RolesV3Client
from tempest.services.identity.v3.json.services_client import \
ServicesClient as IdentityServicesV3Client
from tempest.services.identity.v3.json.trusts_client import TrustsClient
from tempest.services.identity.v3.json.users_clients import \
UsersClient as UsersV3Client
from tempest.services.image.v1.json.images_client import ImagesClient
from tempest.services.image.v2.json.images_client import ImagesClientV2
from tempest.services.network.json.routers_client import RoutersClient
from tempest.services.object_storage.account_client import AccountClient
from tempest.services.object_storage.container_client import ContainerClient
from tempest.services.object_storage.object_client import ObjectClient
from tempest.services.orchestration.json.orchestration_client import \
OrchestrationClient
from tempest.services.telemetry.json.alarming_client import AlarmingClient
from tempest.services.telemetry.json.telemetry_client import \
TelemetryClient
from tempest.services.volume.v1.json.admin.hosts_client import \
HostsClient as VolumeHostsClient
from tempest.services.volume.v1.json.admin.quotas_client import \
QuotasClient as VolumeQuotasClient
from tempest.services.volume.v1.json.admin.services_client import \
ServicesClient as VolumeServicesClient
from tempest.services.volume.v1.json.admin.types_client import \
TypesClient as VolumeTypesClient
from tempest.services.volume.v1.json.availability_zone_client import \
AvailabilityZoneClient as VolumeAvailabilityZoneClient
from tempest.services.volume.v1.json.backups_client import BackupsClient
from tempest.services.volume.v1.json.extensions_client import \
ExtensionsClient as VolumeExtensionsClient
from tempest.services.volume.v1.json.qos_client import QosSpecsClient
from tempest.services.volume.v1.json.snapshots_client import SnapshotsClient
from tempest.services.volume.v1.json.volumes_client import VolumesClient
from tempest.services.volume.v2.json.admin.hosts_client import \
HostsClient as VolumeHostsV2Client
from tempest.services.volume.v2.json.admin.quotas_client import \
QuotasClient as VolumeQuotasV2Client
from tempest.services.volume.v2.json.admin.services_client import \
ServicesClient as VolumeServicesV2Client
from tempest.services.volume.v2.json.admin.types_client import \
TypesClient as VolumeTypesV2Client
from tempest.services.volume.v2.json.availability_zone_client import \
AvailabilityZoneClient as VolumeAvailabilityZoneV2Client
from tempest.services.volume.v2.json.backups_client import \
BackupsClient as BackupsV2Client
from tempest.services.volume.v2.json.extensions_client import \
ExtensionsClient as VolumeExtensionsV2Client
from tempest.services.volume.v2.json.qos_client import \
QosSpecsClient as QosSpecsV2Client
from tempest.services.volume.v2.json.snapshots_client import \
SnapshotsClient as SnapshotsV2Client
from tempest.services.volume.v2.json.volumes_client import \
VolumesClient as VolumesV2Client
CONF = config.CONF  # global tempest configuration object
LOG = logging.getLogger(__name__)  # module logger
class Manager(manager.Manager):
    """Top level manager for OpenStack tempest clients"""
    # Keyword arguments shared by every service client: TLS certificate
    # validation settings and optional HTTP request tracing.
    default_params = {
        'disable_ssl_certificate_validation':
            CONF.identity.disable_ssl_certificate_validation,
        'ca_certs': CONF.identity.ca_certificates_file,
        'trace_requests': CONF.debug.trace_requests
    }
    # NOTE: Tempest uses timeout values of compute API if project specific
    # timeout values don't exist.
    default_params_with_timeout_values = {
        'build_interval': CONF.compute.build_interval,
        'build_timeout': CONF.compute.build_timeout
    }
    default_params_with_timeout_values.update(default_params)
    def __init__(self, credentials, service=None):
        """Initialization of Manager class.
        Setup all services clients and make them available for tests cases.
        :param credentials: type Credentials or TestResources
        :param service: Service name
        """
        super(Manager, self).__init__(credentials=credentials)
        self._set_compute_clients()
        self._set_database_clients()
        self._set_identity_clients()
        self._set_volume_clients()
        self._set_object_storage_clients()
        # Clients without a dedicated _set_* helper are instantiated inline.
        self.baremetal_client = BaremetalClient(
            self.auth_provider,
            CONF.baremetal.catalog_type,
            CONF.identity.region,
            endpoint_type=CONF.baremetal.endpoint_type,
            **self.default_params_with_timeout_values)
        # Network (neutron) clients all share the network endpoint and
        # build-timeout settings, falling back to the identity region.
        self.network_agents_client = NetworkAgentsClient(
            self.auth_provider,
            CONF.network.catalog_type,
            CONF.network.region or CONF.identity.region,
            endpoint_type=CONF.network.endpoint_type,
            build_interval=CONF.network.build_interval,
            build_timeout=CONF.network.build_timeout,
            **self.default_params)
        self.network_extensions_client = NetworkExtensionsClient(
            self.auth_provider,
            CONF.network.catalog_type,
            CONF.network.region or CONF.identity.region,
            endpoint_type=CONF.network.endpoint_type,
            build_interval=CONF.network.build_interval,
            build_timeout=CONF.network.build_timeout,
            **self.default_params)
        self.networks_client = NetworksClient(
            self.auth_provider,
            CONF.network.catalog_type,
            CONF.network.region or CONF.identity.region,
            endpoint_type=CONF.network.endpoint_type,
            build_interval=CONF.network.build_interval,
            build_timeout=CONF.network.build_timeout,
            **self.default_params)
        self.subnetpools_client = SubnetpoolsClient(
            self.auth_provider,
            CONF.network.catalog_type,
            CONF.network.region or CONF.identity.region,
            endpoint_type=CONF.network.endpoint_type,
            build_interval=CONF.network.build_interval,
            build_timeout=CONF.network.build_timeout,
            **self.default_params)
        self.subnets_client = SubnetsClient(
            self.auth_provider,
            CONF.network.catalog_type,
            CONF.network.region or CONF.identity.region,
            endpoint_type=CONF.network.endpoint_type,
            build_interval=CONF.network.build_interval,
            build_timeout=CONF.network.build_timeout,
            **self.default_params)
        self.ports_client = PortsClient(
            self.auth_provider,
            CONF.network.catalog_type,
            CONF.network.region or CONF.identity.region,
            endpoint_type=CONF.network.endpoint_type,
            build_interval=CONF.network.build_interval,
            build_timeout=CONF.network.build_timeout,
            **self.default_params)
        self.network_quotas_client = NetworkQuotasClient(
            self.auth_provider,
            CONF.network.catalog_type,
            CONF.network.region or CONF.identity.region,
            endpoint_type=CONF.network.endpoint_type,
            build_interval=CONF.network.build_interval,
            build_timeout=CONF.network.build_timeout,
            **self.default_params)
        self.floating_ips_client = FloatingIPsClient(
            self.auth_provider,
            CONF.network.catalog_type,
            CONF.network.region or CONF.identity.region,
            endpoint_type=CONF.network.endpoint_type,
            build_interval=CONF.network.build_interval,
            build_timeout=CONF.network.build_timeout,
            **self.default_params)
        self.metering_labels_client = MeteringLabelsClient(
            self.auth_provider,
            CONF.network.catalog_type,
            CONF.network.region or CONF.identity.region,
            endpoint_type=CONF.network.endpoint_type,
            build_interval=CONF.network.build_interval,
            build_timeout=CONF.network.build_timeout,
            **self.default_params)
        self.metering_label_rules_client = MeteringLabelRulesClient(
            self.auth_provider,
            CONF.network.catalog_type,
            CONF.network.region or CONF.identity.region,
            endpoint_type=CONF.network.endpoint_type,
            build_interval=CONF.network.build_interval,
            build_timeout=CONF.network.build_timeout,
            **self.default_params)
        self.routers_client = RoutersClient(
            self.auth_provider,
            CONF.network.catalog_type,
            CONF.network.region or CONF.identity.region,
            endpoint_type=CONF.network.endpoint_type,
            build_interval=CONF.network.build_interval,
            build_timeout=CONF.network.build_timeout,
            **self.default_params)
        self.security_group_rules_client = SecurityGroupRulesClient(
            self.auth_provider,
            CONF.network.catalog_type,
            CONF.network.region or CONF.identity.region,
            endpoint_type=CONF.network.endpoint_type,
            build_interval=CONF.network.build_interval,
            build_timeout=CONF.network.build_timeout,
            **self.default_params)
        self.security_groups_client = SecurityGroupsClient(
            self.auth_provider,
            CONF.network.catalog_type,
            CONF.network.region or CONF.identity.region,
            endpoint_type=CONF.network.endpoint_type,
            build_interval=CONF.network.build_interval,
            build_timeout=CONF.network.build_timeout,
            **self.default_params)
        # Optional services are only wired up when enabled in the config.
        if CONF.service_available.ceilometer:
            self.telemetry_client = TelemetryClient(
                self.auth_provider,
                CONF.telemetry.catalog_type,
                CONF.identity.region,
                endpoint_type=CONF.telemetry.endpoint_type,
                **self.default_params_with_timeout_values)
        if CONF.service_available.aodh:
            self.alarming_client = AlarmingClient(
                self.auth_provider,
                CONF.alarming.catalog_type,
                CONF.identity.region,
                endpoint_type=CONF.alarming.endpoint_type,
                **self.default_params_with_timeout_values)
        if CONF.service_available.glance:
            self.image_client = ImagesClient(
                self.auth_provider,
                CONF.image.catalog_type,
                CONF.image.region or CONF.identity.region,
                endpoint_type=CONF.image.endpoint_type,
                build_interval=CONF.image.build_interval,
                build_timeout=CONF.image.build_timeout,
                **self.default_params)
            self.image_client_v2 = ImagesClientV2(
                self.auth_provider,
                CONF.image.catalog_type,
                CONF.image.region or CONF.identity.region,
                endpoint_type=CONF.image.endpoint_type,
                build_interval=CONF.image.build_interval,
                build_timeout=CONF.image.build_timeout,
                **self.default_params)
        self.orchestration_client = OrchestrationClient(
            self.auth_provider,
            CONF.orchestration.catalog_type,
            CONF.orchestration.region or CONF.identity.region,
            endpoint_type=CONF.orchestration.endpoint_type,
            build_interval=CONF.orchestration.build_interval,
            build_timeout=CONF.orchestration.build_timeout,
            **self.default_params)
        self.data_processing_client = DataProcessingClient(
            self.auth_provider,
            CONF.data_processing.catalog_type,
            CONF.identity.region,
            endpoint_type=CONF.data_processing.endpoint_type,
            **self.default_params_with_timeout_values)
        self.negative_client = negative_rest_client.NegativeRestClient(
            self.auth_provider, service, **self.default_params)
    def _set_compute_clients(self):
        """Initialize the compute (nova) service clients."""
        params = {
            'service': CONF.compute.catalog_type,
            'region': CONF.compute.region or CONF.identity.region,
            'endpoint_type': CONF.compute.endpoint_type,
            'build_interval': CONF.compute.build_interval,
            'build_timeout': CONF.compute.build_timeout
        }
        params.update(self.default_params)
        self.agents_client = AgentsClient(self.auth_provider, **params)
        self.compute_networks_client = ComputeNetworksClient(
            self.auth_provider, **params)
        self.migrations_client = MigrationsClient(self.auth_provider,
                                                  **params)
        self.security_group_default_rules_client = (
            SecurityGroupDefaultRulesClient(self.auth_provider, **params))
        self.certificates_client = CertificatesClient(self.auth_provider,
                                                      **params)
        self.servers_client = ServersClient(
            self.auth_provider,
            enable_instance_password=CONF.compute_feature_enabled
            .enable_instance_password,
            **params)
        self.server_groups_client = ServerGroupsClient(
            self.auth_provider, **params)
        self.limits_client = LimitsClient(self.auth_provider, **params)
        self.compute_images_client = ComputeImagesClient(self.auth_provider,
                                                         **params)
        self.base_versions_client = BaseVersionsClient(self.auth_provider,
                                                       **params)
        self.keypairs_client = KeyPairsClient(self.auth_provider, **params)
        self.quotas_client = QuotasClient(self.auth_provider, **params)
        self.quota_classes_client = QuotaClassesClient(self.auth_provider,
                                                       **params)
        self.flavors_client = FlavorsClient(self.auth_provider, **params)
        self.extensions_client = ExtensionsClient(self.auth_provider,
                                                  **params)
        self.floating_ip_pools_client = FloatingIPPoolsClient(
            self.auth_provider, **params)
        self.floating_ips_bulk_client = FloatingIPsBulkClient(
            self.auth_provider, **params)
        self.compute_floating_ips_client = ComputeFloatingIPsClient(
            self.auth_provider, **params)
        self.compute_security_group_rules_client = \
            ComputeSecurityGroupRulesClient(self.auth_provider, **params)
        self.compute_security_groups_client = ComputeSecurityGroupsClient(
            self.auth_provider, **params)
        self.interfaces_client = InterfacesClient(self.auth_provider,
                                                  **params)
        self.fixed_ips_client = FixedIPsClient(self.auth_provider,
                                               **params)
        self.availability_zone_client = AvailabilityZoneClient(
            self.auth_provider, **params)
        self.aggregates_client = AggregatesClient(self.auth_provider,
                                                  **params)
        self.services_client = ServicesClient(self.auth_provider, **params)
        self.tenant_usages_client = TenantUsagesClient(self.auth_provider,
                                                       **params)
        self.hosts_client = HostsClient(self.auth_provider, **params)
        self.hypervisor_client = HypervisorClient(self.auth_provider,
                                                  **params)
        self.instance_usages_audit_log_client = \
            InstanceUsagesAuditLogClient(self.auth_provider, **params)
        self.tenant_networks_client = \
            TenantNetworksClient(self.auth_provider, **params)
        self.baremetal_nodes_client = BaremetalNodesClient(
            self.auth_provider, **params)
        # NOTE: The following client needs special timeout values because
        # the API is a proxy for the other component.
        params_volume = copy.deepcopy(params)
        params_volume.update({
            'build_interval': CONF.volume.build_interval,
            'build_timeout': CONF.volume.build_timeout
        })
        self.volumes_extensions_client = ComputeVolumesClient(
            self.auth_provider, **params_volume)
        self.compute_versions_client = VersionsClient(self.auth_provider,
                                                      **params_volume)
        self.snapshots_extensions_client = ComputeSnapshotsClient(
            self.auth_provider, **params_volume)
    def _set_database_clients(self):
        """Initialize the database (trove) service clients."""
        self.database_flavors_client = DatabaseFlavorsClient(
            self.auth_provider,
            CONF.database.catalog_type,
            CONF.identity.region,
            **self.default_params_with_timeout_values)
        self.database_limits_client = DatabaseLimitsClient(
            self.auth_provider,
            CONF.database.catalog_type,
            CONF.identity.region,
            **self.default_params_with_timeout_values)
        self.database_versions_client = DatabaseVersionsClient(
            self.auth_provider,
            CONF.database.catalog_type,
            CONF.identity.region,
            **self.default_params_with_timeout_values)
    def _set_identity_clients(self):
        """Initialize the identity (keystone) v2 and v3 service clients."""
        params = {
            'service': CONF.identity.catalog_type,
            'region': CONF.identity.region
        }
        params.update(self.default_params_with_timeout_values)
        # Clients below use the admin endpoint type of Keystone API v2
        params_v2_admin = params.copy()
        params_v2_admin['endpoint_type'] = CONF.identity.v2_admin_endpoint_type
        self.endpoints_client = EndpointsClient(self.auth_provider,
                                                **params_v2_admin)
        self.identity_client = IdentityClient(self.auth_provider,
                                              **params_v2_admin)
        self.tenants_client = TenantsClient(self.auth_provider,
                                            **params_v2_admin)
        self.roles_client = RolesClient(self.auth_provider, **params_v2_admin)
        self.users_client = UsersClient(self.auth_provider, **params_v2_admin)
        self.identity_services_client = IdentityServicesClient(
            self.auth_provider, **params_v2_admin)
        # Clients below use the public endpoint type of Keystone API v2
        params_v2_public = params.copy()
        params_v2_public['endpoint_type'] = (
            CONF.identity.v2_public_endpoint_type)
        self.identity_public_client = IdentityClient(self.auth_provider,
                                                     **params_v2_public)
        self.tenants_public_client = TenantsClient(self.auth_provider,
                                                   **params_v2_public)
        self.users_public_client = UsersClient(self.auth_provider,
                                               **params_v2_public)
        # Clients below use the endpoint type of Keystone API v3
        params_v3 = params.copy()
        params_v3['endpoint_type'] = CONF.identity.v3_endpoint_type
        self.domains_client = DomainsClient(self.auth_provider,
                                            **params_v3)
        self.identity_v3_client = IdentityV3Client(self.auth_provider,
                                                   **params_v3)
        self.trusts_client = TrustsClient(self.auth_provider, **params_v3)
        self.users_v3_client = UsersV3Client(self.auth_provider, **params_v3)
        self.endpoints_v3_client = EndPointsV3Client(self.auth_provider,
                                                     **params_v3)
        self.roles_v3_client = RolesV3Client(self.auth_provider, **params_v3)
        self.identity_services_v3_client = IdentityServicesV3Client(
            self.auth_provider, **params_v3)
        self.policies_client = PoliciesClient(self.auth_provider, **params_v3)
        self.projects_client = ProjectsClient(self.auth_provider, **params_v3)
        self.regions_client = RegionsClient(self.auth_provider, **params_v3)
        self.credentials_client = CredentialsClient(self.auth_provider,
                                                    **params_v3)
        self.groups_client = GroupsClient(self.auth_provider, **params_v3)
        # Token clients do not use the catalog. They only need default_params.
        # They read auth_url, so they should only be set if the corresponding
        # API version is marked as enabled
        if CONF.identity_feature_enabled.api_v2:
            if CONF.identity.uri:
                self.token_client = TokenClient(
                    CONF.identity.uri, **self.default_params)
            else:
                msg = 'Identity v2 API enabled, but no identity.uri set'
                raise exceptions.InvalidConfiguration(msg)
        if CONF.identity_feature_enabled.api_v3:
            if CONF.identity.uri_v3:
                self.token_v3_client = V3TokenClient(
                    CONF.identity.uri_v3, **self.default_params)
            else:
                msg = 'Identity v3 API enabled, but no identity.uri_v3 set'
                raise exceptions.InvalidConfiguration(msg)
    def _set_volume_clients(self):
        """Initialize the volume (cinder) v1 and v2 service clients."""
        params = {
            'service': CONF.volume.catalog_type,
            'region': CONF.volume.region or CONF.identity.region,
            'endpoint_type': CONF.volume.endpoint_type,
            'build_interval': CONF.volume.build_interval,
            'build_timeout': CONF.volume.build_timeout
        }
        params.update(self.default_params)
        self.volume_qos_client = QosSpecsClient(self.auth_provider,
                                                **params)
        self.volume_qos_v2_client = QosSpecsV2Client(
            self.auth_provider, **params)
        self.volume_services_client = VolumeServicesClient(
            self.auth_provider, **params)
        self.volume_services_v2_client = VolumeServicesV2Client(
            self.auth_provider, **params)
        self.backups_client = BackupsClient(self.auth_provider, **params)
        self.backups_v2_client = BackupsV2Client(self.auth_provider,
                                                 **params)
        self.snapshots_client = SnapshotsClient(self.auth_provider,
                                                **params)
        self.snapshots_v2_client = SnapshotsV2Client(self.auth_provider,
                                                     **params)
        self.volumes_client = VolumesClient(
            self.auth_provider, default_volume_size=CONF.volume.volume_size,
            **params)
        self.volumes_v2_client = VolumesV2Client(
            self.auth_provider, default_volume_size=CONF.volume.volume_size,
            **params)
        self.volume_types_client = VolumeTypesClient(self.auth_provider,
                                                     **params)
        self.volume_types_v2_client = VolumeTypesV2Client(
            self.auth_provider, **params)
        self.volume_hosts_client = VolumeHostsClient(self.auth_provider,
                                                     **params)
        self.volume_hosts_v2_client = VolumeHostsV2Client(
            self.auth_provider, **params)
        self.volume_quotas_client = VolumeQuotasClient(self.auth_provider,
                                                       **params)
        self.volume_quotas_v2_client = VolumeQuotasV2Client(self.auth_provider,
                                                            **params)
        self.volumes_extension_client = VolumeExtensionsClient(
            self.auth_provider, **params)
        self.volumes_v2_extension_client = VolumeExtensionsV2Client(
            self.auth_provider, **params)
        self.volume_availability_zone_client = \
            VolumeAvailabilityZoneClient(self.auth_provider, **params)
        self.volume_v2_availability_zone_client = \
            VolumeAvailabilityZoneV2Client(self.auth_provider, **params)
    def _set_object_storage_clients(self):
        """Initialize the object storage (swift) service clients."""
        params = {
            'service': CONF.object_storage.catalog_type,
            'region': CONF.object_storage.region or CONF.identity.region,
            'endpoint_type': CONF.object_storage.endpoint_type
        }
        params.update(self.default_params_with_timeout_values)
        self.account_client = AccountClient(self.auth_provider, **params)
        self.container_client = ContainerClient(self.auth_provider, **params)
        self.object_client = ObjectClient(self.auth_provider, **params)
| |
"""
Introduction
============
SqlSoup provides a convenient way to access database tables without
having to declare table or mapper classes ahead of time.
Suppose we have a database with users, books, and loans tables
(corresponding to the PyWebOff dataset, if you're curious). For
testing purposes, we'll create this db as follows::
>>> from sqlalchemy import create_engine
>>> e = create_engine('sqlite:///:memory:')
>>> for sql in _testsql: e.execute(sql) #doctest: +ELLIPSIS
<...
Creating a SqlSoup gateway is just like creating an SQLAlchemy
engine::
>>> from sqlalchemy.ext.sqlsoup import SqlSoup
>>> db = SqlSoup('sqlite:///:memory:')
or, you can re-use an existing metadata or engine::
>>> db = SqlSoup(MetaData(e))
You can optionally specify a schema within the database for your
SqlSoup::
# >>> db.schema = myschemaname
Loading objects
===============
Loading objects is as easy as this::
>>> users = db.users.all()
>>> users.sort()
>>> users
[MappedUsers(name='Joe Student',email='student@example.edu',password='student',classname=None,admin=0), MappedUsers(name='Bhargan Basepair',email='basepair@example.edu',password='basepair',classname=None,admin=1)]
Of course, letting the database do the sort is better::
>>> db.users.order_by(db.users.name).all()
[MappedUsers(name='Bhargan Basepair',email='basepair@example.edu',password='basepair',classname=None,admin=1), MappedUsers(name='Joe Student',email='student@example.edu',password='student',classname=None,admin=0)]
Field access is intuitive::
>>> users[0].email
u'student@example.edu'
Of course, you don't want to load all users very often. Let's add a
WHERE clause. Let's also switch the order_by to DESC while we're at
it::
>>> from sqlalchemy import or_, and_, desc
>>> where = or_(db.users.name=='Bhargan Basepair', db.users.email=='student@example.edu')
>>> db.users.filter(where).order_by(desc(db.users.name)).all()
[MappedUsers(name='Joe Student',email='student@example.edu',password='student',classname=None,admin=0), MappedUsers(name='Bhargan Basepair',email='basepair@example.edu',password='basepair',classname=None,admin=1)]
You can also use .first() (to retrieve only the first object from a query) or
.one() (like .first when you expect exactly one user -- it will raise an
exception if more were returned)::
>>> db.users.filter(db.users.name=='Bhargan Basepair').one()
MappedUsers(name='Bhargan Basepair',email='basepair@example.edu',password='basepair',classname=None,admin=1)
Since name is the primary key, this is equivalent to
>>> db.users.get('Bhargan Basepair')
MappedUsers(name='Bhargan Basepair',email='basepair@example.edu',password='basepair',classname=None,admin=1)
This is also equivalent to
>>> db.users.filter_by(name='Bhargan Basepair').one()
MappedUsers(name='Bhargan Basepair',email='basepair@example.edu',password='basepair',classname=None,admin=1)
filter_by is like filter, but takes kwargs instead of full clause expressions.
This makes it more concise for simple queries like this, but you can't do
complex queries like the or\_ above or non-equality based comparisons this way.
Full query documentation
------------------------
Get, filter, filter_by, order_by, limit, and the rest of the
query methods are explained in detail in the `SQLAlchemy documentation`__.
__ http://www.sqlalchemy.org/docs/04/ormtutorial.html#datamapping_querying
Modifying objects
=================
Modifying objects is intuitive::
>>> user = _
>>> user.email = 'basepair+nospam@example.edu'
>>> db.flush()
(SqlSoup leverages the sophisticated SQLAlchemy unit-of-work code, so
multiple updates to a single object will be turned into a single
``UPDATE`` statement when you flush.)
To finish covering the basics, let's insert a new loan, then delete
it::
>>> book_id = db.books.filter_by(title='Regional Variation in Moss').first().id
>>> db.loans.insert(book_id=book_id, user_name=user.name)
MappedLoans(book_id=2,user_name='Bhargan Basepair',loan_date=None)
>>> db.flush()
>>> loan = db.loans.filter_by(book_id=2, user_name='Bhargan Basepair').one()
>>> db.delete(loan)
>>> db.flush()
You can also delete rows that have not been loaded as objects. Let's
do our insert/delete cycle once more, this time using the loans
table's delete method. (For SQLAlchemy experts: note that no flush()
call is required since this delete acts at the SQL level, not at the
Mapper level.) The same where-clause construction rules apply here as
to the select methods.
::
>>> db.loans.insert(book_id=book_id, user_name=user.name)
MappedLoans(book_id=2,user_name='Bhargan Basepair',loan_date=None)
>>> db.flush()
>>> db.loans.delete(db.loans.book_id==2)
You can similarly update multiple rows at once. This will change the
book_id to 1 in all loans whose book_id is 2::
>>> db.loans.update(db.loans.book_id==2, book_id=1)
>>> db.loans.filter_by(book_id=1).all()
[MappedLoans(book_id=1,user_name='Joe Student',loan_date=datetime.datetime(2006, 7, 12, 0, 0))]
Joins
=====
Occasionally, you will want to pull out a lot of data from related
tables all at once. In this situation, it is far more efficient to
have the database perform the necessary join. (Here we do not have *a
lot of data* but hopefully the concept is still clear.) SQLAlchemy is
smart enough to recognize that loans has a foreign key to users, and
uses that as the join condition automatically.
::
>>> join1 = db.join(db.users, db.loans, isouter=True)
>>> join1.filter_by(name='Joe Student').all()
[MappedJoin(name='Joe Student',email='student@example.edu',password='student',classname=None,admin=0,book_id=1,user_name='Joe Student',loan_date=datetime.datetime(2006, 7, 12, 0, 0))]
If you're unfortunate enough to be using MySQL with the default MyISAM
storage engine, you'll have to specify the join condition manually,
since MyISAM does not store foreign keys. Here's the same join again,
with the join condition explicitly specified::
>>> db.join(db.users, db.loans, db.users.name==db.loans.user_name, isouter=True)
<class 'sqlalchemy.ext.sqlsoup.MappedJoin'>
You can compose arbitrarily complex joins by combining Join objects
with tables or other joins. Here we combine our first join with the
books table::
>>> join2 = db.join(join1, db.books)
>>> join2.all()
[MappedJoin(name='Joe Student',email='student@example.edu',password='student',classname=None,admin=0,book_id=1,user_name='Joe Student',loan_date=datetime.datetime(2006, 7, 12, 0, 0),id=1,title='Mustards I Have Known',published_year='1989',authors='Jones')]
If you join tables that have an identical column name, wrap your join
with `with_labels`, to disambiguate columns with their table name
(.c is short for .columns)::
>>> db.with_labels(join1).c.keys()
[u'users_name', u'users_email', u'users_password', u'users_classname', u'users_admin', u'loans_book_id', u'loans_user_name', u'loans_loan_date']
You can also join directly to a labeled object::
>>> labeled_loans = db.with_labels(db.loans)
>>> db.join(db.users, labeled_loans, isouter=True).c.keys()
[u'name', u'email', u'password', u'classname', u'admin', u'loans_book_id', u'loans_user_name', u'loans_loan_date']
Relations
=========
You can define relations on SqlSoup classes:
>>> db.users.relate('loans', db.loans)
These can then be used like a normal SA property:
>>> db.users.get('Joe Student').loans
[MappedLoans(book_id=1,user_name='Joe Student',loan_date=datetime.datetime(2006, 7, 12, 0, 0))]
>>> db.users.filter(~db.users.loans.any()).all()
[MappedUsers(name='Bhargan Basepair',email='basepair+nospam@example.edu',password='basepair',classname=None,admin=1)]
relate can take any options that the relation function accepts in normal mapper definition:
>>> del db._cache['users']
>>> db.users.relate('loans', db.loans, order_by=db.loans.loan_date, cascade='all, delete-orphan')
Advanced Use
============
Accessing the Session
---------------------
SqlSoup uses a SessionContext to provide thread-local sessions. You
can get a reference to the current one like this::
>>> from sqlalchemy.ext.sqlsoup import objectstore
>>> session = objectstore.current
Now you have access to all the standard session-based SA features,
such as transactions. (SqlSoup's ``flush()`` is normally
transactionalized, but you can perform manual transaction management
if you need a transaction to span multiple flushes.)
Mapping arbitrary Selectables
-----------------------------
SqlSoup can map any SQLAlchemy ``Selectable`` with the map
method. Let's map a ``Select`` object that uses an aggregate function;
we'll use the SQLAlchemy ``Table`` that SqlSoup introspected as the
basis. (Since we're not mapping to a simple table or join, we need to
tell SQLAlchemy how to find the *primary key* which just needs to be
unique within the select, and not necessarily correspond to a *real*
PK in the database.)
::
>>> from sqlalchemy import select, func
>>> b = db.books._table
>>> s = select([b.c.published_year, func.count('*').label('n')], from_obj=[b], group_by=[b.c.published_year])
>>> s = s.alias('years_with_count')
>>> years_with_count = db.map(s, primary_key=[s.c.published_year])
>>> years_with_count.filter_by(published_year='1989').all()
[MappedBooks(published_year='1989',n=1)]
Obviously if we just wanted to get a list of counts associated with
book years once, raw SQL is going to be less work. The advantage of
mapping a Select is reusability, both standalone and in Joins. (And if
you go to full SQLAlchemy, you can perform mappings like this directly
to your object models.)
An easy way to save mapped selectables like this is to just hang them on
your db object::
>>> db.years_with_count = years_with_count
Python is flexible like that!
Raw SQL
-------
SqlSoup works fine with SQLAlchemy's `text block support`__.
__ http://www.sqlalchemy.org/docs/04/sqlexpression.html#sql_text
You can also access the SqlSoup's `engine` attribute to compose SQL
directly. The engine's ``execute`` method corresponds to the one of a
DBAPI cursor, and returns a ``ResultProxy`` that has ``fetch`` methods
you would also see on a cursor::
>>> rp = db.bind.execute('select name, email from users order by name')
>>> for name, email in rp.fetchall(): print name, email
Bhargan Basepair basepair+nospam@example.edu
Joe Student student@example.edu
You can also pass this engine object to other SQLAlchemy constructs.
Dynamic table names
-------------------
You can load a table whose name is specified at runtime with the entity() method:
>>> tablename = 'loans'
>>> db.entity(tablename) == db.loans
True
Extra tests
===========
Boring tests here. Nothing of real expository value.
::
>>> db.users.filter_by(classname=None).order_by(db.users.name).all()
[MappedUsers(name='Bhargan Basepair',email='basepair+nospam@example.edu',password='basepair',classname=None,admin=1), MappedUsers(name='Joe Student',email='student@example.edu',password='student',classname=None,admin=0)]
>>> db.nopk
Traceback (most recent call last):
...
PKNotFoundError: table 'nopk' does not have a primary key defined [columns: i]
>>> db.nosuchtable
Traceback (most recent call last):
...
NoSuchTableError: nosuchtable
>>> years_with_count.insert(published_year='2007', n=1)
Traceback (most recent call last):
...
InvalidRequestError: SQLSoup can only modify mapped Tables (found: Alias)
[tests clear()]
>>> db.loans.count()
1
>>> _ = db.loans.insert(book_id=1, user_name='Bhargan Basepair')
>>> db.clear()
>>> db.flush()
>>> db.loans.count()
1
"""
from sqlalchemy import *
from sqlalchemy import schema, sql
from sqlalchemy.orm import *
from sqlalchemy.ext.sessioncontext import SessionContext
from sqlalchemy.exceptions import *
from sqlalchemy.sql import expression
_testsql = """
CREATE TABLE books (
id integer PRIMARY KEY, -- auto-increments in sqlite
title text NOT NULL,
published_year char(4) NOT NULL,
authors text NOT NULL
);
CREATE TABLE users (
name varchar(32) PRIMARY KEY,
email varchar(128) NOT NULL,
password varchar(128) NOT NULL,
classname text,
admin int NOT NULL -- 0 = false
);
CREATE TABLE loans (
book_id int PRIMARY KEY REFERENCES books(id),
user_name varchar(32) references users(name)
ON DELETE SET NULL ON UPDATE CASCADE,
loan_date datetime DEFAULT current_timestamp
);
insert into users(name, email, password, admin)
values('Bhargan Basepair', 'basepair@example.edu', 'basepair', 1);
insert into users(name, email, password, admin)
values('Joe Student', 'student@example.edu', 'student', 0);
insert into books(title, published_year, authors)
values('Mustards I Have Known', '1989', 'Jones');
insert into books(title, published_year, authors)
values('Regional Variation in Moss', '1971', 'Flim and Flam');
insert into loans(book_id, user_name, loan_date)
values (
(select min(id) from books),
(select name from users where name like 'Joe%'),
'2006-07-12 0:0:0')
;
CREATE TABLE nopk (
i int
);
""".split(';')
__all__ = ['PKNotFoundError', 'SqlSoup']
#
# thread local SessionContext
#
class Objectstore(SessionContext):
    """SessionContext that forwards attribute access to the current session."""

    def __getattr__(self, attr_name):
        # Anything not found on the context is proxied to the
        # thread-local session (flush, clear, delete, ...).
        return getattr(self.current, attr_name)

    def get_session(self):
        """Return the thread-local session for the calling thread."""
        return self.current
# Shared thread-local session registry used by all SqlSoup instances.
objectstore = Objectstore(create_session)
# Raised when a reflected table has no primary key to map against.
class PKNotFoundError(SQLAlchemyError): pass
def _ddl_error(cls):
    """Reject insert/update/delete on a class not mapped to a plain Table."""
    raise InvalidRequestError(
        'SQLSoup can only modify mapped Tables (found: %s)'
        % cls._table.__class__.__name__)
# metaclass is necessary to expose class methods with getattr, e.g.
# we want to pass db.users.select through to users._mapper.select
class SelectableClassType(type):
    """Metaclass for classes mapped to arbitrary selectables.

    Such mappings are read-only: the modification helpers raise, and
    unknown class attributes are delegated to the class-level Query.
    """
    def insert(cls, **kwargs):
        # Joins/aliases/selects cannot be inserted into.
        _ddl_error(cls)
    def delete(cls, *args, **kwargs):
        # Joins/aliases/selects cannot be deleted from.
        _ddl_error(cls)
    def update(cls, whereclause=None, values=None, **kwargs):
        # Joins/aliases/selects cannot be updated.
        _ddl_error(cls)
    def __selectable__(cls):
        # Allow the class itself to be used where a selectable is expected.
        return cls._table
    def __getattr__(cls, attr):
        if attr == '_query':
            # called during mapper init; raising AttributeError here
            # avoids recursing back into this __getattr__.
            raise AttributeError()
        # Everything else (get, filter, order_by, ...) proxies to the Query.
        return getattr(cls._query, attr)
class TableClassType(SelectableClassType):
    """Metaclass for classes mapped to real Tables; modification is allowed."""
    def insert(cls, **kwargs):
        # Build a new instance from the kwargs; the unit of work will
        # INSERT it at flush time.
        o = cls()
        o.__dict__.update(kwargs)
        return o
    def delete(cls, *args, **kwargs):
        # SQL-level DELETE, executed immediately (no flush required).
        cls._table.delete(*args, **kwargs).execute()
    def update(cls, whereclause=None, values=None, **kwargs):
        # SQL-level UPDATE, executed immediately (no flush required).
        cls._table.update(whereclause, values).execute(**kwargs)
    def relate(cls, propname, *args, **kwargs):
        # Attach a relation() property to the mapped class after the fact.
        class_mapper(cls)._compile_property(propname, relation(*args, **kwargs))
def _is_outer_join(selectable):
    """Recursively detect an outer join anywhere inside *selectable*."""
    if not isinstance(selectable, sql.Join):
        return False
    return (selectable.isouter
            or _is_outer_join(selectable.left)
            or _is_outer_join(selectable.right))
def _selectable_name(selectable):
    """Derive a class-name fragment from a selectable (used for 'MappedFoo')."""
    if isinstance(selectable, sql.Alias):
        # Name an alias after whatever it wraps.
        return _selectable_name(selectable.selectable)
    if isinstance(selectable, sql.Select):
        # Concatenate the names of everything in the FROM list.
        return ''.join([_selectable_name(from_obj)
                        for from_obj in selectable.froms])
    if isinstance(selectable, schema.Table):
        return selectable.name.capitalize()
    # Fall back to the class name, minus any leading underscore.
    name = selectable.__class__.__name__
    if name.startswith('_'):
        name = name[1:]
    return name
def class_for_table(selectable, **mapper_kwargs):
    """Create, map and return a class for *selectable*.

    A plain ``Table`` gets a modifiable ``TableClassType`` class; any
    other selectable (join, alias, select) gets a read-only
    ``SelectableClassType`` class.  Extra keyword arguments are passed
    through to ``mapper()``.
    """
    selectable = expression._selectable(selectable)
    mapname = 'Mapped' + _selectable_name(selectable)
    if isinstance(selectable, Table):
        klass = TableClassType(mapname, (object,), {})
    else:
        klass = SelectableClassType(mapname, (object,), {})
    def __cmp__(self, o):
        # Compare column values in a deterministic (sorted) column order.
        L = self.__class__.c.keys()
        L.sort()
        t1 = [getattr(self, k) for k in L]
        try:
            t2 = [getattr(o, k) for k in L]
        except AttributeError:
            raise TypeError('unable to compare with %s' % o.__class__)
        return cmp(t1, t2)
    def __repr__(self):
        import locale
        encoding = locale.getdefaultlocale()[1] or 'ascii'
        L = []
        for k in self.__class__.c.keys():
            value = getattr(self, k, '')
            if isinstance(value, unicode):
                value = value.encode(encoding)
            L.append("%s=%r" % (k, value))
        return '%s(%s)' % (self.__class__.__name__, ','.join(L))
    # Attach the helpers directly; the original eval(m) round-trip was
    # fragile and unnecessary since both functions are local names.
    klass.__cmp__ = __cmp__
    klass.__repr__ = __repr__
    klass._table = selectable
    mappr = mapper(klass,
                   selectable,
                   extension=objectstore.mapper_extension,
                   allow_null_pks=_is_outer_join(selectable),
                   **mapper_kwargs)
    klass._query = Query(mappr)
    return klass
class SqlSoup:
    """Gateway giving ad-hoc mapped access to database tables.

    Tables are reflected and mapped lazily on first attribute access,
    e.g. ``db.users`` reflects, maps and returns a class for the
    ``users`` table.
    """
    def __init__(self, *args, **kwargs):
        """Initialize a new ``SqlSoup``.
        `args` may either be an ``SQLEngine`` or a set of arguments
        suitable for passing to ``create_engine``.
        """
        # meh, sometimes having method overloading instead of kwargs would be easier
        if isinstance(args[0], MetaData):
            args = list(args)
            metadata = args.pop(0)
            if args or kwargs:
                raise ArgumentError('Extra arguments not allowed when metadata is given')
        else:
            metadata = MetaData(*args, **kwargs)
        self._metadata = metadata
        # Cache of mapped classes, keyed by table name (entity) or
        # selectable object (map/join/with_labels).
        self._cache = {}
        # Optional schema name used when reflecting tables.
        self.schema = None
    def engine(self):
        # The engine (bind) underlying the metadata.
        return self._metadata.bind
    engine = property(engine)
    bind = engine
    def delete(self, *args, **kwargs):
        """Mark an object for deletion on the next flush()."""
        objectstore.delete(*args, **kwargs)
    def flush(self):
        """Flush pending changes in the thread-local session to the database."""
        objectstore.get_session().flush()
    def clear(self):
        """Discard all pending changes in the thread-local session."""
        objectstore.clear()
    def map(self, selectable, **kwargs):
        """Return a mapped class for an arbitrary selectable (cached).

        NOTE(review): the cache key is the selectable alone, so `kwargs`
        are ignored on a cache hit -- confirm callers never re-map the
        same selectable with different mapper arguments.
        """
        try:
            t = self._cache[selectable]
        except KeyError:
            t = class_for_table(selectable, **kwargs)
            self._cache[selectable] = t
        return t
    def with_labels(self, item):
        """Map `item` with table-name-prefixed (labeled) columns."""
        # TODO give meaningful aliases
        return self.map(expression._selectable(item).select(use_labels=True).alias('foo'))
    def join(self, *args, **kwargs):
        """Create and map a join between mapped classes or selectables."""
        j = join(*args, **kwargs)
        return self.map(j)
    def entity(self, attr):
        """Return the mapped class for table name `attr`, reflecting on first use.

        Raises PKNotFoundError if the reflected table has no primary key.
        """
        try:
            t = self._cache[attr]
        except KeyError:
            table = Table(attr, self._metadata, autoload=True, schema=self.schema)
            if not table.primary_key.columns:
                raise PKNotFoundError('table %r does not have a primary key defined [columns: %s]' % (attr, ','.join(table.c.keys())))
            if table.columns:
                t = class_for_table(table)
            else:
                # Reflection found no columns; cache the miss as None.
                t = None
            self._cache[attr] = t
        return t
    def __getattr__(self, attr):
        # Unknown attributes are treated as table names: db.users, etc.
        return self.entity(attr)
    def __repr__(self):
        return 'SqlSoup(%r)' % self._metadata
if __name__ == '__main__':
    # Run the doctests embedded in the module docstring above.
    import logging
    logging.basicConfig()
    import doctest
    doctest.testmod()
| |
"""Color util methods."""
import math
import colorsys
from typing import Tuple, List, Optional
import attr
# Official CSS3 colors from w3.org:
# https://www.w3.org/TR/2010/PR-css3-color-20101028/#html4
# names do not have spaces in them so that we can compare against
# requests more easily (by removing spaces from the requests as well).
# This lets "dark seagreen" and "dark sea green" both match the same
# color "darkseagreen".
# Fix: the CSS3 name 'mediumvioletred' was misspelled 'mediumvioletredred',
# making the real color name unresolvable.  The correct key is added and
# the misspelled one kept as a backward-compatible alias.
COLORS = {
    'aliceblue': (240, 248, 255),
    'antiquewhite': (250, 235, 215),
    'aqua': (0, 255, 255),
    'aquamarine': (127, 255, 212),
    'azure': (240, 255, 255),
    'beige': (245, 245, 220),
    'bisque': (255, 228, 196),
    'black': (0, 0, 0),
    'blanchedalmond': (255, 235, 205),
    'blue': (0, 0, 255),
    'blueviolet': (138, 43, 226),
    'brown': (165, 42, 42),
    'burlywood': (222, 184, 135),
    'cadetblue': (95, 158, 160),
    'chartreuse': (127, 255, 0),
    'chocolate': (210, 105, 30),
    'coral': (255, 127, 80),
    'cornflowerblue': (100, 149, 237),
    'cornsilk': (255, 248, 220),
    'crimson': (220, 20, 60),
    'cyan': (0, 255, 255),
    'darkblue': (0, 0, 139),
    'darkcyan': (0, 139, 139),
    'darkgoldenrod': (184, 134, 11),
    'darkgray': (169, 169, 169),
    'darkgreen': (0, 100, 0),
    'darkgrey': (169, 169, 169),
    'darkkhaki': (189, 183, 107),
    'darkmagenta': (139, 0, 139),
    'darkolivegreen': (85, 107, 47),
    'darkorange': (255, 140, 0),
    'darkorchid': (153, 50, 204),
    'darkred': (139, 0, 0),
    'darksalmon': (233, 150, 122),
    'darkseagreen': (143, 188, 143),
    'darkslateblue': (72, 61, 139),
    'darkslategray': (47, 79, 79),
    'darkslategrey': (47, 79, 79),
    'darkturquoise': (0, 206, 209),
    'darkviolet': (148, 0, 211),
    'deeppink': (255, 20, 147),
    'deepskyblue': (0, 191, 255),
    'dimgray': (105, 105, 105),
    'dimgrey': (105, 105, 105),
    'dodgerblue': (30, 144, 255),
    'firebrick': (178, 34, 34),
    'floralwhite': (255, 250, 240),
    'forestgreen': (34, 139, 34),
    'fuchsia': (255, 0, 255),
    'gainsboro': (220, 220, 220),
    'ghostwhite': (248, 248, 255),
    'gold': (255, 215, 0),
    'goldenrod': (218, 165, 32),
    'gray': (128, 128, 128),
    'green': (0, 128, 0),
    'greenyellow': (173, 255, 47),
    'grey': (128, 128, 128),
    'honeydew': (240, 255, 240),
    'hotpink': (255, 105, 180),
    'indianred': (205, 92, 92),
    'indigo': (75, 0, 130),
    'ivory': (255, 255, 240),
    'khaki': (240, 230, 140),
    'lavender': (230, 230, 250),
    'lavenderblush': (255, 240, 245),
    'lawngreen': (124, 252, 0),
    'lemonchiffon': (255, 250, 205),
    'lightblue': (173, 216, 230),
    'lightcoral': (240, 128, 128),
    'lightcyan': (224, 255, 255),
    'lightgoldenrodyellow': (250, 250, 210),
    'lightgray': (211, 211, 211),
    'lightgreen': (144, 238, 144),
    'lightgrey': (211, 211, 211),
    'lightpink': (255, 182, 193),
    'lightsalmon': (255, 160, 122),
    'lightseagreen': (32, 178, 170),
    'lightskyblue': (135, 206, 250),
    'lightslategray': (119, 136, 153),
    'lightslategrey': (119, 136, 153),
    'lightsteelblue': (176, 196, 222),
    'lightyellow': (255, 255, 224),
    'lime': (0, 255, 0),
    'limegreen': (50, 205, 50),
    'linen': (250, 240, 230),
    'magenta': (255, 0, 255),
    'maroon': (128, 0, 0),
    'mediumaquamarine': (102, 205, 170),
    'mediumblue': (0, 0, 205),
    'mediumorchid': (186, 85, 211),
    'mediumpurple': (147, 112, 219),
    'mediumseagreen': (60, 179, 113),
    'mediumslateblue': (123, 104, 238),
    'mediumspringgreen': (0, 250, 154),
    'mediumturquoise': (72, 209, 204),
    'mediumvioletred': (199, 21, 133),
    # Misspelled legacy key kept for backward compatibility.
    'mediumvioletredred': (199, 21, 133),
    'midnightblue': (25, 25, 112),
    'mintcream': (245, 255, 250),
    'mistyrose': (255, 228, 225),
    'moccasin': (255, 228, 181),
    'navajowhite': (255, 222, 173),
    'navy': (0, 0, 128),
    'navyblue': (0, 0, 128),
    'oldlace': (253, 245, 230),
    'olive': (128, 128, 0),
    'olivedrab': (107, 142, 35),
    'orange': (255, 165, 0),
    'orangered': (255, 69, 0),
    'orchid': (218, 112, 214),
    'palegoldenrod': (238, 232, 170),
    'palegreen': (152, 251, 152),
    'paleturquoise': (175, 238, 238),
    'palevioletred': (219, 112, 147),
    'papayawhip': (255, 239, 213),
    'peachpuff': (255, 218, 185),
    'peru': (205, 133, 63),
    'pink': (255, 192, 203),
    'plum': (221, 160, 221),
    'powderblue': (176, 224, 230),
    'purple': (128, 0, 128),
    'red': (255, 0, 0),
    'rosybrown': (188, 143, 143),
    'royalblue': (65, 105, 225),
    'saddlebrown': (139, 69, 19),
    'salmon': (250, 128, 114),
    'sandybrown': (244, 164, 96),
    'seagreen': (46, 139, 87),
    'seashell': (255, 245, 238),
    'sienna': (160, 82, 45),
    'silver': (192, 192, 192),
    'skyblue': (135, 206, 235),
    'slateblue': (106, 90, 205),
    'slategray': (112, 128, 144),
    'slategrey': (112, 128, 144),
    'snow': (255, 250, 250),
    'springgreen': (0, 255, 127),
    'steelblue': (70, 130, 180),
    'tan': (210, 180, 140),
    'teal': (0, 128, 128),
    'thistle': (216, 191, 216),
    'tomato': (255, 99, 71),
    'turquoise': (64, 224, 208),
    'violet': (238, 130, 238),
    'wheat': (245, 222, 179),
    'white': (255, 255, 255),
    'whitesmoke': (245, 245, 245),
    'yellow': (255, 255, 0),
    'yellowgreen': (154, 205, 50),
}
@attr.s()
class XYPoint:
    """Represents a CIE 1931 XY coordinate pair."""
    # x chromaticity coordinate (typically in [0, 1])
    x = attr.ib(type=float)
    # y chromaticity coordinate (typically in [0, 1])
    y = attr.ib(type=float)
@attr.s()
class GamutType:
    """Represents the Gamut of a light."""
    # ColorGamut = gamut(xypoint(xR,yR),xypoint(xG,yG),xypoint(xB,yB))
    # Corner points of the gamut triangle in CIE 1931 xy space.
    red = attr.ib(type=XYPoint)
    green = attr.ib(type=XYPoint)
    blue = attr.ib(type=XYPoint)
def color_name_to_rgb(color_name: str) -> Tuple[int, int, int]:
    """Convert a CSS3 color name to an RGB tuple.

    Raises ValueError if the name is unknown.  (The previous docstring
    incorrectly said a hex value is returned.)
    """
    # COLORS map has no spaces in it, so make the color_name have no
    # spaces in it as well for matching purposes
    rgb_value = COLORS.get(color_name.replace(' ', '').lower())
    if not rgb_value:
        raise ValueError('Unknown color')
    return rgb_value
# pylint: disable=invalid-name
def color_RGB_to_xy(iR: int, iG: int, iB: int,
                    Gamut: Optional[GamutType] = None) -> Tuple[float, float]:
    """Convert from RGB color to XY color."""
    # Drop the brightness component of the full conversion.
    x, y, _brightness = color_RGB_to_xy_brightness(iR, iG, iB, Gamut)
    return x, y
# Taken from:
# http://www.developers.meethue.com/documentation/color-conversions-rgb-xy
# License: Code is given as is. Use at your own risk and discretion.
# pylint: disable=invalid-name
def color_RGB_to_xy_brightness(
        iR: int, iG: int, iB: int,
        Gamut: Optional[GamutType] = None) -> Tuple[float, float, int]:
    """Convert from RGB color to XY color."""
    if iR + iG + iB == 0:
        # Pure black has no defined chromaticity; report the origin.
        return 0.0, 0.0, 0

    def _gamma(channel: float) -> float:
        # sRGB inverse companding (gamma correction).
        if channel > 0.04045:
            return pow((channel + 0.055) / (1.0 + 0.055), 2.4)
        return channel / 12.92

    R = _gamma(iR / 255)
    G = _gamma(iG / 255)
    B = _gamma(iB / 255)

    # Wide RGB D65 conversion formula
    X = R * 0.664511 + G * 0.154324 + B * 0.162028
    Y = R * 0.283881 + G * 0.668433 + B * 0.047685
    Z = R * 0.000088 + G * 0.072310 + B * 0.986039

    # Convert XYZ to xy chromaticity coordinates.
    total = X + Y + Z
    x = X / total
    y = Y / total

    # Brightness derives from luminance Y, capped at 1.
    brightness = round(min(Y, 1) * 255)

    # Clamp the xy point into the lamp's gamut when one is provided.
    if Gamut and not check_point_in_lamps_reach((x, y), Gamut):
        x, y = get_closest_point_to_point((x, y), Gamut)

    return round(x, 3), round(y, 3), brightness
def color_xy_to_RGB(
        vX: float, vY: float,
        Gamut: Optional[GamutType] = None) -> Tuple[int, int, int]:
    """Convert from XY to a normalized RGB."""
    # Assume full brightness (255) for the conversion.
    return color_xy_brightness_to_RGB(vX, vY, 255, Gamut)
# Converted to Python from Obj-C, original source from:
# http://www.developers.meethue.com/documentation/color-conversions-rgb-xy
def color_xy_brightness_to_RGB(
        vX: float, vY: float, ibrightness: int,
        Gamut: Optional[GamutType] = None) -> Tuple[int, int, int]:
    """Convert from XYZ to RGB."""
    # Pull the xy point into the lamp's gamut first, when one is given.
    if Gamut and not check_point_in_lamps_reach((vX, vY), Gamut):
        vX, vY = get_closest_point_to_point((vX, vY), Gamut)

    brightness = ibrightness / 255.
    if brightness == 0.0:
        return (0, 0, 0)

    Y = brightness
    if vY == 0.0:
        # Guard against division by zero below.
        vY += 0.00000000001
    X = (Y / vY) * vX
    Z = (Y / vY) * (1 - vX - vY)

    # Convert to RGB using Wide RGB D65 conversion.
    r = X * 1.656492 - Y * 0.354851 - Z * 0.255038
    g = -X * 0.707196 + Y * 1.655397 + Z * 0.036152
    b = X * 0.051713 - Y * 0.121364 + Z * 1.011530

    def _reverse_gamma(channel: float) -> float:
        # Apply reverse (sRGB) gamma correction.
        if channel <= 0.0031308:
            return 12.92 * channel
        return (1.0 + 0.055) * pow(channel, (1.0 / 2.4)) - 0.055

    r, g, b = (_reverse_gamma(c) for c in (r, g, b))

    # Bring all negative components to zero.
    r, g, b = (max(0, c) for c in (r, g, b))

    # If one component is greater than 1, weight components by that value.
    max_component = max(r, g, b)
    if max_component > 1:
        r, g, b = (c / max_component for c in (r, g, b))

    ir, ig, ib = (int(c * 255) for c in (r, g, b))
    return (ir, ig, ib)
def color_hsb_to_RGB(fH: float, fS: float, fB: float) -> Tuple[int, int, int]:
    """Convert a hsb into its rgb representation."""
    if fS == 0.0:
        # Zero saturation is a pure gray level.
        gray = int(fB * 255)
        return gray, gray, gray
    sector = fH / 60
    frac = sector - float(math.floor(sector))
    p = fB * (1 - fS)
    q = fB * (1 - fS * frac)
    t = fB * (1 - (fS * (1 - frac)))
    # Per-sector (r, g, b) component selection of the hue circle.
    components_by_sector = {
        0: (fB, t, p),
        1: (q, fB, p),
        2: (p, fB, t),
        3: (p, q, fB),
        4: (t, p, fB),
        5: (fB, p, q),
    }
    r, g, b = components_by_sector.get(int(sector), (0, 0, 0))
    return int(r * 255), int(g * 255), int(b * 255)
def color_RGB_to_hsv(
        iR: float, iG: float, iB: float) -> Tuple[float, float, float]:
    """Convert an rgb color to its hsv representation.
    Hue is scaled 0-360
    Sat is scaled 0-100
    Val is scaled 0-100
    """
    hue, sat, val = colorsys.rgb_to_hsv(iR / 255.0, iG / 255.0, iB / 255.0)
    return round(hue * 360, 3), round(sat * 100, 3), round(val * 100, 3)
def color_RGB_to_hs(iR: float, iG: float, iB: float) -> Tuple[float, float]:
    """Convert an rgb color to its hs representation."""
    hue, sat, _val = color_RGB_to_hsv(iR, iG, iB)
    return hue, sat
def color_hsv_to_RGB(iH: float, iS: float, iV: float) -> Tuple[int, int, int]:
    """Convert an hsv color into its rgb representation.
    Hue is scaled 0-360
    Sat is scaled 0-100
    Val is scaled 0-100
    """
    red, green, blue = colorsys.hsv_to_rgb(iH / 360, iS / 100, iV / 100)
    return (int(red * 255), int(green * 255), int(blue * 255))
def color_hs_to_RGB(iH: float, iS: float) -> Tuple[int, int, int]:
    """Convert an hsv color into its rgb representation."""
    # Full value (brightness) is assumed.
    return color_hsv_to_RGB(iH, iS, 100)
def color_xy_to_hs(vX: float, vY: float,
                   Gamut: Optional[GamutType] = None) -> Tuple[float, float]:
    """Convert an xy color to its hs representation."""
    rgb = color_xy_to_RGB(vX, vY, Gamut)
    hue, sat, _val = color_RGB_to_hsv(*rgb)
    return hue, sat
def color_hs_to_xy(iH: float, iS: float,
                   Gamut: Optional[GamutType] = None) -> Tuple[float, float]:
    """Convert an hs color to its xy representation."""
    rgb = color_hs_to_RGB(iH, iS)
    return color_RGB_to_xy(*rgb, Gamut)
def _match_max_scale(input_colors: Tuple, output_colors: Tuple) -> Tuple:
"""Match the maximum value of the output to the input."""
max_in = max(input_colors)
max_out = max(output_colors)
if max_out == 0:
factor = 0.0
else:
factor = max_in / max_out
return tuple(int(round(i * factor)) for i in output_colors)
def color_rgb_to_rgbw(r: int, g: int, b: int) -> Tuple[int, int, int, int]:
    """Convert an rgb color to an rgbw representation."""
    # The white channel takes over the common (minimum) part of rgb;
    # what remains in each channel is the colored portion.
    white = min(r, g, b)
    rgbw = (r - white, g - white, b - white, white)
    # Rescale so the brightest output channel matches the brightest input.
    return _match_max_scale((r, g, b), rgbw)  # type: ignore
def color_rgbw_to_rgb(r: int, g: int, b: int, w: int) -> Tuple[int, int, int]:
    """Convert an rgbw color to an rgb representation."""
    # Fold the white channel back into each color channel, then rescale
    # so the result cannot overflow the input's maximum.
    return _match_max_scale((r, g, b, w), (r + w, g + w, b + w))  # type: ignore
def color_rgb_to_hex(r: int, g: int, b: int) -> str:
    """Return a hex color string from RGB component values.

    (The previous docstring described the inverse conversion.)
    Components are rounded before formatting.
    """
    return '{0:02x}{1:02x}{2:02x}'.format(round(r), round(g), round(b))
def rgb_hex_to_rgb_list(hex_string: str) -> List[int]:
    """Return an RGB color value list from a hex color string."""
    # Each of the three channels occupies one third of the string
    # (2 chars for 8-bit colors, 4 chars for 16-bit colors, ...).
    chunk = len(hex_string) // 3
    return [int(hex_string[start:start + chunk], 16)
            for start in range(0, len(hex_string), chunk)]
def color_temperature_to_hs(
        color_temperature_kelvin: float) -> Tuple[float, float]:
    """Return an hs color from a color temperature in Kelvin."""
    rgb = color_temperature_to_rgb(color_temperature_kelvin)
    return color_RGB_to_hs(*rgb)
def color_temperature_to_rgb(
        color_temperature_kelvin: float) -> Tuple[float, float, float]:
    """
    Return an RGB color from a color temperature in Kelvin.
    This is a rough approximation based on the formula provided by T. Helland
    http://www.tannerhelland.com/4435/convert-temperature-rgb-algorithm-code/
    """
    # Clamp the input into the supported 1000..40000 K range.
    kelvin = min(max(color_temperature_kelvin, 1000), 40000)
    tmp_internal = kelvin / 100.0
    return (_get_red(tmp_internal),
            _get_green(tmp_internal),
            _get_blue(tmp_internal))
def _bound(color_component: float, minimum: float = 0,
maximum: float = 255) -> float:
"""
Bound the given color component value between the given min and max values.
The minimum and maximum values will be included in the valid output.
i.e. Given a color_component of 0 and a minimum of 10, the returned value
will be 10.
"""
color_component_out = max(color_component, minimum)
return min(color_component_out, maximum)
def _get_red(temperature: float) -> float:
    """Get the red component of the temperature in RGB space."""
    if temperature <= 66:
        return 255
    return _bound(329.698727446 * math.pow(temperature - 60, -0.1332047592))
def _get_green(temperature: float) -> float:
    """Get the green component of the given color temp in RGB space."""
    green = (99.4708025861 * math.log(temperature) - 161.1195681661
             if temperature <= 66
             else 288.1221695283 * math.pow(temperature - 60, -0.0755148492))
    return _bound(green)
def _get_blue(temperature: float) -> float:
"""Get the blue component of the given color temperature in RGB space."""
if temperature >= 66:
return 255
if temperature <= 19:
return 0
blue = 138.5177312231 * math.log(temperature - 10) - 305.0447927307
return _bound(blue)
def color_temperature_mired_to_kelvin(mired_temperature: float) -> float:
    """Convert absolute mired shift to degrees kelvin."""
    # Mired is the reciprocal megakelvin; truncate down to a whole degree.
    kelvin = 1000000 / mired_temperature
    return math.floor(kelvin)
def color_temperature_kelvin_to_mired(kelvin_temperature: float) -> float:
    """Convert degrees kelvin to mired shift."""
    # The kelvin<->mired mapping is its own inverse (1e6 / value).
    mired = 1000000 / kelvin_temperature
    return math.floor(mired)
# The following 5 functions are adapted from rgbxy provided by Benjamin Knight
# License: The MIT License (MIT), 2014.
# https://github.com/benknight/hue-python-rgb-converter
def cross_product(p1: XYPoint, p2: XYPoint) -> float:
    """Calculate the cross product of two XYPoints."""
    # 2D cross product (z component of the 3D cross product).
    return float(p1.x * p2.y - p2.x * p1.y)
def get_distance_between_two_points(one: XYPoint, two: XYPoint) -> float:
    """Calculate the distance between two XYPoints."""
    # Plain Euclidean distance in CIE xy space.
    delta_x = one.x - two.x
    delta_y = one.y - two.y
    return math.sqrt(delta_x * delta_x + delta_y * delta_y)
def get_closest_point_to_line(A: XYPoint, B: XYPoint, P: XYPoint) -> XYPoint:
    """
    Find the closest point from P to a line defined by A and B.

    This point will be reproducible by the lamp
    as it is on the edge of the gamut.
    """
    # Project vector A->P onto A->B and clamp the parameter to the segment.
    to_point = XYPoint(P.x - A.x, P.y - A.y)
    segment = XYPoint(B.x - A.x, B.y - A.y)
    segment_length_sq = segment.x * segment.x + segment.y * segment.y
    projection = to_point.x * segment.x + to_point.y * segment.y
    t = min(1.0, max(0.0, projection / segment_length_sq))
    return XYPoint(A.x + segment.x * t, A.y + segment.y * t)
def get_closest_point_to_point(xy_tuple: Tuple[float, float],
                               Gamut: GamutType) -> Tuple[float, float]:
    """
    Get the closest matching color within the gamut of the light.

    Should only be used if the supplied color is outside of the color gamut.
    """
    xy_point = XYPoint(xy_tuple[0], xy_tuple[1])
    # Closest point on each edge of the CIE 1931 'triangle'.
    candidates = (
        get_closest_point_to_line(Gamut.red, Gamut.green, xy_point),
        get_closest_point_to_line(Gamut.blue, Gamut.red, xy_point),
        get_closest_point_to_line(Gamut.green, Gamut.blue, xy_point),
    )
    # Keep whichever edge point is nearest to the requested color.  min()
    # returns the first candidate on ties, matching the AB/AC/BC preference
    # order of the explicit comparison chain it replaces.
    closest_point = min(
        candidates,
        key=lambda point: get_distance_between_two_points(xy_point, point),
    )
    return (closest_point.x, closest_point.y)
def check_point_in_lamps_reach(p: Tuple[float, float],
                               Gamut: GamutType) -> bool:
    """Check if the provided XYPoint can be recreated by a Hue lamp."""
    # Express p in barycentric-style coordinates (s, t) relative to the
    # triangle spanned by the red, green and blue gamut corners.
    v1 = XYPoint(Gamut.green.x - Gamut.red.x, Gamut.green.y - Gamut.red.y)
    v2 = XYPoint(Gamut.blue.x - Gamut.red.x, Gamut.blue.y - Gamut.red.y)
    q = XYPoint(p[0] - Gamut.red.x, p[1] - Gamut.red.y)
    denominator = cross_product(v1, v2)
    s = cross_product(q, v2) / denominator
    t = cross_product(v1, q) / denominator
    # Inside the triangle iff both coordinates are non-negative and their
    # sum does not exceed one.
    return s >= 0.0 and t >= 0.0 and (s + t) <= 1.0
def check_valid_gamut(Gamut: GamutType) -> bool:
    """Check if the supplied gamut is valid."""
    # The three primaries must not be collinear: the cross product of the
    # red->green and red->blue edge vectors has to be meaningfully non-zero.
    v1 = XYPoint(Gamut.green.x - Gamut.red.x, Gamut.green.y - Gamut.red.y)
    v2 = XYPoint(Gamut.blue.x - Gamut.red.x, Gamut.blue.y - Gamut.red.y)
    not_on_line = cross_product(v1, v2) > 0.0001
    # All six coordinates of the gamut must lie within the CIE [0, 1] range.
    coords_valid = all(
        0 <= coordinate <= 1
        for point in (Gamut.red, Gamut.green, Gamut.blue)
        for coordinate in (point.x, point.y)
    )
    return not_on_line and coords_valid
| |
#
# Copyright 2015 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Tests for USEquityPricingLoader and related classes.
"""
from nose_parameterized import parameterized
from numpy import (
arange,
datetime64,
float64,
ones,
uint32,
)
from numpy.testing import (
assert_allclose,
assert_array_equal,
)
from pandas import (
concat,
DataFrame,
Int64Index,
Timestamp,
)
from pandas.util.testing import assert_frame_equal
from toolz.curried.operator import getitem
from zipline.lib.adjustment import Float64Multiply
from zipline.pipeline.loaders.synthetic import (
NullAdjustmentReader,
make_bar_data,
expected_bar_values_2d,
)
from zipline.pipeline.loaders.equity_pricing_loader import (
USEquityPricingLoader,
)
from zipline.errors import WindowLengthTooLong
from zipline.pipeline.data import USEquityPricing
from zipline.testing import (
seconds_to_timestamp,
str_to_seconds,
MockDailyBarReader,
)
from zipline.testing.fixtures import (
WithAdjustmentReader,
ZiplineTestCase,
)
# Test calendar ranges over the month of June 2015
# June 2015
# Mo Tu We Th Fr Sa Su
# 1 2 3 4 5 6 7
# 8 9 10 11 12 13 14
# 15 16 17 18 19 20 21
# 22 23 24 25 26 27 28
# 29 30
# Boundaries of the synthetic trading calendar and of the query window the
# tests slice out of it (see the June 2015 calendar drawn above).
TEST_CALENDAR_START = Timestamp('2015-06-01', tz='UTC')
TEST_CALENDAR_STOP = Timestamp('2015-06-30', tz='UTC')
TEST_QUERY_START = Timestamp('2015-06-10', tz='UTC')
TEST_QUERY_STOP = Timestamp('2015-06-19', tz='UTC')
# One asset for each of the cases enumerated in load_raw_arrays_from_bcolz.
EQUITY_INFO = DataFrame(
    [
        # 1) The equity's trades start and end before query.
        {'start_date': '2015-06-01', 'end_date': '2015-06-05'},
        # 2) The equity's trades start and end after query.
        {'start_date': '2015-06-22', 'end_date': '2015-06-30'},
        # 3) The equity's data covers all dates in range.
        {'start_date': '2015-06-02', 'end_date': '2015-06-30'},
        # 4) The equity's trades start before the query start, but stop
        # before the query end.
        {'start_date': '2015-06-01', 'end_date': '2015-06-15'},
        # 5) The equity's trades start and end during the query.
        {'start_date': '2015-06-12', 'end_date': '2015-06-18'},
        # 6) The equity's trades start during the query, but extend through
        # the whole query.
        {'start_date': '2015-06-15', 'end_date': '2015-06-25'},
    ],
    index=arange(1, 7),  # sids 1 through 6
    columns=['start_date', 'end_date'],
).astype(datetime64)
# One symbol per sid, 'A' through 'F', in sid order.
EQUITY_INFO['symbol'] = [chr(ord('A') + n) for n in range(len(EQUITY_INFO))]
TEST_QUERY_ASSETS = EQUITY_INFO.index
# ADJUSTMENTS use the following scheme to indicate information about the value
# upon inspection.
#
# 1s place is the equity
#
# 0.1s place is the action type, with:
#
# splits, 1
# mergers, 2
# dividends, 3
#
# 0.001s is the date
# Split adjustments.  Ratios follow the sid/action/date encoding documented
# above (e.g. 3.112 == sid 3, action 1 (split), day 12).
SPLITS = DataFrame(
    [
        # Before query range, should be excluded.
        {'effective_date': str_to_seconds('2015-06-03'),
         'ratio': 1.103,
         'sid': 1},
        # First day of query range, should be excluded.
        {'effective_date': str_to_seconds('2015-06-10'),
         'ratio': 3.110,
         'sid': 3},
        # Third day of query range, should have last_row of 2
        {'effective_date': str_to_seconds('2015-06-12'),
         'ratio': 3.112,
         'sid': 3},
        # After query range, should be excluded.
        {'effective_date': str_to_seconds('2015-06-21'),
         'ratio': 6.121,
         'sid': 6},
        # Another action in query range, should have last_row of 1
        {'effective_date': str_to_seconds('2015-06-11'),
         'ratio': 3.111,
         'sid': 3},
        # Last day of range. Should have last_row of 7
        {'effective_date': str_to_seconds('2015-06-19'),
         'ratio': 3.119,
         'sid': 3},
    ],
    columns=['effective_date', 'ratio', 'sid'],
)
# Merger adjustments.  Ratios use action digit 2 in the encoding documented
# above (e.g. 3.212 == sid 3, action 2 (merger), day 12).
MERGERS = DataFrame(
    [
        # Before query range, should be excluded.
        {'effective_date': str_to_seconds('2015-06-03'),
         'ratio': 1.203,
         'sid': 1},
        # First day of query range, should be excluded.
        {'effective_date': str_to_seconds('2015-06-10'),
         'ratio': 3.210,
         'sid': 3},
        # Third day of query range, should have last_row of 2
        {'effective_date': str_to_seconds('2015-06-12'),
         'ratio': 3.212,
         'sid': 3},
        # After query range, should be excluded.
        {'effective_date': str_to_seconds('2015-06-25'),
         'ratio': 6.225,
         'sid': 6},
        # Another action in query range, should have last_row of 2
        {'effective_date': str_to_seconds('2015-06-12'),
         'ratio': 4.212,
         'sid': 4},
        # Last day of range. Should have last_row of 7
        {'effective_date': str_to_seconds('2015-06-19'),
         'ratio': 3.219,
         'sid': 3},
    ],
    columns=['effective_date', 'ratio', 'sid'],
)
# Raw dividend payouts (dollar amounts, not ratios).  The adjustment writer
# presumably converts these into the multiplicative ratios listed in
# DIVIDENDS_EXPECTED below, using prices from MockDailyBarReader — the
# loader tests compare against DIVIDENDS_EXPECTED, not this table.
DIVIDENDS = DataFrame(
    [
        # Before query range, should be excluded.
        {'declared_date': Timestamp('2015-05-01', tz='UTC').to_datetime64(),
         'ex_date': Timestamp('2015-06-01', tz='UTC').to_datetime64(),
         'record_date': Timestamp('2015-06-03', tz='UTC').to_datetime64(),
         'pay_date': Timestamp('2015-06-05', tz='UTC').to_datetime64(),
         'amount': 90.0,
         'sid': 1},
        # First day of query range, should be excluded.
        {'declared_date': Timestamp('2015-06-01', tz='UTC').to_datetime64(),
         'ex_date': Timestamp('2015-06-10', tz='UTC').to_datetime64(),
         'record_date': Timestamp('2015-06-15', tz='UTC').to_datetime64(),
         'pay_date': Timestamp('2015-06-17', tz='UTC').to_datetime64(),
         'amount': 80.0,
         'sid': 3},
        # Third day of query range, should have last_row of 2
        {'declared_date': Timestamp('2015-06-01', tz='UTC').to_datetime64(),
         'ex_date': Timestamp('2015-06-12', tz='UTC').to_datetime64(),
         'record_date': Timestamp('2015-06-15', tz='UTC').to_datetime64(),
         'pay_date': Timestamp('2015-06-17', tz='UTC').to_datetime64(),
         'amount': 70.0,
         'sid': 3},
        # After query range, should be excluded.
        {'declared_date': Timestamp('2015-06-01', tz='UTC').to_datetime64(),
         'ex_date': Timestamp('2015-06-25', tz='UTC').to_datetime64(),
         'record_date': Timestamp('2015-06-28', tz='UTC').to_datetime64(),
         'pay_date': Timestamp('2015-06-30', tz='UTC').to_datetime64(),
         'amount': 60.0,
         'sid': 6},
        # Another action in query range, should have last_row of 3
        {'declared_date': Timestamp('2015-06-01', tz='UTC').to_datetime64(),
         'ex_date': Timestamp('2015-06-15', tz='UTC').to_datetime64(),
         'record_date': Timestamp('2015-06-18', tz='UTC').to_datetime64(),
         'pay_date': Timestamp('2015-06-20', tz='UTC').to_datetime64(),
         'amount': 50.0,
         'sid': 3},
        # Last day of range. Should have last_row of 7
        {'declared_date': Timestamp('2015-06-01', tz='UTC').to_datetime64(),
         'ex_date': Timestamp('2015-06-19', tz='UTC').to_datetime64(),
         'record_date': Timestamp('2015-06-22', tz='UTC').to_datetime64(),
         'pay_date': Timestamp('2015-06-30', tz='UTC').to_datetime64(),
         'amount': 40.0,
         'sid': 3},
    ],
    columns=['declared_date',
             'ex_date',
             'record_date',
             'pay_date',
             'amount',
             'sid'],
)
# Expected effective-date/ratio rows for the dividends above once they have
# been written to the adjustments db; used directly by expected_adjustments()
# alongside SPLITS and MERGERS.
DIVIDENDS_EXPECTED = DataFrame(
    [
        # Before query range, should be excluded.
        {'effective_date': str_to_seconds('2015-06-01'),
         'ratio': 0.1,
         'sid': 1},
        # First day of query range, should be excluded.
        {'effective_date': str_to_seconds('2015-06-10'),
         'ratio': 0.20,
         'sid': 3},
        # Third day of query range, should have last_row of 2
        {'effective_date': str_to_seconds('2015-06-12'),
         'ratio': 0.30,
         'sid': 3},
        # After query range, should be excluded.
        {'effective_date': str_to_seconds('2015-06-25'),
         'ratio': 0.40,
         'sid': 6},
        # Another action in query range, should have last_row of 3
        {'effective_date': str_to_seconds('2015-06-15'),
         'ratio': 0.50,
         'sid': 3},
        # Last day of range. Should have last_row of 7
        {'effective_date': str_to_seconds('2015-06-19'),
         'ratio': 0.60,
         'sid': 3},
    ],
    columns=['effective_date', 'ratio', 'sid'],
)
class USEquityPricingLoaderTestCase(WithAdjustmentReader,
                                    ZiplineTestCase):
    """Integration tests for USEquityPricingLoader and the adjustment reader.

    The fixture writes the synthetic pricing and adjustment tables defined
    at module scope into a bcolz daily-bar store and a SQLite adjustments
    database, then checks that loads through the pipeline loader apply the
    expected adjustments.
    """

    START_DATE = TEST_CALENDAR_START
    END_DATE = TEST_CALENDAR_STOP
    asset_ids = 1, 2, 3

    @classmethod
    def make_equity_info(cls):
        return EQUITY_INFO

    @classmethod
    def make_splits_data(cls):
        return SPLITS

    @classmethod
    def make_mergers_data(cls):
        return MERGERS

    @classmethod
    def make_dividends_data(cls):
        return DIVIDENDS

    @classmethod
    def make_adjustment_writer_equity_daily_bar_reader(cls):
        return MockDailyBarReader()

    @classmethod
    def make_equity_daily_bar_data(cls):
        return make_bar_data(
            EQUITY_INFO,
            cls.equity_daily_bar_days,
        )

    @classmethod
    def init_class_fixtures(cls):
        super(USEquityPricingLoaderTestCase, cls).init_class_fixtures()
        cls.assets = TEST_QUERY_ASSETS
        cls.asset_info = EQUITY_INFO

    def test_input_sanity(self):
        # Ensure that the input data doesn't contain adjustments during periods
        # where the corresponding asset didn't exist.
        for table in SPLITS, MERGERS:
            for eff_date_secs, _, sid in table.itertuples(index=False):
                eff_date = Timestamp(eff_date_secs, unit='s')
                # FIX: DataFrame.ix was deprecated in pandas 0.20 and removed
                # in pandas 1.0; .loc performs the same label-based lookup on
                # this integer-labelled index.
                asset_start, asset_end = EQUITY_INFO.loc[
                    sid, ['start_date', 'end_date']
                ]
                self.assertGreaterEqual(eff_date, asset_start)
                self.assertLessEqual(eff_date, asset_end)

    def calendar_days_between(self, start_date, end_date, shift=0):
        """Return the calendar days in [start_date, end_date], offset by
        `shift` sessions.

        Raises KeyError if the shift would move the window before the start
        of the test calendar.
        """
        slice_ = self.equity_daily_bar_days.slice_indexer(start_date, end_date)
        start = slice_.start + shift
        stop = slice_.stop + shift
        if start < 0:
            raise KeyError(start_date, shift)
        return self.equity_daily_bar_days[start:stop]

    def expected_adjustments(self, start_date, end_date):
        """Build the expected price and volume adjustment mappings
        (row-offset -> [Float64Multiply]) for the given query window.
        """
        price_adjustments = {}
        volume_adjustments = {}
        query_days = self.calendar_days_between(start_date, end_date)
        start_loc = query_days.get_loc(start_date)
        for table in SPLITS, MERGERS, DIVIDENDS_EXPECTED:
            for eff_date_secs, ratio, sid in table.itertuples(index=False):
                eff_date = Timestamp(eff_date_secs, unit='s', tz='UTC')
                # Ignore adjustments outside the query bounds.
                if not (start_date <= eff_date <= end_date):
                    continue
                eff_date_loc = query_days.get_loc(eff_date)
                delta = eff_date_loc - start_loc
                # Pricing adjustments should be applied on the date
                # corresponding to the effective date of the input data. They
                # should affect all rows **before** the effective date.
                price_adjustments.setdefault(delta, []).append(
                    Float64Multiply(
                        first_row=0,
                        last_row=delta,
                        first_col=sid - 1,
                        last_col=sid - 1,
                        value=ratio,
                    )
                )
                # Volume is *inversely* affected by *splits only*.
                if table is SPLITS:
                    volume_adjustments.setdefault(delta, []).append(
                        Float64Multiply(
                            first_row=0,
                            last_row=delta,
                            first_col=sid - 1,
                            last_col=sid - 1,
                            value=1.0 / ratio,
                        )
                    )
        return price_adjustments, volume_adjustments

    def _check_adjustments(self, actual, expected):
        """Assert that each expected adjustment list matches the loaded one,
        comparing coordinates exactly and ratios approximately.
        """
        for key in expected:
            actual_list = actual[key]
            for j, adj in enumerate(actual_list):
                exp = expected[key][j]
                self.assertEqual(adj.first_row, exp.first_row)
                self.assertEqual(adj.last_row, exp.last_row)
                self.assertEqual(adj.first_col, exp.first_col)
                self.assertEqual(adj.last_col, exp.last_col)
                assert_allclose(adj.value, exp.value)

    def test_load_adjustments_from_sqlite(self):
        """Adjustments round-tripped through SQLite match the hand-built
        expectations for the query window.
        """
        columns = [USEquityPricing.close, USEquityPricing.volume]
        query_days = self.calendar_days_between(
            TEST_QUERY_START,
            TEST_QUERY_STOP,
        )
        adjustments = self.adjustment_reader.load_adjustments(
            [c.name for c in columns],
            query_days,
            self.assets,
        )
        close_adjustments = adjustments[0]
        volume_adjustments = adjustments[1]
        expected_close_adjustments, expected_volume_adjustments = \
            self.expected_adjustments(TEST_QUERY_START, TEST_QUERY_STOP)
        # The two comparison loops were identical; they now share a helper.
        self._check_adjustments(close_adjustments, expected_close_adjustments)
        self._check_adjustments(volume_adjustments,
                                expected_volume_adjustments)

    # FIX: plain `@parameterized(...)` does not work on unittest.TestCase
    # methods (nose_parameterized requires `parameterized.expand` there,
    # which generates one real test method per parameter tuple).
    @parameterized.expand([(True,), (False,)])
    def test_load_adjustments_to_df(self, convert_dts):
        """unpack_db_to_component_dfs reproduces the raw adjustment tables,
        with dates as datetimes or ints depending on `convert_dts`.
        """
        reader = self.adjustment_reader
        adjustment_dfs = reader.unpack_db_to_component_dfs(
            convert_dates=convert_dts
        )
        name_and_raw = (
            ('splits', SPLITS),
            ('mergers', MERGERS),
            ('dividends', DIVIDENDS_EXPECTED)
        )

        def create_expected_table(df, name):
            # Ratio-style tables store dates as epoch seconds; converting is
            # only needed when the reader was asked to convert.
            expected_df = df.copy()
            if convert_dts:
                for colname in reader._datetime_int_cols[name]:
                    expected_df[colname] = expected_df[colname].astype(
                        'datetime64[s]'
                    )
            return expected_df

        def create_expected_div_table(df, name):
            # DIVIDENDS stores datetime64 columns natively, so the
            # conversion goes the opposite way (to ints) when disabled.
            expected_df = df.copy()
            if not convert_dts:
                for colname in reader._datetime_int_cols[name]:
                    expected_df[colname] = expected_df[colname].astype(
                        'datetime64[s]'
                    ).astype(int)
            return expected_df

        for action_name, raw_tbl in name_and_raw:
            exp = create_expected_table(raw_tbl, action_name)
            assert_frame_equal(
                adjustment_dfs[action_name],
                exp
            )

        # DIVIDENDS is in the opposite form from the rest of the dataframes, so
        # needs to be converted separately.
        div_name = 'dividend_payouts'
        assert_frame_equal(
            adjustment_dfs[div_name],
            create_expected_div_table(DIVIDENDS, div_name)
        )

    def test_read_no_adjustments(self):
        """With a NullAdjustmentReader, loaded windows equal the raw
        (previous-day) baseline values for every window length.
        """
        adjustment_reader = NullAdjustmentReader()
        columns = [USEquityPricing.close, USEquityPricing.volume]
        query_days = self.calendar_days_between(
            TEST_QUERY_START,
            TEST_QUERY_STOP
        )
        # Our expected results for each day are based on values from the
        # previous day.
        shifted_query_days = self.calendar_days_between(
            TEST_QUERY_START,
            TEST_QUERY_STOP,
            shift=-1,
        )
        adjustments = adjustment_reader.load_adjustments(
            [c.name for c in columns],
            query_days,
            self.assets,
        )
        self.assertEqual(adjustments, [{}, {}])
        pricing_loader = USEquityPricingLoader(
            self.bcolz_equity_daily_bar_reader,
            adjustment_reader,
        )
        results = pricing_loader.load_adjusted_array(
            columns,
            dates=query_days,
            assets=self.assets,
            mask=ones((len(query_days), len(self.assets)), dtype=bool),
        )
        closes, volumes = map(getitem(results), columns)
        expected_baseline_closes = expected_bar_values_2d(
            shifted_query_days,
            self.asset_info,
            'close',
        )
        expected_baseline_volumes = expected_bar_values_2d(
            shifted_query_days,
            self.asset_info,
            'volume',
        )
        # AdjustedArrays should yield the same data as the expected baseline.
        for windowlen in range(1, len(query_days) + 1):
            for offset, window in enumerate(closes.traverse(windowlen)):
                assert_array_equal(
                    expected_baseline_closes[offset:offset + windowlen],
                    window,
                )
            for offset, window in enumerate(volumes.traverse(windowlen)):
                assert_array_equal(
                    expected_baseline_volumes[offset:offset + windowlen],
                    window,
                )
        # Verify that we checked up to the longest possible window.
        with self.assertRaises(WindowLengthTooLong):
            closes.traverse(windowlen + 1)
        with self.assertRaises(WindowLengthTooLong):
            volumes.traverse(windowlen + 1)

    def apply_adjustments(self, dates, assets, baseline_values, adjustments):
        """Reference implementation: multiplicatively apply each adjustment
        to all rows at or before its effective date, returning a new array.
        """
        min_date, max_date = dates[[0, -1]]
        # HACK: Simulate the coercion to float64 we do in adjusted_array. This
        # should be removed when AdjustedArray properly supports
        # non-floating-point types.
        orig_dtype = baseline_values.dtype
        values = baseline_values.astype(float64).copy()
        for eff_date_secs, ratio, sid in adjustments.itertuples(index=False):
            eff_date = seconds_to_timestamp(eff_date_secs)
            # Don't apply adjustments that aren't in the current date range.
            if eff_date not in dates:
                continue
            eff_date_loc = dates.get_loc(eff_date)
            asset_col = assets.get_loc(sid)
            # Apply ratio multiplicatively to the asset column on all rows less
            # than or equal adjustment effective date.
            values[:eff_date_loc + 1, asset_col] *= ratio
        return values.astype(orig_dtype)

    def test_read_with_adjustments(self):
        """Loaded windows equal the baseline with all applicable adjustments
        applied: prices get splits/mergers/dividends, volume gets inverse
        splits only.
        """
        columns = [USEquityPricing.high, USEquityPricing.volume]
        query_days = self.calendar_days_between(
            TEST_QUERY_START,
            TEST_QUERY_STOP
        )
        # Our expected results for each day are based on values from the
        # previous day.
        shifted_query_days = self.calendar_days_between(
            TEST_QUERY_START,
            TEST_QUERY_STOP,
            shift=-1,
        )
        pricing_loader = USEquityPricingLoader(
            self.bcolz_equity_daily_bar_reader,
            self.adjustment_reader,
        )
        results = pricing_loader.load_adjusted_array(
            columns,
            dates=query_days,
            assets=Int64Index(arange(1, 7)),
            mask=ones((len(query_days), 6), dtype=bool),
        )
        highs, volumes = map(getitem(results), columns)
        expected_baseline_highs = expected_bar_values_2d(
            shifted_query_days,
            self.asset_info,
            'high',
        )
        expected_baseline_volumes = expected_bar_values_2d(
            shifted_query_days,
            self.asset_info,
            'volume',
        )
        # At each point in time, the AdjustedArrays should yield the baseline
        # with all adjustments up to that date applied.
        for windowlen in range(1, len(query_days) + 1):
            for offset, window in enumerate(highs.traverse(windowlen)):
                baseline = expected_baseline_highs[offset:offset + windowlen]
                baseline_dates = query_days[offset:offset + windowlen]
                expected_adjusted_highs = self.apply_adjustments(
                    baseline_dates,
                    self.assets,
                    baseline,
                    # Apply all adjustments.
                    concat([SPLITS, MERGERS, DIVIDENDS_EXPECTED],
                           ignore_index=True),
                )
                assert_allclose(expected_adjusted_highs, window)
            for offset, window in enumerate(volumes.traverse(windowlen)):
                baseline = expected_baseline_volumes[offset:offset + windowlen]
                baseline_dates = query_days[offset:offset + windowlen]
                # Apply only splits and invert the ratio.
                adjustments = SPLITS.copy()
                adjustments.ratio = 1 / adjustments.ratio
                expected_adjusted_volumes = self.apply_adjustments(
                    baseline_dates,
                    self.assets,
                    baseline,
                    adjustments,
                )
                # FIXME: Make AdjustedArray properly support integral types.
                assert_array_equal(
                    expected_adjusted_volumes,
                    window.astype(uint32),
                )
        # Verify that we checked up to the longest possible window.
        with self.assertRaises(WindowLengthTooLong):
            highs.traverse(windowlen + 1)
        with self.assertRaises(WindowLengthTooLong):
            volumes.traverse(windowlen + 1)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.