code stringlengths 2 1.05M | repo_name stringlengths 5 104 | path stringlengths 4 251 | language stringclasses 1 value | license stringclasses 15 values | size int32 2 1.05M |
|---|---|---|---|---|---|
#!/usr/bin/python
#
# Copyright 2008 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A module to control the application global properties.
This module will manage a singleton object for the PyreRing global
properties. These properties include: root_dir, testdatabase etc.
These properties are stored as a dictionary which is referred through a global
variable and managed by some module level methods in this module.
"""
__author__ = 'mwu@google.com (Mingyu Wu)'
import getpass
import os
import time
from lib import filesystemhandlerextend
# Runtime configuration keys, user can't overwrite through config file.
NON_OVERWRITTEN_KEYS = ['time', 'tester', 'host_name']
class PyreRingConfig(object):
  """A class to store PyreRing runtime config info in a dict.

  This class is used to manage the pyrering related configuration data
  and it will have a dictionary to hold them and pushed to global. It should
  be maintained as a single instance. During the whole test run, this is the
  only one copy of the properties.

  It will contain a dictionary with key value pairs from the config file and
  some extra items generated automatically, namely:

  Automatically set by PyreRing, not user configurable:
    root_dir: PyreRing root directory. PyreRing automatically discovers it.
    host_name: The machine name PyreRing is running on.
    tester: The user account PyreRing is running as.
    time: The time string identifies when pyrering was started.

  Managed by config file only, not through command line:
    log_level: The logging level as defined in Python logging module.
      Default value is INFO.
    skip_setup: If True, PyreRing will skip user setup suite. Default False.
    header_file: User specified report header file which will be inserted
      into the PyreRing report. Default <root_dir>/header_info.txt.
    FATAL_STRING: a string containing comma separated substrings. If any
      substring is found in the test output, the test will fail, regardless
      of the return code of the test.
    default_suite: The name of default test suite, not currently used.

  Managed by config file and user can overwrite through command line options:
    report_dir: the PyreRing report and log directory.
      Default <root_dir>/reports/.
    conf_file: the name of the PyreRing config file with path. If a
      non-absolute path is provided, the actual value will be
      os.path.join(ed) with '<root_dir>/conf'. Default pyrering.conf.
    project_name: The name of a project PyreRing will test on.
    sendmail: a boolean value if PyreRing should send out email report or
      not. Default False. Note: there will be no email if all tests passed
      regardless of this flag.
    email_recipients: comma separated email addresses as email recipients.
      Default is the same as tester.
    log_file: the name of the log file. If a non-absolute path is provided,
      the actual value will be os.path.join(ed) with '<root_dir>/report'.
      Default pyrering.log.
    file_errors: a boolean value that turns on filing the output of each
      non-passing testcase to a separate output file.
    reset: a boolean value user sets from the command line. If true, the
      runtime configuration will replace the existing configuration file.
      It has no effect in the conf file.
  """

  def __init__(self,
               filesystem=filesystemhandlerextend.FileSystemHandlerExtend()):
    # NOTE(review): the default FileSystemHandlerExtend is instantiated once
    # at import time and shared by every instance that relies on the default
    # argument -- assumed stateless; confirm before adding state to it.
    self.settings = {}
    self.filesystem = filesystem

  def _CreateConfig(self):
    """Create a config file based on user config plus default config.

    This method should create a new config file using some runtime
    information.

    Returns:
      None. The constructed info is written to conf_file.
    """
    # Serialize settings as sorted 'key=value' lines, in the style of
    # java.util.Properties files.
    key_list = sorted(self.settings.keys())
    output = ''.join(['%s=%s\n' % (key, self.settings[key])
                      for key in key_list])
    self.filesystem.WriteToFile(self.settings['conf_file'], output)
    # Python 2 print statement; the banner tells the user where the newly
    # generated config file lives.
    print """
***********Attention Please***************************
Either no configuration file was found at: %s
Or a reset option was issued.
Creating a default configuration file.
User can edit it later to change default values at: %s.
******************************************************
""" % (self.settings['conf_file'], self.settings['conf_file'])

  def _ReadConfig(self):
    """Convert the conf_file to a dictionary.

    Returns:
      a dictionary with key value pairs from the conf file.
    """
    settings = {}
    conf_handler = self.filesystem.FileOpenForRead(self.settings['conf_file'])
    for line in conf_handler:
      line = line.strip()
      # Skip blank lines, comment lines, and lines without a '=' separator.
      if (not line) or line.startswith('#') or (not '=' in line):
        continue
      # Split on the first '=' only, so values may themselves contain '='.
      key, value = line.split('=', 1)
      # make it java.util.Properties like property reader.
      # so I have to strip the quotes around the values
      key = key.strip()
      value = value.strip(' \t\r\'"')
      # sendmail, reset and skip_setup should be treated as boolean values,
      # others are treated as strings.
      if key in ['sendmail', 'reset', 'skip_setup']:
        settings[key] = (value.lower().startswith('true') or
                         value.startswith('1'))
      else:
        settings[key] = value
    conf_handler.close()
    # Remove the config we don't need. Most likely they will be generated on
    # the runtime (see NON_OVERWRITTEN_KEYS at module level).
    for key in NON_OVERWRITTEN_KEYS:
      settings.pop(key, None)
    return settings

  def _AddDefaultConfig(self, pyrering_root):
    """Populate the settings dictionary with default values.

    This method will provide a base configuration dictionary for PyreRing.

    Args:
      pyrering_root: path refer to the pyrering root dir.

    Returns:
      None.
    """
    self.settings.update({
        'root_dir': pyrering_root,
        'report_dir': self.filesystem.PathJoin(pyrering_root, 'reports'),
        'conf_file': self.filesystem.PathJoin(pyrering_root,
                                              'conf',
                                              'pyrering.conf'),
        'host_name': self.filesystem.GetHostName(),
        'tester': getpass.getuser(),
        'project_name': '<YOUR PROJECT NAME>',
        'default_suite': 'default_suite',
        'source_dir': '<YOUR TEST SCRIPT TOP DIRECTORY>',
        'sendmail': False,
        'email_recipients': getpass.getuser(),
        'log_file': 'pyrering.log',
        'file_errors': False,
        'reset': False,
        'runner': 'baserunner',
        'FATAL_STRING': '',
        'header_file': 'header_info.txt',
        'skip_setup': False,
        'log_level': 'INFO',
        # A timestamp string to identify the time pyrering is started.
        # The format should be yyymmddHHMM
        'time': time.strftime('%Y%m%d%H%M'),
        })

  def Populate(self, pyrering_root, user_settings):
    """Populate settings dictionary.

    If the conf file exist, it will use user settings update conf file
    settings and update default settings.
    If the conf file doesn't exist, it will use user settings update default
    settings and export as conf file.

    Args:
      pyrering_root: the path of the project root
      user_settings: user settings dictionary

    Returns:
      None. self.settings will have the effective values.
    """
    pyrering_root = self.filesystem.FindAbsPath(pyrering_root)
    # If config file is not set in the user arguments, use the default one:
    # '<pyrering_root>/conf/pyrering.conf' to populate the default
    # dictionary. Create the directory if it doesn't exist.
    if not user_settings.get('conf_file', None):
      conf_path = self.filesystem.PathJoin(pyrering_root, 'conf')
    else:
      conf_path = os.path.dirname(user_settings.get('conf_file'))
    if not self.filesystem.CheckDir(conf_path):
      self.filesystem.MkDir(conf_path)
    self._AddDefaultConfig(pyrering_root)
    self.settings.update(user_settings)
    # if the conf_file exists, read it, else populate the conf file and
    # inform user to examine.
    # Precedence (highest first): command line, conf file, defaults.
    if (not user_settings.get('reset', False) and
        self.filesystem.CheckFile(self.settings['conf_file'])):
      # The user_settings coming from the command line will update the
      # config file settings.
      read_conf_dict = self._ReadConfig()
      read_conf_dict.update(user_settings)
      self.settings.update(read_conf_dict)
    else:
      self._CreateConfig()
    # If after all this settings, the source_dir is still not set, we will
    # temporarily set it as current dir to let user run script from current
    # directory.
    if self.settings['source_dir'] == '<YOUR TEST SCRIPT TOP DIRECTORY>':
      self.settings['source_dir'] = self.filesystem.FindAbsPath('.')
# Module-level singleton: the one and only PyreRingConfig instance for the
# whole PyreRing life cycle. The Init/Update/Reset helpers below operate on it.
GlobalPyreRingConfig = PyreRingConfig()
def Init(pyrering_root, user_settings):
  """Get settings populated.

  Populates the global settings exactly once: if GlobalPyreRingConfig
  already holds values, this call is a no-op.

  Args:
    pyrering_root: the path of the root dir of pyrering.py file
    user_settings: a dictionary populated with settings.

  Returns:
    None.
  """
  if GlobalPyreRingConfig.settings:
    return
  GlobalPyreRingConfig.Populate(pyrering_root, user_settings)
def Update(new_settings):
  """Merge the key/value pairs of new_settings into the global settings."""
  GlobalPyreRingConfig.settings.update(new_settings)
def Reset():
  """Empty the global settings dictionary in place."""
  GlobalPyreRingConfig.settings.clear()
| kdlucas/pyrering | lib/pyreringconfig.py | Python | apache-2.0 | 10,508 |
#
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class lldpparam(base_resource):
    """Configuration for lldp params resource.

    Exposes the NetScaler LLDP global parameters (holdtimetxmult, timer,
    mode) plus the NITRO update/unset/get operations for the resource.

    Note: the generated SDK wrapped every method body in a no-op
    ``try/except Exception as e: raise e`` which only truncated tracebacks
    (Python 2 ``raise e`` semantics); those wrappers are removed here --
    exceptions propagate unchanged.
    """

    def __init__(self):
        self._holdtimetxmult = 0
        self._timer = 0
        self._mode = ""

    @property
    def holdtimetxmult(self):
        """A multiplier for calculating the duration for which the receiving device stores the LLDP information in its database before discarding or removing it. The duration is calculated as the holdtimeTxMult (Holdtime Multiplier) parameter value multiplied by the timer (Timer) parameter value.<br/>Default value: 4<br/>Minimum length = 1<br/>Maximum length = 20.
        """
        return self._holdtimetxmult

    @holdtimetxmult.setter
    def holdtimetxmult(self, holdtimetxmult):
        self._holdtimetxmult = holdtimetxmult

    @property
    def timer(self):
        """Interval, in seconds, between LLDP packet data units (LLDPDUs). that the NetScaler ADC sends to a directly connected device.<br/>Default value: 30<br/>Minimum length = 1<br/>Maximum length = 3000.
        """
        return self._timer

    @timer.setter
    def timer(self, timer):
        self._timer = timer

    @property
    def mode(self):
        """Global mode of Link Layer Discovery Protocol (LLDP) on the NetScaler ADC. The resultant LLDP mode of an interface depends on the LLDP mode configured at the global and the interface levels.<br/>Possible values = NONE, TRANSMITTER, RECEIVER, TRANSCEIVER.
        """
        return self._mode

    @mode.setter
    def mode(self, mode):
        self._mode = mode

    def _get_nitro_response(self, service, response):
        """Convert a nitro response into an object array (for get requests).

        Raises:
            nitro_exception: if the response reports a non-zero error code
                with severity "ERROR", or with no severity at all.
        """
        result = service.payload_formatter.string_to_resource(
            lldpparam_response, response, self.__class__.__name__)
        if result.errorcode != 0:
            # Error 444 means the session has expired; drop it first.
            if result.errorcode == 444:
                service.clear_session(self)
            if result.severity:
                if result.severity == "ERROR":
                    raise nitro_exception(
                        result.errorcode, str(result.message),
                        str(result.severity))
            else:
                raise nitro_exception(
                    result.errorcode, str(result.message),
                    str(result.severity))
        return result.lldpparam

    def _get_object_name(self):
        """lldpparam is a singleton resource: it has no object identifier."""
        return None

    @classmethod
    def update(cls, client, resource):
        """Use this API to update lldpparam.

        Only a single (non-list) resource is supported; a list argument
        falls through and returns None, matching the generated SDK.
        """
        if type(resource) is not list:
            updateresource = lldpparam()
            updateresource.holdtimetxmult = resource.holdtimetxmult
            updateresource.timer = resource.timer
            updateresource.mode = resource.mode
            return updateresource.update_resource(client)

    @classmethod
    def unset(cls, client, resource, args):
        """Use this API to unset the properties of lldpparam resource.

        Properties that need to be unset are specified in args array.
        """
        if type(resource) is not list:
            unsetresource = lldpparam()
            return unsetresource.unset_resource(client, args)

    @classmethod
    def get(cls, client, name="", option_=""):
        """Use this API to fetch all the lldpparam resources that are
        configured on netscaler.

        A non-empty name falls through and returns None (lldpparam is a
        singleton and cannot be fetched by name).
        """
        if not name:
            obj = lldpparam()
            response = obj.get_resources(client, option_)
            return response

    class Mode:
        # Allowed values for the ``mode`` property.
        NONE = "NONE"
        TRANSMITTER = "TRANSMITTER"
        RECEIVER = "RECEIVER"
        TRANSCEIVER = "TRANSCEIVER"
class lldpparam_response(base_response):
    """Container for a NITRO API response carrying lldpparam resources."""

    def __init__(self, length=1):
        # The original assigned ``self.lldpparam = []`` and immediately
        # overwrote it below; the redundant first assignment is removed.
        self.errorcode = 0
        self.message = ""
        self.severity = ""
        self.sessionid = ""
        # Pre-allocate one resource object per expected result.
        self.lldpparam = [lldpparam() for _ in range(length)]
| mahabs/nitro | nssrc/com/citrix/netscaler/nitro/resource/config/lldp/lldpparam.py | Python | apache-2.0 | 5,812 |
__author__ = 'BeyondSky'
from collections import defaultdict
class Solution(object):
    def getHint(self, secret, guess):
        """Return the Bulls and Cows hint for guess against secret.

        :type secret: str
        :type guess: str
        :rtype: str  (formatted as '<bulls>A<cows>B')
        """
        bulls = 0
        cows = 0
        # Tally of secret digits that did not match positionally.
        unmatched = {}
        for pos, s_digit in enumerate(secret):
            if s_digit == guess[pos]:
                bulls += 1
            else:
                unmatched[s_digit] = unmatched.get(s_digit, 0) + 1
        # Second sweep: each misplaced guess digit still available in the
        # tally counts as a cow and consumes one occurrence.
        for pos, s_digit in enumerate(secret):
            g_digit = guess[pos]
            if s_digit != g_digit and unmatched.get(g_digit, 0) > 0:
                cows += 1
                unmatched[g_digit] -= 1
        return str(bulls) + 'A' + str(cows) + 'B'
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test utils."""
import os
from typing import Dict, Optional
from absl.testing import parameterized
from jax.lib import xla_bridge
import numpy as np
class TestCase(parameterized.TestCase):
  """Custom test class containing additional useful utility methods."""

  def assertArrayEqual(self, actual: np.ndarray, expected: np.ndarray):
    """Asserts the two arrays are element-wise equal after flattening."""
    self.assertSequenceEqual(actual.ravel().tolist(),
                             expected.ravel().tolist())

  def assertArrayAlmostEqual(self,
                             actual: np.ndarray,
                             expected: np.ndarray,
                             places: Optional[int] = 7):
    """Asserts the two arrays are element-wise almost equal (to `places`)."""
    self.assertSequenceAlmostEqual(actual.ravel().tolist(),
                                   expected.ravel().tolist(),
                                   places=places)
def force_multi_devices(num_cpu_devices: int):
  """Run with set number of CPU devices."""
  flags_str = os.getenv('XLA_FLAGS') or ''
  # Don't override user-specified device count, or other XLA flags.
  if 'xla_force_host_platform_device_count' not in flags_str:
    new_flags = flags_str + ' --xla_force_host_platform_device_count={}'.format(
        num_cpu_devices)
    os.environ['XLA_FLAGS'] = new_flags
  # Clear any cached backends so new CPU backend will pick up the env var.
  xla_bridge.get_backend.cache_clear()
def tensor_to_numpy(tensor):
  """Return `tensor` as a numpy array, converting via .numpy() if needed."""
  return tensor if isinstance(tensor, np.ndarray) else tensor.numpy()
def gen_mention_pretraining_sample(
    text_length: int,
    n_mentions: int,
    n_linked_mentions: int,
    max_length: int = 100,
    vocab_size: int = 100,
    entity_vocab_size: int = 1000,
    mention_size: int = 2,
) -> Dict[str, np.ndarray]:
  """Generate test raw decoded input for mention pre-training pipeline.

  Uses the global numpy RNG (no seed is set here); seed np.random externally
  for reproducible samples.

  Args:
    text_length: number of real (non-padding) tokens; must be <= max_length.
    n_mentions: number of mentions to sample; must be <= text_length //
      mention_size (starts are drawn without replacement on a mention_size
      grid, so mentions never overlap).
    n_linked_mentions: how many of the first mentions get entity links.
    max_length: padded length of every per-token array.
    vocab_size: token ids are drawn from [1, vocab_size).
    entity_vocab_size: entity ids are drawn from [1, entity_vocab_size).
    mention_size: fixed token length of every mention.

  Returns:
    Dict of int64 arrays, all of shape (max_length,).
  """
  # Pad every per-token array on the right out to max_length.
  text_pad_shape = (0, max_length - text_length)
  text_ids = np.random.randint(
      low=1, high=vocab_size, size=text_length, dtype=np.int64)
  text_ids = np.pad(text_ids, pad_width=text_pad_shape, mode='constant')
  text_mask = np.pad(
      np.ones(shape=text_length, dtype=np.int64),
      pad_width=text_pad_shape,
      mode='constant')
  # Sample non-overlapping mention starts on a mention_size-aligned grid.
  mention_start_positions = np.random.choice(
      text_length // mention_size, size=n_mentions,
      replace=False) * mention_size
  mention_start_positions.sort()
  mention_end_positions = mention_start_positions + mention_size - 1
  dense_span_starts = np.zeros(shape=max_length, dtype=np.int64)
  dense_span_starts[mention_start_positions] = 1
  dense_span_ends = np.zeros(shape=max_length, dtype=np.int64)
  dense_span_ends[mention_end_positions] = 1
  # The first n_linked_mentions mentions (in position order) are linked.
  linked_mention_indices = np.arange(n_linked_mentions)
  linked_mention_position_slices = [
      np.arange(mention_start_positions[idx], mention_end_positions[idx] + 1)
      for idx in linked_mention_indices
  ]
  if n_linked_mentions > 0:
    dense_linked_mention_positions = np.concatenate(
        linked_mention_position_slices)
  else:
    # Empty position array keeps the indexing below a no-op.
    dense_linked_mention_positions = np.arange(0)
  linked_mention_ids = np.random.randint(
      low=1, high=entity_vocab_size, size=len(linked_mention_indices))
  dense_mention_mask = np.zeros(shape=max_length, dtype=np.int64)
  dense_mention_mask[dense_linked_mention_positions] = 1
  # Every token of a linked mention carries that mention's entity id.
  dense_mention_ids = np.zeros(shape=max_length, dtype=np.int64)
  for idx, position_slice in enumerate(linked_mention_position_slices):
    dense_mention_ids[position_slice] = linked_mention_ids[idx]
  dense_answer_mask = np.ones_like(dense_mention_mask)
  raw_example = {
      'text_ids': text_ids,
      'text_mask': text_mask,
      'dense_span_starts': dense_span_starts,
      'dense_span_ends': dense_span_ends,
      'dense_mention_mask': dense_mention_mask,
      'dense_mention_ids': dense_mention_ids,
      'dense_answer_mask': dense_answer_mask,
  }
  return raw_example
| google-research/language | language/mentionmemory/utils/test_utils.py | Python | apache-2.0 | 4,490 |
'''
More information at: http://www.pymolwiki.org/index.php/elbow_angle
Calculate the elbow angle of an antibody Fab complex and optionally draw a
graphical representation of the vectors used to determine the angle.
NOTE: There is no automatic checking of the validity of limit_l and limit_h
values or of the assignment of light and heavy chain IDs. If these are entered
incorrectly or omitted, the reported angle will likely be incorrect.
As always with these things, your mileage may vary. Use at your own risk!
REQUIREMENTS
numpy, version 1.6
http://numpy.scipy.org
transformations.py, version 2012.01.01
by Christoph Gohlke
www.lfd.uci.edu/~gohlke/code
May also require an edit to transformations.py:
Changes `1e-8` to `1e-7` in lines 357 & 363 to avoid a numerical error.
com.py
by Jason Vertrees
http://www.pymolwiki.org/index.php/com
'''
__author__ = 'Jared Sampson'
__version__ = '0.1'
from pymol import cmd
import transformations
import com
import numpy
################################################################################
def calc_super_matrix(mobile, static):
    '''
    DESCRIPTION

    Aligns two objects (or selections), returns the transformation matrix,
    and resets the matrix of the mobile object.

    Uses CEAlign PyMOL function for alignment.

    ARGUMENTS

    mobile = string: selection describing the mobile object whose rotation
    matrix will be reported

    static = string: selection describing the static object onto which the
    mobile object will be aligned

    REQUIRES: numpy
    '''
    cmd.cealign(static, mobile)
    # cmd.super(mobile,static)
    T = cmd.get_object_matrix(mobile)
    # get_object_matrix returns the 4x4 transformation flattened to a
    # 16-tuple in row-major order; reshape it directly instead of copying
    # element by element into a pre-built identity matrix.
    return numpy.array(T, dtype=float).reshape(4, 4)
################################################################################
#def elbow_angle(obj,light='L',heavy='H',limit_l=110,limit_h=113,draw=1):
# alpha = light, beta = heavy
# def elbow_angle(obj,light,heavy,limit_l=128,limit_h=127,draw=0):
def elbow_angle(obj, heavy, light, limit_h="1001E", limit_l=1001, draw=0):
    """
    DESCRIPTION

    Calculates the integer elbow angle of an antibody Fab complex and
    optionally draws a graphical representation of the vectors used to
    determine the angle.

    ARGUMENTS

    obj = string: object
    light/heavy = strings: chain ID of light and heavy chains, respectively
    limit_l/limit_h = residue numbers of the last residue in the
    light and heavy chain variable domains, respectively
    draw = boolean: Choose whether or not to draw the angle visualization

    REQUIRES: com.py, transformations.py, numpy (see above)

    NOTE(review): in this TCR/IMGT variant limit_h defaults to the string
    "1001E" (an insertion-coded residue) while limit_l is an int, and the
    heavy-chain constant-domain selection below is hard-wired to
    'not resi 1-127' plus specific 1001x insertion codes -- confirm these
    match your numbering scheme before reuse.
    """
    # store current view
    orig_view = cmd.get_view()
    #limit_l = int(limit_l)
    #limit_h = int(limit_h)
    draw = int(draw)
    # for temp object names
    tmp_prefix = "tmp_elbow_"
    prefix = tmp_prefix + obj + '_'
    # names
    vl = prefix + 'VL'
    vh = prefix + 'VH'
    cl = prefix + 'CL'
    ch = prefix + 'CH'
    # selections: variable (V) and constant (C) domains of each chain
    vl_sel = 'polymer and %s and chain %s and resi 1-%i' % (obj, light, limit_l)
    vh_sel = 'polymer and %s and chain %s and resi 1-%s & !resi 1001D & !resi 1001C & !resi 1001B & !resi 1001A & !resi 1001' % (obj, heavy, limit_h)
    cl_sel = 'polymer and %s and chain %s and not resi 1-%i' % (obj, light, limit_l)
    #ch_sel = 'polymer and %s and chain %s and not resi 1-%i' % (obj, heavy, limit_h)
    ch_sel = 'polymer and %s and chain %s and not resi 1-127 and not resi 1001D and not resi 1001C and not resi 1001B and not resi 1001A and not resi 1001' % (obj, heavy)
    v_sel = '(('+vl_sel+') or ('+vh_sel+'))'
    c_sel = '(('+cl_sel+') or ('+ch_sel+'))'
    # create temp objects
    cmd.create(vl, vl_sel)
    cmd.create(vh, vh_sel)
    cmd.create(cl, cl_sel)
    cmd.create(ch, ch_sel)
    # superimpose vl onto vh, calculate axis and angle
    Rv = calc_super_matrix(vl, vh)
    angle_v, direction_v, point_v = transformations.rotation_from_matrix(Rv)
    # superimpose cl onto ch, calculate axis and angle
    Rc = calc_super_matrix(cl, ch)
    angle_c, direction_c, point_c = transformations.rotation_from_matrix(Rc)
    # delete temporary objects
    cmd.delete(vl)
    cmd.delete(vh)
    cmd.delete(cl)
    cmd.delete(ch)
    # if dot product is positive, angle is acute
    if (numpy.dot(direction_v, direction_c) > 0):
        direction_c = direction_c * -1  # ensure angle is > 90 (need to standardize this)
        # TODO: make both directions point away from the elbow axis.
    elbow = int(numpy.degrees(numpy.arccos(numpy.dot(direction_v, direction_c))))
    # while (elbow < 90):
    #     elbow = 180 - elbow   # limit to physically reasonable range
    # compare the direction_v and direction_c axes to the vector defined by
    # the C-alpha atoms of limit_l and limit_h of the original fab
    hinge_l_sel = "%s//%s/%s/CA" % (obj, light, limit_l)
    hinge_h_sel = "%s//%s/%s/CA" % (obj, heavy, limit_h)
    hinge_l = cmd.get_atom_coords(hinge_l_sel)
    hinge_h = cmd.get_atom_coords(hinge_h_sel)
    hinge_vec = numpy.array(hinge_h) - numpy.array(hinge_l)
    # sign of this triple product decides which side of 180 degrees we are on
    test = numpy.dot(hinge_vec, numpy.cross(direction_v, direction_c))
    if (test > 0):
        elbow = 360 - elbow
    #print " Elbow angle: %i degrees" % elbow
    if (draw == 1):
        # there is probably a more elegant way to do this, but
        # it works so I'm not going to mess with it for now
        pre = obj + '_elbow_'
        # draw hinge vector
        cmd.pseudoatom(pre + "hinge_l", pos=hinge_l)
        cmd.pseudoatom(pre + "hinge_h", pos=hinge_h)
        cmd.distance(pre + "hinge_vec", pre + "hinge_l", pre + "hinge_h")
        cmd.set("dash_gap", 0)
        # draw the variable domain axis
        com_v = com.COM(v_sel)
        start_v = [a - 10*b for a, b in zip(com_v, direction_v)]
        end_v = [a + 10*b for a, b in zip(com_v, direction_v)]
        cmd.pseudoatom(pre + "start_v", pos=start_v)
        cmd.pseudoatom(pre + "end_v", pos=end_v)
        cmd.distance(pre + "v_vec", pre + "start_v", pre + "end_v")
        # draw the constant domain axis
        com_c = com.COM(c_sel)
        start_c = [a - 10*b for a, b in zip(com_c, direction_c)]
        end_c = [a + 10*b for a, b in zip(com_c, direction_c)]
        cmd.pseudoatom(pre + "start_c", pos=start_c)
        cmd.pseudoatom(pre + "end_c", pos=end_c)
        cmd.distance(pre + "c_vec", pre + "start_c", pre + "end_c")
        # customize appearance
        cmd.hide("labels", pre + "hinge_vec")
        cmd.hide("labels", pre + "v_vec")
        cmd.hide("labels", pre + "c_vec")
        cmd.color("green", pre + "hinge_l")
        cmd.color("red", pre + "hinge_h")
        cmd.color("black", pre + "hinge_vec")
        cmd.color("black", pre + "start_v")
        cmd.color("black", pre + "end_v")
        cmd.color("black", pre + "v_vec")
        cmd.color("black", pre + "start_c")
        cmd.color("black", pre + "end_c")
        cmd.color("black", pre + "c_vec")
        # draw spheres
        cmd.show("spheres", pre + "hinge_l or " + pre + "hinge_h")
        cmd.show("spheres", pre + "start_v or " + pre + "start_c")
        cmd.show("spheres", pre + "end_v or " + pre + "end_c")
        cmd.set("sphere_scale", 2)
        cmd.set("dash_gap", 0, pre + "hinge_vec")
        cmd.set("dash_width", 5)
        cmd.set("dash_radius", 0.3)
        # group drawing objects
        cmd.group(pre, pre + "*")
    # restore original view
    cmd.set_view(orig_view)
    return elbow
def setup_antibody():
    """Load the example Fab structure and print its elbow angle.

    Returns:
        0 on success (shell-style status code).
    """
    # Bug fix: cmd.load returns a status code, not an object handle, and
    # elbow_angle requires the heavy/light chain IDs as arguments -- the old
    # call elbow_angle(my_struc) raised a TypeError. elbow_angle expects the
    # PyMOL object NAME, which defaults to the filename stem.
    cmd.load("1mhp_ch.pdb")
    # NOTE(review): chain IDs assumed to be 'H' (heavy) and 'L' (light) for
    # this structure -- confirm against the actual PDB file.
    my_elbow = elbow_angle("1mhp_ch", heavy="H", light="L")
    print(my_elbow)
    return 0
| demharters/git_scripts | my_elbow_angle_tcr_imgt.py | Python | apache-2.0 | 7,744 |
# ====================================================================
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ====================================================================
from unittest import TestCase, main
from lucene import *
from PyLuceneTestCase import PyLuceneTestCase
class TestRegexQuery(PyLuceneTestCase):
    # Field name shared by the indexed document and every query.
    FN = "field"

    def setUp(self):
        # Index a single known sentence so each regex has a predictable
        # hit count (0 or 1).
        PyLuceneTestCase.setUp(self)
        writer = self.getWriter(analyzer=SimpleAnalyzer(self.TEST_VERSION))
        doc = Document()
        doc.add(Field(self.FN, "the quick brown fox jumps over the lazy dog", TextField.TYPE_NOT_STORED))
        writer.addDocument(doc)
        writer.commit()
        writer.close()
        self.searcher = self.getSearcher()

    def tearDown(self):
        # Drop the searcher reference so index resources can be released.
        del self.searcher

    def newTerm(self, value):
        # Build a Term in the shared test field.
        return Term(self.FN, value)

    def regexQueryNrHits(self, regex):
        # Total hits for a plain RegexQuery over the test field.
        query = RegexQuery(self.newTerm(regex))
        return self.searcher.search(query, 50).totalHits

    def spanRegexQueryNrHits(self, regex1, regex2, slop, ordered):
        # Total hits for two regex terms combined into a SpanNearQuery
        # with the given slop and ordering constraint.
        srq1 = SpanMultiTermQueryWrapper(RegexQuery(self.newTerm(regex1)))
        srq2 = SpanMultiTermQueryWrapper(RegexQuery(self.newTerm(regex2)))
        query = SpanNearQuery([srq1, srq2], slop, ordered)
        return self.searcher.search(query, 50).totalHits

    def testRegex1(self):
        # "quick" matches ^q.[aeiou]c.*$
        self.assertEqual(1, self.regexQueryNrHits("^q.[aeiou]c.*$"))

    def testRegex2(self):
        # No 3+ letter term has a vowel in position 2 and 'c' in position 3.
        self.assertEqual(0, self.regexQueryNrHits("^.[aeiou]c.*$"))

    def testRegex3(self):
        # Anchored 4-letter form does not match "quick".
        self.assertEqual(0, self.regexQueryNrHits("^q.[aeiou]c$"))

    def testSpanRegex1(self):
        # "quick" ... "dog" are within 6 positions, in order.
        self.assertEqual(1, self.spanRegexQueryNrHits("^q.[aeiou]c.*$",
                                                      "dog", 6, True))

    def testSpanRegex2(self):
        # Slop of 5 is one position too tight for the same pair.
        self.assertEqual(0, self.spanRegexQueryNrHits("^q.[aeiou]c.*$",
                                                      "dog", 5, True))
if __name__ == "__main__":
    import sys, lucene
    lucene.initVM()
    if '-loop' in sys.argv:
        # Loop mode: run the suite repeatedly (e.g. to hunt for leaks).
        sys.argv.remove('-loop')
        while True:
            try:
                main()
            except KeyboardInterrupt:
                # Bug fix: the old bare except swallowed KeyboardInterrupt
                # too, making the loop unstoppable with Ctrl-C.
                break
            except:
                # Deliberate catch-all: unittest's main() exits via
                # SystemExit after every run, so swallow it (and test
                # failures) and keep looping.
                pass
    else:
        main()
| romanchyla/pylucene-trunk | test/test_RegexQuery.py | Python | apache-2.0 | 2,757 |
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Amazon EC2, Eucalyptus, Nimbus and Outscale drivers.
"""
import re
import sys
import base64
import copy
import warnings
import time
try:
from lxml import etree as ET
except ImportError:
from xml.etree import ElementTree as ET
from libcloud.utils.py3 import b, basestring, ensure_string
from libcloud.utils.xml import fixxpath, findtext, findattr, findall
from libcloud.utils.publickey import get_pubkey_ssh2_fingerprint
from libcloud.utils.publickey import get_pubkey_comment
from libcloud.utils.iso8601 import parse_date
from libcloud.common.aws import AWSBaseResponse, SignedAWSConnection
from libcloud.common.aws import DEFAULT_SIGNATURE_VERSION
from libcloud.common.types import (InvalidCredsError, MalformedResponseError,
LibcloudError)
from libcloud.compute.providers import Provider
from libcloud.compute.base import Node, NodeDriver, NodeLocation, NodeSize
from libcloud.compute.base import NodeImage, StorageVolume, VolumeSnapshot
from libcloud.compute.base import KeyPair
from libcloud.compute.types import NodeState, KeyPairDoesNotExistError, \
StorageVolumeState, VolumeSnapshotState
__all__ = [
'API_VERSION',
'NAMESPACE',
'INSTANCE_TYPES',
'OUTSCALE_INSTANCE_TYPES',
'OUTSCALE_SAS_REGION_DETAILS',
'OUTSCALE_INC_REGION_DETAILS',
'DEFAULT_EUCA_API_VERSION',
'EUCA_NAMESPACE',
'EC2NodeDriver',
'BaseEC2NodeDriver',
'NimbusNodeDriver',
'EucNodeDriver',
'OutscaleSASNodeDriver',
'OutscaleINCNodeDriver',
'EC2NodeLocation',
'EC2ReservedNode',
'EC2SecurityGroup',
'EC2ImportSnapshotTask',
'EC2PlacementGroup',
'EC2Network',
'EC2NetworkSubnet',
'EC2NetworkInterface',
'EC2RouteTable',
'EC2Route',
'EC2SubnetAssociation',
'ExEC2AvailabilityZone',
'IdempotentParamError'
]
# EC2 Query API version and the matching XML namespace used when parsing
# API responses.
API_VERSION = '2016-11-15'
NAMESPACE = 'http://ec2.amazonaws.com/doc/%s/' % (API_VERSION)

# Eucalyptus Constants
DEFAULT_EUCA_API_VERSION = '3.3.0'
EUCA_NAMESPACE = 'http://msgs.eucalyptus.com/%s' % (DEFAULT_EUCA_API_VERSION)

# Outscale Constants
DEFAULT_OUTSCALE_API_VERSION = '2016-04-01'
OUTSCALE_NAMESPACE = 'http://api.outscale.com/wsdl/fcuext/2014-04-15/'
"""
Sizes must be hardcoded, because Amazon doesn't provide an API to fetch them.
From http://aws.amazon.com/ec2/instance-types/
and <http://aws.amazon.com/ec2/previous-generation/>
ram = [MiB], disk = [GB]
"""
def GiB(value):
    """Convert a size expressed in GiB to a whole number of MiB."""
    mib_per_gib = 1024
    return int(value * mib_per_gib)
# Hardcoded EC2 sizes keyed by instance-type id.
# 'ram' is in MiB, 'disk' in GB (0 == EBS-only); 'extra': {'cpu': N} records
# the CPU count where known.
# Fixes in this revision: 't2.large', 'p2.8xlarge' and 'p2.16xlarge' carried
# copy-pasted display names from the preceding entry.
INSTANCE_TYPES = {
    't1.micro': {
        'id': 't1.micro',
        'name': 'Micro Instance',
        'ram': GiB(0.613),
        'disk': 15,  # GB
        'bandwidth': None
    },
    'm1.small': {
        'id': 'm1.small',
        'name': 'Small Instance',
        'ram': GiB(1.7),
        'disk': 160,  # GB
        'bandwidth': None
    },
    'm1.medium': {
        'id': 'm1.medium',
        'name': 'Medium Instance',
        'ram': GiB(3.75),
        'disk': 410,  # GB
        'bandwidth': None
    },
    'm1.large': {
        'id': 'm1.large',
        'name': 'Large Instance',
        'ram': GiB(7.5),
        'disk': 2 * 420,  # GB
        'bandwidth': None,
        'extra': {
            'cpu': 2
        }
    },
    'm1.xlarge': {
        'id': 'm1.xlarge',
        'name': 'Extra Large Instance',
        'ram': GiB(15),
        'disk': 4 * 420,  # GB
        'bandwidth': None,
        'extra': {
            'cpu': 4
        }
    },
    'c1.medium': {
        'id': 'c1.medium',
        'name': 'High-CPU Medium Instance',
        'ram': GiB(1.7),
        'disk': 350,  # GB
        'bandwidth': None,
        'extra': {
            'cpu': 2
        }
    },
    'c1.xlarge': {
        'id': 'c1.xlarge',
        'name': 'High-CPU Extra Large Instance',
        'ram': GiB(7),
        'disk': 4 * 420,  # GB
        'bandwidth': None,
        'extra': {
            'cpu': 8
        }
    },
    'm2.xlarge': {
        'id': 'm2.xlarge',
        'name': 'High-Memory Extra Large Instance',
        'ram': GiB(17.1),
        'disk': 420,  # GB
        'bandwidth': None,
        'extra': {
            'cpu': 2
        }
    },
    'm2.2xlarge': {
        'id': 'm2.2xlarge',
        'name': 'High-Memory Double Extra Large Instance',
        'ram': GiB(34.2),
        'disk': 850,  # GB
        'bandwidth': None,
        'extra': {
            'cpu': 4
        }
    },
    'm2.4xlarge': {
        'id': 'm2.4xlarge',
        'name': 'High-Memory Quadruple Extra Large Instance',
        'ram': GiB(68.4),
        'disk': 2 * 840,  # GB
        'bandwidth': None,
        'extra': {
            'cpu': 8
        }
    },
    'm3.medium': {
        'id': 'm3.medium',
        'name': 'Medium Instance',
        'ram': GiB(3.75),
        'disk': 4,  # GB
        'bandwidth': None,
        'extra': {
            'cpu': 1
        }
    },
    'm3.large': {
        'id': 'm3.large',
        'name': 'Large Instance',
        'ram': GiB(7.5),
        'disk': 32,  # GB
        'bandwidth': None,
        'extra': {
            'cpu': 2
        }
    },
    'm3.xlarge': {
        'id': 'm3.xlarge',
        'name': 'Extra Large Instance',
        'ram': GiB(15),
        'disk': 2 * 40,  # GB
        'bandwidth': None,
        'extra': {
            'cpu': 4
        }
    },
    'm3.2xlarge': {
        'id': 'm3.2xlarge',
        'name': 'Double Extra Large Instance',
        'ram': GiB(30),
        'disk': 2 * 80,  # GB
        'bandwidth': None,
        'extra': {
            'cpu': 8
        }
    },
    'm4.large': {
        'id': 'm4.large',
        'name': 'Large Instance',
        'ram': GiB(8),
        'disk': 0,  # EBS only
        'bandwidth': None,
        'extra': {
            'cpu': 2
        }
    },
    'm4.xlarge': {
        'id': 'm4.xlarge',
        'name': 'Extra Large Instance',
        'ram': GiB(16),
        'disk': 0,  # EBS only
        'bandwidth': None,
        'extra': {
            'cpu': 4
        }
    },
    'm4.2xlarge': {
        'id': 'm4.2xlarge',
        'name': 'Double Extra Large Instance',
        'ram': GiB(32),
        'disk': 0,  # EBS only
        'bandwidth': None,
        'extra': {
            'cpu': 8
        }
    },
    'm4.4xlarge': {
        'id': 'm4.4xlarge',
        'name': 'Quadruple Extra Large Instance',
        'ram': GiB(64),
        'disk': 0,  # EBS only
        'bandwidth': None,
        'extra': {
            'cpu': 16
        }
    },
    'm4.10xlarge': {
        'id': 'm4.10xlarge',
        'name': '10 Extra Large Instance',
        'ram': GiB(160),
        'disk': 0,  # EBS only
        'bandwidth': None,
        'extra': {
            'cpu': 40
        }
    },
    'm4.16xlarge': {
        'id': 'm4.16xlarge',
        'name': '16 Extra Large Instance',
        'ram': GiB(256),
        'disk': 0,  # EBS only
        'bandwidth': None,
        'extra': {
            'cpu': 64
        }
    },
    'cg1.4xlarge': {
        'id': 'cg1.4xlarge',
        'name': 'Cluster GPU Quadruple Extra Large Instance',
        'ram': GiB(22.5),
        'disk': 2 * 840,  # GB
        'bandwidth': None,
        'extra': {
            'cpu': 16
        }
    },
    'g2.2xlarge': {
        'id': 'g2.2xlarge',
        'name': 'Cluster GPU G2 Double Extra Large Instance',
        'ram': GiB(15),
        'disk': 60,  # GB
        'bandwidth': None,
        'extra': {
            'cpu': 8
        }
    },
    'g2.8xlarge': {
        'id': 'g2.8xlarge',
        'name': 'Cluster GPU G2 Eight Extra Large Instance',
        'ram': GiB(60),
        'disk': 2 * 120,  # GB
        'bandwidth': None,
        'extra': {
            'cpu': 32
        }
    },
    'p2.xlarge': {
        'id': 'p2.xlarge',
        'name': 'Cluster GPU P2 Large Instance',
        'ram': GiB(61),
        'disk': 4,
        'bandwidth': None
    },
    'p2.8xlarge': {
        'id': 'p2.8xlarge',
        # Was a copy-paste of the p2.xlarge display name.
        'name': 'Cluster GPU P2 Eight Extra Large Instance',
        'ram': GiB(488),
        'disk': 32,
        'bandwidth': None
    },
    'p2.16xlarge': {
        'id': 'p2.16xlarge',
        # Was a copy-paste of the p2.xlarge display name.
        'name': 'Cluster GPU P2 16 Extra Large Instance',
        'ram': GiB(732),
        'disk': 64,
        'bandwidth': None
    },
    'cc1.4xlarge': {
        'id': 'cc1.4xlarge',
        'name': 'Cluster Compute Quadruple Extra Large Instance',
        'ram': 23552,
        'disk': 1690,
        'bandwidth': None
    },
    'cc2.8xlarge': {
        'id': 'cc2.8xlarge',
        'name': 'Cluster Compute Eight Extra Large Instance',
        'ram': GiB(60.5),
        'disk': 4 * 840,  # GB
        'bandwidth': None,
        'extra': {
            'cpu': 32
        }
    },
    # c3 instances have 2 SSDs of the specified disk size
    'c3.large': {
        'id': 'c3.large',
        'name': 'Compute Optimized Large Instance',
        'ram': GiB(3.75),
        'disk': 2 * 16,  # GB
        'bandwidth': None,
        'extra': {
            'cpu': 2
        }
    },
    'c3.xlarge': {
        'id': 'c3.xlarge',
        'name': 'Compute Optimized Extra Large Instance',
        'ram': GiB(7.5),
        'disk': 2 * 40,  # GB
        'bandwidth': None,
        'extra': {
            'cpu': 4
        }
    },
    'c3.2xlarge': {
        'id': 'c3.2xlarge',
        'name': 'Compute Optimized Double Extra Large Instance',
        'ram': GiB(15),
        'disk': 2 * 80,  # GB
        'bandwidth': None,
        'extra': {
            'cpu': 8
        }
    },
    'c3.4xlarge': {
        'id': 'c3.4xlarge',
        'name': 'Compute Optimized Quadruple Extra Large Instance',
        'ram': GiB(30),
        'disk': 2 * 160,  # GB
        'bandwidth': None,
        'extra': {
            'cpu': 16
        }
    },
    'c3.8xlarge': {
        'id': 'c3.8xlarge',
        'name': 'Compute Optimized Eight Extra Large Instance',
        'ram': GiB(60),
        'disk': 2 * 320,  # GB
        'bandwidth': None,
        'extra': {
            'cpu': 32
        }
    },
    'c4.large': {
        'id': 'c4.large',
        'name': 'Compute Optimized Large Instance',
        'ram': GiB(3.75),
        'disk': 0,  # EBS only
        'bandwidth': None,
        'extra': {
            'cpu': 2
        }
    },
    'c4.xlarge': {
        'id': 'c4.xlarge',
        'name': 'Compute Optimized Extra Large Instance',
        'ram': GiB(7.5),
        'disk': 0,  # EBS only
        'bandwidth': None,
        'extra': {
            'cpu': 4
        }
    },
    'c4.2xlarge': {
        'id': 'c4.2xlarge',
        'name': 'Compute Optimized Double Large Instance',
        'ram': GiB(15),
        'disk': 0,  # EBS only
        'bandwidth': None,
        'extra': {
            'cpu': 8
        }
    },
    'c4.4xlarge': {
        'id': 'c4.4xlarge',
        'name': 'Compute Optimized Quadruple Extra Large Instance',
        'ram': GiB(30),
        'disk': 0,  # EBS only
        'bandwidth': None,
        'extra': {
            'cpu': 16
        }
    },
    'c4.8xlarge': {
        'id': 'c4.8xlarge',
        'name': 'Compute Optimized Eight Extra Large Instance',
        'ram': GiB(60),
        'disk': 0,  # EBS only
        'bandwidth': None,
        'extra': {
            'cpu': 32
        }
    },
    'cr1.8xlarge': {
        'id': 'cr1.8xlarge',
        'name': 'High Memory Cluster Eight Extra Large',
        'ram': GiB(244),
        'disk': 2 * 120,  # GB
        'bandwidth': None,
        'extra': {
            'cpu': 32
        }
    },
    'hs1.4xlarge': {
        'id': 'hs1.4xlarge',
        'name': 'High Storage Quadruple Extra Large Instance',
        'ram': GiB(64),
        'disk': 2 * 1024,  # GB
        'bandwidth': None,
        'extra': {
            'cpu': 16
        }
    },
    'hs1.8xlarge': {
        'id': 'hs1.8xlarge',
        'name': 'High Storage Eight Extra Large Instance',
        'ram': GiB(117),
        'disk': 24 * 2000,
        'bandwidth': None,
        'extra': {
            # NOTE(review): AWS documents 16 vCPUs (35 ECU) for hs1.8xlarge;
            # 17 may be an ECU/vCPU mix-up -- confirm intended unit.
            'cpu': 17
        }
    },
    # i2 instances have up to eight SSD drives
    'i2.xlarge': {
        'id': 'i2.xlarge',
        'name': 'High I/O Storage Optimized Extra Large Instance',
        'ram': GiB(30.5),
        'disk': 800,  # GB
        'bandwidth': None,
        'extra': {
            'cpu': 4
        }
    },
    'i2.2xlarge': {
        'id': 'i2.2xlarge',
        'name': 'High I/O Storage Optimized Double Extra Large Instance',
        'ram': GiB(61),
        'disk': 2 * 800,  # GB
        'bandwidth': None,
        'extra': {
            'cpu': 8
        }
    },
    'i2.4xlarge': {
        'id': 'i2.4xlarge',
        'name': 'High I/O Storage Optimized Quadruple Large Instance',
        'ram': GiB(122),
        'disk': 4 * 800,  # GB
        'bandwidth': None,
        'extra': {
            'cpu': 16
        }
    },
    'i2.8xlarge': {
        'id': 'i2.8xlarge',
        'name': 'High I/O Storage Optimized Eight Extra Large Instance',
        'ram': GiB(244),
        'disk': 8 * 800,  # GB
        'bandwidth': None,
        'extra': {
            'cpu': 32
        }
    },
    'd2.xlarge': {
        'id': 'd2.xlarge',
        'name': 'Dense Storage Optimized Extra Large Instance',
        'ram': GiB(30.5),
        'disk': 3 * 2000,  # GB
        'bandwidth': None,
        'extra': {
            'cpu': 4
        }
    },
    'd2.2xlarge': {
        'id': 'd2.2xlarge',
        'name': 'Dense Storage Optimized Double Extra Large Instance',
        'ram': GiB(61),
        'disk': 6 * 2000,  # GB
        'bandwidth': None,
        'extra': {
            'cpu': 8
        }
    },
    'd2.4xlarge': {
        'id': 'd2.4xlarge',
        'name': 'Dense Storage Optimized Quadruple Extra Large Instance',
        'ram': GiB(122),
        'disk': 12 * 2000,  # GB
        'bandwidth': None,
        'extra': {
            'cpu': 16
        }
    },
    'd2.8xlarge': {
        'id': 'd2.8xlarge',
        'name': 'Dense Storage Optimized Eight Extra Large Instance',
        'ram': GiB(244),
        'disk': 24 * 2000,  # GB
        'bandwidth': None,
        'extra': {
            'cpu': 36
        }
    },
    # 1x SSD
    'r3.large': {
        'id': 'r3.large',
        'name': 'Memory Optimized Large instance',
        'ram': GiB(15.25),
        'disk': 32,  # GB
        'bandwidth': None,
        'extra': {
            'cpu': 2
        }
    },
    'r3.xlarge': {
        'id': 'r3.xlarge',
        'name': 'Memory Optimized Extra Large instance',
        'ram': GiB(30.5),
        'disk': 80,  # GB
        'bandwidth': None,
        'extra': {
            'cpu': 4
        }
    },
    'r3.2xlarge': {
        'id': 'r3.2xlarge',
        'name': 'Memory Optimized Double Extra Large instance',
        'ram': GiB(61),
        'disk': 160,  # GB
        'bandwidth': None,
        'extra': {
            'cpu': 8
        }
    },
    'r3.4xlarge': {
        'id': 'r3.4xlarge',
        'name': 'Memory Optimized Quadruple Extra Large instance',
        'ram': GiB(122),
        'disk': 320,  # GB
        'bandwidth': None,
        'extra': {
            'cpu': 16
        }
    },
    'r3.8xlarge': {
        'id': 'r3.8xlarge',
        'name': 'Memory Optimized Eight Extra Large instance',
        'ram': GiB(244),
        'disk': 2 * 320,  # GB
        'bandwidth': None,
        'extra': {
            'cpu': 32
        }
    },
    'r4.large': {
        'id': 'r4.large',
        'name': 'Memory Optimized Large instance',
        'ram': GiB(15.25),
        'disk': 0,  # GB
        'bandwidth': None,
        'extra': {
            'cpu': 2
        }
    },
    'r4.xlarge': {
        'id': 'r4.xlarge',
        'name': 'Memory Optimized Extra Large instance',
        'ram': GiB(30.5),
        'disk': 0,  # GB
        'bandwidth': None,
        'extra': {
            'cpu': 4
        }
    },
    'r4.2xlarge': {
        'id': 'r4.2xlarge',
        'name': 'Memory Optimized Double Extra Large instance',
        'ram': GiB(61),
        'disk': 0,  # GB
        'bandwidth': None,
        'extra': {
            'cpu': 8
        }
    },
    'r4.4xlarge': {
        'id': 'r4.4xlarge',
        'name': 'Memory Optimized Quadruple Extra Large instance',
        'ram': GiB(122),
        'disk': 0,  # GB
        'bandwidth': None,
        'extra': {
            'cpu': 16
        }
    },
    'r4.8xlarge': {
        'id': 'r4.8xlarge',
        'name': 'Memory Optimized Eight Extra Large instance',
        'ram': GiB(244),
        'disk': 0,  # GB
        'bandwidth': None,
        'extra': {
            'cpu': 32
        }
    },
    'r4.16xlarge': {
        'id': 'r4.16xlarge',
        'name': 'Memory Optimized Sixteen Extra Large instance',
        'ram': GiB(488),
        'disk': 0,  # GB
        'bandwidth': None,
        'extra': {
            'cpu': 64
        }
    },
    # Burstable Performance General Purpose
    't2.nano': {
        'id': 't2.nano',
        'name': 'Burstable Performance Nano Instance',
        'ram': 512,
        'disk': 0,  # EBS Only
        'bandwidth': None,
        'extra': {
            'cpu': 1
        }
    },
    't2.micro': {
        'id': 't2.micro',
        'name': 'Burstable Performance Micro Instance',
        'ram': GiB(1),
        'disk': 0,  # EBS Only
        'bandwidth': None,
        'extra': {
            'cpu': 1
        }
    },
    't2.small': {
        'id': 't2.small',
        'name': 'Burstable Performance Small Instance',
        'ram': GiB(2),
        'disk': 0,  # EBS Only
        'bandwidth': None,
        'extra': {
            'cpu': 1
        }
    },
    't2.medium': {
        'id': 't2.medium',
        'name': 'Burstable Performance Medium Instance',
        'ram': GiB(4),
        'disk': 0,  # EBS Only
        'bandwidth': None,
        'extra': {
            'cpu': 2
        }
    },
    't2.large': {
        'id': 't2.large',
        # Was a copy-paste of the t2.medium display name.
        'name': 'Burstable Performance Large Instance',
        'ram': GiB(8),
        'disk': 0,  # EBS Only
        'bandwidth': None,
        'extra': {
            'cpu': 2
        }
    },
    't2.xlarge': {
        'id': 't2.xlarge',
        'name': 'Burstable Performance Extra Large Instance',
        'ram': GiB(16),
        'disk': 0,  # EBS Only
        'bandwidth': None,
        'extra': {
            'cpu': 4
        }
    },
    't2.2xlarge': {
        'id': 't2.2xlarge',
        'name': 'Burstable Performance Double Extra Large Instance',
        'ram': GiB(32),
        'disk': 0,  # EBS Only
        'bandwidth': None,
        'extra': {
            'cpu': 8
        }
    },
    'x1.32xlarge': {
        'id': 'x1.32xlarge',
        'name': 'Memory Optimized ThirtyTwo Extra Large instance',
        'ram': GiB(1952),
        'disk': 2 * 1920,  # GB
        'bandwidth': None,
        'extra': {
            'cpu': 128
        }
    }
}
# From <https://aws.amazon.com/marketplace/help/200777880>
# From <https://aws.amazon.com/marketplace/help/200777880>
# Per-region metadata: API endpoint, marketplace 'api_name' (used as a pricing
# lookup key), AWS request 'signature_version' and the instance types offered.
# Data is hardcoded; the instance-type lists are maintained by hand and may
# lag behind what AWS actually offers in a region.
REGION_DETAILS = {
    # US East (Northern Virginia) Region
    'us-east-1': {
        'endpoint': 'ec2.us-east-1.amazonaws.com',
        'api_name': 'ec2_us_east',
        'country': 'USA',
        'signature_version': '2',
        'instance_types': [
            't1.micro',
            'm1.small',
            'm1.medium',
            'm1.large',
            'm1.xlarge',
            'm2.xlarge',
            'm2.2xlarge',
            'm2.4xlarge',
            'm3.medium',
            'm3.large',
            'm3.xlarge',
            'm3.2xlarge',
            'm4.large',
            'm4.xlarge',
            'm4.2xlarge',
            'm4.4xlarge',
            'm4.10xlarge',
            'm4.16xlarge',
            'c1.medium',
            'c1.xlarge',
            'cc2.8xlarge',
            'c3.large',
            'c3.xlarge',
            'c3.2xlarge',
            'c3.4xlarge',
            'c3.8xlarge',
            'c4.large',
            'c4.xlarge',
            'c4.2xlarge',
            'c4.4xlarge',
            'c4.8xlarge',
            'cg1.4xlarge',
            'g2.2xlarge',
            'g2.8xlarge',
            'cr1.8xlarge',
            'hs1.8xlarge',
            'i2.xlarge',
            'i2.2xlarge',
            'i2.4xlarge',
            'i2.8xlarge',
            'd2.xlarge',
            'd2.2xlarge',
            'd2.4xlarge',
            'd2.8xlarge',
            'r3.large',
            'r3.xlarge',
            'r3.2xlarge',
            'r3.4xlarge',
            'r3.8xlarge',
            'r4.large',
            'r4.xlarge',
            'r4.2xlarge',
            'r4.4xlarge',
            'r4.8xlarge',
            'r4.16xlarge',
            't2.nano',
            't2.micro',
            't2.small',
            't2.medium',
            't2.large',
            'x1.32xlarge'
        ]
    },
    # US West (Northern California) Region
    'us-west-1': {
        'endpoint': 'ec2.us-west-1.amazonaws.com',
        'api_name': 'ec2_us_west',
        'country': 'USA',
        'signature_version': '2',
        'instance_types': [
            't1.micro',
            'm1.small',
            'm1.medium',
            'm1.large',
            'm1.xlarge',
            'm2.xlarge',
            'm2.2xlarge',
            'm2.4xlarge',
            'm3.medium',
            'm3.large',
            'm3.xlarge',
            'm3.2xlarge',
            'm4.large',
            'm4.xlarge',
            'm4.2xlarge',
            'm4.4xlarge',
            'm4.10xlarge',
            'm4.16xlarge',
            'c1.medium',
            'c1.xlarge',
            'g2.2xlarge',
            'g2.8xlarge',
            'c3.large',
            'c3.xlarge',
            'c3.2xlarge',
            'c3.4xlarge',
            'c3.8xlarge',
            'c4.large',
            'c4.xlarge',
            'c4.2xlarge',
            'c4.4xlarge',
            'c4.8xlarge',
            'i2.xlarge',
            'i2.2xlarge',
            'i2.4xlarge',
            'i2.8xlarge',
            'r3.large',
            'r3.xlarge',
            'r3.2xlarge',
            'r3.4xlarge',
            'r3.8xlarge',
            'r4.large',
            'r4.xlarge',
            'r4.2xlarge',
            'r4.4xlarge',
            'r4.8xlarge',
            'r4.16xlarge',
            't2.nano',
            't2.micro',
            't2.small',
            't2.medium',
            't2.large'
        ]
    },
    # US East (Ohio) Region
    # NOTE(review): this list mirrors us-east-1 including previous-generation
    # types (cc2, cg1, cr1, hs1) that Ohio never offered -- verify.
    'us-east-2': {
        'endpoint': 'ec2.us-east-2.amazonaws.com',
        'api_name': 'ec2_us_east_ohio',
        'country': 'USA',
        'signature_version': '4',
        'instance_types': [
            't1.micro',
            'm1.small',
            'm1.medium',
            'm1.large',
            'm1.xlarge',
            'm2.xlarge',
            'm2.2xlarge',
            'm2.4xlarge',
            'm3.medium',
            'm3.large',
            'm3.xlarge',
            'm3.2xlarge',
            'm4.large',
            'm4.xlarge',
            'm4.2xlarge',
            'm4.4xlarge',
            'm4.10xlarge',
            'm4.16xlarge',
            'c1.medium',
            'c1.xlarge',
            'cc2.8xlarge',
            'c3.large',
            'c3.xlarge',
            'c3.2xlarge',
            'c3.4xlarge',
            'c3.8xlarge',
            'c4.large',
            'c4.xlarge',
            'c4.2xlarge',
            'c4.4xlarge',
            'c4.8xlarge',
            'cg1.4xlarge',
            'g2.2xlarge',
            'g2.8xlarge',
            'cr1.8xlarge',
            'hs1.8xlarge',
            'i2.xlarge',
            'i2.2xlarge',
            'i2.4xlarge',
            'i2.8xlarge',
            'd2.xlarge',
            'd2.2xlarge',
            'd2.4xlarge',
            'd2.8xlarge',
            'r3.large',
            'r3.xlarge',
            'r3.2xlarge',
            'r3.4xlarge',
            'r3.8xlarge',
            'r4.large',
            'r4.xlarge',
            'r4.2xlarge',
            'r4.4xlarge',
            'r4.8xlarge',
            'r4.16xlarge',
            't2.nano',
            't2.micro',
            't2.small',
            't2.medium',
            't2.large',
            'x1.32xlarge'
        ]
    },
    # US West (Oregon) Region
    'us-west-2': {
        'endpoint': 'ec2.us-west-2.amazonaws.com',
        'api_name': 'ec2_us_west_oregon',
        'country': 'US',
        'signature_version': '2',
        'instance_types': [
            't1.micro',
            'm1.small',
            'm1.medium',
            'm1.large',
            'm1.xlarge',
            'm2.xlarge',
            'm2.2xlarge',
            'm2.4xlarge',
            'm3.medium',
            'm3.large',
            'm3.xlarge',
            'm3.2xlarge',
            'm4.large',
            'm4.xlarge',
            'm4.2xlarge',
            'm4.4xlarge',
            'm4.10xlarge',
            'm4.16xlarge',
            'c1.medium',
            'c1.xlarge',
            'g2.2xlarge',
            'g2.8xlarge',
            'p2.xlarge',
            'p2.8xlarge',
            'p2.16xlarge',
            'c3.large',
            'c3.xlarge',
            'c3.2xlarge',
            'c3.4xlarge',
            'c3.8xlarge',
            'c4.large',
            'c4.xlarge',
            'c4.2xlarge',
            'c4.4xlarge',
            'c4.8xlarge',
            'hs1.8xlarge',
            'cc2.8xlarge',
            'i2.xlarge',
            'i2.2xlarge',
            'i2.4xlarge',
            'i2.8xlarge',
            'd2.xlarge',
            'd2.2xlarge',
            'd2.4xlarge',
            'd2.8xlarge',
            'r3.large',
            'r3.xlarge',
            'r3.2xlarge',
            'r3.4xlarge',
            'r3.8xlarge',
            'r4.large',
            'r4.xlarge',
            'r4.2xlarge',
            'r4.4xlarge',
            'r4.8xlarge',
            'r4.16xlarge',
            't2.nano',
            't2.micro',
            't2.small',
            't2.medium',
            't2.large',
            'x1.32xlarge'
        ]
    },
    # EU (Ireland) Region
    'eu-west-1': {
        'endpoint': 'ec2.eu-west-1.amazonaws.com',
        'api_name': 'ec2_eu_west',
        'country': 'Ireland',
        'signature_version': '2',
        'instance_types': [
            't1.micro',
            'm1.small',
            'm1.medium',
            'm1.large',
            'm1.xlarge',
            'm2.xlarge',
            'm2.2xlarge',
            'm2.4xlarge',
            'm3.medium',
            'm3.large',
            'm3.xlarge',
            'm3.2xlarge',
            'm4.large',
            'm4.xlarge',
            'm4.2xlarge',
            'm4.4xlarge',
            'm4.10xlarge',
            'm4.16xlarge',
            'c1.medium',
            'c1.xlarge',
            'g2.2xlarge',
            'g2.8xlarge',
            'c3.large',
            'c3.xlarge',
            'c3.2xlarge',
            'c3.4xlarge',
            'c3.8xlarge',
            'c4.large',
            'c4.xlarge',
            'c4.2xlarge',
            'c4.4xlarge',
            'c4.8xlarge',
            'hs1.8xlarge',
            'cc2.8xlarge',
            'i2.xlarge',
            'i2.2xlarge',
            'i2.4xlarge',
            'i2.8xlarge',
            'd2.xlarge',
            'd2.2xlarge',
            'd2.4xlarge',
            'd2.8xlarge',
            'r3.large',
            'r3.xlarge',
            'r3.2xlarge',
            'r3.4xlarge',
            'r3.8xlarge',
            'r4.large',
            'r4.xlarge',
            'r4.2xlarge',
            'r4.4xlarge',
            'r4.8xlarge',
            'r4.16xlarge',
            't2.nano',
            't2.micro',
            't2.small',
            't2.medium',
            't2.large',
            'x1.32xlarge'
        ]
    },
    # EU (London) Region
    # NOTE(review): list appears copied from us-east-1 (cc2, cg1, cr1, hs1
    # were never offered in London) -- verify.
    'eu-west-2': {
        'endpoint': 'ec2.eu-west-2.amazonaws.com',
        'api_name': 'ec2_eu_west_london',
        'country': 'United Kingdom',
        'signature_version': '4',
        'instance_types': [
            't1.micro',
            'm1.small',
            'm1.medium',
            'm1.large',
            'm1.xlarge',
            'm2.xlarge',
            'm2.2xlarge',
            'm2.4xlarge',
            'm3.medium',
            'm3.large',
            'm3.xlarge',
            'm3.2xlarge',
            'm4.large',
            'm4.xlarge',
            'm4.2xlarge',
            'm4.4xlarge',
            'm4.10xlarge',
            'm4.16xlarge',
            'c1.medium',
            'c1.xlarge',
            'cc2.8xlarge',
            'c3.large',
            'c3.xlarge',
            'c3.2xlarge',
            'c3.4xlarge',
            'c3.8xlarge',
            'c4.large',
            'c4.xlarge',
            'c4.2xlarge',
            'c4.4xlarge',
            'c4.8xlarge',
            'cg1.4xlarge',
            'g2.2xlarge',
            'g2.8xlarge',
            'cr1.8xlarge',
            'hs1.8xlarge',
            'i2.xlarge',
            'i2.2xlarge',
            'i2.4xlarge',
            'i2.8xlarge',
            'd2.xlarge',
            'd2.2xlarge',
            'd2.4xlarge',
            'd2.8xlarge',
            'r3.large',
            'r3.xlarge',
            'r3.2xlarge',
            'r3.4xlarge',
            'r3.8xlarge',
            't2.nano',
            't2.micro',
            't2.small',
            't2.medium',
            't2.large',
            'x1.32xlarge'
        ]
    },
    # EU (Frankfurt) Region
    'eu-central-1': {
        'endpoint': 'ec2.eu-central-1.amazonaws.com',
        'api_name': 'ec2_eu_central',
        # 'country' holds a city name here, unlike the other entries.
        'country': 'Frankfurt',
        'signature_version': '4',
        'instance_types': [
            'm3.medium',
            'm3.large',
            'm3.xlarge',
            'm3.2xlarge',
            'c3.large',
            'c3.xlarge',
            'c3.2xlarge',
            'c3.4xlarge',
            'c4.large',
            'c4.xlarge',
            'c4.2xlarge',
            'c4.4xlarge',
            'c4.8xlarge',
            'm4.large',
            'm4.xlarge',
            'm4.2xlarge',
            'm4.4xlarge',
            'm4.10xlarge',
            'm4.16xlarge',
            'c3.8xlarge',
            'i2.xlarge',
            'i2.2xlarge',
            'i2.4xlarge',
            'i2.8xlarge',
            'd2.xlarge',
            'd2.2xlarge',
            'd2.4xlarge',
            'd2.8xlarge',
            'r3.large',
            'r3.xlarge',
            'r3.2xlarge',
            'r3.4xlarge',
            'r3.8xlarge',
            'r4.large',
            'r4.xlarge',
            'r4.2xlarge',
            'r4.4xlarge',
            'r4.8xlarge',
            'r4.16xlarge',
            't2.micro',
            't2.small',
            't2.medium',
            't2.large',
            'x1.32xlarge'
        ]
    },
    # Asia Pacific (Mumbai, India) Region
    'ap-south-1': {
        'endpoint': 'ec2.ap-south-1.amazonaws.com',
        'api_name': 'ec2_ap_south_1',
        'country': 'India',
        'signature_version': '4',
        'instance_types': [
            't2.nano',
            't2.micro',
            't2.small',
            't2.medium',
            't2.large',
            'm4.large',
            'm4.xlarge',
            'm4.2xlarge',
            'm4.4xlarge',
            'm4.10xlarge',
            'm4.16xlarge',
            'c4.large',
            'c4.xlarge',
            'c4.2xlarge',
            'c4.4xlarge',
            'c4.8xlarge',
            'r3.large',
            'r3.xlarge',
            'r3.2xlarge',
            'r3.4xlarge',
            'r3.8xlarge',
            'r4.large',
            'r4.xlarge',
            'r4.2xlarge',
            'r4.4xlarge',
            'r4.8xlarge',
            'r4.16xlarge',
            'i2.xlarge',
            'i2.2xlarge',
            'i2.4xlarge',
            'i2.8xlarge',
            'd2.xlarge',
            'd2.2xlarge',
            'd2.4xlarge',
            'd2.8xlarge'
        ]
    },
    # Asia Pacific (Singapore) Region
    'ap-southeast-1': {
        'endpoint': 'ec2.ap-southeast-1.amazonaws.com',
        'api_name': 'ec2_ap_southeast',
        'country': 'Singapore',
        'signature_version': '2',
        'instance_types': [
            't1.micro',
            'm1.small',
            'm1.medium',
            'm1.large',
            'm1.xlarge',
            'm2.xlarge',
            'm2.2xlarge',
            'm2.4xlarge',
            'm3.medium',
            'm3.large',
            'm3.xlarge',
            'm3.2xlarge',
            'm4.large',
            'm4.xlarge',
            'm4.2xlarge',
            'm4.4xlarge',
            'm4.10xlarge',
            'm4.16xlarge',
            'c1.medium',
            'c1.xlarge',
            'c3.large',
            'c3.xlarge',
            'c3.2xlarge',
            'c3.4xlarge',
            'c3.8xlarge',
            'c4.large',
            'c4.xlarge',
            'c4.2xlarge',
            'c4.4xlarge',
            'c4.8xlarge',
            'hs1.8xlarge',
            'i2.xlarge',
            'i2.2xlarge',
            'i2.4xlarge',
            'i2.8xlarge',
            'd2.xlarge',
            'd2.2xlarge',
            'd2.4xlarge',
            'd2.8xlarge',
            't2.nano',
            't2.micro',
            't2.small',
            't2.medium',
            't2.large',
            'r4.large',
            'r4.xlarge',
            'r4.2xlarge',
            'r4.4xlarge',
            'r4.8xlarge',
            'r4.16xlarge',
            'x1.32xlarge'
        ]
    },
    # Asia Pacific (Tokyo) Region
    'ap-northeast-1': {
        'endpoint': 'ec2.ap-northeast-1.amazonaws.com',
        'api_name': 'ec2_ap_northeast',
        'country': 'Japan',
        'signature_version': '2',
        'instance_types': [
            't1.micro',
            'm1.small',
            'm1.medium',
            'm1.large',
            'm1.xlarge',
            'm2.xlarge',
            'm2.2xlarge',
            'm2.4xlarge',
            'm3.medium',
            'm3.large',
            'm3.xlarge',
            'm3.2xlarge',
            'c1.medium',
            'g2.2xlarge',
            'g2.8xlarge',
            'c1.xlarge',
            'c3.large',
            'c3.xlarge',
            'c3.2xlarge',
            'c3.4xlarge',
            'c3.8xlarge',
            'c4.large',
            'c4.xlarge',
            'c4.2xlarge',
            'c4.4xlarge',
            'c4.8xlarge',
            'm4.large',
            'm4.xlarge',
            'm4.2xlarge',
            'm4.4xlarge',
            'm4.10xlarge',
            'm4.16xlarge',
            'hs1.8xlarge',
            'i2.xlarge',
            'i2.2xlarge',
            'i2.4xlarge',
            'i2.8xlarge',
            'd2.xlarge',
            'd2.2xlarge',
            'd2.4xlarge',
            'd2.8xlarge',
            'r3.large',
            'r3.xlarge',
            'r3.2xlarge',
            'r3.4xlarge',
            'r3.8xlarge',
            'r4.large',
            'r4.xlarge',
            'r4.2xlarge',
            'r4.4xlarge',
            'r4.8xlarge',
            'r4.16xlarge',
            't2.nano',
            't2.micro',
            't2.small',
            't2.medium',
            't2.large',
            'x1.32xlarge'
        ]
    },
    # Asia Pacific (Seoul) Region
    'ap-northeast-2': {
        'endpoint': 'ec2.ap-northeast-2.amazonaws.com',
        # NOTE(review): 'api_name' duplicates ap-northeast-1's (Tokyo);
        # probably should be 'ec2_ap_northeast_2'. Left unchanged because it
        # may be used as a pricing lookup key -- confirm before fixing.
        'api_name': 'ec2_ap_northeast',
        'country': 'South Korea',
        'signature_version': '4',
        'instance_types': [
            'c4.large',
            'c4.xlarge',
            'c4.2xlarge',
            'c4.4xlarge',
            'c4.8xlarge',
            'm4.large',
            'm4.xlarge',
            'm4.2xlarge',
            'm4.4xlarge',
            'm4.10xlarge',
            'm4.16xlarge',
            'i2.xlarge',
            'i2.2xlarge',
            'i2.4xlarge',
            'i2.8xlarge',
            'd2.xlarge',
            'd2.2xlarge',
            'd2.4xlarge',
            'd2.8xlarge',
            'r3.large',
            'r3.xlarge',
            'r3.2xlarge',
            'r3.4xlarge',
            'r3.8xlarge',
            'r4.large',
            'r4.xlarge',
            'r4.2xlarge',
            'r4.4xlarge',
            'r4.8xlarge',
            'r4.16xlarge',
            't2.nano',
            't2.micro',
            't2.small',
            't2.medium',
            't2.large',
            'x1.32xlarge'
        ]
    },
    # South America (Sao Paulo) Region
    'sa-east-1': {
        'endpoint': 'ec2.sa-east-1.amazonaws.com',
        'api_name': 'ec2_sa_east',
        'country': 'Brazil',
        'signature_version': '2',
        'instance_types': [
            't1.micro',
            'm1.small',
            'm1.medium',
            'm1.large',
            'm1.xlarge',
            'm2.xlarge',
            'm2.2xlarge',
            'm2.4xlarge',
            'm3.medium',
            'm3.large',
            'm3.xlarge',
            'm3.2xlarge',
            'm4.large',
            'm4.xlarge',
            'm4.2xlarge',
            'm4.4xlarge',
            'm4.10xlarge',
            'm4.16xlarge',
            'c1.medium',
            'c1.xlarge',
            'r4.large',
            'r4.xlarge',
            'r4.2xlarge',
            'r4.4xlarge',
            'r4.8xlarge',
            'r4.16xlarge',
            't2.nano',
            't2.micro',
            't2.small',
            't2.medium',
            't2.large'
        ]
    },
    # Asia Pacific (Sydney) Region
    'ap-southeast-2': {
        'endpoint': 'ec2.ap-southeast-2.amazonaws.com',
        'api_name': 'ec2_ap_southeast_2',
        'country': 'Australia',
        'signature_version': '2',
        'instance_types': [
            't1.micro',
            'm1.small',
            'm1.medium',
            'm1.large',
            'm1.xlarge',
            'm2.xlarge',
            'm2.2xlarge',
            'm2.4xlarge',
            'm3.medium',
            'm3.large',
            'm3.xlarge',
            'm3.2xlarge',
            'm4.large',
            'm4.xlarge',
            'm4.2xlarge',
            'm4.4xlarge',
            'm4.10xlarge',
            'm4.16xlarge',
            'c1.medium',
            'c1.xlarge',
            'c3.large',
            'c3.xlarge',
            'c3.2xlarge',
            'c3.4xlarge',
            'c3.8xlarge',
            'c4.large',
            'c4.xlarge',
            'c4.2xlarge',
            'c4.4xlarge',
            'c4.8xlarge',
            'hs1.8xlarge',
            'i2.xlarge',
            'i2.2xlarge',
            'i2.4xlarge',
            'i2.8xlarge',
            'd2.xlarge',
            'd2.2xlarge',
            'd2.4xlarge',
            'd2.8xlarge',
            'r3.large',
            'r3.xlarge',
            'r3.2xlarge',
            'r3.4xlarge',
            'r3.8xlarge',
            'r4.large',
            'r4.xlarge',
            'r4.2xlarge',
            'r4.4xlarge',
            'r4.8xlarge',
            'r4.16xlarge',
            't2.micro',
            't2.small',
            't2.medium',
            't2.large',
            'x1.32xlarge'
        ]
    },
    # Canada (Central) Region
    # NOTE(review): list appears copied from us-east-1 (cc2, cg1, cr1, hs1
    # were never offered in ca-central-1) -- verify.
    'ca-central-1': {
        'endpoint': 'ec2.ca-central-1.amazonaws.com',
        'api_name': 'ec2_ca_central_1',
        'country': 'Canada',
        'signature_version': '4',
        'instance_types': [
            't1.micro',
            'm1.small',
            'm1.medium',
            'm1.large',
            'm1.xlarge',
            'm2.xlarge',
            'm2.2xlarge',
            'm2.4xlarge',
            'm3.medium',
            'm3.large',
            'm3.xlarge',
            'm3.2xlarge',
            'm4.large',
            'm4.xlarge',
            'm4.2xlarge',
            'm4.4xlarge',
            'm4.10xlarge',
            'm4.16xlarge',
            'c1.medium',
            'c1.xlarge',
            'cc2.8xlarge',
            'c3.large',
            'c3.xlarge',
            'c3.2xlarge',
            'c3.4xlarge',
            'c3.8xlarge',
            'c4.large',
            'c4.xlarge',
            'c4.2xlarge',
            'c4.4xlarge',
            'c4.8xlarge',
            'cg1.4xlarge',
            'g2.2xlarge',
            'g2.8xlarge',
            'cr1.8xlarge',
            'hs1.8xlarge',
            'i2.xlarge',
            'i2.2xlarge',
            'i2.4xlarge',
            'i2.8xlarge',
            'd2.xlarge',
            'd2.2xlarge',
            'd2.4xlarge',
            'd2.8xlarge',
            'r3.large',
            'r3.xlarge',
            'r3.2xlarge',
            'r3.4xlarge',
            'r3.8xlarge',
            't2.nano',
            't2.micro',
            't2.small',
            't2.medium',
            't2.large',
            'x1.32xlarge'
        ]
    },
    # AWS GovCloud (US West) Region
    'us-gov-west-1': {
        'endpoint': 'ec2.us-gov-west-1.amazonaws.com',
        'api_name': 'ec2_us_govwest',
        'country': 'US',
        'signature_version': '2',
        'instance_types': [
            't1.micro',
            'm1.small',
            'm1.medium',
            'm1.large',
            'm1.xlarge',
            'm2.xlarge',
            'm2.2xlarge',
            'm2.4xlarge',
            'm3.medium',
            'm3.large',
            'm3.xlarge',
            'm3.2xlarge',
            'm4.large',
            'm4.xlarge',
            'm4.2xlarge',
            'm4.4xlarge',
            'm4.10xlarge',
            'm4.16xlarge',
            'c1.medium',
            'c1.xlarge',
            'g2.2xlarge',
            'g2.8xlarge',
            'c3.large',
            'c3.xlarge',
            'c3.2xlarge',
            'c3.4xlarge',
            'c3.8xlarge',
            'c4.large',
            'c4.xlarge',
            'c4.2xlarge',
            'c4.4xlarge',
            'c4.8xlarge',
            'hs1.4xlarge',
            'hs1.8xlarge',
            'i2.xlarge',
            'i2.2xlarge',
            'i2.4xlarge',
            'i2.8xlarge',
            'r3.large',
            'r3.xlarge',
            'r3.2xlarge',
            'r3.4xlarge',
            'r3.8xlarge',
            'r4.large',
            'r4.xlarge',
            'r4.2xlarge',
            'r4.4xlarge',
            'r4.8xlarge',
            'r4.16xlarge',
            't2.nano',
            't2.micro',
            't2.small',
            't2.medium',
            't2.large'
        ]
    },
    # China (North) Region
    'cn-north-1': {
        'endpoint': 'ec2.cn-north-1.amazonaws.com.cn',
        'api_name': 'ec2_cn_north',
        'country': 'China',
        'signature_version': '4',
        'instance_types': [
            't1.micro',
            't2.micro',
            't2.small',
            't2.medium',
            't2.large',
            't2.xlarge',
            't2.2xlarge',
            'm4.large',
            'm4.xlarge',
            'm4.2xlarge',
            'm4.4xlarge',
            'm4.10xlarge',
            'm4.16xlarge',
            'm3.medium',
            'm3.large',
            'm3.xlarge',
            'm3.2xlarge',
            'm1.small',
            'c4.large',
            'c4.xlarge',
            'c4.2xlarge',
            'c4.4xlarge',
            'c4.8xlarge',
            'c3.large',
            'c3.xlarge',
            'c3.2xlarge',
            'c3.4xlarge',
            'c3.8xlarge',
            'r4.large',
            'r4.xlarge',
            'r4.2xlarge',
            'r4.4xlarge',
            'r4.8xlarge',
            'r4.16xlarge',
            'r3.large',
            'r3.xlarge',
            'r3.2xlarge',
            'r3.4xlarge',
            'r3.8xlarge',
            'd2.xlarge',
            'd2.2xlarge',
            'd2.4xlarge',
            'd2.8xlarge',
            'i2.xlarge',
            'i2.2xlarge',
            'i2.4xlarge',
            'i2.8xlarge',
        ]
    },
    # Pseudo-region: Nimbus clouds have no fixed endpoint.
    'nimbus': {
        # Nimbus clouds have 3 EC2-style instance types but their particular
        # RAM allocations are configured by the admin
        'country': 'custom',
        'signature_version': '2',
        'instance_types': [
            'm1.small',
            'm1.large',
            'm1.xlarge'
        ]
    }
}
"""
Sizes must be hardcoded because Outscale doesn't provide an API to fetch them.
Outscale cloud instances share some names with EC2 but have different
specifications so declare them in another constant.
"""
# Hardcoded Outscale sizes keyed by instance-type id.
# Following the EC2 convention above, 'ram' is presumably MiB and 'disk' GB
# (0 == no instance storage). The slightly-off ram values (e.g. 15359, 7679)
# look deliberate (as reported by Outscale) -- TODO confirm.
OUTSCALE_INSTANCE_TYPES = {
    't1.micro': {
        'id': 't1.micro',
        'name': 'Micro Instance',
        'ram': 615,
        'disk': 0,
        'bandwidth': None
    },
    'm1.small': {
        'id': 'm1.small',
        'name': 'Standard Small Instance',
        'ram': 1740,
        'disk': 150,
        'bandwidth': None
    },
    'm1.medium': {
        'id': 'm1.medium',
        'name': 'Standard Medium Instance',
        'ram': 3840,
        'disk': 420,
        'bandwidth': None
    },
    'm1.large': {
        'id': 'm1.large',
        'name': 'Standard Large Instance',
        'ram': 7680,
        'disk': 840,
        'bandwidth': None
    },
    'm1.xlarge': {
        'id': 'm1.xlarge',
        'name': 'Standard Extra Large Instance',
        'ram': 15360,
        'disk': 1680,
        'bandwidth': None
    },
    'c1.medium': {
        'id': 'c1.medium',
        'name': 'Compute Optimized Medium Instance',
        'ram': 1740,
        'disk': 340,
        'bandwidth': None
    },
    'c1.xlarge': {
        'id': 'c1.xlarge',
        'name': 'Compute Optimized Extra Large Instance',
        'ram': 7168,
        'disk': 1680,
        'bandwidth': None
    },
    'c3.large': {
        'id': 'c3.large',
        'name': 'Compute Optimized Large Instance',
        'ram': 3840,
        'disk': 32,
        'bandwidth': None
    },
    'c3.xlarge': {
        'id': 'c3.xlarge',
        'name': 'Compute Optimized Extra Large Instance',
        'ram': 7168,
        'disk': 80,
        'bandwidth': None
    },
    'c3.2xlarge': {
        'id': 'c3.2xlarge',
        'name': 'Compute Optimized Double Extra Large Instance',
        'ram': 15359,
        'disk': 160,
        'bandwidth': None
    },
    'c3.4xlarge': {
        'id': 'c3.4xlarge',
        'name': 'Compute Optimized Quadruple Extra Large Instance',
        'ram': 30720,
        'disk': 320,
        'bandwidth': None
    },
    'c3.8xlarge': {
        'id': 'c3.8xlarge',
        'name': 'Compute Optimized Eight Extra Large Instance',
        'ram': 61440,
        'disk': 640,
        'bandwidth': None
    },
    'm2.xlarge': {
        'id': 'm2.xlarge',
        'name': 'High Memory Extra Large Instance',
        'ram': 17510,
        'disk': 420,
        'bandwidth': None
    },
    'm2.2xlarge': {
        'id': 'm2.2xlarge',
        'name': 'High Memory Double Extra Large Instance',
        'ram': 35020,
        'disk': 840,
        'bandwidth': None
    },
    'm2.4xlarge': {
        'id': 'm2.4xlarge',
        'name': 'High Memory Quadruple Extra Large Instance',
        'ram': 70042,
        'disk': 1680,
        'bandwidth': None
    },
    'nv1.small': {
        'id': 'nv1.small',
        'name': 'GPU Small Instance',
        'ram': 1739,
        'disk': 150,
        'bandwidth': None
    },
    'nv1.medium': {
        'id': 'nv1.medium',
        'name': 'GPU Medium Instance',
        'ram': 3839,
        'disk': 420,
        'bandwidth': None
    },
    'nv1.large': {
        'id': 'nv1.large',
        'name': 'GPU Large Instance',
        'ram': 7679,
        'disk': 840,
        'bandwidth': None
    },
    'nv1.xlarge': {
        'id': 'nv1.xlarge',
        'name': 'GPU Extra Large Instance',
        'ram': 15358,
        'disk': 1680,
        'bandwidth': None
    },
    'g2.2xlarge': {
        'id': 'g2.2xlarge',
        'name': 'GPU Double Extra Large Instance',
        'ram': 15360,
        'disk': 60,
        'bandwidth': None
    },
    'cc1.4xlarge': {
        'id': 'cc1.4xlarge',
        'name': 'Cluster Compute Quadruple Extra Large Instance',
        'ram': 24576,
        'disk': 1680,
        'bandwidth': None
    },
    'cc2.8xlarge': {
        'id': 'cc2.8xlarge',
        'name': 'Cluster Compute Eight Extra Large Instance',
        'ram': 65536,
        'disk': 3360,
        'bandwidth': None
    },
    'hi1.xlarge': {
        'id': 'hi1.xlarge',
        'name': 'High Storage Extra Large Instance',
        'ram': 15361,
        'disk': 1680,
        'bandwidth': None
    },
    'm3.xlarge': {
        'id': 'm3.xlarge',
        'name': 'High Storage Optimized Extra Large Instance',
        'ram': 15357,
        'disk': 0,
        'bandwidth': None
    },
    'm3.2xlarge': {
        'id': 'm3.2xlarge',
        'name': 'High Storage Optimized Double Extra Large Instance',
        'ram': 30720,
        'disk': 0,
        'bandwidth': None
    },
    'm3s.xlarge': {
        'id': 'm3s.xlarge',
        'name': 'High Storage Optimized Extra Large Instance',
        'ram': 15359,
        'disk': 0,
        'bandwidth': None
    },
    'm3s.2xlarge': {
        'id': 'm3s.2xlarge',
        'name': 'High Storage Optimized Double Extra Large Instance',
        'ram': 30719,
        'disk': 0,
        'bandwidth': None
    },
    'cr1.8xlarge': {
        'id': 'cr1.8xlarge',
        'name': 'Memory Optimized Eight Extra Large Instance',
        'ram': 249855,
        'disk': 240,
        'bandwidth': None
    },
    'os1.2xlarge': {
        'id': 'os1.2xlarge',
        'name': 'Memory Optimized, High Storage, Passthrough NIC Double Extra '
                'Large Instance',
        'ram': 65536,
        'disk': 60,
        'bandwidth': None
    },
    'os1.4xlarge': {
        'id': 'os1.4xlarge',
        'name': 'Memory Optimized, High Storage, Passthrough NIC Quadruple Ext'
                'ra Large Instance',
        'ram': 131072,
        'disk': 120,
        'bandwidth': None
    },
    'os1.8xlarge': {
        'id': 'os1.8xlarge',
        'name': 'Memory Optimized, High Storage, Passthrough NIC Eight Extra L'
                'arge Instance',
        'ram': 249856,
        'disk': 500,
        'bandwidth': None
    },
    'oc1.4xlarge': {
        'id': 'oc1.4xlarge',
        'name': 'Outscale Quadruple Extra Large Instance',
        'ram': 24575,
        'disk': 1680,
        'bandwidth': None
    },
    'oc2.8xlarge': {
        'id': 'oc2.8xlarge',
        'name': 'Outscale Eight Extra Large Instance',
        'ram': 65535,
        'disk': 3360,
        'bandwidth': None
    }
}
"""
The function manipulating Outscale cloud regions will be overridden because
Outscale instances types are in a separate dict so also declare Outscale cloud
regions in some other constants.
"""
# Per-region metadata for the Outscale SAS cloud: endpoint, pricing
# 'api_name' and the instance types offered.
# Fix: the original literal repeated the 'us-east-2' key four times; Python
# silently keeps only the last duplicate, which dropped the variant listing
# the p2 GPU types. The entries are now deduplicated, keeping the superset
# that includes p2.xlarge/p2.8xlarge/p2.16xlarge.
OUTSCALE_SAS_REGION_DETAILS = {
    'eu-west-3': {
        'endpoint': 'api-ppd.outscale.com',
        'api_name': 'osc_sas_eu_west_3',
        'country': 'FRANCE',
        'instance_types': [
            't1.micro',
            'm1.small',
            'm1.medium',
            'm1.large',
            'm1.xlarge',
            'c1.medium',
            'c1.xlarge',
            'm2.xlarge',
            'm2.2xlarge',
            'm2.4xlarge',
            'nv1.small',
            'nv1.medium',
            'nv1.large',
            'nv1.xlarge',
            'cc1.4xlarge',
            'cc2.8xlarge',
            'm3.xlarge',
            'm3.2xlarge',
            'cr1.8xlarge',
            'os1.8xlarge'
        ]
    },
    'eu-west-1': {
        'endpoint': 'api.eu-west-1.outscale.com',
        'api_name': 'osc_sas_eu_west_1',
        'country': 'FRANCE',
        'instance_types': [
            't1.micro',
            'm1.small',
            'm1.medium',
            'm1.large',
            'm1.xlarge',
            'c1.medium',
            'c1.xlarge',
            'm2.xlarge',
            'm2.2xlarge',
            'm2.4xlarge',
            'nv1.small',
            'nv1.medium',
            'nv1.large',
            'nv1.xlarge',
            'cc1.4xlarge',
            'cc2.8xlarge',
            'm3.xlarge',
            'm3.2xlarge',
            'cr1.8xlarge',
            'os1.8xlarge'
        ]
    },
    'eu-west-2': {
        'endpoint': 'fcu.eu-west-2.outscale.com',
        'api_name': 'osc_sas_eu_west_2',
        'country': 'FRANCE',
        'instance_types': [
            't1.micro',
            'm1.small',
            'm1.medium',
            'm1.large',
            'm1.xlarge',
            'c1.medium',
            'c1.xlarge',
            'm2.xlarge',
            'm2.2xlarge',
            'm2.4xlarge',
            'nv1.small',
            'nv1.medium',
            'nv1.large',
            'nv1.xlarge',
            'cc1.4xlarge',
            'cc2.8xlarge',
            'm3.xlarge',
            'm3.2xlarge',
            'cr1.8xlarge',
            'os1.8xlarge'
        ]
    },
    'us-east-1': {
        'endpoint': 'api.us-east-1.outscale.com',
        'api_name': 'osc_sas_us_east_1',
        'country': 'USA',
        'instance_types': [
            't1.micro',
            'm1.small',
            'm1.medium',
            'm1.large',
            'm1.xlarge',
            'c1.medium',
            'c1.xlarge',
            'm2.xlarge',
            'm2.2xlarge',
            'm2.4xlarge',
            'nv1.small',
            'nv1.medium',
            'nv1.large',
            'nv1.xlarge',
            'cc1.4xlarge',
            'cc2.8xlarge',
            'm3.xlarge',
            'm3.2xlarge',
            'cr1.8xlarge',
            'os1.8xlarge'
        ]
    },
    'us-east-2': {
        'endpoint': 'fcu.us-east-2.outscale.com',
        'api_name': 'osc_sas_us_east_2',
        'country': 'USA',
        'instance_types': [
            't1.micro',
            'm1.small',
            'm1.medium',
            'm1.large',
            'm1.xlarge',
            'c1.medium',
            'c1.xlarge',
            'm2.xlarge',
            'm2.2xlarge',
            'm2.4xlarge',
            'nv1.small',
            'nv1.medium',
            'nv1.large',
            'nv1.xlarge',
            'cc1.4xlarge',
            'cc2.8xlarge',
            'm3.xlarge',
            'm3.2xlarge',
            'p2.xlarge',
            'p2.8xlarge',
            'p2.16xlarge',
            'cr1.8xlarge',
            'os1.8xlarge'
        ]
    }
}
# Instance types available in every Outscale INC region.  Declared once and
# copied per region below instead of being repeated for each region.
_OUTSCALE_INC_BASE_INSTANCE_TYPES = [
    't1.micro',
    'm1.small',
    'm1.medium',
    'm1.large',
    'm1.xlarge',
    'c1.medium',
    'c1.xlarge',
    'm2.xlarge',
    'm2.2xlarge',
    'm2.4xlarge',
    'nv1.small',
    'nv1.medium',
    'nv1.large',
    'nv1.xlarge',
    'cc1.4xlarge',
    'cc2.8xlarge',
    'm3.xlarge',
    'm3.2xlarge',
    'cr1.8xlarge',
    'os1.8xlarge'
]
# Per-region endpoint/API metadata for the Outscale INC cloud.
OUTSCALE_INC_REGION_DETAILS = {
    'eu-west-1': {
        'endpoint': 'api.eu-west-1.outscale.com',
        'api_name': 'osc_inc_eu_west_1',
        'country': 'FRANCE',
        # eu-west-1 additionally offers the p2.* types, inserted between
        # 'm2.4xlarge' and 'nv1.small' as in the original listing.
        'instance_types': (_OUTSCALE_INC_BASE_INSTANCE_TYPES[:10] +
                           ['p2.xlarge', 'p2.8xlarge', 'p2.16xlarge'] +
                           _OUTSCALE_INC_BASE_INSTANCE_TYPES[10:])
    },
    'eu-west-2': {
        'endpoint': 'fcu.eu-west-2.outscale.com',
        'api_name': 'osc_inc_eu_west_2',
        'country': 'FRANCE',
        'instance_types': list(_OUTSCALE_INC_BASE_INSTANCE_TYPES)
    },
    'eu-west-3': {
        'endpoint': 'api-ppd.outscale.com',
        'api_name': 'osc_inc_eu_west_3',
        'country': 'FRANCE',
        'instance_types': list(_OUTSCALE_INC_BASE_INSTANCE_TYPES)
    },
    'us-east-1': {
        'endpoint': 'api.us-east-1.outscale.com',
        'api_name': 'osc_inc_us_east_1',
        'country': 'USA',
        'instance_types': list(_OUTSCALE_INC_BASE_INSTANCE_TYPES)
    },
    'us-east-2': {
        'endpoint': 'fcu.us-east-2.outscale.com',
        'api_name': 'osc_inc_us_east_2',
        'country': 'USA',
        'instance_types': list(_OUTSCALE_INC_BASE_INSTANCE_TYPES)
    }
}
"""
Define the extra dictionary for specific resources
"""
# Maps a resource kind to the extra attributes parsed out of the matching
# EC2 API response element.  Each attribute entry gives the XML 'xpath' to
# read and a 'transform_func' applied to the raw text value.  NOTE(review):
# presumably consumed by the driver's generic extra-dict parsing helper
# (not visible in this chunk) -- confirm against the rest of the file.
RESOURCE_EXTRA_ATTRIBUTES_MAP = {
    # EBS attachment details nested under an 'ebs' element.
    'ebs_volume': {
        'snapshot_id': {
            'xpath': 'ebs/snapshotId',
            'transform_func': str
        },
        'volume_id': {
            'xpath': 'ebs/volumeId',
            'transform_func': str
        },
        'volume_size': {
            'xpath': 'ebs/volumeSize',
            'transform_func': int
        },
        'delete': {
            'xpath': 'ebs/deleteOnTermination',
            'transform_func': str
        },
        'volume_type': {
            'xpath': 'ebs/volumeType',
            'transform_func': str
        },
        'iops': {
            'xpath': 'ebs/iops',
            'transform_func': int
        }
    },
    # Elastic IP (address) allocation/association details.
    'elastic_ip': {
        'allocation_id': {
            'xpath': 'allocationId',
            'transform_func': str,
        },
        'association_id': {
            'xpath': 'associationId',
            'transform_func': str,
        },
        'interface_id': {
            'xpath': 'networkInterfaceId',
            'transform_func': str,
        },
        'owner_id': {
            'xpath': 'networkInterfaceOwnerId',
            'transform_func': str,
        },
        'private_ip': {
            'xpath': 'privateIp',
            'transform_func': str,
        }
    },
    # AMI (machine image) metadata.
    'image': {
        'state': {
            'xpath': 'imageState',
            'transform_func': str
        },
        'owner_id': {
            'xpath': 'imageOwnerId',
            'transform_func': str
        },
        'owner_alias': {
            'xpath': 'imageOwnerAlias',
            'transform_func': str
        },
        'is_public': {
            'xpath': 'isPublic',
            'transform_func': str
        },
        'architecture': {
            'xpath': 'architecture',
            'transform_func': str
        },
        'image_type': {
            'xpath': 'imageType',
            'transform_func': str
        },
        'image_location': {
            'xpath': 'imageLocation',
            'transform_func': str
        },
        'platform': {
            'xpath': 'platform',
            'transform_func': str
        },
        'description': {
            'xpath': 'description',
            'transform_func': str
        },
        'root_device_type': {
            'xpath': 'rootDeviceType',
            'transform_func': str
        },
        'virtualization_type': {
            'xpath': 'virtualizationType',
            'transform_func': str
        },
        'hypervisor': {
            'xpath': 'hypervisor',
            'transform_func': str
        },
        'kernel_id': {
            'xpath': 'kernelId',
            'transform_func': str
        },
        'ramdisk_id': {
            'xpath': 'ramdiskId',
            'transform_func': str
        },
        'ena_support': {
            'xpath': 'enaSupport',
            'transform_func': str
        },
        'sriov_net_support': {
            'xpath': 'sriovNetSupport',
            'transform_func': str
        }
    },
    # VPC (network) metadata.
    'network': {
        'state': {
            'xpath': 'state',
            'transform_func': str
        },
        'dhcp_options_id': {
            'xpath': 'dhcpOptionsId',
            'transform_func': str
        },
        'instance_tenancy': {
            'xpath': 'instanceTenancy',
            'transform_func': str
        },
        'is_default': {
            'xpath': 'isDefault',
            'transform_func': str
        }
    },
    # VPC network interface (ENI) metadata.
    'network_interface': {
        'subnet_id': {
            'xpath': 'subnetId',
            'transform_func': str
        },
        'vpc_id': {
            'xpath': 'vpcId',
            'transform_func': str
        },
        'zone': {
            'xpath': 'availabilityZone',
            'transform_func': str
        },
        'description': {
            'xpath': 'description',
            'transform_func': str
        },
        'owner_id': {
            'xpath': 'ownerId',
            'transform_func': str
        },
        'mac_address': {
            'xpath': 'macAddress',
            'transform_func': str
        },
        'private_dns_name': {
            'xpath': 'privateIpAddressesSet/privateDnsName',
            'transform_func': str
        },
        'source_dest_check': {
            'xpath': 'sourceDestCheck',
            'transform_func': str
        }
    },
    # ENI attachment details nested under an 'attachment' element.
    'network_interface_attachment': {
        'attachment_id': {
            'xpath': 'attachment/attachmentId',
            'transform_func': str
        },
        'instance_id': {
            'xpath': 'attachment/instanceId',
            'transform_func': str
        },
        'owner_id': {
            'xpath': 'attachment/instanceOwnerId',
            'transform_func': str
        },
        'device_index': {
            'xpath': 'attachment/deviceIndex',
            'transform_func': int
        },
        'status': {
            'xpath': 'attachment/status',
            'transform_func': str
        },
        'attach_time': {
            'xpath': 'attachment/attachTime',
            'transform_func': parse_date
        },
        'delete': {
            'xpath': 'attachment/deleteOnTermination',
            'transform_func': str
        }
    },
    # EC2 instance (node) metadata.
    'node': {
        'availability': {
            'xpath': 'placement/availabilityZone',
            'transform_func': str
        },
        'architecture': {
            'xpath': 'architecture',
            'transform_func': str
        },
        'client_token': {
            'xpath': 'clientToken',
            'transform_func': str
        },
        'dns_name': {
            'xpath': 'dnsName',
            'transform_func': str
        },
        'hypervisor': {
            'xpath': 'hypervisor',
            'transform_func': str
        },
        'iam_profile': {
            'xpath': 'iamInstanceProfile/id',
            'transform_func': str
        },
        'image_id': {
            'xpath': 'imageId',
            'transform_func': str
        },
        'instance_id': {
            'xpath': 'instanceId',
            'transform_func': str
        },
        'instance_lifecycle': {
            'xpath': 'instanceLifecycle',
            'transform_func': str
        },
        'instance_tenancy': {
            'xpath': 'placement/tenancy',
            'transform_func': str
        },
        'instance_type': {
            'xpath': 'instanceType',
            'transform_func': str
        },
        'key_name': {
            'xpath': 'keyName',
            'transform_func': str
        },
        'launch_index': {
            'xpath': 'amiLaunchIndex',
            'transform_func': int
        },
        'launch_time': {
            'xpath': 'launchTime',
            'transform_func': str
        },
        'kernel_id': {
            'xpath': 'kernelId',
            'transform_func': str
        },
        'monitoring': {
            'xpath': 'monitoring/state',
            'transform_func': str
        },
        'platform': {
            'xpath': 'platform',
            'transform_func': str
        },
        'private_dns': {
            'xpath': 'privateDnsName',
            'transform_func': str
        },
        'ramdisk_id': {
            'xpath': 'ramdiskId',
            'transform_func': str
        },
        'root_device_type': {
            'xpath': 'rootDeviceType',
            'transform_func': str
        },
        'root_device_name': {
            'xpath': 'rootDeviceName',
            'transform_func': str
        },
        'reason': {
            'xpath': 'reason',
            'transform_func': str
        },
        'source_dest_check': {
            'xpath': 'sourceDestCheck',
            'transform_func': str
        },
        'status': {
            'xpath': 'instanceState/name',
            'transform_func': str
        },
        'subnet_id': {
            'xpath': 'subnetId',
            'transform_func': str
        },
        'virtualization_type': {
            'xpath': 'virtualizationType',
            'transform_func': str
        },
        'ebs_optimized': {
            'xpath': 'ebsOptimized',
            'transform_func': str
        },
        'vpc_id': {
            'xpath': 'vpcId',
            'transform_func': str
        }
    },
    # Reserved instance metadata.
    'reserved_node': {
        'instance_type': {
            'xpath': 'instanceType',
            'transform_func': str
        },
        'availability': {
            'xpath': 'availabilityZone',
            'transform_func': str
        },
        'start': {
            'xpath': 'start',
            'transform_func': str
        },
        'duration': {
            'xpath': 'duration',
            'transform_func': int
        },
        'usage_price': {
            'xpath': 'usagePrice',
            'transform_func': float
        },
        'fixed_price': {
            'xpath': 'fixedPrice',
            'transform_func': float
        },
        'instance_count': {
            'xpath': 'instanceCount',
            'transform_func': int
        },
        'description': {
            'xpath': 'productDescription',
            'transform_func': str
        },
        'instance_tenancy': {
            'xpath': 'instanceTenancy',
            'transform_func': str
        },
        'currency_code': {
            'xpath': 'currencyCode',
            'transform_func': str
        },
        'offering_type': {
            'xpath': 'offeringType',
            'transform_func': str
        }
    },
    # Security group metadata.
    'security_group': {
        'vpc_id': {
            'xpath': 'vpcId',
            'transform_func': str
        },
        'description': {
            'xpath': 'groupDescription',
            'transform_func': str
        },
        'owner_id': {
            'xpath': 'ownerId',
            'transform_func': str
        }
    },
    # EBS snapshot metadata.
    'snapshot': {
        'volume_id': {
            'xpath': 'volumeId',
            'transform_func': str
        },
        'state': {
            'xpath': 'status',
            'transform_func': str
        },
        'description': {
            'xpath': 'description',
            'transform_func': str
        },
        'progress': {
            'xpath': 'progress',
            'transform_func': str
        },
        'start_time': {
            'xpath': 'startTime',
            'transform_func': parse_date
        }
    },
    # VPC subnet metadata.
    'subnet': {
        'cidr_block': {
            'xpath': 'cidrBlock',
            'transform_func': str
        },
        'available_ips': {
            'xpath': 'availableIpAddressCount',
            'transform_func': int
        },
        'zone': {
            'xpath': 'availabilityZone',
            'transform_func': str
        },
        'vpc_id': {
            'xpath': 'vpcId',
            'transform_func': str
        }
    },
    # EBS volume metadata, including its first attachment entry.
    'volume': {
        'device': {
            'xpath': 'attachmentSet/item/device',
            'transform_func': str
        },
        'snapshot_id': {
            'xpath': 'snapshotId',
            # Empty snapshot IDs are normalized to None.
            'transform_func': lambda v: str(v) or None
        },
        'iops': {
            'xpath': 'iops',
            'transform_func': int
        },
        'zone': {
            'xpath': 'availabilityZone',
            'transform_func': str
        },
        'create_time': {
            'xpath': 'createTime',
            'transform_func': parse_date
        },
        'state': {
            'xpath': 'status',
            'transform_func': str
        },
        'attach_time': {
            'xpath': 'attachmentSet/item/attachTime',
            'transform_func': parse_date
        },
        'attachment_status': {
            'xpath': 'attachmentSet/item/status',
            'transform_func': str
        },
        'instance_id': {
            'xpath': 'attachmentSet/item/instanceId',
            'transform_func': str
        },
        'delete': {
            'xpath': 'attachmentSet/item/deleteOnTermination',
            'transform_func': str
        },
        'volume_type': {
            'xpath': 'volumeType',
            'transform_func': str
        }
    },
    # VPC route table metadata.
    'route_table': {
        'vpc_id': {
            'xpath': 'vpcId',
            'transform_func': str
        }
    }
}
# XML attribute map used to parse volume-modification API responses; the
# keys mirror the attributes of the EC2VolumeModification class defined
# below.  Each entry gives the XML 'xpath' to read and a 'transform_func'
# applied to the raw text value.
VOLUME_MODIFICATION_ATTRIBUTE_MAP = {
    'end_time': {
        'xpath': 'endTime',
        'transform_func': parse_date
    },
    'modification_state': {
        'xpath': 'modificationState',
        'transform_func': str
    },
    'original_iops': {
        'xpath': 'originalIops',
        'transform_func': int
    },
    'original_size': {
        'xpath': 'originalSize',
        'transform_func': int
    },
    'original_volume_type': {
        'xpath': 'originalVolumeType',
        'transform_func': str
    },
    'progress': {
        'xpath': 'progress',
        'transform_func': int
    },
    'start_time': {
        'xpath': 'startTime',
        'transform_func': parse_date
    },
    'status_message': {
        'xpath': 'statusMessage',
        'transform_func': str
    },
    'target_iops': {
        'xpath': 'targetIops',
        'transform_func': int
    },
    'target_size': {
        'xpath': 'targetSize',
        'transform_func': int
    },
    'target_volume_type': {
        'xpath': 'targetVolumeType',
        'transform_func': str
    },
    'volume_id': {
        'xpath': 'volumeId',
        'transform_func': str
    }
}
# All regions known to the driver, minus 'nimbus' which is not a real
# public EC2 region.
VALID_EC2_REGIONS = [region for region in REGION_DETAILS.keys()
                     if region != 'nimbus']
# Volume types accepted by create_volume.
VALID_VOLUME_TYPES = ['standard', 'io1', 'gp2', 'st1', 'sc1']
class EC2NodeLocation(NodeLocation):
    """A datacenter location that additionally records its availability zone.

    Note: This class is EC2 specific.
    """

    def __init__(self, id, name, country, driver, availability_zone):
        # Common location fields are handled by the base class; only the
        # availability zone is EC2 specific.
        super(EC2NodeLocation, self).__init__(id, name, country, driver)
        self.availability_zone = availability_zone

    def __repr__(self):
        template = ('<EC2NodeLocation: id=%s, name=%s, country=%s, '
                    'availability_zone=%s driver=%s>')
        return template % (self.id, self.name, self.country,
                           self.availability_zone, self.driver.name)
class EC2Response(AWSBaseResponse):
    """
    EC2 specific response parsing and error handling.
    """

    def parse_error(self):
        err_list = []
        # Okay, so for Eucalyptus, you can get a 403, with no body,
        # if you are using the wrong user/password.
        msg = "Failure: 403 Forbidden"
        if self.status == 403 and self.body[:len(msg)] == msg:
            raise InvalidCredsError(msg)

        try:
            body = ET.XML(self.body)
        except Exception:
            # Catch Exception rather than using a bare except so that
            # system-exiting exceptions (KeyboardInterrupt, SystemExit)
            # still propagate.
            raise MalformedResponseError("Failed to parse XML",
                                         body=self.body,
                                         driver=EC2NodeDriver)

        # Error codes which all indicate that the supplied credentials are
        # invalid or not authorized for the requested action.
        invalid_creds_codes = ('InvalidClientTokenId',
                               'SignatureDoesNotMatch',
                               'AuthFailure',
                               'OptInRequired')

        for err in body.findall('Errors/Error'):
            # Element.getchildren() was deprecated and removed in
            # Python 3.9; iterating the element is the supported way to
            # access its children.
            code, message = list(err)
            err_list.append('%s: %s' % (code.text, message.text))
            if code.text in invalid_creds_codes:
                raise InvalidCredsError(err_list[-1])
            if code.text == 'IdempotentParameterMismatch':
                raise IdempotentParamError(err_list[-1])
            if code.text == 'InvalidKeyPair.NotFound':
                # TODO: Use connection context instead
                match = re.match(r'.*\'(.+?)\'.*', message.text)
                if match:
                    name = match.groups()[0]
                else:
                    name = None
                raise KeyPairDoesNotExistError(name=name,
                                               driver=self.connection.driver)
        return '\n'.join(err_list)
class EC2Connection(SignedAWSConnection):
    """
    Represents a single connection to the EC2 Endpoint.
    """

    # EC2 API version sent with every request.
    version = API_VERSION
    # Default endpoint; NOTE(review): presumably overridden per region by
    # the driver at connection time -- confirm against driver setup code.
    host = REGION_DETAILS['us-east-1']['endpoint']
    responseCls = EC2Response
    # Service name component used by SignedAWSConnection when signing.
    service_name = 'ec2'
class ExEC2AvailabilityZone(object):
    """
    Extension class holding the name, state and parent region of a single
    EC2 availability zone.

    Note: This class is EC2 specific.
    """

    def __init__(self, name, zone_state, region_name):
        self.name = name
        self.zone_state = zone_state
        self.region_name = region_name

    def __repr__(self):
        template = ('<ExEC2AvailabilityZone: name=%s, zone_state=%s, '
                    'region_name=%s>')
        return template % (self.name, self.zone_state, self.region_name)
class EC2ReservedNode(Node):
    """
    Class which stores information about EC2 reserved instances/nodes.
    Inherits from Node and passes in None for name and private/public IPs.

    Note: This class is EC2 specific.
    """

    def __init__(self, id, state, driver, size=None, image=None, extra=None):
        # Reserved instances carry no name or IP addresses; everything
        # else is delegated to the generic Node constructor.
        super(EC2ReservedNode, self).__init__(id=id, name=None, state=state,
                                              public_ips=None,
                                              private_ips=None,
                                              driver=driver, extra=extra)

    def __repr__(self):
        return '<EC2ReservedNode: id=%s>' % (self.id,)
class EC2SecurityGroup(object):
    """
    Represents information about a Security group.

    Note: This class is EC2 specific.
    """

    def __init__(self, id, name, ingress_rules, egress_rules, extra=None):
        self.id = id
        self.name = name
        self.ingress_rules = ingress_rules
        self.egress_rules = egress_rules
        self.extra = extra or {}

    def __repr__(self):
        # Bug fix: the closing '>' was previously missing from the repr.
        return ('<EC2SecurityGroup: id=%s, name=%s>'
                % (self.id, self.name))
class EC2ImportSnapshotTask(object):
    """
    Represents information about a describe_import_snapshot_task.

    Note: This class is EC2 specific.
    """

    def __init__(self, status, snapshotId):
        self.status = status
        self.snapshotId = snapshotId

    def __repr__(self):
        # Bug fix: the repr previously reported the wrong class name
        # ('EC2SecurityGroup', a copy-paste error) and omitted the
        # closing '>'.
        return ('<EC2ImportSnapshotTask: status=%s, snapshotId=%s>'
                % (self.status, self.snapshotId))
class EC2PlacementGroup(object):
    """
    Represents information about a Placement Group.

    Note: This class is EC2 specific.
    """

    def __init__(self, name, state, strategy='cluster', extra=None):
        self.name = name
        # Bug fix: 'state' was previously accepted but never stored.
        self.state = state
        self.strategy = strategy
        self.extra = extra or {}

    def __repr__(self):
        # Bug fix: the repr previously printed the strategy under the
        # 'state' label.
        return '<EC2PlacementGroup: name=%s, state=%s>' % (self.name,
                                                           self.state)
class EC2Network(object):
    """
    Represents information about a VPC (Virtual Private Cloud) network.

    Note: This class is EC2 specific.
    """

    def __init__(self, id, name, cidr_block, extra=None):
        self.id = id
        self.name = name
        self.cidr_block = cidr_block
        self.extra = extra or {}

    def __repr__(self):
        # Bug fix: the closing '>' was previously missing from the repr.
        return ('<EC2Network: id=%s, name=%s>'
                % (self.id, self.name))
class EC2NetworkSubnet(object):
    """
    Represents information about a VPC (Virtual Private Cloud) subnet.

    Note: This class is EC2 specific.
    """

    def __init__(self, id, name, state, extra=None):
        self.id = id
        self.name = name
        self.state = state
        self.extra = extra or {}

    def __repr__(self):
        # Bug fix: the closing '>' was previously missing from the repr.
        return '<EC2NetworkSubnet: id=%s, name=%s>' % (self.id, self.name)
class EC2NetworkInterface(object):
    """
    Represents information about a VPC network interface.

    Note: This class is EC2 specific. The state parameter denotes the current
    status of the interface. Valid values for state are attaching, attached,
    detaching and detached.
    """

    def __init__(self, id, name, state, extra=None):
        self.id = id
        self.name = name
        self.state = state
        self.extra = extra or {}

    def __repr__(self):
        # Bug fix: the closing '>' was previously missing from the repr.
        return ('<EC2NetworkInterface: id=%s, name=%s>'
                % (self.id, self.name))
class ElasticIP(object):
    """
    Represents information about an elastic IP address.

    :param ip: The elastic IP address
    :type ip: ``str``

    :param domain: The domain the IP resides in (EC2-Classic/VPC);
                   EC2 classic is represented with standard and VPC
                   is represented with vpc.
    :type domain: ``str``

    :param instance_id: The identifier of the instance which currently
                        has the IP associated.
    :type instance_id: ``str``

    Note: This class is used to support both EC2 and VPC IPs.
    For VPC specific attributes are stored in the extra
    dict to make promotion to the base API easier.
    """

    def __init__(self, ip, domain, instance_id, extra=None):
        self.ip = ip
        self.domain = domain
        self.instance_id = instance_id
        self.extra = extra or {}

    def __repr__(self):
        template = '<ElasticIP: ip=%s, domain=%s, instance_id=%s>'
        return template % (self.ip, self.domain, self.instance_id)
class VPCInternetGateway(object):
    """
    Class which stores information about VPC Internet Gateways.

    Note: This class is VPC specific.
    """

    def __init__(self, id, name, vpc_id, state, driver, extra=None):
        self.id = id
        self.name = name
        self.vpc_id = vpc_id
        self.state = state
        # Bug fix: 'driver' was previously accepted but never stored,
        # unlike the other resource classes in this module.
        self.driver = driver
        self.extra = extra or {}

    def __repr__(self):
        return '<VPCInternetGateway: id=%s>' % (self.id,)
class EC2RouteTable(object):
    """
    Class which stores information about VPC Route Tables.

    Note: This class is VPC specific.
    """

    def __init__(self, id, name, routes, subnet_associations,
                 propagating_gateway_ids, extra=None):
        """
        :param id: The ID of the route table.
        :type id: ``str``

        :param name: The name of the route table.
        :type name: ``str``

        :param routes: A list of routes in the route table.
        :type routes: ``list`` of :class:`EC2Route`

        :param subnet_associations: A list of associations between the
                                    route table and one or more subnets.
        :type subnet_associations: ``list`` of
                                   :class:`EC2SubnetAssociation`

        :param propagating_gateway_ids: The list of IDs of any virtual
                                        private gateways propagating the
                                        routes.
        :type propagating_gateway_ids: ``list``
        """
        self.id = id
        self.name = name
        self.routes = routes
        self.subnet_associations = subnet_associations
        self.propagating_gateway_ids = propagating_gateway_ids
        self.extra = extra or {}

    def __repr__(self):
        return '<EC2RouteTable: id=%s>' % (self.id,)
class EC2Route(object):
    """
    Class which stores information about a Route.

    Note: This class is VPC specific.
    """

    def __init__(self, cidr, gateway_id, instance_id, owner_id,
                 interface_id, state, origin, vpc_peering_connection_id):
        """
        :param cidr: The CIDR block used for the destination match.
        :type cidr: ``str``

        :param gateway_id: The ID of a gateway attached to the VPC.
        :type gateway_id: ``str``

        :param instance_id: The ID of a NAT instance in the VPC.
        :type instance_id: ``str``

        :param owner_id: The AWS account ID of the owner of the instance.
        :type owner_id: ``str``

        :param interface_id: The ID of the network interface.
        :type interface_id: ``str``

        :param state: The state of the route (active | blackhole).
        :type state: ``str``

        :param origin: Describes how the route was created.
        :type origin: ``str``

        :param vpc_peering_connection_id: The ID of the VPC
                                          peering connection.
        :type vpc_peering_connection_id: ``str``
        """
        self.cidr = cidr
        self.gateway_id = gateway_id
        self.instance_id = instance_id
        self.owner_id = owner_id
        self.interface_id = interface_id
        self.state = state
        self.origin = origin
        self.vpc_peering_connection_id = vpc_peering_connection_id

    def __repr__(self):
        return '<EC2Route: cidr=%s>' % (self.cidr,)
class EC2SubnetAssociation(object):
    """
    Class which stores information about the Route Table associated with a
    given Subnet in a VPC.

    Note: This class is VPC specific.
    """

    def __init__(self, id, route_table_id, subnet_id, main=False):
        """
        :param id: The ID of the subnet association in the VPC.
        :type id: ``str``

        :param route_table_id: The ID of a route table in the VPC.
        :type route_table_id: ``str``

        :param subnet_id: The ID of a subnet in the VPC.
        :type subnet_id: ``str``

        :param main: If true, means this is a main VPC route table.
        :type main: ``bool``
        """
        self.id = id
        self.route_table_id = route_table_id
        self.subnet_id = subnet_id
        self.main = main

    def __repr__(self):
        return '<EC2SubnetAssociation: id=%s>' % (self.id,)
class EC2VolumeModification(object):
    """
    Describes the modification status of an EBS volume.

    If the volume has never been modified, some element values will be null.
    """

    # Attribute names, in the order they appear in the repr.
    _FIELDS = ('end_time', 'modification_state', 'original_iops',
               'original_size', 'original_volume_type', 'progress',
               'start_time', 'status_message', 'target_iops',
               'target_size', 'target_volume_type', 'volume_id')

    def __init__(self, end_time=None, modification_state=None,
                 original_iops=None, original_size=None,
                 original_volume_type=None, progress=None, start_time=None,
                 status_message=None, target_iops=None, target_size=None,
                 target_volume_type=None, volume_id=None):
        self.end_time = end_time
        self.modification_state = modification_state
        self.original_iops = original_iops
        self.original_size = original_size
        self.original_volume_type = original_volume_type
        self.progress = progress
        self.start_time = start_time
        self.status_message = status_message
        self.target_iops = target_iops
        self.target_size = target_size
        self.target_volume_type = target_volume_type
        self.volume_id = volume_id

    def __repr__(self):
        details = ', '.join('%s=%s' % (field, getattr(self, field))
                            for field in self._FIELDS)
        return '<EC2VolumeModification: %s>' % (details,)
class BaseEC2NodeDriver(NodeDriver):
    """
    Base Amazon EC2 node driver.

    Used for main EC2 and other derivate driver classes to inherit from it.
    """

    connectionCls = EC2Connection
    # Nodes are created with an SSH key pair for authentication.
    features = {'create_node': ['ssh_key']}
    path = '/'
    signature_version = DEFAULT_SIGNATURE_VERSION
    # Maps EC2 instanceState/name values to libcloud NodeState constants.
    NODE_STATE_MAP = {
        'pending': NodeState.PENDING,
        'running': NodeState.RUNNING,
        'shutting-down': NodeState.UNKNOWN,
        'terminated': NodeState.TERMINATED
    }
    # Maps EBS volume states to libcloud StorageVolumeState constants.
    # http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_Volume.html
    VOLUME_STATE_MAP = {
        'available': StorageVolumeState.AVAILABLE,
        'in-use': StorageVolumeState.INUSE,
        'error': StorageVolumeState.ERROR,
        'creating': StorageVolumeState.CREATING,
        'deleting': StorageVolumeState.DELETING,
        'deleted': StorageVolumeState.DELETED,
        'error_deleting': StorageVolumeState.ERROR
    }
    # Maps EBS snapshot states to libcloud VolumeSnapshotState constants.
    SNAPSHOT_STATE_MAP = {
        'pending': VolumeSnapshotState.CREATING,
        'completed': VolumeSnapshotState.AVAILABLE,
        'error': VolumeSnapshotState.ERROR,
    }
def list_nodes(self, ex_node_ids=None, ex_filters=None):
"""
Lists all nodes.
Ex_node_ids parameter is used to filter the list of
nodes that should be returned. Only the nodes
with the corresponding node IDs will be returned.
:param ex_node_ids: List of ``node.id``
:type ex_node_ids: ``list`` of ``str``
:param ex_filters: The filters so that the list includes
information for certain nodes only.
:type ex_filters: ``dict``
:rtype: ``list`` of :class:`Node`
"""
params = {'Action': 'DescribeInstances'}
if ex_node_ids:
params.update(self._pathlist('InstanceId', ex_node_ids))
if ex_filters:
params.update(self._build_filters(ex_filters))
elem = self.connection.request(self.path, params=params).object
nodes = []
for rs in findall(element=elem, xpath='reservationSet/item',
namespace=NAMESPACE):
nodes += self._to_nodes(rs, 'instancesSet/item')
nodes_elastic_ips_mappings = self.ex_describe_addresses(nodes)
for node in nodes:
ips = nodes_elastic_ips_mappings[node.id]
node.public_ips.extend(ips)
return nodes
def list_sizes(self, location=None):
available_types = REGION_DETAILS[self.region_name]['instance_types']
sizes = []
for instance_type in available_types:
attributes = INSTANCE_TYPES[instance_type]
attributes = copy.deepcopy(attributes)
price = self._get_size_price(size_id=instance_type)
attributes.update({'price': price})
sizes.append(NodeSize(driver=self, **attributes))
return sizes
def list_images(self, location=None, ex_image_ids=None, ex_owner=None,
ex_executableby=None, ex_filters=None):
"""
Lists all images
@inherits: :class:`NodeDriver.list_images`
Ex_image_ids parameter is used to filter the list of
images that should be returned. Only the images
with the corresponding image IDs will be returned.
Ex_owner parameter is used to filter the list of
images that should be returned. Only the images
with the corresponding owner will be returned.
Valid values: amazon|aws-marketplace|self|all|aws id
Ex_executableby parameter describes images for which
the specified user has explicit launch permissions.
The user can be an AWS account ID, self to return
images for which the sender of the request has
explicit launch permissions, or all to return
images with public launch permissions.
Valid values: all|self|aws id
Ex_filters parameter is used to filter the list of
images that should be returned. Only images matching
the filter will be returned.
:param ex_image_ids: List of ``NodeImage.id``
:type ex_image_ids: ``list`` of ``str``
:param ex_owner: Owner name
:type ex_owner: ``str``
:param ex_executableby: Executable by
:type ex_executableby: ``str``
:param ex_filters: Filter by
:type ex_filters: ``dict``
:rtype: ``list`` of :class:`NodeImage`
"""
params = {'Action': 'DescribeImages'}
if ex_owner:
params.update({'Owner.1': ex_owner})
if ex_executableby:
params.update({'ExecutableBy.1': ex_executableby})
if ex_image_ids:
for index, image_id in enumerate(ex_image_ids):
index += 1
params.update({'ImageId.%s' % (index): image_id})
if ex_filters:
params.update(self._build_filters(ex_filters))
images = self._to_images(
self.connection.request(self.path, params=params).object
)
return images
def get_image(self, image_id):
"""
Gets an image based on an image_id.
:param image_id: Image identifier
:type image_id: ``str``
:return: A NodeImage object
:rtype: :class:`NodeImage`
"""
images = self.list_images(ex_image_ids=[image_id])
image = images[0]
return image
def list_locations(self):
locations = []
for index, availability_zone in \
enumerate(self.ex_list_availability_zones()):
locations.append(EC2NodeLocation(
index, availability_zone.name, self.country, self,
availability_zone)
)
return locations
def list_volumes(self, node=None):
params = {
'Action': 'DescribeVolumes',
}
if node:
filters = {'attachment.instance-id': node.id}
params.update(self._build_filters(filters))
response = self.connection.request(self.path, params=params).object
volumes = [self._to_volume(el) for el in response.findall(
fixxpath(xpath='volumeSet/item', namespace=NAMESPACE))
]
return volumes
    def create_node(self, **kwargs):
        """
        Create a new EC2 node.

        Reference: http://bit.ly/8ZyPSy [docs.amazonwebservices.com]

        @inherits: :class:`NodeDriver.create_node`

        :keyword ex_keyname: The name of the key pair
        :type ex_keyname: ``str``

        :keyword ex_userdata: User data
        :type ex_userdata: ``str``

        :keyword ex_security_groups: A list of names of security groups to
                                     assign to the node.
        :type ex_security_groups: ``list``

        :keyword ex_security_group_ids: A list of ids of security groups to
                                        assign to the node.[for VPC nodes only]
        :type ex_security_group_ids: ``list``

        :keyword ex_metadata: Key/Value metadata to associate with a node
        :type ex_metadata: ``dict``

        :keyword ex_mincount: Minimum number of instances to launch
        :type ex_mincount: ``int``

        :keyword ex_maxcount: Maximum number of instances to launch
        :type ex_maxcount: ``int``

        :keyword ex_clienttoken: Unique identifier to ensure idempotency
        :type ex_clienttoken: ``str``

        :keyword ex_blockdevicemappings: ``list`` of ``dict`` block device
                                         mappings.
        :type ex_blockdevicemappings: ``list`` of ``dict``

        :keyword ex_iamprofile: Name or ARN of IAM profile
        :type ex_iamprofile: ``str``

        :keyword ex_ebs_optimized: EBS-Optimized if True
        :type ex_ebs_optimized: ``bool``

        :keyword ex_subnet: The subnet to launch the instance into.
        :type ex_subnet: :class:`.EC2Subnet`

        :keyword ex_placement_group: The name of the placement group to
                                     launch the instance into.
        :type ex_placement_group: ``str``

        :keyword ex_assign_public_ip: If True, the instance will
                                      be assigned a public ip address.
                                      Note : It takes takes a short
                                      while for the instance to be
                                      assigned the public ip so the
                                      node returned will NOT have
                                      the public ip assigned yet.
        :type ex_assign_public_ip: ``bool``

        :keyword ex_terminate_on_shutdown: Indicates if the instance
                                           should be terminated instead
                                           of just shut down when using
                                           the operating systems command
                                           for system shutdown.
        :type ex_terminate_on_shutdown: ``bool``
        """
        image = kwargs["image"]
        size = kwargs["size"]
        # Base RunInstances parameters; everything below adds to this dict.
        params = {
            'Action': 'RunInstances',
            'ImageId': image.id,
            'MinCount': str(kwargs.get('ex_mincount', '1')),
            'MaxCount': str(kwargs.get('ex_maxcount', '1')),
            'InstanceType': size.id
        }
        if kwargs.get("ex_terminate_on_shutdown", False):
            params["InstanceInitiatedShutdownBehavior"] = "terminate"
        # Security groups may be given by name (EC2-Classic) or by ID
        # (VPC); the two spellings below are mutually exclusive.
        if 'ex_security_groups' in kwargs and 'ex_securitygroup' in kwargs:
            raise ValueError('You can only supply ex_security_groups or'
                             ' ex_securitygroup')
        # ex_securitygroup is here for backward compatibility
        ex_security_groups = kwargs.get('ex_security_groups', None)
        ex_securitygroup = kwargs.get('ex_securitygroup', None)
        security_groups = ex_security_groups or ex_securitygroup
        if security_groups:
            if not isinstance(security_groups, (tuple, list)):
                security_groups = [security_groups]
            # SecurityGroup.N parameters are 1-based.
            for sig in range(len(security_groups)):
                params['SecurityGroup.%d' % (sig + 1,)] =\
                    security_groups[sig]
        # Security group IDs are VPC-only and therefore require a subnet.
        if 'ex_security_group_ids' in kwargs and 'ex_subnet' not in kwargs:
            raise ValueError('You can only supply ex_security_group_ids'
                             ' combinated with ex_subnet')
        security_group_ids = kwargs.get('ex_security_group_ids', None)
        security_group_id_params = {}
        # Collected separately (not straight into params) because they may
        # need to be nested under NetworkInterface.1. below.
        if security_group_ids:
            if not isinstance(security_group_ids, (tuple, list)):
                security_group_ids = [security_group_ids]
            for sig in range(len(security_group_ids)):
                security_group_id_params['SecurityGroupId.%d' % (sig + 1,)] =\
                    security_group_ids[sig]
        if 'location' in kwargs:
            availability_zone = getattr(kwargs['location'],
                                        'availability_zone', None)
            if availability_zone:
                # The zone must belong to the driver's own region.
                if availability_zone.region_name != self.region_name:
                    raise AttributeError('Invalid availability zone: %s'
                                         % (availability_zone.name))
                params['Placement.AvailabilityZone'] = availability_zone.name
        if 'auth' in kwargs and 'ex_keyname' in kwargs:
            raise AttributeError('Cannot specify auth and ex_keyname together')
        if 'auth' in kwargs:
            auth = self._get_and_check_auth(kwargs['auth'])
            # Reuse an existing key pair matching the public key material,
            # or import it if no match exists.
            key = self.ex_find_or_import_keypair_by_key_material(auth.pubkey)
            params['KeyName'] = key['keyName']
        if 'ex_keyname' in kwargs:
            params['KeyName'] = kwargs['ex_keyname']
        if 'ex_userdata' in kwargs:
            # EC2 expects user data to be base64 encoded.
            params['UserData'] = base64.b64encode(b(kwargs['ex_userdata']))\
                .decode('utf-8')
        if 'ex_clienttoken' in kwargs:
            params['ClientToken'] = kwargs['ex_clienttoken']
        if 'ex_blockdevicemappings' in kwargs:
            params.update(self._get_block_device_mapping_params(
                kwargs['ex_blockdevicemappings']))
        if 'ex_iamprofile' in kwargs:
            if not isinstance(kwargs['ex_iamprofile'], basestring):
                raise AttributeError('ex_iamprofile not string')
            # Full ARNs and bare profile names use different parameters.
            if kwargs['ex_iamprofile'].startswith('arn:aws:iam:'):
                params['IamInstanceProfile.Arn'] = kwargs['ex_iamprofile']
            else:
                params['IamInstanceProfile.Name'] = kwargs['ex_iamprofile']
        if 'ex_ebs_optimized' in kwargs:
            params['EbsOptimized'] = kwargs['ex_ebs_optimized']
        subnet_id = None
        if 'ex_subnet' in kwargs:
            subnet_id = kwargs['ex_subnet'].id
        if 'ex_placement_group' in kwargs and kwargs['ex_placement_group']:
            params['Placement.GroupName'] = kwargs['ex_placement_group']
        assign_public_ip = kwargs.get('ex_assign_public_ip', False)
        # In the event that a public ip is requested a NetworkInterface
        # needs to be specified.  Some properties that would
        # normally be at the root (security group ids and subnet id)
        # need to be moved to the level of the NetworkInterface because
        # the NetworkInterface is no longer created implicitly
        if assign_public_ip:
            root_key = 'NetworkInterface.1.'
            params[root_key + 'AssociatePublicIpAddress'] = "true"
            # This means that when the instance is terminated, the
            # NetworkInterface we created for the instance will be
            # deleted automatically
            params[root_key + 'DeleteOnTermination'] = "true"
            # Required to be 0 if we are associating a public ip
            params[root_key + 'DeviceIndex'] = "0"

            if subnet_id:
                params[root_key + 'SubnetId'] = subnet_id

            for key, security_group_id in security_group_id_params.items():
                key = root_key + key
                params[key] = security_group_id
        else:
            params.update(security_group_id_params)
            if subnet_id:
                params['SubnetId'] = subnet_id

        object = self.connection.request(self.path, params=params).object
        nodes = self._to_nodes(object, 'instancesSet/item')

        # Tag each new instance with its name (and any extra metadata);
        # tagging failures are tolerated per node rather than aborting.
        for node in nodes:
            tags = {'Name': kwargs['name']}
            if 'ex_metadata' in kwargs:
                tags.update(kwargs['ex_metadata'])

            try:
                self.ex_create_tags(resource=node, tags=tags)
            except Exception:
                continue

            node.name = kwargs['name']
            node.extra.update({'tags': tags})

        # Single-instance launches return a Node; multi-instance launches
        # return the full list.
        if len(nodes) == 1:
            return nodes[0]
        else:
            return nodes
def reboot_node(self, node):
    """
    Reboot the given node by issuing a RebootInstances request.

    :param node: The node to reboot.
    :type node: :class:`Node`

    :rtype: ``bool``
    """
    request_params = dict(self._pathlist('InstanceId', [node.id]),
                          Action='RebootInstances')
    response = self.connection.request(self.path,
                                       params=request_params).object
    return self._get_boolean(response)
def destroy_node(self, node):
    """
    Terminate the given node by issuing a TerminateInstances request.

    :param node: The node to terminate.
    :type node: :class:`Node`

    :rtype: ``bool``
    """
    request_params = dict(self._pathlist('InstanceId', [node.id]),
                          Action='TerminateInstances')
    response = self.connection.request(self.path,
                                       params=request_params).object
    return self._get_terminate_boolean(response)
def create_volume(self, size, name, location=None, snapshot=None,
                  ex_volume_type='standard', ex_iops=None,
                  ex_encrypted=None, ex_kms_key_id=None):
    """
    Create a new volume.

    :param size: Size of volume in gigabytes (required)
    :type size: ``int``

    :param name: Name of the volume to be created
    :type name: ``str``

    :param location: Datacenter/availability zone in which to create the
                     volume. If empty, undefined behavior will be
                     selected. (optional)
    :type location: :class:`.ExEC2AvailabilityZone`

    :param snapshot: Snapshot from which to create the new
                     volume. (optional)
    :type snapshot: :class:`.VolumeSnapshot`

    :param ex_volume_type: Type of volume to create.
    :type ex_volume_type: ``str``

    :param ex_iops: The number of I/O operations per second (IOPS)
                    that the volume supports. Only used if
                    ex_volume_type is io1.
    :type ex_iops: ``int``

    :param ex_encrypted: Specifies whether the volume should be encrypted.
    :type ex_encrypted: ``bool``

    :param ex_kms_key_id: The full ARN of the AWS Key Management
                          Service (AWS KMS) customer master key (CMK) to
                          use when creating the encrypted volume.
                          Example:
                          arn:aws:kms:us-east-1:012345678910:key/abcd1234
                          -a123-456a-a12b-a123b4cd56ef.
                          Only used if encrypted is set to True.
    :type ex_kms_key_id: ``str``

    :return: The newly created volume.
    :rtype: :class:`StorageVolume`

    :raises ValueError: If ``ex_volume_type`` is not a valid volume type.
    """
    params = {
        'Action': 'CreateVolume',
        'Size': str(size)}
    if ex_volume_type and ex_volume_type not in VALID_VOLUME_TYPES:
        raise ValueError('Invalid volume type specified: %s' %
                         (ex_volume_type))
    if snapshot:
        params['SnapshotId'] = snapshot.id
    if location is not None:
        params['AvailabilityZone'] = location.availability_zone.name
    if ex_volume_type:
        params['VolumeType'] = ex_volume_type
    if ex_volume_type == 'io1' and ex_iops:
        params['Iops'] = ex_iops
    # Bug fix: only request encryption when ex_encrypted is truthy.
    # Previously the check was ``ex_encrypted is not None``, so passing
    # ex_encrypted=False still sent Encrypted=1 and created an
    # encrypted volume.
    if ex_encrypted:
        params['Encrypted'] = 1
    if ex_kms_key_id is not None:
        params['KmsKeyId'] = ex_kms_key_id
    volume = self._to_volume(
        self.connection.request(self.path, params=params).object,
        name=name)
    # Tag the volume with its name so it is visible in the AWS console.
    if self.ex_create_tags(volume, {'Name': name}):
        volume.extra['tags']['Name'] = name
    return volume
def attach_volume(self, node, volume, device):
    """
    Attach a storage volume to a node at the given device path.

    :param node: The node the volume will be attached to.
    :type node: :class:`Node`

    :param volume: The volume to attach.
    :type volume: :class:`StorageVolume`

    :param device: Device path, e.g. ``/dev/sdh``.
    :type device: ``str``

    :rtype: ``bool``
    """
    attach_params = {
        'Action': 'AttachVolume',
        'VolumeId': volume.id,
        'InstanceId': node.id,
        'Device': device,
    }
    self.connection.request(self.path, params=attach_params)
    return True
def detach_volume(self, volume, ex_force=False):
    """
    Detach a storage volume from its instance.

    :param volume: The volume to detach.
    :type volume: :class:`StorageVolume`

    :param ex_force: Force the detachment even if the instance is
                     still using the volume.
    :type ex_force: ``bool``

    :rtype: ``bool``
    """
    detach_params = {
        'Action': 'DetachVolume',
        'VolumeId': volume.id,
    }
    if ex_force:
        detach_params['Force'] = 1
    self.connection.request(self.path, params=detach_params)
    return True
def destroy_volume(self, volume):
    """
    Delete a storage volume.

    :param volume: The volume to delete.
    :type volume: :class:`StorageVolume`

    :rtype: ``bool``
    """
    delete_params = {
        'Action': 'DeleteVolume',
        'VolumeId': volume.id,
    }
    result = self.connection.request(self.path, params=delete_params).object
    return self._get_boolean(result)
def create_volume_snapshot(self, volume, name=None):
    """
    Create snapshot from volume

    :param volume: Instance of ``StorageVolume``
    :type volume: ``StorageVolume``

    :param name: Name of snapshot (optional)
    :type name: ``str``

    :rtype: :class:`VolumeSnapshot`
    """
    params = {'Action': 'CreateSnapshot', 'VolumeId': volume.id}
    if name:
        # EC2 has no snapshot "name" field; the name is stored as the
        # description and mirrored into a Name tag below.
        params['Description'] = name
    response = self.connection.request(self.path, params=params).object
    snapshot = self._to_snapshot(response, name)
    if name and self.ex_create_tags(snapshot, {'Name': name}):
        snapshot.extra['tags']['Name'] = name
    return snapshot
def list_volume_snapshots(self, volume):
    """
    Return the snapshots owned by this account that belong to *volume*.

    :param volume: The volume whose snapshots are listed.
    :type volume: :class:`StorageVolume`

    :rtype: ``list`` of :class:`VolumeSnapshot`
    """
    own_snapshots = self.list_snapshots(owner='self')
    return [snap for snap in own_snapshots
            if snap.extra["volume_id"] == volume.id]
def list_snapshots(self, snapshot=None, owner=None):
    """
    Describes all snapshots.

    :param snapshot: If provided, only returns snapshot information for
                     the provided snapshot.

    :param owner: The owner of the snapshot: self|amazon|ID
    :type owner: ``str``

    :rtype: ``list`` of :class:`VolumeSnapshot`
    """
    params = {'Action': 'DescribeSnapshots'}
    if snapshot:
        params['SnapshotId.1'] = snapshot.id
    if owner:
        params['Owner.1'] = owner
    response = self.connection.request(self.path, params=params).object
    return self._to_snapshots(response)
def destroy_volume_snapshot(self, snapshot):
    """
    Delete a volume snapshot.

    :param snapshot: The snapshot to delete.
    :type snapshot: :class:`VolumeSnapshot`

    :rtype: ``bool``
    """
    delete_params = {
        'Action': 'DeleteSnapshot',
        'SnapshotId': snapshot.id,
    }
    result = self.connection.request(self.path, params=delete_params).object
    return self._get_boolean(result)
# Key pair management methods
def list_key_pairs(self):
    """
    List all key pairs registered in the current region.

    :rtype: ``list`` of :class:`KeyPair`
    """
    response = self.connection.request(
        self.path, params={'Action': 'DescribeKeyPairs'})
    key_elems = findall(element=response.object, xpath='keySet/item',
                        namespace=NAMESPACE)
    return self._to_key_pairs(elems=key_elems)
def get_key_pair(self, name):
    """
    Retrieve a single key pair by name.

    :param name: Name of the key pair to look up.
    :type name: ``str``

    :rtype: :class:`KeyPair`
    """
    response = self.connection.request(
        self.path, params={'Action': 'DescribeKeyPairs', 'KeyName': name})
    key_elems = findall(element=response.object, xpath='keySet/item',
                        namespace=NAMESPACE)
    # DescribeKeyPairs with KeyName returns at most one entry.
    return self._to_key_pairs(elems=key_elems)[0]
def create_key_pair(self, name):
    """
    Create a new key pair with the given name.

    :param name: Name of the key pair to create.
    :type name: ``str``

    :rtype: :class:`KeyPair`
    """
    response = self.connection.request(
        self.path, params={'Action': 'CreateKeyPair', 'KeyName': name})
    return self._to_key_pair(elem=response.object)
def import_key_pair_from_string(self, name, key_material):
    """
    Import an existing public key under the given name.

    :param name: Name for the imported key pair.
    :type name: ``str``

    :param key_material: Public key material (e.g. OpenSSH format).
    :type key_material: ``str``

    :rtype: :class:`KeyPair`
    """
    # The ImportKeyPair action requires the key material base64-encoded.
    encoded_key = ensure_string(base64.b64encode(b(key_material)))
    response = self.connection.request(self.path, params={
        'Action': 'ImportKeyPair',
        'KeyName': name,
        'PublicKeyMaterial': encoded_key,
    })
    return self._to_key_pair(elem=response.object)
def delete_key_pair(self, key_pair):
    """
    Delete an existing key pair.

    :param key_pair: The key pair to delete.
    :type key_pair: :class:`KeyPair`

    :rtype: ``bool``
    """
    delete_params = {
        'Action': 'DeleteKeyPair',
        'KeyName': key_pair.name,
    }
    result = self.connection.request(self.path, params=delete_params).object
    return self._get_boolean(result)
def copy_image(self, image, source_region, name=None, description=None):
    """
    Copy an Amazon Machine Image from the specified source region
    to the current region.

    @inherits: :class:`NodeDriver.copy_image`

    :param source_region: The region where the image resides
    :type source_region: ``str``

    :param image: Instance of class NodeImage
    :type image: :class:`NodeImage`

    :param name: The name of the new image
    :type name: ``str``

    :param description: The description of the new image
    :type description: ``str``

    :return: Instance of class ``NodeImage``
    :rtype: :class:`NodeImage`
    """
    params = {'Action': 'CopyImage',
              'SourceRegion': source_region,
              'SourceImageId': image.id}
    # Name and description are both optional in the CopyImage call.
    if name is not None:
        params['Name'] = name
    if description is not None:
        params['Description'] = description
    response = self.connection.request(self.path, params=params).object
    return self._to_image(response)
def create_image(self, node, name, description=None, reboot=False,
                 block_device_mapping=None):
    """
    Create an Amazon Machine Image based off of an EBS-backed instance.

    @inherits: :class:`NodeDriver.create_image`

    :param node: Instance of ``Node``
    :type node: :class: `Node`

    :param name: The name for the new image
    :type name: ``str``

    :param block_device_mapping: A dictionary of the disk layout
                                 An example of this dict is included
                                 below.
    :type block_device_mapping: ``list`` of ``dict``

    :param reboot: Whether or not to shutdown the instance before
                   creation. Amazon calls this NoReboot and
                   sets it to false by default to ensure a
                   clean image.
    :type reboot: ``bool``

    :param description: An optional description for the new image
    :type description: ``str``

    An example block device mapping dictionary is included:

    mapping = [{'VirtualName': None,
                'Ebs': {'VolumeSize': 10,
                        'VolumeType': 'standard',
                        'DeleteOnTermination': 'true'},
                'DeviceName': '/dev/sda1'}]

    :return: Instance of class ``NodeImage``
    :rtype: :class:`NodeImage`
    """
    # Amazon's flag is inverted relative to our ``reboot`` argument:
    # NoReboot=True means "do not stop the instance first".
    params = {'Action': 'CreateImage',
              'InstanceId': node.id,
              'Name': name,
              'NoReboot': not reboot}
    if description is not None:
        params['Description'] = description
    if block_device_mapping is not None:
        params.update(self._get_block_device_mapping_params(
            block_device_mapping))
    response = self.connection.request(self.path, params=params).object
    return self._to_image(response)
def delete_image(self, image):
    """
    Deletes an image at Amazon given a NodeImage object

    @inherits: :class:`NodeDriver.delete_image`

    :param image: Instance of ``NodeImage``
    :type image: :class: `NodeImage`

    :rtype: ``bool``
    """
    deregister_params = {
        'Action': 'DeregisterImage',
        'ImageId': image.id,
    }
    result = self.connection.request(self.path,
                                     params=deregister_params).object
    return self._get_boolean(result)
def ex_create_placement_group(self, name):
    """
    Creates a new placement group.

    :param name: The name for the new placement group
    :type name: ``str``

    :rtype: ``bool``
    """
    # Only the 'cluster' strategy is requested here.
    create_params = {
        'Action': 'CreatePlacementGroup',
        'Strategy': 'cluster',
        'GroupName': name,
    }
    result = self.connection.request(self.path, params=create_params).object
    return self._get_boolean(result)
def ex_delete_placement_group(self, name):
    """
    Deletes a placement group.

    :param name: The placement group name
    :type name: ``str``

    :rtype: ``bool``
    """
    delete_params = {
        'Action': 'DeletePlacementGroup',
        'GroupName': name,
    }
    result = self.connection.request(self.path, params=delete_params).object
    return self._get_boolean(result)
def ex_import_snapshot(self, client_data=None,
                       client_token=None, description=None,
                       disk_container=None, dry_run=None, role_name=None):
    """
    Imports a disk into an EBS snapshot. More information can be found
    at https://goo.gl/sbXkYA.

    :param client_data: Describes the client specific data (optional)
    :type client_data: ``dict``

    :param client_token: The token to enable idempotency for VM
                         import requests.(optional)
    :type client_token: ``str``

    :param description: The description string for the
                        import snapshot task.(optional)
    :type description: ``str``

    :param disk_container: The disk container object for the
                           import snapshot request.
    :type disk_container: ``dict``

    :param dry_run: Checks whether you have the permission for
                    the action, without actually making the request,
                    and provides an error response.(optional)
    :type dry_run: ``bool``

    :param role_name: The name of the role to use when not using the
                      default role, 'vmimport'.(optional)
    :type role_name: ``str``

    :rtype: :class: ``VolumeSnapshot``
    """
    params = {'Action': 'ImportSnapshot'}
    # Dict-valued arguments are flattened into the query string by
    # dedicated helpers; scalar arguments map directly to parameters.
    if client_data is not None:
        params.update(self._get_client_date_params(client_data))
    if disk_container is not None:
        params.update(self._get_disk_container_params(disk_container))
    if client_token is not None:
        params['ClientToken'] = client_token
    if description is not None:
        params['Description'] = description
    if dry_run is not None:
        params['DryRun'] = dry_run
    if role_name is not None:
        params['RoleName'] = role_name
    response = self.connection.request(self.path, params=params).object
    task_id = findtext(element=response, xpath='importTaskId',
                       namespace=NAMESPACE)
    # The import runs asynchronously; poll until a snapshot id appears.
    return self._wait_for_import_snapshot_completion(
        import_task_id=task_id, timeout=1800, interval=15)
def _wait_for_import_snapshot_completion(self,
                                         import_task_id,
                                         timeout=1800,
                                         interval=15):
    """
    Poll an import-snapshot task until it produces a snapshot id.

    :param import_task_id: Import task Id for the
                           current Import Snapshot Task
    :type import_task_id: ``str``

    :param timeout: Timeout value for snapshot generation
    :type timeout: ``float``

    :param interval: Time interval between successive
                     DescribeImportSnapshotTasks requests
    :type interval: ``float``

    :rtype: :class:``VolumeSnapshot``

    :raises Exception: If no snapshot id appears within ``timeout``
                       seconds.
    """
    deadline = time.time() + timeout
    snapshot_id = None
    while snapshot_id is None:
        # Check the deadline before each poll so a zero/negative
        # timeout fails immediately.
        if time.time() >= deadline:
            raise Exception('Timeout while waiting '
                            'for import task Id %s'
                            % import_task_id)
        task = self.ex_describe_import_snapshot_tasks(import_task_id)
        snapshot_id = task.snapshotId
        if snapshot_id is None:
            time.sleep(interval)
    return VolumeSnapshot(snapshot_id, driver=self)
def ex_describe_import_snapshot_tasks(self, import_task_id, dry_run=None):
    """
    Describes your import snapshot tasks. More information can be found
    at https://goo.gl/CI0MdS.

    :param import_task_id: Import task Id for the current
                           Import Snapshot Task
    :type import_task_id: ``str``

    :param dry_run: Checks whether you have the permission for
                    the action, without actually making the request,
                    and provides an error response.(optional)
    :type dry_run: ``bool``

    :rtype: :class:``DescribeImportSnapshotTasks Object``
    """
    # Only a single task id is supported here; the '.1' suffix leaves
    # room for extending this to multiple tasks later.
    params = {'Action': 'DescribeImportSnapshotTasks',
              'ImportTaskId.1': import_task_id}
    if dry_run is not None:
        params['DryRun'] = dry_run
    response = self.connection.request(self.path, params=params).object
    return self._to_import_snapshot_task(response)
def ex_list_placement_groups(self, names=None):
    """
    A list of placement groups.

    :param names: Placement Group names
    :type names: ``list`` of ``str``

    :rtype: ``list`` of :class:`.EC2PlacementGroup`
    """
    names = names or []
    params = {'Action': 'DescribePlacementGroups'}
    # Bug fix: the key was previously built as
    # ``'GroupName.%s' % index + 1`` which, due to operator precedence,
    # tried to add the integer 1 to the formatted string and raised
    # TypeError for any non-empty ``names``. The intent was a 1-based
    # member index ('GroupName.1', 'GroupName.2', ...).
    for index, name in enumerate(names, 1):
        params['GroupName.%s' % index] = name
    response = self.connection.request(self.path, params=params).object
    return self._to_placement_groups(response)
def ex_register_image(self, name, description=None, architecture=None,
                      image_location=None, root_device_name=None,
                      block_device_mapping=None, kernel_id=None,
                      ramdisk_id=None, virtualization_type=None,
                      ena_support=None, billing_products=None,
                      sriov_net_support=None):
    """
    Registers an Amazon Machine Image based off of an EBS-backed
    instance. Can also be used to create images from snapshots. More
    information can be found at http://goo.gl/hqZq0a.

    :param name: The name for the AMI being registered
    :type name: ``str``

    :param description: The description of the AMI (optional)
    :type description: ``str``

    :param architecture: The architecture of the AMI (i386/x86_64)
                         (optional)
    :type architecture: ``str``

    :param image_location: The location of the AMI within Amazon S3
                           Required if registering an instance
                           store-backed AMI
    :type image_location: ``str``

    :param root_device_name: The device name for the root device
                             Required if registering an EBS-backed AMI
    :type root_device_name: ``str``

    :param block_device_mapping: A dictionary of the disk layout
                                 (optional)
    :type block_device_mapping: ``dict``

    :param kernel_id: Kernel id for AMI (optional)
    :type kernel_id: ``str``

    :param ramdisk_id: RAM disk for AMI (optional)
    :type ramdisk_id: ``str``

    :param virtualization_type: The type of virtualization for the
                                AMI you are registering, paravirt
                                or hvm (optional)
    :type virtualization_type: ``str``

    :param ena_support: Enable enhanced networking with Elastic
                        Network Adapter for the AMI
    :type ena_support: ``bool``

    :param billing_products: The billing product codes
    :type billing_products: ''list''

    :param sriov_net_support: Set to "simple" to enable enhanced
                              networking with the Intel 82599 Virtual
                              Function interface
    :type sriov_net_support: ``str``

    :rtype: :class:`NodeImage`
    """
    params = {'Action': 'RegisterImage',
              'Name': name}
    # Scalar optionals map one-to-one onto request parameters.
    scalar_options = (
        ('Description', description),
        ('Architecture', architecture),
        ('ImageLocation', image_location),
        ('RootDeviceName', root_device_name),
        ('KernelId', kernel_id),
        ('RamDiskId', ramdisk_id),
        ('VirtualizationType', virtualization_type),
        ('EnaSupport', ena_support),
        ('SriovNetSupport', sriov_net_support),
    )
    for param_name, value in scalar_options:
        if value is not None:
            params[param_name] = value
    # Structured optionals are flattened by dedicated helpers.
    if block_device_mapping is not None:
        params.update(self._get_block_device_mapping_params(
            block_device_mapping))
    if billing_products is not None:
        params.update(self._get_billing_product_params(
            billing_products))
    response = self.connection.request(self.path, params=params).object
    return self._to_image(response)
def ex_list_networks(self, network_ids=None, filters=None):
    """
    Returns a list of :class:`EC2Network` objects for the
    current region.

    :param network_ids: Returns only networks matching the provided
                        network IDs. If not specified, a list of all
                        the networks in the corresponding region
                        is returned.
    :type network_ids: ``list``

    :param filters: The filters so that the list returned includes
                    information for certain networks only.
    :type filters: ``dict``

    :rtype: ``list`` of :class:`EC2Network`
    """
    params = {'Action': 'DescribeVpcs'}
    if network_ids:
        params.update(self._pathlist('VpcId', network_ids))
    if filters:
        params.update(self._build_filters(filters))
    response = self.connection.request(self.path, params=params).object
    return self._to_networks(response)
def ex_create_network(self, cidr_block, name=None,
                      instance_tenancy='default'):
    """
    Create a network/VPC

    :param cidr_block: The CIDR block assigned to the network
    :type cidr_block: ``str``

    :param name: An optional name for the network
    :type name: ``str``

    :param instance_tenancy: The allowed tenancy of instances launched
                             into the VPC.
                             Valid values: default/dedicated
    :type instance_tenancy: ``str``

    :return: Dictionary of network properties
    :rtype: ``dict``
    """
    create_params = {'Action': 'CreateVpc',
                     'CidrBlock': cidr_block,
                     'InstanceTenancy': instance_tenancy}
    response = self.connection.request(self.path,
                                       params=create_params).object
    vpc_elem = response.findall(fixxpath(xpath='vpc',
                                         namespace=NAMESPACE))[0]
    network = self._to_network(vpc_elem, name)
    # Mirror the name into a Name tag so the VPC is labelled in AWS.
    if name and self.ex_create_tags(network, {'Name': name}):
        network.extra['tags']['Name'] = name
    return network
def ex_delete_network(self, vpc):
    """
    Deletes a network/VPC.

    :param vpc: VPC to delete.
    :type vpc: :class:`.EC2Network`

    :rtype: ``bool``
    """
    delete_params = {'Action': 'DeleteVpc', 'VpcId': vpc.id}
    result = self.connection.request(self.path, params=delete_params).object
    return self._get_boolean(result)
def ex_list_subnets(self, subnet_ids=None, filters=None):
    """
    Returns a list of :class:`EC2NetworkSubnet` objects for the
    current region.

    :param subnet_ids: Returns only subnets matching the provided
                       subnet IDs. If not specified, a list of all
                       the subnets in the corresponding region
                       is returned.
    :type subnet_ids: ``list``

    :param filters: The filters so that the list returned includes
                    information for certain subnets only.
    :type filters: ``dict``

    :rtype: ``list`` of :class:`EC2NetworkSubnet`
    """
    params = {'Action': 'DescribeSubnets'}
    if subnet_ids:
        params.update(self._pathlist('SubnetId', subnet_ids))
    if filters:
        params.update(self._build_filters(filters))
    response = self.connection.request(self.path, params=params).object
    return self._to_subnets(response)
def ex_create_subnet(self, vpc_id, cidr_block,
                     availability_zone, name=None):
    """
    Creates a network subnet within a VPC.

    :param vpc_id: The ID of the VPC that the subnet should be
                   associated with
    :type vpc_id: ``str``

    :param cidr_block: The CIDR block assigned to the subnet
    :type cidr_block: ``str``

    :param availability_zone: The availability zone where the subnet
                              should reside
    :type availability_zone: ``str``

    :param name: An optional name for the network
    :type name: ``str``

    :rtype: :class: `EC2NetworkSubnet`
    """
    create_params = {'Action': 'CreateSubnet',
                     'VpcId': vpc_id,
                     'CidrBlock': cidr_block,
                     'AvailabilityZone': availability_zone}
    response = self.connection.request(self.path,
                                       params=create_params).object
    subnet_elem = response.findall(fixxpath(xpath='subnet',
                                            namespace=NAMESPACE))[0]
    subnet = self._to_subnet(subnet_elem, name)
    # Mirror the name into a Name tag so the subnet is labelled in AWS.
    if name and self.ex_create_tags(subnet, {'Name': name}):
        subnet.extra['tags']['Name'] = name
    return subnet
def ex_delete_subnet(self, subnet):
    """
    Deletes a VPC subnet.

    :param subnet: The subnet to delete
    :type subnet: :class:`.EC2NetworkSubnet`

    :rtype: ``bool``
    """
    delete_params = {'Action': 'DeleteSubnet', 'SubnetId': subnet.id}
    result = self.connection.request(self.path, params=delete_params).object
    return self._get_boolean(result)
def ex_list_security_groups(self):
    """
    Lists existing Security Groups.

    @note: This is a non-standard extension API, and only works for EC2.

    :rtype: ``list`` of ``str``
    """
    response = self.connection.request(
        self.path, params={'Action': 'DescribeSecurityGroups'}).object
    # Only group names are returned; use ex_get_security_groups for
    # full group objects.
    return [findtext(element=group, xpath='groupName',
                     namespace=NAMESPACE)
            for group in findall(element=response,
                                 xpath='securityGroupInfo/item',
                                 namespace=NAMESPACE)]
def ex_get_security_groups(self, group_ids=None,
                           group_names=None, filters=None):
    """
    Returns a list of :class:`EC2SecurityGroup` objects for the
    current region.

    :param group_ids: Returns only groups matching the provided
                      group IDs.
    :type group_ids: ``list``

    :param group_names: Returns only groups matching the provided
                        group names.
    :type group_names: ``list``

    :param filters: The filters so that the list returned includes
                    information for specific security groups only.
    :type filters: ``dict``

    :rtype: ``list`` of :class:`EC2SecurityGroup`
    """
    params = {'Action': 'DescribeSecurityGroups'}
    if group_ids:
        params.update(self._pathlist('GroupId', group_ids))
    if group_names:
        # GroupName parameters are 1-based on the wire.
        for name_idx, group_name in enumerate(group_names, 1):
            params['GroupName.%s' % (name_idx)] = group_name
    if filters:
        params.update(self._build_filters(filters))
    response = self.connection.request(self.path, params=params)
    return self._to_security_groups(response.object)
def ex_create_security_group(self, name, description, vpc_id=None):
    """
    Creates a new Security Group in EC2-Classic or a targeted VPC.

    :param name: The name of the security group to create.
                 This must be unique.
    :type name: ``str``

    :param description: Human readable description of a Security
                        Group.
    :type description: ``str``

    :param vpc_id: Optional identifier for VPC networks
    :type vpc_id: ``str``

    :rtype: ``dict``
    """
    create_params = {'Action': 'CreateSecurityGroup',
                     'GroupName': name,
                     'GroupDescription': description}
    if vpc_id is not None:
        create_params['VpcId'] = vpc_id
    response = self.connection.request(self.path,
                                       params=create_params).object
    new_group_id = findattr(element=response, xpath='groupId',
                            namespace=NAMESPACE)
    return {'group_id': new_group_id}
def ex_delete_security_group_by_id(self, group_id):
    """
    Deletes a new Security Group using the group ID.

    :param group_id: The ID of the security group
    :type group_id: ``str``

    :rtype: ``bool``
    """
    delete_params = {'Action': 'DeleteSecurityGroup', 'GroupId': group_id}
    result = self.connection.request(self.path, params=delete_params).object
    return self._get_boolean(result)
def ex_delete_security_group_by_name(self, group_name):
    """
    Deletes a new Security Group using the group name.

    :param group_name: The name of the security group
    :type group_name: ``str``

    :rtype: ``bool``
    """
    delete_params = {'Action': 'DeleteSecurityGroup',
                     'GroupName': group_name}
    result = self.connection.request(self.path, params=delete_params).object
    return self._get_boolean(result)
def ex_delete_security_group(self, name):
    """
    A wrapper method which calls ex_delete_security_group_by_name.

    :param name: The name of the security group
    :type name: ``str``

    :rtype: ``bool``
    """
    # Delegates to the by-name variant for backwards compatibility.
    return self.ex_delete_security_group_by_name(group_name=name)
def ex_authorize_security_group(self, name, from_port, to_port, cidr_ip,
                                protocol='tcp'):
    """
    Edit a Security Group to allow specific traffic.

    @note: This is a non-standard extension API, and only works for EC2.

    :param name: The name of the security group to edit
    :type name: ``str``

    :param from_port: The beginning of the port range to open
    :type from_port: ``str``

    :param to_port: The end of the port range to open
    :type to_port: ``str``

    :param cidr_ip: The ip to allow traffic for.
    :type cidr_ip: ``str``

    :param protocol: tcp/udp/icmp
    :type protocol: ``str``

    :rtype: ``bool``

    Note: if the rule already exists the duplicate error is swallowed
    and the method returns ``None``.
    """
    rule_params = {'Action': 'AuthorizeSecurityGroupIngress',
                   'GroupName': name,
                   'IpProtocol': protocol,
                   'FromPort': str(from_port),
                   'ToPort': str(to_port),
                   'CidrIp': cidr_ip}
    try:
        response = self.connection.request(
            self.path, params=rule_params.copy()).object
        return self._get_boolean(response)
    except Exception:
        # Duplicate rules are tolerated; anything else is re-raised.
        e = sys.exc_info()[1]
        if e.args[0].find('InvalidPermission.Duplicate') == -1:
            raise e
def ex_authorize_security_group_ingress(self, id, from_port, to_port,
                                        cidr_ips=None, group_pairs=None,
                                        protocol='tcp'):
    """
    Edit a Security Group to allow specific ingress traffic using
    CIDR blocks or either a group ID, group name or user ID (account).

    :param id: The id of the security group to edit
    :type id: ``str``

    :param from_port: The beginning of the port range to open
    :type from_port: ``int``

    :param to_port: The end of the port range to open
    :type to_port: ``int``

    :param cidr_ips: The list of IP ranges to allow traffic for.
    :type cidr_ips: ``list``

    :param group_pairs: Source user/group pairs to allow traffic for.
                        More info can be found at http://goo.gl/stBHJF

                        EC2 Classic Example: To allow access from any
                        system associated with the default group on
                        account 1234567890
                        [{'group_name': 'default', 'user_id': '1234567890'}]

                        VPC example: To allow access from any system
                        associated with security group sg-47ad482e on
                        your own account
                        [{'group_id': ' sg-47ad482e'}]
    :type group_pairs: ``list`` of ``dict``

    :param protocol: tcp/udp/icmp
    :type protocol: ``str``

    :rtype: ``bool``
    """
    request_params = self._get_common_security_group_params(
        id, protocol, from_port, to_port, cidr_ips, group_pairs)
    request_params['Action'] = 'AuthorizeSecurityGroupIngress'
    response = self.connection.request(self.path,
                                       params=request_params).object
    return self._get_boolean(response)
def ex_authorize_security_group_egress(self, id, from_port, to_port,
                                       cidr_ips, group_pairs=None,
                                       protocol='tcp'):
    """
    Edit a Security Group to allow specific egress traffic using
    CIDR blocks or either a group ID, group name or user ID (account).
    This call is not supported for EC2 classic and only works for VPC
    groups.

    :param id: The id of the security group to edit
    :type id: ``str``

    :param from_port: The beginning of the port range to open
    :type from_port: ``int``

    :param to_port: The end of the port range to open
    :type to_port: ``int``

    :param cidr_ips: The list of ip ranges to allow traffic for.
    :type cidr_ips: ``list``

    :param group_pairs: Source user/group pairs to allow traffic for.
                        More info can be found at http://goo.gl/stBHJF

                        EC2 Classic Example: To allow access from any
                        system associated with the default group on
                        account 1234567890
                        [{'group_name': 'default', 'user_id': '1234567890'}]

                        VPC Example: Allow access from any system
                        associated with security group sg-47ad482e on
                        your own account
                        [{'group_id': ' sg-47ad482e'}]
    :type group_pairs: ``list`` of ``dict``

    :param protocol: tcp/udp/icmp
    :type protocol: ``str``

    :rtype: ``bool``
    """
    request_params = self._get_common_security_group_params(
        id, protocol, from_port, to_port, cidr_ips, group_pairs)
    request_params['Action'] = 'AuthorizeSecurityGroupEgress'
    response = self.connection.request(self.path,
                                       params=request_params).object
    return self._get_boolean(response)
def ex_revoke_security_group_ingress(self, id, from_port, to_port,
                                     cidr_ips=None, group_pairs=None,
                                     protocol='tcp'):
    """
    Edits a Security Group to revoke specific ingress traffic using
    CIDR blocks or either a group ID, group name or user ID (account).

    :param id: The ID of the security group to edit
    :type id: ``str``

    :param from_port: The beginning of the port range to open
    :type from_port: ``int``

    :param to_port: The end of the port range to open
    :type to_port: ``int``

    :param cidr_ips: The list of ip ranges to allow traffic for.
    :type cidr_ips: ``list``

    :param group_pairs: Source user/group pairs to allow traffic for.
                        More info can be found at http://goo.gl/stBHJF

                        EC2 Classic Example: To allow access from any
                        system associated with the default group on
                        account 1234567890
                        [{'group_name': 'default', 'user_id': '1234567890'}]

                        VPC Example: Allow access from any system
                        associated with security group sg-47ad482e on
                        your own account
                        [{'group_id': ' sg-47ad482e'}]
    :type group_pairs: ``list`` of ``dict``

    :param protocol: tcp/udp/icmp
    :type protocol: ``str``

    :rtype: ``bool``
    """
    request_params = self._get_common_security_group_params(
        id, protocol, from_port, to_port, cidr_ips, group_pairs)
    request_params['Action'] = 'RevokeSecurityGroupIngress'
    response = self.connection.request(self.path,
                                       params=request_params).object
    return self._get_boolean(response)
def ex_revoke_security_group_egress(self, id, from_port, to_port,
                                    cidr_ips=None, group_pairs=None,
                                    protocol='tcp'):
    """
    Edit a Security Group to revoke specific egress traffic using
    CIDR blocks or either a group ID, group name or user ID (account).
    This call is not supported for EC2 classic and only works for
    VPC groups.

    :param id: The id of the security group to edit
    :type id: ``str``

    :param from_port: The beginning of the port range to open
    :type from_port: ``int``

    :param to_port: The end of the port range to open
    :type to_port: ``int``

    :param cidr_ips: The list of ip ranges to allow traffic for.
    :type cidr_ips: ``list``

    :param group_pairs: Source user/group pairs to allow traffic for.
                        More info can be found at http://goo.gl/stBHJF

                        EC2 Classic Example: To allow access from any
                        system associated with the default group on
                        account 1234567890
                        [{'group_name': 'default', 'user_id': '1234567890'}]

                        VPC Example: Allow access from any system
                        associated with security group sg-47ad482e on
                        your own account
                        [{'group_id': ' sg-47ad482e'}]
    :type group_pairs: ``list`` of ``dict``

    :param protocol: tcp/udp/icmp
    :type protocol: ``str``

    :rtype: ``bool``
    """
    request_params = self._get_common_security_group_params(
        id, protocol, from_port, to_port, cidr_ips, group_pairs)
    request_params['Action'] = 'RevokeSecurityGroupEgress'
    response = self.connection.request(self.path,
                                       params=request_params).object
    return self._get_boolean(response)
def ex_authorize_security_group_permissive(self, name):
    """
    Edit a Security Group to allow all traffic.

    @note: This is a non-standard extension API, and only works for EC2.

    Opens tcp and udp on the full port range (0-65535) and all icmp
    types (-1/-1) from 0.0.0.0/0. Rules that already exist
    (InvalidPermission.Duplicate) are silently skipped; any other
    error is re-raised.

    :param name: The name of the security group to edit
    :type name: ``str``

    :rtype: ``list`` of ``str``
    """
    results = []
    params = {'Action': 'AuthorizeSecurityGroupIngress',
              'GroupName': name,
              'IpProtocol': 'tcp',
              'FromPort': '0',
              'ToPort': '65535',
              'CidrIp': '0.0.0.0/0'}
    # Refactored: the original repeated the request/try/except block
    # three times; drive the three rules from a list instead. The
    # updates are applied cumulatively, exactly as the original did.
    rule_updates = [
        {},                                 # tcp, ports 0-65535
        {'IpProtocol': 'udp'},              # udp, ports 0-65535
        {'IpProtocol': 'icmp', 'FromPort': '-1', 'ToPort': '-1'},
    ]
    for update in rule_updates:
        params.update(update)
        try:
            results.append(
                self.connection.request(self.path,
                                        params=params.copy()).object
            )
        except Exception:
            e = sys.exc_info()[1]
            if e.args[0].find("InvalidPermission.Duplicate") == -1:
                raise e
    return results
def ex_list_availability_zones(self, only_available=True):
    """
    Returns a list of :class:`ExEC2AvailabilityZone` objects for the
    current region.

    Note: This is an extension method and is only available for EC2
    driver.

    :keyword only_available: If true, returns only availability zones
                             with state 'available'
    :type only_available: ``str``

    :rtype: ``list`` of :class:`ExEC2AvailabilityZone`
    """
    params = {'Action': 'DescribeAvailabilityZones'}
    # Always scope the query to the driver's region; optionally narrow
    # to zones currently in the 'available' state.
    zone_filters = {'region-name': self.region_name}
    if only_available:
        zone_filters['state'] = 'available'
    params.update(self._build_filters(zone_filters))
    result = self.connection.request(self.path,
                                     params=params.copy()).object
    zones = []
    for item in findall(element=result,
                        xpath='availabilityZoneInfo/item',
                        namespace=NAMESPACE):
        zones.append(ExEC2AvailabilityZone(
            name=findtext(element=item, xpath='zoneName',
                          namespace=NAMESPACE),
            zone_state=findtext(element=item, xpath='zoneState',
                                namespace=NAMESPACE),
            region_name=findtext(element=item, xpath='regionName',
                                 namespace=NAMESPACE)))
    return zones
def ex_describe_tags(self, resource):
"""
Returns a dictionary of tags for a resource (e.g. Node or
StorageVolume).
:param resource: The resource to be used
:type resource: any resource class, such as :class:`Node,`
:class:`StorageVolume,` or :class:NodeImage`
:return: A dictionary of Node tags
:rtype: ``dict``
"""
params = {'Action': 'DescribeTags'}
filters = {
'resource-id': resource.id
}
params.update(self._build_filters(filters))
result = self.connection.request(self.path, params=params).object
return self._get_resource_tags(result)
def ex_create_tags(self, resource, tags):
"""
Creates tags for a resource (Node or StorageVolume).
:param resource: The resource to be tagged
:type resource: :class:`Node` or :class:`StorageVolume` or
:class:`VolumeSnapshot`
:param tags: A dictionary or other mapping of strings to strings,
associating tag names with tag values.
:type tags: ``dict``
:rtype: ``bool``
"""
if not tags:
return
params = {'Action': 'CreateTags',
'ResourceId.0': resource.id}
for i, key in enumerate(tags):
params['Tag.%d.Key' % i] = key
params['Tag.%d.Value' % i] = tags[key]
res = self.connection.request(self.path,
params=params.copy()).object
return self._get_boolean(res)
def ex_delete_tags(self, resource, tags):
"""
Deletes tags from a resource.
:param resource: The resource to be tagged
:type resource: :class:`Node` or :class:`StorageVolume`
:param tags: A dictionary or other mapping of strings to strings,
specifying the tag names and tag values to be deleted.
:type tags: ``dict``
:rtype: ``bool``
"""
if not tags:
return
params = {'Action': 'DeleteTags',
'ResourceId.0': resource.id}
for i, key in enumerate(tags):
params['Tag.%d.Key' % i] = key
if tags[key] is not None:
params['Tag.%d.Value' % i] = tags[key]
res = self.connection.request(self.path,
params=params.copy()).object
return self._get_boolean(res)
def ex_get_metadata_for_node(self, node):
"""
Returns the metadata associated with the node.
:param node: Node instance
:type node: :class:`Node`
:return: A dictionary or other mapping of strings to strings,
associating tag names with tag values.
:rtype tags: ``dict``
"""
return node.extra['tags']
def ex_allocate_address(self, domain='standard'):
"""
Allocate a new Elastic IP address for EC2 classic or VPC
:param domain: The domain to allocate the new address in
(standard/vpc)
:type domain: ``str``
:return: Instance of ElasticIP
:rtype: :class:`ElasticIP`
"""
params = {'Action': 'AllocateAddress'}
if domain == 'vpc':
params['Domain'] = domain
response = self.connection.request(self.path, params=params).object
return self._to_address(response, only_associated=False)
def ex_release_address(self, elastic_ip, domain=None):
"""
Releases an Elastic IP address using the IP (EC2-Classic) or
using the allocation ID (VPC).
:param elastic_ip: Elastic IP instance
:type elastic_ip: :class:`ElasticIP`
:param domain: The domain where the IP resides (vpc only)
:type domain: ``str``
:return: True on success, False otherwise.
:rtype: ``bool``
"""
params = {'Action': 'ReleaseAddress'}
if domain is not None and domain != 'vpc':
raise AttributeError('Domain can only be set to vpc')
if domain is None:
params['PublicIp'] = elastic_ip.ip
else:
params['AllocationId'] = elastic_ip.extra['allocation_id']
response = self.connection.request(self.path, params=params).object
return self._get_boolean(response)
def ex_describe_all_addresses(self, only_associated=False):
"""
Returns all the Elastic IP addresses for this account
optionally, returns only addresses associated with nodes.
:param only_associated: If true, return only the addresses
that are associated with an instance.
:type only_associated: ``bool``
:return: List of Elastic IP addresses.
:rtype: ``list`` of :class:`ElasticIP`
"""
params = {'Action': 'DescribeAddresses'}
response = self.connection.request(self.path, params=params).object
# We will send our only_associated boolean over to
# shape how the return data is sent back
return self._to_addresses(response, only_associated)
def ex_associate_address_with_node(self, node, elastic_ip, domain=None):
"""
Associate an Elastic IP address with a particular node.
:param node: Node instance
:type node: :class:`Node`
:param elastic_ip: Elastic IP instance
:type elastic_ip: :class:`ElasticIP`
:param domain: The domain where the IP resides (vpc only)
:type domain: ``str``
:return: A string representation of the association ID which is
required for VPC disassociation. EC2/standard
addresses return None
:rtype: ``None`` or ``str``
"""
params = {'Action': 'AssociateAddress', 'InstanceId': node.id}
if domain is not None and domain != 'vpc':
raise AttributeError('Domain can only be set to vpc')
if domain is None:
params.update({'PublicIp': elastic_ip.ip})
else:
params.update({'AllocationId': elastic_ip.extra['allocation_id']})
response = self.connection.request(self.path, params=params).object
association_id = findtext(element=response,
xpath='associationId',
namespace=NAMESPACE)
return association_id
def ex_associate_addresses(self, node, elastic_ip, domain=None):
"""
Note: This method has been deprecated in favor of
the ex_associate_address_with_node method.
"""
return self.ex_associate_address_with_node(node=node,
elastic_ip=elastic_ip,
domain=domain)
def ex_disassociate_address(self, elastic_ip, domain=None):
"""
Disassociates an Elastic IP address using the IP (EC2-Classic)
or the association ID (VPC).
:param elastic_ip: ElasticIP instance
:type elastic_ip: :class:`ElasticIP`
:param domain: The domain where the IP resides (vpc only)
:type domain: ``str``
:return: True on success, False otherwise.
:rtype: ``bool``
"""
params = {'Action': 'DisassociateAddress'}
if domain is not None and domain != 'vpc':
raise AttributeError('Domain can only be set to vpc')
if domain is None:
params['PublicIp'] = elastic_ip.ip
else:
params['AssociationId'] = elastic_ip.extra['association_id']
res = self.connection.request(self.path, params=params).object
return self._get_boolean(res)
def ex_describe_addresses(self, nodes):
"""
Returns Elastic IP addresses for all the nodes in the provided list.
:param nodes: A list of :class:`Node` instances
:type nodes: ``list`` of :class:`Node`
:return: Dictionary where a key is a node ID and the value is a
list with the Elastic IP addresses associated with
this node.
:rtype: ``dict``
"""
if not nodes:
return {}
params = {'Action': 'DescribeAddresses'}
if len(nodes) == 1:
self._add_instance_filter(params, nodes[0])
result = self.connection.request(self.path, params=params).object
node_instance_ids = [node.id for node in nodes]
nodes_elastic_ip_mappings = {}
# We will set only_associated to True so that we only get back
# IPs which are associated with instances
only_associated = True
for node_id in node_instance_ids:
nodes_elastic_ip_mappings.setdefault(node_id, [])
for addr in self._to_addresses(result,
only_associated):
instance_id = addr.instance_id
if node_id == instance_id:
nodes_elastic_ip_mappings[instance_id].append(
addr.ip)
return nodes_elastic_ip_mappings
def ex_describe_addresses_for_node(self, node):
"""
Returns a list of Elastic IP Addresses associated with this node.
:param node: Node instance
:type node: :class:`Node`
:return: List Elastic IP Addresses attached to this node.
:rtype: ``list`` of ``str``
"""
node_elastic_ips = self.ex_describe_addresses([node])
return node_elastic_ips[node.id]
# Network interface management methods
def ex_list_network_interfaces(self):
"""
Returns all network interfaces.
:return: List of EC2NetworkInterface instances
:rtype: ``list`` of :class `EC2NetworkInterface`
"""
params = {'Action': 'DescribeNetworkInterfaces'}
return self._to_interfaces(
self.connection.request(self.path, params=params).object
)
def ex_create_network_interface(self, subnet, name=None,
description=None,
private_ip_address=None):
"""
Create a network interface within a VPC subnet.
:param subnet: EC2NetworkSubnet instance
:type subnet: :class:`EC2NetworkSubnet`
:param name: Optional name of the interface
:type name: ``str``
:param description: Optional description of the network interface
:type description: ``str``
:param private_ip_address: Optional address to assign as the
primary private IP address of the
interface. If one is not provided then
Amazon will automatically auto-assign
an available IP. EC2 allows assignment
of multiple IPs, but this will be
the primary.
:type private_ip_address: ``str``
:return: EC2NetworkInterface instance
:rtype: :class `EC2NetworkInterface`
"""
params = {'Action': 'CreateNetworkInterface',
'SubnetId': subnet.id}
if description:
params['Description'] = description
if private_ip_address:
params['PrivateIpAddress'] = private_ip_address
response = self.connection.request(self.path, params=params).object
element = response.findall(fixxpath(xpath='networkInterface',
namespace=NAMESPACE))[0]
interface = self._to_interface(element, name)
if name and self.ex_create_tags(interface, {'Name': name}):
interface.extra['tags']['Name'] = name
return interface
def ex_delete_network_interface(self, network_interface):
"""
Deletes a network interface.
:param network_interface: EC2NetworkInterface instance
:type network_interface: :class:`EC2NetworkInterface`
:rtype: ``bool``
"""
params = {'Action': 'DeleteNetworkInterface',
'NetworkInterfaceId': network_interface.id}
res = self.connection.request(self.path, params=params).object
return self._get_boolean(res)
def ex_attach_network_interface_to_node(self, network_interface,
node, device_index):
"""
Attach a network interface to an instance.
:param network_interface: EC2NetworkInterface instance
:type network_interface: :class:`EC2NetworkInterface`
:param node: Node instance
:type node: :class:`Node`
:param device_index: The interface device index
:type device_index: ``int``
:return: String representation of the attachment id.
This is required to detach the interface.
:rtype: ``str``
"""
params = {'Action': 'AttachNetworkInterface',
'NetworkInterfaceId': network_interface.id,
'InstanceId': node.id,
'DeviceIndex': device_index}
response = self.connection.request(self.path, params=params).object
attachment_id = findattr(element=response, xpath='attachmentId',
namespace=NAMESPACE)
return attachment_id
def ex_detach_network_interface(self, attachment_id, force=False):
"""
Detach a network interface from an instance.
:param attachment_id: The attachment ID associated with the
interface
:type attachment_id: ``str``
:param force: Forces the detachment.
:type force: ``bool``
:return: ``True`` on successful detachment, ``False`` otherwise.
:rtype: ``bool``
"""
params = {'Action': 'DetachNetworkInterface',
'AttachmentId': attachment_id}
if force:
params['Force'] = True
res = self.connection.request(self.path, params=params).object
return self._get_boolean(res)
def ex_modify_instance_attribute(self, node, attributes):
"""
Modify node attributes.
A list of valid attributes can be found at http://goo.gl/gxcj8
:param node: Node instance
:type node: :class:`Node`
:param attributes: Dictionary with node attributes
:type attributes: ``dict``
:return: True on success, False otherwise.
:rtype: ``bool``
"""
attributes = attributes or {}
attributes.update({'InstanceId': node.id})
params = {'Action': 'ModifyInstanceAttribute'}
params.update(attributes)
res = self.connection.request(self.path,
params=params.copy()).object
return self._get_boolean(res)
def ex_modify_snapshot_attribute(self, snapshot, attributes):
"""
Modify Snapshot attributes.
:param snapshot: VolumeSnapshot instance
:type snanpshot: :class:`VolumeSnapshot`
:param attributes: Dictionary with snapshot attributes
:type attributes: ``dict``
:return: True on success, False otherwise.
:rtype: ``bool``
"""
attributes = attributes or {}
attributes.update({'SnapshotId': snapshot.id})
params = {'Action': 'ModifySnapshotAttribute'}
params.update(attributes)
res = self.connection.request(self.path,
params=params.copy()).object
return self._get_boolean(res)
def ex_modify_image_attribute(self, image, attributes):
"""
Modifies image attributes.
:param image: NodeImage instance
:type image: :class:`NodeImage`
:param attributes: A dictionary with node attributes
:type attributes: ``dict``
:return: True on success, False otherwise.
:rtype: ``bool``
"""
attributes = attributes or {}
attributes.update({'ImageId': image.id})
params = {'Action': 'ModifyImageAttribute'}
params.update(attributes)
res = self.connection.request(self.path,
params=params.copy()).object
return self._get_boolean(res)
def ex_change_node_size(self, node, new_size):
"""
Change the node size.
Note: Node must be turned of before changing the size.
:param node: Node instance
:type node: :class:`Node`
:param new_size: NodeSize instance
:type new_size: :class:`NodeSize`
:return: True on success, False otherwise.
:rtype: ``bool``
"""
if 'instancetype' in node.extra:
current_instance_type = node.extra['instancetype']
if current_instance_type == new_size.id:
raise ValueError('New instance size is the same as' +
'the current one')
attributes = {'InstanceType.Value': new_size.id}
return self.ex_modify_instance_attribute(node, attributes)
def ex_start_node(self, node):
"""
Starts the node by passing in the node object, does not work with
instance store backed instances.
:param node: The node to be used
:type node: :class:`Node`
:rtype: ``bool``
"""
params = {'Action': 'StartInstances'}
params.update(self._pathlist('InstanceId', [node.id]))
res = self.connection.request(self.path, params=params).object
return self._get_state_boolean(res)
def ex_stop_node(self, node):
"""
Stops the node by passing in the node object, does not work with
instance store backed instances
:param node: The node to be used
:type node: :class:`Node`
:rtype: ``bool``
"""
params = {'Action': 'StopInstances'}
params.update(self._pathlist('InstanceId', [node.id]))
res = self.connection.request(self.path, params=params).object
return self._get_state_boolean(res)
def ex_get_console_output(self, node):
"""
Gets console output for the node.
:param node: Node which should be used
:type node: :class:`Node`
:return: A dictionary with the following keys:
- instance_id (``str``)
- timestamp (``datetime.datetime``) - last output timestamp
- output (``str``) - console output
:rtype: ``dict``
"""
params = {
'Action': 'GetConsoleOutput',
'InstanceId': node.id
}
response = self.connection.request(self.path, params=params).object
timestamp = findattr(element=response,
xpath='timestamp',
namespace=NAMESPACE)
encoded_string = findattr(element=response,
xpath='output',
namespace=NAMESPACE)
timestamp = parse_date(timestamp)
if encoded_string:
output = base64.b64decode(b(encoded_string)).decode('utf-8')
else:
# No console output
output = None
return {'instance_id': node.id,
'timestamp': timestamp,
'output': output}
def ex_list_reserved_nodes(self):
"""
Lists all reserved instances/nodes which can be purchased from Amazon
for one or three year terms. Reservations are made at a region level
and reduce the hourly charge for instances.
More information can be found at http://goo.gl/ulXCC7.
:rtype: ``list`` of :class:`.EC2ReservedNode`
"""
params = {'Action': 'DescribeReservedInstances'}
response = self.connection.request(self.path, params=params).object
return self._to_reserved_nodes(response, 'reservedInstancesSet/item')
# Account specific methods
def ex_get_limits(self):
"""
Retrieve account resource limits.
:rtype: ``dict``
"""
attributes = ['max-instances', 'max-elastic-ips',
'vpc-max-elastic-ips']
params = {}
params['Action'] = 'DescribeAccountAttributes'
for index, attribute in enumerate(attributes):
params['AttributeName.%s' % (index)] = attribute
response = self.connection.request(self.path, params=params)
data = response.object
elems = data.findall(fixxpath(xpath='accountAttributeSet/item',
namespace=NAMESPACE))
result = {'resource': {}}
for elem in elems:
name = findtext(element=elem, xpath='attributeName',
namespace=NAMESPACE)
value = findtext(element=elem,
xpath='attributeValueSet/item/attributeValue',
namespace=NAMESPACE)
result['resource'][name] = int(value)
return result
# Deprecated extension methods
def ex_list_keypairs(self):
"""
Lists all the keypair names and fingerprints.
:rtype: ``list`` of ``dict``
"""
warnings.warn('This method has been deprecated in favor of '
'list_key_pairs method')
key_pairs = self.list_key_pairs()
result = []
for key_pair in key_pairs:
item = {
'keyName': key_pair.name,
'keyFingerprint': key_pair.fingerprint,
}
result.append(item)
return result
def ex_describe_all_keypairs(self):
"""
Returns names for all the available key pairs.
@note: This is a non-standard extension API, and only works for EC2.
:rtype: ``list`` of ``str``
"""
names = [key_pair.name for key_pair in self.list_key_pairs()]
return names
def ex_describe_keypairs(self, name):
"""
Here for backward compatibility.
"""
return self.ex_describe_keypair(name=name)
def ex_describe_keypair(self, name):
"""
Describes a keypair by name.
@note: This is a non-standard extension API, and only works for EC2.
:param name: The name of the keypair to describe.
:type name: ``str``
:rtype: ``dict``
"""
params = {
'Action': 'DescribeKeyPairs',
'KeyName.1': name
}
response = self.connection.request(self.path, params=params).object
key_name = findattr(element=response, xpath='keySet/item/keyName',
namespace=NAMESPACE)
fingerprint = findattr(element=response,
xpath='keySet/item/keyFingerprint',
namespace=NAMESPACE).strip()
return {
'keyName': key_name,
'keyFingerprint': fingerprint
}
def ex_create_keypair(self, name):
"""
Creates a new keypair
@note: This is a non-standard extension API, and only works for EC2.
:param name: The name of the keypair to Create. This must be
unique, otherwise an InvalidKeyPair.Duplicate exception is raised.
:type name: ``str``
:rtype: ``dict``
"""
warnings.warn('This method has been deprecated in favor of '
'create_key_pair method')
key_pair = self.create_key_pair(name=name)
result = {
'keyMaterial': key_pair.private_key,
'keyFingerprint': key_pair.fingerprint
}
return result
def ex_delete_keypair(self, keypair):
"""
Deletes a key pair by name.
@note: This is a non-standard extension API, and only works with EC2.
:param keypair: The name of the keypair to delete.
:type keypair: ``str``
:rtype: ``bool``
"""
warnings.warn('This method has been deprecated in favor of '
'delete_key_pair method')
keypair = KeyPair(name=keypair, public_key=None, fingerprint=None,
driver=self)
return self.delete_key_pair(keypair)
def ex_import_keypair_from_string(self, name, key_material):
"""
Imports a new public key where the public key is passed in as a string.
@note: This is a non-standard extension API, and only works for EC2.
:param name: The name of the public key to import. This must be
unique, otherwise an InvalidKeyPair.Duplicate exception is raised.
:type name: ``str``
:param key_material: The contents of a public key file.
:type key_material: ``str``
:rtype: ``dict``
"""
warnings.warn('This method has been deprecated in favor of '
'import_key_pair_from_string method')
key_pair = self.import_key_pair_from_string(name=name,
key_material=key_material)
result = {
'keyName': key_pair.name,
'keyFingerprint': key_pair.fingerprint
}
return result
def ex_import_keypair(self, name, keyfile):
"""
Imports a new public key where the public key is passed via a filename.
@note: This is a non-standard extension API, and only works for EC2.
:param name: The name of the public key to import. This must be
unique, otherwise an InvalidKeyPair. Duplicate
exception is raised.
:type name: ``str``
:param keyfile: The filename with the path of the public key
to import.
:type keyfile: ``str``
:rtype: ``dict``
"""
warnings.warn('This method has been deprecated in favor of '
'import_key_pair_from_file method')
key_pair = self.import_key_pair_from_file(name=name,
key_file_path=keyfile)
result = {
'keyName': key_pair.name,
'keyFingerprint': key_pair.fingerprint
}
return result
def ex_find_or_import_keypair_by_key_material(self, pubkey):
"""
Given a public key, look it up in the EC2 KeyPair database. If it
exists, return any information we have about it. Otherwise, create it.
Keys that are created are named based on their comment and fingerprint.
:rtype: ``dict``
"""
key_fingerprint = get_pubkey_ssh2_fingerprint(pubkey)
key_comment = get_pubkey_comment(pubkey, default='unnamed')
key_name = '%s-%s' % (key_comment, key_fingerprint)
key_pairs = self.list_key_pairs()
key_pairs = [key_pair for key_pair in key_pairs if
key_pair.fingerprint == key_fingerprint]
if len(key_pairs) >= 1:
key_pair = key_pairs[0]
result = {
'keyName': key_pair.name,
'keyFingerprint': key_pair.fingerprint
}
else:
result = self.ex_import_keypair_from_string(key_name, pubkey)
return result
def ex_list_internet_gateways(self, gateway_ids=None, filters=None):
"""
Describes available Internet gateways and whether or not they are
attached to a VPC. These are required for VPC nodes to communicate
over the Internet.
:param gateway_ids: Returns only Internet gateways matching the
provided Internet gateway IDs. If not
specified, a list of all the Internet
gateways in the corresponding region is
returned.
:type gateway_ids: ``list``
:param filters: The filters so the list returned inclues
information for certain gateways only.
:type filters: ``dict``
:rtype: ``list`` of :class:`.VPCInternetGateway`
"""
params = {'Action': 'DescribeInternetGateways'}
if gateway_ids:
params.update(self._pathlist('InternetGatewayId', gateway_ids))
if filters:
params.update(self._build_filters(filters))
response = self.connection.request(self.path, params=params).object
return self._to_internet_gateways(response, 'internetGatewaySet/item')
    def ex_create_internet_gateway(self, name=None):
        """
        Creates a new VPC Internet gateway.

        (The previous docstring incorrectly described this method as
        deleting a gateway and returning a ``bool``.)

        :param name: An optional name for the gateway. When provided, the
                     gateway is tagged with a ``Name`` tag set to this
                     value; the tag is only recorded locally if the
                     tagging API call succeeds.
        :type name: ``str``

        :return: The newly created Internet gateway.
        :rtype: :class:`.VPCInternetGateway`
        """
        params = {'Action': 'CreateInternetGateway'}
        resp = self.connection.request(self.path, params=params).object
        element = resp.findall(fixxpath(xpath='internetGateway',
                                        namespace=NAMESPACE))
        gateway = self._to_internet_gateway(element[0], name)
        # Only record the Name tag locally when the API call succeeded.
        if name and self.ex_create_tags(gateway, {'Name': name}):
            gateway.extra['tags']['Name'] = name
        return gateway
def ex_delete_internet_gateway(self, gateway):
"""
Deletes a VPC Internet gateway.
:param gateway: The gateway to delete
:type gateway: :class:`.VPCInternetGateway`
:rtype: ``bool``
"""
params = {'Action': 'DeleteInternetGateway',
'InternetGatewayId': gateway.id}
res = self.connection.request(self.path, params=params).object
return self._get_boolean(res)
def ex_attach_internet_gateway(self, gateway, network):
"""
Attach an Internet gateway to a VPC
:param gateway: The gateway to attach
:type gateway: :class:`.VPCInternetGateway`
:param network: The VPC network to attach to
:type network: :class:`.EC2Network`
:rtype: ``bool``
"""
params = {'Action': 'AttachInternetGateway',
'InternetGatewayId': gateway.id,
'VpcId': network.id}
res = self.connection.request(self.path, params=params).object
return self._get_boolean(res)
def ex_detach_internet_gateway(self, gateway, network):
"""
Detaches an Internet gateway from a VPC.
:param gateway: The gateway to detach
:type gateway: :class:`.VPCInternetGateway`
:param network: The VPC network to detach from
:type network: :class:`.EC2Network`
:rtype: ``bool``
"""
params = {'Action': 'DetachInternetGateway',
'InternetGatewayId': gateway.id,
'VpcId': network.id}
res = self.connection.request(self.path, params=params).object
return self._get_boolean(res)
def ex_list_route_tables(self, route_table_ids=None, filters=None):
"""
Describes one or more of a VPC's route tables.
These are used to determine where network traffic is directed.
:param route_table_ids: Returns only route tables matching the
provided route table IDs. If not specified,
a list of all the route tables in the
corresponding region is returned.
:type route_table_ids: ``list``
:param filters: The filters so that the list returned includes
information for certain route tables only.
:type filters: ``dict``
:rtype: ``list`` of :class:`.EC2RouteTable`
"""
params = {'Action': 'DescribeRouteTables'}
if route_table_ids:
params.update(self._pathlist('RouteTableId', route_table_ids))
if filters:
params.update(self._build_filters(filters))
response = self.connection.request(self.path, params=params)
return self._to_route_tables(response.object)
    def ex_create_route_table(self, network, name=None):
        """
        Creates a route table within a VPC.

        :param network: The VPC that the route table should be created in.
        :type network: :class:`.EC2Network`

        :param name: An optional name for the route table. When provided,
                     the table is tagged with a ``Name`` tag set to this
                     value; the tag is only recorded locally if the
                     tagging API call succeeds.
        :type name: ``str``

        :rtype: :class: `.EC2RouteTable`
        """
        params = {'Action': 'CreateRouteTable',
                  'VpcId': network.id}
        response = self.connection.request(self.path, params=params).object
        element = response.findall(fixxpath(xpath='routeTable',
                                            namespace=NAMESPACE))[0]
        route_table = self._to_route_table(element, name=name)
        # Only record the Name tag locally when the API call succeeded.
        if name and self.ex_create_tags(route_table, {'Name': name}):
            route_table.extra['tags']['Name'] = name
        return route_table
def ex_delete_route_table(self, route_table):
"""
Deletes a VPC route table.
:param route_table: The route table to delete.
:type route_table: :class:`.EC2RouteTable`
:rtype: ``bool``
"""
params = {'Action': 'DeleteRouteTable',
'RouteTableId': route_table.id}
res = self.connection.request(self.path, params=params).object
return self._get_boolean(res)
def ex_associate_route_table(self, route_table, subnet):
"""
Associates a route table with a subnet within a VPC.
Note: A route table can be associated with multiple subnets.
:param route_table: The route table to associate.
:type route_table: :class:`.EC2RouteTable`
:param subnet: The subnet to associate with.
:type subnet: :class:`.EC2Subnet`
:return: Route table association ID.
:rtype: ``str``
"""
params = {'Action': 'AssociateRouteTable',
'RouteTableId': route_table.id,
'SubnetId': subnet.id}
result = self.connection.request(self.path, params=params).object
association_id = findtext(element=result,
xpath='associationId',
namespace=NAMESPACE)
return association_id
def ex_dissociate_route_table(self, subnet_association):
"""
Dissociates a subnet from a route table.
:param subnet_association: The subnet association object or
subnet association ID.
:type subnet_association: :class:`.EC2SubnetAssociation` or
``str``
:rtype: ``bool``
"""
if isinstance(subnet_association, EC2SubnetAssociation):
subnet_association_id = subnet_association.id
else:
subnet_association_id = subnet_association
params = {'Action': 'DisassociateRouteTable',
'AssociationId': subnet_association_id}
res = self.connection.request(self.path, params=params).object
return self._get_boolean(res)
def ex_replace_route_table_association(self, subnet_association,
route_table):
"""
Changes the route table associated with a given subnet in a VPC.
Note: This method can be used to change which table is the main route
table in the VPC (Specify the main route table's association ID
and the route table to be the new main route table).
:param subnet_association: The subnet association object or
subnet association ID.
:type subnet_association: :class:`.EC2SubnetAssociation` or
``str``
:param route_table: The new route table to associate.
:type route_table: :class:`.EC2RouteTable`
:return: A new route table association ID.
:rtype: ``str``
"""
if isinstance(subnet_association, EC2SubnetAssociation):
subnet_association_id = subnet_association.id
else:
subnet_association_id = subnet_association
params = {'Action': 'ReplaceRouteTableAssociation',
'AssociationId': subnet_association_id,
'RouteTableId': route_table.id}
result = self.connection.request(self.path, params=params).object
new_association_id = findtext(element=result,
xpath='newAssociationId',
namespace=NAMESPACE)
return new_association_id
def ex_create_route(self, route_table, cidr,
internet_gateway=None, node=None,
network_interface=None, vpc_peering_connection=None):
"""
Creates a route entry in the route table.
:param route_table: The route table to create the route in.
:type route_table: :class:`.EC2RouteTable`
:param cidr: The CIDR block used for the destination match.
:type cidr: ``str``
:param internet_gateway: The Internet gateway to route
traffic through.
:type internet_gateway: :class:`.VPCInternetGateway`
:param node: The NAT instance to route traffic through.
:type node: :class:`Node`
:param network_interface: The network interface of the node
to route traffic through.
:type network_interface: :class:`.EC2NetworkInterface`
:param vpc_peering_connection: The VPC peering connection.
:type vpc_peering_connection: :class:`.VPCPeeringConnection`
:rtype: ``bool``
Note: You must specify one of the following: internet_gateway,
node, network_interface, vpc_peering_connection.
"""
params = {'Action': 'CreateRoute',
'RouteTableId': route_table.id,
'DestinationCidrBlock': cidr}
if internet_gateway:
params['GatewayId'] = internet_gateway.id
if node:
params['InstanceId'] = node.id
if network_interface:
params['NetworkInterfaceId'] = network_interface.id
if vpc_peering_connection:
params['VpcPeeringConnectionId'] = vpc_peering_connection.id
res = self.connection.request(self.path, params=params).object
return self._get_boolean(res)
def ex_delete_route(self, route_table, cidr):
"""
Deletes a route entry from the route table.
:param route_table: The route table to delete the route from.
:type route_table: :class:`.EC2RouteTable`
:param cidr: The CIDR block used for the destination match.
:type cidr: ``str``
:rtype: ``bool``
"""
params = {'Action': 'DeleteRoute',
'RouteTableId': route_table.id,
'DestinationCidrBlock': cidr}
res = self.connection.request(self.path, params=params).object
return self._get_boolean(res)
def ex_replace_route(self, route_table, cidr,
internet_gateway=None, node=None,
network_interface=None, vpc_peering_connection=None):
"""
Replaces an existing route entry within a route table in a VPC.
:param route_table: The route table to replace the route in.
:type route_table: :class:`.EC2RouteTable`
:param cidr: The CIDR block used for the destination match.
:type cidr: ``str``
:param internet_gateway: The new internet gateway to route
traffic through.
:type internet_gateway: :class:`.VPCInternetGateway`
:param node: The new NAT instance to route traffic through.
:type node: :class:`Node`
:param network_interface: The new network interface of the node
to route traffic through.
:type network_interface: :class:`.EC2NetworkInterface`
:param vpc_peering_connection: The new VPC peering connection.
:type vpc_peering_connection: :class:`.VPCPeeringConnection`
:rtype: ``bool``
Note: You must specify one of the following: internet_gateway,
node, network_interface, vpc_peering_connection.
"""
params = {'Action': 'ReplaceRoute',
'RouteTableId': route_table.id,
'DestinationCidrBlock': cidr}
if internet_gateway:
params['GatewayId'] = internet_gateway.id
if node:
params['InstanceId'] = node.id
if network_interface:
params['NetworkInterfaceId'] = network_interface.id
if vpc_peering_connection:
params['VpcPeeringConnectionId'] = vpc_peering_connection.id
res = self.connection.request(self.path, params=params).object
return self._get_boolean(res)
def ex_modify_volume(self, volume, parameters):
"""
Modify volume parameters.
A list of valid parameters can be found at https://goo.gl/N0rPEQ
:param Volume: Volume instance
:type Volume: :class:`Volume`
:param parameters: Dictionary with updated volume parameters
:type parameters: ``dict``
:return: Volume modification status object
:rtype: :class:`VolumeModification
"""
parameters = parameters or {}
volume_type = parameters.get('VolumeType')
if volume_type and volume_type not in VALID_VOLUME_TYPES:
raise ValueError('Invalid volume type specified: %s' % volume_type)
parameters.update({'Action': 'ModifyVolume', 'VolumeId': volume.id})
response = self.connection.request(self.path,
params=parameters.copy()).object
return self._to_volume_modification(response.findall(
fixxpath(xpath='volumeModification', namespace=NAMESPACE))[0])
def ex_describe_volumes_modifications(self, dry_run=False, volume_ids=None,
filters=None):
"""
Describes one or more of your volume modifications.
:param dry_run: dry_run
:type dry_run: ``bool``
:param volume_ids: The volume_ids so that the response includes
information for only said volumes
:type volume_ids: ``dict``
:param filters: The filters so that the response includes
information for only certain volumes
:type filters: ``dict``
:return: List of volume modification status objects
:rtype: ``list`` of :class:`VolumeModification
"""
params = {'Action': 'DescribeVolumesModifications'}
if dry_run:
params.update({'DryRun': dry_run})
if volume_ids:
params.update(self._pathlist('VolumeId', volume_ids))
if filters:
params.update(self._build_filters(filters))
response = self.connection.request(self.path, params=params).object
return self._to_volume_modifications(response)
def _ex_connection_class_kwargs(self):
kwargs = super(BaseEC2NodeDriver, self)._ex_connection_class_kwargs()
if hasattr(self, 'token') and self.token is not None:
kwargs['token'] = self.token
# Force signature_version 4 for tokens or auth breaks
kwargs['signature_version'] = '4'
else:
kwargs['signature_version'] = self.signature_version
return kwargs
def _to_nodes(self, object, xpath):
return [self._to_node(el)
for el in object.findall(fixxpath(xpath=xpath,
namespace=NAMESPACE))]
    def _to_node(self, element):
        """
        Parse an ``instancesSet/item`` XML element into a Node object.

        :param element: XML element describing a single instance.
        :type element: :class:`xml.etree.ElementTree.Element`

        :rtype: :class:`Node`
        """
        try:
            state = self.NODE_STATE_MAP[findattr(element=element,
                                                 xpath="instanceState/name",
                                                 namespace=NAMESPACE)
                                        ]
        except KeyError:
            # Unrecognized instance states map to UNKNOWN instead of failing.
            state = NodeState.UNKNOWN

        created = parse_date(findtext(element=element, xpath='launchTime',
                                      namespace=NAMESPACE))
        instance_id = findtext(element=element, xpath='instanceId',
                               namespace=NAMESPACE)
        # Public/private IPs become one-element lists when present,
        # empty lists otherwise.
        public_ip = findtext(element=element, xpath='ipAddress',
                             namespace=NAMESPACE)
        public_ips = [public_ip] if public_ip else []
        private_ip = findtext(element=element, xpath='privateIpAddress',
                              namespace=NAMESPACE)
        private_ips = [private_ip] if private_ip else []
        product_codes = []
        for p in findall(element=element,
                         xpath="productCodesSet/item/productCode",
                         namespace=NAMESPACE):
            product_codes.append(p)

        # Get our tags
        tags = self._get_resource_tags(element)
        # The "Name" tag doubles as the node name; fall back to the id.
        name = tags.get('Name', instance_id)

        # Get our extra dictionary
        extra = self._get_extra_dict(
            element, RESOURCE_EXTRA_ATTRIBUTES_MAP['node'])

        # Add additional properties to our extra dictionary
        extra['block_device_mapping'] = self._to_device_mappings(element)
        extra['groups'] = self._get_security_groups(element)
        extra['network_interfaces'] = self._to_interfaces(element)
        extra['product_codes'] = product_codes
        extra['tags'] = tags

        return Node(id=instance_id, name=name, state=state,
                    public_ips=public_ips, private_ips=private_ips,
                    driver=self.connection.driver, created_at=created,
                    extra=extra)
def _to_images(self, object):
return [self._to_image(el) for el in object.findall(
fixxpath(xpath='imagesSet/item', namespace=NAMESPACE))
]
    def _to_image(self, element):
        """
        Parse an ``imagesSet/item`` XML element into a NodeImage object.

        :rtype: :class:`NodeImage`
        """
        id = findtext(element=element, xpath='imageId', namespace=NAMESPACE)
        name = findtext(element=element, xpath='name', namespace=NAMESPACE)

        # Build block device mapping
        block_device_mapping = self._to_device_mappings(element)

        billing_products = []
        for p in findall(element=element,
                         xpath="billingProducts/item/billingProduct",
                         namespace=NAMESPACE):
            billing_products.append(p.text)

        # Get our tags
        tags = self._get_resource_tags(element)

        # Get our extra dictionary
        extra = self._get_extra_dict(
            element, RESOURCE_EXTRA_ATTRIBUTES_MAP['image'])

        # Add our tags and block device mapping
        extra['tags'] = tags
        extra['block_device_mapping'] = block_device_mapping
        extra['billing_products'] = billing_products

        return NodeImage(id=id, name=name, driver=self, extra=extra)
    def _to_volume(self, element, name=None):
        """
        Parse the XML element and return a StorageVolume object.

        :param name: An optional name for the volume. If not provided
                     then either tag with a key "Name" or volume ID
                     will be used (which ever is available first in that
                     order).
        :type name: ``str``

        :rtype: :class:`StorageVolume`
        """
        volId = findtext(element=element, xpath='volumeId',
                         namespace=NAMESPACE)
        size = findtext(element=element, xpath='size', namespace=NAMESPACE)
        raw_state = findtext(element=element, xpath='status',
                             namespace=NAMESPACE)
        # Unrecognized API states fall back to UNKNOWN instead of raising.
        state = self.VOLUME_STATE_MAP.get(raw_state,
                                          StorageVolumeState.UNKNOWN)

        # Get our tags
        tags = self._get_resource_tags(element)

        # If name was not passed into the method then
        # fall back then use the volume id
        name = name if name else tags.get('Name', volId)

        # Get our extra dictionary
        extra = self._get_extra_dict(
            element, RESOURCE_EXTRA_ATTRIBUTES_MAP['volume'])
        extra['tags'] = tags

        return StorageVolume(id=volId,
                             name=name,
                             size=int(size),
                             driver=self,
                             state=state,
                             extra=extra)
def _to_volume_modifications(self, object):
return [self._to_volume_modification(el) for el in object.findall(
fixxpath(xpath='volumeModificationSet/item', namespace=NAMESPACE))
]
def _to_volume_modification(self, element):
"""
Parse the XML element and return a StorageVolume object.
:rtype: :class:`EC2VolumeModification`
"""
params = self._get_extra_dict(element,
VOLUME_MODIFICATION_ATTRIBUTE_MAP)
return EC2VolumeModification(**params)
def _to_snapshots(self, response):
return [self._to_snapshot(el) for el in response.findall(
fixxpath(xpath='snapshotSet/item', namespace=NAMESPACE))
]
    def _to_snapshot(self, element, name=None):
        """
        Parse the XML element and return a VolumeSnapshot object.

        :param name: An optional name for the snapshot. If not provided
                     then either tag with a key "Name" or the snapshot ID
                     will be used (whichever is available first in that
                     order).
        :type name: ``str``

        :rtype: :class:`VolumeSnapshot`
        """
        snapId = findtext(element=element, xpath='snapshotId',
                          namespace=NAMESPACE)
        size = findtext(element=element, xpath='volumeSize',
                        namespace=NAMESPACE)
        created = parse_date(findtext(element=element, xpath='startTime',
                                      namespace=NAMESPACE))

        # Get our tags
        tags = self._get_resource_tags(element)

        # If name was not passed into the method then
        # fall back then use the snapshot id
        name = name if name else tags.get('Name', snapId)

        # Get our extra dictionary
        extra = self._get_extra_dict(
            element, RESOURCE_EXTRA_ATTRIBUTES_MAP['snapshot'])

        # Add tags and name to the extra dict
        extra['tags'] = tags
        extra['name'] = name

        # state; unrecognized API states fall back to UNKNOWN
        state = self.SNAPSHOT_STATE_MAP.get(
            extra["state"],
            VolumeSnapshotState.UNKNOWN
        )

        return VolumeSnapshot(snapId,
                              size=int(size),
                              driver=self,
                              extra=extra,
                              created=created,
                              state=state,
                              name=name)
def _to_import_snapshot_task(self, element):
status = findtext(element=element, xpath='importSnapshotTaskSet/item/'
'snapshotTaskDetail/status', namespace=NAMESPACE)
if status != 'completed':
snapshotId = None
else:
xpath = 'importSnapshotTaskSet/item/snapshotTaskDetail/snapshotId'
snapshotId = findtext(element=element, xpath=xpath,
namespace=NAMESPACE)
return EC2ImportSnapshotTask(status, snapshotId=snapshotId)
def _to_key_pairs(self, elems):
key_pairs = [self._to_key_pair(elem=elem) for elem in elems]
return key_pairs
def _to_key_pair(self, elem):
name = findtext(element=elem, xpath='keyName', namespace=NAMESPACE)
fingerprint = findtext(element=elem, xpath='keyFingerprint',
namespace=NAMESPACE).strip()
private_key = findtext(element=elem, xpath='keyMaterial',
namespace=NAMESPACE)
key_pair = KeyPair(name=name,
public_key=None,
fingerprint=fingerprint,
private_key=private_key,
driver=self)
return key_pair
def _to_security_groups(self, response):
return [self._to_security_group(el) for el in response.findall(
fixxpath(xpath='securityGroupInfo/item', namespace=NAMESPACE))
]
def _to_security_group(self, element):
# security group id
sg_id = findtext(element=element,
xpath='groupId',
namespace=NAMESPACE)
# security group name
name = findtext(element=element,
xpath='groupName',
namespace=NAMESPACE)
# Get our tags
tags = self._get_resource_tags(element)
# Get our extra dictionary
extra = self._get_extra_dict(
element, RESOURCE_EXTRA_ATTRIBUTES_MAP['security_group'])
# Add tags to the extra dict
extra['tags'] = tags
# Get ingress rules
ingress_rules = self._to_security_group_rules(
element, 'ipPermissions/item'
)
# Get egress rules
egress_rules = self._to_security_group_rules(
element, 'ipPermissionsEgress/item'
)
return EC2SecurityGroup(sg_id, name, ingress_rules,
egress_rules, extra=extra)
def _to_security_group_rules(self, element, xpath):
return [self._to_security_group_rule(el) for el in element.findall(
fixxpath(xpath=xpath, namespace=NAMESPACE))
]
def _to_security_group_rule(self, element):
"""
Parse the XML element and return a SecurityGroup object.
:rtype: :class:`EC2SecurityGroup`
"""
rule = {}
rule['protocol'] = findtext(element=element,
xpath='ipProtocol',
namespace=NAMESPACE)
rule['from_port'] = findtext(element=element,
xpath='fromPort',
namespace=NAMESPACE)
rule['to_port'] = findtext(element=element,
xpath='toPort',
namespace=NAMESPACE)
# get security groups
elements = element.findall(fixxpath(
xpath='groups/item',
namespace=NAMESPACE
))
rule['group_pairs'] = []
for element in elements:
item = {
'user_id': findtext(
element=element,
xpath='userId',
namespace=NAMESPACE),
'group_id': findtext(
element=element,
xpath='groupId',
namespace=NAMESPACE),
'group_name': findtext(
element=element,
xpath='groupName',
namespace=NAMESPACE)
}
rule['group_pairs'].append(item)
# get ip ranges
elements = element.findall(fixxpath(
xpath='ipRanges/item',
namespace=NAMESPACE
))
rule['cidr_ips'] = [
findtext(
element=element,
xpath='cidrIp',
namespace=NAMESPACE
) for element in elements]
return rule
def _to_networks(self, response):
return [self._to_network(el) for el in response.findall(
fixxpath(xpath='vpcSet/item', namespace=NAMESPACE))
]
def _to_network(self, element, name=None):
# Get the network id
vpc_id = findtext(element=element,
xpath='vpcId',
namespace=NAMESPACE)
# Get our tags
tags = self._get_resource_tags(element)
# Set our name if the Name key/value if available
# If we don't get anything back then use the vpc_id
name = name if name else tags.get('Name', vpc_id)
cidr_block = findtext(element=element,
xpath='cidrBlock',
namespace=NAMESPACE)
# Get our extra dictionary
extra = self._get_extra_dict(
element, RESOURCE_EXTRA_ATTRIBUTES_MAP['network'])
# Add tags to the extra dict
extra['tags'] = tags
return EC2Network(vpc_id, name, cidr_block, extra=extra)
def _to_addresses(self, response, only_associated):
"""
Builds a list of dictionaries containing elastic IP properties.
:param only_associated: If true, return only those addresses
that are associated with an instance.
If false, return all addresses.
:type only_associated: ``bool``
:rtype: ``list`` of :class:`ElasticIP`
"""
addresses = []
for el in response.findall(fixxpath(xpath='addressesSet/item',
namespace=NAMESPACE)):
addr = self._to_address(el, only_associated)
if addr is not None:
addresses.append(addr)
return addresses
def _to_address(self, element, only_associated):
instance_id = findtext(element=element, xpath='instanceId',
namespace=NAMESPACE)
public_ip = findtext(element=element,
xpath='publicIp',
namespace=NAMESPACE)
domain = findtext(element=element,
xpath='domain',
namespace=NAMESPACE)
# Build our extra dict
extra = self._get_extra_dict(
element, RESOURCE_EXTRA_ATTRIBUTES_MAP['elastic_ip'])
# Return NoneType if only associated IPs are requested
if only_associated and not instance_id:
return None
return ElasticIP(public_ip, domain, instance_id, extra=extra)
def _to_placement_groups(self, response):
return [self._to_placement_group(el)
for el in response.findall(
fixxpath(xpath='placementGroupSet/item',
namespace=NAMESPACE))]
def _to_placement_group(self, element):
name = findtext(element=element,
xpath='groupName',
namespace=NAMESPACE)
state = findtext(element=element,
xpath='state',
namespace=NAMESPACE)
strategy = findtext(element=element,
xpath='strategy',
namespace=NAMESPACE)
return EC2PlacementGroup(name, state, strategy)
def _to_subnets(self, response):
return [self._to_subnet(el) for el in response.findall(
fixxpath(xpath='subnetSet/item', namespace=NAMESPACE))
]
def _to_subnet(self, element, name=None):
# Get the subnet ID
subnet_id = findtext(element=element,
xpath='subnetId',
namespace=NAMESPACE)
# Get our tags
tags = self._get_resource_tags(element)
# If we don't get anything back then use the subnet_id
name = name if name else tags.get('Name', subnet_id)
state = findtext(element=element,
xpath='state',
namespace=NAMESPACE)
# Get our extra dictionary
extra = self._get_extra_dict(
element, RESOURCE_EXTRA_ATTRIBUTES_MAP['subnet'])
# Also include our tags
extra['tags'] = tags
return EC2NetworkSubnet(subnet_id, name, state, extra=extra)
def _to_interfaces(self, response):
return [self._to_interface(el) for el in response.findall(
fixxpath(xpath='networkInterfaceSet/item', namespace=NAMESPACE))
]
    def _to_interface(self, element, name=None):
        """
        Parse the XML element and return an EC2NetworkInterface object.

        :param name: An optional name for the interface. If not provided
                     then either tag with a key "Name" or the interface ID
                     will be used (whichever is available first in that
                     order).
        :type name: ``str``

        :rtype: :class: `EC2NetworkInterface`
        """
        interface_id = findtext(element=element,
                                xpath='networkInterfaceId',
                                namespace=NAMESPACE)

        state = findtext(element=element,
                         xpath='status',
                         namespace=NAMESPACE)

        # Get tags
        tags = self._get_resource_tags(element)

        name = name if name else tags.get('Name', interface_id)

        # Build security groups
        groups = self._get_security_groups(element)

        # Build private IPs; each entry carries the address, its DNS name
        # and whether it is the primary address of the interface.
        priv_ips = []
        for item in findall(element=element,
                            xpath='privateIpAddressesSet/item',
                            namespace=NAMESPACE):

            priv_ips.append({'private_ip': findtext(element=item,
                                                    xpath='privateIpAddress',
                                                    namespace=NAMESPACE),
                            'private_dns': findtext(element=item,
                                                    xpath='privateDnsName',
                                                    namespace=NAMESPACE),
                             'primary': findtext(element=item,
                                                 xpath='primary',
                                                 namespace=NAMESPACE)})

        # Build our attachment dictionary which we will add into extra later
        attributes_map = \
            RESOURCE_EXTRA_ATTRIBUTES_MAP['network_interface_attachment']
        attachment = self._get_extra_dict(element, attributes_map)

        # Build our extra dict
        attributes_map = RESOURCE_EXTRA_ATTRIBUTES_MAP['network_interface']
        extra = self._get_extra_dict(element, attributes_map)

        # Include our previously built items as well
        extra['tags'] = tags
        extra['attachment'] = attachment
        extra['private_ips'] = priv_ips
        extra['groups'] = groups

        return EC2NetworkInterface(interface_id, name, state, extra=extra)
def _to_reserved_nodes(self, object, xpath):
return [self._to_reserved_node(el)
for el in object.findall(fixxpath(xpath=xpath,
namespace=NAMESPACE))]
    def _to_reserved_node(self, element):
        """
        Build an EC2ReservedNode object using the reserved instance properties.
        Information on these properties can be found at http://goo.gl/ulXCC7.

        :rtype: :class:`EC2ReservedNode`
        """
        # Get our extra dictionary
        extra = self._get_extra_dict(
            element, RESOURCE_EXTRA_ATTRIBUTES_MAP['reserved_node'])

        # Resolve the matching NodeSize by instance type; None when no
        # known size matches.
        try:
            size = [size for size in self.list_sizes() if
                    size.id == extra['instance_type']][0]
        except IndexError:
            size = None

        return EC2ReservedNode(id=findtext(element=element,
                                           xpath='reservedInstancesId',
                                           namespace=NAMESPACE),
                               state=findattr(element=element,
                                              xpath='state',
                                              namespace=NAMESPACE),
                               driver=self,
                               size=size,
                               extra=extra)
def _to_device_mappings(self, object):
return [self._to_device_mapping(el) for el in object.findall(
fixxpath(xpath='blockDeviceMapping/item', namespace=NAMESPACE))
]
def _to_device_mapping(self, element):
"""
Parse the XML element and return a dictionary of device properties.
Additional information can be found at http://goo.gl/GjWYBf.
@note: EBS volumes do not have a virtual name. Only ephemeral
disks use this property.
:rtype: ``dict``
"""
mapping = {}
mapping['device_name'] = findattr(element=element,
xpath='deviceName',
namespace=NAMESPACE)
mapping['virtual_name'] = findattr(element=element,
xpath='virtualName',
namespace=NAMESPACE)
# If virtual name does not exist then this is an EBS volume.
# Build the EBS dictionary leveraging the _get_extra_dict method.
if mapping['virtual_name'] is None:
mapping['ebs'] = self._get_extra_dict(
element, RESOURCE_EXTRA_ATTRIBUTES_MAP['ebs_volume'])
return mapping
def _to_internet_gateways(self, object, xpath):
return [self._to_internet_gateway(el)
for el in object.findall(fixxpath(xpath=xpath,
namespace=NAMESPACE))]
def _to_internet_gateway(self, element, name=None):
id = findtext(element=element,
xpath='internetGatewayId',
namespace=NAMESPACE)
vpc_id = findtext(element=element,
xpath='attachmentSet/item/vpcId',
namespace=NAMESPACE)
state = findtext(element=element,
xpath='attachmentSet/item/state',
namespace=NAMESPACE)
# If there's no attachment state, let's
# set it to available
if not state:
state = 'available'
# Get our tags
tags = self._get_resource_tags(element)
# If name was not passed into the method then
# fall back then use the gateway id
name = name if name else tags.get('Name', id)
return VPCInternetGateway(id=id, name=name, vpc_id=vpc_id,
state=state, driver=self.connection.driver,
extra={'tags': tags})
def _to_route_tables(self, response):
return [self._to_route_table(el) for el in response.findall(
fixxpath(xpath='routeTableSet/item', namespace=NAMESPACE))
]
def _to_route_table(self, element, name=None):
# route table id
route_table_id = findtext(element=element,
xpath='routeTableId',
namespace=NAMESPACE)
# Get our tags
tags = self._get_resource_tags(element)
# Get our extra dictionary
extra = self._get_extra_dict(
element, RESOURCE_EXTRA_ATTRIBUTES_MAP['route_table'])
# Add tags to the extra dict
extra['tags'] = tags
# Get routes
routes = self._to_routes(element, 'routeSet/item')
# Get subnet associations
subnet_associations = self._to_subnet_associations(
element, 'associationSet/item')
# Get propagating routes virtual private gateways (VGW) IDs
propagating_gateway_ids = []
for el in element.findall(fixxpath(xpath='propagatingVgwSet/item',
namespace=NAMESPACE)):
propagating_gateway_ids.append(findtext(element=el,
xpath='gatewayId',
namespace=NAMESPACE))
name = name if name else tags.get('Name', id)
return EC2RouteTable(route_table_id, name, routes, subnet_associations,
propagating_gateway_ids, extra=extra)
def _to_routes(self, element, xpath):
return [self._to_route(el) for el in element.findall(
fixxpath(xpath=xpath, namespace=NAMESPACE))
]
def _to_route(self, element):
"""
Parse the XML element and return a route object
:rtype: :class: `EC2Route`
"""
destination_cidr = findtext(element=element,
xpath='destinationCidrBlock',
namespace=NAMESPACE)
gateway_id = findtext(element=element,
xpath='gatewayId',
namespace=NAMESPACE)
instance_id = findtext(element=element,
xpath='instanceId',
namespace=NAMESPACE)
owner_id = findtext(element=element,
xpath='instanceOwnerId',
namespace=NAMESPACE)
interface_id = findtext(element=element,
xpath='networkInterfaceId',
namespace=NAMESPACE)
state = findtext(element=element,
xpath='state',
namespace=NAMESPACE)
origin = findtext(element=element,
xpath='origin',
namespace=NAMESPACE)
vpc_peering_connection_id = findtext(element=element,
xpath='vpcPeeringConnectionId',
namespace=NAMESPACE)
return EC2Route(destination_cidr, gateway_id, instance_id, owner_id,
interface_id, state, origin, vpc_peering_connection_id)
def _to_subnet_associations(self, element, xpath):
return [self._to_subnet_association(el) for el in element.findall(
fixxpath(xpath=xpath, namespace=NAMESPACE))
]
def _to_subnet_association(self, element):
"""
Parse the XML element and return a route table association object
:rtype: :class: `EC2SubnetAssociation`
"""
association_id = findtext(element=element,
xpath='routeTableAssociationId',
namespace=NAMESPACE)
route_table_id = findtext(element=element,
xpath='routeTableId',
namespace=NAMESPACE)
subnet_id = findtext(element=element,
xpath='subnetId',
namespace=NAMESPACE)
main = findtext(element=element,
xpath='main',
namespace=NAMESPACE)
main = True if main else False
return EC2SubnetAssociation(association_id, route_table_id,
subnet_id, main)
def _pathlist(self, key, arr):
"""
Converts a key and an array of values into AWS query param format.
"""
params = {}
i = 0
for value in arr:
i += 1
params['%s.%s' % (key, i)] = value
return params
def _get_boolean(self, element):
tag = '{%s}%s' % (NAMESPACE, 'return')
return element.findtext(tag) == 'true'
def _get_terminate_boolean(self, element):
status = element.findtext(".//{%s}%s" % (NAMESPACE, 'name'))
return any([term_status == status
for term_status
in ('shutting-down', 'terminated')])
def _add_instance_filter(self, params, node):
"""
Add instance filter to the provided params dictionary.
"""
filters = {'instance-id': node.id}
params.update(self._build_filters(filters))
return params
def _get_state_boolean(self, element):
"""
Checks for the instances's state
"""
state = findall(element=element,
xpath='instancesSet/item/currentState/name',
namespace=NAMESPACE)[0].text
return state in ('stopping', 'pending', 'starting')
def _get_extra_dict(self, element, mapping):
"""
Extract attributes from the element based on rules provided in the
mapping dictionary.
:param element: Element to parse the values from.
:type element: xml.etree.ElementTree.Element.
:param mapping: Dictionary with the extra layout
:type node: :class:`Node`
:rtype: ``dict``
"""
extra = {}
for attribute, values in mapping.items():
transform_func = values['transform_func']
value = findattr(element=element,
xpath=values['xpath'],
namespace=NAMESPACE)
if value is not None:
extra[attribute] = transform_func(value)
else:
extra[attribute] = None
return extra
def _get_resource_tags(self, element):
"""
Parse tags from the provided element and return a dictionary with
key/value pairs.
:rtype: ``dict``
"""
tags = {}
# Get our tag set by parsing the element
tag_set = findall(element=element,
xpath='tagSet/item',
namespace=NAMESPACE)
for tag in tag_set:
key = findtext(element=tag,
xpath='key',
namespace=NAMESPACE)
value = findtext(element=tag,
xpath='value',
namespace=NAMESPACE)
tags[key] = value
return tags
def _get_block_device_mapping_params(self, block_device_mapping):
"""
Return a list of dictionaries with query parameters for
a valid block device mapping.
:param mapping: List of dictionaries with the drive layout
:type mapping: ``list`` or ``dict``
:return: Dictionary representation of the drive mapping
:rtype: ``dict``
"""
if not isinstance(block_device_mapping, (list, tuple)):
raise AttributeError(
'block_device_mapping not list or tuple')
params = {}
for idx, mapping in enumerate(block_device_mapping):
idx += 1 # We want 1-based indexes
if not isinstance(mapping, dict):
raise AttributeError(
'mapping %s in block_device_mapping '
'not a dict' % mapping)
for k, v in mapping.items():
if not isinstance(v, dict):
params['BlockDeviceMapping.%d.%s' % (idx, k)] = str(v)
else:
for key, value in v.items():
params['BlockDeviceMapping.%d.%s.%s'
% (idx, k, key)] = str(value)
return params
def _get_billing_product_params(self, billing_products):
"""
Return a list of dictionaries with valid param for billing product.
:param billing_product: List of billing code values(str)
:type billing product: ``list``
:return: Dictionary representation of the billing product codes
:rtype: ``dict``
"""
if not isinstance(billing_products, (list, tuple)):
raise AttributeError(
'billing_products not list or tuple')
params = {}
for idx, v in enumerate(billing_products):
idx += 1 # We want 1-based indexes
params['BillingProduct.%d' % (idx)] = str(v)
def _get_disk_container_params(self, disk_container):
"""
Return a list of dictionaries with query parameters for
a valid disk container.
:param disk_container: List of dictionaries with
disk_container details
:type disk_container: ``list`` or ``dict``
:return: Dictionary representation of the disk_container
:rtype: ``dict``
"""
if not isinstance(disk_container, (list, tuple)):
raise AttributeError('disk_container not list or tuple')
params = {}
for idx, content in enumerate(disk_container):
idx += 1 # We want 1-based indexes
if not isinstance(content, dict):
raise AttributeError(
'content %s in disk_container not a dict' % content)
for k, v in content.items():
if not isinstance(v, dict):
params['DiskContainer.%s' % (k)] = str(v)
else:
for key, value in v.items():
params['DiskContainer.%s.%s'
% (k, key)] = str(value)
return params
def _get_client_data_params(self, client_data):
"""
Return a dictionary with query parameters for
a valid client data.
:param client_data: List of dictionaries with the disk
upload details
:type client_data: ``dict``
:return: Dictionary representation of the client data
:rtype: ``dict``
"""
if not isinstance(client_data, (list, tuple)):
raise AttributeError('client_data not list or tuple')
params = {}
for idx, content in enumerate(client_data):
idx += 1 # We want 1-based indexes
if not isinstance(content, dict):
raise AttributeError(
'content %s in client_data'
'not a dict' % content)
for k, v in content.items():
params['ClientData.%s' % (k)] = str(v)
return params
def _get_common_security_group_params(self, group_id, protocol,
from_port, to_port, cidr_ips,
group_pairs):
"""
Return a dictionary with common query parameters which are used when
operating on security groups.
:rtype: ``dict``
"""
params = {'GroupId': group_id,
'IpPermissions.1.IpProtocol': protocol,
'IpPermissions.1.FromPort': from_port,
'IpPermissions.1.ToPort': to_port}
if cidr_ips is not None:
ip_ranges = {}
for index, cidr_ip in enumerate(cidr_ips):
index += 1
ip_ranges['IpPermissions.1.IpRanges.%s.CidrIp'
% (index)] = cidr_ip
params.update(ip_ranges)
if group_pairs is not None:
user_groups = {}
for index, group_pair in enumerate(group_pairs):
index += 1
if 'group_id' in group_pair.keys():
user_groups['IpPermissions.1.Groups.%s.GroupId'
% (index)] = group_pair['group_id']
if 'group_name' in group_pair.keys():
user_groups['IpPermissions.1.Groups.%s.GroupName'
% (index)] = group_pair['group_name']
if 'user_id' in group_pair.keys():
user_groups['IpPermissions.1.Groups.%s.UserId'
% (index)] = group_pair['user_id']
params.update(user_groups)
return params
def _get_security_groups(self, element):
"""
Parse security groups from the provided element and return a
list of security groups with the id ane name key/value pairs.
:rtype: ``list`` of ``dict``
"""
groups = []
for item in findall(element=element,
xpath='groupSet/item',
namespace=NAMESPACE):
groups.append({
'group_id': findtext(element=item,
xpath='groupId',
namespace=NAMESPACE),
'group_name': findtext(element=item,
xpath='groupName',
namespace=NAMESPACE)
})
return groups
def _build_filters(self, filters):
"""
Return a dictionary with filter query parameters which are used when
listing networks, security groups, etc.
:param filters: Dict of filter names and filter values
:type filters: ``dict``
:rtype: ``dict``
"""
filter_entries = {}
for filter_idx, filter_data in enumerate(filters.items()):
filter_idx += 1 # We want 1-based indexes
filter_name, filter_values = filter_data
filter_key = 'Filter.%s.Name' % (filter_idx)
filter_entries[filter_key] = filter_name
if isinstance(filter_values, list):
for value_idx, value in enumerate(filter_values):
value_idx += 1 # We want 1-based indexes
value_key = 'Filter.%s.Value.%s' % (filter_idx,
value_idx)
filter_entries[value_key] = value
else:
value_key = 'Filter.%s.Value.1' % (filter_idx)
filter_entries[value_key] = filter_values
return filter_entries
class EC2NodeDriver(BaseEC2NodeDriver):
    """
    Amazon EC2 node driver.
    """

    connectionCls = EC2Connection
    type = Provider.EC2
    name = 'Amazon EC2'
    website = 'http://aws.amazon.com/ec2/'
    path = '/'

    # Mapping of EC2 instance state names to libcloud NodeState values.
    # 'shutting-down' is transitional and therefore mapped to UNKNOWN.
    NODE_STATE_MAP = {
        'pending': NodeState.PENDING,
        'running': NodeState.RUNNING,
        'shutting-down': NodeState.UNKNOWN,
        'terminated': NodeState.TERMINATED,
        'stopped': NodeState.STOPPED
    }

    def __init__(self, key, secret=None, secure=True, host=None, port=None,
                 region='us-east-1', token=None, **kwargs):
        """
        :param region: EC2 region; must be one of the values returned by
                       ``list_regions()``.
        :type region: ``str``

        :param token: Temporary session token (optional).
        :type token: ``str``

        :raises ValueError: If an invalid region is supplied.
        """
        # Region-pinned subclasses set _region, which overrides the arg.
        if hasattr(self, '_region'):
            region = self._region

        valid_regions = self.list_regions()
        if region not in valid_regions:
            raise ValueError('Invalid region: %s' % (region))

        # Per-region settings (endpoint, API name, signature version)
        # come from the module-level REGION_DETAILS table.
        details = REGION_DETAILS[region]
        self.region_name = region
        self.token = token
        self.api_name = details['api_name']
        self.country = details['country']
        self.signature_version = details.get('signature_version',
                                             DEFAULT_SIGNATURE_VERSION)

        # An explicitly supplied host wins over the region endpoint.
        host = host or details['endpoint']

        super(EC2NodeDriver, self).__init__(key=key, secret=secret,
                                            secure=secure, host=host,
                                            port=port, **kwargs)

    @classmethod
    def list_regions(cls):
        # All regions this driver accepts for the ``region`` argument.
        return VALID_EC2_REGIONS
class IdempotentParamError(LibcloudError):
    """
    Request used the same client token as a previous,
    but non-identical request.
    """

    def __str__(self):
        # self.value is populated by the LibcloudError base class.
        return repr(self.value)
class EucConnection(EC2Connection):
    """
    Connection class for Eucalyptus
    """

    # No default host; the endpoint is supplied when the driver is created.
    host = None
class EucNodeDriver(BaseEC2NodeDriver):
    """
    Driver class for Eucalyptus
    """

    name = 'Eucalyptus'
    website = 'http://www.eucalyptus.com/'
    api_name = 'ec2_us_east'
    region_name = 'us-east-1'
    connectionCls = EucConnection
    signature_version = '2'

    def __init__(self, key, secret=None, secure=True, host=None,
                 path=None, port=None, api_version=DEFAULT_EUCA_API_VERSION):
        """
        @inherits: :class:`EC2NodeDriver.__init__`

        :param path: The host where the API can be reached.
        :type path: ``str``

        :param api_version: The API version to extend support for
                            Eucalyptus proprietary API calls
        :type api_version: ``str``
        """
        super(EucNodeDriver, self).__init__(key, secret, secure, host, port)

        # Default to the standard Eucalyptus service path.
        if path is None:
            path = '/services/Eucalyptus'

        self.path = path

        # Namespace used by the proprietary Eucalyptus calls; versioned
        # by the requested API version.
        self.EUCA_NAMESPACE = 'http://msgs.eucalyptus.com/%s' % (api_version)

    def list_locations(self):
        # Eucalyptus exposes no location concept through this driver.
        raise NotImplementedError(
            'list_locations not implemented for this driver')

    def _to_sizes(self, response):
        # DescribeInstanceTypes responses use the Eucalyptus namespace,
        # not the EC2 one.
        return [self._to_size(el) for el in response.findall(
            fixxpath(xpath='instanceTypeDetails/item',
                     namespace=self.EUCA_NAMESPACE))]

    def _to_size(self, el):
        # Build a NodeSize from one instanceTypeDetails item.
        name = findtext(element=el,
                        xpath='name',
                        namespace=self.EUCA_NAMESPACE)
        cpu = findtext(element=el,
                       xpath='cpu',
                       namespace=self.EUCA_NAMESPACE)
        disk = findtext(element=el,
                        xpath='disk',
                        namespace=self.EUCA_NAMESPACE)
        memory = findtext(element=el,
                          xpath='memory',
                          namespace=self.EUCA_NAMESPACE)

        return NodeSize(id=name,
                        name=name,
                        ram=int(memory),
                        disk=int(disk),
                        bandwidth=None,
                        price=None,
                        driver=EucNodeDriver,
                        extra={
                            'cpu': int(cpu)
                        })

    def list_sizes(self):
        """
        Lists available nodes sizes.

        :rtype: ``list`` of :class:`NodeSize`
        """
        params = {'Action': 'DescribeInstanceTypes'}
        response = self.connection.request(self.path, params=params).object
        return self._to_sizes(response)

    def _add_instance_filter(self, params, node):
        """
        Eucalyptus driver doesn't support filtering on instance id so this is a
        no-op.
        """
        pass
class NimbusConnection(EC2Connection):
    """
    Connection class for Nimbus
    """

    # No default host; the endpoint is supplied when the driver is created.
    host = None
class NimbusNodeDriver(BaseEC2NodeDriver):
    """
    Driver class for Nimbus
    """

    type = Provider.NIMBUS
    name = 'Nimbus'
    website = 'http://www.nimbusproject.org/'
    country = 'Private'
    api_name = 'nimbus'
    region_name = 'nimbus'
    friendly_name = 'Nimbus Private Cloud'
    connectionCls = NimbusConnection
    signature_version = '2'

    def ex_describe_addresses(self, nodes):
        """
        Nimbus doesn't support elastic IPs, so this is a pass-through.

        @inherits: :class:`EC2NodeDriver.ex_describe_addresses`
        """
        # Every node maps to an empty list of elastic IPs.
        return dict((node.id, []) for node in nodes)

    def ex_create_tags(self, resource, tags):
        """
        Nimbus doesn't support creating tags, so this is a pass-through.

        @inherits: :class:`EC2NodeDriver.ex_create_tags`
        """
        pass
class OutscaleConnection(EC2Connection):
    """
    Connection class for Outscale
    """

    # API version sent with Outscale requests.
    version = DEFAULT_OUTSCALE_API_VERSION
    # No default host; OutscaleNodeDriver assigns the region endpoint.
    host = None
class OutscaleNodeDriver(BaseEC2NodeDriver):
"""
Base Outscale FCU node driver.
Outscale per provider driver classes inherit from it.
"""
connectionCls = OutscaleConnection
name = 'Outscale'
website = 'http://www.outscale.com'
path = '/'
signature_version = '2'
NODE_STATE_MAP = {
'pending': NodeState.PENDING,
'running': NodeState.RUNNING,
'shutting-down': NodeState.UNKNOWN,
'terminated': NodeState.TERMINATED,
'stopped': NodeState.STOPPED
}
def __init__(self, key, secret=None, secure=True, host=None, port=None,
             region='us-east-1', region_details=None, **kwargs):
    """Initialise an Outscale driver for one region.

    :param key: API access key.
    :param secret: API secret key.
    :param region: Region name; must be a key of ``region_details``.
    :param region_details: Mapping of region name to a dict holding
        ``api_name``, ``country``, ``endpoint`` and ``instance_types``.
    :raises ValueError: If ``region_details`` is missing or does not
        describe ``region``.
    """
    # Subclasses may pin the region via a ``_region`` class attribute.
    if hasattr(self, '_region'):
        region = self._region
    if region_details is None:
        raise ValueError('Invalid region_details argument')
    if region not in region_details.keys():
        raise ValueError('Invalid region: %s' % (region))
    self.region_name = region
    self.region_details = region_details
    details = self.region_details[region]
    self.api_name = details['api_name']
    self.country = details['country']
    # NOTE(review): this mutates the *class* attribute of the shared
    # connection class, so the endpoint is effectively global to all
    # drivers using the same connectionCls -- verify this is intended.
    self.connectionCls.host = details['endpoint']
    self._not_implemented_msg =\
        'This method is not supported in the Outscale driver'
    # Deliberately skips BaseEC2NodeDriver.__init__ (note the
    # grandparent class in super()) -- presumably to bypass EC2-only
    # region handling.  TODO confirm.
    super(BaseEC2NodeDriver, self).__init__(key=key, secret=secret,
                                            secure=secure, host=host,
                                            port=port, **kwargs)
def create_node(self, **kwargs):
    """
    Create a new Outscale node.

    Accepts the same keywords as
    :class:`BaseEC2NodeDriver.create_node` (``ex_keyname``,
    ``ex_userdata``, ``ex_security_groups``, ``ex_metadata``,
    ``ex_mincount``, ``ex_maxcount``, ``ex_clienttoken``,
    ``ex_blockdevicemappings``, ``ex_ebs_optimized``) except
    ``ex_iamprofile``, which Outscale does not support.

    :raises NotImplementedError: If ``ex_iamprofile`` is passed.
    """
    # Reject the one EC2 keyword that has no Outscale equivalent.
    if 'ex_iamprofile' in kwargs:
        raise NotImplementedError("ex_iamprofile not implemented")
    parent = super(OutscaleNodeDriver, self)
    return parent.create_node(**kwargs)
def ex_create_network(self, cidr_block, name=None):
    """
    Create a network/VPC.  Unlike EC2, Outscale has no
    ``instance_tenancy`` option, so only the CIDR block and an
    optional name are forwarded.

    :param cidr_block: The CIDR block assigned to the network.
    :type cidr_block: ``str``
    :param name: An optional name for the network.
    :type name: ``str``
    :return: Dictionary of network properties.
    :rtype: ``dict``
    """
    parent = super(OutscaleNodeDriver, self)
    return parent.ex_create_network(cidr_block, name=name)
def ex_modify_instance_attribute(self, node, disable_api_termination=None,
                                 ebs_optimized=None, group_id=None,
                                 source_dest_check=None, user_data=None,
                                 instance_type=None):
    """
    Modify node attributes.  Outscale supports:
    'DisableApiTermination.Value', 'EbsOptimized', 'GroupId.n',
    'SourceDestCheck.Value', 'UserData.Value', 'InstanceType.Value'.

    Each keyword maps to one of those attributes; keywords left as
    ``None`` are omitted from the request.

    :param node: Node instance
    :type node: :class:`Node`
    :return: True on success, False otherwise.
    :rtype: ``bool``
    """
    candidates = (
        ('DisableApiTermination.Value', disable_api_termination),
        ('EbsOptimized', ebs_optimized),
        ('GroupId.n', group_id),
        ('SourceDestCheck.Value', source_dest_check),
        ('UserData.Value', user_data),
        ('InstanceType.Value', instance_type),
    )
    # Only explicitly-provided attributes are sent to the API.
    attributes = {key: value for key, value in candidates
                  if value is not None}
    parent = super(OutscaleNodeDriver, self)
    return parent.ex_modify_instance_attribute(node, attributes)
def ex_register_image(self, name, description=None, architecture=None,
                      root_device_name=None, block_device_mapping=None):
    """
    Register a machine image based off of an EBS-backed instance
    (also usable to create images from snapshots).  Outscale does not
    support ``image_location``, ``kernel_id`` or ``ramdisk_id``.

    :param name: The name for the AMI being registered.
    :type name: ``str``
    :param description: The description of the AMI (optional).
    :type description: ``str``
    :param architecture: The architecture of the AMI (i386/x86_64)
                         (optional).
    :type architecture: ``str``
    :param root_device_name: The device name for the root device.
                             Required if registering an EBS-backed AMI.
    :type root_device_name: ``str``
    :param block_device_mapping: A dictionary of the disk layout
                                 (optional).
    :type block_device_mapping: ``dict``
    :rtype: :class:`NodeImage`
    """
    parent = super(OutscaleNodeDriver, self)
    return parent.ex_register_image(
        name, description=description, architecture=architecture,
        root_device_name=root_device_name,
        block_device_mapping=block_device_mapping)
def ex_copy_image(self, source_region, image, name=None, description=None):
    """
    Unsupported on Outscale: images cannot be copied.

    @inherits: :class:`EC2NodeDriver.ex_copy_image`
    """
    raise NotImplementedError(self._not_implemented_msg)

def ex_get_limits(self):
    """
    Unsupported on Outscale: account limits cannot be queried.

    @inherits: :class:`EC2NodeDriver.ex_get_limits`
    """
    raise NotImplementedError(self._not_implemented_msg)

def ex_create_network_interface(self, subnet, name=None,
                                description=None,
                                private_ip_address=None):
    """
    Unsupported on Outscale: network interfaces cannot be created
    within a VPC.

    @inherits: :class:`EC2NodeDriver.ex_create_network_interface`
    """
    raise NotImplementedError(self._not_implemented_msg)

def ex_delete_network_interface(self, network_interface):
    """
    Unsupported on Outscale: network interfaces cannot be deleted
    within a VPC.

    @inherits: :class:`EC2NodeDriver.ex_delete_network_interface`
    """
    raise NotImplementedError(self._not_implemented_msg)

def ex_attach_network_interface_to_node(self, network_interface,
                                        node, device_index):
    """
    Unsupported on Outscale: network interfaces cannot be attached.

    @inherits: :class:`EC2NodeDriver.ex_attach_network_interface_to_node`
    """
    raise NotImplementedError(self._not_implemented_msg)

def ex_detach_network_interface(self, attachment_id, force=False):
    """
    Unsupported on Outscale: network interfaces cannot be detached.

    @inherits: :class:`EC2NodeDriver.ex_detach_network_interface`
    """
    raise NotImplementedError(self._not_implemented_msg)
def list_sizes(self, location=None):
    """
    List the node sizes available in the driver's region.

    Overrides the EC2 implementation so that Outscale-specific
    instance-type data is used.

    :rtype: ``list`` of :class:`NodeSize`
    """
    region_info = self.region_details[self.region_name]
    sizes = []
    for type_name in region_info['instance_types']:
        # Deep-copy so per-size price updates never leak back into the
        # shared OUTSCALE_INSTANCE_TYPES table.
        attributes = copy.deepcopy(OUTSCALE_INSTANCE_TYPES[type_name])
        attributes.update(
            {'price': self._get_size_price(size_id=type_name)})
        sizes.append(NodeSize(driver=self, **attributes))
    return sizes
def ex_modify_instance_keypair(self, instance_id, key_name=None):
    """
    Change the keypair associated with an instance.  The instance
    must be restarted afterwards for the change to take effect.

    :param instance_id: The ID of the instance.
    :type instance_id: ``string``
    :param key_name: The name of the keypair.
    :type key_name: ``string``
    """
    params = {'Action': 'ModifyInstanceKeypair',
              'instanceId': instance_id}
    if key_name is not None:
        params['keyName'] = key_name
    response = self.connection.request(self.path, params=params,
                                       method='GET').object
    result = findtext(element=response, xpath='return',
                      namespace=OUTSCALE_NAMESPACE)
    return result == 'true'
def _to_quota(self, elem):
    """
    Parse a DescribeQuotas response element into a dict keyed by
    reference, each value being a list of quota dicts.
    """
    # Field names double as the XPath leaf names in the response.
    fields = ('ownerId', 'name', 'displayName', 'description',
              'groupName', 'maxQuotaValue', 'usedQuotaValue')
    quota = {}
    for ref_item in findall(element=elem,
                            xpath='referenceQuotaSet/item',
                            namespace=OUTSCALE_NAMESPACE):
        reference = findtext(element=ref_item,
                             xpath='reference',
                             namespace=OUTSCALE_NAMESPACE)
        quota[reference] = [
            {field: findtext(element=quota_item, xpath=field,
                             namespace=OUTSCALE_NAMESPACE)
             for field in fields}
            for quota_item in findall(element=ref_item,
                                      xpath='quotaSet/item',
                                      namespace=OUTSCALE_NAMESPACE)]
    return quota
def ex_describe_quotas(self, dry_run=False, filters=None,
                       max_results=None, marker=None):
    """
    Describes one or more of your quotas.
    :param dry_run: dry_run
    :type dry_run: ``bool``
    :param filters: The filters so that the response returned includes
                    information for certain quotas only.
    :type filters: ``dict``
    :param max_results: The maximum number of items that can be
                        returned in a single page (by default, 100)
    :type max_results: ``int``
    :param marker: Set quota marker
    :type marker: ``string``
    :return: (is_truncated, quota) tuple
    :rtype: ``(bool, dict)``
    :raises NotImplementedError: If ``filters`` or ``marker`` is given.
    """
    # Filtering and pagination markers are declared but not supported.
    if filters:
        raise NotImplementedError(
            'quota filters are not implemented')
    if marker:
        raise NotImplementedError(
            'quota marker is not implemented')
    params = {'Action': 'DescribeQuotas'}
    if dry_run:
        params.update({'DryRun': dry_run})
    if max_results:
        params.update({'MaxResults': max_results})
    response = self.connection.request(self.path, params=params,
                                       method='GET').object
    quota = self._to_quota(response)
    # NOTE(review): findtext returns the raw XML text (e.g. 'true'),
    # not a bool, despite the documented rtype -- confirm whether
    # callers expect a string here before changing it.
    is_truncated = findtext(element=response, xpath='isTruncated',
                            namespace=OUTSCALE_NAMESPACE)
    return is_truncated, quota
def _to_product_type(self, elem):
    """Parse a GetProductType response element into a dict."""
    return {
        'productTypeId': findtext(element=elem, xpath='productTypeId',
                                  namespace=OUTSCALE_NAMESPACE),
        'description': findtext(element=elem, xpath='description',
                                namespace=OUTSCALE_NAMESPACE),
    }

def ex_get_product_type(self, image_id, snapshot_id=None):
    """
    Get the product type of a specified OMI or snapshot.

    :param image_id: The ID of the OMI.
    :type image_id: ``string``
    :param snapshot_id: The ID of the snapshot (optional).
    :type snapshot_id: ``string``
    :return: A product type.
    :rtype: ``dict``
    """
    params = {'Action': 'GetProductType', 'ImageId': image_id}
    if snapshot_id is not None:
        params['SnapshotId'] = snapshot_id
    response = self.connection.request(self.path, params=params,
                                       method='GET').object
    return self._to_product_type(response)
def _to_product_types(self, elem):
    """Parse a DescribeProductTypes response into a list of dicts."""
    return [
        {'productTypeId': findtext(element=item, xpath='productTypeId',
                                   namespace=OUTSCALE_NAMESPACE),
         'description': findtext(element=item, xpath='description',
                                 namespace=OUTSCALE_NAMESPACE)}
        for item in findall(element=elem,
                            xpath='productTypeSet/item',
                            namespace=OUTSCALE_NAMESPACE)]

def ex_describe_product_types(self, filters=None):
    """
    Describe the available product types.

    :param filters: The filters so that the list returned includes
                    information for certain product types only.
    :type filters: ``dict``
    :return: A product types list.
    :rtype: ``list``
    """
    params = {'Action': 'DescribeProductTypes'}
    if filters:
        params.update(self._build_filters(filters))
    response = self.connection.request(self.path, params=params,
                                       method='GET').object
    return self._to_product_types(response)
def _to_instance_types(self, elem):
    """Parse a DescribeInstanceTypes response into a list of dicts."""
    # Field names double as the XPath leaf names in the response; the
    # tuple order fixes the key order of each resulting dict.
    fields = ('name', 'vcpu', 'memory', 'storageSize', 'storageCount',
              'maxIpAddresses', 'ebsOptimizedAvailable')
    return [
        {field: findtext(element=item, xpath=field,
                         namespace=OUTSCALE_NAMESPACE)
         for field in fields}
        for item in findall(element=elem,
                            xpath='instanceTypeSet/item',
                            namespace=OUTSCALE_NAMESPACE)]

def ex_describe_instance_types(self, filters=None):
    """
    Describe the available instance types.

    :param filters: The filters so that the list returned includes
                    information for certain instance types only.
    :type filters: ``dict``
    :return: An instance types list.
    :rtype: ``list``
    """
    params = {'Action': 'DescribeInstanceTypes'}
    if filters:
        params.update(self._build_filters(filters))
    response = self.connection.request(self.path, params=params,
                                       method='GET').object
    return self._to_instance_types(response)
class OutscaleSASNodeDriver(OutscaleNodeDriver):
    """
    Outscale SAS node driver
    """

    name = 'Outscale SAS'
    type = Provider.OUTSCALE_SAS

    def __init__(self, key, secret=None, secure=True, host=None, port=None,
                 region='us-east-1', region_details=None, **kwargs):
        # ``region_details`` is accepted for signature compatibility with
        # the parent but deliberately ignored: the SAS region catalogue
        # is always used.
        super(OutscaleSASNodeDriver, self).__init__(
            key=key, secret=secret, secure=secure, host=host, port=port,
            region=region, region_details=OUTSCALE_SAS_REGION_DETAILS,
            **kwargs)
class OutscaleINCNodeDriver(OutscaleNodeDriver):
    """
    Outscale INC node driver
    """

    name = 'Outscale INC'
    type = Provider.OUTSCALE_INC

    def __init__(self, key, secret=None, secure=True, host=None, port=None,
                 region='us-east-1', region_details=None, **kwargs):
        # ``region_details`` is accepted for signature compatibility with
        # the parent but deliberately ignored: the INC region catalogue
        # is always used.
        super(OutscaleINCNodeDriver, self).__init__(
            key=key, secret=secret, secure=secure, host=host, port=port,
            region=region, region_details=OUTSCALE_INC_REGION_DETAILS,
            **kwargs)
| illfelder/libcloud | libcloud/compute/drivers/ec2.py | Python | apache-2.0 | 256,785 |
#!/usr/bin/env python
#
# @license Apache-2.0
#
# Copyright (c) 2018 The Stdlib Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Benchmark scipy.stats.arcsine."""
from __future__ import print_function
import timeit
# Number of times each benchmark statement is repeated.
REPEATS = 3
COUNT = [0]  # use a list to allow modification within nested scopes
def print_version():
    """Print the TAP version header line."""
    print("TAP version 13")
def print_summary(total, passing):
    """Print the TAP benchmark summary.

    # Arguments

    * `total`: total number of tests
    * `passing`: number of passing tests
    """
    lines = (
        "#",
        "1.." + str(total),  # TAP plan
        "# total " + str(total),
        "# pass " + str(passing),
        "#",
        "# ok",
    )
    for line in lines:
        print(line)
def print_results(iterations, elapsed):
    """Print benchmark results as a TAP YAML block.

    # Arguments

    * `iterations`: number of iterations
    * `elapsed`: elapsed time (in seconds)

    # Examples

    ``` python
    python> print_results(1000000, 0.131009101868)
    ```
    """
    rate = iterations / elapsed
    print(" ---")
    print(" iterations: {}".format(iterations))
    print(" elapsed: {}".format(elapsed))
    print(" rate: {}".format(rate))
    print(" ...")
def benchmark(name, setup, stmt, iterations):
    """Run one benchmark REPEATS times and print TAP-formatted results.

    # Arguments

    * `name`: benchmark name
    * `setup`: benchmark setup
    * `stmt`: statement to benchmark
    * `iterations`: number of iterations

    # Examples

    ``` python
    python> benchmark("random", "from random import random;", "y = random()", 1000000)
    ```
    """
    timer = timeit.Timer(stmt, setup=setup)
    print_version()
    for _ in range(REPEATS):
        print("# python::" + name)
        # Global running test counter shared across benchmark() calls.
        COUNT[0] += 1
        elapsed = timer.timeit(number=iterations)
        print_results(iterations, elapsed)
        print("ok " + str(COUNT[0]) + " benchmark finished")
def main():
    """Run the benchmarks.

    Every benchmark shares the same setup (an arcsine(10.0, 20.0)
    frozen distribution) and iteration count, so the individual runs
    are driven from a (name, statement) table instead of eleven
    copy-pasted stanzas.
    """
    setup = ("from scipy.stats import arcsine; "
             "from random import random; "
             "rv = arcsine(10.0, 20.0);")
    iterations = 1000
    # (benchmark name, statement to time) pairs, in the original order.
    benchmarks = (
        ("arcsine:entropy", "y = rv.entropy()"),
        ("arcsine:kurtosis", "y = rv.stats(moments='k')"),
        ("arcsine:mean", "y = rv.mean()"),
        ("arcsine:median", "y = rv.median()"),
        ("arcsine:skewness", "y = rv.stats(moments='s')"),
        ("arcsine:stdev", "y = rv.std()"),
        ("arcsine:variance", "y = rv.var()"),
        ("arcsine:cdf", "y = rv.cdf(random())"),
        ("arcsine:logpdf", "y = rv.logpdf(random())"),
        ("arcsine:pdf", "y = rv.pdf(random())"),
        ("arcsine:quantile", "y = rv.ppf(random())"),
    )
    for name, stmt in benchmarks:
        benchmark(name, setup, stmt, iterations)
    print_summary(COUNT[0], COUNT[0])


if __name__ == "__main__":
    main()
| stdlib-js/stdlib | lib/node_modules/@stdlib/stats/base/dists/arcsine/ctor/benchmark/python/benchmark.scipy.py | Python | apache-2.0 | 5,040 |
# Copyright 2020 The FedLearner Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
import logging
from collections import OrderedDict
from contextlib import contextmanager
import tensorflow.compat.v1 as tf
import fedlearner.data_join.common as common
from fedlearner.data_join.raw_data_iter_impl.raw_data_iter import RawDataIter
class TfExampleItem(RawDataIter.Item):
    """A raw-data item backed by a serialized tf.train.Example record."""

    def __init__(self, record_str, cache_type=None, index=None):
        # record_str: serialized tf.train.Example bytes.
        # cache_type: optional external store for the raw record; when
        #   set, ``index`` is the key used with that store.
        super().__init__()
        self._cache_type = cache_type
        self._index = index
        if self._cache_type:
            assert self._index is not None,\
                "store space is disk, index cann't be None"
        self._parse_example_error = False
        example = self._parse_example(record_str)
        dic = common.convert_tf_example_to_dict(example)
        # should not be list for data block
        new_dict = {}
        for key, val in dic.items():
            new_dict[key] = val[0] if len(val) == 1 else val
        # Keep only the whitelisted join fields.
        self._features.update({key: new_dict[key] for key in new_dict
                               if key in common.ALLOWED_FIELDS.keys()})
        self._set_tf_record(record_str)
        self._csv_record = None
        # Release the protobuf eagerly; records can be large.
        self._gc_example(example)
@classmethod
def make(cls, example_id, event_time, raw_id, fname=None, fvalue=None):
    """Build an item from individual fields.

    Args:
        example_id: example id as ``bytes`` (it is ``.decode()``-ed).
        event_time: event time value.
        raw_id: optional raw id; skipped when falsy.
        fname: optional list of extra field names.
        fvalue: optional list of extra field values, parallel to
            ``fname``.

    Returns:
        A new ``cls`` instance wrapping the serialized example.
    """
    row = OrderedDict()
    row["example_id"] = example_id.decode()
    row["event_time"] = event_time
    if raw_id:
        row["raw_id"] = raw_id
    if fname:
        assert len(fname) == len(fvalue), \
            "Field name should match field value"
        for i, v in enumerate(fname):
            row[v] = fvalue[i]
    ex = common.convert_dict_to_tf_example(row)
    return cls(ex.SerializeToString())
@property
def tf_record(self):
    # When a cache backend is configured, the raw record lives there,
    # keyed by self._index; otherwise it is held in memory.
    if self._cache_type:
        return self._cache_type.get_data(self._index)
    return self._record_str

def _set_tf_record(self, record_str, cache=False):
    # if cache set, we switch the store space to memory
    # to speed up accessing later
    if self._cache_type and not cache:
        self._record_str = None
        self._cache_type.set_data(self._index, record_str)
    else:
        # From here on the record is pinned in memory; the cache
        # backend is dropped for this item.
        self._cache_type = None
        self._record_str = record_str
@property
def csv_record(self):
    # Lazily materialise the record as a dict; the result is cached so
    # later accesses skip the parse.
    if self._csv_record is None:
        self._csv_record = {}
        example = self._parse_example(self.tf_record)
        if not self._parse_example_error:
            try:
                self._csv_record = \
                    common.convert_tf_example_to_dict(example)
            except Exception as e: # pylint: disable=broad-except
                logging.error("Failed convert tf example to csv record, "\
                              "reason %s", e)
        # Release the protobuf whether or not conversion succeeded.
        self._gc_example(example)
    return self._csv_record
def add_extra_fields(self, additional_records, cache=False):
    """Merge extra allowed fields into the underlying tf.Example.

    Args:
        additional_records: dict of field name -> value; names not in
            common.ALLOWED_FIELDS are silently skipped.
        cache: forwarded to _set_tf_record; when True the re-serialized
            record is pinned in memory.
    """
    example = self._parse_example(self.tf_record)
    if example is not None:
        feat = example.features.feature
        for name, value in additional_records.items():
            if name not in common.ALLOWED_FIELDS:
                continue
            self._features.update({name: value})
            # Choose the protobuf feature kind from the field's
            # declared Python type.
            if common.ALLOWED_FIELDS[name].type is bytes:
                if isinstance(value, str):
                    value = value.encode()
                feat[name].CopyFrom(tf.train.Feature(
                    bytes_list=tf.train.BytesList(value=[value])
                    )
                )
            elif common.ALLOWED_FIELDS[name].type is float:
                feat[name].CopyFrom(tf.train.Feature(
                    float_list=tf.train.FloatList(value=[value]))
                )
            else:
                assert common.ALLOWED_FIELDS[name].type is int
                feat[name].CopyFrom(tf.train.Feature(
                    int64_list=tf.train.Int64List(value=[value]))
                )
        self._set_tf_record(example.SerializeToString(), cache)
        if self._csv_record is not None:
            # Invalidate the cached dict view; it is now stale.
            self._csv_record = None
    self._gc_example(example)
def _parse_example(self, record_str):
    """Deserialize record_str into tf.train.Example; None on failure.

    After the first failure ``_parse_example_error`` stays set and
    every later call short-circuits to None.
    """
    try:
        if not self._parse_example_error:
            example = tf.train.Example()
            example.ParseFromString(record_str)
            return example
    except Exception as e: # pylint: disable=broad-except
        logging.error("Failed parse tf.Example from record %s, reason %s",
                      record_str, e)
        self._parse_example_error = True
    return None

@staticmethod
def _gc_example(example):
    # Clear and drop the protobuf to release its memory promptly.
    if example is not None:
        example.Clear()
        del example
def clear(self):
    # Drop any cached copy from the external store and remove the
    # per-instance storage attributes.
    if self._cache_type:
        self._cache_type.delete(self._index)
    del self._record_str
    del self._csv_record
class TfRecordIter(RawDataIter):
    """Raw-data iterator over TFRecord files."""

    @classmethod
    def name(cls):
        # Registry key used to select this iterator implementation.
        return 'TF_RECORD'

    @contextmanager
    def _data_set(self, fpath):
        """Context manager yielding a batched TFRecordDataset for fpath.

        Open failures are logged, the dataset reference is released,
        and the original exception is re-raised after cleanup.
        """
        data_set = None
        expt = None
        try:
            data_set = tf.data.TFRecordDataset(
                [fpath],
                compression_type=self._options.compressed_type,
                num_parallel_reads=1,
                buffer_size=None if self._options.read_ahead_size <= 0 \
                    else self._options.read_ahead_size
            )
            batch_size = self._options.read_batch_size if \
                self._options.read_batch_size > 0 else 1
            data_set = data_set.batch(batch_size)
            yield data_set
        except Exception as e: # pylint: disable=broad-except
            logging.warning("Failed to access file: %s, reason %s", fpath, e)
            expt = e
        if data_set is not None:
            del data_set
        if expt is not None:
            raise expt
def _inner_iter(self, fpath):
    """Yield a TfExampleItem for each validated record in fpath."""
    with self._data_set(fpath) as data_set:
        for batch in iter(data_set):
            for raw_data in batch.numpy():
                # Skip records that fail validation rather than abort.
                if not self._validator.check_tfrecord(raw_data):
                    continue
                index = self._index
                # Fall back to 0 when the iterator has no position yet.
                if index is None:
                    index = 0
                yield TfExampleItem(raw_data,
                                    self._cache_type, index)
def _reset_iter(self, index_meta):
    """Start a fresh record iterator from index metadata.

    Returns a (iterator, first item) pair, or (None, None) when no
    metadata is given.
    """
    if index_meta is None:
        return None, None
    fiter = self._inner_iter(index_meta.fpath)
    first_item = next(fiter)
    return fiter, first_item
| bytedance/fedlearner | fedlearner/data_join/raw_data_iter_impl/tf_record_iter.py | Python | apache-2.0 | 7,400 |
import os,re,fnmatch
from ehive.runnable.IGFBaseJobFactory import IGFBaseJobFactory
class FastqFileFactory(IGFBaseJobFactory):
    """A job factory creating one fan job per demultiplexed fastq file."""

    def param_defaults(self):
        """Return default parameters, extending the base factory's.

        ``read_pattern`` matches Illumina-style R1/R2 fastq names,
        e.g. ``sample_S1_L001_R1_001.fastq.gz``.
        """
        params_dict = super(FastqFileFactory, self).param_defaults()
        params_dict.update({
            'required_keyword': None,
            'filter_keyword': None,
            # Raw string: the pattern contains regex escapes (\S, \d)
            # that must reach re.compile untouched; the byte content is
            # identical to the previous non-raw literal, but this avoids
            # Python's invalid-escape-sequence deprecation.
            'read_pattern': r'\S+_L00\d_R[12]_\d+\.fastq(\.gz)?',
        })
        return params_dict
def run(self):
try:
fastq_dir=self.param_required('fastq_dir')
seqrun_igf_id=self.param_required('seqrun_igf_id')
required_keyword=self.param('required_keyword')
filter_keyword=self.param('filter_keyword')
read_pattern=self.param_required('read_pattern')
if required_keyword is None and \
filter_keyword is None:
raise ValueError('Required either required_keyword or filter_keyword')
read_pattern=re.compile(r'{0}'.format(read_pattern)) # compile read pattern
if not os.path.exists(fastq_dir):
raise IOError('fastq dir {0} not accessible'.format(fastq_dir))
fastq_list=list() # create empty output list
for root, _, files in os.walk(top=fastq_dir):
for file in files:
if fnmatch.fnmatch(file, '*.fastq.gz'): # only consider fastq.gz files for now
if re.search(read_pattern,file): # skip if its not R1 and R2 reads and not illumina format name
if required_keyword and fnmatch.fnmatch(file, required_keyword ):
fastq_list.append({'fastq_file':os.path.join(root,file)}) # add fastq file to the list if its amatch
elif filter_keyword and not fnmatch.fnmatch(file, filter_keyword ):
fastq_list.append({'fastq_file':os.path.join(root,file)}) # add fastq file to the list if its not a match
self.param('sub_tasks',fastq_list) # add fastq files to the dataflow
except Exception as e:
message = \
'seqrun: {2}, Error in {0}: {1}'.\
format(
self.__class__.__name__,
e,
seqrun_igf_id)
self.warning(message)
self.post_message_to_slack(message,reaction='fail') # post msg to slack for failed jobs
self.post_message_to_ms_team(
message=message,
reaction='fail')
raise | imperial-genomics-facility/data-management-python | ehive/runnable/jobfactory/FastqFileFactory.py | Python | apache-2.0 | 2,589 |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# __author__ = 'CwT'
from .global_state import State

# Module-level singleton holding the crawler's shared state.
Global = State()
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generate the wide baseline stereo image dataset from the Matterport3D.
We generate the data by randomly sample different perspective views from
panoramic images in Matterport3D to create a large scale dataset with a large
varieties of motion. The dataset contains a pair of perspective images labeled
with the relative rotation from camera 2 to camera 1, and the relative
translation direction in the frame of camera 1.
Matterport3D: https://niessner.github.io/Matterport/
https://arxiv.org/pdf/1709.06158.pdf
"""
import collections
import math
import numpy as np
from pano_utils import math_utils
from pano_utils import transformation
import tensorflow.compat.v1 as tf
def world_to_image_projection(p_world, intrinsics, pose_w2c):
  """Project points in the world frame to the image plane.

  Args:
    p_world: [HEIGHT, WIDTH, 3] points in the world's coordinate frame.
    intrinsics: [3, 3] camera's intrinsic matrix.
    pose_w2c: [3, 4] camera pose matrix (world to camera).

  Returns:
    [HEIGHT, WIDTH, 2] points in the image coordinate.
    [HEIGHT, WIDTH, 1] the z depth.
  """
  shape = p_world.shape.as_list()
  height, width = shape[0], shape[1]
  # Homogenize: [H, W, 3] -> [H, W, 4] so one matmul applies [R|t].
  p_world_homogeneous = tf.concat([p_world, tf.ones([height, width, 1])], -1)
  p_camera = tf.squeeze(
      tf.matmul(pose_w2c[tf.newaxis, tf.newaxis, :],
                tf.expand_dims(p_world_homogeneous, -1)), -1)
  # Flip z -- converts between the OpenGL-style camera frame (-z forward)
  # and the projection convention (+z depth); mirrors the inverse flip in
  # image_to_world_projection.
  p_camera = p_camera*tf.constant([1., 1., -1.], shape=[1, 1, 3])
  p_image = tf.squeeze(tf.matmul(intrinsics[tf.newaxis, tf.newaxis, :],
                                 tf.expand_dims(p_camera, -1)), -1)
  z = p_image[:, :, -1:]
  # divide_no_nan guards against pixels that project to z == 0.
  return tf.math.divide_no_nan(p_image[:, :, :2], z), z
def image_to_world_projection(depth, intrinsics, pose_c2w):
  """Project points on the image to the world frame.

  Args:
    depth: [HEIGHT, WIDTH, 1] the depth map contains the radial distance from
      the camera eye to each point corresponding to each pixel.
    intrinsics: [3, 3] camera's intrinsic matrix.
    pose_c2w: [3, 4] camera pose matrix (camera to world).

  Returns:
    [HEIGHT, WIDTH, 3] points in the world's coordinate frame.
  """
  shape = depth.shape.as_list()
  height, width = shape[0], shape[1]
  # Pixel grid in homogeneous coordinates: (x, y, 1) per pixel.
  xx, yy = tf.meshgrid(tf.lin_space(0., width-1., width),
                       tf.lin_space(0., height-1., height))
  p_pixel_homogeneous = tf.concat([tf.stack([xx, yy], axis=-1),
                                   tf.ones([height, width, 1])], -1)
  # Back-project each pixel through the inverse intrinsics to a ray.
  p_image = tf.squeeze(tf.matmul(
      tf.matrix_inverse(intrinsics[tf.newaxis, tf.newaxis, :]),
      tf.expand_dims(p_pixel_homogeneous, -1)), -1)
  # Convert the radial depth to z-depth by projecting the unit ray onto
  # the optical axis (0, 0, 1).
  z = depth*tf.reduce_sum(
      tf.math.l2_normalize(p_image, axis=-1)*tf.constant([[[0., 0., 1.]]]),
      axis=-1,
      keepdims=True)
  p_camera = z*p_image
  # convert to OpenGL coordinate system.
  p_camera = p_camera*tf.constant([1., 1., -1.], shape=[1, 1, 3])
  p_camera_homogeneous = tf.concat(
      [p_camera, tf.ones(shape=[height, width, 1])], -1)
  # Convert camera coordinates to world coordinates.
  p_world = tf.squeeze(
      tf.matmul(pose_c2w[tf.newaxis, tf.newaxis, :],
                tf.expand_dims(p_camera_homogeneous, -1)), -1)
  return p_world
def overlap_mask(depth1,
                 pose1_c2w,
                 depth2,
                 pose2_c2w,
                 intrinsics):
  """Compute the overlap masks of two views using triangulation.

  The masks have the same shape of the input images. A pixel value is true if
  it can be seen by both cameras.

  Args:
    depth1: [HEIGHT, WIDTH, 1] the depth map of the first view.
    pose1_c2w: [3, 4] camera pose matrix (camera to world) of the first view.
      pose1_c2w[:, :3] is the rotation and pose1_c2w[:, -1] is the translation.
    depth2: [HEIGHT, WIDTH, 1] the depth map of the second view.
    pose2_c2w: [3, 4] camera pose matrix (camera to world) of the second view.
      pose1_c2w[:, :3] is the rotation and pose1_c2w[:, -1] is the translation.
    intrinsics: [3, 3] camera's intrinsic matrix.

  Returns:
    [HEIGHT, WIDTH] two overlap masks of the two inputs respectively.
  """
  # Invert the camera-to-world poses (padded to 4x4) to get
  # world-to-camera transforms for the projection step.
  pose1_w2c = tf.matrix_inverse(
      tf.concat([pose1_c2w, tf.constant([[0., 0., 0., 1.]])], 0))[:3]
  pose2_w2c = tf.matrix_inverse(
      tf.concat([pose2_c2w, tf.constant([[0., 0., 0., 1.]])], 0))[:3]
  # Lift view 1's pixels to world space, then project into view 2
  # (and vice versa).
  p_world1 = image_to_world_projection(depth1, intrinsics, pose1_c2w)
  p_image1_in_2, z1_c2 = world_to_image_projection(
      p_world1, intrinsics, pose2_w2c)
  p_world2 = image_to_world_projection(depth2, intrinsics, pose2_c2w)
  p_image2_in_1, z2_c1 = world_to_image_projection(
      p_world2, intrinsics, pose1_w2c)
  shape = depth1.shape.as_list()
  height, width = shape[0], shape[1]
  height = tf.cast(height, tf.float32)
  width = tf.cast(width, tf.float32)
  # Error tolerance.
  eps = 1e-4
  # check the object seen by camera 2 is also projected to camera 1's image
  # plane and in front of the camera 1.
  mask_h2_in_1 = tf.logical_and(
      tf.less_equal(p_image2_in_1[:, :, 1], height+eps),
      tf.greater_equal(p_image2_in_1[:, :, 1], 0.-eps))
  mask_w2_in_1 = tf.logical_and(
      tf.less_equal(p_image2_in_1[:, :, 0], width+eps),
      tf.greater_equal(p_image2_in_1[:, :, 0], 0.-eps))
  # check the projected points are within the image boundaries and in front of
  # the camera.
  mask2_in_1 = tf.logical_and(
      tf.logical_and(mask_h2_in_1, mask_w2_in_1), tf.squeeze(z2_c1, -1) > 0)
  # check the object seen by camera 1 is also projected to camera 2's image
  # plane and in front of the camera 2.
  mask_h1_in_2 = tf.logical_and(
      tf.less_equal(p_image1_in_2[:, :, 1], height+eps),
      tf.greater_equal(p_image1_in_2[:, :, 1], 0.-eps))
  mask_w1_in_2 = tf.logical_and(
      tf.less_equal(p_image1_in_2[:, :, 0], width+eps),
      tf.greater_equal(p_image1_in_2[:, :, 0], 0.-eps))
  # check the projected points are within the image boundaries and in front of
  # the camera.
  mask1_in_2 = tf.logical_and(
      tf.logical_and(mask_h1_in_2, mask_w1_in_2), tf.squeeze(z1_c2, -1) > 0)
  return mask1_in_2, mask2_in_1
def overlap_ratio(mask1, mask2):
  """Computes the overlap ratio between a pair of overlap masks.

  The overlap ratio is the fraction of pixels marked True, computed for each
  mask; the smaller of the two fractions is returned.

  Args:
    mask1: [HEIGHT, WIDTH] boolean overlap mask.
    mask2: [HEIGHT, WIDTH] boolean overlap mask.

  Returns:
    A scalar tf.float32 tensor in [0, 1].
  """
  shape = mask1.shape.as_list()
  height, width = shape[0], shape[1]
  area = height * width
  # Bug fix: the original called `tf.min`, which does not exist in the
  # TensorFlow API (AttributeError at trace time); `tf.minimum` is the
  # elementwise minimum of the two scalar ratios.
  return tf.minimum(
      tf.reduce_sum(tf.cast(mask1, tf.float32)) / area,
      tf.reduce_sum(tf.cast(mask2, tf.float32)) / area)
# This is written for Matterport3D's directory structure.
def generate_from_meta(meta_data_path,
                       pano_data_dir,
                       pano_height=1024,
                       pano_width=2048,
                       output_height=512,
                       output_width=512):
  """Generate the stereo image dataset from Matterport3D using the meta data.
  Example call:
    ds = generate_from_meta(
        meta_data_path='matterport3d/saved_meta/R90_fov90/test_meta/',
        pano_data_dir='matterport3d/pano/')
  Args:
    meta_data_path: (string) the path to the meta data files.
    pano_data_dir: (string) the path to the panorama images of the Matterport3D.
    pano_height: (int) the height dimension of the panorama images.
    pano_width: (int) the width dimension of the panorama images.
    output_height: (int) the height dimension of the output perspective images.
    output_width: (int) the width dimension of the output perspective images.
  Returns:
    Tensorflow Dataset.
  """
  def load_text(file_path, n_lines=200):
    """Load text data from a file."""
    # Batch up to n_lines lines into a single element, then re-slice so
    # each text line becomes one dataset element.
    return tf.data.Dataset.from_tensor_slices(
        tf.data.experimental.get_single_element(
            tf.data.TextLineDataset(file_path).batch(n_lines)))
  def load_single_image(filename):
    """Load a single image given the filename."""
    image = tf.image.decode_jpeg(tf.read_file(filename), 3)
    image = tf.image.convert_image_dtype(image, tf.float32)
    image.set_shape([pano_height, pano_width, 3])
    return image
  def string_to_matrix(s, shape):
    """Decode strings to matrices tensor."""
    # Comma-separated float string -> tensor of the requested shape.
    m = tf.reshape(
        tf.stack([tf.decode_csv(s, [0.0] * np.prod(shape))], 0), shape)
    m.set_shape(shape)
    return m
  def decode_line(line):
    """Decode text lines."""
    DataPair = collections.namedtuple(
        'DataPair', ['src_img', 'trt_img', 'fov', 'rotation', 'translation'])
    # Each meta line has 10 space-separated fields; from the usage below they
    # are presumably: house_id, pano1, pano2, fov, r1, t1, r2, t2,
    # sampled_r1, sampled_r2 -- TODO confirm against the meta-file writer.
    splitted = tf.decode_csv(line, ['']*10, field_delim=' ')
    img1 = load_single_image(pano_data_dir+splitted[0]+'/'+splitted[1]+'.jpeg')
    img2 = load_single_image(pano_data_dir+splitted[0]+'/'+splitted[2]+'.jpeg')
    fov = string_to_matrix(splitted[3], [1])
    r1 = string_to_matrix(splitted[4], [3, 3])
    t1 = string_to_matrix(splitted[5], [3])
    r2 = string_to_matrix(splitted[6], [3, 3])
    t2 = string_to_matrix(splitted[7], [3])
    sampled_r1 = string_to_matrix(splitted[8], [3, 3])
    sampled_r2 = string_to_matrix(splitted[9], [3, 3])
    # Relative rotation from camera 2 to camera 1: sampled_r1^T @ sampled_r2.
    r_c2_to_c1 = tf.matmul(sampled_r1, sampled_r2, transpose_a=True)
    # Unit translation direction rotated into camera 1's frame.
    t_c1 = tf.squeeze(tf.matmul(sampled_r1,
                                tf.expand_dims(tf.nn.l2_normalize(t2-t1), -1),
                                transpose_a=True))
    sampled_rotation = tf.matmul(tf.stack([sampled_r1, sampled_r2], 0),
                                 tf.stack([r1, r2], 0), transpose_a=True)
    sampled_views = transformation.rectilinear_projection(
        tf.stack([img1, img2], 0),
        [output_height, output_width],
        fov,
        tf.matrix_transpose(sampled_rotation))
    src_img, trt_img = sampled_views[0], sampled_views[1]
    return DataPair(src_img, trt_img, fov, r_c2_to_c1, t_c1)
  # meta_data_path has slash '/' at the end.
  ds = tf.data.Dataset.list_files(meta_data_path+'*')
  ds = ds.flat_map(load_text)
  ds = ds.map(decode_line)
  return ds
def generate_random_views(pano1_rgb,
                          pano2_rgb,
                          r1, t1, r2, t2,
                          max_rotation=90.,
                          max_tilt=5.,
                          output_fov=90.,
                          output_height=512,
                          output_width=512,
                          pano1_depth=None,
                          pano2_depth=None):
  """Generate stereo image pairs by randomly sampling the panoramic images.
  We randomly sample camera lookat directions and project the panorama to
  perspective images. We also compute the overlaping area between the pair given
  the depth map if depthmaps are provided. The overlap is measured by the
  minimum of the ratio between the area seen by both cameras and the image size.
  This function returns a ViewPair object containing the perspective images,
  the masks that shows the common area seen by both cameras, the camera's field
  of view (FoV), the relative rotation from camera 2 to camera 1, and the
  relative translation direction in the frame of camera 1.
  Args:
    pano1_rgb: [HEIGHT, WIDTH, 3] the input RGB panoramic image.
    pano2_rgb: [HEIGHT, WIDTH, 3] the input RGB panoramic image.
    r1: [3, 3] the camera to world rotation of camera 1.
    t1: [3] the world location of camera 1.
    r2: [3, 3] the camera to world rotation of camera 2.
    t2: [3] the world location of camera 2.
    max_rotation: (float) maximum relative rotation between the output image
      pair in degrees.
    max_tilt: (float) maximum tilt angle of the up vector in degrees.
    output_fov: (float) output images' horizontal field of view in degrees.
    output_height: (int) the height dimension of the output perspective images.
    output_width: (int) the width dimension of the output perspective images.
    pano1_depth: [HEIGHT, WIDTH, 1] the panoramic depth map of pano1_rgb.
    pano2_depth: [HEIGHT, WIDTH, 1] the panoramic depth map of pano2_rgb.
  Returns:
    ViewPair
  """
  ViewPair = collections.namedtuple(
      'ViewPair', ['img1', 'img2', 'mask1', 'mask2', 'fov', 'r', 't'])
  # Axis remap (y <-> z with a sign flip) applied to the sampled direction;
  # presumably converts the sampler's convention to a z-up frame -- confirm.
  swap_yz = tf.constant([[1., 0., 0.], [0., 0., 1.], [0., -1., 0.]],
                        shape=[1, 3, 3])
  # First lookat direction: uniform on a sphere band (elevation limited to
  # +/- 60 degrees by the sin(pi/3) bounds).
  lookat_direction1 = math_utils.random_vector_on_sphere(
      1, [[-math.sin(math.pi/3), math.sin(math.pi/3)], [0., 2*math.pi]])
  lookat_direction1 = tf.squeeze(
      tf.matmul(swap_yz, tf.expand_dims(lookat_direction1, -1)), -1)
  # Second direction lies within a cone of max_rotation around the first.
  lookat_direction2 = math_utils.uniform_sampled_vector_within_cone(
      lookat_direction1, math_utils.degrees_to_radians(max_rotation))
  lookat_directions = tf.concat([lookat_direction1, lookat_direction2], 0)
  # Each camera's up vector is tilted away from +z by at most max_tilt.
  up1 = math_utils.uniform_sampled_vector_within_cone(
      tf.constant([[0., 0., 1.]]), math_utils.degrees_to_radians(max_tilt))
  up2 = math_utils.uniform_sampled_vector_within_cone(
      tf.constant([[0., 0., 1.]]), math_utils.degrees_to_radians(max_tilt))
  lookat_rotations = math_utils.lookat_matrix(
      tf.concat([up1, up2], 0), lookat_directions)
  sample_rotations = tf.matmul(
      tf.concat([r1, r2], 0), lookat_rotations, transpose_a=True)
  sampled_views = transformation.rectilinear_projection(
      tf.stack([pano1_rgb, pano2_rgb], 0),
      [output_height, output_width],
      output_fov,
      sample_rotations)
  # Relative rotation from camera 2 to camera 1: R1^T @ R2.
  r_c2_to_c1 = tf.matmul(
      lookat_rotations[0], lookat_rotations[1], transpose_a=True)
  # Unit translation direction rotated into camera 1's frame.
  t_c1 = tf.squeeze(tf.matmul(lookat_rotations[0],
                              tf.expand_dims(tf.nn.l2_normalize(t2-t1), -1),
                              transpose_a=True))
  if pano1_depth is not None and pano2_depth is not None:
    sampled_depth = transformation.rectilinear_projection(
        tf.stack([pano1_depth, pano2_depth], 0),
        [output_height, output_width],
        output_fov,
        sample_rotations)
    # Pinhole intrinsics derived from the horizontal FoV.  NOTE(review): the
    # negative fy flips the pixel y axis; presumably matches the projection's
    # image coordinate convention -- confirm.
    fx = output_width*0.5/math.tan(math_utils.degrees_to_radians(output_fov)/2)
    intrinsics = tf.constant([[fx, 0., output_width*0.5],
                              [0., -fx, output_height*0.5],
                              [0., 0., 1.]])
    # [3, 4] camera-to-world poses in the form [R | t].
    pose1_c2w = tf.concat([lookat_rotations[0], tf.expand_dims(t1, -1)], 1)
    pose2_c2w = tf.concat([lookat_rotations[1], tf.expand_dims(t2, -1)], 1)
    mask1, mask2 = overlap_mask(sampled_depth[0],
                                pose1_c2w,
                                sampled_depth[1],
                                pose2_c2w,
                                intrinsics)
  else:
    # No depth maps: overlap masks cannot be computed.
    mask1 = None
    mask2 = None
  return ViewPair(sampled_views[0],
                  sampled_views[1],
                  mask1,
                  mask2,
                  output_fov,
                  r_c2_to_c1,
                  t_c1)
| google-research/google-research | direction_net/dataset.py | Python | apache-2.0 | 15,616 |
# coding=utf-8
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implements a parallel data reader with queues and optional shuffling."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint:disable=g-direct-tensorflow-import
from tensorflow.python.framework import dtypes as tf_dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import io_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import gfile
from tensorflow.python.summary import summary
from tensorflow.python.training import input as tf_input
from tensorflow.python.training import queue_runner
# pylint:enable=g-direct-tensorflow-import
class ParallelReader(io_ops.ReaderBase):
  """Reader class that uses multiple readers in parallel to improve speed.
  See ReaderBase for supported methods.
  """
  def __init__(self,
               reader_class,
               common_queue,
               num_readers=4,
               reader_kwargs=None):
    """ParallelReader creates num_readers instances of the reader_class.
    Each instance is created by calling the `reader_class` function passing
    the arguments specified in `reader_kwargs` as in:
      reader_class(**read_kwargs)
    When you read from a ParallelReader, with its `read()` method,
    you just dequeue examples from the `common_queue`.
    The readers will read different files in parallel, asynchronously enqueueing
    their output into `common_queue`. The `common_queue.dtypes` must be
    [tf.string, tf.string]
    Because each reader can read from a different file, the examples in the
    `common_queue` could be from different files. Due to the asynchronous
    reading there is no guarantee that all the readers will read the same
    number of examples.
    If the `common_queue` is a shuffling queue, then the examples are shuffled.
    Usage:
      common_queue = tf.queue.RandomShuffleQueue(
          capacity=256,
          min_after_dequeue=128,
          dtypes=[tf.string, tf.string])
      p_reader = ParallelReader(tf.compat.v1.TFRecordReader, common_queue)
      common_queue = tf.queue.FIFOQueue(
          capacity=256,
          dtypes=[tf.string, tf.string])
      p_reader = ParallelReader(readers, common_queue, num_readers=2)
    Args:
      reader_class: one of the io_ops.ReaderBase subclasses ex: TFRecordReader
      common_queue: a Queue to hold (key, value pairs) with `dtypes` equal to
        [tf.string, tf.string]. Must be one of the data_flow_ops.Queues
        instances, ex. `tf.queue.FIFOQueue()`, `tf.queue.RandomShuffleQueue()`,
        ...
      num_readers: a integer, number of instances of reader_class to create.
      reader_kwargs: an optional dict of kwargs to create the readers.
    Raises:
      TypeError: if `common_queue.dtypes` is not [tf.string, tf.string].
    """
    # The common queue must carry exactly (key, value) string pairs.
    if len(common_queue.dtypes) != 2:
      raise TypeError('common_queue.dtypes must be [tf.string, tf.string]')
    for dtype in common_queue.dtypes:
      if not dtype.is_compatible_with(tf_dtypes.string):
        raise TypeError('common_queue.dtypes must be [tf.string, tf.string]')
    reader_kwargs = reader_kwargs or {}
    # All readers share the same constructor kwargs.
    self._readers = [reader_class(**reader_kwargs) for _ in range(num_readers)]
    self._common_queue = common_queue
  @property
  def num_readers(self):
    # Number of underlying reader instances.
    return len(self._readers)
  @property
  def common_queue(self):
    # The queue shared by all reader instances.
    return self._common_queue
  def read(self, queue, name=None):
    """Returns the next record (key, value pair) produced by the reader.
    The multiple reader instances are all configured to `read()` from the
    filenames listed in `queue` and enqueue their output into the `common_queue`
    passed to the constructor, and this method returns the next record dequeued
    from that `common_queue`.
    Readers dequeue a work unit from `queue` if necessary (e.g. when a
    reader needs to start reading from a new file since it has finished with
    the previous file).
    A queue runner for enqueuing in the `common_queue` is automatically added
    to the TF QueueRunners collection.
    Args:
      queue: A Queue or a mutable string Tensor representing a handle to a
        Queue, with string work items.
      name: A name for the operation (optional).
    Returns:
      The next record (i.e. (key, value pair)) from the common_queue.
    """
    # NOTE(review): every call registers a fresh QueueRunner via
    # _configure_readers_by; read() is expected to be called once per graph.
    self._configure_readers_by(queue)
    return self._common_queue.dequeue(name=name)
  def read_up_to(self, queue, num_records, name=None):
    """Returns up to num_records (key, value pairs) produced by a reader.
    Will dequeue a work unit from queue if necessary (e.g., when the
    Reader needs to start reading from a new file since it has
    finished with the previous file).
    It may return less than num_records even before the last batch.
    **Note** This operation is not supported by all types of `common_queue`s.
    If a `common_queue` does not support `dequeue_up_to()`, then a
    `tf.errors.UnimplementedError` is raised.
    Args:
      queue: A Queue or a mutable string Tensor representing a handle to a
        Queue, with string work items.
      num_records: Number of records to read.
      name: A name for the operation (optional).
    Returns:
      A tuple of Tensors (keys, values) from common_queue.
      keys: A 1-D string Tensor.
      values: A 1-D string Tensor.
    """
    self._configure_readers_by(queue)
    return self._common_queue.dequeue_up_to(num_records, name)
  def _configure_readers_by(self, queue):
    # Build one enqueue op per reader and register a single QueueRunner that
    # drives all of them asynchronously.
    enqueue_ops = []
    for reader in self._readers:
      enqueue_ops.append(self._common_queue.enqueue(reader.read(queue)))
    queue_runner.add_queue_runner(
        queue_runner.QueueRunner(self._common_queue, enqueue_ops))
  def num_records_produced(self, name=None):
    """Returns the number of records this reader has produced.
    Args:
      name: A name for the operation (optional).
    Returns:
      An int64 Tensor.
    """
    # Sum of the per-reader counters.
    num_records = [r.num_records_produced() for r in self._readers]
    return math_ops.add_n(num_records, name=name)
  def num_work_units_completed(self, name=None):
    """Returns the number of work units this reader has finished processing.
    Args:
      name: A name for the operation (optional).
    Returns:
      An int64 Tensor.
    """
    # Sum of the per-reader counters.
    num_work_units = [r.num_work_units_completed() for r in self._readers]
    return math_ops.add_n(num_work_units, name=name)
def parallel_read(data_sources,
                  reader_class,
                  num_epochs=None,
                  num_readers=4,
                  reader_kwargs=None,
                  shuffle=True,
                  dtypes=None,
                  capacity=256,
                  min_after_dequeue=128,
                  seed=None,
                  scope=None):
  """Reads multiple records in parallel from data_sources using n readers.
  It uses a ParallelReader to read from multiple files in parallel using
  multiple readers created using `reader_class` with `reader_kwargs'.
  If shuffle is True the common_queue would be a RandomShuffleQueue otherwise
  it would be a FIFOQueue.
  Usage:
      data_sources = ['path_to/train*']
      key, value = parallel_read(data_sources, tf.CSVReader, num_readers=4)
  Args:
    data_sources: a list/tuple of files or the location of the data, i.e.
      /path/to/train@128, /path/to/train* or /tmp/.../train*
    reader_class: one of the io_ops.ReaderBase subclasses ex: TFRecordReader
    num_epochs: The number of times each data source is read. If left as None,
      the data will be cycled through indefinitely.
    num_readers: a integer, number of Readers to create.
    reader_kwargs: an optional dict, of kwargs for the reader.
    shuffle: boolean, whether should shuffle the files and the records by using
      RandomShuffleQueue as common_queue.
    dtypes: A list of types. The length of dtypes must equal the number of
      elements in each record. If it is None it will default to [tf.string,
      tf.string] for (key, value).
    capacity: integer, capacity of the common_queue.
    min_after_dequeue: integer, minimum number of records in the common_queue
      after dequeue. Needed for a good shuffle.
    seed: A seed for RandomShuffleQueue.
    scope: Optional name scope for the ops.
  Returns:
    key, value: a tuple of keys and values from the data_source.
  """
  # Expand globs / nested lists into an explicit list of file names.
  data_files = get_data_files(data_sources)
  with ops.name_scope(scope, 'parallel_read'):
    filename_queue = tf_input.string_input_producer(
        data_files,
        num_epochs=num_epochs,
        shuffle=shuffle,
        seed=seed,
        name='filenames')
    # Default record signature is a (key, value) string pair.
    dtypes = dtypes or [tf_dtypes.string, tf_dtypes.string]
    if shuffle:
      common_queue = data_flow_ops.RandomShuffleQueue(
          capacity=capacity,
          min_after_dequeue=min_after_dequeue,
          dtypes=dtypes,
          seed=seed,
          name='common_queue')
    else:
      common_queue = data_flow_ops.FIFOQueue(
          capacity=capacity, dtypes=dtypes, name='common_queue')
    # Report queue fullness as a fraction in [0, 1] for monitoring.
    summary.scalar(
        'fraction_of_%d_full' % capacity,
        math_ops.cast(common_queue.size(), tf_dtypes.float32) * (1. / capacity))
    return ParallelReader(
        reader_class,
        common_queue,
        num_readers=num_readers,
        reader_kwargs=reader_kwargs).read(filename_queue)
def single_pass_read(data_sources, reader_class, reader_kwargs=None,
                     scope=None):
  """Makes exactly one sequential pass over data_sources with a single reader.

  Args:
    data_sources: a list/tuple of files or the location of the data, i.e.
      /path/to/train@128, /path/to/train* or /tmp/.../train*
    reader_class: one of the io_ops.ReaderBase subclasses ex: TFRecordReader.
    reader_kwargs: an optional dict, of kwargs for the reader.
    scope: Optional name scope for the ops.

  Returns:
    key, value: a tuple of keys and values from the data_source.
  """
  filenames = get_data_files(data_sources)
  kwargs = reader_kwargs or {}
  with ops.name_scope(scope, 'single_pass_read'):
    # num_epochs=1 and shuffle=False give a deterministic single pass.
    filename_queue = tf_input.string_input_producer(
        filenames, num_epochs=1, shuffle=False, capacity=1, name='filenames')
    reader = reader_class(**kwargs)
    return reader.read(filename_queue)
def get_data_files(data_sources):
  """Resolves data_sources into a flat list of concrete file names.

  Args:
    data_sources: a list/tuple of files or the location of the data, i.e.
      /path/to/train@128, /path/to/train* or /tmp/.../train*

  Returns:
    a list of data_files.

  Raises:
    ValueError: if data files are not found
  """
  if isinstance(data_sources, (list, tuple)):
    # Flatten by resolving every entry recursively.
    data_files = [
        data_file
        for source in data_sources
        for data_file in get_data_files(source)
    ]
  elif any(wildcard in data_sources for wildcard in ('*', '?', '[')):
    # Glob patterns are expanded through the filesystem.
    data_files = gfile.Glob(data_sources)
  else:
    data_files = [data_sources]
  if not data_files:
    raise ValueError('No data files found in %s' % (data_sources,))
  return data_files
| google-research/tf-slim | tf_slim/data/parallel_reader.py | Python | apache-2.0 | 11,775 |
from osgeo import gdal, osr, ogr
import numpy as np
import scipy.misc
import logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("banner-generator")
def create_raster_from_band( red, green, blue, output_file):
    """Combine three single-band rasters into one RGB GeoTIFF.

    All three inputs must share the same grid, geotransform and projection;
    the output inherits them from the red band.

    :param red: path of the single-band raster for the red channel.
    :param green: path of the single-band raster for the green channel.
    :param blue: path of the single-band raster for the blue channel.
    :param output_file: path of the GeoTIFF to create.
    """
    logger.debug("Create big raster in output_file : %s"%output_file)
    red_ds = gdal.Open(red)
    # XSize is the raster width (columns); YSize is the height (rows).
    width = red_ds.GetRasterBand(1).XSize
    height = red_ds.GetRasterBand(1).YSize
    # Bug fix: gdal Create() expects (xsize, ysize); the original passed
    # (YSize, XSize), which only worked for square tiles.
    dst_ds = gdal.GetDriverByName('GTiff').Create(output_file, width, height, 3, gdal.GDT_UInt16)
    dst_ds.SetGeoTransform(red_ds.GetGeoTransform())
    dst_ds.SetProjection(red_ds.GetProjection())
    def write_band(band, index_band):
        logger.debug("Write band : %s"%index_band)
        band_ds = gdal.Open(band)
        array = band_ds.GetRasterBand(1).ReadAsArray()
        dst_ds.GetRasterBand(index_band).WriteArray(array)
    # Bug fix: RGB band order is 1=red, 2=green, 3=blue; the original wrote
    # blue into band 2 and green into band 3, swapping the two channels.
    write_band(red, 1)
    write_band(green, 2)
    write_band(blue, 3)
    dst_ds.FlushCache()
    # Dereference to force GDAL to close and finish writing the dataset.
    dst_ds = None
    logger.debug("Big raster is write in output_file : %s"%output_file)
def create_png_from_raster(raster_file, output_file, blue_clip=(0.,2500.), red_clip=(0.,2500.), green_clip=(0.,2500.)):
    """Convert a 3-band raster (band order red, green, blue) to an 8-bit png.

    Each channel is clipped to its (min, max) range and rescaled to 0-255.

    :param raster_file: path of the input 3-band raster.
    :param output_file: path of the png file to write.
    :param blue_clip: (min, max) raw-value clip range for the blue channel.
    :param red_clip: (min, max) raw-value clip range for the red channel.
    :param green_clip: (min, max) raw-value clip range for the green channel.
    :returns: True on success, False when a clip range is invalid.
    """
    logger.debug("Create big png in output_file : %s"%output_file)
    raster_ds = gdal.Open(raster_file)
    bytes_max = 255.
    # Bug fix: also reject degenerate ranges (min == max) which would divide
    # by zero in clip_array below; the original only rejected min > max.
    for clip in (red_clip, green_clip, blue_clip):
        if clip[0] >= clip[1]:
            logger.error("Maximum clip value should be higther than the Minimum clip value")
            return False
    def clip_array(band_index, clip):
        # Clip raw band values to [min, max], then rescale to 0-255 ints.
        array = np.array(raster_ds.GetRasterBand(band_index).ReadAsArray())
        array = np.clip(array, clip[0], clip[1])
        array = array - clip[0]
        array = (np.float32(array)*bytes_max)/(clip[1]-clip[0])
        array = array.astype(int)
        return array
    logger.debug("Prepare red color, clip raw value at %s, %s"%red_clip)
    red_array = clip_array(1, red_clip)
    logger.debug("Prepare green color, clip raw value at %s, %s"%green_clip)
    green_array = clip_array(2, green_clip)
    logger.debug("Prepare blue color, clip raw value at %s, %s"%blue_clip)
    blue_array = clip_array(3, blue_clip)
    rgb = np.zeros((len(red_array), len(red_array[0]), 3), dtype=np.uint8)
    rgb[..., 0] = red_array
    rgb[..., 1] = green_array
    rgb[..., 2] = blue_array
    logger.debug("Writing png file in %s"%output_file)
    # NOTE(review): scipy.misc.imsave was removed in SciPy 1.2; consider
    # imageio.imwrite as the drop-in replacement.
    scipy.misc.imsave(output_file, rgb)
    return True
def get_x_y_for_lon_lat(raster_file, lon, lat):
    """Convert a WGS84 (lon, lat) position into pixel (x, y) of raster_file.

    :param raster_file: path of a georeferenced raster.
    :param lon: longitude in degrees (EPSG:4326).
    :param lat: latitude in degrees (EPSG:4326).
    :returns: (x, y) integer pixel coordinates in the raster grid.
    """
    logger.debug("Compute x and y from lon lat")
    logger.debug("Longitude : %s"%lon)
    logger.debug("Latitude : %s"%lat)
    sref = osr.SpatialReference()
    # NOTE(review): with GDAL >= 3 the EPSG:4326 axis order is lat/lon, so
    # AddPoint(lon, lat) may need OAMS_TRADITIONAL_GIS_ORDER -- confirm the
    # GDAL version this runs against.
    sref.ImportFromEPSG(4326)
    # create a geometry from coordinates
    point = ogr.Geometry(ogr.wkbPoint)
    point.AddPoint(lon, lat)
    raster_ds = gdal.Open(raster_file)
    dref = osr.SpatialReference()
    dref.ImportFromWkt(raster_ds.GetProjection())
    # Reproject the point from WGS84 into the raster's projection.
    ct = osr.CoordinateTransformation(sref,dref)
    point.Transform(ct)
    point_x = point.GetX()
    point_y = point.GetY()
    logger.debug("Point value in raster proj")
    logger.debug("Point x : %s"%point_x)
    logger.debug("Point y : %s"%point_y)
    ulx, xres, xskew, uly, yskew, yres = raster_ds.GetGeoTransform()
    logger.debug("Upper left coordinate in proj")
    logger.debug("Point x : %s"%ulx)
    logger.debug("Point x : %s"%uly)
    lrx = ulx + (raster_ds.RasterXSize * xres)
    lry = uly + (raster_ds.RasterYSize * yres)
    logger.debug("Lower rigth coordinate in proj")
    logger.debug("Point x : %s"%lrx)
    logger.debug("Point x : %s"%lry)
    logger.debug("Raster resolution")
    logger.debug("Res on X : %s"%xres)
    logger.debug("Res on Y : %s"%yres)
    # Projected offset from the upper-left corner divided by the pixel size
    # gives the pixel index.
    point_x = (point_x- ulx)/xres
    point_y = (point_y- uly)/yres
    return (int(point_x), int(point_y) )
def extract_banner(img_path, x, y, size_x, size_y, out_path):
    """Extract a size_x by size_y crop centred on (x, y) and save it as png.

    The crop window is clamped to the image bounds; if clamping leaves an
    empty window the extraction is aborted and False is returned.

    :param img_path: path of the source image.
    :param x: column (pixel) of the crop centre.
    :param y: row (pixel) of the crop centre.
    :param size_x: crop width in pixels.
    :param size_y: crop height in pixels.
    :param out_path: path of the png file to write.
    :returns: True on success, False when the clamped window is empty.
    """
    logger.debug("Extract banner")
    y_min = int(y-size_y/2)
    y_max = y_min+size_y
    x_min = int(x-size_x/2)
    x_max = x_min+size_x
    logger.debug("Extract data from table")
    logger.debug("Min x : %s"%x_min)
    logger.debug("Max x : %s"%x_max)
    logger.debug("Min y : %s"%y_min)
    logger.debug("Max y : %s"%y_max)
    img = scipy.misc.imread(img_path)
    # Clamp the window to the image bounds; len(img) is the height (rows)
    # and len(img[0]) the width (columns).
    y_min = max(0, min(y_min, len(img)))
    y_max = max(0, min(y_max, len(img)))
    x_min = max(0, min(x_min, len(img[0])))
    x_max = max(0, min(x_max, len(img[0])))
    logger.debug("After clamp")
    logger.debug("Min x : %s"%x_min)
    logger.debug("Max x : %s"%x_max)
    logger.debug("Min y : %s"%y_min)
    logger.debug("Max y : %s"%y_max)
    logger.debug("Image y: %s"%len(img))
    logger.debug("Image x: %s"%len(img[0]))
    if y_max == y_min:
        logger.error("After clamp, image size is Null")
        return False
    if x_max == x_min:
        logger.error("After clamp, image size is Null")
        return False
    # Copy the RGB channels of the window into a fresh uint8 buffer.
    rgb = np.zeros((y_max-y_min, x_max-x_min, 3), dtype=np.uint8)
    rgb[..., 0] = img[y_min:y_max,x_min:x_max, 0]
    rgb[..., 1] = img[y_min:y_max,x_min:x_max, 1]
    rgb[..., 2] = img[y_min:y_max,x_min:x_max, 2]
    logger.debug("Write banner in output file %s", out_path)
    scipy.misc.imsave(out_path, rgb)
    return True
if __name__ == '__main__':
    # Manual smoke test: enable verbose logging and exercise extract_banner,
    # including crop centres at and beyond the corners of a 10980px tile.
    logger.setLevel(logging.DEBUG)
    tiff_file = "/tmp/out.tiff"
    big_png_file = "/tmp/out_big.png"
    banner_file = "/tmp/out.png"
    # create_raster_from_band( '/tmp/tmp0_if50g9','/tmp/tmpz61ja8cq','/tmp/tmp7dl287r9', tiff_file)
    # x, y = get_x_y_for_lon_lat(tiff_file, 1.433333, 43.6)
    # create_png_from_raster(tiff_file, big_png_file, red_clip=(250., 2500.), blue_clip=(250., 2500.), green_clip=(250., 2500.))
    # extract_banner(big_png_file, x, y,1400, 800, banner_file)
    extract_banner(big_png_file, 0, 0,1400, 800, banner_file)
    extract_banner(big_png_file, 10980, 10980,1400, 800, banner_file)
    extract_banner(big_png_file, 20980, 20980,1400, 800, banner_file)
| yoms/sentinel-banner-generator | banner_generator.py | Python | apache-2.0 | 6,246 |
#encoding=utf-8
#一大类特征
from scipy import sparse
import re
class VectorEncoder(object):
    """Encodes rows of "name:value" feature strings into a sparse matrix.

    Each row is a whitespace-separated list of "feature:value" units.  A bare
    "-1" unit is treated as the placeholder feature "null:0".  Feature names
    are mapped to 1-based column indices (column 0 is never used).
    """

    def __init__(self):
        # Number of distinct features seen by fit(); kept in sync with idmap.
        self.n_size = 0
        # feature name -> 1-based column index.
        self.idmap = {}

    def fit(self, X):
        """Builds the feature-name -> column-index mapping from rows of X."""
        for row in X:
            for unit in re.split(r"\s+", row):
                # Robustness fix: leading/trailing whitespace yields empty
                # tokens, which crashed the original on unit.split(":").
                if not unit:
                    continue
                if unit == '-1':
                    unit = 'null:0'
                ent, _ = unit.split(":")
                if ent not in self.idmap:
                    self.idmap[ent] = 1 + len(self.idmap)
        # Bug fix: n_size was initialized but never updated (dead field).
        self.n_size = len(self.idmap)

    def size(self):
        """Returns the number of distinct features seen by fit()."""
        return len(self.idmap)

    def transform(self, X):
        """Transforms rows into a CSR matrix of shape [len(X), size() + 1].

        Features not seen during fit() are silently ignored.

        :param X: iterable of "name:value" row strings.
        :return: sparse matrix (scipy.sparse.csr_matrix, dtype float).
        """
        data = []
        indices = []
        indptr = [0]  # row i occupies data[indptr[i]:indptr[i+1]]
        n_row = 0
        n_col = self.size() + 1
        for row in X:
            n_row += 1
            buf = []
            for unit in re.split(r"\s+", row):
                if not unit:
                    continue
                if unit == '-1':
                    unit = 'null:0'
                ent, value = unit.split(":")
                if ent in self.idmap:
                    buf.append((self.idmap[ent], float(value)))
            # CSR expects column indices sorted within each row.
            for ind, val in sorted(buf, key=lambda pair: pair[0]):
                indices.append(ind)
                data.append(val)
            indptr.append(len(data))
        return sparse.csr_matrix((data, indices, indptr),
                                 shape=(n_row, n_col), dtype=float)
if __name__ == '__main__':
    # Quick smoke test: fit two rows and print the dense encoding.
    sample_rows = ["a:1 b:2", "a:3 c:4"]
    encoder = VectorEncoder()
    encoder.fit(sample_rows)
    dense = encoder.transform(sample_rows).toarray()
    print(dense)
| eryueniaobp/contest | encoder/VectorEncoder.py | Python | apache-2.0 | 1,662 |
"""Test study generator."""
from typing import Collection
from vizier.pyvizier import pythia as vz
def flat_space_with_all_types() -> vz.SearchSpace:
  """Search space with all parameter types."""
  space = vz.SearchSpace()
  root = space.select_root()
  # One parameter of every supported kind, covering linear and log scales.
  root.add_float_param('lineardouble', -1., 2.)
  root.add_float_param('logdouble', 1e-4, 1e2, scale_type=vz.ScaleType.LOG)
  root.add_int_param('integer', -2, 2)
  root.add_categorical_param('categorical', ['a', 'aa', 'aaa'])
  root.add_bool_param('boolean')
  root.add_discrete_param('discrete_double', [-.5, 1.0, 1.2])
  root.add_discrete_param(
      'discrete_logdouble', [1e-5, 1e-2, 1e-1], scale_type=vz.ScaleType.LOG)
  root.add_discrete_param('discrete_int', [-1, 1, 2])
  return space
def metrics_objective_goals() -> Collection[vz.MetricInformation]:
  """Objective metrics: two unbounded and two with min/max bounds."""
  return [
      vz.MetricInformation('gain', goal=vz.ObjectiveMetricGoal.MAXIMIZE),
      vz.MetricInformation('loss', goal=vz.ObjectiveMetricGoal.MINIMIZE),
      vz.MetricInformation(
          'auc',
          goal=vz.ObjectiveMetricGoal.MAXIMIZE,
          min_value=0.0,
          max_value=1.0),
      vz.MetricInformation(
          'crossentropy', goal=vz.ObjectiveMetricGoal.MINIMIZE, min_value=0.0),
  ]
def metrics_all_unconstrained() -> Collection[vz.MetricInformation]:
  """Metrics without min/max bounds, including two safety-threshold metrics."""
  return [
      vz.MetricInformation('gain', goal=vz.ObjectiveMetricGoal.MAXIMIZE),
      vz.MetricInformation('loss', goal=vz.ObjectiveMetricGoal.MINIMIZE),
      vz.MetricInformation(
          'gt2', goal=vz.ObjectiveMetricGoal.MAXIMIZE, safety_threshold=2.0),
      vz.MetricInformation(
          'lt2', goal=vz.ObjectiveMetricGoal.MINIMIZE, safety_threshold=2.0),
  ]
def metrics_all_constrained() -> Collection[vz.MetricInformation]:
  """Metrics with min/max bounds, including two safety-threshold metrics."""
  return [
      vz.MetricInformation(
          'auc',
          goal=vz.ObjectiveMetricGoal.MAXIMIZE,
          min_value=0.0,
          max_value=1.0),
      vz.MetricInformation(
          'crossentropy', goal=vz.ObjectiveMetricGoal.MINIMIZE, min_value=0.0),
      vz.MetricInformation(
          'gt2',
          goal=vz.ObjectiveMetricGoal.MAXIMIZE,
          safety_threshold=2.0,
          min_value=-1.0,
          max_value=5.0),
      vz.MetricInformation(
          'lt2',
          goal=vz.ObjectiveMetricGoal.MINIMIZE,
          safety_threshold=2.0,
          min_value=-1.0,
          max_value=5.0),
  ]
| google/vizier | vizier/testing/test_studies.py | Python | apache-2.0 | 2,373 |
from bs4 import BeautifulSoup
class RunParameter_xml:
  '''
  A class for reading runparameters xml file from Illumina sequencing runs

  :param xml_file: A runparameters xml file
  '''
  def __init__(self, xml_file):
    self.xml_file = xml_file
    self._read_xml()

  def _read_xml(self):
    '''
    Internal method for reading the xml file using BS4

    :raises ValueError: If the xml file can't be read or parsed
    '''
    try:
      with open(self.xml_file, 'r') as fp:
        # html5lib lower-cases every tag name, which is why all the tag
        # lookups below (workflowtype, rfidsinfo, ...) use lowercase names.
        self._soup = BeautifulSoup(fp, "html5lib")
    except Exception as e:
      raise ValueError(
              'Failed to parse xml file {0}, error {1}'.\
              format(self.xml_file, e))

  def get_nova_workflow_type(self):
    '''
    A method for fetching the NovaSeq workflow type

    :returns: Workflow type string, or None if the tag is absent
    '''
    try:
      soup = self._soup
      workflowtype = None
      if soup.workflowtype:
        workflowtype = \
          soup.workflowtype.contents[0]
      return workflowtype
    except Exception as e:
      # Fix: include the underlying error instead of discarding it, matching
      # the error style of the other methods in this class.
      raise ValueError(
              'Failed to get NovaSeq workflow type, error: {0}'.format(e))

  def get_novaseq_flowcell(self):
    '''
    A method for fetching the flowcell id of a NovaSeq run

    :returns: Flowcell id string
    :raises ValueError: If the workflow type is not NovaSeqXp or the id is
                        missing
    '''
    try:
      soup = self._soup
      flowcell_id = None
      workflowtype = self.get_nova_workflow_type()
      if workflowtype is None or \
         workflowtype != 'NovaSeqXp':
        raise ValueError(
                'Missing NovaSeq workflow type: {0}'.\
                format(workflowtype))
      if soup.rfidsinfo and \
         soup.rfidsinfo.flowcellserialbarcode:
        # Bug fix: read the serial barcode tag; the original read
        # flowcellmode here and so fetched the flowcell mode, not the id.
        flowcell_id = \
          soup.rfidsinfo.flowcellserialbarcode.contents[0]
      if flowcell_id is None:
        raise ValueError(
                'Missing NovaSeq flowcell id, file: {0}'.\
                format(self.xml_file))
      # Bug fix: the original never returned the value it fetched.
      return flowcell_id
    except Exception as e:
      raise ValueError(
              'Failed to get NovaSeq flowcell id, error: {0}'.format(e))

  def get_novaseq_flowcell_mode(self):
    '''
    A method for fetching the flowcell mode of a NovaSeq run

    :returns: Flowcell mode string
    :raises ValueError: If the workflow type is not NovaSeqXp or the mode is
                        missing
    '''
    try:
      soup = self._soup
      flowcell_mode = None
      workflowtype = self.get_nova_workflow_type()
      if workflowtype is None or \
         workflowtype != 'NovaSeqXp':
        raise ValueError(
                'Missing NovaSeq workflow type: {0}'.\
                format(workflowtype))
      if soup.rfidsinfo and \
         soup.rfidsinfo.flowcellmode:
        flowcell_mode = \
          soup.rfidsinfo.flowcellmode.contents[0]
      if flowcell_mode is None:
        raise ValueError(
                'Missing NovaSeq flowcell mode, file: {0}'.\
                format(self.xml_file))
      # Bug fix: the original never returned the value it fetched.
      return flowcell_mode
    except Exception as e:
      raise ValueError(
              'Failed to get NovaSeq flowcell mode, error: {0}'.format(e))

  def get_hiseq_flowcell(self):
    '''
    A method for fetching flowcell details for hiseq run

    :returns: Flowcell info or None (for MiSeq, NextSeq or NovaSeq runs)
    '''
    try:
      soup = self._soup
      if soup.flowcell:
        flowcell = soup.flowcell.contents[0]
      else:
        flowcell = None
      return flowcell
    except Exception as e:
      raise ValueError(
              'Failed to get flowcell for hiseq, error: {0}'.\
              format(e))
| imperial-genomics-facility/data-management-python | igf_data/illumina/runparameters_xml.py | Python | apache-2.0 | 3,007 |
# Copyright 2022 moco_beta
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import threading
from .sysdic import entries, mmap_entries, connections, chardef, unknowns # type: ignore
from .dic import RAMDictionary, MMapDictionary, UnknownsDictionary
class SystemDictionary(RAMDictionary, UnknownsDictionary):
    """
    System dictionary class
    """
    # Lazily created singleton instance, guarded by __lock.
    __INSTANCE = None
    __lock = threading.Lock()
    @classmethod
    def instance(cls):
        # Double-checked locking: the unlocked fast path skips the lock once
        # the singleton exists; the locked re-check prevents two threads from
        # both constructing it.
        if not cls.__INSTANCE:
            with cls.__lock:
                if not cls.__INSTANCE:
                    cls.__INSTANCE = SystemDictionary(entries(), connections, chardef.DATA, unknowns.DATA)
        return cls.__INSTANCE
    def __init__(self, entries, connections, chardefs, unknowns):
        # Initialize both dictionary bases explicitly (no super() chaining).
        RAMDictionary.__init__(self, entries, connections)
        UnknownsDictionary.__init__(self, chardefs, unknowns)
class MMapSystemDictionary(MMapDictionary, UnknownsDictionary):
    """
    MMap System dictionary class
    """
    # Lazily created singleton instance, guarded by __lock.
    __INSTANCE = None
    __lock = threading.Lock()
    @classmethod
    def instance(cls):
        # Double-checked locking, mirroring SystemDictionary.instance().
        if not cls.__INSTANCE:
            with cls.__lock:
                if not cls.__INSTANCE:
                    cls.__INSTANCE = MMapSystemDictionary(mmap_entries(), connections, chardef.DATA, unknowns.DATA)
        return cls.__INSTANCE
    def __init__(self, mmap_entries, connections, chardefs, unknowns):
        # mmap_entries is unpacked positionally; presumably
        # (entries, index, data) as produced by sysdic.mmap_entries -- confirm.
        MMapDictionary.__init__(self, mmap_entries[0], mmap_entries[1], mmap_entries[2], connections)
        UnknownsDictionary.__init__(self, chardefs, unknowns)
| mocobeta/janome | janome/system_dic.py | Python | apache-2.0 | 2,058 |
#!/bin/python3.5
"""Demonstrates iteration over dicts, strings, lists, enumerate and zip."""
# Fix: Iterable lives in collections.abc; importing it from `collections`
# has been deprecated since Python 3.3 and fails on Python >= 3.10.
from collections.abc import Iterable

d = {'a': 1, 'b': 2, 'c': 3}
# Iterating a dict yields its keys.
for key in d:
    print(key)
for value in d.values():
    print(value)
# Strings iterate character by character.
for ch in 'ABC':
    print(ch)
# Iterable tells whether an object can be looped over.
print(isinstance('abc', Iterable))
print(isinstance(123, Iterable))
# enumerate yields (index, element) pairs.
for i, value in enumerate(['a', 'b', 'c']):
    print(i, value)
# Tuples unpack directly in the loop header.
for x, y in [(1, 1), (2, 4), (3, 9)]:
    print(x, y)
# Bug fix: the original discarded the result of list(range(1, 11)) and then
# printed the `list` builtin itself (showing "<class 'list'>").
numbers = list(range(1, 11))
print(numbers)
| dronly/python | lxfpython/advance/do_iter.py | Python | apache-2.0 | 382 |
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Training helper functions that are shared across tasks."""
import contextlib
import functools
import operator
import signal
import typing
from typing import Any, Callable, Dict, Iterable, Optional, Sequence, Tuple, Union
from absl import logging
import dataclasses
import flax
import gin
import jax
import jax.numpy as jnp
import numpy as np
import optax
from gfsa import jax_util
from gfsa.datasets import data_loading
@jax_util.register_dataclass_pytree
@dataclasses.dataclass
class ExampleWithMetadata:
  """Stores an example or batch of examples.

  Attributes:
    epoch: Integer representing the epoch that this example comes from.
    example_id: Integer ID uniquely identifying this example in the dataset.
    example: The example itself.
    mask: Array that is True for actual examples, False for padding.
    static_metadata: Metadata about this example or batch that should result in
      a new `jit` XLA computation (i.e. padded shapes).
  """
  epoch: Any
  example_id: Any
  example: Any
  # Defaults to a scalar True so an unbatched example counts as real data.
  mask: jax_util.NDArray = np.array(True)
  static_metadata: Any = None
@jax_util.register_dataclass_pytree
@dataclasses.dataclass
class RatioMetric:
  """A ratio, where numerator and denominator should be summed separately.

  Summing the parts separately (instead of averaging per-example ratios)
  yields the exact aggregate ratio across a batch or dataset.

  Attributes:
    numerator: Numerator of the metric.
    denominator: Denominator of the metric.
  """
  numerator: jax_util.NDArray
  denominator: jax_util.NDArray
# A metric is either a plain float/array (averaged directly) or a RatioMetric
# (whose parts are summed separately before dividing).
MetricValue = Union[float, jax_util.NDArray, RatioMetric]

# A loss function is a callable (model, example, static_metadata)
# -> (loss, metrics)
# pyformat: disable
LossFunWithMetrics = Callable[
    [Any, Any, Any],
    Tuple[jax_util.NDArray, Dict[str, MetricValue]]]
# pyformat: enable

# A validation function is a callable (replicated_model) -> (objective, metrics)
# where model is a tree of ShardedDeviceArrays, and objective is the value we
# want to make decrease.
ValidationFunction = Callable[[Any], Tuple[float, Dict[str, MetricValue]]]
def device_broadcast(x, num_devices):
  """Replicate a value onto every local device.

  Args:
    x: Value (or pytree of values) to replicate.
    num_devices: Number of devices to broadcast to.

  Returns:
    `x` with a new leading device axis of size `num_devices`.
  """
  # pmap over a dummy per-device argument; the closure over `x` makes every
  # device produce the same value.
  replicate = jax.pmap(lambda _: x)
  return replicate(jnp.arange(num_devices))
def _parallel_train_step(
    optimizer,
    batched_examples,
    static_batch_metadata,
    loss_fn,
    max_global_norm = None,
    **optimizer_hyper_params,
):
  """Train the model for one step in parallel across devices.

  Must run inside a pmap with axis_name="devices" (see
  `_build_parallel_train_step`), since it uses collective operations.

  Args:
    optimizer: Optimizer that tracks the model and parameter state. Should be
      replicated to each device, i.e. should contain ShardedDeviceArrays with a
      leading axis (num_devices, ...) but with the same content on each device.
    batched_examples: A structure of NDArrays representing a batch of examples.
      Should have two leading batch dimensions: (num_devices,
      batch_size_per_device, ...)
    static_batch_metadata: Metadata about this batch, which will be shared
      across all batched examples. Each value of this results in a separate
      XLA-compiled module.
    loss_fn: Task-specific non-batched loss function to apply. Should take the
      current model (optimizer.target) and an example from batched_examples, and
      return a tuple of the current loss (as a scalar) and a dictionary from
      string names to metric values (also scalars, or RatioMetrics).
    max_global_norm: Maximum global norm to clip gradients to. Should be a
      scalar, which will be broadcast automatically.
    **optimizer_hyper_params: Hyperparameters to pass to the optimizer's
      `apply_gradient` function, which will be broadcast across devices
      automatically.

  Returns:
    Tuple (updated_optimizer, grads_ok, metrics, agg_grads). Metrics will be as
    returned by loss_fn, with extra elements "loss", "gradient_global_norm",
    and (when clipping is enabled) "gradient_was_clipped". All metrics will be
    averaged across all elements of the batch. Optimizer, metrics, and
    agg_grads contain ShardedDeviceArrays that are identical across devices.
    grads_ok is a replicated bool ndarray that is True if the aggregated
    gradients were all finite.
  """

  def batched_loss_fn(model):
    """Apply loss function across a batch of examples."""
    loss, metrics = jax.vmap(loss_fn, (None, 0, None))(model, batched_examples,
                                                       static_batch_metadata)
    return jnp.mean(loss), metrics

  # Compute gradients of loss, along with metrics.
  (loss, metrics), grads = jax.value_and_grad(
      batched_loss_fn, has_aux=True)(
          optimizer.target)
  metrics["loss"] = loss
  # Exchange average gradients and metrics across devices.
  agg_grads = jax.lax.pmean(grads, "devices")
  agg_metrics = {}
  for k, v in metrics.items():
    if isinstance(v, RatioMetric):
      # Sum numerator and denominator separately so the ratio is exact.
      num = jax.lax.psum(jnp.sum(v.numerator), "devices")
      denom = jax.lax.psum(jnp.sum(v.denominator), "devices")
      new_value = num / denom
    else:
      # Use nanmean to aggregate bare floats.
      new_value = jnp.nanmean(jax.lax.all_gather(v, "devices"))
    agg_metrics[k] = new_value
  # Compute global norm and possibly clip.
  global_norm = optax.global_norm(agg_grads)
  agg_metrics["gradient_global_norm"] = global_norm
  if max_global_norm is not None:
    # Rescale all gradients so the global norm does not exceed the cap.
    should_clip = global_norm > max_global_norm
    agg_grads = jax.tree_map(
        lambda g: jnp.where(should_clip, g * max_global_norm / global_norm, g),
        agg_grads)
    agg_metrics["gradient_was_clipped"] = should_clip.astype("float32")
  # Check for non-finite gradients.
  grads_ok = jnp.all(
      jnp.stack([jnp.all(jnp.isfinite(x)) for x in jax.tree_leaves(agg_grads)]))
  # Apply updates.
  updated_optimizer = optimizer.apply_gradient(agg_grads,
                                               **optimizer_hyper_params)
  return updated_optimizer, grads_ok, agg_metrics, agg_grads
def _build_parallel_train_step():
  """Builds an accelerated version of the train step function.

  Returns:
    A pmapped wrapper around `_parallel_train_step` that accepts optimizer
    hyperparameters as keyword arguments. Optimizer and examples are mapped
    over their leading device axis; metadata, loss_fn, the clip norm, and
    the hyperparameters are broadcast.
  """
  # We need to wrap and unwrap so that the final function can be called with
  # keyword arguments, but we still maintain the proper axes.
  # argnums 2 and 3 (metadata, loss_fn) are static: each distinct value
  # triggers a separate XLA compilation.
  @functools.partial(
      jax.pmap,
      axis_name="devices",
      in_axes=(0, 0, None, None, None, None),
      static_broadcasted_argnums=(2, 3))
  def wrapped(optimizer, batched_examples, static_batch_metadata, loss_fn,
              max_global_norm, optimizer_hyper_params):
    return _parallel_train_step(optimizer, batched_examples,
                                static_batch_metadata, loss_fn, max_global_norm,
                                **optimizer_hyper_params)

  @functools.wraps(_parallel_train_step)
  def wrapper(optimizer, batched_examples, static_batch_metadata, loss_fn,
              max_global_norm, **optimizer_hyper_params):
    # Re-pack the keyword hyperparameters into a dict so pmap sees a
    # positional pytree argument.
    return wrapped(optimizer, batched_examples, static_batch_metadata, loss_fn,
                   max_global_norm, optimizer_hyper_params)

  return wrapper
# The primary version of the training step, with the associated jit cache.
# Built once at import time so every caller shares the compiled programs.
parallel_train_step = _build_parallel_train_step()
def warmup_train_step(
    optimizer,
    batched_example,
    static_batch_metadata,
    loss_fn,
    optimizer_is_replicated = False,
    profile = False,
    runner=None,
):
  """Run a fake train step to warm up JIT cache.

  Args:
    optimizer: Optimizer that tracks the model and parameter state.
    batched_example: A structure of NDArrays representing a batch of examples.
    static_batch_metadata: Metadata about the batch, which will be shared across
      all batched examples.
    loss_fn: Task-specific non-batched loss function to apply. Should take the
      current model (optimizer.target) and an example from batched_examples, and
      return a tuple of the current loss (as a scalar) and a dictionary from
      string names to metric values (also scalars).
    optimizer_is_replicated: Whether optimizer is already replicated.
    profile: Whether to enable profiling during warmup.
    runner: If profile=True, the runner to use when profiling.
  """
  if optimizer_is_replicated:
    replicated_optimizer = optimizer
  else:
    replicated_optimizer = device_broadcast(optimizer,
                                            jax.local_device_count())
  replicated_optimizer, batched_example = jax.tree_map(
      jax.device_put, (replicated_optimizer, batched_example))
  # Mirror the clip norm used by the real training loop (if configured) so
  # the warmed-up computation has the same signature.
  try:
    max_global_norm = gin.query_parameter(
        "train_util.training_loop.max_global_norm")
  except ValueError:
    max_global_norm = None

  def run_once():
    # The learning_rate value is arbitrary; it is passed only so the jit
    # cache is warmed with the signature used at training time.
    outputs = parallel_train_step(
        replicated_optimizer,
        batched_example,
        static_batch_metadata,
        loss_fn,
        max_global_norm=max_global_norm,
        learning_rate=0.0)
    # Force execution to completion so compilation actually happens now.
    jax.tree_map(lambda leaf: leaf.block_until_ready(), outputs)

  if profile:
    stats = runner.try_run_and_profile(run_once, catch_resource_exhausted=False)
    logging.info("Warmed up train step with stats: %s", stats)
  else:
    run_once()
    logging.info("Warmed up train step")
def build_averaging_validator(
    loss_fn,
    valid_iterator_factory,
    objective_metric_name = None,
    include_total_counts = False,
    prefetch = True,
):
  """Validate by computing averages over a validation set.

  Args:
    loss_fn: Loss function for the task.
    valid_iterator_factory: Constructs iterators of batched examples from the
      validation set, with two batch axes. To iterate over a fixed part of the
      validation set, consider using build_one_pass_iterator_factory. To
      randomly sample from a validation set, you can use something like
      `lambda: itertools.islice(validation_iterator, num_batches)`.
    objective_metric_name: Name of the metric that is the objective value.
      Defaults to "loss".
    include_total_counts: Whether to report numerator and denominator separately
      for RatioMetric objects, along with the "validation_total_example_count"
      metric.
    prefetch: Whether to prefetch validation examples.

  Returns:
    Validation function that runs loss_fn and aggregates the results, reporting
    the loss as the objective, and using sum to accumulate metrics.
  """
  if objective_metric_name is None:
    objective_metric_name = "loss"

  # Per-batch metric sums, computed on-device. argnum 3 (static_metadata) is
  # static, so each distinct metadata value compiles a new program.
  @functools.partial(
      jax.pmap, axis_name="devices", static_broadcasted_argnums=3)
  def parallel_metrics_batch(model, batched_examples, batch_mask,
                             static_metadata):
    loss, metrics = jax.vmap(loss_fn, (None, 0, None))(model, batched_examples,
                                                       static_metadata)
    metrics["loss"] = loss
    # Zero out padding entries so they do not contribute to the sums.
    metrics = jax.tree_map(
        lambda x: jnp.where(batch_mask, x, jnp.zeros_like(x)), metrics)
    metrics = jax.tree_map(lambda x: jax.lax.psum(jnp.sum(x), "devices"),
                           metrics)
    return metrics

  def validation_function(model):
    with contextlib.ExitStack() as exit_stack:
      valid_iterator = valid_iterator_factory()
      if prefetch:
        valid_iterator = exit_stack.enter_context(
            data_loading.ThreadedPrefetcher(valid_iterator, 4))
      # Running sums of every metric, plus the count of real (unmasked)
      # examples, accumulated on the host as Python floats.
      accumulated = None
      example_count = 0
      for batch in valid_iterator:
        results = parallel_metrics_batch(model, batch.example, batch.mask,
                                         batch.static_metadata)
        metrics = jax.tree_map(float, flax.jax_utils.unreplicate(results))
        metrics["epoch"] = np.sum(batch.epoch)
        if accumulated is None:
          accumulated = metrics
        else:
          accumulated = jax.tree_multimap(operator.add, accumulated, metrics)
        example_count += jnp.count_nonzero(batch.mask)

    assert example_count > 0, "Validation iterator must be nonempty"
    accumulated = typing.cast(Dict[str, Any], accumulated)
    final_metrics = {}
    for k, v in accumulated.items():
      if isinstance(v, RatioMetric):
        # Exact ratio from separately-summed parts.
        final_metrics[k] = v.numerator / v.denominator
        if include_total_counts:
          final_metrics[k + "_numerator"] = v.numerator
          final_metrics[k + "_denominator"] = v.denominator
      else:
        final_metrics[k] = v / example_count
    objective = final_metrics[objective_metric_name]
    if include_total_counts:
      final_metrics["validation_total_example_count"] = example_count
    return (objective, final_metrics)

  return validation_function
@contextlib.contextmanager
def catch_interrupts_once(callback,
                          catch_signals = (signal.SIGINT,
                                           signal.SIGABRT)):
  # pylint: disable=g-doc-return-or-yield
  """Context manager to catch interrupt signals.

  Only catches the first signal sent, so that repeated interrupts will kill the
  job as normal.

  Args:
    callback: Function to run when the signal is caught the first time.
    catch_signals: Signals to catch.

  Returns:
    A context manager that will catch interrupts inside the block.
  """
  # pylint: enable=g-doc-return-or-yield
  known_signals = {
      signal.SIGINT: "SIGINT",
      signal.SIGABRT: "SIGABRT",
  }

  def _handler(signal_number, frame):
    del frame  # Unused.
    logging.warning("Caught interrupt signal %s",
                    known_signals.get(signal_number, signal_number))
    callback(signal_number)
    # Restore immediately so a second signal gets the original behavior.
    _restore_handlers()

  original_handlers = {}
  for signal_number in catch_signals:
    original_handlers[signal_number] = signal.signal(signal_number, _handler)
  already_restored = False

  def _restore_handlers():
    # Idempotent: may be called both by _handler and by the finally block.
    nonlocal already_restored
    if already_restored:
      return
    else:
      already_restored = True
    for signal_number in catch_signals:
      current_handler = signal.signal(signal_number,
                                      original_handlers[signal_number])
      if current_handler is not _handler:
        # Someone else replaced our handler while we were active; warn but
        # still restore the original.
        logging.error(
            "Unexpected active signal handler %s for %s; "
            "expected the signal hander from "
            "`catch_interrupts_once`! Restored to %s anyways.",
            current_handler, known_signals.get(signal_number, signal_number),
            original_handlers[signal_number])

  try:
    yield
  finally:
    _restore_handlers()
| google-research/google-research | gfsa/training/train_util.py | Python | apache-2.0 | 14,842 |
# Webhooks for external integrations.
from __future__ import absolute_import
from typing import Any, Dict, List, Optional, Text, Tuple
from django.utils.translation import ugettext as _
from django.db.models import Q
from django.conf import settings
from django.http import HttpRequest, HttpResponse
from zerver.models import UserProfile, get_user_profile_by_email, Realm
from zerver.lib.actions import check_send_message
from zerver.lib.response import json_success, json_error
from zerver.decorator import api_key_only_webhook_view, has_request_variables, REQ
import logging
import re
import ujson
# Webhook events we deliberately drop: comment activity also arrives via the
# corresponding issue_updated event, so handling these would duplicate
# messages.
IGNORED_EVENTS = [
    'comment_created',  # we handle issue_update event instead
    'comment_updated',  # we handle issue_update event instead
    'comment_deleted',  # we handle issue_update event instead
]
def guess_zulip_user_from_jira(jira_username, realm):
    # type: (Text, Realm) -> Optional[UserProfile]
    """Best-effort mapping of a JIRA username onto a Zulip user.

    Matches the JIRA username against full name, short name, or the
    beginning of the email address, and returns the lowest-id active
    match in the realm, or None if nobody matches.
    """
    candidates = UserProfile.objects.filter(
        Q(full_name__iexact=jira_username) |
        Q(short_name__iexact=jira_username) |
        Q(email__istartswith=jira_username),
        is_active=True,
        realm=realm).order_by("id")
    try:
        # Indexing the lazy queryset issues the actual database query.
        return candidates[0]
    except IndexError:
        return None
def convert_jira_markup(content, realm):
    # type: (Text, Realm) -> Text
    """Convert a subset of JIRA wiki markup into Zulip-flavored Markdown.

    Handles bold, monospace, block quotes, {quote}/{noformat}/{code}
    blocks, bare and titled links, and (only when `realm` is given)
    [~username] mentions; with a falsy realm, mentions are left as-is.
    """
    # Jira uses *word* for bold, we use **word**
    content = re.sub(r'\*([^\*]+)\*', r'**\1**', content)

    # Jira uses {{word}} for monospacing, we use `word`
    content = re.sub(r'{{([^\*]+?)}}', r'`\1`', content)

    # Starting a line with bq. block quotes that line
    content = re.sub(r'bq\. (.*)', r'> \1', content)

    # Wrapping a block of code in {quote}stuff{quote} also block-quotes it
    quote_re = re.compile(r'{quote}(.*?){quote}', re.DOTALL)
    content = re.sub(quote_re, r'~~~ quote\n\1\n~~~', content)

    # {noformat}stuff{noformat} blocks are just code blocks with no
    # syntax highlighting
    noformat_re = re.compile(r'{noformat}(.*?){noformat}', re.DOTALL)
    content = re.sub(noformat_re, r'~~~\n\1\n~~~', content)

    # Code blocks are delineated by {code[: lang]} {code}
    code_re = re.compile(r'{code[^\n]*}(.*?){code}', re.DOTALL)
    content = re.sub(code_re, r'~~~\n\1\n~~~', content)

    # Links are of form: [https://www.google.com] or [Link Title|https://www.google.com]
    # In order to support both forms, we don't match a | in bare links
    content = re.sub(r'\[([^\|~]+?)\]', r'[\1](\1)', content)

    # Full links which have a | are converted into a better markdown link
    full_link_re = re.compile(r'\[(?:(?P<title>[^|~]+)\|)(?P<url>.*)\]')
    content = re.sub(full_link_re, r'[\g<title>](\g<url>)', content)

    # Try to convert a JIRA user mention of format [~username] into a
    # Zulip user mention. We don't know the email, just the JIRA username,
    # so we naively guess at their Zulip account using this
    if realm:
        # BUG FIX: this pattern must be a raw string; u'\[~(.*?)\]' contains
        # the invalid escape sequence \[ (DeprecationWarning today, a
        # SyntaxError in future Python versions).
        mention_re = re.compile(r'\[~(.*?)\]')
        for username in mention_re.findall(content):
            # Try to look up username
            user_profile = guess_zulip_user_from_jira(username, realm)
            if user_profile:
                replacement = u"**{}**".format(user_profile.full_name)
            else:
                replacement = u"**{}**".format(username)

            content = content.replace("[~{}]".format(username,), replacement)

    return content
def get_in(payload, keys, default=''):
    # type: (Dict[str, Any], List[str], Text) -> Any
    """Walk nested dicts following `keys`; return `default` on any miss."""
    current = payload
    for key in keys:
        try:
            current = current[key]
        except (AttributeError, KeyError, TypeError):
            # Missing key, wrong container type, or non-subscriptable value.
            return default
    return current
def get_issue_string(payload, issue_id=None):
    # type: (Dict[str, Any], Text) -> Text
    """Return a Markdown link to the issue, or the bare issue id.

    Guesses the URL as it is not specified in the payload: we assume there
    is a /browse/BUG-### page derived from the REST url of the issue itself.
    Falls back to the plain issue id if the REST url does not match.
    """
    if issue_id is None:
        issue_id = get_issue_id(payload)
    # BUG FIX: the pattern must be a raw string; "(.*)\/rest\/api/.*"
    # contains the invalid escape sequence \/ (DeprecationWarning today,
    # a SyntaxError in future Python versions). '/' needs no escaping.
    base_url = re.match(r"(.*)/rest/api/.*", get_in(payload, ['issue', 'self']))
    if base_url and len(base_url.groups()):
        return u"[{}]({}/browse/{})".format(issue_id, base_url.group(1), issue_id)
    else:
        return issue_id
def get_assignee_mention(assignee_email):
    # type: (Text) -> Text
    """Return a bold mention for the assignee, or '' when no email given."""
    if assignee_email == '':
        return ''
    try:
        display_name = get_user_profile_by_email(assignee_email).full_name
    except UserProfile.DoesNotExist:
        # No Zulip account for this email; fall back to the raw address.
        display_name = assignee_email
    return u"**{}**".format(display_name)
def get_issue_author(payload):
    # type: (Dict[str, Any]) -> Text
    """Return the display name of the user who triggered the event."""
    return get_in(payload, ['user', 'displayName'])
def get_issue_id(payload):
    # type: (Dict[str, Any]) -> Text
    """Return the issue key, e.g. "BUG-123"."""
    return get_in(payload, ['issue', 'key'])
def get_issue_title(payload):
    # type: (Dict[str, Any]) -> Text
    """Return the issue's one-line summary field."""
    return get_in(payload, ['issue', 'fields', 'summary'])
def get_issue_subject(payload):
    # type: (Dict[str, Any]) -> Text
    """Return the stream topic, formatted as "<issue key>: <summary>"."""
    return u"{}: {}".format(get_issue_id(payload), get_issue_title(payload))
def get_sub_event_for_update_issue(payload):
    # type: (Dict[str, Any]) -> Text
    """Classify an issue_updated payload when JIRA omits the sub-event name."""
    named_sub_event = payload.get('issue_event_type_name', '')
    if named_sub_event != '':
        return named_sub_event
    # Older JIRA payloads omit the name; infer it from payload contents.
    if payload.get('comment'):
        return 'issue_commented'
    if payload.get('transition'):
        return 'issue_transited'
    return ''
def get_event_type(payload):
    # type: (Dict[str, Any]) -> Optional[Text]
    """Return the webhook event name, inferring issue_updated for transitions."""
    event = payload.get('webhookEvent')
    if event is not None:
        return event
    # Transition-only payloads carry no webhookEvent; treat them as updates.
    if payload.get('transition'):
        return 'jira:issue_updated'
    return None
def add_change_info(content, field, from_field, to_field):
    # type: (Text, Text, Text, Text) -> Text
    """Append a "* Changed <field> from <old> to <new>" bullet to content."""
    pieces = [u"* Changed {}".format(field)]
    if from_field:
        pieces.append(u" from **{}**".format(from_field))
    if to_field:
        # Caller bolds to_field itself when appropriate.
        pieces.append(u" to {}\n".format(to_field))
    return content + u"".join(pieces)
def handle_updated_issue_event(payload, user_profile):
    # Reassigned, commented, reopened, and resolved events are all bundled
    # into this one 'updated' event type, so we try to extract the meaningful
    # event that happened
    # type: (Dict[str, Any], UserProfile) -> Text
    issue_id = get_in(payload, ['issue', 'key'])
    issue = get_issue_string(payload, issue_id)

    assignee_email = get_in(payload, ['issue', 'fields', 'assignee', 'emailAddress'], '')
    assignee_mention = get_assignee_mention(assignee_email)

    if assignee_mention != '':
        assignee_blurb = u" (assigned to {})".format(assignee_mention)
    else:
        assignee_blurb = ''

    sub_event = get_sub_event_for_update_issue(payload)
    if 'comment' in sub_event:
        # Comment added / edited / deleted: render the comment body.
        if sub_event == 'issue_commented':
            verb = 'added comment to'
        elif sub_event == 'issue_comment_edited':
            verb = 'edited comment on'
        else:
            verb = 'deleted comment from'
        content = u"{} **{}** {}{}".format(get_issue_author(payload), verb, issue, assignee_blurb)
        comment = get_in(payload, ['comment', 'body'])
        if comment:
            comment = convert_jira_markup(comment, user_profile.realm)
            content = u"{}:\n\n\n{}\n".format(content, comment)
    else:
        # Generic update: list each changed field from the changelog, or
        # fall back to the transition block for status-only changes.
        content = u"{} **updated** {}{}:\n\n".format(get_issue_author(payload), issue, assignee_blurb)
        changelog = get_in(payload, ['changelog'])

        if changelog != '':
            # Use the changelog to display the changes, whitelist types we accept
            items = changelog.get('items')
            for item in items:
                field = item.get('field')

                if field == 'assignee' and assignee_mention != '':
                    target_field_string = assignee_mention
                else:
                    # Convert a user's target to a @-mention if possible
                    target_field_string = u"**{}**".format(item.get('toString'))

                from_field_string = item.get('fromString')
                if target_field_string or from_field_string:
                    content = add_change_info(content, field, from_field_string, target_field_string)

        elif sub_event == 'issue_transited':
            from_field_string = get_in(payload, ['transition', 'from_status'])
            target_field_string = u'**{}**'.format(get_in(payload, ['transition', 'to_status']))
            if target_field_string or from_field_string:
                content = add_change_info(content, 'status', from_field_string, target_field_string)

    return content
def handle_created_issue_event(payload):
    # type: (Dict[str, Any]) -> Text
    """Build the message body for a jira:issue_created event."""
    author = get_issue_author(payload)
    issue = get_issue_string(payload)
    priority = get_in(payload, ['issue', 'fields', 'priority', 'name'])
    assignee = get_in(payload, ['issue', 'fields', 'assignee', 'displayName'], 'no one')
    title = get_issue_title(payload)
    return u"{} **created** {} priority {}, assigned to **{}**:\n\n> {}".format(
        author, issue, priority, assignee, title)
def handle_deleted_issue_event(payload):
    # type: (Dict[str, Any]) -> Text
    """Build the message body for a jira:issue_deleted event."""
    author = get_issue_author(payload)
    issue = get_issue_string(payload)
    return u"{} **deleted** {}!".format(author, issue)
@api_key_only_webhook_view("JIRA")
@has_request_variables
def api_jira_webhook(request, user_profile,
                     payload=REQ(argument_type='body'),
                     stream=REQ(default='jira')):
    # type: (HttpRequest, UserProfile, Dict[str, Any], Text) -> HttpResponse
    """Webhook endpoint for JIRA: dispatch on the webhook event type."""
    event = get_event_type(payload)
    if event == 'jira:issue_created':
        subject = get_issue_subject(payload)
        content = handle_created_issue_event(payload)
    elif event == 'jira:issue_deleted':
        subject = get_issue_subject(payload)
        content = handle_deleted_issue_event(payload)
    elif event == 'jira:issue_updated':
        subject = get_issue_subject(payload)
        content = handle_updated_issue_event(payload, user_profile)
    elif event in IGNORED_EVENTS:
        # Deliberately dropped events (comment_* arrive via issue_updated).
        return json_success()
    else:
        if event is None:
            if not settings.TEST_SUITE:
                message = u"Got JIRA event with None event type: {}".format(payload)
                logging.warning(message)
            return json_error(_("Event is not given by JIRA"))
        else:
            # Unknown event type: log (outside tests) but reply success so
            # JIRA does not retry delivery.
            if not settings.TEST_SUITE:
                logging.warning("Got JIRA event type we don't support: {}".format(event))
            return json_success()

    check_send_message(user_profile, request.client, "stream", [stream], subject, content)
    return json_success()
| j831/zulip | zerver/webhooks/jira/view.py | Python | apache-2.0 | 10,755 |
# -*- coding: utf-8 -*-
#
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MeCab based Segmenter.
Word segmenter module powered by `MeCab <https://github.com/taku910/mecab>`_.
You need to install MeCab to use this segmenter.
The easiest way to install MeCab is to run :code:`make install-mecab`. The
script will download source codes from GitHub and build the tool. It also setup
`IPAdic <https://ja.osdn.net/projects/ipadic/>`_, a standard dictionary for
Japanese.
"""
import logging
import sys
import six
from .segmenter import Segmenter
from .chunk import Chunk, ChunkList
# POS tags / chunk labels that attach to a neighboring chunk: FORWARD sets
# depend on the following chunk, BACKWARD sets on the preceding one
# (Japanese particles and auxiliary verbs attach to the word before them).
_DEPENDENT_POS_FORWARD = set()
_DEPENDENT_POS_BACKWARD = {u'助詞', u'助動詞'}
_DEPENDENT_LABEL_FORWARD = set()
_DEPENDENT_LABEL_BACKWARD = {u'非自立'}
class MecabSegmenter(Segmenter):
  """MeCab Segmenter.

  Word segmenter backed by the MeCab morphological analyzer.

  Attributes:
    tagger (MeCab.Tagger): MeCab Tagger to parse the input sentence.
    supported_languages (set of str): Set of supported languages' codes.
  """

  supported_languages = {'ja'}

  def __init__(self):
    try:
      import MeCab
      # ChaSen output format: tab-separated columns, with the POS chain in
      # the fourth column (parsed in segment() below).
      self.tagger = MeCab.Tagger('-Ochasen')
    except ImportError:
      logging.error(
          ('mecab-python3 is not installed. Install the module by running '
           '`$ pip install mecab-python3`. If MeCab is not installed in your '
           'system yet, run `$ make install-mecab` instead.'))
      sys.exit(1)

  def segment(self, source, language=None):
    """Returns a chunk list from the given sentence.

    Args:
      source (str): Source string to segment.
      language (str, optional): A language code.

    Returns:
      A chunk list. (:obj:`budou.chunk.ChunkList`)

    Raises:
      ValueError: If :code:`language` is given and it is not included in
        :code:`supported_languages`.
    """
    # Idiom fix: `language not in ...` instead of `not language in ...`.
    if language and language not in self.supported_languages:
      raise ValueError(
          'Language {} is not supported by MeCab segmenter'.format(language))

    chunks = ChunkList()
    seek = 0
    source_str = source.encode('utf-8') if six.PY2 else source
    # Drop the trailing 'EOS' marker line and the final empty line.
    results = self.tagger.parse(source_str).split('\n')[:-2]
    for row in results:
      if six.PY2:
        row = row.decode('utf-8')
      token = row.split('\t')
      word = token[0]
      labels = token[3].split('-')
      pos = labels[0]
      label = labels[1] if len(labels) > 1 else None
      # MeCab swallows whitespace; when the surface form does not line up
      # with the source position, re-insert a single space chunk.
      if source[seek: seek + len(word)] != word:
        assert source[seek] == ' '
        assert source[seek + 1: seek + len(word) + 1] == word
        chunks.append(Chunk.space())
        seek += 1
      dependency = None
      if pos in _DEPENDENT_POS_FORWARD:
        dependency = True
      elif pos in _DEPENDENT_POS_BACKWARD:
        dependency = False
      elif label in _DEPENDENT_LABEL_FORWARD:
        dependency = True
      elif label in _DEPENDENT_LABEL_BACKWARD:
        dependency = False
      chunk = Chunk(word, pos=pos, label=label, dependency=dependency)
      if chunk.is_punct():
        # Opening punctuation attaches forward, closing attaches backward.
        chunk.dependency = chunk.is_open_punct()
      chunks.append(chunk)
      seek += len(word)
    chunks.resolve_dependencies()
    return chunks
| google/budou | budou/mecabsegmenter.py | Python | apache-2.0 | 3,631 |
# Django settings for ibistu_serverV2_webserver project.

# NOTE(review): DEBUG must be False in production deployments; leaving it on
# exposes stack traces and settings to end users.
DEBUG = True
TEMPLATE_DEBUG = DEBUG

ADMINS = (
    ('admin', 'c0710204@gmail.com'),
)

MANAGERS = ADMINS

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
        'NAME': 'admin.db',                     # Or path to database file if using sqlite3.
        # The following settings are not used with sqlite3:
        'USER': '',
        'PASSWORD': '',
        'HOST': '', # Empty for localhost through domain sockets or '127.0.0.1' for localhost through TCP.
        'PORT': '', # Set to empty string for default.
    }
}

# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = []

# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'America/Chicago'

# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'

SITE_ID = 1

# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True

# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True

# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True

# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/www/example.com/media/"
MEDIA_ROOT = ''

# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://example.com/media/", "http://media.example.com/"
MEDIA_URL = ''

# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/var/www/example.com/static/"
STATIC_ROOT = ''

# URL prefix for static files.
# Example: "http://example.com/static/", "http://static.example.com/"
STATIC_URL = '/static/'

# Additional locations of static files
STATICFILES_DIRS = (
    # Put strings here, like "/home/html/static" or "C:/www/django/static".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
)

# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
#    'django.contrib.staticfiles.finders.DefaultStorageFinder',
)

# Make this unique, and don't share it with anybody.
# NOTE(review): committing a hard-coded SECRET_KEY to version control is a
# security risk; load it from the environment or a non-tracked file instead.
SECRET_KEY = '_tnl@7cyqlgnwe!@2ptc56)15+mzpxk4uz!c+xy8#b(w^%0c-2'

# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.Loader',
    'django.template.loaders.app_directories.Loader',
#     'django.template.loaders.eggs.Loader',
)

MIDDLEWARE_CLASSES = (
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    # Uncomment the next line for simple clickjacking protection:
    # 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)

ROOT_URLCONF = 'ibistu_serverV2_webserver.urls'

# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'ibistu_serverV2_webserver.wsgi.application'

TEMPLATE_DIRS = (
    # Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
)

INSTALLED_APPS = (
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # Uncomment the next line to enable the admin:
    'django.contrib.admin',
    # Uncomment the next line to enable admin documentation:
    'django.contrib.admindocs',
)

SESSION_SERIALIZER = 'django.contrib.sessions.serializers.JSONSerializer'

# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'filters': {
        'require_debug_false': {
            '()': 'django.utils.log.RequireDebugFalse'
        }
    },
    'handlers': {
        'mail_admins': {
            'level': 'ERROR',
            'filters': ['require_debug_false'],
            'class': 'django.utils.log.AdminEmailHandler'
        }
    },
    'loggers': {
        'django.request': {
            'handlers': ['mail_admins'],
            'level': 'ERROR',
            'propagate': True,
        },
    }
}
| c0710204/Ibistu-serverV2 | webserver/ibistu_serverV2_webserver/ibistu_serverV2_webserver/settings.py | Python | apache-2.0 | 5,483 |
from ..PulsePrimitives import *
from ..Compiler import compile_to_hardware
from ..PulseSequencePlotter import plot_pulse_files
from .helpers import create_cal_seqs
from itertools import product
import operator
from ..ControlFlow import *
from ..TdmInstructions import *
from functools import reduce
from typing import Iterable, Union, Tuple
@qfunction
def qreset(qubits: Channels.LogicalChannel,
           signVec: Tuple[bool],
           measDelay: Union[int,float],
           buf: Union[int,float],
           reg_size: int = None,
           TDM_map: Iterable[Union[int,bool]] = None) -> list:
    """
    For each qubit, build the set of feedback actions to perform when
    receiving a zero or one in the comparison register

    Parameters
    ----------
    qubits : Channels.LogicalChannel tuple
        A hashable (immutable) tuple of qubits to reset
    signVec : boolean tuple
        A hashable (immutable) tuple of binary values from the compairison
        register indicating the measured state of each qubit in the register
        before reset.
    measDelay : int/float
        Delay after measurement before performing the LOADCMP comparison with
        value in the register (seconds)
    buf : int/float
        Wait time between (seconds)
    reg_size : int, optional
        Size of the register in number of qubits, including those not reset.
        Default value is set to len(qubits).
    TDM_map : bit mask, optional
        Map each qubit to a TDM digital input. If True, arguments reset a
        subset of the qubit register (see Reset).
        Default: np.array(qN, qN-1, ..., q1) from MSB to LSB.

    Returns
    -------
    seq : QGL.ControlFlow.Call
        QGL sequence with the qreset calls

    Examples
    --------
    >>> qreset((q1, q2), (0,1), 2e-6, 2e-6);
    CALL(H:)
    """
    # BUG FIX: the TDM_map default was previously only assigned when reg_size
    # was not provided, so passing reg_size explicitly with TDM_map=None
    # crashed below at np.array(TDM_map). Handle the two defaults separately.
    if reg_size is None:
        reg_size = len(qubits)
    if TDM_map is None:
        # Default map: qubit N -> TDM input N, i.e. (qN, ..., q1) MSB to LSB.
        TDM_map = np.arange(reg_size, 0, -1)

    # For each qubit, the pair of gates to apply for (measured 0, measured 1);
    # signVec inverts the pair when the comparison logic is flipped.
    FbGates = []
    for ct, q in enumerate(qubits):
        if signVec[ct] == 0:
            FbGates.append([gate(q) for gate in [Id, X]])
        else: # inverted logic
            FbGates.append([gate(q) for gate in [X, Id]])
    # One simultaneous-gate product per possible measured bit pattern.
    FbSeq = [reduce(operator.mul, x) for x in product(*FbGates)]

    # Wait out the measurement, load the comparison register, then pause.
    seq = [Id(qubits[0], measDelay), qwait(kind='CMP'), Id(qubits[0], buf)]
    # create a branch for each possible comparison value
    for ct in range(2**reg_size):
        # duplicate branches for the irrelevant results
        # if reg_size > len(TDM_map)
        meas_result = [(ct & TDM_bit)>0 for TDM_bit in 2**(np.array(TDM_map)-1)]
        branch_idx = sum([t*2**(len(qubits)-ind-1)
                          for ind,t in enumerate((meas_result))])
        seq += qif(ct, [FbSeq[branch_idx]])

    return seq
def Reset(qubits: Iterable[Channels.LogicalChannel],
          measDelay: Union[int,float]=1e-6,
          signVec: Tuple[bool] = None,
          doubleRound: bool = True,
          buf: Union[int,float] = 20e-9,
          showPlot: bool = False,
          measChans: Channels.LogicalChannel = None,
          add_cals: bool = True,
          calRepeats: int = 2,
          reg_size: int = None,
          TDM_map: Iterable[Union[int,bool]]=None) -> str:
    """
    Preparation, simultaneous reset, and measurement of an arbitrary number
    of qubits.

    Parameters
    ----------
    qubits : Channels.LogicalChannel tuple
        A hashable (immutable) tuple of qubits to reset
    measDelay : int/float, optional
        Delay after measurement before performing the LOADCMP comparison with
        value in the register (seconds)
    signVec : boolean tuple, optional
        Conditions for feedback. Tuple of 0 (flip if signal is above
        threshold) and 1 (flip if below) for each qubit.
        Default = 0 for all qubits.
    doubleRound : boolean, optional
        If true, do two rounds of feedback
    buf : int/float, optional
        Wait time between operations (seconds)
    showPlot : boolean, optional
        Whether to plot
    measChans : LogicalChannel tuple, optional
        A hashable (immutable) tuple of qubits to be measured.
        Defaults to `qubits`.
    add_cals : boolean, optional
        Whether to append calibration pulses to the end of the sequence
    calRepeats : int, optional
        How many times to repeat calibration scalings (default 2)
    reg_size : int, optional
        Size of the register in number of qubits, including those not reset.
        Default value is set to len(qubits).
    TDM_map : bit mask, optional
        Map each qubit to a TDM digital input. If True, arguments reset a
        subset of the qubit register (see Reset).
        Default: np.array(qN, qN-1, ..., q1) from MSB to LSB.

    Returns
    -------
    metafile : string
        Path to a json metafile with details about the sequences and paths to
        compiled machine files

    Examples
    --------
    >>> Reset((q1, q2));
    Compiled 12 sequences.
    >>> mf
    '/path/to/exp/exp-meta.json'
    """
    if measChans is None:
        measChans = qubits
    # PEP 8 / correctness fix: compare to None with `is`, not `==` (E711).
    if signVec is None:
        signVec = (0, ) * len(qubits)
    # One sequence per computational-basis preparation of the register,
    # each followed by a conditional-reset block.
    seqs = [prep + [qreset(qubits,
                           signVec,
                           measDelay,
                           buf,
                           reg_size=reg_size,
                           TDM_map=TDM_map)]
            for prep in create_cal_seqs(qubits, 1)]
    measBlock = reduce(operator.mul, [MEAS(q) for q in qubits])
    if doubleRound:
        # Second round of feedback: measure again, then reset again.
        for seq in seqs:
            seq += [measBlock]
            seq.append(qreset(qubits,
                              signVec,
                              measDelay,
                              buf,
                              reg_size=reg_size,
                              TDM_map=TDM_map))
    # add final measurement
    for seq in seqs:
        seq += [measBlock, Id(qubits[0], measDelay), qwait(kind='CMP')]
    if add_cals:
        seqs += create_cal_seqs(qubits,
                                calRepeats,
                                measChans=measChans,
                                waitcmp=True)
    metafile = compile_to_hardware(seqs, 'Reset/Reset')
    if showPlot:
        plot_pulse_files(metafile)
    return metafile
# do not make it a subroutine for now
def BitFlip3(data_qs: Iterable[Channels.LogicalChannel],
             ancilla_qs: Iterable[Channels.LogicalChannel],
             theta: Union[int,float] = None,
             phi: Union[int,float] = None,
             nrounds: int = 1,
             meas_delay: Union[int,float] = 1e-6,
             add_cals: bool = False,
             calRepeats: int = 2) -> str:
    """
    Encoding on 3-qubit bit-flip code, followed by n rounds of syndrome
    detection, and final correction using the n results.

    Parameters
    ----------
    data_qs : Channels.LogicalChannel tuple
        A hashable (immutable) tuple of qubits of the 3 code qubits
    ancilla_qs : Channels.LogicalChannel tuple
        A hashable (immutable) tuple of qubits of the 2 syndrome qubits
    theta : int/float, optional
        Longitudinal rotation angle for the encoded state (radians).
        Default = None.
    phi : int/float, optional
        Azimuthal rotation angle for the encoded state (radians).
        Default = None.
    nrounds: int, optional
        Number of consecutive measurements
    meas_delay : int/float, optional
        Delay between syndrome check rounds (seconds)
    add_cals : boolean, optional
        Whether to append calibration pulses to the end of the sequence
    calRepeats : int, optional
        How many times to repeat calibration scalings (default 2)

    Returns
    -------
    metafile : string
        Path to a json metafile with details about the sequences and paths to
        compiled machine files

    Examples
    --------
    >>> mf = BitFlip3((q1, q2, q3), (q4, q5));
    Compiled 12 sequences.
    >>> mf
    '/path/to/exp/exp-meta.json'
    """
    if len(data_qs) != 3 or len(ancilla_qs) != 2:
        raise Exception("Wrong number of qubits")
    # TDM setup: decoder rounds, syndrome register (addr 10), result (addr 11).
    seqs = [
        DecodeSetRounds(1, 0, nrounds),
        Invalidate(10, 2*nrounds),
        Invalidate(11, 0x1)]
    # encode single-qubit state into 3 qubits
    if theta and phi:
        seqs += [Utheta(data_qs[1], theta, phi),
                 CNOT(data_qs[1], data_qs[0]),
                 CNOT(data_qs[1], data_qs[2])]
    # multiple rounds of syndrome measurements
    for n in range(nrounds):
        # NOTE(review): the trailing commas below wrap each list in a tuple,
        # so each round appends a *list* as a single sequence entry —
        # presumably relying on downstream flattening; confirm intent.
        seqs += [CNOT(data_qs[0], ancilla_qs[0]) * CNOT(data_qs[1], ancilla_qs[1])],
        seqs += [CNOT(data_qs[1], ancilla_qs[0]) * CNOT(data_qs[2], ancilla_qs[1])],
        seqs += [MEASA(ancilla_qs[0], maddr=(10, 2*n)) *
                 MEASA(ancilla_qs[1], maddr=(10, 2*n+1)),
                 Id(ancilla_qs[0], meas_delay),
                 # virtual msmt's just to keep the number of segments
                 # uniform across digitizer channels
                 MEAS(data_qs[0], amp=0) *
                 MEAS(data_qs[1], amp=0) *
                 MEAS(data_qs[2], amp=0)]
    seqs += Decode(10, 11, 2*nrounds)
    seqs += qwait("RAM", 11)
    seqs += [MEAS(data_qs[0]) *
             MEAS(data_qs[1]) *
             MEAS(data_qs[2]) *
             # virtual msmt's
             MEAS(ancilla_qs[0], amp=0) *
             MEAS(ancilla_qs[1], amp=0)]
    # apply corrective pulses depending on the decoder result
    FbGates = []
    for q in data_qs:
        FbGates.append([gate(q) for gate in [Id, X]])
    FbSeq = [reduce(operator.mul, x) for x in product(*FbGates)]
    for k in range(8):
        seqs += qif(k, [FbSeq[k]])
    if add_cals:
        # Bug fix: `qubits` was undefined here, raising NameError whenever
        # add_cals=True. Calibrate all measured channels (data + ancilla).
        # TODO(review): confirm the intended calibration qubit set.
        seqs += create_cal_seqs(tuple(data_qs) + tuple(ancilla_qs),
                                calRepeats)
    metafile = compile_to_hardware(seqs, 'BitFlip/BitFlip', tdm_seq=True)
    return metafile
def MajorityVoteN(qubits: Iterable[Channels.LogicalChannel],
                  nrounds: int,
                  prep: Iterable[bool] = [],
                  meas_delay: float = 1e-6,
                  add_cals: bool = False,
                  calRepeats: int = 2) -> str:
    """
    Majority vote across multiple measurement results (same or different qubits)

    Parameters
    ----------
    qubits : Channels.LogicalChannel tuple
        A hashable (immutable) tuple of qubits for majority vote
    nrounds: int
        Number of consecutive measurements
    prep : boolean iterable, optional
        Array of binary values mapping X(q) pulses to the list of qubits
        proivided. Ex: (q1,q2), prep=(1,0) -> would apply a pi pulse to q1
        before the majority vote measurement. Default = []
    measDelay : int/float, optional
        Delay between syndrome check rounds (seconds)
    add_cals : boolean, optional
        Whether to append calibration pulses to the end of the sequence
    calRepeats : int, optional
        How many times to repeat calibration scalings (default 2)

    Returns
    -------
    metafile : string
        Path to a json metafile with details about the sequences and paths to
        compiled machine files

    Examples
    --------
    >>> mf = MajorityVoteN((q1, q2, q3), 10);
    Compiled 1 sequences.
    o INVALIDATE(channel=None, addr=0x1, mask=0x0)
    o WRITEADDR(channel=None, addr=0x1, value=0xfffff)
    MAJORITYMASK(in_addr=1, out_addr=0)
    o INVALIDATE(channel=None, addr=0xa, mask=0xfffff)
    o INVALIDATE(channel=None, addr=0xb, mask=0x1)
    MAJORITY(in_addr=a, out_addr=b)
    >>> mf
    '/path/to/exp/exp-meta.json'
    """
    nqubits = len(qubits)
    # TDM setup: vote mask, then invalidate the measurement register
    # (address 10) and the single-bit result register (address 11).
    seqs = [MajorityMask(1, 0, nrounds*nqubits),
            Invalidate(10, nrounds*nqubits),
            Invalidate(11, 1)]
    if prep:
        # Optional state preparation: apply a simultaneous X to every qubit
        # flagged in `prep` before voting.
        seqs += [reduce(operator.mul,
                        [X(q) for n,q in enumerate(qubits) if prep[n]])]
    for n in range(nrounds):
        # Simultaneous measurements for this round, each stored at its own
        # offset in register 10, followed by an inter-round delay.
        seqs += [reduce(operator.mul,
                        [MEASA(q, (10, nqubits*n+m)) for m,q in enumerate(qubits)]),
                 Id(qubits[0],meas_delay)]
    # Fold all stored results into the single-bit majority result (addr 11).
    seqs+=MajorityVote(10,11, nrounds*nqubits)
    seqs+=qwait("RAM", 11)
    seqs+=[Id(qubits[0],100e-9)]
    seqs+=qif(1,[X(qubits[0])]) # placeholder for any conditional operation
    # Wrap into a single sequence before optionally appending calibrations.
    seqs=[seqs]
    if add_cals:
        seqs += create_cal_seqs(qubits,
                                calRepeats)
    metafile = compile_to_hardware(seqs,
                                   'MajorityVote/MajorityVote',
                                   tdm_seq=True)
    return metafile
| BBN-Q/QGL | QGL/BasicSequences/Feedback.py | Python | apache-2.0 | 12,237 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#---
#--- Python
import sys
import StringIO
import argparse
#---
#--- 3rd party
from redmine import Redmine
from redmine import exceptions as redmine_exceptions
#---
class ProjectTree(object) :
    """Sorted, breadcrumb-ordered view of all visible Redmine projects.

    Projects are sorted by their breadcrumb trail, so a parent always
    precedes its children in iteration order (a depth-first ordering).
    """
    def __init__(self, allProjects ) :
        # Private copy so the sort below does not mutate the caller's list.
        self._projects = list(allProjects)
        self._projectsByName = {} # name -> project
        for project in self._projects :
            self._projectsByName[project.name.encode('utf-8')] = project
        # Sorting by breadcrumb trail yields depth-first order.
        self._projects.sort(key = lambda p : self.getBreadcrumbTrail(p))
    def iter_dfs(self) :
        # Yield projects in the (pre-sorted) depth-first order.
        for project in self._projects :
            yield project
    def getParent(self, project) :
        """
        @return: None or project.parent
        """
        # Projects without a parent have no `parent` attribute at all,
        # hence the AttributeError fallback.
        try :
            parentProjectName = project.parent.name.encode('utf-8')
        except AttributeError :
            parentProjectName = ''
        if not parentProjectName :
            parentProject = None
        else :
            # Parent may be invisible to this account; then it is None too.
            parentProject = self._projectsByName.get(parentProjectName, None)
        if 0 :
            # Disabled debug output, kept by the original author.
            print "DEBUG: parentName = %r" % (parentProjectName,)
            print "DEBUG: parentObject %r" % (parentProject,)
        return parentProject
    def getBreadcrumbTrail(self, project, encoding = 'utf-8') :
        """
        @return: (..., parentName, projectName)
        """
        return list(p.name.encode(encoding) for p in self.getAncestorsAndProject(project))
    def getAncestorProjects(self, project) :
        # Returns ancestors ordered root-first (hence the reversed()).
        def _iterAncestors(project) :
            parent = self.getParent(project)
            if parent is None :
                return
            yield parent
            for a in _iterAncestors(parent) :
                yield a
        return list(reversed(list(_iterAncestors(project))))
    def getAncestorsAndProject(self, project) :
        # Ancestors root-first, then the project itself as the last element.
        return self.getAncestorProjects(project) + [project]
#---
class PageTree(object) :
    """Breadcrumb-ordered view of the wiki pages of a single project.

    Mirrors ProjectTree but for wiki pages: pages are sorted by their
    breadcrumb trail so parents precede children during iteration.
    """
    def __init__(self, project, allPages) :
        self._project = project
        # Private copy so the sort below does not mutate the caller's list.
        self._pages = list(allPages)
        self._pagesByTitle = {} # title -> page
        for page in self._pages :
            self._pagesByTitle[page.title.encode('utf-8')] = page
        # NOTE(review): _pagesWithParent is never filled beyond this point;
        # _insert below is a stub. Looks like an abandoned feature.
        self._pagesWithParent = {} # title -> {page -> [childPages, ...]}
        for page in self._pages :
            if 0 :
                # Disabled debug output, kept by the original author.
                print "DEBUG: %r %r" % (project, page,)
            self._insert(page)
        # Sorting by breadcrumb trail yields depth-first order.
        self._pages.sort(key = lambda p : self.getBreadcrumbTrail(p))
    def getParent(self, page) :
        """
        @return: None or page.parent
        """
        # Top-level pages have no `parent` attribute, hence the fallback.
        try :
            parentPageTitle = page.parent.title.encode('utf-8')
        except AttributeError :
            parentPageTitle = ''
        if not parentPageTitle :
            parentPage = None
        else :
            parentPage = self._pagesByTitle.get(parentPageTitle, None)
        if 0 :
            # Disabled debug output, kept by the original author.
            print "DEBUG: parentPageTitle = %r" % (parentPageTitle,)
            print "DEBUG: parentPage %r" % (parentPage,)
        return parentPage
    def getBreadcrumbTrail(self, page, encoding = 'utf-8') :
        """
        @return: (..., parentPage, page)
        """
        # Underscores in wiki titles render as spaces in Redmine.
        return list(p.title.encode(encoding).replace('_', ' ') for p in self.getAncestorsAndPage(page))
    def getAncestorPages(self, page) :
        # Returns ancestors ordered root-first (hence the reversed()).
        def _iterAncestors(page) :
            parent = self.getParent(page)
            if parent is None :
                return
            yield parent
            for a in _iterAncestors(parent) :
                yield a
        return list(reversed(list(_iterAncestors(page))))
    def getAncestorsAndPage(self, page) :
        # Ancestors root-first, then the page itself as the last element.
        return self.getAncestorPages(page) + [page]
    def _insert(self, page) :
        # Stub: the parent/child index was never implemented (see __init__).
        #self._pages.append(page)
        #if parentPage is None :
        #    self.pagesWithParent[page] = []
        return
    def iter_dfs(self) :
        # Yield pages in the (pre-sorted) depth-first order.
        for page in self._pages :
            yield page
#---
def printGlobalTopicIndex(redmineHandle, topicParentPage) :
lineCount = 0
for line in iterGlobaleTopicIndexLines(redmineHandle, topicParentPage) :
lineCount += 1
print line
return lineCount
def iterGlobaleTopicIndexLines(redmineHandle, topicParentPage, **keywords) :
    """
    Iterates over the lines of the target document line by line.
    @keyword printProgress: If True progress information will be printed to stdout
    @type printProgress: bool
    """
    # Fixed header in Redmine textile markup: table of contents, title,
    # auto-generation warning (German), and the child-pages macro.
    yield "{{>toc}}"
    yield ""
    yield "h1. %s" % (topicParentPage,)
    yield ""
    yield "Diese Seite wurde automatisch generiert. Manuelle Änderungen an dieser Seite werden beim nächsten Lauf überschrieben werdern!"
    yield ""
    yield "h2. In diesem Projekt"
    yield ""
    yield "{{child_pages}}"
    yield ""
    yield "h2. In diesem Projekt und Unterprojekten"
    yield ""
    # Entries are tuples starting with the pretty page title, so sorted()
    # orders them alphabetically by title; group them under one heading
    # per first letter.
    letterHeading = None
    for entry in sorted(iterTopicEntries(redmineHandle, topicParentPage, **keywords)) :
        prettyPageTitle = entry[0]
        projectIdent = entry[1]
        projectBreadcrumbTrail = entry[2]
        pageAuthor = entry[3]
        pageUpdatedOn = entry[4]
        pageCreatedOn = entry[5]
        updatedOnDate = pageUpdatedOn.strftime("%d.%m.%Y")
        firstLetter = prettyPageTitle[0]
        if firstLetter != letterHeading :
            # Start a new per-letter section.
            yield ""
            yield ""
            yield "h3. %s" % (firstLetter,)
            yield ""
            letterHeading = firstLetter
        indent = "*"
        # %(name)s placeholders are filled from the local variables above.
        yield "%(indent)s [[%(projectIdent)s:%(prettyPageTitle)s]] (%(projectBreadcrumbTrail)s) (zuletzt geändert am %(updatedOnDate)s von %(pageAuthor)s) " % locals()
def iterTopicEntries(redmineHandle, topicParentPage, **keywords) :
    """Yield one entry tuple per wiki page that lives below topicParentPage.

    Scans every visible project's wiki and yields
    (prettyPageTitle, projectIdent, projectBreadcrumbTrail, pageAuthor,
     pageUpdatedOn, pageCreatedOn) for each matching page.

    @keyword printProgress: If True progress information is printed to stdout.
    """
    printProgress = keywords.get('printProgress', False)
    baseURL = redmineHandle.url
    r = redmineHandle
    allProjects = r.project.all()
    projectCount = len(allProjects)
    if projectCount == 0 :
        # No projects visible; typically means an invalid API key.
        return
    projectTree = ProjectTree(allProjects)
    FIRST_TIME = True
    for (projNum, project) in enumerate(projectTree.iter_dfs()) :
        projectBreadcrumbTrail = " » ".join(projectTree.getBreadcrumbTrail(project))
        pid = project.id # numeric
        projectIdent = project.identifier.encode('utf-8', 'ignore') # symbolic
        pname = project.name
        # singlePage = r.wiki_page.get('Mitarbeiter', project_id = PID)
        # yield singlePage.text
        allPagesQuery = r.wiki_page.filter(project_id = pid)
        try :
            allPages = list(allPagesQuery)
        except redmine_exceptions.ForbiddenError :
            # Wiki not accessible for this account; skip the project.
            #print "Cannot access pages of project '%(pident)s'!" % locals()
            allPages = []
        if len(allPages) > 0 :
            if printProgress :
                # Trailing comma: py2 print without newline.
                print "%i/%i:" %(projNum+1, projectCount),
            if FIRST_TIME :
                # NOTE(review): FIRST_TIME is toggled but never used otherwise;
                # appears to be leftover scaffolding.
                FIRST_TIME = False
            pageTree = PageTree(project, allPages)
            for (pageNum,page) in enumerate(pageTree.iter_dfs()) :
                if printProgress :
                    # One dot per scanned page.
                    print ".",
                pageTitle = page.title.encode('utf-8', 'ignore')
                prettyPageTitle = pageTitle.replace('_', ' ')
                pageAuthor = page.author.name.encode('utf-8', 'ignore')
                pageCreatedOn = page.created_on
                pageUpdatedOn = page.updated_on
                # Breadcrumb of ancestors only (the page itself is dropped).
                pageBreadcrumbTrail = pageTree.getBreadcrumbTrail(page)[:-1]
                if topicParentPage in pageBreadcrumbTrail :
                    indent = "*"
                    yield (prettyPageTitle, projectIdent, projectBreadcrumbTrail, pageAuthor, pageUpdatedOn, pageCreatedOn)
                    # yield "%(indent)s %(pageTitle)s (von %(pageAuthor)s) -> %(ancestors)r" % locals()
            if printProgress :
                print ""
#---
class CLI(object) :
    """
    Encapsulates the Command Line Interface.
    """
    def __init__(self) :
        # Parse argv immediately; exits with status 1 if no API key is given.
        self._parser = parser = self._createParser()
        self._args = args = parser.parse_args()
        if args.apikey is None :
            print "You must provide a Redmine API-Key as first argument."
            sys.exit(1)
        return
    def _createParser(self) :
        # Builds the argparse parser for all supported options.
        parser = argparse.ArgumentParser()
        parser.add_argument("--apikey", help = "Valid API-key to use the Python REST-API")
        parser.add_argument("-t", "--targetpage",
            help = "Fully qualified Name of a Wiki page the result should be stored on",
            default = "")
        parser.add_argument("-p", "--projectid",
            help = "Id of the project the target wiki page belongs to",
            default = "")
        parser.add_argument("--topicparentpage",
            help = "Name of the parent page whose child pages should be collected.",
            default = "")
        return parser
    def getBaseURL(self) :
        """
        Base URL of the Redmine instance.
        """
        # Hard-coded instance URL; not configurable via the command line.
        baseURL = 'https://redmine.itz.uni-halle.de'
        return baseURL
    def getApiKey(self) :
        """
        Valid API-key to use the REST-API of Redmine.
        """
        return self._args.apikey
    def getTargetPage(self) :
        """
        @rtype: None or str
        """
        # NOTE(review): hard-coded to "Begriffe"; the --targetpage argument is
        # ignored (commented out) — looks like a debugging leftover. Confirm
        # before relying on the CLI flag.
        return "Begriffe" # self._args.targetpage
    def getProjectId(self) :
        """Target Project ID"""
        return self._args.projectid
    def getTopicParentPage(self) :
        """Pages with this parent page will be listet on the target page."""
        # Underscores in wiki titles render as spaces in Redmine.
        return self._args.topicparentpage.replace('_', ' ')
#---
def main() :
    """Generate the topic index and print it or write it to the target page.

    With no target page/project configured the index is printed to stdout;
    otherwise the target wiki page is updated, but only when the generated
    text differs from the page's current content.
    """
    cli = CLI()
    baseURL = cli.getBaseURL()
    apiKey = cli.getApiKey()
    redmineHandle = Redmine(baseURL, key = apiKey)
    topicParentPage = cli.getTopicParentPage()
    # Probe the API: with an invalid key no projects are visible.
    allProjects = redmineHandle.project.all()
    projectCount = len(allProjects)
    if projectCount == 0 :
        print "You must provide a VALID Redmine API-Key!"
        return
    targetPage = cli.getTargetPage()
    targetProjectId = cli.getProjectId()
    print targetProjectId, targetPage
    if not targetPage or not targetProjectId:
        # No target configured: dump the index to stdout instead.
        lineCount = printGlobalTopicIndex(redmineHandle, topicParentPage)
    else :
        pageLines = list(iterGlobaleTopicIndexLines(redmineHandle, topicParentPage, printProgress = True))
        newText = "\n".join(pageLines)
        oldPage = redmineHandle.wiki_page.get(targetPage, project_id = targetProjectId)
        oldText = oldPage.text.encode('utf-8')
        # Only write when the content actually changed, to avoid churning
        # the page history.
        if oldText != newText :
            redmineHandle.wiki_page.update(targetPage,
                project_id = targetProjectId,
                title = 'Global index',
                text = newText,
                parent_title ='',
                comments = 'automatisch aktualisiert')
    return
# Script entry point; Ctrl-C exits cleanly with status 0.
# The commented-out lines are leftover stderr-capture scaffolding.
if __name__ == '__main__' :
    #sys_stderr = sys.stderr
    #errbuf = StringIO.StringIO()
    #sys.stderr = errbuf
    try :
        main()
    except KeyboardInterrupt :
        sys.exit(0)
    #sys.stderr = sys_stderr
| uni-halle/python-redmine-tools | topic_index.py | Python | apache-2.0 | 11,227 |
import unittest
import logging
from domaincrawl.link_aggregator import LinkAggregator
from domaincrawl.link_filters import DomainFilter, is_acceptable_url_scheme
from domaincrawl.site_graph import SiteGraph
from domaincrawl.util import URLNormalizer, extract_domain_port
class LinkAggregatorTest(unittest.TestCase):
    """Tests LinkAggregator's filtering, normalization and deduplication."""
    # NOTE: runs at class-creation time and configures the root logger
    # globally for the whole test process.
    logging.basicConfig(format='%(asctime)s %(levelname)s:%(message)s', level=logging.DEBUG, datefmt='%m/%d/%Y %I:%M:%S %p')
    def test_link_dedup(self):
        """End-to-end: normalize, filter, dedup links and track the graph."""
        base_url = "acme.com:8999"
        base_domain, port = extract_domain_port(base_url)
        logger = logging.getLogger()
        url_norm = URLNormalizer(base_domain, port)
        normalized_url = url_norm.normalize_with_domain(base_url)
        logger.debug("Constructed normalized base url : %s"%normalized_url)
        domain_filter = DomainFilter(base_domain, logger)
        site_graph = SiteGraph(logger)
        link_aggregator = LinkAggregator(logger, site_graph, link_mappers=[url_norm.normalize_with_domain], link_filters=[domain_filter.passes, is_acceptable_url_scheme])
        # Relative paths get the base domain/port; dot-segments collapse;
        # query strings and fragments are stripped.
        valid_links = ["/a/b","/a/b/./","http://acme.com:8002/a","https://acme.com:8002/b?q=asd#frag"]
        expected_links = ["http://acme.com:8999/a/b","http://acme.com:8002/a","https://acme.com:8002/b"]
        # This time, we also specify a referrer page
        filtered_links = link_aggregator.filter_update_links(valid_links, normalized_url)
        self.assertListEqual(expected_links,filtered_links)
        self.assertSetEqual(set(expected_links),link_aggregator._links)
        # Second invocation should result in deduplication
        filtered_links = link_aggregator.filter_update_links(valid_links, None)
        self.assertTrue(len(filtered_links) == 0)
        self.assertSetEqual(set(expected_links),link_aggregator._links)
        # None of the invalid links should pass
        invalid_links = ["mailto://user@mail.com","code.acme.com","code.acme.com/b","https://127.122.9.1"]
        filtered_links = link_aggregator.filter_update_links(invalid_links, None)
        self.assertTrue(len(filtered_links) == 0)
        self.assertSetEqual(set(expected_links),link_aggregator._links)
        # A new valid link should pass
        new_valid_links = ["http://acme.com:8999/"]
        filtered_links = link_aggregator.filter_update_links(new_valid_links, None)
        expected_result = ["http://acme.com:8999"]
        self.assertListEqual(expected_result,filtered_links)
        expected_result_set = set(expected_links)
        expected_result_set.update(set(expected_result))
        self.assertSetEqual(expected_result_set,link_aggregator._links)
        # Graph bookkeeping: one vertex per unique link, one edge per
        # (referrer -> link) pair recorded in the first call.
        self.assertEqual(len(expected_result_set), site_graph.num_nodes())
        for link in expected_result_set:
            self.assertTrue(site_graph.has_vertex(link))
        self.assertEqual(len(expected_links), site_graph.num_edges())
        for link in expected_links:
            self.assertTrue(site_graph.has_edge(normalized_url, link))
| planBrk/domaincrawler | test/test_link_aggregator.py | Python | apache-2.0 | 3,054 |
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Matrix compression operator.
Helper functions to have an automated process to take any matrix compression
algorithm and create a tensorflow operator that can be applied on a tensorflow
matrix variable to compress it on the fly during training.
The class MatrixCompressorInferface can be used to implement any matrix
compression algorithm in the method static_matrix_compressor. The other class
CompressionOpInterface is used to create a tensorflow operator that injects
any matrix compression method dynamically into a tensorflow layer. This is
done by specifying in the spec during initialization a
MatrixCompressorInferface object that implements the method.
The get_apply_compression_op return such a tensorflow operator.
Further a tensorflow operator to update variables needs to be invoked
periodically depending on the method. Such an operator is created using
the get_update_op method.
Derived classes of these interfaces can be used to create compression OPs that
implement different compression methods. Such OPs have been implemented using
derived classes such as LowRankDecompMatrixCompressor, CompressionOp for low
rank decomposition, SimhashMatrixCompressor, SimhashCompressionOp for simhash,
DLMatrixCompressor for dictionary learning.
"""
import copy
from absl import logging
import numpy as np
from tensor2tensor.utils.hparam import HParams
import tensorflow.compat.v2 as tf
class MatrixCompressorInferface(object):
  """Abstract base for matrix compression algorithms.

  Third parties subclass this (the historical "Inferface" spelling is kept
  for backward compatibility) and implement static_matrix_compressor to plug
  an arbitrary compression scheme into the compression ops below.
  """

  def __init__(self, spec):
    # The spec carries algorithm hyperparameters; the base class stores none.
    pass

  def static_matrix_compressor(self, a_matrix):
    """Compresses a_matrix with the algorithm of the concrete subclass.

    Args:
      a_matrix: input matrix.

    Returns:
      The factor(s) or any compressed representation of a_matrix.
    """
    raise NotImplementedError()

  def default_matrix(self):
    """Returns a default matrix for initialization, sized per the spec."""
    raise NotImplementedError()
class LowRankDecompMatrixCompressor(MatrixCompressorInferface):
  """Low rank decomposition compressor.

  Implements the matrix compression interface for truncated-SVD low-rank
  decomposition: a_matrix ~= b_matrix @ c_matrix.
  """

  def __init__(self, spec):
    """Initializer.

    Args:
      spec: hparams object with default value given by
        self.get_default_hparams().
    """
    super(LowRankDecompMatrixCompressor, self).__init__(spec)
    self._spec = spec
    # Element counts of the last compression (see static_matrix_compressor).
    self.uncompressed_size = 0
    self.compressed_size = 0

  def get_spec(self):
    """Returns the hparams/spec object this compressor was built with."""
    return self._spec

  @staticmethod
  def get_default_hparams():
    """Get a tf.HParams object with the default values for the hyperparameters.

      name: string
        name of the low-rank matrix decompressor specification.
      rank: integer
        rank of the low-rank decomposition that is performed.
      compressor_option: integer
        indicates what type of factorization (if any) is used.
      is_b_matrix_trainable: bool
        indicates whether the b_matrix matrix in the factorization is to be
        trained.
      is_c_matrix_trainable: bool
        indicates whether the c_matrix matrix in the factorization is to be
        trained.

    Returns:
      tf.HParams object initialized to default values.
    """
    return HParams(
        name='model_compression',
        rank=100,
        num_rows=10,
        num_cols=10,
        use_tpu=False,
        compressor_option=0,
        is_b_matrix_trainable=True,
        is_c_matrix_trainable=True,
        is_c_matrix_present=True,
        block_size=1,
        pruning_fraction=0.0,
        use_lsh=False)

  def static_matrix_compressor(self, a_matrix):
    """Low-rank decomposition of a_matrix.

    Args:
      a_matrix: input matrix (numpy array).

    Returns:
      A list [b_matrix,c_matrix] which is the low-rank decomposition of
      a_matrix. Rank is taken from spec.rank.
    """
    u, s, vh = np.linalg.svd(a_matrix)
    # If matrix dimension is smaller than rank specified then adjust rank
    # (but never below 1).
    rank = max(min(np.min(a_matrix.shape), self._spec.rank), 1)
    b_matrix = u[:, :rank]
    c_matrix = vh[:rank, :]
    # Split each retained singular value evenly (as sqrt factors) between
    # the two factors so neither dominates in magnitude.
    s_mat = np.diag(np.sqrt(s[:rank]))
    b_matrix = np.matmul(b_matrix, s_mat)
    c_matrix = np.matmul(s_mat, c_matrix)
    logging.info(
        'Inside static_matrix_compressor: a_matrix,b_matrix,c_matrix shapes '
        'are: %s, %s, %s', a_matrix.shape, b_matrix.shape, c_matrix.shape)
    # Bug fix: previously tf.size(a_matrix) (a Tensor) was stored here while
    # compressed_size was a plain int — inconsistent types for the two
    # bookkeeping fields. Use the numpy element count for both.
    self.uncompressed_size = a_matrix.size
    self.compressed_size = b_matrix.size + c_matrix.size
    return [b_matrix, c_matrix]
class CompressionOpInterface(object):
  """Interface for a compression op.

  Takes a matrix compression algorithm (a MatrixCompressorInferface object)
  and exposes TensorFlow operators that inject the compression dynamically
  during training.
  """

  def __init__(self, scope='default_scope', spec=None, global_step=None):
    # Concrete subclasses store the scope/spec/step; the base keeps nothing.
    pass

  def get_apply_compression_op(self,
                               a_matrix_tfvar,
                               matrix_compressor,
                               scope='default_scope'):
    """Returns a compressed tensorflow operator for a_matrix_tfvar.

    Args:
      a_matrix_tfvar: TF variable representing a tensor variable in a model.
      matrix_compressor: MatrixCompressorInferface object to specify the
        compression algorithm.
      scope: TF scope used for creating new TF variables.

    Returns:
      A TF node that has the compressed version of a_matrix_tfvar.
    """
    raise NotImplementedError()

  def get_update_op(self):
    """Returns a TF operator with the periodic update steps, if any."""
    raise NotImplementedError()
class CompressionOp(CompressionOpInterface):
  """Implements a compression OP.

  Does this based on any matrix factorization compression algorithm by
  replacing a variable a_matrix by alpha*a_matrix +
  (1-alpha)b_matrix*c_matrix. See the doc linked in the directory README for
  details.
  """

  def __init__(self,
               scope='default_scope',
               spec=None,
               global_step=None,
               layer=None):
    """Initializer.

    Args:
      scope: TF scope used for creating new TF variables.
      spec: compression hyper parameters default value given by
        self.get_default_hparams().
      global_step: tf variable that has the global step.
      layer: Layer to compress.
    """
    super(CompressionOp, self).__init__(scope, spec, global_step)
    # Compression specification
    self._spec = spec
    # Sanity check for compression hparams
    self._validate_spec()
    self._global_step = global_step

    # public member variables to track the compressor, the variables and
    # other tf nodes corresponding to this OP.
    self.matrix_compressor = None
    self.a_matrix_tfvar = None
    self.b_matrix_tfvar = None
    self.c_matrix_tfvar = None
    # Convex-combination weight: 1 = use only the original matrix,
    # 0 = use only the factored product (see class docstring).
    self.alpha = None
    self.layer = layer
    # Global step at which alpha was last decremented; -1 until first update.
    self.last_alpha_update_step = None

    self.uncompressed_size = 0
    self.compressed_size = 0

  @staticmethod
  def get_default_hparams():
    """Get a tf.HParams object with the default values for the hyperparameters.

      name: string
        name of the compression specification. Used for adding summaries and ops
        under a common tensorflow name_scope.
      alpha_decrement_value: float
        a positive real number by which alpha is decremented at each update.
      begin_compression_step: integer
        the global step at which to begin compression.
      end_compression_step: integer
        the global step at which to terminate compression. Defaults to -1
        implying that compression continues till the training stops.
      use_tpu: False
        indicates whether to use TPU.
      compression_option: integer
        indicates what type of factorization (if any) is used.
      rank: integer
        indicates what type of factorization (if any) is used.
      update_option: integer
        indicates how the update logic is being run. More specifically:
        0 - run the update logic in TF; needed when using GPU/TPU.
        1 - run the update logic in regular python as opposed to TF.
        2 - run the update logic in TF and in regular python.

    Returns:
      tf.HParams object initialized to default values.
    """
    return HParams(
        name='model_compression',
        alpha_decrement_value=0.01,
        begin_compression_step=0,
        end_compression_step=-1,
        compression_frequency=10,
        use_tpu=False,
        compression_option=0,
        rank=100,
        update_option=0,
        run_update_interval_check=1,
        block_size=1,
        pruning_fraction=0.0,
        begin_pruning_step=0,
        end_pruning_step=-1,
        weight_sparsity_map=[''],
        block_dims_map=[''],
        threshold_decay=0.0,
        pruning_frequency=10,
        nbins=256,
        block_height=1,
        block_width=1,
        block_pooling_function='AVG',
        initial_sparsity=0.0,
        target_sparsity=0.5,
        sparsity_function_begin_step=0,
        sparsity_function_end_step=100,
        sparsity_function_exponent=3.0,
        gradient_decay_rate=0.99,
        prune_option='weight')

  def setup_variables(self, a_matrix_tfvar, matrix_compressor, layer):
    """Create compressed layer weight matrices."""
    self.matrix_compressor = matrix_compressor
    # Run the compressor once on a zero matrix purely to obtain the factor
    # shapes; the actual values are initialized by the layer's initializer.
    a_matrix = np.zeros(shape=a_matrix_tfvar.shape)
    [b_matrix, c_matrix] = matrix_compressor.static_matrix_compressor(a_matrix)
    self.b_matrix_tfvar = layer.add_weight(
        'b_matrix',
        shape=b_matrix.shape,
        initializer=layer.kernel_initializer,
        regularizer=layer.kernel_regularizer,
        constraint=layer.kernel_constraint,
        dtype=layer.dtype,
        trainable=True)
    self.c_matrix_tfvar = layer.add_weight(
        'c_matrix',
        shape=c_matrix.shape,
        initializer=layer.kernel_initializer,
        regularizer=layer.kernel_regularizer,
        constraint=layer.kernel_constraint,
        dtype=layer.dtype,
        trainable=True)
    # alpha starts at 1 (pure original matrix) and is decremented over time.
    self.alpha = layer.add_weight(
        'alpha',
        shape=(),
        initializer=tf.keras.initializers.Ones(),
        dtype=layer.dtype,
        trainable=False)
    # -1 marks "never updated yet".
    self.last_alpha_update_step = layer.add_weight(
        'last_alpha_update_step',
        shape=(),
        initializer=tf.keras.initializers.Constant(value=-1),
        dtype=tf.int32,
        trainable=False)
    self.a_matrix_tfvar = a_matrix_tfvar
    self.layer.alpha = self.alpha

  def compressed_matmul_keras(self, inputs, training=False):
    """Matmul with a convex combination of original and compressed weights."""
    if training:
      compressed_mat = self.alpha * self.a_matrix_tfvar + (
          1 - self.alpha) * tf.matmul(self.b_matrix_tfvar, self.c_matrix_tfvar)
      return tf.matmul(inputs, compressed_mat)
    else:
      # This prevents the TFLite converter from constant-folding the product of
      # B & C matrices.
      intermediate = tf.matmul(inputs, self.b_matrix_tfvar)
      return tf.matmul(intermediate, self.c_matrix_tfvar)

  def maybe_run_update_step(self):
    """Creates TensorFlow update op for compression."""

    def maybe_update_alpha():
      """Maybe update the alpha param.

      Checks if global_step is between begin_compression_step and
      end_compression_step, and if the current training step is a
      compression step.

      Returns:
        Boolean tensor whether the training step is a compression step.
      """
      # In range iff step >= begin AND (step <= end OR end < 0, i.e. no end).
      is_step_within_compression_range = tf.logical_and(
          tf.greater_equal(
              tf.cast(self._global_step, tf.int32),
              self._spec.begin_compression_step),
          tf.logical_or(
              tf.less_equal(
                  tf.cast(self._global_step, tf.int32),
                  self._spec.end_compression_step),
              tf.less(self._spec.end_compression_step, 0)))
      # A compression step occurs at most once per compression_frequency steps.
      is_compression_step = tf.less_equal(
          tf.add(self.last_alpha_update_step, self._spec.compression_frequency),
          tf.cast(self._global_step, tf.int32))
      return tf.logical_and(is_step_within_compression_range,
                            is_compression_step)

    def no_update_op():
      pass

    def compressor_and_alpha_update_op_fn():
      return self._compressor_and_alpha_update_op()

    tf.cond(
        pred=maybe_update_alpha(),
        true_fn=compressor_and_alpha_update_op_fn,
        false_fn=no_update_op)
    return

  def _compressor_op(self, matrix_compressor, a_matrix_tfvar):
    """Creates compressor op based on matrix_compressor.

    Meant to create the factors once at begin_compression_step.

    Args:
      matrix_compressor: specifies the matrix compressor object.
      a_matrix_tfvar: the tf tensor to be compressed.
    """
    # py_function runs the (numpy-based) compressor inside the TF graph.
    [b_matrix_out, c_matrix_out
    ] = tf.compat.v1.py_function(matrix_compressor.static_matrix_compressor,
                                 [a_matrix_tfvar], [tf.float32, tf.float32])
    self.b_matrix_tfvar.assign(b_matrix_out)
    self.c_matrix_tfvar.assign(c_matrix_out)
    return

  def _update_alpha_op(self):
    # Decrement alpha, clamped at 0 so the combination stays convex.
    # NOTE(review): the second positional argument (0) lands on assign_sub's
    # use_locking parameter — confirm this is intentional.
    self.alpha.assign_sub(self._spec.alpha_decrement_value, 0)
    self.alpha.assign(tf.math.maximum(self.alpha, 0))
    return

  def _compressor_and_alpha_update_op(self):
    """Applies compressor and also updates alpha."""
    self._compressor_op(self.matrix_compressor, self.a_matrix_tfvar)
    self._update_alpha_op()
    # Record this step so the frequency check above can throttle updates.
    self.last_alpha_update_step.assign(tf.cast(self._global_step, tf.int32))

  def _validate_spec(self):
    # Fail fast on nonsensical begin/end step combinations.
    spec = self._spec
    if spec.begin_compression_step < 0:
      raise ValueError('Illegal value for begin_compression_step')

    if spec.begin_compression_step >= spec.end_compression_step:
      if spec.end_compression_step != -1:
        raise ValueError(
            'Compression must begin before it can end. begin_step=%d, '
            'end_step=%d. Set end_compression_step to -1 if compression is '
            'required till training stops' %
            (spec.begin_compression_step, spec.end_compression_step))
class ApplyCompression(object):
    """Applies a compression operator repeatedly across model layers.

    Construct once with a compression spec and a matrix compressor, then call
    apply_compression_keras() for each weight matrix to be compressed. The
    accessor methods expose the spec and the compression ops accumulated
    across those calls.
    """

    def __init__(self, scope, compression_spec, compressor, global_step=None):
        """Initializer.

        Args:
            scope: TF scope used for creating new TF variables.
            compression_spec: compression hyper parameters.
            compressor: matrix compressor object of class
                MatrixCompressorInferface.
            global_step: tf variable that has the global step.
        """
        logging.info('Entering ApplyCompression constructor')
        # Configuration shared by every CompressionOp created here.
        self._scope = scope
        self._compression_op_spec = compression_spec
        self._matrix_compressor = compressor
        self._global_step = global_step
        # Bookkeeping accumulated across apply_compression_* calls.
        self._compression_ops = []
        self._update_ops = []
        self._all_update_op = None
        self.uncompressed_size = 0
        self.compressed_size = 0

    def apply_compression_keras(self,
                                a_matrix_tfvar,
                                scope='default_scope',
                                layer=None):
        """keras version of apply_compression.

        Applies matrix compression OP on a_matrix_tfvar as specified in spec.

        Args:
            a_matrix_tfvar: TF variable representing a tensor variable in a
                model.
            scope: TF scope used for creating new TF variables.
            layer: keras layer object calling this function. Must support an
                add_weight method.

        Returns:
            TF node that represents the compressed version of a_matrix_tfvar.

        Raises:
            NotImplementedError: for InputCompression (option 9).
        """
        # Guard clause: InputCompression has no keras implementation.
        if self._compression_op_spec.compression_option == 9:
            raise NotImplementedError('InputCompression not Supported.')
        compression_op = CompressionOp(
            scope=scope,
            spec=self._compression_op_spec,
            global_step=self._global_step,
            layer=layer)
        compression_op.setup_variables(
            a_matrix_tfvar, self._matrix_compressor, layer=layer)
        return compression_op

    def get_operator_hparam(self, hparam):
        """Returns the value of queried hparam of the compression operator."""
        return self._compression_op_spec.get(hparam)

    def get_compression_ops(self):
        """Returns a shallow copy of the compression operators created so far.

        Returns:
            A list of CompressionOp objects.
        """
        return list(self._compression_ops)

    def get_spec(self):
        """Get the spec / hparams used to create the Pruning object."""
        return self._compression_op_spec
| google-research/google-research | non_semantic_speech_benchmark/distillation/compression_lib/compression_op.py | Python | apache-2.0 | 17,758 |
#!/usr/bin/env python
# standard library imports
# third party related imports
import factory
# local library imports
from mobile_push.db import Session, Topic
class TopicFactory(factory.alchemy.SQLAlchemyModelFactory):
    """Builds Topic rows with sequence-numbered name/arn values."""

    class Meta(object):
        model = Topic
        sqlalchemy_session = Session

    # Produces 'topic-0', 'topic-1', ... and 'topic-arn-0', 'topic-arn-1', ...
    name = factory.Sequence('topic-{0}'.format)
    arn = factory.Sequence('topic-arn-{0}'.format)
| theKono/mobile-push | tests/factories/topic.py | Python | apache-2.0 | 422 |
from setuptools import setup
# Package metadata for the 'soundcurl' command-line SoundCloud downloader.
setup(
    name='soundcurl',
    version='0.1.0',
    description='A command line utility for downloading songs from SoundCloud.',
    author='Jeremy McKibben-Sanders',
    author_email='jmckib2+soundcurl@gmail.com',
    url='https://github.com/jmckib/soundcurl',
    # Sources live under src/; the single module doubles as the whole package.
    package_dir={'': 'src'},
    py_modules=['soundcurl'],
    # Installs a 'soundcurl' executable dispatching to soundcurl.main().
    entry_points={'console_scripts': ['soundcurl = soundcurl:main']},
    install_requires=['beautifulsoup4==4.2.1', 'mutagen==1.21'],
)
| jmckib/soundcurl | setup.py | Python | apache-2.0 | 490 |
from sqlalchemy import Column, ForeignKey, Integer, String, Text, DateTime, Float
from models import Base, Account
from sqlalchemy.orm import relationship
from datetime import datetime
# STOCK EXCHANGE
class Company(Base):
    """A listed company, identified by a 4-character ticker-style code."""
    __tablename__ = 'companies'

    # Ticker code, primary key.
    code = Column(String(4), primary_key=True)
    name = Column(Text, nullable=False)
    # NOTE(review): purpose not evident from this file — presumably links the
    # company to the submission that created it; confirm against callers.
    submission_id = Column(Text, nullable=False)

    # Price history and outstanding holdings; rows die with the company.
    value_points = relationship('ValuePoint', back_populates="company", cascade="all, delete, delete-orphan")
    owned_stocks = relationship('Stock', back_populates="company", cascade="all, delete, delete-orphan")

    def __repr__(self):
        return '<Company %r (%r)>' % (self.name, self.code)
class ValuePoint(Base):
    """A time-stamped share-price sample for one company."""
    __tablename__ = 'valuepoints'

    id = Column(Integer, primary_key=True)
    # The attribute name 'datetime' shadows the imported module at class
    # scope; the default on the right-hand side is evaluated first, so
    # datetime.utcnow still resolves to the module function here.
    datetime = Column(DateTime, nullable=False, default=datetime.utcnow)
    value = Column(Float, nullable=False)

    company_code = Column(String(4), ForeignKey('companies.code'), nullable=False)
    company = relationship(Company, back_populates="value_points")

    def __repr__(self):
        return '<Value of %r at %r: $%.2f>' % (self.company.name, self.datetime, self.value)
class Stock(Base):
    """A holding: some number of one company's shares owned by one account."""
    __tablename__ = 'stocks'

    id = Column(Integer, primary_key=True)
    count = Column(Integer, nullable=False)
    # Purchase price is denormalized here so gains can be computed later
    # without consulting the price history.
    value_per_stock_at_purchase = Column(Float, nullable=False)

    owner_number = Column(Integer, ForeignKey('accounts.number'), nullable=False)
    owner = relationship(Account, back_populates="owned_stocks")

    company_code = Column(String(4), ForeignKey('companies.code'), nullable=False)
    company = relationship(Company, back_populates="owned_stocks")
Account.owned_stocks = relationship('Stock', back_populates="owner", cascade="all, delete, delete-orphan") | skyman/YamScripts | YamTeller/yamsdaq_models.py | Python | apache-2.0 | 1,788 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2008 - 2012 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# usage: %prog file [ file [ file [...]]]
# This script merges the timing data from several files into a single
# aggregate which is sent to stdout.
class stamp:
    """A single timing sample: an integer time with an integer weight.

    Note: Python 2 only ('long' builtin, old-style class), like the rest of
    this script.
    """

    def __init__(this, time, weight):
        this.time = long(time)
        this.weight = long(weight)

    def weighted_time(this):
        # Contribution of this sample to a weighted-average numerator.
        return this.time * this.weight
def minimum(x, y):
    """Return the smaller of x and y (y wins ties)."""
    return x if x < y else y
def maximum(x, y):
    """Return the larger of x and y (y wins ties)."""
    return x if x > y else y
class timing_file:
def __init__(this, filename = ''):
this.stamps = {}
this.filename = ''
this.filename = filename
if this.filename:
f = open(filename, 'r')
this.lines = f.readlines()
f.close()
this.lines = [ line.strip() for line in this.lines ]
for line in this.lines:
space_sep = line.split()
if len(space_sep) != 2:
raise Exception('bad timing line in %s: %s' % (this.filename, line))
star_sep = space_sep[0].split('*')
if len(star_sep) == 1:
weight = 1
else:
weight = star_sep[1]
this.stamps[space_sep[1]] = stamp(star_sep[0], weight)
def write(this):
for stamp in this.stamps:
print '%d*%d %s' % (this.stamps[stamp].time, this.stamps[stamp].weight, stamp)
def merge(this, old):
new = timing_file()
minmax = ['maximum', 'minimum']
for s in this.stamps:
if s in minmax:
continue
if s in old.stamps:
total_weight = this.stamps[s].weight + old.stamps[s].weight
weighted_average_time = (this.stamps[s].weighted_time() + old.stamps[s].weighted_time()) / total_weight
new.stamps[s] = stamp(weighted_average_time, total_weight)
else:
new.stamps[s] = this.stamps[stamp]
for s in old.stamps:
if s in minmax:
continue
if s not in this.stamps:
new.stamps[s] = old.stamps[s]
stamps = [this.stamps[s].time for s in this.stamps] + [old.stamps[s].time for s in old.stamps]
new.stamps['maximum'] = stamp(reduce(maximum, stamps, 0), 0)
if new.stamps['maximum'] > 0:
new.stamps['minimum'] = stamp(reduce(minimum, stamps, new.stamps['maximum'].time), 0)
return new
def option_parser():
import optparse
usage = "Usage: %prog file [ file [ file [...]]]"
parser = optparse.OptionParser(usage = usage)
general = optparse.OptionGroup(parser, 'General Options', '')
# general.add_option('-i', '--input',
# type = 'string',
# dest = 'infile',
# default = '',
# help = 'use this as the input file [default: stdin]')
parser.add_option_group(general)
return parser
if __name__ == '__main__':
    # Merge the timing data of every file named on the command line and
    # write the aggregate to stdout.
    options, args = option_parser().parse_args()
    # Renamed from 'sum', which shadowed the builtin; also dropped an
    # unused 'import optparse' (option_parser does its own import).
    total = timing_file()
    for a in args:
        total = total.merge(timing_file(a))
    total.write()
'''
Create 2 VMs with same image. Then commit 2 new images from 2 VMs.
@author: Youyk
'''
import time
import os
import apibinding.inventory as inventory
import zstackwoodpecker.test_util as test_util
import zstackwoodpecker.test_state as test_state
import zstackwoodpecker.test_lib as test_lib
import zstackwoodpecker.operations.resource_operations as res_ops
import zstackwoodpecker.zstack_test.zstack_test_vm as test_vm_header
import zstackwoodpecker.zstack_test.zstack_test_image as test_image
import zstackwoodpecker.zstack_test.zstack_test_vm as test_vm
test_stub = test_lib.lib_get_specific_stub()
test_obj_dict = test_state.TestStateDict()
def test():
    """Commit two same-named images from two VMs, then boot a VM by that name.

    Order of the project API calls below is significant (each has side
    effects on the test environment); cleanup runs only on full success.
    """
    vm1 = test_stub.create_vm(vm_name = 'basic-test-vm')
    test_obj_dict.add_vm(vm1)
    #vm1.check()

    image_creation_option = test_util.ImageOption()
    # Find a backup storage that supports committing root-volume images
    # (image store or ceph); otherwise skip the whole test case.
    backup_storage_list = test_lib.lib_get_backup_storage_list_by_vm(vm1.vm)
    for bs in backup_storage_list:
        if bs.type in [inventory.IMAGE_STORE_BACKUP_STORAGE_TYPE, inventory.CEPH_BACKUP_STORAGE_TYPE]:
            # NOTE(review): this uses backup_storage_list[0].uuid rather
            # than bs.uuid — presumably it should target the matching
            # storage; confirm on environments with multiple backends.
            image_creation_option.set_backup_storage_uuid_list([backup_storage_list[0].uuid])
            break
    else:
        # for/else: reached only when no suitable backup storage was found.
        vm1.destroy()
        test_util.test_skip('Not find image store or ceph type backup storage.')

    vm2 = test_stub.create_vm(vm_name = 'basic-test-vm')
    test_obj_dict.add_vm(vm2)

    # First image, committed from vm1's root volume.
    image_creation_option.set_root_volume_uuid(vm1.vm.rootVolumeUuid)
    image_creation_option.set_name('test_create_vm_images_with_same_name')
    #image_creation_option.set_platform('Linux')
    image1 = test_image.ZstackTestImage()
    image1.set_creation_option(image_creation_option)
    image1.create()
    test_obj_dict.add_image(image1)
    image1.check()
    vm1.destroy()

    # Second image with the SAME name, committed from vm2's root volume.
    image_creation_option.set_root_volume_uuid(vm2.vm.rootVolumeUuid)
    image_creation_option.set_name('test_create_vm_images_with_same_name')
    image2 = test_image.ZstackTestImage()
    image2.set_creation_option(image_creation_option)
    image2.create()
    test_obj_dict.add_image(image2)
    image2.check()

    # Booting by name must still succeed although two images share it.
    vm3 = test_stub.create_vm(image_name = 'test_create_vm_images_with_same_name')
    test_obj_dict.add_vm(vm3)

    test_lib.lib_robot_cleanup(test_obj_dict)
    test_util.test_pass('Create 2 VM images from same origin Image Successfully')
#Will be called only if exception happens in test().
def error_cleanup():
    """Tear down all tracked resources; invoked only when test() raises."""
    test_lib.lib_error_cleanup(test_obj_dict)
| zstackio/zstack-woodpecker | integrationtest/vm/virt_plus/image_store/test_crt_2_vms_imgs_from_same_img.py | Python | apache-2.0 | 2,414 |
# Copyright 2020 The IREE Authors
#
# Licensed under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
from absl import app
from iree.tf.support import tf_test_utils
import numpy as np
import tensorflow.compat.v2 as tf
class LinspaceModule(tf.Module):
    """tf.Module exposing a fixed-length tf.linspace for compiler tests."""

    def __init__(self):
        pass

    # Scalar float32 start/stop; the signature is fixed so the function can
    # be traced and compiled ahead of time.
    @tf.function(input_signature=[
        tf.TensorSpec([], tf.float32),
        tf.TensorSpec([], tf.float32)
    ])
    def linspace(self, start, stop):
        # 'num' is const because XLA's iota operation does not support dynamic
        # shapes.
        num = np.array(3, dtype=np.int32)
        return tf.linspace(start, stop, num)
class LinspaceTest(tf_test_utils.TracedModuleTestCase):
    """Checks compiled-backend linspace output against the TF reference."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Compile LinspaceModule once for all backends under test.
        self._modules = tf_test_utils.compile_tf_module(LinspaceModule)

    def test_linspace(self):
        def linspace(module):
            # Trace a single call; the harness replays it on each backend
            # and compares the results.
            start = np.array(10., dtype=np.float32)
            stop = np.array(12., dtype=np.float32)
            module.linspace(start, stop)

        self.compare_backends(linspace, self._modules)
def main(argv):
    """absl entry point: enable TF v2 behavior when available, run tests."""
    del argv  # unused by this runner
    enable_v2 = getattr(tf, 'enable_v2_behavior', None)
    if enable_v2 is not None:
        enable_v2()
    tf.test.main()


if __name__ == '__main__':
    app.run(main)
| google/iree | integrations/tensorflow/test/python/iree_tf_tests/uncategorized/linspace_test.py | Python | apache-2.0 | 1,322 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Image operations."""
from __future__ import absolute_import as _abs
from . import _make
def resize(data,
           size,
           layout="NCHW",
           method="bilinear",
           align_corners=True,
           out_dtype=None):
    """Image resize operator.

    This operator takes data as input and does 2D scaling to the given scale factor.
    In the default case, where the data_layout is `NCHW`
    with data of shape (n, c, h, w)
    out will have a shape (n, c, size[0], size[1])

    method indicates the algorithm to be used while calculating the out value
    and method can be one of ("bilinear", "nearest_neighbor", "bicubic")

    Parameters
    ----------
    data : relay.Expr
        The input data to the operator.

    size: Tuple of Expr
        The out size to which the image will be resized.

    layout : str, optional
        Layout of the input.

    method : str, optional
        Scale method to used [nearest_neighbor, bilinear, bicubic].

    align_corners : bool, optional
        Should be true to preserve the values at the corner pixels.

    out_dtype : str, optional
        Type to return. If left None returns the same type as input.

    Returns
    -------
    result: relay.Expr
        The resized result.
    """
    # Thin wrapper: all work happens in the C++ op registered via _make.
    return _make.resize(data, size, layout, method, align_corners, out_dtype)
| Huyuwei/tvm | python/tvm/relay/op/image/image.py | Python | apache-2.0 | 2,130 |
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for UpdateConversationProfile
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-dialogflow
# [START dialogflow_v2_generated_ConversationProfiles_UpdateConversationProfile_async]
from google.cloud import dialogflow_v2
async def sample_update_conversation_profile():
    """Auto-generated Dialogflow sample: update a conversation profile.

    Network I/O only — builds a minimal UpdateConversationProfileRequest
    and prints the server response.
    """
    # Create a client
    client = dialogflow_v2.ConversationProfilesAsyncClient()

    # Initialize request argument(s)
    conversation_profile = dialogflow_v2.ConversationProfile()
    conversation_profile.display_name = "display_name_value"

    request = dialogflow_v2.UpdateConversationProfileRequest(
        conversation_profile=conversation_profile,
    )

    # Make the request
    response = await client.update_conversation_profile(request=request)

    # Handle the response
    print(response)
| googleapis/python-dialogflow | samples/generated_samples/dialogflow_v2_generated_conversation_profiles_update_conversation_profile_async.py | Python | apache-2.0 | 1,721 |
from asynctnt_queue import Queue, Tube
from tests import BaseTarantoolTestCase
class QueueTestCase(BaseTarantoolTestCase):
    """Exercises Queue construction, tube caching, and statistics."""

    async def test__queue_create(self):
        queue = Queue(self.conn)
        self.assertEqual(queue.conn, self.conn, 'conn valid')

    def test__queue_get_tube(self):
        queue = Queue(self.conn)
        tube = queue.tube('test_tube')
        self.assertEqual(tube.name, 'test_tube', 'name valid')
        self.assertIsInstance(tube, Tube, 'tube valid type')
        self.assertEqual(tube.conn, self.conn, 'conn valid')

    def test__queue_get_tube_multiple(self):
        # Repeated lookups of one tube name must return the cached object.
        queue = Queue(self.conn)
        first = queue.tube('test_tube')
        second = queue.tube('test_tube')
        self.assertIs(first, second, 'the same object')

    async def test__queue_statistics(self):
        queue = Queue(self.conn)
        stats = await queue.statistics()
        self.assertIsNotNone(stats)
        self.assertIn('test_tube', stats)
| igorcoding/asynctnt-queue | tests/test_queue.py | Python | apache-2.0 | 925 |
# -*- coding: utf-8 -*-
import django
import sys
from itertools import chain
from django import forms
from django.conf import settings
from django.db.models.query import QuerySet
from django.template.loader import render_to_string
from django.utils.encoding import force_text
from django.utils.html import conditional_escape, escape
from django.utils.safestring import mark_safe
# Python 2/3 compatibility aliases used throughout this module:
# iteritems(d) -> iterator over items, string_types -> isinstance() tuple,
# str_ -> native text type.
if sys.version_info[0] < 3:
    iteritems = lambda d: iter(d.iteritems())
    string_types = basestring,
    str_ = unicode
else:
    iteritems = lambda d: iter(d.items())
    string_types = str,
    str_ = str

# Fall back to MEDIA_URL on old Django versions that predate STATIC_URL.
STATIC_URL = getattr(settings, 'STATIC_URL', settings.MEDIA_URL)
class SortedCheckboxSelectMultiple(forms.CheckboxSelectMultiple):
    """Checkbox-multiple widget whose checked items keep a user-given order.

    Selected choices are rendered first, in the order stored in `value`;
    jQuery-UI drag-and-drop (see Media) lets the user reorder them. The
    browser posts the result back as one comma-separated string.
    """

    class Media:
        js = (
            STATIC_URL + 'sortedm2m/widget.js',
            STATIC_URL + 'sortedm2m/jquery-ui.js',
        )
        css = {'screen': (
            STATIC_URL + 'sortedm2m/widget.css',
        )}

    def build_attrs(self, attrs=None, **kwargs):
        # Append the 'sortedm2m' CSS class that the widget JS hooks onto,
        # preserving any classes already present.
        attrs = super(SortedCheckboxSelectMultiple, self).\
            build_attrs(attrs, **kwargs)
        classes = attrs.setdefault('class', '').split()
        classes.append('sortedm2m')
        attrs['class'] = ' '.join(classes)
        return attrs

    def render(self, name, value, attrs=None, choices=()):
        """Render the widget, splitting choices into selected/unselected."""
        if value is None: value = []
        has_id = attrs and 'id' in attrs
        final_attrs = self.build_attrs(attrs, name=name)

        # Normalize to strings so option values can be compared to `value`.
        str_values = [force_text(v) for v in value]

        selected = []
        unselected = []
        for i, (option_value, option_label) in enumerate(chain(self.choices, choices)):
            # If an ID attribute was given, add a numeric index as a suffix,
            # so that the checkboxes don't all have the same ID attribute.
            if has_id:
                final_attrs = dict(final_attrs, id='%s_%s' % (attrs['id'], i))
                label_for = ' for="%s"' % conditional_escape(final_attrs['id'])
            else:
                label_for = ''

            cb = forms.CheckboxInput(final_attrs, check_test=lambda value: value in str_values)
            option_value = force_text(option_value)
            rendered_cb = cb.render(name, option_value)
            option_label = conditional_escape(force_text(option_label))
            item = {'label_for': label_for, 'rendered_cb': rendered_cb, 'option_label': option_label, 'option_value': option_value}
            if option_value in str_values:
                selected.append(item)
            else:
                unselected.append(item)

        # Re-order `selected` to match str_values, which holds the option
        # values in the order they should appear on screen.
        ordered = []
        for value in str_values:
            for select in selected:
                if value == select['option_value']:
                    ordered.append(select)
        selected = ordered

        html = render_to_string(
            'sortedm2m/sorted_checkbox_select_multiple_widget.html',
            {'selected': selected, 'unselected': unselected})
        return mark_safe(html)

    def value_from_datadict(self, data, files, name):
        # The sorted widget posts one comma-separated string of values;
        # split it (dropping empties) so downstream sees a list.
        value = data.get(name, None)
        if isinstance(value, string_types):
            return [v for v in value.split(',') if v]
        return value

    # Django < 1.7 asks the widget (not the field) whether data changed;
    # this order-sensitive variant is only defined there.
    if django.VERSION < (1, 7):
        def _has_changed(self, initial, data):
            if initial is None:
                initial = []
            if data is None:
                data = []
            if len(initial) != len(data):
                return True
            initial_set = [force_text(value) for value in initial]
            data_set = [force_text(value) for value in data]
            return data_set != initial_set
class SortedMultipleChoiceField(forms.ModelMultipleChoiceField):
    """ModelMultipleChoiceField that preserves the submitted ordering."""

    widget = SortedCheckboxSelectMultiple

    def clean(self, value):
        # Let the parent validate membership, then re-order the resulting
        # objects to match the order in which the PKs were submitted.
        queryset = super(SortedMultipleChoiceField, self).clean(value)
        if value is None or not isinstance(queryset, QuerySet):
            return queryset
        object_list = dict((
            (str_(key), value)
            for key, value in iteritems(queryset.in_bulk(value))))
        return [object_list[str_(pk)] for pk in value]

    def _has_changed(self, initial, data):
        # Order-sensitive change detection: the same members in a different
        # order count as changed (unlike the parent implementation).
        if initial is None:
            initial = []
        if data is None:
            data = []
        if len(initial) != len(data):
            return True
        initial_set = [force_text(value) for value in self.prepare_value(initial)]
        data_set = [force_text(value) for value in data]
        return data_set != initial_set
| Venturi/oldcms | env/lib/python2.7/site-packages/sortedm2m/forms.py | Python | apache-2.0 | 4,668 |
# Fetch BoM gridded files from the Bureau server.
import sys
from datetime import datetime
import calendar
import subprocess
import os.path
R = 'http://www.bom.gov.au/web03/ncc/www/awap'
WGET = '/usr/bin/wget'

# Append yyyymmddyyyymmdd.grid.Z to these URLs to get the file.
# (Name 'vars' shadows the builtin, kept for backward compatibility.)
vars = {
    'rain/day'         : R+'/rainfall/totals/daily/grid/0.05/history/nat',
    'rain_rmse/day'    : R+'/rainfall/rmse/daily/grid/0.05/history/nat',
    'rain_recal/day'   : R+'/rainfall/totals/daily/grid/0.05/history/nat_recal',
    'tmax/day'         : R+'/temperature/maxave/daily/grid/0.05/history/nat',
    'tmax_rmse/day'    : R+'/temperature/maxrmse/daily/grid/0.05/history/nat',
    'tmin/day'         : R+'/temperature/minave/daily/grid/0.05/history/nat',
    'tmin_rmse/day'    : R+'/temperature/minrmse/daily/grid/0.05/history/nat',
    'rad/day'          : R+'/solar/solarave/daily/grid/0.05/history/nat',
    'vph09/day'        : R+'/vprp/vprph09/daily/grid/0.05/history/nat',
    'vph15/day'        : R+'/vprp/vprph15/daily/grid/0.05/history/nat',
    'rain/month'       : R+'/rainfall/totals/month/grid/0.05/history/nat',
    'rain_rmse/month'  : R+'/rainfall/rmse/month/grid/0.05/history/nat',
    'tmax/month'       : R+'/temperature/maxave/month/grid/0.05/history/nat',
    'tmax_rmse/month'  : R+'/temperature/maxrmse/month/grid/0.05/history/nat',
    'tmin/month'       : R+'/temperature/minave/month/grid/0.05/history/nat',
    'tmin_rmse/month'  : R+'/temperature/minrmse/month/grid/0.05/history/nat',
    'rad/month'        : R+'/solar/solarave/month/grid/0.05/history/nat',
    'vph09/month'      : R+'/vprp/vprph09/month/grid/0.05/history/nat',
    'vph15/month'      : R+'/vprp/vprph15/month/grid/0.05/history/nat'
}

# Local filename prefix for each variable/interval combination.
filePrefix = {
    'rain/day'         : 'rain_daily',
    'rain_rmse/day'    : 'rain_rmse_daily',
    'rain_recal/day'   : 'rain_recal_daily',
    'tmax/day'         : 'tmax_daily',
    'tmax_rmse/day'    : 'tmax_rmse_daily',
    'tmin/day'         : 'tmin_daily',
    'tmin_rmse/day'    : 'tmin_rmse_daily',
    'rad/day'          : 'rad_daily',
    'vph09/day'        : 'vph09_daily',
    'vph15/day'        : 'vph15_daily',
    'rain/month'       : 'rain_month',
    'rain_rmse/month'  : 'rain_rmse_month',
    'tmax/month'       : 'tmax_month',
    'tmax_rmse/month'  : 'tmax_rmse_month',
    'tmin/month'       : 'tmin_month',
    'tmin_rmse/month'  : 'tmin_rmse_month',
    'rad/month'        : 'rad_month',
    'vph09/month'      : 'vph09_month',
    'vph15/month'      : 'vph15_month',
}

def get_bom_grid(var, yyyymmdd, month):
    """
    Given a variable VAR and date YYYYMMDD, fetch the .grid file from
    the Bureau server using wget. If MONTH != 0, monthly files are fetched
    rather than daily. Return the name of the file fetched.

    Raises ValueError for an unknown variable or malformed date,
    RuntimeError if the destination file already exists, and IOError if
    wget fails (the partial download is removed).
    """
    # Days per month, January at index 0. Bug fixes: this was a tuple, so
    # the leap-year patch below raised TypeError; and it patched index 2
    # (March) instead of index 1 (February).
    monthday = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
    interval = 'day' if month == 0 else 'month'
    key = var+'/'+interval
    if key not in vars:
        raise ValueError("No such var ("+var+")")
    try:
        s = datetime.strptime(yyyymmdd, "%Y%m%d")
    except ValueError:
        raise ValueError("Invalid date ("+yyyymmdd+")")
    if month:
        # Monthly files span the 1st to the last day of the month.
        if s.month == 2 and calendar.isleap(s.year): monthday[1] = 29
        fname = "%04d%02d01%04d%02d%02d.grid.Z" % (s.year, s.month, s.year, s.month, monthday[s.month-1])
    else:
        fname = yyyymmdd+yyyymmdd+".grid.Z"
    out = filePrefix[key]+'_'+fname
    if os.path.exists(out):
        raise RuntimeError("File already exists ("+out+")")
    url = vars[key]+"/"+fname
    # wget returns non-zero on failure; clean up the partial output file.
    if subprocess.call([WGET, '--output-document='+out, url]):
        os.remove(out)
        raise IOError("Calling %s --output-document=%s %s" % (WGET, out, url))
    return out
if __name__ == '__main__':
    # CLI: get_bom_grid.py VAR YYYYMMDD MONTH(1/0). Exit 0 on success,
    # 1 on any failure. (Python 2 print statements throughout.)
    if len(sys.argv) != 4:
        print "USAGE: python get_bom_grid.py VAR YYYYMMDD MONTH(1/0)"
        sys.exit(1)
    try:
        fname = get_bom_grid(sys.argv[1], sys.argv[2], int(sys.argv[3]))
        print "Fetched file: "+fname
    except ValueError as e:
        # Unknown variable or malformed date.
        print "Aborted - erroneous arguments: %s" % e
        sys.exit(1)
    except IOError as e:
        # wget failed (network error or missing remote file).
        print "URL not found: %s" % e
        sys.exit(1)
    except RuntimeError as e:
        # Destination file already downloaded.
        print "File of same name present: %s" % e
        sys.exit(1)
    sys.exit(0)
| KimberleyOpie/common-tools | raw_data_tools/get_bom_grid.py | Python | apache-2.0 | 4,025 |
# Copyright 2016 Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
from _redfishobject import RedfishObject
from redfish.rest.v1 import ServerDownOrUnreachableError
def ex31_set_license_key(redfishobj, iLO_Key):
    """Installs an iLO license key on every Manager resource found.

    Args:
        redfishobj: connected RedfishObject session.
        iLO_Key: license key string to install.
    """
    sys.stdout.write("\nEXAMPLE 31: Set iLO License Key\n")
    instances = redfishobj.search_for_type("Manager.")

    for instance in instances:
        rsp = redfishobj.redfish_get(instance["@odata.id"])

        body = dict()
        body["LicenseKey"] = iLO_Key

        # Gen9 nests the OEM payload under 'Hp'; Gen10+ uses 'Hpe'.
        if redfishobj.typepath.defs.isgen9:
            oemhpdict = rsp.dict["Oem"]["Hp"]
        else:
            oemhpdict = rsp.dict["Oem"]["Hpe"]

        # POST the key to the manager's LicenseService and report the result.
        response = redfishobj.redfish_post(oemhpdict["Links"]\
                                    ["LicenseService"]["@odata.id"], body)
        redfishobj.error_handler(response)
if __name__ == "__main__":
    # When running on the server locally use the following commented values
    # iLO_https_url = "blobstore://."
    # iLO_account = "None"
    # iLO_password = "None"

    # When running remotely connect using the iLO secured (https://) address,
    # iLO account name, and password to send https requests
    # iLO_https_url acceptable examples:
    # "https://10.0.0.100"
    # "https://f250asha.americas.hpqcorp.net"
    iLO_https_url = "https://10.0.0.100"
    iLO_account = "admin"
    iLO_password = "password"

    # Create a REDFISH object
    # NOTE: Python 2 'except X, e' syntax below — this script is py2-only.
    try:
        REDFISH_OBJ = RedfishObject(iLO_https_url, iLO_account, iLO_password)
    except ServerDownOrUnreachableError, excp:
        sys.stderr.write("ERROR: server not reachable or doesn't support " \
                "RedFish.\n")
        sys.exit()
    except Exception, excp:
        raise excp

    ex31_set_license_key(REDFISH_OBJ, "test_iLO_Key")
| HewlettPackard/python-proliant-sdk | examples/Redfish/ex31_set_license_key.py | Python | apache-2.0 | 2,431 |
#
# (c) Copyright 2014 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Forms for managing metadata.
"""
import json
from django.forms import ValidationError
from django.urls import reverse
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import messages
from openstack_dashboard.api import glance
from openstack_dashboard.dashboards.admin.metadata_defs \
import constants
class CreateNamespaceForm(forms.SelfHandlingForm):
    """Creates a metadata-definitions namespace from a file or pasted JSON."""

    source_type = forms.ChoiceField(
        label=_('Namespace Definition Source'),
        choices=[('file', _('Metadata Definition File')),
                 ('raw', _('Direct Input'))],
        widget=forms.ThemableSelectWidget(
            attrs={'class': 'switchable', 'data-slug': 'source'}))

    metadef_file = forms.FileField(
        label=_("Metadata Definition File"),
        help_text=_("A local metadata definition file to upload."),
        widget=forms.FileInput(
            attrs={'class': 'switched', 'data-switch-on': 'source',
                   'data-required-when-shown': 'true',
                   'data-source-file': _('Metadata Definition File')}),
        required=False)

    direct_input = forms.CharField(
        label=_('Namespace JSON'),
        help_text=_('The JSON formatted contents of a namespace.'),
        widget=forms.widgets.Textarea(
            attrs={'class': 'switched', 'data-switch-on': 'source',
                   'data-required-when-shown': 'true',
                   'data-source-raw': _('Namespace JSON')}),
        required=False)

    public = forms.BooleanField(label=_("Public"), required=False)
    protected = forms.BooleanField(label=_("Protected"), required=False)

    def clean(self):
        """Validate the input and parse the namespace JSON.

        Exactly one of file upload / direct input must be given; the parsed
        dict lands in cleaned_data['namespace'].
        """
        data = super(CreateNamespaceForm, self).clean()

        # The key can be missing based on particular upload
        # conditions. Code defensively for it here...
        metadef_file = data.get('metadef_file', None)
        metadata_raw = data.get('direct_input', None)
        if metadata_raw and metadef_file:
            raise ValidationError(
                _("Cannot specify both file and direct input."))
        if not metadata_raw and not metadef_file:
            raise ValidationError(
                _("No input was provided for the namespace content."))

        try:
            if metadef_file:
                ns_str = self.files['metadef_file'].read()
            else:
                ns_str = data['direct_input']
            namespace = json.loads(ns_str)

            # Visibility/protection come from the form, overriding the JSON.
            if data['public']:
                namespace['visibility'] = 'public'
            else:
                namespace['visibility'] = 'private'

            namespace['protected'] = data['protected']

            # Strip read-only properties the create API would reject.
            for protected_prop in constants.METADEFS_PROTECTED_PROPS:
                namespace.pop(protected_prop, None)

            data['namespace'] = namespace
        except Exception as e:
            msg = _('There was a problem loading the namespace: %s.') % e
            raise forms.ValidationError(msg)

        return data

    def handle(self, request, data):
        """Create the namespace in glance; return it, or False on error."""
        try:
            namespace = glance.metadefs_namespace_create(request,
                                                         data['namespace'])
            messages.success(request,
                             _('Namespace %s has been created.') %
                             namespace['namespace'])
            return namespace
        except Exception as e:
            # Bug fix: BaseException.message does not exist on Python 3 —
            # use str(e). Keep only the text before schema-validation noise.
            msg = _('Unable to create new namespace. %s')
            msg %= str(e).split('Failed validating', 1)[0]
            exceptions.handle(request, message=msg)
            return False
class ManageResourceTypesForm(forms.SelfHandlingForm):
    """Replaces a namespace's associated resource types wholesale."""

    def handle(self, request, context):
        namespace_name = self.initial['id']
        current_names = self.get_names(self.initial['resource_types'])
        try:
            updated_types = json.loads(self.data['resource_types'])
            # Keep only entries the user ticked; 'selected' is a transient
            # UI flag and is popped so it is not sent to the API.
            selected_types = [updated_type for updated_type in updated_types
                              if updated_type.pop('selected', False)]
            # Remove every current association first, then re-add the
            # chosen ones — the order avoids duplicate-association errors.
            for current_name in current_names:
                glance.metadefs_namespace_remove_resource_type(
                    self.request, namespace_name, current_name)

            for selected_type in selected_types:
                # Strip client-side/read-only fields the API rejects.
                selected_type.pop('$$hashKey', None)
                selected_type.pop('created_at', None)
                selected_type.pop('updated_at', None)

                glance.metadefs_namespace_add_resource_type(
                    self.request, namespace_name, selected_type)
            msg = _('Resource types updated for namespace %s.')
            msg %= namespace_name
            messages.success(request, msg)
        except Exception:
            msg = _('Error updating resource types for namespace %s.')
            msg %= namespace_name
            exceptions.handle(request, msg)
            return False
        return True

    def get_names(self, items):
        # Helper: extract the 'name' of every resource-type dict.
        return [item['name'] for item in items]
class UpdateNamespaceForm(forms.SelfHandlingForm):
    """Edits the visibility/protection flags of an existing namespace."""

    public = forms.BooleanField(label=_("Public"), required=False)
    protected = forms.BooleanField(label=_("Protected"), required=False)

    def handle(self, request, data):
        """Push the updated flags to glance; True on success, else False."""
        try:
            visibility = 'public' if data['public'] else 'private'
            glance.metadefs_namespace_update(request,
                                             self.initial['namespace_id'],
                                             visibility=visibility,
                                             protected=data['protected'])
            messages.success(request, _('Namespace successfully updated.'))
        except Exception:
            redirect = reverse(constants.METADATA_INDEX_URL)
            exceptions.handle(request,
                              _('Error updating attributes for namespace.'),
                              redirect=redirect)
            return False
        return True
| ChameleonCloud/horizon | openstack_dashboard/dashboards/admin/metadata_defs/forms.py | Python | apache-2.0 | 6,611 |
"""
URL routing for blogs, entries and feeds
"""
from django.conf.urls.defaults import patterns, url
from django.conf import settings
from feeds import LatestEntriesByBlog, LatestEntries #, EntryComments
from models import Blog
from views import generic_blog_entry_view, blog_detail
from viewpoint.settings import USE_CATEGORIES, DEFAULT_BLOG
FEEDS = {
'all': LatestEntries,
'latest': LatestEntries,
}
if USE_CATEGORIES and 'categories' in settings.INSTALLED_APPS:
from feeds import LatestEntriesByCategory
FEEDS['categories'] = LatestEntriesByCategory
urlpatterns = patterns('django.contrib.syndication.views',
(r'^feeds/(?P<url>.*)/$', 'feed', {'feed_dict': FEEDS}),
)
urlpatterns += patterns('',
# Blog detail (Main page of a blog, shows description and stuff)
url(
regex = r'^$',
view = blog_detail,
name='viewpoint_blog_detail'
),
# Listing of blog entries for a given year
url(
regex = r'^(?P<year>\d{4})/$',
view = generic_blog_entry_view,
name='viewpoint_blog_archive_year'
),
# Listing of blog entries for a given month/year
url(
regex = r'^(?P<year>\d{4})/(?P<month>\w{3})/$',
view = generic_blog_entry_view,
name = 'viewpoint_blog_archive_month'
),
# Listing of blog entries for a given week of the year
url(
regex = r'^(?P<year>\d{4})/(?P<week>\d{1,2})/$',
view = generic_blog_entry_view,
name = 'viewpoint_blog_archive_week'
),
# Listing of blog entries for a given day
url(
regex = r'^(?P<year>\d{4})/(?P<month>\w{3})/(?P<day>\d{1,2})/$',
view = generic_blog_entry_view,
name = 'viewpoint_blog_archive_day'
),
# Listing of blog entries for the current date
url(
regex = r'^today/$',
view = generic_blog_entry_view,
name='viewpoint_blog_archive_today'
),
# A blog entry
url(
regex = r'^(?P<year>\d{4})/(?P<month>\w{3})/(?P<day>\d{1,2})/(?P<slug>[-\w]+)/$',
view = generic_blog_entry_view,
name='viewpoint_entry_detail'
),
# A blog comments page
url(
regex = r'^(?P<year>\d{4})/(?P<month>\w{3})/(?P<day>\d{1,2})/(?P<slug>[-\w]+)/comments/$',
view = generic_blog_entry_view,
kwargs = {'template_name':'viewpoint/entry_comments.html'},
name='viewpoint_entry_comments'
),
# A blog printing page
url(
regex = r'^(?P<year>\d{4})/(?P<month>\w{3})/(?P<day>\d{1,2})/(?P<slug>[-\w]+)/print/$',
view = generic_blog_entry_view,
kwargs = {'template_name':'viewpoint/entry_print.html'},
name='viewpoint_entry_print'
),
)
| callowayproject/django-viewpoint | viewpoint/urls_defaultblog.py | Python | apache-2.0 | 2,705 |
"""A module to deal with processes."""
import datetime
def uptime(asstr=False, path='/proc/uptime'):
    """Return the system uptime.

    Reads *path* (by default ``/proc/uptime``, which contains two
    space-separated floats: seconds since boot and cumulative idle
    seconds) and truncates each field to whole seconds.

    Args:
        asstr: when True, return the uptime formatted as a
            ``H:MM:SS``-style string via ``datetime.timedelta``.
        path: the file to read; overridable mainly for testing on
            systems without ``/proc``.

    Returns:
        ``[uptime_seconds, idle_seconds]`` as ints, or a single string
        when *asstr* is true.
    """
    with open(path, 'r') as ut:
        raw = ut.read().strip()  # strip() is safer than slicing off [-1]
    # Each field looks like "12345.67"; truncate to whole seconds.
    uts = [int(float(field)) for field in raw.split()]
    if asstr:
        return str(datetime.timedelta(seconds=uts[0]))
    return uts
| JoelBondurant/RandomCodeSamples | python/proc.py | Python | apache-2.0 | 312 |
# Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
from textwrap import dedent
from pants.backend.python import target_types_rules
from pants.backend.python.goals.lockfile import GeneratePythonLockfile
from pants.backend.python.subsystems.ipython import IPythonLockfileSentinel
from pants.backend.python.subsystems.ipython import rules as subsystem_rules
from pants.backend.python.target_types import PythonSourcesGeneratorTarget
from pants.backend.python.util_rules.interpreter_constraints import InterpreterConstraints
from pants.core.target_types import GenericTarget
from pants.testutil.rule_runner import QueryRule, RuleRunner
def test_setup_lockfile_interpreter_constraints() -> None:
    """Verify which interpreter constraints the IPython lockfile request uses.

    The generated lockfile request should use the union (OR) of the
    distinct constraint sets declared by Python targets in the repo,
    falling back to the global [python] constraints when no Python
    targets exist.
    """
    # Rule runner with just enough rules registered to resolve an
    # IPython lockfile request.
    rule_runner = RuleRunner(
        rules=[
            *subsystem_rules(),
            *target_types_rules.rules(),
            QueryRule(GeneratePythonLockfile, [IPythonLockfileSentinel]),
        ],
        target_types=[PythonSourcesGeneratorTarget, GenericTarget],
    )
    global_constraint = "==3.9.*"
    # A custom lockfile path must be set for the sentinel to resolve.
    rule_runner.set_options(
        ["--ipython-lockfile=lockfile.txt"],
        env={"PANTS_PYTHON_INTERPRETER_CONSTRAINTS": f"['{global_constraint}']"},
    )
    def assert_ics(build_file: str, expected: list[str]) -> None:
        # Re-write the BUILD file and check the constraints on the
        # resulting lockfile request.
        rule_runner.write_files({"project/BUILD": build_file})
        lockfile_request = rule_runner.request(GeneratePythonLockfile, [IPythonLockfileSentinel()])
        assert lockfile_request.interpreter_constraints == InterpreterConstraints(expected)
    assert_ics("python_sources()", [global_constraint])
    assert_ics("python_sources(interpreter_constraints=['==2.7.*'])", ["==2.7.*"])
    assert_ics(
        "python_sources(interpreter_constraints=['==2.7.*', '==3.5.*'])", ["==2.7.*", "==3.5.*"]
    )
    # If no Python targets in repo, fall back to global [python] constraints.
    assert_ics("target()", [global_constraint])
    # If there are multiple distinct ICs in the repo, we OR them. Even though the user might AND
    # them by running `./pants repl ::`, they could also run on more precise subsets like
    # `./pants repl py2::` and then `./pants repl py3::`
    assert_ics(
        dedent(
            """\
            python_sources(name='a', interpreter_constraints=['==2.7.*'])
            python_sources(name='b', interpreter_constraints=['==3.5.*'])
            """
        ),
        ["==2.7.*", "==3.5.*"],
    )
    assert_ics(
        dedent(
            """\
            python_sources(name='a', interpreter_constraints=['==2.7.*', '==3.5.*'])
            python_sources(name='b', interpreter_constraints=['>=3.5'])
            """
        ),
        ["==2.7.*", "==3.5.*", ">=3.5"],
    )
    # Targets with no explicit constraints contribute the global default.
    assert_ics(
        dedent(
            """\
            python_sources(name='a')
            python_sources(name='b', interpreter_constraints=['==2.7.*'])
            python_sources(name='c', interpreter_constraints=['>=3.6'])
            """
        ),
        ["==2.7.*", global_constraint, ">=3.6"],
    )
| pantsbuild/pants | src/python/pants/backend/python/subsystems/ipython_test.py | Python | apache-2.0 | 3,096 |
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Package version for dm_memorytasks.
Kept in separate file so it can be used during installation.
"""
# Single source of truth for the package version, in PEP 440 format.
__version__ = '1.0.3'  # https://www.python.org/dev/peps/pep-0440/
| deepmind/dm_memorytasks | dm_memorytasks/_version.py | Python | apache-2.0 | 867 |
# Copyright (C) 2012 - 2014 EMC Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The consistencygroups api."""
import webob
from webob import exc
from cinder.api import common
from cinder.api import extensions
from cinder.api.openstack import wsgi
from cinder.api.views import consistencygroups as consistencygroup_views
from cinder.api import xmlutil
from cinder import consistencygroup as consistencygroupAPI
from cinder import exception
from cinder.i18n import _, _LI
from cinder.openstack.common import log as logging
from cinder import utils
# Module-level logger, named after this module.
LOG = logging.getLogger(__name__)
def make_consistencygroup(elem):
    """Declare the attributes serialized for a consistency group element."""
    for attribute in ('id', 'status', 'availability_zone', 'created_at',
                      'name', 'description'):
        elem.set(attribute)
def make_consistencygroup_from_src(elem):
    """Declare the attributes serialized for a from-source group element."""
    for attribute in ('id', 'status', 'created_at', 'name', 'description',
                      'cgsnapshot_id'):
        elem.set(attribute)
class ConsistencyGroupTemplate(xmlutil.TemplateBuilder):
    """XML serialization template for a single consistency group."""

    def construct(self):
        """Build the master template rooted at one consistencygroup."""
        group_elem = xmlutil.TemplateElement('consistencygroup',
                                             selector='consistencygroup')
        make_consistencygroup(group_elem)
        return xmlutil.MasterTemplate(
            group_elem, 1,
            nsmap={Consistencygroups.alias: Consistencygroups.namespace})
class ConsistencyGroupsTemplate(xmlutil.TemplateBuilder):
    """XML serialization template for a list of consistency groups."""

    def construct(self):
        """Build the master template for the consistencygroups collection."""
        root = xmlutil.TemplateElement('consistencygroups')
        child = xmlutil.SubTemplateElement(root, 'consistencygroup',
                                           selector='consistencygroups')
        make_consistencygroup(child)
        return xmlutil.MasterTemplate(
            root, 1,
            nsmap={Consistencygroups.alias: Consistencygroups.namespace})
class ConsistencyGroupFromSrcTemplate(xmlutil.TemplateBuilder):
    """XML serialization template for a group created from a source."""

    def construct(self):
        """Build the master template for a consistencygroup-from-src element."""
        src_elem = xmlutil.TemplateElement('consistencygroup-from-src',
                                           selector='consistencygroup-from-src')
        make_consistencygroup_from_src(src_elem)
        return xmlutil.MasterTemplate(
            src_elem, 1,
            nsmap={Consistencygroups.alias: Consistencygroups.namespace})
class CreateDeserializer(wsgi.MetadataXMLDeserializer):
    """Deserialize an XML consistency-group create request body."""

    def default(self, string):
        """Parse *string* into the dict shape the controller expects."""
        dom = utils.safe_minidom_parse_string(string)
        extracted = self._extract_consistencygroup(dom)
        return {'body': {'consistencygroup': extracted}}

    def _extract_consistencygroup(self, node):
        """Pull the supported attributes off the consistencygroup element."""
        cg_node = self.find_first_child_named(node, 'consistencygroup')
        result = {}
        for attr in ('name', 'description'):
            value = cg_node.getAttribute(attr)
            # Empty attribute values are treated as absent, as before.
            if value:
                result[attr] = value
        return result
class CreateFromSrcDeserializer(wsgi.MetadataXMLDeserializer):
    """Deserialize an XML create-from-source request body."""

    def default(self, string):
        """Parse *string* into the dict shape the controller expects."""
        dom = utils.safe_minidom_parse_string(string)
        extracted = self._extract_consistencygroup(dom)
        return {'body': {'consistencygroup-from-src': extracted}}

    def _extract_consistencygroup(self, node):
        """Pull the supported attributes off the from-src element."""
        src_node = self.find_first_child_named(node,
                                               'consistencygroup-from-src')
        result = {}
        for attr in ('cgsnapshot', 'name', 'description'):
            value = src_node.getAttribute(attr)
            # Empty attribute values are treated as absent, as before.
            if value:
                result[attr] = value
        return result
class ConsistencyGroupsController(wsgi.Controller):
    """The ConsistencyGroups API controller for the OpenStack API."""
    # View builder used to shape API responses.
    _view_builder_class = consistencygroup_views.ViewBuilder
    def __init__(self):
        # Facade over the consistency-group service layer.
        self.consistencygroup_api = consistencygroupAPI.API()
        super(ConsistencyGroupsController, self).__init__()
    @wsgi.serializers(xml=ConsistencyGroupTemplate)
    def show(self, req, id):
        """Return data about the given consistency group.

        Raises HTTP 404 when the id does not resolve to a group.
        """
        LOG.debug('show called for member %s', id)
        context = req.environ['cinder.context']
        try:
            consistencygroup = self.consistencygroup_api.get(
                context,
                group_id=id)
        except exception.ConsistencyGroupNotFound as error:
            raise exc.HTTPNotFound(explanation=error.msg)
        return self._view_builder.detail(req, consistencygroup)
    def delete(self, req, id, body):
        """Delete a consistency group.

        Accepts an optional body {'consistencygroup': {'force': bool}};
        force defaults to False when no body is supplied. Returns 202
        since the deletion proceeds asynchronously.
        """
        LOG.debug('delete called for member %s', id)
        context = req.environ['cinder.context']
        force = False
        if body:
            # NOTE(review): a non-empty body missing the 'consistencygroup'
            # key raises KeyError (HTTP 500) here — confirm callers always
            # send that key.
            cg_body = body['consistencygroup']
            force = cg_body.get('force', False)
        LOG.info(_LI('Delete consistency group with id: %s'), id,
                 context=context)
        try:
            group = self.consistencygroup_api.get(context, id)
            self.consistencygroup_api.delete(context, group, force)
        except exception.ConsistencyGroupNotFound:
            msg = _("Consistency group %s could not be found.") % id
            raise exc.HTTPNotFound(explanation=msg)
        except exception.InvalidConsistencyGroup as error:
            raise exc.HTTPBadRequest(explanation=error.msg)
        return webob.Response(status_int=202)
    @wsgi.serializers(xml=ConsistencyGroupsTemplate)
    def index(self, req):
        """Returns a summary list of consistency groups."""
        return self._get_consistencygroups(req, is_detail=False)
    @wsgi.serializers(xml=ConsistencyGroupsTemplate)
    def detail(self, req):
        """Returns a detailed list of consistency groups."""
        return self._get_consistencygroups(req, is_detail=True)
    def _get_consistencygroups(self, req, is_detail):
        """Returns a list of consistency groups through view builder.

        Shared implementation behind index() and detail(); is_detail
        selects the view builder's detailed vs. summary representation.
        """
        context = req.environ['cinder.context']
        consistencygroups = self.consistencygroup_api.get_all(context)
        # Apply the request's pagination limits before building views.
        limited_list = common.limited(consistencygroups, req)
        if is_detail:
            consistencygroups = self._view_builder.detail_list(req,
                                                               limited_list)
        else:
            consistencygroups = self._view_builder.summary_list(req,
                                                                limited_list)
        return consistencygroups
    @wsgi.response(202)
    @wsgi.serializers(xml=ConsistencyGroupTemplate)
    @wsgi.deserializers(xml=CreateDeserializer)
    def create(self, req, body):
        """Create a new consistency group.

        Requires a body of the form
        {'consistencygroup': {'name': ..., 'volume_types': ..., ...}};
        volume_types is mandatory. Responds 202 on success.
        """
        LOG.debug('Creating new consistency group %s', body)
        if not self.is_valid_body(body, 'consistencygroup'):
            raise exc.HTTPBadRequest()
        context = req.environ['cinder.context']
        try:
            consistencygroup = body['consistencygroup']
        except KeyError:
            msg = _("Incorrect request body format")
            raise exc.HTTPBadRequest(explanation=msg)
        name = consistencygroup.get('name', None)
        description = consistencygroup.get('description', None)
        volume_types = consistencygroup.get('volume_types', None)
        if not volume_types:
            msg = _("volume_types must be provided to create "
                    "consistency group %(name)s.") % {'name': name}
            raise exc.HTTPBadRequest(explanation=msg)
        availability_zone = consistencygroup.get('availability_zone', None)
        LOG.info(_LI("Creating consistency group %(name)s."),
                 {'name': name},
                 context=context)
        try:
            new_consistencygroup = self.consistencygroup_api.create(
                context, name, description, volume_types,
                availability_zone=availability_zone)
        except exception.InvalidConsistencyGroup as error:
            raise exc.HTTPBadRequest(explanation=error.msg)
        except exception.InvalidVolumeType as error:
            raise exc.HTTPBadRequest(explanation=error.msg)
        except exception.ConsistencyGroupNotFound as error:
            raise exc.HTTPNotFound(explanation=error.msg)
        # dict.iteritems() is Python 2 only; this module targets py2.
        retval = self._view_builder.summary(
            req,
            dict(new_consistencygroup.iteritems()))
        return retval
    @wsgi.response(202)
    @wsgi.serializers(xml=ConsistencyGroupFromSrcTemplate)
    @wsgi.deserializers(xml=CreateFromSrcDeserializer)
    def create_from_src(self, req, body):
        """Create a new consistency group from a source.

        The source can be a snapshot. It could be extended
        in the future to support other sources. Note that
        this does not require volume_types as the "create"
        API above.
        """
        LOG.debug('Creating new consistency group %s.', body)
        if not self.is_valid_body(body, 'consistencygroup-from-src'):
            raise exc.HTTPBadRequest()
        context = req.environ['cinder.context']
        try:
            consistencygroup = body['consistencygroup-from-src']
        except KeyError:
            msg = _("Incorrect request body format.")
            raise exc.HTTPBadRequest(explanation=msg)
        name = consistencygroup.get('name', None)
        description = consistencygroup.get('description', None)
        cgsnapshot_id = consistencygroup.get('cgsnapshot_id', None)
        if not cgsnapshot_id:
            msg = _("Cgsnapshot id must be provided to create "
                    "consistency group %(name)s from source.") % {'name': name}
            raise exc.HTTPBadRequest(explanation=msg)
        LOG.info(_LI("Creating consistency group %(name)s from cgsnapshot "
                     "%(snap)s."),
                 {'name': name, 'snap': cgsnapshot_id},
                 context=context)
        try:
            new_consistencygroup = self.consistencygroup_api.create_from_src(
                context, name, description, cgsnapshot_id)
        except exception.InvalidConsistencyGroup as error:
            raise exc.HTTPBadRequest(explanation=error.msg)
        except exception.CgSnapshotNotFound as error:
            raise exc.HTTPBadRequest(explanation=error.msg)
        except exception.ConsistencyGroupNotFound as error:
            raise exc.HTTPNotFound(explanation=error.msg)
        except exception.CinderException as error:
            # Catch-all for other service-layer failures; map to 400.
            raise exc.HTTPBadRequest(explanation=error.msg)
        # dict.iteritems() is Python 2 only; this module targets py2.
        retval = self._view_builder.summary(
            req,
            dict(new_consistencygroup.iteritems()))
        return retval
    @wsgi.serializers(xml=ConsistencyGroupTemplate)
    def update(self, req, id, body):
        """Update the consistency group.

        Expected format of the input parameter 'body':
        {
            "consistencygroup":
            {
                "name": "my_cg",
                "description": "My consistency group",
                "add_volumes": "volume-uuid-1,volume-uuid-2,..."
                "remove_volumes": "volume-uuid-8,volume-uuid-9,..."
            }
        }
        At least one of the four fields must be non-empty.
        """
        LOG.debug('Update called for consistency group %s.', id)
        if not body:
            msg = _("Missing request body.")
            raise exc.HTTPBadRequest(explanation=msg)
        if not self.is_valid_body(body, 'consistencygroup'):
            msg = _("Incorrect request body format.")
            raise exc.HTTPBadRequest(explanation=msg)
        context = req.environ['cinder.context']
        consistencygroup = body.get('consistencygroup', None)
        name = consistencygroup.get('name', None)
        description = consistencygroup.get('description', None)
        add_volumes = consistencygroup.get('add_volumes', None)
        remove_volumes = consistencygroup.get('remove_volumes', None)
        if (not name and not description and not add_volumes
                and not remove_volumes):
            msg = _("Name, description, add_volumes, and remove_volumes "
                    "can not be all empty in the request body.")
            raise exc.HTTPBadRequest(explanation=msg)
        LOG.info(_LI("Updating consistency group %(id)s with name %(name)s "
                     "description: %(description)s add_volumes: "
                     "%(add_volumes)s remove_volumes: %(remove_volumes)s."),
                 {'id': id, 'name': name,
                  'description': description,
                  'add_volumes': add_volumes,
                  'remove_volumes': remove_volumes},
                 context=context)
        try:
            group = self.consistencygroup_api.get(context, id)
            self.consistencygroup_api.update(
                context, group, name, description,
                add_volumes, remove_volumes)
        except exception.ConsistencyGroupNotFound:
            msg = _("Consistency group %s could not be found.") % id
            raise exc.HTTPNotFound(explanation=msg)
        except exception.InvalidConsistencyGroup as error:
            raise exc.HTTPBadRequest(explanation=error.msg)
        # 202 Accepted: the update proceeds asynchronously.
        return webob.Response(status_int=202)
class Consistencygroups(extensions.ExtensionDescriptor):
    """consistency groups support."""

    name = 'Consistencygroups'
    alias = 'consistencygroups'
    namespace = 'http://docs.openstack.org/volume/ext/consistencygroups/api/v1'
    updated = '2014-08-18T00:00:00+00:00'

    def get_resources(self):
        """Register the consistencygroups resource with the API router."""
        resource = extensions.ResourceExtension(
            Consistencygroups.alias, ConsistencyGroupsController(),
            collection_actions={'detail': 'GET', 'create_from_src': 'POST'},
            member_actions={'delete': 'POST', 'update': 'PUT'})
        return [resource]
| Akrog/cinder | cinder/api/contrib/consistencygroups.py | Python | apache-2.0 | 14,556 |
#!/usr/bin/python
#
# Copyright 2007 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Unittests for the portpicker module."""
from __future__ import print_function
import errno
import os
import random
import socket
import sys
import unittest
from contextlib import ExitStack
if sys.platform == 'win32':
import _winapi
else:
_winapi = None
try:
# pylint: disable=no-name-in-module
from unittest import mock # Python >= 3.3.
except ImportError:
import mock # https://pypi.python.org/pypi/mock
import portpicker
class PickUnusedPortTest(unittest.TestCase):
    """Tests for portpicker's port selection and port-server helpers."""
    def IsUnusedTCPPort(self, port):
        # True if we can bind the port for TCP right now.
        return self._bind(port, socket.SOCK_STREAM, socket.IPPROTO_TCP)
    def IsUnusedUDPPort(self, port):
        # True if we can bind the port for UDP right now.
        return self._bind(port, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
    def setUp(self):
        # So we can Bind even if portpicker.bind is stubbed out.
        self._bind = portpicker.bind
        # Reset portpicker's module-level bookkeeping between tests.
        portpicker._owned_ports.clear()
        portpicker._free_ports.clear()
        portpicker._random_ports.clear()
    def testPickUnusedPortActuallyWorks(self):
        """This test can be flaky."""
        for _ in range(10):
            port = portpicker.pick_unused_port()
            self.assertTrue(self.IsUnusedTCPPort(port))
            self.assertTrue(self.IsUnusedUDPPort(port))
    @unittest.skipIf('PORTSERVER_ADDRESS' not in os.environ,
                     'no port server to test against')
    def testPickUnusedCanSuccessfullyUsePortServer(self):
        with mock.patch.object(portpicker, '_pick_unused_port_without_server'):
            portpicker._pick_unused_port_without_server.side_effect = (
                Exception('eek!')
            )
            # Since _PickUnusedPortWithoutServer() raises an exception, if we
            # can successfully obtain a port, the portserver must be working.
            port = portpicker.pick_unused_port()
            self.assertTrue(self.IsUnusedTCPPort(port))
            self.assertTrue(self.IsUnusedUDPPort(port))
    @unittest.skipIf('PORTSERVER_ADDRESS' not in os.environ,
                     'no port server to test against')
    def testPickUnusedCanSuccessfullyUsePortServerAddressKwarg(self):
        with mock.patch.object(portpicker, '_pick_unused_port_without_server'):
            portpicker._pick_unused_port_without_server.side_effect = (
                Exception('eek!')
            )
            # Since _PickUnusedPortWithoutServer() raises an exception, and
            # we've temporarily removed PORTSERVER_ADDRESS from os.environ, if
            # we can successfully obtain a port, the portserver must be working.
            addr = os.environ.pop('PORTSERVER_ADDRESS')
            try:
                port = portpicker.pick_unused_port(portserver_address=addr)
                self.assertTrue(self.IsUnusedTCPPort(port))
                self.assertTrue(self.IsUnusedUDPPort(port))
            finally:
                os.environ['PORTSERVER_ADDRESS'] = addr
    @unittest.skipIf('PORTSERVER_ADDRESS' not in os.environ,
                     'no port server to test against')
    def testGetPortFromPortServer(self):
        """Exercise the get_port_from_port_server() helper function."""
        for _ in range(10):
            port = portpicker.get_port_from_port_server(
                os.environ['PORTSERVER_ADDRESS'])
            self.assertTrue(self.IsUnusedTCPPort(port))
            self.assertTrue(self.IsUnusedUDPPort(port))
    def testSendsPidToPortServer(self):
        # The portserver speaks a line protocol: client sends its pid,
        # server replies with a port number. Windows uses named pipes
        # (_winapi); everything else uses a socket.
        with ExitStack() as stack:
            if _winapi:
                create_file_mock = mock.Mock()
                create_file_mock.return_value = 0
                read_file_mock = mock.Mock()
                write_file_mock = mock.Mock()
                read_file_mock.return_value = (b'42768\n', 0)
                stack.enter_context(
                    mock.patch('_winapi.CreateFile', new=create_file_mock))
                stack.enter_context(
                    mock.patch('_winapi.WriteFile', new=write_file_mock))
                stack.enter_context(
                    mock.patch('_winapi.ReadFile', new=read_file_mock))
                port = portpicker.get_port_from_port_server(
                    'portserver', pid=1234)
                write_file_mock.assert_called_once_with(0, b'1234\n')
            else:
                server = mock.Mock()
                server.recv.return_value = b'42768\n'
                stack.enter_context(
                    mock.patch.object(socket, 'socket', return_value=server))
                port = portpicker.get_port_from_port_server(
                    'portserver', pid=1234)
                server.sendall.assert_called_once_with(b'1234\n')
            self.assertEqual(port, 42768)
    def testPidDefaultsToOwnPid(self):
        with ExitStack() as stack:
            stack.enter_context(
                mock.patch.object(os, 'getpid', return_value=9876))
            if _winapi:
                create_file_mock = mock.Mock()
                create_file_mock.return_value = 0
                read_file_mock = mock.Mock()
                write_file_mock = mock.Mock()
                read_file_mock.return_value = (b'52768\n', 0)
                stack.enter_context(
                    mock.patch('_winapi.CreateFile', new=create_file_mock))
                stack.enter_context(
                    mock.patch('_winapi.WriteFile', new=write_file_mock))
                stack.enter_context(
                    mock.patch('_winapi.ReadFile', new=read_file_mock))
                port = portpicker.get_port_from_port_server('portserver')
                write_file_mock.assert_called_once_with(0, b'9876\n')
            else:
                server = mock.Mock()
                server.recv.return_value = b'52768\n'
                stack.enter_context(
                    mock.patch.object(socket, 'socket', return_value=server))
                port = portpicker.get_port_from_port_server('portserver')
                server.sendall.assert_called_once_with(b'9876\n')
            self.assertEqual(port, 52768)
    @mock.patch.dict(os.environ,{'PORTSERVER_ADDRESS': 'portserver'})
    def testReusesPortServerPorts(self):
        # return_port() should hand a portserver-assigned port back out
        # on the next request instead of asking the server again.
        with ExitStack() as stack:
            if _winapi:
                read_file_mock = mock.Mock()
                read_file_mock.side_effect = [
                    (b'12345\n', 0),
                    (b'23456\n', 0),
                    (b'34567\n', 0),
                ]
                stack.enter_context(mock.patch('_winapi.CreateFile'))
                stack.enter_context(mock.patch('_winapi.WriteFile'))
                stack.enter_context(
                    mock.patch('_winapi.ReadFile', new=read_file_mock))
            else:
                server = mock.Mock()
                server.recv.side_effect = [b'12345\n', b'23456\n', b'34567\n']
                stack.enter_context(
                    mock.patch.object(socket, 'socket', return_value=server))
            self.assertEqual(portpicker.pick_unused_port(), 12345)
            self.assertEqual(portpicker.pick_unused_port(), 23456)
            portpicker.return_port(12345)
            self.assertEqual(portpicker.pick_unused_port(), 12345)
    @mock.patch.dict(os.environ,{'PORTSERVER_ADDRESS': ''})
    def testDoesntReuseRandomPorts(self):
        # Without a portserver, returned ports go back to the OS rather
        # than portpicker's reuse pool.
        ports = set()
        for _ in range(10):
            try:
                port = portpicker.pick_unused_port()
            except portpicker.NoFreePortFoundError:
                # This sometimes happens when not using portserver. Just
                # skip to the next attempt.
                continue
            ports.add(port)
            portpicker.return_port(port)
        self.assertGreater(len(ports), 5)  # Allow some random reuse.
    def testReturnsReservedPorts(self):
        with mock.patch.object(portpicker, '_pick_unused_port_without_server'):
            portpicker._pick_unused_port_without_server.side_effect = (
                Exception('eek!'))
            # Arbitrary port. In practice you should get this from somewhere
            # that assigns ports.
            reserved_port = 28465
            portpicker.add_reserved_port(reserved_port)
            ports = set()
            for _ in range(10):
                port = portpicker.pick_unused_port()
                ports.add(port)
                portpicker.return_port(port)
            self.assertEqual(len(ports), 1)
            self.assertEqual(ports.pop(), reserved_port)
    @mock.patch.dict(os.environ,{'PORTSERVER_ADDRESS': ''})
    def testFallsBackToRandomAfterRunningOutOfReservedPorts(self):
        # Arbitrary port. In practice you should get this from somewhere
        # that assigns ports.
        reserved_port = 23456
        portpicker.add_reserved_port(reserved_port)
        self.assertEqual(portpicker.pick_unused_port(), reserved_port)
        self.assertNotEqual(portpicker.pick_unused_port(), reserved_port)
    def testRandomlyChosenPorts(self):
        # Unless this box is under an overwhelming socket load, this test
        # will heavily exercise the "pick a port randomly" part of the
        # port picking code, but may never hit the "OS assigns a port"
        # code.
        ports = 0
        for _ in range(100):
            try:
                port = portpicker._pick_unused_port_without_server()
            except portpicker.NoFreePortFoundError:
                # Without the portserver, pick_unused_port can sometimes fail
                # to find a free port. Check that it passes most of the time.
                continue
            self.assertTrue(self.IsUnusedTCPPort(port))
            self.assertTrue(self.IsUnusedUDPPort(port))
            ports += 1
        # Getting a port shouldn't have failed very often, even on machines
        # with a heavy socket load.
        self.assertGreater(ports, 95)
    def testOSAssignedPorts(self):
        self.last_assigned_port = None
        def error_for_explicit_ports(port, socket_type, socket_proto):
            # Only successfully return a port if an OS-assigned port is
            # requested, or if we're checking that the last OS-assigned port
            # is unused on the other protocol.
            if port == 0 or port == self.last_assigned_port:
                self.last_assigned_port = self._bind(port, socket_type,
                                                     socket_proto)
                return self.last_assigned_port
            else:
                return None
        with mock.patch.object(portpicker, 'bind', error_for_explicit_ports):
            # Without server, this can be little flaky, so check that it
            # passes most of the time.
            ports = 0
            for _ in range(100):
                try:
                    port = portpicker._pick_unused_port_without_server()
                except portpicker.NoFreePortFoundError:
                    continue
                self.assertTrue(self.IsUnusedTCPPort(port))
                self.assertTrue(self.IsUnusedUDPPort(port))
                ports += 1
            self.assertGreater(ports, 70)
    def pickUnusedPortWithoutServer(self):
        # Try a few times to pick a port, to avoid flakiness and to make sure
        # the code path we want was exercised.
        for _ in range(5):
            try:
                port = portpicker._pick_unused_port_without_server()
            except portpicker.NoFreePortFoundError:
                continue
            else:
                self.assertTrue(self.IsUnusedTCPPort(port))
                self.assertTrue(self.IsUnusedUDPPort(port))
                return
        self.fail("Failed to find a free port")
    def testPickPortsWithoutServer(self):
        # Test the first part of _pick_unused_port_without_server, which
        # tries a few random ports and checks is_port_free.
        self.pickUnusedPortWithoutServer()
        # Now test the second part, the fallback from above, which asks the
        # OS for a port.
        def mock_port_free(port):
            return False
        with mock.patch.object(portpicker, 'is_port_free', mock_port_free):
            self.pickUnusedPortWithoutServer()
    def checkIsPortFree(self):
        """This might be flaky unless this test is run with a portserver."""
        # The port should be free initially.
        port = portpicker.pick_unused_port()
        self.assertTrue(portpicker.is_port_free(port))
        cases = [
            (socket.AF_INET, socket.SOCK_STREAM, None),
            (socket.AF_INET6, socket.SOCK_STREAM, 1),
            (socket.AF_INET, socket.SOCK_DGRAM, None),
            (socket.AF_INET6, socket.SOCK_DGRAM, 1),
        ]
        # Using v6only=0 on Windows doesn't result in collisions
        if not _winapi:
            cases.extend([
                (socket.AF_INET6, socket.SOCK_STREAM, 0),
                (socket.AF_INET6, socket.SOCK_DGRAM, 0),
            ])
        for (sock_family, sock_type, v6only) in cases:
            # Occupy the port on a subset of possible protocols.
            try:
                sock = socket.socket(sock_family, sock_type, 0)
            except socket.error:
                print('Kernel does not support sock_family=%d' % sock_family,
                      file=sys.stderr)
                # Skip this case, since we cannot occupy a port.
                continue
            if not hasattr(socket, 'IPPROTO_IPV6'):
                v6only = None
            if v6only is not None:
                try:
                    sock.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_V6ONLY,
                                    v6only)
                except socket.error:
                    print('Kernel does not support IPV6_V6ONLY=%d' % v6only,
                          file=sys.stderr)
                    # Don't care; just proceed with the default.
            # Socket may have been taken in the mean time, so catch the
            # socket.error with errno set to EADDRINUSE and skip this
            # attempt.
            try:
                sock.bind(('', port))
            except socket.error as e:
                if e.errno == errno.EADDRINUSE:
                    raise portpicker.NoFreePortFoundError
                raise
            # The port should be busy.
            self.assertFalse(portpicker.is_port_free(port))
            sock.close()
            # Now it's free again.
            self.assertTrue(portpicker.is_port_free(port))
    def testIsPortFree(self):
        # This can be quite flaky on a busy host, so try a few times.
        for _ in range(10):
            try:
                self.checkIsPortFree()
            except portpicker.NoFreePortFoundError:
                pass
            else:
                return
        self.fail("checkPortIsFree failed every time.")
    def testIsPortFreeException(self):
        port = portpicker.pick_unused_port()
        with mock.patch.object(socket, 'socket') as mock_sock:
            mock_sock.side_effect = socket.error('fake socket error', 0)
            self.assertFalse(portpicker.is_port_free(port))
    def testThatLegacyCapWordsAPIsExist(self):
        """The original APIs were CapWords style, 1.1 added PEP8 names."""
        self.assertEqual(portpicker.bind, portpicker.Bind)
        self.assertEqual(portpicker.is_port_free, portpicker.IsPortFree)
        self.assertEqual(portpicker.pick_unused_port, portpicker.PickUnusedPort)
        self.assertEqual(portpicker.get_port_from_port_server,
                         portpicker.GetPortFromPortServer)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| google/python_portpicker | src/tests/portpicker_test.py | Python | apache-2.0 | 16,155 |
#!/usr/bin/env python
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
from resource_management.core.resources.system import Execute
from resource_management.libraries.functions import format
from resource_management.libraries.functions import Direction
from resource_management.core.exceptions import Fail
from resource_management.core.logger import Logger
def run_migration(env, upgrade_type):
  """
  If the acl migration script is present, then run it for either upgrade or downgrade.
  That script was introduced in HDP 2.3.4.0 and requires stopping all Kafka brokers first.
  Requires configs to be present.
  :param env: Environment.
  :param upgrade_type: "rolling" or "nonrolling".
  :param upgrade_direction is read from params (Direction.UPGRADE/DOWNGRADE).
  """
  import params
  if upgrade_type is None:
    raise Fail('Parameter "upgrade_type" is missing.')
  if params.upgrade_direction is None:
    raise Fail('Parameter "upgrade_direction" is missing.')
  if not params.security_enabled:
    # ACL migration is only meaningful on secured (kerberized) clusters.
    Logger.info("Skip running the Kafka ACL migration script since cluster security is not enabled.")
    return
  Logger.info("Upgrade type: {0}, direction: {1}".format(str(upgrade_type), params.upgrade_direction))
  # If the schema upgrade script exists in the version upgrading to, then attempt to upgrade/downgrade it while still using the present bits.
  kafka_acls_script = None
  command_suffix = ""
  # NOTE: format() here is resource_management's helper, which interpolates
  # names like {stack_root}/{version} from the surrounding scope/params —
  # do not rename these variables casually.
  if params.upgrade_direction == Direction.UPGRADE:
    kafka_acls_script = format("{stack_root}/{version}/kafka/bin/kafka-acls.sh")
    command_suffix = "--upgradeAcls"
  elif params.upgrade_direction == Direction.DOWNGRADE:
    kafka_acls_script = format("{stack_root}/{downgrade_from_version}/kafka/bin/kafka-acls.sh")
    command_suffix = "--downgradeAcls"
  if kafka_acls_script is not None:
    if os.path.exists(kafka_acls_script):
      Logger.info("Found Kafka acls script: {0}".format(kafka_acls_script))
      if params.zookeeper_connect is None:
        raise Fail("Could not retrieve property kafka-broker/zookeeper.connect")
      acls_command = "{0} --authorizer kafka.security.auth.SimpleAclAuthorizer --authorizer-properties zookeeper.connect={1} {2}".\
        format(kafka_acls_script, params.zookeeper_connect, command_suffix)
      # Run the migration as the kafka service user.
      Execute(acls_command,
              user=params.kafka_user,
              logoutput=True)
    else:
      Logger.info("Did not find Kafka acls script: {0}".format(kafka_acls_script))
#
# Author: Endre Karlson <endre.karlson@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from cliff.command import Command as CliffCommand
from cliff.lister import Lister
from cliff.show import ShowOne
from fakturo.core import utils
class Command(CliffCommand):
    """Base cliff command that delegates execution to the provider manager.

    Subclasses set :attr:`api` (resource name, e.g. ``account``) and
    :attr:`action` (verb, e.g. ``create``); together they determine the
    CLI command name and the provider method that is invoked.
    """

    # Resource the command operates on, e.g. 'account'.
    api = None
    # Action performed on the resource, e.g. 'create'.
    action = None

    @property
    def name(self):
        """
        The name of the command

        api-action like account-create
        """
        if self.api is None or self.action is None:
            return None
        return self.api + '-' + self.action

    @property
    def method_name(self):
        # Provider methods use underscores, e.g. 'account_create'.
        return self.name.replace('-', '_') if self.name else None

    def get_parser(self, prog_name):
        """
        Override get_parser in order to get equivalent from the Provider
        and extend options
        """
        parser = super(Command, self).get_parser(prog_name)
        self.app.provider_manager.extend_parser(self.method_name, parser)
        return parser

    def execute(self, parsed_args):
        """
        Execute something, this is since we overload self.take_action()
        in order to format the data

        :param parsed_args: The parsed args that are given by take_action()
        """
        return self.app.provider_manager.execute(
            self.method_name,
            parsed_args,
            self)

    def post_execute(self, data):
        """
        Format the results locally if needed, by default we just return data

        :param data: Whatever is returned by self.execute()
        """
        return data

    def take_action(self, parsed_args):
        """
        Call self.execute to get data and then format it a bit with post_exec
        """
        # TODO: Common Exception Handling Here
        results = self.execute(parsed_args)
        return self.post_execute(results)
class ListCommand(Command, Lister):
    """Command that lists a collection of resources as a table."""

    action = 'list'

    def post_execute(self, results):
        """Turn a list of resource dicts into (columns, rows) for Lister."""
        if not results:
            return [], ()
        columns = utils.get_columns(results)
        rows = [utils.get_item_properties(item, columns) for item in results]
        return columns, rows
class GetCommand(Command, ShowOne):
    """Command that fetches and displays a single resource."""

    action = 'get'

    def post_execute(self, results):
        """Split the resource dict into (field names, field values)."""
        columns, values = results.keys(), results.values()
        return columns, values
class CreateCommand(Command, ShowOne):
    """Command that creates a resource and displays the result."""

    action = 'create'

    def post_execute(self, results):
        """Split the created resource dict into (field names, field values)."""
        columns, values = results.keys(), results.values()
        return columns, values
class UpdateCommand(Command, ShowOne):
    """Command that updates a resource and displays the result."""

    action = 'update'

    def post_execute(self, results):
        """Split the updated resource dict into (field names, field values)."""
        columns, values = results.keys(), results.values()
        return columns, values
class DeleteCommand(Command):
    # Deletes return no displayable data, so the plain Command base
    # (no Lister/ShowOne formatting mixin) is sufficient.
    action = 'delete'
# Public API of this module.
__all__ = ["Command", "ListCommand", "GetCommand", "CreateCommand",
           "UpdateCommand", "DeleteCommand"]
| billingstack/python-fakturo | fakturo/core/cli/base.py | Python | apache-2.0 | 3,279 |
# Copyright 2015 Cisco Systems, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import uuidutils
from sqlalchemy.orm import exc
from sqlalchemy.sql import expression as expr
from neutron.db import models_v2
from neutron.extensions import l3
from neutron_lib import constants as l3_constants
from neutron_lib import exceptions as n_exc
from networking_cisco._i18n import _
from networking_cisco import backwards_compatibility as bc
from networking_cisco.plugins.cisco.common import cisco_constants
from networking_cisco.plugins.cisco.db.l3 import ha_db
from networking_cisco.plugins.cisco.db.l3 import l3_models
from networking_cisco.plugins.cisco.db.l3.l3_router_appliance_db import (
L3RouterApplianceDBMixin)
from networking_cisco.plugins.cisco.extensions import routerhostingdevice
from networking_cisco.plugins.cisco.extensions import routerrole
from networking_cisco.plugins.cisco.extensions import routertype
from networking_cisco.plugins.cisco.extensions import routertypeawarescheduler
from networking_cisco.plugins.cisco.l3 import drivers
LOG = logging.getLogger(__name__)

# Shorthands for constants defined in other networking-cisco modules.
DEVICE_OWNER_GLOBAL_ROUTER_GW = cisco_constants.DEVICE_OWNER_GLOBAL_ROUTER_GW
HOSTING_DEVICE_ATTR = routerhostingdevice.HOSTING_DEVICE_ATTR
ROUTER_ROLE_GLOBAL = cisco_constants.ROUTER_ROLE_GLOBAL
ROUTER_ROLE_LOGICAL_GLOBAL = cisco_constants.ROUTER_ROLE_LOGICAL_GLOBAL
ROUTER_ROLE_HA_REDUNDANCY = cisco_constants.ROUTER_ROLE_HA_REDUNDANCY

# HSRP group-id range sizes and offsets used by generate_ha_group_id.
TENANT_HSRP_GRP_RANGE = 1
TENANT_HSRP_GRP_OFFSET = 1064
EXT_HSRP_GRP_RANGE = 1
EXT_HSRP_GRP_OFFSET = 1064

# Prefix and slice length used when deriving device-side router names.
N_ROUTER_PREFIX = 'nrouter-'
DEV_NAME_LEN = 14
class TopologyNotSupportedByRouterError(n_exc.Conflict):
    # Raised (HTTP 409 Conflict) when an attachment would create a
    # topology the ASR1k driver cannot realize.
    message = _("Requested topology cannot be supported by router.")
class ASR1kL3RouterDriver(drivers.L3RouterBaseDriver):
def create_router_precommit(self, context, router_context):
    """No-op: no pre-commit work needed for router creation."""
    pass

def create_router_postcommit(self, context, router_context):
    """No-op: no post-commit work needed for router creation."""
    pass

def update_router_precommit(self, context, router_context):
    """No-op: no pre-commit work needed for router updates."""
    pass
def update_router_postcommit(self, context, router_context):
    """Sync global router presence after a gateway add/remove.

    Whenever a gateway is added to, or removed from, a router hosted on
    a hosting device, we must ensure that a global router is running
    (for add operation) or not running (for remove operation) on that
    hosting device.
    """
    current = router_context.current
    # Nothing to do until the router is actually hosted somewhere.
    if current[HOSTING_DEVICE_ATTR] is None:
        return
    e_context = context.elevated()
    if current['gw_port_id']:
        self._conditionally_add_global_router(e_context, current)
    else:
        # Gateway was just removed: use the pre-update router state.
        self._conditionally_remove_global_router(
            e_context, router_context.original, True)
def delete_router_precommit(self, context, router_context):
    """No-op: no pre-commit work needed for router deletion."""
    pass

def delete_router_postcommit(self, context, router_context):
    """No-op: no post-commit work needed for router deletion."""
    pass

def schedule_router_precommit(self, context, router_context):
    """No-op: no pre-commit work needed for router scheduling."""
    pass
def schedule_router_postcommit(self, context, router_context):
    """Ensure a global router exists once a gateway router is scheduled.

    When the hosting device hosts a Neutron router with external
    connectivity, a "global" router (modeled as a Neutron router) must
    also run on the hosting device (outside of any VRF) to enable the
    connectivity.
    """
    current = router_context.current
    if current['gw_port_id'] and current[HOSTING_DEVICE_ATTR] is not None:
        self._conditionally_add_global_router(context.elevated(), current)
def unschedule_router_precommit(self, context, router_context):
    """No-op: no pre-commit work needed for router unscheduling."""
    pass
def unschedule_router_postcommit(self, context, router_context):
    """Remove the global router when its last gateway router leaves.

    When there is no longer any router with external gateway hosted on
    a hosting device, the global router on that hosting device can also
    be removed.
    """
    current = router_context.current
    hd_id = current[HOSTING_DEVICE_ATTR]
    if current['gw_port_id'] and hd_id is not None:
        self._conditionally_remove_global_router(context.elevated(),
                                                 current)
def add_router_interface_precommit(self, context, r_port_context):
    """Reject attachments the ASR1k VLAN model cannot support.

    Inside an ASR1k, VLAN sub-interfaces are used to connect to internal
    neutron networks. Only one such sub-interface can be created for each
    VLAN. As the VLAN sub-interface is added to the VRF representing the
    Neutron router, we must only allow one Neutron router to attach to a
    particular Neutron subnet/network.

    :raises TopologyNotSupportedByRouterError: if a different router is
        already attached to the same internal network.
    """
    if (r_port_context.router_context.current[routerrole.ROUTER_ROLE_ATTR]
            == ROUTER_ROLE_HA_REDUNDANCY):
        # redundancy routers can be exempt as we check the user visible
        # routers and the request will be rejected there.
        return
    e_context = context.elevated()
    if r_port_context.current is None:
        # Attachment by subnet id: resolve the network via the subnet.
        sn = self._core_plugin.get_subnet(e_context,
                                          r_port_context.current_subnet_id)
        net_id = sn['network_id']
    else:
        net_id = r_port_context.current['network_id']
    router_id = r_port_context.router_context.current['id']
    filters = {'network_id': [net_id],
               'device_owner': [bc.constants.DEVICE_OWNER_ROUTER_INTF]}
    # Scan all router interface ports on the target network.
    for port in self._core_plugin.get_ports(e_context, filters=filters):
        device_id = port['device_id']
        if device_id is None:
            continue
        try:
            router = self._l3_plugin.get_router(e_context, device_id)
            if (router[routerrole.ROUTER_ROLE_ATTR] is None and
                    router['id'] != router_id):
                # only a single router can connect to multiple subnets
                # on the same internal network
                raise TopologyNotSupportedByRouterError()
        except n_exc.NotFound:
            if self._l3_plugin.get_ha_group(e_context, device_id):
                # Since this is a port for the HA VIP address, we can
                # safely ignore it
                continue
            else:
                LOG.warning(
                    'Spurious router port %s prevents attachement from'
                    ' being performed. Try attaching again later, and '
                    'if the operation then fails again, remove the '
                    'spurious port', port['id'])
                raise TopologyNotSupportedByRouterError()
def add_router_interface_postcommit(self, context, r_port_context):
    """No-op hook."""
    pass

def remove_router_interface_precommit(self, context, r_port_context):
    """No-op hook."""
    pass

def remove_router_interface_postcommit(self, context, r_port_context):
    """No-op hook."""
    pass

def create_floatingip_precommit(self, context, fip_context):
    """No-op hook."""
    pass

def create_floatingip_postcommit(self, context, fip_context):
    """No-op hook."""
    pass

def update_floatingip_precommit(self, context, fip_context):
    """No-op hook."""
    pass

def update_floatingip_postcommit(self, context, fip_context):
    """No-op hook."""
    pass

def delete_floatingip_precommit(self, context, fip_context):
    """No-op hook."""
    pass

def delete_floatingip_postcommit(self, context, fip_context):
    """No-op hook."""
    pass
def ha_interface_ip_address_needed(self, context, router, port,
                                   ha_settings_db, ha_group_uuid):
    """Return whether *port* needs its own interface IP in its HA group.

    Router gateway ports do not get an extra address; every other
    router port does.
    """
    return port['device_owner'] != bc.constants.DEVICE_OWNER_ROUTER_GW
def generate_ha_group_id(self, context, router, port, ha_settings_db,
                         ha_group_uuid):
    """Derive a deterministic HSRP group identity for *port*.

    Gateway-type ports hash a slice of the router's device name; other
    ports hash the leading hex digits of the network id. With the
    current *_RANGE constants of 1 the modulo is always 0, so each
    branch effectively returns its *_OFFSET constant.
    """
    if port['device_owner'] in {bc.constants.DEVICE_OWNER_ROUTER_GW,
                                DEVICE_OWNER_GLOBAL_ROUTER_GW}:
        # Derived from the router id so all ports of one router agree.
        ri_name = self._router_name(router['id'])[8:DEV_NAME_LEN]
        group_id = int(ri_name, 16) % TENANT_HSRP_GRP_RANGE
        group_id += TENANT_HSRP_GRP_OFFSET
        return group_id
    else:
        # Derived from the network id so all routers on the net agree.
        net_id_digits = port['network_id'][:6]
        group_id = int(net_id_digits, 16) % EXT_HSRP_GRP_RANGE
        group_id += EXT_HSRP_GRP_OFFSET
        return group_id
def pre_backlog_processing(self, context):
    """Garbage-collect global routers that no longer serve any router.

    Runs before the l3 plugin processes its backlog: deletes any global
    router whose hosting device no longer hosts a tenant or redundancy
    router with a gateway, and then the logical global router as well.
    """
    LOG.info('Performing pre-backlog processing')
    filters = {routerrole.ROUTER_ROLE_ATTR: [ROUTER_ROLE_GLOBAL]}
    global_routers = self._l3_plugin.get_routers(context, filters=filters)
    if not global_routers:
        LOG.debug("There are no global routers")
        return
    for gr in global_routers:
        filters = {
            HOSTING_DEVICE_ATTR: [gr[HOSTING_DEVICE_ATTR]],
            routerrole.ROUTER_ROLE_ATTR: [ROUTER_ROLE_HA_REDUNDANCY, None]
        }
        invert_filters = {'gw_port_id': [None]}
        # Count tenant/redundancy routers with a gateway on this device.
        num_rtrs = self._l3_plugin.get_routers_count_extended(
            context, filters=filters, invert_filters=invert_filters)
        LOG.debug("Global router %(name)s[%(id)s] with hosting_device "
                  "%(hd)s has %(num)d routers with gw_port set on that "
                  "device",
                  {'name': gr['name'], 'id': gr['id'],
                   'hd': gr[HOSTING_DEVICE_ATTR], 'num': num_rtrs, })
        if num_rtrs == 0:
            LOG.info(
                "Global router %(name)s[id:%(id)s] is present for "
                "hosting device %(hd)s but there are no tenant or "
                "redundancy routers with gateway set on that hosting "
                "device. Proceeding to delete global router.",
                {'name': gr['name'], 'id': gr['id'],
                 'hd': gr[HOSTING_DEVICE_ATTR]})
            self._delete_global_router(context, gr['id'])
            filters = {
                #TODO(bmelande): Filter on routertype of global router
                #routertype.TYPE_ATTR: [routertype_id],
                routerrole.ROUTER_ROLE_ATTR: [ROUTER_ROLE_LOGICAL_GLOBAL]}
            log_global_routers = self._l3_plugin.get_routers(
                context, filters=filters)
            if log_global_routers:
                log_global_router_id = log_global_routers[0]['id']
                self._delete_global_router(context, log_global_router_id,
                                           logical=True)
def post_backlog_processing(self, context):
    """No-op: nothing to do after backlog processing."""
    pass
# ---------------- Create workflow functions -----------------
def _conditionally_add_global_router(self, context, tenant_router):
    """Ensure global (and logical global) router support for a gateway.

    Creates the global router on the tenant router's hosting device if
    missing, plugs auxiliary gateway ports into the external network,
    and notifies the agents handling the global router.
    """
    # We could filter on hosting device id but we don't so we get all
    # global routers for this router type. We can then use that count to
    # determine which ha priority a new global router should get.
    filters = {
        routertype.TYPE_ATTR: [tenant_router[routertype.TYPE_ATTR]],
        routerrole.ROUTER_ROLE_ATTR: [ROUTER_ROLE_GLOBAL]}
    global_routers = self._l3_plugin.get_routers(
        context, filters=filters)
    hd_to_gr_dict = {r[HOSTING_DEVICE_ATTR]: r for r in global_routers}
    hosting_device_id = tenant_router[HOSTING_DEVICE_ATTR]
    ext_nw_id = tenant_router[l3.EXTERNAL_GW_INFO]['network_id']
    global_router = hd_to_gr_dict.get(hosting_device_id)
    logical_global_router = self._get_logical_global_router(context,
                                                            tenant_router)
    # The logical global router carries the HA VIP port (provision_ha).
    self._conditionally_add_auxiliary_external_gateway_port(
        context, logical_global_router, ext_nw_id, tenant_router, True)
    if global_router is None:
        # must create global router on hosting device
        global_router = self._create_global_router(
            context, hosting_device_id, hd_to_gr_dict, tenant_router,
            logical_global_router)
    self._conditionally_add_auxiliary_external_gateway_port(
        context, global_router, ext_nw_id, tenant_router)
    self._l3_plugin.add_type_and_hosting_device_info(context,
                                                     global_router)
    for ni in self._l3_plugin.get_notifiers(context, [global_router]):
        if ni['notifier']:
            ni['notifier'].routers_updated(context, ni['routers'])
def _conditionally_add_auxiliary_external_gateway_port(
        self, context, global_router, ext_net_id, tenant_router,
        provision_ha=False, port_type=DEVICE_OWNER_GLOBAL_ROUTER_GW):
    """Connect *global_router* to *ext_net_id* if not already connected.

    Creates or updates the auxiliary gateway port and, when
    *provision_ha* is True, provisions an HA group per fixed-ip subnet.
    """
    # the global router may or may not have an interface on the
    # external network that the tenant router uses
    filters = {
        'device_id': [global_router['id']],
        'device_owner': [port_type]}
    ext_net_port = {
        p['network_id']: p for p in
        self._core_plugin.get_ports(context, filters=filters)}
    if ext_net_id in ext_net_port:
        # already connected to the external network, called if
        # new subnets are added to the network
        aux_gw_port = self._update_auxiliary_external_gateway_port(
            context, global_router, ext_net_id, ext_net_port)
        if provision_ha:
            for subnet in aux_gw_port[ext_net_id]['fixed_ips']:
                self._provision_port_ha(context, aux_gw_port[ext_net_id],
                                        subnet, global_router)
    else:
        # not connected to the external network, so let's fix that
        aux_gw_port = self._create_auxiliary_external_gateway_port(
            context, global_router, ext_net_id, tenant_router, port_type)
        if provision_ha:
            for subnet in aux_gw_port['fixed_ips']:
                self._provision_port_ha(context, aux_gw_port, subnet,
                                        global_router)
def _update_auxiliary_external_gateway_port(
        self, context, global_router, ext_net_id, port):
    """Add fixed-ips for any new subnets of the external network.

    When a new subnet is added to an external network, the auxiliary
    gateway port in the global router must be updated with the new
    subnet_id so an ip from that subnet is assigned to the gateway port.

    :param port: dict mapping network id -> existing auxiliary port
    :returns: the (possibly updated) *port* mapping
    """
    ext_network = self._core_plugin.get_network(context, ext_net_id)
    fixed_ips = port[ext_net_id]['fixed_ips']
    # fetch the subnets the port is currently connected to
    subnet_id_list = [fixedip['subnet_id'] for fixedip in fixed_ips]
    # add the new subnet
    for subnet_id in ext_network['subnets']:
        if subnet_id not in subnet_id_list:
            fixed_ip = {'subnet_id': subnet_id}
            fixed_ips.append(fixed_ip)
    self._core_plugin.update_port(context, port[ext_net_id]['id'],
                                  ({'port': {'fixed_ips':
                                             fixed_ips}}))
    return port
def _create_auxiliary_external_gateway_port(
        self, context, global_router, ext_net_id, tenant_router,
        port_type=DEVICE_OWNER_GLOBAL_ROUTER_GW):
    """Create the auxiliary gateway port for *global_router*.

    When a global router is connected to an external network then a
    special type of gateway port is created on that network. Such a
    port is called auxiliary gateway port. It has an ip address on
    each subnet of the external network. A (logical) global router
    never has a traditional Neutron gateway port.
    """
    filters = {
        'device_id': [tenant_router['id']],
        'device_owner': [l3_constants.DEVICE_OWNER_ROUTER_GW]}
    # fetch the gateway port of the *tenant* router so we can determine
    # the CIDR of that port's subnet
    gw_port = self._core_plugin.get_ports(context,
                                          filters=filters)[0]
    fixed_ips = self._get_fixed_ips_subnets(context, gw_port)
    global_router_id = global_router['id']
    aux_gw_port = self._core_plugin.create_port(context, {
        'port': {
            'tenant_id': '',  # intentionally not set
            'network_id': ext_net_id,
            'mac_address': bc.constants.ATTR_NOT_SPECIFIED,
            'fixed_ips': fixed_ips,
            'device_id': global_router_id,
            'device_owner': port_type,
            'admin_state_up': True,
            'name': ''}})
    # Record the port as a router port so l3 bookkeeping sees it.
    router_port = bc.RouterPort(
        port_id=aux_gw_port['id'],
        router_id=global_router_id,
        port_type=port_type)
    context.session.add(router_port)
    return aux_gw_port
def _create_global_router(
        self, context, hosting_device_id, hd_to_gr_dict, tenant_router,
        logical_global_router):
    """Create a global router on *hosting_device_id*.

    The new router is also registered as a redundancy router of the
    logical global router, with an HA priority derived from how many
    global routers already exist.
    """
    r_spec = {'router': {
        # global routers are not tied to any tenant
        'tenant_id': '',
        'name': self._global_router_name(hosting_device_id),
        'admin_state_up': True}}
    global_router, r_hd_b_db = self._l3_plugin.do_create_router(
        context, r_spec, tenant_router[routertype.TYPE_ATTR], False,
        True, hosting_device_id, ROUTER_ROLE_GLOBAL)
    # make the global router a redundancy router for the logical
    # global router (which we treat as a hidden "user visible
    # router" (how's that for a contradiction of terms! :-) )
    with context.session.begin(subtransactions=True):
        ha_priority = (
            ha_db.DEFAULT_MASTER_PRIORITY -
            len(hd_to_gr_dict) * ha_db.PRIORITY_INCREASE_STEP)
        r_b_b = ha_db.RouterRedundancyBinding(
            redundancy_router_id=global_router['id'],
            priority=ha_priority,
            user_router_id=logical_global_router['id'])
        context.session.add(r_b_b)
    return global_router
def _get_logical_global_router(self, context, tenant_router):
    """Fetch (or lazily create) the logical global router.

    Since HA is also enabled on the global routers on each hosting device
    those global routers need HA settings and VIPs. We represent that
    using a Neutron router that is never instantiated/hosted. That
    Neutron router is referred to as the "logical global" router.
    """
    filters = {routertype.TYPE_ATTR: [tenant_router[routertype.TYPE_ATTR]],
               routerrole.ROUTER_ROLE_ATTR: [ROUTER_ROLE_LOGICAL_GLOBAL]}
    logical_global_routers = self._l3_plugin.get_routers(
        context, filters=filters)
    if not logical_global_routers:
        # must create logical global router
        logical_global_router = self._create_logical_global_router(
            context, tenant_router)
    else:
        logical_global_router = logical_global_routers[0]
        # Another global router will refer to it: bump redundancy level.
        self._update_ha_redundancy_level(context, logical_global_router, 1)
    return logical_global_router
def _create_logical_global_router(self, context, tenant_router):
    """Create the never-hosted logical global router plus its HA settings."""
    r_spec = {'router': {
        # global routers are not tied to any tenant
        'tenant_id': '',
        'name': self._global_router_name('', logical=True),
        'admin_state_up': True,
        # set auto-schedule to false to keep this router un-hosted
        routertypeawarescheduler.AUTO_SCHEDULE_ATTR: False}}
    # notifications should never be sent for this logical router!
    logical_global_router, r_hd_b_db = (
        self._l3_plugin.do_create_router(
            context, r_spec, tenant_router[routertype.TYPE_ATTR],
            False, True, None, ROUTER_ROLE_LOGICAL_GLOBAL))
    with context.session.begin(subtransactions=True):
        r_ha_s_db = ha_db.RouterHASetting(
            router_id=logical_global_router['id'],
            ha_type=cfg.CONF.ha.default_ha_mechanism,
            redundancy_level=1,
            priority=ha_db.DEFAULT_MASTER_PRIORITY,
            probe_connectivity=False,
            probe_target=None,
            probe_interval=None)
        context.session.add(r_ha_s_db)
    return logical_global_router
def _get_fixed_ips_subnets(self, context, gw_port):
    """Return one fixed-ip dict per subnet of the port's network."""
    network = self._core_plugin.get_network(context, gw_port['network_id'])
    return [{'subnet_id': subnet_id} for subnet_id in network['subnets']]
def _provision_port_ha(self, context, ha_port, subnet, router,
                       ha_binding_db=None):
    """Create the HA group row for *ha_port* on *subnet* if missing."""
    ha_group_uuid = uuidutils.generate_uuid()
    router_id = router['id']
    with context.session.begin(subtransactions=True):
        # Idempotency: skip if a group already exists for this
        # port/subnet combination.
        ha_subnet_group = self._get_ha_group_by_ha_port_subnet_id(
            context, ha_port['id'], subnet['subnet_id'])
        if ha_subnet_group is not None:
            return
        if ha_binding_db is None:
            ha_binding_db = self._get_ha_binding(context, router_id)
        group_id = self.generate_ha_group_id(
            context, router,
            {'device_owner': DEVICE_OWNER_GLOBAL_ROUTER_GW}, ha_binding_db,
            ha_group_uuid)
        r_ha_g = ha_db.RouterHAGroup(
            id=ha_group_uuid,
            tenant_id='',
            ha_type=ha_binding_db.ha_type,
            group_identity=group_id,
            ha_port_id=ha_port['id'],
            extra_port_id=None,
            subnet_id=subnet['subnet_id'],
            user_router_id=router_id,
            timers_config='',
            tracking_config='',
            other_config='')
        context.session.add(r_ha_g)
def _get_ha_binding(self, context, router_id):
    """Look up the HA settings row for *router_id*, or None."""
    with context.session.begin(subtransactions=True):
        return (context.session.query(ha_db.RouterHASetting)
                .filter(ha_db.RouterHASetting.router_id == router_id)
                .first())
def _get_ha_group_by_ha_port_subnet_id(self, context, port_id, subnet_id):
    """Return the HA group for (port, subnet), or None if absent/ambiguous."""
    with context.session.begin(subtransactions=True):
        query = context.session.query(ha_db.RouterHAGroup)
        query = query.filter(ha_db.RouterHAGroup.ha_port_id == port_id,
                             ha_db.RouterHAGroup.subnet_id == subnet_id)
        try:
            r_ha_g = query.one()
        except (exc.NoResultFound, exc.MultipleResultsFound):
            # Treat both "missing" and "duplicate" rows as not found.
            return
        return r_ha_g
# ---------------- Remove workflow functions -----------------
def _conditionally_remove_global_router(self, context, tenant_router,
                                        update_operation=False):
    """Tear down aux ports / global router when no longer needed.

    Called after a tenant router loses its gateway (update) or is
    unscheduled; deletes the auxiliary gateway port and, if that was
    the last one, the global router itself, notifying agents otherwise.
    """
    filters = {routertype.TYPE_ATTR: [tenant_router[routertype.TYPE_ATTR]],
               routerrole.ROUTER_ROLE_ATTR: [ROUTER_ROLE_GLOBAL],
               HOSTING_DEVICE_ATTR: [tenant_router[HOSTING_DEVICE_ATTR]]}
    global_routers = self._l3_plugin.get_routers(context,
                                                 filters=filters)
    hd_to_gr_dict = {r[HOSTING_DEVICE_ATTR]: r for r in global_routers}
    if global_routers:
        global_router_id = global_routers[0]['id']
        if not tenant_router or not tenant_router[l3.EXTERNAL_GW_INFO]:
            # let l3 plugin's periodic backlog processing take care of the
            # clean up of the global router
            return
        ext_net_id = tenant_router[l3.EXTERNAL_GW_INFO]['network_id']
        routertype_id = tenant_router[routertype.TYPE_ATTR]
        hd_id = tenant_router[HOSTING_DEVICE_ATTR]
        global_router = hd_to_gr_dict.get(hd_id)
        port_deleted = self._conditionally_remove_auxiliary_gateway_port(
            context, global_router_id, ext_net_id, routertype_id, hd_id,
            update_operation)
        if port_deleted is False:
            # since no auxiliary gateway port was deleted we can
            # abort now since auxiliary gateway port count cannot
            # have reached zero
            return
        filters = {
            'device_id': [global_router_id],
            'device_owner': [DEVICE_OWNER_GLOBAL_ROUTER_GW]}
        num_aux_gw_ports = self._core_plugin.get_ports_count(
            context, filters=filters)
        if num_aux_gw_ports == 0:
            # global router not needed any more so we delete it
            self._delete_global_router(context, global_router_id)
            do_notify = False
        else:
            do_notify = True
        # process logical global router to remove its port
        self._conditionally_remove_auxiliary_gateway_vip_port(
            context, ext_net_id, routertype_id)
        self._l3_plugin.add_type_and_hosting_device_info(context,
                                                         global_router)
        if do_notify is True:
            for ni in self._l3_plugin.get_notifiers(context,
                                                    [global_router]):
                if ni['notifier']:
                    ni['notifier'].routers_updated(context, ni['routers'])
def _conditionally_remove_auxiliary_gateway_port(
        self, context, router_id, ext_net_id, routertype_id,
        hosting_device_id, update_operation=False):
    """Delete the aux gateway port if no tenant router still needs it.

    :returns: True if the port was deleted, False otherwise.
    """
    num_rtrs = self._get_gateway_routers_count(
        context, ext_net_id, routertype_id, None, hosting_device_id)
    # On update the tenant router has already lost its gateway (count
    # excludes it); on other operations it is still counted, hence <= 1.
    if ((num_rtrs <= 1 and update_operation is False) or
            (num_rtrs == 0 and update_operation is True)):
        # there are no tenant routers *on ext_net_id* that are serviced by
        # this global router so it's aux gw port can be deleted
        self._delete_auxiliary_gateway_ports(context, router_id,
                                             ext_net_id)
        return True
    return False
def _conditionally_remove_auxiliary_gateway_vip_port(
        self, context, ext_net_id, routertype_id):
    """Clean up the logical global router's VIP port and maybe itself."""
    filters = {routertype.TYPE_ATTR: [routertype_id],
               routerrole.ROUTER_ROLE_ATTR: [ROUTER_ROLE_LOGICAL_GLOBAL]}
    log_global_routers = self._l3_plugin.get_routers(context,
                                                     filters=filters)
    if not log_global_routers:
        return
    # One fewer global router refers to it: drop its redundancy level.
    self._update_ha_redundancy_level(context, log_global_routers[0], -1)
    log_global_router_id = log_global_routers[0]['id']
    num_global_rtrs = self._get_gateway_routers_count(
        context, ext_net_id, routertype_id, ROUTER_ROLE_GLOBAL)
    if num_global_rtrs == 0:
        # there are no global routers *on ext_net_id* that are serviced by
        # this logical global router so it's aux gw VIP port can be deleted
        self._delete_auxiliary_gateway_ports(context, log_global_router_id,
                                             ext_net_id)
    filters[routerrole.ROUTER_ROLE_ATTR] = [ROUTER_ROLE_GLOBAL]
    total_num_global_rtrs = self._l3_plugin.get_routers_count(
        context, filters=filters)
    if total_num_global_rtrs == 0:
        # there are no global routers left that are serviced by this
        # logical global router so it can be deleted
        self._delete_global_router(context, log_global_router_id, True)
    return False
def _delete_auxiliary_gateway_ports(
        self, context, router_id, net_id=None,
        port_type=DEVICE_OWNER_GLOBAL_ROUTER_GW):
    """Delete *router_id*'s auxiliary ports, optionally on one network."""
    filters = {
        'device_id': [router_id],
        'device_owner': [port_type]}
    if net_id is not None:
        filters['network_id'] = [net_id]
    for port in self._core_plugin.get_ports(context, filters=filters):
        try:
            self._core_plugin.delete_port(context, port['id'],
                                          l3_port_check=False)
        except (exc.ObjectDeletedError, n_exc.PortNotFound) as e:
            # Concurrent deletion is benign; log and keep going.
            LOG.info('Unable to delete port for Global router '
                     '%(r_id)s. It has likely been concurrently '
                     'deleted. %(err)s', {'r_id': router_id,
                                          'err': e})
def _delete_global_router(self, context, global_router_id, logical=False):
    """Delete a global (or logical global) router, tolerating races.

    Concurrent deletions are logged and swallowed so that backlog
    processing can continue.

    :param logical: True when deleting the logical global router, which
        is never hosted and therefore uses the plain l3 delete path.
    """
    # ensure we clean up any stale auxiliary gateway ports
    self._delete_auxiliary_gateway_ports(context, global_router_id)
    try:
        if logical is True:
            # We use parent class method as no special operations beyond
            # what the base implementation does are needed for logical
            # global router
            super(L3RouterApplianceDBMixin, self._l3_plugin).delete_router(
                context, global_router_id)
        else:
            self._l3_plugin.delete_router(
                context, global_router_id, unschedule=False)
    except (exc.ObjectDeletedError, l3.RouterNotFound) as e:
        g_r_type = 'Logical Global' if logical is True else 'Global'
        LOG.info('Unable to delete %(g_r_type)s router %(r_id)s. It '
                 'has likely been concurrently deleted. %(err)s',
                 {'g_r_type': g_r_type, 'r_id': global_router_id,
                  'err': e})
    except Exception as e:
        g_r_type = 'Logical Global' if logical is True else 'Global'
        # Bug fix: '%(r_id)' was missing its 's' conversion character,
        # which made this log call itself raise a formatting error
        # instead of emitting the message.
        LOG.debug('Failed to delete %(g_r_type)s router %(r_id)s. It may '
                  'have been deleted concurrently. Error details: '
                  '%(err)s',
                  {'g_r_type': g_r_type, 'r_id': global_router_id,
                   'err': e})
def _get_gateway_routers_count(self, context, ext_net_id, routertype_id,
                               router_role, hosting_device_id=None):
    """Count routers of a given role acting as gateway to *ext_net_id*.

    Determine number of routers (with routertype_id and router_role)
    that act as gateway to ext_net_id and that are hosted on
    hosting_device_id (if specified).
    """
    query = context.session.query(bc.Router)
    if router_role in [None, ROUTER_ROLE_HA_REDUNDANCY]:
        # tenant router roles: join via the router's real gateway port
        query = query.join(models_v2.Port,
                           models_v2.Port.id == bc.Router.gw_port_id)
        role_filter = expr.or_(
            l3_models.RouterHostingDeviceBinding.role == expr.null(),
            l3_models.RouterHostingDeviceBinding.role ==
            ROUTER_ROLE_HA_REDUNDANCY)
    else:
        # global and logical global routers
        # NOTE(review): joining Port.device_owner to Router.id looks
        # suspicious -- auxiliary ports store the router id in
        # Port.device_id, not device_owner. Confirm against upstream.
        query = query.join(models_v2.Port,
                           models_v2.Port.device_owner == bc.Router.id)
        role_filter = (
            l3_models.RouterHostingDeviceBinding.role == router_role)
    query = query.join(
        l3_models.RouterHostingDeviceBinding,
        l3_models.RouterHostingDeviceBinding.router_id == bc.Router.id)
    query = query.filter(
        role_filter,
        models_v2.Port.network_id == ext_net_id,
        l3_models.RouterHostingDeviceBinding.router_type_id ==
        routertype_id)
    if hosting_device_id is not None:
        query = query.filter(
            l3_models.RouterHostingDeviceBinding.hosting_device_id ==
            hosting_device_id)
    return query.count()
# ---------------- General support functions -----------------
def _update_ha_redundancy_level(self, context, logical_global_router,
                                delta):
    """Adjust the logical global router's HA redundancy level by *delta*."""
    with context.session.begin(subtransactions=True):
        log_g_router_db = self._l3_plugin._get_router(
            context, logical_global_router['id'])
        log_g_router_db.ha_settings.redundancy_level += delta
        context.session.add(log_g_router_db.ha_settings)
def _router_name(self, router_id):
    """Return the device-facing name used for a tenant router."""
    return '%s%s' % (N_ROUTER_PREFIX, router_id)
def _global_router_name(self, hosting_device_id, logical=False):
    """Return the name for a (logical) global router.

    Physical global routers embed the tail of the hosting device id so
    each hosting device gets a distinct name.
    """
    if logical is True:
        return cisco_constants.LOGICAL_ROUTER_ROLE_NAME
    else:
        return '%s-%s' % (cisco_constants.ROUTER_ROLE_NAME_PREFIX,
                          hosting_device_id[-cisco_constants.ROLE_ID_LEN:])
@property
def _core_plugin(self):
    # Neutron core plugin, resolved lazily at access time.
    return bc.get_plugin()

@property
def _l3_plugin(self):
    # L3 service plugin, resolved lazily at access time.
    return bc.get_plugin(bc.constants.L3)
| Tehsmash/networking-cisco | networking_cisco/plugins/cisco/l3/drivers/asr1k/asr1k_routertype_driver.py | Python | apache-2.0 | 33,133 |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import io
import os.path
import boto3.session
import botocore.exceptions
import freezegun
import pretend
import pytest
import redis
from zope.interface.verify import verifyClass
from warehouse.packaging.interfaces import IDownloadStatService, IFileStorage
from warehouse.packaging.services import (
RedisDownloadStatService, LocalFileStorage, S3FileStorage,
)
@freezegun.freeze_time("2012-01-14")
class TestRedisDownloadStatService:
    """Tests for RedisDownloadStatService (clock frozen at 2012-01-14)."""

    def test_verify_service(self):
        # The service must implement the IDownloadStatService interface.
        assert verifyClass(IDownloadStatService, RedisDownloadStatService)

    def test_creates_redis(self, monkeypatch):
        # Constructing the service builds its client via StrictRedis.from_url.
        redis_obj = pretend.stub()
        redis_cls = pretend.stub(
            from_url=pretend.call_recorder(lambda u: redis_obj),
        )
        monkeypatch.setattr(redis, "StrictRedis", redis_cls)

        url = pretend.stub()
        svc = RedisDownloadStatService(url)

        assert svc.redis is redis_obj
        assert redis_cls.from_url.calls == [pretend.call(url)]

    @pytest.mark.parametrize(
        ("keys", "result"),
        [
            ([], 0),
            ([5, 7, 8], 20),
        ]
    )
    def test_get_daily_stats(self, keys, result):
        svc = RedisDownloadStatService("")
        svc.redis = pretend.stub(mget=pretend.call_recorder(lambda *a: keys))

        # 26 hourly buckets: the current hour, all 24 hours of the
        # previous day, and one hour before that.
        call_keys = (
            ["downloads:hour:12-01-14-00:foo"] +
            [
                "downloads:hour:12-01-13-{:02d}:foo".format(i)
                for i in reversed(range(24))
            ] +
            ["downloads:hour:12-01-12-23:foo"]
        )

        assert svc.get_daily_stats("foo") == result
        assert svc.redis.mget.calls == [pretend.call(*call_keys)]

    @pytest.mark.parametrize(
        ("keys", "result"),
        [
            ([], 0),
            ([5, 7, 8], 20),
        ]
    )
    def test_get_weekly_stats(self, keys, result):
        svc = RedisDownloadStatService("")
        svc.redis = pretend.stub(mget=pretend.call_recorder(lambda *a: keys))

        # 8 daily buckets: today plus the previous seven days.
        call_keys = [
            "downloads:daily:12-01-{:02d}:foo".format(i + 7)
            for i in reversed(range(8))
        ]

        assert svc.get_weekly_stats("foo") == result
        assert svc.redis.mget.calls == [pretend.call(*call_keys)]

    @pytest.mark.parametrize(
        ("keys", "result"),
        [
            ([], 0),
            ([5, 7, 8], 20),
        ]
    )
    def test_get_monthly_stats(self, keys, result):
        svc = RedisDownloadStatService("")
        svc.redis = pretend.stub(mget=pretend.call_recorder(lambda *a: keys))

        # 31 daily buckets spanning the month boundary back into December.
        call_keys = [
            "downloads:daily:12-01-{:02d}:foo".format(i)
            for i in reversed(range(1, 15))
        ] + [
            "downloads:daily:11-12-{:02d}:foo".format(i + 15)
            for i in reversed(range(17))
        ]

        assert svc.get_monthly_stats("foo") == result
        assert svc.redis.mget.calls == [pretend.call(*call_keys)]
class TestLocalFileStorage:
    """Tests for the filesystem-backed IFileStorage implementation."""

    def test_verify_service(self):
        # The storage class must fully implement the IFileStorage interface.
        assert verifyClass(IFileStorage, LocalFileStorage)

    def test_basic_init(self):
        assert LocalFileStorage("/foo/bar/").base == "/foo/bar/"

    def test_create_service(self):
        fake_request = pretend.stub(
            registry=pretend.stub(
                settings={"files.path": "/the/one/two/"},
            ),
        )
        service = LocalFileStorage.create_service(None, fake_request)
        # The base directory comes from the `files.path` setting.
        assert service.base == "/the/one/two/"

    def test_gets_file(self, tmpdir):
        path = str(tmpdir.join("file.txt"))
        with open(path, "wb") as fp:
            fp.write(b"my test file contents")
        retrieved = LocalFileStorage(str(tmpdir)).get("file.txt")
        assert retrieved.read() == b"my test file contents"

    def test_raises_when_file_non_existant(self, tmpdir):
        # Missing files surface as the builtin FileNotFoundError.
        with pytest.raises(FileNotFoundError):
            LocalFileStorage(str(tmpdir)).get("file.txt")

    def test_stores_file(self, tmpdir):
        source = str(tmpdir.join("testfile.txt"))
        with open(source, "wb") as fp:
            fp.write(b"Test File!")
        destination = str(tmpdir.join("storage"))
        LocalFileStorage(destination).store("foo/bar.txt", source)
        # Intermediate directories are created under the storage base.
        with open(os.path.join(destination, "foo/bar.txt"), "rb") as fp:
            assert fp.read() == b"Test File!"

    def test_stores_two_files(self, tmpdir):
        sources = []
        for n, contents in ((1, b"First Test File!"), (2, b"Second Test File!")):
            source = str(tmpdir.join("testfile{}.txt".format(n)))
            with open(source, "wb") as fp:
                fp.write(contents)
            sources.append(source)
        destination = str(tmpdir.join("storage"))
        storage = LocalFileStorage(destination)
        storage.store("foo/first.txt", sources[0])
        storage.store("foo/second.txt", sources[1])
        with open(os.path.join(destination, "foo/first.txt"), "rb") as fp:
            assert fp.read() == b"First Test File!"
        with open(os.path.join(destination, "foo/second.txt"), "rb") as fp:
            assert fp.read() == b"Second Test File!"
class TestS3FileStorage:
    """Tests for the S3-backed IFileStorage implementation."""

    def test_verify_service(self):
        assert verifyClass(IFileStorage, S3FileStorage)

    def test_basic_init(self):
        fake_bucket = pretend.stub()
        assert S3FileStorage(fake_bucket).bucket is fake_bucket

    def test_create_service(self):
        session = boto3.session.Session()
        request = pretend.stub(
            find_service=pretend.call_recorder(lambda name: session),
            registry=pretend.stub(settings={"files.bucket": "froblob"}),
        )
        storage = S3FileStorage.create_service(None, request)
        # The storage must resolve the shared AWS session service and use
        # the configured bucket name.
        assert request.find_service.calls == [pretend.call(name="aws.session")]
        assert storage.bucket.name == "froblob"

    def test_gets_file(self):
        key_stub = pretend.stub(get=lambda: {"Body": io.BytesIO(b"my contents")})
        bucket = pretend.stub(Object=pretend.call_recorder(lambda path: key_stub))
        retrieved = S3FileStorage(bucket).get("file.txt")
        assert retrieved.read() == b"my contents"
        assert bucket.Object.calls == [pretend.call("file.txt")]

    def test_raises_when_key_non_existant(self):
        def fail_get():
            raise botocore.exceptions.ClientError(
                {"Error": {"Code": "NoSuchKey", "Message": "No Key!"}},
                "some operation",
            )

        key_stub = pretend.stub(get=fail_get)
        bucket = pretend.stub(Object=pretend.call_recorder(lambda path: key_stub))
        storage = S3FileStorage(bucket)
        # A missing S3 key is translated into the builtin FileNotFoundError.
        with pytest.raises(FileNotFoundError):
            storage.get("file.txt")
        assert bucket.Object.calls == [pretend.call("file.txt")]

    def test_passes_up_error_when_not_no_such_key(self):
        def fail_get():
            raise botocore.exceptions.ClientError(
                {"Error": {"Code": "SomeOtherError", "Message": "Who Knows!"}},
                "some operation",
            )

        key_stub = pretend.stub(get=fail_get)
        bucket = pretend.stub(Object=lambda path: key_stub)
        # Any ClientError other than NoSuchKey must propagate unchanged.
        with pytest.raises(botocore.exceptions.ClientError):
            S3FileStorage(bucket).get("file.txt")

    def test_stores_file(self, tmpdir):
        source = str(tmpdir.join("testfile.txt"))
        with open(source, "wb") as fp:
            fp.write(b"Test File!")
        bucket = pretend.stub(
            upload_file=pretend.call_recorder(lambda filename, key: None),
        )
        S3FileStorage(bucket).store("foo/bar.txt", source)
        assert bucket.upload_file.calls == [
            pretend.call(source, "foo/bar.txt"),
        ]

    def test_stores_two_files(self, tmpdir):
        sources = []
        for n, contents in ((1, b"First Test File!"), (2, b"Second Test File!")):
            path = str(tmpdir.join("testfile{}.txt".format(n)))
            with open(path, "wb") as fp:
                fp.write(contents)
            sources.append(path)
        bucket = pretend.stub(
            upload_file=pretend.call_recorder(lambda filename, key: None),
        )
        storage = S3FileStorage(bucket)
        storage.store("foo/first.txt", sources[0])
        storage.store("foo/second.txt", sources[1])
        assert bucket.upload_file.calls == [
            pretend.call(sources[0], "foo/first.txt"),
            pretend.call(sources[1], "foo/second.txt"),
        ]
| ismail-s/warehouse | tests/unit/packaging/test_services.py | Python | apache-2.0 | 9,105 |
import allure
from allure_commons_test.report import has_test_case
from allure_commons_test.result import with_status, with_message_contains, has_status_details
from hamcrest import assert_that
@allure.issue("376")
@allure.feature("Integration")
def test_pytest_check(allured_testdir):
    # NOTE: the docstring below is not documentation -- parse_docstring_source
    # extracts it and runs it as the example test module, so its content must
    # stay exactly as written.
    """
    >>> import pytest_check as check
    >>> def test_pytest_check_example():
    ...     check.equal(1, 2, msg="First failure")
    ...     check.equal(1, 2, msg="Second failure")
    """
    allured_testdir.parse_docstring_source()
    allured_testdir.run_with_allure()
    # Both soft-assert failures must appear in the status details of the
    # single failed test case reported to allure.
    assert_that(allured_testdir.allure_report,
                has_test_case("test_pytest_check_example",
                              with_status("failed"),
                              has_status_details(with_message_contains("First failure"),
                                                 with_message_contains("Second failure"))
                              ),
                )
| allure-framework/allure-python | allure-pytest/test/integration/pytest_check/pytest_check_test.py | Python | apache-2.0 | 961 |
# Copyright 2013 Rackspace
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Names re-exported from whichever concurrency backend gets selected below.
__all__ = [
    'get_pool',
    'run_function',
    'join_pool'
]
import os
import sys
# Probe for gevent; the bare `gevent` reference keeps linters from flagging
# the import as unused.
try:
    import gevent
    gevent
    gevent_available = True
except ImportError:
    gevent_available = False
DEFAULT_BACKEND = 'noop'
BACKEND = DEFAULT_BACKEND
# The gevent backend is opt-in via environment variable, and only used when
# the library is actually importable.
USE_GEVENT = os.getenv('RAXCLI_USE_GEVENT')
if USE_GEVENT and gevent_available:
    BACKEND = 'gevent'
module_name = 'raxcli.concurrency.backends.%s_backend' % (BACKEND)
current_module = sys.modules[__name__]
# `fromlist` forces __import__ to return the leaf backend module rather than
# the top-level package.
backend_module = __import__(module_name,
                            fromlist=['raxcli.concurrency.backends'])
# Re-export each backend function from this module under the names in __all__.
for key in __all__:
    func = getattr(backend_module, key)
    setattr(current_module, key, func)
# Let the selected backend run its one-time setup.
backend_initialize = getattr(backend_module, 'initialize')
backend_initialize()
| racker/python-raxcli | raxcli/concurrency/__init__.py | Python | apache-2.0 | 1,568 |
from django import forms
from utils.fields import SlugField
from utils.forms import (
BootstrapMixin,
JSONField,
SmallTextarea,
StaticSelect,
add_blank_choice,
)
from .enums import PasswordAlgorithm
from .models import Platform
class PlatformForm(BootstrapMixin, forms.ModelForm):
    """Model form for creating and editing Platform objects."""

    slug = SlugField(max_length=255)
    napalm_args = JSONField(
        widget=SmallTextarea,
        label="Optional arguments",
        required=False,
        help_text="See NAPALM's <a href='http://napalm.readthedocs.io/en/latest/support/#optional-arguments'>documentation</a> for a complete list of optional arguments",
    )
    password_algorithm = forms.ChoiceField(
        widget=StaticSelect,
        choices=add_blank_choice(PasswordAlgorithm.choices),
        required=False,
    )

    class Meta:
        model = Platform
        # Listed order drives the rendered form layout.
        fields = [
            "name",
            "slug",
            "napalm_driver",
            "napalm_args",
            "password_algorithm",
            "description",
        ]
| respawner/peering-manager | devices/forms.py | Python | apache-2.0 | 1,030 |
#! /usr/bin/env python
import sys, os, getopt, struct, unittest
from distutils.spawn import spawn
# Defaults: rebuild the extension before testing and run verbosely;
# the -n / -q command line flags below override these.
build = True
verbosity = 2
# Run everything relative to the directory containing this script.
here = os.path.dirname(os.path.abspath(__file__))
os.chdir(here)
def bits():
    """Return the pointer width of this interpreter: 32 or 64."""
    pointer_size_bytes = struct.calcsize("P")
    return pointer_size_bytes * 8
# -- parse options
# -q: quiet output; -n: skip rebuilding the extension before testing.
try:
    opts, args = getopt.getopt(sys.argv[1:], "nq")
    if args:
        raise getopt.GetoptError("too many arguments")
except getopt.GetoptError:
    sys.exit("run-tests.py: error: %s" % sys.exc_info()[1])
for o, a in opts:
    if o == "-q":
        verbosity = 0
    elif o == "-n":
        build = False
# -- build greenlet
if build:
    if verbosity == 0:
        cmd = [sys.executable, "setup.py", "-q", "build_ext", "-q"]
    else:
        cmd = [sys.executable, "setup.py", "build_ext"]
    spawn(cmd, search_path=0)
# -- find greenlet but skip the one in "."
# When not building, drop the script directory from sys.path so the import
# resolves to an installed greenlet rather than the local source tree.
if not build:
    oldpath = sys.path[:]
    sys.path.remove(here)
import greenlet
if not build:
    sys.path[:] = oldpath
sys.stdout.write("python %s (%s bit) using greenlet %s from %s\n" %
                 (sys.version.split()[0], bits(), greenlet.__version__, greenlet.__file__))
# -- run tests
from tests import test_collector
suite = test_collector()
unittest.TextTestRunner(verbosity=verbosity).run(suite)
| ioram7/keystone-federado-pgid2013 | build/greenlet/run-tests.py | Python | apache-2.0 | 1,321 |
"""Unit test for cgroup_service - Treadmill cgroup service.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
import tempfile
import unittest
import select
import shutil
import mock
import treadmill
from treadmill.services import cgroup_service
class CGroupServiceTest(unittest.TestCase):
    """Unit tests for the cgroup service implementation.
    """
    def setUp(self):
        # Each test gets a throwaway directory tree for service state.
        self.root = tempfile.mkdtemp()
        self.cgroup_svc = os.path.join(self.root, 'cgroup_svc')
        self.running = os.path.join(self.root, 'running')
    def tearDown(self):
        if self.root and os.path.isdir(self.root):
            shutil.rmtree(self.root)
    def test_initialize(self):
        """Test service initialization.
        """
        svc = cgroup_service.CgroupResourceService(self.running)
        svc.initialize(self.cgroup_svc)
    def test_report_status(self):
        """Test processing of status request.
        """
        svc = cgroup_service.CgroupResourceService(self.running)
        status = svc.report_status()
        # The cgroup service reports itself ready immediately.
        self.assertEqual(
            status,
            {'ready': True}
        )
    def test_event_handlers(self):
        """Test event_handlers request.
        """
        svc = cgroup_service.CgroupResourceService(self.running)
        handlers = svc.event_handlers()
        # No OOM handlers exist before any container is created.
        self.assertEqual(
            handlers,
            []
        )
    @mock.patch('treadmill.cgroups.create', mock.Mock())
    @mock.patch('treadmill.cgroups.get_value', mock.Mock(return_value=10000))
    @mock.patch('treadmill.cgroups.join', mock.Mock())
    @mock.patch('treadmill.cgroups.set_value', mock.Mock())
    @mock.patch('treadmill.services.cgroup_service.CgroupResourceService.'
                '_register_oom_handler', mock.Mock())
    def test_on_create_request(self):
        """Test processing of a cgroups create request.
        """
        # Access to a protected member _register_oom_handler of a client class
        # pylint: disable=W0212
        svc = cgroup_service.CgroupResourceService(self.running)
        request = {
            'memory': '100M',
            'cpu': '100%',
        }
        request_id = 'myproid.test-0-ID1234'
        svc.on_create_request(request_id, request)
        cgrp = os.path.join('treadmill/apps', request_id)
        svc._register_oom_handler.assert_called_with(cgrp, request_id)
        # The app cgroup must be created in all four subsystems.
        treadmill.cgroups.create.assert_has_calls(
            [
                mock.call(ss, cgrp)
                for ss in ['cpu', 'cpuacct', 'memory', 'blkio']
            ]
        )
        # Memory calculation:
        #
        # (demand * virtual cpu bmips / total bmips) * treadmill.cpu.shares
        # (100% * 5000 / (24000 * 0.9 ) * 10000) = 2314
        # NOTE(review): the formula above looks stale -- the assertion below
        # expects cpu.shares == BMIPS_PER_CPU, not 2314; confirm against the
        # service implementation.
        treadmill.cgroups.set_value.assert_has_calls([
            mock.call('blkio', cgrp, 'blkio.weight', 100),
            mock.call('memory', cgrp, 'memory.soft_limit_in_bytes', '100M'),
            mock.call('memory', cgrp, 'memory.limit_in_bytes', '100M'),
            mock.call('memory', cgrp, 'memory.memsw.limit_in_bytes', '100M'),
            mock.call('cpu', cgrp, 'cpu.shares',
                      treadmill.sysinfo.BMIPS_PER_CPU)
        ])
    @mock.patch('treadmill.cgutils.delete', mock.Mock())
    @mock.patch('treadmill.services.cgroup_service.CgroupResourceService.'
                '_unregister_oom_handler', mock.Mock())
    def test_on_delete_request(self):
        """Test processing of a cgroups delete request.
        """
        # Access to a protected member _unregister_oom_handler of a client
        # class
        # pylint: disable=W0212
        svc = cgroup_service.CgroupResourceService(self.running)
        request_id = 'myproid.test-0-ID1234'
        svc.on_delete_request(request_id)
        cgrp = os.path.join('treadmill/apps', request_id)
        # Deletion must clean up all four subsystems and drop the OOM
        # handler registered at create time.
        treadmill.cgutils.delete.assert_has_calls(
            [
                mock.call(ss, cgrp)
                for ss in ['cpu', 'cpuacct', 'memory', 'blkio']
            ]
        )
        svc._unregister_oom_handler.assert_called_with(cgrp)
    @mock.patch('treadmill.cgutils.get_memory_oom_eventfd',
                mock.Mock(return_value='fake_efd'))
    def test__register_oom_handler(self):
        """Test registration of OOM handler.
        """
        # Access to a protected member _register_oom_handler of a client class
        # pylint: disable=W0212
        svc = cgroup_service.CgroupResourceService(self.running)
        registered_handlers = svc.event_handlers()
        self.assertNotIn(
            ('fake_efd', select.POLLIN, mock.ANY),
            registered_handlers
        )
        cgrp = 'treadmill/apps/myproid.test-42-ID1234'
        svc._register_oom_handler(cgrp, 'myproid.test-42-ID1234')
        # Registration exposes the cgroup's OOM eventfd for polling.
        treadmill.cgutils.get_memory_oom_eventfd.assert_called_with(cgrp)
        registered_handlers = svc.event_handlers()
        self.assertIn(
            ('fake_efd', select.POLLIN, mock.ANY),
            registered_handlers
        )
    @mock.patch('os.close', mock.Mock())
    @mock.patch('treadmill.cgutils.get_memory_oom_eventfd',
                mock.Mock(return_value='fake_efd'))
    def test__unregister_oom_handler(self):
        """Test unregistration of OOM handler.
        """
        # Access to a protected member _unregister_oom_handler of a client
        # class
        # pylint: disable=W0212
        svc = cgroup_service.CgroupResourceService(self.running)
        cgrp = 'treadmill/apps/myproid.test-42-ID1234'
        svc._register_oom_handler(cgrp, 'myproid.test-42-ID1234')
        registered_handlers = svc.event_handlers()
        self.assertIn(
            ('fake_efd', select.POLLIN, mock.ANY),
            registered_handlers
        )
        svc._unregister_oom_handler(cgrp)
        # Unregistration removes the handler and closes its eventfd.
        registered_handlers = svc.event_handlers()
        self.assertNotIn(
            ('fake_efd', select.POLLIN, mock.ANY),
            registered_handlers
        )
        os.close.assert_called_with('fake_efd')
# Run the test suite when executed directly.
if __name__ == '__main__':
    unittest.main()
| captiosus/treadmill | tests/services/cgroup_service_test.py | Python | apache-2.0 | 6,163 |
import os
import stat
import time
from inaugurator import sh
class TargetDevice:
_found = None
@classmethod
def device(cls, candidates):
if cls._found is None:
cls._found = cls._find(candidates)
return cls._found
pass
@classmethod
def _find(cls, candidates):
RETRIES = 5
for retry in xrange(RETRIES):
for device in candidates:
if not os.path.exists(device):
continue
if not stat.S_ISBLK(os.stat(device).st_mode):
continue
try:
output = sh.run("dosfslabel", device + 1)
if output.strip() == "STRATODOK":
raise Exception(
"DOK was found on SDA. cannot continue: its likely the "
"the HD driver was not loaded correctly")
except:
pass
print "Found target device %s" % device
return device
print "didn't find target device, sleeping before retry %d" % retry
time.sleep(1)
os.system("/usr/sbin/busybox mdev -s")
raise Exception("Failed finding target device")
| eliran-stratoscale/inaugurator | inaugurator/targetdevice.py | Python | apache-2.0 | 1,263 |
import collections
import random

# Seed the RNG so the script prints identical values on every run.
random.seed(1)

# Two bounded deques fed from opposite ends: with maxlen=3 each keeps only
# the three most recently added items.
d1 = collections.deque(maxlen=3)
d2 = collections.deque(maxlen=3)

for _ in range(5):
    value = random.randint(0, 100)
    print('n =', value)
    d1.append(value)
    d2.appendleft(value)

print('D1:', d1)
print('D2:', d2)
| jasonwee/asus-rt-n14uhp-mrtg | src/lesson_data_structures/collections_deque_maxlen.py | Python | apache-2.0 | 349 |
# Copyright 2016 Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import json
from osprofiler import _utils as utils
from osprofiler.drivers.base import get_driver as profiler_get_driver
from osprofiler import notifier
from osprofiler import profiler
from osprofiler import web
from horizon.utils import settings as horizon_settings
# Header key naming the parent view's trace id (consumers live outside this
# module).
ROOT_HEADER = 'PARENT_VIEW_TRACE_ID'
def init_notifier(connection_str, host="localhost"):
    """Create and install the global osprofiler notifier for horizon."""
    notifier.set(
        notifier.create(
            connection_str, project='horizon', service='horizon', host=host))
@contextlib.contextmanager
def traced(request, name, info=None):
    """Run the wrapped body inside a profiler trace point named `name`.

    Yields the base (root) trace id when a profiler is initialized for this
    thread, or None otherwise (the body still executes, just untraced).

    Args:
        request: Django request; its user id is attached to the trace info.
        name: name of the trace point.
        info: optional dict of extra metadata recorded with the trace.
    """
    if info is None:
        info = {}
    profiler_instance = profiler.get()
    if profiler_instance is not None:
        trace_id = profiler_instance.get_base_id()
        info['user_id'] = request.user.id
        with profiler.Trace(name, info=info):
            yield trace_id
    else:
        # No profiler initialized: execute the body without tracing.
        yield
def _get_engine():
    """Build a receiver driver from the configured connection string."""
    return profiler_get_driver(
        horizon_settings.get_dict_config(
            'OPENSTACK_PROFILER', 'receiver_connection_string'))
def list_traces():
    """Return id/timestamp/origin summaries for every stored trace."""
    engine = _get_engine()
    traces = engine.list_traces(
        ['base_id', 'timestamp', 'info.request.path', 'info'])
    summaries = []
    for trace in traces:
        summaries.append({
            'id': trace['base_id'],
            'timestamp': trace['timestamp'],
            'origin': trace['info']['request']['path'],
        })
    return summaries
def get_trace(trace_id):
    """Fetch a trace report and annotate it for tree rendering.

    Tags every node with its depth (`level`), leaf status and visibility
    flags, and records the latest finishing time of the whole tree under
    `info.max_finished`.
    """
    def rec(_data, level=0):
        # Depth-first walk adding display metadata to each node.
        _data['level'] = level
        _data['is_leaf'] = not _data['children']
        _data['visible'] = True
        _data['childrenVisible'] = True
        finished = _data['info']['finished']
        for child in _data['children']:
            __, child_finished = rec(child, level + 1)
            # NOTE(tsufiev): in case of async requests the root request usually
            # finishes before the dependent requests do so, to we need to
            # normalize the duration of all requests by the finishing time of
            # the one which took longest
            if child_finished > finished:
                finished = child_finished
        return _data, finished
    engine = _get_engine()
    trace = engine.get_report(trace_id)
    data, max_finished = rec(trace)
    data['info']['max_finished'] = max_finished
    return data
def update_trace_headers(keys, **kwargs):
    """Re-sign the current trace headers with extra fields merged in.

    Args:
        keys: HMAC keys used to verify the incoming trace payload.
        **kwargs: fields merged into the unpacked trace info before
            re-signing.

    Returns:
        A JSON string with the updated X-Trace-Info / X-Trace-HMAC headers.
    """
    trace_headers = web.get_trace_id_headers()
    trace_info = utils.signed_unpack(
        trace_headers[web.X_TRACE_INFO], trace_headers[web.X_TRACE_HMAC],
        keys)
    trace_info.update(kwargs)
    p = profiler.get()
    trace_data = utils.signed_pack(trace_info, p.hmac_key)
    # signed_pack may return bytes; decode so the headers are JSON-safe text.
    trace_data = [key.decode() if isinstance(key, bytes)
                  else key for key in trace_data]
    return json.dumps({web.X_TRACE_INFO: trace_data[0],
                       web.X_TRACE_HMAC: trace_data[1]})
# When profiling is disabled in settings, `trace` is a no-op decorator;
# otherwise it wraps the function in an osprofiler trace point named
# "<module>.<function>".
if not horizon_settings.get_dict_config('OPENSTACK_PROFILER', 'enabled'):
    def trace(function):
        return function
else:
    def trace(function):
        func_name = function.__module__ + '.' + function.__name__
        decorator = profiler.trace(func_name)
        return decorator(function)
| openstack/horizon | openstack_dashboard/contrib/developer/profiler/api.py | Python | apache-2.0 | 3,772 |
#!/usr/bin/python
#
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example adds ad groups to a given campaign.
To get ad groups, run get_ad_groups.py.
The LoadFromStorage method is pulling credentials and properties from a
"googleads.yaml" file. By default, it looks for this file in your home
directory. For more information, see the "Caching authentication information"
section of our README..
Tags: AdGroupService.mutate
"""
__author__ = ('api.kwinter@gmail.com (Kevin Winter)'
'Joseph DiLallo')
import uuid
from googleads import adwords
# Replace with the id of the campaign the new ad groups should belong to.
CAMPAIGN_ID = 'INSERT_CAMPAIGN_ID_HERE'
def main(client, campaign_id):
  """Add two ENABLED ad groups with 1,000,000-micro CPC bids to the campaign.

  Args:
    client: an initialized AdWordsClient instance.
    campaign_id: id of the campaign that receives the new ad groups.
  """
  # Initialize appropriate service.
  ad_group_service = client.GetService('AdGroupService', version='v201406')
  # Construct operations and add ad groups.
  # Names carry a uuid suffix so repeated runs do not collide.
  operations = [{
      'operator': 'ADD',
      'operand': {
          'campaignId': campaign_id,
          'name': 'Earth to Mars Cruises #%s' % uuid.uuid4(),
          'status': 'ENABLED',
          'biddingStrategyConfiguration': {
              'bids': [
                  {
                      'xsi_type': 'CpcBid',
                      'bid': {
                          'microAmount': '1000000'
                      },
                  }
              ]
          }
      }
  }, {
      'operator': 'ADD',
      'operand': {
          'campaignId': campaign_id,
          'name': 'Earth to Venus Cruises #%s' % uuid.uuid4(),
          'status': 'ENABLED',
          'biddingStrategyConfiguration': {
              'bids': [
                  {
                      'xsi_type': 'CpcBid',
                      'bid': {
                          'microAmount': '1000000'
                      }
                  }
              ]
          }
      }
  }]
  ad_groups = ad_group_service.mutate(operations)
  # Display results.
  for ad_group in ad_groups['value']:
    print ('Ad group with name \'%s\' and id \'%s\' was added.'
           % (ad_group['name'], ad_group['id']))
if __name__ == '__main__':
  # Initialize client object.
  # Credentials are read from the googleads.yaml file in the home directory.
  adwords_client = adwords.AdWordsClient.LoadFromStorage()
  main(adwords_client, CAMPAIGN_ID)
| dietrichc/streamline-ppc-reports | examples/adwords/v201406/basic_operations/add_ad_groups.py | Python | apache-2.0 | 2,698 |
import pytest

from cwltool.utils import onWindows

from .util import get_data, get_main_output, needs_docker

# galaxy-lib is optional; tests that depend on it are skipped when absent.
try:
    from galaxy.tools import deps
except ImportError:
    deps = None
@needs_docker
@pytest.mark.skipif(not deps, reason="galaxy-lib is not installed")
def test_biocontainers():
    """seqtk runs successfully when its container comes from biocontainers."""
    workflow = get_data("tests/seqtk_seq.cwl")
    job = get_data("tests/seqtk_seq_job.json")
    exit_code, _, _ = get_main_output(
        ["--beta-use-biocontainers", workflow, job])
    assert exit_code == 0
@pytest.mark.skipif(onWindows(), reason="bioconda currently not working on MS Windows")
@pytest.mark.skipif(not deps, reason="galaxy-lib is not installed")
def test_bioconda():
    """seqtk runs successfully when its requirements come from bioconda."""
    workflow = get_data("tests/seqtk_seq.cwl")
    job = get_data("tests/seqtk_seq_job.json")
    exit_code, _, stderr = get_main_output(
        ["--beta-conda-dependencies", "--debug", workflow, job])
    assert exit_code == 0, stderr
import os
import shutil


# `shutil.which` replaces `distutils.spawn.find_executable`, which is
# deprecated and removed from the stdlib in Python 3.12.
@pytest.mark.skipif(not shutil.which("modulecmd"),
                    reason="modulecmd not installed")
def test_modules():
    """Software requirements are resolved via Environment Modules.

    Points MODULEPATH at the bundled test modulefiles before invoking
    cwltool with a dependency-resolver configuration that uses modulecmd.
    """
    wflow = get_data("tests/random_lines.cwl")
    job = get_data("tests/random_lines_job.json")
    os.environ["MODULEPATH"] = os.path.join(
        os.getcwd(), 'tests/test_deps_env/modulefiles')
    error_code, _, stderr = get_main_output(
        ["--beta-dependency-resolvers-configuration",
         "tests/test_deps_env_modules_resolvers_conf.yml", "--debug",
         wflow, job])
    assert error_code == 0, stderr
# coding=utf-8
# Copyright 2019 The Google UDA Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Transforms used in the Augmentation Policies.
Copied from AutoAugment: https://github.com/tensorflow/models/blob/master/research/autoaugment/
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
import numpy as np
# pylint:disable=g-multiple-import
from PIL import ImageOps, ImageEnhance, ImageFilter, Image
# pylint:enable=g-multiple-import
import tensorflow as tf
FLAGS = tf.flags.FLAGS
IMAGE_SIZE = 32
# Per-dataset mean/std normalization constants live in get_mean_and_std().
PARAMETER_MAX = 10  # What is the max 'level' a transform could be predicted
def get_mean_and_std():
  """Return per-channel (mean, std) normalization constants for the task.

  Values are training-set statistics for the dataset selected by
  `FLAGS.task_name`, in RGB channel order.

  Returns:
    A `(means, stds)` tuple of three-element lists.

  Raises:
    ValueError: if `FLAGS.task_name` is not a supported dataset.
  """
  if FLAGS.task_name == "cifar10":
    means = [0.49139968, 0.48215841, 0.44653091]
    stds = [0.24703223, 0.24348513, 0.26158784]
  elif FLAGS.task_name == "svhn":
    means = [0.4376821, 0.4437697, 0.47280442]
    stds = [0.19803012, 0.20101562, 0.19703614]
  else:
    # `assert False` is stripped under `python -O`; raise explicitly so an
    # unsupported task can never fall through with unbound locals.
    raise ValueError("Unsupported task: %s" % FLAGS.task_name)
  return means, stds
def _width_height_from_img_shape(img_shape):
  """Convert an autoaugment `(height, width)` shape into `(width, height)`."""
  height, width = img_shape[0], img_shape[1]
  return (width, height)
def random_flip(x):
  """Mirror `x` left-right with probability 0.5; otherwise return it as-is."""
  should_flip = np.random.rand(1)[0] > 0.5
  return np.fliplr(x) if should_flip else x
def zero_pad_and_crop(img, amount=4):
  """Pad `img` with `amount` zero pixels on every side, then random-crop.

  Args:
    img: HxWxC numpy image.
    amount: number of zero rows/columns added on each side.

  Returns:
    A random crop of the padded image, with the same shape as `img`
    (float64, since the padded buffer is allocated with np.zeros).
  """
  height, width, channels = img.shape[0], img.shape[1], img.shape[2]
  padded = np.zeros((height + 2 * amount, width + 2 * amount, channels))
  padded[amount:height + amount, amount:width + amount, :] = img
  top = np.random.randint(low=0, high=2 * amount)
  left = np.random.randint(low=0, high=2 * amount)
  return padded[top:top + height, left:left + width, :]
def create_cutout_mask(img_height, img_width, num_channels, size):
  """Build a mask of ones with a random `size`x`size` block of zeros.

  Args:
    img_height: height of the image the mask applies to.
    img_width: width of the image (must equal `img_height`).
    num_channels: channel count of the image.
    size: edge length of the zeroed square.

  Returns:
    `(mask, upper_coord, lower_coord)` where `mask` has shape
    `img_height` x `img_width` x `num_channels` and the coordinate pair
    bounds the zeroed patch (clamped to the image borders).
  """
  assert img_height == img_width
  # Pick the patch centre uniformly at random.
  center_row = np.random.randint(low=0, high=img_height)
  center_col = np.random.randint(low=0, high=img_width)
  # Clamp the patch corners to stay inside the image.
  upper_coord = (max(0, center_row - size // 2),
                 max(0, center_col - size // 2))
  lower_coord = (min(img_height, center_row + size // 2),
                 min(img_width, center_col + size // 2))
  patch_height = lower_coord[0] - upper_coord[0]
  patch_width = lower_coord[1] - upper_coord[1]
  assert patch_height > 0
  assert patch_width > 0
  mask = np.ones((img_height, img_width, num_channels))
  mask[upper_coord[0]:lower_coord[0],
       upper_coord[1]:lower_coord[1], :] = np.zeros(
           (patch_height, patch_width, num_channels))
  return mask, upper_coord, lower_coord
def cutout_numpy(img, size=16):
  """Zero out a random `size`x`size` patch of `img` (arXiv:1708.04552).

  Args:
    img: HxWxC numpy image.
    size: edge length of the square cutout mask.

  Returns:
    `img` multiplied elementwise by the cutout mask.
  """
  img_height = img.shape[0]
  img_width = img.shape[1]
  num_channels = img.shape[2]
  assert len(img.shape) == 3
  mask, _, _ = create_cutout_mask(img_height, img_width, num_channels, size)
  return img * mask
def float_parameter(level, maxval):
  """Linearly scale `maxval` by `level / PARAMETER_MAX`, as a float.

  Args:
    level: operation strength in [0, PARAMETER_MAX].
    maxval: value returned when `level == PARAMETER_MAX`.

  Returns:
    The scaled value as a float.
  """
  # Same evaluation order as the original to keep float rounding identical.
  return float(level) * maxval / PARAMETER_MAX
def int_parameter(level, maxval):
  """Linearly scale `maxval` by `level / PARAMETER_MAX`, truncated to int.

  Args:
    level: operation strength in [0, PARAMETER_MAX].
    maxval: value returned when `level == PARAMETER_MAX`.

  Returns:
    The scaled value truncated to an int.
  """
  return int(level * maxval / PARAMETER_MAX)
def pil_wrap(img, use_mean_std):
  """Convert the numpy tensor `img` into an RGBA PIL Image.

  Args:
    img: normalized HxWxC numpy image.
    use_mean_std: if True, de-normalize with the dataset mean/std first;
      otherwise the values are only rescaled by 255.

  Returns:
    The de-normalized image as an RGBA PIL Image.
  """
  if use_mean_std:
    MEANS, STDS = get_mean_and_std()
  else:
    MEANS = [0, 0, 0]
    STDS = [1, 1, 1]
  # A dead `img_ori` array (same expression, never used) was removed here.
  return Image.fromarray(
      np.uint8((img * STDS + MEANS) * 255.0)).convert('RGBA')
def pil_unwrap(pil_img, use_mean_std, img_shape):
  """Converts the PIL img to a numpy array.

  Args:
    pil_img: RGBA PIL Image, as produced by `pil_wrap`.
    use_mean_std: if True, re-normalize with the dataset mean/std.
    img_shape: (height, width) of the original image.

  Returns:
    The image as a normalized (height, width, 3) numpy array, with fully
    transparent pixels forced to zero.
  """
  if use_mean_std:
    MEANS, STDS = get_mean_and_std()
  else:
    MEANS = [0, 0, 0]
    STDS = [1, 1, 1]
  # Flatten the RGBA pixel data back into an HxWx4 array scaled to [0, 1].
  pic_array = np.array(pil_img.getdata()).reshape((img_shape[0], img_shape[1], 4)) / 255.0
  # Remember which pixels have alpha == 0 (cut out by a transform) before
  # dropping the alpha channel.
  i1, i2 = np.where(pic_array[:, :, 3] == 0)
  pic_array = (pic_array[:, :, :3] - MEANS) / STDS
  # Cut-out pixels become exactly zero after normalization.
  pic_array[i1, i2] = [0, 0, 0]
  return pic_array
def apply_policy(policy, img, use_mean_std=True):
  """Apply the `policy` to the numpy `img`.

  Args:
    policy: list of `(name, probability, level)` tuples, where `name` selects
      the augmentation operation, `probability` is the chance of applying it
      and `level` is its strength.
    img: numpy image that the policy is applied to.
    use_mean_std: whether to de/re-normalize with dataset statistics.

  Returns:
    The result of applying `policy` to `img`.
  """
  img_shape = img.shape
  pil_img = pil_wrap(img, use_mean_std)
  for xform in policy:
    assert len(xform) == 3
    name, probability, level = xform
    transformer = NAME_TO_TRANSFORM[name].pil_transformer(
        probability, level, img_shape)
    pil_img = transformer(pil_img)
  return pil_unwrap(pil_img, use_mean_std, img_shape)
class TransformFunction(object):
  """Callable wrapper that gives a transform function a readable repr."""

  def __init__(self, func, name):
    # `f` is kept as the attribute name for compatibility with callers.
    self.f = func
    self.name = name

  def __repr__(self):
    return '<{}>'.format(self.name)

  def __call__(self, pil_img):
    return self.f(pil_img)
class TransformT(object):
  """A named image transform applied probabilistically at a given level."""

  def __init__(self, name, xform_fn):
    self.name = name
    self.xform = xform_fn

  def pil_transformer(self, probability, level, img_shape):
    """Return a TransformFunction applying this xform with `probability`."""
    def maybe_apply(im):
      # One random draw per image; skip the transform when the draw misses.
      if random.random() < probability:
        im = self.xform(im, level, img_shape)
      return im

    tag = self.name + '({:.1f},{})'.format(probability, level)
    return TransformFunction(maybe_apply, tag)
################## Transform Functions ##################
# Transforms that need no level parameter are defined inline as lambdas;
# leveled transforms are defined as named _impl functions further below.
identity = TransformT('identity', lambda pil_img, level, _: pil_img)
flip_lr = TransformT(
    'FlipLR',
    lambda pil_img, level, _: pil_img.transpose(Image.FLIP_LEFT_RIGHT))
flip_ud = TransformT(
    'FlipUD',
    lambda pil_img, level, _: pil_img.transpose(Image.FLIP_TOP_BOTTOM))
# pylint:disable=g-long-lambda
# These PIL ImageOps helpers only work on RGB, so convert there and back.
auto_contrast = TransformT(
    'AutoContrast',
    lambda pil_img, level, _: ImageOps.autocontrast(
        pil_img.convert('RGB')).convert('RGBA'))
equalize = TransformT(
    'Equalize',
    lambda pil_img, level, _: ImageOps.equalize(
        pil_img.convert('RGB')).convert('RGBA'))
invert = TransformT(
    'Invert',
    lambda pil_img, level, _: ImageOps.invert(
        pil_img.convert('RGB')).convert('RGBA'))
# pylint:enable=g-long-lambda
blur = TransformT(
    'Blur', lambda pil_img, level, _: pil_img.filter(ImageFilter.BLUR))
smooth = TransformT(
    'Smooth',
    lambda pil_img, level, _: pil_img.filter(ImageFilter.SMOOTH))
def _rotate_impl(pil_img, level, _):
  """Rotates `pil_img` from -30 to 30 degrees depending on `level`.

  Args:
    pil_img: Image in PIL object.
    level: Strength of the operation specified as an Integer from
      [0, `PARAMETER_MAX`]; mapped linearly to a magnitude in [0, 30].
    _: Unused image shape.

  Returns:
    The rotated PIL Image.
  """
  degrees = int_parameter(level, 30)
  # Rotation direction is chosen uniformly at random on each call.
  if random.random() > 0.5:
    degrees = -degrees
  return pil_img.rotate(degrees)


rotate = TransformT('Rotate', _rotate_impl)
def _posterize_impl(pil_img, level, _):
  """Applies PIL Posterize to `pil_img`.

  Keeps `4 - level` bits per channel, so a higher `level` produces a
  stronger posterization effect.
  """
  level = int_parameter(level, 4)
  # Posterize only operates on RGB, so round-trip through RGB.
  return ImageOps.posterize(pil_img.convert('RGB'), 4 - level).convert('RGBA')


posterize = TransformT('Posterize', _posterize_impl)
def _shear_x_impl(pil_img, level, img_shape):
  """Applies PIL ShearX to `pil_img`.

  The ShearX operation shears the image along the horizontal axis with `level`
  magnitude.

  Args:
    pil_img: Image in PIL object.
    level: Strength of the operation specified as an Integer from
      [0, `PARAMETER_MAX`]; mapped to a shear factor in [0, 0.3].
    img_shape: Shape of the input image, used to size the output.

  Returns:
    A PIL Image that has had ShearX applied to it.
  """
  level = float_parameter(level, 0.3)
  # Shear direction is chosen uniformly at random on each call.
  if random.random() > 0.5:
    level = -level
  # Affine coefficients (a, b, c, d, e, f): x' = a*x + b*y + c,
  # y' = d*x + e*y + f; only the `b` (x-shear) term is non-trivial here.
  return pil_img.transform(
      _width_height_from_img_shape(img_shape),
      Image.AFFINE,
      (1, level, 0, 0, 1, 0))


shear_x = TransformT('ShearX', _shear_x_impl)
def _shear_y_impl(pil_img, level, img_shape):
  """Applies PIL ShearY to `pil_img`.

  The ShearY operation shears the image along the vertical axis with `level`
  magnitude.

  Args:
    pil_img: Image in PIL object.
    level: Strength of the operation specified as an Integer from
      [0, `PARAMETER_MAX`]; mapped to a shear factor in [0, 0.3].
    img_shape: Shape of the input image, used to size the output.

  Returns:
    A PIL Image that has had ShearY applied to it.
  """
  level = float_parameter(level, 0.3)
  # Shear direction is chosen uniformly at random on each call.
  if random.random() > 0.5:
    level = -level
  # Affine coefficients (a, b, c, d, e, f); only the `d` (y-shear) term
  # is non-trivial here.
  return pil_img.transform(
      _width_height_from_img_shape(img_shape),
      Image.AFFINE,
      (1, 0, 0, level, 1, 0))


shear_y = TransformT('ShearY', _shear_y_impl)
def _translate_x_impl(pil_img, level, img_shape):
  """Applies PIL TranslateX to `pil_img`.

  Translate the image in the horizontal direction by `level`
  number of pixels.

  Args:
    pil_img: Image in PIL object.
    level: Strength of the operation specified as an Integer from
      [0, `PARAMETER_MAX`]; mapped to a shift of up to 10 pixels.
    img_shape: Shape of the input image, used to size the output.

  Returns:
    A PIL Image that has had TranslateX applied to it.
  """
  level = int_parameter(level, 10)
  # Translation direction is chosen uniformly at random on each call.
  if random.random() > 0.5:
    level = -level
  # Affine tuple (1, 0, c, 0, 1, 0): pure x-offset by `c` pixels.
  return pil_img.transform(
      _width_height_from_img_shape(img_shape),
      Image.AFFINE,
      (1, 0, level, 0, 1, 0))


translate_x = TransformT('TranslateX', _translate_x_impl)
def _translate_y_impl(pil_img, level, img_shape):
  """Applies PIL TranslateY to `pil_img`.

  Translate the image in the vertical direction by `level`
  number of pixels.

  Args:
    pil_img: Image in PIL object.
    level: Strength of the operation specified as an Integer from
      [0, `PARAMETER_MAX`]; mapped to a shift of up to 10 pixels.
    img_shape: Shape of the input image, used to size the output.

  Returns:
    A PIL Image that has had TranslateY applied to it.
  """
  level = int_parameter(level, 10)
  # Translation direction is chosen uniformly at random on each call.
  if random.random() > 0.5:
    level = -level
  # Affine tuple (1, 0, 0, 0, 1, f): pure y-offset by `f` pixels.
  return pil_img.transform(
      _width_height_from_img_shape(img_shape),
      Image.AFFINE,
      (1, 0, 0, 0, 1, level))


translate_y = TransformT('TranslateY', _translate_y_impl)
def _crop_impl(pil_img, level, img_shape, interpolation=Image.BILINEAR):
  """Applies a crop to `pil_img` with the size depending on the `level`."""
  # Crop box is PIL's (left, upper, right, lower). NOTE(review): img_shape
  # comes from a numpy array, so img_shape[0] is the height, yet it is used
  # as the x-extent here; this is only consistent for square images —
  # confirm callers always pass square shapes.
  cropped = pil_img.crop((level, level, img_shape[0] - level, img_shape[1] - level))
  # Resize back to the original size so output dimensions are unchanged.
  resized = cropped.resize((img_shape[0], img_shape[1]), interpolation)
  return resized


crop_bilinear = TransformT('CropBilinear', _crop_impl)
def _solarize_impl(pil_img, level, _):
  """Applies PIL Solarize to `pil_img`.

  Inverts all pixel values above the threshold `256 - level`, so a higher
  `level` solarizes a larger part of the intensity range.

  Args:
    pil_img: Image in PIL object.
    level: Strength of the operation specified as an Integer from
      [0, `PARAMETER_MAX`].

  Returns:
    A PIL Image that has had Solarize applied to it.
  """
  level = int_parameter(level, 256)
  # Solarize only operates on RGB, so round-trip through RGB.
  return ImageOps.solarize(pil_img.convert('RGB'), 256 - level).convert('RGBA')


solarize = TransformT('Solarize', _solarize_impl)
def _cutout_pil_impl(pil_img, level, img_shape):
  """Apply cutout to pil_img at the specified level.

  The region to blank out comes from `create_cutout_mask`; every pixel
  inside it is overwritten with a fixed fill colour.
  """
  size = int_parameter(level, 20)
  # Level 0 maps to size 0: nothing to cut out.
  if size <= 0:
    return pil_img
  # Hard-codes 3 channels for the mask helper regardless of the input.
  img_height, img_width, num_channels = (img_shape[0], img_shape[1], 3)
  _, upper_coord, lower_coord = (
      create_cutout_mask(img_height, img_width, num_channels, size))
  pixels = pil_img.load()  # create the pixel map
  for i in range(upper_coord[0], lower_coord[0]):  # for every col:
    for j in range(upper_coord[1], lower_coord[1]):  # For every row
      pixels[i, j] = (125, 122, 113, 0)  # set the colour accordingly
  return pil_img


cutout = TransformT('Cutout', _cutout_pil_impl)
def _enhancer_impl(enhancer):
  """Sets level to be between 0.1 and 1.8 for ImageEnhance transforms of PIL."""

  def impl(pil_img, level, _):
    # Reachable enhancement factor is [0.1, 1.9]; 1.0 is the identity.
    v = float_parameter(level, 1.8) + .1  # going to 0 just destroys it
    return enhancer(pil_img).enhance(v)

  return impl


# Level-controlled ImageEnhance transforms built from the shared factory.
color = TransformT('Color', _enhancer_impl(ImageEnhance.Color))
contrast = TransformT('Contrast', _enhancer_impl(ImageEnhance.Contrast))
brightness = TransformT('Brightness', _enhancer_impl(
    ImageEnhance.Brightness))
sharpness = TransformT('Sharpness', _enhancer_impl(ImageEnhance.Sharpness))
# Registry of transforms that policies may reference by name.
# NOTE(review): `identity` is defined above but not listed here — confirm
# whether its omission is intentional.
ALL_TRANSFORMS = [
    flip_lr,
    flip_ud,
    auto_contrast,
    equalize,
    invert,
    rotate,
    posterize,
    crop_bilinear,
    solarize,
    color,
    contrast,
    brightness,
    sharpness,
    shear_x,
    shear_y,
    translate_x,
    translate_y,
    cutout,
    blur,
    smooth
]

# Lookup table from a transform's display name to its TransformT instance.
NAME_TO_TRANSFORM = {t.name: t for t in ALL_TRANSFORMS}
TRANSFORM_NAMES = NAME_TO_TRANSFORM.keys()
| google-research/uda | image/randaugment/augmentation_transforms.py | Python | apache-2.0 | 14,832 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import logging
import sys
from gbpclient.gbp.v2_0 import groupbasedpolicy as gbp
from gbpclient.tests.unit import test_cli20
class CLITestV20NatPoolJSON(test_cli20.CLITestV20Base):
    """CLI round-trip tests for the Group Based Policy nat-pool resource."""

    LOG = logging.getLogger(__name__)

    def setUp(self):
        super(CLITestV20NatPoolJSON, self).setUp()

    def test_create_nat_pool_with_mandatory_params(self):
        """nat-pool-create with all mandatory params."""
        resource = 'nat_pool'
        cmd = gbp.CreateNatPool(test_cli20.MyApp(sys.stdout), None)
        pool_name = 'my-name'
        tenant = 'my-tenant'
        pool_id = 'my-id'
        cli_args = ['--tenant-id', tenant, pool_name]
        self._test_create_resource(resource, cmd, pool_name, pool_id,
                                   cli_args, ['name'], [pool_name],
                                   tenant_id=tenant)

    def test_create_nat_pool_with_all_params(self):
        """nat-pool-create with all params."""
        resource = 'nat_pool'
        cmd = gbp.CreateNatPool(test_cli20.MyApp(sys.stdout), None)
        pool_name = 'myname'
        tenant = 'mytenant'
        description = 'My Nat Pool'
        pool_id = 'someid'
        ip_version = '4'
        ip_pool = '192.168.0.0/24'
        segment_id = "segmentid"
        shared = 'true'
        cli_args = [
            '--tenant-id', tenant,
            '--description', description,
            '--ip-version', ip_version,
            '--ip-pool', ip_pool,
            '--external-segment', segment_id,
            '--shared', shared,
            pool_name,
        ]
        # The CLI parses --ip-version into an int, hence ip_version=4 below.
        self._test_create_resource(resource, cmd, pool_name, pool_id,
                                   cli_args, ['name'], [pool_name],
                                   tenant_id=tenant,
                                   description=description,
                                   ip_version=4,
                                   ip_pool=ip_pool,
                                   external_segment_id=segment_id,
                                   shared=shared)

    def test_list_nat_pools(self):
        """nat-pool-list."""
        cmd = gbp.ListNatPool(test_cli20.MyApp(sys.stdout), None)
        self._test_list_resources('nat_pools', cmd, True)

    def test_show_nat_pool_name(self):
        """nat-pool-show."""
        cmd = gbp.ShowNatPool(test_cli20.MyApp(sys.stdout), None)
        show_args = ['--fields', 'id', self.test_id]
        self._test_show_resource('nat_pool', cmd, self.test_id,
                                 show_args, ['id'])

    def test_update_nat_pool(self):
        """nat-pool-update myid --name myname --tags a b."""
        cmd = gbp.UpdateNatPool(test_cli20.MyApp(sys.stdout), None)
        self._test_update_resource(
            'nat_pool', cmd, 'myid',
            ['myid', '--name', 'myname', '--tags', 'a', 'b'],
            {'name': 'myname', 'tags': ['a', 'b'], })

    def test_update_nat_pool_with_all_params(self):
        """nat-pool-update with every mutable field."""
        cmd = gbp.UpdateNatPool(test_cli20.MyApp(sys.stdout), None)
        pool_name = 'myname'
        description = 'My Nat Pool'
        pool_id = 'someid'
        segment_id = "segmentid"
        shared = 'true'
        cli_args = [
            '--name', pool_name,
            '--description', description,
            '--external-segment', segment_id,
            '--shared', shared,
            pool_id,
        ]
        expected_body = {
            'name': pool_name,
            'description': description,
            'external_segment_id': segment_id,
            'shared': shared
        }
        self._test_update_resource('nat_pool', cmd, pool_id, cli_args,
                                   expected_body)

    def test_delete_nat_pool_name(self):
        """nat-pool-delete."""
        cmd = gbp.DeleteNatPool(test_cli20.MyApp(sys.stdout), None)
        pool_id = 'my-id'
        self._test_delete_resource('nat_pool', cmd, pool_id, [pool_id])
| noironetworks/python-group-based-policy-client | gbpclient/tests/unit/test_cli20_natpool.py | Python | apache-2.0 | 4,756 |
import json
import os
from functools import wraps
from docker.utils.ports import split_port
from jsonschema import Draft4Validator
from jsonschema import FormatChecker
from jsonschema import ValidationError
from .errors import ConfigurationError
# Mapping of commonly mistyped or singular config keys to the option the
# user almost certainly meant; used to build "did you mean" hints in
# get_unsupported_config_msg.
DOCKER_CONFIG_HINTS = {
    'cpu_share': 'cpu_shares',
    'add_host': 'extra_hosts',
    'hosts': 'extra_hosts',
    'extra_host': 'extra_hosts',
    'device': 'devices',
    'link': 'links',
    'load_image': 'load_image',  # NOTE(review): maps to itself — confirm intent
    'memory_swap': 'memswap_limit',
    'port': 'ports',
    'privilege': 'privileged',
    'priviliged': 'privileged',
    'privilige': 'privileged',
    'volume': 'volumes',
    'workdir': 'working_dir',
}

# Regex character class of the characters allowed in service names.
# Raw string fix: the previous plain literal relied on Python passing the
# invalid escape sequences '\.' and '\-' through unchanged, which emits a
# DeprecationWarning (an error in future interpreters). The value is
# byte-identical.
VALID_NAME_CHARS = r'[a-zA-Z0-9\._\-]'
@FormatChecker.cls_checks(
    format="ports",
    raises=ValidationError(
        "Invalid port formatting, it should be "
        "'[[remote_ip:]remote_port:]port[/protocol]'"))
def format_ports(instance):
    """jsonschema format checker for the custom "ports" format.

    Delegates parsing to docker-py's `split_port`; returning False makes
    jsonschema raise the ValidationError declared in the decorator.
    """
    try:
        split_port(instance)
    except ValueError:
        return False
    return True
def validate_service_names(func):
    """Decorator: fail fast when any top-level service name is an int.

    YAML parses unquoted numeric keys as integers, which the rest of the
    pipeline cannot handle; surface a clear error instead.
    """
    @wraps(func)
    def func_wrapper(config):
        int_names = [n for n in config.keys() if type(n) is int]
        if int_names:
            offender = int_names[0]
            raise ConfigurationError(
                "Service name: {} needs to be a string, eg '{}'".format(
                    offender, offender)
            )
        return func(config)
    return func_wrapper
def validate_top_level_object(func):
    """Decorator: ensure the parsed config is a dictionary before use."""
    @wraps(func)
    def func_wrapper(config):
        if isinstance(config, dict):
            return func(config)
        raise ConfigurationError(
            "Top level object needs to be a dictionary. Check your .yml file "
            "that you have defined a service at the top level."
        )
    return func_wrapper
def get_unsupported_config_msg(service_name, error_key):
    """Build the error text for an unrecognized service config key.

    Appends a "did you mean" suggestion when the key is a known typo
    listed in DOCKER_CONFIG_HINTS.
    """
    msg = "Unsupported config option for '{}' service: '{}'".format(
        service_name, error_key)
    hint = DOCKER_CONFIG_HINTS.get(error_key)
    if hint is not None:
        msg += " (did you mean '{}'?)".format(hint)
    return msg
def process_errors(errors):
    """
    jsonschema gives us an error tree full of information to explain what has
    gone wrong. Process each error and pull out relevant information and re-write
    helpful error messages that are relevant.
    """
    def _parse_key_from_error_msg(error):
        # jsonschema quotes the offending key in its message (e.g.
        # "'foo' was unexpected"); grab the text between the first quotes.
        return error.message.split("'")[1]

    def _clean_error_message(message):
        # Strip the Python 2 unicode repr prefix jsonschema embeds.
        return message.replace("u'", "'")

    def _parse_valid_types_from_schema(schema):
        """
        Our defined types using $ref in the schema require some extra parsing
        retrieve a helpful type for error message display.
        """
        if '$ref' in schema:
            return schema['$ref'].replace("#/definitions/", "").replace("_", " ")
        else:
            return str(schema['type'])

    # Buckets for the different classes of message; concatenated in a
    # fixed order at the end so the output is stable.
    root_msgs = []
    invalid_keys = []
    required = []
    type_errors = []
    other_errors = []

    for error in errors:
        # handle root level errors
        if len(error.path) == 0:
            if error.validator == 'type':
                msg = "Top level object needs to be a dictionary. Check your .yml file that you have defined a service at the top level."
                root_msgs.append(msg)
            elif error.validator == 'additionalProperties':
                invalid_service_name = _parse_key_from_error_msg(error)
                msg = "Invalid service name '{}' - only {} characters are allowed".format(invalid_service_name, VALID_NAME_CHARS)
                root_msgs.append(msg)
            else:
                root_msgs.append(_clean_error_message(error.message))
        else:
            # handle service level errors
            service_name = error.path[0]
            # pop the service name off our path
            # (error.path is a deque, so popleft is O(1))
            error.path.popleft()
            if error.validator == 'additionalProperties':
                invalid_config_key = _parse_key_from_error_msg(error)
                invalid_keys.append(get_unsupported_config_msg(service_name, invalid_config_key))
            elif error.validator == 'anyOf':
                # Disambiguate which of the image/build/dockerfile
                # combinations actually failed the anyOf schema.
                if 'image' in error.instance and 'build' in error.instance:
                    required.append(
                        "Service '{}' has both an image and build path specified. "
                        "A service can either be built to image or use an existing "
                        "image, not both.".format(service_name))
                elif 'image' not in error.instance and 'build' not in error.instance:
                    required.append(
                        "Service '{}' has neither an image nor a build path "
                        "specified. Exactly one must be provided.".format(service_name))
                elif 'image' in error.instance and 'dockerfile' in error.instance:
                    required.append(
                        "Service '{}' has both an image and alternate Dockerfile. "
                        "A service can either be built to image or use an existing "
                        "image, not both.".format(service_name))
                else:
                    required.append(_clean_error_message(error.message))
            elif error.validator == 'oneOf':
                config_key = error.path[0]
                valid_types = [_parse_valid_types_from_schema(schema) for schema in error.schema['oneOf']]
                valid_type_msg = " or ".join(valid_types)
                type_errors.append("Service '{}' configuration key '{}' contains an invalid type, valid types are {}".format(
                    service_name, config_key, valid_type_msg)
                )
            elif error.validator == 'type':
                # Pick the right article ("a"/"an") for the expected type.
                msg = "a"
                if error.validator_value == "array":
                    msg = "an"
                if len(error.path) > 0:
                    config_key = " ".join(["'%s'" % k for k in error.path])
                    type_errors.append(
                        "Service '{}' configuration key {} contains an invalid "
                        "type, it should be {} {}".format(
                            service_name,
                            config_key,
                            msg,
                            error.validator_value))
                else:
                    root_msgs.append(
                        "Service '{}' doesn\'t have any configuration options. "
                        "All top level keys in your docker-compose.yml must map "
                        "to a dictionary of configuration options.'".format(service_name))
            elif error.validator == 'required':
                config_key = error.path[0]
                required.append(
                    "Service '{}' option '{}' is invalid, {}".format(
                        service_name,
                        config_key,
                        _clean_error_message(error.message)))
            elif error.validator == 'dependencies':
                dependency_key = list(error.validator_value.keys())[0]
                required_keys = ",".join(error.validator_value[dependency_key])
                required.append("Invalid '{}' configuration for '{}' service: when defining '{}' you must set '{}' as well".format(
                    dependency_key, service_name, dependency_key, required_keys))
            else:
                # Fallback for validators not handled above.
                config_key = " ".join(["'%s'" % k for k in error.path])
                err_msg = "Service '{}' configuration key {} value {}".format(service_name, config_key, error.message)
                other_errors.append(err_msg)

    return "\n".join(root_msgs + invalid_keys + required + type_errors + other_errors)
def validate_against_schema(config):
    """Validate `config` against the bundled JSON schema.

    Loads schema.json from this package's directory, runs a Draft 4
    validation with the custom "ports" format checker, and raises
    ConfigurationError with all collected messages on failure.
    """
    config_source_dir = os.path.dirname(os.path.abspath(__file__))
    schema_file = os.path.join(config_source_dir, "schema.json")

    with open(schema_file, "r") as schema_fh:
        schema = json.load(schema_fh)

    validation_output = Draft4Validator(schema, format_checker=FormatChecker(["ports"]))

    # Sort errors by string form so the output message order is stable.
    errors = [error for error in sorted(validation_output.iter_errors(config), key=str)]
    if errors:
        error_msg = process_errors(errors)
        raise ConfigurationError("Validation failed, reason(s):\n{}".format(error_msg))
| Dakno/compose | compose/config/validation.py | Python | apache-2.0 | 8,262 |
# Copyright 2016 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from common import BaseTest
class NetworkInterfaceTest(BaseTest):
    """ENI filter/action tests, replayed against recorded AWS flight data."""

    def test_interface_subnet(self):
        """An ENI matched by subnet + security-group gets re-grouped.

        Builds a VPC/subnet/ENI attached to a 'web-tier' group, then runs a
        policy that matches the ENI and swaps its matched groups for the
        quarantine isolation group.
        """
        factory = self.replay_flight_data(
            'test_network_interface_filter')
        client = factory().client('ec2')
        vpc_id = client.create_vpc(CidrBlock="10.4.0.0/16")['Vpc']['VpcId']
        self.addCleanup(client.delete_vpc, VpcId=vpc_id)
        sub_id = client.create_subnet(
            VpcId=vpc_id, CidrBlock="10.4.8.0/24")[
                'Subnet']['SubnetId']
        self.addCleanup(client.delete_subnet, SubnetId=sub_id)
        sg_id = client.create_security_group(
            GroupName="web-tier",
            VpcId=vpc_id,
            Description="for apps")['GroupId']
        self.addCleanup(client.delete_security_group, GroupId=sg_id)
        qsg_id = client.create_security_group(
            GroupName="quarantine-group",
            VpcId=vpc_id,
            Description="for quarantine")['GroupId']
        self.addCleanup(client.delete_security_group, GroupId=qsg_id)
        net = client.create_network_interface(
            SubnetId=sub_id, Groups=[sg_id])['NetworkInterface']
        net_id = net['NetworkInterfaceId']
        self.addCleanup(
            client.delete_network_interface, NetworkInterfaceId=net_id)
        p = self.load_policy({
            'name': 'net-find',
            'resource': 'eni',
            'filters': [
                {'type': 'subnet',
                 'key': 'SubnetId',
                 'value': sub_id},
                {'type': 'security-group',
                 'key': 'Description',
                 'value': 'for apps'}
            ],
            'actions': [{
                'type': 'remove-groups',
                'groups': 'matched',
                'isolation-group': qsg_id}]
        }, session_factory=factory)
        resources = p.run()
        self.assertEqual(len(resources), 1)
        self.assertEqual(resources[0]['NetworkInterfaceId'], net_id)
        self.assertEqual(resources[0]['c7n.matched-security-groups'], [sg_id])
        # After the action the ENI should carry only the isolation group.
        results = client.describe_network_interfaces(
            NetworkInterfaceIds=[net_id])['NetworkInterfaces']
        self.assertEqual([g['GroupId'] for g in results[0]['Groups']], [qsg_id])
class SecurityGroupTest(BaseTest):
    """Security-group filter/action tests (recorded AWS flight data)."""

    def test_used(self):
        """The `used` filter returns only groups attached to something."""
        factory = self.replay_flight_data(
            'test_security_group_used')
        p = self.load_policy({
            'name': 'sg-used',
            'resource': 'security-group',
            'filters': ['used']
        }, session_factory=factory)
        resources = p.run()
        self.assertEqual(len(resources), 3)
        self.assertEqual(
            set(['sg-f9cc4d9f', 'sg-13de8f75', 'sg-ce548cb7']),
            set([r['GroupId'] for r in resources]))

    def test_unused(self):
        """The `unused` filter returns only unattached groups."""
        factory = self.replay_flight_data(
            'test_security_group_unused')
        p = self.load_policy({
            'name': 'sg-unused',
            'resource': 'security-group',
            'filters': ['unused'],
        }, session_factory=factory)
        resources = p.run()
        self.assertEqual(len(resources), 1)

    def test_only_ports(self):
        """`OnlyPorts` matches rules exposing anything beyond the whitelist."""
        factory = self.replay_flight_data(
            'test_security_group_only_ports')
        client = factory().client('ec2')
        vpc_id = client.create_vpc(CidrBlock="10.4.0.0/16")['Vpc']['VpcId']
        self.addCleanup(client.delete_vpc, VpcId=vpc_id)
        sg_id = client.create_security_group(
            GroupName="web-tier",
            VpcId=vpc_id,
            Description="for apps")['GroupId']
        self.addCleanup(client.delete_security_group, GroupId=sg_id)
        # A wide rule (60000-62000) and a narrow one (61000 only); the
        # filter should flag the wide rule, which covers more than 61000.
        client.authorize_security_group_ingress(
            GroupId=sg_id,
            IpProtocol='tcp',
            FromPort=60000,
            ToPort=62000,
            CidrIp='10.2.0.0/16')
        client.authorize_security_group_ingress(
            GroupId=sg_id,
            IpProtocol='tcp',
            FromPort=61000,
            ToPort=61000,
            CidrIp='10.2.0.0/16')
        p = self.load_policy({
            'name': 'sg-find',
            'resource': 'security-group',
            'filters': [
                {'type': 'ingress',
                 'OnlyPorts': [61000]},
                {'GroupName': 'web-tier'}]
        }, session_factory=factory)
        resources = p.run()
        self.assertEqual(len(resources), 1)
        self.assertEqual(
            resources[0]['MatchedIpPermissions'],
            [{u'FromPort': 60000,
              u'IpProtocol': u'tcp',
              u'IpRanges': [{u'CidrIp': u'10.2.0.0/16'}],
              u'PrefixListIds': [],
              u'ToPort': 62000,
              u'UserIdGroupPairs': []}])

    def test_security_group_delete(self):
        """The delete action removes the matched group."""
        factory = self.replay_flight_data(
            'test_security_group_delete')
        client = factory().client('ec2')
        vpc_id = client.create_vpc(CidrBlock="10.4.0.0/16")['Vpc']['VpcId']
        self.addCleanup(client.delete_vpc, VpcId=vpc_id)
        sg_id = client.create_security_group(
            GroupName="web-tier",
            VpcId=vpc_id,
            Description="for apps")['GroupId']

        def delete_sg():
            # Best-effort cleanup: the policy should already have deleted
            # the group, in which case this call fails harmlessly.
            try:
                client.delete_security_group(GroupId=sg_id)
            except Exception:
                pass

        self.addCleanup(delete_sg)
        p = self.load_policy({
            'name': 'sg-delete',
            'resource': 'security-group',
            'filters': [
                {'GroupId': sg_id}],
            'actions': [
                'delete']}, session_factory=factory)
        resources = p.run()
        self.assertEqual(len(resources), 1)
        self.assertEqual(resources[0]['GroupId'], sg_id)
        # Describing the deleted group should raise; if it does not, the
        # delete action failed. (Fixed: was a bare `except:` which also
        # swallowed KeyboardInterrupt/SystemExit, and bound an unused var.)
        try:
            client.describe_security_groups(GroupIds=[sg_id])
        except Exception:
            pass
        else:
            self.fail("group not deleted")

    def test_port_within_range(self):
        """A `FromPort` equality match surfaces the full matched rule."""
        factory = self.replay_flight_data(
            'test_security_group_port_in_range')
        client = factory().client('ec2')
        vpc_id = client.create_vpc(CidrBlock="10.4.0.0/16")['Vpc']['VpcId']
        self.addCleanup(client.delete_vpc, VpcId=vpc_id)
        sg_id = client.create_security_group(
            GroupName="web-tier",
            VpcId=vpc_id,
            Description="for apps")['GroupId']
        self.addCleanup(client.delete_security_group, GroupId=sg_id)
        client.authorize_security_group_ingress(
            GroupId=sg_id,
            IpProtocol='tcp',
            FromPort=60000,
            ToPort=62000,
            CidrIp='10.2.0.0/16')
        p = self.load_policy({
            'name': 'sg-find',
            'resource': 'security-group',
            'filters': [
                {'type': 'ingress',
                 'IpProtocol': 'tcp',
                 'FromPort': 60000},
                {'GroupName': 'web-tier'}]
        }, session_factory=factory)
        resources = p.run()
        self.assertEqual(len(resources), 1)
        self.assertEqual(resources[0]['GroupName'], 'web-tier')
        self.assertEqual(
            resources[0]['MatchedIpPermissions'],
            [{u'FromPort': 60000,
              u'IpProtocol': u'tcp',
              u'IpRanges': [{u'CidrIp': u'10.2.0.0/16'}],
              u'PrefixListIds': [],
              u'ToPort': 62000,
              u'UserIdGroupPairs': []}])

    def test_ingress_remove(self):
        """remove-permissions strips the matched ingress rules."""
        factory = self.replay_flight_data(
            'test_security_group_ingress_filter')
        client = factory().client('ec2')
        vpc_id = client.create_vpc(CidrBlock="10.4.0.0/16")['Vpc']['VpcId']
        sg_id = client.create_security_group(
            GroupName="web-tier",
            VpcId=vpc_id,
            Description="for apps")['GroupId']
        self.addCleanup(client.delete_vpc, VpcId=vpc_id)
        client.authorize_security_group_ingress(
            GroupId=sg_id,
            IpProtocol='tcp',
            FromPort=0,
            ToPort=62000,
            CidrIp='10.2.0.0/16')
        self.addCleanup(client.delete_security_group, GroupId=sg_id)
        p = self.load_policy({
            'name': 'sg-find',
            'resource': 'security-group',
            'filters': [
                {'VpcId': vpc_id},
                {'type': 'ingress',
                 'IpProtocol': 'tcp',
                 'FromPort': 0},
                {'GroupName': 'web-tier'}],
            'actions': [
                {'type': 'remove-permissions',
                 'ingress': 'matched'}]},
            session_factory=factory)
        resources = p.run()
        self.assertEqual(len(resources), 1)
        self.assertEqual(resources[0]['GroupId'], sg_id)
        group_info = client.describe_security_groups(
            GroupIds=[sg_id])['SecurityGroups'][0]
        self.assertEqual(group_info.get('IpPermissions', []), [])

    def test_default_vpc(self):
        """default-vpc filter matches only the default VPC's group."""
        # preconditions, more than one vpc, each with at least one
        # security group
        factory = self.replay_flight_data(
            'test_security_group_default_vpc_filter')
        p = self.load_policy({
            'name': 'sg-test',
            'resource': 'security-group',
            'filters': [
                {'type': 'default-vpc'},
                {'GroupName': 'default'}]},
            session_factory=factory)
        resources = p.run()
        self.assertEqual(len(resources), 1)

    def test_only_ports_ingress(self):
        """OnlyPorts on in-memory resources: port 53 is outside [80]."""
        p = self.load_policy({
            'name': 'ingress-access',
            'resource': 'security-group',
            'filters': [
                {'type': 'ingress', 'OnlyPorts': [80]}
            ]})
        resources = [
            {'Description': 'Typical Internet-Facing Security Group',
             'GroupId': 'sg-abcd1234',
             'GroupName': 'TestInternetSG',
             'IpPermissions': [{'FromPort': 53,
                                'IpProtocol': 'tcp',
                                'IpRanges': ['10.0.0.0/8'],
                                'PrefixListIds': [],
                                'ToPort': 53,
                                'UserIdGroupPairs': []}],
             'IpPermissionsEgress': [],
             'OwnerId': '123456789012',
             'Tags': [{'Key': 'Value',
                       'Value': 'InternetSecurityGroup'},
                      {'Key': 'Key', 'Value': 'Name'}],
             'VpcId': 'vpc-1234abcd'}
        ]
        manager = p.get_resource_manager()
        self.assertEqual(len(manager.filter_resources(resources)), 1)

    def test_ports_ingress(self):
        """Ports on in-memory resources: rule covering 53 matches."""
        p = self.load_policy({
            'name': 'ingress-access',
            'resource': 'security-group',
            'filters': [
                {'type': 'ingress', 'Ports': [53]}
            ]})
        resources = [
            {'Description': 'Typical Internet-Facing Security Group',
             'GroupId': 'sg-abcd1234',
             'GroupName': 'TestInternetSG',
             'IpPermissions': [{'FromPort': 53,
                                'IpProtocol': 'tcp',
                                'IpRanges': ['10.0.0.0/8'],
                                'PrefixListIds': [],
                                'ToPort': 53,
                                'UserIdGroupPairs': []}],
             'IpPermissionsEgress': [],
             'OwnerId': '123456789012',
             'Tags': [{'Key': 'Value',
                       'Value': 'InternetSecurityGroup'},
                      {'Key': 'Key', 'Value': 'Name'}],
             'VpcId': 'vpc-1234abcd'}
        ]
        manager = p.get_resource_manager()
        self.assertEqual(len(manager.filter_resources(resources)), 1)

    def test_cidr_ingress(self):
        """Cidr value_type=cidr matches an IP contained in a rule's CIDR."""
        factory = self.replay_flight_data('test_security_group_cidr_ingress')
        client = factory().client('ec2')
        vpc_id = client.create_vpc(CidrBlock="10.42.0.0/16")['Vpc']['VpcId']
        self.addCleanup(client.delete_vpc, VpcId=vpc_id)
        sg_id = client.create_security_group(
            GroupName="allow-https-ingress",
            VpcId=vpc_id,
            Description="inbound access")['GroupId']
        self.addCleanup(client.delete_security_group, GroupId=sg_id)
        client.authorize_security_group_ingress(
            GroupId=sg_id,
            IpPermissions=[{
                'IpProtocol': 'tcp',
                'FromPort': 443,
                'ToPort': 443,
                'IpRanges': [
                    {
                        'CidrIp': '10.42.1.0/24'
                    }]
            }])
        p = self.load_policy({
            'name': 'ingress-access',
            'resource': 'security-group',
            'filters': [
                {'type': 'ingress',
                 'Cidr': {
                     'value': '10.42.1.239',
                     'op': 'in',
                     'value_type': 'cidr'}}]
        }, session_factory=factory)
        resources = p.run()
        self.assertEqual(len(resources), 1)
        self.assertEqual(
            len(resources[0].get('MatchedIpPermissions', [])), 1)

    def test_cidr_size_egress(self):
        """Cidr value_type=cidr_size flags egress rules wider than /24."""
        factory = self.replay_flight_data('test_security_group_cidr_size')
        client = factory().client('ec2')
        vpc_id = client.create_vpc(CidrBlock="10.42.0.0/16")['Vpc']['VpcId']
        self.addCleanup(client.delete_vpc, VpcId=vpc_id)
        sg_id = client.create_security_group(
            GroupName="wide-egress",
            VpcId=vpc_id,
            Description="unnecessarily large egress CIDR rule")['GroupId']
        self.addCleanup(client.delete_security_group, GroupId=sg_id)
        # Replace the default allow-all egress rule with two scoped ones
        # (/16 and /24); only the /16 is wider than the /24 threshold.
        client.revoke_security_group_egress(
            GroupId=sg_id,
            IpPermissions=[
                {'IpProtocol': '-1',
                 'IpRanges': [{'CidrIp': '0.0.0.0/0'}]}])
        client.authorize_security_group_egress(
            GroupId=sg_id,
            IpPermissions=[{
                'IpProtocol': 'tcp',
                'FromPort': 443,
                'ToPort': 443,
                'IpRanges': [
                    {'CidrIp': '10.42.0.0/16'},
                    {'CidrIp': '10.42.1.0/24'}]}])
        p = self.load_policy({
            'name': 'wide-egress',
            'resource': 'security-group',
            'filters': [
                {'type': 'egress',
                 'Cidr': {
                     'value': 24,
                     'op': 'lt',
                     'value_type': 'cidr_size'}},
                {'GroupName': 'wide-egress'}]
        }, session_factory=factory)
        resources = p.run()
        self.assertEqual(len(resources), 1)
        self.assertEqual(
            len(resources[0].get('MatchedIpPermissionsEgress', [])), 1)
        self.assertEqual(
            resources[0]['MatchedIpPermissionsEgress'],
            [{u'FromPort': 443,
              u'IpProtocol': u'tcp',
              u'IpRanges': [
                  {u'CidrIp': u'10.42.0.0/16'},
                  {u'CidrIp': u'10.42.1.0/24'}],
              u'PrefixListIds': [],
              u'ToPort': 443,
              u'UserIdGroupPairs': []}])
class VpcTest(BaseTest):
    """Tests for the vpc resource's subnets filter."""

    def test_subnets(self):
        """A VPC with no subnets matches an empty `subnets` filter value."""
        session_factory = self.replay_flight_data(
            'test_vpc_subnets_filter')
        policy = self.load_policy(
            {'name': 'empty-vpc-test',
             'resource': 'vpc',
             'filters': [{'type': 'subnets', 'value': []}]},
            session_factory=session_factory)
        matched = policy.run()
        self.assertEqual(len(matched), 1)
| RyanWolfe/cloud-custodian | tests/test_vpc.py | Python | apache-2.0 | 16,157 |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""This API defines FeatureColumn abstraction.
FeatureColumns provide a high level abstraction for ingesting and representing
features. FeatureColumns are also the primary way of encoding features for
canned `tf.estimator.Estimator`s.
When using FeatureColumns with `Estimators`, the type of feature column you
should choose depends on (1) the feature type and (2) the model type.
1. Feature type:
* Continuous features can be represented by `numeric_column`.
* Categorical features can be represented by any `categorical_column_with_*`
column:
- `categorical_column_with_vocabulary_list`
- `categorical_column_with_vocabulary_file`
- `categorical_column_with_hash_bucket`
- `categorical_column_with_identity`
- `weighted_categorical_column`
2. Model type:
* Deep neural network models (`DNNClassifier`, `DNNRegressor`).
Continuous features can be directly fed into deep neural network models.
age_column = numeric_column("age")
To feed sparse features into DNN models, wrap the column with
`embedding_column` or `indicator_column`. `indicator_column` is recommended
for features with only a few possible values. For features with many
possible values, to reduce the size of your model, `embedding_column` is
recommended.
embedded_dept_column = embedding_column(
categorical_column_with_vocabulary_list(
"department", ["math", "philosophy", ...]), dimension=10)
* Wide (aka linear) models (`LinearClassifier`, `LinearRegressor`).
Sparse features can be fed directly into linear models. They behave like an
indicator column but with an efficient implementation.
dept_column = categorical_column_with_vocabulary_list("department",
["math", "philosophy", "english"])
It is recommended that continuous features be bucketized before being
fed into linear models.
bucketized_age_column = bucketized_column(
source_column=age_column,
boundaries=[18, 25, 30, 35, 40, 45, 50, 55, 60, 65])
Sparse features can be crossed (also known as conjuncted or combined) in
order to form non-linearities, and then fed into linear models.
cross_dept_age_column = crossed_column(
columns=["department", bucketized_age_column],
hash_bucket_size=1000)
Example of building canned `Estimator`s using FeatureColumns:
```python
# Define features and transformations
deep_feature_columns = [age_column, embedded_dept_column]
wide_feature_columns = [dept_column, bucketized_age_column,
cross_dept_age_column]
# Build deep model
estimator = DNNClassifier(
feature_columns=deep_feature_columns,
hidden_units=[500, 250, 50])
estimator.train(...)
# Or build a wide model
estimator = LinearClassifier(
feature_columns=wide_feature_columns)
estimator.train(...)
# Or build a wide and deep model!
estimator = DNNLinearCombinedClassifier(
linear_feature_columns=wide_feature_columns,
dnn_feature_columns=deep_feature_columns,
dnn_hidden_units=[500, 250, 50])
estimator.train(...)
```
FeatureColumns can also be transformed into a generic input layer for
custom models using `input_layer`.
Example of building model using FeatureColumns, this can be used in a
`model_fn` which is given to a `tf.estimator.Estimator`:
```python
# Building model via layers
deep_feature_columns = [age_column, embedded_dept_column]
columns_to_tensor = parse_feature_columns_from_examples(
serialized=my_data,
feature_columns=deep_feature_columns)
first_layer = input_layer(
features=columns_to_tensor,
feature_columns=deep_feature_columns)
second_layer = fully_connected(first_layer, ...)
```
NOTE: Functions prefixed with "_" indicate experimental or private parts of
the API subject to change, and should not be relied upon!
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import collections
import math
import numpy as np
import six
from tensorflow.python.eager import context
from tensorflow.python.feature_column import feature_column as fc_old
from tensorflow.python.feature_column import utils as fc_utils
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor as sparse_tensor_lib
from tensorflow.python.framework import tensor_shape
# TODO(b/118385027): Dependency on keras can be problematic if Keras moves out
# of the main repo.
from tensorflow.python.keras import initializers
from tensorflow.python.keras.utils import generic_utils
from tensorflow.python.keras.engine import training
from tensorflow.python.keras.engine.base_layer import Layer
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import embedding_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import lookup_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import gfile
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import checkpoint_utils
from tensorflow.python.training.tracking import tracking
from tensorflow.python.training.tracking import base as trackable
from tensorflow.python.util import deprecation
from tensorflow.python.util import nest
from tensorflow.python.util.tf_export import tf_export
from tensorflow.python.util.compat import collections_abc
_FEATURE_COLUMN_DEPRECATION_DATE = None
_FEATURE_COLUMN_DEPRECATION = ('The old _FeatureColumn APIs are being '
'deprecated. Please use the new FeatureColumn '
'APIs instead.')
class StateManager(object):
  """Manages the state associated with FeatureColumns.

  Some `FeatureColumn`s create variables or resources to assist their
  computation. The `StateManager` is responsible for creating and storing these
  objects since `FeatureColumn`s are supposed to be stateless configuration
  only.

  This base class only defines the interface; every method raises
  `NotImplementedError` and must be overridden by concrete subclasses.
  """

  def create_variable(self,
                      feature_column,
                      name,
                      shape,
                      dtype=None,
                      trainable=True,
                      use_resource=True,
                      initializer=None):
    """Creates a new variable.

    Args:
      feature_column: A `FeatureColumn` object this variable corresponds to.
      name: variable name.
      shape: variable shape.
      dtype: The type of the variable. Defaults to `self.dtype` or `float32`.
      trainable: Whether this variable is trainable or not.
      use_resource: If true, we use resource variables. Otherwise we use
        RefVariable.
      initializer: initializer instance (callable).

    Returns:
      The created variable.

    Raises:
      NotImplementedError: always; subclasses must override.
    """
    del feature_column, name, shape, dtype, trainable, use_resource, initializer
    raise NotImplementedError('StateManager.create_variable')

  def add_variable(self, feature_column, var):
    """Adds an existing variable to the state.

    Args:
      feature_column: A `FeatureColumn` object to associate this variable with.
      var: The variable.

    Raises:
      NotImplementedError: always; subclasses must override.
    """
    del feature_column, var
    raise NotImplementedError('StateManager.add_variable')

  def get_variable(self, feature_column, name):
    """Returns an existing variable.

    Args:
      feature_column: A `FeatureColumn` object this variable corresponds to.
      name: variable name.

    Raises:
      NotImplementedError: always; subclasses must override.
    """
    del feature_column, name
    # Fixed: the message previously said 'StateManager.get_var', which does
    # not match this method's actual name.
    raise NotImplementedError('StateManager.get_variable')

  def add_resource(self, feature_column, name, resource):
    """Creates a new resource.

    Resources can be things such as tables etc.

    Args:
      feature_column: A `FeatureColumn` object this resource corresponds to.
      name: Name of the resource.
      resource: The resource.

    Returns:
      The created resource.

    Raises:
      NotImplementedError: always; subclasses must override.
    """
    del feature_column, name, resource
    raise NotImplementedError('StateManager.add_resource')

  def get_resource(self, feature_column, name):
    """Returns an already created resource.

    Resources can be things such as tables etc.

    Args:
      feature_column: A `FeatureColumn` object this variable corresponds to.
      name: Name of the resource.

    Raises:
      NotImplementedError: always; subclasses must override.
    """
    del feature_column, name
    raise NotImplementedError('StateManager.get_resource')
class _StateManagerImpl(StateManager):
  """Manages the state of DenseFeatures and LinearLayer.

  Keeps one dictionary of variables and one of resources per feature column,
  and registers everything it creates with the owning layer so that it is
  tracked for checkpointing.
  """

  def __init__(self, layer, trainable):
    """Creates an _StateManagerImpl object.

    Args:
      layer: The input layer this state manager is associated with.
      trainable: Whether by default, variables created are trainable or not.
    """
    self._trainable = trainable
    self._layer = layer
    # Make sure the layer exposes a list to collect column resources.
    if self._layer is not None and not hasattr(self._layer, '_resources'):
      self._layer._resources = []  # pylint: disable=protected-access
    self._cols_to_vars_map = collections.defaultdict(dict)
    # TODO(vbardiovsky): Make sure the resources are tracked by moving them to
    # the layer (inheriting from AutoTrackable), e.g.:
    # self._layer._resources_map = data_structures.Mapping()
    self._cols_to_resources_map = collections.defaultdict(dict)

  def create_variable(self,
                      feature_column,
                      name,
                      shape,
                      dtype=None,
                      trainable=True,
                      use_resource=True,
                      initializer=None):
    column_vars = self._cols_to_vars_map[feature_column]
    if name in column_vars:
      raise ValueError('Variable already exists.')
    # We explicitly track these variables since `name` is not guaranteed to be
    # unique and disable manual tracking that the add_variable call does.
    with trackable.no_manual_dependency_tracking_scope(self._layer):
      new_var = self._layer.add_variable(
          name=name,
          shape=shape,
          dtype=dtype,
          initializer=initializer,
          trainable=self._trainable and trainable,
          use_resource=use_resource,
          # TODO(rohanj): Get rid of this hack once we have a mechanism for
          # specifying a default partitioner for an entire layer. In that case,
          # the default getter for Layers should work.
          getter=variable_scope.get_variable)
    if isinstance(new_var, trackable.Trackable):
      self._layer._track_trackable(new_var, feature_column.name + '/' + name)  # pylint: disable=protected-access
    column_vars[name] = new_var
    return new_var

  def get_variable(self, feature_column, name):
    try:
      return self._cols_to_vars_map[feature_column][name]
    except KeyError:
      raise ValueError('Variable does not exist.')

  def add_resource(self, feature_column, name, resource):
    self._cols_to_resources_map[feature_column][name] = resource
    if self._layer is not None:
      self._layer._resources.append(resource)  # pylint: disable=protected-access

  def get_resource(self, feature_column, name):
    try:
      return self._cols_to_resources_map[feature_column][name]
    except KeyError:
      raise ValueError('Resource does not exist.')
class _StateManagerImplV2(_StateManagerImpl):
  """Manages the state of DenseFeatures."""

  def create_variable(self,
                      feature_column,
                      name,
                      shape,
                      dtype=None,
                      trainable=True,
                      use_resource=True,
                      initializer=None):
    column_vars = self._cols_to_vars_map[feature_column]
    if name in column_vars:
      raise ValueError('Variable already exists.')
    # We explicitly track these variables since `name` is not guaranteed to be
    # unique and disable manual tracking that the add_variable call does.
    with trackable.no_manual_dependency_tracking_scope(self._layer):
      # Unlike the V1 implementation, no custom `getter` is supplied here:
      # V2 layers use the default variable-creation path.
      new_var = self._layer.add_variable(
          name=name,
          shape=shape,
          dtype=dtype,
          initializer=initializer,
          trainable=self._trainable and trainable,
          use_resource=use_resource)
    if isinstance(new_var, trackable.Trackable):
      self._layer._track_trackable(new_var, feature_column.name + '/' + name)  # pylint: disable=protected-access
    column_vars[name] = new_var
    return new_var
class _BaseFeaturesLayer(Layer):
  """Base class for DenseFeatures and SequenceFeatures.

  Defines common methods and helpers.

  Args:
    feature_columns: An iterable containing the FeatureColumns to use as
      inputs to your model.
    expected_column_type: Expected class for provided feature columns.
    trainable: Boolean, whether the layer's variables will be updated via
      gradient descent during training.
    name: Name to give to the DenseFeatures.
    **kwargs: Keyword arguments to construct a layer.

  Raises:
    ValueError: if an item in `feature_columns` doesn't match
      `expected_column_type`.
  """

  def __init__(self, feature_columns, expected_column_type, trainable, name,
               **kwargs):
    super(_BaseFeaturesLayer, self).__init__(
        name=name, trainable=trainable, **kwargs)
    self._feature_columns = _normalize_feature_columns(feature_columns)
    self._state_manager = _StateManagerImpl(self, self.trainable)
    for column in self._feature_columns:
      if not isinstance(column, expected_column_type):
        raise ValueError(
            'Items of feature_columns must be a {}. '
            'You can wrap a categorical column with an '
            'embedding_column or indicator_column. Given: {}'.format(
                expected_column_type, column))

  def build(self, _):
    # Variable scopes are opened per layer and per column so that variable
    # partitioning information percolates down to the columns' state.
    for column in self._feature_columns:
      with variable_scope._pure_variable_scope(self.name):  # pylint: disable=protected-access
        with variable_scope._pure_variable_scope(column.name):  # pylint: disable=protected-access
          column.create_state(self._state_manager)
    super(_BaseFeaturesLayer, self).build(None)

  def _target_shape(self, input_shape, num_elements):
    """Computes expected output shape of the layer or a column's dense tensor.

    Fixed: this hook was previously named `_output_shape`, but both
    `compute_output_shape` and `_process_dense_tensor` call
    `self._target_shape`; subclasses must override this method under this
    name.

    Args:
      input_shape: Tensor or array with batch shape.
      num_elements: Size of the last dimension of the output.

    Returns:
      Tuple with output shape.
    """
    raise NotImplementedError('Calling an abstract method.')

  def compute_output_shape(self, input_shape):
    # The layer concatenates all columns along the last axis, so the output's
    # last dimension is the sum of the columns' element counts.
    total_elements = 0
    for column in self._feature_columns:
      total_elements += column.variable_shape.num_elements()
    return self._target_shape(input_shape, total_elements)

  def _process_dense_tensor(self, column, tensor):
    """Reshapes the dense tensor output of a column based on expected shape.

    Args:
      column: A DenseColumn or SequenceDenseColumn object.
      tensor: A dense tensor obtained from the same column.

    Returns:
      Reshaped dense tensor.
    """
    num_elements = column.variable_shape.num_elements()
    target_shape = self._target_shape(array_ops.shape(tensor), num_elements)
    return array_ops.reshape(tensor, shape=target_shape)

  def _verify_and_concat_tensors(self, output_tensors):
    """Verifies and concatenates the dense output of several columns."""
    _verify_static_batch_size_equality(output_tensors, self._feature_columns)
    return array_ops.concat(output_tensors, -1)

  def get_config(self):
    # Import here to avoid circular imports.
    from tensorflow.python.feature_column import serialization  # pylint: disable=g-import-not-at-top
    column_configs = serialization.serialize_feature_columns(
        self._feature_columns)
    config = {'feature_columns': column_configs}
    base_config = super(  # pylint: disable=bad-super-call
        _BaseFeaturesLayer, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))

  @classmethod
  def from_config(cls, config, custom_objects=None):
    # Import here to avoid circular imports.
    from tensorflow.python.feature_column import serialization  # pylint: disable=g-import-not-at-top
    config_cp = config.copy()
    config_cp['feature_columns'] = serialization.deserialize_feature_columns(
        config['feature_columns'], custom_objects=custom_objects)
    return cls(**config_cp)
class _LinearModelLayer(Layer):
  """Layer that contains logic for `LinearModel`.

  Holds one 'weights' variable per feature column plus a shared bias, and
  computes the per-example weighted sum (logits) over all columns in `call`.
  """
  def __init__(self,
               feature_columns,
               units=1,
               sparse_combiner='sum',
               trainable=True,
               name=None,
               **kwargs):
    """Constructs a _LinearModelLayer.

    Args:
      feature_columns: An iterable of `DenseColumn`s and/or
        `CategoricalColumn`s used as inputs to the linear model.
      units: An integer, dimensionality of the output space.
      sparse_combiner: A string ('mean', 'sqrtn' or 'sum') specifying how to
        reduce a multivalent categorical column.
      trainable: Whether the layer's variables are trainable.
      name: Name for the layer; also used to scope variables and ops.
      **kwargs: Keyword arguments forwarded to the `Layer` base class.

    Raises:
      ValueError: if an item in `feature_columns` is neither a `DenseColumn`
        nor a `CategoricalColumn`.
    """
    super(_LinearModelLayer, self).__init__(
        name=name, trainable=trainable, **kwargs)
    self._feature_columns = _normalize_feature_columns(feature_columns)
    for column in self._feature_columns:
      if not isinstance(column, (DenseColumn, CategoricalColumn)):
        raise ValueError(
            'Items of feature_columns must be either a '
            'DenseColumn or CategoricalColumn. Given: {}'.format(column))
    self._units = units
    self._sparse_combiner = sparse_combiner
    # All column weights are owned by this state manager.
    self._state_manager = _StateManagerImpl(self, self.trainable)
    # Bias variable is created lazily in build().
    self.bias = None
  def build(self, _):
    # We need variable scopes for now because we want the variable partitioning
    # information to percolate down. We also use _pure_variable_scope's here
    # since we want to open up a name_scope in the `call` method while creating
    # the ops.
    with variable_scope._pure_variable_scope(self.name):  # pylint: disable=protected-access
      for column in self._feature_columns:
        with variable_scope._pure_variable_scope(column.name):  # pylint: disable=protected-access
          # Create the state for each feature column
          column.create_state(self._state_manager)
          # Create a weight variable for each column.
          # Categorical columns get one weight row per bucket (they act like
          # indicator columns); dense columns get one row per output element.
          if isinstance(column, CategoricalColumn):
            first_dim = column.num_buckets
          else:
            first_dim = column.variable_shape.num_elements()
          self._state_manager.create_variable(
              column,
              name='weights',
              dtype=dtypes.float32,
              shape=(first_dim, self._units),
              initializer=init_ops.zeros_initializer(),
              trainable=self.trainable)
      # Create a bias variable.
      self.bias = self.add_variable(
          name='bias_weights',
          dtype=dtypes.float32,
          shape=[self._units],
          initializer=init_ops.zeros_initializer(),
          trainable=self.trainable,
          use_resource=True,
          # TODO(rohanj): Get rid of this hack once we have a mechanism for
          # specifying a default partitioner for an entire layer. In that case,
          # the default getter for Layers should work.
          getter=variable_scope.get_variable)
    super(_LinearModelLayer, self).build(None)
  def call(self, features):
    """Computes the linear model's logits for the given features.

    Args:
      features: A mapping from feature name to `Tensor` or `SparseTensor`.

    Returns:
      A `Tensor` of shape (batch_size, units): the sum over columns of each
      column's weighted sum, plus the bias.

    Raises:
      ValueError: if `features` is not a dictionary.
    """
    if not isinstance(features, dict):
      raise ValueError('We expected a dictionary here. Instead we got: {}'
                       .format(features))
    with ops.name_scope(self.name):
      transformation_cache = FeatureTransformationCache(features)
      weighted_sums = []
      for column in self._feature_columns:
        with ops.name_scope(column.name):
          # All the weights used in the linear model are owned by the state
          # manager associated with this Linear Model.
          weight_var = self._state_manager.get_variable(column, 'weights')
          weighted_sum = _create_weighted_sum(
              column=column,
              transformation_cache=transformation_cache,
              state_manager=self._state_manager,
              sparse_combiner=self._sparse_combiner,
              weight_var=weight_var)
          weighted_sums.append(weighted_sum)
      _verify_static_batch_size_equality(weighted_sums, self._feature_columns)
      predictions_no_bias = math_ops.add_n(
          weighted_sums, name='weighted_sum_no_bias')
      predictions = nn_ops.bias_add(
          predictions_no_bias, self.bias, name='weighted_sum')
      return predictions
  def get_config(self):
    """Returns the serializable config of this layer (for Keras saving)."""
    # Import here to avoid circular imports.
    from tensorflow.python.feature_column import serialization  # pylint: disable=g-import-not-at-top
    column_configs = serialization.serialize_feature_columns(
        self._feature_columns)
    config = {
        'feature_columns': column_configs,
        'units': self._units,
        'sparse_combiner': self._sparse_combiner
    }
    base_config = super(  # pylint: disable=bad-super-call
        _LinearModelLayer, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))
  @classmethod
  def from_config(cls, config, custom_objects=None):
    """Recreates a layer from its `get_config` output."""
    # Import here to avoid circular imports.
    from tensorflow.python.feature_column import serialization  # pylint: disable=g-import-not-at-top
    config_cp = config.copy()
    columns = serialization.deserialize_feature_columns(
        config_cp['feature_columns'], custom_objects=custom_objects)
    del config_cp['feature_columns']
    return cls(feature_columns=columns, **config_cp)
# TODO(tanzheny): Cleanup it with respect to Premade model b/132690565.
class LinearModel(training.Model):
  """Produces a linear prediction `Tensor` based on given `feature_columns`.

  This model generates a weighted sum with output dimensionality `units`.
  The weighted sum corresponds to logits for classification problems and to
  the prediction itself for linear regression problems.

  Note on supported columns: categorical columns are handled as if they were
  wrapped in an `indicator_column`. For instance, given the `SparseTensor`
  input

  ```python
  shape = [2, 2]
  {
      [0, 0]: "a"
      [1, 0]: "b"
      [1, 1]: "c"
  }
  ```

  the model implicitly assigns weights for the presence of "a", "b" and "c",
  whereas `input_layer` would require each categorical column to be
  explicitly wrapped in an `embedding_column` or an `indicator_column`.

  Example of usage:

  ```python
  price = numeric_column('price')
  price_buckets = bucketized_column(price, boundaries=[0., 10., 100., 1000.])
  keywords = categorical_column_with_hash_bucket("keywords", 10K)
  keywords_price = crossed_column('keywords', price_buckets, ...)
  columns = [price_buckets, keywords, keywords_price ...]
  linear_model = LinearLayer(columns)

  features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
  prediction = linear_model(features)
  ```
  """

  def __init__(self,
               feature_columns,
               units=1,
               sparse_combiner='sum',
               trainable=True,
               name=None,
               **kwargs):
    """Constructs a LinearModel.

    Args:
      feature_columns: An iterable containing the FeatureColumns to use as
        inputs to your model. All items should be instances of classes
        derived from `_FeatureColumn`s.
      units: An integer, dimensionality of the output space. Default value
        is 1.
      sparse_combiner: A string specifying how to reduce a multivalent
        categorical column. Except `numeric_column`, almost all columns
        passed to a linear model are treated as categorical, and each
        categorical column is combined independently. Supported values are
        "mean", "sqrtn" and "sum" (the default):
        * "sum": do not normalize features in the column
        * "mean": do l1 normalization on features in the column
        * "sqrtn": do l2 normalization on features in the column
        "sqrtn" often achieves good accuracy, in particular with
        bag-of-words columns. As an illustration, for two categorical
        feature columns

        ```python
        # Feature 1
        shape = [2, 2]
        {
            [0, 0]: "a"
            [0, 1]: "b"
            [1, 0]: "c"
        }
        # Feature 2
        shape = [2, 3]
        {
            [0, 0]: "d"
            [1, 0]: "e"
            [1, 1]: "f"
            [1, 2]: "g"
        }
        ```

        with `sparse_combiner` "mean", the outputs are conceptually

        ```
        y_0 = 1.0 / 2.0 * (w_a + w_b) + w_c + b_0
        y_1 = w_d + 1.0 / 3.0 * (w_e + w_f + w_g) + b_1
        ```

        where `y_i` is the output, `b_i` is the bias, and `w_x` is the
        weight assigned to the presence of `x` in the input features.
      trainable: If `True` also add the variable to the graph collection
        `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
      name: Name to give to the Linear Model. All variables and ops created
        will be scoped by this name.
      **kwargs: Keyword arguments to construct a layer.

    Raises:
      ValueError: if an item in `feature_columns` is neither a `DenseColumn`
        nor `CategoricalColumn`.
    """
    super(LinearModel, self).__init__(name=name, **kwargs)
    # All real work is delegated to the inner layer; the model's name is
    # reused so variables and ops live under a single scope.
    self.layer = _LinearModelLayer(
        feature_columns,
        units,
        sparse_combiner,
        trainable,
        name=self.name,
        **kwargs)

  def call(self, features):
    """Returns a `Tensor` that represents the predictions of a linear model.

    Args:
      features: A mapping from key to tensors. `_FeatureColumn`s look up via
        these keys. For example `numeric_column('price')` will look at
        'price' key in this dict. Values are `Tensor` or `SparseTensor`
        depending on corresponding `_FeatureColumn`.

    Returns:
      A `Tensor` which represents predictions/logits of a linear model. Its
      shape is (batch_size, units) and its dtype is `float32`.

    Raises:
      ValueError: If features are not a dictionary.
    """
    return self.layer(features)

  @property
  def bias(self):
    """The bias variable of the underlying linear layer."""
    return self.layer.bias
def _transform_features_v2(features, feature_columns, state_manager):
  """Returns transformed features based on features columns passed in.

  Most users will not need this function directly; `input_layer` and
  `linear_model` cover the common use cases.

  Example:

  ```python
  # Define features and transformations
  crosses_a_x_b = crossed_column(
      columns=["sparse_feature_a", "sparse_feature_b"], hash_bucket_size=10000)
  price_buckets = bucketized_column(
      source_column=numeric_column("price"), boundaries=[...])

  columns = [crosses_a_x_b, price_buckets]
  features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
  transformed = transform_features(features=features, feature_columns=columns)

  assertCountEqual(columns, transformed.keys())
  ```

  Args:
    features: A mapping from key to tensors. `FeatureColumn`s look up via these
      keys. For example `numeric_column('price')` will look at 'price' key in
      this dict. Values can be a `SparseTensor` or a `Tensor` depends on
      corresponding `FeatureColumn`.
    feature_columns: An iterable containing all the `FeatureColumn`s.
    state_manager: A StateManager object that holds the FeatureColumn state.

  Returns:
    A `dict` mapping `FeatureColumn` to `Tensor` and `SparseTensor` values.
  """
  columns = _normalize_feature_columns(feature_columns)
  transformed = {}
  with ops.name_scope(
      None, default_name='transform_features', values=features.values()):
    # A single cache is shared across columns so that common sub-transforms
    # are only computed once.
    cache = FeatureTransformationCache(features)
    for column in columns:
      with ops.name_scope(None, default_name=column.name):
        transformed[column] = cache.get(column, state_manager)
  return transformed
@tf_export('feature_column.make_parse_example_spec', v1=[])
def make_parse_example_spec_v2(feature_columns):
  """Creates parsing spec dictionary from input feature_columns.

  The returned dictionary can be used as arg 'features' in
  `tf.io.parse_example`.

  Typical usage example:

  ```python
  # Define features and transformations
  feature_a = categorical_column_with_vocabulary_file(...)
  feature_b = numeric_column(...)
  feature_c_bucketized = bucketized_column(numeric_column("feature_c"), ...)
  feature_a_x_feature_c = crossed_column(
      columns=["feature_a", feature_c_bucketized], ...)

  feature_columns = set(
      [feature_b, feature_c_bucketized, feature_a_x_feature_c])
  features = tf.io.parse_example(
      serialized=serialized_examples,
      features=make_parse_example_spec(feature_columns))
  ```

  For the above example, make_parse_example_spec would return the dict:

  ```python
  {
      "feature_a": parsing_ops.VarLenFeature(tf.string),
      "feature_b": parsing_ops.FixedLenFeature([1], dtype=tf.float32),
      "feature_c": parsing_ops.FixedLenFeature([1], dtype=tf.float32)
  }
  ```

  Args:
    feature_columns: An iterable containing all feature columns. All items
      should be instances of classes derived from `FeatureColumn`.

  Returns:
    A dict mapping each feature key to a `FixedLenFeature` or `VarLenFeature`
    value.

  Raises:
    ValueError: If any of the given `feature_columns` is not a `FeatureColumn`
      instance.
  """
  spec = {}
  for column in feature_columns:
    if not isinstance(column, FeatureColumn):
      raise ValueError('All feature_columns must be FeatureColumn instances. '
                       'Given: {}'.format(column))
    column_spec = column.parse_example_spec
    # Two columns may legitimately share a key (e.g. a raw column and a
    # derived one) as long as they request an identical parse spec.
    for key, value in six.iteritems(column_spec):
      if key in spec and spec[key] != value:
        raise ValueError(
            'feature_columns contain different parse_spec for key '
            '{}. Given {} and {}'.format(key, value, spec[key]))
    spec.update(column_spec)
  return spec
@tf_export('feature_column.embedding_column')
def embedding_column(categorical_column,
                     dimension,
                     combiner='mean',
                     initializer=None,
                     ckpt_to_load_from=None,
                     tensor_name_in_ckpt=None,
                     max_norm=None,
                     trainable=True):
  """`DenseColumn` that converts from sparse, categorical input.

  Use this when your inputs are sparse, but you want to convert them to a dense
  representation (e.g., to feed to a DNN).

  Inputs must be a `CategoricalColumn` created by any of the
  `categorical_column_*` function. Here is an example of using
  `embedding_column` with `DNNClassifier`:

  ```python
  video_id = categorical_column_with_identity(
      key='video_id', num_buckets=1000000, default_value=0)
  columns = [embedding_column(video_id, 9),...]

  estimator = tf.estimator.DNNClassifier(feature_columns=columns, ...)

  label_column = ...
  def input_fn():
    features = tf.io.parse_example(
        ..., features=make_parse_example_spec(columns + [label_column]))
    labels = features.pop(label_column.name)
    return features, labels

  estimator.train(input_fn=input_fn, steps=100)
  ```

  Here is an example using `embedding_column` with model_fn:

  ```python
  def model_fn(features, ...):
    video_id = categorical_column_with_identity(
        key='video_id', num_buckets=1000000, default_value=0)
    columns = [embedding_column(video_id, 9),...]
    dense_tensor = input_layer(features, columns)
    # Form DNN layers, calculate loss, and return EstimatorSpec.
    ...
  ```

  Args:
    categorical_column: A `CategoricalColumn` created by a
      `categorical_column_with_*` function. This column produces the sparse
      IDs that are inputs to the embedding lookup.
    dimension: An integer specifying dimension of the embedding, must be > 0.
    combiner: A string specifying how to reduce if there are multiple entries
      in a single row. Currently 'mean', 'sqrtn' and 'sum' are supported, with
      'mean' the default. 'sqrtn' often achieves good accuracy, in particular
      with bag-of-words columns. Each of this can be thought as example level
      normalizations on the column. For more information, see
      `tf.embedding_lookup_sparse`.
    initializer: A variable initializer function to be used in embedding
      variable initialization. If not specified, defaults to
      `truncated_normal_initializer` with mean `0.0` and standard deviation
      `1/sqrt(dimension)`.
    ckpt_to_load_from: String representing checkpoint name/pattern from which
      to restore column weights. Required if `tensor_name_in_ckpt` is not
      `None`.
    tensor_name_in_ckpt: Name of the `Tensor` in `ckpt_to_load_from` from
      which to restore the column weights. Required if `ckpt_to_load_from` is
      not `None`.
    max_norm: If not `None`, embedding values are l2-normalized to this value.
    trainable: Whether or not the embedding is trainable. Default is True.

  Returns:
    `DenseColumn` that converts from sparse input.

  Raises:
    ValueError: if `dimension` not > 0.
    ValueError: if exactly one of `ckpt_to_load_from` and
      `tensor_name_in_ckpt` is specified.
    ValueError: if `initializer` is specified and is not callable.
    RuntimeError: If eager execution is enabled.
  """
  # Validate arguments before constructing the column.
  if dimension is None or dimension < 1:
    raise ValueError('Invalid dimension {}.'.format(dimension))
  # The two checkpoint-restore arguments must be given together or not at all.
  if (ckpt_to_load_from is None) != (tensor_name_in_ckpt is None):
    raise ValueError('Must specify both `ckpt_to_load_from` and '
                     '`tensor_name_in_ckpt` or none of them.')
  if initializer is not None and not callable(initializer):
    raise ValueError('initializer must be callable if specified. '
                     'Embedding of column_name: {}'.format(
                         categorical_column.name))
  if initializer is None:
    initializer = init_ops.truncated_normal_initializer(
        mean=0.0, stddev=1 / math.sqrt(dimension))
  return EmbeddingColumn(
      categorical_column=categorical_column,
      dimension=dimension,
      combiner=combiner,
      initializer=initializer,
      ckpt_to_load_from=ckpt_to_load_from,
      tensor_name_in_ckpt=tensor_name_in_ckpt,
      max_norm=max_norm,
      trainable=trainable)
@tf_export(v1=['feature_column.shared_embedding_columns'])
def shared_embedding_columns(categorical_columns,
                             dimension,
                             combiner='mean',
                             initializer=None,
                             shared_embedding_collection_name=None,
                             ckpt_to_load_from=None,
                             tensor_name_in_ckpt=None,
                             max_norm=None,
                             trainable=True):
  """List of dense columns that convert from sparse, categorical input.

  This is similar to `embedding_column`, except that it produces a list of
  embedding columns that share the same embedding weights.

  Use this when your inputs are sparse and of the same type (e.g. watched and
  impression video IDs that share the same vocabulary), and you want to convert
  them to a dense representation (e.g., to feed to a DNN).

  Inputs must be a list of categorical columns created by any of the
  `categorical_column_*` function. They must all be of the same type and have
  the same arguments except `key`. E.g. they can be
  categorical_column_with_vocabulary_file with the same vocabulary_file. Some or
  all columns could also be weighted_categorical_column.

  Here is an example embedding of two features for a DNNClassifier model:

  ```python
  watched_video_id = categorical_column_with_vocabulary_file(
      'watched_video_id', video_vocabulary_file, video_vocabulary_size)
  impression_video_id = categorical_column_with_vocabulary_file(
      'impression_video_id', video_vocabulary_file, video_vocabulary_size)
  columns = shared_embedding_columns(
      [watched_video_id, impression_video_id], dimension=10)

  estimator = tf.estimator.DNNClassifier(feature_columns=columns, ...)

  label_column = ...
  def input_fn():
    features = tf.io.parse_example(
        ..., features=make_parse_example_spec(columns + [label_column]))
    labels = features.pop(label_column.name)
    return features, labels

  estimator.train(input_fn=input_fn, steps=100)
  ```

  Here is an example using `shared_embedding_columns` with model_fn:

  ```python
  def model_fn(features, ...):
    watched_video_id = categorical_column_with_vocabulary_file(
        'watched_video_id', video_vocabulary_file, video_vocabulary_size)
    impression_video_id = categorical_column_with_vocabulary_file(
        'impression_video_id', video_vocabulary_file, video_vocabulary_size)
    columns = shared_embedding_columns(
        [watched_video_id, impression_video_id], dimension=10)
    dense_tensor = input_layer(features, columns)
    # Form DNN layers, calculate loss, and return EstimatorSpec.
    ...
  ```

  Args:
    categorical_columns: List of categorical columns created by a
      `categorical_column_with_*` function. These columns produce the sparse IDs
      that are inputs to the embedding lookup. All columns must be of the same
      type and have the same arguments except `key`. E.g. they can be
      categorical_column_with_vocabulary_file with the same vocabulary_file.
      Some or all columns could also be weighted_categorical_column.
    dimension: An integer specifying dimension of the embedding, must be > 0.
    combiner: A string specifying how to reduce if there are multiple entries in
      a single row. Currently 'mean', 'sqrtn' and 'sum' are supported, with
      'mean' the default. 'sqrtn' often achieves good accuracy, in particular
      with bag-of-words columns. Each of this can be thought as example level
      normalizations on the column. For more information, see
      `tf.embedding_lookup_sparse`.
    initializer: A variable initializer function to be used in embedding
      variable initialization. If not specified, defaults to
      `truncated_normal_initializer` with mean `0.0` and
      standard deviation `1/sqrt(dimension)`.
    shared_embedding_collection_name: Optional name of the collection where
      shared embedding weights are added. If not given, a reasonable name will
      be chosen based on the names of `categorical_columns`. This is also used
      in `variable_scope` when creating shared embedding weights.
    ckpt_to_load_from: String representing checkpoint name/pattern from which to
      restore column weights. Required if `tensor_name_in_ckpt` is not `None`.
    tensor_name_in_ckpt: Name of the `Tensor` in `ckpt_to_load_from` from which
      to restore the column weights. Required if `ckpt_to_load_from` is not
      `None`.
    max_norm: If not `None`, each embedding is clipped if its l2-norm is larger
      than this value, before combining.
    trainable: Whether or not the embedding is trainable. Default is True.

  Returns:
    A list of dense columns that converts from sparse input. The order of
    results follows the ordering of `categorical_columns`.

  Raises:
    ValueError: if `dimension` not > 0.
    ValueError: if any of the given `categorical_columns` is of different type
      or has different arguments than the others.
    ValueError: if exactly one of `ckpt_to_load_from` and `tensor_name_in_ckpt`
      is specified.
    ValueError: if `initializer` is specified and is not callable.
    RuntimeError: if eager execution is enabled.
  """
  # Shared embedding weights rely on graph collections, which do not exist in
  # eager mode.
  if context.executing_eagerly():
    raise RuntimeError('shared_embedding_columns are not supported when eager '
                       'execution is enabled.')
  if (dimension is None) or (dimension < 1):
    raise ValueError('Invalid dimension {}.'.format(dimension))
  # The two checkpoint-restore arguments must be supplied together or not at
  # all.
  if (ckpt_to_load_from is None) != (tensor_name_in_ckpt is None):
    raise ValueError('Must specify both `ckpt_to_load_from` and '
                     '`tensor_name_in_ckpt` or none of them.')
  if (initializer is not None) and (not callable(initializer)):
    raise ValueError('initializer must be callable if specified.')
  if initializer is None:
    initializer = init_ops.truncated_normal_initializer(
        mean=0.0, stddev=1. / math.sqrt(dimension))
  # Sort the columns so the default collection name is deterministic even if the
  # user passes columns from an unsorted collection, such as dict.values().
  sorted_columns = sorted(categorical_columns, key=lambda x: x.name)
  c0 = sorted_columns[0]
  num_buckets = c0._num_buckets  # pylint: disable=protected-access
  if not isinstance(c0, fc_old._CategoricalColumn):  # pylint: disable=protected-access
    raise ValueError(
        'All categorical_columns must be subclasses of _CategoricalColumn. '
        'Given: {}, of type: {}'.format(c0, type(c0)))
  # Weighted columns are unwrapped to their underlying categorical column for
  # the type-compatibility checks below.
  if isinstance(c0,
                (fc_old._WeightedCategoricalColumn, WeightedCategoricalColumn)):  # pylint: disable=protected-access
    c0 = c0.categorical_column
  # All columns must share the same (unwrapped) type and bucket count, since
  # they will share one embedding table.
  for c in sorted_columns[1:]:
    if isinstance(
        c, (fc_old._WeightedCategoricalColumn, WeightedCategoricalColumn)):  # pylint: disable=protected-access
      c = c.categorical_column
    if not isinstance(c, type(c0)):
      raise ValueError(
          'To use shared_embedding_column, all categorical_columns must have '
          'the same type, or be weighted_categorical_column of the same type. '
          'Given column: {} of type: {} does not match given column: {} of '
          'type: {}'.format(c0, type(c0), c, type(c)))
    if num_buckets != c._num_buckets:  # pylint: disable=protected-access
      raise ValueError(
          'To use shared_embedding_column, all categorical_columns must have '
          'the same number of buckets. Given column: {} with buckets: {} does '
          'not match column: {} with buckets: {}'.format(
              c0, num_buckets, c, c._num_buckets))  # pylint: disable=protected-access
  # Derive a deterministic default collection name from the sorted column
  # names.
  if not shared_embedding_collection_name:
    shared_embedding_collection_name = '_'.join(c.name for c in sorted_columns)
    shared_embedding_collection_name += '_shared_embedding'
  # Build one shared-embedding column per input column, preserving the
  # caller's original ordering (not the sorted one).
  result = []
  for column in categorical_columns:
    result.append(
        fc_old._SharedEmbeddingColumn(  # pylint: disable=protected-access
            categorical_column=column,
            initializer=initializer,
            dimension=dimension,
            combiner=combiner,
            shared_embedding_collection_name=shared_embedding_collection_name,
            ckpt_to_load_from=ckpt_to_load_from,
            tensor_name_in_ckpt=tensor_name_in_ckpt,
            max_norm=max_norm,
            trainable=trainable))
  return result
@tf_export('feature_column.shared_embeddings', v1=[])
def shared_embedding_columns_v2(categorical_columns,
                                dimension,
                                combiner='mean',
                                initializer=None,
                                shared_embedding_collection_name=None,
                                ckpt_to_load_from=None,
                                tensor_name_in_ckpt=None,
                                max_norm=None,
                                trainable=True):
  """List of dense columns that convert from sparse, categorical input.

  This is similar to `embedding_column`, except that it produces a list of
  embedding columns that share the same embedding weights.

  Use this when your inputs are sparse and of the same type (e.g. watched and
  impression video IDs that share the same vocabulary), and you want to convert
  them to a dense representation (e.g., to feed to a DNN).

  Inputs must be a list of categorical columns created by any of the
  `categorical_column_*` function. They must all be of the same type and have
  the same arguments except `key`. E.g. they can be
  categorical_column_with_vocabulary_file with the same vocabulary_file. Some or
  all columns could also be weighted_categorical_column.

  Here is an example embedding of two features for a DNNClassifier model:

  ```python
  watched_video_id = categorical_column_with_vocabulary_file(
      'watched_video_id', video_vocabulary_file, video_vocabulary_size)
  impression_video_id = categorical_column_with_vocabulary_file(
      'impression_video_id', video_vocabulary_file, video_vocabulary_size)
  columns = shared_embedding_columns(
      [watched_video_id, impression_video_id], dimension=10)
  estimator = tf.estimator.DNNClassifier(feature_columns=columns, ...)
  label_column = ...
  def input_fn():
    features = tf.io.parse_example(
        ..., features=make_parse_example_spec(columns + [label_column]))
    labels = features.pop(label_column.name)
    return features, labels
  estimator.train(input_fn=input_fn, steps=100)
  ```

  Here is an example using `shared_embedding_columns` with model_fn:

  ```python
  def model_fn(features, ...):
    watched_video_id = categorical_column_with_vocabulary_file(
        'watched_video_id', video_vocabulary_file, video_vocabulary_size)
    impression_video_id = categorical_column_with_vocabulary_file(
        'impression_video_id', video_vocabulary_file, video_vocabulary_size)
    columns = shared_embedding_columns(
        [watched_video_id, impression_video_id], dimension=10)
    dense_tensor = input_layer(features, columns)
    # Form DNN layers, calculate loss, and return EstimatorSpec.
    ...
  ```

  Args:
    categorical_columns: List of categorical columns created by a
      `categorical_column_with_*` function. These columns produce the sparse IDs
      that are inputs to the embedding lookup. All columns must be of the same
      type and have the same arguments except `key`. E.g. they can be
      categorical_column_with_vocabulary_file with the same vocabulary_file.
      Some or all columns could also be weighted_categorical_column.
    dimension: An integer specifying dimension of the embedding, must be > 0.
    combiner: A string specifying how to reduce if there are multiple entries
      in a single row. Currently 'mean', 'sqrtn' and 'sum' are supported, with
      'mean' the default. 'sqrtn' often achieves good accuracy, in particular
      with bag-of-words columns. Each of this can be thought as example level
      normalizations on the column. For more information, see
      `tf.embedding_lookup_sparse`.
    initializer: A variable initializer function to be used in embedding
      variable initialization. If not specified, defaults to
      `truncated_normal_initializer` with mean `0.0` and standard
      deviation `1/sqrt(dimension)`.
    shared_embedding_collection_name: Optional collective name of these columns.
      If not given, a reasonable name will be chosen based on the names of
      `categorical_columns`.
    ckpt_to_load_from: String representing checkpoint name/pattern from which to
      restore column weights. Required if `tensor_name_in_ckpt` is not `None`.
    tensor_name_in_ckpt: Name of the `Tensor` in `ckpt_to_load_from` from
      which to restore the column weights. Required if `ckpt_to_load_from` is
      not `None`.
    max_norm: If not `None`, each embedding is clipped if its l2-norm is
      larger than this value, before combining.
    trainable: Whether or not the embedding is trainable. Default is True.

  Returns:
    A list of dense columns that converts from sparse input. The order of
    results follows the ordering of `categorical_columns`.

  Raises:
    ValueError: if `dimension` not > 0.
    ValueError: if `categorical_columns` is empty.
    ValueError: if any of the given `categorical_columns` is of different type
      or has different arguments than the others.
    ValueError: if exactly one of `ckpt_to_load_from` and `tensor_name_in_ckpt`
      is specified.
    ValueError: if `initializer` is specified and is not callable.
    RuntimeError: if eager execution is enabled.
  """
  if context.executing_eagerly():
    raise RuntimeError('shared_embedding_columns are not supported when eager '
                       'execution is enabled.')
  if (dimension is None) or (dimension < 1):
    raise ValueError('Invalid dimension {}.'.format(dimension))
  if (ckpt_to_load_from is None) != (tensor_name_in_ckpt is None):
    raise ValueError('Must specify both `ckpt_to_load_from` and '
                     '`tensor_name_in_ckpt` or none of them.')
  if (initializer is not None) and (not callable(initializer)):
    raise ValueError('initializer must be callable if specified.')
  if initializer is None:
    initializer = init_ops.truncated_normal_initializer(
        mean=0.0, stddev=1. / math.sqrt(dimension))
  # An empty input list would otherwise surface as an opaque IndexError below.
  if not categorical_columns:
    raise ValueError('categorical_columns must not be empty.')
  # Sort the columns so the default collection name is deterministic even if the
  # user passes columns from an unsorted collection, such as dict.values().
  sorted_columns = sorted(categorical_columns, key=lambda x: x.name)
  c0 = sorted_columns[0]
  # Validate the column type BEFORE touching `num_buckets`, so an invalid
  # input raises the documented ValueError rather than an AttributeError.
  if not isinstance(c0, CategoricalColumn):
    raise ValueError(
        'All categorical_columns must be subclasses of CategoricalColumn. '
        'Given: {}, of type: {}'.format(c0, type(c0)))
  num_buckets = c0.num_buckets
  if isinstance(c0, WeightedCategoricalColumn):
    # Compare against the wrapped column so weighted and plain columns of the
    # same underlying type are considered compatible.
    c0 = c0.categorical_column
  for c in sorted_columns[1:]:
    if isinstance(c, WeightedCategoricalColumn):
      c = c.categorical_column
    if not isinstance(c, type(c0)):
      raise ValueError(
          'To use shared_embedding_column, all categorical_columns must have '
          'the same type, or be weighted_categorical_column of the same type. '
          'Given column: {} of type: {} does not match given column: {} of '
          'type: {}'.format(c0, type(c0), c, type(c)))
    if num_buckets != c.num_buckets:
      raise ValueError(
          'To use shared_embedding_column, all categorical_columns must have '
          'the same number of buckets. Given column: {} with buckets: {} does '
          'not match column: {} with buckets: {}'.format(
              c0, num_buckets, c, c.num_buckets))
  if not shared_embedding_collection_name:
    shared_embedding_collection_name = '_'.join(c.name for c in sorted_columns)
    shared_embedding_collection_name += '_shared_embedding'
  column_creator = SharedEmbeddingColumnCreator(
      dimension, initializer, ckpt_to_load_from, tensor_name_in_ckpt,
      num_buckets, trainable, shared_embedding_collection_name)
  result = []
  for column in categorical_columns:
    result.append(
        column_creator(
            categorical_column=column, combiner=combiner, max_norm=max_norm))
  return result
@tf_export('feature_column.numeric_column')
def numeric_column(key,
                   shape=(1,),
                   default_value=None,
                   dtype=dtypes.float32,
                   normalizer_fn=None):
  """Represents real valued or numerical features.

  Example:

  ```python
  price = numeric_column('price')
  columns = [price, ...]
  features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
  dense_tensor = input_layer(features, columns)

  # or
  bucketized_price = bucketized_column(price, boundaries=[...])
  columns = [bucketized_price, ...]
  features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
  linear_prediction = linear_model(features, columns)
  ```

  Args:
    key: A unique string identifying the input feature. It is used as the
      column name and the dictionary key for feature parsing configs, feature
      `Tensor` objects, and feature columns.
    shape: An iterable of integers giving the shape of the `Tensor`. A bare
      integer is interpreted as a single dimension of that width. The column's
      `Tensor` will have shape [batch_size] + `shape`.
    default_value: A single value compatible with `dtype`, or an iterable of
      such values whose shape matches `shape`, taken on during `tf.Example`
      parsing when data is missing. `None` makes `tf.io.parse_example` fail if
      an example does not contain this column. A single value is broadcast to
      every item.
    dtype: The type of values. Defaults to `tf.float32`. Must be a
      non-quantized, real integer or floating point type.
    normalizer_fn: Optional callable applied to the tensor after
      `default_value` is filled in during parsing; takes the input `Tensor`
      and returns the output `Tensor` (e.g. `lambda x: (x - 3.0) / 4.2`).
      Despite the name, any kind of TensorFlow transformation can be used.

  Returns:
    A `NumericColumn`.

  Raises:
    TypeError: if any dimension in shape is not an int
    ValueError: if any dimension in shape is not a positive integer
    TypeError: if `default_value` is an iterable but not compatible with `shape`
    TypeError: if `default_value` is not compatible with `dtype`.
    ValueError: if `dtype` is not convertible to `tf.float32`.
  """
  checked_shape = _check_shape(shape, key)
  if not dtype.is_integer and not dtype.is_floating:
    raise ValueError('dtype must be convertible to float. '
                     'dtype: {}, key: {}'.format(dtype, key))
  checked_default = fc_utils.check_default_value(
      checked_shape, default_value, dtype, key)
  if normalizer_fn is not None and not callable(normalizer_fn):
    raise TypeError(
        'normalizer_fn must be a callable. Given: {}'.format(normalizer_fn))
  fc_utils.assert_key_is_string(key)
  return NumericColumn(
      key,
      shape=checked_shape,
      default_value=checked_default,
      dtype=dtype,
      normalizer_fn=normalizer_fn)
@tf_export('feature_column.bucketized_column')
def bucketized_column(source_column, boundaries):
  """Represents discretized dense input.

  Buckets include the left boundary, and exclude the right boundary. Namely,
  `boundaries=[0., 1., 2.]` generates buckets `(-inf, 0.)`, `[0., 1.)`,
  `[1., 2.)`, and `[2., +inf)`.

  For example, if the inputs are

  ```python
  boundaries = [0, 10, 100]
  input tensor = [[-5, 10000]
                  [150,   10]
                  [5,    100]]
  ```

  then the output will be

  ```python
  output = [[0, 3]
            [3, 2]
            [1, 3]]
  ```

  Example:

  ```python
  price = numeric_column('price')
  bucketized_price = bucketized_column(price, boundaries=[...])
  columns = [bucketized_price, ...]
  features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
  linear_prediction = linear_model(features, columns)

  # or
  columns = [bucketized_price, ...]
  features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
  dense_tensor = input_layer(features, columns)
  ```

  `bucketized_column` can also be crossed with another categorical column using
  `crossed_column`:

  ```python
  price = numeric_column('price')
  # bucketized_column converts numerical feature to a categorical one.
  bucketized_price = bucketized_column(price, boundaries=[...])
  # 'keywords' is a string feature.
  price_x_keywords = crossed_column([bucketized_price, 'keywords'], 50000)
  columns = [price_x_keywords, ...]
  features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
  linear_prediction = linear_model(features, columns)
  ```

  Args:
    source_column: A one-dimensional dense column which is generated with
      `numeric_column`.
    boundaries: A sorted list or tuple of floats specifying the boundaries.

  Returns:
    A `BucketizedColumn`.

  Raises:
    ValueError: If `source_column` is not a numeric column, or if it is not
      one-dimensional.
    ValueError: If `boundaries` is not a sorted list or tuple.
  """
  if not isinstance(source_column, (NumericColumn, fc_old._NumericColumn)):  # pylint: disable=protected-access
    raise ValueError(
        'source_column must be a column generated with numeric_column(). '
        'Given: {}'.format(source_column))
  if len(source_column.shape) > 1:
    raise ValueError(
        'source_column must be one-dimensional column. '
        'Given: {}'.format(source_column))
  if not boundaries:
    raise ValueError('boundaries must not be empty.')
  if not isinstance(boundaries, (list, tuple)):
    raise ValueError('boundaries must be a sorted list.')
  # Strictly-increasing check done with one pairwise pass instead of index
  # arithmetic; equal adjacent boundaries are rejected, same as before.
  if any(lo >= hi for lo, hi in zip(boundaries, boundaries[1:])):
    raise ValueError('boundaries must be a sorted list.')
  return BucketizedColumn(source_column, tuple(boundaries))
@tf_export('feature_column.categorical_column_with_hash_bucket')
def categorical_column_with_hash_bucket(key,
                                        hash_bucket_size,
                                        dtype=dtypes.string):
  """Represents sparse feature where ids are set by hashing.

  Use this when your sparse features are in string or integer format, and you
  want to distribute your inputs into a finite number of buckets by hashing.
  output_id = Hash(input_feature_string) % bucket_size for string type input.
  For int type input, the value is converted to its string representation first
  and then hashed by the same formula.

  For input dictionary `features`, `features[key]` is either `Tensor` or
  `SparseTensor`. If `Tensor`, missing values can be represented by `-1` for int
  and `''` for string, which will be dropped by this feature column.

  Example:

  ```python
  keywords = categorical_column_with_hash_bucket("keywords", 10000)
  columns = [keywords, ...]
  features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
  linear_prediction = linear_model(features, columns)

  # or
  keywords_embedded = embedding_column(keywords, 16)
  columns = [keywords_embedded, ...]
  features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
  dense_tensor = input_layer(features, columns)
  ```

  Args:
    key: A unique string identifying the input feature. It is used as the
      column name and the dictionary key for feature parsing configs, feature
      `Tensor` objects, and feature columns.
    hash_bucket_size: An int >= 1. The number of buckets.
    dtype: The type of features. Only string and integer types are supported.

  Returns:
    A `HashedCategoricalColumn`.

  Raises:
    ValueError: `hash_bucket_size` is `None` or less than 1.
    ValueError: `dtype` is neither string nor integer.
  """
  # Distinguish "not provided" from "too small" so each case gets a
  # specific error message.
  if hash_bucket_size is None:
    raise ValueError('hash_bucket_size must be set. ' 'key: {}'.format(key))
  if hash_bucket_size < 1:
    raise ValueError('hash_bucket_size must be at least 1. '
                     'hash_bucket_size: {}, key: {}'.format(
                         hash_bucket_size, key))
  fc_utils.assert_key_is_string(key)
  fc_utils.assert_string_or_int(dtype, prefix='column_name: {}'.format(key))
  return HashedCategoricalColumn(key, hash_bucket_size, dtype)
@tf_export(v1=['feature_column.categorical_column_with_vocabulary_file'])
def categorical_column_with_vocabulary_file(key,
                                            vocabulary_file,
                                            vocabulary_size=None,
                                            num_oov_buckets=0,
                                            default_value=None,
                                            dtype=dtypes.string):
  """A `CategoricalColumn` with a vocabulary file.

  Use this when your inputs are in string or integer format, and you have a
  vocabulary file that maps each value to an integer ID. By default,
  out-of-vocabulary values are ignored. Use either (but not both) of
  `num_oov_buckets` and `default_value` to specify how to include
  out-of-vocabulary values.

  For input dictionary `features`, `features[key]` is either `Tensor` or
  `SparseTensor`. If `Tensor`, missing values can be represented by `-1` for int
  and `''` for string, which will be dropped by this feature column.

  Example with `num_oov_buckets` — file '/us/states.txt' contains 50 lines,
  each with a 2-character U.S. state abbreviation. In-vocabulary inputs get
  IDs 0-49 (their line number); everything else is hashed into IDs 50-54:

  ```python
  states = categorical_column_with_vocabulary_file(
      key='states', vocabulary_file='/us/states.txt', vocabulary_size=50,
      num_oov_buckets=5)
  columns = [states, ...]
  features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
  linear_prediction = linear_model(features, columns)
  ```

  Example with `default_value` — file '/us/states.txt' contains 51 lines, the
  first being 'XX'. Both a literal 'XX' in input and values missing from the
  file are assigned ID 0; all others get their line number 1-50:

  ```python
  states = categorical_column_with_vocabulary_file(
      key='states', vocabulary_file='/us/states.txt', vocabulary_size=51,
      default_value=0)
  ```

  An embedding can be built from either via `embedding_column(states, 3)`.

  Args:
    key: A unique string identifying the input feature. It is used as the
      column name and the dictionary key for feature parsing configs, feature
      `Tensor` objects, and feature columns.
    vocabulary_file: The vocabulary file name.
    vocabulary_size: Number of the elements in the vocabulary. This must be no
      greater than length of `vocabulary_file`, if less than length, later
      values are ignored. If None, it is set to the length of `vocabulary_file`.
    num_oov_buckets: Non-negative integer, the number of out-of-vocabulary
      buckets. All out-of-vocabulary inputs will be assigned IDs in the range
      `[vocabulary_size, vocabulary_size+num_oov_buckets)` based on a hash of
      the input value. A positive `num_oov_buckets` can not be specified with
      `default_value`.
    default_value: The integer ID value to return for out-of-vocabulary feature
      values, defaults to `-1`. This can not be specified with a positive
      `num_oov_buckets`.
    dtype: The type of features. Only string and integer types are supported.

  Returns:
    A `CategoricalColumn` with a vocabulary file.

  Raises:
    ValueError: `vocabulary_file` is missing or cannot be opened.
    ValueError: `vocabulary_size` is missing or < 1.
    ValueError: `num_oov_buckets` is a negative integer.
    ValueError: `num_oov_buckets` and `default_value` are both specified.
    ValueError: `dtype` is neither string nor integer.
  """
  # Delegate to the v2 implementation; only the argument order differs
  # between the two signatures, so pass everything by keyword.
  return categorical_column_with_vocabulary_file_v2(
      key=key,
      vocabulary_file=vocabulary_file,
      vocabulary_size=vocabulary_size,
      dtype=dtype,
      default_value=default_value,
      num_oov_buckets=num_oov_buckets)
@tf_export('feature_column.categorical_column_with_vocabulary_file', v1=[])
def categorical_column_with_vocabulary_file_v2(key,
                                               vocabulary_file,
                                               vocabulary_size=None,
                                               dtype=dtypes.string,
                                               default_value=None,
                                               num_oov_buckets=0):
  """A `CategoricalColumn` with a vocabulary file.
  Use this when your inputs are in string or integer format, and you have a
  vocabulary file that maps each value to an integer ID. By default,
  out-of-vocabulary values are ignored. Use either (but not both) of
  `num_oov_buckets` and `default_value` to specify how to include
  out-of-vocabulary values.
  For input dictionary `features`, `features[key]` is either `Tensor` or
  `SparseTensor`. If `Tensor`, missing values can be represented by `-1` for int
  and `''` for string, which will be dropped by this feature column.
  Example with `num_oov_buckets`:
  File `'/us/states.txt'` contains 50 lines, each with a 2-character U.S. state
  abbreviation. All inputs with values in that file are assigned an ID 0-49,
  corresponding to its line number. All other values are hashed and assigned an
  ID 50-54.
  ```python
  states = categorical_column_with_vocabulary_file(
      key='states', vocabulary_file='/us/states.txt', vocabulary_size=50,
      num_oov_buckets=5)
  columns = [states, ...]
  features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
  linear_prediction = linear_model(features, columns)
  ```
  Example with `default_value`:
  File `'/us/states.txt'` contains 51 lines - the first line is `'XX'`, and the
  other 50 each have a 2-character U.S. state abbreviation. Both a literal
  `'XX'` in input, and other values missing from the file, will be assigned
  ID 0. All others are assigned the corresponding line number 1-50.
  ```python
  states = categorical_column_with_vocabulary_file(
      key='states', vocabulary_file='/us/states.txt', vocabulary_size=51,
      default_value=0)
  columns = [states, ...]
  features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
  linear_prediction, _, _ = linear_model(features, columns)
  ```
  And to make an embedding with either:
  ```python
  columns = [embedding_column(states, 3),...]
  features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
  dense_tensor = input_layer(features, columns)
  ```
  Args:
    key: A unique string identifying the input feature. It is used as the
      column name and the dictionary key for feature parsing configs, feature
      `Tensor` objects, and feature columns.
    vocabulary_file: The vocabulary file name.
    vocabulary_size: Number of the elements in the vocabulary. This must be no
      greater than length of `vocabulary_file`, if less than length, later
      values are ignored. If None, it is set to the length of `vocabulary_file`.
    dtype: The type of features. Only string and integer types are supported.
    default_value: The integer ID value to return for out-of-vocabulary feature
      values, defaults to `-1`. This can not be specified with a positive
      `num_oov_buckets`.
    num_oov_buckets: Non-negative integer, the number of out-of-vocabulary
      buckets. All out-of-vocabulary inputs will be assigned IDs in the range
      `[vocabulary_size, vocabulary_size+num_oov_buckets)` based on a hash of
      the input value. A positive `num_oov_buckets` can not be specified with
      `default_value`.
  Returns:
    A `CategoricalColumn` with a vocabulary file.
  Raises:
    ValueError: `vocabulary_file` is missing or cannot be opened.
    ValueError: `vocabulary_size` is missing or < 1.
    ValueError: `num_oov_buckets` is a negative integer.
    ValueError: `num_oov_buckets` and `default_value` are both specified.
    ValueError: `dtype` is neither string nor integer.
  """
  if not vocabulary_file:
    raise ValueError('Missing vocabulary_file in {}.'.format(key))
  if vocabulary_size is None:
    # Infer the size by counting lines in the file. This requires the file to
    # exist at column-construction time; an explicit vocabulary_size does not.
    if not gfile.Exists(vocabulary_file):
      raise ValueError('vocabulary_file in {} does not exist.'.format(key))
    with gfile.GFile(vocabulary_file) as f:
      vocabulary_size = sum(1 for _ in f)
    logging.info(
        'vocabulary_size = %d in %s is inferred from the number of elements '
        'in the vocabulary_file %s.', vocabulary_size, key, vocabulary_file)
  # `vocabulary_size` isn't required for lookup, but it is for `_num_buckets`.
  if vocabulary_size < 1:
    raise ValueError('Invalid vocabulary_size in {}.'.format(key))
  # Any truthy num_oov_buckets (including a negative one) is mutually
  # exclusive with an explicit default_value.
  if num_oov_buckets:
    if default_value is not None:
      raise ValueError(
          'Can\'t specify both num_oov_buckets and default_value in {}.'.format(
              key))
    if num_oov_buckets < 0:
      raise ValueError('Invalid num_oov_buckets {} in {}.'.format(
          num_oov_buckets, key))
  fc_utils.assert_string_or_int(dtype, prefix='column_name: {}'.format(key))
  fc_utils.assert_key_is_string(key)
  # Normalize `None` arguments to their documented defaults before building
  # the immutable column object.
  return VocabularyFileCategoricalColumn(
      key=key,
      vocabulary_file=vocabulary_file,
      vocabulary_size=vocabulary_size,
      num_oov_buckets=0 if num_oov_buckets is None else num_oov_buckets,
      default_value=-1 if default_value is None else default_value,
      dtype=dtype)
@tf_export('feature_column.categorical_column_with_vocabulary_list')
def categorical_column_with_vocabulary_list(key,
                                            vocabulary_list,
                                            dtype=None,
                                            default_value=-1,
                                            num_oov_buckets=0):
  """A `CategoricalColumn` with in-memory vocabulary.

  Use this when your inputs are in string or integer format, and you have an
  in-memory vocabulary mapping each value to an integer ID. By default,
  out-of-vocabulary values are ignored. Use either (but not both) of
  `num_oov_buckets` and `default_value` to specify how to include
  out-of-vocabulary values.

  For input dictionary `features`, `features[key]` is either `Tensor` or
  `SparseTensor`. If `Tensor`, missing values can be represented by `-1` for int
  and `''` for string, which will be dropped by this feature column.

  Example with `num_oov_buckets` — each input in `vocabulary_list` is assigned
  an ID equal to its index (input 'B' produces 2); everything else is hashed
  into the out-of-vocabulary buckets:

  ```python
  colors = categorical_column_with_vocabulary_list(
      key='colors', vocabulary_list=('R', 'G', 'B', 'Y'),
      num_oov_buckets=2)
  columns = [colors, ...]
  features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
  linear_prediction, _, _ = linear_model(features, columns)
  ```

  Example with `default_value` — out-of-vocabulary inputs map to the given
  default instead:

  ```python
  colors = categorical_column_with_vocabulary_list(
      key='colors', vocabulary_list=('X', 'R', 'G', 'B', 'Y'), default_value=0)
  ```

  An embedding can be built from either via `embedding_column(colors, 3)`.

  Args:
    key: A unique string identifying the input feature. It is used as the column
      name and the dictionary key for feature parsing configs, feature `Tensor`
      objects, and feature columns.
    vocabulary_list: An ordered iterable defining the vocabulary. Each feature
      is mapped to the index of its value (if present) in `vocabulary_list`.
      Must be castable to `dtype`.
    dtype: The type of features. Only string and integer types are supported.
      If `None`, it will be inferred from `vocabulary_list`.
    default_value: The integer ID value to return for out-of-vocabulary feature
      values, defaults to `-1`. This can not be specified with a positive
      `num_oov_buckets`.
    num_oov_buckets: Non-negative integer, the number of out-of-vocabulary
      buckets. All out-of-vocabulary inputs will be assigned IDs in the range
      `[len(vocabulary_list), len(vocabulary_list)+num_oov_buckets)` based on a
      hash of the input value. A positive `num_oov_buckets` can not be specified
      with `default_value`.

  Returns:
    A `CategoricalColumn` with in-memory vocabulary.

  Raises:
    ValueError: if `vocabulary_list` is empty, or contains duplicate keys.
    ValueError: `num_oov_buckets` is a negative integer.
    ValueError: `num_oov_buckets` and `default_value` are both specified.
    ValueError: if `dtype` is not integer or string.
  """
  if vocabulary_list is None or len(vocabulary_list) < 1:
    raise ValueError(
        'vocabulary_list {} must be non-empty, column_name: {}'.format(
            vocabulary_list, key))
  # A set collapses duplicates; a size mismatch therefore means repeats.
  if len(set(vocabulary_list)) != len(vocabulary_list):
    raise ValueError(
        'Duplicate keys in vocabulary_list {}, column_name: {}'.format(
            vocabulary_list, key))
  # Let numpy work out a common dtype for the vocabulary entries.
  inferred_dtype = dtypes.as_dtype(np.array(vocabulary_list).dtype)
  if num_oov_buckets:
    if default_value != -1:
      raise ValueError(
          'Can\'t specify both num_oov_buckets and default_value in {}.'.format(
              key))
    if num_oov_buckets < 0:
      raise ValueError('Invalid num_oov_buckets {} in {}.'.format(
          num_oov_buckets, key))
  fc_utils.assert_string_or_int(
      inferred_dtype, prefix='column_name: {} vocabulary'.format(key))
  if dtype is None:
    dtype = inferred_dtype
  elif dtype.is_integer != inferred_dtype.is_integer:
    raise ValueError(
        'dtype {} and vocabulary dtype {} do not match, column_name: {}'.format(
            dtype, inferred_dtype, key))
  fc_utils.assert_string_or_int(dtype, prefix='column_name: {}'.format(key))
  fc_utils.assert_key_is_string(key)
  return VocabularyListCategoricalColumn(
      key=key,
      vocabulary_list=tuple(vocabulary_list),
      dtype=dtype,
      default_value=default_value,
      num_oov_buckets=num_oov_buckets)
@tf_export('feature_column.categorical_column_with_identity')
def categorical_column_with_identity(key, num_buckets, default_value=None):
  """A `CategoricalColumn` that returns identity values.

  Use this when your inputs are integers in the range `[0, num_buckets)` and
  should be used directly as categorical IDs. Values outside this range map to
  `default_value` when it is specified; otherwise they fail.

  Typically this is used for contiguous ranges of integer indexes, but it
  doesn't have to be. If many IDs are unused this may be inefficient —
  consider `categorical_column_with_hash_bucket` in that case.

  For input dictionary `features`, `features[key]` is either `Tensor` or
  `SparseTensor`. If `Tensor`, missing values can be represented by `-1` for
  int and `''` for string, which will be dropped by this feature column.

  In the following examples, each input in the range `[0, 1000000)` is assigned
  the same value. All other inputs are assigned `default_value` 0. Note that a
  literal 0 in inputs will result in the same default ID.

  Linear model:

  ```python
  video_id = categorical_column_with_identity(
      key='video_id', num_buckets=1000000, default_value=0)
  columns = [video_id, ...]
  features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
  linear_prediction, _, _ = linear_model(features, columns)
  ```

  Embedding for a DNN model:

  ```python
  columns = [embedding_column(video_id, 9),...]
  features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
  dense_tensor = input_layer(features, columns)
  ```

  Args:
    key: A unique string identifying the input feature. It is used as the
      column name and the dictionary key for feature parsing configs, feature
      `Tensor` objects, and feature columns.
    num_buckets: Range of inputs and outputs is `[0, num_buckets)`.
    default_value: If set, values outside of range `[0, num_buckets)` will
      be replaced with this value. If not set, values >= num_buckets will
      cause a failure while values < 0 will be dropped.

  Returns:
    A `CategoricalColumn` that returns identity values.

  Raises:
    ValueError: if `num_buckets` is less than one.
    ValueError: if `default_value` is not in range `[0, num_buckets)`.
  """
  if num_buckets < 1:
    raise ValueError(
        'num_buckets {} < 1, column_name {}'.format(num_buckets, key))
  default_out_of_range = (default_value is not None and
                          not 0 <= default_value < num_buckets)
  if default_out_of_range:
    raise ValueError(
        'default_value {} not in range [0, {}), column_name {}'.format(
            default_value, num_buckets, key))
  fc_utils.assert_key_is_string(key)
  return IdentityCategoricalColumn(
      key=key, number_buckets=num_buckets, default_value=default_value)
@tf_export('feature_column.indicator_column')
def indicator_column(categorical_column):
  """Represents multi-hot representation of given categorical column.

  - For a DNN model, `indicator_column` can wrap any `categorical_column_*`
    (e.g., to feed to a DNN). Consider `embedding_column` instead if the
    number of buckets / unique values is large.
  - For a Wide (aka linear) model, `indicator_column` is the internal
    representation used when a categorical column is passed directly (as any
    element of feature_columns) to `linear_model`. See `linear_model` for
    details.

  ```python
  name = indicator_column(categorical_column_with_vocabulary_list(
      'name', ['bob', 'george', 'wanda'])
  columns = [name, ...]
  features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
  dense_tensor = input_layer(features, columns)

  dense_tensor == [[1, 0, 0]]  # If "name" bytes_list is ["bob"]
  dense_tensor == [[1, 0, 1]]  # If "name" bytes_list is ["bob", "wanda"]
  dense_tensor == [[2, 0, 0]]  # If "name" bytes_list is ["bob", "bob"]
  ```

  Args:
    categorical_column: A `CategoricalColumn` which is created by
      `categorical_column_with_*` or `crossed_column` functions.

  Returns:
    An `IndicatorColumn`.
  """
  # Wrapping is all that is required here; the multi-hot conversion itself
  # lives inside `IndicatorColumn`.
  wrapped = IndicatorColumn(categorical_column)
  return wrapped
@tf_export('feature_column.weighted_categorical_column')
def weighted_categorical_column(categorical_column,
                                weight_feature_key,
                                dtype=dtypes.float32):
  """Attaches per-id weight values to a `CategoricalColumn`.
  Use this when each sparse input carries both an ID and a value — e.g. text
  documents represented as word frequencies can supply two parallel sparse
  features ('terms' and 'frequencies' below).
  Example:
  Input `tf.Example` objects:
  ```proto
  [
    features {
      feature {
        key: "terms"
        value {bytes_list {value: "very" value: "model"}}
      }
      feature {
        key: "frequencies"
        value {float_list {value: 0.3 value: 0.1}}
      }
    },
    features {
      feature {
        key: "terms"
        value {bytes_list {value: "when" value: "course" value: "human"}}
      }
      feature {
        key: "frequencies"
        value {float_list {value: 0.4 value: 0.1 value: 0.2}}
      }
    }
  ]
  ```
  ```python
  categorical_column = categorical_column_with_hash_bucket(
      column_name='terms', hash_bucket_size=1000)
  weighted_column = weighted_categorical_column(
      categorical_column=categorical_column, weight_feature_key='frequencies')
  columns = [weighted_column, ...]
  features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
  linear_prediction, _, _ = linear_model(features, columns)
  ```
  The input dictionary is expected to hold a `SparseTensor` for key 'terms'
  and one for key 'frequencies', with identical indices and dense shape.
  Args:
    categorical_column: A `CategoricalColumn` created by
      `categorical_column_with_*` functions.
    weight_feature_key: String key for weight values.
    dtype: Type of weights, such as `tf.float32`. Only float and integer
      weights are supported.
  Returns:
    A `CategoricalColumn` composed of two sparse features: one represents id,
    the other represents weight (value) of the id feature in that example.
  Raises:
    ValueError: if `dtype` is not convertible to float.
  """
  # Weights must be numeric so they can be cast to float downstream.
  dtype_is_numeric = (
      dtype is not None and (dtype.is_integer or dtype.is_floating))
  if not dtype_is_numeric:
    raise ValueError('dtype {} is not convertible to float.'.format(dtype))
  return WeightedCategoricalColumn(
      categorical_column=categorical_column,
      weight_feature_key=weight_feature_key,
      dtype=dtype)
@tf_export('feature_column.crossed_column')
def crossed_column(keys, hash_bucket_size, hash_key=None):
  """Returns a column for performing crosses of categorical features.
  Crossed features will be hashed according to `hash_bucket_size`. Conceptually,
  the transformation can be thought of as:
    Hash(cartesian product of features) % `hash_bucket_size`
  For example, if the input features are:
  * SparseTensor referred by first key:
    ```python
    shape = [2, 2]
    {
        [0, 0]: "a"
        [1, 0]: "b"
        [1, 1]: "c"
    }
    ```
  * SparseTensor referred by second key:
    ```python
    shape = [2, 1]
    {
        [0, 0]: "d"
        [1, 0]: "e"
    }
    ```
  then crossed feature will look like:
  ```python
  shape = [2, 2]
  {
      [0, 0]: Hash64("d", Hash64("a")) % hash_bucket_size
      [1, 0]: Hash64("e", Hash64("b")) % hash_bucket_size
      [1, 1]: Hash64("e", Hash64("c")) % hash_bucket_size
  }
  ```
  Here is an example to create a linear model with crosses of string features:
  ```python
  keywords_x_doc_terms = crossed_column(['keywords', 'doc_terms'], 50K)
  columns = [keywords_x_doc_terms, ...]
  features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
  linear_prediction = linear_model(features, columns)
  ```
  You could also use vocabulary lookup before crossing:
  ```python
  keywords = categorical_column_with_vocabulary_file(
      'keywords', '/path/to/vocabulary/file', vocabulary_size=1K)
  keywords_x_doc_terms = crossed_column([keywords, 'doc_terms'], 50K)
  columns = [keywords_x_doc_terms, ...]
  features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
  linear_prediction = linear_model(features, columns)
  ```
  If an input feature is of numeric type, you can use
  `categorical_column_with_identity`, or `bucketized_column`, as in the example:
  ```python
  # vertical_id is an integer categorical feature.
  vertical_id = categorical_column_with_identity('vertical_id', 10K)
  price = numeric_column('price')
  # bucketized_column converts numerical feature to a categorical one.
  bucketized_price = bucketized_column(price, boundaries=[...])
  vertical_id_x_price = crossed_column([vertical_id, bucketized_price], 50K)
  columns = [vertical_id_x_price, ...]
  features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
  linear_prediction = linear_model(features, columns)
  ```
  To use crossed column in DNN model, you need to add it in an embedding column
  as in this example:
  ```python
  vertical_id_x_price = crossed_column([vertical_id, bucketized_price], 50K)
  vertical_id_x_price_embedded = embedding_column(vertical_id_x_price, 10)
  dense_tensor = input_layer(features, [vertical_id_x_price_embedded, ...])
  ```
  Args:
    keys: An iterable identifying the features to be crossed. Each element can
      be either:
      * string: Will use the corresponding feature which must be of string type.
      * `CategoricalColumn`: Will use the transformed tensor produced by this
        column. Does not support hashed categorical column.
    hash_bucket_size: An int >= 1. The number of buckets.
    hash_key: Specify the hash_key that will be used by the `FingerprintCat64`
      function to combine the crosses fingerprints on SparseCrossOp (optional).
  Returns:
    A `CrossedColumn`.
  Raises:
    ValueError: If `len(keys) < 2`.
    ValueError: If any of the keys is neither a string nor `CategoricalColumn`.
    ValueError: If any of the keys is `HashedCategoricalColumn`.
    ValueError: If `hash_bucket_size < 1`.
  """
  # NOTE(fix): the previous message claimed 'must be > 1' while the check
  # actually accepts 1 and only rejects falsy values and values < 1 (as the
  # docstring's Raises section states). Message now matches the validation.
  if not hash_bucket_size or hash_bucket_size < 1:
    raise ValueError('hash_bucket_size must be at least 1. '
                     'hash_bucket_size: {}'.format(hash_bucket_size))
  if not keys or len(keys) < 2:
    raise ValueError(
        'keys must be a list with length > 1. Given: {}'.format(keys))
  for key in keys:
    if (not isinstance(key, six.string_types) and
        not isinstance(key, (CategoricalColumn, fc_old._CategoricalColumn))):  # pylint: disable=protected-access
      raise ValueError(
          'Unsupported key type. All keys must be either string, or '
          'categorical column except HashedCategoricalColumn. '
          'Given: {}'.format(key))
    if isinstance(key,
                  (HashedCategoricalColumn, fc_old._HashedCategoricalColumn)):  # pylint: disable=protected-access
      # Hashing before crossing increases the collision probability; users
      # should pass the raw feature name (a string) instead.
      raise ValueError(
          'categorical_column_with_hash_bucket is not supported for crossing. '
          'Hashing before crossing will increase probability of collision. '
          'Instead, use the feature name as a string. Given: {}'.format(key))
  return CrossedColumn(
      keys=tuple(keys), hash_bucket_size=hash_bucket_size, hash_key=hash_key)
@six.add_metaclass(abc.ABCMeta)
class FeatureColumn(object):
  """Represents a feature column abstraction.
  WARNING: Do not subclass this layer unless you know what you are doing:
  the API is subject to future changes.
  To distinguish between the concept of a feature family and a specific binary
  feature within a family, we refer to a feature family like "country" as a
  feature column. For example, we can have a feature in a `tf.Example` format:
    {key: "country",  value: [ "US" ]}
  In this example the value of feature is "US" and "country" refers to the
  column of the feature.
  This class is an abstract class. Users should not create instances of this.
  """
  @abc.abstractproperty
  def name(self):
    """Returns string. Used for naming."""
    pass
  def __lt__(self, other):
    """Allows feature columns to be sorted in Python 3 as they are in Python 2.
    Feature columns need to occasionally be sortable, for example when used as
    keys in a features dictionary passed to a layer.
    In CPython, `__lt__` must be defined for all objects in the
    sequence being sorted.
    If any objects in the sequence being sorted do not have an `__lt__` method
    compatible with feature column objects (such as strings), then CPython will
    fall back to using the `__gt__` method below.
    https://docs.python.org/3/library/stdtypes.html#list.sort
    Args:
      other: The other object to compare to.
    Returns:
      True if the string representation of this object is lexicographically less
      than the string representation of `other`. For FeatureColumn objects,
      this looks like "<__main__.FeatureColumn object at 0xa>".
    """
    return str(self) < str(other)
  def __gt__(self, other):
    """Allows feature columns to be sorted in Python 3 as they are in Python 2.
    Feature columns need to occasionally be sortable, for example when used as
    keys in a features dictionary passed to a layer.
    `__gt__` is called when the "other" object being compared during the sort
    does not have `__lt__` defined.
    Example: http://gpaste/4803354716798976
    Args:
      other: The other object to compare to.
    Returns:
      True if the string representation of this object is lexicographically
      greater than the string representation of `other`. For FeatureColumn
      objects, this looks like "<__main__.FeatureColumn object at 0xa>".
    """
    return str(self) > str(other)
  @abc.abstractmethod
  def transform_feature(self, transformation_cache, state_manager):
    """Returns intermediate representation (usually a `Tensor`).
    Uses `transformation_cache` to create an intermediate representation
    (usually a `Tensor`) that other feature columns can use.
    Example usage of `transformation_cache`:
    Let's say a Feature column depends on raw feature ('raw') and another
    `FeatureColumn` (input_fc). To access corresponding `Tensor`s,
    transformation_cache will be used as follows:
    ```python
    raw_tensor = transformation_cache.get('raw', state_manager)
    fc_tensor = transformation_cache.get(input_fc, state_manager)
    ```
    Args:
      transformation_cache: A `FeatureTransformationCache` object to access
        features.
      state_manager: A `StateManager` to create / access resources such as
        lookup tables.
    Returns:
      Transformed feature `Tensor`.
    """
    pass
  @abc.abstractproperty
  def parse_example_spec(self):
    """Returns a `tf.Example` parsing spec as dict.
    It is used for get_parsing_spec for `tf.io.parse_example`. Returned spec is
    a dict from keys ('string') to `VarLenFeature`, `FixedLenFeature`, and other
    supported objects. Please check documentation of `tf.io.parse_example` for
    all supported spec objects.
    Let's say a Feature column depends on raw feature ('raw') and another
    `FeatureColumn` (input_fc). One possible implementation of
    parse_example_spec is as follows:
    ```python
    spec = {'raw': tf.io.FixedLenFeature(...)}
    spec.update(input_fc.parse_example_spec)
    return spec
    ```
    """
    pass
  def create_state(self, state_manager):
    """Uses the `state_manager` to create state for the FeatureColumn.
    Default implementation is a no-op; stateful columns override this.
    Args:
      state_manager: A `StateManager` to create / access resources such as
        lookup tables and variables.
    """
    pass
  @abc.abstractproperty
  def _is_v2_column(self):
    """Returns whether this FeatureColumn is fully conformant to the new API.
    This is needed for composition type cases where an EmbeddingColumn etc.
    might take in old categorical columns as input and then we want to use the
    old API.
    """
    pass
  @abc.abstractproperty
  def parents(self):
    """Returns a list of immediate raw feature and FeatureColumn dependencies.
    For example:
    # For the following feature columns
    a = numeric_column('f1')
    c = crossed_column(a, 'f2')
    # The expected parents are:
    a.parents = ['f1']
    c.parents = [a, 'f2']
    """
    pass
  @abc.abstractmethod
  def _get_config(self):
    """Returns the config of the feature column.
    A FeatureColumn config is a Python dictionary (serializable) containing the
    configuration of a FeatureColumn. The same FeatureColumn can be
    reinstantiated later from this configuration.
    The config of a feature column does not include information about feature
    columns depending on it nor the FeatureColumn class name.
    Example with (de)serialization practices followed in this file:
    ```python
    class SerializationExampleFeatureColumn(
        FeatureColumn, collections.namedtuple(
            'SerializationExampleFeatureColumn',
            ('dimension', 'parent', 'dtype', 'normalizer_fn'))):
      def _get_config(self):
        # Create a dict from the namedtuple.
        # Python attribute literals can be directly copied from / to the config.
        # For example 'dimension', assuming it is an integer literal.
        config = dict(zip(self._fields, self))
        # (De)serialization of parent FeatureColumns should use the provided
        # (de)serialize_feature_column() methods that take care of de-duping.
        config['parent'] = serialize_feature_column(self.parent)
        # Many objects provide custom (de)serialization e.g: for tf.DType
        # tf.DType.name, tf.as_dtype() can be used.
        config['dtype'] = self.dtype.name
        # Non-trivial dependencies should be Keras-(de)serializable.
        config['normalizer_fn'] = generic_utils.serialize_keras_object(
            self.normalizer_fn)
        return config
      @classmethod
      def _from_config(cls, config, custom_objects=None, columns_by_name=None):
        # This should do the inverse transform from `_get_config` and construct
        # the namedtuple.
        kwargs = config.copy()
        kwargs['parent'] = deserialize_feature_column(
            config['parent'], custom_objects, columns_by_name)
        kwargs['dtype'] = dtypes.as_dtype(config['dtype'])
        kwargs['normalizer_fn'] = generic_utils.deserialize_keras_object(
            config['normalizer_fn'], custom_objects=custom_objects)
        return cls(**kwargs)
    ```
    Returns:
      A serializable Dict that can be used to deserialize the object with
      from_config.
    """
    pass
  @classmethod
  def _from_config(cls, config, custom_objects=None, columns_by_name=None):
    """Creates a FeatureColumn from its config.
    This method should be the reverse of `_get_config`, capable of instantiating
    the same FeatureColumn from the config dictionary. See `_get_config` for an
    example of common (de)serialization practices followed in this file.
    TODO(b/118939620): This is a private method until consensus is reached on
    supporting object deserialization deduping within Keras.
    Args:
      config: A Dict config acquired with `_get_config`.
      custom_objects: Optional dictionary mapping names (strings) to custom
        classes or functions to be considered during deserialization.
      columns_by_name: A Dict[String, FeatureColumn] of existing columns in
        order to avoid duplication. Should be passed to any calls to
        deserialize_feature_column().
    Returns:
      A FeatureColumn for the input config.
    """
    pass
class DenseColumn(FeatureColumn):
  """Represents a column which can be represented as `Tensor`.
  Some examples of this type are: numeric_column, embedding_column,
  indicator_column.
  Subclasses must implement the `FeatureColumn` abstract API in addition to
  the two abstract members declared here.
  """
  @abc.abstractproperty
  def variable_shape(self):
    """`TensorShape` of `get_dense_tensor`, without batch dimension."""
    pass
  @abc.abstractmethod
  def get_dense_tensor(self, transformation_cache, state_manager):
    """Returns a `Tensor`.
    The output of this function will be used by model-builder-functions. For
    example the pseudo code of `input_layer` will be like:
    ```python
    def input_layer(features, feature_columns, ...):
      outputs = [fc.get_dense_tensor(...) for fc in feature_columns]
      return tf.concat(outputs)
    ```
    Args:
      transformation_cache: A `FeatureTransformationCache` object to access
        features.
      state_manager: A `StateManager` to create / access resources such as
        lookup tables.
    Returns:
      `Tensor` of shape [batch_size] + `variable_shape`.
    """
    pass
def is_feature_column_v2(feature_columns):
  """Returns True if all feature columns are V2."""
  # A column is V2 iff it is a `FeatureColumn` instance that declares itself
  # conformant via `_is_v2_column`. `all` short-circuits on the first failure,
  # mirroring the original early-return loop, and yields True for an empty
  # iterable.
  return all(
      isinstance(column, FeatureColumn) and column._is_v2_column  # pylint: disable=protected-access
      for column in feature_columns)
def _create_weighted_sum(column, transformation_cache, state_manager,
                         sparse_combiner, weight_var):
  """Creates a weighted sum for a dense/categorical column for linear_model."""
  # Dense columns ignore `sparse_combiner`; only categorical columns need it.
  if not isinstance(column, CategoricalColumn):
    return _create_dense_column_weighted_sum(
        column=column,
        transformation_cache=transformation_cache,
        state_manager=state_manager,
        weight_var=weight_var)
  return _create_categorical_column_weighted_sum(
      column=column,
      transformation_cache=transformation_cache,
      state_manager=state_manager,
      sparse_combiner=sparse_combiner,
      weight_var=weight_var)
def _create_dense_column_weighted_sum(column, transformation_cache,
                                      state_manager, weight_var):
  """Create a weighted sum of a dense column for linear_model."""
  dense_tensor = column.get_dense_tensor(transformation_cache, state_manager)
  # Flatten everything but the batch dimension so a single matmul applies the
  # per-element weights.
  flat_size = column.variable_shape.num_elements()
  rows = array_ops.shape(dense_tensor)[0]
  flattened = array_ops.reshape(dense_tensor, shape=(rows, flat_size))
  return math_ops.matmul(flattened, weight_var, name='weighted_sum')
class CategoricalColumn(FeatureColumn):
  """Represents a categorical feature.
  A categorical feature typically handled with a `tf.SparseTensor` of IDs.
  """
  # Pair of (id_tensor, weight_tensor); weight_tensor may be None, meaning
  # all weights are 1 (see `get_sparse_tensors` below).
  IdWeightPair = collections.namedtuple(  # pylint: disable=invalid-name
      'IdWeightPair', ('id_tensor', 'weight_tensor'))
  @abc.abstractproperty
  def num_buckets(self):
    """Returns number of buckets in this sparse feature."""
    pass
  @abc.abstractmethod
  def get_sparse_tensors(self, transformation_cache, state_manager):
    """Returns an IdWeightPair.
    `IdWeightPair` is a pair of `SparseTensor`s which represents ids and
    weights.
    `IdWeightPair.id_tensor` is typically a `batch_size` x `num_buckets`
    `SparseTensor` of `int64`. `IdWeightPair.weight_tensor` is either a
    `SparseTensor` of `float` or `None` to indicate all weights should be
    taken to be 1. If specified, `weight_tensor` must have exactly the same
    shape and indices as `sp_ids`. Expected `SparseTensor` is same as parsing
    output of a `VarLenFeature` which is a ragged matrix.
    Args:
      transformation_cache: A `FeatureTransformationCache` object to access
        features.
      state_manager: A `StateManager` to create / access resources such as
        lookup tables.
    Returns:
      An `IdWeightPair`.
    """
    pass
def _create_categorical_column_weighted_sum(
    column, transformation_cache, state_manager, sparse_combiner, weight_var):
  # pylint: disable=g-doc-return-or-yield,g-doc-args
  """Create a weighted sum of a categorical column for linear_model.
  Note to maintainer: As implementation details, the weighted sum is
  implemented via embedding_lookup_sparse toward efficiency. Mathematically,
  they are the same.
  To be specific, conceptually, categorical column can be treated as multi-hot
  vector. Say:
  ```python
    x = [0 0 1]  # categorical column input
    w = [a b c]  # weights
  ```
  The weighted sum is `c` in this case, which is same as `w[2]`.
  Another example is
  ```python
    x = [0 1 1]  # categorical column input
    w = [a b c]  # weights
  ```
  The weighted sum is `b + c` in this case, which is same as `w[1] + w[2]`.
  For both cases, we can implement weighted sum via embedding_lookup with
  sparse_combiner = "sum".
  """
  sparse_tensors = column.get_sparse_tensors(transformation_cache,
                                             state_manager)
  # Flatten ids/weights to rank 2 (batch, -1) as required by the sparse
  # embedding lookup below.
  id_tensor = sparse_ops.sparse_reshape(sparse_tensors.id_tensor, [
      array_ops.shape(sparse_tensors.id_tensor)[0], -1
  ])
  weight_tensor = sparse_tensors.weight_tensor
  if weight_tensor is not None:
    weight_tensor = sparse_ops.sparse_reshape(
        weight_tensor, [array_ops.shape(weight_tensor)[0], -1])
  return embedding_ops.safe_embedding_lookup_sparse(
      weight_var,
      id_tensor,
      sparse_weights=weight_tensor,
      combiner=sparse_combiner,
      name='weighted_sum')
class SequenceDenseColumn(FeatureColumn):
  """Represents dense sequence data."""
  # Pair of (dense_tensor, sequence_length) returned by
  # `get_sequence_dense_tensor`.
  TensorSequenceLengthPair = collections.namedtuple(  # pylint: disable=invalid-name
      'TensorSequenceLengthPair', ('dense_tensor', 'sequence_length'))
  @abc.abstractmethod
  def get_sequence_dense_tensor(self, transformation_cache, state_manager):
    """Returns a `TensorSequenceLengthPair`.
    Args:
      transformation_cache: A `FeatureTransformationCache` object to access
        features.
      state_manager: A `StateManager` to create / access resources such as
        lookup tables.
    Returns:
      A `TensorSequenceLengthPair` of (dense_tensor, sequence_length).
    """
    pass
class FeatureTransformationCache(object):
  """Handles caching of transformations while building the model.
  `FeatureColumn` specifies how to digest an input column to the network. Some
  feature columns require data transformations. This class caches those
  transformations.
  Some features may be used in more than one place. For example, one can use a
  bucketized feature by itself and a cross with it. In that case we
  should create only one bucketization op instead of creating ops for each
  feature column separately. To handle re-use of transformed columns,
  `FeatureTransformationCache` caches all previously transformed columns.
  Example:
  We're trying to use the following `FeatureColumn`s:
  ```python
  bucketized_age = fc.bucketized_column(fc.numeric_column("age"), ...)
  keywords = fc.categorical_column_with_hash_bucket("keywords", ...)
  age_X_keywords = fc.crossed_column([bucketized_age, "keywords"])
  ... = linear_model(features,
                     [bucketized_age, keywords, age_X_keywords])
  ```
  If we transform each column independently, then we'll get duplication of
  bucketization (one for cross, one for bucketization itself).
  The `FeatureTransformationCache` eliminates this duplication.
  """
  def __init__(self, features):
    """Creates a `FeatureTransformationCache`.
    Args:
      features: A mapping from feature column to objects that are `Tensor` or
        `SparseTensor`, or can be converted to same via
        `sparse_tensor.convert_to_tensor_or_sparse_tensor`. A `string` key
        signifies a base feature (not-transformed). A `FeatureColumn` key
        means that this `Tensor` is the output of an existing `FeatureColumn`
        which can be reused.
    """
    # Copy so later mutations of the caller's dict don't affect the cache.
    self._features = features.copy()
    # Maps str/FeatureColumn key -> already-transformed (Sparse)Tensor.
    self._feature_tensors = {}
  def get(self, key, state_manager):
    """Returns a `Tensor` for the given key.
    A `str` key is used to access a base feature (not-transformed). When a
    `FeatureColumn` is passed, the transformed feature is returned if it
    already exists, otherwise the given `FeatureColumn` is asked to provide its
    transformed output, which is then cached.
    Args:
      key: a `str` or a `FeatureColumn`.
      state_manager: A StateManager object that holds the FeatureColumn state.
    Returns:
      The transformed `Tensor` corresponding to the `key`.
    Raises:
      ValueError: if key is not found or a transformed `Tensor` cannot be
        computed.
    """
    if key in self._feature_tensors:
      # FeatureColumn is already transformed or converted.
      return self._feature_tensors[key]
    if key in self._features:
      # Raw feature: convert (and possibly expand dims) once, then cache.
      feature_tensor = self._get_raw_feature_as_tensor(key)
      self._feature_tensors[key] = feature_tensor
      return feature_tensor
    if isinstance(key, six.string_types):
      raise ValueError('Feature {} is not in features dictionary.'.format(key))
    if not isinstance(key, FeatureColumn):
      raise TypeError('"key" must be either a "str" or "FeatureColumn". '
                      'Provided: {}'.format(key))
    # A FeatureColumn not seen before: ask it to transform itself (it may
    # recursively call back into this cache) and memoize the result.
    column = key
    logging.debug('Transforming feature_column %s.', column)
    transformed = column.transform_feature(self, state_manager)
    if transformed is None:
      raise ValueError('Column {} is not supported.'.format(column.name))
    self._feature_tensors[column] = transformed
    return transformed
  def _get_raw_feature_as_tensor(self, key):
    """Gets the raw_feature (keyed by `key`) as `tensor`.
    The raw feature is converted to (sparse) tensor and maybe expand dim.
    For both `Tensor` and `SparseTensor`, the rank will be expanded (to 2) if
    the rank is 1. This supports dynamic rank also. For rank 0 raw feature, will
    error out as it is not supported.
    Args:
      key: A `str` key to access the raw feature.
    Returns:
      A `Tensor` or `SparseTensor`.
    Raises:
      ValueError: if the raw feature has rank 0.
    """
    raw_feature = self._features[key]
    feature_tensor = sparse_tensor_lib.convert_to_tensor_or_sparse_tensor(
        raw_feature)
    def expand_dims(input_tensor):
      # Input_tensor must have rank 1.
      if isinstance(input_tensor, sparse_tensor_lib.SparseTensor):
        return sparse_ops.sparse_reshape(
            input_tensor, [array_ops.shape(input_tensor)[0], 1])
      else:
        return array_ops.expand_dims(input_tensor, -1)
    rank = feature_tensor.get_shape().ndims
    if rank is not None:
      # Static rank is known: validate and expand eagerly.
      if rank == 0:
        raise ValueError(
            'Feature (key: {}) cannot have rank 0. Given: {}'.format(
                key, feature_tensor))
      return feature_tensor if rank != 1 else expand_dims(feature_tensor)
    # Handle dynamic rank.
    with ops.control_dependencies([
        check_ops.assert_positive(
            array_ops.rank(feature_tensor),
            message='Feature (key: {}) cannot have rank 0. Given: {}'.format(
                key, feature_tensor))]):
      return control_flow_ops.cond(
          math_ops.equal(1, array_ops.rank(feature_tensor)),
          lambda: expand_dims(feature_tensor),
          lambda: feature_tensor)
# TODO(ptucker): Move to third_party/tensorflow/python/ops/sparse_ops.py
def _to_sparse_input_and_drop_ignore_values(input_tensor, ignore_value=None):
  """Converts a `Tensor` to a `SparseTensor`, dropping ignore_value cells.
  If `input_tensor` is already a `SparseTensor`, just return it.
  Args:
    input_tensor: A string or integer `Tensor`.
    ignore_value: Entries in `dense_tensor` equal to this value will be
      absent from the resulting `SparseTensor`. If `None`, default value of
      `dense_tensor`'s dtype will be used ('' for `str`, -1 for `int`).
  Returns:
    A `SparseTensor` with the same shape as `input_tensor`.
  Raises:
    ValueError: when `input_tensor`'s rank is `None`.
  """
  input_tensor = sparse_tensor_lib.convert_to_tensor_or_sparse_tensor(
      input_tensor)
  if isinstance(input_tensor, sparse_tensor_lib.SparseTensor):
    return input_tensor
  with ops.name_scope(None, 'to_sparse_input', (input_tensor, ignore_value,)):
    if ignore_value is None:
      if input_tensor.dtype == dtypes.string:
        # Exception due to TF strings are converted to numpy objects by default.
        ignore_value = ''
      elif input_tensor.dtype.is_integer:
        ignore_value = -1  # -1 has a special meaning of missing feature
      else:
        # NOTE: `as_numpy_dtype` is a property, so with the parentheses this is
        # constructing a new numpy object of the given type, which yields the
        # default value for that type.
        ignore_value = input_tensor.dtype.as_numpy_dtype()
    ignore_value = math_ops.cast(
        ignore_value, input_tensor.dtype, name='ignore_value')
    # Keep only coordinates whose value differs from ignore_value; those
    # coordinates become the indices of the resulting SparseTensor.
    indices = array_ops.where_v2(
        math_ops.not_equal(input_tensor, ignore_value), name='indices')
    return sparse_tensor_lib.SparseTensor(
        indices=indices,
        values=array_ops.gather_nd(input_tensor, indices, name='values'),
        dense_shape=array_ops.shape(
            input_tensor, out_type=dtypes.int64, name='dense_shape'))
def _normalize_feature_columns(feature_columns):
  """Normalizes the `feature_columns` input.
  Converts `feature_columns` to a list where possible, then validates the
  element types, non-emptiness, and name uniqueness that downstream library
  code relies on.
  Args:
    feature_columns: The raw feature columns, usually passed by users.
  Returns:
    The normalized feature column list, sorted by column name.
  Raises:
    ValueError: for any invalid inputs, such as empty, duplicated names, etc.
  """
  # Promote a bare column to a one-element list, and materialize iterators.
  if isinstance(feature_columns, FeatureColumn):
    feature_columns = [feature_columns]
  if isinstance(feature_columns, collections_abc.Iterator):
    feature_columns = list(feature_columns)
  if isinstance(feature_columns, dict):
    raise ValueError('Expected feature_columns to be iterable, found dict.')
  # Type-check every element before any other validation.
  for candidate in feature_columns:
    if not isinstance(candidate, FeatureColumn):
      raise ValueError('Items of feature_columns must be a FeatureColumn. '
                       'Given (type {}): {}.'.format(
                           type(candidate), candidate))
  if not feature_columns:
    raise ValueError('feature_columns must not be empty.')
  # Reject duplicate names: two columns with the same name would refer to the
  # same base feature.
  seen_by_name = {}
  for candidate in feature_columns:
    previous = seen_by_name.get(candidate.name)
    if previous is not None:
      raise ValueError('Duplicate feature column name found for columns: {} '
                       'and {}. This usually means that these columns refer to '
                       'same base feature. Either one must be discarded or a '
                       'duplicated but renamed item must be inserted in '
                       'features dict.'.format(candidate, previous))
    seen_by_name[candidate.name] = candidate
  return sorted(feature_columns, key=lambda column: column.name)
class NumericColumn(
    DenseColumn,
    fc_old._DenseColumn,  # pylint: disable=protected-access
    collections.namedtuple(
        'NumericColumn',
        ('key', 'shape', 'default_value', 'dtype', 'normalizer_fn'))):
  """see `numeric_column`."""
  @property
  def _is_v2_column(self):
    """NumericColumn is always fully conformant to the V2 API."""
    return True
  @property
  def name(self):
    """See `FeatureColumn` base class."""
    return self.key
  @property
  def parse_example_spec(self):
    """See `FeatureColumn` base class."""
    return {
        self.key:
            parsing_ops.FixedLenFeature(self.shape, self.dtype,
                                        self.default_value)
    }
  @property
  @deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
                          _FEATURE_COLUMN_DEPRECATION)
  def _parse_example_spec(self):
    """Deprecated v1 alias of `parse_example_spec`."""
    return self.parse_example_spec
  def _transform_input_tensor(self, input_tensor):
    """Applies `normalizer_fn` (if any) and casts the result to float32."""
    if isinstance(input_tensor, sparse_tensor_lib.SparseTensor):
      raise ValueError(
          'The corresponding Tensor of numerical column must be a Tensor. '
          'SparseTensor is not supported. key: {}'.format(self.key))
    if self.normalizer_fn is not None:
      input_tensor = self.normalizer_fn(input_tensor)
    # Downstream dense layers expect float32 regardless of the input dtype.
    return math_ops.cast(input_tensor, dtypes.float32)
  @deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
                          _FEATURE_COLUMN_DEPRECATION)
  def _transform_feature(self, inputs):
    """Deprecated v1 transform path; shares `_transform_input_tensor`."""
    input_tensor = inputs.get(self.key)
    return self._transform_input_tensor(input_tensor)
  def transform_feature(self, transformation_cache, state_manager):
    """See `FeatureColumn` base class.
    In this case, we apply the `normalizer_fn` to the input tensor.
    Args:
      transformation_cache: A `FeatureTransformationCache` object to access
        features.
      state_manager: A `StateManager` to create / access resources such as
        lookup tables.
    Returns:
      Normalized input tensor.
    Raises:
      ValueError: If a SparseTensor is passed in.
    """
    input_tensor = transformation_cache.get(self.key, state_manager)
    return self._transform_input_tensor(input_tensor)
  @property
  def variable_shape(self):
    """See `DenseColumn` base class."""
    return tensor_shape.TensorShape(self.shape)
  @property
  @deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
                          _FEATURE_COLUMN_DEPRECATION)
  def _variable_shape(self):
    """Deprecated v1 alias of `variable_shape`."""
    return self.variable_shape
  def get_dense_tensor(self, transformation_cache, state_manager):
    """Returns dense `Tensor` representing numeric feature.
    Args:
      transformation_cache: A `FeatureTransformationCache` object to access
        features.
      state_manager: A `StateManager` to create / access resources such as
        lookup tables.
    Returns:
      Dense `Tensor` created within `transform_feature`.
    """
    # Feature has been already transformed. Return the intermediate
    # representation created by _transform_feature.
    return transformation_cache.get(self, state_manager)
  @deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
                          _FEATURE_COLUMN_DEPRECATION)
  def _get_dense_tensor(self, inputs, weight_collections=None, trainable=None):
    """Deprecated v1 dense-tensor accessor; extra args are unused here."""
    del weight_collections
    del trainable
    return inputs.get(self)
  @property
  def parents(self):
    """See 'FeatureColumn` base class."""
    return [self.key]
  def _get_config(self):
    """See 'FeatureColumn` base class."""
    config = dict(zip(self._fields, self))
    # normalizer_fn and dtype are not directly JSON-serializable; use the
    # Keras object serializer and the dtype name respectively.
    config['normalizer_fn'] = generic_utils.serialize_keras_object(
        self.normalizer_fn)
    config['dtype'] = self.dtype.name
    return config
  @classmethod
  def _from_config(cls, config, custom_objects=None, columns_by_name=None):
    """See 'FeatureColumn` base class."""
    _check_config_keys(config, cls._fields)
    kwargs = _standardize_and_copy_config(config)
    kwargs['normalizer_fn'] = generic_utils.deserialize_keras_object(
        config['normalizer_fn'], custom_objects=custom_objects)
    kwargs['dtype'] = dtypes.as_dtype(config['dtype'])
    return cls(**kwargs)
class BucketizedColumn(
DenseColumn,
CategoricalColumn,
fc_old._DenseColumn, # pylint: disable=protected-access
fc_old._CategoricalColumn, # pylint: disable=protected-access
collections.namedtuple('BucketizedColumn',
('source_column', 'boundaries'))):
"""See `bucketized_column`."""
  @property
  def _is_v2_column(self):
    """True iff the wrapped `source_column` is itself a V2 `FeatureColumn`."""
    return (isinstance(self.source_column, FeatureColumn) and
            self.source_column._is_v2_column)  # pylint: disable=protected-access
  @property
  def name(self):
    """See `FeatureColumn` base class."""
    # Derived from the source column, e.g. 'price' -> 'price_bucketized'.
    return '{}_bucketized'.format(self.source_column.name)
  @property
  def parse_example_spec(self):
    """See `FeatureColumn` base class."""
    # Parsing is delegated entirely to the wrapped numeric source column.
    return self.source_column.parse_example_spec
  @property
  @deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
                          _FEATURE_COLUMN_DEPRECATION)
  def _parse_example_spec(self):
    """Deprecated v1 alias; delegates to the source column's v1 spec."""
    return self.source_column._parse_example_spec  # pylint: disable=protected-access
  @deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
                          _FEATURE_COLUMN_DEPRECATION)
  def _transform_feature(self, inputs):
    """Returns bucketized categorical `source_column` tensor."""
    source_tensor = inputs.get(self.source_column)
    # Maps each value to the index of the boundary interval it falls into.
    return math_ops._bucketize(  # pylint: disable=protected-access
        source_tensor,
        boundaries=self.boundaries)
def transform_feature(self, transformation_cache, state_manager):
"""Returns bucketized categorical `source_column` tensor."""
source_tensor = transformation_cache.get(self.source_column, state_manager)
return math_ops._bucketize( # pylint: disable=protected-access
source_tensor,
boundaries=self.boundaries)
@property
def variable_shape(self):
"""See `DenseColumn` base class."""
return tensor_shape.TensorShape(
tuple(self.source_column.shape) + (len(self.boundaries) + 1,))
@property
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _variable_shape(self):
return self.variable_shape
def _get_dense_tensor_for_input_tensor(self, input_tensor):
return array_ops.one_hot(
indices=math_ops.cast(input_tensor, dtypes.int64),
depth=len(self.boundaries) + 1,
on_value=1.,
off_value=0.)
def get_dense_tensor(self, transformation_cache, state_manager):
"""Returns one hot encoded dense `Tensor`."""
input_tensor = transformation_cache.get(self, state_manager)
return self._get_dense_tensor_for_input_tensor(input_tensor)
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _get_dense_tensor(self, inputs, weight_collections=None, trainable=None):
del weight_collections
del trainable
input_tensor = inputs.get(self)
return self._get_dense_tensor_for_input_tensor(input_tensor)
@property
def num_buckets(self):
"""See `CategoricalColumn` base class."""
# By construction, source_column is always one-dimensional.
return (len(self.boundaries) + 1) * self.source_column.shape[0]
@property
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _num_buckets(self):
return self.num_buckets
def _get_sparse_tensors_for_input_tensor(self, input_tensor):
batch_size = array_ops.shape(input_tensor)[0]
# By construction, source_column is always one-dimensional.
source_dimension = self.source_column.shape[0]
i1 = array_ops.reshape(
array_ops.tile(
array_ops.expand_dims(math_ops.range(0, batch_size), 1),
[1, source_dimension]),
(-1,))
i2 = array_ops.tile(math_ops.range(0, source_dimension), [batch_size])
# Flatten the bucket indices and unique them across dimensions
# E.g. 2nd dimension indices will range from k to 2*k-1 with k buckets
bucket_indices = (
array_ops.reshape(input_tensor, (-1,)) +
(len(self.boundaries) + 1) * i2)
indices = math_ops.cast(
array_ops.transpose(array_ops.stack((i1, i2))), dtypes.int64)
dense_shape = math_ops.cast(
array_ops.stack([batch_size, source_dimension]), dtypes.int64)
sparse_tensor = sparse_tensor_lib.SparseTensor(
indices=indices,
values=bucket_indices,
dense_shape=dense_shape)
return CategoricalColumn.IdWeightPair(sparse_tensor, None)
def get_sparse_tensors(self, transformation_cache, state_manager):
"""Converts dense inputs to SparseTensor so downstream code can use it."""
input_tensor = transformation_cache.get(self, state_manager)
return self._get_sparse_tensors_for_input_tensor(input_tensor)
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _get_sparse_tensors(self, inputs, weight_collections=None,
trainable=None):
"""Converts dense inputs to SparseTensor so downstream code can use it."""
del weight_collections
del trainable
input_tensor = inputs.get(self)
return self._get_sparse_tensors_for_input_tensor(input_tensor)
@property
def parents(self):
"""See 'FeatureColumn` base class."""
return [self.source_column]
def _get_config(self):
"""See 'FeatureColumn` base class."""
from tensorflow.python.feature_column.serialization import serialize_feature_column # pylint: disable=g-import-not-at-top
config = dict(zip(self._fields, self))
config['source_column'] = serialize_feature_column(self.source_column)
return config
@classmethod
def _from_config(cls, config, custom_objects=None, columns_by_name=None):
"""See 'FeatureColumn` base class."""
from tensorflow.python.feature_column.serialization import deserialize_feature_column # pylint: disable=g-import-not-at-top
_check_config_keys(config, cls._fields)
kwargs = _standardize_and_copy_config(config)
kwargs['source_column'] = deserialize_feature_column(
config['source_column'], custom_objects, columns_by_name)
return cls(**kwargs)
class EmbeddingColumn(
    DenseColumn,
    SequenceDenseColumn,
    fc_old._DenseColumn,  # pylint: disable=protected-access
    fc_old._SequenceDenseColumn,  # pylint: disable=protected-access
    collections.namedtuple(
        'EmbeddingColumn',
        ('categorical_column', 'dimension', 'combiner', 'initializer',
         'ckpt_to_load_from', 'tensor_name_in_ckpt', 'max_norm', 'trainable'))):
  """See `embedding_column`.

  Wraps a categorical column and maps its sparse ids to dense embedding
  vectors of size `dimension`, optionally warm-starting the embedding table
  from a checkpoint (`ckpt_to_load_from` / `tensor_name_in_ckpt`). Supports
  both the v2 API (variables owned by a `StateManager`) and the deprecated
  v1 `_`-prefixed API (variables created via `variable_scope`).
  """
  @property
  def _is_v2_column(self):
    return (isinstance(self.categorical_column, FeatureColumn) and
            self.categorical_column._is_v2_column)  # pylint: disable=protected-access
  @property
  def name(self):
    """See `FeatureColumn` base class."""
    return '{}_embedding'.format(self.categorical_column.name)
  @property
  def parse_example_spec(self):
    """See `FeatureColumn` base class."""
    # Parsing is delegated to the wrapped categorical column.
    return self.categorical_column.parse_example_spec
  @property
  @deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
                          _FEATURE_COLUMN_DEPRECATION)
  def _parse_example_spec(self):
    return self.categorical_column._parse_example_spec  # pylint: disable=protected-access
  def transform_feature(self, transformation_cache, state_manager):
    """Transforms underlying `categorical_column`."""
    return transformation_cache.get(self.categorical_column, state_manager)
  @deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
                          _FEATURE_COLUMN_DEPRECATION)
  def _transform_feature(self, inputs):
    return inputs.get(self.categorical_column)
  @property
  def variable_shape(self):
    """See `DenseColumn` base class."""
    return tensor_shape.TensorShape([self.dimension])
  @property
  @deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
                          _FEATURE_COLUMN_DEPRECATION)
  def _variable_shape(self):
    return self.variable_shape
  def create_state(self, state_manager):
    """Creates the embedding lookup variable."""
    # Prefer the v2 `num_buckets` property, falling back to the v1
    # `_num_buckets` for old-style categorical columns.
    num_buckets = getattr(self.categorical_column, 'num_buckets',
                          self.categorical_column._num_buckets)  # pylint: disable=protected-access
    embedding_shape = (num_buckets, self.dimension)
    state_manager.create_variable(
        self,
        name='embedding_weights',
        shape=embedding_shape,
        dtype=dtypes.float32,
        trainable=self.trainable,
        use_resource=True,
        initializer=self.initializer)
  def _get_dense_tensor_internal_helper(self, sparse_tensors,
                                        embedding_weights):
    """Runs the embedding lookup given sparse ids/weights and the table."""
    sparse_ids = sparse_tensors.id_tensor
    sparse_weights = sparse_tensors.weight_tensor
    if self.ckpt_to_load_from is not None:
      # Warm-start the embedding table from a checkpoint. For partitioned
      # variables, restore the underlying variable list.
      to_restore = embedding_weights
      if isinstance(to_restore, variables.PartitionedVariable):
        to_restore = to_restore._get_variable_list()  # pylint: disable=protected-access
      checkpoint_utils.init_from_checkpoint(self.ckpt_to_load_from, {
          self.tensor_name_in_ckpt: to_restore
      })
    # Return embedding lookup result.
    return embedding_ops.safe_embedding_lookup_sparse(
        embedding_weights=embedding_weights,
        sparse_ids=sparse_ids,
        sparse_weights=sparse_weights,
        combiner=self.combiner,
        name='%s_weights' % self.name,
        max_norm=self.max_norm)
  def _get_dense_tensor_internal(self, sparse_tensors, state_manager):
    """Private method that follows the signature of get_dense_tensor."""
    # v2 path: the table was created by create_state and is owned by the
    # state manager.
    embedding_weights = state_manager.get_variable(
        self, name='embedding_weights')
    return self._get_dense_tensor_internal_helper(sparse_tensors,
                                                  embedding_weights)
  def _old_get_dense_tensor_internal(self, sparse_tensors, weight_collections,
                                     trainable):
    """Private method that follows the signature of _get_dense_tensor."""
    # v1 path: create (or reuse) the table through variable_scope.
    embedding_shape = (self.categorical_column._num_buckets, self.dimension)  # pylint: disable=protected-access
    if (weight_collections and
        ops.GraphKeys.GLOBAL_VARIABLES not in weight_collections):
      weight_collections.append(ops.GraphKeys.GLOBAL_VARIABLES)
    embedding_weights = variable_scope.get_variable(
        name='embedding_weights',
        shape=embedding_shape,
        dtype=dtypes.float32,
        initializer=self.initializer,
        # The caller's `trainable` can only further restrict the column's own
        # `trainable` flag, never override it.
        trainable=self.trainable and trainable,
        collections=weight_collections)
    return self._get_dense_tensor_internal_helper(sparse_tensors,
                                                  embedding_weights)
  def get_dense_tensor(self, transformation_cache, state_manager):
    """Returns tensor after doing the embedding lookup.
    Args:
      transformation_cache: A `FeatureTransformationCache` object to access
        features.
      state_manager: A `StateManager` to create / access resources such as
        lookup tables.
    Returns:
      Embedding lookup tensor.
    Raises:
      ValueError: `categorical_column` is SequenceCategoricalColumn.
    """
    if isinstance(self.categorical_column, SequenceCategoricalColumn):
      raise ValueError(
          'In embedding_column: {}. '
          'categorical_column must not be of type SequenceCategoricalColumn. '
          'Suggested fix A: If you wish to use DenseFeatures, use a '
          'non-sequence categorical_column_with_*. '
          'Suggested fix B: If you wish to create sequence input, use '
          'SequenceFeatures instead of DenseFeatures. '
          'Given (type {}): {}'.format(self.name, type(self.categorical_column),
                                       self.categorical_column))
    # Get sparse IDs and weights.
    sparse_tensors = self.categorical_column.get_sparse_tensors(
        transformation_cache, state_manager)
    return self._get_dense_tensor_internal(sparse_tensors, state_manager)
  @deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
                          _FEATURE_COLUMN_DEPRECATION)
  def _get_dense_tensor(self, inputs, weight_collections=None, trainable=None):
    # Check both v2 and old-style sequence categorical columns.
    if isinstance(
        self.categorical_column,
        (SequenceCategoricalColumn, fc_old._SequenceCategoricalColumn)):  # pylint: disable=protected-access
      raise ValueError(
          'In embedding_column: {}. '
          'categorical_column must not be of type _SequenceCategoricalColumn. '
          'Suggested fix A: If you wish to use DenseFeatures, use a '
          'non-sequence categorical_column_with_*. '
          'Suggested fix B: If you wish to create sequence input, use '
          'SequenceFeatures instead of DenseFeatures. '
          'Given (type {}): {}'.format(self.name, type(self.categorical_column),
                                       self.categorical_column))
    sparse_tensors = self.categorical_column._get_sparse_tensors(  # pylint: disable=protected-access
        inputs, weight_collections, trainable)
    return self._old_get_dense_tensor_internal(sparse_tensors,
                                               weight_collections, trainable)
  def get_sequence_dense_tensor(self, transformation_cache, state_manager):
    """See `SequenceDenseColumn` base class."""
    if not isinstance(self.categorical_column, SequenceCategoricalColumn):
      raise ValueError(
          'In embedding_column: {}. '
          'categorical_column must be of type SequenceCategoricalColumn '
          'to use SequenceFeatures. '
          'Suggested fix: Use one of sequence_categorical_column_with_*. '
          'Given (type {}): {}'.format(self.name, type(self.categorical_column),
                                       self.categorical_column))
    sparse_tensors = self.categorical_column.get_sparse_tensors(
        transformation_cache, state_manager)
    dense_tensor = self._get_dense_tensor_internal(sparse_tensors,
                                                   state_manager)
    # Derive per-example sequence lengths from the sparse id tensor.
    sequence_length = fc_utils.sequence_length_from_sparse_tensor(
        sparse_tensors.id_tensor)
    return SequenceDenseColumn.TensorSequenceLengthPair(
        dense_tensor=dense_tensor, sequence_length=sequence_length)
  @deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
                          _FEATURE_COLUMN_DEPRECATION)
  def _get_sequence_dense_tensor(self,
                                 inputs,
                                 weight_collections=None,
                                 trainable=None):
    if not isinstance(
        self.categorical_column,
        (SequenceCategoricalColumn, fc_old._SequenceCategoricalColumn)):  # pylint: disable=protected-access
      raise ValueError(
          'In embedding_column: {}. '
          'categorical_column must be of type SequenceCategoricalColumn '
          'to use SequenceFeatures. '
          'Suggested fix: Use one of sequence_categorical_column_with_*. '
          'Given (type {}): {}'.format(self.name, type(self.categorical_column),
                                       self.categorical_column))
    sparse_tensors = self.categorical_column._get_sparse_tensors(inputs)  # pylint: disable=protected-access
    dense_tensor = self._old_get_dense_tensor_internal(
        sparse_tensors,
        weight_collections=weight_collections,
        trainable=trainable)
    sequence_length = fc_utils.sequence_length_from_sparse_tensor(
        sparse_tensors.id_tensor)
    return SequenceDenseColumn.TensorSequenceLengthPair(
        dense_tensor=dense_tensor, sequence_length=sequence_length)
  @property
  def parents(self):
    """See `FeatureColumn` base class."""
    return [self.categorical_column]
  def _get_config(self):
    """See `FeatureColumn` base class."""
    # Import here to avoid a circular dependency with the serialization module.
    from tensorflow.python.feature_column.serialization import serialize_feature_column  # pylint: disable=g-import-not-at-top
    config = dict(zip(self._fields, self))
    config['categorical_column'] = serialize_feature_column(
        self.categorical_column)
    config['initializer'] = initializers.serialize(self.initializer)
    return config
  @classmethod
  def _from_config(cls, config, custom_objects=None, columns_by_name=None):
    """See `FeatureColumn` base class."""
    # Import here to avoid a circular dependency with the serialization module.
    from tensorflow.python.feature_column.serialization import deserialize_feature_column  # pylint: disable=g-import-not-at-top
    _check_config_keys(config, cls._fields)
    kwargs = _standardize_and_copy_config(config)
    kwargs['categorical_column'] = deserialize_feature_column(
        config['categorical_column'], custom_objects, columns_by_name)
    kwargs['initializer'] = initializers.deserialize(
        config['initializer'], custom_objects=custom_objects)
    return cls(**kwargs)
def _raise_shared_embedding_column_error():
raise ValueError('SharedEmbeddingColumns are not supported in '
'`linear_model` or `input_layer`. Please use '
'`DenseFeatures` or `LinearModel` instead.')
class SharedEmbeddingColumnCreator(tracking.AutoTrackable):
  """Factory for `SharedEmbeddingColumn`s that share one embedding table.

  Holds the embedding configuration (dimension, initializer, optional
  checkpoint warm-start) and lazily creates one embedding-weights variable
  per graph. All columns produced by `__call__` look up into that shared
  variable.
  """
  def __init__(self,
               dimension,
               initializer,
               ckpt_to_load_from,
               tensor_name_in_ckpt,
               num_buckets,
               trainable,
               name='shared_embedding_column_creator'):
    self._dimension = dimension
    self._initializer = initializer
    self._ckpt_to_load_from = ckpt_to_load_from
    self._tensor_name_in_ckpt = tensor_name_in_ckpt
    self._num_buckets = num_buckets
    self._trainable = trainable
    self._name = name
    # Map from graph keys to embedding_weight variables.
    self._embedding_weights = {}
  def __call__(self, categorical_column, combiner, max_norm):
    """Returns a `SharedEmbeddingColumn` backed by this creator's table."""
    return SharedEmbeddingColumn(categorical_column, self, combiner, max_norm)
  @property
  def embedding_weights(self):
    """Returns (creating on first access per graph) the shared table."""
    # Key by graph so each graph gets its own variable instance.
    key = ops.get_default_graph()._graph_key  # pylint: disable=protected-access
    if key not in self._embedding_weights:
      embedding_shape = (self._num_buckets, self._dimension)
      var = variable_scope.get_variable(
          name=self._name,
          shape=embedding_shape,
          dtype=dtypes.float32,
          initializer=self._initializer,
          trainable=self._trainable)
      if self._ckpt_to_load_from is not None:
        # Warm-start from checkpoint; partitioned variables restore their
        # underlying variable list.
        to_restore = var
        if isinstance(to_restore, variables.PartitionedVariable):
          to_restore = to_restore._get_variable_list()  # pylint: disable=protected-access
        checkpoint_utils.init_from_checkpoint(
            self._ckpt_to_load_from, {self._tensor_name_in_ckpt: to_restore})
      self._embedding_weights[key] = var
    return self._embedding_weights[key]
  @property
  def dimension(self):
    # Embedding output dimension, fixed at construction.
    return self._dimension
class SharedEmbeddingColumn(
    DenseColumn,
    SequenceDenseColumn,
    fc_old._DenseColumn,  # pylint: disable=protected-access
    fc_old._SequenceDenseColumn,  # pylint: disable=protected-access
    collections.namedtuple(
        'SharedEmbeddingColumn',
        ('categorical_column', 'shared_embedding_column_creator', 'combiner',
         'max_norm'))):
  """See `embedding_column`.

  Like `EmbeddingColumn`, but the embedding table is owned by a
  `SharedEmbeddingColumnCreator` and shared with sibling columns. All
  deprecated v1 `_`-prefixed entry points deliberately raise, since shared
  embeddings are only supported through the v2 feature layers.
  """
  @property
  def _is_v2_column(self):
    return True
  @property
  def name(self):
    """See `FeatureColumn` base class."""
    return '{}_shared_embedding'.format(self.categorical_column.name)
  @property
  def parse_example_spec(self):
    """See `FeatureColumn` base class."""
    return self.categorical_column.parse_example_spec
  @property
  def _parse_example_spec(self):
    # v1 API is unsupported for shared embeddings.
    return _raise_shared_embedding_column_error()
  def transform_feature(self, transformation_cache, state_manager):
    """See `FeatureColumn` base class."""
    return transformation_cache.get(self.categorical_column, state_manager)
  def _transform_feature(self, inputs):
    # v1 API is unsupported for shared embeddings.
    return _raise_shared_embedding_column_error()
  @property
  def variable_shape(self):
    """See `DenseColumn` base class."""
    return tensor_shape.TensorShape(
        [self.shared_embedding_column_creator.dimension])
  @property
  def _variable_shape(self):
    # v1 API is unsupported for shared embeddings.
    return _raise_shared_embedding_column_error()
  def _get_dense_tensor_internal(self, transformation_cache, state_manager):
    """Private method that follows the signature of _get_dense_tensor."""
    # This method is called from a variable_scope with name _var_scope_name,
    # which is shared among all shared embeddings. Open a name_scope here, so
    # that the ops for different columns have distinct names.
    with ops.name_scope(None, default_name=self.name):
      # Get sparse IDs and weights.
      sparse_tensors = self.categorical_column.get_sparse_tensors(
          transformation_cache, state_manager)
      sparse_ids = sparse_tensors.id_tensor
      sparse_weights = sparse_tensors.weight_tensor
      # The shared table is created lazily (per graph) by the creator.
      embedding_weights = self.shared_embedding_column_creator.embedding_weights
      # Return embedding lookup result.
      return embedding_ops.safe_embedding_lookup_sparse(
          embedding_weights=embedding_weights,
          sparse_ids=sparse_ids,
          sparse_weights=sparse_weights,
          combiner=self.combiner,
          name='%s_weights' % self.name,
          max_norm=self.max_norm)
  def get_dense_tensor(self, transformation_cache, state_manager):
    """Returns the embedding lookup result."""
    if isinstance(self.categorical_column, SequenceCategoricalColumn):
      raise ValueError(
          'In embedding_column: {}. '
          'categorical_column must not be of type SequenceCategoricalColumn. '
          'Suggested fix A: If you wish to use DenseFeatures, use a '
          'non-sequence categorical_column_with_*. '
          'Suggested fix B: If you wish to create sequence input, use '
          'SequenceFeatures instead of DenseFeatures. '
          'Given (type {}): {}'.format(self.name, type(self.categorical_column),
                                       self.categorical_column))
    return self._get_dense_tensor_internal(transformation_cache, state_manager)
  def _get_dense_tensor(self, inputs, weight_collections=None, trainable=None):
    # v1 API is unsupported for shared embeddings.
    return _raise_shared_embedding_column_error()
  def get_sequence_dense_tensor(self, transformation_cache, state_manager):
    """See `SequenceDenseColumn` base class."""
    if not isinstance(self.categorical_column, SequenceCategoricalColumn):
      raise ValueError(
          'In embedding_column: {}. '
          'categorical_column must be of type SequenceCategoricalColumn '
          'to use SequenceFeatures. '
          'Suggested fix: Use one of sequence_categorical_column_with_*. '
          'Given (type {}): {}'.format(self.name, type(self.categorical_column),
                                       self.categorical_column))
    dense_tensor = self._get_dense_tensor_internal(transformation_cache,
                                                   state_manager)
    sparse_tensors = self.categorical_column.get_sparse_tensors(
        transformation_cache, state_manager)
    # Derive per-example sequence lengths from the sparse id tensor.
    sequence_length = fc_utils.sequence_length_from_sparse_tensor(
        sparse_tensors.id_tensor)
    return SequenceDenseColumn.TensorSequenceLengthPair(
        dense_tensor=dense_tensor, sequence_length=sequence_length)
  def _get_sequence_dense_tensor(self,
                                 inputs,
                                 weight_collections=None,
                                 trainable=None):
    # v1 API is unsupported for shared embeddings.
    return _raise_shared_embedding_column_error()
  @property
  def parents(self):
    """See `FeatureColumn` base class."""
    return [self.categorical_column]
  def _get_config(self):
    """See `FeatureColumn` base class."""
    # Serialization is unsupported: the creator holds non-serializable state.
    raise NotImplementedError()
  @classmethod
  def _from_config(cls, config, custom_objects=None, columns_by_name=None):
    """See `FeatureColumn` base class."""
    raise NotImplementedError()
def _check_shape(shape, key):
  """Validates `shape` and returns it normalized to a tuple.

  Args:
    shape: An integer or a sequence of integers describing a dense shape.
    key: The feature key, used only to build error messages.

  Returns:
    `shape` as a tuple of ints.

  Raises:
    TypeError: if any dimension is not an integer.
    ValueError: if any dimension is not greater than zero.
  """
  assert shape is not None
  # Promote a bare scalar to a one-element shape, then normalize to a tuple.
  if nest.is_sequence(shape):
    normalized = tuple(shape)
  else:
    normalized = (shape,)
  for dim in normalized:
    if not isinstance(dim, int):
      raise TypeError('shape dimensions must be integer. '
                      'shape: {}, key: {}'.format(normalized, key))
    if dim < 1:
      raise ValueError('shape dimensions must be greater than 0. '
                       'shape: {}, key: {}'.format(normalized, key))
  return normalized
class HashedCategoricalColumn(
    CategoricalColumn,
    fc_old._CategoricalColumn,  # pylint: disable=protected-access
    collections.namedtuple('HashedCategoricalColumn',
                           ('key', 'hash_bucket_size', 'dtype'))):
  """see `categorical_column_with_hash_bucket`.

  Maps feature values (strings, or integers stringified first) to ids in
  `[0, hash_bucket_size)` via a deterministic fingerprint hash.
  """
  @property
  def _is_v2_column(self):
    return True
  @property
  def name(self):
    """See `FeatureColumn` base class."""
    return self.key
  @property
  def parse_example_spec(self):
    """See `FeatureColumn` base class."""
    return {self.key: parsing_ops.VarLenFeature(self.dtype)}
  @property
  @deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
                          _FEATURE_COLUMN_DEPRECATION)
  def _parse_example_spec(self):
    return self.parse_example_spec
  def _transform_input_tensor(self, input_tensor):
    """Hashes the values in the feature_column."""
    if not isinstance(input_tensor, sparse_tensor_lib.SparseTensor):
      raise ValueError('SparseColumn input must be a SparseTensor.')
    fc_utils.assert_string_or_int(
        input_tensor.dtype,
        prefix='column_name: {} input_tensor'.format(self.key))
    if self.dtype.is_integer != input_tensor.dtype.is_integer:
      raise ValueError(
          'Column dtype and SparseTensors dtype must be compatible. '
          'key: {}, column dtype: {}, tensor dtype: {}'.format(
              self.key, self.dtype, input_tensor.dtype))
    if self.dtype == dtypes.string:
      sparse_values = input_tensor.values
    else:
      # The hash op only accepts strings; stringify integer values first.
      sparse_values = string_ops.as_string(input_tensor.values)
    sparse_id_values = string_ops.string_to_hash_bucket_fast(
        sparse_values, self.hash_bucket_size, name='lookup')
    # Preserve the input's sparsity pattern; only values change.
    return sparse_tensor_lib.SparseTensor(
        input_tensor.indices, sparse_id_values, input_tensor.dense_shape)
  def transform_feature(self, transformation_cache, state_manager):
    """Hashes the values in the feature_column."""
    input_tensor = _to_sparse_input_and_drop_ignore_values(
        transformation_cache.get(self.key, state_manager))
    return self._transform_input_tensor(input_tensor)
  @deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
                          _FEATURE_COLUMN_DEPRECATION)
  def _transform_feature(self, inputs):
    input_tensor = _to_sparse_input_and_drop_ignore_values(inputs.get(self.key))
    return self._transform_input_tensor(input_tensor)
  @property
  def num_buckets(self):
    """Returns number of buckets in this sparse feature."""
    return self.hash_bucket_size
  @property
  @deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
                          _FEATURE_COLUMN_DEPRECATION)
  def _num_buckets(self):
    return self.num_buckets
  def get_sparse_tensors(self, transformation_cache, state_manager):
    """See `CategoricalColumn` base class."""
    return CategoricalColumn.IdWeightPair(
        transformation_cache.get(self, state_manager), None)
  @deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
                          _FEATURE_COLUMN_DEPRECATION)
  def _get_sparse_tensors(self, inputs, weight_collections=None,
                          trainable=None):
    # This column creates no variables, so these arguments are irrelevant.
    del weight_collections
    del trainable
    return CategoricalColumn.IdWeightPair(inputs.get(self), None)
  @property
  def parents(self):
    """See `FeatureColumn` base class."""
    return [self.key]
  def _get_config(self):
    """See `FeatureColumn` base class."""
    config = dict(zip(self._fields, self))
    # Store the dtype by name (string) rather than as a DType object.
    config['dtype'] = self.dtype.name
    return config
  @classmethod
  def _from_config(cls, config, custom_objects=None, columns_by_name=None):
    """See `FeatureColumn` base class."""
    _check_config_keys(config, cls._fields)
    kwargs = _standardize_and_copy_config(config)
    kwargs['dtype'] = dtypes.as_dtype(config['dtype'])
    return cls(**kwargs)
class VocabularyFileCategoricalColumn(
    CategoricalColumn,
    fc_old._CategoricalColumn,  # pylint: disable=protected-access
    collections.namedtuple('VocabularyFileCategoricalColumn',
                           ('key', 'vocabulary_file', 'vocabulary_size',
                            'num_oov_buckets', 'dtype', 'default_value'))):
  """See `categorical_column_with_vocabulary_file`.

  Maps feature values to ids using a vocabulary file; out-of-vocabulary
  values are either hashed into `num_oov_buckets` extra buckets or assigned
  `default_value`.
  """
  @property
  def _is_v2_column(self):
    return True
  @property
  def name(self):
    """See `FeatureColumn` base class."""
    return self.key
  @property
  def parse_example_spec(self):
    """See `FeatureColumn` base class."""
    return {self.key: parsing_ops.VarLenFeature(self.dtype)}
  @property
  @deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
                          _FEATURE_COLUMN_DEPRECATION)
  def _parse_example_spec(self):
    return self.parse_example_spec
  def _transform_input_tensor(self, input_tensor, state_manager=None):
    """Creates a lookup table for the vocabulary."""
    if self.dtype.is_integer != input_tensor.dtype.is_integer:
      raise ValueError(
          'Column dtype and SparseTensors dtype must be compatible. '
          'key: {}, column dtype: {}, tensor dtype: {}'.format(
              self.key, self.dtype, input_tensor.dtype))
    fc_utils.assert_string_or_int(
        input_tensor.dtype,
        prefix='column_name: {} input_tensor'.format(self.key))
    key_dtype = self.dtype
    if input_tensor.dtype.is_integer:
      # `index_table_from_file` requires 64-bit integer keys.
      key_dtype = dtypes.int64
      input_tensor = math_ops.cast(input_tensor, dtypes.int64)
    name = '{}_lookup'.format(self.key)
    table = lookup_ops.index_table_from_file(
        vocabulary_file=self.vocabulary_file,
        num_oov_buckets=self.num_oov_buckets,
        vocab_size=self.vocabulary_size,
        default_value=self.default_value,
        key_dtype=key_dtype,
        name=name)
    if state_manager is not None:
      # v2 path: register the table so the state manager tracks/initializes it.
      state_manager.add_resource(self, name, table)
    return table.lookup(input_tensor)
  def transform_feature(self, transformation_cache, state_manager):
    """Creates a lookup table for the vocabulary."""
    input_tensor = _to_sparse_input_and_drop_ignore_values(
        transformation_cache.get(self.key, state_manager))
    return self._transform_input_tensor(input_tensor, state_manager)
  @deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
                          _FEATURE_COLUMN_DEPRECATION)
  def _transform_feature(self, inputs):
    input_tensor = _to_sparse_input_and_drop_ignore_values(inputs.get(self.key))
    return self._transform_input_tensor(input_tensor)
  @property
  def num_buckets(self):
    """Returns number of buckets in this sparse feature."""
    # Vocabulary entries first, then the OOV buckets.
    return self.vocabulary_size + self.num_oov_buckets
  @property
  @deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
                          _FEATURE_COLUMN_DEPRECATION)
  def _num_buckets(self):
    return self.num_buckets
  def get_sparse_tensors(self, transformation_cache, state_manager):
    """See `CategoricalColumn` base class."""
    return CategoricalColumn.IdWeightPair(
        transformation_cache.get(self, state_manager), None)
  @deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
                          _FEATURE_COLUMN_DEPRECATION)
  def _get_sparse_tensors(self, inputs, weight_collections=None,
                          trainable=None):
    # This column creates no variables, so these arguments are irrelevant.
    del weight_collections
    del trainable
    return CategoricalColumn.IdWeightPair(inputs.get(self), None)
  @property
  def parents(self):
    """See `FeatureColumn` base class."""
    return [self.key]
  def _get_config(self):
    """See `FeatureColumn` base class."""
    config = dict(zip(self._fields, self))
    # Store the dtype by name (string) rather than as a DType object.
    config['dtype'] = self.dtype.name
    return config
  @classmethod
  def _from_config(cls, config, custom_objects=None, columns_by_name=None):
    """See `FeatureColumn` base class."""
    _check_config_keys(config, cls._fields)
    kwargs = _standardize_and_copy_config(config)
    kwargs['dtype'] = dtypes.as_dtype(config['dtype'])
    return cls(**kwargs)
class VocabularyListCategoricalColumn(
    CategoricalColumn,
    fc_old._CategoricalColumn,  # pylint: disable=protected-access
    collections.namedtuple(
        'VocabularyListCategoricalColumn',
        ('key', 'vocabulary_list', 'dtype', 'default_value', 'num_oov_buckets'))
):
  """See `categorical_column_with_vocabulary_list`.

  Like `VocabularyFileCategoricalColumn`, but the vocabulary is given
  inline as a Python sequence instead of a file.
  """
  @property
  def _is_v2_column(self):
    return True
  @property
  def name(self):
    """See `FeatureColumn` base class."""
    return self.key
  @property
  def parse_example_spec(self):
    """See `FeatureColumn` base class."""
    return {self.key: parsing_ops.VarLenFeature(self.dtype)}
  @property
  @deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
                          _FEATURE_COLUMN_DEPRECATION)
  def _parse_example_spec(self):
    return self.parse_example_spec
  def _transform_input_tensor(self, input_tensor, state_manager=None):
    """Creates a lookup table for the vocabulary list."""
    if self.dtype.is_integer != input_tensor.dtype.is_integer:
      raise ValueError(
          'Column dtype and SparseTensors dtype must be compatible. '
          'key: {}, column dtype: {}, tensor dtype: {}'.format(
              self.key, self.dtype, input_tensor.dtype))
    fc_utils.assert_string_or_int(
        input_tensor.dtype,
        prefix='column_name: {} input_tensor'.format(self.key))
    key_dtype = self.dtype
    if input_tensor.dtype.is_integer:
      # `index_table_from_tensor` requires 64-bit integer keys.
      key_dtype = dtypes.int64
      input_tensor = math_ops.cast(input_tensor, dtypes.int64)
    name = '{}_lookup'.format(self.key)
    table = lookup_ops.index_table_from_tensor(
        vocabulary_list=tuple(self.vocabulary_list),
        default_value=self.default_value,
        num_oov_buckets=self.num_oov_buckets,
        dtype=key_dtype,
        name=name)
    if state_manager is not None:
      # v2 path: register the table so the state manager tracks/initializes it.
      state_manager.add_resource(self, name, table)
    return table.lookup(input_tensor)
  def transform_feature(self, transformation_cache, state_manager):
    """Creates a lookup table for the vocabulary list."""
    input_tensor = _to_sparse_input_and_drop_ignore_values(
        transformation_cache.get(self.key, state_manager))
    return self._transform_input_tensor(input_tensor, state_manager)
  @deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
                          _FEATURE_COLUMN_DEPRECATION)
  def _transform_feature(self, inputs):
    input_tensor = _to_sparse_input_and_drop_ignore_values(inputs.get(self.key))
    return self._transform_input_tensor(input_tensor)
  @property
  def num_buckets(self):
    """Returns number of buckets in this sparse feature."""
    # Vocabulary entries first, then the OOV buckets.
    return len(self.vocabulary_list) + self.num_oov_buckets
  @property
  @deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
                          _FEATURE_COLUMN_DEPRECATION)
  def _num_buckets(self):
    return self.num_buckets
  def get_sparse_tensors(self, transformation_cache, state_manager):
    """See `CategoricalColumn` base class."""
    return CategoricalColumn.IdWeightPair(
        transformation_cache.get(self, state_manager), None)
  @deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
                          _FEATURE_COLUMN_DEPRECATION)
  def _get_sparse_tensors(self, inputs, weight_collections=None,
                          trainable=None):
    # This column creates no variables, so these arguments are irrelevant.
    del weight_collections
    del trainable
    return CategoricalColumn.IdWeightPair(inputs.get(self), None)
  @property
  def parents(self):
    """See `FeatureColumn` base class."""
    return [self.key]
  def _get_config(self):
    """See `FeatureColumn` base class."""
    config = dict(zip(self._fields, self))
    # Store the dtype by name (string) rather than as a DType object.
    config['dtype'] = self.dtype.name
    return config
  @classmethod
  def _from_config(cls, config, custom_objects=None, columns_by_name=None):
    """See `FeatureColumn` base class."""
    _check_config_keys(config, cls._fields)
    kwargs = _standardize_and_copy_config(config)
    kwargs['dtype'] = dtypes.as_dtype(config['dtype'])
    return cls(**kwargs)
class IdentityCategoricalColumn(
    CategoricalColumn,
    fc_old._CategoricalColumn,  # pylint: disable=protected-access
    collections.namedtuple('IdentityCategoricalColumn',
                           ('key', 'number_buckets', 'default_value'))):
  """See `categorical_column_with_identity`.

  Uses integer feature values directly as category ids in
  `[0, number_buckets)`. Out-of-range values are replaced with
  `default_value` when one is configured.
  """
  @property
  def _is_v2_column(self):
    return True
  @property
  def name(self):
    """See `FeatureColumn` base class."""
    return self.key
  @property
  def parse_example_spec(self):
    """See `FeatureColumn` base class."""
    return {self.key: parsing_ops.VarLenFeature(dtypes.int64)}
  @property
  @deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
                          _FEATURE_COLUMN_DEPRECATION)
  def _parse_example_spec(self):
    return self.parse_example_spec
  def _transform_input_tensor(self, input_tensor):
    """Returns a SparseTensor with identity values."""
    if not input_tensor.dtype.is_integer:
      raise ValueError(
          'Invalid input, not integer. key: {} dtype: {}'.format(
              self.key, input_tensor.dtype))
    values = input_tensor.values
    if input_tensor.values.dtype != dtypes.int64:
      values = math_ops.cast(values, dtypes.int64, name='values')
    if self.default_value is not None:
      # NOTE(review): this cast starts again from input_tensor.values, so when
      # the dtype branch above also ran, the first cast is discarded — the
      # result value is the same, it just leaves a redundant op in the graph.
      values = math_ops.cast(input_tensor.values, dtypes.int64, name='values')
      num_buckets = math_ops.cast(
          self.num_buckets, dtypes.int64, name='num_buckets')
      zero = math_ops.cast(0, dtypes.int64, name='zero')
      # Assign default for out-of-range values.
      values = array_ops.where_v2(
          math_ops.logical_or(
              values < zero, values >= num_buckets, name='out_of_range'),
          array_ops.fill(
              dims=array_ops.shape(values),
              value=math_ops.cast(self.default_value, dtypes.int64),
              name='default_values'), values)
    # Preserve the input's sparsity pattern; only values may change.
    return sparse_tensor_lib.SparseTensor(
        indices=input_tensor.indices,
        values=values,
        dense_shape=input_tensor.dense_shape)
  def transform_feature(self, transformation_cache, state_manager):
    """Returns a SparseTensor with identity values."""
    input_tensor = _to_sparse_input_and_drop_ignore_values(
        transformation_cache.get(self.key, state_manager))
    return self._transform_input_tensor(input_tensor)
  @deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
                          _FEATURE_COLUMN_DEPRECATION)
  def _transform_feature(self, inputs):
    input_tensor = _to_sparse_input_and_drop_ignore_values(inputs.get(self.key))
    return self._transform_input_tensor(input_tensor)
  @property
  def num_buckets(self):
    """Returns number of buckets in this sparse feature."""
    return self.number_buckets
  @property
  @deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
                          _FEATURE_COLUMN_DEPRECATION)
  def _num_buckets(self):
    return self.num_buckets
  def get_sparse_tensors(self, transformation_cache, state_manager):
    """See `CategoricalColumn` base class."""
    return CategoricalColumn.IdWeightPair(
        transformation_cache.get(self, state_manager), None)
  @deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
                          _FEATURE_COLUMN_DEPRECATION)
  def _get_sparse_tensors(self, inputs, weight_collections=None,
                          trainable=None):
    # This column creates no variables, so these arguments are irrelevant.
    del weight_collections
    del trainable
    return CategoricalColumn.IdWeightPair(inputs.get(self), None)
  @property
  def parents(self):
    """See `FeatureColumn` base class."""
    return [self.key]
  def _get_config(self):
    """See `FeatureColumn` base class."""
    # All fields are already JSON-compatible; no conversion needed.
    return dict(zip(self._fields, self))
  @classmethod
  def _from_config(cls, config, custom_objects=None, columns_by_name=None):
    """See `FeatureColumn` base class."""
    _check_config_keys(config, cls._fields)
    kwargs = _standardize_and_copy_config(config)
    return cls(**kwargs)
class WeightedCategoricalColumn(
    CategoricalColumn,
    fc_old._CategoricalColumn,  # pylint: disable=protected-access
    collections.namedtuple(
        'WeightedCategoricalColumn',
        ('categorical_column', 'weight_feature_key', 'dtype'))):
  """See `weighted_categorical_column`."""

  @property
  def _is_v2_column(self):
    # v2 only when the wrapped categorical column is itself a v2 column.
    return (isinstance(self.categorical_column, FeatureColumn) and
            self.categorical_column._is_v2_column)  # pylint: disable=protected-access

  @property
  def name(self):
    """See `FeatureColumn` base class."""
    return '{}_weighted_by_{}'.format(
        self.categorical_column.name, self.weight_feature_key)

  @property
  def parse_example_spec(self):
    """See `FeatureColumn` base class."""
    config = self.categorical_column.parse_example_spec
    # The weight key must not collide with the wrapped column's own keys.
    if self.weight_feature_key in config:
      raise ValueError('Parse config {} already exists for {}.'.format(
          config[self.weight_feature_key], self.weight_feature_key))
    config[self.weight_feature_key] = parsing_ops.VarLenFeature(self.dtype)
    return config

  @property
  @deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
                          _FEATURE_COLUMN_DEPRECATION)
  def _parse_example_spec(self):
    config = self.categorical_column._parse_example_spec  # pylint: disable=protected-access
    if self.weight_feature_key in config:
      raise ValueError('Parse config {} already exists for {}.'.format(
          config[self.weight_feature_key], self.weight_feature_key))
    config[self.weight_feature_key] = parsing_ops.VarLenFeature(self.dtype)
    return config

  @property
  def num_buckets(self):
    """See `DenseColumn` base class."""
    return self.categorical_column.num_buckets

  @property
  @deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
                          _FEATURE_COLUMN_DEPRECATION)
  def _num_buckets(self):
    return self.categorical_column._num_buckets  # pylint: disable=protected-access

  def _transform_weight_tensor(self, weight_tensor):
    # Validates dtype and normalizes the raw weights feature into a float
    # SparseTensor suitable for pairing with the id tensor.
    if weight_tensor is None:
      raise ValueError('Missing weights {}.'.format(self.weight_feature_key))
    weight_tensor = sparse_tensor_lib.convert_to_tensor_or_sparse_tensor(
        weight_tensor)
    if self.dtype != weight_tensor.dtype.base_dtype:
      raise ValueError('Bad dtype, expected {}, but got {}.'.format(
          self.dtype, weight_tensor.dtype))
    if not isinstance(weight_tensor, sparse_tensor_lib.SparseTensor):
      # The weight tensor can be a regular Tensor. In this case, sparsify it.
      weight_tensor = _to_sparse_input_and_drop_ignore_values(
          weight_tensor, ignore_value=0.0)
    if not weight_tensor.dtype.is_floating:
      weight_tensor = math_ops.cast(weight_tensor, dtypes.float32)
    return weight_tensor

  def transform_feature(self, transformation_cache, state_manager):
    """Applies weights to tensor generated from `categorical_column`'."""
    weight_tensor = transformation_cache.get(self.weight_feature_key,
                                             state_manager)
    weight_tensor = self._transform_weight_tensor(weight_tensor)
    # Returns an (ids, weights) pair; unpacked by get_sparse_tensors below.
    return (transformation_cache.get(self.categorical_column, state_manager),
            weight_tensor)

  @deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
                          _FEATURE_COLUMN_DEPRECATION)
  def _transform_feature(self, inputs):
    """Applies weights to tensor generated from `categorical_column`'."""
    weight_tensor = inputs.get(self.weight_feature_key)
    weight_tensor = self._transform_weight_tensor(weight_tensor)
    return (inputs.get(self.categorical_column), weight_tensor)

  def get_sparse_tensors(self, transformation_cache, state_manager):
    """See `CategoricalColumn` base class."""
    tensors = transformation_cache.get(self, state_manager)
    return CategoricalColumn.IdWeightPair(tensors[0], tensors[1])

  @deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
                          _FEATURE_COLUMN_DEPRECATION)
  def _get_sparse_tensors(self, inputs, weight_collections=None,
                          trainable=None):
    del weight_collections
    del trainable
    tensors = inputs.get(self)
    return CategoricalColumn.IdWeightPair(tensors[0], tensors[1])

  @property
  def parents(self):
    """See 'FeatureColumn` base class."""
    return [self.categorical_column, self.weight_feature_key]

  def _get_config(self):
    """See 'FeatureColumn` base class."""
    from tensorflow.python.feature_column.serialization import serialize_feature_column  # pylint: disable=g-import-not-at-top
    config = dict(zip(self._fields, self))
    config['categorical_column'] = serialize_feature_column(
        self.categorical_column)
    # Stored by name so the config stays JSON-serializable.
    config['dtype'] = self.dtype.name
    return config

  @classmethod
  def _from_config(cls, config, custom_objects=None, columns_by_name=None):
    """See 'FeatureColumn` base class."""
    from tensorflow.python.feature_column.serialization import deserialize_feature_column  # pylint: disable=g-import-not-at-top
    _check_config_keys(config, cls._fields)
    kwargs = _standardize_and_copy_config(config)
    kwargs['categorical_column'] = deserialize_feature_column(
        config['categorical_column'], custom_objects, columns_by_name)
    kwargs['dtype'] = dtypes.as_dtype(config['dtype'])
    return cls(**kwargs)
class CrossedColumn(
    CategoricalColumn,
    fc_old._CategoricalColumn,  # pylint: disable=protected-access
    collections.namedtuple('CrossedColumn',
                           ('keys', 'hash_bucket_size', 'hash_key'))):
  """See `crossed_column`."""

  @property
  def _is_v2_column(self):
    # v2 only when every leaf key is either a plain string or a v2 column.
    for key in _collect_leaf_level_keys(self):
      if isinstance(key, six.string_types):
        continue
      if not isinstance(key, FeatureColumn):
        return False
      if not key._is_v2_column:  # pylint: disable=protected-access
        return False
    return True

  @property
  def name(self):
    """See `FeatureColumn` base class."""
    feature_names = []
    for key in _collect_leaf_level_keys(self):
      if isinstance(key, (FeatureColumn, fc_old._FeatureColumn)):  # pylint: disable=protected-access
        feature_names.append(key.name)
      else:  # key must be a string
        feature_names.append(key)
    # Sorted, so the name is deterministic regardless of key order.
    return '_X_'.join(sorted(feature_names))

  @property
  def parse_example_spec(self):
    """See `FeatureColumn` base class."""
    config = {}
    for key in self.keys:
      if isinstance(key, FeatureColumn):
        config.update(key.parse_example_spec)
      elif isinstance(key, fc_old._FeatureColumn):  # pylint: disable=protected-access
        config.update(key._parse_example_spec)  # pylint: disable=protected-access
      else:  # key must be a string
        config.update({key: parsing_ops.VarLenFeature(dtypes.string)})
    return config

  @property
  @deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
                          _FEATURE_COLUMN_DEPRECATION)
  def _parse_example_spec(self):
    return self.parse_example_spec

  def transform_feature(self, transformation_cache, state_manager):
    """Generates a hashed sparse cross from the input tensors."""
    feature_tensors = []
    for key in _collect_leaf_level_keys(self):
      if isinstance(key, six.string_types):
        feature_tensors.append(transformation_cache.get(key, state_manager))
      elif isinstance(key, (fc_old._CategoricalColumn, CategoricalColumn)):  # pylint: disable=protected-access
        ids_and_weights = key.get_sparse_tensors(transformation_cache,
                                                 state_manager)
        if ids_and_weights.weight_tensor is not None:
          raise ValueError(
              'crossed_column does not support weight_tensor, but the given '
              'column populates weight_tensor. '
              'Given column: {}'.format(key.name))
        feature_tensors.append(ids_and_weights.id_tensor)
      else:
        raise ValueError('Unsupported column type. Given: {}'.format(key))
    return sparse_ops.sparse_cross_hashed(
        inputs=feature_tensors,
        num_buckets=self.hash_bucket_size,
        hash_key=self.hash_key)

  @deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
                          _FEATURE_COLUMN_DEPRECATION)
  def _transform_feature(self, inputs):
    """Generates a hashed sparse cross from the input tensors."""
    feature_tensors = []
    for key in _collect_leaf_level_keys(self):
      if isinstance(key, six.string_types):
        feature_tensors.append(inputs.get(key))
      elif isinstance(key, (CategoricalColumn, fc_old._CategoricalColumn)):  # pylint: disable=protected-access
        ids_and_weights = key._get_sparse_tensors(inputs)  # pylint: disable=protected-access
        if ids_and_weights.weight_tensor is not None:
          raise ValueError(
              'crossed_column does not support weight_tensor, but the given '
              'column populates weight_tensor. '
              'Given column: {}'.format(key.name))
        feature_tensors.append(ids_and_weights.id_tensor)
      else:
        raise ValueError('Unsupported column type. Given: {}'.format(key))
    return sparse_ops.sparse_cross_hashed(
        inputs=feature_tensors,
        num_buckets=self.hash_bucket_size,
        hash_key=self.hash_key)

  @property
  def num_buckets(self):
    """Returns number of buckets in this sparse feature."""
    return self.hash_bucket_size

  @property
  @deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
                          _FEATURE_COLUMN_DEPRECATION)
  def _num_buckets(self):
    return self.num_buckets

  def get_sparse_tensors(self, transformation_cache, state_manager):
    """See `CategoricalColumn` base class."""
    return CategoricalColumn.IdWeightPair(
        transformation_cache.get(self, state_manager), None)

  @deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
                          _FEATURE_COLUMN_DEPRECATION)
  def _get_sparse_tensors(self, inputs, weight_collections=None,
                          trainable=None):
    """See `CategoricalColumn` base class."""
    del weight_collections
    del trainable
    return CategoricalColumn.IdWeightPair(inputs.get(self), None)

  @property
  def parents(self):
    """See 'FeatureColumn` base class."""
    # Top-level keys only; nested crosses are not expanded here.
    return list(self.keys)

  def _get_config(self):
    """See 'FeatureColumn` base class."""
    from tensorflow.python.feature_column.serialization import serialize_feature_column  # pylint: disable=g-import-not-at-top
    config = dict(zip(self._fields, self))
    config['keys'] = tuple([serialize_feature_column(fc) for fc in self.keys])
    return config

  @classmethod
  def _from_config(cls, config, custom_objects=None, columns_by_name=None):
    """See 'FeatureColumn` base class."""
    from tensorflow.python.feature_column.serialization import deserialize_feature_column  # pylint: disable=g-import-not-at-top
    _check_config_keys(config, cls._fields)
    kwargs = _standardize_and_copy_config(config)
    kwargs['keys'] = tuple([
        deserialize_feature_column(c, custom_objects, columns_by_name)
        for c in config['keys']
    ])
    return cls(**kwargs)
def _collect_leaf_level_keys(cross):
  """Collects base keys by expanding all nested crosses.

  Args:
    cross: A `CrossedColumn`.

  Returns:
    A list of strings or `CategoricalColumn` instances.
  """
  # Iterative depth-first expansion; preserves left-to-right key order.
  leaf_level_keys = []
  pending = list(reversed(cross.keys))
  while pending:
    key = pending.pop()
    if isinstance(key, CrossedColumn):
      pending.extend(reversed(key.keys))
    else:
      leaf_level_keys.append(key)
  return leaf_level_keys
def _prune_invalid_ids(sparse_ids, sparse_weights):
  """Prune invalid IDs (< 0) from the input ids and weights."""
  is_id_valid = math_ops.greater_equal(sparse_ids.values, 0)
  if sparse_weights is not None:
    # NOTE(review): and-ing with an all-True tensor of the weights' value
    # shape is a semantic no-op; it looks intended only to tie the validity
    # mask to the weights' shape at graph-build time -- TODO confirm.
    is_id_valid = math_ops.logical_and(
        is_id_valid,
        array_ops.ones_like(sparse_weights.values, dtype=dtypes.bool))
  sparse_ids = sparse_ops.sparse_retain(sparse_ids, is_id_valid)
  if sparse_weights is not None:
    # Keep weights aligned with the retained ids.
    sparse_weights = sparse_ops.sparse_retain(sparse_weights, is_id_valid)
  return sparse_ids, sparse_weights
def _prune_invalid_weights(sparse_ids, sparse_weights):
  """Prune invalid weights (< 0) from the input ids and weights."""
  if sparse_weights is None:
    # Nothing to prune against; both inputs pass through unchanged.
    return sparse_ids, sparse_weights
  # Retain only entries whose weight is strictly positive, in both tensors.
  is_weights_valid = math_ops.greater(sparse_weights.values, 0)
  sparse_ids = sparse_ops.sparse_retain(sparse_ids, is_weights_valid)
  sparse_weights = sparse_ops.sparse_retain(sparse_weights, is_weights_valid)
  return sparse_ids, sparse_weights
class IndicatorColumn(
    DenseColumn,
    SequenceDenseColumn,
    fc_old._DenseColumn,  # pylint: disable=protected-access
    fc_old._SequenceDenseColumn,  # pylint: disable=protected-access
    collections.namedtuple('IndicatorColumn', ('categorical_column'))):
  """Represents a one-hot column for use in deep networks.

  Args:
    categorical_column: A `CategoricalColumn` which is created by
      `categorical_column_with_*` function.
  """

  @property
  def _is_v2_column(self):
    # v2 only when the wrapped categorical column is itself a v2 column.
    return (isinstance(self.categorical_column, FeatureColumn) and
            self.categorical_column._is_v2_column)  # pylint: disable=protected-access

  @property
  def name(self):
    """See `FeatureColumn` base class."""
    return '{}_indicator'.format(self.categorical_column.name)

  def _transform_id_weight_pair(self, id_weight_pair):
    # Converts sparse ids (and optional weights) into a dense multi-hot
    # float tensor.
    id_tensor = id_weight_pair.id_tensor
    weight_tensor = id_weight_pair.weight_tensor

    # If the underlying column is weighted, return the input as a dense tensor.
    if weight_tensor is not None:
      weighted_column = sparse_ops.sparse_merge(
          sp_ids=id_tensor,
          sp_values=weight_tensor,
          vocab_size=int(self._variable_shape[-1]))
      # Remove (?, -1) index.
      weighted_column = sparse_ops.sparse_slice(weighted_column, [0, 0],
                                                weighted_column.dense_shape)
      # Use scatter_nd to merge duplicated indices if existed,
      # instead of sparse_tensor_to_dense.
      return array_ops.scatter_nd(weighted_column.indices,
                                  weighted_column.values,
                                  weighted_column.dense_shape)

    # Missing entries become -1, which one_hot maps to an all-zero row.
    dense_id_tensor = sparse_ops.sparse_tensor_to_dense(
        id_tensor, default_value=-1)

    # One hot must be float for tf.concat reasons since all other inputs to
    # input_layer are float32.
    one_hot_id_tensor = array_ops.one_hot(
        dense_id_tensor,
        depth=self._variable_shape[-1],
        on_value=1.0,
        off_value=0.0)

    # Reduce to get a multi-hot per example.
    return math_ops.reduce_sum(one_hot_id_tensor, axis=[-2])

  def transform_feature(self, transformation_cache, state_manager):
    """Returns dense `Tensor` representing feature.

    Args:
      transformation_cache: A `FeatureTransformationCache` object to access
        features.
      state_manager: A `StateManager` to create / access resources such as
        lookup tables.

    Returns:
      Transformed feature `Tensor`.

    Raises:
      ValueError: if input rank is not known at graph building time.
    """
    id_weight_pair = self.categorical_column.get_sparse_tensors(
        transformation_cache, state_manager)
    return self._transform_id_weight_pair(id_weight_pair)

  @deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
                          _FEATURE_COLUMN_DEPRECATION)
  def _transform_feature(self, inputs):
    id_weight_pair = self.categorical_column._get_sparse_tensors(inputs)  # pylint: disable=protected-access
    return self._transform_id_weight_pair(id_weight_pair)

  @property
  def parse_example_spec(self):
    """See `FeatureColumn` base class."""
    return self.categorical_column.parse_example_spec

  @property
  @deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
                          _FEATURE_COLUMN_DEPRECATION)
  def _parse_example_spec(self):
    return self.categorical_column._parse_example_spec  # pylint: disable=protected-access

  @property
  def variable_shape(self):
    """Returns a `TensorShape` representing the shape of the dense `Tensor`."""
    if isinstance(self.categorical_column, FeatureColumn):
      return tensor_shape.TensorShape([1, self.categorical_column.num_buckets])
    else:
      return tensor_shape.TensorShape([1, self.categorical_column._num_buckets])  # pylint: disable=protected-access

  @property
  @deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
                          _FEATURE_COLUMN_DEPRECATION)
  def _variable_shape(self):
    return tensor_shape.TensorShape([1, self.categorical_column._num_buckets])  # pylint: disable=protected-access

  def get_dense_tensor(self, transformation_cache, state_manager):
    """Returns dense `Tensor` representing feature.

    Args:
      transformation_cache: A `FeatureTransformationCache` object to access
        features.
      state_manager: A `StateManager` to create / access resources such as
        lookup tables.

    Returns:
      Dense `Tensor` created within `transform_feature`.

    Raises:
      ValueError: If `categorical_column` is a `SequenceCategoricalColumn`.
    """
    if isinstance(self.categorical_column, SequenceCategoricalColumn):
      raise ValueError(
          'In indicator_column: {}. '
          'categorical_column must not be of type SequenceCategoricalColumn. '
          'Suggested fix A: If you wish to use DenseFeatures, use a '
          'non-sequence categorical_column_with_*. '
          'Suggested fix B: If you wish to create sequence input, use '
          'SequenceFeatures instead of DenseFeatures. '
          'Given (type {}): {}'.format(self.name, type(self.categorical_column),
                                       self.categorical_column))
    # Feature has been already transformed. Return the intermediate
    # representation created by transform_feature.
    return transformation_cache.get(self, state_manager)

  @deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
                          _FEATURE_COLUMN_DEPRECATION)
  def _get_dense_tensor(self, inputs, weight_collections=None, trainable=None):
    del weight_collections
    del trainable
    if isinstance(
        self.categorical_column,
        (SequenceCategoricalColumn, fc_old._SequenceCategoricalColumn)):  # pylint: disable=protected-access
      raise ValueError(
          'In indicator_column: {}. '
          'categorical_column must not be of type _SequenceCategoricalColumn. '
          'Suggested fix A: If you wish to use DenseFeatures, use a '
          'non-sequence categorical_column_with_*. '
          'Suggested fix B: If you wish to create sequence input, use '
          'SequenceFeatures instead of DenseFeatures. '
          'Given (type {}): {}'.format(self.name, type(self.categorical_column),
                                       self.categorical_column))
    # Feature has been already transformed. Return the intermediate
    # representation created by transform_feature.
    return inputs.get(self)

  def get_sequence_dense_tensor(self, transformation_cache, state_manager):
    """See `SequenceDenseColumn` base class."""
    if not isinstance(self.categorical_column, SequenceCategoricalColumn):
      raise ValueError(
          'In indicator_column: {}. '
          'categorical_column must be of type SequenceCategoricalColumn '
          'to use SequenceFeatures. '
          'Suggested fix: Use one of sequence_categorical_column_with_*. '
          'Given (type {}): {}'.format(self.name, type(self.categorical_column),
                                       self.categorical_column))
    # Feature has been already transformed. Return the intermediate
    # representation created by transform_feature.
    dense_tensor = transformation_cache.get(self, state_manager)
    sparse_tensors = self.categorical_column.get_sparse_tensors(
        transformation_cache, state_manager)
    # Sequence length is derived from the pre-densification sparse ids.
    sequence_length = fc_utils.sequence_length_from_sparse_tensor(
        sparse_tensors.id_tensor)
    return SequenceDenseColumn.TensorSequenceLengthPair(
        dense_tensor=dense_tensor, sequence_length=sequence_length)

  @deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
                          _FEATURE_COLUMN_DEPRECATION)
  def _get_sequence_dense_tensor(self,
                                 inputs,
                                 weight_collections=None,
                                 trainable=None):
    # Do nothing with weight_collections and trainable since no variables are
    # created in this function.
    del weight_collections
    del trainable
    if not isinstance(
        self.categorical_column,
        (SequenceCategoricalColumn, fc_old._SequenceCategoricalColumn)):  # pylint: disable=protected-access
      raise ValueError(
          'In indicator_column: {}. '
          'categorical_column must be of type _SequenceCategoricalColumn '
          'to use SequenceFeatures. '
          'Suggested fix: Use one of sequence_categorical_column_with_*. '
          'Given (type {}): {}'.format(self.name, type(self.categorical_column),
                                       self.categorical_column))
    # Feature has been already transformed. Return the intermediate
    # representation created by _transform_feature.
    dense_tensor = inputs.get(self)
    sparse_tensors = self.categorical_column._get_sparse_tensors(inputs)  # pylint: disable=protected-access
    sequence_length = fc_utils.sequence_length_from_sparse_tensor(
        sparse_tensors.id_tensor)
    return SequenceDenseColumn.TensorSequenceLengthPair(
        dense_tensor=dense_tensor, sequence_length=sequence_length)

  @property
  def parents(self):
    """See 'FeatureColumn` base class."""
    return [self.categorical_column]

  def _get_config(self):
    """See 'FeatureColumn` base class."""
    from tensorflow.python.feature_column.serialization import serialize_feature_column  # pylint: disable=g-import-not-at-top
    config = dict(zip(self._fields, self))
    config['categorical_column'] = serialize_feature_column(
        self.categorical_column)
    return config

  @classmethod
  def _from_config(cls, config, custom_objects=None, columns_by_name=None):
    """See 'FeatureColumn` base class."""
    from tensorflow.python.feature_column.serialization import deserialize_feature_column  # pylint: disable=g-import-not-at-top
    _check_config_keys(config, cls._fields)
    kwargs = _standardize_and_copy_config(config)
    kwargs['categorical_column'] = deserialize_feature_column(
        config['categorical_column'], custom_objects, columns_by_name)
    return cls(**kwargs)
def _verify_static_batch_size_equality(tensors, columns):
  """Verify equality between static batch sizes.

  Args:
    tensors: iterable of input tensors.
    columns: Corresponding feature columns.

  Raises:
    ValueError: in case of mismatched batch sizes.
  """
  # expected_batch_size is a Dimension object; its .value is None when the
  # batch dimension is not statically known.
  expected_batch_size = None
  batch_size_column_index = None  # index of the first column with known size
  for i, tensor in enumerate(tensors):
    batch_size = tensor_shape.Dimension(
        tensor_shape.dimension_value(tensor.shape[0]))
    if batch_size.value is not None:
      if expected_batch_size is None:
        # First tensor with a statically known batch size sets the baseline.
        batch_size_column_index = i
        expected_batch_size = batch_size
      elif not expected_batch_size.is_compatible_with(batch_size):
        raise ValueError(
            'Batch size (first dimension) of each feature must be same. '
            'Batch size of columns ({}, {}): ({}, {})'.format(
                columns[batch_size_column_index].name, columns[i].name,
                expected_batch_size, batch_size))
class SequenceCategoricalColumn(
    CategoricalColumn,
    fc_old._SequenceCategoricalColumn,  # pylint: disable=protected-access
    collections.namedtuple('SequenceCategoricalColumn',
                           ('categorical_column'))):
  """Represents sequences of categorical data."""

  @property
  def _is_v2_column(self):
    # v2 only when the wrapped categorical column is itself a v2 column.
    return (isinstance(self.categorical_column, FeatureColumn) and
            self.categorical_column._is_v2_column)  # pylint: disable=protected-access

  @property
  def name(self):
    """See `FeatureColumn` base class."""
    return self.categorical_column.name

  @property
  def parse_example_spec(self):
    """See `FeatureColumn` base class."""
    return self.categorical_column.parse_example_spec

  @property
  @deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
                          _FEATURE_COLUMN_DEPRECATION)
  def _parse_example_spec(self):
    return self.categorical_column._parse_example_spec  # pylint: disable=protected-access

  def transform_feature(self, transformation_cache, state_manager):
    """See `FeatureColumn` base class."""
    # Transformation is fully delegated to the wrapped column.
    return self.categorical_column.transform_feature(transformation_cache,
                                                     state_manager)

  @deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
                          _FEATURE_COLUMN_DEPRECATION)
  def _transform_feature(self, inputs):
    return self.categorical_column._transform_feature(inputs)  # pylint: disable=protected-access

  @property
  def num_buckets(self):
    """Returns number of buckets in this sparse feature."""
    return self.categorical_column.num_buckets

  @property
  @deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
                          _FEATURE_COLUMN_DEPRECATION)
  def _num_buckets(self):
    return self.categorical_column._num_buckets  # pylint: disable=protected-access

  def _get_sparse_tensors_helper(self, sparse_tensors):
    id_tensor = sparse_tensors.id_tensor
    weight_tensor = sparse_tensors.weight_tensor
    # Expands third dimension, if necessary so that embeddings are not
    # combined during embedding lookup. If the tensor is already 3D, leave
    # as-is.
    shape = array_ops.shape(id_tensor)
    # Compute the third dimension explicitly instead of setting it to -1, as
    # that doesn't work for dynamically shaped tensors with 0-length at runtime.
    # This happens for empty sequences.
    target_shape = [shape[0], shape[1], math_ops.reduce_prod(shape[2:])]
    id_tensor = sparse_ops.sparse_reshape(id_tensor, target_shape)
    if weight_tensor is not None:
      # Keep weights aligned with the reshaped ids.
      weight_tensor = sparse_ops.sparse_reshape(weight_tensor, target_shape)
    return CategoricalColumn.IdWeightPair(id_tensor, weight_tensor)

  def get_sparse_tensors(self, transformation_cache, state_manager):
    """Returns an IdWeightPair.

    `IdWeightPair` is a pair of `SparseTensor`s which represents ids and
    weights.

    `IdWeightPair.id_tensor` is typically a `batch_size` x `num_buckets`
    `SparseTensor` of `int64`. `IdWeightPair.weight_tensor` is either a
    `SparseTensor` of `float` or `None` to indicate all weights should be
    taken to be 1. If specified, `weight_tensor` must have exactly the same
    shape and indices as `sp_ids`. Expected `SparseTensor` is same as parsing
    output of a `VarLenFeature` which is a ragged matrix.

    Args:
      transformation_cache: A `FeatureTransformationCache` object to access
        features.
      state_manager: A `StateManager` to create / access resources such as
        lookup tables.
    """
    sparse_tensors = self.categorical_column.get_sparse_tensors(
        transformation_cache, state_manager)
    return self._get_sparse_tensors_helper(sparse_tensors)

  @deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
                          _FEATURE_COLUMN_DEPRECATION)
  def _get_sparse_tensors(self, inputs, weight_collections=None,
                          trainable=None):
    sparse_tensors = self.categorical_column._get_sparse_tensors(inputs)  # pylint: disable=protected-access
    return self._get_sparse_tensors_helper(sparse_tensors)

  @property
  def parents(self):
    """See 'FeatureColumn` base class."""
    return [self.categorical_column]

  def _get_config(self):
    """See 'FeatureColumn` base class."""
    from tensorflow.python.feature_column.serialization import serialize_feature_column  # pylint: disable=g-import-not-at-top
    config = dict(zip(self._fields, self))
    config['categorical_column'] = serialize_feature_column(
        self.categorical_column)
    return config

  @classmethod
  def _from_config(cls, config, custom_objects=None, columns_by_name=None):
    """See 'FeatureColumn` base class."""
    from tensorflow.python.feature_column.serialization import deserialize_feature_column  # pylint: disable=g-import-not-at-top
    _check_config_keys(config, cls._fields)
    kwargs = _standardize_and_copy_config(config)
    kwargs['categorical_column'] = deserialize_feature_column(
        config['categorical_column'], custom_objects, columns_by_name)
    return cls(**kwargs)
def _check_config_keys(config, expected_keys):
"""Checks that a config has all expected_keys."""
if set(config.keys()) != set(expected_keys):
raise ValueError('Invalid config: {}, expected keys: {}'.format(
config, expected_keys))
def _standardize_and_copy_config(config):
"""Returns a shallow copy of config with lists turned to tuples.
Keras serialization uses nest to listify everything.
This causes problems with the NumericColumn shape, which becomes
unhashable. We could try to solve this on the Keras side, but that
would require lots of tracking to avoid changing existing behavior.
Instead, we ensure here that we revive correctly.
Args:
config: dict that will be used to revive a Feature Column
Returns:
Shallow copy of config with lists turned to tuples.
"""
kwargs = config.copy()
for k, v in kwargs.items():
if isinstance(v, list):
kwargs[k] = tuple(v)
return kwargs
| chemelnucfin/tensorflow | tensorflow/python/feature_column/feature_column_v2.py | Python | apache-2.0 | 183,637 |
# Copyright 2013 - Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Solum base exception handling.
Includes decorator for re-raising Solum-type exceptions.
"""
import functools
import pecan
import sys
from keystoneclient import exceptions as keystone_exceptions
from oslo.config import cfg
import six
import wsme
from solum.common import safe_utils
from solum.openstack.common import excutils
from solum.openstack.common.gettextutils import _
from solum.openstack.common import log as logging
# Module-level logger for exception formatting diagnostics.
LOG = logging.getLogger(__name__)

# When 'fatal_exception_format_errors' is True, a badly formatted exception
# message (msg_fmt/kwargs mismatch) is re-raised instead of merely logged;
# see SolumException.__init__.
exc_log_opts = [
    cfg.BoolOpt('fatal_exception_format_errors',
                default=False,
                help='make exception message format errors fatal')
]

CONF = cfg.CONF
CONF.register_opts(exc_log_opts)
def wrap_exception(notifier=None, publisher_id=None, event_type=None,
                   level=None):
    """This decorator wraps a method to catch any exceptions that may
    get thrown. It logs the exception as well as optionally sending
    it to the notification system.
    """
    def inner(f):
        def wrapped(self, context, *args, **kw):
            # Don't store self or context in the payload, it now seems to
            # contain confidential information.
            try:
                return f(self, context, *args, **kw)
            except Exception as e:
                # save_and_reraise_exception re-raises the original exception
                # when the with-block exits, so notification is best-effort
                # and never swallows the error.
                with excutils.save_and_reraise_exception():
                    if notifier:
                        call_dict = safe_utils.getcallargs(f, *args, **kw)
                        payload = dict(exception=e,
                                       private=dict(args=call_dict)
                                       )

                        # Use a temp vars so we don't shadow
                        # our outer definitions.
                        temp_level = level
                        if not temp_level:
                            temp_level = notifier.ERROR

                        temp_type = event_type
                        if not temp_type:
                            # If f has multiple decorators, they must use
                            # functools.wraps to ensure the name is
                            # propagated.
                            temp_type = f.__name__

                        notifier.notify(context, publisher_id, temp_type,
                                        temp_level, payload)

        # Preserve f's name/docstring on the wrapper.
        return functools.wraps(f)(wrapped)
    return inner
def wrap_controller_exception(func):
    """This decorator wraps controllers method to manage wsme exceptions:
    a wsme ClientSideError is raised if a SolumException is thrown.
    """
    @functools.wraps(func)
    def _translate_solum_errors(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except SolumException as excp:
            # Stash the original exception so response middleware can
            # translate it, then surface it as a wsme client-side error
            # carrying the mapped HTTP status code.
            pecan.response.translatable_error = excp
            raise wsme.exc.ClientSideError(six.text_type(excp), excp.code)

    return _translate_solum_errors
def wrap_keystone_exception(func):
    """This decorator wraps keystone exception by throwing Solum specific
    exceptions.
    """
    @functools.wraps(func)
    def _convert_keystone_errors(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except keystone_exceptions.AuthorizationFailure as exc:
            # `exc` is the same object sys.exc_info()[1] would return here.
            raise AuthorizationFailure(
                client=func.__name__, message="reason: %s" % exc)
        except keystone_exceptions.ClientException as exc:
            raise AuthorizationFailure(
                client=func.__name__,
                message="unexpected keystone client error occurred: %s" % exc)

    return _convert_keystone_errors
class SolumException(Exception):
    """Base Solum Exception

    To correctly use this class, inherit from it and define
    a 'msg_fmt' property. That msg_fmt will get printf'd
    with the keyword arguments provided to the constructor.
    """
    # Default format string. The original base class only defined `message`,
    # so __init__'s unconditional read of self.msg_fmt raised AttributeError
    # whenever the base class (or a subclass without msg_fmt) was
    # instantiated; defining it here makes the fallback message work.
    msg_fmt = _("An unknown exception occurred.")
    message = _("An unknown exception occurred.")
    code = 500  # default HTTP status code for unclassified errors

    def __init__(self, **kwargs):
        self.kwargs = kwargs

        if CONF.fatal_exception_format_errors:
            assert isinstance(self.msg_fmt, six.text_type)

        try:
            self.message = self.msg_fmt % kwargs
        except Exception:
            # %-formatting can raise more than KeyError (e.g. TypeError or
            # ValueError on a type mismatch), so catch broadly:
            # kwargs doesn't match a variable in the message;
            # log the issue and the kwargs.
            LOG.exception(_('Exception in string format operation'),
                          extra=dict(
                              private=dict(
                                  msg=self.msg_fmt,
                                  args=kwargs
                              )
                          )
                          )
            if CONF.fatal_exception_format_errors:
                raise

    def __str__(self):
        if six.PY3:
            return self.message
        # Python 2 __str__ must return bytes.
        return self.message.encode('utf-8')

    def __unicode__(self):
        return self.message
class ObjectNotFound(SolumException):
    """A named object does not exist (non-HTTP base; see ResourceNotFound)."""
    msg_fmt = _("The %(name)s %(id)s could not be found.")
class ObjectNotUnique(SolumException):
    """A named object already exists (non-HTTP base; see ResourceExists)."""
    msg_fmt = _("The %(name)s already exists.")
class ResourceNotFound(ObjectNotFound):
    """An API resource was not found; maps to HTTP 404."""
    msg_fmt = _("The %(name)s resource %(id)s could not be found.")
    code = 404
class ResourceExists(ObjectNotUnique):
    """An API resource already exists; maps to HTTP 409 Conflict."""
    msg_fmt = _("The %(name)s resource already exists.")
    code = 409
class BadRequest(SolumException):
    """The request was malformed; maps to HTTP 400."""
    msg_fmt = _("The request is malformed. Reason: %(reason)s")
    code = 400
class NotImplemented(SolumException):
    """Requested operation is not implemented; maps to HTTP 501.

    NOTE: this name shadows the `NotImplemented` builtin within this module;
    renaming would break existing importers, so it is kept as-is.
    """
    msg_fmt = _("The requested operation is not implemented.")
    code = 501
class AuthorizationFailure(SolumException):
    # Raised (also via wrap_keystone_exception) when a backing client
    # connection or authorization attempt fails.
    msg_fmt = _("%(client)s connection failed. %(message)s")
| jamesyli/solum | solum/common/exception.py | Python | apache-2.0 | 6,153 |
import subprocess

# Launch the stock Android Gallery activity on the single attached USB
# device ("-d") via adb.
#
# Bug fix: the original passed one multi-line string to subprocess.call()
# without shell=True, which makes subprocess try to exec a program literally
# named by the whole string -- that always fails with "No such file or
# directory".  Passing an argument list runs adb directly and safely.
subprocess.call([
    "adb", "-d", "shell", "am", "start",
    "-n", "com.android.gallery/com.android.camera.GalleryPicker",
])
| carabri/carabri | test_script/open_gallery.py | Python | apache-2.0 | 123 |
"""
Tools for sending email.
"""
import mimetypes
import os
import smtplib
import socket
import time
import random
from email import Charset, Encoders
from email.MIMEText import MIMEText
from email.MIMEMultipart import MIMEMultipart
from email.MIMEBase import MIMEBase
from email.Header import Header
from email.Utils import formatdate, parseaddr, formataddr
from django.conf import settings
from django.utils.encoding import smart_str, force_unicode
# Don't BASE64-encode UTF-8 messages so that we avoid unwanted attention from
# some spam filters.
Charset.add_charset('utf-8', Charset.SHORTEST, Charset.QP, 'utf-8')
# Default MIME type to use on attachments (if it is not explicitly given
# and cannot be guessed).
DEFAULT_ATTACHMENT_MIME_TYPE = 'application/octet-stream'
# Cache the hostname, but do it lazily: socket.getfqdn() can take a couple of
# seconds, which slows down the restart of the server.
class CachedDnsName(object):
    """Lazily computed, cached wrapper around socket.getfqdn().

    socket.getfqdn() can take a couple of seconds, so the lookup is
    deferred until first use and then memoized on the instance.
    """

    def get_fqdn(self):
        """Return the host's fully-qualified domain name (computed once)."""
        cached = getattr(self, '_fqdn', None)
        if cached is None:
            cached = socket.getfqdn()
            self._fqdn = cached
        return cached

    def __str__(self):
        return self.get_fqdn()
# Module-level singleton: the (potentially slow) FQDN lookup happens at
# most once per process, on first use.
DNS_NAME = CachedDnsName()
# Copied from Python standard library, with the following modifications:
# * Used cached hostname for performance.
# * Added try/except to support lack of getpid() in Jython (#5496).
def make_msgid(idstring=None):
    """Returns a string suitable for RFC 2822 compliant Message-ID, e.g:

    <20020201195627.33539.96671@nightshade.la.mastaler.com>

    Optional idstring if given is a string used to strengthen the
    uniqueness of the message id.
    """
    utcdate = time.strftime('%Y%m%d%H%M%S', time.gmtime(time.time()))
    try:
        pid = os.getpid()
    except AttributeError:
        # Platforms without getpid() (e.g. Jython) get a constant instead.
        pid = 1
    randint = random.randrange(100000)
    suffix = '' if idstring is None else '.' + idstring
    # DNS_NAME is the module-level cached host FQDN; %s triggers __str__.
    return '<%s.%s.%s%s@%s>' % (utcdate, pid, randint, suffix, DNS_NAME)
class BadHeaderError(ValueError):
    # Raised when a header value contains CR/LF (header injection attempt).
    pass
def forbid_multi_line_headers(name, val):
    """Forbids multi-line headers, to prevent header injection.

    Returns the (name, value) pair with the value ASCII-encoded when
    possible, or RFC 2047 encoded via email.Header otherwise.  Raises
    BadHeaderError if the value contains a newline.
    """
    val = force_unicode(val)
    if '\n' in val or '\r' in val:
        raise BadHeaderError("Header values can't contain newlines (got %r for header %r)" % (val, name))
    try:
        val = val.encode('ascii')
    except UnicodeEncodeError:
        if name.lower() in ('to', 'from', 'cc'):
            # Address headers: encode only each display name, never the
            # address itself, so routing still works.
            result = []
            for item in val.split(', '):
                nm, addr = parseaddr(item)
                nm = str(Header(nm, settings.DEFAULT_CHARSET))
                result.append(formataddr((nm, str(addr))))
            val = ', '.join(result)
        else:
            val = Header(val, settings.DEFAULT_CHARSET)
    else:
        # Pure-ASCII subjects are still wrapped so long lines fold properly.
        if name.lower() == 'subject':
            val = Header(val)
    return name, val
class SafeMIMEText(MIMEText):
    # MIMEText variant that routes every header assignment through the
    # header-injection check in forbid_multi_line_headers().
    def __setitem__(self, name, val):
        name, val = forbid_multi_line_headers(name, val)
        MIMEText.__setitem__(self, name, val)
class SafeMIMEMultipart(MIMEMultipart):
    # MIMEMultipart variant that routes every header assignment through the
    # header-injection check in forbid_multi_line_headers().
    def __setitem__(self, name, val):
        name, val = forbid_multi_line_headers(name, val)
        MIMEMultipart.__setitem__(self, name, val)
class SMTPConnection(object):
    """
    A wrapper that manages the SMTP network connection.

    All constructor arguments default to the corresponding EMAIL_* values
    from Django settings.  When fail_silently is True, connection and send
    errors are swallowed instead of raised.
    """

    def __init__(self, host=None, port=None, username=None, password=None,
                 use_tls=None, fail_silently=False):
        self.host = host or settings.EMAIL_HOST
        self.port = port or settings.EMAIL_PORT
        self.username = username or settings.EMAIL_HOST_USER
        self.password = password or settings.EMAIL_HOST_PASSWORD
        # Bug fix: the previous expression
        #   (use_tls is not None) and use_tls or settings.EMAIL_USE_TLS
        # fell through to the settings value whenever use_tls was an
        # explicit False, making it impossible to disable TLS per
        # connection.  Only fall back to settings when use_tls is None.
        self.use_tls = settings.EMAIL_USE_TLS if use_tls is None else use_tls
        self.fail_silently = fail_silently
        self.connection = None

    def open(self):
        """
        Ensures we have a connection to the email server. Returns whether or
        not a new connection was required (True or False).
        """
        if self.connection:
            # Nothing to do if the connection is already open.
            return False
        try:
            # If local_hostname is not specified, socket.getfqdn() gets used.
            # For performance, we use the cached FQDN for local_hostname.
            self.connection = smtplib.SMTP(self.host, self.port,
                                           local_hostname=DNS_NAME.get_fqdn())
            if self.use_tls:
                # EHLO is required both before and after STARTTLS.
                self.connection.ehlo()
                self.connection.starttls()
                self.connection.ehlo()
            if self.username and self.password:
                self.connection.login(self.username, self.password)
            return True
        except:
            # Deliberately broad: fail_silently must cover every failure
            # mode here (DNS, socket, SMTP protocol, authentication).
            if not self.fail_silently:
                raise

    def close(self):
        """Closes the connection to the email server."""
        try:
            try:
                self.connection.quit()
            except socket.sslerror:
                # This happens when calling quit() on a TLS connection
                # sometimes.
                self.connection.close()
        except:
            if self.fail_silently:
                return
            raise
        finally:
            # Always forget the connection object, even if closing failed.
            self.connection = None

    def send_messages(self, email_messages):
        """
        Sends one or more EmailMessage objects and returns the number of email
        messages sent.
        """
        if not email_messages:
            return
        new_conn_created = self.open()
        if not self.connection:
            # We failed silently on open(). Trying to send would be pointless.
            return
        num_sent = 0
        for message in email_messages:
            sent = self._send(message)
            if sent:
                num_sent += 1
        if new_conn_created:
            # Only close connections opened by this call; reuse others.
            self.close()
        return num_sent

    def _send(self, email_message):
        """A helper method that does the actual sending."""
        if not email_message.recipients():
            return False
        try:
            self.connection.sendmail(email_message.from_email,
                                     email_message.recipients(),
                                     email_message.message().as_string())
        except:
            if not self.fail_silently:
                raise
            return False
        return True
class EmailMessage(object):
    """
    A container for email information.

    Builds a (possibly multipart) MIME message from subject/body/attachments
    and sends it over an SMTPConnection.
    """
    content_subtype = 'plain'
    multipart_subtype = 'mixed'
    encoding = None     # None => use settings default

    def __init__(self, subject='', body='', from_email=None, to=None, bcc=None,
                 connection=None, attachments=None, headers=None):
        """
        Initialize a single email message (which can be sent to multiple
        recipients).

        All strings used to create the message can be unicode strings (or UTF-8
        bytestrings). The SafeMIMEText class will handle any necessary encoding
        conversions.
        """
        if to:
            # A bare string would otherwise be iterated character by character.
            assert not isinstance(to, basestring), '"to" argument must be a list or tuple'
            self.to = list(to)
        else:
            self.to = []
        if bcc:
            assert not isinstance(bcc, basestring), '"bcc" argument must be a list or tuple'
            self.bcc = list(bcc)
        else:
            self.bcc = []
        self.from_email = from_email or settings.DEFAULT_FROM_EMAIL
        self.subject = subject
        self.body = body
        self.attachments = attachments or []
        self.extra_headers = headers or {}
        self.connection = connection

    def get_connection(self, fail_silently=False):
        # Lazily create (and then cache) an SMTP connection.
        if not self.connection:
            self.connection = SMTPConnection(fail_silently=fail_silently)
        return self.connection

    def message(self):
        """Build and return the underlying MIME message object."""
        encoding = self.encoding or settings.DEFAULT_CHARSET
        msg = SafeMIMEText(smart_str(self.body, settings.DEFAULT_CHARSET),
                           self.content_subtype, encoding)
        if self.attachments:
            # Wrap the text body in a multipart container with the attachments.
            body_msg = msg
            msg = SafeMIMEMultipart(_subtype=self.multipart_subtype)
            if self.body:
                msg.attach(body_msg)
            for attachment in self.attachments:
                if isinstance(attachment, MIMEBase):
                    # Pre-built MIME object: attach as-is.
                    msg.attach(attachment)
                else:
                    # (filename, content, mimetype) triple.
                    msg.attach(self._create_attachment(*attachment))
        msg['Subject'] = self.subject
        msg['From'] = self.from_email
        msg['To'] = ', '.join(self.to)
        # Email header names are case-insensitive (RFC 2045), so we have to
        # accommodate that when doing comparisons.
        header_names = [key.lower() for key in self.extra_headers]
        if 'date' not in header_names:
            msg['Date'] = formatdate()
        if 'message-id' not in header_names:
            msg['Message-ID'] = make_msgid()
        for name, value in self.extra_headers.items():
            msg[name] = value
        return msg

    def recipients(self):
        """
        Returns a list of all recipients of the email (includes direct
        addressees as well as Bcc entries).
        """
        return self.to + self.bcc

    def send(self, fail_silently=False):
        """Sends the email message."""
        return self.get_connection(fail_silently).send_messages([self])

    def attach(self, filename=None, content=None, mimetype=None):
        """
        Attaches a file with the given filename and content. The filename can
        be omitted (useful for multipart/alternative messages) and the mimetype
        is guessed, if not provided.

        If the first parameter is a MIMEBase subclass it is inserted directly
        into the resulting message attachments.
        """
        if isinstance(filename, MIMEBase):
            # First positional argument is a ready-made MIME object; the
            # other two must not be supplied in that case.
            assert content == mimetype == None
            self.attachments.append(filename)
        else:
            assert content is not None
            self.attachments.append((filename, content, mimetype))

    def attach_file(self, path, mimetype=None):
        """Attaches a file from the filesystem."""
        filename = os.path.basename(path)
        # NOTE(review): the file handle is never explicitly closed here;
        # CPython's refcounting closes it promptly, but other interpreters
        # may not -- consider a with-block if this is ever ported.
        content = open(path, 'rb').read()
        self.attach(filename, content, mimetype)

    def _create_attachment(self, filename, content, mimetype=None):
        """
        Converts the filename, content, mimetype triple into a MIME attachment
        object.
        """
        if mimetype is None:
            mimetype, _ = mimetypes.guess_type(filename)
            if mimetype is None:
                mimetype = DEFAULT_ATTACHMENT_MIME_TYPE
        basetype, subtype = mimetype.split('/', 1)
        if basetype == 'text':
            attachment = SafeMIMEText(smart_str(content,
                settings.DEFAULT_CHARSET), subtype, settings.DEFAULT_CHARSET)
        else:
            # Encode non-text attachments with base64.
            attachment = MIMEBase(basetype, subtype)
            attachment.set_payload(content)
            Encoders.encode_base64(attachment)
        if filename:
            attachment.add_header('Content-Disposition', 'attachment',
                                  filename=filename)
        return attachment
class EmailMultiAlternatives(EmailMessage):
    """
    A version of EmailMessage that makes it easy to send multipart/alternative
    messages. For example, including text and HTML versions of the text is
    made easier.
    """
    # multipart/alternative tells clients the parts are renderings of the
    # same content; they display the richest one they support.
    multipart_subtype = 'alternative'

    def attach_alternative(self, content, mimetype=None):
        """Attach an alternative content representation."""
        self.attach(content=content, mimetype=mimetype)
def send_mail(subject, message, from_email, recipient_list,
              fail_silently=False, auth_user=None, auth_password=None):
    """
    Easy wrapper for sending a single message to a recipient list. All members
    of the recipient list will see the other recipients in the 'To' field.

    If auth_user is None, the EMAIL_HOST_USER setting is used.
    If auth_password is None, the EMAIL_HOST_PASSWORD setting is used.

    Note: The API for this method is frozen. New code wanting to extend the
    functionality should use the EmailMessage class directly.
    """
    connection = SMTPConnection(username=auth_user, password=auth_password,
                                fail_silently=fail_silently)
    return EmailMessage(subject, message, from_email, recipient_list,
                        connection=connection).send()
def send_mass_mail(datatuple, fail_silently=False, auth_user=None,
                   auth_password=None):
    """
    Given a datatuple of (subject, message, from_email, recipient_list), sends
    each message to each recipient list. Returns the number of e-mails sent.

    If from_email is None, the DEFAULT_FROM_EMAIL setting is used.
    If auth_user and auth_password are set, they're used to log in.
    If auth_user is None, the EMAIL_HOST_USER setting is used.
    If auth_password is None, the EMAIL_HOST_PASSWORD setting is used.

    Note: The API for this method is frozen. New code wanting to extend the
    functionality should use the EmailMessage class directly.
    """
    # All messages share one connection, so the SMTP session is opened and
    # closed only once for the whole batch.
    connection = SMTPConnection(username=auth_user, password=auth_password,
                                fail_silently=fail_silently)
    messages = [EmailMessage(subject, message, sender, recipient)
                for subject, message, sender, recipient in datatuple]
    return connection.send_messages(messages)
def mail_admins(subject, message, fail_silently=False):
    """Sends a message to the admins, as defined by the ADMINS setting."""
    # ADMINS entries are (name, address) pairs; only the address is used.
    EmailMessage(settings.EMAIL_SUBJECT_PREFIX + subject, message,
                 settings.SERVER_EMAIL, [a[1] for a in settings.ADMINS]
                 ).send(fail_silently=fail_silently)
def mail_managers(subject, message, fail_silently=False):
    """Sends a message to the managers, as defined by the MANAGERS setting."""
    # MANAGERS entries are (name, address) pairs; only the address is used.
    EmailMessage(settings.EMAIL_SUBJECT_PREFIX + subject, message,
                 settings.SERVER_EMAIL, [a[1] for a in settings.MANAGERS]
                 ).send(fail_silently=fail_silently)
| jamslevy/gsoc | app/django/core/mail.py | Python | apache-2.0 | 14,299 |
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
import numpy as np

# Two side-by-side 3D axes: a parametric curve and a cone-shaped surface.
figure = plt.figure()

# Left subplot: the curve (t, sin t, cos t) over several full periods.
curve_ax = figure.add_subplot(1, 2, 1, projection="3d")
t = np.linspace(-6 * np.pi, 6 * np.pi, 1000)
curve_ax.plot(t, np.sin(t), np.cos(t))

# Right subplot: the surface z = sqrt(x^2 + y^2) over a square grid.
surface_ax = figure.add_subplot(1, 2, 2, projection="3d")
grid_x, grid_y = np.meshgrid(np.arange(-2, 2, 0.1), np.arange(-2, 2, 0.1))
grid_z = np.sqrt(grid_x ** 2 + grid_y ** 2)
surface_ax.plot_surface(grid_x, grid_y, grid_z, cmap=plt.cm.winter)

plt.show()
| tongxindao/shiyanlou | shiyanlou_cs892/sub.py | Python | apache-2.0 | 469 |
# Copyright 2018 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import random
import pytest
import cirq
import cirq.contrib.acquaintance as cca
def random_part_lens(max_n_parts, max_part_size):
    """Return a random tuple of part sizes.

    The tuple has between 1 and max_n_parts entries, each drawn uniformly
    from [1, max_part_size].  The part count is drawn first, then each
    entry, matching the original call order for seeded reproducibility.
    """
    n_parts = random.randint(1, max_n_parts)
    return tuple(random.randint(1, max_part_size) for _ in range(n_parts))
@pytest.mark.parametrize(
    'left_part_lens,right_part_lens',
    [tuple(random_part_lens(7, 2) for _ in ('left', 'right')) for _ in range(5)],
)
def test_shift_swap_network_gate_acquaintance_opps(left_part_lens, right_part_lens):
    """Every left part must be acquainted with every right part."""
    gate = cca.ShiftSwapNetworkGate(left_part_lens, right_part_lens)
    n_qubits = gate.qubit_count()
    qubits = cirq.LineQubit.range(n_qubits)
    strategy = cirq.Circuit(gate(*qubits))

    # actual_opps
    initial_mapping = {q: i for i, q in enumerate(qubits)}
    actual_opps = cca.get_logical_acquaintance_opportunities(strategy, initial_mapping)

    # expected opps: consecutive qubit indices form each part, left parts
    # first, then right parts.
    i = 0
    sides = ('left', 'right')
    parts = {side: [] for side in sides}
    for side, part_lens in zip(sides, (left_part_lens, right_part_lens)):
        for part_len in part_lens:
            parts[side].append(set(range(i, i + part_len)))
            i += part_len
    # Cartesian product: one opportunity per (left part, right part) pair.
    expected_opps = set(
        frozenset(left_part | right_part)
        for left_part, right_part in itertools.product(parts['left'], parts['right'])
    )

    assert actual_opps == expected_opps
circuit_diagrams = {
(
'undecomposed',
(1,) * 3,
(1,) * 3,
): """
0: ───(0, 0, 0)↦(1, 0, 0)───
│
1: ───(0, 1, 0)↦(1, 1, 0)───
│
2: ───(0, 2, 0)↦(1, 2, 0)───
│
3: ───(1, 0, 0)↦(0, 0, 0)───
│
4: ───(1, 1, 0)↦(0, 1, 0)───
│
5: ───(1, 2, 0)↦(0, 2, 0)───
""",
(
'decomposed',
(1,) * 3,
(1,) * 3,
): """
0: ───────────────────────█───╲0╱───────────────────────
│ │
1: ─────────────█───╲0╱───█───╱1╲───█───╲0╱─────────────
│ │ │ │
2: ───█───╲0╱───█───╱1╲───█───╲0╱───█───╱1╲───█───╲0╱───
│ │ │ │ │ │
3: ───█───╱1╲───█───╲0╱───█───╱1╲───█───╲0╱───█───╱1╲───
│ │ │ │
4: ─────────────█───╱1╲───█───╲0╱───█───╱1╲─────────────
│ │
5: ───────────────────────█───╱1╲───────────────────────
""",
(
'undecomposed',
(2,) * 3,
(2,) * 3,
): """
0: ────(0, 0, 0)↦(1, 0, 0)───
│
1: ────(0, 0, 1)↦(1, 0, 1)───
│
2: ────(0, 1, 0)↦(1, 1, 0)───
│
3: ────(0, 1, 1)↦(1, 1, 1)───
│
4: ────(0, 2, 0)↦(1, 2, 0)───
│
5: ────(0, 2, 1)↦(1, 2, 1)───
│
6: ────(1, 0, 0)↦(0, 0, 0)───
│
7: ────(1, 0, 1)↦(0, 0, 1)───
│
8: ────(1, 1, 0)↦(0, 1, 0)───
│
9: ────(1, 1, 1)↦(0, 1, 1)───
│
10: ───(1, 2, 0)↦(0, 2, 0)───
│
11: ───(1, 2, 1)↦(0, 2, 1)───
""",
(
'decomposed',
(2,) * 3,
(2,) * 3,
): """
0: ────────────────────────█───╲0╱───────────────────────
│ │
1: ────────────────────────█───╲1╱───────────────────────
│ │
2: ──────────────█───╲0╱───█───╱2╲───█───╲0╱─────────────
│ │ │ │ │ │
3: ──────────────█───╲1╱───█───╱3╲───█───╲1╱─────────────
│ │ │ │
4: ────█───╲0╱───█───╱2╲───█───╲0╱───█───╱2╲───█───╲0╱───
│ │ │ │ │ │ │ │ │ │
5: ────█───╲1╱───█───╱3╲───█───╲1╱───█───╱3╲───█───╲1╱───
│ │ │ │ │ │
6: ────█───╱2╲───█───╲0╱───█───╱2╲───█───╲0╱───█───╱2╲───
│ │ │ │ │ │ │ │ │ │
7: ────█───╱3╲───█───╲1╱───█───╱3╲───█───╲1╱───█───╱3╲───
│ │ │ │
8: ──────────────█───╱2╲───█───╲0╱───█───╱2╲─────────────
│ │ │ │ │ │
9: ──────────────█───╱3╲───█───╲1╱───█───╱3╲─────────────
│ │
10: ───────────────────────█───╱2╲───────────────────────
│ │
11: ───────────────────────█───╱3╲───────────────────────
""",
(
'undecomposed',
(1, 2, 2),
(2, 1, 2),
): """
0: ───(0, 0, 0)↦(1, 0, 0)───
│
1: ───(0, 1, 0)↦(1, 1, 0)───
│
2: ───(0, 1, 1)↦(1, 1, 1)───
│
3: ───(0, 2, 0)↦(1, 2, 0)───
│
4: ───(0, 2, 1)↦(1, 2, 1)───
│
5: ───(1, 0, 0)↦(0, 0, 0)───
│
6: ───(1, 0, 1)↦(0, 0, 1)───
│
7: ───(1, 1, 0)↦(0, 1, 0)───
│
8: ───(1, 2, 0)↦(0, 2, 0)───
│
9: ───(1, 2, 1)↦(0, 2, 1)───
""",
(
'decomposed',
(1, 2, 2),
(2, 1, 2),
): """
0: ───────────────────────█───╲0╱───────────────────────
│ │
1: ─────────────█───╲0╱───█───╱1╲───────────────────────
│ │ │ │
2: ─────────────█───╲1╱───█───╱2╲───█───╲0╱─────────────
│ │ │ │
3: ───█───╲0╱───█───╱2╲───█───╲0╱───█───╱1╲───█───╲0╱───
│ │ │ │ │ │ │ │
4: ───█───╲1╱───█───╱3╲───█───╲1╱───█───╲0╱───█───╱1╲───
│ │ │ │ │ │ │ │
5: ───█───╱2╲───█───╲0╱───█───╱2╲───█───╲1╱───█───╱2╲───
│ │ │ │ │ │
6: ───█───╱3╲───█───╲1╱───█───╲0╱───█───╱2╲─────────────
│ │ │ │ │ │
7: ─────────────█───╱2╲───█───╲1╱───█───╱3╲─────────────
│ │
8: ───────────────────────█───╱2╲───────────────────────
│ │
9: ───────────────────────█───╱3╲───────────────────────
""",
}
@pytest.mark.parametrize('left_part_lens,right_part_lens', set(key[1:] for key in circuit_diagrams))
def test_shift_swap_network_gate_diagrams(left_part_lens, right_part_lens):
    """Compare circuit diagrams against golden fixtures, before and after
    decomposing the acquaintance gates."""
    gate = cca.ShiftSwapNetworkGate(left_part_lens, right_part_lens)
    n_qubits = gate.qubit_count()
    qubits = cirq.LineQubit.range(n_qubits)
    circuit = cirq.Circuit(gate(*qubits))
    diagram = circuit_diagrams['undecomposed', left_part_lens, right_part_lens]
    cirq.testing.assert_has_diagram(circuit, diagram)

    # expose_acquaintance_gates decomposes the network in place.
    cca.expose_acquaintance_gates(circuit)
    diagram = circuit_diagrams['decomposed', left_part_lens, right_part_lens]
    cirq.testing.assert_has_diagram(circuit, diagram)
def test_shift_swap_network_gate_bad_part_lens():
    """Part lengths must all be strictly positive, on either side."""
    with pytest.raises(ValueError):
        cca.ShiftSwapNetworkGate((0, 1, 1), (2, 2))
    with pytest.raises(ValueError):
        cca.ShiftSwapNetworkGate((-1, 1, 1), (2, 2))
    with pytest.raises(ValueError):
        cca.ShiftSwapNetworkGate((1, 1), (2, 0, 2))
    with pytest.raises(ValueError):
        cca.ShiftSwapNetworkGate((1, 1), (2, -3))
@pytest.mark.parametrize(
    'left_part_lens,right_part_lens',
    [tuple(random_part_lens(2, 2) for _ in ('left', 'right')) for _ in range(5)],
)
def test_shift_swap_network_gate_repr(left_part_lens, right_part_lens):
    """repr() must round-trip, with and without an explicit swap gate."""
    gate = cca.ShiftSwapNetworkGate(left_part_lens, right_part_lens)
    cirq.testing.assert_equivalent_repr(gate)
    gate = cca.ShiftSwapNetworkGate(left_part_lens, right_part_lens, cirq.ZZ)
    cirq.testing.assert_equivalent_repr(gate)
@pytest.mark.parametrize(
    'left_part_lens,right_part_lens',
    [tuple(random_part_lens(2, 2) for _ in ('left', 'right')) for _ in range(5)],
)
def test_shift_swap_network_gate_permutation(left_part_lens, right_part_lens):
    """The decomposed network must implement the permutation the gate declares."""
    gate = cca.ShiftSwapNetworkGate(left_part_lens, right_part_lens)
    n_qubits = gate.qubit_count()
    cca.testing.assert_permutation_decomposition_equivalence(gate, n_qubits)
| quantumlib/Cirq | cirq-core/cirq/contrib/acquaintance/shift_swap_network_test.py | Python | apache-2.0 | 11,590 |
from elftools.dwarf.ranges import RangeEntry, BaseAddressEntry
from elftools.dwarf import constants
def get_die_offset_by_reference(referer_die, attrname, use_abstract_origin=True):
    '''Return the offset of the DIE referred by the given attribute in the referrer DIE.

    Returns None when the attribute is absent (after optionally consulting
    the abstract origin DIE).  Raises NotImplementedError for reference
    forms whose values are not CU-relative offsets.
    '''
    ref = referer_die.attributes.get(attrname, None)
    if attrname != 'DW_AT_abstract_origin' and ref is None and use_abstract_origin:
        # Attribute absent here: retry on the abstract origin DIE.  The
        # attrname guard prevents infinite recursion when looking up
        # DW_AT_abstract_origin itself.
        origin_die = get_origin_die(referer_die)
        if origin_die:
            return get_die_offset_by_reference(origin_die, attrname, use_abstract_origin)
    if ref is None:
        return None
    # Bug fix: the unsupported forms must be checked *before* the
    # startswith('DW_FORM_ref') test -- both 'DW_FORM_ref_sig8' and
    # 'DW_FORM_ref_addr' start with that prefix, so the old order made
    # this branch unreachable and silently computed a wrong CU-relative
    # offset for them (ref_addr values are section-relative, ref_sig8
    # values are type signatures, not offsets).
    if ref.form in ('DW_FORM_ref_sig8', 'DW_FORM_ref_addr'):
        raise NotImplementedError('Type references encoded as %s are not implemented.' % ref.form)
    if ref.form.startswith('DW_FORM_ref'):
        # Reference to a DIE in the current CU: value is CU-relative.
        return referer_die.cu.cu_offset + ref.value
    raise ValueError
def get_die_by_reference(referer_die, attrname, use_abstract_origin=True):
    '''Return the DIE referred by the given attribute in the referrer DIE.

    Returns None when the attribute is absent; raises ValueError when the
    referenced offset matches no DIE in any compilation unit.
    '''
    offset = get_die_offset_by_reference(referer_die, attrname, use_abstract_origin)
    if offset is None:
        return None
    # Fast path: the target usually lives in the referrer's own CU.
    for target_die in referer_die.cu.iter_DIEs():
        if target_die.offset == offset:
            return target_die
    # It's not in the current CU, iterate through all DIEs
    for compilation_unit in referer_die.dwarfinfo.iter_CUs():
        # Bug fix: DIE objects have no 'cu_offset' attribute (the original
        # 'referer_die.cu_offset' raised AttributeError as soon as this
        # fallback ran); compare against the referrer's CU object instead,
        # mirroring the 'referer_die.cu.cu_offset' usage in
        # get_die_offset_by_reference().
        if compilation_unit.cu_offset == referer_die.cu.cu_offset:
            # We've already searched this CU
            continue
        for target_die in compilation_unit.iter_DIEs():
            if target_die.offset == offset:
                return target_die
    raise ValueError
def get_origin_die(die):
    '''Return the abstract-origin DIE of the given DIE, or None.'''
    return get_die_by_reference(die, 'DW_AT_abstract_origin')
def get_attr_form_val(die, what, use_abstract_origin=True):
    '''Return the form and value of the given attribute of the given DIE.

    Falls back to the abstract origin DIE when the attribute is absent and
    use_abstract_origin is true; returns (None, None) if nothing is found.
    '''
    attr = die.attributes.get(what)
    if attr is not None:
        return attr.form, attr.value
    if use_abstract_origin:
        origin_die = get_origin_die(die)
        if origin_die:
            return get_attr_form_val(origin_die, what, use_abstract_origin)
    # Attribute not present anywhere.
    return None, None
def get_attr_val(die, what, use_abstract_origin=True):
    '''Return the value of the given attribute of the given DIE.'''
    # Discard the form; get_attr_form_val yields (None, None) when absent.
    _form, value = get_attr_form_val(die, what, use_abstract_origin)
    return value
def iter_ranges(die):
    '''Yield (low, high) code address ranges covered by the given DIE.

    Abstract inlined-function entries and inlined-subroutine instances yield
    nothing.  DIEs without their own address info inherit their parent's
    ranges.  Adjacent ranges from a DWARF range list are merged.
    '''
    def iter_range_list(ranges):
        def iter_pairs():
            # by default addresses are relative to the CU base address
            base = die.cu.get_top_DIE().attributes['DW_AT_low_pc'].value
            for entry in ranges:
                if isinstance(entry, BaseAddressEntry):
                    # Bug fix: the attribute is spelled 'base_address';
                    # 'base_adress' raised AttributeError whenever a base
                    # selection entry appeared in the range list.
                    base = entry.base_address
                elif isinstance(entry, RangeEntry):
                    yield base + entry.begin_offset, base + entry.end_offset
                else:
                    raise ValueError('Invalid element in range list.')

        def merge_ranges(ranges):
            '''Yield ranges equivalent to the given ones, but simplified if possible.'''
            next_range = (None, None)
            for low, high in sorted(ranges):
                if next_range[1] == low:
                    # Adjacent to the pending range: extend it.
                    next_range = (next_range[0], high)
                else:
                    if next_range[0] is not None:
                        yield next_range
                    next_range = (low, high)
            if next_range[0] is not None:
                yield next_range

        return merge_ranges(iter_pairs())

    if die.tag == 'DW_TAG_subprogram' and 'DW_AT_inline' in die.attributes:
        if die.attributes['DW_AT_inline'].value in (constants.DW_INL_inlined, constants.DW_INL_declared_inlined):
            # inlined function abstract entry: owns no code of its own
            return
    # inlined function instance
    if die.tag == 'DW_TAG_inlined_subroutine':
        return
    if 'DW_AT_ranges' in die.attributes:
        rangelist_offset = die.attributes['DW_AT_ranges'].value
        rl = die.dwarfinfo.range_lists().get_range_list_at_offset(rangelist_offset)
        for low, high in iter_range_list(rl):
            yield low, high
    elif 'DW_AT_low_pc' in die.attributes:
        low = get_attr_val(die, 'DW_AT_low_pc', False)
        # Bug fix: the original `get_attr_form_val(...) or low + 1` never
        # fired -- a (None, None) tuple is truthy -- and the `low + 1`
        # fallback could not be unpacked into two names anyway.  Apply the
        # one-past-low fallback only when DW_AT_high_pc is truly absent.
        high_form, high = get_attr_form_val(die, 'DW_AT_high_pc', False)
        if high is None:
            high = low + 1
        elif high_form.startswith('DW_FORM_data'):
            # Constant-class DW_AT_high_pc is an offset from low_pc (DWARF 4).
            high += low
        yield low, high
    elif die.get_parent():
        # No address info on this DIE: inherit the enclosing DIE's ranges.
        for x in iter_ranges(die.get_parent()):
            yield x
def iter_loclist(loclistptr):
    # Placeholder: iterating a raw location list pointer is not supported yet;
    # callers use iter_expressions() instead.
    raise NotImplementedError
def iter_expressions(die, attr_name='DW_AT_location'):
    '''Yield (low, high, expression) triples for the given location attribute.

    For a single location expression, low and high are None.  For a
    location list, each entry carries its address range.  Yields nothing
    when the attribute is absent.
    '''
    def get_loclist(ptr):
        return die.dwarfinfo.location_lists().get_location_list_at_offset(ptr)
    try:
        location_attr = die.attributes[attr_name]
        if location_attr.form == 'DW_FORM_exprloc':
            # Single location expression
            yield None, None, location_attr.value
        elif location_attr.form.startswith('DW_FORM_block'):
            # Single location expression. This form is not legal for location expressions,
            # but GCC uses it anyway...
            yield None, None, location_attr.value
        elif location_attr.form == 'DW_FORM_sec_offset':
            # Proper DWARF 4 location list reference.
            for low, high, expr in get_loclist(location_attr.value):
                yield low, high, expr
        elif location_attr.form.startswith('DW_FORM_data'):
            # Another illegal form for location expressions used by GCC.
            # addresses are relative to cu base address
            cuaddr = die.cu.get_top_DIE().attributes['DW_AT_low_pc'].value
            for low, high, expr in get_loclist(location_attr.value):
                yield cuaddr + low, cuaddr + high, expr
        else:
            raise ValueError('%s form of DW_AT_location is not supported.' % location_attr.form)
    except KeyError:
        # Attribute absent: nothing to yield.
        pass
| Samsung/ADBI | idk/cachebuilder/dwarftools.py | Python | apache-2.0 | 6,162 |
# coding=utf-8
# Copyright 2022 The Deeplab2 Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for model.builder."""
import os
from absl.testing import parameterized
import tensorflow as tf
from google.protobuf import text_format
from deeplab2 import config_pb2
from deeplab2.model import builder
from deeplab2.model.decoder import motion_deeplab_decoder
from deeplab2.model.encoder import axial_resnet_instances
from deeplab2.model.encoder import mobilenet
# resources dependency
_CONFIG_PATH = 'deeplab2/configs/example'
def _read_proto_file(filename, proto):
    """Parse the text-format proto file at `filename` into `proto` and return it."""
    filename = filename  # OSS: removed internal filename loading.
    with tf.io.gfile.GFile(filename, 'r') as proto_file:
        return text_format.ParseLines(proto_file, proto)
class BuilderTest(tf.test.TestCase, parameterized.TestCase):
  """Checks that builder factory functions return the expected model classes."""

  def test_resnet50_encoder_creation(self):
    # create_encoder should dispatch on the backbone name.
    backbone_options = config_pb2.ModelOptions.BackboneOptions(
        name='resnet50', output_stride=32)
    encoder = builder.create_encoder(
        backbone_options,
        tf.keras.layers.experimental.SyncBatchNormalization)
    self.assertIsInstance(encoder, axial_resnet_instances.ResNet50)

  @parameterized.parameters('mobilenet_v3_large', 'mobilenet_v3_small')
  def test_mobilenet_encoder_creation(self, model_name):
    # Both MobileNetV3 variants should map to the MobileNet encoder class.
    backbone_options = config_pb2.ModelOptions.BackboneOptions(
        name=model_name, use_squeeze_and_excite=True, output_stride=32)
    encoder = builder.create_encoder(
        backbone_options,
        tf.keras.layers.experimental.SyncBatchNormalization)
    self.assertIsInstance(encoder, mobilenet.MobileNet)

  def test_resnet_encoder_creation(self):
    # create_resnet_encoder handles non-standard axial-ResNet names too.
    backbone_options = config_pb2.ModelOptions.BackboneOptions(
        name='max_deeplab_s', output_stride=32)
    encoder = builder.create_resnet_encoder(
        backbone_options,
        bn_layer=tf.keras.layers.experimental.SyncBatchNormalization)
    self.assertIsInstance(encoder, axial_resnet_instances.MaXDeepLabS)

  def test_decoder_creation(self):
    # Decoder type is derived from the example Motion-DeepLab config file.
    proto_filename = os.path.join(
        _CONFIG_PATH, 'example_kitti-step_motion_deeplab.textproto')
    model_options = _read_proto_file(proto_filename, config_pb2.ModelOptions())
    motion_decoder = builder.create_decoder(
        model_options, tf.keras.layers.experimental.SyncBatchNormalization,
        ignore_label=255)
    self.assertIsInstance(motion_decoder,
                          motion_deeplab_decoder.MotionDeepLabDecoder)
if __name__ == '__main__':
tf.test.main()
| google-research/deeplab2 | model/builder_test.py | Python | apache-2.0 | 3,005 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# Copyright Kitware Inc.
#
# Licensed under the Apache License, Version 2.0 ( the "License" );
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
import cherrypy
import mako
import os
from girder import constants
from girder.utility import config
class WebrootBase(object):
    """
    Serves a template file in response to GET requests.

    This will typically be the base class of any non-API endpoints.
    All other HTTP verbs respond 405 Method Not Allowed.
    """
    # Tell cherrypy to dispatch requests to this object's HTTP-verb methods.
    exposed = True

    def __init__(self, templatePath):
        with open(templatePath) as templateFile:
            # This may raise an IOError, but there's no way to recover
            self.template = templateFile.read()

        # Rendering occurs lazily on the first GET request
        self.indexHtml = None
        self.vars = {}
        self.config = config.getConfig()

    def updateHtmlVars(self, vars):
        """
        If any of the variables in the index html need to change, call this
        with the updated set of variables to render the template with.
        """
        self.vars.update(vars)
        # Invalidate the cached render so the next GET re-renders.
        self.indexHtml = None

    def _renderHTML(self):
        return mako.template.Template(self.template).render(**self.vars)

    def GET(self, **params):
        # In development mode re-render on every request so template edits
        # show up without a server restart; otherwise serve the cached page.
        if self.indexHtml is None or self.config['server']['mode'] == 'development':
            self.indexHtml = self._renderHTML()

        return self.indexHtml

    def DELETE(self, **params):
        raise cherrypy.HTTPError(405)

    def PATCH(self, **params):
        raise cherrypy.HTTPError(405)

    def POST(self, **params):
        raise cherrypy.HTTPError(405)

    def PUT(self, **params):
        raise cherrypy.HTTPError(405)
class Webroot(WebrootBase):
    """
    The webroot endpoint simply serves the main index HTML file.
    """
    def __init__(self, templatePath=None):
        if not templatePath:
            templatePath = os.path.join(constants.PACKAGE_DIR,
                                        'utility', 'webroot.mako')
        super(Webroot, self).__init__(templatePath)

        # Defaults consumed by webroot.mako; callers override via
        # updateHtmlVars().
        self.vars = {
            'plugins': [],
            'apiRoot': '',
            'staticRoot': '',
            'title': 'Girder'
        }

    def _renderHTML(self):
        # Before rendering, discover which enabled plugins actually shipped
        # built CSS/JS bundles so the template only links existing files.
        self.vars['pluginCss'] = []
        self.vars['pluginJs'] = []
        builtDir = os.path.join(constants.STATIC_ROOT_DIR, 'clients', 'web',
                                'static', 'built', 'plugins')
        for plugin in self.vars['plugins']:
            if os.path.exists(os.path.join(builtDir, plugin, 'plugin.min.css')):
                self.vars['pluginCss'].append(plugin)
            if os.path.exists(os.path.join(builtDir, plugin, 'plugin.min.js')):
                self.vars['pluginJs'].append(plugin)

        return super(Webroot, self)._renderHTML()
| sutartmelson/girder | girder/utility/webroot.py | Python | apache-2.0 | 3,397 |
import json
import objects
from mifkit.util.mif_encoder import MifEncoder
from mifkit.util.case import keys_to_snake_case
from mifkit.util.case import to_capitalized_camel_case
def dump(mif_object, fp, **kwargs):
    """
    Convert this object into a JSON-encoded string and save it in a file.

    :param mif_object: Object to serialize.
    :type mif_object: Single MifObject-type object or list of MifObject-type objects.
    :param fp: Object to write the serialization to.
    :type fp: File-like object supporting .write() method.
    :param kwargs: Any options available to json.dump().
    :returns: None (json.dump's return value, passed through).
    """
    return json.dump(mif_object, fp, cls=MifEncoder, **kwargs)
def dumps(mif_object, **kwargs):
    """
    Convert this object into a JSON-encoded string.

    :param mif_object: Object to serialize.
    :type mif_object: Single MifObject-type object or list of MifObject-type objects.
    :param kwargs: Any options available to json.dumps().
    :returns: JSON-encoded string produced by MifEncoder.
    """
    return json.dumps(mif_object, cls=MifEncoder, **kwargs)
def load(fp, **kwargs):
    """
    Convert JSON content read from a file-like object to Mif objects.

    :param fp: Object to deserialize from.
    :type fp: File-like object supporting .read() method.
    :param kwargs: Any options available to json.load().
    :return: Single MifObject-type object or list of MifObject-type objects.
    """
    return _to_mif_object(json.load(fp, **kwargs))
def loads(s, **kwargs):
    """
    Convert content in a JSON-encoded string to a Mif object.

    :param s: String to deserialize from.
    :type s: String.
    :param kwargs: Any options available to json.loads().
    :return: Single MifObject-type object or list of MifObject-type objects.
    """
    return _to_mif_object(json.loads(s, **kwargs))
def from_dict(obj):
    """
    Convert content in a list or dictionary to MifObject objects.

    :param obj: Python object to convert to MifObject type.
    :type obj: List or dictionary.
    :return: Single MifObject-type object or list of MifObject-type objects.
    """
    return _to_mif_object(obj)
def _to_mif_object(obj):
    """
    Convert a dictionary, or a list of dictionaries, into MifObject objects.

    :param obj: Dictionary or list to convert.
    :return: A list of MifObject objects (a lone dictionary yields a
        one-element list).
    :raises ValueError: If obj is neither a list nor a dictionary.
    """
    if isinstance(obj, dict):
        # A single top-level record is still returned inside a list.
        return [_dict_to_mif_object(obj)]
    if isinstance(obj, list):
        return [_dict_to_mif_object(entry) for entry in obj]
    raise ValueError('expecting list or dictionary as outermost structure')
def _dict_to_mif_object(obj):
    """
    Convert a dictionary to a MifObject object based on its single key.

    :param obj: Dictionary to convert; must contain exactly one key, whose
        value is a dictionary of constructor arguments for the target type.
    :return: MifObject with the content of obj.
    :raises ValueError: If obj does not contain exactly one key, or if that
        key's value is not a dictionary.
    """
    if len(obj) != 1:
        raise ValueError('Top-level mif object must contain exactly one key')
    # next(iter(obj)) works on both Python 2 and 3; the previous
    # obj.keys()[0] breaks on Python 3, where keys() returns a
    # non-indexable view object.
    key = next(iter(obj))
    value = obj[key]
    if not isinstance(value, dict):
        raise ValueError(key + ' must have a value that is a dictionary')
    # Resolve the class named by the (camel-cased) key and build it from
    # the snake_cased keyword arguments.
    return getattr(objects, to_capitalized_camel_case(key))(**keys_to_snake_case(value))
class Mif(object):
    """
    Legacy class. Don't use this. It's only here to prevent old scripts from breaking.
    """

    def __init__(self, sample=None):
        """
        Constructor.

        :param sample: Sample object or list of Sample objects to wrap.
        """
        super(Mif, self).__init__()
        self.sample = sample

    def to_json(self, indent=None):
        """
        Serialize the wrapped sample(s) as a JSON-encoded string.

        :param indent: Optional indent to apply to the JSON string.
        :returns: JSON-encoded string with the content of this object.
        """
        if indent is None:
            return json.dumps(self.sample)
        return json.dumps(self.sample, indent=indent)
| CitrineInformatics/mifkit | mifkit/mif.py | Python | apache-2.0 | 3,890 |
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import render
from django.core.urlresolvers import reverse
from django.template import RequestContext, loader
from django.views.decorators.csrf import csrf_exempt
def index(request):
    """
    Render the landing page.

    :param request: Incoming HTTP request.
    :return: HttpResponse containing the rendered index.html template.
    """
    template = loader.get_template('index.html')
    # No template variables are needed yet, so the context is empty.
    # (Removed an unused local `result` that was never passed to the template.)
    context = RequestContext(request, {})
    return HttpResponse(template.render(context))
@csrf_exempt
def predict(request):
    """
    Stub prediction endpoint.

    Always answers with a fixed placeholder result. The 'img' POST field is
    read (when present) but not yet used for inference; both the with-image
    and without-image paths produce the identical response, so the field is
    fetched with .get() instead of a try/except KeyError.
    """
    result = 5
    img = request.POST.get('img')
    return HttpResponse("Your predict is %s." % result)
| osgee/django-web-demo | webdemo/views.py | Python | apache-2.0 | 1,352 |
# Fuck you Disyer. Stealing my fucking paypal. GET FUCKED: toontown.parties.PartyCogActivity
from panda3d.core import CollideMask, CollisionHandler, CollisionHandlerEvent, CollisionNode, CollisionSphere, NodePath, Point3, TextNode, Texture
from direct.interval.MetaInterval import Sequence, Parallel, Track
from direct.interval.FunctionInterval import Func, Wait
from direct.interval.SoundInterval import SoundInterval
from direct.interval.ActorInterval import ActorInterval
from direct.interval.ProjectileInterval import ProjectileInterval
from direct.distributed.ClockDelta import globalClockDelta
from direct.showbase.PythonUtil import bound, lerp
from direct.showbase.DirectObject import DirectObject
from toontown.toonbase import ToontownGlobals
from toontown.toonbase import TTLocalizer
from toontown.toonbase.ToontownTimer import ToontownTimer
import PartyGlobals
import PartyCogUtils
from PartyCog import PartyCogManager
from PartyCogActivityPlayer import PartyCogActivityPlayer
from PartyCogActivityPlayer import PartyCogActivityLocalPlayer
from StretchingArrow import StretchingArrow
class PartyCogActivity(DirectObject):
    """
    Client-side view for the party cog (pie-toss) activity.

    Loads and owns the arena scenery, the cogs, the per-toon player objects,
    pie-throw intervals and their collision handling, and the end-of-round
    results presentation.
    """
    notify = directNotify.newCategory('PartyCogActivity')
    # NOTE(review): these are class-level attributes; `players` is a mutable
    # dict shared by every instance unless rebound -- presumably only one
    # PartyCogActivity exists at a time; confirm before instantiating more.
    cog = None
    arena = None
    player = None
    players = {}

    def __init__(self, activity, arenaModel = None, texture = None):
        # activity: the distributed party activity that owns this view.
        self.activity = activity
        self.root = self.activity.root
        self.toonPieTracks = {}  # avId -> currently playing pie-throw track
        self.toonPieEventNames = {}  # collision NodePath -> accepted event name
        self.toonIdsToAnimIntervals = {}  # toonId -> movement/anim interval
        self.pieIvals = []  # all in-flight pie intervals
        self.resultsIval = None  # end-of-round presentation sequence
        self.arenaModel = arenaModel  # model path loaded in load()
        self.texture = texture  # optional reskin texture path
        return

    def load(self):
        # Build the arena and all static scenery, locators, cogs, arrows,
        # distance labels and result text nodes.
        self.arena = loader.loadModel(self.arenaModel)
        self.arena.reparentTo(self.root)
        ground = self.arena.find('**/ground')
        ground.setBin('ground', 1)
        entranceArrows = self.arena.findAllMatches('**/arrowFlat*')
        for arrow in entranceArrows:
            arrow.setBin('ground', 5)
        self.leftEntranceLocator = self.arena.find('**/leftEntrance_locator')
        self.rightEntranceLocator = self.arena.find('**/rightEntrance_locator')
        self.leftExitLocator = self.arena.find('**/leftExit_locator')
        self.rightExitLocator = self.arena.find('**/rightExit_locator')
        self.teamCamPosLocators = (self.arena.find('**/team0CamPos_locator'), self.arena.find('**/team1CamPos_locator'))
        self.teamCamAimLocators = (self.arena.find('**/team0CamAim_locator'), self.arena.find('**/team1CamAim_locator'))
        # One locator per team, rotated to face the arena center.
        leftTeamLocator = NodePath('TeamLocator-%d' % PartyGlobals.TeamActivityTeams.LeftTeam)
        leftTeamLocator.reparentTo(self.root)
        leftTeamLocator.setH(90)
        rightTeamLocator = NodePath('TeamLocator-%d' % PartyGlobals.TeamActivityTeams.RightTeam)
        rightTeamLocator.reparentTo(self.root)
        rightTeamLocator.setH(-90)
        self.teamLocators = (leftTeamLocator, rightTeamLocator)
        self._lengthBetweenEntrances = self.leftEntranceLocator.getY() - self.rightExitLocator.getY()
        self._skyCollisionsCollection = self.arena.findAllMatches('**/cogPieArena_sky*_collision')
        if len(self._skyCollisionsCollection) > 0:
            self._skyCollisionParent = self._skyCollisionsCollection[0].getParent()
        else:
            self._skyCollisionParent = self.arena
        self._wallCollisionsCollection = self.arena.findAllMatches('**/cogPieArena_wall*_collision')
        self._arenaFlagGroups = (self.arena.find('**/flagsL_grp'), self.arena.find('**/flagsR_grp'))
        self._initArenaDoors()
        self.cogManager = PartyCogManager()
        self.arrows = []
        self.distanceLabels = []
        self.teamColors = list(PartyGlobals.CogActivityColors) + [PartyGlobals.TeamActivityStatusColor]
        # Three cogs, each with a pair of tug-of-war arrows and a pair of
        # (initially stashed) distance labels.
        for i in xrange(3):
            start = self.arena.find('**/cog%d_start_locator' % (i + 1))
            end = self.arena.find('**/cog%d_end_locator' % (i + 1))
            cog = self.cogManager.generateCog(self.arena)
            cog.setEndPoints(start.getPos(), end.getPos())
            arrow1 = StretchingArrow(self.arena, useColor='orange')
            arrow2 = StretchingArrow(self.arena, useColor='blue')
            arrow1.setZ(0.1)
            arrow2.setZ(0.1)
            self.arrows.append([arrow1, arrow2])
            distanceLabel = self.createDistanceLabel(0, self.teamColors[1])
            distanceLabel[0].stash()
            distanceLabel2 = self.createDistanceLabel(0, self.teamColors[0])
            distanceLabel2[0].stash()
            self.distanceLabels.append([distanceLabel, distanceLabel2])
        self.winText = []
        text1 = self.createText(0, Point3(-0.5, 0.0, -0.5), self.teamColors[1])
        text2 = self.createText(1, Point3(0.5, 0.0, -0.5), self.teamColors[0])
        self.winText.append(text1)
        self.winText.append(text2)
        self.winStatus = self.createText(2, Point3(0.0, 0.0, -0.8), self.teamColors[0])
        signLocator = self.arena.find('**/eventSign_locator')
        self.activity.sign.setPos(signLocator.getPos(self.root))
        if self.texture:
            # Reskin the arena; alpha texture is derived from the base name.
            textureAlpha = self.texture[:-4] + '_a.rgb'
            reskinTexture = loader.loadTexture(self.texture, textureAlpha)
            self.arena.find('**/center_grp').setTexture(reskinTexture, 100)
            self.arena.find('**/leftSide_grp').setTexture(reskinTexture, 100)
            self.arena.find('**/rightSide_grp').setTexture(reskinTexture, 100)
        self.enable()

    def _initArenaDoors(self):
        # Doors start lowered (offset -7 on Z); each gets a countdown timer.
        self._arenaDoors = (self.arena.find('**/doorL'), self.arena.find('**/doorR'))
        arenaDoorLocators = (self.arena.find('**/doorL_locator'), self.arena.find('**/doorR_locator'))
        for i in xrange(len(arenaDoorLocators)):
            arenaDoorLocators[i].wrtReparentTo(self._arenaDoors[i])
        self._arenaDoorTimers = (self.createDoorTimer(PartyGlobals.TeamActivityTeams.LeftTeam), self.createDoorTimer(PartyGlobals.TeamActivityTeams.RightTeam))
        self._arenaDoorIvals = [None, None]
        self._doorStartPos = []
        for i in xrange(len(self._arenaDoors)):
            door = self._arenaDoors[i]
            timer = self._arenaDoorTimers[i]
            timer.reparentTo(arenaDoorLocators[i])
            timer.hide()
            self._doorStartPos.append(door.getPos())
            door.setPos(door, 0, 0, -7.0)
        return

    def _destroyArenaDoors(self):
        # Tear down door intervals and timers created by _initArenaDoors.
        for ival in self._arenaDoorIvals:
            ival.finish()
        self._arenaDoorIvals = None
        self._arenaDoors = None
        for timer in self._arenaDoorTimers:
            timer.stop()
            timer.removeNode()
        self._arenaDoorTimers = None
        return

    def createDoorTimer(self, team):
        # Build a team-colored countdown timer to display on a door.
        timer = ToontownTimer(useImage=False, highlightNearEnd=False)
        timer['text_font'] = ToontownGlobals.getMinnieFont()
        timer.setFontColor(PartyGlobals.CogActivityColors[team])
        timer.setScale(7.0)
        timer.setPos(0.2, -0.03, 0.0)
        return timer

    def createText(self, number, position, color):
        # Create an aspect2d-parented text node, initially stashed.
        # Returns (TextNode, NodePath).
        text = TextNode('winText%d' % number)
        text.setAlign(TextNode.ACenter)
        text.setTextColor(color)
        text.setFont(ToontownGlobals.getSignFont())
        text.setText('')
        noteText = aspect2d.attachNewNode(text)
        noteText.setScale(0.2)
        noteText.setPos(position)
        noteText.stash()
        return (text, noteText)

    def createDistanceLabel(self, number, color):
        # Create a billboarded in-world distance label.
        # Returns (NodePath, TextNode) -- note the opposite order to createText.
        text = TextNode('distanceText-%d' % number)
        text.setAlign(TextNode.ACenter)
        text.setTextColor(color)
        text.setFont(ToontownGlobals.getSignFont())
        text.setText('10 ft')
        node = self.root.attachNewNode(text)
        node.setBillboardPointEye()
        node.setScale(2.5)
        node.setZ(5.0)
        return (node, text)

    def unload(self):
        # Full teardown: finish every interval, destroy players, cogs,
        # text nodes and the arena itself.
        self.disable()
        self._cleanupResultsIval()
        if self.winText is not None:
            for pair in self.winText:
                pair[1].reparentTo(hidden)
                pair[1].removeNode()
            self.winText = None
        if self.winStatus is not None:
            self.winStatus[1].reparentTo(hidden)
            self.winStatus[1].removeNode()
            self.winStatus = None
        if self.cogManager is not None:
            self.cogManager.unload()
            self.cogManager = None
        if self.arrows is not None:
            for pair in self.arrows:
                for arrow in pair:
                    arrow.destroy()
                    arrow = None
                pair = None
            self.arrows = None
        if self.distanceLabels is not None:
            for pair in self.distanceLabels:
                for node, text in pair:
                    node.removeNode()
                pair = None
            self.distanceLabels = None
        if len(self.players):
            for player in self.players.values():
                player.disable()
                player.destroy()
            self.players.clear()
        self.player = None
        if self.arena is not None:
            self.leftEntranceLocator = None
            self.rightEntranceLocator = None
            self.leftExitLocator = None
            self.rightExitLocator = None
            self._skyCollisions = None
            self._skyCollisionParent = None
            self._arenaFlagGroups = None
            self._destroyArenaDoors()
            self.arena.removeNode()
            self.arena = None
        # Finish any still-playing intervals defensively; a finish() here can
        # raise if nodes were already destroyed, hence the broad try/except.
        for ival in self.toonPieTracks.values():
            if ival is not None and ival.isPlaying():
                try:
                    ival.finish()
                except Exception as theException:
                    self.notify.warning('Ival could not finish:\n %s \nException %s ' % (str(ival), str(theException)))
        self.toonPieTracks = {}
        for ival in self.pieIvals:
            if ival is not None and ival.isPlaying():
                try:
                    ival.finish()
                except Exception as theException:
                    self.notify.warning('Ival could not finish:\n %s \nException %s ' % (str(ival), str(theException)))
        self.pieIvals = []
        self.toonIdsToAnimIntervals = {}
        for eventName in self.toonPieEventNames.values():
            self.ignore(eventName)
        self.toonPieEventNames = {}
        return

    def enable(self):
        self.enableEnterGateCollision()

    def disable(self):
        self.disableEnterGateCollision()
        self.ignoreAll()

    def hideTeamFlags(self, team):
        self._arenaFlagGroups[team].stash()

    def showTeamFlags(self, team):
        self._arenaFlagGroups[team].unstash()

    def _playArenaDoorIval(self, team, opening = True):
        # Animate the given team's door.
        # NOTE(review): `pos` is computed but never used, the `opening` branch
        # builds a one-element tuple, and the interval always targets
        # Point3(0, 0, -7.0) regardless of `opening` -- this looks like
        # leftover/decompiled code; confirm the doors actually move both ways.
        ival = self._arenaDoorIvals[team]
        if ival is not None and ival.isPlaying():
            ival.pause()
        if not opening:
            pos = self._doorStartPos[team]
        else:
            pos = (self._doorStartPos[team] + Point3(0, 0, -7.0),)
        ival = self._arenaDoors[team].posInterval(0.75, Point3(0, 0, -7.0), blendType='easeIn')
        self._arenaDoorIvals[team] = ival
        ival.start()
        return

    def openArenaDoorForTeam(self, team):
        # NOTE(review): passes opening=False, same as closeArenaDoorForTeam --
        # presumably should be opening=True; verify intended behavior.
        self._playArenaDoorIval(team, opening=False)

    def closeArenaDoorForTeam(self, team):
        self._playArenaDoorIval(team, opening=False)

    def openArenaDoors(self):
        self.enableEnterGateCollision()
        for i in xrange(len(self._arenaDoors)):
            self.openArenaDoorForTeam(i)

    def closeArenaDoors(self):
        self.disableEnterGateCollision()
        for i in xrange(len(self._arenaDoors)):
            self.closeArenaDoorForTeam(i)

    def showArenaDoorTimers(self, duration):
        # Start both door countdown timers at `duration` seconds.
        for timer in self._arenaDoorTimers:
            timer.setTime(duration)
            timer.countdown(duration)
            timer.show()

    def hideArenaDoorTimers(self):
        for timer in self._arenaDoorTimers:
            timer.hide()

    def enableEnterGateCollision(self):
        # One-shot triggers: re-armed each time the doors open.
        self.acceptOnce('entercogPieArena_entranceLeft_collision', self.handleEnterLeftEntranceTrigger)
        self.acceptOnce('entercogPieArena_entranceRight_collision', self.handleEnterRightEntranceTrigger)

    def disableEnterGateCollision(self):
        self.ignore('entercogPieArena_entranceLeft_collision')
        self.ignore('entercogPieArena_entranceRight_collision')

    def enableWallCollisions(self):
        self._wallCollisionsCollection.unstash()

    def disableWallCollisions(self):
        self._wallCollisionsCollection.stash()

    def enableSkyCollisions(self):
        self._skyCollisionsCollection.unstash()

    def disableSkyCollisions(self):
        self._skyCollisionsCollection.stash()

    def handleEnterLeftEntranceTrigger(self, collEntry):
        # Local toon walked into the left gate: ask the server to join left.
        self.activity.d_toonJoinRequest(PartyGlobals.TeamActivityTeams.LeftTeam)

    def handleEnterRightEntranceTrigger(self, collEntry):
        self.activity.d_toonJoinRequest(PartyGlobals.TeamActivityTeams.RightTeam)

    def checkOrthoDriveCollision(self, oldPos, newPos):
        # Clamp the local toon's proposed position to the playable strip.
        # NOTE(review): bound() is called with min -17.25 > max -24.1 on Y --
        # confirm bound() tolerates swapped limits or whether the arguments
        # are reversed.
        x = bound(newPos[0], -16.8, 16.8)
        y = bound(newPos[1], -17.25, -24.1)
        newPos.setX(x)
        newPos.setY(y)
        return newPos

    def getPlayerStartPos(self, team, spot):
        # Spread players evenly along their team's side, indexed by `spot`.
        if team == PartyGlobals.TeamActivityTeams.LeftTeam:
            node = self.leftExitLocator
        else:
            node = self.rightExitLocator
        d = self._lengthBetweenEntrances / (self.activity.getMaxPlayersPerTeam() + 1)
        yOffset = node.getY(self.root) + d * (spot + 1)
        pos = node.getPos(self.root)
        pos.setY(yOffset)
        return pos

    def handleToonJoined(self, toon, team, lateEntry = False):
        # Create the local or remote player wrapper for a newly joined toon.
        pos = self.getPlayerStartPos(team, self.activity.getIndex(toon.doId, team))
        if toon == base.localAvatar:
            player = PartyCogActivityLocalPlayer(self.activity, pos, team, self.handleToonExited)
            player.entersActivity()
            self.player = player
            self.disableSkyCollisions()
            self.playPlayerEnterIval()
        else:
            player = PartyCogActivityPlayer(self.activity, toon, pos, team)
            player.entersActivity()
            if lateEntry:
                player.updateToonPosition()
        self.players[toon.doId] = player

    def handleToonSwitchedTeams(self, toon):
        # Move an existing player to the opposite team's start position.
        toonId = toon.doId
        player = self.players.get(toonId)
        if player is None:
            self.notify.warning('handleToonSwitchedTeams: toonId %s not found' % toonId)
            return
        else:
            team = self.activity.getTeam(toonId)
            spot = self.activity.getIndex(toonId, team)
            pos = self.getPlayerStartPos(team, spot)
            self.finishToonIval(toonId)
            player.setTeam(team)
            player.setToonStartPosition(pos)
            player.updateToonPosition()
            return

    def handleToonShifted(self, toon):
        # A teammate left; compact remaining players into the freed spots.
        toonId = toon.doId
        if toonId in self.players:
            player = self.players[toonId]
            spot = self.activity.getIndex(toonId, player.team)
            pos = self.getPlayerStartPos(player.team, spot)
            player.setToonStartPosition(pos)
            if self.player is not None and toon == self.player.toon:
                self.playToonIval(base.localAvatar.doId, self.player.getRunToStartPositionIval())
        return

    def handleToonDisabled(self, toonId):
        # Toon's distributed object went away: stop its intervals and drop it.
        self.finishToonIval(toonId)
        self.finishPieIvals(toonId)
        player = self.players.get(toonId)
        if player is not None:
            player.disable()
            if player == self.player:
                self.player = None
            del self.players[toonId]
        return

    def finishPieIvals(self, toonId):
        # Finish any in-flight pie interval whose name embeds this toonId.
        for ival in self.pieIvals:
            if ival.isPlaying():
                if ival.getName().find(str(toonId)) != -1:
                    ival.finish()

    def playPlayerEnterIval(self):
        # Run the local player to their start spot; the team-switch button is
        # only valid while waiting, and never while the run is in progress.

        def conditionallyShowSwitchButton(self = self, enable = True):
            if enable and self.activity.activityFSM.state in ('WaitForEnough', 'WaitToStart'):
                self.activity.teamActivityGui.enableSwitchButton()
            else:
                self.activity.teamActivityGui.disableSwitchButton()

        ival = Sequence(Func(self.disableWallCollisions), Func(conditionallyShowSwitchButton, self, False), self.player.getRunToStartPositionIval(), Func(conditionallyShowSwitchButton, self, True), Func(self.enableWallCollisions))
        self.playToonIval(base.localAvatar.doId, ival)

    def finishToonIval(self, toonId):
        if self.toonIdsToAnimIntervals.get(toonId) is not None and self.toonIdsToAnimIntervals[toonId].isPlaying():
            self.toonIdsToAnimIntervals[toonId].finish()
        return

    def playToonIval(self, toonId, ival):
        # Replace (finishing first) any running interval for this toon.
        self.finishToonIval(toonId)
        self.toonIdsToAnimIntervals[toonId] = ival
        ival.start()

    def startActivity(self, timestamp):
        # Round start: set up the pie collision handler, reset scores, and
        # activate the cogs synchronized to `timestamp`.
        self.pieHandler = CollisionHandlerEvent()
        self.pieHandler.setInPattern('pieHit-%fn')
        if self.player is not None:
            self.player.resetScore()
            self.hideTeamFlags(self.player.team)
        for player in self.players.values():
            self.finishToonIval(player.toon.doId)
            player.enable()
        for cog in self.cogManager.cogs:
            cog.request('Active', timestamp)
        for ival in self.pieIvals:
            if ival.isPlaying():
                ival.finish()
        self.pieIvals = []
        return

    def stopActivity(self):
        # Round end: freeze players and cogs, drop pie-hit listeners.
        for player in self.players.values():
            player.disable()
        for eventName in self.toonPieEventNames.values():
            self.ignore(eventName)
        self.toonPieEventNames.clear()
        for cog in self.cogManager.cogs:
            cog.request('Static')

    def handleToonExited(self, toon):
        # A toon left the activity; clean up its player wrapper.
        self.finishToonIval(toon.doId)
        player = self.players[toon.doId]
        player.disable()
        player.exitsActivity()
        player.destroy()
        if player == self.player:
            self.showTeamFlags(self.activity.getTeam(toon.doId))
            self.player = None
            self.enableEnterGateCollision()
            self.enableSkyCollisions()
        del self.players[toon.doId]
        return

    def pieThrow(self, avId, timestamp, heading, pos, power):
        # Play a pie throw for avId. For the local avatar, also attach a
        # collision sphere to the flying pie so hits can be detected.
        toon = self.activity.getAvatar(avId)
        if toon is None:
            return
        else:
            tossTrack, pieTrack, flyPie = self.getTossPieInterval(toon, pos[0], pos[1], pos[2], heading, 0, 0, power)
            if avId == base.localAvatar.doId:
                flyPie.setTag('throwerId', str(avId))
                collSphere = CollisionSphere(0, 0, 0, 0.5)
                collSphere.setTangible(0)
                name = 'PieSphere-%d' % avId
                collSphereName = self.activity.uniqueName(name)
                collNode = CollisionNode(collSphereName)
                collNode.setFromCollideMask(ToontownGlobals.PieBitmask)
                collNode.addSolid(collSphere)
                collNP = flyPie.attachNewNode(collNode)
                base.cTrav.addCollider(collNP, self.pieHandler)
                self.toonPieEventNames[collNP] = 'pieHit-' + collSphereName
                self.accept(self.toonPieEventNames[collNP], self.handlePieCollision)
            else:
                player = self.players.get(avId)
                if player is not None:
                    player.faceForward()

            def matchRunningAnim(toon = toon):
                # Restore run animation state after the throw finishes.
                toon.playingAnim = None
                toon.setSpeed(toon.forwardSpeed, toon.rotateSpeed)
                return

            newTossTrack = Sequence(tossTrack, Func(matchRunningAnim))
            pieTrack = Parallel(newTossTrack, pieTrack, name='PartyCogActivity.pieTrack-%d-%s' % (avId, timestamp))
            # Skip ahead to compensate for network latency, but never before
            # the release frame (16/24 s into the throw animation).
            elapsedTime = globalClockDelta.localElapsedTime(timestamp)
            if elapsedTime < 16.0 / 24.0:
                elapsedTime = 16.0 / 24.0
            pieTrack.start(elapsedTime)
            self.pieIvals.append(pieTrack)
            self.toonPieTracks[avId] = pieTrack
            return

    def getTossPieInterval(self, toon, x, y, z, h, p, r, power, beginFlyIval = Sequence()):
        # Build (toss, fly, flyPie): the throw animation track, the pie
        # flight track, and the flying pie node itself.
        # NOTE(review): `beginFlyIval = Sequence()` is a mutable default
        # shared across calls -- appears harmless as it is only composed into
        # the fly track, but confirm.
        from toontown.toonbase import ToontownBattleGlobals
        from toontown.battle import BattleProps
        pie = toon.getPieModel()
        pie.setScale(0.5)
        flyPie = pie.copyTo(NodePath('a'))
        pieName = ToontownBattleGlobals.pieNames[toon.pieType]
        pieType = BattleProps.globalPropPool.getPropType(pieName)
        animPie = Sequence()
        if pieType == 'actor':
            animPie = ActorInterval(pie, pieName, startFrame=48)
        sound = loader.loadSfx('phase_3.5/audio/sfx/AA_pie_throw_only.ogg')
        # Throw power (0-100) scales both flight distance and duration.
        t = power / 100.0
        dist = lerp(PartyGlobals.CogActivityPieMinDist, PartyGlobals.CogActivityPieMaxDist, t)
        time = lerp(1.0, 1.5, t)
        # Use a throwaway projectile interval purely to compute launch velocity.
        proj = ProjectileInterval(None, startPos=Point3(0, 0, 0), endPos=Point3(0, dist, 0), duration=time)
        relVel = proj.startVel

        def getVelocity(toon = toon, relVel = relVel):
            return render.getRelativeVector(toon, relVel) * 0.6

        def __safeSetAnimState(toon = toon, state = 'Happy'):
            # Guard against the toon being destroyed mid-interval.
            if toon and hasattr(toon, 'animFSM'):
                toon.setAnimState('Happy')
            else:
                self.notify.warning('The toon is being destroyed. No attribute animState.')

        toss = Track((0, Sequence(Func(toon.setPosHpr, x, y, z, h, p, r), Func(pie.reparentTo, toon.rightHand), Func(pie.setPosHpr, 0, 0, 0, 0, 0, 0), animPie, Parallel(ActorInterval(toon, 'throw', startFrame=48, playRate=1.5, partName='torso'), animPie), Func(__safeSetAnimState, toon, 'Happy'))), (16.0 / 24.0, Func(pie.detachNode)))
        fly = Track((14.0 / 24.0, SoundInterval(sound, node=toon, cutOff=PartyGlobals.PARTY_COG_CUTOFF)), (16.0 / 24.0, Sequence(Func(flyPie.reparentTo, render), Func(flyPie.setPosHpr, toon, 0.52, 0.97, 2.24, 0, -45, 0), beginFlyIval, ProjectileInterval(flyPie, startVel=getVelocity, duration=6), Func(flyPie.detachNode))))
        return (toss, fly, flyPie)

    def handlePieCollision(self, colEntry):
        # Local pie hit something: either a cog (score + push) or an
        # opposing-team toon. Reports the hit to the server.
        if not self.activity.isState('Active') or self.player is None:
            return
        else:
            handled = False
            into = colEntry.getIntoNodePath()
            intoName = into.getName()
            timestamp = globalClockDelta.localToNetworkTime(globalClock.getFrameTime(), bits=32)
            if 'PartyCog' in intoName:
                if self.toonPieTracks.get(base.localAvatar.doId) is not None:
                    self.toonPieTracks[base.localAvatar.doId].finish()
                    self.toonPieTracks[base.localAvatar.doId] = None
                parts = intoName.split('-')
                cogID = int(parts[1])
                point = colEntry.getSurfacePoint(self.cogManager.cogs[cogID].root)
                cog = self.cogManager.cogs[cogID]
                # Head shots score more; arm hits never count as head shots.
                hitHead = point.getZ() > cog.getHeadLocation() and not parts[2].startswith('Arm')
                if self.activity.getTeam(base.localAvatar.doId) == PartyGlobals.TeamActivityTeams.LeftTeam:
                    direction = -1.0
                else:
                    direction = 1.0
                self.activity.b_pieHitsCog(timestamp, cogID, point, direction, hitHead)
                if hitHead:
                    hitPoints = self.player.hitHead()
                else:
                    hitPoints = self.player.hitBody()
                self.player.updateScore()
                if hitPoints > 0:
                    cog.showHitScore(hitPoints)
                handled = True
            elif 'distAvatarCollNode' in intoName:
                parts = intoName.split('-')
                hitToonId = int(parts[1])
                toon = base.cr.doId2do.get(hitToonId)
                if toon is not None and self.activity.getTeam(hitToonId) != self.player.team:
                    point = colEntry.getSurfacePoint(toon)
                    self.activity.b_pieHitsToon(hitToonId, timestamp, point)
                    handled = True
            if handled:
                # Each pie's collision event only fires once; stop listening.
                eventName = self.toonPieEventNames.get(colEntry.getFromNodePath())
                if eventName is not None:
                    self.ignore(eventName)
                    del self.toonPieEventNames[colEntry.getFromNodePath()]
            return

    def pieHitsCog(self, timestamp, cogNum, pos, direction, part):
        # Server-broadcast: some toon's pie hit a cog.
        cog = self.cogManager.cogs[cogNum]
        cog.respondToPieHit(timestamp, pos, part, direction)

    def pieHitsToon(self, toonId, timestamp, pos):
        # Server-broadcast: some toon's pie hit another toon.
        player = self.players.get(toonId)
        if player is not None:
            player.respondToPieHit(timestamp, pos)
        return

    def setCogDistances(self, distances):
        self.cogManager.updateDistances(distances)

    def showCogs(self):
        for cog in self.cogManager.cogs:
            cog.request('Static')

    def hideCogs(self):
        for cog in self.cogManager.cogs:
            cog.request('Down')

    def showResults(self, resultsText, winner, totals):
        # Run the end-of-round presentation for the local player: status
        # text, camera pan, per-cog arrows, totals, then the winner banner.
        if self.player is None:
            return None
        base.localAvatar.showName()
        self.resultsIval = Sequence(Wait(0.1), Func(self.activity.setStatus, TTLocalizer.PartyCogTimeUp), Func(self.activity.showStatus), Wait(2.0), Func(self.activity.hideStatus), Wait(0.5), Func(self.player.lookAtArena), Func(self.showTeamFlags, self.activity.getTeam(base.localAvatar.doId)), Wait(1.0), Func(self.showArrow, 0), Wait(1.3), Func(self.showArrow, 1), Wait(1.3), Func(self.showArrow, 2), Wait(1.3), Func(self.showTotals, totals), Wait(1.0), Func(self.showWinner, resultsText, winner), Func(self._cleanupResultsIval), name='PartyCog-conclusionSequence')
        # The jellybean reward screen preempts the results sequence.
        self.accept('DistributedPartyActivity-showJellybeanReward', self._cleanupResultsIval)
        self.resultsIval.start()
        return None

    def _cleanupResultsIval(self):
        if self.resultsIval:
            if self.resultsIval.isPlaying():
                self.resultsIval.pause()
            self.resultsIval = None
        self.ignore('DistributedPartyActivity-showJellybeanReward')
        return

    def showTotals(self, totals):
        # Convert raw push totals into per-team distance figures.
        newtotals = (totals[1] - totals[0] + PartyGlobals.CogActivityArenaLength / 2.0 * 3, totals[0] - totals[1] + PartyGlobals.CogActivityArenaLength / 2.0 * 3)
        self.winText[0][0].setText(TTLocalizer.PartyCogDistance % newtotals[0])
        self.winText[1][0].setText(TTLocalizer.PartyCogDistance % newtotals[1])
        for textPair in self.winText:
            textPair[1].unstash()

    def hideTotals(self):
        for textPair in self.winText:
            textPair[0].setText('')
            textPair[1].stash()

    def showWinner(self, text, winner):
        self.winStatus[0].setText(text)
        self.winStatus[0].setTextColor(self.teamColors[winner])
        self.winStatus[1].unstash()

    def hideWinner(self):
        self.winStatus[0].setText('')
        self.winStatus[1].stash()

    def showArrow(self, arrowNum):
        # Draw both teams' arrows for one cog and place/update the distance
        # labels on either side of the cog. `i` walks -1 then +1 to address
        # the two sides.
        arrows = self.arrows[arrowNum]
        cog = self.cogManager.cogs[arrowNum]
        points = [self.arena.find('**/cog%d_start_locator' % (arrowNum + 1)), self.arena.find('**/cog%d_end_locator' % (arrowNum + 1))]
        Y = cog.root.getY()
        for point in points:
            point.setY(Y)
        for i in xrange(len(arrows)):
            arrow = arrows[i]
            arrow.draw(points[i].getPos(), cog.root.getPos(), animate=False)
            arrow.unstash()
        i = -1
        length = PartyGlobals.CogActivityArenaLength
        for node, text in self.distanceLabels[arrowNum]:
            current = bound(i, 0, 1)
            node.setPos(cog.root.getPos(self.root) + Point3(i * 4, 2, 4))
            dist = PartyCogUtils.getCogDistanceUnitsFromCenter(cog.currentT)
            dist = abs(dist - i * length / 2)
            if dist > length - dist:
                node.setScale(2.8)
            else:
                node.setScale(2.2)
            text.setText(TTLocalizer.PartyCogDistance % dist)
            if dist > 0:
                node.unstash()
            else:
                arrows[current].stash()
            i += 2

    def hideArrows(self):
        for pair in self.arrows:
            for arrow in pair:
                arrow.stash()
        for pair in self.distanceLabels:
            for node, text in pair:
                node.stash()

    def hideResults(self):
        self.hideArrows()
        self.hideTotals()
        self.hideWinner()
# Copyright 2016 Euclidean Technologies Management LLC All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
import numpy as np
import tensorflow as tf
from deep_nn_model import DeepNNModel
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
class DeepMlpModel(DeepNNModel):
"""
A Deep MLP Model that supports a mult-class output with an
arbitrary number of fixed width hidden layers.
"""
def __init__(self, num_layers, num_inputs, num_hidden, num_outputs,
num_unrollings,
max_grad_norm=5.0,
hidden_dropout=True,
input_dropout=False,
skip_connections=False,
embedding_size=0,
optimizer='gd'):
"""
Initialize the model
Args:
num_layers: number of hidden layers
num_inputs: number input units. this should be less than or
or equal to width of feature data in the data file
num_hidden: number of hidden units in each hidden layer
num_unrollings: the size of the time window processed in
each step (see step() function below)
batch_size: the size of the data batch processed in each step
max_grad_norm: max gardient norm size for gradient clipping
input_dropout: perform dropout on input layer
"""
self._num_unrollings = num_unrollings
self._num_inputs = num_inputs
total_input_size = num_unrollings * num_inputs
batch_size = self._batch_size = tf.placeholder(tf.int32, shape=[])
self._seq_lengths = tf.placeholder(tf.int64, shape=[None])
self._keep_prob = tf.placeholder(tf.float32, shape=[])
self._inputs = list()
self._targets = list()
self._train_mask = list() # Weights for loss functions per example
self._valid_mask = list() # Weights for loss functions per example
for _ in range(num_unrollings):
self._inputs.append( tf.placeholder(tf.float32,
shape=[None,num_inputs]) )
self._targets.append( tf.placeholder(tf.float32,
shape=[None,num_outputs]) )
self._train_mask.append(tf.placeholder(tf.float32, shape=[None]))
self._valid_mask.append(tf.placeholder(tf.float32, shape=[None]))
inputs = tf.reverse_sequence(tf.concat( self._inputs, 1 ),
self._seq_lengths*num_inputs,
seq_axis=1,batch_axis=0)
if input_dropout is True: inputs = self._input_dropout(inputs)
num_prev = total_input_size
outputs = inputs
if embedding_size > 0:
time_weights = tf.get_variable("t_weights",[num_unrollings,embedding_size,1])
feature_weights = tf.get_variable("f_weights",[1,embedding_size,num_inputs])
embedding_weights = tf.reshape( time_weights*feature_weights,
[num_unrollings*num_inputs, embedding_size] )
biases = tf.get_variable("embedding_biases",[embedding_size])
outputs = tf.nn.relu(tf.nn.xw_plus_b(inputs,embedding_weights,biases))
num_prev = embedding_size
for i in range(num_layers):
weights = tf.get_variable("hidden_w_%d"%i,[num_prev, num_hidden])
biases = tf.get_variable("hidden_b_%d"%i,[num_hidden])
outputs = tf.nn.relu(tf.nn.xw_plus_b(outputs, weights, biases))
if hidden_dropout is True:
outputs = tf.nn.dropout(outputs, self._keep_prob)
num_prev = num_hidden
if skip_connections is True:
num_prev = num_inputs+num_prev
skip_inputs = tf.slice(inputs, [0, 0], [batch_size, num_inputs] )
outputs = tf.concat( [ skip_inputs, outputs], 1)
softmax_b = tf.get_variable("softmax_b", [num_outputs])
softmax_w = tf.get_variable("softmax_w", [num_prev, num_outputs])
logits = tf.nn.xw_plus_b(outputs, softmax_w, softmax_b)
targets = tf.unstack(tf.reverse_sequence(tf.reshape(
tf.concat(self._targets, 1),[batch_size,num_unrollings,num_outputs] ),
self._seq_lengths,seq_axis=1,batch_axis=0),axis=1)[0]
agg_loss = tf.nn.softmax_cross_entropy_with_logits(labels=targets,logits=logits)
train_mask = tf.unstack(tf.reverse_sequence(tf.transpose(
tf.reshape( tf.concat(self._train_mask, 0 ),
[num_unrollings, batch_size] ) ),
self._seq_lengths,seq_axis=1,batch_axis=0),axis=1)[0]
valid_mask = tf.unstack(tf.reverse_sequence(tf.transpose(
tf.reshape( tf.concat(self._valid_mask, 0),
[num_unrollings, batch_size] ) ),
self._seq_lengths,seq_axis=1,batch_axis=0),axis=1)[0]
train_loss = tf.multiply(agg_loss, train_mask)
valid_loss = tf.multiply(agg_loss, valid_mask)
self._loss = self._train_loss = train_loss
self._valid_loss = valid_loss
self._train_evals = tf.reduce_sum( train_mask )
self._valid_evals = tf.reduce_sum( valid_mask )
self._train_cst = tf.reduce_sum( train_loss )
self._valid_cst = tf.reduce_sum( valid_loss )
self._predictions = tf.nn.softmax(logits)
self._class_predictions = tf.one_hot(tf.argmax(self._predictions,1),
num_outputs, axis=-1)
accy = tf.multiply(self._class_predictions, targets)
train_accy = tf.multiply(accy,tf.reshape(train_mask,
shape=[batch_size,1]))
valid_accy = tf.multiply(accy,tf.reshape(valid_mask,
shape=[batch_size,1]))
self._train_accy = tf.reduce_sum( train_accy )
self._valid_accy = tf.reduce_sum( valid_accy )
self._cost = self._train_cst
self._accy = self._train_accy
self._evals = self._train_evals
self._batch_cst = self._train_cst / (self._train_evals + 1.0)
# here is the learning part of the graph
tvars = tf.trainable_variables()
grads = tf.gradients(self._batch_cst,tvars)
if (max_grad_norm > 0):
grads, _ = tf.clip_by_global_norm(grads,max_grad_norm)
self._lr = tf.Variable(0.0, trainable=False)
optim = None
if optimizer == 'gd':
optim = tf.train.GradientDescentOptimizer(self._lr)
elif optimizer == 'adagrad':
optim = tf.train.AdagradOptimizer(self._lr)
elif optimizer == 'adam':
optim = tf.train.AdamOptimizer(self._lr)
elif optimizer == 'mo':
optim = tf.train.MomentumOptimizer(self._lr)
else:
raise RuntimeError("Unknown optimizer = %s"%optimizer)
self._train_op = optim.apply_gradients(zip(grads, tvars))
  def _input_dropout(self,inputs):
    """Apply inverted dropout that drops whole input features across time.

    A single Bernoulli mask of shape [batch_size, num_inputs] is sampled and
    repeated for every unrolling step, so a dropped feature is zeroed at all
    time steps of the sequence rather than independently per step.
    """
    # This implementation of dropout dropouts an entire feature along the time dim
    # keep_prob + U(0,1): entries >= 1.0 survive after floor() below.
    random_tensor = self._keep_prob
    random_tensor += random_ops.random_uniform([self._batch_size,self._num_inputs],
                                                   dtype=inputs.dtype)
    # Repeat the per-feature mask once per unrolling step. Assumes `inputs` is
    # laid out as [batch_size, num_unrollings * num_inputs] -- TODO confirm.
    random_tensor = tf.tile(random_tensor,[1,self._num_unrollings])
    binary_tensor = math_ops.floor(random_tensor)
    # Inverted dropout: scale kept activations by 1/keep_prob at train time so
    # no rescaling is needed at inference.
    ret = math_ops.div(inputs, self._keep_prob) * binary_tensor
    ret.set_shape(inputs.get_shape())
    return ret
| euclidjda/dnn-quant | scripts/deep_mlp_model.py | Python | apache-2.0 | 7,953 |
import torch.nn as nn
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.models.registry import register_model
from .helpers import build_model_with_cfg
def _cfg(url='', **kwargs):
    """Return a default pretrained-weight config dict; **kwargs override defaults."""
    cfg = {
        'url': url,
        'num_classes': 1000,
        'input_size': (3, 224, 224),
        'pool_size': None,
        'crop_pct': .96,
        'interpolation': 'bicubic',
        'mean': IMAGENET_DEFAULT_MEAN,
        'std': IMAGENET_DEFAULT_STD,
        'classifier': 'head',
        'first_conv': 'stem.0',
    }
    cfg.update(kwargs)
    return cfg
# Pretrained weight configurations, keyed by model variant name.
default_cfgs = {
    'convmixer_1536_20': _cfg(url='https://github.com/tmp-iclr/convmixer/releases/download/timm-v1.0/convmixer_1536_20_ks9_p7.pth.tar'),
    'convmixer_768_32': _cfg(url='https://github.com/tmp-iclr/convmixer/releases/download/timm-v1.0/convmixer_768_32_ks7_p7_relu.pth.tar'),
    'convmixer_1024_20_ks9_p14': _cfg(url='https://github.com/tmp-iclr/convmixer/releases/download/timm-v1.0/convmixer_1024_20_ks9_p14.pth.tar')
}
class Residual(nn.Module):
    """Skip connection: adds the wrapped module's output back to its input."""

    def __init__(self, fn):
        super().__init__()
        self.fn = fn  # inner transform; must preserve the input's shape

    def forward(self, x):
        return x + self.fn(x)
class ConvMixer(nn.Module):
    """ConvMixer: a patch-embedding stem followed by depth repeated blocks of
    residual depthwise (spatial) convolution and pointwise (channel) mixing.

    The module tree (stem / blocks / pooling / head) is kept exactly as in the
    reference implementation so pretrained state dicts load unchanged.
    """

    def __init__(self, dim, depth, kernel_size=9, patch_size=7, in_chans=3,
                 num_classes=1000, activation=nn.GELU, **kwargs):
        super().__init__()
        self.num_classes = num_classes
        self.num_features = dim
        self.head = nn.Linear(dim, num_classes) if num_classes > 0 else nn.Identity()
        # Patch embedding: one strided conv turns each patch into a dim-channel "pixel".
        self.stem = nn.Sequential(
            nn.Conv2d(in_chans, dim, kernel_size=patch_size, stride=patch_size),
            activation(),
            nn.BatchNorm2d(dim),
        )
        # Each block: residual depthwise conv (mix space), then 1x1 conv (mix channels).
        stages = []
        for _ in range(depth):
            stages.append(nn.Sequential(
                Residual(nn.Sequential(
                    nn.Conv2d(dim, dim, kernel_size, groups=dim, padding="same"),
                    activation(),
                    nn.BatchNorm2d(dim),
                )),
                nn.Conv2d(dim, dim, kernel_size=1),
                activation(),
                nn.BatchNorm2d(dim),
            ))
        self.blocks = nn.Sequential(*stages)
        self.pooling = nn.Sequential(
            nn.AdaptiveAvgPool2d((1, 1)),
            nn.Flatten(),
        )

    def get_classifier(self):
        """Return the classification head module."""
        return self.head

    def reset_classifier(self, num_classes, global_pool=''):
        """Replace the head for a new class count (global_pool kept for API compat)."""
        self.num_classes = num_classes
        if num_classes > 0:
            self.head = nn.Linear(self.num_features, num_classes)
        else:
            self.head = nn.Identity()

    def forward_features(self, x):
        # NOTE: pooling is folded into feature extraction here, so the
        # result is already a flat (N, dim) tensor.
        return self.pooling(self.blocks(self.stem(x)))

    def forward(self, x):
        return self.head(self.forward_features(x))
def _create_convmixer(variant, pretrained=False, **kwargs):
    """Build a ConvMixer variant through timm's model factory."""
    cfg = default_cfgs[variant]
    return build_model_with_cfg(ConvMixer, variant, pretrained, default_cfg=cfg, **kwargs)
@register_model
def convmixer_1536_20(pretrained=False, **kwargs):
    """ConvMixer-1536/20: width 1536, depth 20, 9x9 depthwise kernels, patch size 7."""
    return _create_convmixer(
        'convmixer_1536_20', pretrained,
        dim=1536, depth=20, kernel_size=9, patch_size=7, **kwargs)
@register_model
def convmixer_768_32(pretrained=False, **kwargs):
    """ConvMixer-768/32 (ReLU): width 768, depth 32, 7x7 kernels, patch size 7."""
    return _create_convmixer(
        'convmixer_768_32', pretrained,
        dim=768, depth=32, kernel_size=7, patch_size=7, activation=nn.ReLU, **kwargs)
@register_model
def convmixer_1024_20_ks9_p14(pretrained=False, **kwargs):
    # ConvMixer-1024/20: width 1024, depth 20, 9x9 kernels, patch size 14.
    model_args = dict(dim=1024, depth=20, kernel_size=9, patch_size=14, **kwargs)
    return _create_convmixer('convmixer_1024_20_ks9_p14', pretrained, **model_args) | rwightman/pytorch-image-models | timm/models/convmixer.py | Python | apache-2.0 | 3,631 |
#!/usr/bin/env python
# Libraries
import imapclient
# Local imports
import account_settings
import utils
def get_all_folders(username, password):
    """Log in to IMAP and return (list of folder names, live server connection)."""
    server = utils.login_to_server(username, password)
    # list_folders() yields (flags, delimiter, name) triples; keep only the name.
    folder_names = []
    for flags_delim_name in server.list_folders():
        folder_names.append(flags_delim_name[2])
    return folder_names, server
def main():
    """Create in the new account every folder that exists only in the old one.

    NOTE: this is Python 2 code (print statement below).
    """
    all_folders_old, server_old = get_all_folders(
        account_settings.OLD_USERNAME, account_settings.OLD_PASSWORD)
    # We don't need the old server connection any longer.
    server_old.logout()
    all_folders_new, server_new = get_all_folders(
        account_settings.NEW_USERNAME, account_settings.NEW_PASSWORD)
    # Folders present in the old account but missing from the new one.
    uncreated_old_folders = set(all_folders_old).difference(all_folders_new)
    for folder in uncreated_old_folders:
        server_new.create_folder(folder)
        print 'Created', folder
    server_new.logout()
# Script entry point.
if __name__ == '__main__':
    main()
| dhermes/gmail-imap-migrate | step1_make_new_labels.py | Python | apache-2.0 | 895 |
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import six
from heat.common import exception
from heat.common.i18n import _
from heat.common import password_gen
from heat.engine import attributes
from heat.engine import constraints
from heat.engine import properties
from heat.engine import resource
from heat.engine import support
from heat.engine import translation
class RandomString(resource.Resource):
    """A resource which generates a random string.

    This is useful for configuring passwords and secrets on services. The
    random string can be generated from specified character sequences (all
    characters are chosen at random from those sequences) or from named
    character classes (e.g. letters and digits, meaning characters are chosen
    from the union of ASCII letters and digits). The output is a randomly
    generated string of the requested length (32 when the length property is
    not specified).
    """

    support_status = support.SupportStatus(version='2014.1')

    # Property keys.
    PROPERTIES = (
        LENGTH, SEQUENCE, CHARACTER_CLASSES, CHARACTER_SEQUENCES,
        SALT,
    ) = (
        'length', 'sequence', 'character_classes', 'character_sequences',
        'salt',
    )

    # Keys of each entry in the character_classes list property.
    _CHARACTER_CLASSES_KEYS = (
        CHARACTER_CLASSES_CLASS, CHARACTER_CLASSES_MIN,
    ) = (
        'class', 'min',
    )

    # Keys of each entry in the character_sequences list property.
    _CHARACTER_SEQUENCES = (
        CHARACTER_SEQUENCES_SEQUENCE, CHARACTER_SEQUENCES_MIN,
    ) = (
        'sequence', 'min',
    )

    ATTRIBUTES = (
        VALUE,
    ) = (
        'value',
    )

    properties_schema = {
        LENGTH: properties.Schema(
            properties.Schema.INTEGER,
            _('Length of the string to generate.'),
            default=32,
            constraints=[
                constraints.Range(1, 512),
            ]
        ),
        SEQUENCE: properties.Schema(
            properties.Schema.STRING,
            _('Sequence of characters to build the random string from.'),
            constraints=[
                constraints.AllowedValues(password_gen.CHARACTER_CLASSES),
            ],
            support_status=support.SupportStatus(
                status=support.HIDDEN,
                version='5.0.0',
                previous_status=support.SupportStatus(
                    status=support.DEPRECATED,
                    message=_('Use property %s.') % CHARACTER_CLASSES,
                    version='2014.2'
                )
            )
        ),
        CHARACTER_CLASSES: properties.Schema(
            properties.Schema.LIST,
            _('A list of character class and their constraints to generate '
              'the random string from.'),
            schema=properties.Schema(
                properties.Schema.MAP,
                schema={
                    CHARACTER_CLASSES_CLASS: properties.Schema(
                        properties.Schema.STRING,
                        (_('A character class and its corresponding %(min)s '
                           'constraint to generate the random string from.')
                         % {'min': CHARACTER_CLASSES_MIN}),
                        constraints=[
                            constraints.AllowedValues(
                                password_gen.CHARACTER_CLASSES),
                        ],
                        default=password_gen.LETTERS_DIGITS),
                    CHARACTER_CLASSES_MIN: properties.Schema(
                        properties.Schema.INTEGER,
                        _('The minimum number of characters from this '
                          'character class that will be in the generated '
                          'string.'),
                        default=1,
                        constraints=[
                            constraints.Range(1, 512),
                        ]
                    )
                }
            ),
            # add defaults for backward compatibility
            default=[{CHARACTER_CLASSES_CLASS: password_gen.LETTERS_DIGITS,
                      CHARACTER_CLASSES_MIN: 1}]
        ),
        CHARACTER_SEQUENCES: properties.Schema(
            properties.Schema.LIST,
            _('A list of character sequences and their constraints to '
              'generate the random string from.'),
            schema=properties.Schema(
                properties.Schema.MAP,
                schema={
                    CHARACTER_SEQUENCES_SEQUENCE: properties.Schema(
                        properties.Schema.STRING,
                        _('A character sequence and its corresponding %(min)s '
                          'constraint to generate the random string '
                          'from.') % {'min': CHARACTER_SEQUENCES_MIN},
                        required=True),
                    CHARACTER_SEQUENCES_MIN: properties.Schema(
                        properties.Schema.INTEGER,
                        _('The minimum number of characters from this '
                          'sequence that will be in the generated '
                          'string.'),
                        default=1,
                        constraints=[
                            constraints.Range(1, 512),
                        ]
                    )
                }
            )
        ),
        SALT: properties.Schema(
            properties.Schema.STRING,
            _('Value which can be set or changed on stack update to trigger '
              'the resource for replacement with a new random string. The '
              'salt value itself is ignored by the random generator.')
        ),
    }

    attributes_schema = {
        VALUE: attributes.Schema(
            _('The random string generated by this resource. This value is '
              'also available by referencing the resource.'),
            cache_mode=attributes.Schema.CACHE_NONE,
            type=attributes.Schema.STRING
        ),
    }

    def translation_rules(self, props):
        # Convert the deprecated 'sequence' property into an equivalent
        # 'character_classes' entry, then delete 'sequence' itself.
        if props.get(self.SEQUENCE):
            return [
                translation.TranslationRule(
                    props,
                    translation.TranslationRule.ADD,
                    [self.CHARACTER_CLASSES],
                    [{self.CHARACTER_CLASSES_CLASS: props.get(
                        self.SEQUENCE),
                        self.CHARACTER_CLASSES_MIN: 1}]),
                translation.TranslationRule(
                    props,
                    translation.TranslationRule.DELETE,
                    [self.SEQUENCE]
                )
            ]

    def _generate_random_string(self, char_sequences, char_classes, length):
        # Build the per-sequence and per-class minimum-count requirements,
        # then delegate the actual generation to password_gen.
        seq_mins = [
            password_gen.special_char_class(
                char_seq[self.CHARACTER_SEQUENCES_SEQUENCE],
                char_seq[self.CHARACTER_SEQUENCES_MIN])
            for char_seq in char_sequences]
        char_class_mins = [
            password_gen.named_char_class(
                char_class[self.CHARACTER_CLASSES_CLASS],
                char_class[self.CHARACTER_CLASSES_MIN])
            for char_class in char_classes]

        return password_gen.generate_password(length,
                                              seq_mins + char_class_mins)

    def validate(self):
        super(RandomString, self).validate()
        char_sequences = self.properties[self.CHARACTER_SEQUENCES]
        char_classes = self.properties[self.CHARACTER_CLASSES]

        def char_min(char_dicts, min_prop):
            # Sum of the per-entry minimums; 0 when the property is unset.
            if char_dicts:
                return sum(char_dict[min_prop] for char_dict in char_dicts)
            return 0

        # The requested length must be able to satisfy all minimum counts.
        length = self.properties[self.LENGTH]
        min_length = (char_min(char_sequences, self.CHARACTER_SEQUENCES_MIN) +
                      char_min(char_classes, self.CHARACTER_CLASSES_MIN))
        if min_length > length:
            msg = _("Length property cannot be smaller than combined "
                    "character class and character sequence minimums")
            raise exception.StackValidationFailed(message=msg)

    def handle_create(self):
        char_sequences = self.properties[self.CHARACTER_SEQUENCES] or []
        char_classes = self.properties[self.CHARACTER_CLASSES] or []
        length = self.properties[self.LENGTH]

        random_string = self._generate_random_string(char_sequences,
                                                     char_classes,
                                                     length)
        # Store redacted so the secret is not exposed in plain resource data.
        self.data_set('value', random_string, redact=True)
        self.resource_id_set(self.physical_resource_name())

    def _resolve_attribute(self, name):
        # Only the 'value' attribute is supported; anything else yields None.
        if name == self.VALUE:
            return self.data().get(self.VALUE)

    def get_reference_id(self):
        # After creation, referencing the resource yields the generated
        # string; before creation it falls back to the resource name.
        if self.resource_id is not None:
            return self.data().get('value')
        else:
            return six.text_type(self.name)
def resource_mapping():
    """Expose the Heat resource type implemented by this module."""
    mapping = {'OS::Heat::RandomString': RandomString}
    return mapping
| noironetworks/heat | heat/engine/resources/openstack/heat/random_string.py | Python | apache-2.0 | 9,442 |
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import job_search_commute_search
# GCP project to run the sample against; must be set in the environment.
PROJECT_ID = os.environ["GOOGLE_CLOUD_PROJECT"]
def test_commute_search(tenant):
    """Every job returned by the commute search should be project-scoped."""
    results = job_search_commute_search.search_jobs(PROJECT_ID, tenant)
    for result in results:
        assert "projects/" in result
| googleapis/python-talent | samples/snippets/job_search_commute_search_test.py | Python | apache-2.0 | 828 |
# -*- coding: utf-8 -*-
#
# Read the Docs Template documentation build configuration file, created by
# sphinx-quickstart on Tue Aug 26 14:19:49 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
from recommonmark.parser import CommonMarkParser
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
# Sphinx extension modules; none are enabled for this project.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
## Add parser for Markdown source files (via recommonmark).
source_parsers = {
    '.md': CommonMarkParser,
}
# The suffix of source filenames.
source_suffix = ['.rst', '.md']
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document (root page of the documentation tree).
master_doc = 'index'
# General information about the project.
project = u'Documentation DigitalSkills'
copyright = u'2017, DigitalSkills'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.0'
# The full version, including alpha/beta/rc tags.
# NOTE(review): 'version' (0.0) and 'release' (0.1) disagree -- confirm which
# value is intended.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# ('_build' is Sphinx's default output directory.)
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use for code blocks.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
# HTML theme; the custom DigitalSkills RTD theme below is currently disabled.
html_theme = 'default'
#html_theme = 'sphinx_rtd_theme_digitalskills'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['_themes',]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will override the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
# NOTE(review): still carries the Read the Docs template name -- rename if desired.
htmlhelp_basename = 'ReadtheDocsTemplatedoc'
# -- Options for LaTeX output ---------------------------------------------
# LaTeX builder tweaks; all defaults are kept (entries below are commented out).
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
  ('index', 'ReadtheDocsTemplate.tex', u'Read the Docs Template Documentation',
   u'Read the Docs', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
# One entry per generated Unix man page:
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'readthedocstemplate', u'Read the Docs Template Documentation',
     [u'Read the Docs'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
# Texinfo output: (start file, target name, title, author,
# dir menu entry, description, category).
texinfo_documents = [
  ('index', 'ReadtheDocsTemplate', u'Read the Docs Template Documentation',
   u'Read the Docs', 'ReadtheDocsTemplate', 'One line description of project.',
   'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| DigitalSkills-fr/Docs | docs/conf.py | Python | apache-2.0 | 8,474 |
# Gunicorn configuration for the Weasyl WSGI application.
wsgi_app = "weasyl.wsgi:make_wsgi_app()"
proc_name = "weasyl"
preload_app = False
# Treat requests carrying this header value (set by the fronting proxy) as HTTPS.
secure_scheme_headers = {
    'X-FORWARDED-PROTO': 'https',
}
# Accept X-Forwarded-* headers from any upstream address.
forwarded_allow_ips = '*'
| Weasyl/weasyl | gunicorn.conf.py | Python | apache-2.0 | 173 |
# Copyright 2010 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Test suites for 'common' code used throughout the OpenStack HTTP API.
"""
import mock
from testtools import matchers
import webob
import webob.exc
from cinder.api import common
from cinder import test
# XML namespace prefixes; appear unused in the visible tests -- TODO confirm.
NS = "{http://docs.openstack.org/compute/api/v1.1}"
ATOMNS = "{http://www.w3.org/2005/Atom}"
class LimiterTest(test.TestCase):
    """Unit tests for the `cinder.api.common.limited` method.

    This method takes in a list of items and, depending on the 'offset'
    and 'limit' GET params, returns a subset or complete set of the given
    items.
    """

    def setUp(self):
        """Run before each test."""
        super(LimiterTest, self).setUp()
        # Fixture sequences of increasing size; 1000 is the default max_limit.
        self.tiny = range(1)
        self.small = range(10)
        self.medium = range(1000)
        self.large = range(10000)

    def test_limiter_offset_zero(self):
        """Test offset key works with 0."""
        req = webob.Request.blank('/?offset=0')
        self.assertEqual(common.limited(self.tiny, req), self.tiny)
        self.assertEqual(common.limited(self.small, req), self.small)
        self.assertEqual(common.limited(self.medium, req), self.medium)
        self.assertEqual(common.limited(self.large, req), self.large[:1000])

    def test_limiter_offset_medium(self):
        """Test offset key works with a medium sized number."""
        req = webob.Request.blank('/?offset=10')
        self.assertEqual(common.limited(self.tiny, req), [])
        self.assertEqual(common.limited(self.small, req), self.small[10:])
        self.assertEqual(common.limited(self.medium, req), self.medium[10:])
        self.assertEqual(common.limited(self.large, req), self.large[10:1010])

    def test_limiter_offset_over_max(self):
        """Test offset key works with a number over 1000 (max_limit)."""
        req = webob.Request.blank('/?offset=1001')
        self.assertEqual([], common.limited(self.tiny, req))
        self.assertEqual([], common.limited(self.small, req))
        self.assertEqual([], common.limited(self.medium, req))
        self.assertEqual(
            common.limited(self.large, req), self.large[1001:2001])

    def test_limiter_offset_blank(self):
        """Test offset key works with a blank offset."""
        req = webob.Request.blank('/?offset=')
        self.assertRaises(
            webob.exc.HTTPBadRequest, common.limited, self.tiny, req)

    def test_limiter_offset_bad(self):
        """Test offset key works with a BAD offset."""
        # \u0020 is a plain space; the whole value is non-numeric.
        req = webob.Request.blank(u'/?offset=\u0020aa')
        self.assertRaises(
            webob.exc.HTTPBadRequest, common.limited, self.tiny, req)

    def test_limiter_nothing(self):
        """Test request with no offset or limit."""
        req = webob.Request.blank('/')
        self.assertEqual(common.limited(self.tiny, req), self.tiny)
        self.assertEqual(common.limited(self.small, req), self.small)
        self.assertEqual(common.limited(self.medium, req), self.medium)
        self.assertEqual(common.limited(self.large, req), self.large[:1000])

    def test_limiter_limit_zero(self):
        """Test limit of zero."""
        # limit=0 means "no explicit limit"; max_limit still applies.
        req = webob.Request.blank('/?limit=0')
        self.assertEqual(common.limited(self.tiny, req), self.tiny)
        self.assertEqual(common.limited(self.small, req), self.small)
        self.assertEqual(common.limited(self.medium, req), self.medium)
        self.assertEqual(common.limited(self.large, req), self.large[:1000])

    def test_limiter_limit_bad(self):
        """Test with a bad limit."""
        req = webob.Request.blank(u'/?limit=hello')
        self.assertRaises(
            webob.exc.HTTPBadRequest, common.limited, self.tiny, req)

    def test_limiter_limit_medium(self):
        """Test limit of 10."""
        req = webob.Request.blank('/?limit=10')
        self.assertEqual(common.limited(self.tiny, req), self.tiny)
        self.assertEqual(common.limited(self.small, req), self.small)
        self.assertEqual(common.limited(self.medium, req), self.medium[:10])
        self.assertEqual(common.limited(self.large, req), self.large[:10])

    def test_limiter_limit_over_max(self):
        """Test limit of 3000."""
        # Requested limit is capped at max_limit (1000).
        req = webob.Request.blank('/?limit=3000')
        self.assertEqual(common.limited(self.tiny, req), self.tiny)
        self.assertEqual(common.limited(self.small, req), self.small)
        self.assertEqual(common.limited(self.medium, req), self.medium)
        self.assertEqual(common.limited(self.large, req), self.large[:1000])

    def test_limiter_limit_and_offset(self):
        """Test request with both limit and offset."""
        items = range(2000)
        req = webob.Request.blank('/?offset=1&limit=3')
        self.assertEqual(common.limited(items, req), items[1:4])
        req = webob.Request.blank('/?offset=3&limit=0')
        self.assertEqual(common.limited(items, req), items[3:1003])
        req = webob.Request.blank('/?offset=3&limit=1500')
        self.assertEqual(common.limited(items, req), items[3:1003])
        req = webob.Request.blank('/?offset=3000&limit=10')
        self.assertEqual(common.limited(items, req), [])

    def test_limiter_custom_max_limit(self):
        """Test a max_limit other than 1000."""
        items = range(2000)
        req = webob.Request.blank('/?offset=1&limit=3')
        self.assertEqual(
            common.limited(items, req, max_limit=2000), items[1:4])
        req = webob.Request.blank('/?offset=3&limit=0')
        self.assertEqual(
            common.limited(items, req, max_limit=2000), items[3:])
        req = webob.Request.blank('/?offset=3&limit=2500')
        self.assertEqual(
            common.limited(items, req, max_limit=2000), items[3:])
        req = webob.Request.blank('/?offset=3000&limit=10')
        self.assertEqual(common.limited(items, req, max_limit=2000), [])

    def test_limiter_negative_limit(self):
        """Test a negative limit."""
        req = webob.Request.blank('/?limit=-3000')
        self.assertRaises(
            webob.exc.HTTPBadRequest, common.limited, self.tiny, req)

    def test_limiter_negative_offset(self):
        """Test a negative offset."""
        req = webob.Request.blank('/?offset=-30')
        self.assertRaises(
            webob.exc.HTTPBadRequest, common.limited, self.tiny, req)
class PaginationParamsTest(test.TestCase):
    """Unit tests for `cinder.api.common.get_pagination_params` method.

    This method takes in a request object and returns 'marker' and 'limit'
    GET params.
    """

    def test_nonnumerical_limit(self):
        """Test nonnumerical limit param."""
        req = webob.Request.blank('/?limit=hello')
        self.assertRaises(
            webob.exc.HTTPBadRequest, common.get_pagination_params, req)

    def test_no_params(self):
        """Test no params."""
        req = webob.Request.blank('/')
        self.assertEqual({}, common.get_pagination_params(req))

    def test_valid_marker(self):
        """Test valid marker param."""
        # Markers are opaque resource UUIDs.
        req = webob.Request.blank(
            '/?marker=263abb28-1de6-412f-b00b-f0ee0c4333c2')
        self.assertEqual({'marker': '263abb28-1de6-412f-b00b-f0ee0c4333c2'},
                         common.get_pagination_params(req))

    def test_valid_limit(self):
        """Test valid limit param."""
        req = webob.Request.blank('/?limit=10')
        self.assertEqual({'limit': 10}, common.get_pagination_params(req))

    def test_invalid_limit(self):
        """Test invalid limit param."""
        req = webob.Request.blank('/?limit=-2')
        self.assertRaises(
            webob.exc.HTTPBadRequest, common.get_pagination_params, req)

    def test_valid_limit_and_marker(self):
        """Test valid limit and marker parameters."""
        marker = '263abb28-1de6-412f-b00b-f0ee0c4333c2'
        req = webob.Request.blank('/?limit=20&marker=%s' % marker)
        self.assertEqual({'marker': marker, 'limit': 20},
                         common.get_pagination_params(req))
class SortParamUtilsTest(test.TestCase):
    """Unit tests for `cinder.api.common.get_sort_params`."""

    def test_get_sort_params_defaults(self):
        """Verifies the default sort key and direction."""
        sort_keys, sort_dirs = common.get_sort_params({})
        self.assertEqual(['created_at'], sort_keys)
        self.assertEqual(['desc'], sort_dirs)

    def test_get_sort_params_override_defaults(self):
        """Verifies that the defaults can be overridden."""
        sort_keys, sort_dirs = common.get_sort_params({}, default_key='key1',
                                                      default_dir='dir1')
        self.assertEqual(['key1'], sort_keys)
        self.assertEqual(['dir1'], sort_dirs)

    def test_get_sort_params_single_value_sort_param(self):
        """Verifies a single sort key and direction."""
        params = {'sort': 'key1:dir1'}
        sort_keys, sort_dirs = common.get_sort_params(params)
        self.assertEqual(['key1'], sort_keys)
        self.assertEqual(['dir1'], sort_dirs)

    def test_get_sort_params_single_value_old_params(self):
        """Verifies a single sort key and direction (legacy params)."""
        params = {'sort_key': 'key1', 'sort_dir': 'dir1'}
        sort_keys, sort_dirs = common.get_sort_params(params)
        self.assertEqual(['key1'], sort_keys)
        self.assertEqual(['dir1'], sort_dirs)

    def test_get_sort_params_single_with_default_sort_param(self):
        """Verifies a single sort value with a default direction."""
        params = {'sort': 'key1'}
        sort_keys, sort_dirs = common.get_sort_params(params)
        self.assertEqual(['key1'], sort_keys)
        # Direction should be defaulted
        self.assertEqual(['desc'], sort_dirs)

    def test_get_sort_params_single_with_default_old_params(self):
        """Verifies a single sort value with a default direction."""
        params = {'sort_key': 'key1'}
        sort_keys, sort_dirs = common.get_sort_params(params)
        self.assertEqual(['key1'], sort_keys)
        # Direction should be defaulted
        self.assertEqual(['desc'], sort_dirs)

    def test_get_sort_params_multiple_values(self):
        """Verifies multiple sort parameter values."""
        params = {'sort': 'key1:dir1,key2:dir2,key3:dir3'}
        sort_keys, sort_dirs = common.get_sort_params(params)
        self.assertEqual(['key1', 'key2', 'key3'], sort_keys)
        self.assertEqual(['dir1', 'dir2', 'dir3'], sort_dirs)

    def test_get_sort_params_multiple_not_all_dirs(self):
        """Verifies multiple sort keys without all directions."""
        params = {'sort': 'key1:dir1,key2,key3:dir3'}
        sort_keys, sort_dirs = common.get_sort_params(params)
        self.assertEqual(['key1', 'key2', 'key3'], sort_keys)
        # Second key is missing the direction, should be defaulted
        self.assertEqual(['dir1', 'desc', 'dir3'], sort_dirs)

    def test_get_sort_params_multiple_override_default_dir(self):
        """Verifies multiple sort keys and overriding default direction."""
        params = {'sort': 'key1:dir1,key2,key3'}
        sort_keys, sort_dirs = common.get_sort_params(params,
                                                      default_dir='foo')
        self.assertEqual(['key1', 'key2', 'key3'], sort_keys)
        self.assertEqual(['dir1', 'foo', 'foo'], sort_dirs)

    def test_get_sort_params_params_modified(self):
        """Verifies that the input sort parameters are consumed."""
        params = {'sort': 'key1:dir1,key2:dir2,key3:dir3'}
        common.get_sort_params(params)
        self.assertEqual({}, params)

        # BUG FIX: the original dict literal repeated the 'sort_dir' key
        # ({'sort_dir': 'key1', 'sort_dir': 'dir1'}), so the legacy
        # 'sort_key' parameter was never exercised; use one of each key.
        params = {'sort_key': 'key1', 'sort_dir': 'dir1'}
        common.get_sort_params(params)
        self.assertEqual({}, params)

    def test_get_sort_params_random_spaces(self):
        """Verifies that leading and trailing spaces are removed."""
        params = {'sort': ' key1 : dir1,key2: dir2 , key3 '}
        sort_keys, sort_dirs = common.get_sort_params(params)
        self.assertEqual(['key1', 'key2', 'key3'], sort_keys)
        self.assertEqual(['dir1', 'dir2', 'desc'], sort_dirs)

    def test_get_params_mix_sort_and_old_params(self):
        """An exception is raised if both types of sorting params are given."""
        for params in ({'sort': 'k1', 'sort_key': 'k1'},
                       {'sort': 'k1', 'sort_dir': 'd1'},
                       {'sort': 'k1', 'sort_key': 'k1', 'sort_dir': 'd2'}):
            self.assertRaises(webob.exc.HTTPBadRequest,
                              common.get_sort_params,
                              params)
class MiscFunctionsTest(test.TestCase):
    """Tests for common.remove_version_from_href."""

    def _assert_stripped(self, href, expected):
        # Positive case: the version segment is removed from the href.
        self.assertEqual(expected, common.remove_version_from_href(href))

    def _assert_rejected(self, href):
        # Negative case: hrefs without a well-formed version segment raise.
        self.assertRaises(ValueError, common.remove_version_from_href, href)

    def test_remove_major_version_from_href(self):
        self._assert_stripped('http://www.testsite.com/v1/images',
                              'http://www.testsite.com/images')

    def test_remove_version_from_href(self):
        self._assert_stripped('http://www.testsite.com/v1.1/images',
                              'http://www.testsite.com/images')

    def test_remove_version_from_href_2(self):
        self._assert_stripped('http://www.testsite.com/v1.1/',
                              'http://www.testsite.com/')

    def test_remove_version_from_href_3(self):
        self._assert_stripped('http://www.testsite.com/v10.10',
                              'http://www.testsite.com')

    def test_remove_version_from_href_4(self):
        # Only the first version segment is removed.
        self._assert_stripped('http://www.testsite.com/v1.1/images/v10.5',
                              'http://www.testsite.com/images/v10.5')

    def test_remove_version_from_href_bad_request(self):
        self._assert_rejected('http://www.testsite.com/1.1/images')

    def test_remove_version_from_href_bad_request_2(self):
        self._assert_rejected('http://www.testsite.com/v/images')

    def test_remove_version_from_href_bad_request_3(self):
        self._assert_rejected('http://www.testsite.com/v1.1images')
class TestCollectionLinks(test.TestCase):
    """Tests the _get_collection_links method."""

    def _validate_next_link(self, href_link_mock, item_count,
                            osapi_max_limit, limit, should_link_exist):
        """Drives _get_collection_links once and checks the produced links.

        Args:
            href_link_mock: patched _generate_next_link mock.
            item_count: total number of items reported to the builder.
            osapi_max_limit: value to set for the osapi_max_limit flag.
            limit: 'limit' request parameter, or None for no limit.
            should_link_exist: whether a "next" link is expected.
        """
        href_link_mock.return_value = [{"rel": "next", "href": "fake_link"}]
        self.flags(osapi_max_limit=osapi_max_limit)

        request = mock.MagicMock()
        if limit is None:
            request_params = dict()
            effective_size = min(item_count, osapi_max_limit)
        else:
            request_params = dict(limit=limit)
            effective_size = min(item_count, osapi_max_limit, limit)
        type(request).params = mock.PropertyMock(return_value=request_params)

        # The list handed to the builder is already truncated, as the API
        # layer would have done.
        items = [{"uuid": str(i)} for i in range(effective_size)]
        builder = common.ViewBuilder()
        results = builder._get_collection_links(request, items,
                                                mock.sentinel.coll_key,
                                                item_count, "uuid")

        if should_link_exist:
            href_link_mock.assert_called_once_with(items, "uuid", request,
                                                   mock.sentinel.coll_key)
            self.assertThat(results, matchers.HasLength(1))
        else:
            self.assertFalse(href_link_mock.called)
            self.assertThat(results, matchers.HasLength(0))

    @mock.patch('cinder.api.common.ViewBuilder._generate_next_link')
    def test_items_equals_osapi_max_no_limit(self, href_link_mock):
        self._validate_next_link(href_link_mock, item_count=5,
                                 osapi_max_limit=5, limit=None,
                                 should_link_exist=False)

    @mock.patch('cinder.api.common.ViewBuilder._generate_next_link')
    def test_items_equals_osapi_max_greater_than_limit(self,
                                                       href_link_mock):
        self._validate_next_link(href_link_mock, item_count=5,
                                 osapi_max_limit=5, limit=4,
                                 should_link_exist=True)

    @mock.patch('cinder.api.common.ViewBuilder._generate_next_link')
    def test_items_equals_osapi_max_equals_limit(self, href_link_mock):
        self._validate_next_link(href_link_mock, item_count=5,
                                 osapi_max_limit=5, limit=5,
                                 should_link_exist=True)

    @mock.patch('cinder.api.common.ViewBuilder._generate_next_link')
    def test_items_equals_osapi_max_less_than_limit(self, href_link_mock):
        self._validate_next_link(href_link_mock, item_count=5,
                                 osapi_max_limit=5, limit=6,
                                 should_link_exist=False)

    @mock.patch('cinder.api.common.ViewBuilder._generate_next_link')
    def test_items_less_than_osapi_max_no_limit(self, href_link_mock):
        self._validate_next_link(href_link_mock, item_count=5,
                                 osapi_max_limit=7, limit=None,
                                 should_link_exist=False)

    @mock.patch('cinder.api.common.ViewBuilder._generate_next_link')
    def test_limit_less_than_items_less_than_osapi_max(self, href_link_mock):
        self._validate_next_link(href_link_mock, item_count=5,
                                 osapi_max_limit=7, limit=4,
                                 should_link_exist=True)

    @mock.patch('cinder.api.common.ViewBuilder._generate_next_link')
    def test_limit_equals_items_less_than_osapi_max(self, href_link_mock):
        self._validate_next_link(href_link_mock, item_count=5,
                                 osapi_max_limit=7, limit=5,
                                 should_link_exist=True)

    @mock.patch('cinder.api.common.ViewBuilder._generate_next_link')
    def test_items_less_than_limit_less_than_osapi_max(self, href_link_mock):
        self._validate_next_link(href_link_mock, item_count=5,
                                 osapi_max_limit=7, limit=6,
                                 should_link_exist=False)

    @mock.patch('cinder.api.common.ViewBuilder._generate_next_link')
    def test_items_less_than_osapi_max_equals_limit(self, href_link_mock):
        self._validate_next_link(href_link_mock, item_count=5,
                                 osapi_max_limit=7, limit=7,
                                 should_link_exist=False)

    @mock.patch('cinder.api.common.ViewBuilder._generate_next_link')
    def test_items_less_than_osapi_max_less_than_limit(self, href_link_mock):
        self._validate_next_link(href_link_mock, item_count=5,
                                 osapi_max_limit=7, limit=8,
                                 should_link_exist=False)

    @mock.patch('cinder.api.common.ViewBuilder._generate_next_link')
    def test_items_greater_than_osapi_max_no_limit(self, href_link_mock):
        self._validate_next_link(href_link_mock, item_count=5,
                                 osapi_max_limit=3, limit=None,
                                 should_link_exist=True)

    @mock.patch('cinder.api.common.ViewBuilder._generate_next_link')
    def test_limit_less_than_items_greater_than_osapi_max(self,
                                                          href_link_mock):
        self._validate_next_link(href_link_mock, item_count=5,
                                 osapi_max_limit=3, limit=2,
                                 should_link_exist=True)

    @mock.patch('cinder.api.common.ViewBuilder._generate_next_link')
    def test_items_greater_than_osapi_max_equals_limit(self,
                                                       href_link_mock):
        self._validate_next_link(href_link_mock, item_count=5,
                                 osapi_max_limit=3, limit=3,
                                 should_link_exist=True)

    @mock.patch('cinder.api.common.ViewBuilder._generate_next_link')
    def test_items_greater_than_limit_greater_than_osapi_max(self,
                                                             href_link_mock):
        self._validate_next_link(href_link_mock, item_count=5,
                                 osapi_max_limit=3, limit=4,
                                 should_link_exist=True)

    @mock.patch('cinder.api.common.ViewBuilder._generate_next_link')
    def test_items_equals_limit_greater_than_osapi_max(self,
                                                       href_link_mock):
        self._validate_next_link(href_link_mock, item_count=5,
                                 osapi_max_limit=3, limit=5,
                                 should_link_exist=True)

    @mock.patch('cinder.api.common.ViewBuilder._generate_next_link')
    def test_limit_greater_than_items_greater_than_osapi_max(self,
                                                             href_link_mock):
        self._validate_next_link(href_link_mock, item_count=5,
                                 osapi_max_limit=3, limit=6,
                                 should_link_exist=True)
class LinkPrefixTest(test.TestCase):
    """Tests for ViewBuilder._update_link_prefix."""

    def test_update_link_prefix(self):
        builder = common.ViewBuilder()
        # (original link, new prefix, expected result)
        cases = [
            ("http://192.168.0.243:24/",
             "http://127.0.0.1/volume",
             "http://127.0.0.1/volume"),
            ("http://foo.x.com/v1",
             "http://new.prefix.com",
             "http://new.prefix.com/v1"),
            ("http://foo.x.com/v1",
             "http://new.prefix.com:20455/new_extra_prefix",
             "http://new.prefix.com:20455/new_extra_prefix/v1"),
        ]
        for link, prefix, expected in cases:
            self.assertEqual(expected,
                             builder._update_link_prefix(link, prefix))
| CloudServer/cinder | cinder/tests/unit/api/test_common.py | Python | apache-2.0 | 24,477 |
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from tflite_runtime.interpreter import load_delegate
from tflite_runtime.interpreter import Interpreter
import glob
import os
import subprocess
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.preprocessing.image import ImageDataGenerator
input_size = (224, 224)
input_shape = (224, 224, 3)
# NOTE(review): this value is overridden to 64 in the dataset section below.
batch_size = 1

###########################################################################################
# Load pretrained model
###########################################################################################
# MobileNetV2 feature extractor pretrained on ImageNet; the stock
# classification head is dropped (include_top=False) and replaced below.
# (classifier_activation only applies when include_top=True, so it is inert
# here; kept for compatibility with the original script.)
base_model = tf.keras.applications.MobileNetV2(input_shape=input_shape,
                                               include_top=False,
                                               classifier_activation='softmax',
                                               weights='imagenet')

# Fine-tune only the deeper feature layers: freeze the first 100.
base_model.trainable = True
for layer in base_model.layers[:100]:
    layer.trainable = False

# Custom 2-class head on top of the frozen/fine-tuned backbone.
model = tf.keras.Sequential([
    base_model,
    tf.keras.layers.Conv2D(filters=32, kernel_size=3, activation='relu'),
    tf.keras.layers.Dropout(0.2),
    tf.keras.layers.GlobalAveragePooling2D(),
    tf.keras.layers.Dense(units=2, activation='softmax')
])

model.compile(loss='categorical_crossentropy',
              # 'learning_rate' replaces the deprecated 'lr' argument.
              optimizer=tf.keras.optimizers.RMSprop(learning_rate=1e-5),
              metrics=['accuracy'])
# summary() prints the table itself and returns None, so wrapping it in
# print() only added a stray "None" line.
model.summary()
###########################################################################################
# Prepare Datasets
###########################################################################################
# Heavy augmentation (zoom/rotate/shift/shear/flip) on the training set only;
# validation images are merely rescaled into [0, 1].
train_datagen = ImageDataGenerator(rescale=1./255,
                                   zoom_range=0.3,
                                   rotation_range=50,
                                   width_shift_range=0.2,
                                   height_shift_range=0.2,
                                   shear_range=0.2,
                                   horizontal_flip=True,
                                   fill_mode='nearest')
val_datagen = ImageDataGenerator(rescale=1./255)

# Expected layout: ./dataset/{train,test}/<class_name>/<image files>
dataset_path = './dataset'
train_set_path = os.path.join(dataset_path, 'train')
val_set_path = os.path.join(dataset_path, 'test')

# NOTE(review): silently overrides the batch_size = 1 assigned at the top of
# the script; the rest of the run uses 64.
batch_size = 64

train_generator = train_datagen.flow_from_directory(train_set_path,
                                                    target_size=input_size,
                                                    batch_size=batch_size,
                                                    class_mode='categorical')
val_generator = val_datagen.flow_from_directory(val_set_path,
                                                target_size=input_size,
                                                batch_size=batch_size,
                                                class_mode='categorical')

epochs = 15
history = model.fit(train_generator,
                    steps_per_epoch=train_generator.n // batch_size,
                    epochs=epochs,
                    validation_data=val_generator,
                    validation_steps=val_generator.n // batch_size,
                    verbose=1)
###########################################################################################
# Plotting Train Data
###########################################################################################
# Per-epoch curves recorded by model.fit() above.
acc = history.history['accuracy']
val_acc = history.history['val_accuracy']
loss = history.history['loss']
val_loss = history.history['val_loss']

plt.figure(figsize=(8, 8))
plt.subplot(2, 1, 1)
plt.plot(acc, label='Training Accuracy')
plt.plot(val_acc, label='Validation Accuracy')
plt.legend(loc='lower right')
plt.ylabel('Accuracy')
plt.ylim([min(plt.ylim()), 1])
plt.title('Training and Validation Accuracy')

plt.subplot(2, 1, 2)
plt.plot(loss, label='Training Loss')
plt.plot(val_loss, label='Validation Loss')
plt.legend(loc='upper right')
plt.ylabel('Cross Entropy')
plt.ylim([0, 1.0])
plt.title('Training and Validation Loss')
plt.xlabel('epoch')
# Figure is written to disk; interactive display is disabled.
# plt.show()
plt.savefig('history.png')
###########################################################################################
# Post Training Quantization
###########################################################################################
def representative_data_gen():
    """Yields ~100 preprocessed sample batches for quantization calibration.

    The TFLite converter calls this generator to observe activation ranges
    when computing int8 quantization parameters.
    """
    dataset_list = tf.data.Dataset.list_files('./dataset/test/*/*')
    # Iterate one pass over the (shuffled) file list. The previous
    # `next(iter(dataset_list))` pattern rebuilt a fresh iterator on every
    # step, which re-listed the files each time and could yield the same
    # image repeatedly.
    for image_path in dataset_list.take(100):
        image = tf.io.read_file(image_path)
        image = tf.io.decode_jpeg(image, channels=3)
        image = tf.image.resize(image, input_size)
        image = tf.cast(image / 255., tf.float32)
        image = tf.expand_dims(image, 0)
        yield [image]
# Pin the batch dimension to 1 so the converter emits a static-shape model.
model.input.set_shape((1,) + model.input.shape[1:])

# Full integer quantization: int8 internals with uint8 input/output tensors,
# calibrated by representative_data_gen.
converter = tf.lite.TFLiteConverter.from_keras_model(model)
converter.optimizations = [tf.lite.Optimize.DEFAULT]
converter.representative_dataset = representative_data_gen
converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
converter.target_spec.supported_types = [tf.int8]
converter.inference_input_type = tf.uint8
converter.inference_output_type = tf.uint8
tflite_model = converter.convert()

###########################################################################################
# Saving models
###########################################################################################
model.save('classifier.h5')

with open('classifier.tflite', 'wb') as f:
    f.write(tflite_model)

###########################################################################################
# Evaluating h5
###########################################################################################
# One validation batch is reused below as a common yardstick for the Keras,
# TFLite, and EdgeTPU variants.
batch_images, batch_labels = next(val_generator)

# Class names, one per line, in the order of their indices.
labels = '\n'.join(sorted(train_generator.class_indices.keys()))
with open('classifier_labels.txt', 'w') as f:
    f.write(labels)

logits = model(batch_images)
prediction = np.argmax(logits, axis=1)
truth = np.argmax(batch_labels, axis=1)

keras_accuracy = tf.keras.metrics.Accuracy()
keras_accuracy(prediction, truth)
def set_input_tensor(interpreter, input):
    """Quantizes *input* and copies it into the interpreter's input tensor."""
    details = interpreter.get_input_details()[0]
    scale, zero_point = details['quantization']
    # View into the interpreter's own input buffer; writing here feeds the
    # model directly (index 0 drops the batch dimension).
    destination = interpreter.tensor(details['index'])()[0]
    destination[:, :] = np.uint8(input / scale + zero_point)
def classify_image(interpreter, input):
    """Runs one inference and returns the index of the top-scoring class."""
    set_input_tensor(interpreter, input)
    interpreter.invoke()
    details = interpreter.get_output_details()[0]
    raw_output = interpreter.get_tensor(details['index'])
    scale, zero_point = details['quantization']
    # Dequantize before taking the argmax.
    return np.argmax(scale * (raw_output - zero_point))
# Evaluate the quantized model on the CPU TFLite interpreter.
interpreter = tf.lite.Interpreter('classifier.tflite')
interpreter.allocate_tensors()

# Collect all inference predictions in a list
batch_prediction = []
batch_truth = np.argmax(batch_labels, axis=1)
for i in range(len(batch_images)):
    prediction = classify_image(interpreter, batch_images[i])
    batch_prediction.append(prediction)

# Compare all predictions to the ground truth
tflite_accuracy = tf.keras.metrics.Accuracy()
tflite_accuracy(batch_prediction, batch_truth)

###########################################################################################
# Compiles model
###########################################################################################
# Produces classifier_edgetpu.tflite next to the input model; requires the
# edgetpu_compiler binary on PATH.
subprocess.call(["edgetpu_compiler",
                 "--show_operations",
                 "classifier.tflite"])
###########################################################################################
# Evaluating tflite
###########################################################################################
# Same evaluation, but running the compiled model on the Edge TPU through
# the libedgetpu delegate.
interpreter = Interpreter('classifier_edgetpu.tflite', experimental_delegates=[
    load_delegate('libedgetpu.so.1.0')])
interpreter.allocate_tensors()

# Collect all inference predictions in a list
batch_prediction = []
batch_truth = np.argmax(batch_labels, axis=1)
for i in range(len(batch_images)):
    prediction = classify_image(interpreter, batch_images[i])
    batch_prediction.append(prediction)

# Compare all predictions to the ground truth
edgetpu_tflite_accuracy = tf.keras.metrics.Accuracy()
edgetpu_tflite_accuracy(batch_prediction, batch_truth)

###########################################################################################
# Show Results
###########################################################################################
# Accuracy on the single evaluation batch for all three model variants.
print("Raw model accuracy: {:.2%}".format(keras_accuracy.result()))
print("Quant TF Lite accuracy: {:.2%}".format(tflite_accuracy.result()))
print("EdgeTpu Quant TF Lite accuracy: {:.2%}".format(
    edgetpu_tflite_accuracy.result()))
| google-coral/demo-manufacturing | models/retraining/train_classifier.py | Python | apache-2.0 | 9,469 |
#!/usr/bin/env python
"""AFF4 interface implementation.
This contains an AFF4 data model implementation.
"""
import __builtin__
import abc
import StringIO
import time
import zlib
import logging
from grr.lib import access_control
from grr.lib import config_lib
from grr.lib import data_store
from grr.lib import lexer
from grr.lib import rdfvalue
from grr.lib import registry
from grr.lib import utils
from grr.lib.rdfvalues import grr_rdf
# Tunables for the AFF4 object caches (read via config_lib.CONFIG below).
config_lib.DEFINE_integer(
    "AFF4.cache_age", 5,
    "The number of seconds AFF4 objects live in the cache.")

config_lib.DEFINE_integer(
    "AFF4.notification_rules_cache_age", 60,
    "The number of seconds AFF4 notification rules are cached.")

# Factor to convert from seconds to microseconds
MICROSECONDS = 1000000

# Age specifications for opening AFF4 objects.
NEWEST_TIME = "NEWEST_TIME"
ALL_TIMES = "ALL_TIMES"

# Just something to write on an index attribute to make it exist.
EMPTY_DATA = "X"

# Attribute-name prefixes fetched when resolving AFF4 objects from the
# data store.
AFF4_PREFIXES = ["aff4:.*", "metadata:.*"]
class Error(Exception):
    """Base class for all errors raised by this module."""
class LockError(Error):
    """Raised when a lease on an AFF4 object cannot be acquired."""
class InstanciationError(Error, IOError):
    """Raised when an object cannot be opened as the requested type."""
class LockContextManager(object):
    """Context manager wrapping a locked AFF4 object.

    Yields the locked object on entry and closes it (which releases the
    lease) when the with-block exits.
    """

    def __init__(self, aff4_obj, sync):
        # aff4_obj: the already-locked AFF4 object to manage.
        # sync: passed through to Close() on exit.
        self.aff4_obj = aff4_obj
        self.sync = sync

    def __enter__(self):
        return self.aff4_obj

    def __exit__(self, unused_type, unused_value, unused_traceback):
        # Closing also releases the lease taken by _OpenWithLock.
        self.aff4_obj.Close(sync=self.sync)
class Factory(object):
"""A central factory for AFF4 objects."""
def __init__(self):
    """Sets up the factory caches and the system-level access token."""
    # This is a relatively short lived cache of objects.
    self.cache = utils.AgeBasedCache(
        max_size=10000,
        max_age=config_lib.CONFIG["AFF4.cache_age"])

    # Tracks intermediate (directory) subjects already touched, so index
    # maintenance can skip them (see _UpdateChildIndex).
    self.intermediate_cache = utils.FastStore(2000)

    # Create a token for system level actions:
    self.root_token = access_control.ACLToken(username="system",
                                              reason="Maintainance")
    self.root_token.supervisor = True

    # Cached notification rules and the time they were last loaded
    # (refresh interval governed by AFF4.notification_rules_cache_age).
    self.notification_rules = []
    self.notification_rules_timestamp = 0
@classmethod
def ParseAgeSpecification(cls, age):
    """Parses an aff4 age and returns a datastore age specification.

    Accepts a plain number (meaning "from epoch up to that time"), the
    NEWEST_TIME / ALL_TIMES policies, or a (start, end) pair.
    """
    # A numeric age means everything from time 0 up to that time.
    try:
        return (0, int(age))
    except (ValueError, TypeError):
        pass

    if age == NEWEST_TIME:
        return data_store.DB.NEWEST_TIMESTAMP
    if age == ALL_TIMES:
        return data_store.DB.ALL_TIMESTAMPS
    if len(age) == 2:
        start, end = age
        return (int(start), int(end))

    raise RuntimeError("Unknown age specification: %s" % age)
def GetAttributes(self, urns, ignore_cache=False, token=None,
                  age=NEWEST_TIME):
    """Retrieves all the attributes for all the urns.

    Returns a list of (urn, values) pairs, served from the factory cache
    when possible, otherwise fetched in a single data store round trip.
    """
    urns = [utils.SmartUnicode(u) for u in set(urns)]
    try:
        if not ignore_cache:
            result = []
            for subject in urns:
                key = self._MakeCacheInvariant(subject, token, age)
                result.append((subject, self.cache.Get(key)))

            return result
    except KeyError:
        # At least one urn was missing from the cache: fall through and
        # re-fetch everything below.
        pass

    subjects = []
    result = {}
    # If there are any cache misses, we need to go to the data store. So we
    # might as well just re-fetch all the urns again in a single data store
    # round trip.
    for subject, values in data_store.DB.MultiResolveRegex(
        urns, AFF4_PREFIXES,
        timestamp=self.ParseAgeSpecification(age),
        token=token, limit=None).items():

        # Ensure the values are sorted.
        values.sort(key=lambda x: x[-1], reverse=True)

        key = self._MakeCacheInvariant(subject, token, age)
        self.cache.Put(key, values)
        result[utils.SmartUnicode(subject)] = values
        subjects.append(subject)

    return result.items()
def SetAttributes(self, urn, attributes, to_delete, sync=False, token=None):
    """Sets the attributes in the data store and update the cache."""
    # Force a data_store lookup next.
    try:
        # Expire all entries in the cache for this urn (for all tokens, and
        # timestamps)
        self.cache.ExpirePrefix(utils.SmartStr(urn) + ":")
    except KeyError:
        pass

    # Every write also bumps the object's LAST timestamp; deleting it first
    # keeps only the single newest value.
    attributes[AFF4Object.SchemaCls.LAST] = [
        rdfvalue.RDFDatetime().Now().SerializeToDataStore()]
    to_delete.add(AFF4Object.SchemaCls.LAST)
    data_store.DB.MultiSet(urn, attributes, token=token,
                           replace=False, sync=sync, to_delete=to_delete)

    # TODO(user): This can run in the thread pool since its not time
    # critical.
    self._UpdateIndex(urn, attributes, token)
def _UpdateIndex(self, urn, attributes, token):
    """Writes index entries for any indexed attributes of *urn*."""
    # Group the (attribute, value) pairs by the index urn they belong to,
    # so each index object is opened and closed only once.
    index_map = {}
    for attribute, values in attributes.items():
        if not attribute.index:
            continue
        for value, _ in values:
            index_map.setdefault(attribute.index, []).append(
                (attribute, value))

    for index_urn, entries in index_map.items():
        aff4index = self.Create(index_urn, "AFF4Index", mode="w", token=token)
        for attribute, value in entries:
            aff4index.Add(urn, attribute, value)
        aff4index.Close()

    self._UpdateChildIndex(urn, token)
def _UpdateChildIndex(self, urn, token):
    """Update the child indexes.

    This function maintains the index for direct child relations. When we set
    an AFF4 path, we always add an attribute like
    index:dir/%(childname)s to its parent. This is written
    asynchronously to its parent.

    In order to query for all direct children of an AFF4 object, we then simple
    get the attributes which match the regex index:dir/.+ which are the
    direct children.

    Args:
      urn: The AFF4 object for which we update the index.
      token: The token to use.
    """
    try:
        # Create navigation aids by touching intermediate subject names.
        while urn.Path() != "/":
            basename = urn.Basename()
            dirname = rdfvalue.RDFURN(urn.Dirname())

            try:
                self.intermediate_cache.Get(urn.Path())
                # This component was touched recently, which implies all of
                # its ancestors were too - stop walking up the tree.
                return
            except KeyError:
                data_store.DB.MultiSet(dirname, {
                    AFF4Object.SchemaCls.LAST: [
                        rdfvalue.RDFDatetime().Now().SerializeToDataStore()],

                    # This updates the directory index.
                    "index:dir/%s" % utils.SmartStr(basename): [EMPTY_DATA],
                },
                                       token=token, replace=True, sync=False)

                self.intermediate_cache.Put(urn.Path(), 1)

                # Continue with the parent directory.
                urn = dirname

    except access_control.UnauthorizedAccess:
        # Best effort: skip index maintenance where we have no access.
        pass
def _DeleteChildFromIndex(self, urn, token):
    """Removes *urn* from its parent's direct-children index."""
    try:
        # Create navigation aids by touching intermediate subject names.
        basename = urn.Basename()
        dirname = rdfvalue.RDFURN(urn.Dirname())

        try:
            # The subject is changing, so drop any cached "already touched"
            # marker for it.
            self.intermediate_cache.ExpireObject(urn.Path())
        except KeyError:
            pass

        # Remove the child entry from the parent's directory index.
        data_store.DB.DeleteAttributes(
            dirname, ["index:dir/%s" % utils.SmartStr(basename)], token=token,
            sync=False)
        # And bump the parent's LAST timestamp to reflect the change.
        data_store.DB.MultiSet(dirname, {
            AFF4Object.SchemaCls.LAST: [
                rdfvalue.RDFDatetime().Now().SerializeToDataStore()],
        }, token=token, replace=True, sync=False)
    except access_control.UnauthorizedAccess:
        # Best effort: skip index maintenance where we have no access.
        pass
def _ExpandURNComponents(self, urn, unique_urns):
    """This expands URNs.

    Adds the urn and every one of its ancestor urns (built from its path
    components, starting at the root) to the set *unique_urns*.

    Args:
      urn: An RDFURN.
      unique_urns: A set to add the components of the urn to.
    """
    current = ROOT_URN
    for component in rdfvalue.RDFURN(urn).Path().split("/"):
        if not component:
            # Skip empty segments from leading/duplicate slashes.
            continue
        current = current.Add(component)
        unique_urns.add(current)
def _MakeCacheInvariant(self, urn, token, age):
    """Returns an invariant key for an AFF4 object.

    The object will be cached based on this key. This function is specifically
    extracted to ensure that we encapsulate all security critical aspects of
    the AFF4 object so that objects do not leak across security boundaries.

    Args:
      urn: The urn of the object.
      token: The access token used to receive the object.
      age: The age policy used to build this object. Should be one
           of ALL_TIMES, NEWEST_TIME or a range.

    Returns:
      A key into the cache.
    """
    # Token and age are part of the key so differently-authorized or
    # differently-aged views never share a cache slot.
    key_parts = (utils.SmartStr(urn),
                 utils.SmartStr(token),
                 self.ParseAgeSpecification(age))
    return "%s:%s:%s" % key_parts
def _OpenWithLock(self, transaction, aff4_type=None, age=NEWEST_TIME,
                  lease_time=100):
    """Opens transaction.subject inside the transaction and takes its lease.

    Runs under a data store transaction (see OpenWithLock). Raises LockError
    if another holder's lease has not expired yet.
    """
    # Resolve the attributes through the transaction itself so the read is
    # part of the transactional unit.
    values = list(transaction.ResolveRegex(
        AFF4_PREFIXES, timestamp=self.ParseAgeSpecification(age)))
    local_cache = {rdfvalue.RDFURN(transaction.subject): values}

    # TODO(user): We should have a test here that open does not access the
    # data store.
    aff4_obj = self.Open(transaction.subject, aff4_type=aff4_type,
                         local_cache=local_cache, age=age,
                         mode="rw", follow_symlinks=False,
                         token=transaction.token)

    # Somebody else holds a still-valid lease on this object.
    leased_until = aff4_obj.Get(aff4_obj.Schema.LEASED_UNTIL)
    if leased_until and leased_until > rdfvalue.RDFDatetime().Now():
        raise LockError(transaction.subject)

    # Force a data_store lookup next.
    try:
        # Expire all entries in the cache for this urn (for all tokens, and
        # timestamps)
        self.cache.ExpirePrefix(utils.SmartStr(transaction.subject) + ":")
    except KeyError:
        pass

    # Take the lease: written both through the transaction (authoritative)
    # and onto the in-memory object so it stays consistent.
    new_lease_time = rdfvalue.RDFDatetime().FromSecondsFromEpoch(
        time.time() + lease_time)
    transaction.Set(aff4_obj.Schema.LEASED_UNTIL.predicate,
                    new_lease_time.SerializeToDataStore())
    aff4_obj.Set(aff4_obj.Schema.LEASED_UNTIL, new_lease_time)

    # We don't want the object to be dirty.
    aff4_obj._SyncAttributes()  # pylint: disable=protected-access

    aff4_obj.locked = True
    return aff4_obj
def OpenWithLock(self, urn, aff4_type=None, token=None,
                 age=NEWEST_TIME, blocking=True, blocking_lock_timeout=10,
                 blocking_sleep_interval=1, lease_time=100):
    """Open given urn and locks it.

    Opens an object and locks it for 'lease_time' seconds. OpenWithLock can
    only be used in 'with ...' statement. The lock is released when code
    execution leaves 'with ...' block.

    The urn is always opened in "rw" mode. Symlinks are not followed in
    OpenWithLock() due to possible race conditions.

    Args:
      urn: The urn to open.
      aff4_type: If this optional parameter is set, we raise an
          InstanciationError if the object exists and is not an instance of
          this type. This check is important when a different object can be
          stored in this location.
      token: The Security Token to use for opening this item.
      age: The age policy used to build this object. Should be one of
         NEWEST_TIME, ALL_TIMES or a time range given as a tuple (start, end)
         in microseconds since Jan 1st, 1970.
      blocking: When True, wait and repeatedly try to grab the lock.
      blocking_lock_timeout: Maximum wait time when sync is True.
      blocking_sleep_interval: Sleep time between lock grabbing attempts. Used
          when blocking is True.
      lease_time: Maximum time the object stays locked. Lock will be
          considered released when this time expires.

    Returns:
      Context manager to be used in 'with ...' statement.

    Raises:
      LockError: If the lease could not be taken (immediately when
          blocking=False, or after blocking_lock_timeout seconds otherwise).
    """
    timestamp = time.time()

    if urn is not None:
        urn = rdfvalue.RDFURN(urn)

    # Keep retrying the transactional lock grab until it succeeds or the
    # blocking timeout elapses.
    while True:
        try:
            obj = data_store.DB.RetryWrapper(
                urn, self._OpenWithLock, aff4_type=aff4_type, token=token,
                age=age, lease_time=lease_time)
            # When we open with lock, we should always use sync.
            return LockContextManager(obj, sync=True)
        except (data_store.TransactionError, LockError) as e:
            if not blocking or time.time() - timestamp > blocking_lock_timeout:
                raise LockError(e)
            else:
                time.sleep(blocking_sleep_interval)
def Open(self, urn, aff4_type=None, mode="r", ignore_cache=False,
         token=None, local_cache=None, age=NEWEST_TIME, follow_symlinks=True):
    """Opens the named object.

    This instantiates the object from the AFF4 data store.
    Note that the root aff4:/ object is a container for all other
    objects. Opening it for reading will instantiate a AFF4Volume instance,
    even if the row does not exist.

    The mode parameter specifies, how the object should be opened. A read only
    mode will raise when calling Set() on it, while a write only object will
    never read from the data store. Note that its impossible to open an object
    with pure write support (since we have no idea what type it should be
    without reading the data base) - use Create() instead for purely write
    mode.

    Args:
      urn: The urn to open.
      aff4_type: If this parameter is set, we raise an IOError if
          the object is not an instance of this type. This check is important
          when a different object can be stored in this location. If mode is
          "w", this parameter will determine the type of the object and is
          mandatory.
      mode: The mode to open the file with.
      ignore_cache: Forces a data store read.
      token: The Security Token to use for opening this item.
      local_cache: A dict containing a cache as returned by GetAttributes. If
          set, this bypasses the factory cache.
      age: The age policy used to build this object. Should be one of
         NEWEST_TIME, ALL_TIMES or a time range given as a tuple (start, end)
         in microseconds since Jan 1st, 1970.
      follow_symlinks: If object opened is a symlink, follow it.

    Returns:
      An AFF4Object instance.

    Raises:
      IOError: If the object is not of the required type.
      AttributeError: If the requested mode is incorrect.
    """
    if mode not in ["w", "r", "rw"]:
        raise AttributeError("Invalid mode %s" % mode)

    # Pure write mode cannot infer the stored type, so delegate to Create().
    if mode == "w":
        if aff4_type is None:
            raise AttributeError("Need a type to open in write only mode.")
        return self.Create(urn, aff4_type, mode=mode, token=token, age=age,
                           ignore_cache=ignore_cache, force_new_version=False)

    urn = rdfvalue.RDFURN(urn)

    if "r" in mode and (local_cache is None or urn not in local_cache):
        # Warm up the cache. The idea is to prefetch all the path components
        # in the same round trip and make sure this data is in cache, so that
        # as each AFF4 object is instantiated it can read attributes from
        # cache rather than round tripping to the data store.
        unique_urn = set()
        self._ExpandURNComponents(urn, unique_urn)
        local_cache = dict(
            self.GetAttributes(unique_urn,
                               age=age, ignore_cache=ignore_cache,
                               token=token))

    # Read the row from the table.
    result = AFF4Object(urn, mode=mode, token=token, local_cache=local_cache,
                        age=age, follow_symlinks=follow_symlinks)

    # Get the correct type. An empty row still opens as a plain AFF4Volume
    # container (see the docstring note on aff4:/).
    existing_type = result.Get(result.Schema.TYPE, default="AFF4Volume")
    if existing_type:
        result = result.Upgrade(existing_type)

    if (aff4_type is not None and
        not isinstance(result, AFF4Object.classes[aff4_type])):
        raise InstanciationError(
            "Object %s is of type %s, but required_type is %s" % (
                urn, result.__class__.__name__, aff4_type))

    return result
def MultiOpen(self, urns, mode="rw", token=None, aff4_type=None,
              age=NEWEST_TIME):
    """Opens a bunch of urns efficiently.

    Attributes for all urns (and their path components) are prefetched in a
    single data store round trip and shared as a local cache across the
    individual Open() calls.

    Yields:
      AFF4Object instances. Objects that fail to open are silently skipped;
      symlinks are collected and their targets resolved in a second pass.

    Raises:
      RuntimeError: If an invalid mode is given.
    """
    if mode not in ["w", "r", "rw"]:
        raise RuntimeError("Invalid mode %s" % mode)

    # We accept both lists and generators of urns
    urns = list(urns)

    # Fill up the cache with all the urns
    unique_urn = set()
    for urn in urns:
        self._ExpandURNComponents(urn, unique_urn)

    cache = dict(self.GetAttributes(unique_urn, token=token, age=age))

    symlinks = []
    for urn in urns:
        try:
            if urn in cache:
                obj = self.Open(urn, mode=mode, token=token, local_cache=cache,
                                aff4_type=aff4_type, age=age,
                                follow_symlinks=False)
                # Defer symlink targets to a second batched pass below,
                # rather than resolving them one at a time.
                target = obj.Get(obj.Schema.SYMLINK_TARGET)
                if target is not None:
                    symlinks.append(target)
                else:
                    yield obj
        except IOError:
            pass

    if symlinks:
        for obj in self.MultiOpen(symlinks, mode=mode, token=token,
                                  aff4_type=aff4_type, age=age):
            yield obj
  def OpenDiscreteVersions(self, urn, mode="r", ignore_cache=False, token=None,
                           local_cache=None, age=ALL_TIMES,
                           follow_symlinks=True):
    """Returns all the versions of the object as AFF4 objects.

    Args:
      urn: The urn to open.
      mode: The mode to open the file with.
      ignore_cache: Forces a data store read.
      token: The Security Token to use for opening this item.
      local_cache: A dict containing a cache as returned by GetAttributes. If
        set, this bypasses the factory cache.
      age: The age policy used to build this object. Should be one of
        ALL_TIMES or a time range
      follow_symlinks: If object opened is a symlink, follow it.

    Yields:
      An AFF4Object for each version.

    Raises:
      IOError: On bad open or wrong time range specified.

    This iterates through versions of an object, returning the newest version
    first, then each older version until the beginning of time.

    Note that versions are defined by changes to the TYPE attribute, and this
    takes the version between two TYPE attributes.

    In many cases as a user you don't want this, as you want to be returned an
    object with as many attributes as possible, instead of the subset of them
    that were Set between these two times.
    """
    if age == NEWEST_TIME or len(age) == 1:
      raise IOError("Bad age policy NEWEST_TIME for OpenDiscreteVersions.")
    if len(age) == 2:
      oldest_age = age[1]
    else:
      oldest_age = 0
    aff4object = FACTORY.Open(urn, mode=mode, ignore_cache=ignore_cache,
                              token=token, local_cache=local_cache, age=age,
                              follow_symlinks=follow_symlinks)

    # TYPE is always written last so we trust it to bound the version.
    # Iterate from newest to oldest.
    type_iter = aff4object.GetValuesForAttribute(aff4object.Schema.TYPE)
    version_list = [(t.age, str(t)) for t in type_iter]
    # Sentinel entry so the last (oldest) real version is bounded below by
    # oldest_age in the pairwise loop.
    version_list.append((oldest_age, None))

    for i in range(0, len(version_list)-1):
      # Each version spans (older TYPE timestamp, this TYPE timestamp].
      age_range = (version_list[i+1][0], version_list[i][0])
      # Create a subset of attributes for use in the new object that represents
      # this version.
      clone_attrs = {}
      for k, values in aff4object.synced_attributes.iteritems():
        reduced_v = []
        for v in values:
          if v.age > age_range[0] and v.age <= age_range[1]:
            reduced_v.append(v)
        clone_attrs.setdefault(k, []).extend(reduced_v)

      # Instantiate the class recorded by this version's TYPE value.
      obj_cls = AFF4Object.classes[version_list[i][1]]
      new_obj = obj_cls(urn, mode=mode, parent=aff4object.parent,
                        clone=clone_attrs, token=token, age=age_range,
                        local_cache=local_cache,
                        follow_symlinks=follow_symlinks)
      new_obj.Initialize()  # This is required to set local attributes.
      yield new_obj
def Stat(self, urns, token=None):
"""Returns metadata about all urns.
Currently the metadata include type, and last update time.
Args:
urns: The urns of the objects to open.
token: The token to use.
Yields:
A dict of metadata.
Raises:
RuntimeError: A string was passed instead of an iterable.
"""
if isinstance(urns, basestring):
raise RuntimeError("Expected an iterable, not string.")
for subject, values in data_store.DB.MultiResolveRegex(
urns, ["aff4:type"], token=token).items():
yield dict(urn=rdfvalue.RDFURN(subject), type=values[0])
  def Create(self, urn, aff4_type, mode="w", token=None, age=NEWEST_TIME,
             ignore_cache=False, force_new_version=True):
    """Creates the urn if it does not already exist, otherwise opens it.

    If the urn exists and is of a different type, this will also promote it to
    the specified type.

    Args:
      urn: The object to create.
      aff4_type: The desired type for this object.
      mode: The desired mode for this object.
      token: The Security Token to use for opening this item.
      age: The age policy used to build this object. Only makes sense when mode
        has "r".
      ignore_cache: Bypass the aff4 cache.
      force_new_version: Forces the creation of a new object in the data_store.

    Returns:
      An AFF4 object of the desired type and mode.

    Raises:
      AttributeError: If the mode is invalid.
    """
    if mode not in ["w", "r", "rw"]:
      raise AttributeError("Invalid mode %s" % mode)

    if urn is not None:
      urn = rdfvalue.RDFURN(urn)

    if "r" in mode:
      # Check to see if an object already exists.
      try:
        existing = self.Open(
            urn, mode=mode, token=token, age=age,
            ignore_cache=ignore_cache)

        result = existing.Upgrade(aff4_type)
        # Only force a new version when the stored TYPE actually differs from
        # the requested type.
        if force_new_version and existing.Get(result.Schema.TYPE) != aff4_type:
          result.ForceNewVersion()
        return result
      except IOError:
        # EAFP: the object does not exist yet - fall through and create it.
        pass

    # Object does not exist, just make it.
    cls = AFF4Object.classes[str(aff4_type)]
    result = cls(urn, mode=mode, token=token, age=age)
    result.Initialize()
    if force_new_version:
      result.ForceNewVersion()

    return result
def Delete(self, urn, token=None, limit=1000):
"""Drop all the information about this object.
DANGEROUS! This recursively deletes all objects contained within the
specified URN.
Args:
urn: The object to remove.
token: The Security Token to use for opening this item.
limit: The number of objects to remove.
Raises:
RuntimeError: If the urn is too short. This is a safety check to ensure
the root is not removed.
"""
urn = rdfvalue.RDFURN(urn)
if len(urn.Path()) < 1:
raise RuntimeError("URN %s too short. Please enter a valid URN" % urn)
# Get all the children of this URN and delete them all.
logging.info("Recursively removing AFF4 Object %s", urn)
fd = FACTORY.Create(urn, "AFF4Volume", mode="rw", token=token)
count = 0
for child in fd.ListChildren():
logging.info("Removing child %s", child)
self.Delete(child, token=token)
count += 1
if count >= limit:
logging.info("Object limit reached, there may be further objects "
"to delete.")
data_store.DB.DeleteSubject(fd.urn, token=token)
self._DeleteChildFromIndex(fd.urn, token)
count += 1
logging.info("Removed %s objects", count)
# Ensure this is removed from the cache as well.
self.Flush()
def RDFValue(self, name):
return rdfvalue.RDFValue.classes.get(name)
def AFF4Object(self, name):
return AFF4Object.classes.get(name)
def Merge(self, first, second):
"""Merge two AFF4 objects and return a new object.
Args:
first: The first object (Can be None).
second: The second object (Can be None).
Returns:
A new object with the type of the latest object, but with both first and
second's attributes.
"""
if first is None: return second
if second is None: return first
# Make first the most recent object, and second the least recent:
if first.Get("type").age < second.Get("type").age:
first, second = second, first
# Merge the attributes together.
for k, v in second.synced_attributes.iteritems():
first.synced_attributes.setdefault(k, []).extend(v)
for k, v in second.new_attributes.iteritems():
first.new_attributes.setdefault(k, []).extend(v)
return first
  def MultiListChildren(self, urns, token=None, limit=None, age=NEWEST_TIME):
    """Lists bunch of directories efficiently.

    Args:
      urns: List of urns to list children.
      token: Security token.
      limit: Max number of children to list (NOTE: this is per urn).
      age: The age of the items to retrieve. Should be one of ALL_TIMES,
        NEWEST_TIME or a range.

    Returns:
      A dict keyed by subjects, with values being a list of children urns of a
      given subject.
    """
    index_prefix = "index:dir/"
    result = {}

    for subject, values in data_store.DB.MultiResolveRegex(
        urns, index_prefix + ".+", token=token,
        timestamp=Factory.ParseAgeSpecification(age),
        limit=limit).iteritems():
      subject_result = []
      for predicate, _, timestamp in values:
        # The child's name is encoded in the predicate after the index prefix;
        # append it to the parent urn to rebuild the child urn.
        urn = rdfvalue.RDFURN(subject).Add(predicate[len(index_prefix):])
        urn.age = rdfvalue.RDFDatetime(timestamp)
        subject_result.append(urn)
      result[subject] = subject_result

    return result
  def Flush(self):
    """Flushes the data store and both factory-level object caches."""
    data_store.DB.Flush()
    self.cache.Flush()
    self.intermediate_cache.Flush()
  def UpdateNotificationRules(self):
    """Reloads notification rules from the aff4:/config/aff4_rules volume."""
    fd = self.Open(rdfvalue.RDFURN("aff4:/config/aff4_rules"), mode="r",
                   token=self.root_token)
    # Only children that are AFF4NotificationRule instances are kept.
    self.notification_rules = [rule for rule in fd.OpenChildren()
                               if isinstance(rule, AFF4NotificationRule)]
  def NotifyWriteObject(self, aff4_object):
    """Applies all notification rules to a just-written object.

    The cached rule list is refreshed when it is older than the
    AFF4.notification_rules_cache_age config value (in seconds).

    Args:
      aff4_object: The AFF4 object that was written.
    """
    current_time = time.time()
    if (current_time - self.notification_rules_timestamp >
        config_lib.CONFIG["AFF4.notification_rules_cache_age"]):
      self.notification_rules_timestamp = current_time
      self.UpdateNotificationRules()

    for rule in self.notification_rules:
      try:
        rule.OnWriteObject(aff4_object)
      except Exception, e:  # pylint: disable=broad-except
        # A misbehaving rule must not prevent the write from completing.
        logging.error("Error while applying the rule: %s", e)
class Attribute(object):
  """AFF4 schema attributes are instances of this class."""

  description = ""

  # A global registry of attributes by name. This ensures we do not accidentally
  # define the same attribute with conflicting types.
  PREDICATES = {}

  # A human readable name to be used in filter queries.
  NAMES = {}

  def __init__(self, predicate, attribute_type=rdfvalue.RDFString,
               description="", name=None, _copy=False, default=None, index=None,
               versioned=True, lock_protected=False,
               creates_new_object_version=True):
    """Constructor.

    Args:
      predicate: The name of this attribute - must look like a URL
        (e.g. aff4:contains). Will be used to store the attribute.
      attribute_type: The RDFValue type of this attributes.
      description: A one line description of what this attribute represents.
      name: A human readable name for the attribute to be used in filters.
      _copy: Used internally to create a copy of this object without
        registering.
      default: A default value will be returned if the attribute is not set on
        an object. This can be a constant or a callback which receives the fd
        itself as an arg.
      index: The name of the index to use for this attribute. If None, the
        attribute will not be indexed.
      versioned: Should this attribute be versioned? Non-versioned attributes
        always overwrite other versions of the same attribute.
      lock_protected: If True, this attribute may only be set if the object was
        opened via OpenWithLock().
      creates_new_object_version: If this is set, a write to this attribute
        will also write a new version of the parent attribute. This should be
        False for attributes where lots of entries are collected like logs.

    Raises:
      RuntimeError: If the predicate was already registered with a different
        attribute_type.
    """
    self.name = name
    self.predicate = predicate
    self.attribute_type = attribute_type
    self.description = description
    self.default = default
    self.index = index
    self.versioned = versioned
    self.lock_protected = lock_protected
    self.creates_new_object_version = creates_new_object_version

    # Field names can refer to a specific component of an attribute
    self.field_names = []

    if not _copy:
      # Check the attribute registry for conflicts
      try:
        old_attribute = Attribute.PREDICATES[predicate]
        if old_attribute.attribute_type != attribute_type:
          msg = "Attribute %s defined with conflicting types (%s, %s)" % (
              predicate, old_attribute.attribute_type.__class__.__name__,
              attribute_type.__class__.__name__)
          logging.error(msg)
          raise RuntimeError(msg)
      except KeyError:
        pass

      # Register
      self.PREDICATES[predicate] = self
      if name:
        self.NAMES[name] = self

  def Copy(self):
    """Return a copy without registering in the attribute registry."""
    return Attribute(self.predicate, self.attribute_type, self.description,
                     self.name, _copy=True)

  def __call__(self, *args, **kwargs):
    """A shortcut allowing us to instantiate a new type from an attribute."""
    result = self.attribute_type(*args, **kwargs)
    # Tag the value with the attribute it belongs to.
    result.attribute_instance = self

    return result

  def __str__(self):
    return self.predicate

  def __repr__(self):
    return "<Attribute(%s, %s)>" %(self.name, self.predicate)

  def __hash__(self):
    return hash(self.predicate)

  def __eq__(self, other):
    # Attributes compare equal to anything whose str() matches the predicate.
    return str(self.predicate) == str(other)

  def __ne__(self, other):
    return str(self.predicate) != str(other)

  def __getitem__(self, item):
    # attribute["a.b"] returns an unregistered copy scoped to the dotted
    # sub-field path.
    result = self.Copy()
    result.field_names = item.split(".")

    return result

  def Fields(self):
    """Returns the field names of the underlying RDFValue type."""
    return self.attribute_type.Fields()

  @classmethod
  def GetAttributeByName(cls, name):
    """Looks up a registered attribute by its human readable name.

    Args:
      name: The attribute name, optionally with a dotted sub-field suffix
        (e.g. "pathspec.path").

    Returns:
      The registered Attribute, scoped to the sub-field if one was given.

    Raises:
      AttributeError: If no attribute is registered under this name.
    """
    # Support attribute names with a . in them:
    try:
      if "." in name:
        name, field = name.split(".", 1)
        return cls.NAMES[name][field]

      return cls.NAMES[name]
    except KeyError:
      raise AttributeError("Invalid attribute")

  def GetRDFValueType(self):
    """Returns this attribute's RDFValue class."""
    result = self.attribute_type
    for field_name in self.field_names:
      # Support the new semantic protobufs.
      if issubclass(result, rdfvalue.RDFProtoStruct):
        result = result.type_infos.get(field_name, rdfvalue.RDFString).type
      else:
        # TODO(user): Remove and deprecate.
        # Support for the old RDFProto.
        result = result.rdf_map.get(field_name, rdfvalue.RDFString)

    return result

  def GetValues(self, fd):
    """Return the values for this attribute as stored in an AFF4Object."""
    # "result" doubles as a seen-any-values sentinel for the default logic
    # below. NOTE(review): a falsy *last* value would also trigger the
    # default to be yielded - confirm this is intended before relying on it.
    result = False
    for result in fd.new_attributes.get(self, []):
      # We need to interpolate sub fields in this rdfvalue.
      if self.field_names:
        for x in result.GetFields(self.field_names):
          yield x

      else:
        yield result

    for result in fd.synced_attributes.get(self, []):
      # Synced values are LazyDecoders; decode on demand.
      result = result.ToRDFValue()

      # We need to interpolate sub fields in this rdfvalue.
      if self.field_names:
        for x in result.GetFields(self.field_names):
          yield x
      else:
        yield result

    if not result:
      default = self.GetDefault(fd)
      if default is not None:
        yield default

  def GetDefault(self, fd=None, default=None):
    """Returns a default attribute if it is not set."""
    # A callable default receives the fd itself.
    if callable(self.default):
      return self.default(fd)

    # A constant default is wrapped in this attribute's RDFValue type.
    if self.default is not None:
      return self(self.default)

    if isinstance(default, rdfvalue.RDFValue):
      default = default.Copy()
      default.attribute_instance = self

    return default
class SubjectAttribute(Attribute):
  """An attribute which virtualises the subject."""

  def __init__(self):
    Attribute.__init__(self, "aff4:subject",
                       rdfvalue.Subject, "A subject pseodo attribute",
                       "subject")

  def GetValues(self, fd):
    # The subject is synthesised from the object's own urn rather than being
    # read from the attribute caches.
    return [rdfvalue.Subject(fd.urn)]
class ClassProperty(property):
  """A property which comes from the class object."""

  def __get__(self, _, owner):
    # The wrapped fget is a classmethod object; bind it to the owning class
    # (ignoring any instance) and call it, so access on the class works too.
    return self.fget.__get__(None, owner)()
class ClassInstantiator(property):
  """A property which instantiates the class on getting."""

  def __get__(self, _, owner):
    # Call the stored factory on every access, returning a fresh instance.
    return self.fget()
class LazyDecoder(object):
  """An object which delays serialize and unserialize as late as possible.

  The current implementation requires the proxied object to be immutable.
  """

  def __init__(self, rdfvalue_cls=None, serialized=None, age=None,
               decoded=None):
    self.rdfvalue_cls = rdfvalue_cls
    self.serialized = serialized
    self.age = age
    self.decoded = decoded

  def ToRDFValue(self):
    """Decodes the serialized form on first use and caches the result."""
    cached = self.decoded
    if cached is None:
      cached = self.rdfvalue_cls(initializer=self.serialized, age=self.age)
      self.decoded = cached
    return cached

  def FromRDFValue(self):
    """Returns the raw serialized form without triggering a decode."""
    return self.serialized
class AFF4Object(object):
  """Base class for all objects."""

  # We are a registered class.
  __metaclass__ = registry.MetaclassRegistry
  include_plugins_as_attributes = True

  # This property is used in GUIs to define behaviours. These can take arbitrary
  # values as needed. Behaviours are read only and set in the class definition.
  _behaviours = frozenset()

  # Should this object be synced back to the data store.
  _dirty = False

  # Is this object currently locked.
  locked = False

  @ClassProperty
  @classmethod
  def behaviours(cls):  # pylint: disable=g-bad-name
    return cls._behaviours

  # We define the parts of the schema for each AFF4 Object as an internal
  # class. As new objects extend this, they can add more attributes to their
  # schema by extending their parents. Note that the class must be named
  # SchemaCls.
  class SchemaCls(object):
    """The standard AFF4 schema."""

    # NOTE: we don't version the type in order not to accumulate its versions
    # during blind write operations.
    TYPE = Attribute("aff4:type", rdfvalue.RDFString,
                     "The name of the AFF4Object derived class.", "type")

    SUBJECT = SubjectAttribute()

    STORED = Attribute("aff4:stored", rdfvalue.RDFURN,
                       "The AFF4 container inwhich this object is stored.")

    LAST = Attribute("metadata:last", rdfvalue.RDFDatetime,
                     "The last time any attribute of this object was written.",
                     creates_new_object_version=False)

    LABEL = Attribute("aff4:labels", grr_rdf.LabelList,
                      "Any object can have labels applied to it.")

    LEASED_UNTIL = Attribute("aff4:lease", rdfvalue.RDFDatetime,
                             "The time until which the object is leased by a "
                             "particular caller.", versioned=False,
                             creates_new_object_version=False)

    def ListAttributes(self):
      """Yields every Attribute instance defined on this schema."""
      for attr in dir(self):
        attr = getattr(self, attr)
        if isinstance(attr, Attribute):
          yield attr

    def GetAttribute(self, name):
      """Returns the attribute matching either name or predicate, else None."""
      for i in self.ListAttributes():
        # Attributes are accessible by predicate or name
        if i.name == name or i.predicate == name:
          return i

    def __getattr__(self, attr):
      """For unknown attributes just return None.

      Often the actual object returned is not the object that is expected. In
      those cases attempting to retrieve a specific named attribute will raise,
      e.g.:

      fd = aff4.FACTORY.Open(urn)
      fd.Get(fd.Schema.SOME_ATTRIBUTE, default_value)

      This simply ensures that the default is chosen.

      Args:
        attr: Some ignored attribute.
      """
      return None

  # Make sure that when someone references the schema, they receive an instance
  # of the class.
  @property
  def Schema(self):  # pylint: disable=g-bad-name
    return self.SchemaCls()

  def __init__(self, urn, mode="r", parent=None, clone=None, token=None,
               local_cache=None, age=NEWEST_TIME, follow_symlinks=True):
    if urn is not None:
      urn = rdfvalue.RDFURN(urn)
    self.urn = urn
    self.mode = mode
    self.parent = parent
    self.token = token
    self.age_policy = age
    self.follow_symlinks = follow_symlinks
    self.lock = utils.PickleableLock()

    # This flag will be set whenever an attribute is changed that has the
    # creates_new_object_version flag set.
    self._new_version = False

    # Mark out attributes to delete when Flushing()
    self._to_delete = set()

    # We maintain two attribute caches - self.synced_attributes reflects the
    # attributes which are synced with the data_store, while self.new_attributes
    # are new attributes which still need to be flushed to the data_store. When
    # this object is instantiated we populate self.synced_attributes with the
    # data_store, while the finish method flushes new changes.
    if clone is not None:
      if isinstance(clone, dict):
        # Just use these as the attributes, do not go to the data store. This is
        # a quick way of creating an object with data which was already fetched.
        self.new_attributes = {}
        self.synced_attributes = clone

      elif isinstance(clone, AFF4Object):
        # We were given another object to clone - we do not need to access the
        # data_store now.
        self.new_attributes = clone.new_attributes.copy()
        self.synced_attributes = clone.synced_attributes.copy()

      else:
        raise RuntimeError("Cannot clone from %s." % clone)
    else:
      self.new_attributes = {}
      self.synced_attributes = {}

      if "r" in mode:
        if local_cache:
          try:
            for attribute, value, ts in local_cache[utils.SmartUnicode(urn)]:
              self.DecodeValueFromAttribute(attribute, value, ts)
          except KeyError:
            pass
        else:
          # Populate the caches from the data store.
          for urn, values in FACTORY.GetAttributes([urn], age=age,
                                                   token=self.token):
            for attribute_name, value, ts in values:
              self.DecodeValueFromAttribute(attribute_name, value, ts)

    if clone is None:
      self.Initialize()

  def Initialize(self):
    """The method is called after construction to initialize the object.

    This will be called after construction, and each time the object is
    unserialized from the datastore.

    An AFF4 object contains attributes which can be populated from the
    database. This method is called to obtain a fully fledged object from
    a collection of attributes.
    """

  def DecodeValueFromAttribute(self, attribute_name, value, ts):
    """Given a serialized value, decode the attribute.

    Only attributes which have been previously defined are permitted.

    Args:
      attribute_name: The string name of the attribute.
      value: The serialized attribute value.
      ts: The timestamp of this attribute.
    """
    try:
      # Get the Attribute object from our schema.
      attribute = Attribute.PREDICATES[attribute_name]
      cls = attribute.attribute_type
      # Decoding is deferred - the value stays serialized until first use.
      self._AddAttributeToCache(attribute, LazyDecoder(cls, value, ts),
                                self.synced_attributes)
    except KeyError:
      if not attribute_name.startswith("index:"):
        logging.debug("Attribute %s not defined, skipping.", attribute_name)
    except (ValueError, rdfvalue.DecodeError):
      logging.debug("%s: %s invalid encoding. Skipping.",
                    self.urn, attribute_name)

  def _AddAttributeToCache(self, attribute_name, value, cache):
    """Helper to add a new attribute to a cache."""
    cache.setdefault(attribute_name, []).append(value)

  def CheckLease(self):
    """Raises LockError when a held lease has already expired."""
    if self.locked:
      leased_until = self.Get(self.Schema.LEASED_UNTIL)
      now = rdfvalue.RDFDatetime().Now()
      if leased_until < now:
        raise LockError("Lease for this object is expired "
                        "(leased until %s, now %s)!" % (leased_until, now))

  def UpdateLease(self, duration):
    """Updates the lease and flushes the object.

    The lease is set to expire after the "duration" time from the present
    moment.

    This method is supposed to be used when operation that requires locking
    may run for a time that exceeds the lease time specified in OpenWithLock().
    See flows/hunts locking for an example.

    Args:
      duration: Integer number of seconds. Lease expiry time will be set
        to "time.time() + duration".

    Raises:
      LockError: if the object is not currently locked or the lease has
        expired.
    """
    if not self.locked:
      raise LockError(
          "Object must be locked to update the lease: %s." % self.urn)

    # Check that current lease has not expired yet
    self.CheckLease()

    self.Set(
        self.Schema.LEASED_UNTIL,
        rdfvalue.RDFDatetime().FromSecondsFromEpoch(time.time() + duration))
    self.Flush()

  def Flush(self, sync=True):
    """Syncs this object with the data store, maintaining object validity."""
    self.CheckLease()
    self._WriteAttributes(sync=sync)
    self._SyncAttributes()

    if self.parent:
      self.parent.Flush(sync=sync)

  def Close(self, sync=True):
    """Close and destroy the object.

    This is similar to Flush, but does not maintain object validity. Hence the
    object should not be interacted with after Close().

    Args:
      sync: Write the attributes synchronously to the data store.

    Raises:
      LockError: The lease for this object has expired.
    """
    self.CheckLease()

    if self.locked:
      # Zero out the lease so other callers can acquire the lock immediately.
      self.Set(self.Schema.LEASED_UNTIL, rdfvalue.RDFDatetime(0))

    self._WriteAttributes(sync=self.locked or sync)

    if self.parent:
      self.parent.Close(sync=sync)

    # Interacting with a closed object is a bug. We need to catch this ASAP so
    # we remove all mode permissions from this object.
    self.mode = ""

  @utils.Synchronized
  def _WriteAttributes(self, sync=True):
    """Write the dirty attributes to the data store."""
    # If the object is not opened for writing we do not need to flush it to the
    # data_store.
    if "w" not in self.mode:
      return

    if self.urn is None:
      raise RuntimeError("Storing of anonymous AFF4 objects not supported.")

    to_set = {}
    for attribute_name, value_array in self.new_attributes.iteritems():
      to_set_list = to_set.setdefault(attribute_name, [])
      for value in value_array:
        to_set_list.append((value.SerializeToDataStore(), value.age))

    if self._dirty:
      # We determine this object has a new version only if any of the versioned
      # attributes have changed. Non-versioned attributes do not represent a new
      # object version. The type of an object is versioned and represents a
      # version point in the life of the object.
      if self._new_version:
        to_set[self.Schema.TYPE] = [
            (rdfvalue.RDFString(self.__class__.__name__).SerializeToDataStore(),
             rdfvalue.RDFDatetime().Now())]

      # Write the attributes to the Factory cache.
      FACTORY.SetAttributes(self.urn, to_set, self._to_delete, sync=sync,
                            token=self.token)

      # Notify the factory that this object got updated.
      FACTORY.NotifyWriteObject(self)

  @utils.Synchronized
  def _SyncAttributes(self):
    """Sync the new attributes to the synced attribute cache.

    This maintains object validity.
    """
    # This effectively moves all the values from the new_attributes to the
    # _attributes caches.
    for attribute, value_array in self.new_attributes.iteritems():
      if not attribute.versioned:
        # Non-versioned attributes keep only their single current value.
        value = value_array[0]
        self.synced_attributes[attribute] = [LazyDecoder(decoded=value,
                                                         age=value.age)]

      else:
        synced_value_array = self.synced_attributes.setdefault(attribute, [])
        for value in value_array:
          synced_value_array.append(LazyDecoder(decoded=value, age=value.age))

        # Keep versioned values ordered newest first.
        synced_value_array.sort(key=lambda x: x.age, reverse=True)

    self.new_attributes = {}
    self._to_delete.clear()
    self._dirty = False
    self._new_version = False

  def _CheckAttribute(self, attribute, value):
    """Check that the value is of the expected type.

    Args:
      attribute: An instance of Attribute().
      value: An instance of RDFValue.

    Raises:
      ValueError: when the value is not of the expected type.
      AttributeError: When the attribute is not of type Attribute().
    """
    if not isinstance(attribute, Attribute):
      raise AttributeError("Attribute %s must be of type aff4.Attribute()",
                           attribute)

    if not isinstance(value, attribute.attribute_type):
      raise ValueError("Value for attribute %s must be of type %s()",
                       attribute, attribute.attribute_type.__name__)

  def Copy(self, to_attribute, from_fd, from_attribute):
    """Copies all values of from_fd's from_attribute onto this object."""
    values = from_fd.GetValuesForAttribute(from_attribute)
    for v in values:
      self.AddAttribute(to_attribute, v, age=v.age)

  def Set(self, attribute, value=None):
    """Set an attribute on this object.

    Set() is now a synonym for AddAttribute() since attributes are never
    deleted.

    Args:
      attribute: The attribute to set.
      value: The new value for this attribute.
    """
    self.AddAttribute(attribute, value)

  def AddAttribute(self, attribute, value=None, age=None):
    """Add an additional attribute to this object.

    If value is None, attribute is expected to be already initialized with a
    value. For example:

    fd.AddAttribute(fd.Schema.CONTAINS("some data"))

    Args:
      attribute: The attribute name or an RDFValue derived from the attribute.
      value: The value the attribute will be set to.
      age: Age (timestamp) of the attribute. If None, current time is used.

    Raises:
      IOError: If this object is read only.
    """
    if "w" not in self.mode:
      raise IOError("Writing attribute %s to read only object." % attribute)

    if value is None:
      # Single-arg form: the value carries its own attribute instance.
      value = attribute
      attribute = value.attribute_instance

    # Check if this object should be locked in order to add the attribute.
    # NOTE: We don't care about locking when doing blind writes.
    if self.mode != "w" and attribute.lock_protected and not self.locked:
      raise IOError("Object must be locked to write attribute %s." % attribute)

    self._CheckAttribute(attribute, value)

    # Does this represent a new version?
    if attribute.versioned:
      if attribute.creates_new_object_version:
        self._new_version = True

      # Update the time of this new attribute.
      if age:
        value.age = age
      else:
        value.age.Now()

    # Non-versioned attributes always replace previous versions and get written
    # at the earliest timestamp (so they appear in all objects).
    else:
      self._to_delete.add(attribute)
      value.age = 0

    self._AddAttributeToCache(attribute, value, self.new_attributes)
    self._dirty = True

  @utils.Synchronized
  def DeleteAttribute(self, attribute):
    """Clears the attribute from this object."""
    if "w" not in self.mode:
      raise IOError("Deleting attribute %s from read only object." % attribute)

    # Check if this object should be locked in order to delete the attribute.
    # NOTE: We don't care about locking when doing blind writes.
    if self.mode != "w" and attribute.lock_protected and not self.locked:
      raise IOError("Object must be locked to delete attribute %s." % attribute)

    if attribute in self.synced_attributes:
      self._to_delete.add(attribute)
      del self.synced_attributes[attribute]

    if attribute in self.new_attributes:
      del self.new_attributes[attribute]

    # Does this represent a new version?
    if attribute.versioned and attribute.creates_new_object_version:
      self._new_version = True

    self._dirty = True

  def IsAttributeSet(self, attribute):
    """Determine if the attribute is set.

    Args:
      attribute: The attribute to check.

    Returns:
      True if set, otherwise False.

    Checking Get against None doesn't work as Get will return a default
    attribute value. This determines if the attribute has been manually set.
    """
    return (attribute in self.synced_attributes or
            attribute in self.new_attributes)

  def Get(self, attribute, default=None):
    """Gets the attribute from this object."""
    if attribute is None:
      return default

    # Allow the user to specify the attribute by name.
    elif isinstance(attribute, str):
      attribute = Attribute.GetAttributeByName(attribute)

    # We can't read attributes from the data_store unless read mode was
    # specified. It is ok to read new attributes though.
    if "r" not in self.mode and (attribute not in self.new_attributes and
                                 attribute not in self.synced_attributes):
      raise IOError(
          "Fetching %s from object not opened for reading." % attribute)

    # Only the first (newest) value is returned.
    for result in self.GetValuesForAttribute(attribute, only_one=True):
      try:
        # The attribute may be a naked string or int - i.e. not an RDFValue at
        # all.
        result.attribute_instance = attribute
      except AttributeError:
        pass

      return result

    return attribute.GetDefault(self, default)

  def GetValuesForAttribute(self, attribute, only_one=False):
    """Returns a list of values from this attribute."""
    if not only_one and self.age_policy == NEWEST_TIME:
      raise RuntimeError("Attempting to read all attribute versions for an "
                         "object opened for NEWEST_TIME. This is probably "
                         "not what you want.")

    if attribute is None:
      return []

    elif isinstance(attribute, basestring):
      attribute = Attribute.GetAttributeByName(attribute)

    return attribute.GetValues(self)

  def Update(self, attribute=None, user=None, priority=None):
    """Requests the object refresh an attribute from the Schema."""

  def Upgrade(self, aff4_class):
    """Upgrades this object to the type specified.

    AFF4 Objects can be upgraded on the fly to other type - As long as the new
    type is derived from the current type. This feature allows creation of
    placeholder objects which can later be upgraded to the fully featured
    object.

    Note: It is not allowed to downgrade an object if that would result in a
    loss of information (since the new object has a smaller schema). This method
    tries to store the new object with its new attributes and will fail if any
    attributes can not be mapped.

    Args:
      aff4_class: A string representing the new class.

    Returns:
      an instance of the new class with all the same attributes as this current
      object.

    Raises:
      AttributeError: When the new object can not accept some of the old
        attributes.
      InstanciationError: When we cannot instantiate the object type class.
    """
    # We are already of the required type
    if self.__class__.__name__ == aff4_class:
      return self

    # Instantiate the right type
    cls = self.classes.get(str(aff4_class))
    if cls is None:
      raise InstanciationError("Could not instantiate %s" % aff4_class)

    # It's not allowed to downgrade the object
    if isinstance(self, cls):
      # TODO(user): check what we should do here:
      #     1) Nothing
      #     2) raise
      #     3) return self
      # Option 3) seems ok, but we need to be sure that we don't use
      # Create(mode='r') anywhere where code actually expects the object to be
      # downgraded.
      return self

    # NOTE: It is possible for attributes to become inaccessible here if the old
    # object has an attribute which the new object does not have in its
    # schema. The values of these attributes will not be available any longer in
    # the new object - usually because old attributes do not make sense in the
    # context of the new object.

    # Instantiate the class
    result = cls(self.urn, mode=self.mode, clone=self, parent=self.parent,
                 token=self.token, age=self.age_policy,
                 follow_symlinks=self.follow_symlinks)
    result.Initialize()

    return result

  def ForceNewVersion(self):
    """Marks the object dirty so the next flush writes a new TYPE version."""
    self._dirty = True
    self._new_version = True

  def __repr__(self):
    return "<%s@%X = %s>" % (self.__class__.__name__, hash(self), self.urn)

  # The following are used to ensure a bunch of AFF4Objects can be sorted on
  # their URNs.
  def __gt__(self, other):
    return self.urn > other

  def __lt__(self, other):
    return self.urn < other
# This will register all classes into this modules's namespace regardless of
# where they are defined. This allows us to decouple the place of definition of
# a class (which might be in a plugin) from its use which will reference this
# module.
# NOTE: the MetaclassRegistry metaclass writes subclasses into this mapping,
# so binding it to globals() exposes every AFF4Object subclass here.
AFF4Object.classes = globals()
class AttributeExpression(lexer.Expression):
  """An expression which is used to filter attributes."""

  def SetAttribute(self, attribute):
    """Checks that attribute is a valid Attribute() instance."""
    # Grab the attribute registered for this name
    self.attribute = attribute
    self.attribute_obj = Attribute.GetAttributeByName(attribute)
    # NOTE(review): Attribute.GetAttributeByName raises AttributeError for
    # unknown names rather than returning None, so this check looks
    # unreachable - confirm before relying on ParseError being raised here.
    if self.attribute_obj is None:
      raise lexer.ParseError("Attribute %s not defined" % attribute)

  def SetOperator(self, operator):
    """Sets the operator for this expression."""
    self.operator = operator
    # Find the appropriate list of operators for this attribute
    attribute_type = self.attribute_obj.GetRDFValueType()
    operators = attribute_type.operators

    # Do we have such an operator?
    self.number_of_args, self.operator_method = operators.get(
        operator, (0, None))
    if self.operator_method is None:
      raise lexer.ParseError("Operator %s not defined on attribute '%s'" % (
          operator, self.attribute))

    # Resolve the operator method name to the bound callable.
    self.operator_method = getattr(attribute_type, self.operator_method)

  def Compile(self, filter_implemention):
    """Returns the data_store filter implementation from the attribute."""
    return self.operator_method(self.attribute_obj,
                                filter_implemention, *self.args)
class AFF4QueryParser(lexer.SearchParser):
  """Parser for AFF4 query strings; produces AttributeExpression nodes."""
  expression_cls = AttributeExpression
class AFF4Volume(AFF4Object):
  """Volumes contain other objects.

  The AFF4 concept of a volume abstracts away how objects are stored. We simply
  define an AFF4 volume as a container of other AFF4 objects. The volume may
  implement any storage mechanism it likes, including virtualizing the objects
  contained within it.
  """
  _behaviours = frozenset(["Container"])

  class SchemaCls(AFF4Object.SchemaCls):
    CONTAINS = Attribute("aff4:contains", rdfvalue.RDFURN,
                         "An AFF4 object contained in this container.")

  def Query(self, filter_string="", filter_obj=None, limit=1000,
            age=NEWEST_TIME):
    """A way to query the collection based on a filter object.

    Args:
      filter_string: An optional filter applied to our members. The filter
        string should correspond to the syntax described in lexer.py.
      filter_obj: An optional compiled filter (as obtained from
        AttributeExpression.Compile()).
      limit: A limit on the number of returned rows.
      age: The age of the objects to retrieve. Should be one of ALL_TIMES,
        NEWEST_TIME or a range.

    Returns:
      A generator of all children which match the filter.
    """
    # If no filtering is required we can just use OpenChildren.
    if not filter_obj and not filter_string:
      return self.OpenChildren(limit=limit, age=age)

    if filter_obj is None and filter_string:
      # Parse the query string
      ast = AFF4QueryParser(filter_string).Parse()

      # Query our own data store
      filter_obj = ast.Compile(data_store.DB.filter)

    # Restrict the query to rows under our own URN.
    result_set = data_store.DB.Query(
        [], filter_obj, limit=limit, subject_prefix=self.urn, token=self.token)

    # NOTE(review): each match appears to carry (value, timestamp) tuples
    # under the "subject" key, so m["subject"][0][0] extracts the subject
    # URN -- confirm against the data_store.DB.Query row format.
    result = data_store.ResultSet(
        self.OpenChildren([m["subject"][0][0] for m in result_set],
                          limit=limit,
                          age=age))
    result.total_count = result_set.total_count
    return result

  def OpenMember(self, path, mode="r"):
    """Opens the member which is contained in us.

    Args:
      path: A string relative to our own URN or an absolute urn.
      mode: Mode for object.

    Returns:
      an AFF4Object instance.

    Raises:
      InstanciationError: If we are unable to open the member (e.g. it does not
        already exist.)
    """
    if isinstance(path, rdfvalue.RDFURN):
      child_urn = path
    else:
      child_urn = self.urn.Add(path)

    # Read the row from the table as a generic AFF4Object first.
    result = AFF4Object(child_urn, mode=mode, token=self.token)

    # Get the correct type.
    aff4_type = result.Get(result.Schema.TYPE)
    if aff4_type:
      # Try to get the container: upgrade to the stored concrete type.
      return result.Upgrade(aff4_type)

    raise InstanciationError("Path %s not found" % path)

  def ListChildren(self, limit=1000000, age=NEWEST_TIME):
    """Yields RDFURNs of all the children of this object.

    Args:
      limit: Total number of items we will attempt to retrieve.
      age: The age of the items to retrieve. Should be one of ALL_TIMES,
        NEWEST_TIME or a range.

    Yields:
      RDFURNs instances of each child.
    """
    # Just grab all the children from the directory index; each index
    # predicate is "index:dir/<child name>".
    index_prefix = "index:dir/"
    for predicate, _, timestamp in data_store.DB.ResolveRegex(
        self.urn, index_prefix + ".+", token=self.token,
        timestamp=Factory.ParseAgeSpecification(age), limit=limit):
      urn = self.urn.Add(predicate[len(index_prefix):])
      # Tag the yielded URN with the index entry's timestamp.
      urn.age = rdfvalue.RDFDatetime(timestamp)
      yield urn

  def OpenChildren(self, children=None, mode="r", limit=1000000,
                   chunk_limit=100000, age=NEWEST_TIME):
    """Yields AFF4 Objects of all our direct children.

    This method efficiently returns all attributes for our children directly, in
    a few data store round trips. We use the directory indexes to query the data
    store.

    Args:
      children: A list of children RDFURNs to open. If None open all our
        children.
      mode: The mode the files should be opened with.
      limit: Total number of items we will attempt to retrieve.
      chunk_limit: Maximum number of items to retrieve at a time.
      age: The age of the items to retrieve. Should be one of ALL_TIMES,
        NEWEST_TIME or a range.

    Yields:
      Instances for each direct child.
    """
    if children is None:
      subjects = list(self.ListChildren(limit=limit, age=age))
    else:
      subjects = list(children)
    subjects.sort()

    # Read at most chunk_limit children per MultiOpen round trip.
    while subjects:
      to_read = subjects[:chunk_limit]
      subjects = subjects[chunk_limit:]

      for child in FACTORY.MultiOpen(to_read, mode=mode, token=self.token,
                                     age=age):
        yield child
class AFF4Root(AFF4Volume):
  """The root of the VFS.

  This virtual collection contains the entire virtual filesystem, and therefore
  can be queried across the entire data store.
  """

  def Query(self, filter_string="", filter_obj=None, subjects=None, limit=100):
    """Filter the objects contained within this collection."""
    # NOTE(review): the `subjects` parameter is immediately shadowed by the
    # empty list below and therefore has no effect -- confirm whether any
    # caller relies on passing subjects in.
    if filter_obj is None and filter_string:
      # Parse the query string
      ast = AFF4QueryParser(filter_string).Parse()

      # Query our own data store
      filter_obj = ast.Compile(data_store.DB.filter)

    subjects = []
    result_set = data_store.DB.Query([], filter_obj, subjects=subjects,
                                     limit=limit, token=self.token)
    for match in result_set:
      subjects.append(match["subject"][0][0])

    # Open them all at once.
    result = data_store.ResultSet(FACTORY.MultiOpen(subjects, token=self.token))
    result.total_count = result_set.total_count
    return result

  def OpenMember(self, path, mode="r"):
    """If we get to the root without a container, virtualize an empty one."""
    urn = self.urn.Add(path)
    result = AFF4Volume(urn, mode=mode, token=self.token)
    result.Initialize()
    return result
class AFF4Symlink(AFF4Object):
  """This is a symlink to another AFF4 object.

  This means that opening this object will return the linked to object. To
  create a symlink, one must open the symlink for writing and set the
  Schema.SYMLINK_TARGET attribute.

  Opening the object for reading will return the linked to object.
  """

  class SchemaCls(AFF4Object.SchemaCls):
    SYMLINK_TARGET = Attribute("aff4:symlink_target", rdfvalue.RDFURN,
                               "The target of this link.")

  def __new__(cls, urn, mode="r", clone=None, token=None, local_cache=None,
              age=NEWEST_TIME, parent=None, follow_symlinks=True):
    # The redirection happens at construction time: depending on the
    # arguments, __new__ may return a completely different object (the
    # symlink's target) instead of an AFF4Symlink instance.
    # When first created, the symlink object is exposed.
    if mode == "w" or not follow_symlinks:
      # NOTE(review): forwarding keyword arguments through __new__ assumes
      # AFF4Object defines a compatible __new__ -- confirm.
      return super(AFF4Symlink, cls).__new__(
          cls, urn, mode=mode, clone=clone, token=token, parent=parent,
          local_cache=local_cache, age=age)
    elif clone is not None:
      # Get the real object (note, clone shouldn't be None during normal
      # object creation process):
      target_urn = clone.Get(cls.SchemaCls.SYMLINK_TARGET)
      return FACTORY.Open(target_urn, mode=mode, age=age, token=token)
    else:
      raise RuntimeError("Unable to open symlink.")
class AFF4OverlayedVolume(AFF4Volume):
  """A special kind of volume with overlayed contained objects.

  This AFF4Volume can contain virtual overlays. An overlay is a path which
  appears to be contained within our object, but is in fact the same object. For
  example if self.urn = RDFURN('aff4:/C.123/foobar'):

  Opening aff4:/C.123/foobar/overlayed/ will return a copy of aff4:/C.123/foobar
  with the variable self.overlayed_path = "overlayed".

  This is used to effectively allow a single AFF4Volume to handle overlay
  virtual paths inside itself without resorting to storing anything in the
  database for every one of these object. Thus we can have a WinRegistry
  AFF4Volume that handles any paths within without having storage for each
  registry key.
  """
  # The virtual sub-path this instance represents ("" for the real object).
  overlayed_path = ""

  def IsPathOverlayed(self, path):  # pylint: disable=unused-argument
    """Should this path be overlayed.

    Args:
      path: A direct_child of ours.

    Returns:
      True if the path should be overlayed.
    """
    # Default policy: nothing is overlayed; subclasses override this.
    return False

  def OpenMember(self, path, mode="rw"):
    """Opens path; overlayed paths return a clone of this object."""
    if self.IsPathOverlayed(path):
      result = self.__class__(self.urn, mode=mode, clone=self, parent=self)
      result.overlayed_path = path
      return result

    return super(AFF4OverlayedVolume, self).OpenMember(path, mode)

  def CreateMember(self, path, aff4_type, mode="w", clone=None):
    """Creates path; overlayed paths return a clone of this object."""
    # NOTE(review): duplicates the overlay branch in OpenMember above; for
    # overlayed paths the aff4_type and clone arguments are ignored.
    if self.IsPathOverlayed(path):
      result = self.__class__(self.urn, mode=mode, clone=self, parent=self)
      result.overlayed_path = path
      return result

    return super(AFF4OverlayedVolume, self).CreateMember(
        path, aff4_type, mode=mode, clone=clone)
class AFF4Stream(AFF4Object):
  """An abstract stream for reading data."""
  __metaclass__ = abc.ABCMeta

  # The read pointer offset.
  offset = 0

  # Updated when the object becomes dirty.
  dirty = False

  class SchemaCls(AFF4Object.SchemaCls):
    # Note that a file on the remote system might have stat.st_size > 0 but if
    # we do not have any of the data available to read: size = 0.
    SIZE = Attribute("aff4:size", rdfvalue.RDFInteger,
                     "The total size of available data for this stream.",
                     "size", default=0)

  @abc.abstractmethod
  def Read(self, length):
    pass

  @abc.abstractmethod
  def Write(self, data):
    pass

  @abc.abstractmethod
  def Tell(self):
    pass

  @abc.abstractmethod
  def Seek(self, offset, whence=0):
    pass

  # These are file object conformant namings for library functions that
  # grr uses, and that expect to interact with 'real' file objects. Each
  # proxy forwards the lowercase call to the CamelCase method above.
  read = utils.Proxy("Read")
  seek = utils.Proxy("Seek")
  tell = utils.Proxy("Tell")
  close = utils.Proxy("Close")
  write = utils.Proxy("Write")
class AFF4MemoryStream(AFF4Stream):
  """A stream which keeps all data in memory.

  Content is persisted zlib-compressed in the CONTENT attribute and held
  uncompressed in an in-memory StringIO buffer while the object is open.
  """

  class SchemaCls(AFF4Stream.SchemaCls):
    CONTENT = Attribute("aff4:content", rdfvalue.RDFBytes,
                        "Total content of this file.", default="")

  def Initialize(self):
    """Try to load the data from the store."""
    contents = ""

    if "r" in self.mode:
      contents = self.Get(self.Schema.CONTENT)
      try:
        if contents is not None:
          # Stored content is zlib-compressed; decompress for in-memory use.
          contents = zlib.decompress(utils.SmartStr(contents))
      except zlib.error:
        # NOTE(review): presumably the stored value predates compression;
        # keep the raw value on decompression failure -- confirm.
        pass

    self.fd = StringIO.StringIO(contents)
    self.size = rdfvalue.RDFInteger(len(contents))
    self.offset = 0

  def Truncate(self, offset=None):
    # Default to truncating at the current read/write position.
    if offset is None:
      offset = self.offset
    self.fd = StringIO.StringIO(self.fd.getvalue()[:offset])
    # NOTE(review): size is an RDFInteger after Initialize but becomes a
    # plain int after Write (self.fd.len below); calling .Set() here would
    # fail in that case -- confirm intended ordering of Write/Truncate.
    self.size.Set(offset)

  def Read(self, length):
    return self.fd.read(int(length))

  def Write(self, data):
    if isinstance(data, unicode):
      raise IOError("Cannot write unencoded string.")
    self.dirty = True
    self.fd.write(data)
    # Python 2 StringIO exposes the buffer length as .len.
    self.size = self.fd.len

  def Tell(self):
    return self.fd.tell()

  def Seek(self, offset, whence=0):
    self.fd.seek(offset, whence)

  def Flush(self, sync=True):
    # Persist the (compressed) buffer and size only if we wrote anything.
    if self.dirty:
      compressed_content = zlib.compress(self.fd.getvalue())
      self.Set(self.Schema.CONTENT(compressed_content))
      self.Set(self.Schema.SIZE(self.size))
    super(AFF4MemoryStream, self).Flush(sync=sync)

  def Close(self, sync=True):
    # Same persistence logic as Flush, then close via the base class.
    if self.dirty:
      compressed_content = zlib.compress(self.fd.getvalue())
      self.Set(self.Schema.CONTENT(compressed_content))
      self.Set(self.Schema.SIZE(self.size))
    super(AFF4MemoryStream, self).Close(sync=sync)
class AFF4ObjectCache(utils.PickleableStore):
  """A cache which closes its objects when they expire."""

  def KillObject(self, obj):
    # Eviction hook: flush the expiring AFF4 object to storage.
    obj.Close()
class AFF4Image(AFF4Stream):
  """An AFF4 Image is stored in segments.

  We are both an Image here and a volume (since we store the segments inside
  us). Each chunk is a child AFF4MemoryStream named by CHUNK_ID_TEMPLATE.

  NOTE(review): offset/chunksize arithmetic below relies on Python 2 integer
  division (`/`) and `long`; under Python 3 these expressions would produce
  floats / NameErrors -- confirm before porting.
  """
  # How often to retry reading a chunk that is not yet in the data store.
  NUM_RETRIES = 10
  # Format for chunk names: zero-padded hex chunk index.
  CHUNK_ID_TEMPLATE = "%010X"

  # This is the chunk size of each chunk. The chunksize can not be changed once
  # the object is created.
  chunksize = 64 * 1024

  class SchemaCls(AFF4Stream.SchemaCls):
    _CHUNKSIZE = Attribute("aff4:chunksize", rdfvalue.RDFInteger,
                           "Total size of each chunk.", default=64*1024)

  def Initialize(self):
    """Build a cache for our chunks."""
    super(AFF4Image, self).Initialize()
    self.offset = 0
    # A cache for segments - When we get pickled we want to discard them.
    self.chunk_cache = AFF4ObjectCache(100)

    if "r" in self.mode:
      self.size = int(self.Get(self.Schema.SIZE))
      # pylint: disable=protected-access
      self.chunksize = int(self.Get(self.Schema._CHUNKSIZE))
      # pylint: enable=protected-access
    else:
      self.size = 0

  def SetChunksize(self, chunksize):
    """Set the chunk size and discard any existing content."""
    # pylint: disable=protected-access
    self.Set(self.Schema._CHUNKSIZE(chunksize))
    # pylint: enable=protected-access
    self.chunksize = int(chunksize)
    self.Truncate(0)

  def Seek(self, offset, whence=0):
    """Move the stream pointer (whence: 0=absolute, 1=relative, 2=from end)."""
    # This stream does not support random writing in "w" mode. When the stream
    # is opened in "w" mode we can not read from the data store and therefore we
    # can not merge writes with existing data. It only makes sense to append to
    # existing streams.
    if self.mode == "w":
      # Seeking to the end of the stream is ok.
      if not (whence == 2 and offset == 0):
        raise IOError("Can not seek with an AFF4Image opened for write only.")

    if whence == 0:
      self.offset = offset
    elif whence == 1:
      self.offset += offset
    elif whence == 2:
      self.offset = long(self.size) + offset

  def Tell(self):
    """Return the current stream offset."""
    return self.offset

  def Truncate(self, offset=0):
    """Reset size and offset and drop all cached chunks."""
    self._dirty = True
    self.size = offset
    self.offset = offset
    self.chunk_cache.Flush()

  def _GetChunkForWriting(self, chunk):
    """Return (creating if needed) the cached stream for chunk index."""
    chunk_name = self.urn.Add(self.CHUNK_ID_TEMPLATE % chunk)
    try:
      fd = self.chunk_cache.Get(chunk_name)
    except KeyError:
      fd = FACTORY.Create(chunk_name, "AFF4MemoryStream", mode="rw",
                          token=self.token)
      self.chunk_cache.Put(chunk_name, fd)
    return fd

  def _GetChunkForReading(self, chunk):
    """Return the stream for chunk index, reading ahead on cache miss.

    Raises:
      IOError: If the chunk cannot be opened from the data store.
    """
    chunk_name = self.urn.Add(self.CHUNK_ID_TEMPLATE % chunk)
    try:
      fd = self.chunk_cache.Get(chunk_name)
    except KeyError:
      # The most common read access pattern is contiguous reading. Here we
      # readahead to reduce round trips: fetch up to 10 chunks in one
      # MultiOpen call.
      missing_chunks = []
      for chunk_number in range(chunk, chunk + 10):
        new_chunk_name = self.urn.Add(self.CHUNK_ID_TEMPLATE % chunk_number)
        try:
          self.chunk_cache.Get(new_chunk_name)
        except KeyError:
          missing_chunks.append(new_chunk_name)

      for child in FACTORY.MultiOpen(
          missing_chunks, mode="rw", token=self.token, age=self.age_policy):
        if isinstance(child, AFF4Stream):
          self.chunk_cache.Put(child.urn, child)

      # This should work now - otherwise we just give up.
      try:
        fd = self.chunk_cache.Get(chunk_name)
      except KeyError:
        raise IOError("Cannot open chunk %s" % chunk_name)

    return fd

  def _ReadPartial(self, length):
    """Read as much as possible, but not more than length."""
    chunk = self.offset / self.chunksize
    chunk_offset = self.offset % self.chunksize

    # Never read across a chunk boundary in one partial read.
    available_to_read = min(length, self.chunksize - chunk_offset)

    retries = 0
    while retries < self.NUM_RETRIES:
      fd = self._GetChunkForReading(chunk)
      if fd:
        break
      # Arriving here means we know about blobs that cannot be found in the db.
      # The most likely reason is that they have not been synced yet so we
      # retry a couple of times just in case they come in eventually.
      # NOTE(review): _GetChunkForReading raises IOError rather than
      # returning a falsy value, so this retry path looks unreachable --
      # confirm.
      logging.warning("Chunk not found.")
      time.sleep(1)
      retries += 1

    if retries >= self.NUM_RETRIES:
      raise IOError("Chunk not found for reading.")

    fd.Seek(chunk_offset)
    result = fd.Read(available_to_read)
    self.offset += len(result)
    return result

  def Read(self, length):
    """Read a block of data from the file."""
    result = ""

    # The total available size in the file
    length = int(length)
    length = min(length, self.size - self.offset)

    while length > 0:
      data = self._ReadPartial(length)
      if not data: break
      length -= len(data)
      result += data

    return result

  def _WritePartial(self, data):
    """Write into the current chunk; return the unwritten remainder."""
    chunk = self.offset / self.chunksize
    chunk_offset = self.offset % self.chunksize
    data = utils.SmartStr(data)

    available_to_write = min(len(data), self.chunksize - chunk_offset)

    fd = self._GetChunkForWriting(chunk)
    fd.Seek(chunk_offset)
    fd.Write(data[:available_to_write])
    self.offset += available_to_write
    return data[available_to_write:]

  def Write(self, data):
    """Write data at the current offset, growing size as needed."""
    self._dirty = True
    if isinstance(data, unicode):
      raise IOError("Cannot write unencoded string.")
    while data:
      data = self._WritePartial(data)
    self.size = max(self.size, self.offset)

  def Flush(self, sync=True):
    """Sync the chunk cache to storage."""
    if self._dirty:
      chunk_id = self.offset / self.chunksize
      chunk_name = self.urn.Add(self.CHUNK_ID_TEMPLATE % chunk_id)
      current_chunk = self.chunk_cache.Pop(chunk_name)

      # Flushing the cache will call Close() on all the chunks. We hold on to
      # the current chunk to ensure it does not get closed.
      self.chunk_cache.Flush()
      if current_chunk:
        current_chunk.Flush(sync=sync)
        self.chunk_cache.Put(chunk_name, current_chunk)

      self.Set(self.Schema.SIZE(self.size))
    super(AFF4Image, self).Flush(sync=sync)

  def Close(self, sync=True):
    """This method is called to sync our data into storage.

    Args:
      sync: Should flushing be synchronous.
    """
    self.Flush(sync=sync)
class AFF4NotificationRule(AFF4Object):
  """Base class for rules that fire when an AFF4 object is written."""

  def OnWriteObject(self, unused_aff4_object):
    # Subclasses must implement the notification behavior.
    raise NotImplementedError()
# Utility functions
class AFF4InitHook(registry.InitHook):
  """Creates the global AFF4 FACTORY once the data store is ready."""

  # Run only after the data store initialization hook has completed.
  pre = ["DataStoreInit"]

  def Run(self):
    """Delayed loading of aff4 plugins to break import cycles."""
    # pylint: disable=unused-variable,global-statement,g-import-not-at-top
    # Importing aff4_objects here (not at module scope) registers all AFF4
    # object plugins while avoiding a circular import.
    from grr.lib import aff4_objects
    global FACTORY
    FACTORY = Factory()  # pylint: disable=g-bad-name
    # pylint: enable=unused-variable,global-statement,g-import-not-at-top
class AFF4Filter(object):
  """A simple filtering system to be used with Query()."""
  __metaclass__ = registry.MetaclassRegistry

  # Automatically register plugins as class attributes
  include_plugins_as_attributes = True

  def __init__(self, *args):
    # Positional arguments are stored for use by concrete Filter()
    # implementations.
    self.args = args

  # NOTE(review): abc.abstractmethod has no effect without abc.ABCMeta as
  # the metaclass (this class uses registry.MetaclassRegistry), so
  # instantiation of subclasses lacking Filter() is not prevented -- confirm.
  @abc.abstractmethod
  def Filter(self, subjects):
    """A generator which filters the subjects.

    Args:
      subjects: An iterator of aff4 objects.

    Returns:
      A generator over all the Objects which pass the filter.
    """
# A global registry of all AFF4 classes
# FACTORY is populated with a Factory instance by AFF4InitHook.Run() after
# the data store has been initialized; until then it is None.
FACTORY = None

# The root URN of the entire AFF4 namespace.
ROOT_URN = rdfvalue.RDFURN("aff4:/")
def issubclass(obj, cls):  # pylint: disable=redefined-builtin,g-bad-name
  """A sane implementation of issubclass.

  See http://bugs.python.org/issue10569

  The bare builtin issubclass only works on types and raises when handed
  anything else, so it must be guarded by an isinstance test first.

  Args:
    obj: Any object or class.
    cls: The class to check against.

  Returns:
    True if obj is a subclass of cls and False otherwise.
  """
  if not isinstance(obj, type):
    return False
  return __builtin__.issubclass(obj, cls)
| MiniSEC/GRR_clone | lib/aff4.py | Python | apache-2.0 | 74,406 |
import pymongo
import json

# Module-level cache for lookup results.
cache = {}

# Connect to the local MongoDB instance using default host/port.
client = pymongo.MongoClient()

# Load the database configuration, looking first relative to the project
# root, then next to this module; fall back to the default database name.
# BUG FIX: the original bare `except:` also swallowed SystemExit and
# KeyboardInterrupt; catch only the errors open()/json.load() can raise.
try:
    dbConf = json.load(open("./model/db.json", "r"))
except (IOError, OSError, ValueError):
    try:
        dbConf = json.load(open("./db.json", "r"))
    except (IOError, OSError, ValueError):
        dbConf = {"dbname": "voteview"}

db = client[dbConf["dbname"]]
def metaLookup(api=""):
    """Return the newest voteview metadata document.

    Args:
        api: Projection selector. "" excludes the loyalty_counts field;
             "Web_Members" excludes the nominate field.

    Returns:
        The most recent metadata document (sorted by 'time' descending),
        or None if the collection is empty.

    Raises:
        ValueError: If api is not a supported selector.
    """
    # BUG FIX: the original left returnDict undefined for unexpected api
    # values and meta undefined for an empty collection, crashing with
    # NameError in both cases.
    if not api:
        projection = {"loyalty_counts": 0}
    elif api == "Web_Members":
        projection = {"nominate": 0}
    else:
        raise ValueError("Unsupported api selector: %r" % (api,))

    meta = None
    for m in db.voteview_metadata.find({}, projection).sort('time', -1).limit(1):
        meta = m
    return meta
| JeffreyBLewis/WebVoteView | model/searchMeta.py | Python | apache-2.0 | 568 |
# -*- coding: utf-8 -*-
#
#
# Copyright 2013 Netflix, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
"""
aminator.plugins.blockdevice.base
=================================
Base class(es) for block device manager plugins
"""
import abc
import logging
from aminator.plugins.base import BasePlugin
__all__ = ('BaseBlockDevicePlugin',)
log = logging.getLogger(__name__)
class BaseBlockDevicePlugin(BasePlugin):
    """
    BlockDevicePlugins are context managers and as such, need to implement the context manager protocol
    """
    __metaclass__ = abc.ABCMeta
    # Setuptools entry point group under which concrete plugins register.
    _entry_point = 'aminator.plugins.blockdevice'

    @abc.abstractmethod
    def __enter__(self):
        """Acquire the block device; subclasses must override."""
        return self

    @abc.abstractmethod
    def __exit__(self, typ, val, trc):
        """Release the block device.

        Logs any in-flight exception and returns False so it propagates.
        """
        if typ: log.exception("Exception: {0}: {1}".format(typ.__name__, val))
        return False

    def __call__(self, cloud):
        """
        By default, BlockDevicePlugins are called using

        with blockdeviceplugin(cloud) as device:
            pass

        Override if need be
        """
        # Stash the cloud plugin for use by __enter__/__exit__.
        self.cloud = cloud
        return self
| nhamplify/aminator | aminator/plugins/blockdevice/base.py | Python | apache-2.0 | 1,648 |
#!/usr/bin/env python3
# Markers used throughout the module: a title line starts with the
# delineator, a tag line with the hashtag.
delineator = "//"
hashtag = "#"


# generate poems from a file
# out: list of poem lines
def generate_poems(filename):
    """Yield poems from *filename*, each as a list of non-empty lines.

    A line beginning with the delineator ("//") starts a new poem; blank
    lines are dropped and trailing whitespace is stripped.
    """
    current = []
    with open(filename, 'r') as poem_file:
        for raw_line in poem_file:
            stripped = raw_line.rstrip()
            # A new title line closes the poem collected so far.
            if stripped.startswith(delineator) and current:
                yield current
                current = []
            if stripped:
                current.append(stripped)
    # Emit the final poem (an empty list if the file was empty).
    yield current
# convert a list of strings
# into a poem dictionary
def to_dictionary(poem_lines):
    """Build a poem dict with 'content', 'tags' and (if present) 'title'."""
    poem = {'content': [], 'tags': []}
    tag_lines = []
    for line in poem_lines:
        if line.startswith(delineator):
            # Title line: drop the leading marker and surrounding whitespace.
            poem['title'] = line.lstrip(delineator).strip()
        elif line.startswith(hashtag):
            # Collect tag lines; they are expanded after the main pass.
            tag_lines.append(line)
        else:
            # Do not strip content lines so indentation is preserved.
            poem['content'].append(line)
    for line in tag_lines:
        poem['tags'].extend(
            tag.strip() for tag in line.split(hashtag) if tag)
    return poem
| benjspriggs/tumb-borg | tumb_borg/process.py | Python | apache-2.0 | 1,044 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2012 The Plaso Project Authors.
# Please see the AUTHORS file for details on individual authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the UserAssist Windows Registry plugin."""
import unittest
# pylint: disable=unused-import
from plaso.formatters import winreg as winreg_formatter
from plaso.lib import eventdata
from plaso.lib import timelib_test
from plaso.parsers.winreg_plugins import test_lib
from plaso.parsers.winreg_plugins import userassist
class UserAssistPluginTest(test_lib.RegistryPluginTestCase):
  """Tests for the UserAssist Windows Registry plugin."""

  def setUp(self):
    """Sets up the needed objects used throughout the test."""
    self._plugin = userassist.UserAssistPlugin()

  def testProcessOnWinXP(self):
    """Tests the Process function on a Windows XP Registry file."""
    test_file = self._GetTestFilePath(['NTUSER.DAT'])
    key_path = (
        u'\\Software\\Microsoft\\Windows\\CurrentVersion\\Explorer\\UserAssist'
        u'\\{75048700-EF1F-11D0-9888-006097DEACF9}')
    winreg_key = self._GetKeyFromFile(test_file, key_path)
    event_queue_consumer = self._ParseKeyWithPlugin(self._plugin, winreg_key)
    event_objects = self._GetEventObjectsFromQueue(event_queue_consumer)

    # The test fixture's XP UserAssist key yields 14 events.
    self.assertEquals(len(event_objects), 14)

    event_object = event_objects[0]

    expected_timestamp = timelib_test.CopyStringToTimestamp(
        '2009-08-04 15:11:22.811067')
    self.assertEquals(event_object.timestamp, expected_timestamp)

    regvalue_identifier = u'UEME_RUNPIDL:%csidl2%\\MSN.lnk'
    expected_value = u'[Count: 14]'
    self._TestRegvalue(event_object, regvalue_identifier, expected_value)

    expected_msg = u'[{0:s}\\Count] {1:s}: {2:s}'.format(
        key_path, regvalue_identifier, expected_value)
    # The short message contains the first 76 characters of the key path.
    expected_msg_short = u'[{0:s}...'.format(key_path[:76])
    self._TestGetMessageStrings(event_object, expected_msg, expected_msg_short)

  def testProcessOnWin7(self):
    """Tests the Process function on a Windows 7 Registry file."""
    test_file = self._GetTestFilePath(['NTUSER-WIN7.DAT'])
    key_path = (
        u'\\Software\\Microsoft\\Windows\\CurrentVersion\\Explorer\\UserAssist'
        u'\\{CEBFF5CD-ACE2-4F4F-9178-9926F41749EA}')
    winreg_key = self._GetKeyFromFile(test_file, key_path)
    event_queue_consumer = self._ParseKeyWithPlugin(self._plugin, winreg_key)
    event_objects = self._GetEventObjectsFromQueue(event_queue_consumer)

    # The test fixture's Windows 7 UserAssist key yields 62 events.
    self.assertEquals(len(event_objects), 62)

    event_object = event_objects[0]

    expected_timestamp = timelib_test.CopyStringToTimestamp(
        '2010-11-10 07:49:37.078067')
    self.assertEquals(event_object.timestamp, expected_timestamp)

    regvalue_identifier = u'Microsoft.Windows.GettingStarted'
    expected_value = (
        u'[UserAssist entry: 1, Count: 14, Application focus count: 21, '
        u'Focus duration: 420000]')
    self._TestRegvalue(event_object, regvalue_identifier, expected_value)

    expected_msg = u'[{0:s}\\Count] {1:s}: {2:s}'.format(
        key_path, regvalue_identifier, expected_value)
    # The short message contains the first 76 characters of the key path.
    expected_msg_short = u'[{0:s}...'.format(key_path[:76])
    self._TestGetMessageStrings(event_object, expected_msg, expected_msg_short)
# Allow running the tests in this module directly.
if __name__ == '__main__':
  unittest.main()
| cvandeplas/plaso | plaso/parsers/winreg_plugins/userassist_test.py | Python | apache-2.0 | 3,967 |
from __future__ import unicode_literals
import time
import pytest
@pytest.mark.selenium
def test_ui(selenium):
    """Smoke test: the /download page loads in a real browser."""
    selenium.browser.get(selenium.url('/download'))
    # NOTE(review): fixed sleep gives the page time to load; an explicit
    # wait on a page element would be faster and less flaky -- confirm
    # whether the selenium fixture exposes a wait helper.
    time.sleep(3)
| clld/tsezacp | tests/test_selenium.py | Python | apache-2.0 | 183 |
# Copyright 2016 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
class FpsProcessor(object):
    """
    Provides common object for processing surfaceFlinger output for frame
    statistics.

    This processor returns the four frame statistics below:

    :FPS: Frames Per Second. This is the frame rate of the workload.
    :frames: The total number of frames rendered during the execution of
             the workload.
    :janks: The number of "janks" that occurred during execution of the
            workload. Janks are sudden shifts in frame rate. They result
            in a "stuttery" UI. See http://jankfree.org/jank-busters-io
    :not_at_vsync: The number of frames that did not render in a single
                   vsync cycle.
    """

    def __init__(self, data, action=None):
        """
        data - a pandas.DataFrame object with frame data (e.g. frames.csv)
        action - output metrics names with additional action specifier
        """
        self.data = data
        self.action = action

    def process(self, refresh_period, drop_threshold):  # pylint: disable=too-many-locals
        """
        Generate frame per second (fps) and associated metrics for workload.

        refresh_period - the vsync interval
        drop_threshold - data points below this fps will be dropped

        Returns a (per_frame_fps, (fps, frame_count, janks, not_at_vsync))
        tuple; fps is NaN when no frames survive filtering.
        """
        fps = float('nan')
        frame_count, janks, not_at_vsync = 0, 0, 0
        vsync_interval = refresh_period

        # Filter out bogus frames: a sentinel present time of 2**63 - 1 means
        # the frame was never actually presented.
        bogus_frames_filter = self.data.actual_present_time != 0x7fffffffffffffff
        actual_present_times = self.data.actual_present_time[bogus_frames_filter]

        # Deltas between consecutive present times; the first row (NaN after
        # the shift) is dropped.
        actual_present_time_deltas = actual_present_times - actual_present_times.shift()
        actual_present_time_deltas = actual_present_time_deltas.drop(0)

        vsyncs_to_compose = actual_present_time_deltas / vsync_interval
        # BUG FIX: the result of Series.apply() was previously discarded
        # (apply returns a new Series), so the intended rounding to whole
        # vsync counts never took effect. _calc_not_at_vsync's epsilon
        # comparison against exactly 1 only makes sense on rounded values.
        vsyncs_to_compose = vsyncs_to_compose.apply(lambda x: int(round(x, 0)))

        # drop values lower than drop_threshold FPS as real in-game frame
        # rate is unlikely to drop below that (except on loading screens
        # etc, which should not be factored in frame rate calculation).
        per_frame_fps = (1.0 / (vsyncs_to_compose * (vsync_interval / 1e9)))
        keep_filter = per_frame_fps > drop_threshold
        filtered_vsyncs_to_compose = vsyncs_to_compose[keep_filter]
        per_frame_fps.name = 'fps'

        if not filtered_vsyncs_to_compose.empty:
            total_vsyncs = filtered_vsyncs_to_compose.sum()
            frame_count = filtered_vsyncs_to_compose.size

            if total_vsyncs:
                fps = 1e9 * frame_count / (vsync_interval * total_vsyncs)

            janks = self._calc_janks(filtered_vsyncs_to_compose)
            not_at_vsync = self._calc_not_at_vsync(vsyncs_to_compose)

        metrics = (fps, frame_count, janks, not_at_vsync)
        return per_frame_fps, metrics

    @staticmethod
    def _calc_janks(filtered_vsyncs_to_compose):
        """
        Internal method for calculating jank frames.

        A jank is counted when the vsyncs-to-compose count changes by more
        than 1.5 (but less than the pause threshold, which indicates an
        intentional pause rather than a stutter).
        """
        pause_latency = 20
        vtc_deltas = filtered_vsyncs_to_compose - filtered_vsyncs_to_compose.shift()
        vtc_deltas.index = range(0, vtc_deltas.size)
        vtc_deltas = vtc_deltas.drop(0).abs()
        janks = vtc_deltas.apply(lambda x: (pause_latency > x > 1.5) and 1 or 0).sum()
        return janks

    @staticmethod
    def _calc_not_at_vsync(vsyncs_to_compose):
        """
        Internal method for calculating the number of frames that did not
        render in a single vsync cycle.
        """
        epsilon = 0.0001
        func = lambda x: (abs(x - 1.0) > epsilon) and 1 or 0
        not_at_vsync = vsyncs_to_compose.apply(func).sum()
        return not_at_vsync
| ep1cman/workload-automation | wlauto/utils/fps.py | Python | apache-2.0 | 4,301 |
# Copyright 2013 Answers for AWS LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import logging
import sys
from backup_monkey.core import BackupMonkey, Logging
from backup_monkey import __version__
from backup_monkey.exceptions import BackupMonkeyException
from boto.utils import get_instance_metadata
__all__ = ('run', )
log = logging.getLogger(__name__)
LIMIT_LABEL = 32 # Label is added to description when created snapshot.
# The description limit in aws is 255
def _fail(message="Unknown failure", code=1):
    """Report a fatal error and terminate the process.

    message -- text logged at ERROR level before exiting
    code -- process exit status (non-zero indicates failure)
    """
    log.error(message)
    # sys.exit(code) is equivalent to raising SystemExit(code).
    raise SystemExit(code)
def run():
    """Command-line entry point for Backup Monkey.

    Parses arguments, validates interdependent options, determines the AWS
    region (explicitly via --region, or from the EC2 instance meta-data
    service), then creates and/or prunes EBS snapshots as requested.
    Exits the process: status 0 on success, non-zero via _fail() on error.
    """
    parser = argparse.ArgumentParser(description='Loops through all EBS volumes, and snapshots them, then loops through all snapshots, and removes the oldest ones.')
    parser.add_argument('--region', metavar='REGION',
                        help='the region to loop through and snapshot (default is current region of EC2 instance this is running on). E.g. us-east-1')
    parser.add_argument('--max-snapshots-per-volume', metavar='SNAPSHOTS', default=3, type=int,
                        help='the maximum number of snapshots to keep per EBS volume. The oldest snapshots will be deleted. Default: 3')
    parser.add_argument('--snapshot-only', action='store_true', default=False,
                        help='Only snapshot EBS volumes, do not remove old snapshots')
    parser.add_argument('--remove-only', action='store_true', default=False,
                        help='Only remove old snapshots, do not create new snapshots')
    parser.add_argument('--verbose', '-v', action='count',
                        help='enable verbose output (-vvv for more)')
    parser.add_argument('--version', action='version', version='%(prog)s ' + __version__,
                        help='display version number and exit')
    parser.add_argument('--tags', nargs="+",
                        help='Only snapshot instances that match passed in tags. E.g. --tag Name:foo will snapshot all instances with a tag `Name` and value is `foo`')
    parser.add_argument('--reverse-tags', action='store_true', default=False,
                        help='Do a reverse match on the passed in tags. E.g. --tag Name:foo --reverse-tags will snapshot all instances that do not have a `Name` tag with the value `foo`')
    parser.add_argument('--label', action='store',
                        help='Only snapshot instances that match passed in label are created or deleted. Default: None. Selected all snapshot. You have the posibility of create a different strategies for daily, weekly and monthly for example. Label daily won\'t deleted label weekly')
    parser.add_argument('--cross-account-number', action='store',
                        help='Do a cross-account snapshot (this is the account number to do snapshots on). NOTE: This requires that you pass in the --cross-account-role parameter. E.g. --cross-account-number 111111111111 --cross-account-role Snapshot')
    parser.add_argument('--cross-account-role', action='store',
                        help='The name of the role that backup-monkey will assume when doing a cross-account snapshot. E.g. --cross-account-role Snapshot')
    args = parser.parse_args()
    # Interdependent options: cross-account snapshots need both the account
    # number and the role name; --reverse-tags only makes sense with --tags.
    if args.cross_account_number and not args.cross_account_role:
        parser.error('The --cross-account-role parameter is required if you specify --cross-account-number (doing a cross-account snapshot)')
    if args.cross_account_role and not args.cross_account_number:
        parser.error('The --cross-account-number parameter is required if you specify --cross-account-role (doing a cross-account snapshot)')
    if args.reverse_tags and not args.tags:
        parser.error('The --tags parameter is required if you specify --reverse-tags (doing a blacklist filter)')
    if args.label and len(args.label) > LIMIT_LABEL:
        # Fixed message: the original read 'lenght should be less than 32',
        # although labels of exactly LIMIT_LABEL characters are accepted.
        parser.error('The --label parameter length must be %d characters or fewer' % LIMIT_LABEL)
    Logging().configure(args.verbose)
    log.debug("CLI parse args: %s", args)
    if args.region:
        region = args.region
    else:
        # If no region was specified, assume this is running on an EC2 instance
        # and work out what region it is in from the meta-data service.
        log.debug("Figure out which region I am running in...")
        instance_metadata = get_instance_metadata(timeout=5)
        log.debug('Instance meta-data: %s', instance_metadata)
        if not instance_metadata:
            _fail('Could not determine region. This script is either not running on an EC2 instance (in which case you should use the --region option), or the meta-data service is down')
        # Availability zone is e.g. 'us-east-1a'; strip the zone letter.
        region = instance_metadata['placement']['availability-zone'][:-1]
    log.debug("Running in region: %s", region)
    try:
        monkey = BackupMonkey(region,
                              args.max_snapshots_per_volume,
                              args.tags,
                              args.reverse_tags,
                              args.label,
                              args.cross_account_number,
                              args.cross_account_role)
        if not args.remove_only:
            monkey.snapshot_volumes()
        if not args.snapshot_only:
            monkey.remove_old_snapshots()
    except BackupMonkeyException as e:
        # str(e), not e.message: BaseException.message was removed in
        # Python 3, and str(e) is equivalent on Python 2.
        _fail(str(e))
    log.info('Backup Monkey completed successfully!')
    sys.exit(0)
| Answers4AWS/backup-monkey | backup_monkey/cli.py | Python | apache-2.0 | 5,890 |
'''
Classes for using multipart form data from Python, which does not (at the
time of writing) support this directly.
To use this, make an instance of Multipart and add parts to it via the factory
methods field and file. When you are done, get the content via the get method.
@author: Stacy Prowell (http://stacyprowell.com)
'''
import mimetypes
class Part(object):
    '''
    Class holding a single part of the form. You should never need to use
    this class directly; instead, use the factory methods in Multipart:
    field and file.
    '''

    # The boundary to use. This is shamelessly taken from the standard.
    BOUNDARY = '----------AaB03x'
    CRLF = '\r\n'

    # Common headers.
    CONTENT_TYPE = 'Content-Type'
    CONTENT_DISPOSITION = 'Content-Disposition'

    # The default content type for parts.
    DEFAULT_CONTENT_TYPE = 'application/octet-stream'

    def __init__(self, name, filename, body, headers):
        '''
        Make a new part. The part will have the given headers added initially.

        @param name: The part name.
        @type name: str
        @param filename: If this is a file, the name of the file. Otherwise
                         None.
        @type filename: str
        @param body: The body of the part.
        @type body: str
        @param headers: Additional headers, or overrides, for this part.
                        You can override Content-Type here.
        @type headers: dict
        '''
        # Copy so the caller's dict is never mutated.
        self._headers = headers.copy()
        self._name = name
        self._filename = filename
        self._body = body
        # We respect any content type passed in, but otherwise set it here.
        # We set the content disposition now, overwriting any prior value.
        # ('is None' rather than '== None': identity test is the correct
        # Python idiom and is not fooled by odd __eq__ implementations.)
        if self._filename is None:
            self._headers[Part.CONTENT_DISPOSITION] = \
                ('form-data; name="%s"' % self._name)
            self._headers.setdefault(Part.CONTENT_TYPE,
                                     Part.DEFAULT_CONTENT_TYPE)
        else:
            self._headers[Part.CONTENT_DISPOSITION] = \
                ('form-data; name="%s"; filename="%s"' %
                 (self._name, self._filename))
            # Guess the type from the file name, falling back to the default.
            self._headers.setdefault(Part.CONTENT_TYPE,
                                     mimetypes.guess_type(filename)[0]
                                     or Part.DEFAULT_CONTENT_TYPE)

    def get(self):
        '''
        Convert the part into a list of lines for output. This includes
        the boundary lines, part header lines, and the part itself. A
        blank line is included between the header and the body.

        @return: Lines of this part.
        @rtype: list
        '''
        lines = []
        lines.append('--' + Part.BOUNDARY)
        for (key, val) in self._headers.items():
            lines.append(str('%s: %s' % (key, val)))
        lines.append('')
        lines.append(self._body)
        return lines
class Multipart(object):
    '''
    Encapsulate multipart form data. To use this, make an instance and then
    add parts to it via the two methods (field and file). When done, you can
    get the result via the get method.

    See http://www.w3.org/TR/html401/interact/forms.html#h-17.13.4.2 for
    details on multipart/form-data.

    Watch http://bugs.python.org/issue3244 to see if this is fixed in the
    Python libraries.

    @return: content type, body
    @rtype: tuple
    '''

    def __init__(self):
        self.parts = []

    def field(self, name, value, headers=None):
        '''
        Create and append a field part. This kind of part has a field name
        and value.

        @param name: The field name.
        @type name: str
        @param value: The field value.
        @type value: str
        @param headers: Headers to set in addition to disposition.
        @type headers: dict
        '''
        # Default of None (not {}) avoids the shared-mutable-default pitfall.
        if headers is None:
            headers = {}
        self.parts.append(Part(name, None, value, headers))

    def file(self, name, filename, value, headers=None):
        '''
        Create and append a file part. This kind of part has a field name,
        a filename, and a value.

        @param name: The field name.
        @type name: str
        @param filename: The name of the file.
        @type filename: str
        @param value: The field value.
        @type value: str
        @param headers: Headers to set in addition to disposition.
        @type headers: dict
        '''
        if headers is None:
            headers = {}
        self.parts.append(Part(name, filename, value, headers))

    def get(self):
        '''
        Get the multipart form data. This returns the content type, which
        specifies the boundary marker, and also returns the body containing
        all parts and boundary markers.

        @return: content type, body
        @rtype: tuple
        '''
        all = []
        for part in self.parts:
            all += part.get()
        all.append('--' + Part.BOUNDARY + '--')
        all.append('')
        # We have to return the content type, since it specifies the boundary.
        content_type = 'multipart/form-data; boundary=%s' % Part.BOUNDARY
        return content_type, Part.CRLF.join(all)
# -*- coding: utf-8 -*-
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType
from pyangbind.lib.yangtypes import RestrictedClassType
from pyangbind.lib.yangtypes import TypedListType
from pyangbind.lib.yangtypes import YANGBool
from pyangbind.lib.yangtypes import YANGListType
from pyangbind.lib.yangtypes import YANGDynClass
from pyangbind.lib.yangtypes import ReferenceType
from pyangbind.lib.base import PybindBase
from collections import OrderedDict
from decimal import Decimal
from bitarray import bitarray
import six
# PY3 support of some PY2 keywords (needs improved)
if six.PY3:
import builtins as __builtin__
long = int
elif six.PY2:
import __builtin__
from . import config
from . import state
from . import reset_triggers
class overload_bit(PybindBase):
    """
    This class was auto-generated by the PythonClass plugin for PYANG
    from YANG module openconfig-network-instance - based on the path /network-instances/network-instance/protocols/protocol/isis/global/lsp-bit/overload-bit. Each member element of
    the container is represented as a class variable - with a specific
    YANG type.
    YANG Description: This container defines Overload Bit configuration.
    """

    # NOTE: pyangbind-generated code -- regenerate from the YANG model rather
    # than editing by hand. Each child container (config, state,
    # reset-triggers) is held in a private YANGDynClass wrapper and exposed
    # through _get_*/_set_*/_unset_* accessors bound as properties below.
    __slots__ = (
        "_path_helper", "_extmethods", "__config", "__state", "__reset_triggers"
    )

    _yang_name = "overload-bit"
    _pybind_generated_by = "container"

    def __init__(self, *args, **kwargs):
        # Path helper / extmethods are disabled until pyangbind wires them in.
        self._path_helper = False
        self._extmethods = False
        self.__config = YANGDynClass(
            base=config.config,
            is_container="container",
            yang_name="config",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions=None,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="container",
            is_config=True,
        )
        self.__state = YANGDynClass(
            base=state.state,
            is_container="container",
            yang_name="state",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions=None,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="container",
            is_config=True,
        )
        self.__reset_triggers = YANGDynClass(
            base=reset_triggers.reset_triggers,
            is_container="container",
            yang_name="reset-triggers",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions=None,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="container",
            is_config=True,
        )
        load = kwargs.pop("load", None)
        # A single positional argument may be another instance to copy
        # changed elements from; it must expose every pyangbind element.
        if args:
            if len(args) > 1:
                raise TypeError("cannot create a YANG container with >1 argument")
            all_attr = True
            for e in self._pyangbind_elements:
                if not hasattr(args[0], e):
                    all_attr = False
                    break
            if not all_attr:
                raise ValueError("Supplied object did not have the correct attributes")
            for e in self._pyangbind_elements:
                nobj = getattr(args[0], e)
                if nobj._changed() is False:
                    continue
                setmethod = getattr(self, "_set_%s" % e)
                if load is None:
                    setmethod(getattr(args[0], e))
                else:
                    setmethod(getattr(args[0], e), load=load)

    def _path(self):
        # Build the YANG path from the parent chain when attached to a tree;
        # otherwise fall back to this container's absolute schema path.
        if hasattr(self, "_parent"):
            return self._parent._path() + [self._yang_name]
        else:
            return [
                "network-instances",
                "network-instance",
                "protocols",
                "protocol",
                "isis",
                "global",
                "lsp-bit",
                "overload-bit",
            ]

    def _get_config(self):
        """
        Getter method for config, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/global/lsp_bit/overload_bit/config (container)
        YANG Description: This container defines ISIS Overload Bit configuration.
        """
        return self.__config

    def _set_config(self, v, load=False):
        """
        Setter method for config, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/global/lsp_bit/overload_bit/config (container)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_config is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_config() directly.
        YANG Description: This container defines ISIS Overload Bit configuration.
        """
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            t = YANGDynClass(
                v,
                base=config.config,
                is_container="container",
                yang_name="config",
                parent=self,
                path_helper=self._path_helper,
                extmethods=self._extmethods,
                register_paths=True,
                extensions=None,
                namespace="http://openconfig.net/yang/network-instance",
                defining_module="openconfig-network-instance",
                yang_type="container",
                is_config=True,
            )
        except (TypeError, ValueError):
            raise ValueError(
                {
                    "error-string": """config must be of a type compatible with container""",
                    "defined-type": "container",
                    "generated-type": """YANGDynClass(base=config.config, is_container='container', yang_name="config", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)""",
                }
            )
        self.__config = t
        if hasattr(self, "_set"):
            self._set()

    def _unset_config(self):
        self.__config = YANGDynClass(
            base=config.config,
            is_container="container",
            yang_name="config",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions=None,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="container",
            is_config=True,
        )

    def _get_state(self):
        """
        Getter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/global/lsp_bit/overload_bit/state (container)
        YANG Description: This container defines state for ISIS Overload Bit.
        """
        return self.__state

    def _set_state(self, v, load=False):
        """
        Setter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/global/lsp_bit/overload_bit/state (container)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_state is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_state() directly.
        YANG Description: This container defines state for ISIS Overload Bit.
        """
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            t = YANGDynClass(
                v,
                base=state.state,
                is_container="container",
                yang_name="state",
                parent=self,
                path_helper=self._path_helper,
                extmethods=self._extmethods,
                register_paths=True,
                extensions=None,
                namespace="http://openconfig.net/yang/network-instance",
                defining_module="openconfig-network-instance",
                yang_type="container",
                is_config=True,
            )
        except (TypeError, ValueError):
            raise ValueError(
                {
                    "error-string": """state must be of a type compatible with container""",
                    "defined-type": "container",
                    "generated-type": """YANGDynClass(base=state.state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)""",
                }
            )
        self.__state = t
        if hasattr(self, "_set"):
            self._set()

    def _unset_state(self):
        self.__state = YANGDynClass(
            base=state.state,
            is_container="container",
            yang_name="state",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions=None,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="container",
            is_config=True,
        )

    def _get_reset_triggers(self):
        """
        Getter method for reset_triggers, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/global/lsp_bit/overload_bit/reset_triggers (container)
        YANG Description: This container defines state for ISIS Overload Bit reset triggers
        """
        return self.__reset_triggers

    def _set_reset_triggers(self, v, load=False):
        """
        Setter method for reset_triggers, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/global/lsp_bit/overload_bit/reset_triggers (container)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_reset_triggers is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_reset_triggers() directly.
        YANG Description: This container defines state for ISIS Overload Bit reset triggers
        """
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            t = YANGDynClass(
                v,
                base=reset_triggers.reset_triggers,
                is_container="container",
                yang_name="reset-triggers",
                parent=self,
                path_helper=self._path_helper,
                extmethods=self._extmethods,
                register_paths=True,
                extensions=None,
                namespace="http://openconfig.net/yang/network-instance",
                defining_module="openconfig-network-instance",
                yang_type="container",
                is_config=True,
            )
        except (TypeError, ValueError):
            raise ValueError(
                {
                    "error-string": """reset_triggers must be of a type compatible with container""",
                    "defined-type": "container",
                    "generated-type": """YANGDynClass(base=reset_triggers.reset_triggers, is_container='container', yang_name="reset-triggers", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)""",
                }
            )
        self.__reset_triggers = t
        if hasattr(self, "_set"):
            self._set()

    def _unset_reset_triggers(self):
        self.__reset_triggers = YANGDynClass(
            base=reset_triggers.reset_triggers,
            is_container="container",
            yang_name="reset-triggers",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions=None,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="container",
            is_config=True,
        )

    # Public property interface over the private accessors above.
    config = __builtin__.property(_get_config, _set_config)
    state = __builtin__.property(_get_state, _set_state)
    reset_triggers = __builtin__.property(_get_reset_triggers, _set_reset_triggers)

    _pyangbind_elements = OrderedDict(
        [("config", config), ("state", state), ("reset_triggers", reset_triggers)]
    )
from . import config
from . import state
from . import reset_triggers
class overload_bit(PybindBase):
    """
    This class was auto-generated by the PythonClass plugin for PYANG
    from YANG module openconfig-network-instance-l2 - based on the path /network-instances/network-instance/protocols/protocol/isis/global/lsp-bit/overload-bit. Each member element of
    the container is represented as a class variable - with a specific
    YANG type.
    YANG Description: This container defines Overload Bit configuration.
    """

    # NOTE: pyangbind-generated code (second binding of the same container,
    # emitted for the openconfig-network-instance-l2 module) -- regenerate
    # from the YANG model rather than editing by hand.
    __slots__ = (
        "_path_helper", "_extmethods", "__config", "__state", "__reset_triggers"
    )

    _yang_name = "overload-bit"
    _pybind_generated_by = "container"

    def __init__(self, *args, **kwargs):
        # Path helper / extmethods are disabled until pyangbind wires them in.
        self._path_helper = False
        self._extmethods = False
        self.__config = YANGDynClass(
            base=config.config,
            is_container="container",
            yang_name="config",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions=None,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="container",
            is_config=True,
        )
        self.__state = YANGDynClass(
            base=state.state,
            is_container="container",
            yang_name="state",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions=None,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="container",
            is_config=True,
        )
        self.__reset_triggers = YANGDynClass(
            base=reset_triggers.reset_triggers,
            is_container="container",
            yang_name="reset-triggers",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions=None,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="container",
            is_config=True,
        )
        load = kwargs.pop("load", None)
        # A single positional argument may be another instance to copy
        # changed elements from; it must expose every pyangbind element.
        if args:
            if len(args) > 1:
                raise TypeError("cannot create a YANG container with >1 argument")
            all_attr = True
            for e in self._pyangbind_elements:
                if not hasattr(args[0], e):
                    all_attr = False
                    break
            if not all_attr:
                raise ValueError("Supplied object did not have the correct attributes")
            for e in self._pyangbind_elements:
                nobj = getattr(args[0], e)
                if nobj._changed() is False:
                    continue
                setmethod = getattr(self, "_set_%s" % e)
                if load is None:
                    setmethod(getattr(args[0], e))
                else:
                    setmethod(getattr(args[0], e), load=load)

    def _path(self):
        # Build the YANG path from the parent chain when attached to a tree;
        # otherwise fall back to this container's absolute schema path.
        if hasattr(self, "_parent"):
            return self._parent._path() + [self._yang_name]
        else:
            return [
                "network-instances",
                "network-instance",
                "protocols",
                "protocol",
                "isis",
                "global",
                "lsp-bit",
                "overload-bit",
            ]

    def _get_config(self):
        """
        Getter method for config, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/global/lsp_bit/overload_bit/config (container)
        YANG Description: This container defines ISIS Overload Bit configuration.
        """
        return self.__config

    def _set_config(self, v, load=False):
        """
        Setter method for config, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/global/lsp_bit/overload_bit/config (container)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_config is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_config() directly.
        YANG Description: This container defines ISIS Overload Bit configuration.
        """
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            t = YANGDynClass(
                v,
                base=config.config,
                is_container="container",
                yang_name="config",
                parent=self,
                path_helper=self._path_helper,
                extmethods=self._extmethods,
                register_paths=True,
                extensions=None,
                namespace="http://openconfig.net/yang/network-instance",
                defining_module="openconfig-network-instance",
                yang_type="container",
                is_config=True,
            )
        except (TypeError, ValueError):
            raise ValueError(
                {
                    "error-string": """config must be of a type compatible with container""",
                    "defined-type": "container",
                    "generated-type": """YANGDynClass(base=config.config, is_container='container', yang_name="config", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)""",
                }
            )
        self.__config = t
        if hasattr(self, "_set"):
            self._set()

    def _unset_config(self):
        self.__config = YANGDynClass(
            base=config.config,
            is_container="container",
            yang_name="config",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions=None,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="container",
            is_config=True,
        )

    def _get_state(self):
        """
        Getter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/global/lsp_bit/overload_bit/state (container)
        YANG Description: This container defines state for ISIS Overload Bit.
        """
        return self.__state

    def _set_state(self, v, load=False):
        """
        Setter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/global/lsp_bit/overload_bit/state (container)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_state is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_state() directly.
        YANG Description: This container defines state for ISIS Overload Bit.
        """
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            t = YANGDynClass(
                v,
                base=state.state,
                is_container="container",
                yang_name="state",
                parent=self,
                path_helper=self._path_helper,
                extmethods=self._extmethods,
                register_paths=True,
                extensions=None,
                namespace="http://openconfig.net/yang/network-instance",
                defining_module="openconfig-network-instance",
                yang_type="container",
                is_config=True,
            )
        except (TypeError, ValueError):
            raise ValueError(
                {
                    "error-string": """state must be of a type compatible with container""",
                    "defined-type": "container",
                    "generated-type": """YANGDynClass(base=state.state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)""",
                }
            )
        self.__state = t
        if hasattr(self, "_set"):
            self._set()

    def _unset_state(self):
        self.__state = YANGDynClass(
            base=state.state,
            is_container="container",
            yang_name="state",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions=None,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="container",
            is_config=True,
        )

    def _get_reset_triggers(self):
        """
        Getter method for reset_triggers, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/global/lsp_bit/overload_bit/reset_triggers (container)
        YANG Description: This container defines state for ISIS Overload Bit reset triggers
        """
        return self.__reset_triggers

    def _set_reset_triggers(self, v, load=False):
        """
        Setter method for reset_triggers, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/global/lsp_bit/overload_bit/reset_triggers (container)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_reset_triggers is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_reset_triggers() directly.
        YANG Description: This container defines state for ISIS Overload Bit reset triggers
        """
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            t = YANGDynClass(
                v,
                base=reset_triggers.reset_triggers,
                is_container="container",
                yang_name="reset-triggers",
                parent=self,
                path_helper=self._path_helper,
                extmethods=self._extmethods,
                register_paths=True,
                extensions=None,
                namespace="http://openconfig.net/yang/network-instance",
                defining_module="openconfig-network-instance",
                yang_type="container",
                is_config=True,
            )
        except (TypeError, ValueError):
            raise ValueError(
                {
                    "error-string": """reset_triggers must be of a type compatible with container""",
                    "defined-type": "container",
                    "generated-type": """YANGDynClass(base=reset_triggers.reset_triggers, is_container='container', yang_name="reset-triggers", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)""",
                }
            )
        self.__reset_triggers = t
        if hasattr(self, "_set"):
            self._set()

    def _unset_reset_triggers(self):
        self.__reset_triggers = YANGDynClass(
            base=reset_triggers.reset_triggers,
            is_container="container",
            yang_name="reset-triggers",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions=None,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="container",
            is_config=True,
        )

    # Public property interface over the private accessors above.
    config = __builtin__.property(_get_config, _set_config)
    state = __builtin__.property(_get_state, _set_state)
    reset_triggers = __builtin__.property(_get_reset_triggers, _set_reset_triggers)

    _pyangbind_elements = OrderedDict(
        [("config", config), ("state", state), ("reset_triggers", reset_triggers)]
    )
| napalm-automation/napalm-yang | napalm_yang/models/openconfig/network_instances/network_instance/protocols/protocol/isis/global_/lsp_bit/overload_bit/__init__.py | Python | apache-2.0 | 25,678 |
from django.utils import unittest
from spacescout_web.test.not_found import NotFound404Test
from spacescout_web.test.url_filtering import URLFiltering
| uw-it-aca/spacescout_web | spacescout_web/tests.py | Python | apache-2.0 | 152 |
#!/usr/bin/env python
#
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example updates an expanded text ad.
To get expanded text ads, run get_expanded_text_ads.py.
The LoadFromStorage method is pulling credentials and properties from a
"googleads.yaml" file. By default, it looks for this file in your home
directory. For more information, see the "Caching authentication information"
section of our README.
"""
import uuid
from googleads import adwords
# Placeholder: replace with the ID of the expanded text ad to update
# before running this example (see get_expanded_text_ads.py).
AD_ID = 'INSERT_AD_ID_HERE'
def main(client, ad_id):
    """Updates the expanded text ad with the given ID and prints the result.

    Args:
      client: An AdWordsClient instance used to obtain the AdService.
      ad_id: The ID of the expanded text ad to update.
    """
    # Initialize appropriate service.
    ad_service = client.GetService('AdService', version='v201809')
    # Create an expanded text ad using the provided ad ID.
    expanded_text_ad = {
        'xsi_type': 'ExpandedTextAd',
        'id': ad_id,
        'headlinePart1': 'Cruise to Pluto #' + str(uuid.uuid4())[:8],
        'headlinePart2': 'Tickets on sale now',
        'description': 'Best space cruise ever.',
        'finalUrls': ['http://www.example.com'],
        'finalMobileUrls': ['http://www.example.com/mobile']
    }
    # Create ad group ad operation ('SET' updates the existing ad in place).
    operations = [{
        'operator': 'SET',
        'operand': expanded_text_ad
    }]
    # Updates the ad on the server.
    result = ad_service.mutate(operations)
    updated_ad = result['value'][0]
    # print() calls (rather than Python 2 print statements) keep this
    # example importable under both Python 2 and Python 3; the output is
    # identical.
    print('Expanded text ad with ID %s was updated.' % updated_ad['id'])
    print('\tHeadline part 1: %s\nHeadline part 2: %s\nDescription: %s\n'
          'Final URL: %s\nFinal mobile URL: %s' %
          (updated_ad['headlinePart1'],
           updated_ad['headlinePart2'],
           updated_ad['description'],
           updated_ad['finalUrls'][0],
           updated_ad['finalMobileUrls'][0]))
if __name__ == '__main__':
    # Initialize client object.
    # LoadFromStorage reads credentials from the googleads.yaml file in the
    # user's home directory (see the module docstring).
    adwords_client = adwords.AdWordsClient.LoadFromStorage()
    main(adwords_client, AD_ID)
| Aloomaio/googleads-python-lib | examples/adwords/v201809/basic_operations/update_expanded_text_ad.py | Python | apache-2.0 | 2,330 |
# Copyright 2005-2010 Wesabe, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ofxtools.CsvConverter - translate CSV files into OFX files.
#
import datetime
import dateutil.parser
import ofx
import ofxtools
import re
import sys
import xml.sax.saxutils as sax
from decimal import *
from ofx.builder import *
class CsvConverter:
    """Convert QIF transaction data into an OFX document.

    NOTE(review): despite the class/module name, the ``qif`` argument is QIF
    text fed to ``ofxtools.QifParser`` (``colspec``/``parsed_csv`` are stored
    but unused here) — confirm against callers. Conversion is two-pass: the
    first pass looks only at dates to guess whether the statement uses
    day-first dates; the second cleans each transaction and indexes it by
    date for OFX output.
    """

    def __init__(self, qif, colspec=None, fid="UNKNOWN", org="UNKNOWN",
                 bankid="UNKNOWN", accttype="UNKNOWN", acctid="UNKNOWN",
                 balance="UNKNOWN", curdef=None, lang="ENG", dayfirst=False,
                 debug=False):
        self.qif = qif
        self.colspec = colspec
        self.fid = fid
        self.org = org
        self.bankid = bankid
        self.accttype = accttype
        self.acctid = acctid
        self.balance = balance
        self.curdef = curdef
        self.lang = lang
        self.debug = debug
        self.dayfirst = dayfirst
        self.parsed_csv = None

        # FIXME: Move this to one of the OFX generation classes (Document
        # or Response).
        self.txns_by_date = {}

        if self.debug: sys.stderr.write("Parsing document.\n")
        parser = ofxtools.QifParser()  # debug=debug)
        self.parsed_qif = parser.parse(self.qif)

        if self.debug: sys.stderr.write("Cleaning transactions.\n")

        # We do a two-pass conversion in order to check the dates of all
        # transactions in the statement, and convert all the dates using
        # the same date format. The first pass does nothing but look
        # at dates; the second actually applies the date conversion and
        # all other conversions, and extracts information needed for
        # the final output (like date range).
        txn_list = self._extract_txn_list(self.parsed_qif)
        self._guess_formats(txn_list)
        self._clean_txn_list(txn_list)

    def _extract_txn_list(self, qif):
        """Flatten the parsed QIF statement into a list of transactions.

        Also infers the account type from the statement section when the
        caller did not supply one. Raises ValueError when the source holds
        no transactions at all.
        """
        stmt_obj = qif.asDict()["QifStatement"]

        if self.accttype == "UNKNOWN":
            if "BankTransactions" in stmt_obj:
                self.accttype = "CHECKING"
            elif "CreditCardTransactions" in stmt_obj:
                self.accttype = "CREDITCARD"

        txn_list = []
        for stmt in stmt_obj:
            for txn in stmt:
                txn_list.append(txn)
        if len(txn_list) == 0:
            raise ValueError("Found no transactions to convert " +
                             "in the QIF source.")
        return txn_list

    #
    # Date methods
    #

    def _guess_formats(self, txn_list):
        """First pass over the statement: guess the date format.

        Go through the transactions one at a time, and try to parse the date
        field. If we check the date format and find a transaction where the
        first number must be the day (that is, the first number is in the
        range 13..31), then set the state of the converter to use dayfirst
        for all transaction cleanups. This is a guess because the method
        will only work for UK dates if the statement contains a day in the
        13..31 range. (We could also test whether a date appears out of
        order, or whether the jumps between transactions are especially
        long, if this guessing method doesn't work reliably.)
        """
        for txn_obj in txn_list:
            txn = txn_obj.asDict()
            txn_date = txn.get("Date", "UNKNOWN")
            # Look for date format.
            try:
                parsed_date = self._parse_date(txn_date)
            except ValueError:
                # An unparseable date can't inform the guess; the cleanup
                # pass will reject the transaction individually.
                continue
            self._check_date_format(parsed_date)

    def _parse_date(self, txn_date, dayfirst=False):
        """Parse a QIF date string into a datetime.

        Returns the string "UNKNOWN" unchanged for missing/blank dates;
        raises ValueError (from dateutil) when the date is unparseable.
        As a signal to _check_date_format, a date whose leading one-or-two
        digit field can only be a day (13..31) is tagged with microsecond=3.

        NOTE(review): the original body of this method was lost; this
        reconstruction is keyed to the ``microsecond == 3`` sentinel that
        _check_date_format tests for — confirm against upstream history.
        """
        cleaned = txn_date.strip()
        if cleaned == "" or cleaned == "UNKNOWN":
            return "UNKNOWN"
        parsed = dateutil.parser.parse(cleaned, dayfirst=dayfirst)
        # A leading 13..31 cannot be a month (and a 4-digit year won't
        # match \d{1,2} followed by a boundary), so it must be a day.
        leading_field = re.match(r"\D*(\d{1,2})\b", cleaned)
        if leading_field is not None and \
                13 <= int(leading_field.group(1)) <= 31:
            parsed = parsed.replace(microsecond=3)
        return parsed

    def _check_date_format(self, parsed_date):
        # If we *ever* find a date that parses as dayfirst, treat
        # *all* transactions in this statement as dayfirst.
        if parsed_date is not None and parsed_date != "UNKNOWN" \
                and parsed_date.microsecond == 3:
            self.dayfirst = True

    #
    # Cleanup methods
    #

    def _clean_txn_list(self, txn_list):
        """Second pass: clean every transaction and index it by date.

        Transactions rejected by _clean_txn (via ValueError) are skipped.
        Sets self.start_date / self.end_date from the surviving dates.
        """
        for txn_obj in txn_list:
            try:
                txn = self._clean_txn(txn_obj)
                txn_date = txn["Date"]
                txn_date_list = self.txns_by_date.get(txn_date, [])
                txn_date_list.append(txn)
                self.txns_by_date[txn_date] = txn_date_list
            except ValueError:
                # The _clean_txn method will sometimes find transactions
                # that are inherently unclean and are unable to be purified.
                # In these cases it will reject the transaction by throwing
                # a ValueError, which signals us not to store the transaction.
                if self.debug:
                    sys.stderr.write("Skipping transaction '%s'.\n" %
                                     str(txn_obj.asDict()))

        if not self.txns_by_date:
            # Previously this fell through to an IndexError below; fail
            # loudly and descriptively instead.
            raise ValueError("No transactions survived cleanup.")

        # Sort the dates (in YYYYMMDD format) and choose the lowest
        # date as our start date, and the highest date as our end
        # date.
        date_list = sorted(self.txns_by_date)
        self.start_date = date_list[0]
        self.end_date = date_list[-1]

    def _clean_txn(self, txn_obj):
        # This is sort of the brute-force method of the converter. It
        # looks at the data we get from the bank and tries as hard as
        # possible to make best-effort guesses about what the OFX 2.0
        # standard values for the transaction should be. There's a
        # reasonable amount of guesswork in here -- some of it wise,
        # maybe some of it not. If the cleanup method determines that
        # the txn_obj shouldn't be in the data, it will raise ValueError.
        # Otherwise, it will return a transaction cleaned to the best
        # of our abilities.
        txn = txn_obj.asDict()
        self._clean_txn_date(txn)
        self._clean_txn_amount(txn)
        self._clean_txn_number(txn)
        self._clean_txn_type(txn)
        self._clean_txn_payee(txn)
        return txn

    def _clean_txn_date(self, txn):
        """Normalize txn["Date"] to YYYYMMDD (or "UNKNOWN")."""
        txn_date = txn.get("Date", "UNKNOWN").strip()
        if txn_date != "UNKNOWN":
            parsed_date = self._parse_date(txn_date, dayfirst=self.dayfirst)
            txn["Date"] = parsed_date.strftime("%Y%m%d")
        else:
            txn["Date"] = "UNKNOWN"

    def _clean_txn_amount(self, txn):
        """Normalize txn["Amount"] to a 2-decimal string where possible.

        Raises ValueError for amounts that mark the transaction as junk.
        """
        txn_amount = txn.get("Amount", "00.00")
        txn_amount2 = txn.get("Amount2", "00.00")

        # Home Depot Credit Card seems to send two transaction records for
        # each transaction. They're out of order (that is, the second record
        # is not directly after the first, nor even necessarily after it at
        # all), and the second one *sometimes* appears to be a memo field on
        # the first one (e.g., a credit card payment will show up with an
        # amount and date, and then the next transaction will have the same
        # date and a payee that reads, "Thank you for your payment!"), and
        # *sometimes* is the real payee (e.g., the first will say "Home
        # Depot" and the second will say "Seasonal/Garden"). One of the two
        # transaction records will have a transaction amount of "-", and the
        # other will have the real transaction amount. Ideally, we would
        # pull out the memo and attach it to the right transaction, but
        # unless the two transactions are the only transactions on that
        # date, there doesn't seem to be a good clue (order in statement,
        # amount, etc.) as to how to associate them. So, instead, we reject
        # the record, which means this transaction is removed from the
        # statement and not displayed to the user. The result is that for
        # Home Depot cards, sometimes we lose the memo (which isn't that big
        # a deal), and sometimes we make the memo into the payee (which
        # sucks).
        if txn_amount == "-" or txn_amount == " ":
            raise ValueError("Transaction amount is undefined.")

        # Some QIF sources put the amount in Amount2 instead, for unknown
        # reasons. Here we ignore Amount2 unless Amount is unknown.
        if txn_amount == "00.00":
            txn_amount = txn_amount2

        # Okay, now strip out whitespace padding.
        txn_amount = txn_amount.strip()

        # Some QIF files have dollar signs in the amount. Hey, why not?
        txn_amount = txn_amount.replace('$', '', 1)

        # Some QIF sources put three digits after the decimal, and the Ruby
        # code thinks that means we're in Europe. So.....let's deal with
        # that now.
        try:
            txn_amount = str(Decimal(txn_amount).quantize(Decimal('.01')))
        except (InvalidOperation, ValueError):
            # Unparseable amount (e.g. thousands separators): keep the
            # stripped original rather than dropping the transaction.
            pass

        txn["Amount"] = txn_amount

    def _clean_txn_number(self, txn):
        """Drop bogus check numbers; flag real ones as CHECK transactions."""
        txn_number = txn.get("Number", "UNKNOWN").strip()

        # Clean up bad check number behavior
        all_digits = re.compile(r"\d+")
        if txn_number == "N/A":
            # Get rid of brain-dead Chase check number "N/A"s
            del txn["Number"]
        elif txn_number.startswith("XXXX-XXXX-XXXX"):
            # Home Depot credit cards throw THE CREDIT CARD NUMBER
            # into the check number field. Oy! At least they mask
            # the first twelve digits, so we know they're insane.
            del txn["Number"]
        elif txn_number != "UNKNOWN" and self.accttype == "CREDITCARD":
            # Several other credit card companies (MBNA, CapitalOne)
            # seem to use the number field as a transaction ID. Get
            # rid of this.
            del txn["Number"]
        elif txn_number == "0000000000" and self.accttype != "CREDITCARD":
            # There's some bank that puts "N0000000000" in every non-check
            # transaction. (They do use normal check numbers for checks.)
            del txn["Number"]
        elif txn_number != "UNKNOWN" and all_digits.search(txn_number):
            # Washington Mutual doesn't indicate a CHECK transaction
            # when a check number is present.
            txn["Type"] = "CHECK"

    def _clean_txn_type(self, txn):
        """Infer txn["Type"] from the Number, Payee, or Memo fields.

        NOTE(review): relies on self.txn_types, a marker->OFX-type mapping
        that is not defined in this chunk — confirm it is set elsewhere.
        """
        txn_amount = txn.get("Amount", "UNKNOWN")
        txn_payee = txn.get("Payee", "UNKNOWN")
        txn_memo = txn.get("Memo", "UNKNOWN")
        txn_number = txn.get("Number", "UNKNOWN")
        txn_sign = self._txn_sign(txn_amount)

        # Try to figure out the transaction type from the Payee or
        # Memo field.
        for typestr in self.txn_types:
            if txn_number == typestr:
                # US Bank sends "DEBIT" or "CREDIT" as a check number
                # on credit card transactions.
                txn["Type"] = self.txn_types[typestr]
                del txn["Number"]
                break
            elif txn_payee.startswith(typestr + "/") or \
                    txn_memo.startswith(typestr + "/") or \
                    txn_memo == typestr or txn_payee == typestr:
                if typestr == "ACH" and txn_sign == "credit":
                    txn["Type"] = "DIRECTDEP"
                elif typestr == "ACH" and txn_sign == "debit":
                    txn["Type"] = "DIRECTDEBIT"
                else:
                    txn["Type"] = self.txn_types[typestr]
                break

    def _clean_txn_payee(self, txn):
        """Fill in a meaningful Payee (and Type) when the source lacks one."""
        txn_payee = txn.get("Payee", "UNKNOWN")
        txn_memo = txn.get("Memo", "UNKNOWN")
        txn_number = txn.get("Number", "UNKNOWN")
        txn_type = txn.get("Type", "UNKNOWN")
        txn_amount = txn.get("Amount", "UNKNOWN")
        txn_sign = self._txn_sign(txn_amount)

        # Try to fill in the payee field with some meaningful value.
        if txn_payee == "UNKNOWN":
            if txn_number != "UNKNOWN" and (self.accttype == "CHECKING" or
                                            self.accttype == "SAVINGS"):
                txn["Payee"] = "Check #%s" % txn_number
                txn["Type"] = "CHECK"
            elif txn_type == "INT" and txn_sign == "debit":
                txn["Payee"] = "Interest paid"
            elif txn_type == "INT" and txn_sign == "credit":
                txn["Payee"] = "Interest earned"
            elif txn_type == "ATM" and txn_sign == "debit":
                txn["Payee"] = "ATM Withdrawal"
            elif txn_type == "ATM" and txn_sign == "credit":
                txn["Payee"] = "ATM Deposit"
            elif txn_type == "POS" and txn_sign == "debit":
                txn["Payee"] = "Point of Sale Payment"
            elif txn_type == "POS" and txn_sign == "credit":
                txn["Payee"] = "Point of Sale Credit"
            elif txn_memo != "UNKNOWN":
                txn["Payee"] = txn_memo
            # Down here, we have no payee, no memo, no check number,
            # and no type. Who knows what this stuff is.
            elif txn_type == "UNKNOWN" and txn_sign == "debit":
                txn["Payee"] = "Other Debit"
                txn["Type"] = "DEBIT"
            elif txn_type == "UNKNOWN" and txn_sign == "credit":
                txn["Payee"] = "Other Credit"
                txn["Type"] = "CREDIT"

        # Make sure the transaction type has some valid value.
        # (`has_key` was removed in Python 3; `in` is equivalent.)
        if "Type" not in txn and txn_sign == "debit":
            txn["Type"] = "DEBIT"
        elif "Type" not in txn and txn_sign == "credit":
            txn["Type"] = "CREDIT"

    def _txn_sign(self, txn_amount):
        # Is this a credit or a debit?
        if txn_amount.startswith("-"):
            return "debit"
        else:
            return "credit"

    #
    # Conversion methods
    #

    def to_ofx102(self):
        """Render the cleaned statement as an OFX/1.02 document string."""
        if self.debug: sys.stderr.write("Making OFX/1.02.\n")
        return DOCUMENT(self._ofx_header(),
                        OFX(self._ofx_signon(),
                            self._ofx_stmt()))

    def to_xml(self):
        """Render the statement as OFX/2.0 XML (via an OFX/1.02 round trip)."""
        ofx102 = self.to_ofx102()

        if self.debug:
            sys.stderr.write(ofx102 + "\n")
            sys.stderr.write("Parsing OFX/1.02.\n")

        response = ofx.Response(ofx102)  # , debug=self.debug)

        if self.debug: sys.stderr.write("Making OFX/2.0.\n")

        # Propagate the day-first guess so dates re-parse consistently.
        if self.dayfirst:
            date_format = "DD/MM/YY"
        else:
            date_format = "MM/DD/YY"

        xml = response.as_xml(original_format="QIF", date_format=date_format)

        return xml
| wesabe/fixofx | lib/ofxtools/csv_converter.py | Python | apache-2.0 | 14,873 |
from abc import ABCMeta, abstractmethod, abstractproperty
from contextlib import contextmanager
from functools import wraps
import gzip
from inspect import getargspec
from itertools import (
combinations,
count,
product,
)
import operator
import os
from os.path import abspath, dirname, join, realpath
import shutil
from sys import _getframe
import tempfile
from logbook import TestHandler
from mock import patch
from nose.tools import nottest
from numpy.testing import assert_allclose, assert_array_equal
import pandas as pd
from six import itervalues, iteritems, with_metaclass
from six.moves import filter, map
from sqlalchemy import create_engine
from testfixtures import TempDirectory
from toolz import concat, curry
from catalyst.assets import AssetFinder, AssetDBWriter
from catalyst.assets.synthetic import make_simple_equity_info
from catalyst.data.data_portal import DataPortal
from catalyst.data.loader import get_benchmark_filename, INDEX_MAPPING
from catalyst.data.minute_bars import (
BcolzMinuteBarReader,
BcolzMinuteBarWriter,
US_EQUITIES_MINUTES_PER_DAY
)
from catalyst.data.us_equity_pricing import (
BcolzDailyBarReader,
BcolzDailyBarWriter,
SQLiteAdjustmentWriter,
)
from catalyst.finance.blotter import Blotter
from catalyst.finance.trading import TradingEnvironment
from catalyst.finance.order import ORDER_STATUS
from catalyst.lib.labelarray import LabelArray
from catalyst.pipeline.data import USEquityPricing
from catalyst.pipeline.engine import SimplePipelineEngine
from catalyst.pipeline.factors import CustomFactor
from catalyst.pipeline.loaders.testing import make_seeded_random_loader
from catalyst.utils import security_list
from catalyst.utils.calendars import get_calendar
from catalyst.utils.input_validation import expect_dimensions
from catalyst.utils.numpy_utils import as_column, isnat
from catalyst.utils.pandas_utils import timedelta_to_integral_seconds
from catalyst.utils.paths import ensure_directory
from catalyst.utils.sentinel import sentinel
import numpy as np
from numpy import float64
# The Unix epoch as a timezone-aware UTC Timestamp; reference point for
# str_to_seconds below.
EPOCH = pd.Timestamp(0, tz='UTC')
def seconds_to_timestamp(seconds):
    """Return the given POSIX seconds value as a tz-aware UTC Timestamp."""
    return pd.to_datetime(seconds, unit='s', utc=True)
def to_utc(time_str):
    """Convert a string in US/Eastern time to UTC"""
    eastern = pd.Timestamp(time_str, tz='US/Eastern')
    return eastern.tz_convert('UTC')
def str_to_seconds(s):
    """
    Convert a pandas-intelligible string to (integer) seconds since UTC.
    >>> from pandas import Timestamp
    >>> (Timestamp('2014-01-01') - Timestamp(0)).total_seconds()
    1388534400.0
    >>> str_to_seconds('2014-01-01')
    1388534400
    """
    delta = pd.Timestamp(s, tz='UTC') - EPOCH
    return timedelta_to_integral_seconds(delta)
def drain_catalyst(test, catalyst):
    """Run a catalyst simulation iterable to exhaustion.

    Parameters
    ----------
    test : unittest.TestCase
        Unused here; kept for signature compatibility with existing callers.
    catalyst : iterable
        Iterable of performance-packet dicts produced by the simulation.

    Returns
    -------
    (list, int)
        All packets in order, and the total number of transactions seen
        across every packet that carried a 'daily_perf' entry.
    """
    output = []
    transaction_count = 0
    # (The original also kept an unused message counter; removed.)
    for update in catalyst:
        output.append(update)
        if 'daily_perf' in update:
            transaction_count += \
                len(update['daily_perf']['transactions'])
    return output, transaction_count
def check_algo_results(test,
                       results,
                       expected_transactions_count=None,
                       expected_order_count=None,
                       expected_positions_count=None,
                       sid=None):
    """Assert summary counts over an algorithm's results packets."""
    if expected_transactions_count is not None:
        all_txns = [txn for period in results["transactions"]
                    for txn in period]
        test.assertEqual(expected_transactions_count, len(all_txns))

    if expected_positions_count is not None:
        raise NotImplementedError

    if expected_order_count is not None:
        # Orders are re-emitted into perf packets every time a txn fills
        # them, so de-duplicate on order id before counting.
        unique_ids = {order['id']
                      for period in results["orders"]
                      for order in period}
        test.assertEqual(expected_order_count, len(unique_ids))
def flatten_list(nested):
    """Flatten one level of nesting: [[a, b], [c]] -> [a, b, c].

    Note: the parameter was previously named ``list``, shadowing the
    builtin; renamed (all known callers pass it positionally).
    """
    return [item for sublist in nested for item in sublist]
def assert_single_position(test, catalyst):
    """Drain *catalyst* and assert the run ends with exactly one position.

    Checks the transaction count against the test's config, that every
    order reached FILLED status, and that the final day's portfolio holds
    a single position in the configured sid. Returns (output, txn_count).
    """
    output, txn_count = drain_catalyst(test, catalyst)
    config = test.catalyst_test_config

    if 'expected_transactions' in config:
        test.assertEqual(
            config['expected_transactions'],
            txn_count
        )
    else:
        test.assertEqual(
            config['order_count'],
            txn_count
        )

    # The final message is the risk report; the second to last is the
    # final day's results, whose positions entry is a list of dicts.
    closing_positions = output[-2]['daily_perf']['positions']

    # Confirm that all orders were filled: replay every packet, keeping
    # only the newest version of each order (orders are re-emitted as
    # they are updated), then check the status on all of them.
    latest_orders = {}
    for update in output:
        if 'daily_perf' in update:
            daily = update['daily_perf']
            if 'orders' in daily:
                for order in daily['orders']:
                    latest_orders[order['id']] = order

    for order in itervalues(latest_orders):
        test.assertEqual(
            order['status'],
            ORDER_STATUS.FILLED,
            "")

    test.assertEqual(
        len(closing_positions),
        1,
        "Portfolio should have one position."
    )

    sid = config['sid']
    test.assertEqual(
        closing_positions[0]['sid'],
        sid,
        "Portfolio should have one position in " + str(sid)
    )
    return output, txn_count
@contextmanager
def security_list_copy():
    """Run with security_list.SECURITY_LISTS_DIR pointed at a scratch copy.

    Copies every list subdirectory into a temp dir, patches the module to
    use it (also setting a ``using_copy`` marker attribute), and removes
    the scratch copy on exit.
    """
    source_dir = security_list.SECURITY_LISTS_DIR
    scratch_dir = tempfile.mkdtemp()
    try:
        for entry in os.listdir(source_dir):
            shutil.copytree(os.path.join(source_dir, entry),
                            os.path.join(scratch_dir, entry))
        with patch.object(security_list, 'SECURITY_LISTS_DIR', scratch_dir), \
                patch.object(security_list, 'using_copy', True,
                             create=True):
            yield
    finally:
        shutil.rmtree(scratch_dir, True)
def add_security_data(adds, deletes):
    """Write `add`/`delete` symbol files into the leveraged-ETF list dir.

    Refuses to run outside the ``security_list_copy`` context (detected via
    the ``using_copy`` marker) so the real on-disk data is never mutated.
    """
    if not hasattr(security_list, 'using_copy'):
        raise Exception('add_security_data must be used within '
                        'security_list_copy context')
    directory = os.path.join(
        security_list.SECURITY_LISTS_DIR,
        "leveraged_etf_list/20150127/20150125"
    )
    if not os.path.exists(directory):
        os.makedirs(directory)

    def _write_symbols(filename, symbols):
        # One symbol per line, newline-terminated.
        with open(os.path.join(directory, filename), 'w') as f:
            for sym in symbols:
                f.write(sym)
                f.write('\n')

    _write_symbols("delete", deletes)
    _write_symbols("add", adds)
def all_pairs_matching_predicate(values, pred):
    """
    Return an iterator of all pairs, (v0, v1) from values such that
    `pred(v0, v1) == True`
    Parameters
    ----------
    values : iterable
    pred : function
    Returns
    -------
    pairs_iterator : generator
        Generator yielding pairs matching `pred`.
    Examples
    --------
    >>> from catalyst.testing import all_pairs_matching_predicate
    >>> from operator import eq, lt
    >>> list(all_pairs_matching_predicate(range(5), eq))
    [(0, 0), (1, 1), (2, 2), (3, 3), (4, 4)]
    >>> list(all_pairs_matching_predicate("abcd", lt))
    [('a', 'b'), ('a', 'c'), ('a', 'd'), ('b', 'c'), ('b', 'd'), ('c', 'd')]
    """
    all_pairs = product(values, repeat=2)
    return (pair for pair in all_pairs if pred(*pair))
def product_upper_triangle(values, include_diagonal=False):
    """
    Return an iterator over pairs, (v0, v1), drawn from values.
    If `include_diagonal` is True, returns all pairs such that v0 <= v1.
    If `include_diagonal` is False, returns all pairs such that v0 < v1.
    """
    keep = operator.le if include_diagonal else operator.lt
    # Filtered cartesian square, i.e. the (strict) upper triangle.
    return (pair for pair in product(values, repeat=2) if keep(*pair))
def all_subindices(index):
    """
    Return all valid sub-indices of a pandas Index.
    """
    # combinations(range(n + 1), 2) yields exactly the (start, stop) pairs
    # with start < stop, in the same lexicographic order as the original
    # upper-triangle helper produced.
    bounds = range(len(index) + 1)
    return (index[lo:hi] for lo, hi in combinations(bounds, 2))
def chrange(start, stop):
    """
    Construct an iterable of length-1 strings beginning with `start` and ending
    with `stop`.
    Parameters
    ----------
    start : str
        The first character.
    stop : str
        The last character.
    Returns
    -------
    chars: iterable[str]
        Iterable of strings beginning with start and ending with stop.
    Examples
    --------
    >>> chrange('A', 'C')
    ['A', 'B', 'C']
    """
    return [chr(code) for code in range(ord(start), ord(stop) + 1)]
def make_trade_data_for_asset_info(dates,
                                   asset_info,
                                   price_start,
                                   price_step_by_date,
                                   price_step_by_sid,
                                   volume_start,
                                   volume_step_by_date,
                                   volume_step_by_sid,
                                   frequency,
                                   writer=None):
    """
    Convert the asset info dataframe into a dataframe of trade data for each
    sid, and write to the writer if provided. Write NaNs for locations where
    assets did not exist. Return a dict of the dataframes, keyed by sid.
    """
    trade_data = {}
    sids = asset_info.index
    # Prices form a 2-D ramp: each sid column is offset by
    # price_step_by_sid, each date row advances by price_step_by_date,
    # all anchored at price_start.
    price_sid_deltas = np.arange(len(sids), dtype=float64) * price_step_by_sid
    price_date_deltas = (np.arange(len(dates), dtype=float64) *
                         price_step_by_date)
    # as_column (project helper) shapes the date deltas so broadcasting
    # yields a (num_dates, num_sids) matrix.
    prices = (price_sid_deltas + as_column(price_date_deltas)) + price_start
    # Volumes are built the same way, but integer-valued.
    volume_sid_deltas = np.arange(len(sids)) * volume_step_by_sid
    volume_date_deltas = np.arange(len(dates)) * volume_step_by_date
    volumes = volume_sid_deltas + as_column(volume_date_deltas) + volume_start
    for j, sid in enumerate(sids):
        start_date, end_date = asset_info.loc[sid, ['start_date', 'end_date']]
        # Normalize here so that we still generate non-NaN values on the
        # minutes for an asset's last trading day.
        for i, date in enumerate(dates.normalize()):
            if not (start_date <= date <= end_date):
                # NOTE(review): despite the docstring's mention of NaNs,
                # out-of-lifetime cells are zeroed here — confirm intent.
                prices[i, j] = 0
                volumes[i, j] = 0
        # All four OHLC fields intentionally share the same price ramp.
        df = pd.DataFrame(
            {
                "open": prices[:, j],
                "high": prices[:, j],
                "low": prices[:, j],
                "close": prices[:, j],
                "volume": volumes[:, j],
            },
            index=dates,
        )
        if writer:
            writer.write_sid(sid, df)
        trade_data[sid] = df
    return trade_data
def check_allclose(actual,
                   desired,
                   rtol=1e-07,
                   atol=0,
                   err_msg='',
                   verbose=True):
    """
    Wrapper around np.testing.assert_allclose that also verifies that inputs
    are ndarrays.
    See Also
    --------
    np.assert_allclose
    """
    # Require the two inputs to be of exactly the same type before
    # delegating the element-wise comparison to numpy.
    if type(actual) is not type(desired):
        raise AssertionError("%s != %s" % (type(actual), type(desired)))
    return assert_allclose(actual, desired, rtol=rtol, atol=atol,
                           err_msg=err_msg, verbose=verbose)
def check_arrays(x, y, err_msg='', verbose=True, check_dtypes=True):
    """
    Wrapper around np.testing.assert_array_equal that also verifies that inputs
    are ndarrays.
    See Also
    --------
    np.assert_array_equal
    """
    assert type(x) == type(y), "{x} != {y}".format(x=type(x), y=type(y))
    assert x.dtype == y.dtype, "{x.dtype} != {y.dtype}".format(x=x, y=y)
    if isinstance(x, LabelArray):
        # LabelArrays: missing-value masks must match exactly...
        assert_array_equal(
            x.is_missing(),
            y.is_missing(),
            err_msg=err_msg,
            verbose=verbose,
        )
        # ...and then the payloads are compared as plain string arrays.
        x, y = x.as_string_array(), y.as_string_array()
    elif x.dtype.kind in 'mM':
        # datetime64/timedelta64: compare the NaT masks first, then
        # zero-fill NaTs so the element-wise comparison below cannot trip
        # on them.
        nat_x = isnat(x)
        nat_y = isnat(y)
        assert_array_equal(
            nat_x,
            nat_y,
            err_msg="NaTs not equal",
            verbose=verbose,
        )
        x = np.where(nat_x, np.zeros_like(x), x)
        y = np.where(nat_y, np.zeros_like(y), y)
    return assert_array_equal(x, y, err_msg=err_msg, verbose=verbose)
class UnexpectedAttributeAccess(Exception):
    """Raised by ExplodingObject whenever any attribute is accessed."""
    pass
class ExplodingObject(object):
    """
    Object that will raise an exception on any attribute access.
    Useful for verifying that an object is never touched during a
    function/method call.
    """
    def __getattribute__(self, name):
        # Intercepts *every* attribute lookup (not just missing attributes,
        # as __getattr__ would) and reports the offending name.
        raise UnexpectedAttributeAccess(name)
def write_minute_data(trading_calendar, tempdir, minutes, sids):
    """Write synthetic minute bars for *sids* under *tempdir*; return its path."""
    # Map the first and last minute onto their sessions to bound the store.
    start_session = trading_calendar.minute_to_session_label(
        minutes[0], direction="none"
    )
    end_session = trading_calendar.minute_to_session_label(
        minutes[-1], direction="none"
    )
    write_bcolz_minute_data(
        trading_calendar,
        trading_calendar.sessions_in_range(start_session, end_session),
        tempdir.path,
        create_minute_bar_data(minutes, sids),
    )
    return tempdir.path
def create_minute_bar_data(minutes, sids):
    """Yield (sid, OHLCV frame) pairs of deterministic minute bars.

    Bars count upward per minute; each sid adds its position in *sids* as a
    constant offset, and volume is a flat 100 + offset.
    """
    base = np.arange(len(minutes))
    for offset, sid in enumerate(sids):
        frame = pd.DataFrame(
            {
                'open': base + 10 + offset,
                'high': base + 15 + offset,
                'low': base + 8 + offset,
                'close': base + 10 + offset,
                'volume': 100 + offset,
            },
            index=minutes,
        )
        yield sid, frame
def create_daily_bar_data(sessions, sids):
    """Yield (sid, OHLCV+day frame) pairs of deterministic daily bars.

    The 'day' column carries each session's integer nanosecond value, as
    expected by the daily bcolz writer.
    """
    count = len(sessions)
    day_values = [session.value for session in sessions]
    for offset, sid in enumerate(sids):
        base = np.arange(count) + offset
        yield sid, pd.DataFrame(
            {
                "open": base + 10,
                "high": base + 15,
                "low": base + 8,
                "close": base + 10,
                "volume": base + 100,
                "day": day_values
            },
            index=sessions,
        )
def write_daily_data(tempdir, sim_params, sids, trading_calendar):
    """Write synthetic daily bars to a bcolz store; return the store's path."""
    path = os.path.join(tempdir.path, "testdaily.bcolz")
    writer = BcolzDailyBarWriter(path, trading_calendar,
                                 sim_params.start_session,
                                 sim_params.end_session)
    writer.write(create_daily_bar_data(sim_params.sessions, sids))
    return path
def create_data_portal(asset_finder, tempdir, sim_params, sids,
                       trading_calendar, adjustment_reader=None):
    """Build a DataPortal over synthetic bars, daily or minute depending on
    sim_params.data_frequency."""
    if sim_params.data_frequency == "daily":
        reader = BcolzDailyBarReader(
            write_daily_data(tempdir, sim_params, sids, trading_calendar)
        )
        return DataPortal(
            asset_finder, trading_calendar,
            first_trading_day=reader.first_trading_day,
            equity_daily_reader=reader,
            adjustment_reader=adjustment_reader
        )

    minutes = trading_calendar.minutes_in_range(
        sim_params.first_open,
        sim_params.last_close
    )
    reader = BcolzMinuteBarReader(
        write_minute_data(trading_calendar, tempdir, minutes, sids)
    )
    return DataPortal(
        asset_finder, trading_calendar,
        first_trading_day=reader.first_trading_day,
        equity_minute_reader=reader,
        adjustment_reader=adjustment_reader
    )
def write_bcolz_minute_data(trading_calendar, days, path, data):
    """Write *data* (iterable of (sid, frame)) as US-equity minute bars at *path*."""
    writer = BcolzMinuteBarWriter(
        path,
        trading_calendar,
        days[0],
        days[-1],
        US_EQUITIES_MINUTES_PER_DAY
    )
    writer.write(data)
def create_minute_df_for_asset(trading_calendar,
                               start_dt,
                               end_dt,
                               interval=1,
                               start_val=1,
                               minute_blacklist=None):
    """Build a deterministic minute OHLCV frame for one asset.

    close counts up from start_val; open/high/low are fixed offsets of it
    and volume is 100x. With interval > 1, every row except each
    interval-th one is zeroed; any minutes in minute_blacklist are zeroed
    as well.
    """
    minutes = trading_calendar.minutes_for_sessions_in_range(
        start_dt, end_dt
    )
    values = np.arange(start_val, start_val + len(minutes))
    frame = pd.DataFrame(
        {
            "open": values + 1,
            "high": values + 2,
            "low": values - 1,
            "close": values,
            "volume": values * 100,
        },
        index=minutes,
    )
    if interval > 1:
        # Zero the interval-1 rows before each kept row.
        for row in range(0, len(values), interval):
            frame[row:(row + interval - 1)] = 0
    if minute_blacklist is not None:
        for minute in minute_blacklist:
            frame.loc[minute] = 0
    return frame
def create_daily_df_for_asset(trading_calendar, start_day, end_day,
                              interval=1):
    """Build a deterministic daily OHLCV frame for one asset.

    close counts up from 2; with interval > 1, only every interval-th row
    keeps nonzero values.
    """
    sessions = trading_calendar.sessions_in_range(start_day, end_day)
    values = np.arange(len(sessions)) + 2
    frame = pd.DataFrame(
        {
            "open": values + 1,
            "high": values + 2,
            "low": values - 1,
            "close": values,
            "volume": values * 100,
        },
        index=sessions,
    )
    if interval > 1:
        # only keep every 'interval' rows
        for pos in range(len(values)):
            if (pos + 1) % interval != 0:
                for col in ("open", "high", "low", "close", "volume"):
                    frame[col].iloc[pos] = 0
    return frame
def trades_by_sid_to_dfs(trades_by_sid, index):
    """Yield (sid, OHLCV frame) pairs built from each sid's trade events."""
    for sid, trades in trades_by_sid.items():
        columns = {
            "open": [t.open_price for t in trades],
            "high": [t.high for t in trades],
            "low": [t.low for t in trades],
            "close": [t.close_price for t in trades],
            "volume": [t.volume for t in trades],
        }
        yield sid, pd.DataFrame(columns, index=index)
def create_data_portal_from_trade_history(asset_finder, trading_calendar,
                                          tempdir, sim_params, trades_by_sid):
    """Build a DataPortal whose bars are derived from explicit trade events.

    trades_by_sid maps sid -> iterable of trade objects carrying
    open_price/high/low/close_price/volume (and dt for minute bars); bars
    are written at daily or minute resolution per sim_params.data_frequency.
    """
    if sim_params.data_frequency == "daily":
        path = os.path.join(tempdir.path, "testdaily.bcolz")
        writer = BcolzDailyBarWriter(
            path, trading_calendar,
            sim_params.start_session,
            sim_params.end_session
        )
        writer.write(
            trades_by_sid_to_dfs(trades_by_sid, sim_params.sessions),
        )
        equity_daily_reader = BcolzDailyBarReader(path)
        return DataPortal(
            asset_finder, trading_calendar,
            first_trading_day=equity_daily_reader.first_trading_day,
            # BUG FIX: was `daily_reader=`; every sibling helper (and the
            # daily branch of create_data_portal) passes this reader as
            # `equity_daily_reader=`.
            equity_daily_reader=equity_daily_reader,
        )
    else:
        minutes = trading_calendar.minutes_in_range(
            sim_params.first_open,
            sim_params.last_close
        )
        length = len(minutes)
        assets = {}
        for sidint, trades in iteritems(trades_by_sid):
            opens = np.zeros(length)
            highs = np.zeros(length)
            lows = np.zeros(length)
            closes = np.zeros(length)
            volumes = np.zeros(length)
            for trade in trades:
                # Put each trade into its minute's slot. Prices are
                # multiplied by 1000 — presumably pre-scaling for the
                # minute writer's fixed-point storage (TODO confirm).
                idx = minutes.searchsorted(trade.dt)
                opens[idx] = trade.open_price * 1000
                highs[idx] = trade.high * 1000
                lows[idx] = trade.low * 1000
                closes[idx] = trade.close_price * 1000
                volumes[idx] = trade.volume
            assets[sidint] = pd.DataFrame({
                "open": opens,
                "high": highs,
                "low": lows,
                "close": closes,
                "volume": volumes,
                "dt": minutes
            }).set_index("dt")
        write_bcolz_minute_data(
            trading_calendar,
            sim_params.sessions,
            tempdir.path,
            assets
        )
        equity_minute_reader = BcolzMinuteBarReader(tempdir.path)
        return DataPortal(
            asset_finder, trading_calendar,
            first_trading_day=equity_minute_reader.first_trading_day,
            equity_minute_reader=equity_minute_reader,
        )
class FakeDataPortal(DataPortal):
    """DataPortal stub: constant spot values (volume=100, prices=1.0) and
    all-100.0 daily history windows."""

    def __init__(self, env, trading_calendar=None,
                 first_trading_day=None):
        # Default to the NYSE calendar when none is supplied.
        if trading_calendar is None:
            trading_calendar = get_calendar("NYSE")
        super(FakeDataPortal, self).__init__(env.asset_finder,
                                             trading_calendar,
                                             first_trading_day)

    def get_spot_value(self, asset, field, dt, data_frequency):
        # Fixed values regardless of asset or time.
        return 100 if field == "volume" else 1.0

    def get_history_window(self, assets, end_dt, bar_count, frequency, field,
                           data_frequency, ffill=True):
        # Only "1d" windows are supported; other frequencies fall through
        # and return None, matching the original behavior.
        if frequency == "1d":
            sessions = self.trading_calendar.all_sessions
            end_pos = sessions.searchsorted(end_dt)
            window = sessions[(end_pos - bar_count + 1):(end_pos + 1)]
            return pd.DataFrame(
                np.full((bar_count, len(assets)), 100.0),
                index=window,
                columns=assets
            )
class FetcherDataPortal(DataPortal):
    """
    Mock dataportal that returns fake data for history and non-fetcher
    spot value.
    """
    def __init__(self, asset_finder, trading_calendar, first_trading_day=None):
        super(FetcherDataPortal, self).__init__(asset_finder, trading_calendar,
                                                first_trading_day)
    def get_spot_value(self, asset, field, dt, data_frequency):
        # if this is a fetcher field, exercise the regular code path
        if self._is_extra_source(asset, field, self._augmented_sources_map):
            return super(FetcherDataPortal, self).get_spot_value(
                asset, field, dt, data_frequency)
        # otherwise just return a fixed value: the asset's integer id
        return int(asset)
    # XXX: These aren't actually the methods that are used by the superclasses,
    # so these don't do anything, and this class will likely produce unexpected
    # results for history().
    def _get_daily_window_for_sid(self, asset, field, days_in_window,
                                  extra_slot=True):
        # Dummy ramp 0..days_in_window-1; never consulted by the superclass.
        return np.arange(days_in_window, dtype=np.float64)
    def _get_minute_window_for_asset(self, asset, field, minutes_for_window):
        # Dummy ramp 0..minutes_for_window-1; never consulted either.
        return np.arange(minutes_for_window, dtype=np.float64)
class tmp_assets_db(object):
    """Create a temporary assets sqlite database.
    This is meant to be used as a context manager.
    Parameters
    ----------
    url : string
        The URL for the database connection.
    **frames
        The frames to pass to the AssetDBWriter.
        By default this maps equities:
        ('A', 'B', 'C') -> map(ord, 'ABC')
    See Also
    --------
    empty_assets_db
    tmp_asset_finder
    """
    # Sentinel distinguishing "no equities argument" from an explicit None.
    _default_equities = sentinel('_default_equities')

    def __init__(self,
                 url='sqlite:///:memory:',
                 equities=_default_equities,
                 **frames):
        self._url = url
        if equities is self._default_equities:
            equities = make_simple_equity_info(
                list(map(ord, 'ABC')),
                pd.Timestamp(0),
                pd.Timestamp('2015'),
            )
        frames['equities'] = equities
        self._frames = frames
        # Engine handle: created in __enter__, disposed in __exit__.
        # (The original assigned this twice; once is enough.)
        self._eng = None

    def __enter__(self):
        self._eng = eng = create_engine(self._url)
        AssetDBWriter(eng).write(**self._frames)
        return eng

    def __exit__(self, *excinfo):
        assert self._eng is not None, '_eng was not set in __enter__'
        self._eng.dispose()
        self._eng = None
def empty_assets_db():
    """Context manager for creating an empty assets db.
    See Also
    --------
    tmp_assets_db
    """
    # equities=None suppresses the default synthetic 'ABC' equities frame.
    return tmp_assets_db(equities=None)
class tmp_asset_finder(tmp_assets_db):
    """Create a temporary asset finder using an in memory sqlite db.
    Parameters
    ----------
    url : string
        The URL for the database connection.
    finder_cls : type, optional
        The type of asset finder to create from the assets db.
    **frames
        Forwarded to ``tmp_assets_db``.
    See Also
    --------
    tmp_assets_db
    """
    def __init__(self,
                 url='sqlite:///:memory:',
                 finder_cls=AssetFinder,
                 **frames):
        self._finder_cls = finder_cls
        super(tmp_asset_finder, self).__init__(url=url, **frames)
    def __enter__(self):
        # Wrap the engine produced by tmp_assets_db.__enter__ in the
        # configured finder class, so the context yields a finder.
        return self._finder_cls(super(tmp_asset_finder, self).__enter__())
def empty_asset_finder():
    """Context manager for creating an empty asset finder.
    See Also
    --------
    empty_assets_db
    tmp_assets_db
    tmp_asset_finder
    """
    # equities=None suppresses the default synthetic 'ABC' equities frame.
    return tmp_asset_finder(equities=None)
class tmp_trading_env(tmp_asset_finder):
    """Create a temporary trading environment.
    Parameters
    ----------
    load : callable, optional
        Function that returns benchmark returns and treasury curves.
    finder_cls : type, optional
        The type of asset finder to create from the assets db.
    **frames
        Forwarded to ``tmp_assets_db``.
    See Also
    --------
    empty_trading_env
    tmp_asset_finder
    """
    def __init__(self, load=None, *args, **kwargs):
        super(tmp_trading_env, self).__init__(*args, **kwargs)
        self._load = load
    def __enter__(self):
        # NOTE(review): tmp_asset_finder.__enter__ returns a finder, whose
        # `.engine` is passed as asset_db_path — confirm TradingEnvironment
        # accepts an engine object for that parameter.
        return TradingEnvironment(
            load=self._load,
            asset_db_path=super(tmp_trading_env, self).__enter__().engine,
        )
def empty_trading_env():
    """Context manager for a TradingEnvironment over an empty assets db."""
    return tmp_trading_env(equities=None)
class SubTestFailures(AssertionError):
    """Aggregate error raised by ``subtest`` when parameterized runs fail.

    Each element of ``failures`` is a (scope, exception) pair, where scope
    maps parameter names to the values used for the failing run.
    """
    def __init__(self, *failures):
        self.failures = failures

    def __str__(self):
        # One indented entry per failure: the parameter scope on one line,
        # then the exception type and message on the next.
        # BUG FIX: the trailing comma after `self.failures` (a generator
        # expression used as a sole call argument) is a SyntaxError on
        # Python 3.7+; removed.
        return 'failures:\n %s' % '\n '.join(
            '\n '.join((
                ', '.join('%s=%r' % item for item in scope.items()),
                '%s: %s' % (type(exc).__name__, exc),
            )) for scope, exc in self.failures
        )
@nottest
def subtest(iterator, *_names):
    """
    Construct a subtest in a unittest.
    Consider using ``catalyst.testing.parameter_space`` when subtests
    are constructed over a single input or over the cross-product of multiple
    inputs.
    ``subtest`` works by decorating a function as a subtest. The decorated
    function will be run by iterating over the ``iterator`` and *unpacking the
    values into the function. If any of the runs fail, the result will be put
    into a set and the rest of the tests will be run. Finally, if any failed,
    all of the results will be dumped as one failure.
    Parameters
    ----------
    iterator : iterable[iterable]
        The iterator of arguments to pass to the function.
    *name : iterator[str]
        The names to use for each element of ``iterator``. These will be used
        to print the scope when a test fails. If not provided, it will use the
        integer index of the value as the name.
    Examples
    --------
    ::
       class MyTest(TestCase):
           def test_thing(self):
               # Example usage inside another test.
               @subtest(([n] for n in range(100000)), 'n')
               def subtest(n):
                   self.assertEqual(n % 2, 0, 'n was not even')
               subtest()
           @subtest(([n] for n in range(100000)), 'n')
           def test_decorated_function(self, n):
               # Example usage to parameterize an entire function.
               self.assertEqual(n % 2, 1, 'n was not odd')
    Notes
    -----
    We use this when we:
    * Will never want to run each parameter individually.
    * Have a large parameter space we are testing
      (see tests/utils/test_events.py).
    ``nose_parameterized.expand`` will create a test for each parameter
    combination which bloats the test output and makes the travis pages slow.
    We cannot use ``unittest2.TestCase.subTest`` because nose, pytest, and
    nose2 do not support ``addSubTest``.
    See Also
    --------
    catalyst.testing.parameter_space
    """
    def dec(f):
        @wraps(f)
        def wrapped(*args, **kwargs):
            # Rebind locally so the integer-names fallback below doesn't
            # clobber the closed-over _names for subsequent calls.
            names = _names
            failures = []
            # Each scope is one parameter set, appended to the caller's
            # positional args.
            for scope in iterator:
                scope = tuple(scope)
                try:
                    f(*args + scope, **kwargs)
                except Exception as e:
                    # No explicit names given: label parameters by position.
                    if not names:
                        names = count()
                    failures.append((dict(zip(names, scope)), e))
            # Report every failing scope at once rather than stopping at
            # the first.
            if failures:
                raise SubTestFailures(*failures)
        return wrapped
    return dec
class MockDailyBarReader(object):
    """Minimal stand-in for a daily bar reader.

    Reports a constant price of 100 for every column, sid, and date,
    which keeps adjustment-ratio math trivial in tests.
    """
    def get_value(self, col, sid, dt):
        # Constant price regardless of the query.
        return 100
def create_mock_adjustment_data(splits=None, dividends=None, mergers=None):
if splits is None:
splits = create_empty_splits_mergers_frame()
elif not isinstance(splits, pd.DataFrame):
splits = pd.DataFrame(splits)
if mergers is None:
mergers = create_empty_splits_mergers_frame()
elif not isinstance(mergers, pd.DataFrame):
mergers = pd.DataFrame(mergers)
if dividends is None:
dividends = create_empty_dividends_frame()
elif not isinstance(dividends, pd.DataFrame):
dividends = pd.DataFrame(dividends)
return splits, mergers, dividends
def create_mock_adjustments(tempdir, days, splits=None, dividends=None,
                            mergers=None):
    """Write mock adjustment data to a SQLite db and return its path.

    Parameters are normalized via ``create_mock_adjustment_data`` and
    written into ``test_adjustments.db`` under ``tempdir``.
    """
    db_path = tempdir.getpath("test_adjustments.db")
    frames = create_mock_adjustment_data(splits, dividends, mergers)
    writer = SQLiteAdjustmentWriter(db_path, MockDailyBarReader(), days)
    writer.write(*frames)
    return db_path
def assert_timestamp_equal(left, right, compare_nat_equal=True, msg=""):
    """
    Assert that two pandas Timestamp objects are the same.

    Parameters
    ----------
    left, right : pd.Timestamp
        The values to compare.
    compare_nat_equal : bool, optional
        Whether to consider `NaT` values equal. Defaults to True.
    msg : str, optional
        A message to forward to `pd.util.testing.assert_equal`.
    """
    both_nat = (left is pd.NaT) and (right is pd.NaT)
    if both_nat and compare_nat_equal:
        return
    return pd.util.testing.assert_equal(left, right, msg=msg)
def powerset(values):
    """
    Return the power set (i.e., the set of all subsets) of entries in `values`.
    """
    # Subsets of every size, from the empty set up to the full set.
    subset_sizes = range(len(values) + 1)
    return concat(combinations(values, size) for size in subset_sizes)
def to_series(knowledge_dates, earning_dates):
    """
    Helper for converting a dict of strings to a Series of datetimes.

    This is just for making the test cases more readable.
    """
    knowledge_index = pd.to_datetime(knowledge_dates)
    earning_values = pd.to_datetime(earning_dates)
    return pd.Series(index=knowledge_index, data=earning_values)
def gen_calendars(start, stop, critical_dates):
    """
    Generate calendars to use as inputs.
    """
    full_range = pd.date_range(start, stop, tz='utc')
    # Yield one calendar for every subset of critical dates removed.
    for dropped in map(list, powerset(critical_dates)):
        # Have to yield tuples.
        yield (full_range.drop(dropped),)
    # Also test with the trading calendar.
    nyse_days = get_calendar("NYSE").all_days
    yield (nyse_days[nyse_days.slice_indexer(start, stop)],)
@contextmanager
def temp_pipeline_engine(calendar, sids, random_seed, symbols=None):
    """
    A contextManager that yields a SimplePipelineEngine holding a reference to
    an AssetFinder generated via tmp_asset_finder.

    Parameters
    ----------
    calendar : pd.DatetimeIndex
        Calendar to pass to the constructed PipelineEngine.
    sids : iterable[int]
        Sids to use for the temp asset finder.
    random_seed : int
        Integer used to seed instances of SeededRandomLoader.
    symbols : iterable[str], optional
        Symbols for constructed assets. Forwarded to make_simple_equity_info.
    """
    equities = make_simple_equity_info(
        sids=sids,
        start_date=calendar[0],
        end_date=calendar[-1],
        symbols=symbols,
    )
    seeded_loader = make_seeded_random_loader(random_seed, calendar, sids)

    def get_loader(column):
        # Every pipeline column is served by the same seeded loader.
        return seeded_loader

    with tmp_asset_finder(equities=equities) as finder:
        yield SimplePipelineEngine(get_loader, calendar, finder)
def parameter_space(__fail_fast=False, **params):
    """
    Wrapper around subtest that allows passing keywords mapping names to
    iterables of values.

    The decorated test function will be called with the cross-product of all
    possible inputs

    Examples
    --------
    >>> from unittest import TestCase
    >>> class SomeTestCase(TestCase):
    ...     @parameter_space(x=[1, 2], y=[2, 3])
    ...     def test_some_func(self, x, y):
    ...         # Will be called with every possible combination of x and y.
    ...         self.assertEqual(somefunc(x, y), expected_result(x, y))

    See Also
    --------
    catalyst.testing.subtest
    """
    def decorator(f):
        argspec = getargspec(f)
        if argspec.varargs:
            raise AssertionError("parameter_space() doesn't support *args")
        if argspec.keywords:
            raise AssertionError("parameter_space() doesn't support **kwargs")
        if argspec.defaults:
            raise AssertionError("parameter_space() doesn't support defaults.")

        # Skip over implicit self.  Guard against zero-argument functions,
        # which previously raised IndexError here.
        argnames = argspec.args
        if argnames and argnames[0] == 'self':
            argnames = argnames[1:]

        extra = set(params) - set(argnames)
        if extra:
            raise AssertionError(
                "Keywords %s supplied to parameter_space() are "
                "not in function signature." % extra
            )

        unspecified = set(argnames) - set(params)
        if unspecified:
            # Fix: interpolate ``unspecified`` (previously ``extra``, which
            # is always empty on this path, hiding the missing arguments).
            raise AssertionError(
                "Function arguments %s were not "
                "supplied to parameter_space()." % unspecified
            )

        def make_param_sets():
            # Cross-product in the function's declared argument order.
            return product(*(params[name] for name in argnames))

        if __fail_fast:
            @wraps(f)
            def wrapped(self):
                for args in make_param_sets():
                    f(self, *args)
            return wrapped
        else:
            @wraps(f)
            def wrapped(*args, **kwargs):
                subtest(make_param_sets(), *argnames)(f)(*args, **kwargs)
            return wrapped
    return decorator
def create_empty_dividends_frame():
    """Return an empty dividends DataFrame with the canonical columns,
    indexed by an empty UTC DatetimeIndex.
    """
    dividend_dtype = [
        ('ex_date', 'datetime64[ns]'),
        ('pay_date', 'datetime64[ns]'),
        ('record_date', 'datetime64[ns]'),
        ('declared_date', 'datetime64[ns]'),
        ('amount', 'float64'),
        ('sid', 'int32'),
    ]
    empty = np.array([], dtype=dividend_dtype)
    return pd.DataFrame(empty, index=pd.DatetimeIndex([], tz='UTC'))
def create_empty_splits_mergers_frame():
    """Return an empty splits/mergers DataFrame with the canonical columns,
    indexed by an empty (naive) DatetimeIndex.
    """
    adjustment_dtype = [
        ('effective_date', 'int64'),
        ('ratio', 'float64'),
        ('sid', 'int64'),
    ]
    empty = np.array([], dtype=adjustment_dtype)
    return pd.DataFrame(empty, index=pd.DatetimeIndex([]))
def make_alternating_boolean_array(shape, first_value=True):
    """
    Create a 2D numpy array with the given shape containing alternating values
    of False, True, False, True,... along each row and each column.

    Parameters
    ----------
    shape : tuple[int, int]
        Shape of the array to create; must be 2-dimensional.
    first_value : bool, optional
        Value of the top-left element. Defaults to True.

    Examples
    --------
    >>> make_alternating_boolean_array((4,4))
    array([[ True, False,  True, False],
           [False,  True, False,  True],
           [ True, False,  True, False],
           [False,  True, False,  True]], dtype=bool)
    >>> make_alternating_boolean_array((4,3), first_value=False)
    array([[False,  True, False],
           [ True, False,  True],
           [False,  True, False],
           [ True, False,  True]], dtype=bool)
    """
    if len(shape) != 2:
        raise ValueError(
            'Shape must be 2-dimensional. Given shape was {}'.format(shape)
        )
    # ``np.bool`` was removed in NumPy 1.24; the builtin ``bool`` is the
    # supported spelling of the same dtype.
    alternating = np.empty(shape, dtype=bool)
    for row in alternating:
        row[::2] = first_value
        row[1::2] = not first_value
        # Flip the starting value so columns alternate as well.
        first_value = not first_value
    return alternating
def make_cascading_boolean_array(shape, first_value=True):
    """
    Create a numpy array with the given shape containing cascading boolean
    values, with `first_value` being the top-left value.

    Parameters
    ----------
    shape : tuple[int, int]
        Shape of the array to create; must be 2-dimensional.
    first_value : bool, optional
        Value of the top-left element. Defaults to True.

    Examples
    --------
    >>> make_cascading_boolean_array((4,4))
    array([[ True,  True,  True, False],
           [ True,  True, False, False],
           [ True, False, False, False],
           [False, False, False, False]], dtype=bool)
    >>> make_cascading_boolean_array((4,2))
    array([[ True, False],
           [False, False],
           [False, False],
           [False, False]], dtype=bool)
    >>> make_cascading_boolean_array((2,4))
    array([[ True,  True,  True, False],
           [ True,  True, False, False]], dtype=bool)
    """
    if len(shape) != 2:
        raise ValueError(
            'Shape must be 2-dimensional. Given shape was {}'.format(shape)
        )
    # ``np.bool`` was removed in NumPy 1.24; the builtin ``bool`` is the
    # supported spelling of the same dtype.
    cascading = np.full(shape, not first_value, dtype=bool)
    ending_col = shape[1] - 1
    for row in cascading:
        if ending_col > 0:
            # Each successive row has one fewer leading `first_value`.
            row[:ending_col] = first_value
            ending_col -= 1
        else:
            break
    return cascading
@expect_dimensions(array=2)
def permute_rows(seed, array):
    """
    Shuffle each row in ``array`` based on permutations generated by ``seed``.

    Parameters
    ----------
    seed : int
        Seed for numpy.RandomState
    array : np.ndarray[ndim=2]
        Array over which to apply permutations.
    """
    rng = np.random.RandomState(seed)
    # Apply an independent permutation along each row (axis 1).
    return np.apply_along_axis(rng.permutation, 1, array)
@nottest
def make_test_handler(testcase, *args, **kwargs):
    """
    Returns a TestHandler which will be used by the given testcase. This
    handler can be used to test log messages.

    Parameters
    ----------
    testcase: unittest.TestCase
        The test class in which the log handler will be used.
    *args, **kwargs
        Forwarded to the new TestHandler object.

    Returns
    -------
    handler: logbook.TestHandler
        The handler to use for the test case.
    """
    log_handler = TestHandler(*args, **kwargs)
    # Close the handler automatically when the test case finishes.
    testcase.addCleanup(log_handler.close)
    return log_handler
def write_compressed(path, content):
    """
    Write ``content`` (bytes) to ``path`` as a gzip-compressed file.
    """
    with gzip.open(path, 'wb') as out:
        out.write(content)
def read_compressed(path):
    """
    Read and return the decompressed contents of the gzip file at ``path``.
    """
    with gzip.open(path, 'rb') as fh:
        return fh.read()
# Absolute path of the repository root: two directories above this module.
catalyst_git_root = abspath(
    join(realpath(dirname(__file__)), '..', '..'),
)
@nottest
def test_resource_path(*path_parts):
    """Return the absolute path of a file under ``tests/resources``."""
    resources_dir = os.path.join(catalyst_git_root, 'tests', 'resources')
    return os.path.join(resources_dir, *path_parts)
@contextmanager
def patch_os_environment(remove=None, **values):
    """
    Context manager for patching the operating system environment.

    Parameters
    ----------
    remove : iterable[str], optional
        Keys to remove from ``os.environ`` for the duration of the block.
        Each key must currently be present in the environment.
    **values
        Key/value pairs to set in ``os.environ`` for the duration of the
        block.  On exit, previous values are restored and keys that were
        absent are deleted.
    """
    old_values = {}
    remove = remove or []
    for key in remove:
        old_values[key] = os.environ.pop(key)
    # Fix: ``iteritems`` was Python-2 only; ``items`` works on both.
    for key, value in values.items():
        old_values[key] = os.getenv(key)
        os.environ[key] = value
    try:
        yield
    finally:
        for old_key, old_value in old_values.items():
            if old_value is None:
                # Value was not present when we entered, so del it out if
                # it's still present.  (Fix: previously deleted the stale
                # loop variable ``key`` instead of ``old_key``.)
                try:
                    del os.environ[old_key]
                except KeyError:
                    pass
            else:
                # Restore the old value.
                os.environ[old_key] = old_value
class tmp_dir(TempDirectory, object):
    """New-style class wrapper for ``TempDirectory`` under Python 2.

    Inheriting from ``object`` makes this a new-style class so it can be
    used with a metaclass (see ``_TmpBarReader`` below).
    """
    pass
class _TmpBarReader(with_metaclass(ABCMeta, tmp_dir)):
    """A helper for tmp_bcolz_equity_minute_bar_reader and
    tmp_bcolz_equity_daily_bar_reader.

    Parameters
    ----------
    env : TradingEnvironment
        The trading env.
    days : pd.DatetimeIndex
        The days to write for.
    data : dict[int -> pd.DataFrame]
        The data to write.
    path : str, optional
        The path to the directory to write the data into. If not given, this
        will be a unique name.
    """
    @abstractproperty
    def _reader_cls(self):
        """The reader type constructed over the written data."""
        raise NotImplementedError('_reader')

    @abstractmethod
    def _write(self, env, days, path, data):
        """Write ``data`` for ``days`` into the directory at ``path``."""
        raise NotImplementedError('_write')

    def __init__(self, env, days, data, path=None):
        super(_TmpBarReader, self).__init__(path=path)
        self._env = env
        self._days = days
        self._data = data

    def __enter__(self):
        tmpdir = super(_TmpBarReader, self).__enter__()
        env = self._env
        try:
            self._write(
                env,
                self._days,
                tmpdir.path,
                self._data,
            )
            return self._reader_cls(tmpdir.path)
        except BaseException:
            # Tear down the temp directory before re-raising.  Explicit
            # ``BaseException`` keeps the same catch-everything semantics
            # as the previous bare ``except:`` while making intent clear.
            self.__exit__(None, None, None)
            raise
class tmp_bcolz_equity_minute_bar_reader(_TmpBarReader):
    """A temporary BcolzMinuteBarReader object.

    Parameters
    ----------
    env : TradingEnvironment
        The trading env.
    days : pd.DatetimeIndex
        The days to write for.
    data : iterable[(int, pd.DataFrame)]
        The data to write.
    path : str, optional
        The path to the directory to write the data into. If not given, this
        will be a unique name.

    See Also
    --------
    tmp_bcolz_equity_daily_bar_reader
    """
    # Reader type constructed by _TmpBarReader.__enter__.
    _reader_cls = BcolzMinuteBarReader
    # staticmethod: the writer function must not be bound as a method.
    _write = staticmethod(write_bcolz_minute_data)
class tmp_bcolz_equity_daily_bar_reader(_TmpBarReader):
    """A temporary BcolzDailyBarReader object.

    Parameters
    ----------
    env : TradingEnvironment
        The trading env.
    days : pd.DatetimeIndex
        The days to write for.
    data : dict[int -> pd.DataFrame]
        The data to write.
    path : str, optional
        The path to the directory to write the data into. If not given, this
        will be a unique name.

    See Also
    --------
    tmp_bcolz_equity_minute_bar_reader
    """
    # Reader type constructed by _TmpBarReader.__enter__.
    _reader_cls = BcolzDailyBarReader

    @staticmethod
    def _write(env, days, path, data):
        # Daily bars need a writer instance rather than a free function.
        BcolzDailyBarWriter(path, days).write(data)
@contextmanager
def patch_read_csv(url_map, module=pd, strict=False):
    """Patch pandas.read_csv to map lookups from url to another.

    Parameters
    ----------
    url_map : mapping[str or file-like object -> str or file-like object]
        The mapping to use to redirect read_csv calls.
    module : module, optional
        The module to patch ``read_csv`` on. By default this is ``pandas``.
        This should be set to another module if ``read_csv`` is early-bound
        like ``from pandas import read_csv`` instead of late-bound like:
        ``import pandas as pd; pd.read_csv``.
    strict : bool, optional
        If true, then this will assert that ``read_csv`` is only called with
        elements in the ``url_map``.
    """
    real_read_csv = pd.read_csv

    def _redirecting_read_csv(filepath_or_buffer, *args, **kwargs):
        # Redirect mapped paths; otherwise pass through (or fail if strict).
        if filepath_or_buffer in url_map:
            return real_read_csv(url_map[filepath_or_buffer], *args, **kwargs)
        if strict:
            raise AssertionError(
                'attempted to call read_csv on %r which not in the url map' %
                filepath_or_buffer,
            )
        return real_read_csv(filepath_or_buffer, *args, **kwargs)

    with patch.object(module, 'read_csv', _redirecting_read_csv):
        yield
def copy_market_data(src_market_data_dir, dest_root_dir):
    """Copy the SPY benchmark and index files into ``dest_root_dir``/data."""
    symbol = 'SPY'
    dest_data_dir = os.path.join(dest_root_dir, 'data')
    ensure_directory(dest_data_dir)
    for filename in (get_benchmark_filename(symbol), INDEX_MAPPING[symbol][1]):
        shutil.copyfile(
            os.path.join(src_market_data_dir, filename),
            os.path.join(dest_data_dir, filename),
        )
@curry
def ensure_doctest(f, name=None):
    """Ensure that an object gets doctested. This is useful for instances
    of objects like curry or partial which are not discovered by default.

    Parameters
    ----------
    f : any
        The thing to doctest.
    name : str, optional
        The name to use in the doctest function mapping. If this is None,
        Then ``f.__name__`` will be used.

    Returns
    -------
    f : any
        ``f`` unchanged.
    """
    # Register ``f`` in the caller's module-level ``__test__`` mapping so
    # the doctest runner discovers it.  _getframe(2) skips this frame and
    # the ``curry`` wrapper to reach the caller's globals.
    # NOTE(review): the frame depth is sensitive to how ``curry`` invokes
    # the wrapped function — verify if the toolz version changes.
    _getframe(2).f_globals.setdefault('__test__', {})[
        f.__name__ if name is None else name
    ] = f
    return f
class RecordBatchBlotter(Blotter):
    """Blotter that tracks how its batch_order method was called.
    """
    def __init__(self, data_frequency):
        super(RecordBatchBlotter, self).__init__(data_frequency)
        # List of (args, kwargs) tuples, one per batch_order invocation.
        self.order_batch_called = []

    def batch_order(self, *args, **kwargs):
        # Record the call, then defer to the real implementation.
        self.order_batch_called.append((args, kwargs))
        return super(RecordBatchBlotter, self).batch_order(*args, **kwargs)
####################################
# Shared factors for pipeline tests.
####################################
class AssetID(CustomFactor):
    """
    CustomFactor that returns the AssetID of each asset.

    Useful for providing a Factor that produces a different value for each
    asset.
    """
    # Single-day window with no pricing inputs required.
    window_length = 1
    inputs = ()

    def compute(self, today, assets, out):
        # ``assets`` holds the asset ids for this computation window.
        out[:] = assets
class AssetIDPlusDay(CustomFactor):
    """CustomFactor producing asset_id + day-of-month, so output varies by
    both asset and compute date.
    """
    window_length = 1
    inputs = ()

    def compute(self, today, assets, out):
        # ``today.day`` is the calendar day-of-month of the compute date.
        out[:] = assets + today.day
class OpenPrice(CustomFactor):
    """CustomFactor exposing the latest daily open price."""
    window_length = 1
    inputs = [USEquityPricing.open]

    def compute(self, today, assets, out, open):
        # ``open`` is named after the USEquityPricing.open input column and
        # intentionally shadows the builtin here.
        out[:] = open
| enigmampc/catalyst | catalyst/testing/core.py | Python | apache-2.0 | 47,204 |
"""
WSGI config for websqlrunner project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at the project settings before building the application.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "websqlrunner.settings")
# Module-level WSGI callable that servers (gunicorn, mod_wsgi, ...) import.
application = get_wsgi_application()
| snava10/sqlRunner | websqlrunner/websqlrunner/wsgi.py | Python | apache-2.0 | 402 |